author    Vinay Sajip <vinay_sajip@yahoo.co.uk>  2012-04-08 00:49:12 (GMT)
committer Vinay Sajip <vinay_sajip@yahoo.co.uk>  2012-04-08 00:49:12 (GMT)
commit    0292fa99efe267cbf3fad5725244ad874b585418 (patch)
tree      55712d394da2a7dd9f897d11477110611c4c32f8
parent    3abebca78015aad67c3aa79fe18c9b2649a04163 (diff)
Added multiprocessing example to logging cookbook.
-rw-r--r--  Doc/howto/logging-cookbook.rst  229
1 files changed, 229 insertions, 0 deletions
diff --git a/Doc/howto/logging-cookbook.rst b/Doc/howto/logging-cookbook.rst
index aa4a7df..2b5444a 100644
--- a/Doc/howto/logging-cookbook.rst
+++ b/Doc/howto/logging-cookbook.rst
@@ -1315,3 +1315,232 @@ This dictionary is passed to :func:`~logging.config.dictConfig` to put the confi
For more information about this configuration, you can see the `relevant
section <https://docs.djangoproject.com/en/1.3/topics/logging/#configuring-logging>`_
of the Django documentation.
+
+A more elaborate multiprocessing example
+----------------------------------------
+
+The following working example shows how logging can be used with
+multiprocessing, with the configuration supplied as dictionaries passed to
+:func:`~logging.config.dictConfig`. The configurations are fairly simple, but
+serve to illustrate how more complex ones could be implemented in a real
+multiprocessing scenario.
+
+In the example, the main process spawns a listener process and some worker
+processes. The main process, the listener and the workers each have their own
+configuration (the workers all share the same one), so three separate
+configurations are used in all. We can see how the main process does its own
+logging, how the workers send their events to the queue via a QueueHandler,
+and how the listener runs a QueueListener with a more complex logging
+configuration, dispatching the events received via the queue to the handlers
+specified in that configuration. These configurations are purely
+illustrative, but you should be able to adapt this example to your own
+scenario.
+
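+In outline, the pattern which the full script below elaborates looks
+something like this - a minimal sketch only, with no configuration
+dictionaries and no separate listener process, just to show the moving
+parts::
+
+    import logging
+    import logging.handlers
+    from multiprocessing import Process, Queue
+
+    def worker(q):
+        # Each worker attaches a QueueHandler to its root logger, so every
+        # record it produces is simply put on the shared queue.
+        root = logging.getLogger()
+        root.addHandler(logging.handlers.QueueHandler(q))
+        root.setLevel(logging.DEBUG)
+        root.info('Hello from a worker process')
+
+    if __name__ == '__main__':
+        q = Queue()
+        # A QueueListener running in the main process pulls records off the
+        # queue and hands them to an ordinary handler - here, the console.
+        handler = logging.StreamHandler()
+        handler.setFormatter(logging.Formatter('%(processName)-10s %(name)s %(message)s'))
+        listener = logging.handlers.QueueListener(q, handler)
+        listener.start()
+        p = Process(target=worker, args=(q,))
+        p.start()
+        p.join()
+        listener.stop()
+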
+Here's the script - the docstrings and the comments hopefully explain how it
+works::
+
+    import logging
+    import logging.config
+    import logging.handlers
+    from multiprocessing import Process, Queue, Event, current_process
+    import os
+    import random
+    import time
+
+    class MyHandler(object):
+        """
+        A simple handler for logging events. It runs in the listener process and
+        dispatches events to loggers based on the name in the received record,
+        which then get dispatched, by the logging system, to the handlers
+        configured for those loggers.
+        """
+        def handle(self, record):
+            logger = logging.getLogger(record.name)
+            # The process name is transformed just to show that it's the listener
+            # doing the logging to files and console
+            record.processName = '%s (for %s)' % (current_process().name, record.processName)
+            logger.handle(record)
+
+    def listener_process(q, stop_event, config):
+        """
+        This could be done in the main process, but is just done in a separate
+        process for illustrative purposes.
+
+        This initialises logging according to the specified configuration,
+        starts the listener and waits for the main process to signal completion
+        via the event. The listener is then stopped, and the process exits.
+        """
+        logging.config.dictConfig(config)
+        listener = logging.handlers.QueueListener(q, MyHandler())
+        listener.start()
+        if os.name == 'posix':
+            # On POSIX, the setup logger will have been configured in the
+            # parent process, but should have been disabled following the
+            # dictConfig call.
+            # On Windows, since fork isn't used, the setup logger won't
+            # exist in the child, so it would be created and the message
+            # would appear - hence the "if posix" clause.
+            logger = logging.getLogger('setup')
+            logger.critical('Should not appear, because of disabled logger ...')
+        stop_event.wait()
+        listener.stop()
+
+    def worker_process(config):
+        """
+        A number of these are spawned for the purpose of illustration. In
+        practice, they could be a heterogeneous bunch of processes rather than
+        ones which are identical to each other.
+
+        This initialises logging according to the specified configuration,
+        and logs a hundred messages with random levels to randomly selected
+        loggers.
+
+        A small sleep is added to allow other processes a chance to run. This
+        is not strictly needed, but it mixes the output from the different
+        processes a bit more than if it's left out.
+        """
+        logging.config.dictConfig(config)
+        levels = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR,
+                  logging.CRITICAL]
+        loggers = ['foo', 'foo.bar', 'foo.bar.baz',
+                   'spam', 'spam.ham', 'spam.ham.eggs']
+        if os.name == 'posix':
+            # On POSIX, the setup logger will have been configured in the
+            # parent process, but should have been disabled following the
+            # dictConfig call.
+            # On Windows, since fork isn't used, the setup logger won't
+            # exist in the child, so it would be created and the message
+            # would appear - hence the "if posix" clause.
+            logger = logging.getLogger('setup')
+            logger.critical('Should not appear, because of disabled logger ...')
+        for i in range(100):
+            lvl = random.choice(levels)
+            logger = logging.getLogger(random.choice(loggers))
+            logger.log(lvl, 'Message no. %d', i)
+            time.sleep(0.01)
+
+    def main():
+        q = Queue()
+        # The main process gets a simple configuration which prints to the console.
+        config_initial = {
+            'version': 1,
+            'formatters': {
+                'detailed': {
+                    'class': 'logging.Formatter',
+                    'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
+                }
+            },
+            'handlers': {
+                'console': {
+                    'class': 'logging.StreamHandler',
+                    'level': 'INFO',
+                },
+            },
+            'root': {
+                'level': 'DEBUG',
+                'handlers': ['console']
+            },
+        }
+        # The worker process configuration is just a QueueHandler attached to the
+        # root logger, which allows all messages to be sent to the queue.
+        # We disable existing loggers to disable the "setup" logger used in the
+        # parent process. This is needed on POSIX because the logger will
+        # be there in the child following a fork().
+        config_worker = {
+            'version': 1,
+            'disable_existing_loggers': True,
+            'handlers': {
+                'queue': {
+                    'class': 'logging.handlers.QueueHandler',
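+                    # Note that the actual queue instance is used here:
+                    # dictConfig passes configuration keys it doesn't treat
+                    # specially straight to the handler's constructor, so a
+                    # non-string value such as this Queue object can be given.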
+                    'queue': q,
+                },
+            },
+            'root': {
+                'level': 'DEBUG',
+                'handlers': ['queue']
+            },
+        }
+        # The listener process configuration shows that the full flexibility of
+        # logging configuration is available to dispatch events to handlers however
+        # you want.
+        # We disable existing loggers to disable the "setup" logger used in the
+        # parent process. This is needed on POSIX because the logger will
+        # be there in the child following a fork().
+        config_listener = {
+            'version': 1,
+            'disable_existing_loggers': True,
+            'formatters': {
+                'detailed': {
+                    'class': 'logging.Formatter',
+                    'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
+                },
+                'simple': {
+                    'class': 'logging.Formatter',
+                    'format': '%(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
+                }
+            },
+            'handlers': {
+                'console': {
+                    'class': 'logging.StreamHandler',
+                    'level': 'INFO',
+                    'formatter': 'simple',
+                },
+                'file': {
+                    'class': 'logging.FileHandler',
+                    'filename': 'mplog.log',
+                    'mode': 'w',
+                    'formatter': 'detailed',
+                },
+                'foofile': {
+                    'class': 'logging.FileHandler',
+                    'filename': 'mplog-foo.log',
+                    'mode': 'w',
+                    'formatter': 'detailed',
+                },
+                'errors': {
+                    'class': 'logging.FileHandler',
+                    'filename': 'mplog-errors.log',
+                    'mode': 'w',
+                    'level': 'ERROR',
+                    'formatter': 'detailed',
+                },
+            },
+            'loggers': {
+                'foo': {
+                    'handlers': ['foofile']
+                }
+            },
+            'root': {
+                'level': 'DEBUG',
+                'handlers': ['console', 'file', 'errors']
+            },
+        }
+        # Log some initial events, just to show that logging in the parent works
+        # normally.
+        logging.config.dictConfig(config_initial)
+        logger = logging.getLogger('setup')
+        logger.info('About to create workers ...')
+        workers = []
+        for i in range(5):
+            wp = Process(target=worker_process, name='worker %d' % (i + 1),
+                         args=(config_worker,))
+            workers.append(wp)
+            wp.start()
+            logger.info('Started worker: %s', wp.name)
+        logger.info('About to create listener ...')
+        stop_event = Event()
+        lp = Process(target=listener_process, name='listener',
+                     args=(q, stop_event, config_listener))
+        lp.start()
+        logger.info('Started listener')
+        # We now hang around for the workers to finish their work.
+        for wp in workers:
+            wp.join()
+        # Workers all done, listening can now stop.
+        # Logging in the parent still works normally.
+        logger.info('Telling listener to stop ...')
+        stop_event.set()
+        lp.join()
+        logger.info('All done.')
+
+    if __name__ == '__main__':
+        main()
+
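+When the script is run, each of the destinations configured for the listener
+should end up with the events you would expect: ``mplog.log`` with everything
+logged by the workers at ``DEBUG`` level and above, ``mplog-foo.log`` with
+just the messages from the ``foo`` logger hierarchy, ``mplog-errors.log``
+with only ``ERROR`` and ``CRITICAL`` messages, and the console with messages
+of severity ``INFO`` and above - both the main process's own 'setup' messages
+and, relayed by the listener, the workers' messages.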