author | Charles-François Natali <neologix@free.fr> | 2012-03-24 09:06:23 (GMT) |
---|---|---|
committer | Charles-François Natali <neologix@free.fr> | 2012-03-24 09:06:23 (GMT) |
commit | 55bce63ea0b6da28a71d4a014bf58b39b91b4e3c (patch) | |
tree | 1da6e0f42a7253dd062e70fc1ae814295e59dcc2 /Lib/test | |
parent | 226ed7ecbd47e84bd1e71c967c8130027c02f54f (diff) | |
Issue #14154: Reimplement the bigmem test memory watchdog as a subprocess.
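In outline, the new watchdog is a separate process that inherits an already-open `/proc/<pid>/statm` file as its stdin and rereads it once a second; the parent only has to terminate it when the test finishes. A condensed sketch of that pattern, not the patch itself (`watch.py` below is a placeholder for the watchdog script, which the patch adds as `Lib/test/memory_watchdog.py`):

```python
import os
import subprocess
import sys

# Open our own /proc/<pid>/statm and hand the open file to the watchdog
# child as its stdin, so the child never looks the PID up again and a
# recycled PID cannot point it at another process.
procfile = '/proc/{pid}/statm'.format(pid=os.getpid())
f = open(procfile, 'r')
# "watch.py" is a placeholder for the watchdog script added by the patch.
watchdog = subprocess.Popen([sys.executable, 'watch.py'],
                            stdin=f, stderr=subprocess.DEVNULL)
f.close()  # the child keeps its own inherited descriptor

# ... run the memory-hungry test here ...

watchdog.terminate()
watchdog.wait()
```

Passing the open file rather than the PID is what closes the PID-recycling race noted in the new module's comments.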
Diffstat (limited to 'Lib/test')
-rw-r--r-- | Lib/test/memory_watchdog.py | 28 |
-rw-r--r-- | Lib/test/support.py         | 66 |
2 files changed, 38 insertions, 56 deletions
diff --git a/Lib/test/memory_watchdog.py b/Lib/test/memory_watchdog.py
new file mode 100644
index 0000000..88cca8d
--- /dev/null
+++ b/Lib/test/memory_watchdog.py
@@ -0,0 +1,28 @@
+"""Memory watchdog: periodically read the memory usage of the main test process
+and print it out, until terminated."""
+# stdin should refer to the process' /proc/<PID>/statm: we don't pass the
+# process' PID to avoid a race condition in case of - unlikely - PID recycling.
+# If the process crashes, reading from the /proc entry will fail with ESRCH.
+
+
+import os
+import sys
+import time
+
+
+try:
+    page_size = os.sysconf('SC_PAGESIZE')
+except (ValueError, AttributeError):
+    try:
+        page_size = os.sysconf('SC_PAGE_SIZE')
+    except (ValueError, AttributeError):
+        page_size = 4096
+
+while True:
+    sys.stdin.seek(0)
+    statm = sys.stdin.read()
+    data = int(statm.split()[5])
+    sys.stdout.write(" ... process data size: {data:.1f}G\n"
+                     .format(data=data * page_size / (1024 ** 3)))
+    sys.stdout.flush()
+    time.sleep(1)
diff --git a/Lib/test/support.py b/Lib/test/support.py
index 0cdf791..a1ab09c 100644
--- a/Lib/test/support.py
+++ b/Lib/test/support.py
@@ -36,20 +36,10 @@ except ImportError:
     multiprocessing = None
 
 try:
-    import faulthandler
-except ImportError:
-    faulthandler = None
-
-try:
     import zlib
 except ImportError:
     zlib = None
 
-try:
-    import fcntl
-except ImportError:
-    fcntl = None
-
 __all__ = [
     "Error", "TestFailed", "ResourceDenied", "import_module", "verbose",
     "use_resources", "max_memuse", "record_original_stdout",
@@ -1151,62 +1141,26 @@ class _MemoryWatchdog:
     def __init__(self):
         self.procfile = '/proc/{pid}/statm'.format(pid=os.getpid())
         self.started = False
-        self.thread = None
-        try:
-            self.page_size = os.sysconf('SC_PAGESIZE')
-        except (ValueError, AttributeError):
-            try:
-                self.page_size = os.sysconf('SC_PAGE_SIZE')
-            except (ValueError, AttributeError):
-                self.page_size = 4096
-
-    def consumer(self, fd):
-        HEADER = "l"
-        header_size = struct.calcsize(HEADER)
-        try:
-            while True:
-                header = os.read(fd, header_size)
-                if len(header) < header_size:
-                    # Pipe closed on other end
-                    break
-                data_len, = struct.unpack(HEADER, header)
-                data = os.read(fd, data_len)
-                statm = data.decode('ascii')
-                data = int(statm.split()[5])
-                print(" ... process data size: {data:.1f}G"
-                      .format(data=data * self.page_size / (1024 ** 3)))
-        finally:
-            os.close(fd)
 
     def start(self):
-        if not faulthandler or not hasattr(faulthandler, '_file_watchdog'):
-            return
         try:
-            rfd = os.open(self.procfile, os.O_RDONLY)
+            f = open(self.procfile, 'r')
         except OSError as e:
             warnings.warn('/proc not available for stats: {}'.format(e),
                           RuntimeWarning)
             sys.stderr.flush()
             return
-        pipe_fd, wfd = os.pipe()
-        # set the write end of the pipe non-blocking to avoid blocking the
-        # watchdog thread when the consumer doesn't drain the pipe fast enough
-        if fcntl:
-            flags = fcntl.fcntl(wfd, fcntl.F_GETFL)
-            fcntl.fcntl(wfd, fcntl.F_SETFL, flags|os.O_NONBLOCK)
-        # _file_watchdog() doesn't take the GIL in its child thread, and
-        # therefore collects statistics timely
-        faulthandler._file_watchdog(rfd, wfd, 1.0)
+
+        watchdog_script = findfile("memory_watchdog.py")
+        self.mem_watchdog = subprocess.Popen([sys.executable, watchdog_script],
+                                             stdin=f, stderr=subprocess.DEVNULL)
+        f.close()
         self.started = True
-        self.thread = threading.Thread(target=self.consumer, args=(pipe_fd,))
-        self.thread.daemon = True
-        self.thread.start()
 
     def stop(self):
-        if not self.started:
-            return
-        faulthandler._cancel_file_watchdog()
-        self.thread.join()
+        if self.started:
+            self.mem_watchdog.terminate()
+            self.mem_watchdog.wait()
 
 
 def bigmemtest(size, memuse, dry_run=True):
@@ -1234,7 +1188,7 @@ def bigmemtest(size, memuse, dry_run=True):
                 "not enough memory: %.1fG minimum needed"
                 % (size * memuse / (1024 ** 3)))
 
-        if real_max_memuse and verbose and faulthandler and threading:
+        if real_max_memuse and verbose:
             print()
             print(" ... expected peak memory use: {peak:.1f}G"
                   .format(peak=size * memuse / (1024 ** 3)))
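For reference, the figure the watchdog prints comes from the sixth field of `statm`, which proc(5) documents as the data + stack segment size in pages; multiplying by the page size and dividing by 1024³ gives the gigabyte value. A minimal standalone sketch, assuming a Linux `/proc` filesystem and reading the current process's own entry for illustration:

```python
import os

# Page size for converting statm's page counts into bytes; fall back to
# 4096 when sysconf cannot report it, as the patch does.
try:
    page_size = os.sysconf('SC_PAGESIZE')
except (ValueError, AttributeError):
    page_size = 4096

# /proc/<pid>/statm fields (proc(5)): size resident shared text lib data dt;
# index 5 ("data") is the data + stack segment, counted in pages.
with open('/proc/self/statm') as f:
    fields = f.read().split()
data_pages = int(fields[5])
print(" ... process data size: {:.1f}G"
      .format(data_pages * page_size / (1024 ** 3)))
```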