From d07f72533cec8b1a19e2909ed8e277928da9c467 Mon Sep 17 00:00:00 2001 From: William Deegan Date: Mon, 10 Oct 2022 15:39:33 -0700 Subject: move Taskmaster and Jobs to SCons.Taskmaster --- SCons/Job.py | 439 ------------ SCons/JobTests.py | 575 ---------------- SCons/SConf.py | 4 +- SCons/Script/Main.py | 6 +- SCons/Taskmaster.py | 1059 ----------------------------- SCons/Taskmaster/Job.py | 439 ++++++++++++ SCons/Taskmaster/JobTests.py | 574 ++++++++++++++++ SCons/Taskmaster/TaskmasterTests.py | 1257 +++++++++++++++++++++++++++++++++++ SCons/Taskmaster/__init__.py | 1059 +++++++++++++++++++++++++++++ SCons/TaskmasterTests.py | 1257 ----------------------------------- 10 files changed, 3334 insertions(+), 3335 deletions(-) delete mode 100644 SCons/Job.py delete mode 100644 SCons/JobTests.py delete mode 100644 SCons/Taskmaster.py create mode 100644 SCons/Taskmaster/Job.py create mode 100644 SCons/Taskmaster/JobTests.py create mode 100644 SCons/Taskmaster/TaskmasterTests.py create mode 100644 SCons/Taskmaster/__init__.py delete mode 100644 SCons/TaskmasterTests.py diff --git a/SCons/Job.py b/SCons/Job.py deleted file mode 100644 index b398790..0000000 --- a/SCons/Job.py +++ /dev/null @@ -1,439 +0,0 @@ -# MIT License -# -# Copyright The SCons Foundation -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY -# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -"""Serial and Parallel classes to execute build tasks. - -The Jobs class provides a higher level interface to start, -stop, and wait on jobs. -""" - -import SCons.compat - -import os -import signal - -import SCons.Errors -import SCons.Warnings - -# The default stack size (in kilobytes) of the threads used to execute -# jobs in parallel. -# -# We use a stack size of 256 kilobytes. The default on some platforms -# is too large and prevents us from creating enough threads to fully -# parallelized the build. For example, the default stack size on linux -# is 8 MBytes. - -explicit_stack_size = None -default_stack_size = 256 - -interrupt_msg = 'Build interrupted.' - - -class InterruptState: - def __init__(self): - self.interrupted = False - - def set(self): - self.interrupted = True - - def __call__(self): - return self.interrupted - - -class Jobs: - """An instance of this class initializes N jobs, and provides - methods for starting, stopping, and waiting on all N jobs. - """ - - def __init__(self, num, taskmaster): - """ - Create 'num' jobs using the given taskmaster. 
- - If 'num' is 1 or less, then a serial job will be used, - otherwise a parallel job with 'num' worker threads will - be used. - - The 'num_jobs' attribute will be set to the actual number of jobs - allocated. If more than one job is requested but the Parallel - class can't do it, it gets reset to 1. Wrapping interfaces that - care should check the value of 'num_jobs' after initialization. - """ - - self.job = None - if num > 1: - stack_size = explicit_stack_size - if stack_size is None: - stack_size = default_stack_size - - try: - self.job = Parallel(taskmaster, num, stack_size) - self.num_jobs = num - except NameError: - pass - if self.job is None: - self.job = Serial(taskmaster) - self.num_jobs = 1 - - def run(self, postfunc=lambda: None): - """Run the jobs. - - postfunc() will be invoked after the jobs has run. It will be - invoked even if the jobs are interrupted by a keyboard - interrupt (well, in fact by a signal such as either SIGINT, - SIGTERM or SIGHUP). The execution of postfunc() is protected - against keyboard interrupts and is guaranteed to run to - completion.""" - self._setup_sig_handler() - try: - self.job.start() - finally: - postfunc() - self._reset_sig_handler() - - def were_interrupted(self): - """Returns whether the jobs were interrupted by a signal.""" - return self.job.interrupted() - - def _setup_sig_handler(self): - """Setup an interrupt handler so that SCons can shutdown cleanly in - various conditions: - - a) SIGINT: Keyboard interrupt - b) SIGTERM: kill or system shutdown - c) SIGHUP: Controlling shell exiting - - We handle all of these cases by stopping the taskmaster. It - turns out that it's very difficult to stop the build process - by throwing asynchronously an exception such as - KeyboardInterrupt. For example, the python Condition - variables (threading.Condition) and queues do not seem to be - asynchronous-exception-safe. It would require adding a whole - bunch of try/finally block and except KeyboardInterrupt all - over the place. - - Note also that we have to be careful to handle the case when - SCons forks before executing another process. In that case, we - want the child to exit immediately. - """ - def handler(signum, stack, self=self, parentpid=os.getpid()): - if os.getpid() == parentpid: - self.job.taskmaster.stop() - self.job.interrupted.set() - else: - os._exit(2) # pylint: disable=protected-access - - self.old_sigint = signal.signal(signal.SIGINT, handler) - self.old_sigterm = signal.signal(signal.SIGTERM, handler) - try: - self.old_sighup = signal.signal(signal.SIGHUP, handler) - except AttributeError: - pass - if (self.old_sigint is None) or (self.old_sigterm is None) or \ - (hasattr(self, "old_sighup") and self.old_sighup is None): - msg = "Overwritting previous signal handler which was not installed from Python. " + \ - "Will not be able to reinstate and so will return to default handler." 
- SCons.Warnings.warn(SCons.Warnings.SConsWarning, msg) - - def _reset_sig_handler(self): - """Restore the signal handlers to their previous state (before the - call to _setup_sig_handler().""" - sigint_to_use = self.old_sigint if self.old_sigint is not None else signal.SIG_DFL - sigterm_to_use = self.old_sigterm if self.old_sigterm is not None else signal.SIG_DFL - signal.signal(signal.SIGINT, sigint_to_use) - signal.signal(signal.SIGTERM, sigterm_to_use) - try: - sigterm_to_use = self.old_sighup if self.old_sighup is not None else signal.SIG_DFL - signal.signal(signal.SIGHUP, sigterm_to_use) - except AttributeError: - pass - -class Serial: - """This class is used to execute tasks in series, and is more efficient - than Parallel, but is only appropriate for non-parallel builds. Only - one instance of this class should be in existence at a time. - - This class is not thread safe. - """ - - def __init__(self, taskmaster): - """Create a new serial job given a taskmaster. - - The taskmaster's next_task() method should return the next task - that needs to be executed, or None if there are no more tasks. The - taskmaster's executed() method will be called for each task when it - is successfully executed, or failed() will be called if it failed to - execute (e.g. execute() raised an exception).""" - - self.taskmaster = taskmaster - self.interrupted = InterruptState() - - def start(self): - """Start the job. This will begin pulling tasks from the taskmaster - and executing them, and return when there are no more tasks. If a task - fails to execute (i.e. execute() raises an exception), then the job will - stop.""" - - while True: - task = self.taskmaster.next_task() - - if task is None: - break - - try: - task.prepare() - if task.needs_execute(): - task.execute() - except Exception: - if self.interrupted(): - try: - raise SCons.Errors.BuildError( - task.targets[0], errstr=interrupt_msg) - except: - task.exception_set() - else: - task.exception_set() - - # Let the failed() callback function arrange for the - # build to stop if that's appropriate. - task.failed() - else: - task.executed() - - task.postprocess() - self.taskmaster.cleanup() - - -# Trap import failure so that everything in the Job module but the -# Parallel class (and its dependent classes) will work if the interpreter -# doesn't support threads. -try: - import queue - import threading -except ImportError: - pass -else: - class Worker(threading.Thread): - """A worker thread waits on a task to be posted to its request queue, - dequeues the task, executes it, and posts a tuple including the task - and a boolean indicating whether the task executed successfully. """ - - def __init__(self, requestQueue, resultsQueue, interrupted): - super().__init__() - self.daemon = True - self.requestQueue = requestQueue - self.resultsQueue = resultsQueue - self.interrupted = interrupted - self.start() - - def run(self): - while True: - task = self.requestQueue.get() - - if task is None: - # The "None" value is used as a sentinel by - # ThreadPool.cleanup(). This indicates that there - # are no more tasks, so we should quit. 
- break - - try: - if self.interrupted(): - raise SCons.Errors.BuildError( - task.targets[0], errstr=interrupt_msg) - task.execute() - except: - task.exception_set() - ok = False - else: - ok = True - - self.resultsQueue.put((task, ok)) - - class ThreadPool: - """This class is responsible for spawning and managing worker threads.""" - - def __init__(self, num, stack_size, interrupted): - """Create the request and reply queues, and 'num' worker threads. - - One must specify the stack size of the worker threads. The - stack size is specified in kilobytes. - """ - self.requestQueue = queue.Queue(0) - self.resultsQueue = queue.Queue(0) - - try: - prev_size = threading.stack_size(stack_size*1024) - except AttributeError as e: - # Only print a warning if the stack size has been - # explicitly set. - if explicit_stack_size is not None: - msg = "Setting stack size is unsupported by this version of Python:\n " + \ - e.args[0] - SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg) - except ValueError as e: - msg = "Setting stack size failed:\n " + str(e) - SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg) - - # Create worker threads - self.workers = [] - for _ in range(num): - worker = Worker(self.requestQueue, self.resultsQueue, interrupted) - self.workers.append(worker) - - if 'prev_size' in locals(): - threading.stack_size(prev_size) - - def put(self, task): - """Put task into request queue.""" - self.requestQueue.put(task) - - def get(self): - """Remove and return a result tuple from the results queue.""" - return self.resultsQueue.get() - - def preparation_failed(self, task): - self.resultsQueue.put((task, False)) - - def cleanup(self): - """ - Shuts down the thread pool, giving each worker thread a - chance to shut down gracefully. - """ - # For each worker thread, put a sentinel "None" value - # on the requestQueue (indicating that there's no work - # to be done) so that each worker thread will get one and - # terminate gracefully. - for _ in self.workers: - self.requestQueue.put(None) - - # Wait for all of the workers to terminate. - # - # If we don't do this, later Python versions (2.4, 2.5) often - # seem to raise exceptions during shutdown. This happens - # in requestQueue.get(), as an assertion failure that - # requestQueue.not_full is notified while not acquired, - # seemingly because the main thread has shut down (or is - # in the process of doing so) while the workers are still - # trying to pull sentinels off the requestQueue. - # - # Normally these terminations should happen fairly quickly, - # but we'll stick a one-second timeout on here just in case - # someone gets hung. - for worker in self.workers: - worker.join(1.0) - self.workers = [] - - class Parallel: - """This class is used to execute tasks in parallel, and is somewhat - less efficient than Serial, but is appropriate for parallel builds. - - This class is thread safe. - """ - - def __init__(self, taskmaster, num, stack_size): - """Create a new parallel job given a taskmaster. - - The taskmaster's next_task() method should return the next - task that needs to be executed, or None if there are no more - tasks. The taskmaster's executed() method will be called - for each task when it is successfully executed, or failed() - will be called if the task failed to execute (i.e. execute() - raised an exception). 
- - Note: calls to taskmaster are serialized, but calls to - execute() on distinct tasks are not serialized, because - that is the whole point of parallel jobs: they can execute - multiple tasks simultaneously. """ - - self.taskmaster = taskmaster - self.interrupted = InterruptState() - self.tp = ThreadPool(num, stack_size, self.interrupted) - - self.maxjobs = num - - def start(self): - """Start the job. This will begin pulling tasks from the - taskmaster and executing them, and return when there are no - more tasks. If a task fails to execute (i.e. execute() raises - an exception), then the job will stop.""" - - jobs = 0 - - while True: - # Start up as many available tasks as we're - # allowed to. - while jobs < self.maxjobs: - task = self.taskmaster.next_task() - if task is None: - break - - try: - # prepare task for execution - task.prepare() - except: - task.exception_set() - task.failed() - task.postprocess() - else: - if task.needs_execute(): - # dispatch task - self.tp.put(task) - jobs += 1 - else: - task.executed() - task.postprocess() - - if not task and not jobs: break - - # Let any/all completed tasks finish up before we go - # back and put the next batch of tasks on the queue. - while True: - task, ok = self.tp.get() - jobs -= 1 - - if ok: - task.executed() - else: - if self.interrupted(): - try: - raise SCons.Errors.BuildError( - task.targets[0], errstr=interrupt_msg) - except: - task.exception_set() - - # Let the failed() callback function arrange - # for the build to stop if that's appropriate. - task.failed() - - task.postprocess() - - if self.tp.resultsQueue.empty(): - break - - self.tp.cleanup() - self.taskmaster.cleanup() - -# Local Variables: -# tab-width:4 -# indent-tabs-mode:nil -# End: -# vim: set expandtab tabstop=4 shiftwidth=4: diff --git a/SCons/JobTests.py b/SCons/JobTests.py deleted file mode 100644 index 54d7fa4..0000000 --- a/SCons/JobTests.py +++ /dev/null @@ -1,575 +0,0 @@ -# MIT License -# -# Copyright The SCons Foundation -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY -# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
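The ThreadPool/Worker machinery in the Job.py hunk above boils down to two queues and a None shutdown sentinel. A minimal, self-contained sketch of that request/results pattern, with illustrative names of my own rather than the classes from the patch:

    # Workers block on a request queue, run whatever they pull off it, and
    # post (task, result, ok) tuples to a results queue; a None request is
    # the shutdown sentinel, mirroring ThreadPool.cleanup() above.
    import queue
    import threading

    def demo_worker(requests, results):
        while True:
            task = requests.get()
            if task is None:          # sentinel: no more work, let the thread exit
                break
            try:
                result, ok = task(), True
            except Exception as exc:  # record the failure instead of killing the worker
                result, ok = exc, False
            results.put((task, result, ok))

    def run_pool(tasks, num_workers=2):
        requests, results = queue.Queue(), queue.Queue()
        workers = [threading.Thread(target=demo_worker,
                                    args=(requests, results), daemon=True)
                   for _ in range(num_workers)]
        for w in workers:
            w.start()
        for t in tasks:
            requests.put(t)
        out = [results.get() for _ in tasks]  # one result per submitted task
        for _ in workers:                     # one sentinel per worker
            requests.put(None)
        for w in workers:
            w.join(1.0)
        return out

    if __name__ == "__main__":
        for task, result, ok in run_pool([lambda i=i: i * i for i in range(5)]):
            print(result, ok)

Results come back in completion order rather than submission order, which is also how Parallel.start() drains the results queue.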
- -import unittest -import random -import math -import sys -import time -import os - -import TestUnit - -import SCons.Job - - -def get_cpu_nums(): - # Linux, Unix and MacOS: - if hasattr( os, "sysconf" ): - if "SC_NPROCESSORS_ONLN" in os.sysconf_names: - # Linux & Unix: - ncpus = os.sysconf( "SC_NPROCESSORS_ONLN" ) - if isinstance(ncpus, int) and ncpus > 0: - return ncpus - else: # OSX: - return int(os.popen2("sysctl -n hw.ncpu")[1].read() ) - # Windows: - if "NUMBER_OF_PROCESSORS" in os.environ: - ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]) - if ncpus > 0: - return ncpus - return 1 # Default - -# a large number -num_sines = 500 - -# how many parallel jobs to perform for the test -num_jobs = get_cpu_nums()*2 - -# in case we werent able to detect num cpus for this test -# just make a hardcoded suffcient large number, though not future proof -if num_jobs == 2: - num_jobs = 33 - -# how many tasks to perform for the test -num_tasks = num_jobs*5 - -class DummyLock: - """fake lock class to use if threads are not supported""" - def acquire(self): - pass - - def release(self): - pass - -class NoThreadsException(Exception): - """raised by the ParallelTestCase if threads are not supported""" - - def __str__(self): - return "the interpreter doesn't support threads" - -class Task: - """A dummy task class for testing purposes.""" - - def __init__(self, i, taskmaster): - self.i = i - self.taskmaster = taskmaster - self.was_executed = 0 - self.was_prepared = 0 - - def prepare(self): - self.was_prepared = 1 - - def _do_something(self): - pass - - def needs_execute(self): - return True - - def execute(self): - self.taskmaster.test_case.assertTrue(self.was_prepared, - "the task wasn't prepared") - - self.taskmaster.guard.acquire() - self.taskmaster.begin_list.append(self.i) - self.taskmaster.guard.release() - - # while task is executing, represent this in the parallel_list - # and then turn it off - self.taskmaster.parallel_list[self.i] = 1 - self._do_something() - self.taskmaster.parallel_list[self.i] = 0 - - # check if task was executing while another was also executing - for j in range(1, self.taskmaster.num_tasks): - if self.taskmaster.parallel_list[j + 1] == 1: - self.taskmaster.found_parallel = True - break - - self.was_executed = 1 - - self.taskmaster.guard.acquire() - self.taskmaster.end_list.append(self.i) - self.taskmaster.guard.release() - - def executed(self): - self.taskmaster.num_executed = self.taskmaster.num_executed + 1 - - self.taskmaster.test_case.assertTrue(self.was_prepared, - "the task wasn't prepared") - self.taskmaster.test_case.assertTrue(self.was_executed, - "the task wasn't really executed") - self.taskmaster.test_case.assertTrue(isinstance(self, Task), - "the task wasn't really a Task instance") - - def failed(self): - self.taskmaster.num_failed = self.taskmaster.num_failed + 1 - self.taskmaster.stop = 1 - self.taskmaster.test_case.assertTrue(self.was_prepared, - "the task wasn't prepared") - - def postprocess(self): - self.taskmaster.num_postprocessed = self.taskmaster.num_postprocessed + 1 - - def exception_set(self): - pass - -class RandomTask(Task): - def _do_something(self): - # do something that will take some random amount of time: - for i in range(random.randrange(0, 100 + num_sines, 1)): - x = math.sin(i) - time.sleep(0.01) - -class ExceptionTask: - """A dummy task class for testing purposes.""" - - def __init__(self, i, taskmaster): - self.taskmaster = taskmaster - self.was_prepared = 0 - - def prepare(self): - self.was_prepared = 1 - - def needs_execute(self): - 
return True - - def execute(self): - raise Exception - - def executed(self): - self.taskmaster.num_executed = self.taskmaster.num_executed + 1 - - self.taskmaster.test_case.assertTrue(self.was_prepared, - "the task wasn't prepared") - self.taskmaster.test_case.assertTrue(self.was_executed, - "the task wasn't really executed") - self.taskmaster.test_case.assertTrue(self.__class__ is Task, - "the task wasn't really a Task instance") - - def failed(self): - self.taskmaster.num_failed = self.taskmaster.num_failed + 1 - self.taskmaster.stop = 1 - self.taskmaster.test_case.assertTrue(self.was_prepared, - "the task wasn't prepared") - - def postprocess(self): - self.taskmaster.num_postprocessed = self.taskmaster.num_postprocessed + 1 - - def exception_set(self): - self.taskmaster.exception_set() - -class Taskmaster: - """A dummy taskmaster class for testing the job classes.""" - - def __init__(self, n, test_case, Task): - """n is the number of dummy tasks to perform.""" - - self.test_case = test_case - self.stop = None - self.num_tasks = n - self.num_iterated = 0 - self.num_executed = 0 - self.num_failed = 0 - self.num_postprocessed = 0 - self.parallel_list = [0] * (n+1) - self.found_parallel = False - self.Task = Task - - # 'guard' guards 'task_begin_list' and 'task_end_list' - try: - import threading - self.guard = threading.Lock() - except ImportError: - self.guard = DummyLock() - - # keep track of the order tasks are begun in - self.begin_list = [] - - # keep track of the order tasks are completed in - self.end_list = [] - - def next_task(self): - if self.stop or self.all_tasks_are_iterated(): - return None - else: - self.num_iterated = self.num_iterated + 1 - return self.Task(self.num_iterated, self) - - def all_tasks_are_executed(self): - return self.num_executed == self.num_tasks - - def all_tasks_are_iterated(self): - return self.num_iterated == self.num_tasks - - def all_tasks_are_postprocessed(self): - return self.num_postprocessed == self.num_tasks - - def tasks_were_serial(self): - """analyze the task order to see if they were serial""" - return not self.found_parallel - - def exception_set(self): - pass - - def cleanup(self): - pass - -SaveThreadPool = None -ThreadPoolCallList = [] - -class ParallelTestCase(unittest.TestCase): - def runTest(self): - """test parallel jobs""" - - try: - import threading - except ImportError: - raise NoThreadsException() - - taskmaster = Taskmaster(num_tasks, self, RandomTask) - jobs = SCons.Job.Jobs(num_jobs, taskmaster) - jobs.run() - - self.assertTrue(not taskmaster.tasks_were_serial(), - "the tasks were not executed in parallel") - self.assertTrue(taskmaster.all_tasks_are_executed(), - "all the tests were not executed") - self.assertTrue(taskmaster.all_tasks_are_iterated(), - "all the tests were not iterated over") - self.assertTrue(taskmaster.all_tasks_are_postprocessed(), - "all the tests were not postprocessed") - self.assertFalse(taskmaster.num_failed, - "some task(s) failed to execute") - - # Verify that parallel jobs will pull all of the completed tasks - # out of the queue at once, instead of one by one. We do this by - # replacing the default ThreadPool class with one that records the - # order in which tasks are put() and get() to/from the pool, and - # which sleeps a little bit before call get() to let the initial - # tasks complete and get their notifications on the resultsQueue. 
- - class SleepTask(Task): - def _do_something(self): - time.sleep(0.01) - - global SaveThreadPool - SaveThreadPool = SCons.Job.ThreadPool - - class WaitThreadPool(SaveThreadPool): - def put(self, task): - ThreadPoolCallList.append('put(%s)' % task.i) - return SaveThreadPool.put(self, task) - def get(self): - time.sleep(0.05) - result = SaveThreadPool.get(self) - ThreadPoolCallList.append('get(%s)' % result[0].i) - return result - - SCons.Job.ThreadPool = WaitThreadPool - - try: - taskmaster = Taskmaster(3, self, SleepTask) - jobs = SCons.Job.Jobs(2, taskmaster) - jobs.run() - - # The key here is that we get(1) and get(2) from the - # resultsQueue before we put(3), but get(1) and get(2) can - # be in either order depending on how the first two parallel - # tasks get scheduled by the operating system. - expect = [ - ['put(1)', 'put(2)', 'get(1)', 'get(2)', 'put(3)', 'get(3)'], - ['put(1)', 'put(2)', 'get(2)', 'get(1)', 'put(3)', 'get(3)'], - ] - assert ThreadPoolCallList in expect, ThreadPoolCallList - - finally: - SCons.Job.ThreadPool = SaveThreadPool - -class SerialTestCase(unittest.TestCase): - def runTest(self): - """test a serial job""" - - taskmaster = Taskmaster(num_tasks, self, RandomTask) - jobs = SCons.Job.Jobs(1, taskmaster) - jobs.run() - - self.assertTrue(taskmaster.tasks_were_serial(), - "the tasks were not executed in series") - self.assertTrue(taskmaster.all_tasks_are_executed(), - "all the tests were not executed") - self.assertTrue(taskmaster.all_tasks_are_iterated(), - "all the tests were not iterated over") - self.assertTrue(taskmaster.all_tasks_are_postprocessed(), - "all the tests were not postprocessed") - self.assertFalse(taskmaster.num_failed, - "some task(s) failed to execute") - -class NoParallelTestCase(unittest.TestCase): - def runTest(self): - """test handling lack of parallel support""" - def NoParallel(tm, num, stack_size): - raise NameError - save_Parallel = SCons.Job.Parallel - SCons.Job.Parallel = NoParallel - try: - taskmaster = Taskmaster(num_tasks, self, RandomTask) - jobs = SCons.Job.Jobs(2, taskmaster) - self.assertTrue(jobs.num_jobs == 1, - "unexpected number of jobs %d" % jobs.num_jobs) - jobs.run() - self.assertTrue(taskmaster.tasks_were_serial(), - "the tasks were not executed in series") - self.assertTrue(taskmaster.all_tasks_are_executed(), - "all the tests were not executed") - self.assertTrue(taskmaster.all_tasks_are_iterated(), - "all the tests were not iterated over") - self.assertTrue(taskmaster.all_tasks_are_postprocessed(), - "all the tests were not postprocessed") - self.assertFalse(taskmaster.num_failed, - "some task(s) failed to execute") - finally: - SCons.Job.Parallel = save_Parallel - - -class SerialExceptionTestCase(unittest.TestCase): - def runTest(self): - """test a serial job with tasks that raise exceptions""" - - taskmaster = Taskmaster(num_tasks, self, ExceptionTask) - jobs = SCons.Job.Jobs(1, taskmaster) - jobs.run() - - self.assertFalse(taskmaster.num_executed, - "a task was executed") - self.assertTrue(taskmaster.num_iterated == 1, - "exactly one task should have been iterated") - self.assertTrue(taskmaster.num_failed == 1, - "exactly one task should have failed") - self.assertTrue(taskmaster.num_postprocessed == 1, - "exactly one task should have been postprocessed") - -class ParallelExceptionTestCase(unittest.TestCase): - def runTest(self): - """test parallel jobs with tasks that raise exceptions""" - - taskmaster = Taskmaster(num_tasks, self, ExceptionTask) - jobs = SCons.Job.Jobs(num_jobs, taskmaster) - jobs.run() - - 
self.assertFalse(taskmaster.num_executed, - "a task was executed") - self.assertTrue(taskmaster.num_iterated >= 1, - "one or more task should have been iterated") - self.assertTrue(taskmaster.num_failed >= 1, - "one or more tasks should have failed") - self.assertTrue(taskmaster.num_postprocessed >= 1, - "one or more tasks should have been postprocessed") - -#--------------------------------------------------------------------- -# Above tested Job object with contrived Task and Taskmaster objects. -# Now test Job object with actual Task and Taskmaster objects. - -import SCons.Taskmaster -import SCons.Node -import time - -class DummyNodeInfo: - def update(self, obj): - pass - -class testnode (SCons.Node.Node): - def __init__(self): - super().__init__() - self.expect_to_be = SCons.Node.executed - self.ninfo = DummyNodeInfo() - -class goodnode (testnode): - def __init__(self): - super().__init__() - self.expect_to_be = SCons.Node.up_to_date - self.ninfo = DummyNodeInfo() - -class slowgoodnode (goodnode): - def prepare(self): - # Delay to allow scheduled Jobs to run while the dispatcher - # sleeps. Keep this short because it affects the time taken - # by this test. - time.sleep(0.15) - goodnode.prepare(self) - -class badnode (goodnode): - def __init__(self): - super().__init__() - self.expect_to_be = SCons.Node.failed - def build(self, **kw): - raise Exception('badnode exception') - -class slowbadnode (badnode): - def build(self, **kw): - # Appears to take a while to build, allowing faster builds to - # overlap. Time duration is not especially important, but if - # it is faster than slowgoodnode then these could complete - # while the scheduler is sleeping. - time.sleep(0.05) - raise Exception('slowbadnode exception') - -class badpreparenode (badnode): - def prepare(self): - raise Exception('badpreparenode exception') - -class _SConsTaskTest(unittest.TestCase): - - def _test_seq(self, num_jobs): - for node_seq in [ - [goodnode], - [badnode], - [slowbadnode], - [slowgoodnode], - [badpreparenode], - [goodnode, badnode], - [slowgoodnode, badnode], - [goodnode, slowbadnode], - [goodnode, goodnode, goodnode, slowbadnode], - [goodnode, slowbadnode, badpreparenode, slowgoodnode], - [goodnode, slowbadnode, slowgoodnode, badnode] - ]: - - self._do_test(num_jobs, node_seq) - - def _do_test(self, num_jobs, node_seq): - - testnodes = [] - for tnum in range(num_tasks): - testnodes.append(node_seq[tnum % len(node_seq)]()) - - taskmaster = SCons.Taskmaster.Taskmaster(testnodes, - tasker=SCons.Taskmaster.AlwaysTask) - - jobs = SCons.Job.Jobs(num_jobs, taskmaster) - - # Exceptions thrown by tasks are not actually propagated to - # this level, but are instead stored in the Taskmaster. - - jobs.run() - - # Now figure out if tests proceeded correctly. The first test - # that fails will shutdown the initiation of subsequent tests, - # but any tests currently queued for execution will still be - # processed, and any tests that completed before the failure - # would have resulted in new tests being queued for execution. - - # Apply the following operational heuristics of Job.py: - # 0) An initial jobset of tasks will be queued before any - # good/bad results are obtained (from "execute" of task in - # thread). - # 1) A goodnode will complete immediately on its thread and - # allow another node to be queued for execution. - # 2) A badnode will complete immediately and suppress any - # subsequent execution queuing, but all currently queued - # tasks will still be processed. - # 3) A slowbadnode will fail later. 
It will block slots in - # the job queue. Nodes that complete immediately will - # allow other nodes to be queued in their place, and this - # will continue until either (#2) above or until all job - # slots are filled with slowbadnode entries. - - # One approach to validating this test would be to try to - # determine exactly how many nodes executed, how many didn't, - # and the results of each, and then to assert failure on any - # mismatch (including the total number of built nodes). - # However, while this is possible to do for a single-processor - # system, it is nearly impossible to predict correctly for a - # multi-processor system and still test the characteristics of - # delayed execution nodes. Stated another way, multithreading - # is inherently non-deterministic unless you can completely - # characterize the entire system, and since that's not - # possible here, we shouldn't try. - - # Therefore, this test will simply scan the set of nodes to - # see if the node was executed or not and if it was executed - # that it obtained the expected value for that node - # (i.e. verifying we don't get failure crossovers or - # mislabelling of results). - - for N in testnodes: - state = N.get_state() - self.assertTrue(state in [SCons.Node.no_state, N.expect_to_be], - "Node %s got unexpected result: %s" % (N, state)) - - self.assertTrue([N for N in testnodes if N.get_state()], - "no nodes ran at all.") - - -class SerialTaskTest(_SConsTaskTest): - def runTest(self): - """test serial jobs with actual Taskmaster and Task""" - self._test_seq(1) - - -class ParallelTaskTest(_SConsTaskTest): - def runTest(self): - """test parallel jobs with actual Taskmaster and Task""" - self._test_seq(num_jobs) - - - -#--------------------------------------------------------------------- - -def suite(): - suite = unittest.TestSuite() - suite.addTest(ParallelTestCase()) - suite.addTest(SerialTestCase()) - suite.addTest(NoParallelTestCase()) - suite.addTest(SerialExceptionTestCase()) - suite.addTest(ParallelExceptionTestCase()) - suite.addTest(SerialTaskTest()) - suite.addTest(ParallelTaskTest()) - return suite - -if __name__ == "__main__": - runner = TestUnit.cli.get_runner() - result = runner().run(suite()) - if (len(result.failures) == 0 - and len(result.errors) == 1 - and isinstance(result.errors[0][0], SerialTestCase) - and isinstance(result.errors[0][1][0], NoThreadsException)): - sys.exit(2) - elif not result.wasSuccessful(): - sys.exit(1) - -# Local Variables: -# tab-width:4 -# indent-tabs-mode:nil -# End: -# vim: set expandtab tabstop=4 shiftwidth=4: diff --git a/SCons/SConf.py b/SCons/SConf.py index 2a427df..fe14a0c 100644 --- a/SCons/SConf.py +++ b/SCons/SConf.py @@ -43,7 +43,7 @@ import traceback import SCons.Action import SCons.Builder import SCons.Errors -import SCons.Job +import SCons.Taskmaster.Job import SCons.Node.FS import SCons.Taskmaster import SCons.Util @@ -551,7 +551,7 @@ class SConfBase: SConfFS.set_max_drift(0) tm = SCons.Taskmaster.Taskmaster(nodes, SConfBuildTask) # we don't want to build tests in parallel - jobs = SCons.Job.Jobs(1, tm ) + jobs = SCons.Taskmaster.Job.Jobs(1, tm) jobs.run() for n in nodes: state = n.get_state() diff --git a/SCons/Script/Main.py b/SCons/Script/Main.py index 1b06a64..22042f5 100644 --- a/SCons/Script/Main.py +++ b/SCons/Script/Main.py @@ -52,7 +52,7 @@ import SCons.Debug import SCons.Defaults import SCons.Environment import SCons.Errors -import SCons.Job +import SCons.Taskmaster.Job import SCons.Node import SCons.Node.FS import SCons.Platform @@ -1134,7 
+1134,7 @@ def _main(parser): SCons.Node.FS.set_duplicate(options.duplicate) fs.set_max_drift(options.max_drift) - SCons.Job.explicit_stack_size = options.stack_size + SCons.Taskmaster.Job.explicit_stack_size = options.stack_size # Hash format and chunksize are set late to support SetOption being called # in a SConscript or SConstruct file. @@ -1321,7 +1321,7 @@ def _build_targets(fs, options, targets, target_top): # to check if python configured with threads. global num_jobs num_jobs = options.num_jobs - jobs = SCons.Job.Jobs(num_jobs, taskmaster) + jobs = SCons.Taskmaster.Job.Jobs(num_jobs, taskmaster) if num_jobs > 1: msg = None if jobs.num_jobs == 1 or not python_has_threads: diff --git a/SCons/Taskmaster.py b/SCons/Taskmaster.py deleted file mode 100644 index d571795..0000000 --- a/SCons/Taskmaster.py +++ /dev/null @@ -1,1059 +0,0 @@ -# MIT License -# -# Copyright The SCons Foundation -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY -# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -"""Generic Taskmaster module for the SCons build engine. - -This module contains the primary interface(s) between a wrapping user -interface and the SCons build engine. There are two key classes here: - -Taskmaster - This is the main engine for walking the dependency graph and - calling things to decide what does or doesn't need to be built. - -Task - This is the base class for allowing a wrapping interface to - decide what does or doesn't actually need to be done. The - intention is for a wrapping interface to subclass this as - appropriate for different types of behavior it may need. - - The canonical example is the SCons native Python interface, - which has Task subclasses that handle its specific behavior, - like printing "'foo' is up to date" when a top-level target - doesn't need to be built, and handling the -c option by removing - targets as its "build" action. There is also a separate subclass - for suppressing this output when the -q option is used. - - The Taskmaster instantiates a Task object for each (set of) - target(s) that it decides need to be evaluated and/or built. 
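The SConf.py and Main.py hunks above capture the whole migration for callers: the module previously imported as SCons.Job is now reached as SCons.Taskmaster.Job, and since the file is moved verbatim the Jobs interface itself is unchanged. A hedged sketch of how external code driving the build engine would adapt (the target/node setup is elided, and the build() wrapper is my own name, not something in the patch):

    # Only the import path changes; Jobs(num, taskmaster) and run() stay the same.
    import SCons.Taskmaster
    import SCons.Taskmaster.Job      # previously: import SCons.Job

    def build(nodes, num_jobs=1):
        tm = SCons.Taskmaster.Taskmaster(nodes)           # walks the dependency DAG
        jobs = SCons.Taskmaster.Job.Jobs(num_jobs, tm)    # previously SCons.Job.Jobs(...)
        jobs.run()
        return jobs.were_interrupted()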
-""" - -import sys -from abc import ABC, abstractmethod -from itertools import chain - -import SCons.Errors -import SCons.Node -import SCons.Warnings - -StateString = SCons.Node.StateString -NODE_NO_STATE = SCons.Node.no_state -NODE_PENDING = SCons.Node.pending -NODE_EXECUTING = SCons.Node.executing -NODE_UP_TO_DATE = SCons.Node.up_to_date -NODE_EXECUTED = SCons.Node.executed -NODE_FAILED = SCons.Node.failed - -print_prepare = False # set by option --debug=prepare - -# A subsystem for recording stats about how different Nodes are handled by -# the main Taskmaster loop. There's no external control here (no need for -# a --debug= option); enable it by changing the value of CollectStats. - -CollectStats = None - -class Stats: - """ - A simple class for holding statistics about the disposition of a - Node by the Taskmaster. If we're collecting statistics, each Node - processed by the Taskmaster gets one of these attached, in which case - the Taskmaster records its decision each time it processes the Node. - (Ideally, that's just once per Node.) - """ - def __init__(self): - """ - Instantiates a Taskmaster.Stats object, initializing all - appropriate counters to zero. - """ - self.considered = 0 - self.already_handled = 0 - self.problem = 0 - self.child_failed = 0 - self.not_built = 0 - self.side_effects = 0 - self.build = 0 - -StatsNodes = [] - -fmt = "%(considered)3d "\ - "%(already_handled)3d " \ - "%(problem)3d " \ - "%(child_failed)3d " \ - "%(not_built)3d " \ - "%(side_effects)3d " \ - "%(build)3d " - -def dump_stats(): - for n in sorted(StatsNodes, key=lambda a: str(a)): - print((fmt % n.attributes.stats.__dict__) + str(n)) - - -class Task(ABC): - """ SCons build engine abstract task class. - - This controls the interaction of the actual building of node - and the rest of the engine. - - This is expected to handle all of the normally-customizable - aspects of controlling a build, so any given application - *should* be able to do what it wants by sub-classing this - class and overriding methods as appropriate. If an application - needs to customize something by sub-classing Taskmaster (or - some other build engine class), we should first try to migrate - that functionality into this class. - - Note that it's generally a good idea for sub-classes to call - these methods explicitly to update state, etc., rather than - roll their own interaction with Taskmaster from scratch. - """ - def __init__(self, tm, targets, top, node): - self.tm = tm - self.targets = targets - self.top = top - self.node = node - self.exc_clear() - - def trace_message(self, method, node, description='node'): - fmt = '%-20s %s %s\n' - return fmt % (method + ':', description, self.tm.trace_node(node)) - - def display(self, message): - """ - Hook to allow the calling interface to display a message. - - This hook gets called as part of preparing a task for execution - (that is, a Node to be built). As part of figuring out what Node - should be built next, the actual target list may be altered, - along with a message describing the alteration. The calling - interface can subclass Task and provide a concrete implementation - of this method to see those messages. - """ - pass - - def prepare(self): - """ - Called just before the task is executed. - - This is mainly intended to give the target Nodes a chance to - unlink underlying files and make all necessary directories before - the Action is actually called to build the targets. 
- """ - global print_prepare - T = self.tm.trace - if T: T.write(self.trace_message('Task.prepare()', self.node)) - - # Now that it's the appropriate time, give the TaskMaster a - # chance to raise any exceptions it encountered while preparing - # this task. - self.exception_raise() - - if self.tm.message: - self.display(self.tm.message) - self.tm.message = None - - # Let the targets take care of any necessary preparations. - # This includes verifying that all of the necessary sources - # and dependencies exist, removing the target file(s), etc. - # - # As of April 2008, the get_executor().prepare() method makes - # sure that all of the aggregate sources necessary to build this - # Task's target(s) exist in one up-front check. The individual - # target t.prepare() methods check that each target's explicit - # or implicit dependencies exists, and also initialize the - # .sconsign info. - executor = self.targets[0].get_executor() - if executor is None: - return - executor.prepare() - for t in executor.get_action_targets(): - if print_prepare: - print("Preparing target %s..."%t) - for s in t.side_effects: - print("...with side-effect %s..."%s) - t.prepare() - for s in t.side_effects: - if print_prepare: - print("...Preparing side-effect %s..."%s) - s.prepare() - - def get_target(self): - """Fetch the target being built or updated by this task. - """ - return self.node - - @abstractmethod - def needs_execute(self): - return - - def execute(self): - """ - Called to execute the task. - - This method is called from multiple threads in a parallel build, - so only do thread safe stuff here. Do thread unsafe stuff in - prepare(), executed() or failed(). - """ - T = self.tm.trace - if T: T.write(self.trace_message('Task.execute()', self.node)) - - try: - cached_targets = [] - for t in self.targets: - if not t.retrieve_from_cache(): - break - cached_targets.append(t) - if len(cached_targets) < len(self.targets): - # Remove targets before building. It's possible that we - # partially retrieved targets from the cache, leaving - # them in read-only mode. That might cause the command - # to fail. - # - for t in cached_targets: - try: - t.fs.unlink(t.get_internal_path()) - except (IOError, OSError): - pass - self.targets[0].build() - else: - for t in cached_targets: - t.cached = 1 - except SystemExit: - exc_value = sys.exc_info()[1] - raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code) - except SCons.Errors.UserError: - raise - except SCons.Errors.BuildError: - raise - except Exception as e: - buildError = SCons.Errors.convert_to_BuildError(e) - buildError.node = self.targets[0] - buildError.exc_info = sys.exc_info() - raise buildError - - def executed_without_callbacks(self): - """ - Called when the task has been successfully executed - and the Taskmaster instance doesn't want to call - the Node's callback methods. - """ - T = self.tm.trace - if T: T.write(self.trace_message('Task.executed_without_callbacks()', - self.node)) - - for t in self.targets: - if t.get_state() == NODE_EXECUTING: - for side_effect in t.side_effects: - side_effect.set_state(NODE_NO_STATE) - t.set_state(NODE_EXECUTED) - - def executed_with_callbacks(self): - """ - Called when the task has been successfully executed and - the Taskmaster instance wants to call the Node's callback - methods. - - This may have been a do-nothing operation (to preserve build - order), so we must check the node's state before deciding whether - it was "built", in which case we call the appropriate Node method. 
- In any event, we always call "visited()", which will handle any - post-visit actions that must take place regardless of whether - or not the target was an actual built target or a source Node. - """ - global print_prepare - T = self.tm.trace - if T: T.write(self.trace_message('Task.executed_with_callbacks()', - self.node)) - - for t in self.targets: - if t.get_state() == NODE_EXECUTING: - for side_effect in t.side_effects: - side_effect.set_state(NODE_NO_STATE) - t.set_state(NODE_EXECUTED) - if not t.cached: - t.push_to_cache() - t.built() - t.visited() - if (not print_prepare and - (not hasattr(self, 'options') or not self.options.debug_includes)): - t.release_target_info() - else: - t.visited() - - executed = executed_with_callbacks - - def failed(self): - """ - Default action when a task fails: stop the build. - - Note: Although this function is normally invoked on nodes in - the executing state, it might also be invoked on up-to-date - nodes when using Configure(). - """ - self.fail_stop() - - def fail_stop(self): - """ - Explicit stop-the-build failure. - - This sets failure status on the target nodes and all of - their dependent parent nodes. - - Note: Although this function is normally invoked on nodes in - the executing state, it might also be invoked on up-to-date - nodes when using Configure(). - """ - T = self.tm.trace - if T: T.write(self.trace_message('Task.failed_stop()', self.node)) - - # Invoke will_not_build() to clean-up the pending children - # list. - self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED)) - - # Tell the taskmaster to not start any new tasks - self.tm.stop() - - # We're stopping because of a build failure, but give the - # calling Task class a chance to postprocess() the top-level - # target under which the build failure occurred. - self.targets = [self.tm.current_top] - self.top = 1 - - def fail_continue(self): - """ - Explicit continue-the-build failure. - - This sets failure status on the target nodes and all of - their dependent parent nodes. - - Note: Although this function is normally invoked on nodes in - the executing state, it might also be invoked on up-to-date - nodes when using Configure(). - """ - T = self.tm.trace - if T: T.write(self.trace_message('Task.failed_continue()', self.node)) - - self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED)) - - def make_ready_all(self): - """ - Marks all targets in a task ready for execution. - - This is used when the interface needs every target Node to be - visited--the canonical example being the "scons -c" option. - """ - T = self.tm.trace - if T: T.write(self.trace_message('Task.make_ready_all()', self.node)) - - self.out_of_date = self.targets[:] - for t in self.targets: - t.disambiguate().set_state(NODE_EXECUTING) - for s in t.side_effects: - # add disambiguate here to mirror the call on targets above - s.disambiguate().set_state(NODE_EXECUTING) - - def make_ready_current(self): - """ - Marks all targets in a task ready for execution if any target - is not current. - - This is the default behavior for building only what's necessary. 
- """ - global print_prepare - T = self.tm.trace - if T: T.write(self.trace_message('Task.make_ready_current()', - self.node)) - - self.out_of_date = [] - needs_executing = False - for t in self.targets: - try: - t.disambiguate().make_ready() - is_up_to_date = not t.has_builder() or \ - (not t.always_build and t.is_up_to_date()) - except EnvironmentError as e: - raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename) - - if not is_up_to_date: - self.out_of_date.append(t) - needs_executing = True - - if needs_executing: - for t in self.targets: - t.set_state(NODE_EXECUTING) - for s in t.side_effects: - # add disambiguate here to mirror the call on targets in first loop above - s.disambiguate().set_state(NODE_EXECUTING) - else: - for t in self.targets: - # We must invoke visited() to ensure that the node - # information has been computed before allowing the - # parent nodes to execute. (That could occur in a - # parallel build...) - t.visited() - t.set_state(NODE_UP_TO_DATE) - if (not print_prepare and - (not hasattr(self, 'options') or not self.options.debug_includes)): - t.release_target_info() - - make_ready = make_ready_current - - def postprocess(self): - """ - Post-processes a task after it's been executed. - - This examines all the targets just built (or not, we don't care - if the build was successful, or even if there was no build - because everything was up-to-date) to see if they have any - waiting parent Nodes, or Nodes waiting on a common side effect, - that can be put back on the candidates list. - """ - T = self.tm.trace - if T: T.write(self.trace_message('Task.postprocess()', self.node)) - - # We may have built multiple targets, some of which may have - # common parents waiting for this build. Count up how many - # targets each parent was waiting for so we can subtract the - # values later, and so we *don't* put waiting side-effect Nodes - # back on the candidates list if the Node is also a waiting - # parent. - - targets = set(self.targets) - - pending_children = self.tm.pending_children - parents = {} - for t in targets: - # A node can only be in the pending_children set if it has - # some waiting_parents. - if t.waiting_parents: - if T: T.write(self.trace_message('Task.postprocess()', - t, - 'removing')) - pending_children.discard(t) - for p in t.waiting_parents: - parents[p] = parents.get(p, 0) + 1 - t.waiting_parents = set() - - for t in targets: - if t.side_effects is not None: - for s in t.side_effects: - if s.get_state() == NODE_EXECUTING: - s.set_state(NODE_NO_STATE) - - # The side-effects may have been transferred to - # NODE_NO_STATE by executed_with{,out}_callbacks, but was - # not taken out of the waiting parents/pending children - # data structures. Check for that now. - if s.get_state() == NODE_NO_STATE and s.waiting_parents: - pending_children.discard(s) - for p in s.waiting_parents: - parents[p] = parents.get(p, 0) + 1 - s.waiting_parents = set() - for p in s.waiting_s_e: - if p.ref_count == 0: - self.tm.candidates.append(p) - - for p, subtract in parents.items(): - p.ref_count = p.ref_count - subtract - if T: T.write(self.trace_message('Task.postprocess()', - p, - 'adjusted parent ref count')) - if p.ref_count == 0: - self.tm.candidates.append(p) - - for t in targets: - t.postprocess() - - # Exception handling subsystem. 
- # - # Exceptions that occur while walking the DAG or examining Nodes - # must be raised, but must be raised at an appropriate time and in - # a controlled manner so we can, if necessary, recover gracefully, - # possibly write out signature information for Nodes we've updated, - # etc. This is done by having the Taskmaster tell us about the - # exception, and letting - - def exc_info(self): - """ - Returns info about a recorded exception. - """ - return self.exception - - def exc_clear(self): - """ - Clears any recorded exception. - - This also changes the "exception_raise" attribute to point - to the appropriate do-nothing method. - """ - self.exception = (None, None, None) - self.exception_raise = self._no_exception_to_raise - - def exception_set(self, exception=None): - """ - Records an exception to be raised at the appropriate time. - - This also changes the "exception_raise" attribute to point - to the method that will, in fact - """ - if not exception: - exception = sys.exc_info() - self.exception = exception - self.exception_raise = self._exception_raise - - def _no_exception_to_raise(self): - pass - - def _exception_raise(self): - """ - Raises a pending exception that was recorded while getting a - Task ready for execution. - """ - exc = self.exc_info()[:] - try: - exc_type, exc_value, exc_traceback = exc - except ValueError: - exc_type, exc_value = exc # pylint: disable=unbalanced-tuple-unpacking - exc_traceback = None - - # raise exc_type(exc_value).with_traceback(exc_traceback) - if isinstance(exc_value, Exception): #hasattr(exc_value, 'with_traceback'): - # If exc_value is an exception, then just reraise - raise exc_value.with_traceback(exc_traceback) - else: - # else we'll create an exception using the value and raise that - raise exc_type(exc_value).with_traceback(exc_traceback) - - - # raise e.__class__, e.__class__(e), sys.exc_info()[2] - # exec("raise exc_type(exc_value).with_traceback(exc_traceback)") - - - -class AlwaysTask(Task): - def needs_execute(self): - """ - Always returns True (indicating this Task should always - be executed). - - Subclasses that need this behavior (as opposed to the default - of only executing Nodes that are out of date w.r.t. their - dependencies) can use this as follows: - - class MyTaskSubclass(SCons.Taskmaster.Task): - needs_execute = SCons.Taskmaster.AlwaysTask.needs_execute - """ - return True - -class OutOfDateTask(Task): - def needs_execute(self): - """ - Returns True (indicating this Task should be executed) if this - Task's target state indicates it needs executing, which has - already been determined by an earlier up-to-date check. - """ - return self.targets[0].get_state() == SCons.Node.executing - - -def find_cycle(stack, visited): - if stack[-1] in visited: - return None - visited.add(stack[-1]) - for n in stack[-1].waiting_parents: - stack.append(n) - if stack[0] == stack[-1]: - return stack - if find_cycle(stack, visited): - return stack - stack.pop() - return None - - -class Taskmaster: - """ - The Taskmaster for walking the dependency DAG. 
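The Task docstring earlier in this file invites wrapping interfaces to subclass it, and AlwaysTask above shows the needs_execute hook that such subclasses reuse. A small hypothetical subclass, sketch only and not part of the patch, that always executes and reports progress, used the same way the SConf.py hunk passes SConfBuildTask as the tasker:

    import SCons.Taskmaster

    class LoggingTask(SCons.Taskmaster.AlwaysTask):
        """Hypothetical Task subclass: always build, and log what happened."""
        def display(self, message):
            print("taskmaster:", message)

        def executed(self):
            print("built:", self.node)
            super().executed()

    # tm = SCons.Taskmaster.Taskmaster(nodes, tasker=LoggingTask)
    # SCons.Taskmaster.Job.Jobs(1, tm).run()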
- """ - - def __init__(self, targets=[], tasker=None, order=None, trace=None): - self.original_top = targets - self.top_targets_left = targets[:] - self.top_targets_left.reverse() - self.candidates = [] - if tasker is None: - tasker = OutOfDateTask - self.tasker = tasker - if not order: - order = lambda l: l - self.order = order - self.message = None - self.trace = trace - self.next_candidate = self.find_next_candidate - self.pending_children = set() - - def find_next_candidate(self): - """ - Returns the next candidate Node for (potential) evaluation. - - The candidate list (really a stack) initially consists of all of - the top-level (command line) targets provided when the Taskmaster - was initialized. While we walk the DAG, visiting Nodes, all the - children that haven't finished processing get pushed on to the - candidate list. Each child can then be popped and examined in - turn for whether *their* children are all up-to-date, in which - case a Task will be created for their actual evaluation and - potential building. - - Here is where we also allow candidate Nodes to alter the list of - Nodes that should be examined. This is used, for example, when - invoking SCons in a source directory. A source directory Node can - return its corresponding build directory Node, essentially saying, - "Hey, you really need to build this thing over here instead." - """ - try: - return self.candidates.pop() - except IndexError: - pass - try: - node = self.top_targets_left.pop() - except IndexError: - return None - self.current_top = node - alt, message = node.alter_targets() - if alt: - self.message = message - self.candidates.append(node) - self.candidates.extend(self.order(alt)) - node = self.candidates.pop() - return node - - def no_next_candidate(self): - """ - Stops Taskmaster processing by not returning a next candidate. - - Note that we have to clean-up the Taskmaster candidate list - because the cycle detection depends on the fact all nodes have - been processed somehow. - """ - while self.candidates: - candidates = self.candidates - self.candidates = [] - self.will_not_build(candidates) - return None - - def _validate_pending_children(self): - """ - Validate the content of the pending_children set. Assert if an - internal error is found. - - This function is used strictly for debugging the taskmaster by - checking that no invariants are violated. It is not used in - normal operation. - - The pending_children set is used to detect cycles in the - dependency graph. We call a "pending child" a child that is - found in the "pending" state when checking the dependencies of - its parent node. - - A pending child can occur when the Taskmaster completes a loop - through a cycle. For example, let's imagine a graph made of - three nodes (A, B and C) making a cycle. The evaluation starts - at node A. The Taskmaster first considers whether node A's - child B is up-to-date. Then, recursively, node B needs to - check whether node C is up-to-date. This leaves us with a - dependency graph looking like:: - - Next candidate \ - \ - Node A (Pending) --> Node B(Pending) --> Node C (NoState) - ^ | - | | - +-------------------------------------+ - - Now, when the Taskmaster examines the Node C's child Node A, - it finds that Node A is in the "pending" state. Therefore, - Node A is a pending child of node C. - - Pending children indicate that the Taskmaster has potentially - loop back through a cycle. We say potentially because it could - also occur when a DAG is evaluated in parallel. 
For example, - consider the following graph:: - - Node A (Pending) --> Node B(Pending) --> Node C (Pending) --> ... - | ^ - | | - +----------> Node D (NoState) --------+ - / - Next candidate / - - The Taskmaster first evaluates the nodes A, B, and C and - starts building some children of node C. Assuming, that the - maximum parallel level has not been reached, the Taskmaster - will examine Node D. It will find that Node C is a pending - child of Node D. - - In summary, evaluating a graph with a cycle will always - involve a pending child at one point. A pending child might - indicate either a cycle or a diamond-shaped DAG. Only a - fraction of the nodes ends-up being a "pending child" of - another node. This keeps the pending_children set small in - practice. - - We can differentiate between the two cases if we wait until - the end of the build. At this point, all the pending children - nodes due to a diamond-shaped DAG will have been properly - built (or will have failed to build). But, the pending - children involved in a cycle will still be in the pending - state. - - The taskmaster removes nodes from the pending_children set as - soon as a pending_children node moves out of the pending - state. This also helps to keep the pending_children set small. - """ - - for n in self.pending_children: - assert n.state in (NODE_PENDING, NODE_EXECUTING), \ - (str(n), StateString[n.state]) - assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents)) - for p in n.waiting_parents: - assert p.ref_count > 0, (str(n), str(p), p.ref_count) - - - def trace_message(self, message): - return 'Taskmaster: %s\n' % message - - def trace_node(self, node): - return '<%-10s %-3s %s>' % (StateString[node.get_state()], - node.ref_count, - repr(str(node))) - - def _find_next_ready_node(self): - """ - Finds the next node that is ready to be built. - - This is *the* main guts of the DAG walk. We loop through the - list of candidates, looking for something that has no un-built - children (i.e., that is a leaf Node or has dependencies that are - all leaf Nodes or up-to-date). Candidate Nodes are re-scanned - (both the target Node itself and its sources, which are always - scanned in the context of a given target) to discover implicit - dependencies. A Node that must wait for some children to be - built will be put back on the candidates list after the children - have finished building. A Node that has been put back on the - candidates list in this way may have itself (or its sources) - re-scanned, in order to handle generated header files (e.g.) and - the implicit dependencies therein. - - Note that this method does not do any signature calculation or - up-to-date check itself. All of that is handled by the Task - class. This is purely concerned with the dependency graph walk. 
- """ - - self.ready_exc = None - - T = self.trace - if T: T.write('\n' + self.trace_message('Looking for a node to evaluate')) - - while True: - node = self.next_candidate() - if node is None: - if T: T.write(self.trace_message('No candidate anymore.') + '\n') - return None - - node = node.disambiguate() - state = node.get_state() - - # For debugging only: - # - # try: - # self._validate_pending_children() - # except: - # self.ready_exc = sys.exc_info() - # return node - - if CollectStats: - if not hasattr(node.attributes, 'stats'): - node.attributes.stats = Stats() - StatsNodes.append(node) - S = node.attributes.stats - S.considered = S.considered + 1 - else: - S = None - - if T: T.write(self.trace_message(' Considering node %s and its children:' % self.trace_node(node))) - - if state == NODE_NO_STATE: - # Mark this node as being on the execution stack: - node.set_state(NODE_PENDING) - elif state > NODE_PENDING: - # Skip this node if it has already been evaluated: - if S: S.already_handled = S.already_handled + 1 - if T: T.write(self.trace_message(' already handled (executed)')) - continue - - executor = node.get_executor() - - try: - children = executor.get_all_children() - except SystemExit: - exc_value = sys.exc_info()[1] - e = SCons.Errors.ExplicitExit(node, exc_value.code) - self.ready_exc = (SCons.Errors.ExplicitExit, e) - if T: T.write(self.trace_message(' SystemExit')) - return node - except Exception as e: - # We had a problem just trying to figure out the - # children (like a child couldn't be linked in to a - # VariantDir, or a Scanner threw something). Arrange to - # raise the exception when the Task is "executed." - self.ready_exc = sys.exc_info() - if S: S.problem = S.problem + 1 - if T: T.write(self.trace_message(' exception %s while scanning children.\n' % e)) - return node - - children_not_visited = [] - children_pending = set() - children_not_ready = [] - children_failed = False - - for child in chain(executor.get_all_prerequisites(), children): - childstate = child.get_state() - - if T: T.write(self.trace_message(' ' + self.trace_node(child))) - - if childstate == NODE_NO_STATE: - children_not_visited.append(child) - elif childstate == NODE_PENDING: - children_pending.add(child) - elif childstate == NODE_FAILED: - children_failed = True - - if childstate <= NODE_EXECUTING: - children_not_ready.append(child) - - # These nodes have not even been visited yet. Add - # them to the list so that on some next pass we can - # take a stab at evaluating them (or their children). - if children_not_visited: - if len(children_not_visited) > 1: - children_not_visited.reverse() - self.candidates.extend(self.order(children_not_visited)) - - # if T and children_not_visited: - # T.write(self.trace_message(' adding to candidates: %s' % map(str, children_not_visited))) - # T.write(self.trace_message(' candidates now: %s\n' % map(str, self.candidates))) - - # Skip this node if any of its children have failed. - # - # This catches the case where we're descending a top-level - # target and one of our children failed while trying to be - # built by a *previous* descent of an earlier top-level - # target. - # - # It can also occur if a node is reused in multiple - # targets. One first descends though the one of the - # target, the next time occurs through the other target. - # - # Note that we can only have failed_children if the - # --keep-going flag was used, because without it the build - # will stop before diving in the other branch. 
- # - # Note that even if one of the children fails, we still - # added the other children to the list of candidate nodes - # to keep on building (--keep-going). - if children_failed: - for n in executor.get_action_targets(): - n.set_state(NODE_FAILED) - - if S: S.child_failed = S.child_failed + 1 - if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node))) - continue - - if children_not_ready: - for child in children_not_ready: - # We're waiting on one or more derived targets - # that have not yet finished building. - if S: S.not_built = S.not_built + 1 - - # Add this node to the waiting parents lists of - # anything we're waiting on, with a reference - # count so we can be put back on the list for - # re-evaluation when they've all finished. - node.ref_count = node.ref_count + child.add_to_waiting_parents(node) - if T: T.write(self.trace_message(' adjusted ref count: %s, child %s' % - (self.trace_node(node), repr(str(child))))) - - if T: - for pc in children_pending: - T.write(self.trace_message(' adding %s to the pending children set\n' % - self.trace_node(pc))) - self.pending_children = self.pending_children | children_pending - - continue - - # Skip this node if it has side-effects that are - # currently being built: - wait_side_effects = False - for se in executor.get_action_side_effects(): - if se.get_state() == NODE_EXECUTING: - se.add_to_waiting_s_e(node) - wait_side_effects = True - - if wait_side_effects: - if S: S.side_effects = S.side_effects + 1 - continue - - # The default when we've gotten through all of the checks above: - # this node is ready to be built. - if S: S.build = S.build + 1 - if T: T.write(self.trace_message('Evaluating %s\n' % - self.trace_node(node))) - - # For debugging only: - # - # try: - # self._validate_pending_children() - # except: - # self.ready_exc = sys.exc_info() - # return node - - return node - - return None - - def next_task(self): - """ - Returns the next task to be executed. - - This simply asks for the next Node to be evaluated, and then wraps - it in the specific Task subclass with which we were initialized. - """ - node = self._find_next_ready_node() - - if node is None: - return None - - executor = node.get_executor() - if executor is None: - return None - - tlist = executor.get_all_targets() - - task = self.tasker(self, tlist, node in self.original_top, node) - try: - task.make_ready() - except Exception as e : - # We had a problem just trying to get this task ready (like - # a child couldn't be linked to a VariantDir when deciding - # whether this node is current). Arrange to raise the - # exception when the Task is "executed." - self.ready_exc = sys.exc_info() - - if self.ready_exc: - task.exception_set(self.ready_exc) - - self.ready_exc = None - - return task - - def will_not_build(self, nodes, node_func=lambda n: None): - """ - Perform clean-up about nodes that will never be built. Invokes - a user defined function on all of these nodes (including all - of their parents). - """ - - T = self.trace - - pending_children = self.pending_children - - to_visit = set(nodes) - pending_children = pending_children - to_visit - - if T: - for n in nodes: - T.write(self.trace_message(' removing node %s from the pending children set\n' % - self.trace_node(n))) - try: - while len(to_visit): - node = to_visit.pop() - node_func(node) - - # Prune recursion by flushing the waiting children - # list immediately. 
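# --- editor's note (not part of the original patch) ---------------------------
# The ref_count arithmetic a few lines up is easy to misread:
# add_to_waiting_parents() returns 1 only when this parent was not already
# registered on that child, so each distinct unfinished child is counted once,
# and the parent only becomes a candidate again when the count falls back to
# zero.  A toy illustration with hypothetical stand-in objects:

class _ToyChild:
    def __init__(self):
        self.waiting_parents = set()
    def add_to_waiting_parents(self, parent):
        if parent in self.waiting_parents:
            return 0                  # already registered: do not count it twice
        self.waiting_parents.add(parent)
        return 1

class _ToyParent:
    ref_count = 0

parent, child = _ToyParent(), _ToyChild()
parent.ref_count += child.add_to_waiting_parents(parent)   # ref_count == 1
parent.ref_count += child.add_to_waiting_parents(parent)   # still 1, not 2
# -------------------------------------------------------------------------------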
- parents = node.waiting_parents - node.waiting_parents = set() - - to_visit = to_visit | parents - pending_children = pending_children - parents - - for p in parents: - p.ref_count = p.ref_count - 1 - if T: T.write(self.trace_message(' removing parent %s from the pending children set\n' % - self.trace_node(p))) - except KeyError: - # The container to_visit has been emptied. - pass - - # We have the stick back the pending_children list into the - # taskmaster because the python 1.5.2 compatibility does not - # allow us to use in-place updates - self.pending_children = pending_children - - def stop(self): - """ - Stops the current build completely. - """ - self.next_candidate = self.no_next_candidate - - def cleanup(self): - """ - Check for dependency cycles. - """ - if not self.pending_children: - return - - nclist = [(n, find_cycle([n], set())) for n in self.pending_children] - - genuine_cycles = [ - node for node,cycle in nclist - if cycle or node.get_state() != NODE_EXECUTED - ] - if not genuine_cycles: - # All of the "cycles" found were single nodes in EXECUTED state, - # which is to say, they really weren't cycles. Just return. - return - - desc = 'Found dependency cycle(s):\n' - for node, cycle in nclist: - if cycle: - desc = desc + " " + " -> ".join(map(str, cycle)) + "\n" - else: - desc = desc + \ - " Internal Error: no cycle found for node %s (%s) in state %s\n" % \ - (node, repr(node), StateString[node.get_state()]) - - raise SCons.Errors.UserError(desc) - -# Local Variables: -# tab-width:4 -# indent-tabs-mode:nil -# End: -# vim: set expandtab tabstop=4 shiftwidth=4: diff --git a/SCons/Taskmaster/Job.py b/SCons/Taskmaster/Job.py new file mode 100644 index 0000000..b398790 --- /dev/null +++ b/SCons/Taskmaster/Job.py @@ -0,0 +1,439 @@ +# MIT License +# +# Copyright The SCons Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +"""Serial and Parallel classes to execute build tasks. + +The Jobs class provides a higher level interface to start, +stop, and wait on jobs. +""" + +import SCons.compat + +import os +import signal + +import SCons.Errors +import SCons.Warnings + +# The default stack size (in kilobytes) of the threads used to execute +# jobs in parallel. +# +# We use a stack size of 256 kilobytes. The default on some platforms +# is too large and prevents us from creating enough threads to fully +# parallelized the build. For example, the default stack size on linux +# is 8 MBytes. 
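# --- editor's sketch (not part of the original patch) -------------------------
# The ThreadPool below applies this value with threading.stack_size(), which
# takes a size in bytes and returns the previous setting, so the pool can
# restore it once its workers exist.  The pattern in isolation (the helper
# name `_with_stack_size` and the `make_threads` callback are illustrative):

import threading

def _with_stack_size(kbytes, make_threads):
    """Create threads under a temporary stack size, then restore the old one."""
    try:
        prev = threading.stack_size(kbytes * 1024)   # may raise ValueError
    except (AttributeError, ValueError):
        prev = None                                  # unsupported: just proceed
    try:
        return make_threads()
    finally:
        if prev is not None:
            threading.stack_size(prev)
# -------------------------------------------------------------------------------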
+ +explicit_stack_size = None +default_stack_size = 256 + +interrupt_msg = 'Build interrupted.' + + +class InterruptState: + def __init__(self): + self.interrupted = False + + def set(self): + self.interrupted = True + + def __call__(self): + return self.interrupted + + +class Jobs: + """An instance of this class initializes N jobs, and provides + methods for starting, stopping, and waiting on all N jobs. + """ + + def __init__(self, num, taskmaster): + """ + Create 'num' jobs using the given taskmaster. + + If 'num' is 1 or less, then a serial job will be used, + otherwise a parallel job with 'num' worker threads will + be used. + + The 'num_jobs' attribute will be set to the actual number of jobs + allocated. If more than one job is requested but the Parallel + class can't do it, it gets reset to 1. Wrapping interfaces that + care should check the value of 'num_jobs' after initialization. + """ + + self.job = None + if num > 1: + stack_size = explicit_stack_size + if stack_size is None: + stack_size = default_stack_size + + try: + self.job = Parallel(taskmaster, num, stack_size) + self.num_jobs = num + except NameError: + pass + if self.job is None: + self.job = Serial(taskmaster) + self.num_jobs = 1 + + def run(self, postfunc=lambda: None): + """Run the jobs. + + postfunc() will be invoked after the jobs has run. It will be + invoked even if the jobs are interrupted by a keyboard + interrupt (well, in fact by a signal such as either SIGINT, + SIGTERM or SIGHUP). The execution of postfunc() is protected + against keyboard interrupts and is guaranteed to run to + completion.""" + self._setup_sig_handler() + try: + self.job.start() + finally: + postfunc() + self._reset_sig_handler() + + def were_interrupted(self): + """Returns whether the jobs were interrupted by a signal.""" + return self.job.interrupted() + + def _setup_sig_handler(self): + """Setup an interrupt handler so that SCons can shutdown cleanly in + various conditions: + + a) SIGINT: Keyboard interrupt + b) SIGTERM: kill or system shutdown + c) SIGHUP: Controlling shell exiting + + We handle all of these cases by stopping the taskmaster. It + turns out that it's very difficult to stop the build process + by throwing asynchronously an exception such as + KeyboardInterrupt. For example, the python Condition + variables (threading.Condition) and queues do not seem to be + asynchronous-exception-safe. It would require adding a whole + bunch of try/finally block and except KeyboardInterrupt all + over the place. + + Note also that we have to be careful to handle the case when + SCons forks before executing another process. In that case, we + want the child to exit immediately. + """ + def handler(signum, stack, self=self, parentpid=os.getpid()): + if os.getpid() == parentpid: + self.job.taskmaster.stop() + self.job.interrupted.set() + else: + os._exit(2) # pylint: disable=protected-access + + self.old_sigint = signal.signal(signal.SIGINT, handler) + self.old_sigterm = signal.signal(signal.SIGTERM, handler) + try: + self.old_sighup = signal.signal(signal.SIGHUP, handler) + except AttributeError: + pass + if (self.old_sigint is None) or (self.old_sigterm is None) or \ + (hasattr(self, "old_sighup") and self.old_sighup is None): + msg = "Overwritting previous signal handler which was not installed from Python. " + \ + "Will not be able to reinstate and so will return to default handler." 
+ SCons.Warnings.warn(SCons.Warnings.SConsWarning, msg) + + def _reset_sig_handler(self): + """Restore the signal handlers to their previous state (before the + call to _setup_sig_handler().""" + sigint_to_use = self.old_sigint if self.old_sigint is not None else signal.SIG_DFL + sigterm_to_use = self.old_sigterm if self.old_sigterm is not None else signal.SIG_DFL + signal.signal(signal.SIGINT, sigint_to_use) + signal.signal(signal.SIGTERM, sigterm_to_use) + try: + sigterm_to_use = self.old_sighup if self.old_sighup is not None else signal.SIG_DFL + signal.signal(signal.SIGHUP, sigterm_to_use) + except AttributeError: + pass + +class Serial: + """This class is used to execute tasks in series, and is more efficient + than Parallel, but is only appropriate for non-parallel builds. Only + one instance of this class should be in existence at a time. + + This class is not thread safe. + """ + + def __init__(self, taskmaster): + """Create a new serial job given a taskmaster. + + The taskmaster's next_task() method should return the next task + that needs to be executed, or None if there are no more tasks. The + taskmaster's executed() method will be called for each task when it + is successfully executed, or failed() will be called if it failed to + execute (e.g. execute() raised an exception).""" + + self.taskmaster = taskmaster + self.interrupted = InterruptState() + + def start(self): + """Start the job. This will begin pulling tasks from the taskmaster + and executing them, and return when there are no more tasks. If a task + fails to execute (i.e. execute() raises an exception), then the job will + stop.""" + + while True: + task = self.taskmaster.next_task() + + if task is None: + break + + try: + task.prepare() + if task.needs_execute(): + task.execute() + except Exception: + if self.interrupted(): + try: + raise SCons.Errors.BuildError( + task.targets[0], errstr=interrupt_msg) + except: + task.exception_set() + else: + task.exception_set() + + # Let the failed() callback function arrange for the + # build to stop if that's appropriate. + task.failed() + else: + task.executed() + + task.postprocess() + self.taskmaster.cleanup() + + +# Trap import failure so that everything in the Job module but the +# Parallel class (and its dependent classes) will work if the interpreter +# doesn't support threads. +try: + import queue + import threading +except ImportError: + pass +else: + class Worker(threading.Thread): + """A worker thread waits on a task to be posted to its request queue, + dequeues the task, executes it, and posts a tuple including the task + and a boolean indicating whether the task executed successfully. """ + + def __init__(self, requestQueue, resultsQueue, interrupted): + super().__init__() + self.daemon = True + self.requestQueue = requestQueue + self.resultsQueue = resultsQueue + self.interrupted = interrupted + self.start() + + def run(self): + while True: + task = self.requestQueue.get() + + if task is None: + # The "None" value is used as a sentinel by + # ThreadPool.cleanup(). This indicates that there + # are no more tasks, so we should quit. 
+ break + + try: + if self.interrupted(): + raise SCons.Errors.BuildError( + task.targets[0], errstr=interrupt_msg) + task.execute() + except: + task.exception_set() + ok = False + else: + ok = True + + self.resultsQueue.put((task, ok)) + + class ThreadPool: + """This class is responsible for spawning and managing worker threads.""" + + def __init__(self, num, stack_size, interrupted): + """Create the request and reply queues, and 'num' worker threads. + + One must specify the stack size of the worker threads. The + stack size is specified in kilobytes. + """ + self.requestQueue = queue.Queue(0) + self.resultsQueue = queue.Queue(0) + + try: + prev_size = threading.stack_size(stack_size*1024) + except AttributeError as e: + # Only print a warning if the stack size has been + # explicitly set. + if explicit_stack_size is not None: + msg = "Setting stack size is unsupported by this version of Python:\n " + \ + e.args[0] + SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg) + except ValueError as e: + msg = "Setting stack size failed:\n " + str(e) + SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg) + + # Create worker threads + self.workers = [] + for _ in range(num): + worker = Worker(self.requestQueue, self.resultsQueue, interrupted) + self.workers.append(worker) + + if 'prev_size' in locals(): + threading.stack_size(prev_size) + + def put(self, task): + """Put task into request queue.""" + self.requestQueue.put(task) + + def get(self): + """Remove and return a result tuple from the results queue.""" + return self.resultsQueue.get() + + def preparation_failed(self, task): + self.resultsQueue.put((task, False)) + + def cleanup(self): + """ + Shuts down the thread pool, giving each worker thread a + chance to shut down gracefully. + """ + # For each worker thread, put a sentinel "None" value + # on the requestQueue (indicating that there's no work + # to be done) so that each worker thread will get one and + # terminate gracefully. + for _ in self.workers: + self.requestQueue.put(None) + + # Wait for all of the workers to terminate. + # + # If we don't do this, later Python versions (2.4, 2.5) often + # seem to raise exceptions during shutdown. This happens + # in requestQueue.get(), as an assertion failure that + # requestQueue.not_full is notified while not acquired, + # seemingly because the main thread has shut down (or is + # in the process of doing so) while the workers are still + # trying to pull sentinels off the requestQueue. + # + # Normally these terminations should happen fairly quickly, + # but we'll stick a one-second timeout on here just in case + # someone gets hung. + for worker in self.workers: + worker.join(1.0) + self.workers = [] + + class Parallel: + """This class is used to execute tasks in parallel, and is somewhat + less efficient than Serial, but is appropriate for parallel builds. + + This class is thread safe. + """ + + def __init__(self, taskmaster, num, stack_size): + """Create a new parallel job given a taskmaster. + + The taskmaster's next_task() method should return the next + task that needs to be executed, or None if there are no more + tasks. The taskmaster's executed() method will be called + for each task when it is successfully executed, or failed() + will be called if the task failed to execute (i.e. execute() + raised an exception). 
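# --- editor's sketch (not part of the original patch) -------------------------
# Viewed on its own, the ThreadPool above is a plain producer/consumer pair:
# put() hands a task to whichever worker is free, get() blocks for one
# (task, ok) result, and cleanup() shuts the workers down with one None
# sentinel each.  A hypothetical standalone use (assumes the tasks are already
# prepared and that the ThreadPool class above is in scope):

def run_through_pool(tasks, num=4, stack_size=256, interrupted=lambda: False):
    pool = ThreadPool(num, stack_size, interrupted)
    outstanding, failed = 0, []
    try:
        for task in tasks:
            pool.put(task)            # dispatched to an idle worker thread
            outstanding += 1
        while outstanding:
            task, ok = pool.get()     # blocks until some worker finishes
            outstanding -= 1
            if not ok:
                failed.append(task)   # worker already called task.exception_set()
    finally:
        pool.cleanup()                # sentinels, then join each worker
    return failed
# -------------------------------------------------------------------------------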
+ + Note: calls to taskmaster are serialized, but calls to + execute() on distinct tasks are not serialized, because + that is the whole point of parallel jobs: they can execute + multiple tasks simultaneously. """ + + self.taskmaster = taskmaster + self.interrupted = InterruptState() + self.tp = ThreadPool(num, stack_size, self.interrupted) + + self.maxjobs = num + + def start(self): + """Start the job. This will begin pulling tasks from the + taskmaster and executing them, and return when there are no + more tasks. If a task fails to execute (i.e. execute() raises + an exception), then the job will stop.""" + + jobs = 0 + + while True: + # Start up as many available tasks as we're + # allowed to. + while jobs < self.maxjobs: + task = self.taskmaster.next_task() + if task is None: + break + + try: + # prepare task for execution + task.prepare() + except: + task.exception_set() + task.failed() + task.postprocess() + else: + if task.needs_execute(): + # dispatch task + self.tp.put(task) + jobs += 1 + else: + task.executed() + task.postprocess() + + if not task and not jobs: break + + # Let any/all completed tasks finish up before we go + # back and put the next batch of tasks on the queue. + while True: + task, ok = self.tp.get() + jobs -= 1 + + if ok: + task.executed() + else: + if self.interrupted(): + try: + raise SCons.Errors.BuildError( + task.targets[0], errstr=interrupt_msg) + except: + task.exception_set() + + # Let the failed() callback function arrange + # for the build to stop if that's appropriate. + task.failed() + + task.postprocess() + + if self.tp.resultsQueue.empty(): + break + + self.tp.cleanup() + self.taskmaster.cleanup() + +# Local Variables: +# tab-width:4 +# indent-tabs-mode:nil +# End: +# vim: set expandtab tabstop=4 shiftwidth=4: diff --git a/SCons/Taskmaster/JobTests.py b/SCons/Taskmaster/JobTests.py new file mode 100644 index 0000000..374d3f3 --- /dev/null +++ b/SCons/Taskmaster/JobTests.py @@ -0,0 +1,574 @@ +# MIT License +# +# Copyright The SCons Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
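# --- editor's sketch (not part of the original patch) -------------------------
# Putting the pieces together: callers normally go through Jobs rather than
# building Serial or Parallel directly.  Jobs picks Serial for one job (or when
# threads are unavailable) and Parallel otherwise, wraps run() in the signal
# handlers, and exposes were_interrupted() afterwards.  A minimal driver
# (`taskmaster` stands for any object implementing the Taskmaster protocol):

from SCons.Taskmaster.Job import Jobs

def build(taskmaster, num_jobs=1):
    jobs = Jobs(num_jobs, taskmaster)    # jobs.num_jobs may have fallen back to 1
    jobs.run(postfunc=lambda: print("post-run hook, runs even if interrupted"))
    return not jobs.were_interrupted()
# -------------------------------------------------------------------------------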
+ +import unittest +import random +import math +import sys +import os + +import TestUnit + +import SCons.Taskmaster.Job + + +def get_cpu_nums(): + # Linux, Unix and MacOS: + if hasattr( os, "sysconf" ): + if "SC_NPROCESSORS_ONLN" in os.sysconf_names: + # Linux & Unix: + ncpus = os.sysconf( "SC_NPROCESSORS_ONLN" ) + if isinstance(ncpus, int) and ncpus > 0: + return ncpus + else: # OSX: + return int(os.popen2("sysctl -n hw.ncpu")[1].read() ) + # Windows: + if "NUMBER_OF_PROCESSORS" in os.environ: + ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]) + if ncpus > 0: + return ncpus + return 1 # Default + +# a large number +num_sines = 500 + +# how many parallel jobs to perform for the test +num_jobs = get_cpu_nums()*2 + +# in case we werent able to detect num cpus for this test +# just make a hardcoded suffcient large number, though not future proof +if num_jobs == 2: + num_jobs = 33 + +# how many tasks to perform for the test +num_tasks = num_jobs*5 + +class DummyLock: + """fake lock class to use if threads are not supported""" + def acquire(self): + pass + + def release(self): + pass + +class NoThreadsException(Exception): + """raised by the ParallelTestCase if threads are not supported""" + + def __str__(self): + return "the interpreter doesn't support threads" + +class Task: + """A dummy task class for testing purposes.""" + + def __init__(self, i, taskmaster): + self.i = i + self.taskmaster = taskmaster + self.was_executed = 0 + self.was_prepared = 0 + + def prepare(self): + self.was_prepared = 1 + + def _do_something(self): + pass + + def needs_execute(self): + return True + + def execute(self): + self.taskmaster.test_case.assertTrue(self.was_prepared, + "the task wasn't prepared") + + self.taskmaster.guard.acquire() + self.taskmaster.begin_list.append(self.i) + self.taskmaster.guard.release() + + # while task is executing, represent this in the parallel_list + # and then turn it off + self.taskmaster.parallel_list[self.i] = 1 + self._do_something() + self.taskmaster.parallel_list[self.i] = 0 + + # check if task was executing while another was also executing + for j in range(1, self.taskmaster.num_tasks): + if self.taskmaster.parallel_list[j + 1] == 1: + self.taskmaster.found_parallel = True + break + + self.was_executed = 1 + + self.taskmaster.guard.acquire() + self.taskmaster.end_list.append(self.i) + self.taskmaster.guard.release() + + def executed(self): + self.taskmaster.num_executed = self.taskmaster.num_executed + 1 + + self.taskmaster.test_case.assertTrue(self.was_prepared, + "the task wasn't prepared") + self.taskmaster.test_case.assertTrue(self.was_executed, + "the task wasn't really executed") + self.taskmaster.test_case.assertTrue(isinstance(self, Task), + "the task wasn't really a Task instance") + + def failed(self): + self.taskmaster.num_failed = self.taskmaster.num_failed + 1 + self.taskmaster.stop = 1 + self.taskmaster.test_case.assertTrue(self.was_prepared, + "the task wasn't prepared") + + def postprocess(self): + self.taskmaster.num_postprocessed = self.taskmaster.num_postprocessed + 1 + + def exception_set(self): + pass + +class RandomTask(Task): + def _do_something(self): + # do something that will take some random amount of time: + for i in range(random.randrange(0, 100 + num_sines, 1)): + x = math.sin(i) + time.sleep(0.01) + +class ExceptionTask: + """A dummy task class for testing purposes.""" + + def __init__(self, i, taskmaster): + self.taskmaster = taskmaster + self.was_prepared = 0 + + def prepare(self): + self.was_prepared = 1 + + def needs_execute(self): + 
return True + + def execute(self): + raise Exception + + def executed(self): + self.taskmaster.num_executed = self.taskmaster.num_executed + 1 + + self.taskmaster.test_case.assertTrue(self.was_prepared, + "the task wasn't prepared") + self.taskmaster.test_case.assertTrue(self.was_executed, + "the task wasn't really executed") + self.taskmaster.test_case.assertTrue(self.__class__ is Task, + "the task wasn't really a Task instance") + + def failed(self): + self.taskmaster.num_failed = self.taskmaster.num_failed + 1 + self.taskmaster.stop = 1 + self.taskmaster.test_case.assertTrue(self.was_prepared, + "the task wasn't prepared") + + def postprocess(self): + self.taskmaster.num_postprocessed = self.taskmaster.num_postprocessed + 1 + + def exception_set(self): + self.taskmaster.exception_set() + +class Taskmaster: + """A dummy taskmaster class for testing the job classes.""" + + def __init__(self, n, test_case, Task): + """n is the number of dummy tasks to perform.""" + + self.test_case = test_case + self.stop = None + self.num_tasks = n + self.num_iterated = 0 + self.num_executed = 0 + self.num_failed = 0 + self.num_postprocessed = 0 + self.parallel_list = [0] * (n+1) + self.found_parallel = False + self.Task = Task + + # 'guard' guards 'task_begin_list' and 'task_end_list' + try: + import threading + self.guard = threading.Lock() + except ImportError: + self.guard = DummyLock() + + # keep track of the order tasks are begun in + self.begin_list = [] + + # keep track of the order tasks are completed in + self.end_list = [] + + def next_task(self): + if self.stop or self.all_tasks_are_iterated(): + return None + else: + self.num_iterated = self.num_iterated + 1 + return self.Task(self.num_iterated, self) + + def all_tasks_are_executed(self): + return self.num_executed == self.num_tasks + + def all_tasks_are_iterated(self): + return self.num_iterated == self.num_tasks + + def all_tasks_are_postprocessed(self): + return self.num_postprocessed == self.num_tasks + + def tasks_were_serial(self): + """analyze the task order to see if they were serial""" + return not self.found_parallel + + def exception_set(self): + pass + + def cleanup(self): + pass + +SaveThreadPool = None +ThreadPoolCallList = [] + +class ParallelTestCase(unittest.TestCase): + def runTest(self): + """test parallel jobs""" + + try: + import threading + except ImportError: + raise NoThreadsException() + + taskmaster = Taskmaster(num_tasks, self, RandomTask) + jobs = SCons.Taskmaster.Job.Jobs(num_jobs, taskmaster) + jobs.run() + + self.assertTrue(not taskmaster.tasks_were_serial(), + "the tasks were not executed in parallel") + self.assertTrue(taskmaster.all_tasks_are_executed(), + "all the tests were not executed") + self.assertTrue(taskmaster.all_tasks_are_iterated(), + "all the tests were not iterated over") + self.assertTrue(taskmaster.all_tasks_are_postprocessed(), + "all the tests were not postprocessed") + self.assertFalse(taskmaster.num_failed, + "some task(s) failed to execute") + + # Verify that parallel jobs will pull all of the completed tasks + # out of the queue at once, instead of one by one. We do this by + # replacing the default ThreadPool class with one that records the + # order in which tasks are put() and get() to/from the pool, and + # which sleeps a little bit before call get() to let the initial + # tasks complete and get their notifications on the resultsQueue. 
+ + class SleepTask(Task): + def _do_something(self): + time.sleep(0.01) + + global SaveThreadPool + SaveThreadPool = SCons.Taskmaster.Job.ThreadPool + + class WaitThreadPool(SaveThreadPool): + def put(self, task): + ThreadPoolCallList.append('put(%s)' % task.i) + return SaveThreadPool.put(self, task) + def get(self): + time.sleep(0.05) + result = SaveThreadPool.get(self) + ThreadPoolCallList.append('get(%s)' % result[0].i) + return result + + SCons.Taskmaster.Job.ThreadPool = WaitThreadPool + + try: + taskmaster = Taskmaster(3, self, SleepTask) + jobs = SCons.Taskmaster.Job.Jobs(2, taskmaster) + jobs.run() + + # The key here is that we get(1) and get(2) from the + # resultsQueue before we put(3), but get(1) and get(2) can + # be in either order depending on how the first two parallel + # tasks get scheduled by the operating system. + expect = [ + ['put(1)', 'put(2)', 'get(1)', 'get(2)', 'put(3)', 'get(3)'], + ['put(1)', 'put(2)', 'get(2)', 'get(1)', 'put(3)', 'get(3)'], + ] + assert ThreadPoolCallList in expect, ThreadPoolCallList + + finally: + SCons.Taskmaster.Job.ThreadPool = SaveThreadPool + +class SerialTestCase(unittest.TestCase): + def runTest(self): + """test a serial job""" + + taskmaster = Taskmaster(num_tasks, self, RandomTask) + jobs = SCons.Taskmaster.Job.Jobs(1, taskmaster) + jobs.run() + + self.assertTrue(taskmaster.tasks_were_serial(), + "the tasks were not executed in series") + self.assertTrue(taskmaster.all_tasks_are_executed(), + "all the tests were not executed") + self.assertTrue(taskmaster.all_tasks_are_iterated(), + "all the tests were not iterated over") + self.assertTrue(taskmaster.all_tasks_are_postprocessed(), + "all the tests were not postprocessed") + self.assertFalse(taskmaster.num_failed, + "some task(s) failed to execute") + +class NoParallelTestCase(unittest.TestCase): + def runTest(self): + """test handling lack of parallel support""" + def NoParallel(tm, num, stack_size): + raise NameError + save_Parallel = SCons.Taskmaster.Job.Parallel + SCons.Taskmaster.Job.Parallel = NoParallel + try: + taskmaster = Taskmaster(num_tasks, self, RandomTask) + jobs = SCons.Taskmaster.Job.Jobs(2, taskmaster) + self.assertTrue(jobs.num_jobs == 1, + "unexpected number of jobs %d" % jobs.num_jobs) + jobs.run() + self.assertTrue(taskmaster.tasks_were_serial(), + "the tasks were not executed in series") + self.assertTrue(taskmaster.all_tasks_are_executed(), + "all the tests were not executed") + self.assertTrue(taskmaster.all_tasks_are_iterated(), + "all the tests were not iterated over") + self.assertTrue(taskmaster.all_tasks_are_postprocessed(), + "all the tests were not postprocessed") + self.assertFalse(taskmaster.num_failed, + "some task(s) failed to execute") + finally: + SCons.Taskmaster.Job.Parallel = save_Parallel + + +class SerialExceptionTestCase(unittest.TestCase): + def runTest(self): + """test a serial job with tasks that raise exceptions""" + + taskmaster = Taskmaster(num_tasks, self, ExceptionTask) + jobs = SCons.Taskmaster.Job.Jobs(1, taskmaster) + jobs.run() + + self.assertFalse(taskmaster.num_executed, + "a task was executed") + self.assertTrue(taskmaster.num_iterated == 1, + "exactly one task should have been iterated") + self.assertTrue(taskmaster.num_failed == 1, + "exactly one task should have failed") + self.assertTrue(taskmaster.num_postprocessed == 1, + "exactly one task should have been postprocessed") + +class ParallelExceptionTestCase(unittest.TestCase): + def runTest(self): + """test parallel jobs with tasks that raise exceptions""" + + 
taskmaster = Taskmaster(num_tasks, self, ExceptionTask) + jobs = SCons.Taskmaster.Job.Jobs(num_jobs, taskmaster) + jobs.run() + + self.assertFalse(taskmaster.num_executed, + "a task was executed") + self.assertTrue(taskmaster.num_iterated >= 1, + "one or more task should have been iterated") + self.assertTrue(taskmaster.num_failed >= 1, + "one or more tasks should have failed") + self.assertTrue(taskmaster.num_postprocessed >= 1, + "one or more tasks should have been postprocessed") + +#--------------------------------------------------------------------- +# Above tested Job object with contrived Task and Taskmaster objects. +# Now test Job object with actual Task and Taskmaster objects. + +import SCons.Taskmaster +import SCons.Node +import time + +class DummyNodeInfo: + def update(self, obj): + pass + +class testnode (SCons.Node.Node): + def __init__(self): + super().__init__() + self.expect_to_be = SCons.Node.executed + self.ninfo = DummyNodeInfo() + +class goodnode (testnode): + def __init__(self): + super().__init__() + self.expect_to_be = SCons.Node.up_to_date + self.ninfo = DummyNodeInfo() + +class slowgoodnode (goodnode): + def prepare(self): + # Delay to allow scheduled Jobs to run while the dispatcher + # sleeps. Keep this short because it affects the time taken + # by this test. + time.sleep(0.15) + goodnode.prepare(self) + +class badnode (goodnode): + def __init__(self): + super().__init__() + self.expect_to_be = SCons.Node.failed + def build(self, **kw): + raise Exception('badnode exception') + +class slowbadnode (badnode): + def build(self, **kw): + # Appears to take a while to build, allowing faster builds to + # overlap. Time duration is not especially important, but if + # it is faster than slowgoodnode then these could complete + # while the scheduler is sleeping. + time.sleep(0.05) + raise Exception('slowbadnode exception') + +class badpreparenode (badnode): + def prepare(self): + raise Exception('badpreparenode exception') + +class _SConsTaskTest(unittest.TestCase): + + def _test_seq(self, num_jobs): + for node_seq in [ + [goodnode], + [badnode], + [slowbadnode], + [slowgoodnode], + [badpreparenode], + [goodnode, badnode], + [slowgoodnode, badnode], + [goodnode, slowbadnode], + [goodnode, goodnode, goodnode, slowbadnode], + [goodnode, slowbadnode, badpreparenode, slowgoodnode], + [goodnode, slowbadnode, slowgoodnode, badnode] + ]: + + self._do_test(num_jobs, node_seq) + + def _do_test(self, num_jobs, node_seq): + + testnodes = [] + for tnum in range(num_tasks): + testnodes.append(node_seq[tnum % len(node_seq)]()) + + taskmaster = SCons.Taskmaster.Taskmaster(testnodes, + tasker=SCons.Taskmaster.AlwaysTask) + + jobs = SCons.Taskmaster.Job.Jobs(num_jobs, taskmaster) + + # Exceptions thrown by tasks are not actually propagated to + # this level, but are instead stored in the Taskmaster. + + jobs.run() + + # Now figure out if tests proceeded correctly. The first test + # that fails will shutdown the initiation of subsequent tests, + # but any tests currently queued for execution will still be + # processed, and any tests that completed before the failure + # would have resulted in new tests being queued for execution. + + # Apply the following operational heuristics of Job.py: + # 0) An initial jobset of tasks will be queued before any + # good/bad results are obtained (from "execute" of task in + # thread). + # 1) A goodnode will complete immediately on its thread and + # allow another node to be queued for execution. 
+ # 2) A badnode will complete immediately and suppress any + # subsequent execution queuing, but all currently queued + # tasks will still be processed. + # 3) A slowbadnode will fail later. It will block slots in + # the job queue. Nodes that complete immediately will + # allow other nodes to be queued in their place, and this + # will continue until either (#2) above or until all job + # slots are filled with slowbadnode entries. + + # One approach to validating this test would be to try to + # determine exactly how many nodes executed, how many didn't, + # and the results of each, and then to assert failure on any + # mismatch (including the total number of built nodes). + # However, while this is possible to do for a single-processor + # system, it is nearly impossible to predict correctly for a + # multi-processor system and still test the characteristics of + # delayed execution nodes. Stated another way, multithreading + # is inherently non-deterministic unless you can completely + # characterize the entire system, and since that's not + # possible here, we shouldn't try. + + # Therefore, this test will simply scan the set of nodes to + # see if the node was executed or not and if it was executed + # that it obtained the expected value for that node + # (i.e. verifying we don't get failure crossovers or + # mislabelling of results). + + for N in testnodes: + state = N.get_state() + self.assertTrue(state in [SCons.Node.no_state, N.expect_to_be], + "Node %s got unexpected result: %s" % (N, state)) + + self.assertTrue([N for N in testnodes if N.get_state()], + "no nodes ran at all.") + + +class SerialTaskTest(_SConsTaskTest): + def runTest(self): + """test serial jobs with actual Taskmaster and Task""" + self._test_seq(1) + + +class ParallelTaskTest(_SConsTaskTest): + def runTest(self): + """test parallel jobs with actual Taskmaster and Task""" + self._test_seq(num_jobs) + + + +#--------------------------------------------------------------------- + +def suite(): + suite = unittest.TestSuite() + suite.addTest(ParallelTestCase()) + suite.addTest(SerialTestCase()) + suite.addTest(NoParallelTestCase()) + suite.addTest(SerialExceptionTestCase()) + suite.addTest(ParallelExceptionTestCase()) + suite.addTest(SerialTaskTest()) + suite.addTest(ParallelTaskTest()) + return suite + +if __name__ == "__main__": + runner = TestUnit.cli.get_runner() + result = runner().run(suite()) + if (len(result.failures) == 0 + and len(result.errors) == 1 + and isinstance(result.errors[0][0], SerialTestCase) + and isinstance(result.errors[0][1][0], NoThreadsException)): + sys.exit(2) + elif not result.wasSuccessful(): + sys.exit(1) + +# Local Variables: +# tab-width:4 +# indent-tabs-mode:nil +# End: +# vim: set expandtab tabstop=4 shiftwidth=4: diff --git a/SCons/Taskmaster/TaskmasterTests.py b/SCons/Taskmaster/TaskmasterTests.py new file mode 100644 index 0000000..f20fd71 --- /dev/null +++ b/SCons/Taskmaster/TaskmasterTests.py @@ -0,0 +1,1257 @@ +# MIT License +# +# Copyright The SCons Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall 
be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import SCons.compat + +import sys +import unittest + + +import SCons.Taskmaster +import SCons.Errors + + +built_text = None +cache_text = [] +visited_nodes = [] +executed = None +scan_called = 0 + +class Node: + def __init__(self, name, kids = [], scans = []): + self.name = name + self.kids = kids + self.scans = scans + self.cached = 0 + self.scanned = 0 + self.scanner = None + self.targets = [self] + self.prerequisites = None + class Builder: + def targets(self, node): + return node.targets + self.builder = Builder() + self.bsig = None + self.csig = None + self.state = SCons.Node.no_state + self.prepared = None + self.ref_count = 0 + self.waiting_parents = set() + self.waiting_s_e = set() + self.side_effect = 0 + self.side_effects = [] + self.alttargets = [] + self.postprocessed = None + self._bsig_val = None + self._current_val = 0 + self.always_build = None + + def disambiguate(self): + return self + + def push_to_cache(self): + pass + + def retrieve_from_cache(self): + global cache_text + if self.cached: + cache_text.append(self.name + " retrieved") + return self.cached + + def make_ready(self): + pass + + def prepare(self): + self.prepared = 1 + self.get_binfo() + + def build(self): + global built_text + built_text = self.name + " built" + + def remove(self): + pass + + # The following four methods new_binfo(), del_binfo(), + # get_binfo(), clear() as well as its calls have been added + # to support the cached_execute() test (issue #2720). + # They are full copies (or snippets) of their actual + # counterparts in the Node class... + def new_binfo(self): + binfo = "binfo" + return binfo + + def del_binfo(self): + """Delete the build info from this node.""" + try: + delattr(self, 'binfo') + except AttributeError: + pass + + def get_binfo(self): + """Fetch a node's build information.""" + try: + return self.binfo + except AttributeError: + pass + + binfo = self.new_binfo() + self.binfo = binfo + + return binfo + + def clear(self): + # The del_binfo() call here isn't necessary for normal execution, + # but is for interactive mode, where we might rebuild the same + # target and need to start from scratch. + self.del_binfo() + + def built(self): + global built_text + if not self.cached: + built_text = built_text + " really" + + # Clear the implicit dependency caches of any Nodes + # waiting for this Node to be built. 
+ for parent in self.waiting_parents: + parent.implicit = None + + self.clear() + + def release_target_info(self): + pass + + def has_builder(self): + return self.builder is not None + + def is_derived(self): + return self.has_builder or self.side_effect + + def alter_targets(self): + return self.alttargets, None + + def visited(self): + global visited_nodes + visited_nodes.append(self.name) + + def children(self): + if not self.scanned: + self.scan() + self.scanned = 1 + return self.kids + + def scan(self): + global scan_called + scan_called = scan_called + 1 + self.kids = self.kids + self.scans + self.scans = [] + + def scanner_key(self): + return self.name + + def add_to_waiting_parents(self, node): + wp = self.waiting_parents + if node in wp: + return 0 + wp.add(node) + return 1 + + def get_state(self): + return self.state + + def set_state(self, state): + self.state = state + + def set_bsig(self, bsig): + self.bsig = bsig + + def set_csig(self, csig): + self.csig = csig + + def store_csig(self): + pass + + def store_bsig(self): + pass + + def is_pseudo_derived(self): + pass + + def is_up_to_date(self): + return self._current_val + + def depends_on(self, nodes): + for node in nodes: + if node in self.kids: + return 1 + return 0 + + def __str__(self): + return self.name + + def postprocess(self): + self.postprocessed = 1 + self.waiting_parents = set() + + def get_executor(self): + if not hasattr(self, 'executor'): + class Executor: + def prepare(self): + pass + def get_action_targets(self): + return self.targets + def get_all_targets(self): + return self.targets + def get_all_children(self): + result = [] + for node in self.targets: + result.extend(node.children()) + return result + def get_all_prerequisites(self): + return [] + def get_action_side_effects(self): + return [] + self.executor = Executor() + self.executor.targets = self.targets + return self.executor + +class OtherError(Exception): + pass + +class MyException(Exception): + pass + + +class TaskmasterTestCase(unittest.TestCase): + + def test_next_task(self): + """Test fetching the next task + """ + global built_text + + n1 = Node("n1") + tm = SCons.Taskmaster.Taskmaster([n1, n1]) + t = tm.next_task() + t.prepare() + t.execute() + t = tm.next_task() + assert t is None + + n1 = Node("n1") + n2 = Node("n2") + n3 = Node("n3", [n1, n2]) + + tm = SCons.Taskmaster.Taskmaster([n3]) + + t = tm.next_task() + t.prepare() + t.execute() + assert built_text == "n1 built", built_text + t.executed() + t.postprocess() + + t = tm.next_task() + t.prepare() + t.execute() + assert built_text == "n2 built", built_text + t.executed() + t.postprocess() + + t = tm.next_task() + t.prepare() + t.execute() + assert built_text == "n3 built", built_text + t.executed() + t.postprocess() + + assert tm.next_task() is None + + built_text = "up to date: " + top_node = n3 + + class MyTask(SCons.Taskmaster.AlwaysTask): + def execute(self): + global built_text + if self.targets[0].get_state() == SCons.Node.up_to_date: + if self.top: + built_text = self.targets[0].name + " up-to-date top" + else: + built_text = self.targets[0].name + " up-to-date" + else: + self.targets[0].build() + + n1.set_state(SCons.Node.no_state) + n1._current_val = 1 + n2.set_state(SCons.Node.no_state) + n2._current_val = 1 + n3.set_state(SCons.Node.no_state) + n3._current_val = 1 + tm = SCons.Taskmaster.Taskmaster(targets = [n3], tasker = MyTask) + + t = tm.next_task() + t.prepare() + t.execute() + assert built_text == "n1 up-to-date", built_text + t.executed() + t.postprocess() + + t = 
tm.next_task() + t.prepare() + t.execute() + assert built_text == "n2 up-to-date", built_text + t.executed() + t.postprocess() + + t = tm.next_task() + t.prepare() + t.execute() + assert built_text == "n3 up-to-date top", built_text + t.executed() + t.postprocess() + + assert tm.next_task() is None + + + n1 = Node("n1") + n2 = Node("n2") + n3 = Node("n3", [n1, n2]) + n4 = Node("n4") + n5 = Node("n5", [n3, n4]) + tm = SCons.Taskmaster.Taskmaster([n5]) + + t1 = tm.next_task() + assert t1.get_target() == n1 + + t2 = tm.next_task() + assert t2.get_target() == n2 + + t4 = tm.next_task() + assert t4.get_target() == n4 + t4.executed() + t4.postprocess() + + t1.executed() + t1.postprocess() + t2.executed() + t2.postprocess() + t3 = tm.next_task() + assert t3.get_target() == n3 + + t3.executed() + t3.postprocess() + t5 = tm.next_task() + assert t5.get_target() == n5, t5.get_target() + t5.executed() + t5.postprocess() + + assert tm.next_task() is None + + + n4 = Node("n4") + n4.set_state(SCons.Node.executed) + tm = SCons.Taskmaster.Taskmaster([n4]) + assert tm.next_task() is None + + n1 = Node("n1") + n2 = Node("n2", [n1]) + tm = SCons.Taskmaster.Taskmaster([n2,n2]) + t = tm.next_task() + t.executed() + t.postprocess() + t = tm.next_task() + assert tm.next_task() is None + + + n1 = Node("n1") + n2 = Node("n2") + n3 = Node("n3", [n1], [n2]) + tm = SCons.Taskmaster.Taskmaster([n3]) + t = tm.next_task() + target = t.get_target() + assert target == n1, target + t.executed() + t.postprocess() + t = tm.next_task() + target = t.get_target() + assert target == n2, target + t.executed() + t.postprocess() + t = tm.next_task() + target = t.get_target() + assert target == n3, target + t.executed() + t.postprocess() + assert tm.next_task() is None + + n1 = Node("n1") + n2 = Node("n2") + n3 = Node("n3", [n1, n2]) + n4 = Node("n4", [n3]) + n5 = Node("n5", [n3]) + global scan_called + scan_called = 0 + tm = SCons.Taskmaster.Taskmaster([n4]) + t = tm.next_task() + assert t.get_target() == n1 + t.executed() + t.postprocess() + t = tm.next_task() + assert t.get_target() == n2 + t.executed() + t.postprocess() + t = tm.next_task() + assert t.get_target() == n3 + t.executed() + t.postprocess() + t = tm.next_task() + assert t.get_target() == n4 + t.executed() + t.postprocess() + assert tm.next_task() is None + assert scan_called == 4, scan_called + + tm = SCons.Taskmaster.Taskmaster([n5]) + t = tm.next_task() + assert t.get_target() == n5, t.get_target() + t.executed() + assert tm.next_task() is None + assert scan_called == 5, scan_called + + n1 = Node("n1") + n2 = Node("n2") + n3 = Node("n3") + n4 = Node("n4", [n1,n2,n3]) + n5 = Node("n5", [n4]) + n3.side_effect = 1 + n1.side_effects = n2.side_effects = n3.side_effects = [n4] + tm = SCons.Taskmaster.Taskmaster([n1,n2,n3,n4,n5]) + t = tm.next_task() + assert t.get_target() == n1 + assert n4.state == SCons.Node.executing, n4.state + t.executed() + t.postprocess() + t = tm.next_task() + assert t.get_target() == n2 + t.executed() + t.postprocess() + t = tm.next_task() + assert t.get_target() == n3 + t.executed() + t.postprocess() + t = tm.next_task() + assert t.get_target() == n4 + t.executed() + t.postprocess() + t = tm.next_task() + assert t.get_target() == n5 + assert not tm.next_task() + t.executed() + t.postprocess() + + n1 = Node("n1") + n2 = Node("n2") + n3 = Node("n3") + n4 = Node("n4", [n1,n2,n3]) + def reverse(dependencies): + dependencies.reverse() + return dependencies + tm = SCons.Taskmaster.Taskmaster([n4], order=reverse) + t = tm.next_task() + assert 
t.get_target() == n3, t.get_target() + t.executed() + t.postprocess() + t = tm.next_task() + assert t.get_target() == n2, t.get_target() + t.executed() + t.postprocess() + t = tm.next_task() + assert t.get_target() == n1, t.get_target() + t.executed() + t.postprocess() + t = tm.next_task() + assert t.get_target() == n4, t.get_target() + t.executed() + t.postprocess() + + n5 = Node("n5") + n6 = Node("n6") + n7 = Node("n7") + n6.alttargets = [n7] + + tm = SCons.Taskmaster.Taskmaster([n5]) + t = tm.next_task() + assert t.get_target() == n5 + t.executed() + t.postprocess() + + tm = SCons.Taskmaster.Taskmaster([n6]) + t = tm.next_task() + assert t.get_target() == n7 + t.executed() + t.postprocess() + t = tm.next_task() + assert t.get_target() == n6 + t.executed() + t.postprocess() + + n1 = Node("n1") + n2 = Node("n2", [n1]) + n1.set_state(SCons.Node.failed) + tm = SCons.Taskmaster.Taskmaster([n2]) + assert tm.next_task() is None + + n1 = Node("n1") + n2 = Node("n2") + n1.targets = [n1, n2] + n1._current_val = 1 + tm = SCons.Taskmaster.Taskmaster([n1]) + t = tm.next_task() + t.executed() + t.postprocess() + + s = n1.get_state() + assert s == SCons.Node.executed, s + s = n2.get_state() + assert s == SCons.Node.executed, s + + + def test_make_ready_out_of_date(self): + """Test the Task.make_ready() method's list of out-of-date Nodes + """ + ood = [] + def TaskGen(tm, targets, top, node, ood=ood): + class MyTask(SCons.Taskmaster.AlwaysTask): + def make_ready(self): + SCons.Taskmaster.Task.make_ready(self) + self.ood.extend(self.out_of_date) + + t = MyTask(tm, targets, top, node) + t.ood = ood + return t + + n1 = Node("n1") + c2 = Node("c2") + c2._current_val = 1 + n3 = Node("n3") + c4 = Node("c4") + c4._current_val = 1 + a5 = Node("a5") + a5._current_val = 1 + a5.always_build = 1 + tm = SCons.Taskmaster.Taskmaster(targets = [n1, c2, n3, c4, a5], + tasker = TaskGen) + + del ood[:] + t = tm.next_task() + assert ood == [n1], ood + + del ood[:] + t = tm.next_task() + assert ood == [], ood + + del ood[:] + t = tm.next_task() + assert ood == [n3], ood + + del ood[:] + t = tm.next_task() + assert ood == [], ood + + del ood[:] + t = tm.next_task() + assert ood == [a5], ood + + def test_make_ready_exception(self): + """Test handling exceptions from Task.make_ready() + """ + class MyTask(SCons.Taskmaster.AlwaysTask): + def make_ready(self): + raise MyException("from make_ready()") + + n1 = Node("n1") + tm = SCons.Taskmaster.Taskmaster(targets = [n1], tasker = MyTask) + t = tm.next_task() + exc_type, exc_value, exc_tb = t.exception + assert exc_type == MyException, repr(exc_type) + assert str(exc_value) == "from make_ready()", exc_value + + def test_needs_execute(self): + """Test that we can't instantiate a Task subclass without needs_execute + + We should be getting: + TypeError: Can't instantiate abstract class MyTask with abstract methods needs_execute + """ + class MyTask(SCons.Taskmaster.Task): + pass + + n1 = Node("n1") + tm = SCons.Taskmaster.Taskmaster(targets=[n1], tasker=MyTask) + with self.assertRaises(TypeError): + _ = tm.next_task() + + def test_make_ready_all(self): + """Test the make_ready_all() method""" + class MyTask(SCons.Taskmaster.AlwaysTask): + make_ready = SCons.Taskmaster.Task.make_ready_all + + n1 = Node("n1") + c2 = Node("c2") + c2._current_val = 1 + n3 = Node("n3") + c4 = Node("c4") + c4._current_val = 1 + + tm = SCons.Taskmaster.Taskmaster(targets = [n1, c2, n3, c4]) + + t = tm.next_task() + target = t.get_target() + assert target is n1, target + assert target.state == 
SCons.Node.executing, target.state + t = tm.next_task() + target = t.get_target() + assert target is c2, target + assert target.state == SCons.Node.up_to_date, target.state + t = tm.next_task() + target = t.get_target() + assert target is n3, target + assert target.state == SCons.Node.executing, target.state + t = tm.next_task() + target = t.get_target() + assert target is c4, target + assert target.state == SCons.Node.up_to_date, target.state + t = tm.next_task() + assert t is None + + n1 = Node("n1") + c2 = Node("c2") + n3 = Node("n3") + c4 = Node("c4") + + tm = SCons.Taskmaster.Taskmaster(targets = [n1, c2, n3, c4], + tasker = MyTask) + + t = tm.next_task() + target = t.get_target() + assert target is n1, target + assert target.state == SCons.Node.executing, target.state + t = tm.next_task() + target = t.get_target() + assert target is c2, target + assert target.state == SCons.Node.executing, target.state + t = tm.next_task() + target = t.get_target() + assert target is n3, target + assert target.state == SCons.Node.executing, target.state + t = tm.next_task() + target = t.get_target() + assert target is c4, target + assert target.state == SCons.Node.executing, target.state + t = tm.next_task() + assert t is None + + + def test_children_errors(self): + """Test errors when fetching the children of a node. + """ + class StopNode(Node): + def children(self): + raise SCons.Errors.StopError("stop!") + class ExitNode(Node): + def children(self): + sys.exit(77) + + n1 = StopNode("n1") + tm = SCons.Taskmaster.Taskmaster([n1]) + t = tm.next_task() + exc_type, exc_value, exc_tb = t.exception + assert exc_type == SCons.Errors.StopError, repr(exc_type) + assert str(exc_value) == "stop!", exc_value + + n2 = ExitNode("n2") + tm = SCons.Taskmaster.Taskmaster([n2]) + t = tm.next_task() + exc_type, exc_value = t.exception + assert exc_type == SCons.Errors.ExplicitExit, repr(exc_type) + assert exc_value.node == n2, exc_value.node + assert exc_value.status == 77, exc_value.status + + def test_cycle_detection(self): + """Test detecting dependency cycles + """ + n1 = Node("n1") + n2 = Node("n2", [n1]) + n3 = Node("n3", [n2]) + n1.kids = [n3] + + tm = SCons.Taskmaster.Taskmaster([n3]) + try: + t = tm.next_task() + except SCons.Errors.UserError as e: + assert str(e) == "Dependency cycle: n3 -> n1 -> n2 -> n3", str(e) + else: + assert 'Did not catch expected UserError' + + def test_next_top_level_candidate(self): + """Test the next_top_level_candidate() method + """ + n1 = Node("n1") + n2 = Node("n2", [n1]) + n3 = Node("n3", [n2]) + + tm = SCons.Taskmaster.Taskmaster([n3]) + t = tm.next_task() + assert t.targets == [n1], t.targets + t.fail_stop() + assert t.targets == [n3], list(map(str, t.targets)) + assert t.top == 1, t.top + + def test_stop(self): + """Test the stop() method + + Both default and overridden in a subclass. 
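# --- editor's note (not part of the original patch) ---------------------------
# Mechanically, Taskmaster.stop() (see the Taskmaster code earlier in this
# patch) just swaps next_candidate for no_next_candidate, which drains any
# queued candidates through will_not_build() and then answers None, which is
# why the assertions below expect tm.next_task() to return None right after
# the call.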
+ """ + global built_text + + n1 = Node("n1") + n2 = Node("n2") + n3 = Node("n3", [n1, n2]) + + tm = SCons.Taskmaster.Taskmaster([n3]) + t = tm.next_task() + t.prepare() + t.execute() + assert built_text == "n1 built", built_text + t.executed() + t.postprocess() + assert built_text == "n1 built really", built_text + + tm.stop() + assert tm.next_task() is None + + class MyTM(SCons.Taskmaster.Taskmaster): + def stop(self): + global built_text + built_text = "MyTM.stop()" + SCons.Taskmaster.Taskmaster.stop(self) + + n1 = Node("n1") + n2 = Node("n2") + n3 = Node("n3", [n1, n2]) + + built_text = None + tm = MyTM([n3]) + tm.next_task().execute() + assert built_text == "n1 built" + + tm.stop() + assert built_text == "MyTM.stop()" + assert tm.next_task() is None + + def test_executed(self): + """Test when a task has been executed + """ + global built_text + global visited_nodes + + n1 = Node("n1") + tm = SCons.Taskmaster.Taskmaster([n1]) + t = tm.next_task() + built_text = "xxx" + visited_nodes = [] + n1.set_state(SCons.Node.executing) + + t.executed() + + s = n1.get_state() + assert s == SCons.Node.executed, s + assert built_text == "xxx really", built_text + assert visited_nodes == ['n1'], visited_nodes + + n2 = Node("n2") + tm = SCons.Taskmaster.Taskmaster([n2]) + t = tm.next_task() + built_text = "should_not_change" + visited_nodes = [] + n2.set_state(None) + + t.executed() + + s = n2.get_state() + assert s is None, s + assert built_text == "should_not_change", built_text + assert visited_nodes == ['n2'], visited_nodes + + n3 = Node("n3") + n4 = Node("n4") + n3.targets = [n3, n4] + tm = SCons.Taskmaster.Taskmaster([n3]) + t = tm.next_task() + visited_nodes = [] + n3.set_state(SCons.Node.up_to_date) + n4.set_state(SCons.Node.executing) + + t.executed() + + s = n3.get_state() + assert s == SCons.Node.up_to_date, s + s = n4.get_state() + assert s == SCons.Node.executed, s + assert visited_nodes == ['n3', 'n4'], visited_nodes + + def test_prepare(self): + """Test preparation of multiple Nodes for a task + """ + n1 = Node("n1") + n2 = Node("n2") + tm = SCons.Taskmaster.Taskmaster([n1, n2]) + t = tm.next_task() + # This next line is moderately bogus. We're just reaching + # in and setting the targets for this task to an array. The + # "right" way to do this would be to have the next_task() call + # set it up by having something that approximates a real Builder + # return this list--but that's more work than is probably + # warranted right now. + n1.get_executor().targets = [n1, n2] + t.prepare() + assert n1.prepared + assert n2.prepared + + n3 = Node("n3") + n4 = Node("n4") + tm = SCons.Taskmaster.Taskmaster([n3, n4]) + t = tm.next_task() + # More bogus reaching in and setting the targets. + n3.set_state(SCons.Node.up_to_date) + n3.get_executor().targets = [n3, n4] + t.prepare() + assert n3.prepared + assert n4.prepared + + # If the Node has had an exception recorded while it was getting + # prepared, then prepare() should raise that exception. 
+ class MyException(Exception): + pass + + built_text = None + n5 = Node("n5") + tm = SCons.Taskmaster.Taskmaster([n5]) + t = tm.next_task() + t.exception_set((MyException, "exception value")) + exc_caught = None + exc_actually_caught = None + exc_value = None + try: + t.prepare() + except MyException as e: + exc_caught = 1 + exc_value = e + except Exception as exc_actually_caught: + pass + assert exc_caught, "did not catch expected MyException: %s" % exc_actually_caught + assert str(exc_value) == "exception value", exc_value + assert built_text is None, built_text + + # Regression test, make sure we prepare not only + # all targets, but their side effects as well. + n6 = Node("n6") + n7 = Node("n7") + n8 = Node("n8") + n9 = Node("n9") + n10 = Node("n10") + + n6.side_effects = [ n8 ] + n7.side_effects = [ n9, n10 ] + + tm = SCons.Taskmaster.Taskmaster([n6, n7]) + t = tm.next_task() + # More bogus reaching in and setting the targets. + n6.get_executor().targets = [n6, n7] + t.prepare() + assert n6.prepared + assert n7.prepared + assert n8.prepared + assert n9.prepared + assert n10.prepared + + # Make sure we call an Executor's prepare() method. + class ExceptionExecutor: + def prepare(self): + raise Exception("Executor.prepare() exception") + def get_all_targets(self): + return self.nodes + def get_all_children(self): + result = [] + for node in self.nodes: + result.extend(node.children()) + return result + def get_all_prerequisites(self): + return [] + def get_action_side_effects(self): + return [] + + n11 = Node("n11") + n11.executor = ExceptionExecutor() + n11.executor.nodes = [n11] + tm = SCons.Taskmaster.Taskmaster([n11]) + t = tm.next_task() + try: + t.prepare() + except Exception as e: + assert str(e) == "Executor.prepare() exception", e + else: + raise AssertionError("did not catch expected exception") + + def test_execute(self): + """Test executing a task + """ + global built_text + global cache_text + + n1 = Node("n1") + tm = SCons.Taskmaster.Taskmaster([n1]) + t = tm.next_task() + t.execute() + assert built_text == "n1 built", built_text + + def raise_UserError(): + raise SCons.Errors.UserError + n2 = Node("n2") + n2.build = raise_UserError + tm = SCons.Taskmaster.Taskmaster([n2]) + t = tm.next_task() + try: + t.execute() + except SCons.Errors.UserError: + pass + else: + self.fail("did not catch expected UserError") + + def raise_BuildError(): + raise SCons.Errors.BuildError + n3 = Node("n3") + n3.build = raise_BuildError + tm = SCons.Taskmaster.Taskmaster([n3]) + t = tm.next_task() + try: + t.execute() + except SCons.Errors.BuildError: + pass + else: + self.fail("did not catch expected BuildError") + + # On a generic (non-BuildError) exception from a Builder, + # the target should throw a BuildError exception with the + # args set to the exception value, instance, and traceback. + def raise_OtherError(): + raise OtherError + n4 = Node("n4") + n4.build = raise_OtherError + tm = SCons.Taskmaster.Taskmaster([n4]) + t = tm.next_task() + try: + t.execute() + except SCons.Errors.BuildError as e: + assert e.node == n4, e.node + assert e.errstr == "OtherError : ", e.errstr + assert len(e.exc_info) == 3, e.exc_info + exc_traceback = sys.exc_info()[2] + assert isinstance(e.exc_info[2], type(exc_traceback)), e.exc_info[2] + else: + self.fail("did not catch expected BuildError") + + built_text = None + cache_text = [] + n5 = Node("n5") + n6 = Node("n6") + n6.cached = 1 + tm = SCons.Taskmaster.Taskmaster([n5]) + t = tm.next_task() + # This next line is moderately bogus. 
We're just reaching + # in and setting the targets for this task to an array. The + # "right" way to do this would be to have the next_task() call + # set it up by having something that approximates a real Builder + # return this list--but that's more work than is probably + # warranted right now. + t.targets = [n5, n6] + t.execute() + assert built_text == "n5 built", built_text + assert cache_text == [], cache_text + + built_text = None + cache_text = [] + n7 = Node("n7") + n8 = Node("n8") + n7.cached = 1 + n8.cached = 1 + tm = SCons.Taskmaster.Taskmaster([n7]) + t = tm.next_task() + # This next line is moderately bogus. We're just reaching + # in and setting the targets for this task to an array. The + # "right" way to do this would be to have the next_task() call + # set it up by having something that approximates a real Builder + # return this list--but that's more work than is probably + # warranted right now. + t.targets = [n7, n8] + t.execute() + assert built_text is None, built_text + assert cache_text == ["n7 retrieved", "n8 retrieved"], cache_text + + def test_cached_execute(self): + """Test executing a task with cached targets + """ + # In issue #2720 Alexei Klimkin detected that the previous + # workflow for execute() led to problems in a multithreaded build. + # We have: + # task.prepare() + # task.execute() + # task.executed() + # -> node.visited() + # for the Serial flow, but + # - Parallel - - Worker - + # task.prepare() + # requestQueue.put(task) + # task = requestQueue.get() + # task.execute() + # resultQueue.put(task) + # task = resultQueue.get() + # task.executed() + # ->node.visited() + # in parallel. Since execute() used to call built() when a target + # was cached, it could unblock dependent nodes before the binfo got + # restored again in visited(). This resulted in spurious + # "file not found" build errors, because files fetched from cache would + # be seen as not up to date and wouldn't be scanned for implicit + # dependencies. + # + # The following test ensures that execute() only marks targets as cached, + # but the actual call to built() happens in executed() only. + # Like this, the binfo should still be intact after calling execute()... + global cache_text + + n1 = Node("n1") + # Mark the node as being cached + n1.cached = 1 + tm = SCons.Taskmaster.Taskmaster([n1]) + t = tm.next_task() + t.prepare() + t.execute() + assert cache_text == ["n1 retrieved"], cache_text + # If no binfo exists anymore, something has gone wrong... 
+ has_binfo = hasattr(n1, 'binfo') + assert has_binfo, has_binfo + + def test_exception(self): + """Test generic Taskmaster exception handling + + """ + n1 = Node("n1") + tm = SCons.Taskmaster.Taskmaster([n1]) + t = tm.next_task() + + t.exception_set((1, 2)) + exc_type, exc_value = t.exception + assert exc_type == 1, exc_type + assert exc_value == 2, exc_value + + t.exception_set(3) + assert t.exception == 3 + + try: 1//0 + except: + # Moved from below + t.exception_set(None) + #pass + +# import pdb; pdb.set_trace() + + # Having this here works for python 2.x, + # but it is a tuple (None, None, None) when called outside + # an except statement + # t.exception_set(None) + + exc_type, exc_value, exc_tb = t.exception + assert exc_type is ZeroDivisionError, "Expecting ZeroDevisionError got:%s"%exc_type + exception_values = [ + "integer division or modulo", + "integer division or modulo by zero", + "integer division by zero", # PyPy2 + ] + assert str(exc_value) in exception_values, exc_value + + class Exception1(Exception): + pass + + # Previously value was None, but while PY2 None = "", in Py3 None != "", so set to "" + t.exception_set((Exception1, "")) + try: + t.exception_raise() + except: + exc_type, exc_value = sys.exc_info()[:2] + assert exc_type == Exception1, exc_type + assert str(exc_value) == '', "Expecting empty string got:%s (type %s)"%(exc_value,type(exc_value)) + else: + assert 0, "did not catch expected exception" + + class Exception2(Exception): + pass + + t.exception_set((Exception2, "xyzzy")) + try: + t.exception_raise() + except: + exc_type, exc_value = sys.exc_info()[:2] + assert exc_type == Exception2, exc_type + assert str(exc_value) == "xyzzy", exc_value + else: + assert 0, "did not catch expected exception" + + class Exception3(Exception): + pass + + try: + 1//0 + except: + tb = sys.exc_info()[2] + t.exception_set((Exception3, "arg", tb)) + try: + t.exception_raise() + except: + exc_type, exc_value, exc_tb = sys.exc_info() + assert exc_type == Exception3, exc_type + assert str(exc_value) == "arg", exc_value + import traceback + x = traceback.extract_tb(tb)[-1] + y = traceback.extract_tb(exc_tb)[-1] + assert x == y, "x = %s, y = %s" % (x, y) + else: + assert 0, "did not catch expected exception" + + def test_postprocess(self): + """Test postprocessing targets to give them a chance to clean up + """ + n1 = Node("n1") + tm = SCons.Taskmaster.Taskmaster([n1]) + + t = tm.next_task() + assert not n1.postprocessed + t.postprocess() + assert n1.postprocessed + + n2 = Node("n2") + n3 = Node("n3") + tm = SCons.Taskmaster.Taskmaster([n2, n3]) + + assert not n2.postprocessed + assert not n3.postprocessed + t = tm.next_task() + t.postprocess() + assert n2.postprocessed + assert not n3.postprocessed + t = tm.next_task() + t.postprocess() + assert n2.postprocessed + assert n3.postprocessed + + def test_trace(self): + """Test Taskmaster tracing + """ + import io + + trace = io.StringIO() + n1 = Node("n1") + n2 = Node("n2") + n3 = Node("n3", [n1, n2]) + tm = SCons.Taskmaster.Taskmaster([n1, n1, n3], trace=trace) + t = tm.next_task() + t.prepare() + t.execute() + t.postprocess() + n1.set_state(SCons.Node.executed) + t = tm.next_task() + t.prepare() + t.execute() + t.postprocess() + n2.set_state(SCons.Node.executed) + t = tm.next_task() + t.prepare() + t.execute() + t.postprocess() + t = tm.next_task() + assert t is None + + value = trace.getvalue() + expect = """\ + +Taskmaster: Looking for a node to evaluate +Taskmaster: Considering node and its children: +Taskmaster: Evaluating + 
+Task.make_ready_current(): node +Task.prepare(): node +Task.execute(): node +Task.postprocess(): node + +Taskmaster: Looking for a node to evaluate +Taskmaster: Considering node and its children: +Taskmaster: already handled (executed) +Taskmaster: Considering node and its children: +Taskmaster: +Taskmaster: +Taskmaster: adjusted ref count: , child 'n2' +Taskmaster: Considering node and its children: +Taskmaster: Evaluating + +Task.make_ready_current(): node +Task.prepare(): node +Task.execute(): node +Task.postprocess(): node +Task.postprocess(): removing +Task.postprocess(): adjusted parent ref count + +Taskmaster: Looking for a node to evaluate +Taskmaster: Considering node and its children: +Taskmaster: +Taskmaster: +Taskmaster: Evaluating + +Task.make_ready_current(): node +Task.prepare(): node +Task.execute(): node +Task.postprocess(): node + +Taskmaster: Looking for a node to evaluate +Taskmaster: No candidate anymore. + +""" + assert value == expect, value + + + +if __name__ == "__main__": + unittest.main() + +# Local Variables: +# tab-width:4 +# indent-tabs-mode:nil +# End: +# vim: set expandtab tabstop=4 shiftwidth=4: diff --git a/SCons/Taskmaster/__init__.py b/SCons/Taskmaster/__init__.py new file mode 100644 index 0000000..d571795 --- /dev/null +++ b/SCons/Taskmaster/__init__.py @@ -0,0 +1,1059 @@ +# MIT License +# +# Copyright The SCons Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +"""Generic Taskmaster module for the SCons build engine. + +This module contains the primary interface(s) between a wrapping user +interface and the SCons build engine. There are two key classes here: + +Taskmaster + This is the main engine for walking the dependency graph and + calling things to decide what does or doesn't need to be built. + +Task + This is the base class for allowing a wrapping interface to + decide what does or doesn't actually need to be done. The + intention is for a wrapping interface to subclass this as + appropriate for different types of behavior it may need. + + The canonical example is the SCons native Python interface, + which has Task subclasses that handle its specific behavior, + like printing "'foo' is up to date" when a top-level target + doesn't need to be built, and handling the -c option by removing + targets as its "build" action. There is also a separate subclass + for suppressing this output when the -q option is used. 
+ + The Taskmaster instantiates a Task object for each (set of) + target(s) that it decides need to be evaluated and/or built. +""" + +import sys +from abc import ABC, abstractmethod +from itertools import chain + +import SCons.Errors +import SCons.Node +import SCons.Warnings + +StateString = SCons.Node.StateString +NODE_NO_STATE = SCons.Node.no_state +NODE_PENDING = SCons.Node.pending +NODE_EXECUTING = SCons.Node.executing +NODE_UP_TO_DATE = SCons.Node.up_to_date +NODE_EXECUTED = SCons.Node.executed +NODE_FAILED = SCons.Node.failed + +print_prepare = False # set by option --debug=prepare + +# A subsystem for recording stats about how different Nodes are handled by +# the main Taskmaster loop. There's no external control here (no need for +# a --debug= option); enable it by changing the value of CollectStats. + +CollectStats = None + +class Stats: + """ + A simple class for holding statistics about the disposition of a + Node by the Taskmaster. If we're collecting statistics, each Node + processed by the Taskmaster gets one of these attached, in which case + the Taskmaster records its decision each time it processes the Node. + (Ideally, that's just once per Node.) + """ + def __init__(self): + """ + Instantiates a Taskmaster.Stats object, initializing all + appropriate counters to zero. + """ + self.considered = 0 + self.already_handled = 0 + self.problem = 0 + self.child_failed = 0 + self.not_built = 0 + self.side_effects = 0 + self.build = 0 + +StatsNodes = [] + +fmt = "%(considered)3d "\ + "%(already_handled)3d " \ + "%(problem)3d " \ + "%(child_failed)3d " \ + "%(not_built)3d " \ + "%(side_effects)3d " \ + "%(build)3d " + +def dump_stats(): + for n in sorted(StatsNodes, key=lambda a: str(a)): + print((fmt % n.attributes.stats.__dict__) + str(n)) + + +class Task(ABC): + """ SCons build engine abstract task class. + + This controls the interaction of the actual building of node + and the rest of the engine. + + This is expected to handle all of the normally-customizable + aspects of controlling a build, so any given application + *should* be able to do what it wants by sub-classing this + class and overriding methods as appropriate. If an application + needs to customize something by sub-classing Taskmaster (or + some other build engine class), we should first try to migrate + that functionality into this class. + + Note that it's generally a good idea for sub-classes to call + these methods explicitly to update state, etc., rather than + roll their own interaction with Taskmaster from scratch. + """ + def __init__(self, tm, targets, top, node): + self.tm = tm + self.targets = targets + self.top = top + self.node = node + self.exc_clear() + + def trace_message(self, method, node, description='node'): + fmt = '%-20s %s %s\n' + return fmt % (method + ':', description, self.tm.trace_node(node)) + + def display(self, message): + """ + Hook to allow the calling interface to display a message. + + This hook gets called as part of preparing a task for execution + (that is, a Node to be built). As part of figuring out what Node + should be built next, the actual target list may be altered, + along with a message describing the alteration. The calling + interface can subclass Task and provide a concrete implementation + of this method to see those messages. + """ + pass + + def prepare(self): + """ + Called just before the task is executed. 
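A minimal sketch of using the stats subsystem defined above (illustrative only, not part of this patch; assumes a build is then driven through a Taskmaster instance):

    import SCons.Taskmaster

    # There is no command-line option for this; enable it directly.
    SCons.Taskmaster.CollectStats = True
    # ... drive the build via next_task() and the Task methods ...
    SCons.Taskmaster.dump_stats()  # one row of counters per Node considered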
+ + This is mainly intended to give the target Nodes a chance to + unlink underlying files and make all necessary directories before + the Action is actually called to build the targets. + """ + global print_prepare + T = self.tm.trace + if T: T.write(self.trace_message('Task.prepare()', self.node)) + + # Now that it's the appropriate time, give the TaskMaster a + # chance to raise any exceptions it encountered while preparing + # this task. + self.exception_raise() + + if self.tm.message: + self.display(self.tm.message) + self.tm.message = None + + # Let the targets take care of any necessary preparations. + # This includes verifying that all of the necessary sources + # and dependencies exist, removing the target file(s), etc. + # + # As of April 2008, the get_executor().prepare() method makes + # sure that all of the aggregate sources necessary to build this + # Task's target(s) exist in one up-front check. The individual + # target t.prepare() methods check that each target's explicit + # or implicit dependencies exists, and also initialize the + # .sconsign info. + executor = self.targets[0].get_executor() + if executor is None: + return + executor.prepare() + for t in executor.get_action_targets(): + if print_prepare: + print("Preparing target %s..."%t) + for s in t.side_effects: + print("...with side-effect %s..."%s) + t.prepare() + for s in t.side_effects: + if print_prepare: + print("...Preparing side-effect %s..."%s) + s.prepare() + + def get_target(self): + """Fetch the target being built or updated by this task. + """ + return self.node + + @abstractmethod + def needs_execute(self): + return + + def execute(self): + """ + Called to execute the task. + + This method is called from multiple threads in a parallel build, + so only do thread safe stuff here. Do thread unsafe stuff in + prepare(), executed() or failed(). + """ + T = self.tm.trace + if T: T.write(self.trace_message('Task.execute()', self.node)) + + try: + cached_targets = [] + for t in self.targets: + if not t.retrieve_from_cache(): + break + cached_targets.append(t) + if len(cached_targets) < len(self.targets): + # Remove targets before building. It's possible that we + # partially retrieved targets from the cache, leaving + # them in read-only mode. That might cause the command + # to fail. + # + for t in cached_targets: + try: + t.fs.unlink(t.get_internal_path()) + except (IOError, OSError): + pass + self.targets[0].build() + else: + for t in cached_targets: + t.cached = 1 + except SystemExit: + exc_value = sys.exc_info()[1] + raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code) + except SCons.Errors.UserError: + raise + except SCons.Errors.BuildError: + raise + except Exception as e: + buildError = SCons.Errors.convert_to_BuildError(e) + buildError.node = self.targets[0] + buildError.exc_info = sys.exc_info() + raise buildError + + def executed_without_callbacks(self): + """ + Called when the task has been successfully executed + and the Taskmaster instance doesn't want to call + the Node's callback methods. + """ + T = self.tm.trace + if T: T.write(self.trace_message('Task.executed_without_callbacks()', + self.node)) + + for t in self.targets: + if t.get_state() == NODE_EXECUTING: + for side_effect in t.side_effects: + side_effect.set_state(NODE_NO_STATE) + t.set_state(NODE_EXECUTED) + + def executed_with_callbacks(self): + """ + Called when the task has been successfully executed and + the Taskmaster instance wants to call the Node's callback + methods. 
+ + This may have been a do-nothing operation (to preserve build + order), so we must check the node's state before deciding whether + it was "built", in which case we call the appropriate Node method. + In any event, we always call "visited()", which will handle any + post-visit actions that must take place regardless of whether + or not the target was an actual built target or a source Node. + """ + global print_prepare + T = self.tm.trace + if T: T.write(self.trace_message('Task.executed_with_callbacks()', + self.node)) + + for t in self.targets: + if t.get_state() == NODE_EXECUTING: + for side_effect in t.side_effects: + side_effect.set_state(NODE_NO_STATE) + t.set_state(NODE_EXECUTED) + if not t.cached: + t.push_to_cache() + t.built() + t.visited() + if (not print_prepare and + (not hasattr(self, 'options') or not self.options.debug_includes)): + t.release_target_info() + else: + t.visited() + + executed = executed_with_callbacks + + def failed(self): + """ + Default action when a task fails: stop the build. + + Note: Although this function is normally invoked on nodes in + the executing state, it might also be invoked on up-to-date + nodes when using Configure(). + """ + self.fail_stop() + + def fail_stop(self): + """ + Explicit stop-the-build failure. + + This sets failure status on the target nodes and all of + their dependent parent nodes. + + Note: Although this function is normally invoked on nodes in + the executing state, it might also be invoked on up-to-date + nodes when using Configure(). + """ + T = self.tm.trace + if T: T.write(self.trace_message('Task.failed_stop()', self.node)) + + # Invoke will_not_build() to clean-up the pending children + # list. + self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED)) + + # Tell the taskmaster to not start any new tasks + self.tm.stop() + + # We're stopping because of a build failure, but give the + # calling Task class a chance to postprocess() the top-level + # target under which the build failure occurred. + self.targets = [self.tm.current_top] + self.top = 1 + + def fail_continue(self): + """ + Explicit continue-the-build failure. + + This sets failure status on the target nodes and all of + their dependent parent nodes. + + Note: Although this function is normally invoked on nodes in + the executing state, it might also be invoked on up-to-date + nodes when using Configure(). + """ + T = self.tm.trace + if T: T.write(self.trace_message('Task.failed_continue()', self.node)) + + self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED)) + + def make_ready_all(self): + """ + Marks all targets in a task ready for execution. + + This is used when the interface needs every target Node to be + visited--the canonical example being the "scons -c" option. + """ + T = self.tm.trace + if T: T.write(self.trace_message('Task.make_ready_all()', self.node)) + + self.out_of_date = self.targets[:] + for t in self.targets: + t.disambiguate().set_state(NODE_EXECUTING) + for s in t.side_effects: + # add disambiguate here to mirror the call on targets above + s.disambiguate().set_state(NODE_EXECUTING) + + def make_ready_current(self): + """ + Marks all targets in a task ready for execution if any target + is not current. + + This is the default behavior for building only what's necessary. 
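A minimal sketch of the per-task calling sequence a wrapping interface is expected to drive, modeled on the serial job loop (illustrative only; `tm` is assumed to be a Taskmaster built from the top-level targets):

    while True:
        task = tm.next_task()
        if task is None:
            break
        try:
            task.prepare()
            if task.needs_execute():
                task.execute()
        except Exception:
            task.exception_set()   # record the failure for this task
            task.failed()          # default: fail_stop(), i.e. stop the build
        else:
            task.executed()        # fires the built()/visited() callbacks
        task.postprocess()         # release waiting parents back to the candidates
    tm.cleanup()                   # report any dependency cycles left behind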
+ """ + global print_prepare + T = self.tm.trace + if T: T.write(self.trace_message('Task.make_ready_current()', + self.node)) + + self.out_of_date = [] + needs_executing = False + for t in self.targets: + try: + t.disambiguate().make_ready() + is_up_to_date = not t.has_builder() or \ + (not t.always_build and t.is_up_to_date()) + except EnvironmentError as e: + raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename) + + if not is_up_to_date: + self.out_of_date.append(t) + needs_executing = True + + if needs_executing: + for t in self.targets: + t.set_state(NODE_EXECUTING) + for s in t.side_effects: + # add disambiguate here to mirror the call on targets in first loop above + s.disambiguate().set_state(NODE_EXECUTING) + else: + for t in self.targets: + # We must invoke visited() to ensure that the node + # information has been computed before allowing the + # parent nodes to execute. (That could occur in a + # parallel build...) + t.visited() + t.set_state(NODE_UP_TO_DATE) + if (not print_prepare and + (not hasattr(self, 'options') or not self.options.debug_includes)): + t.release_target_info() + + make_ready = make_ready_current + + def postprocess(self): + """ + Post-processes a task after it's been executed. + + This examines all the targets just built (or not, we don't care + if the build was successful, or even if there was no build + because everything was up-to-date) to see if they have any + waiting parent Nodes, or Nodes waiting on a common side effect, + that can be put back on the candidates list. + """ + T = self.tm.trace + if T: T.write(self.trace_message('Task.postprocess()', self.node)) + + # We may have built multiple targets, some of which may have + # common parents waiting for this build. Count up how many + # targets each parent was waiting for so we can subtract the + # values later, and so we *don't* put waiting side-effect Nodes + # back on the candidates list if the Node is also a waiting + # parent. + + targets = set(self.targets) + + pending_children = self.tm.pending_children + parents = {} + for t in targets: + # A node can only be in the pending_children set if it has + # some waiting_parents. + if t.waiting_parents: + if T: T.write(self.trace_message('Task.postprocess()', + t, + 'removing')) + pending_children.discard(t) + for p in t.waiting_parents: + parents[p] = parents.get(p, 0) + 1 + t.waiting_parents = set() + + for t in targets: + if t.side_effects is not None: + for s in t.side_effects: + if s.get_state() == NODE_EXECUTING: + s.set_state(NODE_NO_STATE) + + # The side-effects may have been transferred to + # NODE_NO_STATE by executed_with{,out}_callbacks, but was + # not taken out of the waiting parents/pending children + # data structures. Check for that now. + if s.get_state() == NODE_NO_STATE and s.waiting_parents: + pending_children.discard(s) + for p in s.waiting_parents: + parents[p] = parents.get(p, 0) + 1 + s.waiting_parents = set() + for p in s.waiting_s_e: + if p.ref_count == 0: + self.tm.candidates.append(p) + + for p, subtract in parents.items(): + p.ref_count = p.ref_count - subtract + if T: T.write(self.trace_message('Task.postprocess()', + p, + 'adjusted parent ref count')) + if p.ref_count == 0: + self.tm.candidates.append(p) + + for t in targets: + t.postprocess() + + # Exception handling subsystem. 
+ # + # Exceptions that occur while walking the DAG or examining Nodes + # must be raised, but must be raised at an appropriate time and in + # a controlled manner so we can, if necessary, recover gracefully, + # possibly write out signature information for Nodes we've updated, + # etc. This is done by having the Taskmaster tell us about the + # exception, and letting + + def exc_info(self): + """ + Returns info about a recorded exception. + """ + return self.exception + + def exc_clear(self): + """ + Clears any recorded exception. + + This also changes the "exception_raise" attribute to point + to the appropriate do-nothing method. + """ + self.exception = (None, None, None) + self.exception_raise = self._no_exception_to_raise + + def exception_set(self, exception=None): + """ + Records an exception to be raised at the appropriate time. + + This also changes the "exception_raise" attribute to point + to the method that will, in fact + """ + if not exception: + exception = sys.exc_info() + self.exception = exception + self.exception_raise = self._exception_raise + + def _no_exception_to_raise(self): + pass + + def _exception_raise(self): + """ + Raises a pending exception that was recorded while getting a + Task ready for execution. + """ + exc = self.exc_info()[:] + try: + exc_type, exc_value, exc_traceback = exc + except ValueError: + exc_type, exc_value = exc # pylint: disable=unbalanced-tuple-unpacking + exc_traceback = None + + # raise exc_type(exc_value).with_traceback(exc_traceback) + if isinstance(exc_value, Exception): #hasattr(exc_value, 'with_traceback'): + # If exc_value is an exception, then just reraise + raise exc_value.with_traceback(exc_traceback) + else: + # else we'll create an exception using the value and raise that + raise exc_type(exc_value).with_traceback(exc_traceback) + + + # raise e.__class__, e.__class__(e), sys.exc_info()[2] + # exec("raise exc_type(exc_value).with_traceback(exc_traceback)") + + + +class AlwaysTask(Task): + def needs_execute(self): + """ + Always returns True (indicating this Task should always + be executed). + + Subclasses that need this behavior (as opposed to the default + of only executing Nodes that are out of date w.r.t. their + dependencies) can use this as follows: + + class MyTaskSubclass(SCons.Taskmaster.Task): + needs_execute = SCons.Taskmaster.AlwaysTask.needs_execute + """ + return True + +class OutOfDateTask(Task): + def needs_execute(self): + """ + Returns True (indicating this Task should be executed) if this + Task's target state indicates it needs executing, which has + already been determined by an earlier up-to-date check. + """ + return self.targets[0].get_state() == SCons.Node.executing + + +def find_cycle(stack, visited): + if stack[-1] in visited: + return None + visited.add(stack[-1]) + for n in stack[-1].waiting_parents: + stack.append(n) + if stack[0] == stack[-1]: + return stack + if find_cycle(stack, visited): + return stack + stack.pop() + return None + + +class Taskmaster: + """ + The Taskmaster for walking the dependency DAG. 
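A minimal sketch of plugging a custom Task subclass into the Taskmaster through the `tasker` argument (illustrative only; the class name and `nodes`, the list of top-level target Nodes, are assumptions):

    import SCons.Taskmaster

    class QuietCleanTask(SCons.Taskmaster.AlwaysTask):
        # Visit every target, the way "scons -c" needs to, and stay silent.
        make_ready = SCons.Taskmaster.Task.make_ready_all

        def display(self, message):
            pass

    tm = SCons.Taskmaster.Taskmaster(targets=nodes, tasker=QuietCleanTask)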
+ """ + + def __init__(self, targets=[], tasker=None, order=None, trace=None): + self.original_top = targets + self.top_targets_left = targets[:] + self.top_targets_left.reverse() + self.candidates = [] + if tasker is None: + tasker = OutOfDateTask + self.tasker = tasker + if not order: + order = lambda l: l + self.order = order + self.message = None + self.trace = trace + self.next_candidate = self.find_next_candidate + self.pending_children = set() + + def find_next_candidate(self): + """ + Returns the next candidate Node for (potential) evaluation. + + The candidate list (really a stack) initially consists of all of + the top-level (command line) targets provided when the Taskmaster + was initialized. While we walk the DAG, visiting Nodes, all the + children that haven't finished processing get pushed on to the + candidate list. Each child can then be popped and examined in + turn for whether *their* children are all up-to-date, in which + case a Task will be created for their actual evaluation and + potential building. + + Here is where we also allow candidate Nodes to alter the list of + Nodes that should be examined. This is used, for example, when + invoking SCons in a source directory. A source directory Node can + return its corresponding build directory Node, essentially saying, + "Hey, you really need to build this thing over here instead." + """ + try: + return self.candidates.pop() + except IndexError: + pass + try: + node = self.top_targets_left.pop() + except IndexError: + return None + self.current_top = node + alt, message = node.alter_targets() + if alt: + self.message = message + self.candidates.append(node) + self.candidates.extend(self.order(alt)) + node = self.candidates.pop() + return node + + def no_next_candidate(self): + """ + Stops Taskmaster processing by not returning a next candidate. + + Note that we have to clean-up the Taskmaster candidate list + because the cycle detection depends on the fact all nodes have + been processed somehow. + """ + while self.candidates: + candidates = self.candidates + self.candidates = [] + self.will_not_build(candidates) + return None + + def _validate_pending_children(self): + """ + Validate the content of the pending_children set. Assert if an + internal error is found. + + This function is used strictly for debugging the taskmaster by + checking that no invariants are violated. It is not used in + normal operation. + + The pending_children set is used to detect cycles in the + dependency graph. We call a "pending child" a child that is + found in the "pending" state when checking the dependencies of + its parent node. + + A pending child can occur when the Taskmaster completes a loop + through a cycle. For example, let's imagine a graph made of + three nodes (A, B and C) making a cycle. The evaluation starts + at node A. The Taskmaster first considers whether node A's + child B is up-to-date. Then, recursively, node B needs to + check whether node C is up-to-date. This leaves us with a + dependency graph looking like:: + + Next candidate \ + \ + Node A (Pending) --> Node B(Pending) --> Node C (NoState) + ^ | + | | + +-------------------------------------+ + + Now, when the Taskmaster examines the Node C's child Node A, + it finds that Node A is in the "pending" state. Therefore, + Node A is a pending child of node C. + + Pending children indicate that the Taskmaster has potentially + loop back through a cycle. We say potentially because it could + also occur when a DAG is evaluated in parallel. 
For example, + consider the following graph:: + + Node A (Pending) --> Node B(Pending) --> Node C (Pending) --> ... + | ^ + | | + +----------> Node D (NoState) --------+ + / + Next candidate / + + The Taskmaster first evaluates the nodes A, B, and C and + starts building some children of node C. Assuming, that the + maximum parallel level has not been reached, the Taskmaster + will examine Node D. It will find that Node C is a pending + child of Node D. + + In summary, evaluating a graph with a cycle will always + involve a pending child at one point. A pending child might + indicate either a cycle or a diamond-shaped DAG. Only a + fraction of the nodes ends-up being a "pending child" of + another node. This keeps the pending_children set small in + practice. + + We can differentiate between the two cases if we wait until + the end of the build. At this point, all the pending children + nodes due to a diamond-shaped DAG will have been properly + built (or will have failed to build). But, the pending + children involved in a cycle will still be in the pending + state. + + The taskmaster removes nodes from the pending_children set as + soon as a pending_children node moves out of the pending + state. This also helps to keep the pending_children set small. + """ + + for n in self.pending_children: + assert n.state in (NODE_PENDING, NODE_EXECUTING), \ + (str(n), StateString[n.state]) + assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents)) + for p in n.waiting_parents: + assert p.ref_count > 0, (str(n), str(p), p.ref_count) + + + def trace_message(self, message): + return 'Taskmaster: %s\n' % message + + def trace_node(self, node): + return '<%-10s %-3s %s>' % (StateString[node.get_state()], + node.ref_count, + repr(str(node))) + + def _find_next_ready_node(self): + """ + Finds the next node that is ready to be built. + + This is *the* main guts of the DAG walk. We loop through the + list of candidates, looking for something that has no un-built + children (i.e., that is a leaf Node or has dependencies that are + all leaf Nodes or up-to-date). Candidate Nodes are re-scanned + (both the target Node itself and its sources, which are always + scanned in the context of a given target) to discover implicit + dependencies. A Node that must wait for some children to be + built will be put back on the candidates list after the children + have finished building. A Node that has been put back on the + candidates list in this way may have itself (or its sources) + re-scanned, in order to handle generated header files (e.g.) and + the implicit dependencies therein. + + Note that this method does not do any signature calculation or + up-to-date check itself. All of that is handled by the Task + class. This is purely concerned with the dependency graph walk. 
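A minimal sketch of turning on the trace stream that these helpers format (illustrative only; `nodes` is assumed, and any object with a write() method will do):

    import io
    import SCons.Taskmaster

    trace = io.StringIO()
    tm = SCons.Taskmaster.Taskmaster(targets=nodes, trace=trace)
    # ... drive the build ...
    print(trace.getvalue())   # "Taskmaster: Looking for a node to evaluate" etc.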
+ """ + + self.ready_exc = None + + T = self.trace + if T: T.write('\n' + self.trace_message('Looking for a node to evaluate')) + + while True: + node = self.next_candidate() + if node is None: + if T: T.write(self.trace_message('No candidate anymore.') + '\n') + return None + + node = node.disambiguate() + state = node.get_state() + + # For debugging only: + # + # try: + # self._validate_pending_children() + # except: + # self.ready_exc = sys.exc_info() + # return node + + if CollectStats: + if not hasattr(node.attributes, 'stats'): + node.attributes.stats = Stats() + StatsNodes.append(node) + S = node.attributes.stats + S.considered = S.considered + 1 + else: + S = None + + if T: T.write(self.trace_message(' Considering node %s and its children:' % self.trace_node(node))) + + if state == NODE_NO_STATE: + # Mark this node as being on the execution stack: + node.set_state(NODE_PENDING) + elif state > NODE_PENDING: + # Skip this node if it has already been evaluated: + if S: S.already_handled = S.already_handled + 1 + if T: T.write(self.trace_message(' already handled (executed)')) + continue + + executor = node.get_executor() + + try: + children = executor.get_all_children() + except SystemExit: + exc_value = sys.exc_info()[1] + e = SCons.Errors.ExplicitExit(node, exc_value.code) + self.ready_exc = (SCons.Errors.ExplicitExit, e) + if T: T.write(self.trace_message(' SystemExit')) + return node + except Exception as e: + # We had a problem just trying to figure out the + # children (like a child couldn't be linked in to a + # VariantDir, or a Scanner threw something). Arrange to + # raise the exception when the Task is "executed." + self.ready_exc = sys.exc_info() + if S: S.problem = S.problem + 1 + if T: T.write(self.trace_message(' exception %s while scanning children.\n' % e)) + return node + + children_not_visited = [] + children_pending = set() + children_not_ready = [] + children_failed = False + + for child in chain(executor.get_all_prerequisites(), children): + childstate = child.get_state() + + if T: T.write(self.trace_message(' ' + self.trace_node(child))) + + if childstate == NODE_NO_STATE: + children_not_visited.append(child) + elif childstate == NODE_PENDING: + children_pending.add(child) + elif childstate == NODE_FAILED: + children_failed = True + + if childstate <= NODE_EXECUTING: + children_not_ready.append(child) + + # These nodes have not even been visited yet. Add + # them to the list so that on some next pass we can + # take a stab at evaluating them (or their children). + if children_not_visited: + if len(children_not_visited) > 1: + children_not_visited.reverse() + self.candidates.extend(self.order(children_not_visited)) + + # if T and children_not_visited: + # T.write(self.trace_message(' adding to candidates: %s' % map(str, children_not_visited))) + # T.write(self.trace_message(' candidates now: %s\n' % map(str, self.candidates))) + + # Skip this node if any of its children have failed. + # + # This catches the case where we're descending a top-level + # target and one of our children failed while trying to be + # built by a *previous* descent of an earlier top-level + # target. + # + # It can also occur if a node is reused in multiple + # targets. One first descends though the one of the + # target, the next time occurs through the other target. + # + # Note that we can only have failed_children if the + # --keep-going flag was used, because without it the build + # will stop before diving in the other branch. 
+ # + # Note that even if one of the children fails, we still + # added the other children to the list of candidate nodes + # to keep on building (--keep-going). + if children_failed: + for n in executor.get_action_targets(): + n.set_state(NODE_FAILED) + + if S: S.child_failed = S.child_failed + 1 + if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node))) + continue + + if children_not_ready: + for child in children_not_ready: + # We're waiting on one or more derived targets + # that have not yet finished building. + if S: S.not_built = S.not_built + 1 + + # Add this node to the waiting parents lists of + # anything we're waiting on, with a reference + # count so we can be put back on the list for + # re-evaluation when they've all finished. + node.ref_count = node.ref_count + child.add_to_waiting_parents(node) + if T: T.write(self.trace_message(' adjusted ref count: %s, child %s' % + (self.trace_node(node), repr(str(child))))) + + if T: + for pc in children_pending: + T.write(self.trace_message(' adding %s to the pending children set\n' % + self.trace_node(pc))) + self.pending_children = self.pending_children | children_pending + + continue + + # Skip this node if it has side-effects that are + # currently being built: + wait_side_effects = False + for se in executor.get_action_side_effects(): + if se.get_state() == NODE_EXECUTING: + se.add_to_waiting_s_e(node) + wait_side_effects = True + + if wait_side_effects: + if S: S.side_effects = S.side_effects + 1 + continue + + # The default when we've gotten through all of the checks above: + # this node is ready to be built. + if S: S.build = S.build + 1 + if T: T.write(self.trace_message('Evaluating %s\n' % + self.trace_node(node))) + + # For debugging only: + # + # try: + # self._validate_pending_children() + # except: + # self.ready_exc = sys.exc_info() + # return node + + return node + + return None + + def next_task(self): + """ + Returns the next task to be executed. + + This simply asks for the next Node to be evaluated, and then wraps + it in the specific Task subclass with which we were initialized. + """ + node = self._find_next_ready_node() + + if node is None: + return None + + executor = node.get_executor() + if executor is None: + return None + + tlist = executor.get_all_targets() + + task = self.tasker(self, tlist, node in self.original_top, node) + try: + task.make_ready() + except Exception as e : + # We had a problem just trying to get this task ready (like + # a child couldn't be linked to a VariantDir when deciding + # whether this node is current). Arrange to raise the + # exception when the Task is "executed." + self.ready_exc = sys.exc_info() + + if self.ready_exc: + task.exception_set(self.ready_exc) + + self.ready_exc = None + + return task + + def will_not_build(self, nodes, node_func=lambda n: None): + """ + Perform clean-up about nodes that will never be built. Invokes + a user defined function on all of these nodes (including all + of their parents). + """ + + T = self.trace + + pending_children = self.pending_children + + to_visit = set(nodes) + pending_children = pending_children - to_visit + + if T: + for n in nodes: + T.write(self.trace_message(' removing node %s from the pending children set\n' % + self.trace_node(n))) + try: + while len(to_visit): + node = to_visit.pop() + node_func(node) + + # Prune recursion by flushing the waiting children + # list immediately. 
+ parents = node.waiting_parents + node.waiting_parents = set() + + to_visit = to_visit | parents + pending_children = pending_children - parents + + for p in parents: + p.ref_count = p.ref_count - 1 + if T: T.write(self.trace_message(' removing parent %s from the pending children set\n' % + self.trace_node(p))) + except KeyError: + # The container to_visit has been emptied. + pass + + # We have the stick back the pending_children list into the + # taskmaster because the python 1.5.2 compatibility does not + # allow us to use in-place updates + self.pending_children = pending_children + + def stop(self): + """ + Stops the current build completely. + """ + self.next_candidate = self.no_next_candidate + + def cleanup(self): + """ + Check for dependency cycles. + """ + if not self.pending_children: + return + + nclist = [(n, find_cycle([n], set())) for n in self.pending_children] + + genuine_cycles = [ + node for node,cycle in nclist + if cycle or node.get_state() != NODE_EXECUTED + ] + if not genuine_cycles: + # All of the "cycles" found were single nodes in EXECUTED state, + # which is to say, they really weren't cycles. Just return. + return + + desc = 'Found dependency cycle(s):\n' + for node, cycle in nclist: + if cycle: + desc = desc + " " + " -> ".join(map(str, cycle)) + "\n" + else: + desc = desc + \ + " Internal Error: no cycle found for node %s (%s) in state %s\n" % \ + (node, repr(node), StateString[node.get_state()]) + + raise SCons.Errors.UserError(desc) + +# Local Variables: +# tab-width:4 +# indent-tabs-mode:nil +# End: +# vim: set expandtab tabstop=4 shiftwidth=4: diff --git a/SCons/TaskmasterTests.py b/SCons/TaskmasterTests.py deleted file mode 100644 index f20fd71..0000000 --- a/SCons/TaskmasterTests.py +++ /dev/null @@ -1,1257 +0,0 @@ -# MIT License -# -# Copyright The SCons Foundation -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY -# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -import SCons.compat - -import sys -import unittest - - -import SCons.Taskmaster -import SCons.Errors - - -built_text = None -cache_text = [] -visited_nodes = [] -executed = None -scan_called = 0 - -class Node: - def __init__(self, name, kids = [], scans = []): - self.name = name - self.kids = kids - self.scans = scans - self.cached = 0 - self.scanned = 0 - self.scanner = None - self.targets = [self] - self.prerequisites = None - class Builder: - def targets(self, node): - return node.targets - self.builder = Builder() - self.bsig = None - self.csig = None - self.state = SCons.Node.no_state - self.prepared = None - self.ref_count = 0 - self.waiting_parents = set() - self.waiting_s_e = set() - self.side_effect = 0 - self.side_effects = [] - self.alttargets = [] - self.postprocessed = None - self._bsig_val = None - self._current_val = 0 - self.always_build = None - - def disambiguate(self): - return self - - def push_to_cache(self): - pass - - def retrieve_from_cache(self): - global cache_text - if self.cached: - cache_text.append(self.name + " retrieved") - return self.cached - - def make_ready(self): - pass - - def prepare(self): - self.prepared = 1 - self.get_binfo() - - def build(self): - global built_text - built_text = self.name + " built" - - def remove(self): - pass - - # The following four methods new_binfo(), del_binfo(), - # get_binfo(), clear() as well as its calls have been added - # to support the cached_execute() test (issue #2720). - # They are full copies (or snippets) of their actual - # counterparts in the Node class... - def new_binfo(self): - binfo = "binfo" - return binfo - - def del_binfo(self): - """Delete the build info from this node.""" - try: - delattr(self, 'binfo') - except AttributeError: - pass - - def get_binfo(self): - """Fetch a node's build information.""" - try: - return self.binfo - except AttributeError: - pass - - binfo = self.new_binfo() - self.binfo = binfo - - return binfo - - def clear(self): - # The del_binfo() call here isn't necessary for normal execution, - # but is for interactive mode, where we might rebuild the same - # target and need to start from scratch. - self.del_binfo() - - def built(self): - global built_text - if not self.cached: - built_text = built_text + " really" - - # Clear the implicit dependency caches of any Nodes - # waiting for this Node to be built. 
- for parent in self.waiting_parents: - parent.implicit = None - - self.clear() - - def release_target_info(self): - pass - - def has_builder(self): - return self.builder is not None - - def is_derived(self): - return self.has_builder or self.side_effect - - def alter_targets(self): - return self.alttargets, None - - def visited(self): - global visited_nodes - visited_nodes.append(self.name) - - def children(self): - if not self.scanned: - self.scan() - self.scanned = 1 - return self.kids - - def scan(self): - global scan_called - scan_called = scan_called + 1 - self.kids = self.kids + self.scans - self.scans = [] - - def scanner_key(self): - return self.name - - def add_to_waiting_parents(self, node): - wp = self.waiting_parents - if node in wp: - return 0 - wp.add(node) - return 1 - - def get_state(self): - return self.state - - def set_state(self, state): - self.state = state - - def set_bsig(self, bsig): - self.bsig = bsig - - def set_csig(self, csig): - self.csig = csig - - def store_csig(self): - pass - - def store_bsig(self): - pass - - def is_pseudo_derived(self): - pass - - def is_up_to_date(self): - return self._current_val - - def depends_on(self, nodes): - for node in nodes: - if node in self.kids: - return 1 - return 0 - - def __str__(self): - return self.name - - def postprocess(self): - self.postprocessed = 1 - self.waiting_parents = set() - - def get_executor(self): - if not hasattr(self, 'executor'): - class Executor: - def prepare(self): - pass - def get_action_targets(self): - return self.targets - def get_all_targets(self): - return self.targets - def get_all_children(self): - result = [] - for node in self.targets: - result.extend(node.children()) - return result - def get_all_prerequisites(self): - return [] - def get_action_side_effects(self): - return [] - self.executor = Executor() - self.executor.targets = self.targets - return self.executor - -class OtherError(Exception): - pass - -class MyException(Exception): - pass - - -class TaskmasterTestCase(unittest.TestCase): - - def test_next_task(self): - """Test fetching the next task - """ - global built_text - - n1 = Node("n1") - tm = SCons.Taskmaster.Taskmaster([n1, n1]) - t = tm.next_task() - t.prepare() - t.execute() - t = tm.next_task() - assert t is None - - n1 = Node("n1") - n2 = Node("n2") - n3 = Node("n3", [n1, n2]) - - tm = SCons.Taskmaster.Taskmaster([n3]) - - t = tm.next_task() - t.prepare() - t.execute() - assert built_text == "n1 built", built_text - t.executed() - t.postprocess() - - t = tm.next_task() - t.prepare() - t.execute() - assert built_text == "n2 built", built_text - t.executed() - t.postprocess() - - t = tm.next_task() - t.prepare() - t.execute() - assert built_text == "n3 built", built_text - t.executed() - t.postprocess() - - assert tm.next_task() is None - - built_text = "up to date: " - top_node = n3 - - class MyTask(SCons.Taskmaster.AlwaysTask): - def execute(self): - global built_text - if self.targets[0].get_state() == SCons.Node.up_to_date: - if self.top: - built_text = self.targets[0].name + " up-to-date top" - else: - built_text = self.targets[0].name + " up-to-date" - else: - self.targets[0].build() - - n1.set_state(SCons.Node.no_state) - n1._current_val = 1 - n2.set_state(SCons.Node.no_state) - n2._current_val = 1 - n3.set_state(SCons.Node.no_state) - n3._current_val = 1 - tm = SCons.Taskmaster.Taskmaster(targets = [n3], tasker = MyTask) - - t = tm.next_task() - t.prepare() - t.execute() - assert built_text == "n1 up-to-date", built_text - t.executed() - t.postprocess() - - t = 
tm.next_task() - t.prepare() - t.execute() - assert built_text == "n2 up-to-date", built_text - t.executed() - t.postprocess() - - t = tm.next_task() - t.prepare() - t.execute() - assert built_text == "n3 up-to-date top", built_text - t.executed() - t.postprocess() - - assert tm.next_task() is None - - - n1 = Node("n1") - n2 = Node("n2") - n3 = Node("n3", [n1, n2]) - n4 = Node("n4") - n5 = Node("n5", [n3, n4]) - tm = SCons.Taskmaster.Taskmaster([n5]) - - t1 = tm.next_task() - assert t1.get_target() == n1 - - t2 = tm.next_task() - assert t2.get_target() == n2 - - t4 = tm.next_task() - assert t4.get_target() == n4 - t4.executed() - t4.postprocess() - - t1.executed() - t1.postprocess() - t2.executed() - t2.postprocess() - t3 = tm.next_task() - assert t3.get_target() == n3 - - t3.executed() - t3.postprocess() - t5 = tm.next_task() - assert t5.get_target() == n5, t5.get_target() - t5.executed() - t5.postprocess() - - assert tm.next_task() is None - - - n4 = Node("n4") - n4.set_state(SCons.Node.executed) - tm = SCons.Taskmaster.Taskmaster([n4]) - assert tm.next_task() is None - - n1 = Node("n1") - n2 = Node("n2", [n1]) - tm = SCons.Taskmaster.Taskmaster([n2,n2]) - t = tm.next_task() - t.executed() - t.postprocess() - t = tm.next_task() - assert tm.next_task() is None - - - n1 = Node("n1") - n2 = Node("n2") - n3 = Node("n3", [n1], [n2]) - tm = SCons.Taskmaster.Taskmaster([n3]) - t = tm.next_task() - target = t.get_target() - assert target == n1, target - t.executed() - t.postprocess() - t = tm.next_task() - target = t.get_target() - assert target == n2, target - t.executed() - t.postprocess() - t = tm.next_task() - target = t.get_target() - assert target == n3, target - t.executed() - t.postprocess() - assert tm.next_task() is None - - n1 = Node("n1") - n2 = Node("n2") - n3 = Node("n3", [n1, n2]) - n4 = Node("n4", [n3]) - n5 = Node("n5", [n3]) - global scan_called - scan_called = 0 - tm = SCons.Taskmaster.Taskmaster([n4]) - t = tm.next_task() - assert t.get_target() == n1 - t.executed() - t.postprocess() - t = tm.next_task() - assert t.get_target() == n2 - t.executed() - t.postprocess() - t = tm.next_task() - assert t.get_target() == n3 - t.executed() - t.postprocess() - t = tm.next_task() - assert t.get_target() == n4 - t.executed() - t.postprocess() - assert tm.next_task() is None - assert scan_called == 4, scan_called - - tm = SCons.Taskmaster.Taskmaster([n5]) - t = tm.next_task() - assert t.get_target() == n5, t.get_target() - t.executed() - assert tm.next_task() is None - assert scan_called == 5, scan_called - - n1 = Node("n1") - n2 = Node("n2") - n3 = Node("n3") - n4 = Node("n4", [n1,n2,n3]) - n5 = Node("n5", [n4]) - n3.side_effect = 1 - n1.side_effects = n2.side_effects = n3.side_effects = [n4] - tm = SCons.Taskmaster.Taskmaster([n1,n2,n3,n4,n5]) - t = tm.next_task() - assert t.get_target() == n1 - assert n4.state == SCons.Node.executing, n4.state - t.executed() - t.postprocess() - t = tm.next_task() - assert t.get_target() == n2 - t.executed() - t.postprocess() - t = tm.next_task() - assert t.get_target() == n3 - t.executed() - t.postprocess() - t = tm.next_task() - assert t.get_target() == n4 - t.executed() - t.postprocess() - t = tm.next_task() - assert t.get_target() == n5 - assert not tm.next_task() - t.executed() - t.postprocess() - - n1 = Node("n1") - n2 = Node("n2") - n3 = Node("n3") - n4 = Node("n4", [n1,n2,n3]) - def reverse(dependencies): - dependencies.reverse() - return dependencies - tm = SCons.Taskmaster.Taskmaster([n4], order=reverse) - t = tm.next_task() - assert 
t.get_target() == n3, t.get_target() - t.executed() - t.postprocess() - t = tm.next_task() - assert t.get_target() == n2, t.get_target() - t.executed() - t.postprocess() - t = tm.next_task() - assert t.get_target() == n1, t.get_target() - t.executed() - t.postprocess() - t = tm.next_task() - assert t.get_target() == n4, t.get_target() - t.executed() - t.postprocess() - - n5 = Node("n5") - n6 = Node("n6") - n7 = Node("n7") - n6.alttargets = [n7] - - tm = SCons.Taskmaster.Taskmaster([n5]) - t = tm.next_task() - assert t.get_target() == n5 - t.executed() - t.postprocess() - - tm = SCons.Taskmaster.Taskmaster([n6]) - t = tm.next_task() - assert t.get_target() == n7 - t.executed() - t.postprocess() - t = tm.next_task() - assert t.get_target() == n6 - t.executed() - t.postprocess() - - n1 = Node("n1") - n2 = Node("n2", [n1]) - n1.set_state(SCons.Node.failed) - tm = SCons.Taskmaster.Taskmaster([n2]) - assert tm.next_task() is None - - n1 = Node("n1") - n2 = Node("n2") - n1.targets = [n1, n2] - n1._current_val = 1 - tm = SCons.Taskmaster.Taskmaster([n1]) - t = tm.next_task() - t.executed() - t.postprocess() - - s = n1.get_state() - assert s == SCons.Node.executed, s - s = n2.get_state() - assert s == SCons.Node.executed, s - - - def test_make_ready_out_of_date(self): - """Test the Task.make_ready() method's list of out-of-date Nodes - """ - ood = [] - def TaskGen(tm, targets, top, node, ood=ood): - class MyTask(SCons.Taskmaster.AlwaysTask): - def make_ready(self): - SCons.Taskmaster.Task.make_ready(self) - self.ood.extend(self.out_of_date) - - t = MyTask(tm, targets, top, node) - t.ood = ood - return t - - n1 = Node("n1") - c2 = Node("c2") - c2._current_val = 1 - n3 = Node("n3") - c4 = Node("c4") - c4._current_val = 1 - a5 = Node("a5") - a5._current_val = 1 - a5.always_build = 1 - tm = SCons.Taskmaster.Taskmaster(targets = [n1, c2, n3, c4, a5], - tasker = TaskGen) - - del ood[:] - t = tm.next_task() - assert ood == [n1], ood - - del ood[:] - t = tm.next_task() - assert ood == [], ood - - del ood[:] - t = tm.next_task() - assert ood == [n3], ood - - del ood[:] - t = tm.next_task() - assert ood == [], ood - - del ood[:] - t = tm.next_task() - assert ood == [a5], ood - - def test_make_ready_exception(self): - """Test handling exceptions from Task.make_ready() - """ - class MyTask(SCons.Taskmaster.AlwaysTask): - def make_ready(self): - raise MyException("from make_ready()") - - n1 = Node("n1") - tm = SCons.Taskmaster.Taskmaster(targets = [n1], tasker = MyTask) - t = tm.next_task() - exc_type, exc_value, exc_tb = t.exception - assert exc_type == MyException, repr(exc_type) - assert str(exc_value) == "from make_ready()", exc_value - - def test_needs_execute(self): - """Test that we can't instantiate a Task subclass without needs_execute - - We should be getting: - TypeError: Can't instantiate abstract class MyTask with abstract methods needs_execute - """ - class MyTask(SCons.Taskmaster.Task): - pass - - n1 = Node("n1") - tm = SCons.Taskmaster.Taskmaster(targets=[n1], tasker=MyTask) - with self.assertRaises(TypeError): - _ = tm.next_task() - - def test_make_ready_all(self): - """Test the make_ready_all() method""" - class MyTask(SCons.Taskmaster.AlwaysTask): - make_ready = SCons.Taskmaster.Task.make_ready_all - - n1 = Node("n1") - c2 = Node("c2") - c2._current_val = 1 - n3 = Node("n3") - c4 = Node("c4") - c4._current_val = 1 - - tm = SCons.Taskmaster.Taskmaster(targets = [n1, c2, n3, c4]) - - t = tm.next_task() - target = t.get_target() - assert target is n1, target - assert target.state == 
SCons.Node.executing, target.state - t = tm.next_task() - target = t.get_target() - assert target is c2, target - assert target.state == SCons.Node.up_to_date, target.state - t = tm.next_task() - target = t.get_target() - assert target is n3, target - assert target.state == SCons.Node.executing, target.state - t = tm.next_task() - target = t.get_target() - assert target is c4, target - assert target.state == SCons.Node.up_to_date, target.state - t = tm.next_task() - assert t is None - - n1 = Node("n1") - c2 = Node("c2") - n3 = Node("n3") - c4 = Node("c4") - - tm = SCons.Taskmaster.Taskmaster(targets = [n1, c2, n3, c4], - tasker = MyTask) - - t = tm.next_task() - target = t.get_target() - assert target is n1, target - assert target.state == SCons.Node.executing, target.state - t = tm.next_task() - target = t.get_target() - assert target is c2, target - assert target.state == SCons.Node.executing, target.state - t = tm.next_task() - target = t.get_target() - assert target is n3, target - assert target.state == SCons.Node.executing, target.state - t = tm.next_task() - target = t.get_target() - assert target is c4, target - assert target.state == SCons.Node.executing, target.state - t = tm.next_task() - assert t is None - - - def test_children_errors(self): - """Test errors when fetching the children of a node. - """ - class StopNode(Node): - def children(self): - raise SCons.Errors.StopError("stop!") - class ExitNode(Node): - def children(self): - sys.exit(77) - - n1 = StopNode("n1") - tm = SCons.Taskmaster.Taskmaster([n1]) - t = tm.next_task() - exc_type, exc_value, exc_tb = t.exception - assert exc_type == SCons.Errors.StopError, repr(exc_type) - assert str(exc_value) == "stop!", exc_value - - n2 = ExitNode("n2") - tm = SCons.Taskmaster.Taskmaster([n2]) - t = tm.next_task() - exc_type, exc_value = t.exception - assert exc_type == SCons.Errors.ExplicitExit, repr(exc_type) - assert exc_value.node == n2, exc_value.node - assert exc_value.status == 77, exc_value.status - - def test_cycle_detection(self): - """Test detecting dependency cycles - """ - n1 = Node("n1") - n2 = Node("n2", [n1]) - n3 = Node("n3", [n2]) - n1.kids = [n3] - - tm = SCons.Taskmaster.Taskmaster([n3]) - try: - t = tm.next_task() - except SCons.Errors.UserError as e: - assert str(e) == "Dependency cycle: n3 -> n1 -> n2 -> n3", str(e) - else: - assert 'Did not catch expected UserError' - - def test_next_top_level_candidate(self): - """Test the next_top_level_candidate() method - """ - n1 = Node("n1") - n2 = Node("n2", [n1]) - n3 = Node("n3", [n2]) - - tm = SCons.Taskmaster.Taskmaster([n3]) - t = tm.next_task() - assert t.targets == [n1], t.targets - t.fail_stop() - assert t.targets == [n3], list(map(str, t.targets)) - assert t.top == 1, t.top - - def test_stop(self): - """Test the stop() method - - Both default and overridden in a subclass. 
- """ - global built_text - - n1 = Node("n1") - n2 = Node("n2") - n3 = Node("n3", [n1, n2]) - - tm = SCons.Taskmaster.Taskmaster([n3]) - t = tm.next_task() - t.prepare() - t.execute() - assert built_text == "n1 built", built_text - t.executed() - t.postprocess() - assert built_text == "n1 built really", built_text - - tm.stop() - assert tm.next_task() is None - - class MyTM(SCons.Taskmaster.Taskmaster): - def stop(self): - global built_text - built_text = "MyTM.stop()" - SCons.Taskmaster.Taskmaster.stop(self) - - n1 = Node("n1") - n2 = Node("n2") - n3 = Node("n3", [n1, n2]) - - built_text = None - tm = MyTM([n3]) - tm.next_task().execute() - assert built_text == "n1 built" - - tm.stop() - assert built_text == "MyTM.stop()" - assert tm.next_task() is None - - def test_executed(self): - """Test when a task has been executed - """ - global built_text - global visited_nodes - - n1 = Node("n1") - tm = SCons.Taskmaster.Taskmaster([n1]) - t = tm.next_task() - built_text = "xxx" - visited_nodes = [] - n1.set_state(SCons.Node.executing) - - t.executed() - - s = n1.get_state() - assert s == SCons.Node.executed, s - assert built_text == "xxx really", built_text - assert visited_nodes == ['n1'], visited_nodes - - n2 = Node("n2") - tm = SCons.Taskmaster.Taskmaster([n2]) - t = tm.next_task() - built_text = "should_not_change" - visited_nodes = [] - n2.set_state(None) - - t.executed() - - s = n2.get_state() - assert s is None, s - assert built_text == "should_not_change", built_text - assert visited_nodes == ['n2'], visited_nodes - - n3 = Node("n3") - n4 = Node("n4") - n3.targets = [n3, n4] - tm = SCons.Taskmaster.Taskmaster([n3]) - t = tm.next_task() - visited_nodes = [] - n3.set_state(SCons.Node.up_to_date) - n4.set_state(SCons.Node.executing) - - t.executed() - - s = n3.get_state() - assert s == SCons.Node.up_to_date, s - s = n4.get_state() - assert s == SCons.Node.executed, s - assert visited_nodes == ['n3', 'n4'], visited_nodes - - def test_prepare(self): - """Test preparation of multiple Nodes for a task - """ - n1 = Node("n1") - n2 = Node("n2") - tm = SCons.Taskmaster.Taskmaster([n1, n2]) - t = tm.next_task() - # This next line is moderately bogus. We're just reaching - # in and setting the targets for this task to an array. The - # "right" way to do this would be to have the next_task() call - # set it up by having something that approximates a real Builder - # return this list--but that's more work than is probably - # warranted right now. - n1.get_executor().targets = [n1, n2] - t.prepare() - assert n1.prepared - assert n2.prepared - - n3 = Node("n3") - n4 = Node("n4") - tm = SCons.Taskmaster.Taskmaster([n3, n4]) - t = tm.next_task() - # More bogus reaching in and setting the targets. - n3.set_state(SCons.Node.up_to_date) - n3.get_executor().targets = [n3, n4] - t.prepare() - assert n3.prepared - assert n4.prepared - - # If the Node has had an exception recorded while it was getting - # prepared, then prepare() should raise that exception. 
- class MyException(Exception): - pass - - built_text = None - n5 = Node("n5") - tm = SCons.Taskmaster.Taskmaster([n5]) - t = tm.next_task() - t.exception_set((MyException, "exception value")) - exc_caught = None - exc_actually_caught = None - exc_value = None - try: - t.prepare() - except MyException as e: - exc_caught = 1 - exc_value = e - except Exception as exc_actually_caught: - pass - assert exc_caught, "did not catch expected MyException: %s" % exc_actually_caught - assert str(exc_value) == "exception value", exc_value - assert built_text is None, built_text - - # Regression test, make sure we prepare not only - # all targets, but their side effects as well. - n6 = Node("n6") - n7 = Node("n7") - n8 = Node("n8") - n9 = Node("n9") - n10 = Node("n10") - - n6.side_effects = [ n8 ] - n7.side_effects = [ n9, n10 ] - - tm = SCons.Taskmaster.Taskmaster([n6, n7]) - t = tm.next_task() - # More bogus reaching in and setting the targets. - n6.get_executor().targets = [n6, n7] - t.prepare() - assert n6.prepared - assert n7.prepared - assert n8.prepared - assert n9.prepared - assert n10.prepared - - # Make sure we call an Executor's prepare() method. - class ExceptionExecutor: - def prepare(self): - raise Exception("Executor.prepare() exception") - def get_all_targets(self): - return self.nodes - def get_all_children(self): - result = [] - for node in self.nodes: - result.extend(node.children()) - return result - def get_all_prerequisites(self): - return [] - def get_action_side_effects(self): - return [] - - n11 = Node("n11") - n11.executor = ExceptionExecutor() - n11.executor.nodes = [n11] - tm = SCons.Taskmaster.Taskmaster([n11]) - t = tm.next_task() - try: - t.prepare() - except Exception as e: - assert str(e) == "Executor.prepare() exception", e - else: - raise AssertionError("did not catch expected exception") - - def test_execute(self): - """Test executing a task - """ - global built_text - global cache_text - - n1 = Node("n1") - tm = SCons.Taskmaster.Taskmaster([n1]) - t = tm.next_task() - t.execute() - assert built_text == "n1 built", built_text - - def raise_UserError(): - raise SCons.Errors.UserError - n2 = Node("n2") - n2.build = raise_UserError - tm = SCons.Taskmaster.Taskmaster([n2]) - t = tm.next_task() - try: - t.execute() - except SCons.Errors.UserError: - pass - else: - self.fail("did not catch expected UserError") - - def raise_BuildError(): - raise SCons.Errors.BuildError - n3 = Node("n3") - n3.build = raise_BuildError - tm = SCons.Taskmaster.Taskmaster([n3]) - t = tm.next_task() - try: - t.execute() - except SCons.Errors.BuildError: - pass - else: - self.fail("did not catch expected BuildError") - - # On a generic (non-BuildError) exception from a Builder, - # the target should throw a BuildError exception with the - # args set to the exception value, instance, and traceback. - def raise_OtherError(): - raise OtherError - n4 = Node("n4") - n4.build = raise_OtherError - tm = SCons.Taskmaster.Taskmaster([n4]) - t = tm.next_task() - try: - t.execute() - except SCons.Errors.BuildError as e: - assert e.node == n4, e.node - assert e.errstr == "OtherError : ", e.errstr - assert len(e.exc_info) == 3, e.exc_info - exc_traceback = sys.exc_info()[2] - assert isinstance(e.exc_info[2], type(exc_traceback)), e.exc_info[2] - else: - self.fail("did not catch expected BuildError") - - built_text = None - cache_text = [] - n5 = Node("n5") - n6 = Node("n6") - n6.cached = 1 - tm = SCons.Taskmaster.Taskmaster([n5]) - t = tm.next_task() - # This next line is moderately bogus. 
We're just reaching - # in and setting the targets for this task to an array. The - # "right" way to do this would be to have the next_task() call - # set it up by having something that approximates a real Builder - # return this list--but that's more work than is probably - # warranted right now. - t.targets = [n5, n6] - t.execute() - assert built_text == "n5 built", built_text - assert cache_text == [], cache_text - - built_text = None - cache_text = [] - n7 = Node("n7") - n8 = Node("n8") - n7.cached = 1 - n8.cached = 1 - tm = SCons.Taskmaster.Taskmaster([n7]) - t = tm.next_task() - # This next line is moderately bogus. We're just reaching - # in and setting the targets for this task to an array. The - # "right" way to do this would be to have the next_task() call - # set it up by having something that approximates a real Builder - # return this list--but that's more work than is probably - # warranted right now. - t.targets = [n7, n8] - t.execute() - assert built_text is None, built_text - assert cache_text == ["n7 retrieved", "n8 retrieved"], cache_text - - def test_cached_execute(self): - """Test executing a task with cached targets - """ - # In issue #2720 Alexei Klimkin detected that the previous - # workflow for execute() led to problems in a multithreaded build. - # We have: - # task.prepare() - # task.execute() - # task.executed() - # -> node.visited() - # for the Serial flow, but - # - Parallel - - Worker - - # task.prepare() - # requestQueue.put(task) - # task = requestQueue.get() - # task.execute() - # resultQueue.put(task) - # task = resultQueue.get() - # task.executed() - # ->node.visited() - # in parallel. Since execute() used to call built() when a target - # was cached, it could unblock dependent nodes before the binfo got - # restored again in visited(). This resulted in spurious - # "file not found" build errors, because files fetched from cache would - # be seen as not up to date and wouldn't be scanned for implicit - # dependencies. - # - # The following test ensures that execute() only marks targets as cached, - # but the actual call to built() happens in executed() only. - # Like this, the binfo should still be intact after calling execute()... - global cache_text - - n1 = Node("n1") - # Mark the node as being cached - n1.cached = 1 - tm = SCons.Taskmaster.Taskmaster([n1]) - t = tm.next_task() - t.prepare() - t.execute() - assert cache_text == ["n1 retrieved"], cache_text - # If no binfo exists anymore, something has gone wrong... 
- has_binfo = hasattr(n1, 'binfo') - assert has_binfo, has_binfo - - def test_exception(self): - """Test generic Taskmaster exception handling - - """ - n1 = Node("n1") - tm = SCons.Taskmaster.Taskmaster([n1]) - t = tm.next_task() - - t.exception_set((1, 2)) - exc_type, exc_value = t.exception - assert exc_type == 1, exc_type - assert exc_value == 2, exc_value - - t.exception_set(3) - assert t.exception == 3 - - try: 1//0 - except: - # Moved from below - t.exception_set(None) - #pass - -# import pdb; pdb.set_trace() - - # Having this here works for python 2.x, - # but it is a tuple (None, None, None) when called outside - # an except statement - # t.exception_set(None) - - exc_type, exc_value, exc_tb = t.exception - assert exc_type is ZeroDivisionError, "Expecting ZeroDevisionError got:%s"%exc_type - exception_values = [ - "integer division or modulo", - "integer division or modulo by zero", - "integer division by zero", # PyPy2 - ] - assert str(exc_value) in exception_values, exc_value - - class Exception1(Exception): - pass - - # Previously value was None, but while PY2 None = "", in Py3 None != "", so set to "" - t.exception_set((Exception1, "")) - try: - t.exception_raise() - except: - exc_type, exc_value = sys.exc_info()[:2] - assert exc_type == Exception1, exc_type - assert str(exc_value) == '', "Expecting empty string got:%s (type %s)"%(exc_value,type(exc_value)) - else: - assert 0, "did not catch expected exception" - - class Exception2(Exception): - pass - - t.exception_set((Exception2, "xyzzy")) - try: - t.exception_raise() - except: - exc_type, exc_value = sys.exc_info()[:2] - assert exc_type == Exception2, exc_type - assert str(exc_value) == "xyzzy", exc_value - else: - assert 0, "did not catch expected exception" - - class Exception3(Exception): - pass - - try: - 1//0 - except: - tb = sys.exc_info()[2] - t.exception_set((Exception3, "arg", tb)) - try: - t.exception_raise() - except: - exc_type, exc_value, exc_tb = sys.exc_info() - assert exc_type == Exception3, exc_type - assert str(exc_value) == "arg", exc_value - import traceback - x = traceback.extract_tb(tb)[-1] - y = traceback.extract_tb(exc_tb)[-1] - assert x == y, "x = %s, y = %s" % (x, y) - else: - assert 0, "did not catch expected exception" - - def test_postprocess(self): - """Test postprocessing targets to give them a chance to clean up - """ - n1 = Node("n1") - tm = SCons.Taskmaster.Taskmaster([n1]) - - t = tm.next_task() - assert not n1.postprocessed - t.postprocess() - assert n1.postprocessed - - n2 = Node("n2") - n3 = Node("n3") - tm = SCons.Taskmaster.Taskmaster([n2, n3]) - - assert not n2.postprocessed - assert not n3.postprocessed - t = tm.next_task() - t.postprocess() - assert n2.postprocessed - assert not n3.postprocessed - t = tm.next_task() - t.postprocess() - assert n2.postprocessed - assert n3.postprocessed - - def test_trace(self): - """Test Taskmaster tracing - """ - import io - - trace = io.StringIO() - n1 = Node("n1") - n2 = Node("n2") - n3 = Node("n3", [n1, n2]) - tm = SCons.Taskmaster.Taskmaster([n1, n1, n3], trace=trace) - t = tm.next_task() - t.prepare() - t.execute() - t.postprocess() - n1.set_state(SCons.Node.executed) - t = tm.next_task() - t.prepare() - t.execute() - t.postprocess() - n2.set_state(SCons.Node.executed) - t = tm.next_task() - t.prepare() - t.execute() - t.postprocess() - t = tm.next_task() - assert t is None - - value = trace.getvalue() - expect = """\ - -Taskmaster: Looking for a node to evaluate -Taskmaster: Considering node and its children: -Taskmaster: Evaluating - 
-Task.make_ready_current(): node -Task.prepare(): node -Task.execute(): node -Task.postprocess(): node - -Taskmaster: Looking for a node to evaluate -Taskmaster: Considering node and its children: -Taskmaster: already handled (executed) -Taskmaster: Considering node and its children: -Taskmaster: -Taskmaster: -Taskmaster: adjusted ref count: , child 'n2' -Taskmaster: Considering node and its children: -Taskmaster: Evaluating - -Task.make_ready_current(): node -Task.prepare(): node -Task.execute(): node -Task.postprocess(): node -Task.postprocess(): removing -Task.postprocess(): adjusted parent ref count - -Taskmaster: Looking for a node to evaluate -Taskmaster: Considering node and its children: -Taskmaster: -Taskmaster: -Taskmaster: Evaluating - -Task.make_ready_current(): node -Task.prepare(): node -Task.execute(): node -Task.postprocess(): node - -Taskmaster: Looking for a node to evaluate -Taskmaster: No candidate anymore. - -""" - assert value == expect, value - - - -if __name__ == "__main__": - unittest.main() - -# Local Variables: -# tab-width:4 -# indent-tabs-mode:nil -# End: -# vim: set expandtab tabstop=4 shiftwidth=4: -- cgit v0.12
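Note on the issue #2720 comment in test_cached_execute() above: it explains why Task.execute() only marks cached targets and leaves the built()/visited() work to Task.executed(), because in a parallel build execute() runs on a worker thread while executed() runs back on the coordinating thread once the result is collected from the result queue. The following is a minimal stand-alone sketch of that hand-off, not SCons code: FakeTask, worker(), and coordinator() are illustrative names invented here, and only the request-queue/result-queue ordering mirrors the flow described in the comment.

    # Minimal model of the Parallel task lifecycle ordering from the
    # issue #2720 comment: execute() on a worker thread, executed() on
    # the coordinating thread after the result is collected.
    # All names below are illustrative, not part of the SCons API.
    import queue
    import threading

    request_q = queue.Queue()
    result_q = queue.Queue()

    class FakeTask:
        """Stand-in for a Taskmaster task; records its lifecycle order."""
        def __init__(self, name):
            self.name = name
            self.log = []

        def prepare(self):
            self.log.append("prepare")

        def execute(self):
            # Only mark the target as cached here; do not publish it as
            # built yet (that would unblock dependents too early).
            self.log.append("execute (mark cached)")

        def executed(self):
            # built()/visited() equivalent: safe to unblock dependents now.
            self.log.append("executed (built/visited)")

    def worker():
        while True:
            task = request_q.get()
            if task is None:          # sentinel: shut the worker down
                break
            task.execute()            # runs on the worker thread
            result_q.put(task)

    def coordinator(tasks):
        t = threading.Thread(target=worker)
        t.start()
        for task in tasks:
            task.prepare()            # coordinating thread
            request_q.put(task)
        for _ in tasks:
            done = result_q.get()
            done.executed()           # coordinating thread, after execute()
        request_q.put(None)
        t.join()

    if __name__ == "__main__":
        tasks = [FakeTask("n1"), FakeTask("n2")]
        coordinator(tasks)
        for task in tasks:
            # Each task sees prepare -> execute -> executed in order, even
            # though execute() ran on another thread.
            print(task.name, task.log)

Running the sketch prints each task's lifecycle log as prepare -> execute -> executed, which is the invariant the comment says keeps the binfo intact until visited() runs for cache-retrieved targets.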