From 74381f694f3155c0b051a3e368423f57d337eb9c Mon Sep 17 00:00:00 2001 From: Andrew Morrow Date: Wed, 31 Jan 2024 22:36:37 -0500 Subject: Only add worker threads as necessary --- SCons/Taskmaster/Job.py | 35 ++++++---- .../fixture/taskmaster_expected_new_parallel.txt | 3 +- test/option/stack-size.py | 76 ++++++++++++---------- 3 files changed, 65 insertions(+), 49 deletions(-) diff --git a/SCons/Taskmaster/Job.py b/SCons/Taskmaster/Job.py index 3bcc803..3b5b854 100644 --- a/SCons/Taskmaster/Job.py +++ b/SCons/Taskmaster/Job.py @@ -474,7 +474,7 @@ class NewParallel: def __init__(self, taskmaster, num, stack_size) -> None: self.taskmaster = taskmaster - self.num_workers = num + self.max_workers = num self.stack_size = stack_size self.interrupted = InterruptState() self.workers = [] @@ -484,7 +484,7 @@ class NewParallel: # also protects access to our state that gets updated # concurrently. The `can_search_cv` is associated with # this mutex. - self.tm_lock = (threading.Lock if self.num_workers > 1 else NewParallel.FakeLock)() + self.tm_lock = (threading.Lock if self.max_workers > 1 else NewParallel.FakeLock)() # Guarded under `tm_lock`. self.jobs = 0 @@ -493,11 +493,11 @@ class NewParallel: # The `can_search_cv` is used to manage a leader / # follower pattern for access to the taskmaster, and to # awaken from stalls. - self.can_search_cv = (threading.Condition if self.num_workers > 1 else NewParallel.FakeCondition)(self.tm_lock) + self.can_search_cv = (threading.Condition if self.max_workers > 1 else NewParallel.FakeCondition)(self.tm_lock) # The queue of tasks that have completed execution. The # next thread to obtain `tm_lock`` will retire them. 
- self.results_queue_lock = (threading.Lock if self.num_workers > 1 else NewParallel.FakeLock)() + self.results_queue_lock = (threading.Lock if self.max_workers > 1 else NewParallel.FakeLock)() self.results_queue = [] if self.taskmaster.trace: @@ -516,22 +516,26 @@ class NewParallel: method_name = sys._getframe(1).f_code.co_name + "():" thread_id=threading.get_ident() self.trace.debug('%s.%s [Thread:%s] %s' % (type(self).__name__, method_name, thread_id, message)) - # print('%-15s %s' % (method_name, message)) def start(self) -> None: - if self.num_workers == 1: + if self.max_workers == 1: self._work() else: - self._start_workers() - for worker in self.workers: - worker.join() - self.workers = [] + self._start_worker() + while len(self.workers) > 0: + self.workers[0].join() + self.workers.pop(0) self.taskmaster.cleanup() - def _start_workers(self) -> None: + def _maybe_start_worker(self) -> None: + if self.max_workers > 1 and len(self.workers) < self.max_workers: + self._start_worker() + + def _start_worker(self) -> None: prev_size = self._adjust_stack_size() - for _ in range(self.num_workers): - self.workers.append(NewParallel.Worker(self)) + if self.trace: + self.trace_message("Starting new worker thread") + self.workers.append(NewParallel.Worker(self)) self._restore_stack_size(prev_size) def _adjust_stack_size(self): @@ -680,6 +684,11 @@ class NewParallel: self.trace_message("Found task requiring execution") self.state = NewParallel.State.READY self.can_search_cv.notify() + # This thread will be busy taking care of + # `execute`ing this task. If we haven't + # reached the limit, spawn a new thread to + # turn the crank and find the next task. 
+ self._maybe_start_worker() else: # We failed to find a task, so this thread diff --git a/test/option/fixture/taskmaster_expected_new_parallel.txt b/test/option/fixture/taskmaster_expected_new_parallel.txt index 071c8ce..23f491f 100644 --- a/test/option/fixture/taskmaster_expected_new_parallel.txt +++ b/test/option/fixture/taskmaster_expected_new_parallel.txt @@ -1,3 +1,4 @@ +Job.NewParallel._start_worker(): [Thread:XXXXX] Starting new worker thread Job.NewParallel._work(): [Thread:XXXXX] Gained exclusive access Job.NewParallel._work(): [Thread:XXXXX] Starting search Job.NewParallel._work(): [Thread:XXXXX] Found 0 completed tasks to process @@ -86,5 +87,3 @@ Taskmaster: No candidate anymore. Job.NewParallel._work(): [Thread:XXXXX] Found no task requiring execution, and have no jobs: marking complete Job.NewParallel._work(): [Thread:XXXXX] Gained exclusive access Job.NewParallel._work(): [Thread:XXXXX] Completion detected, breaking from main loop -Job.NewParallel._work(): [Thread:XXXXX] Gained exclusive access -Job.NewParallel._work(): [Thread:XXXXX] Completion detected, breaking from main loop diff --git a/test/option/stack-size.py b/test/option/stack-size.py index d64c73b..e9cb38e 100644 --- a/test/option/stack-size.py +++ b/test/option/stack-size.py @@ -89,14 +89,14 @@ File .* # # Test without any options # -test.run(chdir='work1', +test.run(chdir='work1', arguments = '.', stdout=expected_stdout, stderr='') test.must_exist(['work1', 'f1.out']) test.must_exist(['work1', 'f2.out']) -test.run(chdir='work1', +test.run(chdir='work1', arguments = '-c .') test.must_not_exist(['work1', 'f1.out']) test.must_not_exist(['work1', 'f2.out']) @@ -104,14 +104,14 @@ test.must_not_exist(['work1', 'f2.out']) # # Test with -j2 # -test.run(chdir='work1', +test.run(chdir='work1', arguments = '-j2 .', stdout=expected_stdout, stderr='') test.must_exist(['work1', 'f1.out']) test.must_exist(['work1', 'f2.out']) -test.run(chdir='work1', +test.run(chdir='work1', arguments = '-j2 -c .') 
test.must_not_exist(['work1', 'f1.out']) test.must_not_exist(['work1', 'f2.out']) @@ -120,14 +120,14 @@ test.must_not_exist(['work1', 'f2.out']) # # Test with --stack-size # -test.run(chdir='work1', +test.run(chdir='work1', arguments = '--stack-size=128 .', stdout=expected_stdout, stderr='') test.must_exist(['work1', 'f1.out']) test.must_exist(['work1', 'f2.out']) -test.run(chdir='work1', +test.run(chdir='work1', arguments = '--stack-size=128 -c .') test.must_not_exist(['work1', 'f1.out']) test.must_not_exist(['work1', 'f2.out']) @@ -135,14 +135,14 @@ test.must_not_exist(['work1', 'f2.out']) # # Test with SetOption('stack_size', 128) # -test.run(chdir='work2', +test.run(chdir='work2', arguments = '.', stdout=expected_stdout, stderr='') test.must_exist(['work2', 'f1.out']) test.must_exist(['work2', 'f2.out']) -test.run(chdir='work2', +test.run(chdir='work2', arguments = '--stack-size=128 -c .') test.must_not_exist(['work2', 'f1.out']) test.must_not_exist(['work2', 'f2.out']) @@ -151,14 +151,14 @@ if isStackSizeAvailable: # # Test with -j2 --stack-size=128 # - test.run(chdir='work1', + test.run(chdir='work1', arguments = '-j2 --stack-size=128 .', stdout=expected_stdout, stderr='') test.must_exist(['work1', 'f1.out']) test.must_exist(['work1', 'f2.out']) - test.run(chdir='work1', + test.run(chdir='work1', arguments = '-j2 --stack-size=128 -c .') test.must_not_exist(['work1', 'f1.out']) test.must_not_exist(['work1', 'f2.out']) @@ -166,7 +166,7 @@ if isStackSizeAvailable: # # Test with -j2 --stack-size=16 # - test.run(chdir='work1', + test.run(chdir='work1', arguments = '-j2 --stack-size=16 .', match=TestSCons.match_re, stdout=re_expected_stdout, @@ -174,17 +174,25 @@ if isStackSizeAvailable: scons: warning: Setting stack size failed: size not valid: 16384 bytes File .* + +scons: warning: Setting stack size failed: + size not valid: 16384 bytes +File .* """) test.must_exist(['work1', 'f1.out']) test.must_exist(['work1', 'f2.out']) - test.run(chdir='work1', + 
test.run(chdir='work1', arguments = '-j2 --stack-size=16 -c .', match=TestSCons.match_re, stderr=""" scons: warning: Setting stack size failed: size not valid: 16384 bytes File .* + +scons: warning: Setting stack size failed: + size not valid: 16384 bytes +File .* """) test.must_not_exist(['work1', 'f1.out']) test.must_not_exist(['work1', 'f2.out']) @@ -192,14 +200,14 @@ File .* # # Test with -j2 SetOption('stack_size', 128) # - test.run(chdir='work2', + test.run(chdir='work2', arguments = '-j2 .', stdout=expected_stdout, stderr='') test.must_exist(['work2', 'f1.out']) test.must_exist(['work2', 'f2.out']) - test.run(chdir='work2', + test.run(chdir='work2', arguments = '-j2 -c .') test.must_not_exist(['work2', 'f1.out']) test.must_not_exist(['work2', 'f2.out']) @@ -207,14 +215,14 @@ File .* # # Test with -j2 --stack-size=128 --warn=no-stack-size # - test.run(chdir='work1', + test.run(chdir='work1', arguments = '-j2 --stack-size=128 --warn=no-stack-size .', stdout=expected_stdout, stderr='') test.must_exist(['work1', 'f1.out']) test.must_exist(['work1', 'f2.out']) - test.run(chdir='work1', + test.run(chdir='work1', arguments = '-j2 --stack-size=128 --warn=no-stack-size -c .') test.must_not_exist(['work1', 'f1.out']) test.must_not_exist(['work1', 'f2.out']) @@ -222,29 +230,29 @@ File .* # # Test with -j2 --stack-size=16 --warn=no-stack-size # - test.run(chdir='work1', + test.run(chdir='work1', arguments = '-j2 --stack-size=16 --warn=no-stack-size .', stdout=expected_stdout, stderr='') test.must_exist(['work1', 'f1.out']) test.must_exist(['work1', 'f2.out']) - test.run(chdir='work1', + test.run(chdir='work1', arguments = '-j2 --stack-size=16 --warn=no-stack-size -c .') test.must_not_exist(['work1', 'f1.out']) test.must_not_exist(['work1', 'f2.out']) # - # Test with -j2 --warn=no-stack-size SetOption('stack_size', 128) + # Test with -j2 --warn=no-stack-size SetOption('stack_size', 128) # - test.run(chdir='work2', + test.run(chdir='work2', arguments = '-j2 
--warn=no-stack-size .', stdout=expected_stdout, stderr='') test.must_exist(['work2', 'f1.out']) test.must_exist(['work2', 'f2.out']) - test.run(chdir='work2', + test.run(chdir='work2', arguments = '-j2 --warn=no-stack-size -c .') test.must_not_exist(['work2', 'f1.out']) test.must_not_exist(['work2', 'f2.out']) @@ -254,7 +262,7 @@ else: # # Test with -j2 --stack-size=128 # - test.run(chdir='work1', + test.run(chdir='work1', arguments = '-j2 --stack-size=128 .', match=TestSCons.match_re, stdout=re_expected_stdout, @@ -262,7 +270,7 @@ else: test.must_exist(['work1', 'f1.out']) test.must_exist(['work1', 'f2.out']) - test.run(chdir='work1', + test.run(chdir='work1', arguments = '-j2 --stack-size=128 -c .', match=TestSCons.match_re, stderr=expect_unsupported) @@ -272,7 +280,7 @@ else: # # Test with -j2 --stack-size=16 # - test.run(chdir='work1', + test.run(chdir='work1', arguments = '-j2 --stack-size=16 .', match=TestSCons.match_re, stdout=re_expected_stdout, @@ -280,7 +288,7 @@ else: test.must_exist(['work1', 'f1.out']) test.must_exist(['work1', 'f2.out']) - test.run(chdir='work1', + test.run(chdir='work1', arguments = '-j2 --stack-size=16 -c .', match=TestSCons.match_re, stderr=expect_unsupported) @@ -290,7 +298,7 @@ else: # # Test with -j2 SetOption('stack_size', 128) # - test.run(chdir='work2', + test.run(chdir='work2', arguments = '-j2 .', match=TestSCons.match_re, stdout=re_expected_stdout, @@ -298,7 +306,7 @@ else: test.must_exist(['work2', 'f1.out']) test.must_exist(['work2', 'f2.out']) - test.run(chdir='work2', + test.run(chdir='work2', arguments = '-j2 -c .', match=TestSCons.match_re, stderr=expect_unsupported) @@ -308,14 +316,14 @@ else: # # Test with -j2 --stack-size=128 --warn=no-stack-size # - test.run(chdir='work1', + test.run(chdir='work1', arguments = '-j2 --stack-size=128 --warn=no-stack-size .', stdout=expected_stdout, stderr='') test.must_exist(['work1', 'f1.out']) test.must_exist(['work1', 'f2.out']) - test.run(chdir='work1', + 
test.run(chdir='work1', arguments = '-j2 --stack-size=128 --warn=no-stack-size -c .') test.must_not_exist(['work1', 'f1.out']) test.must_not_exist(['work1', 'f2.out']) @@ -323,29 +331,29 @@ else: # # Test with -j2 --stack-size=16 --warn=no-stack-size # - test.run(chdir='work1', + test.run(chdir='work1', arguments = '-j2 --stack-size=16 --warn=no-stack-size .', stdout=expected_stdout, stderr='') test.must_exist(['work1', 'f1.out']) test.must_exist(['work1', 'f2.out']) - test.run(chdir='work1', + test.run(chdir='work1', arguments = '-j2 --stack-size=16 --warn=no-stack-size -c .') test.must_not_exist(['work1', 'f1.out']) test.must_not_exist(['work1', 'f2.out']) # - # Test with -j2 --warn=no-stack-size SetOption('stack_size', 128) + # Test with -j2 --warn=no-stack-size SetOption('stack_size', 128) # - test.run(chdir='work2', + test.run(chdir='work2', arguments = '-j2 --warn=no-stack-size .', stdout=expected_stdout, stderr='') test.must_exist(['work2', 'f1.out']) test.must_exist(['work2', 'f2.out']) - test.run(chdir='work2', + test.run(chdir='work2', arguments = '-j2 --warn=no-stack-size -c .') test.must_not_exist(['work2', 'f1.out']) test.must_not_exist(['work2', 'f2.out']) -- cgit v0.12 From 311c860bafe3f48ec60d22c19df6be5126b01173 Mon Sep 17 00:00:00 2001 From: Andrew Morrow Date: Thu, 1 Feb 2024 10:18:15 -0500 Subject: update changes and release --- CHANGES.txt | 3 +++ RELEASE.txt | 3 +++ 2 files changed, 6 insertions(+) diff --git a/CHANGES.txt b/CHANGES.txt index 357cb45..cb45be7 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -68,6 +68,9 @@ RELEASE VERSION/DATE TO BE FILLED IN LATER - CacheDir writes no longer happen within the taskmaster critical section, and therefore can run in parallel with both other CacheDir writes and the taskmaster DAG walk. + - The NewParallel scheduler now only adds threads as new work requiring execution + is discovered, up to the limit set by -j. 
This should reduce resource utilization + when the achievable parallelism in the DAG is less than the -j limit. From Mats Wichmann: - Add support for Python 3.13 (as of alpha 2). So far only affects diff --git a/RELEASE.txt b/RELEASE.txt index ae43db1..02f63e6 100644 --- a/RELEASE.txt +++ b/RELEASE.txt @@ -67,6 +67,9 @@ IMPROVEMENTS (Larger -j values) - CacheDir writes no longer happen within the taskmaster critical section, and therefore can run in parallel with both other CacheDir writes and the taskmaster DAG walk. +- The NewParallel scheduler now only adds threads as new work requiring execution + is discovered, up to the limit set by -j. This should reduce resource utilization + when the achievable parallelism in the DAG is less than the -j limit. PACKAGING -- cgit v0.12 From f55307bcd6b047ca841549362b05479abb6b5728 Mon Sep 17 00:00:00 2001 From: Mats Wichmann Date: Fri, 9 Feb 2024 13:18:42 -0700 Subject: doc: update Pseudo description and more The Pseudo manpage entry is updated to be more descriptive. Since they were near neighbors in Environment.py, it and the three functions Precious, Repository and Requires received minor docstring changes there; Precious and Requires also got a minor tweak to the manpage entry (mainly to clarify allowable argument types and list return value). Signed-off-by: Mats Wichmann --- CHANGES.txt | 1 + RELEASE.txt | 1 + SCons/Environment.py | 12 +++++++++--- SCons/Environment.xml | 4 ++++ SCons/Node/__init__.py | 4 ++-- SCons/Script/Main.xml | 41 ++++++++++++++++++++++++----------------- 6 files changed, 41 insertions(+), 22 deletions(-) diff --git a/CHANGES.txt b/CHANGES.txt index 357cb45..30543e5 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -80,6 +80,7 @@ RELEASE VERSION/DATE TO BE FILLED IN LATER Fixes #4468. - Fix bad typing in Action.py: process() and strfunction(). - Add Pseudo() to global functions, had been omitted. Fixes #4474. + The Pseudo manpage entry was updated to provide more clarity. 
RELEASE 4.6.0 - Sun, 19 Nov 2023 17:22:20 -0700 diff --git a/RELEASE.txt b/RELEASE.txt index ae43db1..8afc3d9 100644 --- a/RELEASE.txt +++ b/RELEASE.txt @@ -80,6 +80,7 @@ DOCUMENTATION - Fixed the Scanner examples in the User Guide to be runnable and added some more explantion. Clarified discussion of the scanner function in the Scanner Objects section of the manpage. +- The manpage entry for Pseudo was clarified. DEVELOPMENT ----------- diff --git a/SCons/Environment.py b/SCons/Environment.py index 64d38b0..e61ec1f 100644 --- a/SCons/Environment.py +++ b/SCons/Environment.py @@ -2339,6 +2339,7 @@ class Base(SubstitutionEnvironment): return ret def Precious(self, *targets): + """Mark *targets* as precious: do not delete before building.""" tlist = [] for t in targets: tlist.extend(self.arg2nodes(t, self.fs.Entry)) @@ -2347,6 +2348,7 @@ class Base(SubstitutionEnvironment): return tlist def Pseudo(self, *targets): + """Mark *targets* as pseudo: must not exist.""" tlist = [] for t in targets: tlist.extend(self.arg2nodes(t, self.fs.Entry)) @@ -2355,13 +2357,17 @@ class Base(SubstitutionEnvironment): return tlist def Repository(self, *dirs, **kw) -> None: + """Specify Repository directories to search.""" dirs = self.arg2nodes(list(dirs), self.fs.Dir) self.fs.Repository(*dirs, **kw) def Requires(self, target, prerequisite): - """Specify that 'prerequisite' must be built before 'target', - (but 'target' does not actually depend on 'prerequisite' - and need not be rebuilt if it changes).""" + """Specify that *prerequisite* must be built before *target*. + + Creates an order-only relationship, not a full dependency. + *prerequisite* must exist before *target* can be built, but + a change to *prerequisite* does not trigger a rebuild of *target*. 
+ """ tlist = self.arg2nodes(target, self.fs.Entry) plist = self.arg2nodes(prerequisite, self.fs.Entry) for t in tlist: diff --git a/SCons/Environment.xml b/SCons/Environment.xml index 89d9b49..94dab08 100644 --- a/SCons/Environment.xml +++ b/SCons/Environment.xml @@ -2988,6 +2988,10 @@ but the target file(s) do not actually depend on the prerequisites and will not be rebuilt simply because the prerequisite file(s) change. +target and +prerequisite may each +be a string or Node, or a list of strings or Nodes. +Returns a list of the affected target nodes. diff --git a/SCons/Node/__init__.py b/SCons/Node/__init__.py index 3da4faf..c3a671c 100644 --- a/SCons/Node/__init__.py +++ b/SCons/Node/__init__.py @@ -1230,7 +1230,7 @@ class Node(metaclass=NoSlotsPyPy): self.precious = precious def set_pseudo(self, pseudo: bool = True) -> None: - """Set the Node's precious value.""" + """Set the Node's pseudo value.""" self.pseudo = pseudo def set_noclean(self, noclean: int = 1) -> None: @@ -1250,7 +1250,7 @@ class Node(metaclass=NoSlotsPyPy): self.always_build = always_build def exists(self) -> bool: - """Does this node exists?""" + """Reports whether node exists.""" return _exists_map[self._func_exists](self) def rexists(self): diff --git a/SCons/Script/Main.xml b/SCons/Script/Main.xml index 9248668..36e7d30 100644 --- a/SCons/Script/Main.xml +++ b/SCons/Script/Main.xml @@ -725,13 +725,12 @@ Progress(['-\r', '\\\r', '|\r', '/\r'], interval=5) -Marks each given -target -as precious so it is not deleted before it is rebuilt. Normally -&scons; -deletes a target before building it. -Multiple targets can be passed in to a single call to -&f-Precious;. +Marks target as precious so it is not +deleted before it is rebuilt. +Normally &SCons; deletes a target before building it. +Multiple targets can be passed in a single call, +and may be strings and/or nodes. +Returns a list of the affected target nodes. 
@@ -742,16 +741,24 @@ -This indicates that each given -target -should not be created by the build rule, and if the target is created, -an error will be generated. This is similar to the gnu make .PHONY -target. However, in the vast majority of cases, an -&f-Alias; -is more appropriate. - -Multiple targets can be passed in to a single call to -&f-Pseudo;. +Marks target as a pseudo target, +not representing the production of any physical target file. +If any pseudo target does exist, +&SCons; will abort the build with an error. +Multiple targets can be passed in a single call, +and may be strings and/or Nodes. +Returns a list of the affected target nodes. + + + +&f-Pseudo; may be useful in conjunction with a builder +call (such as &f-link-Command;) which does not create a physical target, +and the behavior if the target accidentally existed would be incorrect. +This is similar in concept to the GNU make +.PHONY target. +&SCons; also provides a powerful target alias capability +(see &f-link-Alias;) which may provide more flexibility +in many situations when defining target names that are not directly built. 
-- cgit v0.12 From dca250fe2185ff6688e38d11f495bff138da28c6 Mon Sep 17 00:00:00 2001 From: Andrew Morrow Date: Tue, 13 Feb 2024 09:34:07 -0500 Subject: Only create new workers when current workers are saturated with jobs --- SCons/Taskmaster/Job.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/SCons/Taskmaster/Job.py b/SCons/Taskmaster/Job.py index 3b5b854..73ec0df 100644 --- a/SCons/Taskmaster/Job.py +++ b/SCons/Taskmaster/Job.py @@ -529,7 +529,8 @@ class NewParallel: def _maybe_start_worker(self) -> None: if self.max_workers > 1 and len(self.workers) < self.max_workers: - self._start_worker() + if self.jobs >= len(self.workers): + self._start_worker() def _start_worker(self) -> None: prev_size = self._adjust_stack_size() -- cgit v0.12 From 00ef03afd454d7cc2a47df1abd17c8fc071b9b08 Mon Sep 17 00:00:00 2001 From: Mats Wichmann Date: Sun, 11 Feb 2024 16:28:52 -0700 Subject: Fix PyPackageDir Passing a module which the active Python (system or virtualenv) cannot locate through the import machinery would cause SCons to fail with an AttributError, because the result of the initial lookup was used without checking for success. Now returns None for not found. Manpage entry and docstring also updated. Signed-off-by: Mats Wichmann --- CHANGES.txt | 4 ++++ RELEASE.txt | 3 +++ SCons/Environment.xml | 43 ++++++++++++++++++++++++++++++++----------- SCons/Node/FS.py | 31 +++++++++++++++++-------------- SCons/Node/FSTests.py | 17 +++++++++++++++++ 5 files changed, 73 insertions(+), 25 deletions(-) diff --git a/CHANGES.txt b/CHANGES.txt index 30543e5..61ceb4a 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -81,6 +81,10 @@ RELEASE VERSION/DATE TO BE FILLED IN LATER - Fix bad typing in Action.py: process() and strfunction(). - Add Pseudo() to global functions, had been omitted. Fixes #4474. The Pseudo manpage entry was updated to provide more clarity. 
+ - The internal routine which implements the PyPackageDir function + would fail with an exception if called with a module which is + not found. It will now return None. Updated manpage entry and + docstring. RELEASE 4.6.0 - Sun, 19 Nov 2023 17:22:20 -0700 diff --git a/RELEASE.txt b/RELEASE.txt index 8afc3d9..e014974 100644 --- a/RELEASE.txt +++ b/RELEASE.txt @@ -54,6 +54,9 @@ FIXES make sure decoding of bytes doesn't fail. - Documentation indicated that both Pseudo() and env.Pseudo() were usable, but Pseudo() did not work; is now enabled. +- PyPackageDir no longer fails if passed a module name which cannot be found, + now returns None. + IMPROVEMENTS ------------ diff --git a/SCons/Environment.xml b/SCons/Environment.xml index 94dab08..0014426 100644 --- a/SCons/Environment.xml +++ b/SCons/Environment.xml @@ -2880,20 +2880,41 @@ and &f-link-env-Prepend;. -This returns a Directory Node similar to Dir. -The python module / package is looked up and if located -the directory is returned for the location. -modulename -Is a named python package / module to -lookup the directory for it's location. - - -If -modulename -is a list, SCons returns a list of Dir nodes. +Finds the location of modulename, +which can be a string or a sequence of strings, +each representing the name of a &Python; module. Construction variables are expanded in modulename. +Returns a Directory Node (see &f-link-Dir;), +or a list of Directory Nodes if +modulename is a sequence. +None is returned for any module not found. + + + +When a Tool module which is installed as a +&Python; module is used, you need +to specify a toolpath argument to +&f-link-Tool;, +&f-link-Environment; +or &f-link-Clone;, +as tools outside the standard project locations +(site_scons/site_tools) +will not be found otherwise. +Using &f-PyPackageDir; allows this path to be +discovered at runtime instead of hardcoding the path. 
+ + + +Example: + + +env = Environment( + tools=["default", "ExampleTool"], + toolpath=[PyPackageDir("example_tool")] +) + diff --git a/SCons/Node/FS.py b/SCons/Node/FS.py index a5282e6..d1ffddb 100644 --- a/SCons/Node/FS.py +++ b/SCons/Node/FS.py @@ -1294,7 +1294,7 @@ class FS(LocalFS): self.Root[''] = root return root - def _lookup(self, p, directory, fsclass, create: int=1): + def _lookup(self, p, directory, fsclass, create: bool = True): """ The generic entry point for Node lookup with user-supplied data. @@ -1430,7 +1430,7 @@ class FS(LocalFS): return root._lookup_abs(p, fsclass, create) - def Entry(self, name, directory = None, create: int = 1): + def Entry(self, name, directory = None, create: bool = True): """Look up or create a generic Entry node with the specified name. If the name is a relative path (begins with ./, ../, or a file name), then it is looked up relative to the supplied directory @@ -1439,7 +1439,7 @@ class FS(LocalFS): """ return self._lookup(name, directory, Entry, create) - def File(self, name, directory = None, create: int = 1): + def File(self, name, directory = None, create: bool = True): """Look up or create a File node with the specified name. If the name is a relative path (begins with ./, ../, or a file name), then it is looked up relative to the supplied directory node, @@ -1486,21 +1486,24 @@ class FS(LocalFS): d = self.Dir(d) self.Top.addRepository(d) - def PyPackageDir(self, modulename): - r"""Locate the directory of a given python module name + def PyPackageDir(self, modulename) -> Optional[Dir]: + r"""Locate the directory of Python module *modulename*. 
- For example scons might resolve to - Windows: C:\Python27\Lib\site-packages\scons-2.5.1 - Linux: /usr/lib/scons + For example 'SCons' might resolve to + Windows: C:\Python311\Lib\site-packages\SCons + Linux: /usr/lib64/python3.11/site-packages/SCons - This can be useful when we want to determine a toolpath based on a python module name""" + Can be used to determine a toolpath based on a Python module name. - dirpath = '' - - # Python3 Code + This is the backend called by the public API function + :meth:`~Environment.Base.PyPackageDir`. + """ modspec = importlib.util.find_spec(modulename) - dirpath = os.path.dirname(modspec.origin) - return self._lookup(dirpath, None, Dir, True) + if modspec: + origin = os.path.dirname(modspec.origin) + return self._lookup(origin, directory=None, fsclass=Dir, create=True) + else: + return None def variant_dir_target_climb(self, orig, dir, tail): diff --git a/SCons/Node/FSTests.py b/SCons/Node/FSTests.py index e2eb0af..e50f721 100644 --- a/SCons/Node/FSTests.py +++ b/SCons/Node/FSTests.py @@ -4046,6 +4046,23 @@ class AbsolutePathTestCase(unittest.TestCase): os.chdir(save_cwd) +class PyPackageDir(unittest.TestCase): + def runTest(self) -> None: + """Test calling the PyPackageDir() method. + + We don't want to mock the positive case here - there's + testing for that in E2E test test/Dir/PyPackageDir. + We're only making sure we don't die in the negative case + (module not found) and instead return None. + """ + fs = SCons.Node.FS.FS('/') + try: + pkdir = fs.PyPackageDir("garglemod") + except AttributeError: + self.fail("non-existent module raised AttributeError") + self.assertIsNone(pkdir) + + if __name__ == "__main__": unittest.main() -- cgit v0.12