| author    | Steven Knight <knight@baldmt.com>                | 2004-12-29 21:04:56 (GMT) |
|-----------|--------------------------------------------------|---------------------------|
| committer | Steven Knight <knight@baldmt.com>                | 2004-12-29 21:04:56 (GMT) |
| commit    | a2b119edf2fdd972c426f08f9898fb2efbe36646 (patch) |                           |
| tree      | 12b6722f049211b37574477e82ab5c49a0521052         |                           |
| parent    | 9113805b081ef58fdf56bd5b5a9be6afad0b7a41 (diff)  |                           |
Add a Memoizer metaclass to collect the logic for caching values in one location. Convert by-hand caching to use of Memoizer. (Kevin Quick)
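The mechanism, in brief: a method opts in to caching by carrying the marker string `__cacheable__` in its docstring, and a method that must invalidate cached values carries `__reset_cache__` (or `__cache_reset__`). Below is a minimal usage sketch, not part of the commit; the class and method names (`PathLookup`, `expensive_lookup`, `invalidate`) are hypothetical, but the opt-in pattern is the same one the diff applies to `ActionBase`, `BuilderBase`, `Base`/`Environment`, and `Executor`:

```python
import SCons.Memoize

class PathLookup:
    """Hypothetical Memoizer client (illustration only)."""

    # On Python 2.2+ the metaclass rewrites marked methods at class
    # creation time.
    __metaclass__ = SCons.Memoize.Memoized_Metaclass

    def expensive_lookup(self, key):
        "__cacheable__"
        # Return value is cached per instance and per `key`; the body
        # here is a stand-in for real work.
        return str(key) * 1000

    def invalidate(self):
        "__reset_cache__"
        # Cached values for this instance are discarded before the
        # method body runs.
        pass

# Pre-2.2 interpreters have no metaclass support, so clients instead
# re-define the class with the Memoizer mixin, exactly as the
# converted modules below do:
if not SCons.Memoize.has_metaclass:
    _Base = PathLookup
    class PathLookup(SCons.Memoize.Memoizer, _Base):
        "Cache-backed version of PathLookup"
        def __init__(self, *args, **kw):
            apply(_Base.__init__, (self,)+args, kw)
            SCons.Memoize.Memoizer.__init__(self)
```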
26 files changed, 1223 insertions, 700 deletions
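For orientation while reading the hunks: each `__cacheable__` method is replaced, when the class is set up, by a wrapper that consults a cache dictionary bound to that method, keyed by the instance's unique `_MeMoIZeR_Key` plus a key generated from the call arguments. The following is a simplified sketch (an assumption-level distillation of `Memoizer_cache_get` in the new `SCons/Memoize.py` below, which also provides specialized variants for self-only and single-argument calls):

```python
def cache_get(func, cdict, args, kw):
    # `cdict` is the per-method cache dict bound in at class-setup
    # time; `args[0]` is the instance being called.
    obj = args[0]
    ckey = obj._MeMoIZeR_Key + ':' + _MeMoIZeR_gen_key(args, kw)
    try:
        # Hit: return the remembered result without calling `func`.
        return cdict[ckey]
    except KeyError:
        # Miss: run the real method once and remember its result.
        rval = cdict[ckey] = apply(func, args, kw)
        return rval
```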
diff --git a/src/CHANGES.txt b/src/CHANGES.txt
index e97ea15..6242524 100644
--- a/src/CHANGES.txt
+++ b/src/CHANGES.txt
@@ -347,6 +347,10 @@ RELEASE 0.97 - XXX
     - Use the correct scanner if the same source file is used for
       targets in two different environments with the same path but
       different scanners.

+    - Collect logic for caching values in memory in a Memoizer class.
+      This cleans up a lot of special-case code in various methods and
+      caches additional values to speed up most configurations.
+
   From Levi Stephen:

     - Allow $JARCHDIR to be expanded to other construction variables.
diff --git a/src/engine/MANIFEST.in b/src/engine/MANIFEST.in
index b8ce8b6..5c329bd 100644
--- a/src/engine/MANIFEST.in
+++ b/src/engine/MANIFEST.in
@@ -10,6 +10,7 @@ SCons/Errors.py
 SCons/Executor.py
 SCons/Job.py
 SCons/exitfuncs.py
+SCons/Memoize.py
 SCons/Node/__init__.py
 SCons/Node/Alias.py
 SCons/Node/FS.py
@@ -129,6 +130,5 @@ SCons/Tool/tex.py
 SCons/Tool/tlib.py
 SCons/Tool/yacc.py
 SCons/Tool/zip.py
-SCons/UserTuple.py
 SCons/Util.py
 SCons/Warnings.py
diff --git a/src/engine/SCons/Action.py b/src/engine/SCons/Action.py
index 83e4208..a3b62eb 100644
--- a/src/engine/SCons/Action.py
+++ b/src/engine/SCons/Action.py
@@ -215,6 +215,8 @@ class ActionBase:
     other objects (Builders, Executors, etc.)  This provides the
     common methods for manipulating and combining those actions."""

+    __metaclass__ = SCons.Memoize.Memoized_Metaclass
+
    def __cmp__(self, other):
        return cmp(self.__dict__, other)

@@ -239,6 +241,14 @@ class ActionBase:
        self.presub_env = None      # don't need this any more
        return lines

+if not SCons.Memoize.has_metaclass:
+    _Base = ActionBase
+    class ActionBase(SCons.Memoize.Memoizer, _Base):
+        "Cache-backed version of ActionBase"
+        def __init__(self, *args, **kw):
+            apply(_Base.__init__, (self,)+args, kw)
+            SCons.Memoize.Memoizer.__init__(self)
+
class _ActionAction(ActionBase):
    """Base class for actions that create output objects."""
@@ -558,7 +568,10 @@ class FunctionAction(_ActionAction):
        return "%s(%s, %s)" % (name, tstr, sstr)

    def __str__(self):
-        return "%s(target, source, env)" % self.function_name()
+        name = self.function_name()
+        if name == 'ActionCaller':
+            return str(self.execfunction)
+        return "%s(target, source, env)" % name

    def execute(self, target, source, env):
        rsources = map(rfile, source)
@@ -689,6 +702,8 @@ class ActionCaller:
        args = self.subst_args(target, source, env)
        kw = self.subst_kw(target, source, env)
        return apply(self.parent.strfunc, args, kw)
+    def __str__(self):
+        return apply(self.parent.strfunc, self.args, self.kw)

class ActionFactory:
    """A factory class that will wrap up an arbitrary function
@@ -705,11 +720,4 @@ class ActionFactory:
    def __call__(self, *args, **kw):
        ac = ActionCaller(self, args, kw)
        action = Action(ac, strfunction=ac.strfunction)
-        # action will be a FunctionAction; if left to its own devices,
-        # a genstr or str of this action will just show
-        # "ActionCaller(target, source, env)".  Override that with the
-        # description from strfunc.  Note that the apply is evaluated
-        # right now; __str__ is set to a (lambda) function that just
-        # returns the stored result of the evaluation whenever called.
-        action.__str__ = lambda name=apply(self.strfunc, args, kw): name
        return action
diff --git a/src/engine/SCons/ActionTests.py b/src/engine/SCons/ActionTests.py
index 4c1d521..4bcb084 100644
--- a/src/engine/SCons/ActionTests.py
+++ b/src/engine/SCons/ActionTests.py
@@ -1628,12 +1628,7 @@ class ActionFactoryTestCase(unittest.TestCase):
        af = SCons.Action.ActionFactory(actfunc, strfunc)
        af(3, 6, 9)([], [], Environment())
        assert actfunc_args == [3, 6, 9], actfunc_args
-        # Note that strfunc gets evaluated twice: once when we called
-        # the actionfactory itself to get the real action
-        # (Action(ActionCaller, ...)), and once when we actually call
-        # that resulting action; since strfunc modifies the global,
-        # account for the number of times it was called.
-        assert strfunc_args == [3, 6, 9, 3, 6, 9], strfunc_args
+        assert strfunc_args == [3, 6, 9], strfunc_args

class ActionCompareTestCase(unittest.TestCase):
@@ -1681,9 +1676,9 @@ class ActionCompareTestCase(unittest.TestCase):
                                   'BAR' : bar,
                                   'DOG' : dog} )

-        assert foo.get_name(env) == 'FOO'
-        assert bar.get_name(env) == 'BAR'
-        assert dog.get_name(env) == 'DOG'
+        assert foo.get_name(env) == 'FOO', foo.get_name(env)
+        assert bar.get_name(env) == 'BAR', bar.get_name(env)
+        assert dog.get_name(env) == 'DOG', dog.get_name(env)

if __name__ == "__main__":
diff --git a/src/engine/SCons/Builder.py b/src/engine/SCons/Builder.py
index ed070c4..9ac01fa 100644
--- a/src/engine/SCons/Builder.py
+++ b/src/engine/SCons/Builder.py
@@ -377,6 +377,8 @@ class BuilderBase:
    nodes (files) from input nodes (files).
    """

+    __metaclass__ = SCons.Memoize.Memoized_Metaclass
+
    def __init__(self,  action = None,
                        prefix = '',
                        suffix = '',
@@ -515,7 +517,7 @@ class BuilderBase:
        new_targets = []
        for t in tlist:
            if not t.is_derived():
-                t.builder = self
+                t.builder_set(self)
                new_targets.append(t)

        target, source = self.emitter(target=tlist, source=slist, env=env)
@@ -527,7 +529,7 @@ class BuilderBase:
            if t.builder is self:
                # Only delete the temporary builder if the emitter
                # didn't change it on us.
-                t.builder = None
+                t.builder_set(None)

        # Have to call arg2nodes yet again, since it is legal for
        # emitters to spit out strings as well as Node instances.
@@ -631,6 +633,14 @@ class BuilderBase:
        """
        self.emitter[suffix] = emitter

+if not SCons.Memoize.has_metaclass:
+    _Base = BuilderBase
+    class BuilderBase(SCons.Memoize.Memoizer, _Base):
+        "Cache-backed version of BuilderBase"
+        def __init__(self, *args, **kw):
+            apply(_Base.__init__, (self,)+args, kw)
+            SCons.Memoize.Memoizer.__init__(self)
+
class ListBuilder(SCons.Util.Proxy):
    """A Proxy to support building an array of targets (for example,
    foo.o and foo.h from foo.y) from a single Action execution.
@@ -687,27 +697,26 @@ class MultiStepBuilder(BuilderBase):
        if not SCons.Util.is_List(src_builder):
            src_builder = [ src_builder ]
        self.src_builder = src_builder
-        self.sdict = {}
-        self.cached_src_suffixes = {} # source suffixes keyed on id(env)
+
+    def _get_sdict(self, env):
+        "__cacheable__"
+        sdict = {}
+        for bld in self.src_builder:
+            if SCons.Util.is_String(bld):
+                try:
+                    bld = env['BUILDERS'][bld]
+                except KeyError:
+                    continue
+            for suf in bld.src_suffixes(env):
+                sdict[suf] = bld
+        return sdict
+
    def _execute(self, env, target, source, overwarn={}, executor_kw={}):
        # We now assume that target and source are lists or None.
        slist = env.arg2nodes(source, self.source_factory)
        final_sources = []
-        try:
-            sdict = self.sdict[id(env)]
-        except KeyError:
-            sdict = {}
-            self.sdict[id(env)] = sdict
-            for bld in self.src_builder:
-                if SCons.Util.is_String(bld):
-                    try:
-                        bld = env['BUILDERS'][bld]
-                    except KeyError:
-                        continue
-                for suf in bld.src_suffixes(env):
-                    sdict[suf] = bld
+        sdict = self._get_sdict(env)

        src_suffixes = self.src_suffixes(env)

@@ -750,15 +759,12 @@ class MultiStepBuilder(BuilderBase):
    def src_suffixes(self, env):
        """Return a list of the src_suffix attributes for all
        src_builders of this Builder.
+        __cacheable__
        """
-        try:
-            return self.cached_src_suffixes[id(env)]
-        except KeyError:
-            suffixes = BuilderBase.src_suffixes(self, env)
-            for builder in self.get_src_builders(env):
-                suffixes.extend(builder.src_suffixes(env))
-            self.cached_src_suffixes[id(env)] = suffixes
-            return suffixes
+        suffixes = BuilderBase.src_suffixes(self, env)
+        for builder in self.get_src_builders(env):
+            suffixes.extend(builder.src_suffixes(env))
+        return suffixes

class CompositeBuilder(SCons.Util.Proxy):
    """A Builder Proxy whose main purpose is to always have
diff --git a/src/engine/SCons/BuilderTests.py b/src/engine/SCons/BuilderTests.py
index 5cb07e8..005f3de 100644
--- a/src/engine/SCons/BuilderTests.py
+++ b/src/engine/SCons/BuilderTests.py
@@ -948,9 +948,9 @@ class BuilderTestCase(unittest.TestCase):
        src = tgt.sources[0]
        assert tgt.builder.target_scanner != scanner, tgt.builder.target_scanner
        assert tgt.builder.source_scanner is None, tgt.builder.source_scanner
-        assert tgt.get_source_scanner(bar_y, env1) is None, tgt.get_source_scanner(bar_y, env1)
+        assert tgt.get_source_scanner(bar_y) is None, tgt.get_source_scanner(bar_y)
        assert not src.has_builder(), src.has_builder()
-        assert src.get_source_scanner(bar_y, env1) is None, src.get_source_scanner(bar_y, env1)
+        assert src.get_source_scanner(bar_y) is None, src.get_source_scanner(bar_y)

        # An Environment that has suffix-specified SCANNERS should
        # provide a source scanner to the target.
@@ -974,10 +974,10 @@ class BuilderTestCase(unittest.TestCase):
        src = tgt.sources[0]
        assert tgt.builder.target_scanner != scanner, tgt.builder.target_scanner
        assert not tgt.builder.source_scanner, tgt.builder.source_scanner
-        assert tgt.get_source_scanner(bar_y, env3), tgt.get_source_scanner(bar_y, env3)
-        assert str(tgt.get_source_scanner(bar_y, env3)) == 'EnvTestScanner', tgt.get_source_scanner(bar_y, env3)
+        assert tgt.get_source_scanner(bar_y), tgt.get_source_scanner(bar_y)
+        assert str(tgt.get_source_scanner(bar_y)) == 'EnvTestScanner', tgt.get_source_scanner(bar_y)
        assert not src.has_builder(), src.has_builder()
-        assert src.get_source_scanner(bar_y, env3) is None, src.get_source_scanner(bar_y, env3)
+        assert src.get_source_scanner(bar_y) is None, src.get_source_scanner(bar_y)

        # Can't simply specify the scanner as a builder argument; it's
        # global to all invocations of this builder.
@@ -985,10 +985,10 @@ class BuilderTestCase(unittest.TestCase):
        src = tgt.sources[0]
        assert tgt.builder.target_scanner != scanner, tgt.builder.target_scanner
        assert not tgt.builder.source_scanner, tgt.builder.source_scanner
-        assert tgt.get_source_scanner(bar_y, env3), tgt.get_source_scanner(bar_y, env3)
-        assert str(tgt.get_source_scanner(bar_y, env3)) == 'EnvTestScanner', tgt.get_source_scanner(bar_y, env3)
+        assert tgt.get_source_scanner(bar_y), tgt.get_source_scanner(bar_y)
+        assert str(tgt.get_source_scanner(bar_y)) == 'EnvTestScanner', tgt.get_source_scanner(bar_y)
        assert not src.has_builder(), src.has_builder()
-        assert src.get_source_scanner(bar_y, env3) is None, src.get_source_scanner(bar_y, env3)
+        assert src.get_source_scanner(bar_y) is None, src.get_source_scanner(bar_y)

        # Now use a builder that actually has scanners and ensure that
        # the target is set accordingly (using the specified scanner
@@ -1002,11 +1002,11 @@ class BuilderTestCase(unittest.TestCase):
        assert tgt.builder.source_scanner, tgt.builder.source_scanner
        assert tgt.builder.source_scanner == scanner, tgt.builder.source_scanner
        assert str(tgt.builder.source_scanner) == 'TestScanner', str(tgt.builder.source_scanner)
-        assert tgt.get_source_scanner(bar_y, env3), tgt.get_source_scanner(bar_y, env3)
-        assert tgt.get_source_scanner(bar_y, env3) == scanner, tgt.get_source_scanner(bar_y, env3)
-        assert str(tgt.get_source_scanner(bar_y, env3)) == 'TestScanner', tgt.get_source_scanner(bar_y, env3)
+        assert tgt.get_source_scanner(bar_y), tgt.get_source_scanner(bar_y)
+        assert tgt.get_source_scanner(bar_y) == scanner, tgt.get_source_scanner(bar_y)
+        assert str(tgt.get_source_scanner(bar_y)) == 'TestScanner', tgt.get_source_scanner(bar_y)
        assert not src.has_builder(), src.has_builder()
-        assert src.get_source_scanner(bar_y, env3) is None, src.get_source_scanner(bar_y, env3)
+        assert src.get_source_scanner(bar_y) is None, src.get_source_scanner(bar_y)

@@ -1418,13 +1418,25 @@ class BuilderTestCase(unittest.TestCase):
        assert b3.get_name(env) == 'bldr3', b3.get_name(env)
        assert b4.get_name(env) == 'bldr4', b4.get_name(env)
        assert b5.get_name(env) == 'builder5', b5.get_name(env)
-        assert b6.get_name(env) == 'SCons.Builder.BuilderBase', b6.get_name(env)
+        # With no name, get_name will return the class.  Allow
+        # for caching...
+        assert b6.get_name(env) in [
+            'SCons.Builder.BuilderBase',
+            "<class 'SCons.Builder.BuilderBase'>",
+            'SCons.Memoize.BuilderBase',
+            "<class 'SCons.Memoize.BuilderBase'>",
+            ], b6.get_name(env)

        assert b1.get_name(env2) == 'B1', b1.get_name(env2)
        assert b2.get_name(env2) == 'B2', b2.get_name(env2)
        assert b3.get_name(env2) == 'B3', b3.get_name(env2)
        assert b4.get_name(env2) == 'B4', b4.get_name(env2)
        assert b5.get_name(env2) == 'builder5', b5.get_name(env2)
-        assert b6.get_name(env2) == 'SCons.Builder.BuilderBase', b6.get_name(env2)
+        assert b6.get_name(env2) in [
+            'SCons.Builder.BuilderBase',
+            "<class 'SCons.Builder.BuilderBase'>",
+            'SCons.Memoize.BuilderBase',
+            "<class 'SCons.Memoize.BuilderBase'>",
+            ], b6.get_name(env2)

        for B in b3.get_src_builders(env):
            assert B.get_name(env) == 'bldr1'
diff --git a/src/engine/SCons/Environment.py b/src/engine/SCons/Environment.py
index 4af4ced..2e146e3 100644
--- a/src/engine/SCons/Environment.py
+++ b/src/engine/SCons/Environment.py
@@ -239,6 +239,9 @@ class SubstitutionEnvironment:
    environment, we'll save that for a future refactoring when this
    class actually becomes useful.)
""" + + __metaclass__ = SCons.Memoize.Memoized_Metaclass + def __init__(self, **kw): """Initialization of an underlying SubstitutionEnvironment class. """ @@ -451,6 +454,8 @@ class Base(SubstitutionEnvironment): Environment. """ + __metaclass__ = SCons.Memoize.Memoized_Metaclass + ####################################################################### # This is THE class for interacting with the SCons build engine, # and it contains a lot of stuff, so we're going to try to keep this @@ -530,19 +535,16 @@ class Base(SubstitutionEnvironment): ####################################################################### def get_calculator(self): + "__cacheable__" try: - return self._calculator + module = self._calc_module + c = apply(SCons.Sig.Calculator, (module,), CalculatorArgs) except AttributeError: - try: - module = self._calc_module - c = apply(SCons.Sig.Calculator, (module,), CalculatorArgs) - except AttributeError: - # Note that we're calling get_calculator() here, so the - # DefaultEnvironment() must have a _calc_module attribute - # to avoid infinite recursion. - c = SCons.Defaults.DefaultEnvironment().get_calculator() - self._calculator = c - return c + # Note that we're calling get_calculator() here, so the + # DefaultEnvironment() must have a _calc_module attribute + # to avoid infinite recursion. + c = SCons.Defaults.DefaultEnvironment().get_calculator() + return c def get_builder(self, name): """Fetch the builder with the specified name from the environment. @@ -552,43 +554,47 @@ class Base(SubstitutionEnvironment): except KeyError: return None + def _gsm(self): + "__cacheable__" + try: + scanners = self._dict['SCANNERS'] + except KeyError: + return None + + sm = {} + # Reverse the scanner list so that, if multiple scanners + # claim they can scan the same suffix, earlier scanners + # in the list will overwrite later scanners, so that + # the result looks like a "first match" to the user. + if not SCons.Util.is_List(scanners): + scanners = [scanners] + else: + scanners = scanners[:] # copy so reverse() doesn't mod original + scanners.reverse() + for scanner in scanners: + for k in scanner.get_skeys(self): + sm[k] = scanner + return sm + def get_scanner(self, skey): """Find the appropriate scanner given a key (usually a file suffix). + __cacheable__ """ - try: - sm = self.scanner_map - except AttributeError: - try: - scanners = self._dict['SCANNERS'] - except KeyError: - self.scanner_map = {} - return None - else: - self.scanner_map = sm = {} - # Reverse the scanner list so that, if multiple scanners - # claim they can scan the same suffix, earlier scanners - # in the list will overwrite later scanners, so that - # the result looks like a "first match" to the user. - if not SCons.Util.is_List(scanners): - scanners = [scanners] - scanners.reverse() - for scanner in scanners: - for k in scanner.get_skeys(self): - sm[k] = scanner - try: + sm = self._gsm() + if sm.has_key(skey): return sm[skey] - except KeyError: - return None + return None + def _smd(self): + "__reset_cache__" + pass + def scanner_map_delete(self, kw=None): """Delete the cached scanner map (if we need to). """ if not kw is None and not kw.has_key('SCANNERS'): return - try: - del self.scanner_map - except AttributeError: - pass + self._smd() def _update(self, dict): """Update an environment's values directly, bypassing the normal @@ -1402,6 +1408,9 @@ class OverrideEnvironment(SubstitutionEnvironment): be proxied because they need *this* object's methods to fetch the values from the overrides dictionary. 
""" + + __metaclass__ = SCons.Memoize.Memoized_Metaclass + def __init__(self, subject, overrides={}): if __debug__: logInstanceCreation(self, 'OverrideEnvironment') self.__dict__['__subject'] = subject @@ -1519,3 +1528,12 @@ def NoSubstitutionProxy(subject): self.raw_to_mode(nkw) return apply(SCons.Util.scons_subst, nargs, nkw) return _NoSubstitutionProxy(subject) + +if not SCons.Memoize.has_metaclass: + _Base = Base + class Base(SCons.Memoize.Memoizer, _Base): + def __init__(self, *args, **kw): + SCons.Memoize.Memoizer.__init__(self) + apply(_Base.__init__, (self,)+args, kw) + Environment = Base + diff --git a/src/engine/SCons/EnvironmentTests.py b/src/engine/SCons/EnvironmentTests.py index 92b89c0..a0b1615 100644 --- a/src/engine/SCons/EnvironmentTests.py +++ b/src/engine/SCons/EnvironmentTests.py @@ -122,6 +122,9 @@ class Scanner: def get_skeys(self, env): return self.skeys + def __str__(self): + return self.name + class CLVar(UserList.UserList): @@ -1231,27 +1234,31 @@ def exists(env): assert env['BBB3'] == ['b3', 'c', 'd'], env['BBB3'] def test_Copy(self): - """Test construction Environment copying - - Update the copy independently afterwards and check that - the original remains intact (that is, no dangling - references point to objects in the copied environment). - Copy the original with some construction variable - updates and check that the original remains intact - and the copy has the updated values. - """ - env1 = Environment(XXX = 'x', YYY = 'y') - env2 = env1.Copy() - env1copy = env1.Copy() - env2.Replace(YYY = 'yyy') - assert env1 != env2 - assert env1 == env1copy - - env3 = env1.Copy(XXX = 'x3', ZZZ = 'z3') - assert env3.Dictionary('XXX') == 'x3' - assert env3.Dictionary('YYY') == 'y' - assert env3.Dictionary('ZZZ') == 'z3' - assert env1 == env1copy + """Test construction Environment copying + + Update the copy independently afterwards and check that + the original remains intact (that is, no dangling + references point to objects in the copied environment). + Copy the original with some construction variable + updates and check that the original remains intact + and the copy has the updated values. + """ + env1 = Environment(XXX = 'x', YYY = 'y') + env2 = env1.Copy() + env1copy = env1.Copy() + assert env1copy == env1copy + assert env2 == env2 + env2.Replace(YYY = 'yyy') + assert env2 == env2 + assert env1 != env2 + assert env1 == env1copy + + env3 = env1.Copy(XXX = 'x3', ZZZ = 'z3') + assert env3 == env3 + assert env3.Dictionary('XXX') == 'x3' + assert env3.Dictionary('YYY') == 'y' + assert env3.Dictionary('ZZZ') == 'z3' + assert env1 == env1copy assert env1['__env__'] is env1, env1['__env__'] assert env2['__env__'] is env2, env2['__env__'] @@ -1277,6 +1284,8 @@ def exists(env): assert hasattr(env1, 'b1'), "env1.b1 was not set" assert env1.b1.env == env1, "b1.env doesn't point to env1" env2 = env1.Copy(BUILDERS = {'b2' : 2}) + assert env2 is env2 + assert env2 == env2 assert hasattr(env1, 'b1'), "b1 was mistakenly cleared from env1" assert env1.b1.env == env1, "b1.env was changed" assert not hasattr(env2, 'b1'), "b1 was not cleared from env2" diff --git a/src/engine/SCons/Executor.py b/src/engine/SCons/Executor.py index 1cb449c..2a19171 100644 --- a/src/engine/SCons/Executor.py +++ b/src/engine/SCons/Executor.py @@ -43,7 +43,9 @@ class Executor: and sources for later processing as needed. 
""" - def __init__(self, action, env=None, overridelist=[], + __metaclass__ = SCons.Memoize.Memoized_Metaclass + + def __init__(self, action, env=None, overridelist=[{}], targets=[], sources=[], builder_kw={}): if __debug__: logInstanceCreation(self) if not action: @@ -58,56 +60,59 @@ class Executor: def get_build_env(self): """Fetch or create the appropriate build Environment for this Executor. + __cacheable__ """ + # Create the build environment instance with appropriate + # overrides. These get evaluated against the current + # environment's construction variables so that users can + # add to existing values by referencing the variable in + # the expansion. + overrides = {} + for odict in self.overridelist: + overrides.update(odict) try: - return self.build_env - except AttributeError: - # Create the build environment instance with appropriate - # overrides. These get evaluated against the current - # environment's construction variables so that users can - # add to existing values by referencing the variable in - # the expansion. - overrides = {} - for odict in self.overridelist: - overrides.update(odict) - try: - generate_build_dict = self.targets[0].generate_build_dict - except (AttributeError, IndexError): - pass - else: - overrides.update(generate_build_dict()) - - import SCons.Defaults - env = self.env or SCons.Defaults.DefaultEnvironment() - self.build_env = env.Override(overrides) - - # Update the overrides with the $TARGET/$SOURCE variables for - # this target+source pair, so that evaluations of arbitrary - # Python functions have them in the __env__ environment - # they're passed. Note that the underlying substitution - # functions also override these with their own $TARGET/$SOURCE - # expansions, which is *usually* duplicated effort, but covers - # a corner case where an Action is called directly from within - # a function action with different target and source lists. - self.build_env._update(SCons.Util.subst_dict(self.targets, - self.sources)) - return self.build_env - - def do_nothing(self, target, errfunc, **kw): + generate_build_dict = self.targets[0].generate_build_dict + except (AttributeError, IndexError): + pass + else: + overrides.update(generate_build_dict()) + + import SCons.Defaults + env = self.env or SCons.Defaults.DefaultEnvironment() + build_env = env.Override(overrides) + + # Update the overrides with the $TARGET/$SOURCE variables for + # this target+source pair, so that evaluations of arbitrary + # Python functions have them in the __env__ environment + # they're passed. Note that the underlying substitution + # functions also override these with their own $TARGET/$SOURCE + # expansions, which is *usually* duplicated effort, but covers + # a corner case where an Action is called directly from within + # a function action with different target and source lists. + build_env._update(SCons.Util.subst_dict(self.targets, self.sources)) + + return build_env + + def do_nothing(self, target, errfunc, kw): pass - def __call__(self, target, errfunc, **kw): + def do_execute(self, target, errfunc, kw): """Actually execute the action list.""" kw = kw.copy() kw.update(self.builder_kw) apply(self.action, (self.targets, self.sources, self.get_build_env(), errfunc), kw) + # use extra indirection because with new-style objects (Python 2.2 + # and above) we can't override special methods, and nullify() needs + # to be able to do this. 
+
+    def __call__(self, target, errfunc, **kw):
+        self.do_execute(target, errfunc, kw)
+
    def cleanup(self):
-        try:
-            del self.build_env
-        except AttributeError:
-            pass
+        "__reset_cache__"
+        pass

    def add_sources(self, sources):
        """Add source files to this Executor's list.  This is necessary
@@ -116,34 +121,32 @@ class Executor:
        slist = filter(lambda x, s=self.sources: x not in s, sources)
        self.sources.extend(slist)

+    # another extra indirection for new-style objects and nullify...
+
+    def my_str(self):
+        return self.action.genstring(self.targets,
+                                     self.sources,
+                                     self.get_build_env())
+
    def __str__(self):
-        try:
-            return self.string
-        except AttributeError:
-            action = self.action
-            self.string = action.genstring(self.targets,
-                                           self.sources,
-                                           self.get_build_env())
-            return self.string
+        "__cacheable__"
+        return self.my_str()

    def nullify(self):
-        self.__call__ = self.do_nothing
-        self.string = ''
+        "__reset_cache__"
+        self.do_execute = self.do_nothing
+        self.my_str = lambda S=self: ''

    def get_contents(self):
        """Fetch the signature contents.  This, along with
        get_raw_contents(), is the real reason this class exists, so we
        can compute this once and cache it regardless of how many target
        or source Nodes there are.
+        __cacheable__
        """
-        try:
-            return self.contents
-        except AttributeError:
-            action = self.action
-            self.contents = action.get_contents(self.targets,
-                                                self.sources,
-                                                self.get_build_env())
-            return self.contents
+        return self.action.get_contents(self.targets,
+                                        self.sources,
+                                        self.get_build_env())

    def get_timestamp(self):
        """Fetch a time stamp for this Executor.  We don't have one, of
        course (only files do), but this is the interface used by the
        timestamp module.
        """
        return 0
+
+if not SCons.Memoize.has_metaclass:
+    _Base = Executor
+    class Executor(SCons.Memoize.Memoizer, _Base):
+        def __init__(self, *args, **kw):
+            SCons.Memoize.Memoizer.__init__(self)
+            apply(_Base.__init__, (self,)+args, kw)
+
diff --git a/src/engine/SCons/ExecutorTests.py b/src/engine/SCons/ExecutorTests.py
index 219efee..d5b6dd2 100644
--- a/src/engine/SCons/ExecutorTests.py
+++ b/src/engine/SCons/ExecutorTests.py
@@ -96,10 +96,11 @@ class ExecutorTestCase(unittest.TestCase):

    def test_get_build_env(self):
        """Test fetching and generating a build environment"""
-        x = SCons.Executor.Executor(MyAction(), 'e', [], 't', ['s1', 's2'])
-        x.build_env = 'eee'
+        x = SCons.Executor.Executor(MyAction(), MyEnvironment(e=1), [],
+                                    't', ['s1', 's2'])
+        x.env = MyEnvironment(eee=1)
        be = x.get_build_env()
-        assert be == 'eee', be
+        assert be['eee'] == 1, be

        env = MyEnvironment(X='xxx')
        x = SCons.Executor.Executor(MyAction(),
@@ -171,17 +172,23 @@ class ExecutorTestCase(unittest.TestCase):

    def test_cleanup(self):
        """Test cleaning up an Executor"""
-        x = SCons.Executor.Executor('b', 'e', 'o', 't', ['s1', 's2'])
+        orig_env = MyEnvironment(e=1)
+        x = SCons.Executor.Executor('b', orig_env, [{'o':1}],
+                                    't', ['s1', 's2'])
+        be = x.get_build_env()
+        assert be['e'] == 1, be['e']
+
        x.cleanup()

-        x.build_env = 'eee'
+        x.env = MyEnvironment(eee=1)
        be = x.get_build_env()
-        assert be == 'eee', be
+        assert be['eee'] == 1, be['eee']

        x.cleanup()
-        assert not hasattr(x, 'build_env')
+        be = x.get_build_env()
+        assert be['eee'] == 1, be['eee']

    def test_add_sources(self):
        """Test adding sources to an Executor"""
@@ -220,6 +227,7 @@ class ExecutorTestCase(unittest.TestCase):
        del result[:]

        x.nullify()
+        assert result == [], result
        x(MyNode([], []), None)
        assert result == [], result
        s = str(x)
@@ -230,13 +238,13 @@ class ExecutorTestCase(unittest.TestCase):
        env = MyEnvironment(C='contents')

        x = SCons.Executor.Executor(MyAction(), env, [], ['t'], ['s'])
-        x.contents = 'contents'
        c = x.get_contents()
-        assert c == 'contents', c
+        assert c == 'action1 action2 t s', c

-        x = SCons.Executor.Executor(MyAction(), env, [], ['t'], ['s'])
+        x = SCons.Executor.Executor(MyAction(actions=['grow']), env, [],
+                                    ['t'], ['s'])
        c = x.get_contents()
-        assert c == 'action1 action2 t s', c
+        assert c == 'grow t s', c

    def test_get_timestamp(self):
        """Test fetching the "timestamp" """
diff --git a/src/engine/SCons/Memoize.py b/src/engine/SCons/Memoize.py
new file mode 100644
index 0000000..ce3d9e6
--- /dev/null
+++ b/src/engine/SCons/Memoize.py
@@ -0,0 +1,682 @@
+"""Memoizer
+
+Memoizer -- base class to provide automatic, optimized caching of
+method return values for subclassed objects.  Caching is activated by
+the presence of "__cacheable__" in the doc of a method (acts like a
+decorator).  The presence of "__cache_reset__" or "__reset_cache__"
+in the doc string instead indicates a method that should reset the
+cache, discarding any currently cached values.
+
+Note: current implementation is optimized for speed, not space.  The
+cache reset operation does not actually discard older results, and in
+fact, all cached results (and keys) are held indefinitely.
+
+Most of the work for this is done by copying and modifying the class
+definition itself, rather than the object instances.  This will
+therefore allow all instances of a class to get caching activated
+without requiring lengthy initialization or other management of the
+instance.
+
+[This could also be done using metaclassing (which would require
+Python 2.2) and decorators (which would require Python 2.4).  Current
+implementation is used due to Python 1.5.2 compatability requirement
+contraint.]
+
+A few notes:
+
+    * All local methods/attributes use a prefix of "_MeMoIZeR" to avoid
+      namespace collisions with the attributes of the objects
+      being cached.
+
+    * Based on performance evaluations of dictionaries, caching is
+      done by providing each object with a unique key attribute and
+      using the value of that attribute as an index for dictionary
+      lookup.  If an object doesn't have one of these attributes,
+      fallbacks are utilized (although they will be somewhat slower).
+
+    * To support this unique-value attribute correctly, it must be
+      removed whenever a __cmp__ operation is performed, and it must
+      be updated whenever a copy.copy or copy.deepcopy is performed,
+      so appropriate manipulation is provided by the Caching code
+      below.
+
+    * Cached values are stored in the class (indexed by the caching
+      key attribute, then by the name of the method called and the
+      constructed key of the arguments passed).  By storing them here
+      rather than on the instance, the instance can be compared,
+      copied, and pickled much easier.
+
+Some advantages:
+
+    * The method by which caching is implemented can be changed in a
+      single location and it will apply globally.
+
+    * Greatly simplified client code: remove lots of try...except or
+      similar handling of cached lookup.  Also usually more correct in
+      that it based caching on all input arguments whereas many
+      hand-implemented caching operations often miss arguments that
+      might affect results.
+
+    * Caching can be globally disabled very easily (for testing, etc.)
+ +""" + +# +# __COPYRIGHT__ +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + +__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" + +#TBD: for pickling, should probably revert object to unclassed state... + +import copy +import string +import sys + +# +# Generate a key for an object that is to be used as the caching key +# for that object. +# +# Current implementation: singleton generating a monotonically +# increasing integer + +class MemoizerKey: + def __init__(self): + self._next_keyval = 0 + def __call__(self): + r = self._next_keyval + self._next_keyval = self._next_keyval + 1 + return str(r) +Next_Memoize_Key = MemoizerKey() + + +# +# Memoized Class management. +# +# Classes can be manipulated just like object instances; we are going +# to do some of that here, without the benefit of metaclassing +# introduced in Python 2.2 (it would be nice to use that, but this +# attempts to maintain backward compatibility to Python 1.5.2). +# +# The basic implementation therefore is to update the class definition +# for any objects that we want to enable caching for. The updated +# definition performs caching activities for those methods +# appropriately marked in the original class. +# +# When an object is created, its class is switched to this updated, +# cache-enabled class definition, thereby enabling caching operations. +# +# To get an instance to used the updated, caching class, the instance +# must declare the Memoizer as a base class and make sure to call the +# Memoizer's __init__ during the instance's __init__. The Memoizer's +# __init__ will perform the class updating. + +# For Python 2.2 and later, where metaclassing is supported, it is +# sufficient to provide a "__metaclass__ = Memoized_Metaclass" as part +# of the class definition; the metaclassing will automatically invoke +# the code herein properly. 
+
+##import cPickle
+##def ALT0_MeMoIZeR_gen_key(argtuple, kwdict):
+##    return cPickle.dumps( (argtuple,kwdict) )
+
+def ALT1_MeMoIZeR_gen_key(argtuple, kwdict):
+    return repr(argtuple) + '|' + repr(kwdict)
+
+def ALT2_MeMoIZeR_gen_key(argtuple, kwdict):
+    return string.join(map(lambda A:
+                           getattr(A, '_MeMoIZeR_Key', str(A)),
+                           argtuple) + \
+                       map(lambda D:
+                           str(D[0])+
+                           getattr(D[1], '_MeMoIZeR_Key', str(D[1])),
+                           kwdict.items()),
+                       '|')
+
+def ALT3_MeMoIZeR_gen_key(argtuple, kwdict):
+    ret = []
+    for A in argtuple:
+        X = getattr(A, '_MeMoIZeR_Key', None)
+        if X:
+            ret.append(X)
+        else:
+            ret.append(str(A))
+    for K,V in kwdict.items():
+        ret.append(str(K))
+        X = getattr(V, '_MeMoIZeR_Key', None)
+        if X:
+            ret.append(X)
+        else:
+            ret.append(str(V))
+    return string.join(ret, '|')
+
+def ALT4_MeMoIZeR_gen_key(argtuple, kwdict):
+    if kwdict:
+        return string.join(map(lambda A:
+                               getattr(A, '_MeMoIZeR_Key', None) or str(A),
+                               argtuple) + \
+                           map(lambda D:
+                               str(D[0])+
+                               (getattr(D[1], '_MeMoIZeR_Key', None) or str(D[1])),
+                               kwdict.items()),
+                           '|')
+    return string.join(map(lambda A:
+                           getattr(A, '_MeMoIZeR_Key', None) or str(A),
+                           argtuple),
+                       '!')
+
+def ALT5_MeMoIZeR_gen_key(argtuple, kwdict):
+    A = string.join(map(str, argtuple), '|')
+    K = ''
+    if kwdict:
+        I = map(lambda K,D=kwdict: str(K)+'='+str(D[K]), kwdict.keys())
+        K = string.join(I, '|')
+    return string.join([A,K], '!')
+
+def ALT6_MeMoIZeR_gen_key(argtuple, kwdict):
+    A = string.join(map(str, map(id, argtuple)), '|')
+    K = ''
+    if kwdict:
+        I = map(lambda K,D=kwdict: str(K)+'='+str(id(D[K])), kwdict.keys())
+        K = string.join(I, '|')
+    return string.join([A,K], '!')
+
+def ALT7_MeMoIZeR_gen_key(argtuple, kwdict):
+    A = string.join(map(repr, argtuple), '|')
+    K = ''
+    if kwdict:
+        I = map(lambda K,D=kwdict: repr(K)+'='+repr(D[K]), kwdict.keys())
+        K = string.join(I, '|')
+    return string.join([A,K], '!')
+
+def ALT8_MeMoIZeR_gen_key(argtuple, kwdict):
+    ret = []
+    for A in argtuple:
+        X = getattr(A, '_MeMoIZeR_Key', None)
+        if X:
+            ret.append(X)
+        else:
+            ret.append(repr(A))
+    for K,V in kwdict.items():
+        ret.append(str(K))
+        X = getattr(V, '_MeMoIZeR_Key', None)
+        if X:
+            ret.append(X)
+        else:
+            ret.append(repr(V))
+    return string.join(ret, '|')
+
+def ALT9_MeMoIZeR_gen_key(argtuple, kwdict):
+    ret = []
+    for A in argtuple:
+        try:
+            X = A.__dict__.get('_MeMoIZeR_Key', None) or repr(A)
+        except (AttributeError, KeyError):
+            X = repr(A)
+        ret.append(X)
+    for K,V in kwdict.items():
+        ret.append(str(K))
+        ret.append('=')
+        try:
+            X = V.__dict__.get('_MeMoIZeR_Key', None) or repr(V)
+        except (AttributeError, KeyError):
+            X = repr(V)
+        ret.append(X)
+    return string.join(ret, '|')
+
+#_MeMoIZeR_gen_key = ALT9_MeMoIZeR_gen_key   #  8.8, 0.20
+_MeMoIZeR_gen_key = ALT8_MeMoIZeR_gen_key    #  8.5, 0.18
+#_MeMoIZeR_gen_key = ALT7_MeMoIZeR_gen_key   #  8.7, 0.17
+#_MeMoIZeR_gen_key = ALT6_MeMoIZeR_gen_key   #
+#_MeMoIZeR_gen_key = ALT5_MeMoIZeR_gen_key   #  9.7, 0.20
+#_MeMoIZeR_gen_key = ALT4_MeMoIZeR_gen_key   #  8.6, 0.19
+#_MeMoIZeR_gen_key = ALT3_MeMoIZeR_gen_key   #  8.5, 0.20
+#_MeMoIZeR_gen_key = ALT2_MeMoIZeR_gen_key   # 10.1, 0.22
+#_MeMoIZeR_gen_key = ALT1_MeMoIZeR_gen_key   #  8.6  0.18
+
+
+
+## This is really the core worker of the Memoize module.  Any
+## __cacheable__ method ends up calling this function which tries to
+## return a previously cached value if it exists, and which calls the
+## actual function and caches the return value if it doesn't already
+## exist.
+##
+## This function should be VERY efficient: it will get called a lot
+## and its job is to be faster than what would be called.
+
+def Memoizer_cache_get(func, cdict, args, kw):
+    """Called instead of name to see if this method call's return
+    value has been cached.  If it has, just return the cached
+    value; if not, call the actual method and cache the return."""
+
+    obj = args[0]
+
+    ckey = obj._MeMoIZeR_Key + ':' + _MeMoIZeR_gen_key(args, kw)
+
+##    try:
+##        rval = cdict[ckey]
+##    except KeyError:
+##        rval = cdict[ckey] = apply(func, args, kw)
+
+    rval = cdict.get(ckey, "_MeMoIZeR")
+    if rval is "_MeMoIZeR":
+        rval = cdict[ckey] = apply(func, args, kw)
+
+##    rval = cdict.setdefault(ckey, apply(func, args, kw))
+
+##    if cdict.has_key(ckey):
+##        rval = cdict[ckey]
+##    else:
+##        rval = cdict[ckey] = apply(func, args, kw)
+
+    return rval
+
+def Memoizer_cache_get_self(func, cdict, self):
+    """Called instead of func(self) to see if this method call's
+    return value has been cached.  If it has, just return the cached
+    value; if not, call the actual method and cache the return.
+    Optimized version of Memoizer_cache_get for methods that take the
+    object instance as the only argument."""
+
+    ckey = self._MeMoIZeR_Key
+
+##    try:
+##        rval = cdict[ckey]
+##    except KeyError:
+##        rval = cdict[ckey] = func(self)
+
+    rval = cdict.get(ckey, "_MeMoIZeR")
+    if rval is "_MeMoIZeR":
+        rval = cdict[ckey] = func(self)
+
+##    rval = cdict.setdefault(ckey, func(self)))
+
+##    if cdict.has_key(ckey):
+##        rval = cdict[ckey]
+##    else:
+##        rval = cdict[ckey] = func(self)
+
+    return rval
+
+def Memoizer_cache_get_one(func, cdict, self, arg):
+    """Called instead of func(self, arg) to see if this method call's
+    return value has been cached.  If it has, just return the cached
+    value; if not, call the actual method and cache the return.
+    Optimized version of Memoizer_cache_get for methods that take the
+    object instance and one other argument only."""
+
+##    X = getattr(arg, "_MeMoIZeR_Key", None)
+##    if X:
+##        ckey = self._MeMoIZeR_Key +':'+ X
+##    else:
+##        ckey = self._MeMoIZeR_Key +':'+ str(arg)
+    ckey = self._MeMoIZeR_Key + ':' + \
+           (getattr(arg, "_MeMoIZeR_Key", None) or repr(arg))
+
+##    try:
+##        rval = cdict[ckey]
+##    except KeyError:
+##        rval = cdict[ckey] = func(self, arg)
+
+    rval = cdict.get(ckey, "_MeMoIZeR")
+    if rval is "_MeMoIZeR":
+        rval = cdict[ckey] = func(self, arg)
+
+##    rval = cdict.setdefault(ckey, func(self, arg)))
+
+##    if cdict.has_key(ckey):
+##        rval = cdict[ckey]
+##    else:
+##        rval = cdict[ckey] = func(self, arg)
+
+    return rval
+
+
+class _Memoizer_Simple:
+
+    def __setstate__(self, state):
+        self.__dict__.update(state)
+        self.__dict__['_MeMoIZeR_Key'] = Next_Memoize_Key()
+        #kwq: need to call original's setstate if it had one...
+
+    def _MeMoIZeR_reset(self):
+        self.__dict__['_MeMoIZeR_Key'] = Next_Memoize_Key()
+        return 1
+
+
+class _Memoizer_Comparable:
+
+    def __setstate__(self, state):
+        self.__dict__.update(state)
+        self.__dict__['_MeMoIZeR_Key'] = Next_Memoize_Key()
+        #kwq: need to call original's setstate if it had one...
+
+    def _MeMoIZeR_reset(self):
+        self.__dict__['_MeMoIZeR_Key'] = Next_Memoize_Key()
+        return 1
+
+    def __cmp__(self, other):
+        """A comparison might use the object dictionaries to
+        compare, so the dictionaries should contain caching
+        entries.  Make new dictionaries without those entries
+        to use with the underlying comparison."""
+
+        if self is other:
+            return 0
+
+        # We are here as a cached object, but cmp will flip its
+        # arguments back and forth and recurse attempting to get base
+        # arguments for the comparison, so we might have already been
+        # stripped.
+
+        try:
+            saved_d1 = self.__dict__
+            d1 = copy.copy(saved_d1)
+            del d1['_MeMoIZeR_Key']
+        except KeyError:
+            return self._MeMoIZeR_cmp(other)
+        self.__dict__ = d1
+
+        # Same thing for the other, but we should try to convert it
+        # here in case the _MeMoIZeR_cmp compares __dict__ objects
+        # directly.
+
+        saved_other = None
+        try:
+            if other.__dict__.has_key('_MeMoIZeR_Key'):
+                saved_other = other.__dict__
+                d2 = copy.copy(saved_other)
+                del d2['_MeMoIZeR_Key']
+                other.__dict__ = d2
+        except (AttributeError, KeyError):
+            pass
+
+        # Both self and other have been prepared: perform the test,
+        # then restore the original dictionaries and exit
+
+        rval = self._MeMoIZeR_cmp(other)
+
+        self.__dict__ = saved_d1
+        if saved_other:
+            other.__dict__ = saved_other
+
+        return rval
+
+
+def Analyze_Class(klass):
+    if klass.__dict__.has_key('_MeMoIZeR_converted'): return klass
+
+    original_name = str(klass)
+
+    D,R,C = _analyze_classmethods(klass.__dict__, klass.__bases__)
+
+    if C:
+        modelklass = _Memoizer_Comparable
+        lcldict = {'_MeMoIZeR_cmp':C}
+    else:
+        modelklass = _Memoizer_Simple
+        lcldict = {}
+
+    klass.__dict__.update(memoize_classdict(modelklass, lcldict, D, R))
+
+    return klass
+
+
+# Note that each eval("lambda...") has a few \n's prepended to the
+# lambda, and furthermore that each of these evals has a different
+# number of \n's prepended.  This is to provide a little bit of info
+# for traceback or profile output, which generate things like 'File
+# "<string>", line X'.  X will be the number of \n's plus 1.
+
+def memoize_classdict(modelklass, new_klassdict, cacheable, resetting):
+    new_klassdict.update(modelklass.__dict__)
+    new_klassdict['_MeMoIZeR_converted'] = 1
+
+    for name,code in cacheable.items():
+        if code.func_code.co_argcount == 1 and \
+           not code.func_code.co_flags & 0xC:
+            newmethod = eval(
+                compile("\n"*1 +
+                        "lambda self: Memoizer_cache_get_self(methcode, methcached, self)",
+                        "Memoizer_cache_get_self_lambda",
+                        "eval"),
+                {'methcode':code, 'methcached':{},
+                 'Memoizer_cache_get_self':Memoizer_cache_get_self},
+                {})
+        elif code.func_code.co_argcount == 2 and \
+             not code.func_code.co_flags & 0xC:
+            newmethod = eval(
+                compile("\n"*2 +
+                        "lambda self, arg: Memoizer_cache_get_one(methcode, methcached, self, arg)",
+                        "Memoizer_cache_get_one_lambda",
+                        "eval"),
+                {'methcode':code, 'methcached':{},
+                 'Memoizer_cache_get_one':Memoizer_cache_get_one},
+                {})
+        else:
+            newmethod = eval(
+                compile("\n"*3 +
+                        "lambda *args, **kw: Memoizer_cache_get(methcode, methcached, args, kw)",
+                        "Memoizer_cache_get_lambda",
+                        "eval"),
+                {'methcode':code, 'methcached':{},
+                 'Memoizer_cache_get':Memoizer_cache_get}, {})
+        new_klassdict[name] = newmethod
+
+    for name,code in resetting.items():
+        newmethod = eval(
+            "lambda obj_self, *args, **kw: (obj_self._MeMoIZeR_reset(), apply(rmethcode, (obj_self,)+args, kw))[1]",
+            {'rmethcode':code}, {})
+        new_klassdict[name] = newmethod
+
+    return new_klassdict
+
+
+def _analyze_classmethods(klassdict, klassbases):
+    """Given a class, performs a scan of methods for that class and
+    all its base classes (recursively).  Returns aggregated results of
+    _scan_classdict calls where subclass methods are superimposed over
+    base class methods of the same name (emulating instance->class
+    method lookup)."""
+
+    D = {}
+    R = {}
+    C = None
+
+    # Get cache/reset/cmp methods from subclasses
+
+    for K in klassbases:
+        if K.__dict__.has_key('_MeMoIZeR_converted'): continue
+        d,r,c = _analyze_classmethods(K.__dict__, K.__bases__)
+        D.update(d)
+        R.update(r)
+        C = c or C
+
+    # Delete base method info if current class has an override
+
+    for M in D.keys():
+        if M == '__cmp__': continue
+        if klassdict.has_key(M):
+            del D[M]
+    for M in R.keys():
+        if M == '__cmp__': continue
+        if klassdict.has_key(M):
+            del R[M]
+
+    # Get cache/reset/cmp from current class
+
+    d,r,c = _scan_classdict(klassdict)
+
+    # Update accumulated cache/reset/cmp methods
+
+    D.update(d)
+    R.update(r)
+    C = c or C
+
+    return D,R,C
+
+
+def _scan_classdict(klassdict):
+    """Scans the method dictionary of a class to find all methods
+    interesting to caching operations.  Returns a tuple of these
+    interesting methods:
+
+      ( dict-of-cachable-methods,
+        dict-of-cache-resetting-methods,
+        cmp_method_val or None)
+
+    Each dict has the name of the method as a key and the corresponding
+    value is the method body."""
+
+    cache_setters = {}
+    cache_resetters = {}
+    cmp_if_exists = None
+    already_cache_modified = 0
+
+    for attr,val in klassdict.items():
+        if not callable(val): continue
+        if attr == '__cmp__':
+            cmp_if_exists = val
+            continue # cmp can't be cached and can't reset cache
+        if attr == '_MeMoIZeR_cmp':
+            already_cache_modified = 1
+            continue
+        if not val.__doc__: continue
+        if string.find(val.__doc__, '__cache_reset__') > -1:
+            cache_resetters[attr] = val
+            continue
+        if string.find(val.__doc__, '__reset_cache__') > -1:
+            cache_resetters[attr] = val
+            continue
+        if string.find(val.__doc__, '__cacheable__') > -1:
+            cache_setters[attr] = val
+            continue
+    if already_cache_modified: cmp_if_exists = 'already_cache_modified'
+    return cache_setters, cache_resetters, cmp_if_exists
+
+#
+# Primary Memoizer class.  This should be a base-class for any class
+# that wants method call results to be cached.  The sub-class should
+# call this parent class's __init__ method, but no other requirements
+# are made on the subclass (other than appropriate decoration).
+
+class Memoizer:
+    """Object which performs caching of method calls for its 'primary'
+    instance."""
+
+    def __init__(self):
+        self.__class__ = Analyze_Class(self.__class__)
+        self._MeMoIZeR_Key = Next_Memoize_Key()
+
+
+has_metaclass = 1
+# Find out if we are pre-2.2
+
+try:
+    vinfo = sys.version_info
+except AttributeError:
+    """Split an old-style version string into major and minor parts.  This
+    is complicated by the fact that a version string can be something
+    like 3.2b1."""
+    import re
+    version = string.split(string.split(sys.version, ' ')[0], '.')
+    vinfo = (int(version[0]), int(re.match('\d+', version[1]).group()))
+    del re
+
+need_version = (2, 2) # actual
+#need_version = (33, 0) # always
+#need_version = (0, 0) # never
+if vinfo[0] < need_version[0] or \
+   (vinfo[0] == need_version[0] and
+    vinfo[1] < need_version[1]):
+    has_metaclass = 0
+    class Memoized_Metaclass:
+        # Just a place-holder so pre-metaclass Python versions don't
+        # have to have special code for the Memoized classes.
+        pass
+else:
+
+    # Initialization is a wee bit of a hassle.  We want to do some of
+    # our own work for initialization, then pass on to the actual
+    # initialization function.  However, we have to be careful we
+    # don't interfere with (a) the super()'s initialization call of
+    # it's superclass's __init__, and (b) classes we are Memoizing
+    # that don't have their own __init__ but which have a super that
+    # has an __init__.  To do (a), we eval a lambda below where the
+    # actual init code is locally bound and the __init__ entry in the
+    # class's dictionary is replaced with the _MeMoIZeR_init call.  To
+    # do (b), we use _MeMoIZeR_superinit as a fallback if the class
+    # doesn't have it's own __init__.  Note that we don't use getattr
+    # to obtain the __init__ because we don't want to re-instrument
+    # parent-class __init__ operations (and we want to avoid the
+    # Object object's slot init if the class has no __init__).
+
+    def _MeMoIZeR_init(actual_init, self, args, kw):
+        self.__dict__['_MeMoIZeR_Key'] = Next_Memoize_Key()
+        apply(actual_init, (self,)+args, kw)
+
+    def _MeMoIZeR_superinit(self, cls, args, kw):
+        apply(super(cls, self).__init__, args, kw)
+
+    class Memoized_Metaclass(type):
+        def __init__(cls, name, bases, cls_dict):
+            # Note that cls_dict apparently contains a *copy* of the
+            # attribute dictionary of the class; modifying cls_dict
+            # has no effect on the actual class itself.
+            D,R,C = _analyze_classmethods(cls_dict, bases)
+            if C:
+                modelklass = _Memoizer_Comparable
+                cls_dict['_MeMoIZeR_cmp'] = C
+            else:
+                modelklass = _Memoizer_Simple
+            klassdict = memoize_classdict(modelklass, cls_dict, D, R)
+
+            init = klassdict.get('__init__', None)
+            if not init:
+                # Make sure filename has os.sep+'SCons'+os.sep so that
+                # SCons.Script.find_deepest_user_frame doesn't stop here
+                import inspect # It's OK, can't get here for Python < 2.1
+                superinitcode = compile(
+                    "lambda self, *args, **kw: MPI(self, cls, args, kw)",
+                    inspect.getsourcefile(_MeMoIZeR_superinit),
+                    "eval")
+                superinit = eval(superinitcode,
+                                 {'cls':cls,
+                                  'MPI':_MeMoIZeR_superinit})
+                init = superinit
+
+            newinitcode = compile(
+                "\n"*(init.func_code.co_firstlineno-1) +
+                "lambda self, args, kw: _MeMoIZeR_init(real_init, self, args, kw)",
+                init.func_code.co_filename, 'eval')
+            newinit = eval(newinitcode,
+                           {'real_init':init,
+                            '_MeMoIZeR_init':_MeMoIZeR_init},
+                           {})
+            klassdict['__init__'] = lambda self, *args, **kw: newinit(self, args, kw)
+
+            super(Memoized_Metaclass, cls).__init__(name, bases, klassdict)
+            # Now, since klassdict doesn't seem to have affected the class
+            # definition itself, apply klassdict.
+            for attr in klassdict.keys():
+                setattr(cls, attr, klassdict[attr])
+
diff --git a/src/engine/SCons/Node/FS.py b/src/engine/SCons/Node/FS.py
index 60ebb79..a67fa76 100644
--- a/src/engine/SCons/Node/FS.py
+++ b/src/engine/SCons/Node/FS.py
@@ -419,6 +419,12 @@ class EntryProxy(SCons.Util.Proxy):
        except AttributeError:
            entry = self.get()
            classname = string.split(str(entry.__class__), '.')[-1]
+            if classname[-2:] == "'>":
+                # new-style classes report their name as:
+                #       "<class 'something'>"
+                # instead of the classic classes:
+                #       "something"
+                classname = classname[:-2]
            raise AttributeError, "%s instance '%s' has no attribute '%s'" % (classname, entry.name, name)
        return attr

@@ -447,7 +453,6 @@ class Base(SCons.Node.Node):

        self.name = name
        self.fs = fs
-        self.relpath = {self : '.'}

        assert directory, "A directory must be provided"

@@ -465,31 +470,16 @@ class Base(SCons.Node.Node):
        """Completely clear a Node.FS.Base object of all its
        cached state (so that it can be re-evaluated by interfaces
        that do continuous integration builds).
+        __cache_reset__
        """
        SCons.Node.Node.clear(self)
-        try:
-            delattr(self, '_exists')
-        except AttributeError:
-            pass
-        try:
-            delattr(self, '_rexists')
-        except AttributeError:
-            pass
-        try:
-            delattr(self, '_str_val')
-        except AttributeError:
-            pass
-        self.relpath = {self : '.'}

    def get_dir(self):
        return self.dir

    def get_suffix(self):
-        try:
-            return self.ext
-        except AttributeError:
-            self.ext = SCons.Util.splitext(self.name)[1]
-            return self.ext
+        "__cacheable__"
+        return SCons.Util.splitext(self.name)[1]

    def rfile(self):
        return self
@@ -497,33 +487,29 @@ class Base(SCons.Node.Node):
    def __str__(self):
        """A Node.FS.Base object's string representation is its path
        name."""
-        try:
-            return self._str_val
-        except AttributeError:
-            global Save_Strings
-            if self.duplicate or self.is_derived():
-                str_val = self.get_path()
-            else:
-                str_val = self.srcnode().get_path()
-            if Save_Strings:
-                self._str_val = str_val
-            return str_val
+        global Save_Strings
+        if Save_Strings:
+            return self._save_str()
+        return self._get_str()
+
+    def _save_str(self):
+        "__cacheable__"
+        return self._get_str()
+
+    def _get_str(self):
+        if self.duplicate or self.is_derived():
+            return self.get_path()
+        return self.srcnode().get_path()

    rstr = __str__

    def exists(self):
-        try:
-            return self._exists
-        except AttributeError:
-            self._exists = self.fs.exists(self.abspath)
-            return self._exists
+        "__cacheable__"
+        return self.fs.exists(self.abspath)

    def rexists(self):
-        try:
-            return self._rexists
-        except AttributeError:
-            self._rexists = self.rfile().exists()
-            return self._rexists
+        "__cacheable__"
+        return self.rfile().exists()

    def is_under(self, dir):
        if self is dir:
@@ -537,44 +523,40 @@ class Base(SCons.Node.Node):
    def srcnode(self):
        """If this node is in a build path, return the node
        corresponding to its source file.  Otherwise, return
-        ourself."""
-        try:
-            return self._srcnode
-        except AttributeError:
-            dir=self.dir
-            name=self.name
-            while dir:
-                if dir.srcdir:
-                    self._srcnode = self.fs.Entry(name, dir.srcdir,
-                                                  klass=self.__class__)
-                    if self._srcnode.is_under(dir):
-                        # Shouldn't source from something in the build
-                        # path: probably means build_dir is under
-                        # src_dir and we are reflecting.
-                        break
-                    return self._srcnode
-                name = dir.name + os.sep + name
-                dir=dir.get_dir()
-            self._srcnode = self
-            return self._srcnode
+        ourself.
+        __cacheable__"""
+        dir=self.dir
+        name=self.name
+        while dir:
+            if dir.srcdir:
+                srcnode = self.fs.Entry(name, dir.srcdir,
+                                        klass=self.__class__)
+                if srcnode.is_under(dir):
+                    # Shouldn't source from something in the build
+                    # path: probably means build_dir is under
+                    # src_dir and we are reflecting.
+                    break
+                return srcnode
+            name = dir.name + os.sep + name
+            dir=dir.get_dir()
+        return self

    def get_path(self, dir=None):
        """Return path relative to the current working directory of the
        Node.FS.Base object that owns us."""
        if not dir:
            dir = self.fs.getcwd()
-        try:
-            return self.relpath[dir]
-        except KeyError:
-            path_elems = []
-            d = self
+        path_elems = []
+        d = self
+        if d == dir:
+            path_elems.append('.')
+        else:
            while d != dir and not isinstance(d, ParentOfRoot):
                path_elems.append(d.name)
                d = d.dir
            path_elems.reverse()
-            ret = string.join(path_elems, os.sep)
-            self.relpath[dir] = ret
-            return ret
+        ret = string.join(path_elems, os.sep)
+        return ret

    def set_src_builder(self, builder):
        """Set the source code builder for this node."""
@@ -1142,7 +1124,8 @@ class Dir(Base):
        Set up this directory's entries and hook it into the file
        system tree.  Specify that directories (this Node) don't use
-        signatures for calculating whether they're current."""
+        signatures for calculating whether they're current.
+        __cache_reset__"""

        self.repositories = []
        self.srcdir = None
@@ -1166,30 +1149,11 @@ class Dir(Base):
            if node != self and isinstance(node, Dir):
                node.__clearRepositoryCache(duplicate)
            else:
+                node.clear()
                try:
                    del node._srcreps
                except AttributeError:
                    pass
-                try:
-                    del node._rfile
-                except AttributeError:
-                    pass
-                try:
-                    del node._rexists
-                except AttributeError:
-                    pass
-                try:
-                    del node._exists
-                except AttributeError:
-                    pass
-                try:
-                    del node._srcnode
-                except AttributeError:
-                    pass
-                try:
-                    del node._str_val
-                except AttributeError:
-                    pass
                if duplicate != None:
                    node.duplicate=duplicate

@@ -1311,15 +1275,13 @@ class Dir(Base):
        return 0

    def rdir(self):
-        try:
-            return self._rdir
-        except AttributeError:
-            self._rdir = self
-            if not self.exists():
-                n = self.fs.Rsearch(self.path, clazz=Dir, cwd=self.fs.Top)
-                if n:
-                    self._rdir = n
-            return self._rdir
+        "__cacheable__"
+        rdir = self
+        if not self.exists():
+            n = self.fs.Rsearch(self.path, clazz=Dir, cwd=self.fs.Top)
+            if n:
+                rdir = n
+        return rdir

    def sconsign(self):
        """Return the .sconsign file info for this directory,
@@ -1420,9 +1382,8 @@ class File(Base):
                'RDirs' : self.RDirs}

    def _morph(self):
-        """Turn a file system node into a File object."""
+        """Turn a file system node into a File object.  __cache_reset__"""
        self.scanner_paths = {}
-        self.found_includes = {}
        if not hasattr(self, '_local'):
            self._local = 0

@@ -1501,14 +1462,7 @@ class File(Base):
                path = scanner.path(env, target.cwd)
                target.scanner_paths[scanner] = path

-        key = str(id(env)) + '|' + str(id(scanner)) + '|' + string.join(map(str,path), ':')
-        try:
-            includes = self.found_includes[key]
-        except KeyError:
-            includes = scanner(self, env, path)
-            self.found_includes[key] = includes
-
-        return includes
+        return scanner(self, env, path)

    def _createDir(self):
        # ensure that the directories for this node are
@@ -1537,14 +1491,7 @@ class File(Base):
                # created the directory, depending on whether the -n
                # option was used or not.  Delete the _exists and
                # _rexists attributes so they can be reevaluated.
-                try:
-                    delattr(dirnode, '_exists')
-                except AttributeError:
-                    pass
-                try:
-                    delattr(dirnode, '_rexists')
-                except AttributeError:
-                    pass
+                dirnode.clear()
            except OSError:
                pass

@@ -1589,22 +1536,14 @@ class File(Base):
        return None

    def built(self):
-        """Called just after this node is sucessfully built."""
+        """Called just after this node is successfully built.
+        __cache_reset__"""
        # Push this file out to cache before the superclass Node.built()
        # method has a chance to clear the build signature, which it
        # will do if this file has a source scanner.
         if self.fs.CachePath and self.fs.exists(self.path):
             CachePush(self, [], None)
         SCons.Node.Node.built(self)
-        self.found_includes = {}
-        try:
-            delattr(self, '_exists')
-        except AttributeError:
-            pass
-        try:
-            delattr(self, '_rexists')
-        except AttributeError:
-            pass
 
     def visited(self):
         if self.fs.CachePath and self.fs.cache_force and self.fs.exists(self.path):
@@ -1656,7 +1595,11 @@ class File(Base):
 
     def is_pseudo_derived(self):
         return self.has_src_builder()
-    
+
+    def _rmv_existing(self):
+        '__cache_reset__'
+        Unlink(self, [], None)
+
     def prepare(self):
         """Prepare for this file to be created."""
         SCons.Node.Node.prepare(self)
@@ -1664,11 +1607,7 @@ class File(Base):
         if self.get_state() != SCons.Node.up_to_date:
             if self.exists():
                 if self.is_derived() and not self.precious:
-                    Unlink(self, [], None)
-                    try:
-                        delattr(self, '_exists')
-                    except AttributeError:
-                        pass
+                    self._rmv_existing()
             else:
                 try:
                     self._createDir()
@@ -1684,9 +1623,13 @@ class File(Base):
         return None
 
     def exists(self):
+        "__cacheable__"
         # Duplicate from source path if we are set up to do this.
         if self.duplicate and not self.is_derived() and not self.linked:
-            src=self.srcnode().rfile()
+            src=self.srcnode()
+            if src is self:
+                return Base.exists(self)
+            src = src.rfile()
             if src.abspath != self.abspath and src.exists():
                 self._createDir()
                 try:
@@ -1703,14 +1646,7 @@ class File(Base):
                 # created the file, depending on whether the -n
                 # option was used or not.  Delete the _exists and
                 # _rexists attributes so they can be reevaluated.
-                try:
-                    delattr(self, '_exists')
-                except AttributeError:
-                    pass
-                try:
-                    delattr(self, '_rexists')
-                except AttributeError:
-                    pass
+                self.clear()
         return Base.exists(self)
 
     def new_binfo(self):
@@ -1791,23 +1727,20 @@ class File(Base):
                     LocalCopy(self, r, None)
                     self.store_info(self.binfo)
                     return 1
-            self._rfile = self
             return None
         else:
             old = self.get_stored_info()
             return (old == self.binfo)
 
     def rfile(self):
-        try:
-            return self._rfile
-        except AttributeError:
-            self._rfile = self
-            if not self.exists():
-                n = self.fs.Rsearch(self.path, clazz=File,
-                                    cwd=self.fs.Top)
-                if n:
-                    self._rfile = n
-            return self._rfile
+        "__cacheable__"
+        rfile = self
+        if not self.exists():
+            n = self.fs.Rsearch(self.path, clazz=File,
+                                cwd=self.fs.Top)
+            if n:
+                rfile = n
+        return rfile
 
     def rstr(self):
         return str(self.rfile())
@@ -1840,7 +1773,9 @@ def find_file(filename, paths, node_factory=default_fs.File, verbose=None):
     find_file(str, [Dir()]) -> [nodes]
 
     filename - a filename to find
-    paths - a list of directory path *nodes* to search in
+    paths - a list of directory path *nodes* to search in.  Can be
+            represented as a list, a tuple, or a callable that is
+            called with no arguments and returns the list or tuple.
 
     returns - the node created from the found file.
@@ -1853,6 +1788,10 @@ def find_file(filename, paths, node_factory=default_fs.File, verbose=None):
     if verbose and not SCons.Util.is_String(verbose):
         verbose = "find_file"
     retval = None
+
+    if callable(paths):
+        paths = paths()
+
     for dir in paths:
         if verbose:
             sys.stdout.write("  %s: looking for '%s' in '%s' ...\n" % (verbose, filename, dir))
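The bare "__cacheable__" and "__cache_reset__" strings that replace the hand-rolled try/except caching in the hunks above are not ordinary docstrings: SCons.Memoize.Memoized_Metaclass inspects them when a class is created and rewrites the marked methods. The Memoize module itself is not among the hunks shown here, so the following is only a minimal sketch of how such a metaclass could implement the convention; it is written in Python 2 style to match the code base, and everything except the marker strings is invented for illustration (keyword and unhashable arguments are out of scope):

```python
# Sketch only; NOT the actual SCons.Memoize implementation.
import types

def _cached(name, func):
    # Compute once per (method, args) pair; store in a per-instance dict.
    def wrapper(self, *args):
        cache = self.__dict__.setdefault('_memo', {})
        key = (name,) + args
        try:
            return cache[key]
        except KeyError:
            result = func(self, *args)
            cache[key] = result
            return result
    return wrapper

def _resetting(func):
    # Flush the instance's cache before running the real method.
    def wrapper(self, *args):
        self.__dict__['_memo'] = {}
        return func(self, *args)
    return wrapper

class Memoized_Metaclass(type):
    def __new__(meta, classname, bases, d):
        for name, val in d.items():
            if type(val) is types.FunctionType and val.__doc__:
                if '__cacheable__' in val.__doc__:
                    d[name] = _cached(name, val)
                elif '__cache_reset__' in val.__doc__:
                    d[name] = _resetting(val)
        return type.__new__(meta, classname, bases, d)
```

Under a scheme like this, Base.exists() above hits the file system once per node, and a method whose docstring carries the reset marker (such as clear()) throws the stored answers away so they are recomputed on the next call.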
diff --git a/src/engine/SCons/Node/FSTests.py b/src/engine/SCons/Node/FSTests.py
index c829f22..e71093f 100644
--- a/src/engine/SCons/Node/FSTests.py
+++ b/src/engine/SCons/Node/FSTests.py
@@ -211,7 +211,7 @@ class BuildDirTestCase(unittest.TestCase):
         assert str(f2) == os.path.normpath('build/var2/test.in'), str(f2)
         # Build path exists
         assert f2.exists()
-        # ...and should copy the file from src to build path
+        # ...and exists() should copy the file from src to build path
         assert test.read(['work', 'build', 'var2', 'test.in']) == 'test.in',\
                test.read(['work', 'build', 'var2', 'test.in'])
         # Since exists() is true, so should rexists() be
@@ -898,10 +898,6 @@ class FSTestCase(unittest.TestCase):
         assert deps == [xyz], deps
         assert s.call_count == 1, s.call_count
 
-        deps = f12.get_found_includes(env, s, t1)
-        assert deps == [xyz], deps
-        assert s.call_count == 1, s.call_count
-
         f12.built()
 
         deps = f12.get_found_includes(env, s, t1)
@@ -946,9 +942,9 @@ class FSTestCase(unittest.TestCase):
         f1 = fs.File(test.workpath("do_i_exist"))
         assert not f1.exists()
         test.write("do_i_exist","\n")
-        assert not f1.exists()
+        assert not f1.exists(), "exists() call not cached"
         f1.built()
-        assert f1.exists()
+        assert f1.exists(), "exists() call caching not reset"
         test.unlink("do_i_exist")
         assert f1.exists()
         f1.built()
@@ -1822,33 +1818,48 @@ class clearTestCase(unittest.TestCase):
     def runTest(self):
         """Test clearing FS nodes of cached data."""
         fs = SCons.Node.FS.FS()
+        test = TestCmd(workdir='')
 
         e = fs.Entry('e')
-        e._exists = 1
-        e._rexists = 1
-        e._str_val = 'e'
+        assert not e.exists()
+        assert not e.rexists()
+        assert str(e) == 'e', str(d)
         e.clear()
-        assert not hasattr(e, '_exists')
-        assert not hasattr(e, '_rexists')
-        assert not hasattr(e, '_str_val')
+        assert not e.exists()
+        assert not e.rexists()
+        assert str(e) == 'e', str(d)
 
-        d = fs.Dir('d')
-        d._exists = 1
-        d._rexists = 1
-        d._str_val = 'd'
+        d = fs.Dir(test.workpath('d'))
+        test.subdir('d')
+        assert d.exists()
+        assert d.rexists()
+        assert str(d) == test.workpath('d'), str(d)
+        fs.rename(test.workpath('d'), test.workpath('gone'))
+        # Verify caching is active
+        assert d.exists(), 'caching not active'
+        assert d.rexists()
+        assert str(d) == test.workpath('d'), str(d)
+        # Now verify clear() resets the cache
        d.clear()
-        assert not hasattr(d, '_exists')
-        assert not hasattr(d, '_rexists')
-        assert not hasattr(d, '_str_val')
-
-        f = fs.File('f')
-        f._exists = 1
-        f._rexists = 1
-        f._str_val = 'f'
+        assert not d.exists()
+        assert not d.rexists()
+        assert str(d) == test.workpath('d'), str(d)
+
+        f = fs.File(test.workpath('f'))
+        test.write(test.workpath('f'), 'file f')
+        assert f.exists()
+        assert f.rexists()
+        assert str(f) == test.workpath('f'), str(f)
+        # Verify caching is active
+        test.unlink(test.workpath('f'))
+        assert f.exists()
+        assert f.rexists()
+        assert str(f) == test.workpath('f'), str(f)
+        # Now verify clear() resets the cache
         f.clear()
-        assert not hasattr(f, '_exists')
-        assert not hasattr(f, '_rexists')
-        assert not hasattr(f, '_str_val')
+        assert not f.exists()
+        assert not f.rexists()
+        assert str(f) == test.workpath('f'), str(f)
 
 class postprocessTestCase(unittest.TestCase):
     def runTest(self):
@@ -2066,7 +2077,7 @@ class SaveStringsTestCase(unittest.TestCase):
         s = map(str, nodes)
         expect = map(os.path.normpath, ['src/f', 'd1/f', 'd0/b', 'd1/b'])
-        assert s == expect, s
+        assert s == expect, 'node str() not cached: %s'%s
 
 if __name__ == "__main__":
     suite = unittest.TestSuite()
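Note how the rewritten FS tests no longer poke private attributes like _exists or _str_val; with the Memoizer owning the cache, they verify memoization from the outside instead: change the filesystem behind the node's back, assert that the node still returns the stale cached answer, then clear() and assert that the answer is recomputed. A self-contained illustration of that assertion pattern (a hypothetical Thing class, not SCons code):

```python
# Hypothetical example of the black-box caching test pattern above.
class Thing:
    def __init__(self):
        self.on_disk = 0
        self.lookups = 0
        self._cache = {}
    def exists(self):
        # Hand-rolled stand-in for a "__cacheable__" method.
        try:
            return self._cache['exists']
        except KeyError:
            self.lookups = self.lookups + 1
            self._cache['exists'] = self.on_disk
            return self._cache['exists']
    def clear(self):
        # Stand-in for a "__cache_reset__" method.
        self._cache = {}

t = Thing()
assert not t.exists()                        # first call computes
t.on_disk = 1                                # change the world behind the cache
assert not t.exists(), 'caching not active'  # still the stale answer
t.clear()                                    # what the FS tests call between checks
assert t.exists(), 'caching not reset'       # recomputed
assert t.lookups == 2
```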
diff --git a/src/engine/SCons/Node/NodeTests.py b/src/engine/SCons/Node/NodeTests.py
index 3c402d0..106e44e 100644
--- a/src/engine/SCons/Node/NodeTests.py
+++ b/src/engine/SCons/Node/NodeTests.py
@@ -525,13 +525,18 @@ class NodeTestCase(unittest.TestCase):
     def test_explain(self):
         """Test explaining why a Node must be rebuilt
         """
-        node = SCons.Node.Node()
+        class testNode(SCons.Node.Node):
+            def __str__(self): return 'xyzzy'
+        node = testNode()
         node.exists = lambda: None
-        node.__str__ = lambda: 'xyzzy'
+        # Can't do this with new-style classes (python bug #1066490)
+        #node.__str__ = lambda: 'xyzzy'
         result = node.explain()
         assert result == "building `xyzzy' because it doesn't exist\n", result
 
-        node = SCons.Node.Node()
+        class testNode2(SCons.Node.Node):
+            def __str__(self): return 'null_binfo'
+        node = testNode2()
         result = node.explain()
         assert result == None, result
 
@@ -540,7 +545,7 @@ class NodeTestCase(unittest.TestCase):
             pass
         node.get_stored_info = Null_BInfo
-        node.__str__ = lambda: 'null_binfo'
+        #see above: node.__str__ = lambda: 'null_binfo'
         result = node.explain()
         assert result == "Cannot explain why `null_binfo' is being rebuilt: No previous build information found\n", result
 
@@ -802,7 +807,7 @@ class NodeTestCase(unittest.TestCase):
         """
         target = SCons.Node.Node()
         source = SCons.Node.Node()
-        s = target.get_source_scanner(source, None)
+        s = target.get_source_scanner(source)
         assert s is None, s
 
         ts1 = Scanner()
@@ -821,19 +826,19 @@ class NodeTestCase(unittest.TestCase):
         builder = Builder2(ts1)
 
         targets = builder([source])
-        s = targets[0].get_source_scanner(source, None)
+        s = targets[0].get_source_scanner(source)
         assert s is ts1, s
 
         target.builder_set(Builder2(ts1))
         target.builder.source_scanner = ts2
-        s = target.get_source_scanner(source, None)
+        s = target.get_source_scanner(source)
         assert s is ts2, s
 
         builder = Builder1(env=Environment(SCANNERS = [ts3]))
 
         targets = builder([source])
 
-        s = targets[0].get_source_scanner(source, builder.env)
+        s = targets[0].get_source_scanner(source)
         assert s is ts3, s
 
@@ -880,14 +885,13 @@ class NodeTestCase(unittest.TestCase):
         SCons.Node.implicit_deps_unchanged = None
         try:
             sn = StoredNode("eee")
-            sn._children = ['fake']
             sn.builder_set(Builder())
             sn.builder.target_scanner = s
 
             sn.scan()
 
             assert sn.implicit == [], sn.implicit
-            assert sn._children == [], sn._children
+            assert sn.children() == [], sn.children()
 
         finally:
             SCons.Sig.default_calc = save_default_calc
@@ -1100,7 +1104,6 @@ class NodeTestCase(unittest.TestCase):
 
         n.clear()
 
-        assert n.get_state() is None, n.get_state()
         assert not hasattr(n, 'binfo'), n.bsig
         assert n.includes is None, n.includes
         assert n.found_includes == {}, n.found_includes
@@ -1176,8 +1179,13 @@ class NodeListTestCase(unittest.TestCase):
         assert s == "['n3', 'n2', 'n1']", s
 
         r = repr(nl)
-        r = re.sub('at (0x)?[0-9A-Fa-f]+', 'at 0x', repr(nl))
-        l = string.join(["<__main__.MyNode instance at 0x>"]*3, ", ")
+        r = re.sub('at (0x)?[0-9a-z]+', 'at 0x', r)
+        # Don't care about ancestry: just leaf value of MyNode
+        r = re.sub('<.*?\.MyNode', '<MyNode', r)
+        # New-style classes report as "object"; classic classes report
+        # as "instance"...
+        r = re.sub("object", "instance", r)
+        l = string.join(["<MyNode instance at 0x>"]*3, ", ")
         assert r == '[%s]' % l, r
diff --git a/src/engine/SCons/Node/__init__.py b/src/engine/SCons/Node/__init__.py
index c7a652d..e239e93 100644
--- a/src/engine/SCons/Node/__init__.py
+++ b/src/engine/SCons/Node/__init__.py
@@ -92,6 +92,8 @@ class Node:
     build, or use to build other Nodes.
     """
 
+    __metaclass__ = SCons.Memoize.Memoized_Metaclass
+
     class Attrs:
         pass
 
@@ -147,18 +149,9 @@ class Node:
         return {}
 
     def get_build_env(self):
-        """Fetch the appropriate Environment to build this node."""
-        try:
-            build_env = self._build_env
-        except AttributeError:
-            # This gets called a lot, so cache it.  A node gets created
-            # in the context of a specific environment and it doesn't
-            # get "moved" to a different environment, so caching this
-            # value is safe.
-            executor = self.get_executor()
-            build_env = executor.get_build_env()
-            self._build_env = build_env
-        return self._build_env
+        """Fetch the appropriate Environment to build this node.
+        __cacheable__"""
+        return self.get_executor().get_build_env()
 
     def set_executor(self, executor):
         """Set the action executor for this node."""
@@ -219,7 +212,7 @@ class Node:
             apply(executor, (self, errfunc), kw)
 
     def built(self):
-        """Called just after this node is sucessfully built."""
+        """Called just after this node is successfully built."""
 
         # Clear the implicit dependency caches of any Nodes
         # waiting for this Node to be built.
@@ -237,9 +230,7 @@ class Node:
 
         # Reset this Node's cached state since it was just built and
         # various state has changed.
-        save_state = self.get_state()
         self.clear()
-        self.set_state(save_state)
 
         # Had build info, so it should be stored in the signature
         # cache.  However, if the build info included a content
@@ -275,8 +266,8 @@ class Node:
         """Completely clear a Node of all its cached state (so that it
         can be re-evaluated by interfaces that do continuous integration
         builds).
+        __reset_cache__
         """
-        self.set_state(None)
         self.del_binfo()
         self.del_cinfo()
         try:
@@ -299,8 +290,8 @@ class Node:
         return reduce(lambda D,N,C=self.children(): D or (N in C), nodes, 0)
 
     def builder_set(self, builder):
+        "__cache_reset__"
         self.builder = builder
-        self._src_scanners = {}   # cached scanners are based on the builder
 
     def has_builder(self):
         """Return whether this Node has a builder or not.
@@ -312,6 +303,7 @@ class Node:
         and __nonzero__ attributes on instances of our Builder Proxy
         class(es), generating a bazillion extra calls and slowing
         things down immensely.
+        __cacheable__
         """
         try:
             b = self.builder
@@ -328,7 +320,8 @@ class Node:
         This allows an internal Builder created by SCons to be marked
         non-explicit, so that it can be overridden by an explicit
         builder that the user supplies (the canonical example being
-        directories)."""
+        directories).
+        __cacheable__"""
         return self.has_builder() and self.builder.is_explicit
 
     def get_builder(self, default_builder=None):
@@ -411,37 +404,28 @@ class Node:
 
         return deps
 
-    # cache used to make implicit_factory fast.
-    implicit_factory_cache = {}
-
     def implicit_factory(self, path):
         """
         Turn a cache implicit dependency path into a node.
         This is called so many times that doing caching
         here is a significant performance boost.
+        __cacheable__
         """
-        try:
-            return self.implicit_factory_cache[path]
-        except KeyError:
-            n = self.builder.source_factory(path)
-            self.implicit_factory_cache[path] = n
-            return n
+        return self.builder.source_factory(path)
+
-    def get_source_scanner(self, node, build_env):
+    def get_source_scanner(self, node):
         """Fetch the source scanner for the specified node
 
         NOTE:  "self" is the target being built, "node" is
         the source file for which we want to fetch the scanner.
 
-        build_env is the build environment (it's self.get_build_env(),
-        but the caller always knows this so it can give it
-        to us).
-
         Implies self.has_builder() is true; again, expect to only be
         called from locations where this is already verified.
 
         This function may be called very often; it attempts to cache
         the scanner found to improve performance.
+        __cacheable__
         """
         # Called from scan() for each child (node) of this node
         # (self).  The scan() may be called multiple times, so this
         # values (node and build_env) could be different each time, but
         # as an optimization of an already-determined value, not as a
         # changing parameter.
 
-        key = str(id(node)) + '|' + str(id(build_env))
-        try:
-            return self._src_scanners[key]
-        except AttributeError:
-            self._src_scanners = {}
-        except KeyError:
-            pass
-
         if not self.has_builder():
-            self._src_scanners[key] = None
             return None
 
         try:
             scanner = self.builder.source_scanner
             if scanner:
-                self._src_scanners[key] = scanner
                 return scanner
         except AttributeError:
             pass
 
@@ -475,8 +449,7 @@ class Node:
         # based on the node's scanner key (usually the file
         # extension).
 
-        scanner = build_env.get_scanner(node.scanner_key())
-        self._src_scanners[key] = scanner
+        scanner = self.get_build_env().get_scanner(node.scanner_key())
         return scanner
 
     def scan(self):
@@ -513,7 +486,7 @@ class Node:
         self.del_binfo()
 
         for child in self.children(scan=0):
-            scanner = self.get_source_scanner(child, build_env)
+            scanner = self.get_source_scanner(child)
             if scanner:
                 deps = child.get_implicit_deps(build_env, scanner, self)
                 self._add_child(self.implicit, self.implicit_dict, deps)
@@ -545,28 +518,21 @@ class Node:
     def calc_signature(self, calc=None):
         """
         Select and calculate the appropriate build signature for a node.
+        __cacheable__
 
         self - the node
         calc - the signature calculation module
         returns - the signature
         """
-        try:
-            return self._calculated_sig
-        except AttributeError:
-            if self.is_derived():
-                import SCons.Defaults
-
-                env = self.env or SCons.Defaults.DefaultEnvironment()
-                if env.use_build_signature():
-                    sig = self.calc_bsig(calc)
-                else:
-                    sig = self.calc_csig(calc)
-            elif not self.rexists():
-                sig = None
-            else:
-                sig = self.calc_csig(calc)
-            self._calculated_sig = sig
-            return sig
+        if self.is_derived():
+            import SCons.Defaults
+
+            env = self.env or SCons.Defaults.DefaultEnvironment()
+            if env.use_build_signature():
+                return self.calc_bsig(calc)
+        elif not self.rexists():
+            return None
+        return self.calc_csig(calc)
 
     def new_binfo(self):
         return BuildInfo()
@@ -769,10 +735,8 @@ class Node:
             self.wkids.append(wkid)
 
     def _children_reset(self):
-        try:
-            delattr(self, '_children')
-        except AttributeError:
-            pass
+        "__cache_reset__"
+        pass
 
     def filter_ignore(self, nodelist):
         ignore = self.ignore
@@ -782,17 +746,16 @@ class Node:
                 result.append(node)
         return result
 
+    def _children_get(self):
+        "__cacheable__"
+        return self.filter_ignore(self.all_children(scan=0))
+
     def children(self, scan=1):
         """Return a list of the node's direct children, minus those
         that are ignored by this node."""
         if scan:
             self.scan()
-        try:
-            return self._children
-        except AttributeError:
-            c = self.all_children(scan=0)
-            self._children = self.filter_ignore(c)
-            return self._children
+        return self._children_get()
 
     def all_children(self, scan=1):
         """Return a list of all the node's direct children."""
@@ -875,7 +838,7 @@ class Node:
         if self.is_derived() and self.env:
             env = self.get_build_env()
             for s in self.sources:
-                scanner = self.get_source_scanner(s, env)
+                scanner = self.get_source_scanner(s)
                 def f(node, env=env, scanner=scanner, target=self):
                     return node.get_found_includes(env, scanner, target)
                 return SCons.Util.render_tree(s, f, 1)
@@ -1022,6 +985,14 @@ else:
     del l
     del ul
 
+if not SCons.Memoize.has_metaclass:
+    _Base = Node
+    class Node(SCons.Memoize.Memoizer, _Base):
+        def __init__(self, *args, **kw):
+            apply(_Base.__init__, (self,)+args, kw)
+            SCons.Memoize.Memoizer.__init__(self)
+
+
 def get_children(node, parent): return node.children()
 def ignore_cycle(node, stack): pass
 def do_nothing(node, parent): pass
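Each converted module also guards with `if not SCons.Memoize.has_metaclass:` and rebuilds the class with a Memoizer mixin, presumably so that Python versions whose class model cannot run the metaclass hook still get the caching behavior. The real Memoizer is not shown in this commit's hunks either; one plausible shape for such a mixin, which wraps marked methods per instance at construction time, is sketched below (all names except the marker string are invented):

```python
# Sketch of a possible Memoizer fallback; NOT the actual SCons.Memoize code.
def _marked(klass, marker):
    # Collect methods, walking base classes, whose docstring carries
    # the marker string.
    found = {}
    for base in klass.__bases__:
        found.update(_marked(base, marker))
    for name, val in klass.__dict__.items():
        doc = getattr(val, '__doc__', None)
        if callable(val) and doc and marker in doc:
            found[name] = val
    return found

class Memoizer:
    def __init__(self):
        # Shadow each marked class method with a caching wrapper
        # bound onto this particular instance.
        self._memo = {}
        for name in _marked(self.__class__, '__cacheable__').keys():
            setattr(self, name, self._wrap(name, getattr(self, name)))

    def _wrap(self, name, bound_method):
        memo = self._memo
        def wrapper(*args):
            key = (name,) + args
            try:
                return memo[key]
            except KeyError:
                memo[key] = result = bound_method(*args)
                return result
        return wrapper
```

A mixin of this shape explains the ordering in the fallback classes above: `_Base.__init__` runs first so the instance exists, and only then does `SCons.Memoize.Memoizer.__init__(self)` install the wrappers.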
diff --git a/src/engine/SCons/Scanner/CTests.py b/src/engine/SCons/Scanner/CTests.py
index e5830a0..5296f93 100644
--- a/src/engine/SCons/Scanner/CTests.py
+++ b/src/engine/SCons/Scanner/CTests.py
@@ -333,8 +333,8 @@ class CScannerTestCase10(unittest.TestCase):
         s = SCons.Scanner.C.CScan(fs=fs)
         path = s.path(env)
         test.write('include/fa.cpp', test.read('fa.cpp'))
-        deps = s(fs.File('#include/fa.cpp'), env, path)
         fs.chdir(fs.Dir('..'))
+        deps = s(fs.File('#include/fa.cpp'), env, path)
         deps_match(self, deps, [ 'include/fa.h', 'include/fb.h' ])
         test.unlink('include/fa.cpp')
 
diff --git a/src/engine/SCons/Scanner/D.py b/src/engine/SCons/Scanner/D.py
index 27ba165..fba973f 100644
--- a/src/engine/SCons/Scanner/D.py
+++ b/src/engine/SCons/Scanner/D.py
@@ -47,6 +47,7 @@ def DScan(fs = SCons.Node.FS.default_fs):
 
 class DScanner(SCons.Scanner.Classic):
     def find_include(self, include, source_dir, path):
+        if callable(path): path=path()
         # translate dots (package separators) to slashes
         inc = string.replace(include, '.', '/')
 
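DScanner's find_include above (and, below, Prog.py's scan and the Classic scanners) all gain the same one-liner: a scanner path may now arrive either as a real tuple or as a Binder-style callable that returns one (see the find_file() docstring change earlier), so every consumer normalizes it before use. The convention, expressed as a standalone helper for clarity (hypothetical; the SCons code simply inlines the test at each call site):

```python
def resolve_path(path):
    """Normalize a scanner path argument: plain sequences pass
    through; a Binder (or any callable) is called with no arguments
    to produce the real tuple."""
    if callable(path):
        return path()
    return path

# Both forms resolve to the same tuple:
assert resolve_path(('inc1', 'inc2')) == ('inc1', 'inc2')
assert resolve_path(lambda: ('inc1', 'inc2')) == ('inc1', 'inc2')
```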
diff --git a/src/engine/SCons/Scanner/Fortran.py b/src/engine/SCons/Scanner/Fortran.py
index 6ab878f..4da0914 100644
--- a/src/engine/SCons/Scanner/Fortran.py
+++ b/src/engine/SCons/Scanner/Fortran.py
@@ -62,6 +62,11 @@ class F90Scanner(SCons.Scanner.Classic):
         self.fs = fs
 
         def _scan(node, env, path, self=self, fs=fs):
+            node = node.rfile()
+
+            if not node.exists():
+                return []
+
             return self.scan(node, env, path)
 
         kw['function'] = _scan
@@ -73,11 +78,8 @@ class F90Scanner(SCons.Scanner.Classic):
         apply(SCons.Scanner.Current.__init__, (self,) + args, kw)
 
     def scan(self, node, env, path=()):
-        node = node.rfile()
-
-        if not node.exists():
-            return []
-
+        "__cacheable__"
+
         # cache the includes list in node so we only scan it once:
         if node.includes != None:
             mods_and_includes = node.includes
diff --git a/src/engine/SCons/Scanner/Prog.py b/src/engine/SCons/Scanner/Prog.py
index 512e512..d9b57b9 100644
--- a/src/engine/SCons/Scanner/Prog.py
+++ b/src/engine/SCons/Scanner/Prog.py
@@ -80,6 +80,8 @@ def scan(node, env, libpath = (), fs = SCons.Node.FS.default_fs):
 
     result = []
 
+    if callable(libpath): libpath = libpath()
+
     find_file = SCons.Node.FS.find_file
     adjustixes = SCons.Util.adjustixes
     for lib in libs:
diff --git a/src/engine/SCons/Scanner/ScannerTests.py b/src/engine/SCons/Scanner/ScannerTests.py
index 00ad7fb..e418c17 100644
--- a/src/engine/SCons/Scanner/ScannerTests.py
+++ b/src/engine/SCons/Scanner/ScannerTests.py
@@ -69,7 +69,7 @@ class FindPathDirsTestCase(unittest.TestCase):
         fpd = SCons.Scanner.FindPathDirs('LIBPATH', FS())
         result = fpd(env, dir)
-        assert result == ('xxx', 'foo'), result
+        assert str(result) == "('xxx', 'foo')", result
 
 class ScannerTestCase(unittest.TestCase):
 
@@ -434,7 +434,7 @@ class ClassicTestCase(unittest.TestCase):
         # Verify that overall scan results are cached even if individual
         # results are de-cached
         ret = s.function(n, env, ('foo2',))
-        assert ret == ['abc'], ret
+        assert ret == ['abc'], 'caching inactive; got: %s'%ret
 
         # Verify that it sorts what it finds.
         n.includes = ['xyz', 'uvw']
@@ -459,6 +459,8 @@ class ClassicCPPTestCase(unittest.TestCase):
         s = SCons.Scanner.ClassicCPP("Test", [], None, "")
 
         def _find_file(filename, paths, factory):
+            if callable(paths):
+                paths = paths()
             return paths[0]+'/'+filename
 
         save = SCons.Node.FS.find_file
@@ -474,7 +476,7 @@ class ClassicCPPTestCase(unittest.TestCase):
             assert i == 'bbb', i
 
         finally:
-            SCons.Node.FS.find_file = _find_file
+            SCons.Node.FS.find_file = save
 
 def suite():
     suite = unittest.TestSuite()
diff --git a/src/engine/SCons/Scanner/__init__.py b/src/engine/SCons/Scanner/__init__.py
index cbab50c..1968a9e 100644
--- a/src/engine/SCons/Scanner/__init__.py
+++ b/src/engine/SCons/Scanner/__init__.py
@@ -34,7 +34,6 @@ import string
 
 import SCons.Node.FS
 import SCons.Sig
-import SCons.UserTuple
 import SCons.Util
 
@@ -54,51 +53,51 @@ def Scanner(function, *args, **kw):
     else:
         return apply(Base, (function,) + args, kw)
 
-# Important, important, important performance optimization:
-#
-# The paths of Nodes returned from a FindPathDirs will be used to index
-# a dictionary that caches the values, so we don't have to look up the
-# same path over and over and over.  If FindPathDir returns just a tuple,
-# though, then the time it takes to compute the hash of the tuple grows
-# proportionally to the length of the tuple itself--and some people can
-# have very, very long strings of include directories...
-#
-# The solution is to wrap the tuple in an object, a UserTuple class
-# whose *id()* our caller can use to cache the appropriate value.
-# This means we have to guarantee that these ids genuinely represent
-# unique values, which we do by maintaining our own cache that maps the
-# expensive-to-hash tuple values to the easy-to-hash UniqueUserTuple
-# values that our caller uses.
-#
-# *Major* kudos to Eric Frias and his colleagues for finding this.
-class UniqueUserTuple(SCons.UserTuple.UserTuple):
-    def __hash__(self):
-        return id(self)
-
-PathCache = {}
+class Binder:
+    __metaclass__ = SCons.Memoize.Memoized_Metaclass
+    def __init__(self, bindval):
+        self._val = bindval
+    def __call__(self):
+        return self._val
+    def __str__(self):
+        return str(self._val)
+        #debug: return 'B<%s>'%str(self._val)
+
 
 class FindPathDirs:
     """A class to bind a specific *PATH variable name and the fs object
     to a function that will return all of the *path directories."""
+    __metaclass__ = SCons.Memoize.Memoized_Metaclass
     def __init__(self, variable, fs):
         self.variable = variable
         self.fs = fs
     def __call__(self, env, dir, argument=None):
+        "__cacheable__"
         try:
             path = env[self.variable]
         except KeyError:
             return ()
 
         path_tuple = tuple(self.fs.Rsearchall(env.subst_path(path),
-                                              must_exist = 0,
+                                              must_exist = 0, #kwq!
                                               clazz = SCons.Node.FS.Dir,
                                               cwd = dir))
-        try:
-            return PathCache[path_tuple]
-        except KeyError:
-            path_UserTuple = UniqueUserTuple(path_tuple)
-            PathCache[path_tuple] = path_UserTuple
-            return path_UserTuple
+        return Binder(path_tuple)
+
+if not SCons.Memoize.has_metaclass:
+    _FPD_Base = FindPathDirs
+    class FindPathDirs(SCons.Memoize.Memoizer, _FPD_Base):
+        "Cache-backed version of FindPathDirs"
+        def __init__(self, *args, **kw):
+            apply(_FPD_Base.__init__, (self,)+args, kw)
+            SCons.Memoize.Memoizer.__init__(self)
+    _BinderBase = Binder
+    class Binder(SCons.Memoize.Memoizer, _BinderBase):
+        "Cache-backed version of Binder"
+        def __init__(self, *args, **kw):
+            apply(_BinderBase.__init__, (self,)+args, kw)
+            SCons.Memoize.Memoizer.__init__(self)
+
 
 class Base:
     """
@@ -106,6 +105,8 @@ class Base:
     straightforward, single-pass scanning of a single file.
     """
 
+    __metaclass__ = SCons.Memoize.Memoized_Metaclass
+
     def __init__(self,
                  function,
                  name = "NONE",
@@ -135,6 +136,8 @@ class Base:
         (a construction environment, optional directory, and optional
         argument for this instance) and returns a tuple of the
         directories that can be searched for implicit dependency files.
+        May also return a callable() which is called with no args and
+        returns the tuple (supporting Bindable class).
 
         'node_class' - the class of Nodes which this scan will return.
         If node_class is None, then this scanner will not enforce any
@@ -186,6 +189,7 @@ class Base:
         self.recursive = recursive
 
     def path(self, env, dir = None):
+        "__cacheable__"
         if not self.path_function:
             return ()
         if not self.argument is _null:
@@ -242,6 +246,14 @@ class Base:
     def select(self, node):
         return self
 
+if not SCons.Memoize.has_metaclass:
+    _Base = Base
+    class Base(SCons.Memoize.Memoizer, _Base):
+        "Cache-backed version of Scanner Base"
+        def __init__(self, *args, **kw):
+            apply(_Base.__init__, (self,)+args, kw)
+            SCons.Memoize.Memoizer.__init__(self)
+
 
 class Selector(Base):
     """
@@ -296,22 +308,12 @@ class Classic(Current):
 
         self.cre = re.compile(regex, re.M)
         self.fs = fs
-        self._cached = {}
 
         def _scan(node, env, path=(), self=self):
             node = node.rfile()
-            if not node.exists(): return []
-
-            key = str(id(node)) + '|' + string.join(map(str, path), ':')
-            try:
-                return self._cached[key]
-            except KeyError:
-                pass
-
-            self._cached[key] = scan_result = self.scan(node, path)
-            return scan_result
+            return self.scan(node, path)
 
         kw['function'] = _scan
         kw['path_function'] = FindPathDirs(path_variable, fs)
@@ -322,6 +324,8 @@ class Classic(Current):
         apply(Current.__init__, (self,) + args, kw)
 
     def find_include(self, include, source_dir, path):
+        "__cacheable__"
+        if callable(path): path = path()
         n = SCons.Node.FS.find_file(include,
                                     (source_dir,) + tuple(path),
                                     self.fs.File)
@@ -331,6 +335,7 @@ class Classic(Current):
         return SCons.Node.FS._my_normcase(include)
 
     def scan(self, node, path=()):
+        "__cacheable__"
 
         # cache the includes list in node so we only scan it once:
         if node.includes != None:
@@ -372,14 +377,19 @@ class ClassicCPP(Classic):
     the contained filename in group 1.
     """
     def find_include(self, include, source_dir, path):
+        "__cacheable__"
+        if callable(path):
+            path = path() #kwq: extend callable to find_file...
+
         if include[0] == '"':
-            n = SCons.Node.FS.find_file(include[1],
-                                        (source_dir,) + tuple(path),
-                                        self.fs.File)
+            paths = Binder( (source_dir,) + tuple(path) )
         else:
-            n = SCons.Node.FS.find_file(include[1],
-                                        tuple(path) + (source_dir,),
-                                        self.fs.File)
+            paths = Binder( tuple(path) + (source_dir,) )
+
+        n = SCons.Node.FS.find_file(include[1],
+                                    paths,
+                                    self.fs.File)
+
         return n, include[1]
 
     def sort_key(self, include):
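Binder is the replacement for the deleted UniqueUserTuple machinery: hashing a tuple costs time proportional to its length, while the default hash of a plain class instance is id()-based and constant-time, so wrapping a long path tuple keeps memoization keys cheap. The price is that consumers must call the object to get the tuple back, which is exactly the `if callable(path)` convention added throughout the scanners. A small usage sketch (Binder copied from the hunk above, shown without the metaclass hook; the long path is invented for illustration):

```python
class Binder:
    def __init__(self, bindval):
        self._val = bindval
    def __call__(self):
        return self._val
    def __str__(self):
        return str(self._val)

long_path = tuple(['include/dir%d' % i for i in range(1000)])
b = Binder(long_path)

cache = {}
cache[b] = 'scan result'          # hashing b is id()-based: O(1),
assert cache[b] == 'scan result'  # however long the wrapped tuple is
assert b()[0] == 'include/dir0'   # callers invoke it to get the tuple
assert str(b) == str(long_path)   # str() passes through, as the
                                  # FindPathDirs test above relies on
```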
diff --git a/src/engine/SCons/UserTuple.py b/src/engine/SCons/UserTuple.py
deleted file mode 100644
index 8682783..0000000
--- a/src/engine/SCons/UserTuple.py
+++ /dev/null
@@ -1,185 +0,0 @@
-"""SCons.UserTuple
-
-A more or less complete user-defined wrapper around tuple objects.
-
-This is basically cut-and-pasted from UserList, but it wraps an immutable
-tuple instead of a mutable list, primarily so that the wrapper object can
-be used as the hash of a dictionary.  The time it takes to compute the
-hash value of a builtin tuple grows as the length of the tuple grows, but
-the time it takes to compute hash value of an object can stay constant.
-
-"""
-
-#
-# __COPYRIGHT__
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
-# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
-# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-
-class UserTuple:
-    def __init__(self, inittuple=None):
-        self.data = ()
-        if inittuple is not None:
-            # XXX should this accept an arbitrary sequence?
-            if type(inittuple) == type(self.data):
-                self.data = inittuple[:]
-            elif isinstance(inittuple, UserTuple):
-                self.data = tuple(inittuple.data[:])
-            else:
-                self.data = tuple(inittuple)
-    def __str__(self): return str(self.data)
-    def __lt__(self, other): return self.data <  self.__cast(other)
-    def __le__(self, other): return self.data <= self.__cast(other)
-    def __eq__(self, other): return self.data == self.__cast(other)
-    def __ne__(self, other): return self.data != self.__cast(other)
-    def __gt__(self, other): return self.data >  self.__cast(other)
-    def __ge__(self, other): return self.data >= self.__cast(other)
-    def __cast(self, other):
-        if isinstance(other, UserTuple): return other.data
-        else: return other
-    def __cmp__(self, other):
-        return cmp(self.data, self.__cast(other))
-    def __contains__(self, item): return item in self.data
-    def __len__(self): return len(self.data)
-    def __getitem__(self, i): return self.data[i]
-    def __setitem__(self, i, item):
-        raise TypeError, "object doesn't support item assignment"
-    def __delitem__(self, i):
-        raise TypeError, "object doesn't support item deletion"
-    def __getslice__(self, i, j):
-        i = max(i, 0); j = max(j, 0)
-        return self.__class__(self.data[i:j])
-    def __setslice__(self, i, j, other):
-        raise TypeError, "object doesn't support slice assignment"
-    def __delslice__(self, i, j):
-        raise TypeError, "object doesn't support slice deletion"
-    def __add__(self, other):
-        if isinstance(other, UserTuple):
-            return self.__class__(self.data + other.data)
-        elif isinstance(other, type(self.data)):
-            return self.__class__(self.data + other)
-        else:
-            return self.__class__(self.data + tuple(other))
-    def __radd__(self, other):
-        if isinstance(other, UserTuple):
-            return self.__class__(other.data + self.data)
-        elif isinstance(other, type(self.data)):
-            return self.__class__(other + self.data)
-        else:
-            return self.__class__(tuple(other) + self.data)
-    def __mul__(self, n):
-        return self.__class__(self.data*n)
-    __rmul__ = __mul__
-    def __iter__(self):
-        return iter(self.data)
-    def __hash__(self):
-        return hash(self.data)
-
-if (__name__ == "__main__"):
-    t = UserTuple((1, 2, 3))
-    assert isinstance(t, UserTuple)
-    t2 = UserTuple(t)
-    assert isinstance(t2, UserTuple)
-    t3 = UserTuple([1, 2, 3])
-    assert isinstance(t3, UserTuple)
-    assert t == t2
-    assert t == t3
-    assert str(t) == '(1, 2, 3)', str(t)
-    assert t < UserTuple((2, 2, 3))
-    assert t <= UserTuple((2, 2, 3))
-    assert t == UserTuple((1, 2, 3))
-    assert t != UserTuple((3, 2, 1))
-    assert t > UserTuple((0, 2, 3))
-    assert t >= UserTuple((0, 2, 3))
-    assert cmp(t, UserTuple((0,))) == 1
-    assert cmp(t, UserTuple((1, 2, 3))) == 0
-    assert cmp(t, UserTuple((2,))) == -1
-    assert t < (2, 2, 3)
-    assert t <= (2, 2, 3)
-    assert t == (1, 2, 3)
-    assert t != (3, 2, 1)
-    assert t > (0, 2, 3)
>= (0, 2, 3) - assert cmp(t, (0,)) == 1 - assert cmp(t, (1, 2, 3)) == 0 - assert cmp(t, (2,)) == -1 - assert 3 in t - assert len(t) == 3 - assert t[0] == 1 - assert t[1] == 2 - assert t[2] == 3 - try: - t[0] = 4 - except TypeError, e: - assert str(e) == "object doesn't support item assignment" - else: - raise "Did not catch expected TypeError" - try: - del t[0] - except TypeError, e: - assert str(e) == "object doesn't support item deletion" - else: - raise "Did not catch expected TypeError" - assert t[1:2] == (2,) - try: - t[0:2] = (4, 5) - except TypeError, e: - assert str(e) == "object doesn't support slice assignment", e - else: - raise "Did not catch expected TypeError" - try: - del t[0:2] - except TypeError, e: - assert str(e) == "object doesn't support slice deletion" - else: - raise "Did not catch expected TypeError" - assert t + UserTuple((4, 5)) == (1, 2, 3, 4, 5) - assert t + (4, 5) == (1, 2, 3, 4, 5) - assert t + [4, 5] == (1, 2, 3, 4, 5) - assert UserTuple((-1, 0)) + t == (-1, 0, 1, 2, 3) - assert (-1, 0) + t == (-1, 0, 1, 2, 3) - assert [-1, 0] + t == (-1, 0, 1, 2, 3) - assert t * 2 == (1, 2, 3, 1, 2, 3) - assert 2 * t == (1, 2, 3, 1, 2, 3) - - t1 = UserTuple((1,)) - t1a = UserTuple((1,)) - t1b = UserTuple((1,)) - t2 = UserTuple((2,)) - t3 = UserTuple((3,)) - d = {} - d[t1] = 't1' - d[t2] = 't2' - d[t3] = 't3' - assert d[t1] == 't1' - assert d[t1a] == 't1' - assert d[t1b] == 't1' - assert d[t2] == 't2' - assert d[t3] == 't3' - d[t1a] = 't1a' - assert d[t1] == 't1a' - assert d[t1a] == 't1a' - assert d[t1b] == 't1a' - d[t1b] = 't1b' - assert d[t1] == 't1b' - assert d[t1a] == 't1b' - assert d[t1b] == 't1b' diff --git a/src/engine/SCons/__init__.py b/src/engine/SCons/__init__.py index b548841..20e4734 100644 --- a/src/engine/SCons/__init__.py +++ b/src/engine/SCons/__init__.py @@ -38,3 +38,5 @@ __buildsys__ = "__BUILDSYS__" __date__ = "__DATE__" __developer__ = "__DEVELOPER__" + +import SCons.Memoize diff --git a/test/DirSource.py b/test/DirSource.py index 5c0291e..84d8185 100644 --- a/test/DirSource.py +++ b/test/DirSource.py @@ -69,8 +69,8 @@ env_csig.TestDir(source='csig', target='csig.out') """) test.run(arguments=".", stderr=None) -test.fail_test(test.read('bsig.out') != 'stuff\n') -test.fail_test(test.read('csig.out') != 'stuff\n') +test.must_match('bsig.out', 'stuff\n') +test.must_match('csig.out', 'stuff\n') test.up_to_date(arguments='bsig.out') test.up_to_date(arguments='csig.out') diff --git a/test/Repository/LIBPATH.py b/test/Repository/LIBPATH.py index 96b198e..4b249e5 100644 --- a/test/Repository/LIBPATH.py +++ b/test/Repository/LIBPATH.py @@ -49,7 +49,7 @@ def write_LIBDIRFLAGS(env, target, source): pre = env.subst('$LIBDIRPREFIX') suf = env.subst('$LIBDIRSUFFIX') f = open(str(target[0]), 'wb') - for arg in string.split(env.subst('$_LIBDIRFLAGS')): + for arg in string.split(env.subst('$_LIBDIRFLAGS', target=target)): if arg[:len(pre)] == pre: arg = arg[len(pre):] if arg[-len(suf):] == suf: diff --git a/test/scan-once.py b/test/scan-once.py index 7019e23..4436310 100644 --- a/test/scan-once.py +++ b/test/scan-once.py @@ -481,14 +481,21 @@ test.run(arguments = 'SLF', # XXX Note that the generated .h files still get scanned twice, # once before they're generated and once after. That's the # next thing to fix here. -test.fail_test(test.read("MyCScan.out", "rb") != """\ + +# Note KWQ 01 Nov 2004: used to check for a one for all counts below; +# this was indirectly a test that the caching method in use at the +# time was working. 
+# time was working.  With the introduction of Memoize-based caching,
+# the caching is performed right at the interface level, so the test
+# here cannot be run the same way; ergo real counts are used below.
+
+test.must_match("MyCScan.out", """\
 libg_1.c: 1
 libg_2.c: 1
 libg_3.c: 1
-libg_gx.h: 1
+libg_gx.h: 3
 libg_gy.h: 1
 libg_gz.h: 1
-libg_w.h: 1
+libg_w.h: 3
 """)
 
 test.pass_test()