Diffstat (limited to 'Tools/scripts')
-rw-r--r--   Tools/scripts/README                  |   4
-rwxr-xr-x   Tools/scripts/diff.py                 |  16
-rwxr-xr-x   Tools/scripts/find_recursionlimit.py  |   7
-rwxr-xr-x   Tools/scripts/findnocoding.py         |  23
-rwxr-xr-x   Tools/scripts/highlight.py            | 260
-rwxr-xr-x   Tools/scripts/import_diagnostics.py   |  37
-rwxr-xr-x   Tools/scripts/patchcheck.py           |  53
-rwxr-xr-x   Tools/scripts/pysource.py             |  14
-rwxr-xr-x   Tools/scripts/pyvenv                  |  11
-rwxr-xr-x   Tools/scripts/reindent.py             |  18
-rwxr-xr-x   Tools/scripts/run_tests.py            |  51
-rwxr-xr-x   Tools/scripts/texi2html.py            |  10
12 files changed, 445 insertions, 59 deletions
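Among the listed changes, diff.py gains a small file_mtime() helper so the diff headers carry timezone-aware ISO 8601 timestamps instead of time.ctime() strings. A minimal, self-contained sketch of that helper follows; its body matches the hunk shown further down, while the __main__ demo and the use of __file__ are illustrative only.

#!/usr/bin/env python3
# Sketch of the file_mtime() helper added to Tools/scripts/diff.py.
import os
from datetime import datetime, timezone

def file_mtime(path):
    # Convert the file's st_mtime to an aware UTC datetime, then render it
    # in the local timezone as an ISO 8601 string.
    t = datetime.fromtimestamp(os.stat(path).st_mtime, timezone.utc)
    return t.astimezone().isoformat()

if __name__ == '__main__':
    # Prints something like: 2012-05-15T09:30:21.123456+02:00
    print(file_mtime(__file__))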
diff --git a/Tools/scripts/README b/Tools/scripts/README index 8c02529..d65d1fd 100644 --- a/Tools/scripts/README +++ b/Tools/scripts/README @@ -15,7 +15,7 @@ db2pickle.py Dump a database file to a pickle diff.py Print file diffs in context, unified, or ndiff formats dutree.py Format du(1) output as a tree sorted by size eptags.py Create Emacs TAGS file for Python modules -find_recursionlimit.py Find the maximum recursion limit on this machine +find_recursionlimit.py Find the maximum recursion limit on this machine finddiv.py A grep-like tool that looks for division operators findlinksto.py Recursively find symbolic links to a given path prefix findnocoding.py Find source files which need an encoding declaration @@ -28,6 +28,7 @@ ftpmirror.py FTP mirror script google.py Open a webbrowser with Google gprof2html.py Transform gprof(1) output into useful HTML h2py.py Translate #define's into Python assignments +highlight.py Python syntax highlighting with HTML output idle3 Main program to start IDLE ifdef.py Remove #if(n)def groups from C sources lfcr.py Change LF line endings to CRLF (Unix to Windows) @@ -53,6 +54,7 @@ redemo.py Basic regular expression demonstration facility reindent.py Change .py files to use 4-space indents reindent-rst.py Fix-up reStructuredText file whitespace rgrep.py Reverse grep through a file (useful for big logfiles) +run_tests.py Run the test suite with more sensible default options serve.py Small wsgiref-based web server, used in make serve in Doc suff.py Sort a list of files by suffix svneol.py Set svn:eol-style on all files in directory diff --git a/Tools/scripts/diff.py b/Tools/scripts/diff.py index 9efb078..f9b14bf 100755 --- a/Tools/scripts/diff.py +++ b/Tools/scripts/diff.py @@ -9,6 +9,12 @@ """ import sys, os, time, difflib, optparse +from datetime import datetime, timezone + +def file_mtime(path): + t = datetime.fromtimestamp(os.stat(path).st_mtime, + timezone.utc) + return t.astimezone().isoformat() def main(): @@ -30,10 +36,12 @@ def main(): n = options.lines fromfile, tofile = args - fromdate = time.ctime(os.stat(fromfile).st_mtime) - todate = time.ctime(os.stat(tofile).st_mtime) - fromlines = open(fromfile, 'U').readlines() - tolines = open(tofile, 'U').readlines() + fromdate = file_mtime(fromfile) + todate = file_mtime(tofile) + with open(fromfile, 'U') as ff: + fromlines = ff.readlines() + with open(tofile, 'U') as tf: + tolines = tf.readlines() if options.u: diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n) diff --git a/Tools/scripts/find_recursionlimit.py b/Tools/scripts/find_recursionlimit.py index 7a86603..1171146 100755 --- a/Tools/scripts/find_recursionlimit.py +++ b/Tools/scripts/find_recursionlimit.py @@ -89,6 +89,12 @@ def test_cpickle(_cache={}): _pickle.Pickler(io.BytesIO(), protocol=-1).dump(l) _cache[n] = l +def test_compiler_recursion(): + # The compiler uses a scaling factor to support additional levels + # of recursion. 
This is a sanity check of that scaling to ensure + # it still raises RuntimeError even at higher recursion limits + compile("()" * (10 * sys.getrecursionlimit()), "<single>", "single") + def check_limit(n, test_func_name): sys.setrecursionlimit(n) if test_func_name.startswith("test_"): @@ -117,5 +123,6 @@ if __name__ == '__main__': check_limit(limit, "test_getattr") check_limit(limit, "test_getitem") check_limit(limit, "test_cpickle") + check_limit(limit, "test_compiler_recursion") print("Limit of %d is fine" % limit) limit = limit + 100 diff --git a/Tools/scripts/findnocoding.py b/Tools/scripts/findnocoding.py index a494a48..b3e9dc7 100755 --- a/Tools/scripts/findnocoding.py +++ b/Tools/scripts/findnocoding.py @@ -2,7 +2,7 @@ """List all those Python files that require a coding directive -Usage: nocoding.py dir1 [dir2...] +Usage: findnocoding.py dir1 [dir2...] """ __author__ = "Oleg Broytmann, Georg Brandl" @@ -32,7 +32,7 @@ except ImportError: "no sophisticated Python source file search will be done.", file=sys.stderr) -decl_re = re.compile(r"coding[=:]\s*([-\w.]+)") +decl_re = re.compile(rb"coding[=:]\s*([-\w.]+)") def get_declaration(line): match = decl_re.search(line) @@ -50,21 +50,20 @@ def has_correct_encoding(text, codec): def needs_declaration(fullpath): try: - infile = open(fullpath, 'rU') + infile = open(fullpath, 'rb') except IOError: # Oops, the file was removed - ignore it return None - line1 = infile.readline() - line2 = infile.readline() + with infile: + line1 = infile.readline() + line2 = infile.readline() - if get_declaration(line1) or get_declaration(line2): - # the file does have an encoding declaration, so trust it - infile.close() - return False + if get_declaration(line1) or get_declaration(line2): + # the file does have an encoding declaration, so trust it + return False - # check the whole file for non utf-8 characters - rest = infile.read() - infile.close() + # check the whole file for non utf-8 characters + rest = infile.read() if has_correct_encoding(line1+line2+rest, "utf-8"): return False diff --git a/Tools/scripts/highlight.py b/Tools/scripts/highlight.py new file mode 100755 index 0000000..aff5cae --- /dev/null +++ b/Tools/scripts/highlight.py @@ -0,0 +1,260 @@ +#!/usr/bin/env python3 +'''Add syntax highlighting to Python source code''' + +__author__ = 'Raymond Hettinger' + +import keyword, tokenize, cgi, re, functools +try: + import builtins +except ImportError: + import __builtin__ as builtins + +#### Analyze Python Source ################################# + +def is_builtin(s): + 'Return True if s is the name of a builtin' + return hasattr(builtins, s) + +def combine_range(lines, start, end): + 'Join content from a range of lines between start and end' + (srow, scol), (erow, ecol) = start, end + if srow == erow: + return lines[srow-1][scol:ecol], end + rows = [lines[srow-1][scol:]] + lines[srow: erow-1] + [lines[erow-1][:ecol]] + return ''.join(rows), end + +def analyze_python(source): + '''Generate and classify chunks of Python for syntax highlighting. + Yields tuples in the form: (category, categorized_text). 
+ ''' + lines = source.splitlines(True) + lines.append('') + readline = functools.partial(next, iter(lines), '') + kind = tok_str = '' + tok_type = tokenize.COMMENT + written = (1, 0) + for tok in tokenize.generate_tokens(readline): + prev_tok_type, prev_tok_str = tok_type, tok_str + tok_type, tok_str, (srow, scol), (erow, ecol), logical_lineno = tok + kind = '' + if tok_type == tokenize.COMMENT: + kind = 'comment' + elif tok_type == tokenize.OP and tok_str[:1] not in '{}[](),.:;@': + kind = 'operator' + elif tok_type == tokenize.STRING: + kind = 'string' + if prev_tok_type == tokenize.INDENT or scol==0: + kind = 'docstring' + elif tok_type == tokenize.NAME: + if tok_str in ('def', 'class', 'import', 'from'): + kind = 'definition' + elif prev_tok_str in ('def', 'class'): + kind = 'defname' + elif keyword.iskeyword(tok_str): + kind = 'keyword' + elif is_builtin(tok_str) and prev_tok_str != '.': + kind = 'builtin' + if kind: + text, written = combine_range(lines, written, (srow, scol)) + yield '', text + text, written = tok_str, (erow, ecol) + yield kind, text + line_upto_token, written = combine_range(lines, written, (erow, ecol)) + yield '', line_upto_token + +#### Raw Output ########################################### + +def raw_highlight(classified_text): + 'Straight text display of text classifications' + result = [] + for kind, text in classified_text: + result.append('%15s: %r\n' % (kind or 'plain', text)) + return ''.join(result) + +#### ANSI Output ########################################### + +default_ansi = { + 'comment': ('\033[0;31m', '\033[0m'), + 'string': ('\033[0;32m', '\033[0m'), + 'docstring': ('\033[0;32m', '\033[0m'), + 'keyword': ('\033[0;33m', '\033[0m'), + 'builtin': ('\033[0;35m', '\033[0m'), + 'definition': ('\033[0;33m', '\033[0m'), + 'defname': ('\033[0;34m', '\033[0m'), + 'operator': ('\033[0;33m', '\033[0m'), +} + +def ansi_highlight(classified_text, colors=default_ansi): + 'Add syntax highlighting to source code using ANSI escape sequences' + # http://en.wikipedia.org/wiki/ANSI_escape_code + result = [] + for kind, text in classified_text: + opener, closer = colors.get(kind, ('', '')) + result += [opener, text, closer] + return ''.join(result) + +#### HTML Output ########################################### + +def html_highlight(classified_text,opener='<pre class="python">\n', closer='</pre>\n'): + 'Convert classified text to an HTML fragment' + result = [opener] + for kind, text in classified_text: + if kind: + result.append('<span class="%s">' % kind) + result.append(cgi.escape(text)) + if kind: + result.append('</span>') + result.append(closer) + return ''.join(result) + +default_css = { + '.comment': '{color: crimson;}', + '.string': '{color: forestgreen;}', + '.docstring': '{color: forestgreen; font-style:italic;}', + '.keyword': '{color: darkorange;}', + '.builtin': '{color: purple;}', + '.definition': '{color: darkorange; font-weight:bold;}', + '.defname': '{color: blue;}', + '.operator': '{color: brown;}', +} + +default_html = '''\ +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" + "http://www.w3.org/TR/html4/strict.dtd"> +<html> +<head> +<meta http-equiv="Content-type" content="text/html;charset=UTF-8"> +<title> {title} </title> +<style type="text/css"> +{css} +</style> +</head> +<body> +{body} +</body> +</html> +''' + +def build_html_page(classified_text, title='python', + css=default_css, html=default_html): + 'Create a complete HTML page with colorized source code' + css_str = '\n'.join(['%s %s' % item for item in css.items()]) + result = 
html_highlight(classified_text) + title = cgi.escape(title) + return html.format(title=title, css=css_str, body=result) + +#### LaTeX Output ########################################## + +default_latex_commands = { + 'comment': '{\color{red}#1}', + 'string': '{\color{ForestGreen}#1}', + 'docstring': '{\emph{\color{ForestGreen}#1}}', + 'keyword': '{\color{orange}#1}', + 'builtin': '{\color{purple}#1}', + 'definition': '{\color{orange}#1}', + 'defname': '{\color{blue}#1}', + 'operator': '{\color{brown}#1}', +} + +default_latex_document = r''' +\documentclass{article} +\usepackage{alltt} +\usepackage{upquote} +\usepackage{color} +\usepackage[usenames,dvipsnames]{xcolor} +\usepackage[cm]{fullpage} +%(macros)s +\begin{document} +\center{\LARGE{%(title)s}} +\begin{alltt} +%(body)s +\end{alltt} +\end{document} +''' + +def alltt_escape(s): + 'Replace backslash and braces with their escaped equivalents' + xlat = {'{': r'\{', '}': r'\}', '\\': r'\textbackslash{}'} + return re.sub(r'[\\{}]', lambda mo: xlat[mo.group()], s) + +def latex_highlight(classified_text, title = 'python', + commands = default_latex_commands, + document = default_latex_document): + 'Create a complete LaTeX document with colorized source code' + macros = '\n'.join(r'\newcommand{\py%s}[1]{%s}' % c for c in commands.items()) + result = [] + for kind, text in classified_text: + if kind: + result.append(r'\py%s{' % kind) + result.append(alltt_escape(text)) + if kind: + result.append('}') + return default_latex_document % dict(title=title, macros=macros, body=''.join(result)) + + +if __name__ == '__main__': + import sys, argparse, webbrowser, os, textwrap + + parser = argparse.ArgumentParser( + description = 'Add syntax highlighting to Python source code', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog = textwrap.dedent(''' + examples: + + # Show syntax highlighted code in the terminal window + $ ./highlight.py myfile.py + + # Colorize myfile.py and display in a browser + $ ./highlight.py -b myfile.py + + # Create an HTML section to embed in an existing webpage + ./highlight.py -s myfile.py + + # Create a complete HTML file + $ ./highlight.py -c myfile.py > myfile.html + + # Create a PDF using LaTeX + $ ./highlight.py -l myfile.py | pdflatex + + ''')) + parser.add_argument('sourcefile', metavar = 'SOURCEFILE', + help = 'file containing Python sourcecode') + parser.add_argument('-b', '--browser', action = 'store_true', + help = 'launch a browser to show results') + parser.add_argument('-c', '--complete', action = 'store_true', + help = 'build a complete html webpage') + parser.add_argument('-l', '--latex', action = 'store_true', + help = 'build a LaTeX document') + parser.add_argument('-r', '--raw', action = 'store_true', + help = 'raw parse of categorized text') + parser.add_argument('-s', '--section', action = 'store_true', + help = 'show an HTML section rather than a complete webpage') + args = parser.parse_args() + + if args.section and (args.browser or args.complete): + parser.error('The -s/--section option is incompatible with ' + 'the -b/--browser or -c/--complete options') + + sourcefile = args.sourcefile + with open(sourcefile) as f: + source = f.read() + classified_text = analyze_python(source) + + if args.raw: + encoded = raw_highlight(classified_text) + elif args.complete or args.browser: + encoded = build_html_page(classified_text, title=sourcefile) + elif args.section: + encoded = html_highlight(classified_text) + elif args.latex: + encoded = latex_highlight(classified_text, title=sourcefile) + else: + 
encoded = ansi_highlight(classified_text) + + if args.browser: + htmlfile = os.path.splitext(os.path.basename(sourcefile))[0] + '.html' + with open(htmlfile, 'w') as f: + f.write(encoded) + webbrowser.open('file://' + os.path.abspath(htmlfile)) + else: + sys.stdout.write(encoded) diff --git a/Tools/scripts/import_diagnostics.py b/Tools/scripts/import_diagnostics.py new file mode 100755 index 0000000..c907221 --- /dev/null +++ b/Tools/scripts/import_diagnostics.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +"""Miscellaneous diagnostics for the import system""" + +import sys +import argparse +from pprint import pprint + +def _dump_state(args): + print(sys.version) + for name in args.attributes: + print("sys.{}:".format(name)) + pprint(getattr(sys, name)) + +def _add_dump_args(cmd): + cmd.add_argument("attributes", metavar="ATTR", nargs="+", + help="sys module attribute to display") + +COMMANDS = ( + ("dump", "Dump import state", _dump_state, _add_dump_args), +) + +def _make_parser(): + parser = argparse.ArgumentParser() + sub = parser.add_subparsers(title="Commands") + for name, description, implementation, add_args in COMMANDS: + cmd = sub.add_parser(name, help=description) + cmd.set_defaults(command=implementation) + add_args(cmd) + return parser + +def main(args): + parser = _make_parser() + args = parser.parse_args(args) + return args.command(args) + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/Tools/scripts/patchcheck.py b/Tools/scripts/patchcheck.py index 0e18dd9..503c67a 100755 --- a/Tools/scripts/patchcheck.py +++ b/Tools/scripts/patchcheck.py @@ -49,29 +49,15 @@ def mq_patches_applied(): @status("Getting the list of files that have been added/changed", info=lambda x: n_files_str(len(x))) def changed_files(): - """Get the list of changed or added files from the VCS.""" - if os.path.isdir(os.path.join(SRCDIR, '.hg')): - vcs = 'hg' - cmd = 'hg status --added --modified --no-status' - if mq_patches_applied(): - cmd += ' --rev qparent' - elif os.path.isdir('.svn'): - vcs = 'svn' - cmd = 'svn status --quiet --non-interactive --ignore-externals' - else: + """Get the list of changed or added files from Mercurial.""" + if not os.path.isdir(os.path.join(SRCDIR, '.hg')): sys.exit('need a checkout to get modified files') - st = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) - try: - st.wait() - if vcs == 'hg': - return [x.decode().rstrip() for x in st.stdout] - else: - output = (x.decode().rstrip().rsplit(None, 1)[-1] - for x in st.stdout if x[0] in b'AM') - return set(path for path in output if os.path.isfile(path)) - finally: - st.stdout.close() + cmd = 'hg status --added --modified --no-status' + if mq_patches_applied(): + cmd += ' --rev qparent' + with subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) as st: + return [x.decode().rstrip() for x in st.stdout] def report_modified_files(file_paths): @@ -89,10 +75,8 @@ def report_modified_files(file_paths): def normalize_whitespace(file_paths): """Make sure that the whitespace for .py files have been normalized.""" reindent.makebackup = False # No need to create backups. 
- fixed = [] - for path in (x for x in file_paths if x.endswith('.py')): - if reindent.check(os.path.join(SRCDIR, path)): - fixed.append(path) + fixed = [path for path in file_paths if path.endswith('.py') and + reindent.check(os.path.join(SRCDIR, path))] return fixed @@ -148,6 +132,21 @@ def reported_news(file_paths): """Check if Misc/NEWS has been changed.""" return 'Misc/NEWS' in file_paths +@status("configure regenerated", modal=True, info=str) +def regenerated_configure(file_paths): + """Check if configure has been regenerated.""" + if 'configure.ac' in file_paths: + return "yes" if 'configure' in file_paths else "no" + else: + return "not needed" + +@status("pyconfig.h.in regenerated", modal=True, info=str) +def regenerated_pyconfig_h_in(file_paths): + """Check if pyconfig.h.in has been regenerated.""" + if 'configure.ac' in file_paths: + return "yes" if 'pyconfig.h.in' in file_paths else "no" + else: + return "not needed" def main(): file_paths = changed_files() @@ -167,6 +166,10 @@ def main(): credit_given(special_files) # Misc/NEWS changed. reported_news(special_files) + # Regenerated configure, if necessary. + regenerated_configure(file_paths) + # Regenerated pyconfig.h.in, if necessary. + regenerated_pyconfig_h_in(file_paths) # Test suite run and passed. if python_files or c_files: diff --git a/Tools/scripts/pysource.py b/Tools/scripts/pysource.py index 048131e..69e8e0d 100755 --- a/Tools/scripts/pysource.py +++ b/Tools/scripts/pysource.py @@ -22,7 +22,7 @@ __all__ = ["has_python_ext", "looks_like_python", "can_be_compiled", "walk_pytho import os, re -binary_re = re.compile('[\x00-\x08\x0E-\x1F\x7F]') +binary_re = re.compile(br'[\x00-\x08\x0E-\x1F\x7F]') debug = False @@ -42,7 +42,7 @@ def _open(fullpath): return None try: - return open(fullpath, 'rU') + return open(fullpath, "rb") except IOError as err: # Access denied, or a special file - ignore it print_debug("%s: access denied: %s" % (fullpath, err)) return None @@ -55,8 +55,8 @@ def looks_like_python(fullpath): if infile is None: return False - line = infile.readline() - infile.close() + with infile: + line = infile.readline() if binary_re.search(line): # file appears to be binary @@ -65,7 +65,7 @@ def looks_like_python(fullpath): if fullpath.endswith(".py") or fullpath.endswith(".pyw"): return True - elif "python" in line: + elif b"python" in line: # disguised Python script (e.g. CGI) return True @@ -76,8 +76,8 @@ def can_be_compiled(fullpath): if infile is None: return False - code = infile.read() - infile.close() + with infile: + code = infile.read() try: compile(code, fullpath, "exec") diff --git a/Tools/scripts/pyvenv b/Tools/scripts/pyvenv new file mode 100755 index 0000000..978d691 --- /dev/null +++ b/Tools/scripts/pyvenv @@ -0,0 +1,11 @@ +#!/usr/bin/env python3 +if __name__ == '__main__': + import sys + rc = 1 + try: + import venv + venv.main() + rc = 0 + except Exception as e: + print('Error: %s' % e, file=sys.stderr) + sys.exit(rc) diff --git a/Tools/scripts/reindent.py b/Tools/scripts/reindent.py index b18993b..4a916ea 100755 --- a/Tools/scripts/reindent.py +++ b/Tools/scripts/reindent.py @@ -8,6 +8,8 @@ -r (--recurse) Recurse. Search for all .py files in subdirectories too. -n (--nobackup) No backup. Does not make a ".bak" file before reindenting. -v (--verbose) Verbose. Print informative msgs; else no output. + (--newline) Newline. Specify the newline character to use (CRLF, LF). + Default is the same as the original file. -h (--help) Help. Print this usage information and exit. 
Change Python (.py) files to use 4-space indents and no hard tab characters. @@ -50,6 +52,8 @@ verbose = False recurse = False dryrun = False makebackup = True +spec_newline = None +"""A specified newline to be used in the output (set by --newline option)""" def usage(msg=None): @@ -62,13 +66,12 @@ def errprint(*args): sys.stderr.write(" ".join(str(arg) for arg in args)) sys.stderr.write("\n") - def main(): import getopt - global verbose, recurse, dryrun, makebackup + global verbose, recurse, dryrun, makebackup, spec_newline try: opts, args = getopt.getopt(sys.argv[1:], "drnvh", - ["dryrun", "recurse", "nobackup", "verbose", "help"]) + ["dryrun", "recurse", "nobackup", "verbose", "newline=", "help"]) except getopt.error as msg: usage(msg) return @@ -81,6 +84,11 @@ def main(): makebackup = False elif o in ('-v', '--verbose'): verbose = True + elif o in ('--newline',): + if not a.upper() in ('CRLF', 'LF'): + usage() + return + spec_newline = dict(CRLF='\r\n', LF='\n')[a.upper()] elif o in ('-h', '--help'): usage() return @@ -118,9 +126,9 @@ def check(file): errprint("%s: I/O Error: %s" % (file, str(msg))) return - newline = r.newlines + newline = spec_newline if spec_newline else r.newlines if isinstance(newline, tuple): - errprint("%s: mixed newlines detected; cannot process file" % file) + errprint("%s: mixed newlines detected; cannot continue without --newline" % file) return if r.run(): diff --git a/Tools/scripts/run_tests.py b/Tools/scripts/run_tests.py new file mode 100755 index 0000000..e2a2050 --- /dev/null +++ b/Tools/scripts/run_tests.py @@ -0,0 +1,51 @@ +"""Run Python's test suite in a fast, rigorous way. + +The defaults are meant to be reasonably thorough, while skipping certain +tests that can be time-consuming or resource-intensive (e.g. largefile), +or distracting (e.g. audio and gui). These defaults can be overridden by +simply passing a -u option to this script. + +""" + +import os +import sys +import test.support +try: + import threading +except ImportError: + threading = None + + +def is_multiprocess_flag(arg): + return arg.startswith('-j') or arg.startswith('--multiprocess') + + +def is_resource_use_flag(arg): + return arg.startswith('-u') or arg.startswith('--use') + + +def main(regrtest_args): + args = [sys.executable, + '-W', 'default', # Warnings set to 'default' + '-bb', # Warnings about bytes/bytearray + '-E', # Ignore environment variables + ] + # Allow user-specified interpreter options to override our defaults. 
+ args.extend(test.support.args_from_interpreter_flags()) + args.extend(['-m', 'test', # Run the test suite + '-r', # Randomize test order + '-w', # Re-run failed tests in verbose mode + ]) + if sys.platform == 'win32': + args.append('-n') # Silence alerts under Windows + if threading and not any(is_multiprocess_flag(arg) for arg in regrtest_args): + args.extend(['-j', '0']) # Use all CPU cores + if not any(is_resource_use_flag(arg) for arg in regrtest_args): + args.extend(['-u', 'all,-largefile,-audio,-gui']) + args.extend(regrtest_args) + print(' '.join(args)) + os.execv(sys.executable, args) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/Tools/scripts/texi2html.py b/Tools/scripts/texi2html.py index af2147a..9983528 100755 --- a/Tools/scripts/texi2html.py +++ b/Tools/scripts/texi2html.py @@ -319,7 +319,7 @@ class TexinfoParser: # Start saving text in a buffer instead of writing it to a file def startsaving(self): - if self.savetext != None: + if self.savetext is not None: self.savestack.append(self.savetext) # print '*** Recursively saving text, expect trouble' self.savetext = '' @@ -341,7 +341,7 @@ class TexinfoParser: except: print(args) raise TypeError - if self.savetext != None: + if self.savetext is not None: self.savetext = self.savetext + text elif self.nodefp: self.nodefp.write(text) @@ -350,7 +350,7 @@ class TexinfoParser: # Complete the current node -- write footnotes and close file def endnode(self): - if self.savetext != None: + if self.savetext is not None: print('*** Still saving text at end of node') dummy = self.collectsavings() if self.footnotes: @@ -804,7 +804,7 @@ class TexinfoParser: def close_i(self): self.write('</I>') def open_footnote(self): - # if self.savetext <> None: + # if self.savetext is not None: # print '*** Recursive footnote -- expect weirdness' id = len(self.footnotes) + 1 self.write(self.FN_SOURCE_PATTERN % {'id': repr(id)}) @@ -1442,7 +1442,7 @@ class TexinfoParser: else: # some other character, e.g. '-' args = self.itemarg + ' ' + args - if self.itemnumber != None: + if self.itemnumber is not None: args = self.itemnumber + '. ' + args self.itemnumber = increment(self.itemnumber) if self.stack and self.stack[-1] == 'table': |