author     Eric Snow <ericsnowcurrently@gmail.com>      2020-12-24 18:04:19 (GMT)
committer  GitHub <noreply@github.com>                  2020-12-24 18:04:19 (GMT)
commit     7ec59d8861ef1104c3028678b2cacde4c5693e19 (patch)
tree       b499f0504f79e1a218229e93ca1847fb61d80831 /Tools/c-analyzer/c_common
parent     b57ada98da0d5b0cf1ebc2c9c5502d04aa962042 (diff)
bpo-36876: [c-analyzer tool] Add a "capi" subcommand to the c-analyzer tool. (gh-23918)
This will help identify which C-API items will need to be updated for subinterpreter support. https://bugs.python.org/issue36876
Diffstat (limited to 'Tools/c-analyzer/c_common')
-rw-r--r--  Tools/c-analyzer/c_common/scriptutil.py   24
-rw-r--r--  Tools/c-analyzer/c_common/tables.py      176
2 files changed, 188 insertions, 12 deletions
diff --git a/Tools/c-analyzer/c_common/scriptutil.py b/Tools/c-analyzer/c_common/scriptutil.py
index 50dd754..ce69af2 100644
--- a/Tools/c-analyzer/c_common/scriptutil.py
+++ b/Tools/c-analyzer/c_common/scriptutil.py
@@ -192,7 +192,7 @@ def add_verbosity_cli(parser):
     parser.add_argument('-q', '--quiet', action='count', default=0)
     parser.add_argument('-v', '--verbose', action='count', default=0)
 
-    def process_args(args):
+    def process_args(args, *, argv=None):
         ns = vars(args)
         key = 'verbosity'
         if key in ns:
@@ -208,7 +208,7 @@ def add_traceback_cli(parser):
     parser.add_argument('--no-traceback', '--no-tb', dest='traceback',
                         action='store_const', const=False)
 
-    def process_args(args):
+    def process_args(args, *, argv=None):
         ns = vars(args)
         key = 'traceback_cm'
         if key in ns:
@@ -262,7 +262,7 @@ def add_sepval_cli(parser, opt, dest, choices, *, sep=',', **kwargs):
     #kwargs.setdefault('metavar', opt.upper())
     parser.add_argument(opt, dest=dest, action='append', **kwargs)
 
-    def process_args(args):
+    def process_args(args, *, argv=None):
         ns = vars(args)
 
         # XXX Use normalize_selection()?
@@ -293,7 +293,7 @@ def add_file_filtering_cli(parser, *, excluded=None):
 
     excluded = tuple(excluded or ())
 
-    def process_args(args):
+    def process_args(args, *, argv=None):
         ns = vars(args)
         key = 'iter_filenames'
         if key in ns:
@@ -323,7 +323,7 @@ def add_progress_cli(parser, *, threshold=VERBOSITY, **kwargs):
     parser.add_argument('--no-progress', dest='track_progress', action='store_false')
     parser.set_defaults(track_progress=True)
 
-    def process_args(args):
+    def process_args(args, *, argv=None):
         if args.track_progress:
             ns = vars(args)
             verbosity = ns.get('verbosity', VERBOSITY)
@@ -339,7 +339,7 @@ def add_failure_filtering_cli(parser, pool, *, default=False):
                         metavar=f'"{{all|{"|".join(sorted(pool))}}},..."')
     parser.add_argument('--no-fail', dest='fail', action='store_const', const=())
 
-    def process_args(args):
+    def process_args(args, *, argv=None):
         ns = vars(args)
 
         fail = ns.pop('fail')
@@ -371,7 +371,7 @@ def add_failure_filtering_cli(parser, pool, *, default=False):
 def add_kind_filtering_cli(parser, *, default=None):
     parser.add_argument('--kinds', action='append')
 
-    def process_args(args):
+    def process_args(args, *, argv=None):
         ns = vars(args)
 
         kinds = []
@@ -486,18 +486,18 @@ def _flatten_processors(processors):
             yield from _flatten_processors(proc)
 
 
-def process_args(args, processors, *, keys=None):
+def process_args(args, argv, processors, *, keys=None):
     processors = _flatten_processors(processors)
     ns = vars(args)
     extracted = {}
     if keys is None:
         for process_args in processors:
-            for key in process_args(args):
+            for key in process_args(args, argv=argv):
                 extracted[key] = ns.pop(key)
     else:
         remainder = set(keys)
         for process_args in processors:
-            hanging = process_args(args)
+            hanging = process_args(args, argv=argv)
             if isinstance(hanging, str):
                 hanging = [hanging]
             for key in hanging or ():
@@ -510,8 +510,8 @@ def process_args(args, processors, *, keys=None):
     return extracted
 
 
-def process_args_by_key(args, processors, keys):
-    extracted = process_args(args, processors, keys=keys)
+def process_args_by_key(args, argv, processors, keys):
+    extracted = process_args(args, argv, processors, keys=keys)
     return [extracted[key] for key in keys]
 
 
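The scriptutil.py changes above thread the original command line (argv) through every per-option process_args() hook: the top-level process_args() and process_args_by_key() now take argv and forward it to each processor as a keyword-only argument, which defaults to None, so processors that ignore it keep working. A minimal usage sketch follows; it is not part of the commit, and it assumes Tools/c-analyzer is importable and that each add_*_cli() helper returns its nested process_args hook (the 'example' command is hypothetical):

    import argparse

    from c_common import scriptutil  # assumes Tools/c-analyzer is on sys.path


    def parse_args(argv=None):
        parser = argparse.ArgumentParser(prog='example')  # hypothetical command
        # Each add_*_cli() helper is assumed to return its process_args hook.
        processors = [
            scriptutil.add_verbosity_cli(parser),   # adds -q/-v, extracts 'verbosity'
            scriptutil.add_traceback_cli(parser),   # adds --no-traceback/--no-tb, extracts 'traceback_cm'
        ]
        args = parser.parse_args(argv)
        # argv is now forwarded to every processor (each is called with argv=argv).
        verbosity, traceback_cm = scriptutil.process_args_by_key(
            args,
            argv,
            processors,
            ['verbosity', 'traceback_cm'],
        )
        return args, verbosity, traceback_cm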
diff --git a/Tools/c-analyzer/c_common/tables.py b/Tools/c-analyzer/c_common/tables.py
index 411152e..85b5019 100644
--- a/Tools/c-analyzer/c_common/tables.py
+++ b/Tools/c-analyzer/c_common/tables.py
@@ -1,4 +1,6 @@
 import csv
+import re
+import textwrap
 
 from . import NOT_SET, strutil, fsutil
 
@@ -212,3 +214,177 @@ def _normalize_table_file_props(header, sep):
     else:
         sep = None
     return header, sep
+
+
+##################################
+# stdout tables
+
+WIDTH = 20
+
+
+def resolve_columns(specs):
+    if isinstance(specs, str):
+        specs = specs.replace(',', ' ').strip().split()
+    return _resolve_colspecs(specs)
+
+
+def build_table(specs, *, sep=' ', defaultwidth=None):
+    columns = resolve_columns(specs)
+    return _build_table(columns, sep=sep, defaultwidth=defaultwidth)
+
+
+_COLSPEC_RE = re.compile(textwrap.dedent(r'''
+    ^
+    (?:
+        [[]
+        (
+            (?: [^\s\]] [^\]]* )?
+            [^\s\]]
+        ) # <label>
+        []]
+    )?
+    ( \w+ ) # <field>
+    (?:
+        (?:
+            :
+            ( [<^>] ) # <align>
+            ( \d+ ) # <width1>
+        )
+        |
+        (?:
+            (?:
+                :
+                ( \d+ ) # <width2>
+            )?
+            (?:
+                :
+                ( .*? ) # <fmt>
+            )?
+        )
+    )?
+    $
+'''), re.VERBOSE)
+
+
+def _parse_fmt(fmt):
+    if fmt.startswith(tuple('<^>')):
+        align = fmt[0]
+        width = fmt[1:]
+        if width.isdigit():
+            return int(width), align
+    return None, None
+
+
+def _parse_colspec(raw):
+    m = _COLSPEC_RE.match(raw)
+    if not m:
+        return None
+    label, field, align, width1, width2, fmt = m.groups()
+    if not label:
+        label = field
+    if width1:
+        width = None
+        fmt = f'{align}{width1}'
+    elif width2:
+        width = int(width2)
+        if fmt:
+            _width, _ = _parse_fmt(fmt)
+            if _width == width:
+                width = None
+    else:
+        width = None
+    return field, label, width, fmt
+
+
+def _normalize_colspec(spec):
+    if len(spec) == 1:
+        raw, = spec
+        return _resolve_colspec(raw)
+
+    if len(spec) == 4:
+        label, field, width, fmt = spec
+        if width:
+            fmt = f'{width}:{fmt}' if fmt else width
+    elif len(spec) == 3:
+        label, field, fmt = spec
+        if not field:
+            label, field = None, label
+        elif not isinstance(field, str) or not field.isidentifier():
+            fmt = f'{field}:{fmt}' if fmt else field
+            label, field = None, label
+    elif len(spec) == 2:
+        label = None
+        field, fmt = spec
+        if not field:
+            field, fmt = fmt, None
+        elif not field.isidentifier() or fmt.isidentifier():
+            label, field = field, fmt
+    else:
+        raise NotImplementedError
+
+    fmt = f':{fmt}' if fmt else ''
+    if label:
+        return _parse_colspec(f'[{label}]{field}{fmt}')
+    else:
+        return _parse_colspec(f'{field}{fmt}')
+
+
+def _resolve_colspec(raw):
+    if isinstance(raw, str):
+        spec = _parse_colspec(raw)
+    else:
+        spec = _normalize_colspec(raw)
+    if spec is None:
+        raise ValueError(f'unsupported column spec {raw!r}')
+    return spec
+
+
+def _resolve_colspecs(columns):
+    parsed = []
+    for raw in columns:
+        column = _resolve_colspec(raw)
+        parsed.append(column)
+    return parsed
+
+
+def _resolve_width(spec, defaultwidth):
+    _, label, width, fmt = spec
+    if width:
+        if not isinstance(width, int):
+            raise NotImplementedError
+        return width
+    elif width and fmt:
+        width, _ = _parse_fmt(fmt)
+        if width:
+            return width
+
+    if not defaultwidth:
+        return WIDTH
+    elif not hasattr(defaultwidth, 'get'):
+        return defaultwidth or WIDTH
+
+    defaultwidths = defaultwidth
+    defaultwidth = defaultwidths.get(None) or WIDTH
+    return defaultwidths.get(label) or defaultwidth
+
+
+def _build_table(columns, *, sep=' ', defaultwidth=None):
+    header = []
+    div = []
+    rowfmt = []
+    for spec in columns:
+        field, label, _, colfmt = spec
+        width = _resolve_width(spec, defaultwidth)
+        if colfmt:
+            colfmt = f':{colfmt}'
+        else:
+            colfmt = f':{width}'
+
+        header.append(f' {{:^{width}}} '.format(label))
+        div.append('-' * (width + 2))
+        rowfmt.append(f' {{{field}{colfmt}}} ')
+    return (
+        sep.join(header),
+        sep.join(div),
+        sep.join(rowfmt),
+    )
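The new tables.py helpers above turn a compact column spec (a field name, optionally prefixed with '[label]' and suffixed with ':width' or an alignment/format) into three strings: a header line, a divider, and a str.format() template for one row. A small usage sketch follows; it is not part of the commit, it assumes Tools/c-analyzer is importable, and the column names and row data are made up for illustration:

    from c_common.tables import build_table  # assumes Tools/c-analyzer is on sys.path

    # 'name:40' gives the "name" field an explicit width; 'kind' has none,
    # so _resolve_width() falls back to the module default WIDTH (20).
    header, div, rowfmt = build_table('name:40 kind')

    print(header)
    print(div)
    for row in ({'name': 'PyList_New', 'kind': 'function'},
                {'name': 'Py_None', 'kind': 'data'}):
        print(rowfmt.format(**row))

Column specs may also be given as a comma-separated string or as a sequence; resolve_columns() normalizes either form before _build_table() assembles the header, divider, and row template.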