author     Victor Stinner <vstinner@python.org>  2022-10-11 11:18:53 (GMT)
committer  GitHub <noreply@github.com>           2022-10-11 11:18:53 (GMT)
commit     454a6d61bc569b45cad163601d745bb304b53bde (patch)
tree       754d6b2fee55fb6118acf4994ee57a3f4d1e3dae /Tools/scripts
parent     f0a680007f345a46bcd187b952a929c7068c1da8 (diff)
gh-97669: Remove abitype.py and pep384_macrocheck.py (#98165)
Remove the abitype.py and pep384_macrocheck.py scripts from Tools/scripts/.
Diffstat (limited to 'Tools/scripts')
-rw-r--r--  Tools/scripts/README                |   1 -
-rwxr-xr-x  Tools/scripts/abitype.py            | 202 -
-rw-r--r--  Tools/scripts/pep384_macrocheck.py  | 148 -
3 files changed, 0 insertions, 351 deletions
diff --git a/Tools/scripts/README b/Tools/scripts/README
index 2fccecc..9943d4c 100644
--- a/Tools/scripts/README
+++ b/Tools/scripts/README
@@ -2,7 +2,6 @@ This directory contains a collection of executable Python scripts that are
 useful while building, extending or managing Python.
 
 2to3                     Main script for running the 2to3 conversion tool
-abitype.py               Converts a C file to use the PEP 384 type definition API
 combinerefs.py           A helper for analyzing PYTHONDUMPREFS output
 idle3                    Main program to start IDLE
 parse_html5_entities.py  Utility for parsing HTML5 entity definitions
diff --git a/Tools/scripts/abitype.py b/Tools/scripts/abitype.py
deleted file mode 100755
index d6a74a1..0000000
--- a/Tools/scripts/abitype.py
+++ /dev/null
@@ -1,202 +0,0 @@
-#!/usr/bin/env python3
-# This script converts a C file to use the PEP 384 type definition API
-# Usage: abitype.py < old_code > new_code
-import re, sys
-
-###### Replacement of PyTypeObject static instances ##############
-
-# classify each token, giving it a one-letter code:
-# S: static
-# T: PyTypeObject
-# I: ident
-# W: whitespace
-# =, {, }, ; : themselves
-def classify():
-    res = []
-    for t, v in tokens:
-        if t == 'other' and v in "={};":
-            res.append(v)
-        elif t == 'ident':
-            if v == 'PyTypeObject':
-                res.append('T')
-            elif v == 'static':
-                res.append('S')
-            else:
-                res.append('I')
-        elif t == 'ws':
-            res.append('W')
-        else:
-            res.append('.')
-    return ''.join(res)
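
As an illustration of classify() (a minimal standalone sketch, not part of the original script; Foo_Type is an invented name), the token codes for a small static type declaration line up as follows:

    import re

    tokenizer = re.compile(      # same pattern as in the __main__ block below
        r"(?P<preproc>#.*\n)"
        r"|(?P<comment>/\*.*?\*/)"
        r"|(?P<ident>[a-zA-Z_][a-zA-Z0-9_]*)"
        r"|(?P<ws>[ \t\n]+)"
        r"|(?P<other>.)",
        re.MULTILINE)

    source = "static PyTypeObject Foo_Type = { PyVarObject_HEAD_INIT(NULL, 0) };"
    codes = []
    for m in tokenizer.finditer(source):
        t, v = m.lastgroup, m.group()
        if t == 'other' and v in "={};":
            codes.append(v)
        elif t == 'ident':
            codes.append({'PyTypeObject': 'T', 'static': 'S'}.get(v, 'I'))
        elif t == 'ws':
            codes.append('W')
        else:
            codes.append('.')
    print(''.join(codes))    # -> SWTWIW=W{WI.I.W..W};
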
-
-# Obtain a list of fields of a PyTypeObject, in declaration order,
-# skipping ob_base.
-# All comments are dropped from the variable (they are typically
-# just the slot names, anyway), and the information about whether
-# the original type was static is discarded.
-def get_fields(start, real_end):
-    pos = start
-    # static?
-    if tokens[pos][1] == 'static':
-        pos += 2
-    # PyTypeObject
-    pos += 2
-    # name
-    name = tokens[pos][1]
-    pos += 1
-    while tokens[pos][1] != '{':
-        pos += 1
-    pos += 1
-    # PyVarObject_HEAD_INIT
-    while tokens[pos][0] in ('ws', 'comment'):
-        pos += 1
-    if tokens[pos][1] != 'PyVarObject_HEAD_INIT':
-        raise Exception('%s has no PyVarObject_HEAD_INIT' % name)
-    while tokens[pos][1] != ')':
-        pos += 1
-    pos += 1
-    # field definitions: various tokens, comma-separated
-    fields = []
-    while True:
-        while tokens[pos][0] in ('ws', 'comment'):
-            pos += 1
-        end = pos
-        while tokens[end][1] not in ',}':
-            if tokens[end][1] == '(':
-                nesting = 1
-                while nesting:
-                    end += 1
-                    if tokens[end][1] == '(': nesting += 1
-                    if tokens[end][1] == ')': nesting -= 1
-            end += 1
-            assert end < real_end
-        # join field, excluding separator and trailing ws
-        end1 = end - 1
-        while tokens[end1][0] in ('ws', 'comment'):
-            end1 -= 1
-        fields.append(''.join(t[1] for t in tokens[pos:end1+1]))
-        if tokens[end][1] == '}':
-            break
-        pos = end + 1
-    return name, fields
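
A hypothetical input/output pair for get_fields() (a sketch that assumes the script's tokenizer regex and get_fields() are in scope, e.g. the script pasted into an interactive session; Foo_Type and FooObject are invented names):

    source = '''static PyTypeObject Foo_Type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        "foo.Foo",              /* tp_name */
        sizeof(FooObject),      /* tp_basicsize */
        0                       /* tp_itemsize */
    };'''
    tokens = [[m.lastgroup, m.group()] for m in tokenizer.finditer(source)]
    name, fields = get_fields(0, len(tokens) - 1)
    print(name)      # -> Foo_Type
    print(fields)    # -> ['"foo.Foo"', 'sizeof(FooObject)', '0']
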
-
-# List of type slots as of Python 3.2, omitting ob_base
-typeslots = [
-    'tp_name',
-    'tp_basicsize',
-    'tp_itemsize',
-    'tp_dealloc',
-    'tp_print',
-    'tp_getattr',
-    'tp_setattr',
-    'tp_reserved',
-    'tp_repr',
-    'tp_as_number',
-    'tp_as_sequence',
-    'tp_as_mapping',
-    'tp_hash',
-    'tp_call',
-    'tp_str',
-    'tp_getattro',
-    'tp_setattro',
-    'tp_as_buffer',
-    'tp_flags',
-    'tp_doc',
-    'tp_traverse',
-    'tp_clear',
-    'tp_richcompare',
-    'tp_weaklistoffset',
-    'tp_iter',
-    'tp_iternext',
-    'tp_methods',
-    'tp_members',
-    'tp_getset',
-    'tp_base',
-    'tp_dict',
-    'tp_descr_get',
-    'tp_descr_set',
-    'tp_dictoffset',
-    'tp_init',
-    'tp_alloc',
-    'tp_new',
-    'tp_free',
-    'tp_is_gc',
-    'tp_bases',
-    'tp_mro',
-    'tp_cache',
-    'tp_subclasses',
-    'tp_weaklist',
-    'tp_del',
-    'tp_version_tag',
-]
-
-# Generate a PyType_Spec definition
-def make_slots(name, fields):
-    res = []
-    res.append('static PyType_Slot %s_slots[] = {' % name)
-    # defaults for spec
-    spec = {'tp_itemsize': '0'}
-    for i, val in enumerate(fields):
-        if val.endswith('0'):
-            continue
-        if typeslots[i] in ('tp_name', 'tp_doc', 'tp_basicsize',
-                            'tp_itemsize', 'tp_flags'):
-            spec[typeslots[i]] = val
-            continue
-        res.append('    {Py_%s, %s},' % (typeslots[i], val))
-    res.append('};')
-    res.append('static PyType_Spec %s_spec = {' % name)
-    res.append('    %s,' % spec['tp_name'])
-    res.append('    %s,' % spec['tp_basicsize'])
-    res.append('    %s,' % spec['tp_itemsize'])
-    res.append('    %s,' % spec['tp_flags'])
-    res.append('    %s_slots,' % name)
-    res.append('};\n')
-    return '\n'.join(res)
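
What make_slots() generates for a hypothetical Foo_Type whose only non-default slot is tp_dealloc (a sketch assuming make_slots() and typeslots above are in scope; all names are invented):

    fields = (['"foo.Foo"', 'sizeof(FooObject)', '0', '(destructor)foo_dealloc']
              + ['0'] * 14 + ['Py_TPFLAGS_DEFAULT'])
    print(make_slots('Foo_Type', fields))
    # static PyType_Slot Foo_Type_slots[] = {
    #     {Py_tp_dealloc, (destructor)foo_dealloc},
    # };
    # static PyType_Spec Foo_Type_spec = {
    #     "foo.Foo",
    #     sizeof(FooObject),
    #     0,
    #     Py_TPFLAGS_DEFAULT,
    #     Foo_Type_slots,
    # };
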
-
-
-if __name__ == '__main__':
-
-    ############ Simplistic C scanner ##################################
-    tokenizer = re.compile(
-        r"(?P<preproc>#.*\n)"
-        r"|(?P<comment>/\*.*?\*/)"
-        r"|(?P<ident>[a-zA-Z_][a-zA-Z0-9_]*)"
-        r"|(?P<ws>[ \t\n]+)"
-        r"|(?P<other>.)",
-        re.MULTILINE)
-
-    tokens = []
-    source = sys.stdin.read()
-    pos = 0
-    while pos != len(source):
-        m = tokenizer.match(source, pos)
-        tokens.append([m.lastgroup, m.group()])
-        pos += len(tokens[-1][1])
-        if tokens[-1][0] == 'preproc':
-            # continuation lines are considered
-            # only in preprocessor directives
-            while tokens[-1][1].endswith('\\\n'):
-                nl = source.find('\n', pos)
-                if nl == -1:
-                    line = source[pos:]
-                else:
-                    line = source[pos:nl+1]
-                tokens[-1][1] += line
-                pos += len(line)
-
-    # Main loop: replace all static PyTypeObjects until
-    # there are none left.
-    while True:
-        c = classify()
-        m = re.search('(SW)?TWIW?=W?{.*?};', c)
-        if not m:
-            break
-        start = m.start()
-        end = m.end()
-        name, fields = get_fields(start, end)
-        tokens[start:end] = [('', make_slots(name, fields))]
-
-    # Output result to stdout
-    for t, v in tokens:
-        sys.stdout.write(v)
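
To see the main-loop pattern in action (a standalone sketch, not part of the original file), the classification string from the classify() example above is matched like this:

    import re

    c = 'SWTWIW=W{WI.I.W..W};'              # classify() output for a static type
    m = re.search('(SW)?TWIW?=W?{.*?};', c)
    print(m.start(), m.end(), m.group())    # -> 0 20 SWTWIW=W{WI.I.W..W};
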
diff --git a/Tools/scripts/pep384_macrocheck.py b/Tools/scripts/pep384_macrocheck.py
deleted file mode 100644
index ab9dd7c..0000000
--- a/Tools/scripts/pep384_macrocheck.py
+++ /dev/null
@@ -1,148 +0,0 @@
-"""
-pep384_macrocheck.py
-
-This program tries to locate errors in the relevant Python header
-files: macros that access type fields although they are reachable
-from the limited API.
-
-The idea is to search for macros that contain the string "->tp_".
-When such a macro's name does not begin with an underscore,
-we have found a dormant error.
-
-Christian Tismer
-2018-06-02
-"""
-
-import sys
-import os
-import re
-
-
-DEBUG = False
-
-def dprint(*args, **kw):
-    if DEBUG:
-        print(*args, **kw)
-
-def parse_headerfiles(startpath):
-    """
-    Scan all header files that are reachable from Python.h
-    """
-    search = "Python.h"
-    name = os.path.join(startpath, search)
-    if not os.path.exists(name):
-        raise ValueError("file {} was not found in {}\n"
-                         "Please give the path to Python's include directory."
-                         .format(search, startpath))
-    errors = 0
-    with open(name) as python_h:
-        while True:
-            line = python_h.readline()
-            if not line:
-                break
-            found = re.match(r'^\s*#\s*include\s*"(\w+\.h)"', line)
-            if not found:
-                continue
-            include = found.group(1)
-            dprint("Scanning", include)
-            name = os.path.join(startpath, include)
-            if not os.path.exists(name):
-                name = os.path.join(startpath, "../PC", include)
-            errors += parse_file(name)
-    return errors
-
-def ifdef_level_gen():
-    """
-    Scan lines for #ifdef and track the level.
-    """
-    level = 0
-    ifdef_pattern = r"^\s*#\s*if"    # covers ifdef and ifndef as well
-    endif_pattern = r"^\s*#\s*endif"
-    while True:
-        line = yield level
-        if re.match(ifdef_pattern, line):
-            level += 1
-        elif re.match(endif_pattern, line):
-            level -= 1
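
Driving the generator by hand (a sketch assuming ifdef_level_gen() above is in scope):

    gen = ifdef_level_gen()
    next(gen)                  # prime the generator; the level starts at 0
    for line in ['#ifdef Py_LIMITED_API\n', 'int x;\n', '#endif\n']:
        print(gen.send(line))  # -> 1, then 1, then 0
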
-
-def limited_gen():
-    """
-    Scan lines for Py_LIMITED_API: yes (1), no (-1), or unknown (0).
-    """
-    limited = [0]    # unknown
-    unlimited_pattern = r"^\s*#\s*ifndef\s+Py_LIMITED_API"
-    limited_pattern = "|".join([
-        r"^\s*#\s*ifdef\s+Py_LIMITED_API",
-        r"^\s*#\s*(el)?if\s+!\s*defined\s*\(\s*Py_LIMITED_API\s*\)\s*\|\|",
-        r"^\s*#\s*(el)?if\s+defined\s*\(\s*Py_LIMITED_API"
-    ])
-    else_pattern = r"^\s*#\s*else"
-    ifdef_level = ifdef_level_gen()
-    status = next(ifdef_level)
-    wait_for = -1
-    while True:
-        line = yield limited[-1]
-        new_status = ifdef_level.send(line)
-        direction = new_status - status
-        status = new_status
-        if direction == 1:
-            if re.match(unlimited_pattern, line):
-                limited.append(-1)
-                wait_for = status - 1
-            elif re.match(limited_pattern, line):
-                limited.append(1)
-                wait_for = status - 1
-        elif direction == -1:
-            # this must have been an endif
-            if status == wait_for:
-                limited.pop()
-                wait_for = -1
-        else:
-            # it could be that we have an elif
-            if re.match(limited_pattern, line):
-                limited.append(1)
-                wait_for = status - 1
-            elif re.match(else_pattern, line):
-                limited.append(-limited.pop())    # negate the top entry
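
The same kind of hand-driven check for limited_gen() (a sketch assuming the function above is in scope); note how #else flips the state of the current block:

    gen = limited_gen()
    next(gen)                                      # prime; starts at 0 (unknown)
    print(gen.send('#ifndef Py_LIMITED_API\n'))    # -> -1 (unlimited branch)
    print(gen.send('#else\n'))                     # ->  1 (the else branch is limited)
    print(gen.send('#endif\n'))                    # ->  0 (back to unknown)
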
-
-def parse_file(fname):
-    errors = 0
-    with open(fname) as f:
-        lines = f.readlines()
-    type_pattern = r"^.*?->\s*tp_"
-    define_pattern = r"^\s*#\s*define\s+(\w+)"
-    limited = limited_gen()
-    status = next(limited)
-    for nr, line in enumerate(lines):
-        status = limited.send(line)
-        line = line.rstrip()
-        dprint(fname, nr, status, line)
-        if status != -1:
-            if re.match(define_pattern, line):
-                name = re.match(define_pattern, line).group(1)
-                if not name.startswith("_"):
-                    # found a candidate, check it!
-                    macro = line + "\n"
-                    idx = nr
-                    while line.endswith("\\"):
-                        idx += 1
-                        line = lines[idx].rstrip()
-                        macro += line + "\n"
-                    if re.match(type_pattern, macro, re.DOTALL):
-                        # this type field can reach the limited API
-                        report(fname, nr + 1, macro)
-                        errors += 1
-    return errors
-
-def report(fname, nr, macro):
-    f = sys.stderr
-    print(fname + ":" + str(nr), file=f)
-    print(macro, file=f)
-
-if __name__ == "__main__":
-    p = sys.argv[1] if sys.argv[1:] else "../../Include"
-    errors = parse_headerfiles(p)
-    if errors:
-        # somehow it makes sense to raise a TypeError :-)
-        raise TypeError("These {} locations contradict the limited API."
-                        .format(errors))