summary refs log tree commit diff stats
path: root/Tools
diff options
context:
space:
mode:
authorPetr Viktorin <encukou@gmail.com>2024-06-24 18:24:19 (GMT)
committerGitHub <noreply@github.com>2024-06-24 18:24:19 (GMT)
commit9769b7ae064a0546a98cbcbec2561dbaba20cd23 (patch)
tree1b953e866faca7527099a68daf362e29501a7680 /Tools
parent447e07ab3d569bb4b2209ccfe3889fafa3ad6693 (diff)
downloadcpython-9769b7ae064a0546a98cbcbec2561dbaba20cd23.zip
cpython-9769b7ae064a0546a98cbcbec2561dbaba20cd23.tar.gz
cpython-9769b7ae064a0546a98cbcbec2561dbaba20cd23.tar.bz2
[3.13] gh-113993: Allow interned strings to be mortal, and fix related issues (GH-120520) (GH-120945)
* Add an InternalDocs file describing how interning should work and how to use it. * Add internal functions to *explicitly* request what kind of interning is done: - `_PyUnicode_InternMortal` - `_PyUnicode_InternImmortal` - `_PyUnicode_InternStatic` * Switch uses of `PyUnicode_InternInPlace` to those. * Disallow using `_Py_SetImmortal` on strings directly. You should use `_PyUnicode_InternImmortal` instead: - Strings should be interned before immortalization, otherwise you're possibly interning an immortalizing copy. - `_Py_SetImmortal` doesn't handle the `SSTATE_INTERNED_MORTAL` to `SSTATE_INTERNED_IMMORTAL` update, and those flags can't be changed in backports, as they are now part of public API and version-specific ABI. * Add private `_only_immortal` argument for `sys.getunicodeinternedsize`, used in refleak test machinery. * Make sure the statically allocated string singletons are unique. This means these sets are now disjoint: - `_Py_ID` - `_Py_STR` (including the empty string) - one-character latin-1 singletons Now, when you intern a singleton, that exact singleton will be interned. * Add a `_Py_LATIN1_CHR` macro, use it instead of `_Py_ID`/`_Py_STR` for one-character latin-1 singletons everywhere (including Clinic). * Intern `_Py_STR` singletons at startup. * For free-threaded builds, intern `_Py_LATIN1_CHR` singletons at startup. * Beef up the tests. Cover internal details (marked with `@cpython_only`). * Add lots of assertions Co-authored-by: Eric Snow <ericsnowcurrently@gmail.com>
Diffstat (limited to 'Tools')
-rw-r--r--Tools/build/generate_global_objects.py25
-rw-r--r--Tools/clinic/libclinic/clanguage.py16
2 files changed, 36 insertions, 5 deletions
diff --git a/Tools/build/generate_global_objects.py b/Tools/build/generate_global_objects.py
index 33d1b32..882918f 100644
--- a/Tools/build/generate_global_objects.py
+++ b/Tools/build/generate_global_objects.py
@@ -370,9 +370,14 @@ def generate_static_strings_initializer(identifiers, strings):
# This use of _Py_ID() is ignored by iter_global_strings()
# since iter_files() ignores .h files.
printer.write(f'string = &_Py_ID({i});')
+ printer.write(f'_PyUnicode_InternStatic(interp, &string);')
printer.write(f'assert(_PyUnicode_CheckConsistency(string, 1));')
- printer.write(f'_PyUnicode_InternInPlace(interp, &string);')
- # XXX What about "strings"?
+ printer.write(f'assert(PyUnicode_GET_LENGTH(string) != 1);')
+ for value, name in sorted(strings.items()):
+ printer.write(f'string = &_Py_STR({name});')
+ printer.write(f'_PyUnicode_InternStatic(interp, &string);')
+ printer.write(f'assert(_PyUnicode_CheckConsistency(string, 1));')
+ printer.write(f'assert(PyUnicode_GET_LENGTH(string) != 1);')
printer.write(END)
printer.write(after)
@@ -414,15 +419,31 @@ def generate_global_object_finalizers(generated_immortal_objects):
def get_identifiers_and_strings() -> 'tuple[set[str], dict[str, str]]':
identifiers = set(IDENTIFIERS)
strings = {}
+ # Note that we store strings as they appear in C source, so the checks here
+ # can be defeated, e.g.:
+ # - "a" and "\0x61" won't be reported as duplicate.
+ # - "\n" appears as 2 characters.
+ # Probably not worth adding a C string parser.
for name, string, *_ in iter_global_strings():
if string is None:
if name not in IGNORED:
identifiers.add(name)
else:
+ if len(string) == 1 and ord(string) < 256:
+ # Give a nice message for common mistakes.
+ # To cover tricky cases (like "\n") we also generate C asserts.
+ raise ValueError(
+ 'do not use &_PyID or &_Py_STR for one-character latin-1 '
+ + f'strings, use _Py_LATIN1_CHR instead: {string!r}')
if string not in strings:
strings[string] = name
elif name != strings[string]:
raise ValueError(f'string mismatch for {name!r} ({string!r} != {strings[name]!r}')
+ overlap = identifiers & set(strings.keys())
+ if overlap:
+ raise ValueError(
+ 'do not use both _PyID and _Py_DECLARE_STR for the same string: '
+ + repr(overlap))
return identifiers, strings
diff --git a/Tools/clinic/libclinic/clanguage.py b/Tools/clinic/libclinic/clanguage.py
index 10efedd..73d4783 100644
--- a/Tools/clinic/libclinic/clanguage.py
+++ b/Tools/clinic/libclinic/clanguage.py
@@ -21,6 +21,16 @@ if TYPE_CHECKING:
from libclinic.app import Clinic
+def c_id(name: str) -> str:
+ if len(name) == 1 and ord(name) < 256:
+ if name.isalnum():
+ return f"_Py_LATIN1_CHR('{name}')"
+ else:
+ return f'_Py_LATIN1_CHR({ord(name)})'
+ else:
+ return f'&_Py_ID({name})'
+
+
class CLanguage(Language):
body_prefix = "#"
@@ -167,11 +177,11 @@ class CLanguage(Language):
if argname_fmt:
conditions.append(f"nargs < {i+1} && {argname_fmt % i}")
elif fastcall:
- conditions.append(f"nargs < {i+1} && PySequence_Contains(kwnames, &_Py_ID({p.name}))")
+ conditions.append(f"nargs < {i+1} && PySequence_Contains(kwnames, {c_id(p.name)})")
containscheck = "PySequence_Contains"
codegen.add_include('pycore_runtime.h', '_Py_ID()')
else:
- conditions.append(f"nargs < {i+1} && PyDict_Contains(kwargs, &_Py_ID({p.name}))")
+ conditions.append(f"nargs < {i+1} && PyDict_Contains(kwargs, {c_id(p.name)})")
containscheck = "PyDict_Contains"
codegen.add_include('pycore_runtime.h', '_Py_ID()')
else:
@@ -459,7 +469,7 @@ class CLanguage(Language):
template_dict['keywords_c'] = ' '.join('"' + k + '",'
for k in data.keywords)
keywords = [k for k in data.keywords if k]
- template_dict['keywords_py'] = ' '.join('&_Py_ID(' + k + '),'
+ template_dict['keywords_py'] = ' '.join(c_id(k) + ','
for k in keywords)
template_dict['format_units'] = ''.join(data.format_units)
template_dict['parse_arguments'] = ', '.join(data.parse_arguments)