/* System module */

/*
Various bits of information used by the interpreter are collected in
module 'sys'.
Function member:
- exit(sts): raise SystemExit
Data members:
- stdin, stdout, stderr: standard file objects
- modules: the table of modules (dictionary)
- path: module search path (list of strings)
- argv: script arguments (list of strings)
- ps1, ps2: optional primary and secondary prompts (strings)
*/

#include "Python.h"
#include "pycore_call.h"          // _PyObject_CallNoArgs()
#include "pycore_ceval.h"         // _PyEval_SetAsyncGenFinalizer()
#include "pycore_dict.h"          // _PyDict_GetItemWithError()
#include "pycore_frame.h"         // _PyInterpreterFrame
#include "pycore_initconfig.h"    // _PyStatus_EXCEPTION()
#include "pycore_long.h"          // _PY_LONG_MAX_STR_DIGITS_THRESHOLD
#include "pycore_modsupport.h"    // _PyModule_CreateInitialized()
#include "pycore_namespace.h"     // _PyNamespace_New()
#include "pycore_object.h"        // _PyObject_DebugTypeStats()
#include "pycore_pathconfig.h"    // _PyPathConfig_ComputeSysPath0()
#include "pycore_pyerrors.h"      // _PyErr_GetRaisedException()
#include "pycore_pylifecycle.h"   // _PyErr_WriteUnraisableDefaultHook()
#include "pycore_pymath.h"        // _PY_SHORT_FLOAT_REPR
#include "pycore_pymem.h"         // _PyMem_SetDefaultAllocator()
#include "pycore_pystate.h"       // _PyThreadState_GET()
#include "pycore_pystats.h"       // _Py_PrintSpecializationStats()
#include "pycore_structseq.h"     // _PyStructSequence_InitBuiltinWithFlags()
#include "pycore_sysmodule.h"     // export _PySys_GetSizeOf()
#include "pycore_tuple.h"         // _PyTuple_FromArray()

#include "frameobject.h"          // PyFrame_FastToLocalsWithError()
#include "pydtrace.h"             // PyDTrace_AUDIT()
#include "osdefs.h"               // DELIM
#include "stdlib_module_names.h"  // _Py_stdlib_module_names

/* NOTE(review): the operands of the conditional #include directives below
   had been lost ("# include" with nothing after it does not preprocess);
   restored to the standard CPython header names. */
#ifdef HAVE_UNISTD_H
#  include <unistd.h>             // getpid()
#endif

#ifdef MS_WINDOWS
#  define WIN32_LEAN_AND_MEAN
#  include <windows.h>
#endif /* MS_WINDOWS */

#ifdef MS_COREDLL
extern void *PyWin_DLLhModule;
/* A string loaded from the DLL at startup: */
extern const char *PyWin_DLLVersionString;
#endif

#ifdef __EMSCRIPTEN__
#  include <emscripten.h>
#endif

#ifdef HAVE_FCNTL_H
#  include <fcntl.h>
#endif

/*[clinic input]
module sys
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=3726b388feee8cea]*/

#include "clinic/sysmodule.c.h"


/* Look up 'name' in the sys module's dict, returning a BORROWED reference
   (or NULL).  Any exception already raised on 'tstate' is saved and
   restored around the lookup so the caller's error state is preserved. */
PyObject *
_PySys_GetAttr(PyThreadState *tstate, PyObject *name)
{
    PyObject *sd = tstate->interp->sysdict;
    if (sd == NULL) {
        return NULL;
    }
    PyObject *exc = _PyErr_GetRaisedException(tstate);
    /* XXX Suppress a new exception if it was raised and restore
     * the old one. */
    PyObject *value = _PyDict_GetItemWithError(sd, name);
    _PyErr_SetRaisedException(tstate, exc);
    return value;
}

/* Like _PySys_GetAttr() but takes a UTF-8 name.  Returns a borrowed
   reference; NULL on missing key or error. */
static PyObject *
_PySys_GetObject(PyInterpreterState *interp, const char *name)
{
    PyObject *sysdict = interp->sysdict;
    if (sysdict == NULL) {
        return NULL;
    }
    PyObject *value;
    if (PyDict_GetItemStringRef(sysdict, name, &value) != 1) {
        return NULL;
    }
    Py_DECREF(value);  // return a borrowed reference
    return value;
}

/* Public API: borrowed reference to sys.<name>, never raises (errors are
   reported as unraisable and the previous exception state is restored). */
PyObject *
PySys_GetObject(const char *name)
{
    PyThreadState *tstate = _PyThreadState_GET();

    PyObject *exc = _PyErr_GetRaisedException(tstate);
    PyObject *value = _PySys_GetObject(tstate->interp, name);
    /* XXX Suppress a new exception if it was raised and restore
     * the old one. */
    if (_PyErr_Occurred(tstate)) {
        PyErr_FormatUnraisable("Exception ignored in PySys_GetObject()");
    }
    _PyErr_SetRaisedException(tstate, exc);
    return value;
}

/* Set sys.<key> = v; v == NULL deletes the key (missing key is not an
   error).  Returns 0 on success, -1 on error. */
static int
sys_set_object(PyInterpreterState *interp, PyObject *key, PyObject *v)
{
    if (key == NULL) {
        return -1;
    }
    PyObject *sd = interp->sysdict;
    if (v == NULL) {
        if (PyDict_Pop(sd, key, NULL) < 0) {
            return -1;
        }
        return 0;
    }
    else {
        return PyDict_SetItem(sd, key, v);
    }
}

int
_PySys_SetAttr(PyObject *key, PyObject *v)
{
    PyInterpreterState *interp = _PyInterpreterState_GET();
    return sys_set_object(interp, key, v);
}

/* Set sys.<name> = v from a C string name.  The key is interned only when
   actually storing a value (deletion does not need an interned key). */
static int
sys_set_object_str(PyInterpreterState *interp, const char *name, PyObject *v)
{
    PyObject *key = v ? PyUnicode_InternFromString(name)
                      : PyUnicode_FromString(name);
    int r = sys_set_object(interp, key, v);
    Py_XDECREF(key);
    return r;
}

int
PySys_SetObject(const char *name, PyObject *v)
{
    PyInterpreterState *interp = _PyInterpreterState_GET();
    return sys_set_object_str(interp, name, v);
}

/* During finalization, "clear" sys.<name> by overwriting it with None. */
int
_PySys_ClearAttrString(PyInterpreterState *interp,
                       const char *name, int verbose)
{
    if (verbose) {
        PySys_WriteStderr("# clear sys.%s\n", name);
    }
    /* To play it safe, we set the attr to None instead of deleting it. */
    if (PyDict_SetItemString(interp->sysdict, name, Py_None) < 0) {
        return -1;
    }
    return 0;
}


/* True if any audit hook (global C hooks, per-interpreter Python hooks,
   or a DTrace probe) is active, so events must be dispatched. */
static int
should_audit(PyInterpreterState *interp)
{
    /* interp must not be NULL, but test it just in case for extra safety */
    assert(interp != NULL);
    if (!interp) {
        return 0;
    }
    return (interp->runtime->audit_hooks.head
            || interp->audit_hooks
            || PyDTrace_AUDIT_ENABLED());
}


/* Core audit-event dispatcher: builds the argument tuple from 'argFormat'
   and invokes C hooks, the DTrace probe, then Python-level hooks.
   Returns 0 on success, -1 with an exception set if a hook aborted. */
static int
sys_audit_tstate(PyThreadState *ts, const char *event,
                 const char *argFormat, va_list vargs)
{
    assert(event != NULL);
    assert(!argFormat || !strchr(argFormat, 'N'));

    if (!ts) {
        /* Audit hooks cannot be called with a NULL thread state */
        return 0;
    }

    /* The current implementation cannot be called if tstate is not
       the current Python thread state.
    */
    assert(ts == _PyThreadState_GET());

    /* Early exit when no hooks are registered */
    PyInterpreterState *is = ts->interp;
    if (!should_audit(is)) {
        return 0;
    }

    PyObject *eventName = NULL;
    PyObject *eventArgs = NULL;
    PyObject *hooks = NULL;
    PyObject *hook = NULL;
    int res = -1;

    int dtrace = PyDTrace_AUDIT_ENABLED();

    /* Save any in-flight exception; it is restored on success below. */
    PyObject *exc = _PyErr_GetRaisedException(ts);

    /* Initialize event args now */
    if (argFormat && argFormat[0]) {
        eventArgs = Py_VaBuildValue(argFormat, vargs);
        if (eventArgs && !PyTuple_Check(eventArgs)) {
            /* Hooks always receive a tuple; wrap a single value. */
            PyObject *argTuple = PyTuple_Pack(1, eventArgs);
            Py_SETREF(eventArgs, argTuple);
        }
    }
    else {
        eventArgs = PyTuple_New(0);
    }
    if (!eventArgs) {
        goto exit;
    }

    /* Call global hooks
     *
     * We don't worry about any races on hooks getting added,
     * since that would not leave is in an inconsistent state. */
    _Py_AuditHookEntry *e = is->runtime->audit_hooks.head;
    for (; e; e = e->next) {
        if (e->hookCFunction(event, eventArgs, e->userData) < 0) {
            goto exit;
        }
    }

    /* Dtrace USDT point */
    if (dtrace) {
        PyDTrace_AUDIT(event, (void *)eventArgs);
    }

    /* Call interpreter hooks */
    if (is->audit_hooks) {
        eventName = PyUnicode_FromString(event);
        if (!eventName) {
            goto exit;
        }

        hooks = PyObject_GetIter(is->audit_hooks);
        if (!hooks) {
            goto exit;
        }

        /* Disallow tracing in hooks unless explicitly enabled */
        PyThreadState_EnterTracing(ts);
        while ((hook = PyIter_Next(hooks)) != NULL) {
            PyObject *o;
            /* A hook may opt back in to tracing via __cantrace__. */
            int canTrace = PyObject_GetOptionalAttr(hook,
                                                    &_Py_ID(__cantrace__), &o);
            if (o) {
                canTrace = PyObject_IsTrue(o);
                Py_DECREF(o);
            }
            if (canTrace < 0) {
                break;
            }
            if (canTrace) {
                PyThreadState_LeaveTracing(ts);
            }
            PyObject* args[2] = {eventName, eventArgs};
            o = _PyObject_VectorcallTstate(ts, hook, args, 2, NULL);
            if (canTrace) {
                PyThreadState_EnterTracing(ts);
            }
            if (!o) {
                break;
            }
            Py_DECREF(o);
            Py_CLEAR(hook);
        }
        PyThreadState_LeaveTracing(ts);
        if (_PyErr_Occurred(ts)) {
            goto exit;
        }
    }

    res = 0;

exit:
    Py_XDECREF(hook);
    Py_XDECREF(hooks);
    Py_XDECREF(eventName);
    Py_XDECREF(eventArgs);

    if (!res) {
        /* Success: put back the exception that was live on entry. */
        _PyErr_SetRaisedException(ts, exc);
    }
    else {
        /* A hook failed: its exception wins; drop the saved one. */
        assert(_PyErr_Occurred(ts));
        Py_XDECREF(exc);
    }

    return res;
}

/* Varargs front-end to sys_audit_tstate() with an explicit tstate. */
int
_PySys_Audit(PyThreadState *tstate, const char *event,
             const char *argFormat, ...)
{
    va_list vargs;
    va_start(vargs, argFormat);
    int res = sys_audit_tstate(tstate, event, argFormat, vargs);
    va_end(vargs);
    return res;
}

/* Public varargs audit API; uses the calling thread's state. */
int
PySys_Audit(const char *event, const char *argFormat, ...)
{
    PyThreadState *tstate = _PyThreadState_GET();
    va_list vargs;
    va_start(vargs, argFormat);
    int res = sys_audit_tstate(tstate, event, argFormat, vargs);
    va_end(vargs);
    return res;
}

/* Public audit API taking a pre-built tuple (or NULL for no args). */
int
PySys_AuditTuple(const char *event, PyObject *args)
{
    if (args == NULL) {
        return PySys_Audit(event, NULL);
    }

    if (!PyTuple_Check(args)) {
        PyErr_Format(PyExc_TypeError, "args must be tuple, got %s",
                     Py_TYPE(args)->tp_name);
        return -1;
    }
    return PySys_Audit(event, "O", args);
}

/* We expose this function primarily for our own cleanup during
 * finalization. In general, it should not need to be called,
 * and as such the function is not exported.
 *
 * Must be finalizing to clear hooks */
void
_PySys_ClearAuditHooks(PyThreadState *ts)
{
    assert(ts != NULL);
    if (!ts) {
        return;
    }

    _PyRuntimeState *runtime = ts->interp->runtime;
    /* The hooks are global so we have to check for runtime finalization. */
    PyThreadState *finalizing = _PyRuntimeState_GetFinalizing(runtime);
    assert(finalizing == ts);
    if (finalizing != ts) {
        return;
    }

    const PyConfig *config = _PyInterpreterState_GetConfig(ts->interp);
    if (config->verbose) {
        PySys_WriteStderr("# clear sys.audit hooks\n");
    }

    /* Hooks can abort later hooks for this event, but cannot
       abort the clear operation itself. */
    _PySys_Audit(ts, "cpython._PySys_ClearAuditHooks", NULL);
    _PyErr_Clear(ts);

    /* We don't worry about the very unlikely race right here,
     * since it's entirely benign.  Nothing else removes entries
     * from the list and adding an entry right now would not cause
     * any trouble.
*/ _Py_AuditHookEntry *e = runtime->audit_hooks.head, *n; runtime->audit_hooks.head = NULL; while (e) { n = e->next; PyMem_RawFree(e); e = n; } } static void add_audit_hook_entry_unlocked(_PyRuntimeState *runtime, _Py_AuditHookEntry *entry) { if (runtime->audit_hooks.head == NULL) { runtime->audit_hooks.head = entry; } else { _Py_AuditHookEntry *last = runtime->audit_hooks.head; while (last->next) { last = last->next; } last->next = entry; } } int PySys_AddAuditHook(Py_AuditHookFunction hook, void *userData) { /* tstate can be NULL, so access directly _PyRuntime: PySys_AddAuditHook() can be called before Python is initialized. */ _PyRuntimeState *runtime = &_PyRuntime; PyThreadState *tstate; if (runtime->initialized) { tstate = _PyThreadState_GET(); } else { tstate = NULL; } /* Invoke existing audit hooks to allow them an opportunity to abort. */ /* Cannot invoke hooks until we are initialized */ if (tstate != NULL) { if (_PySys_Audit(tstate, "sys.addaudithook", NULL) < 0) { if (_PyErr_ExceptionMatches(tstate, PyExc_RuntimeError)) { /* We do not report errors derived from RuntimeError */ _PyErr_Clear(tstate); return 0; } return -1; } } _Py_AuditHookEntry *e = (_Py_AuditHookEntry*)PyMem_RawMalloc( sizeof(_Py_AuditHookEntry)); if (!e) { if (tstate != NULL) { _PyErr_NoMemory(tstate); } return -1; } e->next = NULL; e->hookCFunction = (Py_AuditHookFunction)hook; e->userData = userData; PyMutex_Lock(&runtime->audit_hooks.mutex); add_audit_hook_entry_unlocked(runtime, e); PyMutex_Unlock(&runtime->audit_hooks.mutex); return 0; } /*[clinic input] sys.addaudithook hook: object Adds a new audit hook callback. [clinic start generated code]*/ static PyObject * sys_addaudithook_impl(PyObject *module, PyObject *hook) /*[clinic end generated code: output=4f9c17aaeb02f44e input=0f3e191217a45e34]*/ { PyThreadState *tstate = _PyThreadState_GET(); /* Invoke existing audit hooks to allow them an opportunity to abort. 
*/ if (_PySys_Audit(tstate, "sys.addaudithook", NULL) < 0) { if (_PyErr_ExceptionMatches(tstate, PyExc_Exception)) { /* We do not report errors derived from Exception */ _PyErr_Clear(tstate); Py_RETURN_NONE; } return NULL; } PyInterpreterState *interp = tstate->interp; if (interp->audit_hooks == NULL) { interp->audit_hooks = PyList_New(0); if (interp->audit_hooks == NULL) { return NULL; } /* Avoid having our list of hooks show up in the GC module */ PyObject_GC_UnTrack(interp->audit_hooks); } if (PyList_Append(interp->audit_hooks, hook) < 0) { return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(audit_doc, "audit(event, *args)\n\ \n\ Passes the event to any audit hooks that are attached."); static PyObject * sys_audit(PyObject *self, PyObject *const *args, Py_ssize_t argc) { PyThreadState *tstate = _PyThreadState_GET(); _Py_EnsureTstateNotNULL(tstate); if (argc == 0) { _PyErr_SetString(tstate, PyExc_TypeError, "audit() missing 1 required positional argument: " "'event'"); return NULL; } assert(args[0] != NULL); assert(PyUnicode_Check(args[0])); if (!should_audit(tstate->interp)) { Py_RETURN_NONE; } PyObject *auditEvent = args[0]; if (!auditEvent) { _PyErr_SetString(tstate, PyExc_TypeError, "expected str for argument 'event'"); return NULL; } if (!PyUnicode_Check(auditEvent)) { _PyErr_Format(tstate, PyExc_TypeError, "expected str for argument 'event', not %.200s", Py_TYPE(auditEvent)->tp_name); return NULL; } const char *event = PyUnicode_AsUTF8(auditEvent); if (!event) { return NULL; } PyObject *auditArgs = _PyTuple_FromArray(args + 1, argc - 1); if (!auditArgs) { return NULL; } int res = _PySys_Audit(tstate, event, "O", auditArgs); Py_DECREF(auditArgs); if (res < 0) { return NULL; } Py_RETURN_NONE; } static PyObject * sys_breakpointhook(PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *keywords) { PyThreadState *tstate = _PyThreadState_GET(); assert(!_PyErr_Occurred(tstate)); char *envar = Py_GETENV("PYTHONBREAKPOINT"); if (envar == NULL || strlen(envar) 
== 0) {
        envar = "pdb.set_trace";
    }
    else if (!strcmp(envar, "0")) {
        /* The breakpoint is explicitly no-op'd. */
        Py_RETURN_NONE;
    }
    /* According to POSIX the string returned by getenv() might be invalidated
     * or the string content might be overwritten by a subsequent call to
     * getenv().  Since importing a module can perform the getenv() calls,
     * we need to save a copy of envar. */
    envar = _PyMem_RawStrdup(envar);
    if (envar == NULL) {
        _PyErr_NoMemory(tstate);
        return NULL;
    }
    const char *last_dot = strrchr(envar, '.');
    const char *attrname = NULL;
    PyObject *modulepath = NULL;

    if (last_dot == NULL) {
        /* The breakpoint is a built-in, e.g. PYTHONBREAKPOINT=int */
        modulepath = PyUnicode_FromString("builtins");
        attrname = envar;
    }
    else if (last_dot != envar) {
        /* Split on the last dot; */
        modulepath = PyUnicode_FromStringAndSize(envar, last_dot - envar);
        attrname = last_dot + 1;
    }
    else {
        /* Leading dot (e.g. ".foo"): not importable, warn and ignore. */
        goto warn;
    }
    if (modulepath == NULL) {
        PyMem_RawFree(envar);
        return NULL;
    }

    PyObject *module = PyImport_Import(modulepath);
    Py_DECREF(modulepath);

    if (module == NULL) {
        if (_PyErr_ExceptionMatches(tstate, PyExc_ImportError)) {
            goto warn;
        }
        PyMem_RawFree(envar);
        return NULL;
    }

    PyObject *hook = PyObject_GetAttrString(module, attrname);
    Py_DECREF(module);

    if (hook == NULL) {
        if (_PyErr_ExceptionMatches(tstate, PyExc_AttributeError)) {
            goto warn;
        }
        PyMem_RawFree(envar);
        return NULL;
    }
    PyMem_RawFree(envar);
    PyObject *retval = PyObject_Vectorcall(hook, args, nargs, keywords);
    Py_DECREF(hook);
    return retval;

  warn:
    /* If any of the imports went wrong, then warn and ignore. */
    _PyErr_Clear(tstate);
    int status = PyErr_WarnFormat(
        PyExc_RuntimeWarning, 0,
        "Ignoring unimportable $PYTHONBREAKPOINT: \"%s\"", envar);
    PyMem_RawFree(envar);
    if (status < 0) {
        /* Printing the warning raised an exception. */
        return NULL;
    }
    /* The warning was (probably) issued. */
    Py_RETURN_NONE;
}

PyDoc_STRVAR(breakpointhook_doc,
"breakpointhook(*args, **kws)\n"
"\n"
"This hook function is called by built-in breakpoint().\n"
);

/* Write repr(o) to sys.stdout using sys.stdout.encoding and
   'backslashreplace' error handler.  If sys.stdout has a buffer attribute,
   use sys.stdout.buffer.write(encoded), otherwise redecode the string and
   use sys.stdout.write(redecoded).

   Helper function for sys_displayhook(). */
static int
sys_displayhook_unencodable(PyObject *outf, PyObject *o)
{
    PyObject *stdout_encoding = NULL;
    PyObject *encoded, *escaped_str, *repr_str, *buffer, *result;
    const char *stdout_encoding_str;
    int ret;

    stdout_encoding = PyObject_GetAttr(outf, &_Py_ID(encoding));
    if (stdout_encoding == NULL)
        goto error;
    stdout_encoding_str = PyUnicode_AsUTF8(stdout_encoding);
    if (stdout_encoding_str == NULL)
        goto error;

    repr_str = PyObject_Repr(o);
    if (repr_str == NULL)
        goto error;
    encoded = PyUnicode_AsEncodedString(repr_str,
                                        stdout_encoding_str,
                                        "backslashreplace");
    Py_DECREF(repr_str);
    if (encoded == NULL)
        goto error;

    if (PyObject_GetOptionalAttr(outf, &_Py_ID(buffer), &buffer) < 0) {
        Py_DECREF(encoded);
        goto error;
    }
    if (buffer) {
        result = PyObject_CallMethodOneArg(buffer, &_Py_ID(write), encoded);
        Py_DECREF(buffer);
        Py_DECREF(encoded);
        if (result == NULL)
            goto error;
        Py_DECREF(result);
    }
    else {
        /* NOTE(review): escaped_str is not NULL-checked before being passed
           to PyFile_WriteObject(); presumably that call tolerates NULL, but
           verify against the PyFile_WriteObject contract. */
        escaped_str = PyUnicode_FromEncodedObject(encoded,
                                                  stdout_encoding_str,
                                                  "strict");
        Py_DECREF(encoded);
        if (PyFile_WriteObject(escaped_str, outf, Py_PRINT_RAW) != 0) {
            Py_DECREF(escaped_str);
            goto error;
        }
        Py_DECREF(escaped_str);
    }
    ret = 0;
    goto finally;

error:
    ret = -1;
finally:
    Py_XDECREF(stdout_encoding);
    return ret;
}

/*[clinic input]
sys.displayhook

    object as o: object
    /

Print an object to sys.stdout and also save it in builtins._
[clinic start generated code]*/

static PyObject *
sys_displayhook(PyObject *module, PyObject *o)
/*[clinic end generated code: output=347477d006df92ed input=08ba730166d7ef72]*/
{
    PyObject *outf;
    PyObject *builtins;
    PyThreadState *tstate = _PyThreadState_GET();

    builtins = PyImport_GetModule(&_Py_ID(builtins));
    if (builtins == NULL) {
        if (!_PyErr_Occurred(tstate)) {
            _PyErr_SetString(tstate, PyExc_RuntimeError,
                             "lost builtins module");
        }
        return NULL;
    }
    /* Borrowed use after DECREF: builtins stays alive in sys.modules. */
    Py_DECREF(builtins);

    /* Print value except if None */
    /* After printing, also assign to '_' */
    /* Before, set '_' to None to avoid recursion */
    if (o == Py_None) {
        Py_RETURN_NONE;
    }
    if (PyObject_SetAttr(builtins, &_Py_ID(_), Py_None) != 0)
        return NULL;
    outf = _PySys_GetAttr(tstate, &_Py_ID(stdout));
    if (outf == NULL || outf == Py_None) {
        _PyErr_SetString(tstate, PyExc_RuntimeError, "lost sys.stdout");
        return NULL;
    }
    if (PyFile_WriteObject(o, outf, 0) != 0) {
        if (_PyErr_ExceptionMatches(tstate, PyExc_UnicodeEncodeError)) {
            int err;
            /* repr(o) is not encodable to sys.stdout.encoding with
             * sys.stdout.errors error handler (which is probably 'strict') */
            _PyErr_Clear(tstate);
            err = sys_displayhook_unencodable(outf, o);
            if (err) {
                return NULL;
            }
        }
        else {
            return NULL;
        }
    }
    _Py_DECLARE_STR(newline, "\n");
    if (PyFile_WriteObject(&_Py_STR(newline), outf, Py_PRINT_RAW) != 0)
        return NULL;
    if (PyObject_SetAttr(builtins, &_Py_ID(_), o) != 0)
        return NULL;
    Py_RETURN_NONE;
}


/*[clinic input]
sys.excepthook

    exctype: object
    value: object
    traceback: object
    /

Handle an exception by displaying it with a traceback on sys.stderr.
[clinic start generated code]*/

static PyObject *
sys_excepthook_impl(PyObject *module, PyObject *exctype, PyObject *value,
                    PyObject *traceback)
/*[clinic end generated code: output=18d99fdda21b6b5e input=ecf606fa826f19d9]*/
{
    PyErr_Display(NULL, value, traceback);
    Py_RETURN_NONE;
}


/*[clinic input]
sys.exception

Return the current exception.

Return the most recent exception caught by an except clause
in the current stack frame or in an older stack frame, or None
if no such exception exists.
[clinic start generated code]*/

static PyObject *
sys_exception_impl(PyObject *module)
/*[clinic end generated code: output=2381ee2f25953e40 input=c88fbb94b6287431]*/
{
    _PyErr_StackItem *err_info = _PyErr_GetTopmostException(_PyThreadState_GET());
    if (err_info->exc_value != NULL) {
        return Py_NewRef(err_info->exc_value);
    }
    Py_RETURN_NONE;
}


/*[clinic input]
sys.exc_info

Return current exception information: (type, value, traceback).

Return information about the most recent exception caught by an except
clause in the current stack frame or in an older stack frame.
[clinic start generated code]*/

static PyObject *
sys_exc_info_impl(PyObject *module)
/*[clinic end generated code: output=3afd0940cf3a4d30 input=b5c5bf077788a3e5]*/
{
    _PyErr_StackItem *err_info = _PyErr_GetTopmostException(_PyThreadState_GET());
    return _PyErr_StackItemToExcInfoTuple(err_info);
}


/*[clinic input]
sys.unraisablehook

    unraisable: object
    /

Handle an unraisable exception.

The unraisable argument has the following attributes:

* exc_type: Exception type.
* exc_value: Exception value, can be None.
* exc_traceback: Exception traceback, can be None.
* err_msg: Error message, can be None.
* object: Object causing the exception, can be None.
[clinic start generated code]*/

static PyObject *
sys_unraisablehook(PyObject *module, PyObject *unraisable)
/*[clinic end generated code: output=bb92838b32abaa14 input=ec3af148294af8d3]*/
{
    return _PyErr_WriteUnraisableDefaultHook(unraisable);
}


/*[clinic input]
sys.exit

    status: object = None
    /

Exit the interpreter by raising SystemExit(status).

If the status is omitted or None, it defaults to zero (i.e., success).
If the status is an integer, it will be used as the system exit status.
If it is another kind of object, it will be printed and the system
exit status will be one (i.e., failure).
[clinic start generated code]*/

static PyObject *
sys_exit_impl(PyObject *module, PyObject *status)
/*[clinic end generated code: output=13870986c1ab2ec0 input=b86ca9497baa94f2]*/
{
    /* Raise SystemExit so callers may catch it or clean up. */
    PyErr_SetObject(PyExc_SystemExit, status);
    return NULL;
}


/* Return a new reference to the statically-allocated "utf-8" string. */
static PyObject *
get_utf8_unicode(void)
{
    _Py_DECLARE_STR(utf_8, "utf-8");
    PyObject *ret = &_Py_STR(utf_8);
    return Py_NewRef(ret);
}

/*[clinic input]
sys.getdefaultencoding

Return the current default encoding used by the Unicode implementation.
[clinic start generated code]*/

static PyObject *
sys_getdefaultencoding_impl(PyObject *module)
/*[clinic end generated code: output=256d19dfcc0711e6 input=d416856ddbef6909]*/
{
    return get_utf8_unicode();
}

/*[clinic input]
sys.getfilesystemencoding

Return the encoding used to convert Unicode filenames to OS filenames.
[clinic start generated code]*/

static PyObject *
sys_getfilesystemencoding_impl(PyObject *module)
/*[clinic end generated code: output=1dc4bdbe9be44aa7 input=8475f8649b8c7d8c]*/
{
    PyInterpreterState *interp = _PyInterpreterState_GET();
    const PyConfig *config = _PyInterpreterState_GetConfig(interp);

    /* Fast path: avoid a wide-char conversion for the common "utf-8". */
    if (wcscmp(config->filesystem_encoding, L"utf-8") == 0) {
        return get_utf8_unicode();
    }

    PyObject *u = PyUnicode_FromWideChar(config->filesystem_encoding, -1);
    if (u == NULL) {
        return NULL;
    }
    _PyUnicode_InternInPlace(interp, &u);
    return u;
}

/*[clinic input]
sys.getfilesystemencodeerrors

Return the error mode used Unicode to OS filename conversion.
[clinic start generated code]*/

static PyObject *
sys_getfilesystemencodeerrors_impl(PyObject *module)
/*[clinic end generated code: output=ba77b36bbf7c96f5 input=22a1e8365566f1e5]*/
{
    PyInterpreterState *interp = _PyInterpreterState_GET();
    const PyConfig *config = _PyInterpreterState_GetConfig(interp);
    PyObject *u = PyUnicode_FromWideChar(config->filesystem_errors, -1);
    if (u == NULL) {
        return NULL;
    }
    _PyUnicode_InternInPlace(interp, &u);
    return u;
}

/*[clinic input]
sys.intern

    string as s: unicode
    /

``Intern'' the given string.

This enters the string in the (global) table of interned strings
whose purpose is to speed up dictionary lookups.  Return the string
itself or the previously interned string object with the same value.
[clinic start generated code]*/

static PyObject *
sys_intern_impl(PyObject *module, PyObject *s)
/*[clinic end generated code: output=be680c24f5c9e5d6 input=849483c006924e2f]*/
{
    if (PyUnicode_CheckExact(s)) {
        /* INCREF first: InternInPlace may swap in the canonical object. */
        Py_INCREF(s);
        PyUnicode_InternInPlace(&s);
        return s;
    }
    else {
        PyErr_Format(PyExc_TypeError,
                     "can't intern %.400s", Py_TYPE(s)->tp_name);
        return NULL;
    }
}

/*[clinic input]
sys._is_interned -> bool

    string: unicode
    /

Return True if the given string is "interned".
[clinic start generated code]*/

static int
sys__is_interned_impl(PyObject *module, PyObject *string)
/*[clinic end generated code: output=c3678267b4e9d7ed input=039843e17883b606]*/
{
    return PyUnicode_CHECK_INTERNED(string);
}


/*
 * Cached interned string objects used for calling the profile and
 * trace functions.
 */
static PyObject *whatstrings[8] = {
    &_Py_ID(call),
    &_Py_ID(exception),
    &_Py_ID(line),
    &_Py_ID(return),
    &_Py_ID(c_call),
    &_Py_ID(c_exception),
    &_Py_ID(c_return),
    &_Py_ID(opcode),
};

/* Invoke the Python-level trace/profile 'callback' with
   (frame, whatstrings[what], arg), syncing fast locals around the call. */
static PyObject *
call_trampoline(PyThreadState *tstate, PyObject* callback,
                PyFrameObject *frame, int what, PyObject *arg)
{
    /* Discard any previous modifications the frame's fast locals */
    if (frame->f_fast_as_locals) {
        if (PyFrame_FastToLocalsWithError(frame) < 0) {
            return NULL;
        }
    }

    /* call the Python-level function */
    if (arg == NULL) {
        arg = Py_None;
    }
    PyObject *args[3] = {(PyObject *)frame, whatstrings[what], arg};
    PyObject *result = _PyObject_VectorcallTstate(tstate, callback, args, 3, NULL);

    /* Write back any changes the callback made via frame.f_locals. */
    PyFrame_LocalsToFast(frame, 1);
    return result;
}

/* C-level profile callback; disables profiling if the hook raises. */
static int
profile_trampoline(PyObject *self, PyFrameObject *frame,
                   int what, PyObject *arg)
{
    PyThreadState *tstate = _PyThreadState_GET();
    PyObject *result = call_trampoline(tstate, self, frame, what, arg);
    if (result == NULL) {
        _PyEval_SetProfile(tstate, NULL, NULL);
        return -1;
    }
    Py_DECREF(result);
    return 0;
}

/* C-level trace callback; dispatches to the global function on 'call'
   events and to the frame's local f_trace otherwise. */
static int
trace_trampoline(PyObject *self, PyFrameObject *frame,
                 int what, PyObject *arg)
{
    PyObject *callback;
    if (what == PyTrace_CALL) {
        callback = self;
    }
    else {
        callback = frame->f_trace;
    }
    if (callback == NULL) {
        return 0;
    }

    PyThreadState *tstate = _PyThreadState_GET();
    PyObject *result = call_trampoline(tstate, callback, frame, what, arg);
    if (result == NULL) {
        /* The hook raised: disable tracing entirely. */
        _PyEval_SetTrace(tstate, NULL, NULL);
        Py_CLEAR(frame->f_trace);
        return -1;
    }

    if (result != Py_None) {
        /* A non-None result becomes the frame's new local trace function. */
        Py_XSETREF(frame->f_trace, result);
    }
    else {
        Py_DECREF(result);
    }
    return 0;
}

/* sys.settrace(function): install/remove the global trace function. */
static PyObject *
sys_settrace(PyObject *self, PyObject *args)
{
    PyThreadState *tstate = _PyThreadState_GET();
    if (args == Py_None) {
        if (_PyEval_SetTrace(tstate, NULL, NULL) < 0) {
            return NULL;
        }
    }
    else {
        if (_PyEval_SetTrace(tstate, trace_trampoline, args) < 0) {
            return NULL;
        }
    }
    Py_RETURN_NONE;
}

PyDoc_STRVAR(settrace_doc,
"settrace(function)\n\
\n\
Set the global debug tracing function. It will be called on each\n\
function call. See the debugger chapter in the library manual."
);

/*[clinic input]
sys._settraceallthreads

    arg: object
    /

Set the global debug tracing function in all running threads belonging to the current interpreter.

It will be called on each function call. See the debugger chapter
in the library manual.
[clinic start generated code]*/

static PyObject *
sys__settraceallthreads(PyObject *module, PyObject *arg)
/*[clinic end generated code: output=161cca30207bf3ca input=5906aa1485a50289]*/
{
    PyObject* argument = NULL;
    Py_tracefunc func = NULL;

    if (arg != Py_None) {
        func = trace_trampoline;
        argument = arg;
    }

    PyEval_SetTraceAllThreads(func, argument);

    Py_RETURN_NONE;
}

/*[clinic input]
sys.gettrace

Return the global debug tracing function set with sys.settrace.

See the debugger chapter in the library manual.
[clinic start generated code]*/

static PyObject *
sys_gettrace_impl(PyObject *module)
/*[clinic end generated code: output=e97e3a4d8c971b6e input=373b51bb2147f4d8]*/
{
    PyThreadState *tstate = _PyThreadState_GET();
    PyObject *temp = tstate->c_traceobj;

    if (temp == NULL)
        temp = Py_None;
    return Py_NewRef(temp);
}

/* sys.setprofile(function): install/remove the profile function. */
static PyObject *
sys_setprofile(PyObject *self, PyObject *args)
{
    PyThreadState *tstate = _PyThreadState_GET();
    if (args == Py_None) {
        if (_PyEval_SetProfile(tstate, NULL, NULL) < 0) {
            return NULL;
        }
    }
    else {
        if (_PyEval_SetProfile(tstate, profile_trampoline, args) < 0) {
            return NULL;
        }
    }
    Py_RETURN_NONE;
}

PyDoc_STRVAR(setprofile_doc,
"setprofile(function)\n\
\n\
Set the profiling function. It will be called on each function call\n\
and return. See the profiler chapter in the library manual."
);

/*[clinic input]
sys._setprofileallthreads

    arg: object
    /

Set the profiling function in all running threads belonging to the current interpreter.

It will be called on each function call and return. See the profiler
chapter in the library manual.
[clinic start generated code]*/

static PyObject *
sys__setprofileallthreads(PyObject *module, PyObject *arg)
/*[clinic end generated code: output=2d61319e27b309fe input=d1a356d3f4f9060a]*/
{
    PyObject* argument = NULL;
    Py_tracefunc func = NULL;

    if (arg != Py_None) {
        func = profile_trampoline;
        argument = arg;
    }

    PyEval_SetProfileAllThreads(func, argument);

    Py_RETURN_NONE;
}

/*[clinic input]
sys.getprofile

Return the profiling function set with sys.setprofile.

See the profiler chapter in the library manual.
[clinic start generated code]*/

static PyObject *
sys_getprofile_impl(PyObject *module)
/*[clinic end generated code: output=579b96b373448188 input=1b3209d89a32965d]*/
{
    PyThreadState *tstate = _PyThreadState_GET();
    PyObject *temp = tstate->c_profileobj;

    if (temp == NULL)
        temp = Py_None;
    return Py_NewRef(temp);
}


/*[clinic input]
sys.setswitchinterval

    interval: double
    /

Set the ideal thread switching delay inside the Python interpreter.

The actual frequency of switching threads can be lower if the
interpreter executes long sequences of uninterruptible code
(this is implementation-specific and workload-dependent).

The parameter must represent the desired switching delay in seconds
A typical value is 0.005 (5 milliseconds).
[clinic start generated code]*/

static PyObject *
sys_setswitchinterval_impl(PyObject *module, double interval)
/*[clinic end generated code: output=65a19629e5153983 input=561b477134df91d9]*/
{
    if (interval <= 0.0) {
        PyErr_SetString(PyExc_ValueError,
                        "switch interval must be strictly positive");
        return NULL;
    }
    /* Stored internally in microseconds. */
    _PyEval_SetSwitchInterval((unsigned long) (1e6 * interval));
    Py_RETURN_NONE;
}


/*[clinic input]
sys.getswitchinterval -> double

Return the current thread switch interval; see sys.setswitchinterval().
[clinic start generated code]*/

static double
sys_getswitchinterval_impl(PyObject *module)
/*[clinic end generated code: output=a38c277c85b5096d input=bdf9d39c0ebbbb6f]*/
{
    /* Convert back from microseconds to seconds. */
    return 1e-6 * _PyEval_GetSwitchInterval();
}

/*[clinic input]
sys.setrecursionlimit

    limit as new_limit: int
    /

Set the maximum depth of the Python interpreter stack to n.

This limit prevents infinite recursion from causing an overflow of the C
stack and crashing Python.  The highest possible limit is platform-
dependent.
[clinic start generated code]*/

static PyObject *
sys_setrecursionlimit_impl(PyObject *module, int new_limit)
/*[clinic end generated code: output=35e1c64754800ace input=b0f7a23393924af3]*/
{
    PyThreadState *tstate = _PyThreadState_GET();

    if (new_limit < 1) {
        _PyErr_SetString(tstate, PyExc_ValueError,
                         "recursion limit must be greater or equal than 1");
        return NULL;
    }

    /* Reject too low new limit if the current recursion depth is higher than
       the new low-water mark. */
    int depth = tstate->py_recursion_limit - tstate->py_recursion_remaining;
    if (depth >= new_limit) {
        _PyErr_Format(tstate, PyExc_RecursionError,
                      "cannot set the recursion limit to %i at "
                      "the recursion depth %i: the limit is too low",
                      new_limit, depth);
        return NULL;
    }

    Py_SetRecursionLimit(new_limit);
    Py_RETURN_NONE;
}

/*[clinic input]
sys.set_coroutine_origin_tracking_depth

    depth: int

Enable or disable origin tracking for coroutine objects in this thread.

Coroutine objects will track 'depth' frames of traceback information
about where they came from, available in their cr_origin attribute.

Set a depth of 0 to disable.
[clinic start generated code]*/

static PyObject *
sys_set_coroutine_origin_tracking_depth_impl(PyObject *module, int depth)
/*[clinic end generated code: output=0a2123c1cc6759c5 input=a1d0a05f89d2c426]*/
{
    if (_PyEval_SetCoroutineOriginTrackingDepth(depth) < 0) {
        return NULL;
    }
    Py_RETURN_NONE;
}

/*[clinic input]
sys.get_coroutine_origin_tracking_depth -> int

Check status of origin tracking for coroutine objects in this thread.
[clinic start generated code]*/

static int
sys_get_coroutine_origin_tracking_depth_impl(PyObject *module)
/*[clinic end generated code: output=3699f7be95a3afb8 input=335266a71205b61a]*/
{
    return _PyEval_GetCoroutineOriginTrackingDepth();
}

static PyTypeObject AsyncGenHooksType;

PyDoc_STRVAR(asyncgen_hooks_doc,
"asyncgen_hooks\n\
\n\
A named tuple providing information about asynchronous\n\
generators hooks.  The attributes are read only.");

static PyStructSequence_Field asyncgen_hooks_fields[] = {
    {"firstiter", "Hook to intercept first iteration"},
    {"finalizer", "Hook to intercept finalization"},
    {0}
};

static PyStructSequence_Desc asyncgen_hooks_desc = {
    "asyncgen_hooks",          /* name */
    asyncgen_hooks_doc,        /* doc */
    asyncgen_hooks_fields ,    /* fields */
    2
};

/* sys.set_asyncgen_hooks([firstiter][, finalizer]): each hook must be
   callable or None; None clears the corresponding hook, an omitted
   argument leaves it untouched. */
static PyObject *
sys_set_asyncgen_hooks(PyObject *self, PyObject *args, PyObject *kw)
{
    static char *keywords[] = {"firstiter", "finalizer", NULL};
    PyObject *firstiter = NULL;
    PyObject *finalizer = NULL;

    if (!PyArg_ParseTupleAndKeywords(
            args, kw, "|OO", keywords,
            &firstiter, &finalizer)) {
        return NULL;
    }

    if (finalizer && finalizer != Py_None) {
        if (!PyCallable_Check(finalizer)) {
            PyErr_Format(PyExc_TypeError,
                         "callable finalizer expected, got %.50s",
                         Py_TYPE(finalizer)->tp_name);
            return NULL;
        }
        if (_PyEval_SetAsyncGenFinalizer(finalizer) < 0) {
            return NULL;
        }
    }
    else if (finalizer == Py_None && _PyEval_SetAsyncGenFinalizer(NULL) < 0) {
        return NULL;
    }

    if (firstiter && firstiter != Py_None) {
        if (!PyCallable_Check(firstiter)) {
            PyErr_Format(PyExc_TypeError,
                         "callable firstiter expected, got %.50s",
                         Py_TYPE(firstiter)->tp_name);
            return NULL;
        }
        if (_PyEval_SetAsyncGenFirstiter(firstiter) < 0) {
            return NULL;
        }
    }
    else if (firstiter == Py_None && _PyEval_SetAsyncGenFirstiter(NULL) < 0) {
        return NULL;
    }

    Py_RETURN_NONE;
}

PyDoc_STRVAR(set_asyncgen_hooks_doc,
"set_asyncgen_hooks([firstiter] [, finalizer])\n\
\n\
Set a finalizer for async generators objects."
);

/*[clinic input]
sys.get_asyncgen_hooks

Return the installed asynchronous generators hooks.

This returns a namedtuple of the form (firstiter, finalizer).
[clinic start generated code]*/

static PyObject *
sys_get_asyncgen_hooks_impl(PyObject *module)
/*[clinic end generated code: output=53a253707146f6cf input=3676b9ea62b14625]*/
{
    PyObject *res;
    PyObject *firstiter = _PyEval_GetAsyncGenFirstiter();
    PyObject *finalizer = _PyEval_GetAsyncGenFinalizer();

    res = PyStructSequence_New(&AsyncGenHooksType);
    if (res == NULL) {
        return NULL;
    }

    if (firstiter == NULL) {
        firstiter = Py_None;
    }

    if (finalizer == NULL) {
        finalizer = Py_None;
    }

    PyStructSequence_SET_ITEM(res, 0, Py_NewRef(firstiter));
    PyStructSequence_SET_ITEM(res, 1, Py_NewRef(finalizer));

    return res;
}


static PyTypeObject Hash_InfoType;

PyDoc_STRVAR(hash_info_doc,
"hash_info\n\
\n\
A named tuple providing parameters used for computing\n\
hashes. The attributes are read only.");

static PyStructSequence_Field hash_info_fields[] = {
    {"width", "width of the type used for hashing, in bits"},
    {"modulus", "prime number giving the modulus on which the hash "
                "function is based"},
    {"inf", "value to be used for hash of a positive infinity"},
    {"nan", "value to be used for hash of a nan"},
    {"imag", "multiplier used for the imaginary part of a complex number"},
    {"algorithm", "name of the algorithm for hashing of str, bytes and "
                  "memoryviews"},
    {"hash_bits", "internal output size of hash algorithm"},
    {"seed_bits", "seed size of hash algorithm"},
    {"cutoff", "small string optimization cutoff"},
    {NULL, NULL}
};

static PyStructSequence_Desc hash_info_desc = {
    "sys.hash_info",
    hash_info_doc,
    hash_info_fields,
    9,
};

/* Build the sys.hash_info struct sequence from the live hash function
   definition.  Returns a new reference, or NULL on error. */
static PyObject *
get_hash_info(PyThreadState *tstate)
{
    PyObject *hash_info;
    int field = 0;
    PyHash_FuncDef *hashfunc;
    hash_info = PyStructSequence_New(&Hash_InfoType);
    if (hash_info == NULL) {
        return NULL;
    }
    hashfunc = PyHash_GetFuncDef();

/* Append one computed item, releasing the partially-built result on error. */
#define SET_HASH_INFO_ITEM(CALL)                                \
    do {                                                        \
        PyObject *item = (CALL);                                \
        if (item == NULL) {                                     \
            Py_CLEAR(hash_info);                                \
            return NULL;                                        \
        }                                                       \
        PyStructSequence_SET_ITEM(hash_info, field++, item);    \
    } while(0)

    SET_HASH_INFO_ITEM(PyLong_FromLong(8 * sizeof(Py_hash_t)));
    SET_HASH_INFO_ITEM(PyLong_FromSsize_t(_PyHASH_MODULUS));
    SET_HASH_INFO_ITEM(PyLong_FromLong(_PyHASH_INF));
    SET_HASH_INFO_ITEM(PyLong_FromLong(0));  // This is no longer used
    SET_HASH_INFO_ITEM(PyLong_FromLong(_PyHASH_IMAG));
    SET_HASH_INFO_ITEM(PyUnicode_FromString(hashfunc->name));
    SET_HASH_INFO_ITEM(PyLong_FromLong(hashfunc->hash_bits));
    SET_HASH_INFO_ITEM(PyLong_FromLong(hashfunc->seed_bits));
    SET_HASH_INFO_ITEM(PyLong_FromLong(Py_HASH_CUTOFF));

#undef SET_HASH_INFO_ITEM

    return hash_info;
}

/*[clinic input]
sys.getrecursionlimit

Return the current value of the recursion limit.

The recursion limit is the maximum depth of the Python interpreter
stack.
This limit prevents infinite recursion from causing an overflow of the C stack and crashing Python. [clinic start generated code]*/ static PyObject * sys_getrecursionlimit_impl(PyObject *module) /*[clinic end generated code: output=d571fb6b4549ef2e input=1c6129fd2efaeea8]*/ { return PyLong_FromLong(Py_GetRecursionLimit()); } #ifdef MS_WINDOWS static PyTypeObject WindowsVersionType = {0, 0, 0, 0, 0, 0}; static PyStructSequence_Field windows_version_fields[] = { {"major", "Major version number"}, {"minor", "Minor version number"}, {"build", "Build number"}, {"platform", "Operating system platform"}, {"service_pack", "Latest Service Pack installed on the system"}, {"service_pack_major", "Service Pack major version number"}, {"service_pack_minor", "Service Pack minor version number"}, {"suite_mask", "Bit mask identifying available product suites"}, {"product_type", "System product type"}, {"platform_version", "Diagnostic version number"}, {0} }; static PyStructSequence_Desc windows_version_desc = { "sys.getwindowsversion", /* name */ sys_getwindowsversion__doc__, /* doc */ windows_version_fields, /* fields */ 5 /* For backward compatibility, only the first 5 items are accessible via indexing, the rest are name only */ }; static PyObject * _sys_getwindowsversion_from_kernel32(void) { #ifndef MS_WINDOWS_DESKTOP return NULL; #else HANDLE hKernel32; wchar_t kernel32_path[MAX_PATH]; LPVOID verblock; DWORD verblock_size; VS_FIXEDFILEINFO *ffi; UINT ffi_len; DWORD realMajor, realMinor, realBuild; Py_BEGIN_ALLOW_THREADS hKernel32 = GetModuleHandleW(L"kernel32.dll"); Py_END_ALLOW_THREADS if (!hKernel32 || !GetModuleFileNameW(hKernel32, kernel32_path, MAX_PATH)) { PyErr_SetFromWindowsErr(0); return NULL; } verblock_size = GetFileVersionInfoSizeW(kernel32_path, NULL); if (!verblock_size) { PyErr_SetFromWindowsErr(0); return NULL; } verblock = PyMem_RawMalloc(verblock_size); if (!verblock || !GetFileVersionInfoW(kernel32_path, 0, verblock_size, verblock) || 
!VerQueryValueW(verblock, L"", (LPVOID)&ffi, &ffi_len)) { PyErr_SetFromWindowsErr(0); return NULL; } realMajor = HIWORD(ffi->dwProductVersionMS); realMinor = LOWORD(ffi->dwProductVersionMS); realBuild = HIWORD(ffi->dwProductVersionLS); PyMem_RawFree(verblock); return Py_BuildValue("(kkk)", realMajor, realMinor, realBuild); #endif /* !MS_WINDOWS_DESKTOP */ } /* Disable deprecation warnings about GetVersionEx as the result is being passed straight through to the caller, who is responsible for using it correctly. */ #pragma warning(push) #pragma warning(disable:4996) /*[clinic input] sys.getwindowsversion Return info about the running version of Windows as a named tuple. The members are named: major, minor, build, platform, service_pack, service_pack_major, service_pack_minor, suite_mask, product_type and platform_version. For backward compatibility, only the first 5 items are available by indexing. All elements are numbers, except service_pack and platform_type which are strings, and platform_version which is a 3-tuple. Platform is always 2. Product_type may be 1 for a workstation, 2 for a domain controller, 3 for a server. Platform_version is a 3-tuple containing a version number that is intended for identifying the OS rather than feature detection. 
[clinic start generated code]*/ static PyObject * sys_getwindowsversion_impl(PyObject *module) /*[clinic end generated code: output=1ec063280b932857 input=73a228a328fee63a]*/ { PyObject *version; int pos = 0; OSVERSIONINFOEXW ver; if (PyObject_GetOptionalAttrString(module, "_cached_windows_version", &version) < 0) { return NULL; }; if (version && PyObject_TypeCheck(version, &WindowsVersionType)) { return version; } Py_XDECREF(version); ver.dwOSVersionInfoSize = sizeof(ver); if (!GetVersionExW((OSVERSIONINFOW*) &ver)) return PyErr_SetFromWindowsErr(0); version = PyStructSequence_New(&WindowsVersionType); if (version == NULL) return NULL; #define SET_VERSION_INFO(CALL) \ do { \ PyObject *item = (CALL); \ if (item == NULL) { \ goto error; \ } \ PyStructSequence_SET_ITEM(version, pos++, item); \ } while(0) SET_VERSION_INFO(PyLong_FromLong(ver.dwMajorVersion)); SET_VERSION_INFO(PyLong_FromLong(ver.dwMinorVersion)); SET_VERSION_INFO(PyLong_FromLong(ver.dwBuildNumber)); SET_VERSION_INFO(PyLong_FromLong(ver.dwPlatformId)); SET_VERSION_INFO(PyUnicode_FromWideChar(ver.szCSDVersion, -1)); SET_VERSION_INFO(PyLong_FromLong(ver.wServicePackMajor)); SET_VERSION_INFO(PyLong_FromLong(ver.wServicePackMinor)); SET_VERSION_INFO(PyLong_FromLong(ver.wSuiteMask)); SET_VERSION_INFO(PyLong_FromLong(ver.wProductType)); // GetVersion will lie if we are running in a compatibility mode. // We need to read the version info from a system file resource // to accurately identify the OS version. If we fail for any reason, // just return whatever GetVersion said. 
PyObject *realVersion = _sys_getwindowsversion_from_kernel32(); if (!realVersion) { if (!PyErr_ExceptionMatches(PyExc_WindowsError)) { return NULL; } PyErr_Clear(); realVersion = Py_BuildValue("(kkk)", ver.dwMajorVersion, ver.dwMinorVersion, ver.dwBuildNumber ); } SET_VERSION_INFO(realVersion); #undef SET_VERSION_INFO if (PyObject_SetAttrString(module, "_cached_windows_version", version) < 0) { goto error; } return version; error: Py_DECREF(version); return NULL; } #pragma warning(pop) /*[clinic input] sys._enablelegacywindowsfsencoding Changes the default filesystem encoding to mbcs:replace. This is done for consistency with earlier versions of Python. See PEP 529 for more information. This is equivalent to defining the PYTHONLEGACYWINDOWSFSENCODING environment variable before launching Python. [clinic start generated code]*/ static PyObject * sys__enablelegacywindowsfsencoding_impl(PyObject *module) /*[clinic end generated code: output=f5c3855b45e24fe9 input=2bfa931a20704492]*/ { if (PyErr_WarnEx(PyExc_DeprecationWarning, "sys._enablelegacywindowsfsencoding() is deprecated and will be " "removed in Python 3.16. Use PYTHONLEGACYWINDOWSFSENCODING " "instead.", 1)) { return NULL; } if (_PyUnicode_EnableLegacyWindowsFSEncoding() < 0) { return NULL; } Py_RETURN_NONE; } #endif /* MS_WINDOWS */ #ifdef HAVE_DLOPEN /*[clinic input] sys.setdlopenflags flags as new_val: int / Set the flags used by the interpreter for dlopen calls. This is used, for example, when the interpreter loads extension modules. Among other things, this will enable a lazy resolving of symbols when importing a module, if called as sys.setdlopenflags(0). To share symbols across extension modules, call as sys.setdlopenflags(os.RTLD_GLOBAL). Symbolic names for the flag modules can be found in the os module (RTLD_xxx constants, e.g. os.RTLD_LAZY). 
[clinic start generated code]*/ static PyObject * sys_setdlopenflags_impl(PyObject *module, int new_val) /*[clinic end generated code: output=ec918b7fe0a37281 input=4c838211e857a77f]*/ { PyInterpreterState *interp = _PyInterpreterState_GET(); _PyImport_SetDLOpenFlags(interp, new_val); Py_RETURN_NONE; } /*[clinic input] sys.getdlopenflags Return the current value of the flags that are used for dlopen calls. The flag constants are defined in the os module. [clinic start generated code]*/ static PyObject * sys_getdlopenflags_impl(PyObject *module) /*[clinic end generated code: output=e92cd1bc5005da6e input=dc4ea0899c53b4b6]*/ { PyInterpreterState *interp = _PyInterpreterState_GET(); return PyLong_FromLong( _PyImport_GetDLOpenFlags(interp)); } #endif /* HAVE_DLOPEN */ #ifdef USE_MALLOPT /* Link with -lmalloc (or -lmpc) on an SGI */ #include /*[clinic input] sys.mdebug flag: int / [clinic start generated code]*/ static PyObject * sys_mdebug_impl(PyObject *module, int flag) /*[clinic end generated code: output=5431d545847c3637 input=151d150ae1636f8a]*/ { int flag; mallopt(M_DEBUG, flag); Py_RETURN_NONE; } #endif /* USE_MALLOPT */ /*[clinic input] sys.get_int_max_str_digits Return the maximum string digits limit for non-binary int<->str conversions. [clinic start generated code]*/ static PyObject * sys_get_int_max_str_digits_impl(PyObject *module) /*[clinic end generated code: output=0042f5e8ae0e8631 input=61bf9f99bc8b112d]*/ { PyInterpreterState *interp = _PyInterpreterState_GET(); return PyLong_FromLong(interp->long_state.max_str_digits); } /*[clinic input] sys.set_int_max_str_digits maxdigits: int Set the maximum string digits limit for non-binary int<->str conversions. 
[clinic start generated code]*/ static PyObject * sys_set_int_max_str_digits_impl(PyObject *module, int maxdigits) /*[clinic end generated code: output=734d4c2511f2a56d input=d7e3f325db6910c5]*/ { PyThreadState *tstate = _PyThreadState_GET(); if ((!maxdigits) || (maxdigits >= _PY_LONG_MAX_STR_DIGITS_THRESHOLD)) { tstate->interp->long_state.max_str_digits = maxdigits; Py_RETURN_NONE; } else { PyErr_Format( PyExc_ValueError, "maxdigits must be 0 or larger than %d", _PY_LONG_MAX_STR_DIGITS_THRESHOLD); return NULL; } } size_t _PySys_GetSizeOf(PyObject *o) { PyObject *res = NULL; PyObject *method; Py_ssize_t size; PyThreadState *tstate = _PyThreadState_GET(); /* Make sure the type is initialized. float gets initialized late */ if (PyType_Ready(Py_TYPE(o)) < 0) { return (size_t)-1; } method = _PyObject_LookupSpecial(o, &_Py_ID(__sizeof__)); if (method == NULL) { if (!_PyErr_Occurred(tstate)) { _PyErr_Format(tstate, PyExc_TypeError, "Type %.100s doesn't define __sizeof__", Py_TYPE(o)->tp_name); } } else { res = _PyObject_CallNoArgs(method); Py_DECREF(method); } if (res == NULL) return (size_t)-1; size = PyLong_AsSsize_t(res); Py_DECREF(res); if (size == -1 && _PyErr_Occurred(tstate)) return (size_t)-1; if (size < 0) { _PyErr_SetString(tstate, PyExc_ValueError, "__sizeof__() should return >= 0"); return (size_t)-1; } size_t presize = 0; if (!Py_IS_TYPE(o, &PyType_Type) || PyType_HasFeature((PyTypeObject *)o, Py_TPFLAGS_HEAPTYPE)) { /* Add the size of the pre-header if "o" is not a static type */ presize = _PyType_PreHeaderSize(Py_TYPE(o)); } return (size_t)size + presize; } static PyObject * sys_getsizeof(PyObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"object", "default", 0}; size_t size; PyObject *o, *dflt = NULL; PyThreadState *tstate = _PyThreadState_GET(); if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O:getsizeof", kwlist, &o, &dflt)) { return NULL; } size = _PySys_GetSizeOf(o); if (size == (size_t)-1 && _PyErr_Occurred(tstate)) { /* Has a 
default value been given */ if (dflt != NULL && _PyErr_ExceptionMatches(tstate, PyExc_TypeError)) { _PyErr_Clear(tstate); return Py_NewRef(dflt); } else return NULL; } return PyLong_FromSize_t(size); } PyDoc_STRVAR(getsizeof_doc, "getsizeof(object [, default]) -> int\n\ \n\ Return the size of object in bytes."); /*[clinic input] sys.getrefcount -> Py_ssize_t object: object / Return the reference count of object. The count returned is generally one higher than you might expect, because it includes the (temporary) reference as an argument to getrefcount(). [clinic start generated code]*/ static Py_ssize_t sys_getrefcount_impl(PyObject *module, PyObject *object) /*[clinic end generated code: output=5fd477f2264b85b2 input=bf474efd50a21535]*/ { return Py_REFCNT(object); } #ifdef Py_REF_DEBUG /*[clinic input] sys.gettotalrefcount -> Py_ssize_t [clinic start generated code]*/ static Py_ssize_t sys_gettotalrefcount_impl(PyObject *module) /*[clinic end generated code: output=4103886cf17c25bc input=53b744faa5d2e4f6]*/ { /* It may make sense to return the total for the current interpreter or have a second function that does so. */ return _Py_GetGlobalRefTotal(); } #endif /* Py_REF_DEBUG */ /*[clinic input] sys.getallocatedblocks -> Py_ssize_t Return the number of memory blocks currently allocated. [clinic start generated code]*/ static Py_ssize_t sys_getallocatedblocks_impl(PyObject *module) /*[clinic end generated code: output=f0c4e873f0b6dcf7 input=dab13ee346a0673e]*/ { // It might make sense to return the count // for just the current interpreter. 
return _Py_GetGlobalAllocatedBlocks(); } /*[clinic input] sys.getunicodeinternedsize -> Py_ssize_t Return the number of elements of the unicode interned dictionary [clinic start generated code]*/ static Py_ssize_t sys_getunicodeinternedsize_impl(PyObject *module) /*[clinic end generated code: output=ad0e4c9738ed4129 input=726298eaa063347a]*/ { return _PyUnicode_InternedSize(); } /*[clinic input] sys._getframe depth: int = 0 / Return a frame object from the call stack. If optional integer depth is given, return the frame object that many calls below the top of the stack. If that is deeper than the call stack, ValueError is raised. The default for depth is zero, returning the frame at the top of the call stack. This function should be used for internal and specialized purposes only. [clinic start generated code]*/ static PyObject * sys__getframe_impl(PyObject *module, int depth) /*[clinic end generated code: output=d438776c04d59804 input=c1be8a6464b11ee5]*/ { PyThreadState *tstate = _PyThreadState_GET(); _PyInterpreterFrame *frame = tstate->current_frame; if (frame != NULL) { while (depth > 0) { frame = _PyFrame_GetFirstComplete(frame->previous); if (frame == NULL) { break; } --depth; } } if (frame == NULL) { _PyErr_SetString(tstate, PyExc_ValueError, "call stack is not deep enough"); return NULL; } PyObject *pyFrame = Py_XNewRef((PyObject *)_PyFrame_GetFrameObject(frame)); if (pyFrame && _PySys_Audit(tstate, "sys._getframe", "(O)", pyFrame) < 0) { Py_DECREF(pyFrame); return NULL; } return pyFrame; } /*[clinic input] sys._current_frames Return a dict mapping each thread's thread id to its current stack frame. This function should be used for specialized purposes only. 
[clinic start generated code]*/ static PyObject * sys__current_frames_impl(PyObject *module) /*[clinic end generated code: output=d2a41ac0a0a3809a input=2a9049c5f5033691]*/ { return _PyThread_CurrentFrames(); } /*[clinic input] sys._current_exceptions Return a dict mapping each thread's identifier to its current raised exception. This function should be used for specialized purposes only. [clinic start generated code]*/ static PyObject * sys__current_exceptions_impl(PyObject *module) /*[clinic end generated code: output=2ccfd838c746f0ba input=0e91818fbf2edc1f]*/ { return _PyThread_CurrentExceptions(); } /*[clinic input] sys.call_tracing func: object args as funcargs: object(subclass_of='&PyTuple_Type') / Call func(*args), while tracing is enabled. The tracing state is saved, and restored afterwards. This is intended to be called from a debugger from a checkpoint, to recursively debug some other code. [clinic start generated code]*/ static PyObject * sys_call_tracing_impl(PyObject *module, PyObject *func, PyObject *funcargs) /*[clinic end generated code: output=7e4999853cd4e5a6 input=5102e8b11049f92f]*/ { return _PyEval_CallTracing(func, funcargs); } /*[clinic input] sys._debugmallocstats Print summary info to stderr about the state of pymalloc's structures. In Py_DEBUG mode, also perform some expensive internal consistency checks. [clinic start generated code]*/ static PyObject * sys__debugmallocstats_impl(PyObject *module) /*[clinic end generated code: output=ec3565f8c7cee46a input=33c0c9c416f98424]*/ { #ifdef WITH_PYMALLOC if (_PyObject_DebugMallocStats(stderr)) { fputc('\n', stderr); } #endif _PyObject_DebugTypeStats(stderr); Py_RETURN_NONE; } #ifdef Py_TRACE_REFS /* Defined in objects.c because it uses static globals in that file */ extern PyObject *_Py_GetObjects(PyObject *, PyObject *); #endif /*[clinic input] sys._clear_type_cache Clear the internal type lookup cache. 
[clinic start generated code]*/ static PyObject * sys__clear_type_cache_impl(PyObject *module) /*[clinic end generated code: output=20e48ca54a6f6971 input=127f3e04a8d9b555]*/ { PyType_ClearCache(); Py_RETURN_NONE; } /*[clinic input] sys._clear_internal_caches Clear all internal performance-related caches. [clinic start generated code]*/ static PyObject * sys__clear_internal_caches_impl(PyObject *module) /*[clinic end generated code: output=0ee128670a4966d6 input=253e741ca744f6e8]*/ { PyInterpreterState *interp = _PyInterpreterState_GET(); _Py_Executors_InvalidateAll(interp, 0); PyType_ClearCache(); Py_RETURN_NONE; } /* Note that, for now, we do not have a per-interpreter equivalent for sys.is_finalizing(). */ /*[clinic input] sys.is_finalizing Return True if Python is exiting. [clinic start generated code]*/ static PyObject * sys_is_finalizing_impl(PyObject *module) /*[clinic end generated code: output=735b5ff7962ab281 input=f0df747a039948a5]*/ { return PyBool_FromLong(Py_IsFinalizing()); } #ifdef Py_STATS /*[clinic input] sys._stats_on Turns on stats gathering (stats gathering is off by default). [clinic start generated code]*/ static PyObject * sys__stats_on_impl(PyObject *module) /*[clinic end generated code: output=aca53eafcbb4d9fe input=43b5bfe145299e55]*/ { _Py_StatsOn(); Py_RETURN_NONE; } /*[clinic input] sys._stats_off Turns off stats gathering (stats gathering is off by default). [clinic start generated code]*/ static PyObject * sys__stats_off_impl(PyObject *module) /*[clinic end generated code: output=1534c1ee63812214 input=d1a84c60c56cbce2]*/ { _Py_StatsOff(); Py_RETURN_NONE; } /*[clinic input] sys._stats_clear Clears the stats. [clinic start generated code]*/ static PyObject * sys__stats_clear_impl(PyObject *module) /*[clinic end generated code: output=fb65a2525ee50604 input=3e03f2654f44da96]*/ { _Py_StatsClear(); Py_RETURN_NONE; } /*[clinic input] sys._stats_dump -> bool Dump stats to file, and clears the stats. 
Return False if no statistics were not dumped because stats gathering was off. [clinic start generated code]*/ static int sys__stats_dump_impl(PyObject *module) /*[clinic end generated code: output=6e346b4ba0de4489 input=31a489e39418b2a5]*/ { int res = _Py_PrintSpecializationStats(1); _Py_StatsClear(); return res; } #endif // Py_STATS #ifdef ANDROID_API_LEVEL /*[clinic input] sys.getandroidapilevel Return the build time API version of Android as an integer. [clinic start generated code]*/ static PyObject * sys_getandroidapilevel_impl(PyObject *module) /*[clinic end generated code: output=214abf183a1c70c1 input=3e6d6c9fcdd24ac6]*/ { return PyLong_FromLong(ANDROID_API_LEVEL); } #endif /* ANDROID_API_LEVEL */ /*[clinic input] sys.activate_stack_trampoline backend: str / Activate stack profiler trampoline *backend*. [clinic start generated code]*/ static PyObject * sys_activate_stack_trampoline_impl(PyObject *module, const char *backend) /*[clinic end generated code: output=5783cdeb51874b43 input=a12df928758a82b4]*/ { #ifdef PY_HAVE_PERF_TRAMPOLINE if (strcmp(backend, "perf") == 0) { _PyPerf_Callbacks cur_cb; _PyPerfTrampoline_GetCallbacks(&cur_cb); if (cur_cb.write_state != _Py_perfmap_callbacks.write_state) { if (_PyPerfTrampoline_SetCallbacks(&_Py_perfmap_callbacks) < 0 ) { PyErr_SetString(PyExc_ValueError, "can't activate perf trampoline"); return NULL; } } } else { PyErr_Format(PyExc_ValueError, "invalid backend: %s", backend); return NULL; } if (_PyPerfTrampoline_Init(1) < 0) { return NULL; } Py_RETURN_NONE; #else PyErr_SetString(PyExc_ValueError, "perf trampoline not available"); return NULL; #endif } /*[clinic input] sys.deactivate_stack_trampoline Deactivate the current stack profiler trampoline backend. If no stack profiler is activated, this function has no effect. 
[clinic start generated code]*/ static PyObject * sys_deactivate_stack_trampoline_impl(PyObject *module) /*[clinic end generated code: output=b50da25465df0ef1 input=9f629a6be9fe7fc8]*/ { if (_PyPerfTrampoline_Init(0) < 0) { return NULL; } Py_RETURN_NONE; } /*[clinic input] sys.is_stack_trampoline_active Return *True* if a stack profiler trampoline is active. [clinic start generated code]*/ static PyObject * sys_is_stack_trampoline_active_impl(PyObject *module) /*[clinic end generated code: output=ab2746de0ad9d293 input=29616b7bf6a0b703]*/ { #ifdef PY_HAVE_PERF_TRAMPOLINE if (_PyIsPerfTrampolineActive()) { Py_RETURN_TRUE; } #endif Py_RETURN_FALSE; } /*[clinic input] sys._getframemodulename depth: int = 0 Return the name of the module for a calling frame. The default depth returns the module containing the call to this API. A more typical use in a library will pass a depth of 1 to get the user's module rather than the library module. If no frame, module, or name can be found, returns None. 
[clinic start generated code]*/ static PyObject * sys__getframemodulename_impl(PyObject *module, int depth) /*[clinic end generated code: output=1d70ef691f09d2db input=d4f1a8ed43b8fb46]*/ { if (PySys_Audit("sys._getframemodulename", "i", depth) < 0) { return NULL; } _PyInterpreterFrame *f = _PyThreadState_GET()->current_frame; while (f && (_PyFrame_IsIncomplete(f) || depth-- > 0)) { f = f->previous; } if (f == NULL || f->f_funcobj == NULL) { Py_RETURN_NONE; } PyObject *r = PyFunction_GetModule(f->f_funcobj); if (!r) { PyErr_Clear(); r = Py_None; } return Py_NewRef(r); } /*[clinic input] sys._get_cpu_count_config -> int Private function for getting PyConfig.cpu_count [clinic start generated code]*/ static int sys__get_cpu_count_config_impl(PyObject *module) /*[clinic end generated code: output=36611bb5efad16dc input=523e1ade2204084e]*/ { const PyConfig *config = _Py_GetConfig(); return config->cpu_count; } static PerfMapState perf_map_state; PyAPI_FUNC(int) PyUnstable_PerfMapState_Init(void) { #ifndef MS_WINDOWS char filename[100]; pid_t pid = getpid(); // Use nofollow flag to prevent symlink attacks. 
int flags = O_WRONLY | O_CREAT | O_APPEND | O_NOFOLLOW; #ifdef O_CLOEXEC flags |= O_CLOEXEC; #endif snprintf(filename, sizeof(filename) - 1, "/tmp/perf-%jd.map", (intmax_t)pid); int fd = open(filename, flags, 0600); if (fd == -1) { return -1; } else{ perf_map_state.perf_map = fdopen(fd, "a"); if (perf_map_state.perf_map == NULL) { close(fd); return -1; } } perf_map_state.map_lock = PyThread_allocate_lock(); if (perf_map_state.map_lock == NULL) { fclose(perf_map_state.perf_map); return -2; } #endif return 0; } PyAPI_FUNC(int) PyUnstable_WritePerfMapEntry( const void *code_addr, unsigned int code_size, const char *entry_name ) { #ifndef MS_WINDOWS if (perf_map_state.perf_map == NULL) { int ret = PyUnstable_PerfMapState_Init(); if (ret != 0){ return ret; } } PyThread_acquire_lock(perf_map_state.map_lock, 1); fprintf(perf_map_state.perf_map, "%" PRIxPTR " %x %s\n", (uintptr_t) code_addr, code_size, entry_name); fflush(perf_map_state.perf_map); PyThread_release_lock(perf_map_state.map_lock); #endif return 0; } PyAPI_FUNC(void) PyUnstable_PerfMapState_Fini(void) { #ifndef MS_WINDOWS if (perf_map_state.perf_map != NULL) { // close the file PyThread_acquire_lock(perf_map_state.map_lock, 1); fclose(perf_map_state.perf_map); PyThread_release_lock(perf_map_state.map_lock); // clean up the lock and state PyThread_free_lock(perf_map_state.map_lock); perf_map_state.perf_map = NULL; } #endif } PyAPI_FUNC(int) PyUnstable_CopyPerfMapFile(const char* parent_filename) { #ifndef MS_WINDOWS FILE* from = fopen(parent_filename, "r"); if (!from) { return -1; } if (perf_map_state.perf_map == NULL) { int ret = PyUnstable_PerfMapState_Init(); if (ret != 0) { return ret; } } char buf[4096]; PyThread_acquire_lock(perf_map_state.map_lock, 1); int fflush_result = 0, result = 0; while (1) { size_t bytes_read = fread(buf, 1, sizeof(buf), from); size_t bytes_written = fwrite(buf, 1, bytes_read, perf_map_state.perf_map); fflush_result = fflush(perf_map_state.perf_map); if (fflush_result != 0 || 
bytes_read == 0 || bytes_written < bytes_read) { result = -1; goto close_and_release; } if (bytes_read < sizeof(buf) && feof(from)) { goto close_and_release; } } close_and_release: fclose(from); PyThread_release_lock(perf_map_state.map_lock); return result; #endif return 0; } #ifdef __cplusplus } #endif static PyMethodDef sys_methods[] = { /* Might as well keep this in alphabetic order */ SYS_ADDAUDITHOOK_METHODDEF {"audit", _PyCFunction_CAST(sys_audit), METH_FASTCALL, audit_doc }, {"breakpointhook", _PyCFunction_CAST(sys_breakpointhook), METH_FASTCALL | METH_KEYWORDS, breakpointhook_doc}, SYS__CLEAR_INTERNAL_CACHES_METHODDEF SYS__CLEAR_TYPE_CACHE_METHODDEF SYS__CURRENT_FRAMES_METHODDEF SYS__CURRENT_EXCEPTIONS_METHODDEF SYS_DISPLAYHOOK_METHODDEF SYS_EXCEPTION_METHODDEF SYS_EXC_INFO_METHODDEF SYS_EXCEPTHOOK_METHODDEF SYS_EXIT_METHODDEF SYS_GETDEFAULTENCODING_METHODDEF SYS_GETDLOPENFLAGS_METHODDEF SYS_GETALLOCATEDBLOCKS_METHODDEF SYS_GETUNICODEINTERNEDSIZE_METHODDEF SYS_GETFILESYSTEMENCODING_METHODDEF SYS_GETFILESYSTEMENCODEERRORS_METHODDEF #ifdef Py_TRACE_REFS {"getobjects", _Py_GetObjects, METH_VARARGS}, #endif SYS_GETTOTALREFCOUNT_METHODDEF SYS_GETREFCOUNT_METHODDEF SYS_GETRECURSIONLIMIT_METHODDEF {"getsizeof", _PyCFunction_CAST(sys_getsizeof), METH_VARARGS | METH_KEYWORDS, getsizeof_doc}, SYS__GETFRAME_METHODDEF SYS__GETFRAMEMODULENAME_METHODDEF SYS_GETWINDOWSVERSION_METHODDEF SYS__ENABLELEGACYWINDOWSFSENCODING_METHODDEF SYS_INTERN_METHODDEF SYS__IS_INTERNED_METHODDEF SYS_IS_FINALIZING_METHODDEF SYS_MDEBUG_METHODDEF SYS_SETSWITCHINTERVAL_METHODDEF SYS_GETSWITCHINTERVAL_METHODDEF SYS_SETDLOPENFLAGS_METHODDEF {"setprofile", sys_setprofile, METH_O, setprofile_doc}, SYS__SETPROFILEALLTHREADS_METHODDEF SYS_GETPROFILE_METHODDEF SYS_SETRECURSIONLIMIT_METHODDEF {"settrace", sys_settrace, METH_O, settrace_doc}, SYS__SETTRACEALLTHREADS_METHODDEF SYS_GETTRACE_METHODDEF SYS_CALL_TRACING_METHODDEF SYS__DEBUGMALLOCSTATS_METHODDEF 
SYS_SET_COROUTINE_ORIGIN_TRACKING_DEPTH_METHODDEF SYS_GET_COROUTINE_ORIGIN_TRACKING_DEPTH_METHODDEF {"set_asyncgen_hooks", _PyCFunction_CAST(sys_set_asyncgen_hooks), METH_VARARGS | METH_KEYWORDS, set_asyncgen_hooks_doc}, SYS_GET_ASYNCGEN_HOOKS_METHODDEF SYS_GETANDROIDAPILEVEL_METHODDEF SYS_ACTIVATE_STACK_TRAMPOLINE_METHODDEF SYS_DEACTIVATE_STACK_TRAMPOLINE_METHODDEF SYS_IS_STACK_TRAMPOLINE_ACTIVE_METHODDEF SYS_UNRAISABLEHOOK_METHODDEF SYS_GET_INT_MAX_STR_DIGITS_METHODDEF SYS_SET_INT_MAX_STR_DIGITS_METHODDEF #ifdef Py_STATS SYS__STATS_ON_METHODDEF SYS__STATS_OFF_METHODDEF SYS__STATS_CLEAR_METHODDEF SYS__STATS_DUMP_METHODDEF #endif SYS__GET_CPU_COUNT_CONFIG_METHODDEF {NULL, NULL} // sentinel }; static PyObject * list_builtin_module_names(void) { PyObject *list = _PyImport_GetBuiltinModuleNames(); if (list == NULL) { return NULL; } if (PyList_Sort(list) != 0) { goto error; } PyObject *tuple = PyList_AsTuple(list); Py_DECREF(list); return tuple; error: Py_DECREF(list); return NULL; } static PyObject * list_stdlib_module_names(void) { Py_ssize_t len = Py_ARRAY_LENGTH(_Py_stdlib_module_names); PyObject *names = PyTuple_New(len); if (names == NULL) { return NULL; } for (Py_ssize_t i = 0; i < len; i++) { PyObject *name = PyUnicode_FromString(_Py_stdlib_module_names[i]); if (name == NULL) { Py_DECREF(names); return NULL; } PyTuple_SET_ITEM(names, i, name); } PyObject *set = PyObject_CallFunction((PyObject *)&PyFrozenSet_Type, "(O)", names); Py_DECREF(names); return set; } /* Pre-initialization support for sys.warnoptions and sys._xoptions * * Modern internal code paths: * These APIs get called after _Py_InitializeCore and get to use the * regular CPython list, dict, and unicode APIs. * * Legacy embedding code paths: * The multi-phase initialization API isn't public yet, so embedding * apps still need to be able configure sys.warnoptions and sys._xoptions * before they call Py_Initialize. 
To support this, we stash copies of * the supplied wchar * sequences in linked lists, and then migrate the * contents of those lists to the sys module in _PyInitializeCore. * */ struct _preinit_entry { wchar_t *value; struct _preinit_entry *next; }; typedef struct _preinit_entry *_Py_PreInitEntry; static _Py_PreInitEntry _preinit_warnoptions = NULL; static _Py_PreInitEntry _preinit_xoptions = NULL; static _Py_PreInitEntry _alloc_preinit_entry(const wchar_t *value) { /* To get this to work, we have to initialize the runtime implicitly */ _PyRuntime_Initialize(); /* Force default allocator, so we can ensure that it also gets used to * destroy the linked list in _clear_preinit_entries. */ PyMemAllocatorEx old_alloc; _PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, &old_alloc); _Py_PreInitEntry node = PyMem_RawCalloc(1, sizeof(*node)); if (node != NULL) { node->value = _PyMem_RawWcsdup(value); if (node->value == NULL) { PyMem_RawFree(node); node = NULL; }; }; PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &old_alloc); return node; } static int _append_preinit_entry(_Py_PreInitEntry *optionlist, const wchar_t *value) { _Py_PreInitEntry new_entry = _alloc_preinit_entry(value); if (new_entry == NULL) { return -1; } /* We maintain the linked list in this order so it's easy to play back * the add commands in the same order later on in _Py_InitializeCore */ _Py_PreInitEntry last_entry = *optionlist; if (last_entry == NULL) { *optionlist = new_entry; } else { while (last_entry->next != NULL) { last_entry = last_entry->next; } last_entry->next = new_entry; } return 0; } static void _clear_preinit_entries(_Py_PreInitEntry *optionlist) { _Py_PreInitEntry current = *optionlist; *optionlist = NULL; /* Deallocate the nodes and their contents using the default allocator */ PyMemAllocatorEx old_alloc; _PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, &old_alloc); while (current != NULL) { _Py_PreInitEntry next = current->next; PyMem_RawFree(current->value); PyMem_RawFree(current); current = next; } 
    PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &old_alloc);
}

/* Replay the stashed pre-init warning options into 'options', then drop
 * the stash.  Returns an error status if appending fails. */
PyStatus
_PySys_ReadPreinitWarnOptions(PyWideStringList *options)
{
    PyStatus status;
    _Py_PreInitEntry entry;

    for (entry = _preinit_warnoptions; entry != NULL; entry = entry->next) {
        status = PyWideStringList_Append(options, entry->value);
        if (_PyStatus_EXCEPTION(status)) {
            return status;
        }
    }

    _clear_preinit_entries(&_preinit_warnoptions);
    return _PyStatus_OK();
}

/* Replay the stashed pre-init -X options into config->xoptions, then
 * drop the stash. */
PyStatus
_PySys_ReadPreinitXOptions(PyConfig *config)
{
    PyStatus status;
    _Py_PreInitEntry entry;

    for (entry = _preinit_xoptions; entry != NULL; entry = entry->next) {
        status = PyWideStringList_Append(&config->xoptions, entry->value);
        if (_PyStatus_EXCEPTION(status)) {
            return status;
        }
    }

    _clear_preinit_entries(&_preinit_xoptions);
    return _PyStatus_OK();
}

/* Return sys.warnoptions as a borrowed reference, creating a fresh empty
 * list in sys.__dict__ if it is missing or not a list.
 * Returns NULL with an exception set on failure. */
static PyObject *
get_warnoptions(PyThreadState *tstate)
{
    PyObject *warnoptions = _PySys_GetAttr(tstate, &_Py_ID(warnoptions));
    if (warnoptions == NULL || !PyList_Check(warnoptions)) {
        /* PEP432 TODO: we can reach this if warnoptions is NULL in the main
         * interpreter config. When that happens, we need to properly set
         * the `warnoptions` reference in the main interpreter config as well.
         *
         * For Python 3.7, we shouldn't be able to get here due to the
         * combination of how _PyMainInterpreter_ReadConfig and _PySys_EndInit
         * work, but we expect 3.8+ to make the _PyMainInterpreter_ReadConfig
         * call optional for embedding applications, thus making this
         * reachable again.
         */
        warnoptions = PyList_New(0);
        if (warnoptions == NULL) {
            return NULL;
        }
        if (sys_set_object(tstate->interp, &_Py_ID(warnoptions), warnoptions)) {
            Py_DECREF(warnoptions);
            return NULL;
        }
        /* sysdict now owns the list; drop our reference and return the
         * borrowed one, matching the lookup path above. */
        Py_DECREF(warnoptions);
    }
    return warnoptions;
}

/* Empty sys.warnoptions (or the pre-init stash if no thread state yet). */
void
PySys_ResetWarnOptions(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    if (tstate == NULL) {
        _clear_preinit_entries(&_preinit_warnoptions);
        return;
    }

    PyObject *warnoptions = _PySys_GetAttr(tstate, &_Py_ID(warnoptions));
    if (warnoptions == NULL || !PyList_Check(warnoptions))
        return;
    /* Delete all elements in place: warnoptions[0:len] = [] */
    PyList_SetSlice(warnoptions, 0, PyList_GET_SIZE(warnoptions), NULL);
}

/* Append 'option' to sys.warnoptions; returns 0 on success, -1 with an
 * exception set on failure. */
static int
_PySys_AddWarnOptionWithError(PyThreadState *tstate, PyObject *option)
{
    PyObject *warnoptions = get_warnoptions(tstate);
    if (warnoptions == NULL) {
        return -1;
    }
    if (PyList_Append(warnoptions, option)) {
        return -1;
    }
    return 0;
}

// Removed in Python 3.13 API, but kept for the stable ABI
PyAPI_FUNC(void)
PySys_AddWarnOptionUnicode(PyObject *option)
{
    PyThreadState *tstate = _PyThreadState_GET();
    if (_PySys_AddWarnOptionWithError(tstate, option) < 0) {
        /* No return value, therefore clear error state if possible */
        if (tstate) {
            _PyErr_Clear(tstate);
        }
    }
}

// Removed in Python 3.13 API, but kept for the stable ABI
PyAPI_FUNC(void)
PySys_AddWarnOption(const wchar_t *s)
{
    PyThreadState *tstate = _PyThreadState_GET();
    if (tstate == NULL) {
        /* Runtime not ready yet: stash for _Py_InitializeCore to replay. */
        _append_preinit_entry(&_preinit_warnoptions, s);
        return;
    }
    PyObject *unicode;
    unicode = PyUnicode_FromWideChar(s, -1);
    if (unicode == NULL)
        return;
    /* Delegate to the (deprecated) object-based variant; suppress the
     * deprecation diagnostics for this internal call. */
    _Py_COMP_DIAG_PUSH
    _Py_COMP_DIAG_IGNORE_DEPR_DECLS
    PySys_AddWarnOptionUnicode(unicode);
    _Py_COMP_DIAG_POP
    Py_DECREF(unicode);
}

// Removed in Python 3.13 API, but kept for the stable ABI
PyAPI_FUNC(int)
PySys_HasWarnOptions(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    PyObject *warnoptions = _PySys_GetAttr(tstate, &_Py_ID(warnoptions));
    return (warnoptions != NULL && PyList_Check(warnoptions)
            && PyList_GET_SIZE(warnoptions) > 0);
}

/* Return sys._xoptions as a borrowed reference, creating a fresh empty
 * dict in sys.__dict__ if it is missing or not a dict (mirrors
 * get_warnoptions above).  NULL with exception set on failure. */
static PyObject *
get_xoptions(PyThreadState
*tstate)
{
    PyObject *xoptions = _PySys_GetAttr(tstate, &_Py_ID(_xoptions));
    if (xoptions == NULL || !PyDict_Check(xoptions)) {
        /* PEP432 TODO: we can reach this if xoptions is NULL in the main
         * interpreter config. When that happens, we need to properly set
         * the `xoptions` reference in the main interpreter config as well.
         *
         * For Python 3.7, we shouldn't be able to get here due to the
         * combination of how _PyMainInterpreter_ReadConfig and _PySys_EndInit
         * work, but we expect 3.8+ to make the _PyMainInterpreter_ReadConfig
         * call optional for embedding applications, thus making this
         * reachable again.
         */
        xoptions = PyDict_New();
        if (xoptions == NULL) {
            return NULL;
        }
        if (sys_set_object(tstate->interp, &_Py_ID(_xoptions), xoptions)) {
            Py_DECREF(xoptions);
            return NULL;
        }
        /* sysdict now owns the dict; return the borrowed reference. */
        Py_DECREF(xoptions);
    }
    return xoptions;
}

/* Parse a "-X name[=value]" style string 's' and record it in
 * sys._xoptions.  A bare "name" maps to True; "name=value" maps the name
 * to the value string.  Returns 0 on success, -1 with an exception set. */
static int
_PySys_AddXOptionWithError(const wchar_t *s)
{
    PyObject *name = NULL, *value = NULL;

    PyThreadState *tstate = _PyThreadState_GET();
    PyObject *opts = get_xoptions(tstate);
    if (opts == NULL) {
        goto error;
    }

    const wchar_t *name_end = wcschr(s, L'=');
    if (!name_end) {
        /* No '=': the whole string is the option name, value is True. */
        name = PyUnicode_FromWideChar(s, -1);
        if (name == NULL) {
            goto error;
        }
        value = Py_NewRef(Py_True);
    }
    else {
        name = PyUnicode_FromWideChar(s, name_end - s);
        if (name == NULL) {
            goto error;
        }
        value = PyUnicode_FromWideChar(name_end + 1, -1);
        if (value == NULL) {
            goto error;
        }
    }
    if (PyDict_SetItem(opts, name, value) < 0) {
        goto error;
    }
    Py_DECREF(name);
    Py_DECREF(value);
    return 0;

error:
    Py_XDECREF(name);
    Py_XDECREF(value);
    return -1;
}

// Removed in Python 3.13 API, but kept for the stable ABI
PyAPI_FUNC(void)
PySys_AddXOption(const wchar_t *s)
{
    PyThreadState *tstate = _PyThreadState_GET();
    if (tstate == NULL) {
        /* Runtime not ready yet: stash for later replay. */
        _append_preinit_entry(&_preinit_xoptions, s);
        return;
    }
    if (_PySys_AddXOptionWithError(s) < 0) {
        /* No return value, therefore clear error state if possible */
        _PyErr_Clear(tstate);
    }
}

/* Return sys._xoptions (borrowed reference; may be NULL on error). */
PyObject *
PySys_GetXOptions(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    return
get_xoptions(tstate); } /* XXX This doc string is too long to be a single string literal in VC++ 5.0. Two literals concatenated works just fine. If you have a K&R compiler or other abomination that however *does* understand longer strings, get rid of the !!! comment in the middle and the quotes that surround it. */ PyDoc_VAR(sys_doc) = PyDoc_STR( "This module provides access to some objects used or maintained by the\n\ interpreter and to functions that interact strongly with the interpreter.\n\ \n\ Dynamic objects:\n\ \n\ argv -- command line arguments; argv[0] is the script pathname if known\n\ path -- module search path; path[0] is the script directory, else ''\n\ modules -- dictionary of loaded modules\n\ \n\ displayhook -- called to show results in an interactive session\n\ excepthook -- called to handle any uncaught exception other than SystemExit\n\ To customize printing in an interactive session or to install a custom\n\ top-level exception handler, assign other functions to replace these.\n\ \n\ stdin -- standard input file object; used by input()\n\ stdout -- standard output file object; used by print()\n\ stderr -- standard error object; used for error messages\n\ By assigning other file objects (or objects that behave like files)\n\ to these, it is possible to redirect all of the interpreter's I/O.\n\ \n\ last_exc - the last uncaught exception\n\ Only available in an interactive session after a\n\ traceback has been printed.\n\ last_type -- type of last uncaught exception\n\ last_value -- value of last uncaught exception\n\ last_traceback -- traceback of last uncaught exception\n\ These three are the (deprecated) legacy representation of last_exc.\n\ " ) /* concatenating string here */ PyDoc_STR( "\n\ Static objects:\n\ \n\ builtin_module_names -- tuple of module names built into this interpreter\n\ copyright -- copyright notice pertaining to this interpreter\n\ exec_prefix -- prefix used to find the machine-specific Python library\n\ executable -- 
absolute path of the executable binary of the Python interpreter\n\ float_info -- a named tuple with information about the float implementation.\n\ float_repr_style -- string indicating the style of repr() output for floats\n\ hash_info -- a named tuple with information about the hash algorithm.\n\ hexversion -- version information encoded as a single integer\n\ implementation -- Python implementation information.\n\ int_info -- a named tuple with information about the int implementation.\n\ maxsize -- the largest supported length of containers.\n\ maxunicode -- the value of the largest Unicode code point\n\ platform -- platform identifier\n\ prefix -- prefix used to find the Python library\n\ thread_info -- a named tuple with information about the thread implementation.\n\ version -- the version of this interpreter as a string\n\ version_info -- version information as a named tuple\n\ " ) #ifdef MS_COREDLL /* concatenating string here */ PyDoc_STR( "dllhandle -- [Windows only] integer handle of the Python DLL\n\ winver -- [Windows only] version number of the Python DLL\n\ " ) #endif /* MS_COREDLL */ #ifdef MS_WINDOWS /* concatenating string here */ PyDoc_STR( "_enablelegacywindowsfsencoding -- [Windows only]\n\ " ) #endif PyDoc_STR( "__stdin__ -- the original stdin; don't touch!\n\ __stdout__ -- the original stdout; don't touch!\n\ __stderr__ -- the original stderr; don't touch!\n\ __displayhook__ -- the original displayhook; don't touch!\n\ __excepthook__ -- the original excepthook; don't touch!\n\ \n\ Functions:\n\ \n\ displayhook() -- print an object to the screen, and save it in builtins._\n\ excepthook() -- print an exception and its traceback to sys.stderr\n\ exception() -- return the current thread's active exception\n\ exc_info() -- return information about the current thread's active exception\n\ exit() -- exit the interpreter by raising SystemExit\n\ getdlopenflags() -- returns flags to be used for dlopen() calls\n\ getprofile() -- get the global 
profiling function\n\ getrefcount() -- return the reference count for an object (plus one :-)\n\ getrecursionlimit() -- return the max recursion depth for the interpreter\n\ getsizeof() -- return the size of an object in bytes\n\ gettrace() -- get the global debug tracing function\n\ setdlopenflags() -- set the flags to be used for dlopen() calls\n\ setprofile() -- set the global profiling function\n\ setrecursionlimit() -- set the max recursion depth for the interpreter\n\ settrace() -- set the global debug tracing function\n\ " ) /* end of sys_doc */ ; PyDoc_STRVAR(flags__doc__, "sys.flags\n\ \n\ Flags provided through command line arguments or environment vars."); static PyTypeObject FlagsType; static PyStructSequence_Field flags_fields[] = { {"debug", "-d"}, {"inspect", "-i"}, {"interactive", "-i"}, {"optimize", "-O or -OO"}, {"dont_write_bytecode", "-B"}, {"no_user_site", "-s"}, {"no_site", "-S"}, {"ignore_environment", "-E"}, {"verbose", "-v"}, {"bytes_warning", "-b"}, {"quiet", "-q"}, {"hash_randomization", "-R"}, {"isolated", "-I"}, {"dev_mode", "-X dev"}, {"utf8_mode", "-X utf8"}, {"warn_default_encoding", "-X warn_default_encoding"}, {"safe_path", "-P"}, {"int_max_str_digits", "-X int_max_str_digits"}, {0} }; static PyStructSequence_Desc flags_desc = { "sys.flags", /* name */ flags__doc__, /* doc */ flags_fields, /* fields */ 18 }; static int set_flags_from_config(PyInterpreterState *interp, PyObject *flags) { const PyPreConfig *preconfig = &interp->runtime->preconfig; const PyConfig *config = _PyInterpreterState_GetConfig(interp); // _PySys_UpdateConfig() modifies sys.flags in-place: // Py_XDECREF() is needed in this case. 
    Py_ssize_t pos = 0;

/* Store 'expr' (a new reference, or NULL on error) into the next slot of
 * the 'flags' struct sequence.  Relies on 'pos' and 'flags' from the
 * enclosing scope; Py_XDECREF drops any previous in-place value because
 * _PySys_UpdateConfig() reuses the same object. */
#define SetFlagObj(expr) \
    do { \
        PyObject *value = (expr); \
        if (value == NULL) { \
            return -1; \
        } \
        Py_XDECREF(PyStructSequence_GET_ITEM(flags, pos)); \
        PyStructSequence_SET_ITEM(flags, pos, value); \
        pos++; \
    } while (0)
/* Same, for plain C int flag values. */
#define SetFlag(expr) SetFlagObj(PyLong_FromLong(expr))

    /* NOTE: the order here must match flags_fields[] above. */
    SetFlag(config->parser_debug);
    SetFlag(config->inspect);
    SetFlag(config->interactive);
    SetFlag(config->optimization_level);
    SetFlag(!config->write_bytecode);
    SetFlag(!config->user_site_directory);
    SetFlag(!config->site_import);
    SetFlag(!config->use_environment);
    SetFlag(config->verbose);
    SetFlag(config->bytes_warning);
    SetFlag(config->quiet);
    SetFlag(config->use_hash_seed == 0 || config->hash_seed != 0);
    SetFlag(config->isolated);
    SetFlagObj(PyBool_FromLong(config->dev_mode));
    SetFlag(preconfig->utf8_mode);
    SetFlag(config->warn_default_encoding);
    SetFlagObj(PyBool_FromLong(config->safe_path));
    SetFlag(config->int_max_str_digits);
#undef SetFlagObj
#undef SetFlag
    return 0;
}

/* Create the sys.flags struct sequence from the interpreter's config.
 * Returns a new reference, or NULL with an exception set. */
static PyObject*
make_flags(PyInterpreterState *interp)
{
    PyObject *flags = PyStructSequence_New(&FlagsType);
    if (flags == NULL) {
        return NULL;
    }

    if (set_flags_from_config(interp, flags) < 0) {
        Py_DECREF(flags);
        return NULL;
    }
    return flags;
}

PyDoc_STRVAR(version_info__doc__,
"sys.version_info\n\
\n\
Version information as a named tuple.");

static PyTypeObject VersionInfoType;

static PyStructSequence_Field version_info_fields[] = {
    {"major", "Major release number"},
    {"minor", "Minor release number"},
    {"micro", "Patch release number"},
    {"releaselevel", "'alpha', 'beta', 'candidate', or 'final'"},
    {"serial", "Serial release number"},
    {0}
};

static PyStructSequence_Desc version_info_desc = {
    "sys.version_info", /* name */
    version_info__doc__, /* doc */
    version_info_fields, /* fields */
    5
};

/* Create the sys.version_info struct sequence from the PY_* version
 * macros.  Returns a new reference, or NULL with an exception set. */
static PyObject *
make_version_info(PyThreadState *tstate)
{
    PyObject *version_info;
    char *s;
    int pos = 0;

    version_info = PyStructSequence_New(&VersionInfoType);
    if (version_info == NULL) {
        return NULL;
    }

    /*
     * These
release level checks are mutually exclusive and cover * the field, so don't get too fancy with the pre-processor! */ #if PY_RELEASE_LEVEL == PY_RELEASE_LEVEL_ALPHA s = "alpha"; #elif PY_RELEASE_LEVEL == PY_RELEASE_LEVEL_BETA s = "beta"; #elif PY_RELEASE_LEVEL == PY_RELEASE_LEVEL_GAMMA s = "candidate"; #elif PY_RELEASE_LEVEL == PY_RELEASE_LEVEL_FINAL s = "final"; #endif #define SetIntItem(flag) \ PyStructSequence_SET_ITEM(version_info, pos++, PyLong_FromLong(flag)) #define SetStrItem(flag) \ PyStructSequence_SET_ITEM(version_info, pos++, PyUnicode_FromString(flag)) SetIntItem(PY_MAJOR_VERSION); SetIntItem(PY_MINOR_VERSION); SetIntItem(PY_MICRO_VERSION); SetStrItem(s); SetIntItem(PY_RELEASE_SERIAL); #undef SetIntItem #undef SetStrItem if (_PyErr_Occurred(tstate)) { Py_CLEAR(version_info); return NULL; } return version_info; } /* sys.implementation values */ #define NAME "cpython" const char *_PySys_ImplName = NAME; #define MAJOR Py_STRINGIFY(PY_MAJOR_VERSION) #define MINOR Py_STRINGIFY(PY_MINOR_VERSION) #define TAG NAME "-" MAJOR MINOR const char *_PySys_ImplCacheTag = TAG; #undef NAME #undef MAJOR #undef MINOR #undef TAG static PyObject * make_impl_info(PyObject *version_info) { int res; PyObject *impl_info, *value, *ns; impl_info = PyDict_New(); if (impl_info == NULL) return NULL; /* populate the dict */ value = PyUnicode_FromString(_PySys_ImplName); if (value == NULL) goto error; res = PyDict_SetItemString(impl_info, "name", value); Py_DECREF(value); if (res < 0) goto error; value = PyUnicode_FromString(_PySys_ImplCacheTag); if (value == NULL) goto error; res = PyDict_SetItemString(impl_info, "cache_tag", value); Py_DECREF(value); if (res < 0) goto error; res = PyDict_SetItemString(impl_info, "version", version_info); if (res < 0) goto error; value = PyLong_FromLong(PY_VERSION_HEX); if (value == NULL) goto error; res = PyDict_SetItemString(impl_info, "hexversion", value); Py_DECREF(value); if (res < 0) goto error; #ifdef MULTIARCH value = 
PyUnicode_FromString(MULTIARCH); if (value == NULL) goto error; res = PyDict_SetItemString(impl_info, "_multiarch", value); Py_DECREF(value); if (res < 0) goto error; #endif /* dict ready */ ns = _PyNamespace_New(impl_info); Py_DECREF(impl_info); return ns; error: Py_CLEAR(impl_info); return NULL; } #ifdef __EMSCRIPTEN__ PyDoc_STRVAR(emscripten_info__doc__, "sys._emscripten_info\n\ \n\ WebAssembly Emscripten platform information."); static PyTypeObject *EmscriptenInfoType; static PyStructSequence_Field emscripten_info_fields[] = { {"emscripten_version", "Emscripten version (major, minor, micro)"}, {"runtime", "Runtime (Node.JS version, browser user agent)"}, {"pthreads", "pthread support"}, {"shared_memory", "shared memory support"}, {0} }; static PyStructSequence_Desc emscripten_info_desc = { "sys._emscripten_info", /* name */ emscripten_info__doc__ , /* doc */ emscripten_info_fields, /* fields */ 4 }; EM_JS(char *, _Py_emscripten_runtime, (void), { var info; if (typeof navigator == 'object') { info = navigator.userAgent; } else if (typeof process == 'object') { info = "Node.js ".concat(process.version); } else { info = "UNKNOWN"; } var len = lengthBytesUTF8(info) + 1; var res = _malloc(len); if (res) stringToUTF8(info, res, len); #if __wasm64__ return BigInt(res); #else return res; #endif }); static PyObject * make_emscripten_info(void) { PyObject *emscripten_info = NULL; PyObject *version = NULL; char *ua; int pos = 0; emscripten_info = PyStructSequence_New(EmscriptenInfoType); if (emscripten_info == NULL) { return NULL; } version = Py_BuildValue("(iii)", __EMSCRIPTEN_major__, __EMSCRIPTEN_minor__, __EMSCRIPTEN_tiny__); if (version == NULL) { goto error; } PyStructSequence_SET_ITEM(emscripten_info, pos++, version); ua = _Py_emscripten_runtime(); if (ua != NULL) { PyObject *oua = PyUnicode_DecodeUTF8(ua, strlen(ua), "strict"); free(ua); if (oua == NULL) { goto error; } PyStructSequence_SET_ITEM(emscripten_info, pos++, oua); } else { 
PyStructSequence_SET_ITEM(emscripten_info, pos++, Py_NewRef(Py_None)); } #define SetBoolItem(flag) \ PyStructSequence_SET_ITEM(emscripten_info, pos++, PyBool_FromLong(flag)) #ifdef __EMSCRIPTEN_PTHREADS__ SetBoolItem(1); #else SetBoolItem(0); #endif #ifdef __EMSCRIPTEN_SHARED_MEMORY__ SetBoolItem(1); #else SetBoolItem(0); #endif #undef SetBoolItem if (PyErr_Occurred()) { goto error; } return emscripten_info; error: Py_CLEAR(emscripten_info); return NULL; } #endif // __EMSCRIPTEN__ static struct PyModuleDef sysmodule = { PyModuleDef_HEAD_INIT, "sys", sys_doc, -1, /* multiple "initialization" just copies the module dict. */ sys_methods, NULL, NULL, NULL, NULL }; /* Updating the sys namespace, returning NULL pointer on error */ #define SET_SYS(key, value) \ do { \ PyObject *v = (value); \ if (v == NULL) { \ goto err_occurred; \ } \ res = PyDict_SetItemString(sysdict, key, v); \ Py_DECREF(v); \ if (res < 0) { \ goto err_occurred; \ } \ } while (0) #define SET_SYS_FROM_STRING(key, value) \ SET_SYS(key, PyUnicode_FromString(value)) static PyStatus _PySys_InitCore(PyThreadState *tstate, PyObject *sysdict) { PyObject *version_info; int res; PyInterpreterState *interp = tstate->interp; /* stdin/stdout/stderr are set in pylifecycle.c */ #define COPY_SYS_ATTR(tokey, fromkey) \ SET_SYS(tokey, PyMapping_GetItemString(sysdict, fromkey)) COPY_SYS_ATTR("__displayhook__", "displayhook"); COPY_SYS_ATTR("__excepthook__", "excepthook"); COPY_SYS_ATTR("__breakpointhook__", "breakpointhook"); COPY_SYS_ATTR("__unraisablehook__", "unraisablehook"); #undef COPY_SYS_ATTR SET_SYS_FROM_STRING("version", Py_GetVersion()); SET_SYS("hexversion", PyLong_FromLong(PY_VERSION_HEX)); SET_SYS("_git", Py_BuildValue("(szz)", "CPython", _Py_gitidentifier(), _Py_gitversion())); SET_SYS_FROM_STRING("_framework", _PYTHONFRAMEWORK); SET_SYS("api_version", PyLong_FromLong(PYTHON_API_VERSION)); SET_SYS_FROM_STRING("copyright", Py_GetCopyright()); SET_SYS_FROM_STRING("platform", Py_GetPlatform()); 
SET_SYS("maxsize", PyLong_FromSsize_t(PY_SSIZE_T_MAX)); SET_SYS("float_info", PyFloat_GetInfo()); SET_SYS("int_info", PyLong_GetInfo()); /* initialize hash_info */ if (_PyStructSequence_InitBuiltin(interp, &Hash_InfoType, &hash_info_desc) < 0) { goto type_init_failed; } SET_SYS("hash_info", get_hash_info(tstate)); SET_SYS("maxunicode", PyLong_FromLong(0x10FFFF)); SET_SYS("builtin_module_names", list_builtin_module_names()); SET_SYS("stdlib_module_names", list_stdlib_module_names()); #if PY_BIG_ENDIAN SET_SYS_FROM_STRING("byteorder", "big"); #else SET_SYS_FROM_STRING("byteorder", "little"); #endif #ifdef MS_COREDLL SET_SYS("dllhandle", PyLong_FromVoidPtr(PyWin_DLLhModule)); SET_SYS_FROM_STRING("winver", PyWin_DLLVersionString); #endif #ifdef ABIFLAGS SET_SYS_FROM_STRING("abiflags", ABIFLAGS); #endif #define ENSURE_INFO_TYPE(TYPE, DESC) \ do { \ if (_PyStructSequence_InitBuiltinWithFlags( \ interp, &TYPE, &DESC, Py_TPFLAGS_DISALLOW_INSTANTIATION) < 0) { \ goto type_init_failed; \ } \ } while (0) /* version_info */ ENSURE_INFO_TYPE(VersionInfoType, version_info_desc); version_info = make_version_info(tstate); SET_SYS("version_info", version_info); /* implementation */ SET_SYS("implementation", make_impl_info(version_info)); // sys.flags: updated in-place later by _PySys_UpdateConfig() ENSURE_INFO_TYPE(FlagsType, flags_desc); SET_SYS("flags", make_flags(tstate->interp)); #if defined(MS_WINDOWS) /* getwindowsversion */ ENSURE_INFO_TYPE(WindowsVersionType, windows_version_desc); SET_SYS_FROM_STRING("_vpath", VPATH); #endif #undef ENSURE_INFO_TYPE /* float repr style: 0.03 (short) vs 0.029999999999999999 (legacy) */ #if _PY_SHORT_FLOAT_REPR == 1 SET_SYS_FROM_STRING("float_repr_style", "short"); #else SET_SYS_FROM_STRING("float_repr_style", "legacy"); #endif SET_SYS("thread_info", PyThread_GetInfo()); /* initialize asyncgen_hooks */ if (_PyStructSequence_InitBuiltin(interp, &AsyncGenHooksType, &asyncgen_hooks_desc) < 0) { goto type_init_failed; } #ifdef __EMSCRIPTEN__ if 
(EmscriptenInfoType == NULL) { EmscriptenInfoType = PyStructSequence_NewType(&emscripten_info_desc); if (EmscriptenInfoType == NULL) { goto type_init_failed; } } SET_SYS("_emscripten_info", make_emscripten_info()); #endif /* adding sys.path_hooks and sys.path_importer_cache */ SET_SYS("meta_path", PyList_New(0)); SET_SYS("path_importer_cache", PyDict_New()); SET_SYS("path_hooks", PyList_New(0)); if (_PyErr_Occurred(tstate)) { goto err_occurred; } return _PyStatus_OK(); type_init_failed: return _PyStatus_ERR("failed to initialize a type"); err_occurred: return _PyStatus_ERR("can't initialize sys module"); } static int sys_add_xoption(PyObject *opts, const wchar_t *s) { PyObject *name, *value = NULL; const wchar_t *name_end = wcschr(s, L'='); if (!name_end) { name = PyUnicode_FromWideChar(s, -1); if (name == NULL) { goto error; } value = Py_NewRef(Py_True); } else { name = PyUnicode_FromWideChar(s, name_end - s); if (name == NULL) { goto error; } value = PyUnicode_FromWideChar(name_end + 1, -1); if (value == NULL) { goto error; } } if (PyDict_SetItem(opts, name, value) < 0) { goto error; } Py_DECREF(name); Py_DECREF(value); return 0; error: Py_XDECREF(name); Py_XDECREF(value); return -1; } static PyObject* sys_create_xoptions_dict(const PyConfig *config) { Py_ssize_t nxoption = config->xoptions.length; wchar_t * const * xoptions = config->xoptions.items; PyObject *dict = PyDict_New(); if (dict == NULL) { return NULL; } for (Py_ssize_t i=0; i < nxoption; i++) { const wchar_t *option = xoptions[i]; if (sys_add_xoption(dict, option) < 0) { Py_DECREF(dict); return NULL; } } return dict; } // Update sys attributes for a new PyConfig configuration. // This function also adds attributes that _PySys_InitCore() didn't add. 
/* Refresh the config-derived sys attributes (path, argv, prefixes,
 * flags, ...) from the interpreter's current PyConfig.  Also adds the
 * attributes that _PySys_InitCore() deliberately left out.
 * Returns 0 on success, -1 with an exception set on failure. */
int
_PySys_UpdateConfig(PyThreadState *tstate)
{
    PyInterpreterState *interp = tstate->interp;
    PyObject *sysdict = interp->sysdict;
    const PyConfig *config = _PyInterpreterState_GetConfig(interp);
    int res;

/* NOTE: these helpers expand to SET_SYS, which does
 * `goto err_occurred` on failure. */
#define COPY_LIST(KEY, VALUE) \
    SET_SYS(KEY, _PyWideStringList_AsList(&(VALUE)));

#define SET_SYS_FROM_WSTR(KEY, VALUE) \
    SET_SYS(KEY, PyUnicode_FromWideChar(VALUE, -1));

#define COPY_WSTR(SYS_ATTR, WSTR) \
    if (WSTR != NULL) { \
        SET_SYS_FROM_WSTR(SYS_ATTR, WSTR); \
    }

    if (config->module_search_paths_set) {
        COPY_LIST("path", config->module_search_paths);
    }

    COPY_WSTR("executable", config->executable);
    COPY_WSTR("_base_executable", config->base_executable);
    COPY_WSTR("prefix", config->prefix);
    COPY_WSTR("base_prefix", config->base_prefix);
    COPY_WSTR("exec_prefix", config->exec_prefix);
    COPY_WSTR("base_exec_prefix", config->base_exec_prefix);
    COPY_WSTR("platlibdir", config->platlibdir);

    if (config->pycache_prefix != NULL) {
        SET_SYS_FROM_WSTR("pycache_prefix", config->pycache_prefix);
    }
    else {
        /* No prefix configured: expose None rather than omitting it. */
        if (PyDict_SetItemString(sysdict, "pycache_prefix", Py_None) < 0) {
            return -1;
        }
    }

    COPY_LIST("argv", config->argv);
    COPY_LIST("orig_argv", config->orig_argv);
    COPY_LIST("warnoptions", config->warnoptions);

    SET_SYS("_xoptions", sys_create_xoptions_dict(config));

    const wchar_t *stdlibdir = _Py_GetStdlibDir();
    if (stdlibdir != NULL) {
        SET_SYS_FROM_WSTR("_stdlib_dir", stdlibdir);
    }
    else {
        if (PyDict_SetItemString(sysdict, "_stdlib_dir", Py_None) < 0) {
            return -1;
        }
    }

#undef SET_SYS_FROM_WSTR
#undef COPY_LIST
#undef COPY_WSTR

    // sys.flags: updated in place (created earlier by _PySys_InitCore())
    PyObject *flags = _PySys_GetObject(interp, "flags"); // borrowed ref
    if (flags == NULL) {
        if (!_PyErr_Occurred(tstate)) {
            _PyErr_SetString(tstate, PyExc_RuntimeError, "lost sys.flags");
        }
        return -1;
    }
    if (set_flags_from_config(interp, flags) < 0) {
        return -1;
    }

    SET_SYS("dont_write_bytecode", PyBool_FromLong(!config->write_bytecode));

    if (_PyErr_Occurred(tstate)) {
        goto err_occurred;
    }

    return 0;

err_occurred:
    return -1;
}

#undef SET_SYS
#undef
SET_SYS_FROM_STRING /* Set up a preliminary stderr printer until we have enough infrastructure for the io module in place. Use UTF-8/backslashreplace and ignore EAGAIN errors. */ static PyStatus _PySys_SetPreliminaryStderr(PyObject *sysdict) { PyObject *pstderr = PyFile_NewStdPrinter(fileno(stderr)); if (pstderr == NULL) { goto error; } if (PyDict_SetItem(sysdict, &_Py_ID(stderr), pstderr) < 0) { goto error; } if (PyDict_SetItemString(sysdict, "__stderr__", pstderr) < 0) { goto error; } Py_DECREF(pstderr); return _PyStatus_OK(); error: Py_XDECREF(pstderr); return _PyStatus_ERR("can't set preliminary stderr"); } PyObject *_Py_CreateMonitoringObject(void); /* Create sys module without all attributes. _PySys_UpdateConfig() should be called later to add remaining attributes. */ PyStatus _PySys_Create(PyThreadState *tstate, PyObject **sysmod_p) { assert(!_PyErr_Occurred(tstate)); PyInterpreterState *interp = tstate->interp; PyObject *modules = _PyImport_InitModules(interp); if (modules == NULL) { goto error; } PyObject *sysmod = _PyModule_CreateInitialized(&sysmodule, PYTHON_API_VERSION); if (sysmod == NULL) { return _PyStatus_ERR("failed to create a module object"); } PyObject *sysdict = PyModule_GetDict(sysmod); if (sysdict == NULL) { goto error; } interp->sysdict = Py_NewRef(sysdict); interp->sysdict_copy = PyDict_Copy(sysdict); if (interp->sysdict_copy == NULL) { goto error; } if (PyDict_SetItemString(sysdict, "modules", modules) < 0) { goto error; } PyStatus status = _PySys_SetPreliminaryStderr(sysdict); if (_PyStatus_EXCEPTION(status)) { return status; } status = _PySys_InitCore(tstate, sysdict); if (_PyStatus_EXCEPTION(status)) { return status; } if (_PyImport_FixupBuiltin(sysmod, "sys", modules) < 0) { goto error; } PyObject *monitoring = _Py_CreateMonitoringObject(); if (monitoring == NULL) { goto error; } int err = PyDict_SetItemString(sysdict, "monitoring", monitoring); Py_DECREF(monitoring); if (err < 0) { goto error; } assert(!_PyErr_Occurred(tstate)); 
    /* Success: hand the new module back to the caller. */
    *sysmod_p = sysmod;
    return _PyStatus_OK();

error:
    return _PyStatus_ERR("can't initialize sys module");
}

/* Finalize the static struct-sequence types created for this
 * interpreter (version_info, flags, ...). */
void
_PySys_FiniTypes(PyInterpreterState *interp)
{
    _PyStructSequence_FiniBuiltin(interp, &VersionInfoType);
    _PyStructSequence_FiniBuiltin(interp, &FlagsType);
#if defined(MS_WINDOWS)
    _PyStructSequence_FiniBuiltin(interp, &WindowsVersionType);
#endif
    _PyStructSequence_FiniBuiltin(interp, &Hash_InfoType);
    _PyStructSequence_FiniBuiltin(interp, &AsyncGenHooksType);
#ifdef __EMSCRIPTEN__
    /* The heap type is shared; only the main interpreter releases it. */
    if (_Py_IsMainInterpreter(interp)) {
        Py_CLEAR(EmscriptenInfoType);
    }
#endif
}

/* Split 'path' on 'delim' and return the pieces as a new list of str
 * objects (the value for sys.path).  NULL with exception set on error. */
static PyObject *
makepathobject(const wchar_t *path, wchar_t delim)
{
    int i, n;
    const wchar_t *p;
    PyObject *v, *w;

    /* Count the entries: one more than the number of delimiters. */
    n = 1;
    p = path;
    while ((p = wcschr(p, delim)) != NULL) {
        n++;
        p++;
    }
    v = PyList_New(n);
    if (v == NULL)
        return NULL;
    for (i = 0; ; i++) {
        p = wcschr(path, delim);
        if (p == NULL)
            p = path + wcslen(path); /* End of string */
        w = PyUnicode_FromWideChar(path, (Py_ssize_t)(p - path));
        if (w == NULL) {
            Py_DECREF(v);
            return NULL;
        }
        PyList_SET_ITEM(v, i, w);
        if (*p == '\0')
            break;
        path = p+1;
    }
    return v;
}

// Removed in Python 3.13 API, but kept for the stable ABI
PyAPI_FUNC(void)
PySys_SetPath(const wchar_t *path)
{
    PyObject *v;
    if ((v = makepathobject(path, DELIM)) == NULL)
        Py_FatalError("can't create sys.path");
    PyInterpreterState *interp = _PyInterpreterState_GET();
    if (sys_set_object(interp, &_Py_ID(path), v) != 0) {
        Py_FatalError("can't assign sys.path");
    }
    Py_DECREF(v);
}

/* Build the sys.argv list from a C argv array.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *
make_sys_argv(int argc, wchar_t * const * argv)
{
    PyObject *list = PyList_New(argc);
    if (list == NULL) {
        return NULL;
    }

    for (Py_ssize_t i = 0; i < argc; i++) {
        PyObject *v = PyUnicode_FromWideChar(argv[i], -1);
        if (v == NULL) {
            Py_DECREF(list);
            return NULL;
        }
        PyList_SET_ITEM(list, i, v);
    }
    return list;
}

// Removed in Python 3.13 API, but kept for the stable ABI
PyAPI_FUNC(void)
PySys_SetArgvEx(int argc, wchar_t **argv, int updatepath)
{
    wchar_t* empty_argv[1] = {L""};
    PyThreadState *tstate =
_PyThreadState_GET(); if (argc < 1 || argv == NULL) { /* Ensure at least one (empty) argument is seen */ argv = empty_argv; argc = 1; } PyObject *av = make_sys_argv(argc, argv); if (av == NULL) { Py_FatalError("no mem for sys.argv"); } if (sys_set_object_str(tstate->interp, "argv", av) != 0) { Py_DECREF(av); Py_FatalError("can't assign sys.argv"); } Py_DECREF(av); if (updatepath) { /* If argv[0] is not '-c' nor '-m', prepend argv[0] to sys.path. If argv[0] is a symlink, use the real path. */ const PyWideStringList argv_list = {.length = argc, .items = argv}; PyObject *path0 = NULL; if (_PyPathConfig_ComputeSysPath0(&argv_list, &path0)) { if (path0 == NULL) { Py_FatalError("can't compute path0 from argv"); } PyObject *sys_path = _PySys_GetAttr(tstate, &_Py_ID(path)); if (sys_path != NULL) { if (PyList_Insert(sys_path, 0, path0) < 0) { Py_DECREF(path0); Py_FatalError("can't prepend path0 to sys.path"); } } Py_DECREF(path0); } } } // Removed in Python 3.13 API, but kept for the stable ABI PyAPI_FUNC(void) PySys_SetArgv(int argc, wchar_t **argv) { _Py_COMP_DIAG_PUSH _Py_COMP_DIAG_IGNORE_DEPR_DECLS PySys_SetArgvEx(argc, argv, Py_IsolatedFlag == 0); _Py_COMP_DIAG_POP } /* Reimplementation of PyFile_WriteString() no calling indirectly PyErr_CheckSignals(): avoid the call to PyObject_Str(). */ static int sys_pyfile_write_unicode(PyObject *unicode, PyObject *file) { if (file == NULL) return -1; assert(unicode != NULL); PyObject *result = PyObject_CallMethodOneArg(file, &_Py_ID(write), unicode); if (result == NULL) { return -1; } Py_DECREF(result); return 0; } static int sys_pyfile_write(const char *text, PyObject *file) { PyObject *unicode = NULL; int err; if (file == NULL) return -1; unicode = PyUnicode_FromString(text); if (unicode == NULL) return -1; err = sys_pyfile_write_unicode(unicode, file); Py_DECREF(unicode); return err; } /* APIs to write to sys.stdout or sys.stderr using a printf-like interface. Adapted from code submitted by Just van Rossum. 
   PySys_WriteStdout(format, ...)
   PySys_WriteStderr(format, ...)

   The first function writes to sys.stdout; the second to sys.stderr.  When
   there is a problem, they write to the real (C level) stdout or stderr;
   no exceptions are raised.

   PyErr_CheckSignals() is not called to avoid the execution of the Python
   signal handlers: they may raise a new exception whereas sys_write()
   ignores all exceptions.

   Both take a printf-style format string as their first argument followed
   by a variable length argument list determined by the format string.

   *** WARNING ***

   The format should limit the total size of the formatted output string to
   1000 bytes.  In particular, this means that no unrestricted "%s" formats
   should occur; these should be limited using "%.<N>s" where <N> is a
   decimal number calculated so that <N> plus the maximum size of other
   formatted text does not exceed 1000 bytes.  Also watch out for "%f",
   which can print hundreds of digits for very large numbers.

 */

/* Format into a bounded local buffer and write it to the sys stream
 * named by 'key' (falling back to the C-level 'fp' on failure).  Any
 * pending exception is preserved across the call. */
static void
sys_write(PyObject *key, FILE *fp, const char *format, va_list va)
{
    PyObject *file;
    char buffer[1001];
    int written;
    PyThreadState *tstate = _PyThreadState_GET();

    PyObject *exc = _PyErr_GetRaisedException(tstate);
    file = _PySys_GetAttr(tstate, key);
    written = PyOS_vsnprintf(buffer, sizeof(buffer), format, va);
    if (sys_pyfile_write(buffer, file) != 0) {
        /* Python-level write failed: fall back to the C stream. */
        _PyErr_Clear(tstate);
        fputs(buffer, fp);
    }
    if (written < 0 || (size_t)written >= sizeof(buffer)) {
        /* Output did not fit in the 1000-byte buffer: flag truncation. */
        const char *truncated = "... truncated";
        if (sys_pyfile_write(truncated, file) != 0)
            fputs(truncated, fp);
    }
    _PyErr_SetRaisedException(tstate, exc);
}

void
PySys_WriteStdout(const char *format, ...)
{
    va_list va;

    va_start(va, format);
    sys_write(&_Py_ID(stdout), stdout, format, va);
    va_end(va);
}

void
PySys_WriteStderr(const char *format, ...)
{ va_list va; va_start(va, format); sys_write(&_Py_ID(stderr), stderr, format, va); va_end(va); } static void sys_format(PyObject *key, FILE *fp, const char *format, va_list va) { PyObject *file, *message; const char *utf8; PyThreadState *tstate = _PyThreadState_GET(); PyObject *exc = _PyErr_GetRaisedException(tstate); file = _PySys_GetAttr(tstate, key); message = PyUnicode_FromFormatV(format, va); if (message != NULL) { if (sys_pyfile_write_unicode(message, file) != 0) { _PyErr_Clear(tstate); utf8 = PyUnicode_AsUTF8(message); if (utf8 != NULL) fputs(utf8, fp); } Py_DECREF(message); } _PyErr_SetRaisedException(tstate, exc); } void PySys_FormatStdout(const char *format, ...) { va_list va; va_start(va, format); sys_format(&_Py_ID(stdout), stdout, format, va); va_end(va); } void PySys_FormatStderr(const char *format, ...) { va_list va; va_start(va, format); sys_format(&_Py_ID(stderr), stderr, format, va); va_end(va); } in core * representation of the target metadata cache entry. * This is the same pointer returned by a protect of the * addr and len given above. * * addr: Base address in file of the entry to be serialized. * * This parameter is supplied mainly for sanity checking. * Sanity checks should be performed when compiled in debug * mode, but the parameter may be unused when compiled in * production mode. * * len: Length in bytes of the in file image of the entry to be * serialized. Also the size the image passed to the * serialize callback (discussed below) unless that * value is altered by this function. * * This parameter is supplied mainly for sanity checking. * Sanity checks should be performed when compiled in debug * mode, but the parameter may be unused when compiled in * production mode. * * new_addr_ptr: Pointer to haddr_t. If the entry is moved by * the serialize function, the new on disk base address must * be stored in *new_addr_ptr, and the appropriate flag set * in *flags_ptr. 
* * If the entry is not moved by the serialize function, * *new_addr_ptr is undefined on pre-serialize callback * return. * * new_len_ptr: Pointer to size_t. If the entry is resized by the * serialize function, the new length of the on disk image * must be stored in *new_len_ptr, and the appropriate flag set * in *flags_ptr. * * If the entry is not resized by the pre-serialize function, * *new_len_ptr is undefined on pre-serialize callback * return. * * flags_ptr: Pointer to an unsigned integer used to return flags * indicating whether the preserialize function resized or moved * the entry. If the entry was neither resized or moved, the * serialize function must set *flags_ptr to zero. The * H5C__SERIALIZE_RESIZED_FLAG or H5C__SERIALIZE_MOVED_FLAG must * be set to indicate a resize or move respectively. * * If the H5C__SERIALIZE_RESIZED_FLAG is set, the new length * must be stored in *new_len_ptr. * * If the H5C__SERIALIZE_MOVED_FLAG flag is set, the * new image base address must be stored in *new_addr_ptr. * * Processing in the pre-serialize function should proceed as follows: * * The pre-serialize function must examine the in core representation * indicated by the thing parameter, if the pre-serialize function does * not need to change the size or location of the on-disk image, it must * set *flags_ptr to zero. * * If the size of the on-disk image must be changed, the pre-serialize * function must load the length of the new image into *new_len_ptr, and * set the H5C__SERIALIZE_RESIZED_FLAG in *flags_ptr. * * If the base address of the on disk image must be changed, the * pre-serialize function must set *new_addr_ptr to the new base address, * and set the H5C__SERIALIZE_MOVED_FLAG in *flags_ptr. * * In addition, the pre-serialize callback may perform any other * processing required before the entry is written to disk * * If it is successful, the function must return SUCCEED. 
* * If it fails for any reason, the function must return FAIL and * push error information on the error stack with the error API * routines. * * * SERIALIZE: Pointer to the serialize callback. * * The serialize callback is invoked by the metadata cache whenever * it needs a current on disk image of the metadata entry for purposes * either constructing a journal entry or flushing the entry to disk. * * At this point, the base address and length of the entry's image on * disk must be well known and not change during the serialization * process. * * While any size and/or location changes must have been handled * by a pre-serialize call, the client may elect to handle any other * changes to the entry required to place it in correct form for * writing to disk in this call. * * The typedef for the serialize callback is as follows: * * typedef herr_t (*H5C_serialize_func_t)(const H5F_t *f, * void * image_ptr, * size_t len, * void * thing); * * The parameters of the serialize callback are as follows: * * f: File pointer -- needed if other metadata cache entries * must be modified in the process of serializing the * target entry. * * image_ptr: Pointer to a buffer of length len bytes into which a * serialized image of the target metadata cache entry is * to be written. * * Note that this buffer will not in general be initialized * to any particular value. Thus the serialize function may * not assume any initial value and must set each byte in * the buffer. * * len: Length in bytes of the in file image of the entry to be * serialized. Also the size of *image_ptr (below). * * This parameter is supplied mainly for sanity checking. * Sanity checks should be performed when compiled in debug * mode, but the parameter may be unused when compiled in * production mode. * * thing: Pointer to void containing the address of the in core * representation of the target metadata cache entry. * This is the same pointer returned by a protect of the * addr and len given above. 
* * Processing in the serialize function should proceed as follows: * * If there are any remaining changes to the entry required before * write to disk, they must be dealt with first. * * The serialize function must then examine the in core * representation indicated by the thing parameter, and write a * serialized image of its contents into the provided buffer. * * If it is successful, the function must return SUCCEED. * * If it fails for any reason, the function must return FAIL and * push error information on the error stack with the error API * routines. * * * NOTIFY: Pointer to the notify callback. * * The notify callback is invoked by the metadata cache when a cache * action on an entry has taken/will take place and the client indicates * it wishes to be notified about the action. * * The typedef for the notify callback is as follows: * * typedef herr_t (*H5C_notify_func_t)(H5C_notify_action_t action, * void *thing); * * The parameters of the notify callback are as follows: * * action: An enum indicating the metadata cache action that has taken/ * will take place. * * thing: Pointer to void containing the address of the in core * representation of the target metadata cache entry. This * is the same pointer that would be returned by a protect * of the addr and len of the entry. * * Processing in the notify function should proceed as follows: * * The notify function may perform any action it would like, including * metadata cache calls. * * If the function is successful, it must return SUCCEED. * * If it fails for any reason, the function must return FAIL and * push error information on the error stack with the error API * routines. * * * FREE_ICR: Pointer to the free ICR callback. * * The free ICR callback is invoked by the metadata cache when it * wishes to evict an entry, and needs the client to free the memory * allocated for the in core representation. 
* * The typedef for the free ICR callback is as follows: * * typedef herr_t (*H5C_free_icr_func_t)(void * thing)); * * The parameters of the free ICR callback are as follows: * * thing: Pointer to void containing the address of the in core * representation of the target metadata cache entry. This * is the same pointer that would be returned by a protect * of the addr and len of the entry. * * Processing in the free ICR function should proceed as follows: * * The free ICR function must free all memory allocated to the * in core representation. * * If the function is successful, it must return SUCCEED. * * If it fails for any reason, the function must return FAIL and * push error information on the error stack with the error API * routines. * * At least when compiled with debug, it would be useful if the * free ICR call would fail if the in core representation has been * modified since the last serialize callback. * * GET_FSF_SIZE: Pointer to the get file space free size callback. * * In principle, there is no need for the get file space free size * callback. However, as an optimization, it is sometimes convenient * to allocate and free file space for a number of cache entries * simultaneously in a single contiguous block of file space. * * File space allocation is done by the client, so the metadata cache * need not be involved. However, since the metadata cache typically * handles file space release when an entry is destroyed, some * adjustment on the part of the metadata cache is required for this * operation. * * The get file space free size callback exists to support this * operation. * * If a group of cache entries that were allocated as a group are to * be discarded and their file space released, the type of the first * (i.e. lowest address) entry in the group must implement the * get free file space size callback. 
 *
 * To free the file space of all entries in the group in a single
 * operation, first expunge all entries other than the first without
 * the free file space flag.
 *
 * Then, to complete the operation, unprotect or expunge the first
 * entry in the block with the free file space flag set.  Since
 * the get free file space callback is implemented, the metadata
 * cache will use this callback to get the size of the block to be
 * freed, instead of using the size of the entry as is done otherwise.
 *
 * At present this callback is used only by the H5FA and H5EA dblock
 * and dblock page client classes.
 *
 * The typedef for the get_fsf_size callback is as follows:
 *
 *   typedef herr_t (*H5C_get_fsf_size_t)(const void * thing,
 *                                        size_t *fsf_size_ptr);
 *
 * The parameters of the get_fsf_size callback are as follows:
 *
 * thing:  Pointer to void containing the address of the in core
 *         representation of the target metadata cache entry.  This
 *         is the same pointer that would be returned by a protect()
 *         call of the associated addr and len.
 *
 * fs_size_ptr:  Pointer to size_t in which the callback will return
 *         the size of the piece of file space to be freed.  Note
 *         that the space to be freed is presumed to have the same
 *         base address as the cache entry.
 *
 * The function simply returns the size of the block of file space
 * to be freed in *fsf_size_ptr.
 *
 * If the function is successful, it must return SUCCEED.
 *
 * If it fails for any reason, the function must return FAIL and
 * push error information on the error stack with the error API
 * routines.
 *
 ***************************************************************************/

/* Actions that can be reported to 'notify' client callback */
typedef enum H5C_notify_action_t {
    H5C_NOTIFY_ACTION_AFTER_INSERT,       /* Entry has been added to the cache
                                           * via the insert call
                                           */
    H5C_NOTIFY_ACTION_AFTER_LOAD,         /* Entry has been loaded into the
                                           * cache from file via the protect
                                           * call
                                           */
    H5C_NOTIFY_ACTION_AFTER_FLUSH,        /* Entry has just been flushed to
                                           * file.
                                           */
    H5C_NOTIFY_ACTION_BEFORE_EVICT,       /* Entry is about to be evicted
                                           * from cache.
                                           */
    H5C_NOTIFY_ACTION_ENTRY_DIRTIED,      /* Entry has been marked dirty. */
    H5C_NOTIFY_ACTION_ENTRY_CLEANED,      /* Entry has been marked clean. */
    H5C_NOTIFY_ACTION_CHILD_DIRTIED,      /* Dependent child has been marked dirty. */
    H5C_NOTIFY_ACTION_CHILD_CLEANED,      /* Dependent child has been marked clean. */
    H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, /* Dependent child has been marked unserialized. */
    H5C_NOTIFY_ACTION_CHILD_SERIALIZED    /* Dependent child has been marked serialized. */
} H5C_notify_action_t;

/* Cache client callback function pointers */
/* (Contracts for each callback are spelled out in the long header comment
 * above.) */
typedef herr_t (*H5C_get_initial_load_size_func_t)(void *udata_ptr, size_t *image_len_ptr);
typedef herr_t (*H5C_get_final_load_size_func_t)(const void *image_ptr, size_t image_len, void *udata_ptr,
                                                 size_t *actual_len_ptr);
typedef htri_t (*H5C_verify_chksum_func_t)(const void *image_ptr, size_t len, void *udata_ptr);
typedef void *(*H5C_deserialize_func_t)(const void *image_ptr, size_t len, void *udata_ptr,
                                        hbool_t *dirty_ptr);
typedef herr_t (*H5C_image_len_func_t)(const void *thing, size_t *image_len_ptr);
typedef herr_t (*H5C_pre_serialize_func_t)(H5F_t *f, hid_t dxpl_id, void *thing, haddr_t addr, size_t len,
                                           haddr_t *new_addr_ptr, size_t *new_len_ptr, unsigned *flags_ptr);
typedef herr_t (*H5C_serialize_func_t)(const H5F_t *f, void *image_ptr, size_t len, void *thing);
typedef herr_t (*H5C_notify_func_t)(H5C_notify_action_t action, void *thing);
typedef herr_t (*H5C_free_icr_func_t)(void *thing);
typedef herr_t (*H5C_get_fsf_size_t)(const void *thing, size_t *fsf_size_ptr);

/* Metadata cache client class definition */
typedef struct H5C_class_t {
    int                              id;        /* unique client class id */
    const char *                     name;      /* class name, for debugging */
    H5FD_mem_t                       mem_type;  /* file memory type for entries of this class */
    unsigned                         flags;     /* class behavior flags */
    H5C_get_initial_load_size_func_t get_initial_load_size;
    H5C_get_final_load_size_func_t   get_final_load_size;
    H5C_verify_chksum_func_t         verify_chksum;
    H5C_deserialize_func_t           deserialize;
    H5C_image_len_func_t             image_len;
    H5C_pre_serialize_func_t         pre_serialize;
    H5C_serialize_func_t             serialize;
    H5C_notify_func_t                notify;
    H5C_free_icr_func_t              free_icr;
    H5C_get_fsf_size_t               fsf_size;
} H5C_class_t;

/* Type definitions of callback functions used by the cache as a whole */
typedef herr_t (*H5C_write_permitted_func_t)(const H5F_t *f, hbool_t *write_permitted_ptr);
typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr, hbool_t was_dirty, unsigned flags);

/****************************************************************************
 *
 * H5C_ring_t & associated #defines
 *
 * The metadata cache uses the concept of rings to order the flushes of
 * classes of entries.  In this arrangement, each entry in the cache is
 * assigned to a ring, and on flush, the members of the outermost ring
 * are flushed first, followed by the next outermost, and so on with the
 * members of the innermost ring being flushed last.
 *
 * Note that flush dependencies are used to order flushes within rings.
 *
 * Note also that at the conceptual level, rings are arguably superfluous,
 * as a similar effect could be obtained via the flush dependency mechanism.
 * However, this would require all entries in the cache to participate in a
 * flush dependency -- with the implied setup and takedown overhead and
 * added complexity.  Further, the flush ordering between rings need only
 * be enforced on flush operations, and thus the use of flush dependencies
 * instead would apply unnecessary constraints on flushes under normal
 * operating circumstances.
 *
 * As of this writing, all metadata entries pertaining to data sets and
 * groups must be flushed first, and are thus assigned to the outermost
 * ring.
 *
 * Free space managers managing file space must be flushed next,
 * and are assigned to the second and third outermost rings.  Two rings
 * are used here as the raw data free space manager must be flushed before
 * the metadata free space manager.
 *
 * The object header and associated chunks used to implement superblock
 * extension messages must be flushed next, and are thus assigned to
 * the fourth outermost ring.
 *
 * The superblock proper must be flushed last, and is thus assigned to
 * the innermost ring.
 *
 * The H5C_ring_t and the associated #defines below are used to define
 * the rings.  Each entry must be assigned to the appropriate ring on
 * insertion or protect.
 *
 * Note that H5C_ring_t was originally an enumerated type.  It was
 * converted to an integer and a set of #defines for convenience in
 * debugging.
 */
#define H5C_RING_UNDEFINED 0 /* shouldn't appear in the cache */
#define H5C_RING_USER      1 /* outermost ring */
#define H5C_RING_RDFSM     2
#define H5C_RING_MDFSM     3
#define H5C_RING_SBE       4
#define H5C_RING_SB        5 /* innermost ring */
#define H5C_RING_NTYPES    6

typedef int H5C_ring_t;

/****************************************************************************
 *
 * structure H5C_cache_entry_t
 *
 * Instances of the H5C_cache_entry_t structure are used to store cache
 * entries in a hash table and sometimes in a skip list.
 * See H5SL.c for the particulars of the skip list.
 *
 * In typical application, this structure is the first field in a
 * structure to be cached.  For historical reasons, the external module
 * is responsible for managing the is_dirty field (this is no longer
 * completely true.  See the comment on the is_dirty field for details).
 * All other fields are managed by the cache.
 *
 * The fields of this structure are discussed individually below:
 *
 *                                              JRM - 4/26/04
 *
 * magic:       Unsigned 32 bit integer that must always be set to
 *              H5C__H5C_CACHE_ENTRY_T_MAGIC when the entry is valid.
 *              The field must be set to H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC
 *              just before the entry is freed.
 *
 *              This is necessary, as the LRU list can be changed out
 *              from under H5C_make_space_in_cache() by the serialize
 *              callback which may change the size of an existing entry,
 *              and/or load a new entry while serializing the target entry.
* * This in turn can cause a recursive call to * H5C_make_space_in_cache() which may either flush or evict * the next entry that the first invocation of that function * was about to examine. * * The magic field allows H5C_make_space_in_cache() to * detect this case, and re-start its scan from the bottom * of the LRU when this situation occurs. * * cache_ptr: Pointer to the cache that this entry is contained within. * * addr: Base address of the cache entry on disk. * * size: Length of the cache entry on disk in bytes Note that unlike * normal caches, the entries in this cache are of arbitrary size. * * The file space allocations for cache entries implied by the * addr and size fields must be disjoint. * * image_ptr: Pointer to void. When not NULL, this field points to a * dynamically allocated block of size bytes in which the * on disk image of the metadata cache entry is stored. * * If the entry is dirty, the pre-serialize and serialize * callbacks must be used to update this image before it is * written to disk * * image_up_to_date: Boolean flag that is set to TRUE when *image_ptr * is up to date, and set to false when the entry is dirtied. * * type: Pointer to the instance of H5C_class_t containing pointers * to the methods for cache entries of the current type. This * field should be NULL when the instance of H5C_cache_entry_t * is not in use. * * The name is not particularly descriptive, but is retained * to avoid changes in existing code. * * is_dirty: Boolean flag indicating whether the contents of the cache * entry has been modified since the last time it was written * to disk. * * dirtied: Boolean flag used to indicate that the entry has been * dirtied while protected. * * This field is set to FALSE in the protect call, and may * be set to TRUE by the H5C_mark_entry_dirty() call at any * time prior to the unprotect call. 
* * The H5C_mark_entry_dirty() call exists as a convenience * function for the fractal heap code which may not know if * an entry is protected or pinned, but knows that is either * protected or pinned. The dirtied field was added as in * the parallel case, it is necessary to know whether a * protected entry is dirty prior to the protect call. * * is_protected: Boolean flag indicating whether this entry is protected * (or locked, to use more conventional terms). When it is * protected, the entry cannot be flushed or accessed until * it is unprotected (or unlocked -- again to use more * conventional terms). * * Note that protected entries are removed from the LRU lists * and inserted on the protected list. * * is_read_only: Boolean flag that is only meaningful if is_protected is * TRUE. In this circumstance, it indicates whether the * entry has been protected read only, or read/write. * * If the entry has been protected read only (i.e. is_protected * and is_read_only are both TRUE), we allow the entry to be * protected more than once. * * In this case, the number of readers is maintained in the * ro_ref_count field (see below), and unprotect calls simply * decrement that field until it drops to zero, at which point * the entry is actually unprotected. * * ro_ref_count: Integer field used to maintain a count of the number of * outstanding read only protects on this entry. This field * must be zero whenever either is_protected or is_read_only * are TRUE. * * is_pinned: Boolean flag indicating whether the entry has been pinned * in the cache. * * For very hot entries, the protect / unprotect overhead * can become excessive. Thus the cache has been extended * to allow an entry to be "pinned" in the cache. * * Pinning an entry in the cache has several implications: * * 1) A pinned entry cannot be evicted. Thus unprotected * pinned entries must be stored in the pinned entry * list, instead of being managed by the replacement * policy code (LRU at present). 
* * 2) A pinned entry can be accessed or modified at any time. * This places an extra burden on the pre-serialize and * serialize callbacks, which must ensure that a pinned * entry is consistant and ready to write to disk before * generating an image. * * 3) A pinned entry can be marked as dirty (and possibly * change size) while it is unprotected. * * 4) The flush-destroy code must allow pinned entries to * be unpinned (and possibly unprotected) during the * flush. * * JRM -- 3/16/06 * * in_slist: Boolean flag indicating whether the entry is in the skip list * As a general rule, entries are placed in the list when they * are marked dirty. However they may remain in the list after * being flushed. * * Update: Dirty entries are now removed from the skip list * when they are flushed. * * flush_marker: Boolean flag indicating that the entry is to be flushed * the next time H5C_flush_cache() is called with the * H5C__FLUSH_MARKED_ENTRIES_FLAG. The flag is reset when * the entry is flushed for whatever reason. * * flush_me_last: Boolean flag indicating that this entry should not be * flushed from the cache until all other entries without * the flush_me_last flag set have been flushed. * * Note: * * At this time, the flush_me_last * flag will only be applied to one entry, the superblock, * and the code utilizing these flags is protected with HDasserts * to enforce this. This restraint can certainly be relaxed in * the future if the the need for multiple entries getting flushed * last or collectively arises, though the code allowing for that * will need to be expanded and tested appropriately if that * functionality is desired. * * Update: There are now two possible last entries * (superblock and file driver info message). This * number will probably increase as we add superblock * messages. JRM -- 11/18/14 * * clear_on_unprotect: Boolean flag used only in PHDF5. 
When H5C is used * to implement the metadata cache In the parallel case, only * the cache with mpi rank 0 is allowed to actually write to * file -- all other caches must retain dirty entries until they * are advised that the entry is clean. * * This flag is used in the case that such an advisory is * received when the entry is protected. If it is set when an * entry is unprotected, and the dirtied flag is not set in * the unprotect, the entry's is_dirty flag is reset by flushing * it with the H5C__FLUSH_CLEAR_ONLY_FLAG. * * flush_immediately: Boolean flag used only in Phdf5 -- and then only * for H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED. * * When a destributed metadata write is triggered at a * sync point, this field is used to mark entries that * must be flushed before leaving the sync point. At all * other times, this field should be set to FALSE. * * flush_in_progress: Boolean flag that is set to true iff the entry * is in the process of being flushed. This allows the cache * to detect when a call is the result of a flush callback. * * destroy_in_progress: Boolean flag that is set to true iff the entry * is in the process of being flushed and destroyed. * * * Fields supporting rings for flush ordering: * * All entries in the metadata cache are assigned to a ring. On cache * flush, all entries in the outermost ring are flushed first, followed * by all members of the next outermost ring, and so on until the * innermost ring is flushed. Note that this ordering is ONLY applied * in flush and serialize calls. Rings are ignored during normal operations * in which entries are flushed as directed by the replacement policy. * * See the header comment on H5C_ring_t above for further details. * * Note that flush dependencies (see below) are used to order flushes * within rings. Unlike rings, flush dependencies are applied to ALL * writes, not just those triggered by flush or serialize calls. 
* * ring: Instance of H5C_ring_t indicating the ring to which this * entry is assigned. * * * Fields supporting the 'flush dependency' feature: * * Entries in the cache may have 'flush dependencies' on other entries in the * cache. A flush dependency requires that all dirty child entries be flushed * to the file before a dirty parent entry (of those child entries) can be * flushed to the file. This can be used by cache clients to create data * structures that allow Single-Writer/Multiple-Reader (SWMR) access for the * data structure. * * flush_dep_parent: Pointer to the array of flush dependency parent entries * for this entry. * * flush_dep_nparents: Number of flush dependency parent entries for this * entry, i.e. the number of valid elements in flush_dep_parent. * * flush_dep_parent_nalloc: The number of allocated elements in * flush_dep_parent_nalloc. * * flush_dep_nchildren: Number of flush dependency children for this entry. If * this field is nonzero, then this entry must be pinned and * therefore cannot be evicted. * * flush_dep_ndirty_children: Number of flush dependency children that are * either dirty or have a nonzero flush_dep_ndirty_children. If * this field is nonzero, then this entry cannot be flushed. * * flush_dep_nunser_children: Number of flush dependency children * that are either unserialized, or have a non-zero number of * positive number of unserialized children. * * Note that since there is no requirement that a clean entry * be serialized, it is possible that flush_dep_nunser_children * to be greater than flush_dep_ndirty_children. * * This field exist to facilitate correct ordering of entry * serializations when it is necessary to serialize all the * entries in the metadata cache. Thus in the cache * serialization, no entry can be serialized unless this * field contains 0. * * Fields supporting the hash table: * * Entries in the cache are indexed by a more or less conventional hash table. 
* If there are multiple entries in any hash bin, they are stored in a doubly * linked list. * * Addendum: JRM -- 10/14/15 * * We have come to scan all entries in the cache frequently enough that * the cost of doing so by scanning the hash table has become unacceptable. * To reduce this cost, the index now also maintains a doubly linked list * of all entries in the index. This list is known as the index list. * The il_next and il_prev fields discussed below were added to support * the index list. * * ht_next: Next pointer used by the hash table to store multiple * entries in a single hash bin. This field points to the * next entry in the doubly linked list of entries in the * hash bin, or NULL if there is no next entry. * * ht_prev: Prev pointer used by the hash table to store multiple * entries in a single hash bin. This field points to the * previous entry in the doubly linked list of entries in * the hash bin, or NULL if there is no previuos entry. * * il_next: Next pointer used by the index to maintain a doubly linked * list of all entries in the index (and thus in the cache). * This field contains a pointer to the next entry in the * index list, or NULL if there is no next entry. * * il_prev: Prev pointer used by the index to maintain a doubly linked * list of all entries in the index (and thus in the cache). * This field contains a pointer to the previous entry in the * index list, or NULL if there is no previous entry. * * * Fields supporting replacement policies: * * The cache must have a replacement policy, and it will usually be * necessary for this structure to contain fields supporting that policy. * * While there has been interest in several replacement policies for * this cache, the initial development schedule is tight. Thus I have * elected to support only a modified LRU policy for the first cut. * * When additional replacement policies are added, the fields in this * section will be used in different ways or not at all. 
Thus the * documentation of these fields is repeated for each replacement policy. * * Modified LRU: * * When operating in parallel mode, we must ensure that a read does not * cause a write. If it does, the process will hang, as the write will * be collective and the other processes will not know to participate. * * To deal with this issue, I have modified the usual LRU policy by adding * clean and dirty LRU lists to the usual LRU list. When reading in * parallel mode, we evict from the clean LRU list only. This implies * that we must try to ensure that the clean LRU list is reasonably well * stocked. See the comments on H5C_t in H5Cpkg.h for more details. * * Note that even if we start with a completely clean cache, a sequence * of protects without unprotects can empty the clean LRU list. In this * case, the cache must grow temporarily. At the next write, we will * attempt to evict enough entries to get the cache down to its nominal * maximum size. * * The use of the replacement policy fields under the Modified LRU policy * is discussed below: * * next: Next pointer in either the LRU, the protected list, or * the pinned list depending on the current values of * is_protected and is_pinned. If there is no next entry * on the list, this field should be set to NULL. * * prev: Prev pointer in either the LRU, the protected list, * or the pinned list depending on the current values of * is_protected and is_pinned. If there is no previous * entry on the list, this field should be set to NULL. * * aux_next: Next pointer on either the clean or dirty LRU lists. * This entry should be NULL when either is_protected or * is_pinned is true. * * When is_protected and is_pinned are false, and is_dirty is * true, it should point to the next item on the dirty LRU * list. * * When is_protected and is_pinned are false, and is_dirty is * false, it should point to the next item on the clean LRU * list. In either case, when there is no next item, it * should be NULL. 
* * aux_prev: Previous pointer on either the clean or dirty LRU lists. * This entry should be NULL when either is_protected or * is_pinned is true. * * When is_protected and is_pinned are false, and is_dirty is * true, it should point to the previous item on the dirty * LRU list. * * When is_protected and is_pinned are false, and is_dirty * is false, it should point to the previous item on the * clean LRU list. * * In either case, when there is no previous item, it should * be NULL. * * * Fields supporting tagged entries: * * Entries in the cache that belong to a single object in the file are * joined into a doubly-linked list, and are "tagged" with the object header * address for that object's base header "chunk" (which is used as the * canonical address for the object). Global and shared entries are * not tagged. Tagged entries have a pointer to the tag info for the object, * which is shared state for all the entries for that object. * * tl_next: Pointer to the next entry in the tag list for an object. * NULL for the tail entry in the list, as well as untagged * entries. * * tl_prev: Pointer to the previous entry in the tag list for an object. * NULL for the head entry in the list, as well as untagged * entries. * * tag_info: Pointer to the common tag state for all entries belonging to * an object. NULL for untagged entries. * * * Cache entry stats collection fields: * * These fields should only be compiled in when both H5C_COLLECT_CACHE_STATS * and H5C_COLLECT_CACHE_ENTRY_STATS are true. When present, they allow * collection of statistics on individual cache entries. * * accesses: int32_t containing the number of times this cache entry has * been referenced in its lifetime. * * clears: int32_t containing the number of times this cache entry has * been cleared in its life time. * * flushes: int32_t containing the number of times this cache entry has * been flushed to file in its life time. 
 *
 * pins:	int32_t containing the number of times this cache entry has
 *		been pinned in cache in its life time.
 *
 ****************************************************************************/
typedef struct H5C_cache_entry_t {
    uint32_t           magic;            /* sanity-check value -- NOTE(review): expected constant defined elsewhere; confirm */
    H5C_t             *cache_ptr;        /* cache this entry belongs to */
    haddr_t            addr;             /* base file address of the entry */
    size_t             size;             /* size of the entry */
    void              *image_ptr;        /* on-disk image buffer, if any */
    hbool_t            image_up_to_date; /* whether image_ptr matches the current entry state */
    const H5C_class_t *type;             /* class (callback table) for this entry */
    hbool_t            is_dirty;         /* entry has been modified since load/flush */
    hbool_t            dirtied;
    hbool_t            is_protected;     /* entry is on the protected list (see header comment) */
    hbool_t            is_read_only;
    int                ro_ref_count;
    hbool_t            is_pinned;        /* entry is on the pinned list (see header comment) */
    hbool_t            in_slist;
    hbool_t            flush_marker;
    hbool_t            flush_me_last;
#ifdef H5_HAVE_PARALLEL
    hbool_t clear_on_unprotect;
    hbool_t flush_immediately;
    hbool_t coll_access;
#endif /* H5_HAVE_PARALLEL */
    hbool_t flush_in_progress;
    hbool_t destroy_in_progress;

    /* fields supporting rings for purposes of flush ordering */
    H5C_ring_t ring;

    /* fields supporting the 'flush dependency' feature: */
    struct H5C_cache_entry_t **flush_dep_parent;       /* array of parent entries */
    unsigned                   flush_dep_nparents;     /* # of parents in use */
    unsigned                   flush_dep_parent_nalloc; /* allocated length of flush_dep_parent */
    unsigned                   flush_dep_nchildren;
    unsigned                   flush_dep_ndirty_children;
    unsigned                   flush_dep_nunser_children;
    hbool_t                    pinned_from_client;
    hbool_t                    pinned_from_cache;

    /* fields supporting the hash table: */
    struct H5C_cache_entry_t *ht_next;
    struct H5C_cache_entry_t *ht_prev;
    struct H5C_cache_entry_t *il_next;
    struct H5C_cache_entry_t *il_prev;

    /* fields supporting replacement policies:
     * next/prev link the LRU, protected, or pinned list depending on
     * is_protected / is_pinned; aux_next/aux_prev link the clean or dirty
     * LRU list depending on is_dirty (see the header comment above).
     */
    struct H5C_cache_entry_t *next;
    struct H5C_cache_entry_t *prev;
    struct H5C_cache_entry_t *aux_next;
    struct H5C_cache_entry_t *aux_prev;
#ifdef H5_HAVE_PARALLEL
    struct H5C_cache_entry_t *coll_next;
    struct H5C_cache_entry_t *coll_prev;
#endif /* H5_HAVE_PARALLEL */

    /* fields supporting tag lists */
    struct H5C_cache_entry_t *tl_next;  /* next entry in this object's tag list; NULL at tail or if untagged */
    struct H5C_cache_entry_t *tl_prev;  /* previous entry in this object's tag list; NULL at head or if untagged */
    struct H5C_tag_info_t    *tag_info; /* shared tag state for the object; NULL for untagged entries */

#if H5C_COLLECT_CACHE_ENTRY_STATS
    /* cache entry stats fields */
    int32_t accesses; /* # of times this entry has been referenced */
    int32_t clears;   /* # of times this entry has been cleared */
    int32_t flushes;  /* # of times this entry has been flushed to file */
    int32_t pins;     /* # of times this entry has been pinned */
#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
} H5C_cache_entry_t;
/****************************************************************************
 *
 * structure H5C_auto_size_ctl_t
 *
 * Instances of H5C_auto_size_ctl_t are used to get and set the control
 * fields for automatic cache re-sizing.
 *
 * The fields of the structure are discussed individually below:
 *
 * version: Integer field containing the version number of this version
 *	of the H5C_auto_size_ctl_t structure.  Any instance of
 *	H5C_auto_size_ctl_t passed to the cache must have a known
 *	version number, or an error will be flagged.
 *
 * report_fcn: Pointer to the function that is to be called to report
 *	activities each time the auto cache resize code is executed.  If the
 *	field is NULL, no call is made.
 *
 *	If the field is not NULL, it must contain the address of a function
 *	of type H5C_auto_resize_report_fcn.
 *
 * set_initial_size: Boolean flag indicating whether the initial size of the
 *	cache is to be set to the value given in the initial_size field.  If
 *	set_initial_size is FALSE, the initial_size field is ignored.
 *
 * initial_size: If enabled, this field contains the size the cache is
 *	to be set to upon receipt of this structure.  Needless to say,
 *	initial_size must lie in the closed interval [min_size, max_size].
 *
 * min_clean_fraction: double in the range 0 to 1 indicating the fraction
 *	of the cache that is to be kept clean.  This field is only used
 *	in parallel mode.  Typical values are 0.1 to 0.5.
 *
 * max_size: Maximum size to which the cache can be adjusted.  The
 *	supplied value must fall in the closed interval
 *	[MIN_MAX_CACHE_SIZE, MAX_MAX_CACHE_SIZE].  Also, max_size must
 *	be greater than or equal to min_size.
 *
 * min_size: Minimum size to which the cache can be adjusted.  The
 *	supplied value must fall in the closed interval
 *	[MIN_MAX_CACHE_SIZE, MAX_MAX_CACHE_SIZE].  Also, min_size must
 *	be less than or equal to max_size.
 *
 * epoch_length: Number of accesses on the cache over which to collect
 *	hit rate stats before running the automatic cache resize code,
 *	if it is enabled.
 *
 *	At the end of an epoch, we discard prior hit rate data and start
 *	collecting afresh.  The epoch_length must lie in the closed
 *	interval [H5C__MIN_AR_EPOCH_LENGTH, H5C__MAX_AR_EPOCH_LENGTH].
 *
 *
 * Cache size increase control fields:
 *
 * incr_mode: Instance of the H5C_cache_incr_mode enumerated type whose
 *	value indicates how we determine whether the cache size should be
 *	increased.  At present there are two possible values:
 *
 *	H5C_incr__off: Don't attempt to increase the size of the cache
 *		automatically.
 *
 *		When this increment mode is selected, the remaining fields
 *		in the cache size increase section are ignored.
 *
 *	H5C_incr__threshold: Attempt to increase the size of the cache
 *		whenever the average hit rate over the last epoch drops
 *		below the value supplied in the lower_hr_threshold
 *		field.
 *
 *		Note that this attempt will fail if the cache is already
 *		at its maximum size, or if the cache is not already using
 *		all available space.
 *
 * lower_hr_threshold: Lower hit rate threshold.  If the increment mode
 *	(incr_mode) is H5C_incr__threshold and the hit rate drops below the
 *	value supplied in this field in an epoch, increment the cache size by
 *	size_increment.  Note that cache size may not be incremented above
 *	max_size, and that the increment may be further restricted by the
 *	max_increment field if it is enabled.
 *
 *	When enabled, this field must contain a value in the range [0.0, 1.0].
 *	Depending on the incr_mode selected, it may also have to be less than
 *	upper_hr_threshold.
 *
 * increment: Double containing the multiplier used to derive the new
 *	cache size from the old if a cache size increment is triggered.
 *	The increment must be greater than 1.0, and should not exceed 2.0.
 *
 *	The new cache size is obtained by multiplying the current max cache
 *	size by the increment, and then clamping to max_size and to stay
 *	within the max_increment as necessary.
 *
 * apply_max_increment: Boolean flag indicating whether the max_increment
 *	field should be used to limit the maximum cache size increment.
 *
 * max_increment: If enabled by the apply_max_increment field described
 *	above, this field contains the maximum number of bytes by which the
 *	cache size can be increased in a single re-size.
 *
 * flash_incr_mode: Instance of the H5C_cache_flash_incr_mode enumerated
 *	type whose value indicates whether and by what algorithm we should
 *	make flash increases in the size of the cache to accommodate insertion
 *	of large entries and large increases in the size of a single entry.
 *
 *	The addition of the flash increment mode was occasioned by performance
 *	problems that appear when a local heap is increased to a size in excess
 *	of the current cache size.  While the existing re-size code dealt with
 *	this eventually, performance was very bad for the remainder of the
 *	epoch.
 *
 *	At present, there are two possible values for the flash_incr_mode:
 *
 *	H5C_flash_incr__off: Don't perform flash increases in the size of
 *		the cache.
 *
 *	H5C_flash_incr__add_space: Let x be either the size of a newly
 *		inserted entry, or the number of bytes by which the
 *		size of an existing entry has been increased.
 *
 *		If
 *			x > flash_threshold * current max cache size,
 *
 *		increase the current maximum cache size by x * flash_multiple
 *		less any free space in the cache, and start a new epoch.  For
 *		now at least, pay no attention to the maximum increment.
 *
 *
 *	With a little thought, it should be obvious that the above flash
 *	cache size increase algorithm is not sufficient for all
 *	circumstances -- for example, suppose the user round robins through
 *	(1/flash_threshold) + 1 groups, adding one data set to each on each
 *	pass.
	Then all will increase in size at about the same time, requiring
 *	the max cache size to at least double to maintain acceptable
 *	performance, however the above flash increment algorithm will not be
 *	triggered.
 *
 *	Hopefully, the add space algorithm detailed above will be sufficient
 *	for the performance problems encountered to date.  However, we should
 *	expect to revisit the issue.
 *
 * flash_multiple: Double containing the multiple described above in the
 *	H5C_flash_incr__add_space section of the discussion of the
 *	flash_incr_mode section.  This field is ignored unless flash_incr_mode
 *	is H5C_flash_incr__add_space.
 *
 * flash_threshold: Double containing the factor by which current max cache
 *	size is multiplied to obtain the size threshold for the add_space
 *	flash increment algorithm.  The field is ignored unless
 *	flash_incr_mode is H5C_flash_incr__add_space.
 *
 *
 * Cache size decrease control fields:
 *
 * decr_mode: Instance of the H5C_cache_decr_mode enumerated type whose
 *	value indicates how we determine whether the cache size should be
 *	decreased.  At present there are four possibilities.
 *
 *	H5C_decr__off: Don't attempt to decrease the size of the cache
 *		automatically.
 *
 *		When this decrement mode is selected, the remaining fields
 *		in the cache size decrease section are ignored.
 *
 *	H5C_decr__threshold: Attempt to decrease the size of the cache
 *		whenever the average hit rate over the last epoch rises
 *		above the value supplied in the upper_hr_threshold
 *		field.
 *
 *	H5C_decr__age_out: At the end of each epoch, search the cache for
 *		entries that have not been accessed for at least the number
 *		of epochs specified in the epochs_before_eviction field, and
 *		evict these entries.  Conceptually, the maximum cache size
 *		is then decreased to match the new actual cache size.  However,
 *		this reduction may be modified by the min_size, the
 *		max_decrement, and/or the empty_reserve.
* * H5C_decr__age_out_with_threshold: Same as age_out, but we only * attempt to reduce the cache size when the hit rate observed * over the last epoch exceeds the value provided in the * upper_hr_threshold field. * * upper_hr_threshold: Upper hit rate threshold. The use of this field * varies according to the current decr_mode: * * H5C_decr__off or H5C_decr__age_out: The value of this field is * ignored. * * H5C_decr__threshold: If the hit rate exceeds this threshold in any * epoch, attempt to decrement the cache size by size_decrement. * * Note that cache size may not be decremented below min_size. * * Note also that if the upper_threshold is 1.0, the cache size * will never be reduced. * * H5C_decr__age_out_with_threshold: If the hit rate exceeds this * threshold in any epoch, attempt to reduce the cache size * by evicting entries that have not been accessed for more * than the specified number of epochs. * * decrement: This field is only used when the decr_mode is * H5C_decr__threshold. * * The field is a double containing the multiplier used to derive the * new cache size from the old if a cache size decrement is triggered. * The decrement must be in the range 0.0 (in which case the cache will * try to contract to its minimum size) to 1.0 (in which case the * cache will never shrink). * * apply_max_decrement: Boolean flag used to determine whether decrements * in cache size are to be limited by the max_decrement field. * * max_decrement: Maximum number of bytes by which the cache size can be * decreased in a single re-size. Note that decrements may also be * restricted by the min_size of the cache, and (in age out modes) by * the empty_reserve field. * * epochs_before_eviction: Integer field used in H5C_decr__age_out and * H5C_decr__age_out_with_threshold decrement modes. * * This field contains the number of epochs an entry must remain * unaccessed before it is evicted in an attempt to reduce the * cache size. 
	If applicable, this field must lie in the range
 *	[1, H5C__MAX_EPOCH_MARKERS].
 *
 * apply_empty_reserve: Boolean field controlling whether the empty_reserve
 *	field is to be used in computing the new cache size when the
 *	decr_mode is H5C_decr__age_out or H5C_decr__age_out_with_threshold.
 *
 * empty_reserve: To avoid a constant ratcheting down of cache size by small
 *	amounts in the H5C_decr__age_out and H5C_decr__age_out_with_threshold
 *	modes, this field allows one to require that any cache size
 *	reductions leave the specified fraction of unused space in the cache.
 *
 *	The value of this field must be in the range [0.0, 1.0].  I would
 *	expect typical values to be in the range of 0.01 to 0.1.
 *
 ****************************************************************************/

/* Outcome of one run of the automatic cache resize code, as reported to the
 * user's report function (if any) via H5C_auto_resize_rpt_fcn below.
 * NOTE(review): per-value semantics are implemented in H5C.c -- confirm there.
 */
enum H5C_resize_status {
    in_spec,
    increase,
    flash_increase,
    decrease,
    at_max_size,
    at_min_size,
    increase_disabled,
    decrease_disabled,
    not_full
}; /* enum H5C_resize_conditions */

/* Type of the optional callback invoked after each run of the automatic
 * cache resize code; receives the old and new cache sizes and the resize
 * outcome (see the report_fcn field documentation above). */
typedef void (*H5C_auto_resize_rpt_fcn)(H5C_t *cache_ptr, int32_t version, double hit_rate,
                                        enum H5C_resize_status status, size_t old_max_cache_size,
                                        size_t new_max_cache_size, size_t old_min_clean_size,
                                        size_t new_min_clean_size);

/* Control block for automatic cache re-sizing; see the long comment above
 * for the meaning and valid range of every field. */
typedef struct H5C_auto_size_ctl_t {
    /* general configuration fields: */
    int32_t                 version;            /* must be a known version number */
    H5C_auto_resize_rpt_fcn rpt_fcn;            /* report callback, or NULL for none */
    hbool_t                 set_initial_size;   /* whether to apply initial_size */
    size_t                  initial_size;       /* in [min_size, max_size] when enabled */
    double                  min_clean_fraction; /* fraction of cache kept clean (parallel mode) */
    size_t                  max_size;           /* upper bound on cache size */
    size_t                  min_size;           /* lower bound on cache size */
    int64_t                 epoch_length;       /* accesses per hit-rate collection epoch */

    /* size increase control fields: */
    enum H5C_cache_incr_mode       incr_mode;          /* off or threshold-driven */
    double                         lower_hr_threshold; /* hit rate below which to grow */
    double                         increment;          /* growth multiplier, (1.0, ~2.0] */
    hbool_t                        apply_max_increment;
    size_t                         max_increment;      /* max bytes grown per re-size */
    enum H5C_cache_flash_incr_mode flash_incr_mode;    /* off or add_space */
    double                         flash_multiple;
    double                         flash_threshold;

    /* size decrease control fields: */
    enum H5C_cache_decr_mode decr_mode;          /* off / threshold / age_out(_with_threshold) */
    double                   upper_hr_threshold; /* hit rate above which to shrink */
    double                   decrement;          /* shrink multiplier, [0.0, 1.0] */
    hbool_t                  apply_max_decrement;
    size_t                   max_decrement;      /* max bytes shrunk per re-size */
    int32_t                  epochs_before_eviction; /* in [1, H5C__MAX_EPOCH_MARKERS] */
    hbool_t                  apply_empty_reserve;
    double                   empty_reserve;      /* fraction of cache left unused, [0.0, 1.0] */
} H5C_auto_size_ctl_t;
/***************************************/
/* Library-private Function Prototypes */
/***************************************/

/* Cache creation / destruction */
H5_DLL H5C_t *H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id,
                         const H5C_class_t *const *class_table_ptr,
                         H5C_write_permitted_func_t check_write_permitted, hbool_t write_permitted,
                         H5C_log_flush_func_t log_flush, void *aux_ptr);
H5_DLL herr_t H5C_dest(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5C_evict(H5F_t *f, hid_t dxpl_id);

/* Cache logging */
H5_DLL herr_t H5C_set_up_logging(H5C_t *cache_ptr, const char log_location[], hbool_t start_immediately);
H5_DLL herr_t H5C_tear_down_logging(H5C_t *cache_ptr);
H5_DLL herr_t H5C_start_logging(H5C_t *cache_ptr);
H5_DLL herr_t H5C_stop_logging(H5C_t *cache_ptr);
H5_DLL herr_t H5C_get_logging_status(const H5C_t *cache_ptr, /*OUT*/ hbool_t *is_enabled,
                                     /*OUT*/ hbool_t *is_currently_logging);
H5_DLL herr_t H5C_write_log_message(const H5C_t *cache_ptr, const char message[]);

/* Default auto-resize report callback (of type H5C_auto_resize_rpt_fcn) */
H5_DLL void H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, int32_t version, double hit_rate,
                                        enum H5C_resize_status status, size_t old_max_cache_size,
                                        size_t new_max_cache_size, size_t old_min_clean_size,
                                        size_t new_min_clean_size);

/* Flushing / expunging entries */
H5_DLL herr_t H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type, haddr_t addr,
                                unsigned flags);
H5_DLL herr_t H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags);
H5_DLL herr_t H5C_flush_tagged_entries(H5F_t * f, hid_t dxpl_id, haddr_t tag);
H5_DLL herr_t H5C_evict_tagged_entries(H5F_t * f, hid_t dxpl_id, haddr_t tag, hbool_t match_global);
H5_DLL herr_t H5C_expunge_tag_type_metadata(H5F_t *f, hid_t dxpl_id, haddr_t tag, int type_id,
                                            unsigned flags);
H5_DLL herr_t H5C_get_tag(const void *thing, /*OUT*/ haddr_t *tag);
#if H5C_DO_TAGGING_SANITY_CHECKS
/* NOTE(review): lacks the H5_DLL export decoration the other prototypes
 * carry -- confirm this is intentional (debug-only build). */
herr_t H5C_verify_tag(int id, haddr_t tag);
#endif
H5_DLL herr_t H5C_flush_to_min_clean(H5F_t *f, hid_t dxpl_id);

/* Cache configuration and status queries */
H5_DLL herr_t H5C_get_cache_auto_resize_config(const H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr);
H5_DLL herr_t H5C_get_cache_size(H5C_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_size_ptr,
                                 size_t *cur_size_ptr, uint32_t *cur_num_entries_ptr);
H5_DLL herr_t H5C_get_cache_hit_rate(H5C_t *cache_ptr, double *hit_rate_ptr);
H5_DLL herr_t H5C_get_entry_status(const H5F_t *f, haddr_t addr, size_t *size_ptr, hbool_t *in_cache_ptr,
                                   hbool_t *is_dirty_ptr, hbool_t *is_protected_ptr,
                                   hbool_t *is_pinned_ptr, hbool_t *is_corked_ptr,
                                   hbool_t *is_flush_dep_parent_ptr, hbool_t *is_flush_dep_child_ptr,
                                   hbool_t *image_up_to_date_ptr);
H5_DLL herr_t H5C_get_evictions_enabled(const H5C_t *cache_ptr, hbool_t *evictions_enabled_ptr);
H5_DLL void * H5C_get_aux_ptr(const H5C_t *cache_ptr);
H5_DLL FILE *H5C_get_trace_file_ptr(const H5C_t *cache_ptr);
H5_DLL FILE *H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr);

/* Entry management (insert / mark / move / pin / protect) */
H5_DLL herr_t H5C_insert_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type, haddr_t addr,
                               void *thing, unsigned int flags);
H5_DLL herr_t H5C_mark_entry_dirty(void *thing);
H5_DLL herr_t H5C_mark_entry_clean(void *thing);
H5_DLL herr_t H5C_mark_entry_unserialized(void *thing);
H5_DLL herr_t H5C_mark_entry_serialized(void *thing);
H5_DLL herr_t H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr,
                             haddr_t new_addr);
H5_DLL herr_t H5C_pin_protected_entry(void *thing);
H5_DLL herr_t H5C_prep_for_file_close(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5C_create_flush_dependency(void *parent_thing, void *child_thing);
H5_DLL void * H5C_protect(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type, haddr_t addr, void *udata,
                          unsigned flags);
H5_DLL herr_t H5C_reset_cache_hit_rate_stats(H5C_t *cache_ptr);
H5_DLL herr_t H5C_resize_entry(void *thing, size_t new_size);
H5_DLL herr_t H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr);
H5_DLL herr_t H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled);
H5_DLL herr_t H5C_set_prefix(H5C_t *cache_ptr, char *prefix);
H5_DLL herr_t H5C_set_trace_file_ptr(H5C_t *cache_ptr, FILE *trace_file_ptr);

/* Statistics and debugging */
H5_DLL herr_t H5C_stats(H5C_t *cache_ptr, const char *cache_name, hbool_t display_detailed_stats);
H5_DLL void H5C_stats__reset(H5C_t *cache_ptr);
H5_DLL herr_t H5C_dump_cache(H5C_t *cache_ptr, const char *cache_name);

/* Unpin / unprotect and flush-dependency teardown */
H5_DLL herr_t H5C_unpin_entry(void *thing);
H5_DLL herr_t H5C_destroy_flush_dependency(void *parent_thing, void *child_thing);
H5_DLL herr_t H5C_unprotect(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *thing, unsigned int flags);
H5_DLL herr_t H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr, unsigned int tests);

/* Tagging, corking, and rings */
H5_DLL herr_t H5C_ignore_tags(H5C_t *cache_ptr);
H5_DLL hbool_t H5C_get_ignore_tags(const H5C_t *cache_ptr);
H5_DLL herr_t H5C_retag_entries(H5C_t * cache_ptr, haddr_t src_tag, haddr_t dest_tag);
H5_DLL herr_t H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked);
H5_DLL herr_t H5C_get_entry_ring(const H5F_t *f, haddr_t addr, H5C_ring_t *ring);
H5_DLL herr_t H5C_unsettle_entry_ring(void *thing);
H5_DLL herr_t H5C_remove_entry(void *thing);

#ifdef H5_HAVE_PARALLEL
/* Parallel-mode candidate-list and collective-entry support */
H5_DLL herr_t H5C_apply_candidate_list(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr, int num_candidates,
                                       haddr_t *candidates_list_ptr, int mpi_rank, int mpi_size);
H5_DLL herr_t H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr);
H5_DLL herr_t H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr);
H5_DLL herr_t H5C_clear_coll_entries(H5C_t * cache_ptr, hbool_t partial);
H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t *f, hid_t dxpl_id, int32_t ce_array_len,
                                        haddr_t *ce_array_ptr);
#endif /* H5_HAVE_PARALLEL */

#ifndef NDEBUG /* debugging functions */
H5_DLL hbool_t H5C_cache_is_clean(const H5C_t *cache_ptr, H5C_ring_t inner_ring);
#endif /* NDEBUG */

#endif /* !_H5Cprivate_H */