Adjust directory structure

2025-10-23 18:57:37 +08:00
parent 6cf4d9cb03
commit 4966a659aa
2173 changed files with 4343 additions and 11 deletions

@@ -0,0 +1,25 @@
#ifndef Py_INTERNAL_ABSTRACT_H
#define Py_INTERNAL_ABSTRACT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
// Fast inlined version of PyIndex_Check()
static inline int
_PyIndex_Check(PyObject *obj)
{
PyNumberMethods *tp_as_number = Py_TYPE(obj)->tp_as_number;
return (tp_as_number != NULL && tp_as_number->nb_index != NULL);
}
PyObject *_PyNumber_PowerNoMod(PyObject *lhs, PyObject *rhs);
PyObject *_PyNumber_InPlacePowerNoMod(PyObject *lhs, PyObject *rhs);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_ABSTRACT_H */
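
As a minimal usage sketch (the helper below is illustrative, not part of the header): because _PyIndex_Check() only inspects the type's nb_index slot, core code can use it as a cheap guard before the more expensive conversion:

#include "Python.h"
#include "pycore_abstract.h"      // _PyIndex_Check()

// Hypothetical helper: convert `obj` to Py_ssize_t if it supports __index__.
static int
get_index_as_ssize(PyObject *obj, Py_ssize_t *result)
{
    if (!_PyIndex_Check(obj)) {   // fast path: one pointer chase, no call
        PyErr_Format(PyExc_TypeError,
                     "expected an integer, got %s", Py_TYPE(obj)->tp_name);
        return -1;
    }
    *result = PyNumber_AsSsize_t(obj, PyExc_OverflowError);
    return (*result == -1 && PyErr_Occurred()) ? -1 : 0;
}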

@@ -0,0 +1,112 @@
#ifndef Py_INTERNAL_ASDL_H
#define Py_INTERNAL_ASDL_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_pyarena.h" // _PyArena_Malloc()
typedef PyObject * identifier;
typedef PyObject * string;
typedef PyObject * object;
typedef PyObject * constant;
/* It would be nice if the code generated by asdl_c.py was completely
independent of Python, but that is a goal that requires too much work
at this stage. So, for example, I'll represent identifiers as
interned Python strings.
*/
#define _ASDL_SEQ_HEAD \
Py_ssize_t size; \
void **elements;
typedef struct {
_ASDL_SEQ_HEAD
} asdl_seq;
typedef struct {
_ASDL_SEQ_HEAD
void *typed_elements[1];
} asdl_generic_seq;
typedef struct {
_ASDL_SEQ_HEAD
PyObject *typed_elements[1];
} asdl_identifier_seq;
typedef struct {
_ASDL_SEQ_HEAD
int typed_elements[1];
} asdl_int_seq;
asdl_generic_seq *_Py_asdl_generic_seq_new(Py_ssize_t size, PyArena *arena);
asdl_identifier_seq *_Py_asdl_identifier_seq_new(Py_ssize_t size, PyArena *arena);
asdl_int_seq *_Py_asdl_int_seq_new(Py_ssize_t size, PyArena *arena);
#define GENERATE_ASDL_SEQ_CONSTRUCTOR(NAME, TYPE) \
asdl_ ## NAME ## _seq *_Py_asdl_ ## NAME ## _seq_new(Py_ssize_t size, PyArena *arena) \
{ \
asdl_ ## NAME ## _seq *seq = NULL; \
size_t n; \
/* check size is sane */ \
if (size < 0 || \
(size && (((size_t)size - 1) > (SIZE_MAX / sizeof(void *))))) { \
PyErr_NoMemory(); \
return NULL; \
} \
n = (size ? (sizeof(TYPE *) * (size - 1)) : 0); \
/* check if size can be added safely */ \
if (n > SIZE_MAX - sizeof(asdl_ ## NAME ## _seq)) { \
PyErr_NoMemory(); \
return NULL; \
} \
n += sizeof(asdl_ ## NAME ## _seq); \
seq = (asdl_ ## NAME ## _seq *)_PyArena_Malloc(arena, n); \
if (!seq) { \
PyErr_NoMemory(); \
return NULL; \
} \
memset(seq, 0, n); \
seq->size = size; \
seq->elements = (void**)seq->typed_elements; \
return seq; \
}
#define asdl_seq_GET_UNTYPED(S, I) _Py_RVALUE((S)->elements[(I)])
#define asdl_seq_GET(S, I) _Py_RVALUE((S)->typed_elements[(I)])
#define asdl_seq_LEN(S) _Py_RVALUE(((S) == NULL ? 0 : (S)->size))
#ifdef Py_DEBUG
# define asdl_seq_SET(S, I, V) \
do { \
Py_ssize_t _asdl_i = (I); \
assert((S) != NULL); \
assert(0 <= _asdl_i && _asdl_i < (S)->size); \
(S)->typed_elements[_asdl_i] = (V); \
} while (0)
#else
# define asdl_seq_SET(S, I, V) _Py_RVALUE((S)->typed_elements[(I)] = (V))
#endif
#ifdef Py_DEBUG
# define asdl_seq_SET_UNTYPED(S, I, V) \
do { \
Py_ssize_t _asdl_i = (I); \
assert((S) != NULL); \
assert(0 <= _asdl_i && _asdl_i < (S)->size); \
(S)->elements[_asdl_i] = (V); \
} while (0)
#else
# define asdl_seq_SET_UNTYPED(S, I, V) _Py_RVALUE((S)->elements[(I)] = (V))
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_ASDL_H */
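
A sketch of how the pieces fit together, using a hypothetical `foo` node type (the generated headers below define the real ones such as asdl_stmt_seq): GENERATE_ASDL_SEQ_CONSTRUCTOR(foo, foo_ty) expands to the matching _Py_asdl_foo_seq_new() definition, and the accessor macros then read and write the arena-allocated sequence:

// Assumed: inside a parser helper where `arena` is a live PyArena*
// and `item` is a foo_ty node.
asdl_foo_seq *seq = _Py_asdl_foo_seq_new(2, arena);
if (seq == NULL) {
    return NULL;                   // PyErr_NoMemory() was already set
}
asdl_seq_SET(seq, 0, item);        // bounds-checked in Py_DEBUG builds
foo_ty first = asdl_seq_GET(seq, 0);
Py_ssize_t n = asdl_seq_LEN(seq);  // 0 for a NULL sequence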

@@ -0,0 +1,922 @@
// File automatically generated by Parser/asdl_c.py.
#ifndef Py_INTERNAL_AST_H
#define Py_INTERNAL_AST_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_asdl.h"
typedef struct _mod *mod_ty;
typedef struct _stmt *stmt_ty;
typedef struct _expr *expr_ty;
typedef enum _expr_context { Load=1, Store=2, Del=3 } expr_context_ty;
typedef enum _boolop { And=1, Or=2 } boolop_ty;
typedef enum _operator { Add=1, Sub=2, Mult=3, MatMult=4, Div=5, Mod=6, Pow=7,
LShift=8, RShift=9, BitOr=10, BitXor=11, BitAnd=12,
FloorDiv=13 } operator_ty;
typedef enum _unaryop { Invert=1, Not=2, UAdd=3, USub=4 } unaryop_ty;
typedef enum _cmpop { Eq=1, NotEq=2, Lt=3, LtE=4, Gt=5, GtE=6, Is=7, IsNot=8,
In=9, NotIn=10 } cmpop_ty;
typedef struct _comprehension *comprehension_ty;
typedef struct _excepthandler *excepthandler_ty;
typedef struct _arguments *arguments_ty;
typedef struct _arg *arg_ty;
typedef struct _keyword *keyword_ty;
typedef struct _alias *alias_ty;
typedef struct _withitem *withitem_ty;
typedef struct _match_case *match_case_ty;
typedef struct _pattern *pattern_ty;
typedef struct _type_ignore *type_ignore_ty;
typedef struct _type_param *type_param_ty;
typedef struct {
_ASDL_SEQ_HEAD
mod_ty typed_elements[1];
} asdl_mod_seq;
asdl_mod_seq *_Py_asdl_mod_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
stmt_ty typed_elements[1];
} asdl_stmt_seq;
asdl_stmt_seq *_Py_asdl_stmt_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
expr_ty typed_elements[1];
} asdl_expr_seq;
asdl_expr_seq *_Py_asdl_expr_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
comprehension_ty typed_elements[1];
} asdl_comprehension_seq;
asdl_comprehension_seq *_Py_asdl_comprehension_seq_new(Py_ssize_t size, PyArena
*arena);
typedef struct {
_ASDL_SEQ_HEAD
excepthandler_ty typed_elements[1];
} asdl_excepthandler_seq;
asdl_excepthandler_seq *_Py_asdl_excepthandler_seq_new(Py_ssize_t size, PyArena
*arena);
typedef struct {
_ASDL_SEQ_HEAD
arguments_ty typed_elements[1];
} asdl_arguments_seq;
asdl_arguments_seq *_Py_asdl_arguments_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
arg_ty typed_elements[1];
} asdl_arg_seq;
asdl_arg_seq *_Py_asdl_arg_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
keyword_ty typed_elements[1];
} asdl_keyword_seq;
asdl_keyword_seq *_Py_asdl_keyword_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
alias_ty typed_elements[1];
} asdl_alias_seq;
asdl_alias_seq *_Py_asdl_alias_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
withitem_ty typed_elements[1];
} asdl_withitem_seq;
asdl_withitem_seq *_Py_asdl_withitem_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
match_case_ty typed_elements[1];
} asdl_match_case_seq;
asdl_match_case_seq *_Py_asdl_match_case_seq_new(Py_ssize_t size, PyArena
*arena);
typedef struct {
_ASDL_SEQ_HEAD
pattern_ty typed_elements[1];
} asdl_pattern_seq;
asdl_pattern_seq *_Py_asdl_pattern_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
type_ignore_ty typed_elements[1];
} asdl_type_ignore_seq;
asdl_type_ignore_seq *_Py_asdl_type_ignore_seq_new(Py_ssize_t size, PyArena
*arena);
typedef struct {
_ASDL_SEQ_HEAD
type_param_ty typed_elements[1];
} asdl_type_param_seq;
asdl_type_param_seq *_Py_asdl_type_param_seq_new(Py_ssize_t size, PyArena
*arena);
enum _mod_kind {Module_kind=1, Interactive_kind=2, Expression_kind=3,
FunctionType_kind=4};
struct _mod {
enum _mod_kind kind;
union {
struct {
asdl_stmt_seq *body;
asdl_type_ignore_seq *type_ignores;
} Module;
struct {
asdl_stmt_seq *body;
} Interactive;
struct {
expr_ty body;
} Expression;
struct {
asdl_expr_seq *argtypes;
expr_ty returns;
} FunctionType;
} v;
};
enum _stmt_kind {FunctionDef_kind=1, AsyncFunctionDef_kind=2, ClassDef_kind=3,
Return_kind=4, Delete_kind=5, Assign_kind=6,
TypeAlias_kind=7, AugAssign_kind=8, AnnAssign_kind=9,
For_kind=10, AsyncFor_kind=11, While_kind=12, If_kind=13,
With_kind=14, AsyncWith_kind=15, Match_kind=16,
Raise_kind=17, Try_kind=18, TryStar_kind=19, Assert_kind=20,
Import_kind=21, ImportFrom_kind=22, Global_kind=23,
Nonlocal_kind=24, Expr_kind=25, Pass_kind=26, Break_kind=27,
Continue_kind=28};
struct _stmt {
enum _stmt_kind kind;
union {
struct {
identifier name;
arguments_ty args;
asdl_stmt_seq *body;
asdl_expr_seq *decorator_list;
expr_ty returns;
string type_comment;
asdl_type_param_seq *type_params;
} FunctionDef;
struct {
identifier name;
arguments_ty args;
asdl_stmt_seq *body;
asdl_expr_seq *decorator_list;
expr_ty returns;
string type_comment;
asdl_type_param_seq *type_params;
} AsyncFunctionDef;
struct {
identifier name;
asdl_expr_seq *bases;
asdl_keyword_seq *keywords;
asdl_stmt_seq *body;
asdl_expr_seq *decorator_list;
asdl_type_param_seq *type_params;
} ClassDef;
struct {
expr_ty value;
} Return;
struct {
asdl_expr_seq *targets;
} Delete;
struct {
asdl_expr_seq *targets;
expr_ty value;
string type_comment;
} Assign;
struct {
expr_ty name;
asdl_type_param_seq *type_params;
expr_ty value;
} TypeAlias;
struct {
expr_ty target;
operator_ty op;
expr_ty value;
} AugAssign;
struct {
expr_ty target;
expr_ty annotation;
expr_ty value;
int simple;
} AnnAssign;
struct {
expr_ty target;
expr_ty iter;
asdl_stmt_seq *body;
asdl_stmt_seq *orelse;
string type_comment;
} For;
struct {
expr_ty target;
expr_ty iter;
asdl_stmt_seq *body;
asdl_stmt_seq *orelse;
string type_comment;
} AsyncFor;
struct {
expr_ty test;
asdl_stmt_seq *body;
asdl_stmt_seq *orelse;
} While;
struct {
expr_ty test;
asdl_stmt_seq *body;
asdl_stmt_seq *orelse;
} If;
struct {
asdl_withitem_seq *items;
asdl_stmt_seq *body;
string type_comment;
} With;
struct {
asdl_withitem_seq *items;
asdl_stmt_seq *body;
string type_comment;
} AsyncWith;
struct {
expr_ty subject;
asdl_match_case_seq *cases;
} Match;
struct {
expr_ty exc;
expr_ty cause;
} Raise;
struct {
asdl_stmt_seq *body;
asdl_excepthandler_seq *handlers;
asdl_stmt_seq *orelse;
asdl_stmt_seq *finalbody;
} Try;
struct {
asdl_stmt_seq *body;
asdl_excepthandler_seq *handlers;
asdl_stmt_seq *orelse;
asdl_stmt_seq *finalbody;
} TryStar;
struct {
expr_ty test;
expr_ty msg;
} Assert;
struct {
asdl_alias_seq *names;
} Import;
struct {
identifier module;
asdl_alias_seq *names;
int level;
} ImportFrom;
struct {
asdl_identifier_seq *names;
} Global;
struct {
asdl_identifier_seq *names;
} Nonlocal;
struct {
expr_ty value;
} Expr;
} v;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
enum _expr_kind {BoolOp_kind=1, NamedExpr_kind=2, BinOp_kind=3, UnaryOp_kind=4,
Lambda_kind=5, IfExp_kind=6, Dict_kind=7, Set_kind=8,
ListComp_kind=9, SetComp_kind=10, DictComp_kind=11,
GeneratorExp_kind=12, Await_kind=13, Yield_kind=14,
YieldFrom_kind=15, Compare_kind=16, Call_kind=17,
FormattedValue_kind=18, JoinedStr_kind=19, Constant_kind=20,
Attribute_kind=21, Subscript_kind=22, Starred_kind=23,
Name_kind=24, List_kind=25, Tuple_kind=26, Slice_kind=27};
struct _expr {
enum _expr_kind kind;
union {
struct {
boolop_ty op;
asdl_expr_seq *values;
} BoolOp;
struct {
expr_ty target;
expr_ty value;
} NamedExpr;
struct {
expr_ty left;
operator_ty op;
expr_ty right;
} BinOp;
struct {
unaryop_ty op;
expr_ty operand;
} UnaryOp;
struct {
arguments_ty args;
expr_ty body;
} Lambda;
struct {
expr_ty test;
expr_ty body;
expr_ty orelse;
} IfExp;
struct {
asdl_expr_seq *keys;
asdl_expr_seq *values;
} Dict;
struct {
asdl_expr_seq *elts;
} Set;
struct {
expr_ty elt;
asdl_comprehension_seq *generators;
} ListComp;
struct {
expr_ty elt;
asdl_comprehension_seq *generators;
} SetComp;
struct {
expr_ty key;
expr_ty value;
asdl_comprehension_seq *generators;
} DictComp;
struct {
expr_ty elt;
asdl_comprehension_seq *generators;
} GeneratorExp;
struct {
expr_ty value;
} Await;
struct {
expr_ty value;
} Yield;
struct {
expr_ty value;
} YieldFrom;
struct {
expr_ty left;
asdl_int_seq *ops;
asdl_expr_seq *comparators;
} Compare;
struct {
expr_ty func;
asdl_expr_seq *args;
asdl_keyword_seq *keywords;
} Call;
struct {
expr_ty value;
int conversion;
expr_ty format_spec;
} FormattedValue;
struct {
asdl_expr_seq *values;
} JoinedStr;
struct {
constant value;
string kind;
} Constant;
struct {
expr_ty value;
identifier attr;
expr_context_ty ctx;
} Attribute;
struct {
expr_ty value;
expr_ty slice;
expr_context_ty ctx;
} Subscript;
struct {
expr_ty value;
expr_context_ty ctx;
} Starred;
struct {
identifier id;
expr_context_ty ctx;
} Name;
struct {
asdl_expr_seq *elts;
expr_context_ty ctx;
} List;
struct {
asdl_expr_seq *elts;
expr_context_ty ctx;
} Tuple;
struct {
expr_ty lower;
expr_ty upper;
expr_ty step;
} Slice;
} v;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
struct _comprehension {
expr_ty target;
expr_ty iter;
asdl_expr_seq *ifs;
int is_async;
};
enum _excepthandler_kind {ExceptHandler_kind=1};
struct _excepthandler {
enum _excepthandler_kind kind;
union {
struct {
expr_ty type;
identifier name;
asdl_stmt_seq *body;
} ExceptHandler;
} v;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
struct _arguments {
asdl_arg_seq *posonlyargs;
asdl_arg_seq *args;
arg_ty vararg;
asdl_arg_seq *kwonlyargs;
asdl_expr_seq *kw_defaults;
arg_ty kwarg;
asdl_expr_seq *defaults;
};
struct _arg {
identifier arg;
expr_ty annotation;
string type_comment;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
struct _keyword {
identifier arg;
expr_ty value;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
struct _alias {
identifier name;
identifier asname;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
struct _withitem {
expr_ty context_expr;
expr_ty optional_vars;
};
struct _match_case {
pattern_ty pattern;
expr_ty guard;
asdl_stmt_seq *body;
};
enum _pattern_kind {MatchValue_kind=1, MatchSingleton_kind=2,
MatchSequence_kind=3, MatchMapping_kind=4,
MatchClass_kind=5, MatchStar_kind=6, MatchAs_kind=7,
MatchOr_kind=8};
struct _pattern {
enum _pattern_kind kind;
union {
struct {
expr_ty value;
} MatchValue;
struct {
constant value;
} MatchSingleton;
struct {
asdl_pattern_seq *patterns;
} MatchSequence;
struct {
asdl_expr_seq *keys;
asdl_pattern_seq *patterns;
identifier rest;
} MatchMapping;
struct {
expr_ty cls;
asdl_pattern_seq *patterns;
asdl_identifier_seq *kwd_attrs;
asdl_pattern_seq *kwd_patterns;
} MatchClass;
struct {
identifier name;
} MatchStar;
struct {
pattern_ty pattern;
identifier name;
} MatchAs;
struct {
asdl_pattern_seq *patterns;
} MatchOr;
} v;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
enum _type_ignore_kind {TypeIgnore_kind=1};
struct _type_ignore {
enum _type_ignore_kind kind;
union {
struct {
int lineno;
string tag;
} TypeIgnore;
} v;
};
enum _type_param_kind {TypeVar_kind=1, ParamSpec_kind=2, TypeVarTuple_kind=3};
struct _type_param {
enum _type_param_kind kind;
union {
struct {
identifier name;
expr_ty bound;
} TypeVar;
struct {
identifier name;
} ParamSpec;
struct {
identifier name;
} TypeVarTuple;
} v;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
// Note: these macros affect function definitions, not only call sites.
mod_ty _PyAST_Module(asdl_stmt_seq * body, asdl_type_ignore_seq * type_ignores,
PyArena *arena);
mod_ty _PyAST_Interactive(asdl_stmt_seq * body, PyArena *arena);
mod_ty _PyAST_Expression(expr_ty body, PyArena *arena);
mod_ty _PyAST_FunctionType(asdl_expr_seq * argtypes, expr_ty returns, PyArena
*arena);
stmt_ty _PyAST_FunctionDef(identifier name, arguments_ty args, asdl_stmt_seq *
body, asdl_expr_seq * decorator_list, expr_ty
returns, string type_comment, asdl_type_param_seq *
type_params, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_AsyncFunctionDef(identifier name, arguments_ty args,
asdl_stmt_seq * body, asdl_expr_seq *
decorator_list, expr_ty returns, string
type_comment, asdl_type_param_seq *
type_params, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_ClassDef(identifier name, asdl_expr_seq * bases,
asdl_keyword_seq * keywords, asdl_stmt_seq * body,
asdl_expr_seq * decorator_list, asdl_type_param_seq *
type_params, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Return(expr_ty value, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Delete(asdl_expr_seq * targets, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Assign(asdl_expr_seq * targets, expr_ty value, string
type_comment, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
stmt_ty _PyAST_TypeAlias(expr_ty name, asdl_type_param_seq * type_params,
expr_ty value, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_AugAssign(expr_ty target, operator_ty op, expr_ty value, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_AnnAssign(expr_ty target, expr_ty annotation, expr_ty value, int
simple, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
stmt_ty _PyAST_For(expr_ty target, expr_ty iter, asdl_stmt_seq * body,
asdl_stmt_seq * orelse, string type_comment, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
stmt_ty _PyAST_AsyncFor(expr_ty target, expr_ty iter, asdl_stmt_seq * body,
asdl_stmt_seq * orelse, string type_comment, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_While(expr_ty test, asdl_stmt_seq * body, asdl_stmt_seq *
orelse, int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_If(expr_ty test, asdl_stmt_seq * body, asdl_stmt_seq * orelse,
int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_With(asdl_withitem_seq * items, asdl_stmt_seq * body, string
type_comment, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
stmt_ty _PyAST_AsyncWith(asdl_withitem_seq * items, asdl_stmt_seq * body,
string type_comment, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Match(expr_ty subject, asdl_match_case_seq * cases, int lineno,
int col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
stmt_ty _PyAST_Raise(expr_ty exc, expr_ty cause, int lineno, int col_offset,
int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Try(asdl_stmt_seq * body, asdl_excepthandler_seq * handlers,
asdl_stmt_seq * orelse, asdl_stmt_seq * finalbody, int
lineno, int col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
stmt_ty _PyAST_TryStar(asdl_stmt_seq * body, asdl_excepthandler_seq * handlers,
asdl_stmt_seq * orelse, asdl_stmt_seq * finalbody, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_Assert(expr_ty test, expr_ty msg, int lineno, int col_offset,
int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Import(asdl_alias_seq * names, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_ImportFrom(identifier module, asdl_alias_seq * names, int level,
int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_Global(asdl_identifier_seq * names, int lineno, int col_offset,
int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Nonlocal(asdl_identifier_seq * names, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
stmt_ty _PyAST_Expr(expr_ty value, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Pass(int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_Break(int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_Continue(int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
expr_ty _PyAST_BoolOp(boolop_ty op, asdl_expr_seq * values, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_NamedExpr(expr_ty target, expr_ty value, int lineno, int
col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
expr_ty _PyAST_BinOp(expr_ty left, operator_ty op, expr_ty right, int lineno,
int col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
expr_ty _PyAST_UnaryOp(unaryop_ty op, expr_ty operand, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_Lambda(arguments_ty args, expr_ty body, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_IfExp(expr_ty test, expr_ty body, expr_ty orelse, int lineno,
int col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
expr_ty _PyAST_Dict(asdl_expr_seq * keys, asdl_expr_seq * values, int lineno,
int col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_Set(asdl_expr_seq * elts, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
expr_ty _PyAST_ListComp(expr_ty elt, asdl_comprehension_seq * generators, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
expr_ty _PyAST_SetComp(expr_ty elt, asdl_comprehension_seq * generators, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
expr_ty _PyAST_DictComp(expr_ty key, expr_ty value, asdl_comprehension_seq *
generators, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
expr_ty _PyAST_GeneratorExp(expr_ty elt, asdl_comprehension_seq * generators,
int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
expr_ty _PyAST_Await(expr_ty value, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
expr_ty _PyAST_Yield(expr_ty value, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
expr_ty _PyAST_YieldFrom(expr_ty value, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
expr_ty _PyAST_Compare(expr_ty left, asdl_int_seq * ops, asdl_expr_seq *
comparators, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
expr_ty _PyAST_Call(expr_ty func, asdl_expr_seq * args, asdl_keyword_seq *
keywords, int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
expr_ty _PyAST_FormattedValue(expr_ty value, int conversion, expr_ty
format_spec, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
expr_ty _PyAST_JoinedStr(asdl_expr_seq * values, int lineno, int col_offset,
int end_lineno, int end_col_offset, PyArena *arena);
expr_ty _PyAST_Constant(constant value, string kind, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_Attribute(expr_ty value, identifier attr, expr_context_ty ctx,
int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
expr_ty _PyAST_Subscript(expr_ty value, expr_ty slice, expr_context_ty ctx, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
expr_ty _PyAST_Starred(expr_ty value, expr_context_ty ctx, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_Name(identifier id, expr_context_ty ctx, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_List(asdl_expr_seq * elts, expr_context_ty ctx, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_Tuple(asdl_expr_seq * elts, expr_context_ty ctx, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_Slice(expr_ty lower, expr_ty upper, expr_ty step, int lineno,
int col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
comprehension_ty _PyAST_comprehension(expr_ty target, expr_ty iter,
asdl_expr_seq * ifs, int is_async,
PyArena *arena);
excepthandler_ty _PyAST_ExceptHandler(expr_ty type, identifier name,
asdl_stmt_seq * body, int lineno, int
col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
arguments_ty _PyAST_arguments(asdl_arg_seq * posonlyargs, asdl_arg_seq * args,
arg_ty vararg, asdl_arg_seq * kwonlyargs,
asdl_expr_seq * kw_defaults, arg_ty kwarg,
asdl_expr_seq * defaults, PyArena *arena);
arg_ty _PyAST_arg(identifier arg, expr_ty annotation, string type_comment, int
lineno, int col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
keyword_ty _PyAST_keyword(identifier arg, expr_ty value, int lineno, int
col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
alias_ty _PyAST_alias(identifier name, identifier asname, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
withitem_ty _PyAST_withitem(expr_ty context_expr, expr_ty optional_vars,
PyArena *arena);
match_case_ty _PyAST_match_case(pattern_ty pattern, expr_ty guard,
asdl_stmt_seq * body, PyArena *arena);
pattern_ty _PyAST_MatchValue(expr_ty value, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
pattern_ty _PyAST_MatchSingleton(constant value, int lineno, int col_offset,
int end_lineno, int end_col_offset, PyArena
*arena);
pattern_ty _PyAST_MatchSequence(asdl_pattern_seq * patterns, int lineno, int
col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
pattern_ty _PyAST_MatchMapping(asdl_expr_seq * keys, asdl_pattern_seq *
patterns, identifier rest, int lineno, int
col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
pattern_ty _PyAST_MatchClass(expr_ty cls, asdl_pattern_seq * patterns,
asdl_identifier_seq * kwd_attrs, asdl_pattern_seq
* kwd_patterns, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
pattern_ty _PyAST_MatchStar(identifier name, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
pattern_ty _PyAST_MatchAs(pattern_ty pattern, identifier name, int lineno, int
col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
pattern_ty _PyAST_MatchOr(asdl_pattern_seq * patterns, int lineno, int
col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
type_ignore_ty _PyAST_TypeIgnore(int lineno, string tag, PyArena *arena);
type_param_ty _PyAST_TypeVar(identifier name, expr_ty bound, int lineno, int
col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
type_param_ty _PyAST_ParamSpec(identifier name, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
type_param_ty _PyAST_TypeVarTuple(identifier name, int lineno, int col_offset,
int end_lineno, int end_col_offset, PyArena
*arena);
PyObject* PyAST_mod2obj(mod_ty t);
mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode);
int PyAST_Check(PyObject* obj);
extern int _PyAST_Validate(mod_ty);
/* _PyAST_ExprAsUnicode is defined in ast_unparse.c */
extern PyObject* _PyAST_ExprAsUnicode(expr_ty);
/* Return a borrowed reference to the first literal string in the
   sequence of statements, or NULL if it doesn't start with a literal
   string. Doesn't set an exception. */
extern PyObject* _PyAST_GetDocString(asdl_stmt_seq *);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_AST_H */
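
A minimal sketch of driving these constructors, assuming an arena from pycore_pyarena.h; it builds the AST for the expression `42` with placeholder source locations:

#include "pycore_ast.h"           // _PyAST_Constant(), _PyAST_Expression()
#include "pycore_pyarena.h"       // PyArena

// Build `Expression(body=Constant(value))`. All nodes live in `arena`;
// `value` is assumed to be arena-managed (e.g. via _PyArena_AddPyObject()).
static mod_ty
make_constant_expression(PyArena *arena, PyObject *value)
{
    expr_ty c = _PyAST_Constant(value, /*kind=*/NULL,
                                /*lineno=*/1, /*col_offset=*/0,
                                /*end_lineno=*/1, /*end_col_offset=*/2, arena);
    if (c == NULL) {
        return NULL;
    }
    return _PyAST_Expression(c, arena);
}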

@@ -0,0 +1,265 @@
// File automatically generated by Parser/asdl_c.py.
#ifndef Py_INTERNAL_AST_STATE_H
#define Py_INTERNAL_AST_STATE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
struct ast_state {
int initialized;
int recursion_depth;
int recursion_limit;
PyObject *AST_type;
PyObject *Add_singleton;
PyObject *Add_type;
PyObject *And_singleton;
PyObject *And_type;
PyObject *AnnAssign_type;
PyObject *Assert_type;
PyObject *Assign_type;
PyObject *AsyncFor_type;
PyObject *AsyncFunctionDef_type;
PyObject *AsyncWith_type;
PyObject *Attribute_type;
PyObject *AugAssign_type;
PyObject *Await_type;
PyObject *BinOp_type;
PyObject *BitAnd_singleton;
PyObject *BitAnd_type;
PyObject *BitOr_singleton;
PyObject *BitOr_type;
PyObject *BitXor_singleton;
PyObject *BitXor_type;
PyObject *BoolOp_type;
PyObject *Break_type;
PyObject *Call_type;
PyObject *ClassDef_type;
PyObject *Compare_type;
PyObject *Constant_type;
PyObject *Continue_type;
PyObject *Del_singleton;
PyObject *Del_type;
PyObject *Delete_type;
PyObject *DictComp_type;
PyObject *Dict_type;
PyObject *Div_singleton;
PyObject *Div_type;
PyObject *Eq_singleton;
PyObject *Eq_type;
PyObject *ExceptHandler_type;
PyObject *Expr_type;
PyObject *Expression_type;
PyObject *FloorDiv_singleton;
PyObject *FloorDiv_type;
PyObject *For_type;
PyObject *FormattedValue_type;
PyObject *FunctionDef_type;
PyObject *FunctionType_type;
PyObject *GeneratorExp_type;
PyObject *Global_type;
PyObject *GtE_singleton;
PyObject *GtE_type;
PyObject *Gt_singleton;
PyObject *Gt_type;
PyObject *IfExp_type;
PyObject *If_type;
PyObject *ImportFrom_type;
PyObject *Import_type;
PyObject *In_singleton;
PyObject *In_type;
PyObject *Interactive_type;
PyObject *Invert_singleton;
PyObject *Invert_type;
PyObject *IsNot_singleton;
PyObject *IsNot_type;
PyObject *Is_singleton;
PyObject *Is_type;
PyObject *JoinedStr_type;
PyObject *LShift_singleton;
PyObject *LShift_type;
PyObject *Lambda_type;
PyObject *ListComp_type;
PyObject *List_type;
PyObject *Load_singleton;
PyObject *Load_type;
PyObject *LtE_singleton;
PyObject *LtE_type;
PyObject *Lt_singleton;
PyObject *Lt_type;
PyObject *MatMult_singleton;
PyObject *MatMult_type;
PyObject *MatchAs_type;
PyObject *MatchClass_type;
PyObject *MatchMapping_type;
PyObject *MatchOr_type;
PyObject *MatchSequence_type;
PyObject *MatchSingleton_type;
PyObject *MatchStar_type;
PyObject *MatchValue_type;
PyObject *Match_type;
PyObject *Mod_singleton;
PyObject *Mod_type;
PyObject *Module_type;
PyObject *Mult_singleton;
PyObject *Mult_type;
PyObject *Name_type;
PyObject *NamedExpr_type;
PyObject *Nonlocal_type;
PyObject *NotEq_singleton;
PyObject *NotEq_type;
PyObject *NotIn_singleton;
PyObject *NotIn_type;
PyObject *Not_singleton;
PyObject *Not_type;
PyObject *Or_singleton;
PyObject *Or_type;
PyObject *ParamSpec_type;
PyObject *Pass_type;
PyObject *Pow_singleton;
PyObject *Pow_type;
PyObject *RShift_singleton;
PyObject *RShift_type;
PyObject *Raise_type;
PyObject *Return_type;
PyObject *SetComp_type;
PyObject *Set_type;
PyObject *Slice_type;
PyObject *Starred_type;
PyObject *Store_singleton;
PyObject *Store_type;
PyObject *Sub_singleton;
PyObject *Sub_type;
PyObject *Subscript_type;
PyObject *TryStar_type;
PyObject *Try_type;
PyObject *Tuple_type;
PyObject *TypeAlias_type;
PyObject *TypeIgnore_type;
PyObject *TypeVarTuple_type;
PyObject *TypeVar_type;
PyObject *UAdd_singleton;
PyObject *UAdd_type;
PyObject *USub_singleton;
PyObject *USub_type;
PyObject *UnaryOp_type;
PyObject *While_type;
PyObject *With_type;
PyObject *YieldFrom_type;
PyObject *Yield_type;
PyObject *__dict__;
PyObject *__doc__;
PyObject *__match_args__;
PyObject *__module__;
PyObject *_attributes;
PyObject *_fields;
PyObject *alias_type;
PyObject *annotation;
PyObject *arg;
PyObject *arg_type;
PyObject *args;
PyObject *argtypes;
PyObject *arguments_type;
PyObject *asname;
PyObject *ast;
PyObject *attr;
PyObject *bases;
PyObject *body;
PyObject *boolop_type;
PyObject *bound;
PyObject *cases;
PyObject *cause;
PyObject *cls;
PyObject *cmpop_type;
PyObject *col_offset;
PyObject *comparators;
PyObject *comprehension_type;
PyObject *context_expr;
PyObject *conversion;
PyObject *ctx;
PyObject *decorator_list;
PyObject *defaults;
PyObject *elt;
PyObject *elts;
PyObject *end_col_offset;
PyObject *end_lineno;
PyObject *exc;
PyObject *excepthandler_type;
PyObject *expr_context_type;
PyObject *expr_type;
PyObject *finalbody;
PyObject *format_spec;
PyObject *func;
PyObject *generators;
PyObject *guard;
PyObject *handlers;
PyObject *id;
PyObject *ifs;
PyObject *is_async;
PyObject *items;
PyObject *iter;
PyObject *key;
PyObject *keys;
PyObject *keyword_type;
PyObject *keywords;
PyObject *kind;
PyObject *kw_defaults;
PyObject *kwarg;
PyObject *kwd_attrs;
PyObject *kwd_patterns;
PyObject *kwonlyargs;
PyObject *left;
PyObject *level;
PyObject *lineno;
PyObject *lower;
PyObject *match_case_type;
PyObject *mod_type;
PyObject *module;
PyObject *msg;
PyObject *name;
PyObject *names;
PyObject *op;
PyObject *operand;
PyObject *operator_type;
PyObject *ops;
PyObject *optional_vars;
PyObject *orelse;
PyObject *pattern;
PyObject *pattern_type;
PyObject *patterns;
PyObject *posonlyargs;
PyObject *rest;
PyObject *returns;
PyObject *right;
PyObject *simple;
PyObject *slice;
PyObject *step;
PyObject *stmt_type;
PyObject *subject;
PyObject *tag;
PyObject *target;
PyObject *targets;
PyObject *test;
PyObject *type;
PyObject *type_comment;
PyObject *type_ignore_type;
PyObject *type_ignores;
PyObject *type_param_type;
PyObject *type_params;
PyObject *unaryop_type;
PyObject *upper;
PyObject *value;
PyObject *values;
PyObject *vararg;
PyObject *withitem_type;
};
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_AST_STATE_H */
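
This struct is a per-interpreter cache of type objects, operator singletons, and interned field names, so the generated code in Python-ast.c never has to re-create strings at runtime. A sketch of the access pattern, assuming `state` already points at the interpreter's struct ast_state:

// Cached interned strings serve as attribute names ...
PyObject *body = PyObject_GetAttr(node, state->body);        // node.body
if (body == NULL) {
    return NULL;
}
// ... and cached type objects make isinstance() checks cheap.
int is_name = PyObject_IsInstance(node, state->Name_type);   // Name node?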

@@ -0,0 +1,57 @@
#ifndef Py_INTERNAL_ATEXIT_H
#define Py_INTERNAL_ATEXIT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
//###############
// runtime atexit
typedef void (*atexit_callbackfunc)(void);
struct _atexit_runtime_state {
PyThread_type_lock mutex;
#define NEXITFUNCS 32
atexit_callbackfunc callbacks[NEXITFUNCS];
int ncallbacks;
};
//###################
// interpreter atexit
struct atexit_callback;
typedef void (*atexit_datacallbackfunc)(void *);
typedef struct atexit_callback {
    atexit_datacallbackfunc func;
    void *data;
    struct atexit_callback *next;
} atexit_callback;
typedef struct {
PyObject *func;
PyObject *args;
PyObject *kwargs;
} atexit_py_callback;
struct atexit_state {
atexit_callback *ll_callbacks;
atexit_callback *last_ll_callback;
// XXX The rest of the state could be moved to the atexit module state
// and a low-level callback added for it during module exec.
// For the moment we leave it here.
atexit_py_callback **callbacks;
int ncallbacks;
int callback_len;
};
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_ATEXIT_H */
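
A sketch of how the fixed-size runtime table at the top of this header gets filled, mirroring what the public Py_AtExit() does (the helper name is hypothetical, and the real code additionally holds state->mutex):

static int
register_runtime_atexit(struct _atexit_runtime_state *state,
                        atexit_callbackfunc func)
{
    if (state->ncallbacks >= NEXITFUNCS) {
        return -1;                        // all 32 slots are in use
    }
    state->callbacks[state->ncallbacks++] = func;
    return 0;
}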

@@ -0,0 +1,557 @@
#ifndef Py_ATOMIC_H
#define Py_ATOMIC_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "dynamic_annotations.h" /* _Py_ANNOTATE_MEMORY_ORDER */
#include "pyconfig.h"
#ifdef HAVE_STD_ATOMIC
# include <stdatomic.h>
#endif
#if defined(_MSC_VER)
#include <intrin.h>
#if defined(_M_IX86) || defined(_M_X64)
# include <immintrin.h>
#endif
#endif
/* This is modeled after the atomics interface from C1x, according to
* the draft at
* http://www.open-std.org/JTC1/SC22/wg14/www/docs/n1425.pdf.
* Operations and types are named the same except with a _Py_ prefix
* and have the same semantics.
*
* Beware, the implementations here are deep magic.
*/
#if defined(HAVE_STD_ATOMIC)
typedef enum _Py_memory_order {
_Py_memory_order_relaxed = memory_order_relaxed,
_Py_memory_order_acquire = memory_order_acquire,
_Py_memory_order_release = memory_order_release,
_Py_memory_order_acq_rel = memory_order_acq_rel,
_Py_memory_order_seq_cst = memory_order_seq_cst
} _Py_memory_order;
typedef struct _Py_atomic_address {
atomic_uintptr_t _value;
} _Py_atomic_address;
typedef struct _Py_atomic_int {
atomic_int _value;
} _Py_atomic_int;
#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \
atomic_signal_fence(ORDER)
#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \
atomic_thread_fence(ORDER)
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
atomic_store_explicit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER)
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER)
// Use builtin atomic operations in GCC >= 4.7 and clang
#elif defined(HAVE_BUILTIN_ATOMIC)
typedef enum _Py_memory_order {
_Py_memory_order_relaxed = __ATOMIC_RELAXED,
_Py_memory_order_acquire = __ATOMIC_ACQUIRE,
_Py_memory_order_release = __ATOMIC_RELEASE,
_Py_memory_order_acq_rel = __ATOMIC_ACQ_REL,
_Py_memory_order_seq_cst = __ATOMIC_SEQ_CST
} _Py_memory_order;
typedef struct _Py_atomic_address {
uintptr_t _value;
} _Py_atomic_address;
typedef struct _Py_atomic_int {
int _value;
} _Py_atomic_int;
#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \
__atomic_signal_fence(ORDER)
#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \
__atomic_thread_fence(ORDER)
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
(assert((ORDER) == __ATOMIC_RELAXED \
|| (ORDER) == __ATOMIC_SEQ_CST \
|| (ORDER) == __ATOMIC_RELEASE), \
__atomic_store_n(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER))
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
(assert((ORDER) == __ATOMIC_RELAXED \
|| (ORDER) == __ATOMIC_SEQ_CST \
|| (ORDER) == __ATOMIC_ACQUIRE \
|| (ORDER) == __ATOMIC_CONSUME), \
__atomic_load_n(&((ATOMIC_VAL)->_value), ORDER))
/* Only support GCC (for expression statements) and x86 (for simple
* atomic semantics) and MSVC x86/x64/ARM */
#elif defined(__GNUC__) && (defined(__i386__) || defined(__amd64))
typedef enum _Py_memory_order {
_Py_memory_order_relaxed,
_Py_memory_order_acquire,
_Py_memory_order_release,
_Py_memory_order_acq_rel,
_Py_memory_order_seq_cst
} _Py_memory_order;
typedef struct _Py_atomic_address {
uintptr_t _value;
} _Py_atomic_address;
typedef struct _Py_atomic_int {
int _value;
} _Py_atomic_int;
static __inline__ void
_Py_atomic_signal_fence(_Py_memory_order order)
{
if (order != _Py_memory_order_relaxed)
__asm__ volatile("":::"memory");
}
static __inline__ void
_Py_atomic_thread_fence(_Py_memory_order order)
{
if (order != _Py_memory_order_relaxed)
__asm__ volatile("mfence":::"memory");
}
/* Tell the race checker about this operation's effects. */
static __inline__ void
_Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order)
{
(void)address; /* shut up -Wunused-parameter */
switch(order) {
case _Py_memory_order_release:
case _Py_memory_order_acq_rel:
case _Py_memory_order_seq_cst:
_Py_ANNOTATE_HAPPENS_BEFORE(address);
break;
case _Py_memory_order_relaxed:
case _Py_memory_order_acquire:
break;
}
switch(order) {
case _Py_memory_order_acquire:
case _Py_memory_order_acq_rel:
case _Py_memory_order_seq_cst:
_Py_ANNOTATE_HAPPENS_AFTER(address);
break;
case _Py_memory_order_relaxed:
case _Py_memory_order_release:
break;
}
}
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
__extension__ ({ \
__typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
__typeof__(atomic_val->_value) new_val = NEW_VAL;\
volatile __typeof__(new_val) *volatile_data = &atomic_val->_value; \
_Py_memory_order order = ORDER; \
_Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
\
/* Perform the operation. */ \
_Py_ANNOTATE_IGNORE_WRITES_BEGIN(); \
switch(order) { \
case _Py_memory_order_release: \
_Py_atomic_signal_fence(_Py_memory_order_release); \
/* fallthrough */ \
case _Py_memory_order_relaxed: \
*volatile_data = new_val; \
break; \
\
case _Py_memory_order_acquire: \
case _Py_memory_order_acq_rel: \
case _Py_memory_order_seq_cst: \
__asm__ volatile("xchg %0, %1" \
: "+r"(new_val) \
: "m"(atomic_val->_value) \
: "memory"); \
break; \
} \
_Py_ANNOTATE_IGNORE_WRITES_END(); \
})
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
__extension__ ({ \
__typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
__typeof__(atomic_val->_value) result; \
volatile __typeof__(result) *volatile_data = &atomic_val->_value; \
_Py_memory_order order = ORDER; \
_Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
\
/* Perform the operation. */ \
_Py_ANNOTATE_IGNORE_READS_BEGIN(); \
switch(order) { \
case _Py_memory_order_release: \
case _Py_memory_order_acq_rel: \
case _Py_memory_order_seq_cst: \
/* Loads on x86 are not releases by default, so need a */ \
/* thread fence. */ \
_Py_atomic_thread_fence(_Py_memory_order_release); \
break; \
default: \
/* No fence */ \
break; \
} \
result = *volatile_data; \
switch(order) { \
case _Py_memory_order_acquire: \
case _Py_memory_order_acq_rel: \
case _Py_memory_order_seq_cst: \
/* Loads on x86 are automatically acquire operations so */ \
/* can get by with just a compiler fence. */ \
_Py_atomic_signal_fence(_Py_memory_order_acquire); \
break; \
default: \
/* No fence */ \
break; \
} \
_Py_ANNOTATE_IGNORE_READS_END(); \
result; \
})
#elif defined(_MSC_VER)
/* _Interlocked* functions provide a full memory barrier and are therefore
enough for acq_rel and seq_cst. If the HLE variants aren't available
in hardware they will fall back to a full memory barrier as well.
   This might affect performance, but likely only in some very specific and
   hard-to-measure scenarios.
*/
#if defined(_M_IX86) || defined(_M_X64)
typedef enum _Py_memory_order {
_Py_memory_order_relaxed,
_Py_memory_order_acquire,
_Py_memory_order_release,
_Py_memory_order_acq_rel,
_Py_memory_order_seq_cst
} _Py_memory_order;
typedef struct _Py_atomic_address {
volatile uintptr_t _value;
} _Py_atomic_address;
typedef struct _Py_atomic_int {
volatile int _value;
} _Py_atomic_int;
#if defined(_M_X64)
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
switch (ORDER) { \
case _Py_memory_order_acquire: \
_InterlockedExchange64_HLEAcquire((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
break; \
case _Py_memory_order_release: \
_InterlockedExchange64_HLERelease((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
break; \
default: \
_InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
break; \
}
#else
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0);
#endif
#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
switch (ORDER) { \
case _Py_memory_order_acquire: \
_InterlockedExchange_HLEAcquire((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
break; \
case _Py_memory_order_release: \
_InterlockedExchange_HLERelease((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
break; \
default: \
_InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
break; \
}
#if defined(_M_X64)
/* This has to be an intptr_t for now.
   gil_created() uses -1 as a sentinel value; if this returned a
   uintptr_t, the compare would be unsigned and would crash.
*/
inline intptr_t _Py_atomic_load_64bit_impl(volatile uintptr_t* value, int order) {
__int64 old;
switch (order) {
case _Py_memory_order_acquire:
{
do {
old = *value;
} while(_InterlockedCompareExchange64_HLEAcquire((volatile __int64*)value, old, old) != old);
break;
}
case _Py_memory_order_release:
{
do {
old = *value;
} while(_InterlockedCompareExchange64_HLERelease((volatile __int64*)value, old, old) != old);
break;
}
case _Py_memory_order_relaxed:
old = *value;
break;
default:
{
do {
old = *value;
} while(_InterlockedCompareExchange64((volatile __int64*)value, old, old) != old);
break;
}
}
return old;
}
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) \
_Py_atomic_load_64bit_impl((volatile uintptr_t*)&((ATOMIC_VAL)->_value), (ORDER))
#else
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) ((ATOMIC_VAL)->_value)
#endif
inline int _Py_atomic_load_32bit_impl(volatile int* value, int order) {
long old;
switch (order) {
case _Py_memory_order_acquire:
{
do {
old = *value;
} while(_InterlockedCompareExchange_HLEAcquire((volatile long*)value, old, old) != old);
break;
}
case _Py_memory_order_release:
{
do {
old = *value;
} while(_InterlockedCompareExchange_HLERelease((volatile long*)value, old, old) != old);
break;
}
case _Py_memory_order_relaxed:
old = *value;
break;
default:
{
do {
old = *value;
} while(_InterlockedCompareExchange((volatile long*)value, old, old) != old);
break;
}
}
return old;
}
#define _Py_atomic_load_32bit(ATOMIC_VAL, ORDER) \
_Py_atomic_load_32bit_impl((volatile int*)&((ATOMIC_VAL)->_value), (ORDER))
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
if (sizeof((ATOMIC_VAL)->_value) == 8) { \
_Py_atomic_store_64bit((ATOMIC_VAL), NEW_VAL, ORDER) } else { \
_Py_atomic_store_32bit((ATOMIC_VAL), NEW_VAL, ORDER) }
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
( \
sizeof((ATOMIC_VAL)->_value) == 8 ? \
_Py_atomic_load_64bit((ATOMIC_VAL), ORDER) : \
_Py_atomic_load_32bit((ATOMIC_VAL), ORDER) \
)
#elif defined(_M_ARM) || defined(_M_ARM64)
typedef enum _Py_memory_order {
_Py_memory_order_relaxed,
_Py_memory_order_acquire,
_Py_memory_order_release,
_Py_memory_order_acq_rel,
_Py_memory_order_seq_cst
} _Py_memory_order;
typedef struct _Py_atomic_address {
volatile uintptr_t _value;
} _Py_atomic_address;
typedef struct _Py_atomic_int {
volatile int _value;
} _Py_atomic_int;
#if defined(_M_ARM64)
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
switch (ORDER) { \
case _Py_memory_order_acquire: \
_InterlockedExchange64_acq((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
break; \
case _Py_memory_order_release: \
_InterlockedExchange64_rel((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
break; \
default: \
_InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
break; \
}
#else
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0);
#endif
#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
switch (ORDER) { \
case _Py_memory_order_acquire: \
_InterlockedExchange_acq((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
break; \
case _Py_memory_order_release: \
_InterlockedExchange_rel((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
break; \
default: \
_InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
break; \
}
#if defined(_M_ARM64)
/* This has to be an intptr_t for now.
   gil_created() uses -1 as a sentinel value; if this returned a
   uintptr_t, the compare would be unsigned and would crash.
*/
inline intptr_t _Py_atomic_load_64bit_impl(volatile uintptr_t* value, int order) {
uintptr_t old;
switch (order) {
case _Py_memory_order_acquire:
{
do {
old = *value;
} while(_InterlockedCompareExchange64_acq(value, old, old) != old);
break;
}
case _Py_memory_order_release:
{
do {
old = *value;
} while(_InterlockedCompareExchange64_rel(value, old, old) != old);
break;
}
case _Py_memory_order_relaxed:
old = *value;
break;
default:
{
do {
old = *value;
} while(_InterlockedCompareExchange64(value, old, old) != old);
break;
}
}
return old;
}
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) \
_Py_atomic_load_64bit_impl((volatile uintptr_t*)&((ATOMIC_VAL)->_value), (ORDER))
#else
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) ((ATOMIC_VAL)->_value)
#endif
inline int _Py_atomic_load_32bit_impl(volatile int* value, int order) {
int old;
switch (order) {
case _Py_memory_order_acquire:
{
do {
old = *value;
} while(_InterlockedCompareExchange_acq(value, old, old) != old);
break;
}
case _Py_memory_order_release:
{
do {
old = *value;
} while(_InterlockedCompareExchange_rel(value, old, old) != old);
break;
}
case _Py_memory_order_relaxed:
old = *value;
break;
default:
{
do {
old = *value;
} while(_InterlockedCompareExchange(value, old, old) != old);
break;
}
}
return old;
}
#define _Py_atomic_load_32bit(ATOMIC_VAL, ORDER) \
_Py_atomic_load_32bit_impl((volatile int*)&((ATOMIC_VAL)->_value), (ORDER))
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
if (sizeof((ATOMIC_VAL)->_value) == 8) { \
_Py_atomic_store_64bit((ATOMIC_VAL), (NEW_VAL), (ORDER)) } else { \
_Py_atomic_store_32bit((ATOMIC_VAL), (NEW_VAL), (ORDER)) }
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
( \
sizeof((ATOMIC_VAL)->_value) == 8 ? \
_Py_atomic_load_64bit((ATOMIC_VAL), (ORDER)) : \
_Py_atomic_load_32bit((ATOMIC_VAL), (ORDER)) \
)
#endif
#else /* !gcc x86 !_msc_ver */
typedef enum _Py_memory_order {
_Py_memory_order_relaxed,
_Py_memory_order_acquire,
_Py_memory_order_release,
_Py_memory_order_acq_rel,
_Py_memory_order_seq_cst
} _Py_memory_order;
typedef struct _Py_atomic_address {
uintptr_t _value;
} _Py_atomic_address;
typedef struct _Py_atomic_int {
int _value;
} _Py_atomic_int;
/* Fall back to other compilers and processors by assuming that simple
volatile accesses are atomic. This is false, so people should port
this. */
#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
((ATOMIC_VAL)->_value = NEW_VAL)
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
((ATOMIC_VAL)->_value)
#endif
/* Standardized shortcuts. */
#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \
_Py_atomic_store_explicit((ATOMIC_VAL), (NEW_VAL), _Py_memory_order_seq_cst)
#define _Py_atomic_load(ATOMIC_VAL) \
_Py_atomic_load_explicit((ATOMIC_VAL), _Py_memory_order_seq_cst)
/* Python-local extensions */
#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \
_Py_atomic_store_explicit((ATOMIC_VAL), (NEW_VAL), _Py_memory_order_relaxed)
#define _Py_atomic_load_relaxed(ATOMIC_VAL) \
_Py_atomic_load_explicit((ATOMIC_VAL), _Py_memory_order_relaxed)
#ifdef __cplusplus
}
#endif
#endif /* Py_ATOMIC_H */
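
Whichever backend is selected above, usage looks the same; a minimal sketch with a polled flag, similar in spirit to how ceval's eval breaker is handled:

static _Py_atomic_int flag = {0};

static void
set_flag(void)
{
    _Py_atomic_store(&flag, 1);             // sequentially consistent store
}

static int
check_flag(void)
{
    return _Py_atomic_load_relaxed(&flag);  // cheap read for hot-loop polling
}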

@@ -0,0 +1,94 @@
/* Atomic functions: similar to pycore_atomic.h, but the variables don't
   need to be declared with an atomic type.
   Py_ssize_t type:
   * value = _Py_atomic_size_get(&var)
   * _Py_atomic_size_set(&var, value)
   These use sequentially consistent ordering (__ATOMIC_SEQ_CST memory
   order), which enforces a total ordering with all other atomic functions.
*/
#ifndef Py_ATOMIC_FUNC_H
#define Py_ATOMIC_FUNC_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#if defined(_MSC_VER)
# include <intrin.h> // _InterlockedExchange()
#endif
// Use builtin atomic operations in GCC >= 4.7 and clang
#ifdef HAVE_BUILTIN_ATOMIC
static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var)
{
return __atomic_load_n(var, __ATOMIC_SEQ_CST);
}
static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value)
{
__atomic_store_n(var, value, __ATOMIC_SEQ_CST);
}
#elif defined(_MSC_VER)
static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var)
{
#if SIZEOF_VOID_P == 8
Py_BUILD_ASSERT(sizeof(__int64) == sizeof(*var));
volatile __int64 *volatile_var = (volatile __int64 *)var;
__int64 old;
do {
old = *volatile_var;
} while(_InterlockedCompareExchange64(volatile_var, old, old) != old);
#else
Py_BUILD_ASSERT(sizeof(long) == sizeof(*var));
volatile long *volatile_var = (volatile long *)var;
long old;
do {
old = *volatile_var;
} while(_InterlockedCompareExchange(volatile_var, old, old) != old);
#endif
return old;
}
static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value)
{
#if SIZEOF_VOID_P == 8
Py_BUILD_ASSERT(sizeof(__int64) == sizeof(*var));
volatile __int64 *volatile_var = (volatile __int64 *)var;
_InterlockedExchange64(volatile_var, value);
#else
Py_BUILD_ASSERT(sizeof(long) == sizeof(*var));
volatile long *volatile_var = (volatile long *)var;
_InterlockedExchange(volatile_var, value);
#endif
}
#else
// Fallback implementation using volatile
static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var)
{
volatile Py_ssize_t *volatile_var = (volatile Py_ssize_t *)var;
return *volatile_var;
}
static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value)
{
volatile Py_ssize_t *volatile_var = (volatile Py_ssize_t *)var;
*volatile_var = value;
}
#endif
#ifdef __cplusplus
}
#endif
#endif /* Py_ATOMIC_FUNC_H */
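
In contrast to pycore_atomic.h, these helpers act on a plain Py_ssize_t; a minimal sketch:

static Py_ssize_t counter = 0;   // no atomic type qualifier required

static void
bump(void)
{
    Py_ssize_t v = _Py_atomic_size_get(&counter);
    // NOTE: get-then-set is two atomic operations, not one atomic
    // read-modify-write, so concurrent bumps can still race.
    _Py_atomic_size_set(&counter, v + 1);
}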

@@ -0,0 +1,186 @@
/* Bit and bytes utilities.
Bytes swap functions, reverse order of bytes:
- _Py_bswap16(uint16_t)
- _Py_bswap32(uint32_t)
- _Py_bswap64(uint64_t)
*/
#ifndef Py_INTERNAL_BITUTILS_H
#define Py_INTERNAL_BITUTILS_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#if defined(__GNUC__) \
&& ((__GNUC__ >= 5) || (__GNUC__ == 4) && (__GNUC_MINOR__ >= 8))
/* __builtin_bswap16() is available since GCC 4.8,
__builtin_bswap32() is available since GCC 4.3,
__builtin_bswap64() is available since GCC 4.3. */
# define _PY_HAVE_BUILTIN_BSWAP
#endif
#ifdef _MSC_VER
/* Get _byteswap_ushort(), _byteswap_ulong(), _byteswap_uint64() */
# include <intrin.h>
#endif
static inline uint16_t
_Py_bswap16(uint16_t word)
{
#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap16)
return __builtin_bswap16(word);
#elif defined(_MSC_VER)
Py_BUILD_ASSERT(sizeof(word) == sizeof(unsigned short));
return _byteswap_ushort(word);
#else
// Portable implementation which doesn't rely on circular bit shift
return ( ((word & UINT16_C(0x00FF)) << 8)
| ((word & UINT16_C(0xFF00)) >> 8));
#endif
}
static inline uint32_t
_Py_bswap32(uint32_t word)
{
#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap32)
return __builtin_bswap32(word);
#elif defined(_MSC_VER)
Py_BUILD_ASSERT(sizeof(word) == sizeof(unsigned long));
return _byteswap_ulong(word);
#else
// Portable implementation which doesn't rely on circular bit shift
return ( ((word & UINT32_C(0x000000FF)) << 24)
| ((word & UINT32_C(0x0000FF00)) << 8)
| ((word & UINT32_C(0x00FF0000)) >> 8)
| ((word & UINT32_C(0xFF000000)) >> 24));
#endif
}
static inline uint64_t
_Py_bswap64(uint64_t word)
{
#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap64)
return __builtin_bswap64(word);
#elif defined(_MSC_VER)
return _byteswap_uint64(word);
#else
// Portable implementation which doesn't rely on circular bit shift
return ( ((word & UINT64_C(0x00000000000000FF)) << 56)
| ((word & UINT64_C(0x000000000000FF00)) << 40)
| ((word & UINT64_C(0x0000000000FF0000)) << 24)
| ((word & UINT64_C(0x00000000FF000000)) << 8)
| ((word & UINT64_C(0x000000FF00000000)) >> 8)
| ((word & UINT64_C(0x0000FF0000000000)) >> 24)
| ((word & UINT64_C(0x00FF000000000000)) >> 40)
| ((word & UINT64_C(0xFF00000000000000)) >> 56));
#endif
}
// Population count: count the number of 1 bits in 'x'
// (the number of bits set to 1), also known as the Hamming weight.
//
// Implementation note: CPUID is not used to test whether the x86 POPCNT
// instruction can be used, in order to keep the implementation simple. For
// example, Visual Studio's __popcnt() is not used for this reason. The clang
// and GCC builtin functions can use the x86 POPCNT instruction if the target
// architecture has SSE4a or newer.
static inline int
_Py_popcount32(uint32_t x)
{
#if (defined(__clang__) || defined(__GNUC__))
#if SIZEOF_INT >= 4
Py_BUILD_ASSERT(sizeof(x) <= sizeof(unsigned int));
return __builtin_popcount(x);
#else
// The C standard guarantees that unsigned long will always be big enough
// to hold a uint32_t value without losing information.
Py_BUILD_ASSERT(sizeof(x) <= sizeof(unsigned long));
return __builtin_popcountl(x);
#endif
#else
// 32-bit SWAR (SIMD Within A Register) popcount
// Binary: 0 1 0 1 ...
const uint32_t M1 = 0x55555555;
// Binary: 00 11 00 11. ..
const uint32_t M2 = 0x33333333;
// Binary: 0000 1111 0000 1111 ...
const uint32_t M4 = 0x0F0F0F0F;
// Put count of each 2 bits into those 2 bits
x = x - ((x >> 1) & M1);
// Put count of each 4 bits into those 4 bits
x = (x & M2) + ((x >> 2) & M2);
// Put count of each 8 bits into those 8 bits
x = (x + (x >> 4)) & M4;
// Sum of the 4 byte counts.
// Take care when considering changes to the next line. Portability and
// correctness are delicate here, thanks to C's "integer promotions" (C99
// §6.3.1.1p2). On machines where the `int` type has width greater than 32
// bits, `x` will be promoted to an `int`, and following C's "usual
// arithmetic conversions" (C99 §6.3.1.8), the multiplication will be
// performed as a multiplication of two `unsigned int` operands. In this
// case it's critical that we cast back to `uint32_t` in order to keep only
// the least significant 32 bits. On machines where the `int` type has
// width no greater than 32, the multiplication is of two 32-bit unsigned
// integer types, and the (uint32_t) cast is a no-op. In both cases, we
// avoid the risk of undefined behaviour due to overflow of a
// multiplication of signed integer types.
return (uint32_t)(x * 0x01010101U) >> 24;
#endif
}
// Return the index of the most significant 1 bit in 'x'. This is the smallest
// integer k such that x < 2**k. Equivalent to floor(log2(x)) + 1 for x != 0.
static inline int
_Py_bit_length(unsigned long x)
{
#if (defined(__clang__) || defined(__GNUC__))
if (x != 0) {
// __builtin_clzl() is available since GCC 3.4.
// Undefined behavior for x == 0.
return (int)sizeof(unsigned long) * 8 - __builtin_clzl(x);
}
else {
return 0;
}
#elif defined(_MSC_VER)
// _BitScanReverse() is documented to search 32 bits.
Py_BUILD_ASSERT(sizeof(unsigned long) <= 4);
unsigned long msb;
if (_BitScanReverse(&msb, x)) {
return (int)msb + 1;
}
else {
return 0;
}
#else
const int BIT_LENGTH_TABLE[32] = {
0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
};
int msb = 0;
while (x >= 32) {
msb += 6;
x >>= 6;
}
msb += BIT_LENGTH_TABLE[x];
return msb;
#endif
}
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_BITUTILS_H */
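
A few worked values, as a sanity sketch of the three groups of helpers:

#include <assert.h>

static void
bitutils_examples(void)
{
    assert(_Py_bswap16(0x1234) == 0x3412);
    assert(_Py_bswap32(0x12345678) == 0x78563412);
    assert(_Py_popcount32(0xF0F0) == 8);   // 1111000011110000 has 8 ones
    assert(_Py_bit_length(16) == 5);       // 16 < 2**5, 16 >= 2**4
    assert(_Py_bit_length(0) == 0);
}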

@@ -0,0 +1,317 @@
/*
_BlocksOutputBuffer is used to maintain an output buffer
that has unpredictable size. Suitable for compression/decompression
APIs (bz2/lzma/zlib) that have stream->next_out and stream->avail_out:
stream->next_out: points to the next output position.
stream->avail_out: the number of available bytes left in the buffer.
It maintains a list of bytes objects, so there is no overhead of resizing
the buffer.
Usage:
1, Initialize the struct instance like this:
_BlocksOutputBuffer buffer = {.list = NULL};
Set .list to NULL so that _BlocksOutputBuffer_OnError() can be called safely
2, Initialize the buffer using one of these functions:
_BlocksOutputBuffer_InitAndGrow()
_BlocksOutputBuffer_InitWithSize()
3, If (avail_out == 0), grow the buffer:
_BlocksOutputBuffer_Grow()
4, Get the current outputted data size:
_BlocksOutputBuffer_GetDataSize()
5, Finish the buffer, and return a bytes object:
_BlocksOutputBuffer_Finish()
6, Clean up the buffer when an error occurs:
_BlocksOutputBuffer_OnError()
*/
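/*
Usage sketch (illustrative only, not part of this header): a minimal
zlib-style decompression loop driving the steps above. The z_stream fields
and inflate() call are assumptions modeled on the zlib API this comment
mentions; error paths are abbreviated, and step 4
(_BlocksOutputBuffer_GetDataSize()) would report progress mid-stream.

static PyObject *
decompress_all(z_stream *strm)
{
    _BlocksOutputBuffer buffer = {.list = NULL};                 // step 1
    void *next_out;
    Py_ssize_t allocated =
        _BlocksOutputBuffer_InitAndGrow(&buffer, -1, &next_out); // step 2
    if (allocated < 0) {
        goto error;
    }
    strm->next_out = (Bytef *)next_out;
    strm->avail_out = (uInt)allocated;
    int ret;
    do {
        ret = inflate(strm, Z_NO_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
            goto error;
        }
        if (ret != Z_STREAM_END && strm->avail_out == 0) {       // step 3
            allocated = _BlocksOutputBuffer_Grow(&buffer, &next_out, 0);
            if (allocated < 0) {
                goto error;
            }
            strm->next_out = (Bytef *)next_out;
            strm->avail_out = (uInt)allocated;
        }
    } while (ret != Z_STREAM_END);
    return _BlocksOutputBuffer_Finish(&buffer, strm->avail_out); // step 5
error:
    _BlocksOutputBuffer_OnError(&buffer);                        // step 6
    return NULL;
}
*/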
#ifndef Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H
#define Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H
#ifdef __cplusplus
extern "C" {
#endif
#include "Python.h"
typedef struct {
// List of bytes objects
PyObject *list;
// Total allocated size of all blocks
Py_ssize_t allocated;
// Max length of the buffer, negative number means unlimited length.
Py_ssize_t max_length;
} _BlocksOutputBuffer;
static const char unable_allocate_msg[] = "Unable to allocate output buffer.";
/* In a 32-bit build, the max block size should be <= INT32_MAX. */
#define OUTPUT_BUFFER_MAX_BLOCK_SIZE (256*1024*1024)
/* Block size sequence */
#define KB (1024)
#define MB (1024*1024)
static const Py_ssize_t BUFFER_BLOCK_SIZE[] =
{ 32*KB, 64*KB, 256*KB, 1*MB, 4*MB, 8*MB, 16*MB, 16*MB,
32*MB, 32*MB, 32*MB, 32*MB, 64*MB, 64*MB, 128*MB, 128*MB,
OUTPUT_BUFFER_MAX_BLOCK_SIZE };
#undef KB
#undef MB
/* According to the block sizes defined by BUFFER_BLOCK_SIZE, the total
allocated size grows in these steps:
1 32 KB +32 KB
2 96 KB +64 KB
3 352 KB +256 KB
4 1.34 MB +1 MB
5 5.34 MB +4 MB
6 13.34 MB +8 MB
7 29.34 MB +16 MB
8 45.34 MB +16 MB
9 77.34 MB +32 MB
10 109.34 MB +32 MB
11 141.34 MB +32 MB
12 173.34 MB +32 MB
13 237.34 MB +64 MB
14 301.34 MB +64 MB
15 429.34 MB +128 MB
16 557.34 MB +128 MB
17 813.34 MB +256 MB
18 1069.34 MB +256 MB
19 1325.34 MB +256 MB
20 1581.34 MB +256 MB
21 1837.34 MB +256 MB
22 2093.34 MB +256 MB
...
*/
/* Initialize the buffer, and grow the buffer.
max_length: Max length of the buffer, -1 for unlimited length.
On success, return allocated size (>=0)
On failure, return -1
*/
static inline Py_ssize_t
_BlocksOutputBuffer_InitAndGrow(_BlocksOutputBuffer *buffer,
const Py_ssize_t max_length,
void **next_out)
{
PyObject *b;
Py_ssize_t block_size;
// ensure .list was set to NULL
assert(buffer->list == NULL);
// get block size
if (0 <= max_length && max_length < BUFFER_BLOCK_SIZE[0]) {
block_size = max_length;
} else {
block_size = BUFFER_BLOCK_SIZE[0];
}
// the first block
b = PyBytes_FromStringAndSize(NULL, block_size);
if (b == NULL) {
return -1;
}
// create the list
buffer->list = PyList_New(1);
if (buffer->list == NULL) {
Py_DECREF(b);
return -1;
}
PyList_SET_ITEM(buffer->list, 0, b);
// set variables
buffer->allocated = block_size;
buffer->max_length = max_length;
*next_out = PyBytes_AS_STRING(b);
return block_size;
}
/* Initialize the buffer, with an initial size.
Check the block size limit in the outer wrapper function. For example, some
libs accept UINT32_MAX as the maximum block size, so init_size should be <= it.
On success, return allocated size (>=0)
On failure, return -1
*/
static inline Py_ssize_t
_BlocksOutputBuffer_InitWithSize(_BlocksOutputBuffer *buffer,
const Py_ssize_t init_size,
void **next_out)
{
PyObject *b;
// ensure .list was set to NULL
assert(buffer->list == NULL);
// the first block
b = PyBytes_FromStringAndSize(NULL, init_size);
if (b == NULL) {
PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
return -1;
}
// create the list
buffer->list = PyList_New(1);
if (buffer->list == NULL) {
Py_DECREF(b);
return -1;
}
PyList_SET_ITEM(buffer->list, 0, b);
// set variables
buffer->allocated = init_size;
buffer->max_length = -1;
*next_out = PyBytes_AS_STRING(b);
return init_size;
}
/* Grow the buffer. avail_out must be 0; check it before calling.
On success, return allocated size (>=0)
On failure, return -1
*/
static inline Py_ssize_t
_BlocksOutputBuffer_Grow(_BlocksOutputBuffer *buffer,
void **next_out,
const Py_ssize_t avail_out)
{
PyObject *b;
const Py_ssize_t list_len = Py_SIZE(buffer->list);
Py_ssize_t block_size;
// ensure no gaps in the data
if (avail_out != 0) {
PyErr_SetString(PyExc_SystemError,
"avail_out is non-zero in _BlocksOutputBuffer_Grow().");
return -1;
}
// get block size
if (list_len < (Py_ssize_t) Py_ARRAY_LENGTH(BUFFER_BLOCK_SIZE)) {
block_size = BUFFER_BLOCK_SIZE[list_len];
} else {
block_size = BUFFER_BLOCK_SIZE[Py_ARRAY_LENGTH(BUFFER_BLOCK_SIZE) - 1];
}
// check max_length
if (buffer->max_length >= 0) {
// if (rest == 0), should not grow the buffer.
Py_ssize_t rest = buffer->max_length - buffer->allocated;
assert(rest > 0);
// block_size of the last block
if (block_size > rest) {
block_size = rest;
}
}
// check buffer->allocated overflow
if (block_size > PY_SSIZE_T_MAX - buffer->allocated) {
PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
return -1;
}
// create the block
b = PyBytes_FromStringAndSize(NULL, block_size);
if (b == NULL) {
PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
return -1;
}
if (PyList_Append(buffer->list, b) < 0) {
Py_DECREF(b);
return -1;
}
Py_DECREF(b);
// set variables
buffer->allocated += block_size;
*next_out = PyBytes_AS_STRING(b);
return block_size;
}
/* Return the current outputted data size. */
static inline Py_ssize_t
_BlocksOutputBuffer_GetDataSize(_BlocksOutputBuffer *buffer,
const Py_ssize_t avail_out)
{
return buffer->allocated - avail_out;
}
/* Finish the buffer.
Return a bytes object on success
Return NULL on failure
*/
static inline PyObject *
_BlocksOutputBuffer_Finish(_BlocksOutputBuffer *buffer,
const Py_ssize_t avail_out)
{
PyObject *result, *block;
const Py_ssize_t list_len = Py_SIZE(buffer->list);
// fast path for single block
if ((list_len == 1 && avail_out == 0) ||
(list_len == 2 && Py_SIZE(PyList_GET_ITEM(buffer->list, 1)) == avail_out))
{
block = PyList_GET_ITEM(buffer->list, 0);
Py_INCREF(block);
Py_CLEAR(buffer->list);
return block;
}
// final bytes object
result = PyBytes_FromStringAndSize(NULL, buffer->allocated - avail_out);
if (result == NULL) {
PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
return NULL;
}
// memory copy
if (list_len > 0) {
char *posi = PyBytes_AS_STRING(result);
// blocks except the last one
Py_ssize_t i = 0;
for (; i < list_len-1; i++) {
block = PyList_GET_ITEM(buffer->list, i);
memcpy(posi, PyBytes_AS_STRING(block), Py_SIZE(block));
posi += Py_SIZE(block);
}
// the last block
block = PyList_GET_ITEM(buffer->list, i);
memcpy(posi, PyBytes_AS_STRING(block), Py_SIZE(block) - avail_out);
} else {
assert(Py_SIZE(result) == 0);
}
Py_CLEAR(buffer->list);
return result;
}
/* Clean up the buffer when an error occurred. */
static inline void
_BlocksOutputBuffer_OnError(_BlocksOutputBuffer *buffer)
{
Py_CLEAR(buffer->list);
}
#ifdef __cplusplus
}
#endif
#endif /* Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H */

View File

@@ -0,0 +1,73 @@
#ifndef Py_LIMITED_API
#ifndef Py_BYTES_CTYPE_H
#define Py_BYTES_CTYPE_H
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/*
* The internal implementation behind PyBytes (bytes) and PyByteArray (bytearray)
* methods of the given names; they operate on ASCII byte strings.
*/
extern PyObject* _Py_bytes_isspace(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_isalpha(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_isalnum(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_isascii(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_isdigit(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_islower(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_isupper(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_istitle(const char *cptr, Py_ssize_t len);
/* These store their len sized answer in the given preallocated *result arg. */
extern void _Py_bytes_lower(char *result, const char *cptr, Py_ssize_t len);
extern void _Py_bytes_upper(char *result, const char *cptr, Py_ssize_t len);
extern void _Py_bytes_title(char *result, const char *s, Py_ssize_t len);
extern void _Py_bytes_capitalize(char *result, const char *s, Py_ssize_t len);
extern void _Py_bytes_swapcase(char *result, const char *s, Py_ssize_t len);
extern PyObject *_Py_bytes_find(const char *str, Py_ssize_t len, PyObject *args);
extern PyObject *_Py_bytes_index(const char *str, Py_ssize_t len, PyObject *args);
extern PyObject *_Py_bytes_rfind(const char *str, Py_ssize_t len, PyObject *args);
extern PyObject *_Py_bytes_rindex(const char *str, Py_ssize_t len, PyObject *args);
extern PyObject *_Py_bytes_count(const char *str, Py_ssize_t len, PyObject *args);
extern int _Py_bytes_contains(const char *str, Py_ssize_t len, PyObject *arg);
extern PyObject *_Py_bytes_startswith(const char *str, Py_ssize_t len, PyObject *args);
extern PyObject *_Py_bytes_endswith(const char *str, Py_ssize_t len, PyObject *args);
/* The maketrans() static method. */
extern PyObject* _Py_bytes_maketrans(Py_buffer *frm, Py_buffer *to);
/* Shared __doc__ strings. */
extern const char _Py_isspace__doc__[];
extern const char _Py_isalpha__doc__[];
extern const char _Py_isalnum__doc__[];
extern const char _Py_isascii__doc__[];
extern const char _Py_isdigit__doc__[];
extern const char _Py_islower__doc__[];
extern const char _Py_isupper__doc__[];
extern const char _Py_istitle__doc__[];
extern const char _Py_lower__doc__[];
extern const char _Py_upper__doc__[];
extern const char _Py_title__doc__[];
extern const char _Py_capitalize__doc__[];
extern const char _Py_swapcase__doc__[];
extern const char _Py_count__doc__[];
extern const char _Py_find__doc__[];
extern const char _Py_index__doc__[];
extern const char _Py_rfind__doc__[];
extern const char _Py_rindex__doc__[];
extern const char _Py_startswith__doc__[];
extern const char _Py_endswith__doc__[];
extern const char _Py_maketrans__doc__[];
extern const char _Py_expandtabs__doc__[];
extern const char _Py_ljust__doc__[];
extern const char _Py_rjust__doc__[];
extern const char _Py_center__doc__[];
extern const char _Py_zfill__doc__[];
/* this is needed because some docs are shared from the .o, not static */
#define PyDoc_STRVAR_shared(name,str) const char name[] = PyDoc_STR(str)
#endif /* !Py_BYTES_CTYPE_H */
#endif /* !Py_LIMITED_API */

View File

@@ -0,0 +1,47 @@
#ifndef Py_INTERNAL_BYTESOBJECT_H
#define Py_INTERNAL_BYTESOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* Substring Search.
Returns the index of the first occurrence of
a substring ("needle") in a larger text ("haystack").
If the needle is not found, return -1.
If the needle is found, add offset to the index.
*/
PyAPI_FUNC(Py_ssize_t)
_PyBytes_Find(const char *haystack, Py_ssize_t len_haystack,
const char *needle, Py_ssize_t len_needle,
Py_ssize_t offset);
/* Same as above, but search right-to-left */
PyAPI_FUNC(Py_ssize_t)
_PyBytes_ReverseFind(const char *haystack, Py_ssize_t len_haystack,
const char *needle, Py_ssize_t len_needle,
Py_ssize_t offset);
/** Helper function to implement the repeat and inplace repeat methods on a buffer
*
* len_dest is assumed to be an integer multiple of len_src.
* If src equals dest, then assume the operation is inplace.
*
* This method repeatedly doubles the number of bytes copied to reduce
* the number of invocations of memcpy.
*/
PyAPI_FUNC(void)
_PyBytes_Repeat(char* dest, Py_ssize_t len_dest,
const char* src, Py_ssize_t len_src);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_BYTESOBJECT_H */

View File

@@ -0,0 +1,133 @@
#ifndef Py_INTERNAL_CALL_H
#define Py_INTERNAL_CALL_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_pystate.h" // _PyThreadState_GET()
PyAPI_FUNC(PyObject *) _PyObject_Call_Prepend(
PyThreadState *tstate,
PyObject *callable,
PyObject *obj,
PyObject *args,
PyObject *kwargs);
PyAPI_FUNC(PyObject *) _PyObject_FastCallDictTstate(
PyThreadState *tstate,
PyObject *callable,
PyObject *const *args,
size_t nargsf,
PyObject *kwargs);
PyAPI_FUNC(PyObject *) _PyObject_Call(
PyThreadState *tstate,
PyObject *callable,
PyObject *args,
PyObject *kwargs);
extern PyObject * _PyObject_CallMethodFormat(
PyThreadState *tstate, PyObject *callable, const char *format, ...);
// Static inline variant of public PyVectorcall_Function().
static inline vectorcallfunc
_PyVectorcall_FunctionInline(PyObject *callable)
{
assert(callable != NULL);
PyTypeObject *tp = Py_TYPE(callable);
if (!PyType_HasFeature(tp, Py_TPFLAGS_HAVE_VECTORCALL)) {
return NULL;
}
assert(PyCallable_Check(callable));
Py_ssize_t offset = tp->tp_vectorcall_offset;
assert(offset > 0);
vectorcallfunc ptr;
memcpy(&ptr, (char *) callable + offset, sizeof(ptr));
return ptr;
}
/* Call the callable object 'callable' with the "vectorcall" calling
convention.
args is a C array for positional arguments.
nargsf is the number of positional arguments plus optionally the flag
PY_VECTORCALL_ARGUMENTS_OFFSET, which means that the callee is allowed to
temporarily change args[-1].
kwnames is a tuple of keyword names. The values of the keyword arguments
are stored in "args" after the positional arguments (note that the number
of keyword arguments does not change nargsf). kwnames can also be NULL if
there are no keyword arguments.
kwnames must only contain strings and all keys must be unique.
Return the result on success. Raise an exception and return NULL on
error. */
static inline PyObject *
_PyObject_VectorcallTstate(PyThreadState *tstate, PyObject *callable,
PyObject *const *args, size_t nargsf,
PyObject *kwnames)
{
vectorcallfunc func;
PyObject *res;
assert(kwnames == NULL || PyTuple_Check(kwnames));
assert(args != NULL || PyVectorcall_NARGS(nargsf) == 0);
func = _PyVectorcall_FunctionInline(callable);
if (func == NULL) {
Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
return _PyObject_MakeTpCall(tstate, callable, args, nargs, kwnames);
}
res = func(callable, args, nargsf, kwnames);
return _Py_CheckFunctionResult(tstate, callable, res, NULL);
}
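/*
Illustrative example (a sketch, not part of this header): calling
func(x, y, sep="-") through the vectorcall convention described above.
x, y and sep_value are assumed to be valid, owned PyObject pointers, and
tstate the current thread state.

    PyObject *args[3] = {x, y, sep_value};
    PyObject *kwnames = Py_BuildValue("(s)", "sep");
    if (kwnames == NULL) {
        return NULL;
    }
    // nargsf counts only the two positional arguments; the keyword value
    // sits in args[] right after them and does not change nargsf.
    PyObject *res = _PyObject_VectorcallTstate(tstate, func, args, 2, kwnames);
    Py_DECREF(kwnames);
*/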
static inline PyObject *
_PyObject_CallNoArgsTstate(PyThreadState *tstate, PyObject *func) {
return _PyObject_VectorcallTstate(tstate, func, NULL, 0, NULL);
}
// Private static inline function variant of public PyObject_CallNoArgs()
static inline PyObject *
_PyObject_CallNoArgs(PyObject *func) {
EVAL_CALL_STAT_INC_IF_FUNCTION(EVAL_CALL_API, func);
PyThreadState *tstate = _PyThreadState_GET();
return _PyObject_VectorcallTstate(tstate, func, NULL, 0, NULL);
}
static inline PyObject *
_PyObject_FastCallTstate(PyThreadState *tstate, PyObject *func, PyObject *const *args, Py_ssize_t nargs)
{
EVAL_CALL_STAT_INC_IF_FUNCTION(EVAL_CALL_API, func);
return _PyObject_VectorcallTstate(tstate, func, args, (size_t)nargs, NULL);
}
PyObject *const *
_PyStack_UnpackDict(PyThreadState *tstate,
PyObject *const *args, Py_ssize_t nargs,
PyObject *kwargs, PyObject **p_kwnames);
void
_PyStack_UnpackDict_Free(PyObject *const *stack, Py_ssize_t nargs,
PyObject *kwnames);
void _PyStack_UnpackDict_FreeNoDecRef(PyObject *const *stack, PyObject *kwnames);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CALL_H */

View File

@@ -0,0 +1,163 @@
#ifndef Py_INTERNAL_CEVAL_H
#define Py_INTERNAL_CEVAL_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* Forward declarations */
struct pyruntimestate;
struct _ceval_runtime_state;
#ifndef Py_DEFAULT_RECURSION_LIMIT
# define Py_DEFAULT_RECURSION_LIMIT 1000
#endif
#include "pycore_interp.h" // PyInterpreterState.eval_frame
#include "pycore_pystate.h" // _PyThreadState_GET()
extern void _Py_FinishPendingCalls(PyThreadState *tstate);
extern void _PyEval_InitState(PyInterpreterState *, PyThread_type_lock);
extern void _PyEval_FiniState(struct _ceval_state *ceval);
PyAPI_FUNC(void) _PyEval_SignalReceived(PyInterpreterState *interp);
PyAPI_FUNC(int) _PyEval_AddPendingCall(
PyInterpreterState *interp,
int (*func)(void *),
void *arg,
int mainthreadonly);
PyAPI_FUNC(void) _PyEval_SignalAsyncExc(PyInterpreterState *interp);
#ifdef HAVE_FORK
extern PyStatus _PyEval_ReInitThreads(PyThreadState *tstate);
#endif
// Used by sys.call_tracing()
extern PyObject* _PyEval_CallTracing(PyObject *func, PyObject *args);
// Used by sys.get_asyncgen_hooks()
extern PyObject* _PyEval_GetAsyncGenFirstiter(void);
extern PyObject* _PyEval_GetAsyncGenFinalizer(void);
// Used by sys.set_asyncgen_hooks()
extern int _PyEval_SetAsyncGenFirstiter(PyObject *);
extern int _PyEval_SetAsyncGenFinalizer(PyObject *);
// Used by sys.get_coroutine_origin_tracking_depth()
// and sys.set_coroutine_origin_tracking_depth()
extern int _PyEval_GetCoroutineOriginTrackingDepth(void);
extern int _PyEval_SetCoroutineOriginTrackingDepth(int depth);
extern void _PyEval_Fini(void);
extern PyObject* _PyEval_GetBuiltins(PyThreadState *tstate);
extern PyObject* _PyEval_BuiltinsFromGlobals(
PyThreadState *tstate,
PyObject *globals);
// Trampoline API
typedef struct {
// Callback to initialize the trampoline state
void* (*init_state)(void);
// Callback to register every trampoline being created
void (*write_state)(void* state, const void *code_addr,
unsigned int code_size, PyCodeObject* code);
// Callback to free the trampoline state
int (*free_state)(void* state);
} _PyPerf_Callbacks;
extern int _PyPerfTrampoline_SetCallbacks(_PyPerf_Callbacks *);
extern void _PyPerfTrampoline_GetCallbacks(_PyPerf_Callbacks *);
extern int _PyPerfTrampoline_Init(int activate);
extern int _PyPerfTrampoline_Fini(void);
extern int _PyIsPerfTrampolineActive(void);
extern PyStatus _PyPerfTrampoline_AfterFork_Child(void);
#ifdef PY_HAVE_PERF_TRAMPOLINE
extern _PyPerf_Callbacks _Py_perfmap_callbacks;
#endif
static inline PyObject*
_PyEval_EvalFrame(PyThreadState *tstate, struct _PyInterpreterFrame *frame, int throwflag)
{
EVAL_CALL_STAT_INC(EVAL_CALL_TOTAL);
if (tstate->interp->eval_frame == NULL) {
return _PyEval_EvalFrameDefault(tstate, frame, throwflag);
}
return tstate->interp->eval_frame(tstate, frame, throwflag);
}
extern PyObject*
_PyEval_Vector(PyThreadState *tstate,
PyFunctionObject *func, PyObject *locals,
PyObject* const* args, size_t argcount,
PyObject *kwnames);
extern int _PyEval_ThreadsInitialized(void);
extern PyStatus _PyEval_InitGIL(PyThreadState *tstate, int own_gil);
extern void _PyEval_FiniGIL(PyInterpreterState *interp);
extern void _PyEval_AcquireLock(PyThreadState *tstate);
extern void _PyEval_ReleaseLock(PyInterpreterState *, PyThreadState *);
extern PyThreadState * _PyThreadState_SwapNoGIL(PyThreadState *);
extern void _PyEval_DeactivateOpCache(void);
/* --- _Py_EnterRecursiveCall() ----------------------------------------- */
#ifdef USE_STACKCHECK
/* With USE_STACKCHECK macro defined, trigger stack checks in
_Py_CheckRecursiveCall() on every 64th call to _Py_EnterRecursiveCall. */
static inline int _Py_MakeRecCheck(PyThreadState *tstate) {
return (tstate->c_recursion_remaining-- <= 0
|| (tstate->c_recursion_remaining & 63) == 0);
}
#else
static inline int _Py_MakeRecCheck(PyThreadState *tstate) {
return tstate->c_recursion_remaining-- <= 0;
}
#endif
PyAPI_FUNC(int) _Py_CheckRecursiveCall(
PyThreadState *tstate,
const char *where);
int _Py_CheckRecursiveCallPy(
PyThreadState *tstate);
static inline int _Py_EnterRecursiveCallTstate(PyThreadState *tstate,
const char *where) {
return (_Py_MakeRecCheck(tstate) && _Py_CheckRecursiveCall(tstate, where));
}
static inline int _Py_EnterRecursiveCall(const char *where) {
PyThreadState *tstate = _PyThreadState_GET();
return _Py_EnterRecursiveCallTstate(tstate, where);
}
static inline void _Py_LeaveRecursiveCallTstate(PyThreadState *tstate) {
tstate->c_recursion_remaining++;
}
static inline void _Py_LeaveRecursiveCall(void) {
PyThreadState *tstate = _PyThreadState_GET();
_Py_LeaveRecursiveCallTstate(tstate);
}
extern struct _PyInterpreterFrame* _PyEval_GetFrame(void);
extern PyObject* _Py_MakeCoro(PyFunctionObject *func);
extern int _Py_HandlePending(PyThreadState *tstate);
extern PyObject * _PyEval_GetFrameLocals(void);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CEVAL_H */

View File

@@ -0,0 +1,103 @@
#ifndef Py_INTERNAL_CEVAL_STATE_H
#define Py_INTERNAL_CEVAL_STATE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_atomic.h" /* _Py_atomic_address */
#include "pycore_gil.h" // struct _gil_runtime_state
struct _pending_calls {
int busy;
PyThread_type_lock lock;
/* Request for running pending calls. */
_Py_atomic_int calls_to_do;
/* Request for looking at the `async_exc` field of the current
thread state.
Guarded by the GIL. */
int async_exc;
#define NPENDINGCALLS 32
struct _pending_call {
int (*func)(void *);
void *arg;
} calls[NPENDINGCALLS];
int first;
int last;
};
typedef enum {
PERF_STATUS_FAILED = -1, // Perf trampoline is in an invalid state
PERF_STATUS_NO_INIT = 0, // Perf trampoline is not initialized
PERF_STATUS_OK = 1, // Perf trampoline is ready to be executed
} perf_status_t;
#ifdef PY_HAVE_PERF_TRAMPOLINE
struct code_arena_st;
struct trampoline_api_st {
void* (*init_state)(void);
void (*write_state)(void* state, const void *code_addr,
unsigned int code_size, PyCodeObject* code);
int (*free_state)(void* state);
void *state;
};
#endif
struct _ceval_runtime_state {
struct {
#ifdef PY_HAVE_PERF_TRAMPOLINE
perf_status_t status;
Py_ssize_t extra_code_index;
struct code_arena_st *code_arena;
struct trampoline_api_st trampoline_api;
FILE *map_file;
#else
int _not_used;
#endif
} perf;
/* Request for checking signals. It is shared by all interpreters (see
bpo-40513). Any thread of any interpreter can receive a signal, but only
the main thread of the main interpreter can handle signals: see
_Py_ThreadCanHandleSignals(). */
_Py_atomic_int signals_pending;
/* Pending calls to be made only on the main thread. */
struct _pending_calls pending_mainthread;
};
#ifdef PY_HAVE_PERF_TRAMPOLINE
# define _PyEval_RUNTIME_PERF_INIT \
{ \
.status = PERF_STATUS_NO_INIT, \
.extra_code_index = -1, \
}
#else
# define _PyEval_RUNTIME_PERF_INIT {0}
#endif
struct _ceval_state {
/* This single variable consolidates all requests to break out of
the fast path in the eval loop. */
_Py_atomic_int eval_breaker;
/* Request for dropping the GIL */
_Py_atomic_int gil_drop_request;
int recursion_limit;
struct _gil_runtime_state *gil;
int own_gil;
/* The GC is ready to be executed */
_Py_atomic_int gc_scheduled;
struct _pending_calls pending;
};
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CEVAL_STATE_H */

View File

@@ -0,0 +1,494 @@
#ifndef Py_INTERNAL_CODE_H
#define Py_INTERNAL_CODE_H
#ifdef __cplusplus
extern "C" {
#endif
#define CODE_MAX_WATCHERS 8
/* PEP 659
* Specialization and quickening structs and helper functions
*/
// Inline caches. If you change the number of cache entries for an instruction,
// you must *also* update the number of cache entries in Lib/opcode.py and bump
// the magic number in Lib/importlib/_bootstrap_external.py!
#define CACHE_ENTRIES(cache) (sizeof(cache)/sizeof(_Py_CODEUNIT))
typedef struct {
uint16_t counter;
uint16_t index;
uint16_t module_keys_version;
uint16_t builtin_keys_version;
} _PyLoadGlobalCache;
#define INLINE_CACHE_ENTRIES_LOAD_GLOBAL CACHE_ENTRIES(_PyLoadGlobalCache)
typedef struct {
uint16_t counter;
} _PyBinaryOpCache;
#define INLINE_CACHE_ENTRIES_BINARY_OP CACHE_ENTRIES(_PyBinaryOpCache)
typedef struct {
uint16_t counter;
} _PyUnpackSequenceCache;
#define INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE \
CACHE_ENTRIES(_PyUnpackSequenceCache)
typedef struct {
uint16_t counter;
} _PyCompareOpCache;
#define INLINE_CACHE_ENTRIES_COMPARE_OP CACHE_ENTRIES(_PyCompareOpCache)
typedef struct {
uint16_t counter;
} _PyBinarySubscrCache;
#define INLINE_CACHE_ENTRIES_BINARY_SUBSCR CACHE_ENTRIES(_PyBinarySubscrCache)
typedef struct {
uint16_t counter;
} _PySuperAttrCache;
#define INLINE_CACHE_ENTRIES_LOAD_SUPER_ATTR CACHE_ENTRIES(_PySuperAttrCache)
typedef struct {
uint16_t counter;
uint16_t version[2];
uint16_t index;
} _PyAttrCache;
typedef struct {
uint16_t counter;
uint16_t type_version[2];
uint16_t keys_version[2];
uint16_t descr[4];
} _PyLoadMethodCache;
// MUST be the max(_PyAttrCache, _PyLoadMethodCache)
#define INLINE_CACHE_ENTRIES_LOAD_ATTR CACHE_ENTRIES(_PyLoadMethodCache)
#define INLINE_CACHE_ENTRIES_STORE_ATTR CACHE_ENTRIES(_PyAttrCache)
typedef struct {
uint16_t counter;
uint16_t func_version[2];
} _PyCallCache;
#define INLINE_CACHE_ENTRIES_CALL CACHE_ENTRIES(_PyCallCache)
typedef struct {
uint16_t counter;
} _PyStoreSubscrCache;
#define INLINE_CACHE_ENTRIES_STORE_SUBSCR CACHE_ENTRIES(_PyStoreSubscrCache)
typedef struct {
uint16_t counter;
} _PyForIterCache;
#define INLINE_CACHE_ENTRIES_FOR_ITER CACHE_ENTRIES(_PyForIterCache)
typedef struct {
uint16_t counter;
} _PySendCache;
#define INLINE_CACHE_ENTRIES_SEND CACHE_ENTRIES(_PySendCache)
// Borrowed references to common callables:
struct callable_cache {
PyObject *isinstance;
PyObject *len;
PyObject *list_append;
PyObject *object__getattribute__;
};
/* "Locals plus" for a code object is the set of locals + cell vars +
* free vars. This relates to variable names as well as offsets into
* the "fast locals" storage array of execution frames. The compiler
* builds the list of names, their offsets, and the corresponding
* kind of local.
*
* Those kinds represent the source of the initial value and the
* variable's scope (as related to closures). A "local" is an
* argument or other variable defined in the current scope. A "free"
* variable is one that is defined in an outer scope and comes from
* the function's closure. A "cell" variable is a local that escapes
* into an inner function as part of a closure, and thus must be
* wrapped in a cell. Any "local" can also be a "cell", but the
* "free" kind is mutually exclusive with both.
*/
// Note that these all fit within a byte, as do combinations.
// Later, we will use the smaller numbers to differentiate the different
// kinds of locals (e.g. pos-only arg, varkwargs, local-only).
#define CO_FAST_HIDDEN 0x10
#define CO_FAST_LOCAL 0x20
#define CO_FAST_CELL 0x40
#define CO_FAST_FREE 0x80
typedef unsigned char _PyLocals_Kind;
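// For example (illustrative): a function argument that is captured by a
// nested function is both a local and a cell, so its kind is
// (CO_FAST_LOCAL | CO_FAST_CELL) == 0x60, while a variable coming in through
// the closure is just CO_FAST_FREE (0x80); per the comment above, "free"
// never combines with "local" or "cell".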
static inline _PyLocals_Kind
_PyLocals_GetKind(PyObject *kinds, int i)
{
assert(PyBytes_Check(kinds));
assert(0 <= i && i < PyBytes_GET_SIZE(kinds));
char *ptr = PyBytes_AS_STRING(kinds);
return (_PyLocals_Kind)(ptr[i]);
}
static inline void
_PyLocals_SetKind(PyObject *kinds, int i, _PyLocals_Kind kind)
{
assert(PyBytes_Check(kinds));
assert(0 <= i && i < PyBytes_GET_SIZE(kinds));
char *ptr = PyBytes_AS_STRING(kinds);
ptr[i] = (char) kind;
}
struct _PyCodeConstructor {
/* metadata */
PyObject *filename;
PyObject *name;
PyObject *qualname;
int flags;
/* the code */
PyObject *code;
int firstlineno;
PyObject *linetable;
/* used by the code */
PyObject *consts;
PyObject *names;
/* mapping frame offsets to information */
PyObject *localsplusnames; // Tuple of strings
PyObject *localspluskinds; // Bytes object, one byte per variable
/* args (within varnames) */
int argcount;
int posonlyargcount;
// XXX Replace argcount with posorkwargcount (argcount - posonlyargcount).
int kwonlyargcount;
/* needed to create the frame */
int stacksize;
/* used by the eval loop */
PyObject *exceptiontable;
};
// Using an "arguments struct" like this is helpful for maintainability
// in a case such as this with many parameters. It does bear a risk:
// if the struct changes and callers are not updated properly then the
// compiler will not catch problems (like a missing argument). This can
// cause hard-to-debug problems. The risk is mitigated by the use of
// check_code() in codeobject.c. However, we may decide to switch
// back to a regular function signature. Regardless, this approach
// wouldn't be appropriate if this weren't a strictly internal API.
// (See the comments in https://github.com/python/cpython/pull/26258.)
PyAPI_FUNC(int) _PyCode_Validate(struct _PyCodeConstructor *);
PyAPI_FUNC(PyCodeObject *) _PyCode_New(struct _PyCodeConstructor *);
/* Private API */
/* Getters for internal PyCodeObject data. */
extern PyObject* _PyCode_GetVarnames(PyCodeObject *);
extern PyObject* _PyCode_GetCellvars(PyCodeObject *);
extern PyObject* _PyCode_GetFreevars(PyCodeObject *);
extern PyObject* _PyCode_GetCode(PyCodeObject *);
/** API for initializing the line number tables. */
extern int _PyCode_InitAddressRange(PyCodeObject* co, PyCodeAddressRange *bounds);
/** Out of process API for initializing the location table. */
extern void _PyLineTable_InitAddressRange(
const char *linetable,
Py_ssize_t length,
int firstlineno,
PyCodeAddressRange *range);
/** API for traversing the line number table. */
extern int _PyLineTable_NextAddressRange(PyCodeAddressRange *range);
extern int _PyLineTable_PreviousAddressRange(PyCodeAddressRange *range);
/* Specialization functions */
extern void _Py_Specialize_LoadSuperAttr(PyObject *global_super, PyObject *cls,
_Py_CODEUNIT *instr, int load_method);
extern void _Py_Specialize_LoadAttr(PyObject *owner, _Py_CODEUNIT *instr,
PyObject *name);
extern void _Py_Specialize_StoreAttr(PyObject *owner, _Py_CODEUNIT *instr,
PyObject *name);
extern void _Py_Specialize_LoadGlobal(PyObject *globals, PyObject *builtins,
_Py_CODEUNIT *instr, PyObject *name);
extern void _Py_Specialize_BinarySubscr(PyObject *sub, PyObject *container,
_Py_CODEUNIT *instr);
extern void _Py_Specialize_StoreSubscr(PyObject *container, PyObject *sub,
_Py_CODEUNIT *instr);
extern void _Py_Specialize_Call(PyObject *callable, _Py_CODEUNIT *instr,
int nargs, PyObject *kwnames);
extern void _Py_Specialize_BinaryOp(PyObject *lhs, PyObject *rhs, _Py_CODEUNIT *instr,
int oparg, PyObject **locals);
extern void _Py_Specialize_CompareOp(PyObject *lhs, PyObject *rhs,
_Py_CODEUNIT *instr, int oparg);
extern void _Py_Specialize_UnpackSequence(PyObject *seq, _Py_CODEUNIT *instr,
int oparg);
extern void _Py_Specialize_ForIter(PyObject *iter, _Py_CODEUNIT *instr, int oparg);
extern void _Py_Specialize_Send(PyObject *receiver, _Py_CODEUNIT *instr);
/* Finalizer function for static codeobjects used in deepfreeze.py */
extern void _PyStaticCode_Fini(PyCodeObject *co);
/* Function to intern strings of codeobjects and quicken the bytecode */
extern int _PyStaticCode_Init(PyCodeObject *co);
#ifdef Py_STATS
#define STAT_INC(opname, name) do { if (_py_stats) _py_stats->opcode_stats[opname].specialization.name++; } while (0)
#define STAT_DEC(opname, name) do { if (_py_stats) _py_stats->opcode_stats[opname].specialization.name--; } while (0)
#define OPCODE_EXE_INC(opname) do { if (_py_stats) _py_stats->opcode_stats[opname].execution_count++; } while (0)
#define CALL_STAT_INC(name) do { if (_py_stats) _py_stats->call_stats.name++; } while (0)
#define OBJECT_STAT_INC(name) do { if (_py_stats) _py_stats->object_stats.name++; } while (0)
#define OBJECT_STAT_INC_COND(name, cond) \
do { if (_py_stats && cond) _py_stats->object_stats.name++; } while (0)
#define EVAL_CALL_STAT_INC(name) do { if (_py_stats) _py_stats->call_stats.eval_calls[name]++; } while (0)
#define EVAL_CALL_STAT_INC_IF_FUNCTION(name, callable) \
do { if (_py_stats && PyFunction_Check(callable)) _py_stats->call_stats.eval_calls[name]++; } while (0)
// Used by the _opcode extension which is built as a shared library
PyAPI_FUNC(PyObject*) _Py_GetSpecializationStats(void);
#else
#define STAT_INC(opname, name) ((void)0)
#define STAT_DEC(opname, name) ((void)0)
#define OPCODE_EXE_INC(opname) ((void)0)
#define CALL_STAT_INC(name) ((void)0)
#define OBJECT_STAT_INC(name) ((void)0)
#define OBJECT_STAT_INC_COND(name, cond) ((void)0)
#define EVAL_CALL_STAT_INC(name) ((void)0)
#define EVAL_CALL_STAT_INC_IF_FUNCTION(name, callable) ((void)0)
#endif // !Py_STATS
// Utility functions for reading/writing 32/64-bit values in the inline caches.
// Great care should be taken to ensure that these functions remain correct and
// performant! They should compile to just "move" instructions on all supported
// compilers and platforms.
// We use memcpy to let the C compiler handle unaligned accesses and endianness
// issues for us. It also seems to produce better code than manual copying for
// most compilers (see https://blog.regehr.org/archives/959 for more info).
static inline void
write_u32(uint16_t *p, uint32_t val)
{
memcpy(p, &val, sizeof(val));
}
static inline void
write_u64(uint16_t *p, uint64_t val)
{
memcpy(p, &val, sizeof(val));
}
static inline void
write_obj(uint16_t *p, PyObject *val)
{
memcpy(p, &val, sizeof(val));
}
static inline uint16_t
read_u16(uint16_t *p)
{
return *p;
}
static inline uint32_t
read_u32(uint16_t *p)
{
uint32_t val;
memcpy(&val, p, sizeof(val));
return val;
}
static inline uint64_t
read_u64(uint16_t *p)
{
uint64_t val;
memcpy(&val, p, sizeof(val));
return val;
}
static inline PyObject *
read_obj(uint16_t *p)
{
PyObject *val;
memcpy(&val, p, sizeof(val));
return val;
}
/* See Objects/exception_handling_notes.txt for details.
*/
static inline unsigned char *
parse_varint(unsigned char *p, int *result) {
int val = p[0] & 63;
while (p[0] & 64) {
p++;
val = (val << 6) | (p[0] & 63);
}
*result = val;
return p+1;
}
static inline int
write_varint(uint8_t *ptr, unsigned int val)
{
int written = 1;
while (val >= 64) {
*ptr++ = 64 | (val & 63);
val >>= 6;
written++;
}
*ptr = val;
return written;
}
static inline int
write_signed_varint(uint8_t *ptr, int val)
{
if (val < 0) {
val = ((-val)<<1) | 1;
}
else {
val = val << 1;
}
return write_varint(ptr, val);
}
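// Worked example (illustrative): write_varint(ptr, 200) emits two bytes,
// low 6-bit group first, with bit 64 as the continuation flag:
// 200 == 3*64 + 8, so ptr[0] == (64 | 8) == 0x48 and ptr[1] == 3, and the
// function returns 2. write_signed_varint() folds the sign into the low bit
// first: -5 becomes ((5 << 1) | 1) == 11, while 5 becomes (5 << 1) == 10.
// Note that parse_varint() above consumes 6-bit groups most-significant-first
// (the exception-table format referenced in the comment), so the two
// functions serve different encodings and are not inverses of each other.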
static inline int
write_location_entry_start(uint8_t *ptr, int code, int length)
{
assert((code & 15) == code);
*ptr = 128 | (code << 3) | (length - 1);
return 1;
}
/** Counters
* The first 16-bit value in each inline cache is a counter.
* When counting misses, the counter is treated as a simple unsigned value.
*
* When counting executions until the next specialization attempt,
* exponential backoff is used to reduce the number of specialization failures.
* The high 12 bits store the counter, the low 4 bits store the backoff exponent.
* On a specialization failure, the backoff exponent is incremented and the
* counter set to (2**backoff - 1).
* Backoff == 6 -> starting counter == 63, backoff == 10 -> starting counter == 1023.
*/
/* With a 16-bit counter, we have 12 bits for the counter value, and 4 bits for the backoff */
#define ADAPTIVE_BACKOFF_BITS 4
// A value of 1 means that we attempt to specialize the *second* time each
// instruction is executed. Executing twice is a much better indicator of
// "hotness" than executing once, but additional warmup delays only prevent
// specialization. Most types stabilize by the second execution, too:
#define ADAPTIVE_WARMUP_VALUE 1
#define ADAPTIVE_WARMUP_BACKOFF 1
// A value of 52 means that we attempt to re-specialize after 53 misses (a prime
// number, useful for avoiding artifacts if every nth value is a different type
// or something). Setting the backoff to 0 means that the counter is reset to
// the same state as a warming-up instruction (value == 1, backoff == 1) after
// deoptimization. This isn't strictly necessary, but it is a bit easier to reason
// about when thinking about the opcode transitions as a state machine:
#define ADAPTIVE_COOLDOWN_VALUE 52
#define ADAPTIVE_COOLDOWN_BACKOFF 0
#define MAX_BACKOFF_VALUE (16 - ADAPTIVE_BACKOFF_BITS)
static inline uint16_t
adaptive_counter_bits(int value, int backoff) {
return (value << ADAPTIVE_BACKOFF_BITS) |
(backoff & ((1<<ADAPTIVE_BACKOFF_BITS)-1));
}
static inline uint16_t
adaptive_counter_warmup(void) {
return adaptive_counter_bits(ADAPTIVE_WARMUP_VALUE,
ADAPTIVE_WARMUP_BACKOFF);
}
static inline uint16_t
adaptive_counter_cooldown(void) {
return adaptive_counter_bits(ADAPTIVE_COOLDOWN_VALUE,
ADAPTIVE_COOLDOWN_BACKOFF);
}
static inline uint16_t
adaptive_counter_backoff(uint16_t counter) {
unsigned int backoff = counter & ((1<<ADAPTIVE_BACKOFF_BITS)-1);
backoff++;
if (backoff > MAX_BACKOFF_VALUE) {
backoff = MAX_BACKOFF_VALUE;
}
unsigned int value = (1 << backoff) - 1;
return adaptive_counter_bits(value, backoff);
}
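// Worked example (illustrative): a warming-up instruction starts at
// adaptive_counter_warmup() == (1 << 4) | 1 == 0x11. After a specialization
// failure, adaptive_counter_backoff(0x11) increments the backoff exponent
// from 1 to 2 and restarts the counter at (1 << 2) - 1 == 3, giving
// (3 << 4) | 2 == 0x32. Each further failure roughly doubles the wait until
// the exponent saturates at MAX_BACKOFF_VALUE == 12.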
/* Line array cache for tracing */
typedef struct _PyShimCodeDef {
const uint8_t *code;
int codelen;
int stacksize;
const char *cname;
} _PyShimCodeDef;
extern PyCodeObject *
_Py_MakeShimCode(const _PyShimCodeDef *code);
extern uint32_t _Py_next_func_version;
/* Comparison bit masks. */
/* Note this evaluates its arguments twice each */
#define COMPARISON_BIT(x, y) (1 << (2 * ((x) >= (y)) + ((x) <= (y))))
/*
* The following bits are chosen so that the value of
* COMPARISON_BIT(left, right)
* masked by the values below will be non-zero if the
* comparison is true, and zero if it is false */
/* This is for values that are unordered, i.e. NaN, not types that are unordered, e.g. sets */
#define COMPARISON_UNORDERED 1
#define COMPARISON_LESS_THAN 2
#define COMPARISON_GREATER_THAN 4
#define COMPARISON_EQUALS 8
#define COMPARISON_NOT_EQUALS (COMPARISON_UNORDERED | COMPARISON_LESS_THAN | COMPARISON_GREATER_THAN)
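/* Worked example (illustrative): COMPARISON_BIT() computes
   1 << (2*((x) >= (y)) + ((x) <= (y))), so:
     x <  y        -> 1 << 1 == COMPARISON_LESS_THAN
     x >  y        -> 1 << 2 == COMPARISON_GREATER_THAN
     x == y        -> 1 << 3 == COMPARISON_EQUALS
     unordered/NaN -> 1 << 0 == COMPARISON_UNORDERED
   Testing "x < y or x == y" is then a single mask:
   COMPARISON_BIT(x, y) & (COMPARISON_LESS_THAN | COMPARISON_EQUALS). */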
extern int _Py_Instrument(PyCodeObject *co, PyInterpreterState *interp);
extern int _Py_GetBaseOpcode(PyCodeObject *code, int offset);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CODE_H */

View File

@@ -0,0 +1,118 @@
#ifndef Py_INTERNAL_COMPILE_H
#define Py_INTERNAL_COMPILE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
struct _arena; // Type defined in pycore_pyarena.h
struct _mod; // Type defined in pycore_ast.h
// Export the symbol for test_peg_generator (built as a library)
PyAPI_FUNC(PyCodeObject*) _PyAST_Compile(
struct _mod *mod,
PyObject *filename,
PyCompilerFlags *flags,
int optimize,
struct _arena *arena);
static const _PyCompilerSrcLocation NO_LOCATION = {-1, -1, -1, -1};
typedef struct {
int optimize;
int ff_features;
int recursion_depth; /* current recursion depth */
int recursion_limit; /* recursion limit */
} _PyASTOptimizeState;
extern int _PyAST_Optimize(
struct _mod *,
struct _arena *arena,
_PyASTOptimizeState *state);
typedef struct {
int h_offset;
int h_startdepth;
int h_preserve_lasti;
} _PyCompile_ExceptHandlerInfo;
typedef struct {
int i_opcode;
int i_oparg;
_PyCompilerSrcLocation i_loc;
_PyCompile_ExceptHandlerInfo i_except_handler_info;
} _PyCompile_Instruction;
typedef struct {
_PyCompile_Instruction *s_instrs;
int s_allocated;
int s_used;
int *s_labelmap; /* label id --> instr offset */
int s_labelmap_size;
int s_next_free_label; /* next free label id */
} _PyCompile_InstructionSequence;
typedef struct {
PyObject *u_name;
PyObject *u_qualname; /* dot-separated qualified name (lazy) */
/* The following fields are dicts that map objects to
the index of them in co_XXX. The index is used as
the argument for opcodes that refer to those collections.
*/
PyObject *u_consts; /* all constants */
PyObject *u_names; /* all names */
PyObject *u_varnames; /* local variables */
PyObject *u_cellvars; /* cell variables */
PyObject *u_freevars; /* free variables */
PyObject *u_fasthidden; /* dict; keys are names that are fast-locals only
temporarily within an inlined comprehension. When
value is True, treat as fast-local. */
Py_ssize_t u_argcount; /* number of arguments for block */
Py_ssize_t u_posonlyargcount; /* number of positional only arguments for block */
Py_ssize_t u_kwonlyargcount; /* number of keyword only arguments for block */
int u_firstlineno; /* the first lineno of the block */
} _PyCompile_CodeUnitMetadata;
/* Utility for a number of growing arrays used in the compiler */
int _PyCompile_EnsureArrayLargeEnough(
int idx,
void **array,
int *alloc,
int default_alloc,
size_t item_size);
int _PyCompile_ConstCacheMergeOne(PyObject *const_cache, PyObject **obj);
int _PyCompile_InstrSize(int opcode, int oparg);
/* Access compiler internals for unit testing */
PyAPI_FUNC(PyObject*) _PyCompile_CodeGen(
PyObject *ast,
PyObject *filename,
PyCompilerFlags *flags,
int optimize,
int compile_mode);
PyAPI_FUNC(PyObject*) _PyCompile_OptimizeCfg(
PyObject *instructions,
PyObject *consts,
int nlocals);
PyAPI_FUNC(PyCodeObject*)
_PyCompile_Assemble(_PyCompile_CodeUnitMetadata *umd, PyObject *filename,
PyObject *instructions);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_COMPILE_H */

View File

@@ -0,0 +1,97 @@
#ifndef Py_INTERNAL_CONDVAR_H
#define Py_INTERNAL_CONDVAR_H
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#ifndef _POSIX_THREADS
/* This means pthreads are not implemented in the libc headers, hence the
macro is not present in unistd.h. But they can still be implemented as an
external library (e.g. gnu pth in pthread emulation) */
# ifdef HAVE_PTHREAD_H
# include <pthread.h> /* _POSIX_THREADS */
# endif
#endif
#ifdef _POSIX_THREADS
/*
* POSIX support
*/
#define Py_HAVE_CONDVAR
#ifdef HAVE_PTHREAD_H
# include <pthread.h>
#endif
#define PyMUTEX_T pthread_mutex_t
#define PyCOND_T pthread_cond_t
#elif defined(NT_THREADS)
/*
* Windows (XP, 2003 server and later, as well as (hopefully) CE) support
*
* Emulated condition variables that work with XP and later, plus
* native support on Vista and onwards.
*/
#define Py_HAVE_CONDVAR
/* include windows if it hasn't been done before */
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
/* options */
/* non-emulated condition variables are provided for those that want
* to target Windows Vista. Modify this macro to enable them.
*/
#ifndef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1 /* use emulated condition variables */
#endif
/* fall back to emulation if not targeting Vista */
#if !defined NTDDI_VISTA || NTDDI_VERSION < NTDDI_VISTA
#undef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1
#endif
#if _PY_EMULATED_WIN_CV
typedef CRITICAL_SECTION PyMUTEX_T;
/* The ConditionVariable object. From XP onwards it is easily emulated
with a Semaphore.
Semaphores are available on Windows XP (2003 server) and later.
We use a Semaphore rather than an auto-reset event, because although
an auto-reset event might appear to solve the lost-wakeup bug (race
condition between releasing the outer lock and waiting) because it
maintains state even though a wait hasn't happened, there is still
a lost wakeup problem if more than one thread is interrupted in the
critical place. A semaphore solves that, because its state is
counted, not Boolean.
Because it is ok to signal a condition variable with no one
waiting, we need to keep track of the number of
waiting threads. Otherwise, the semaphore's state could rise
without bound. This also helps reduce the number of "spurious wakeups"
that would otherwise happen.
*/
typedef struct _PyCOND_T
{
HANDLE sem;
int waiting; /* to allow PyCOND_SIGNAL to be a no-op */
} PyCOND_T;
#else /* !_PY_EMULATED_WIN_CV */
/* Use native Win7 primitives if build target is Win7 or higher */
/* SRWLOCK is faster and better than CriticalSection */
typedef SRWLOCK PyMUTEX_T;
typedef CONDITION_VARIABLE PyCOND_T;
#endif /* _PY_EMULATED_WIN_CV */
#endif /* _POSIX_THREADS, NT_THREADS */
#endif /* Py_INTERNAL_CONDVAR_H */

View File

@@ -0,0 +1,71 @@
#ifndef Py_INTERNAL_CONTEXT_H
#define Py_INTERNAL_CONTEXT_H
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_hamt.h" /* PyHamtObject */
extern PyTypeObject _PyContextTokenMissing_Type;
/* runtime lifecycle */
PyStatus _PyContext_Init(PyInterpreterState *);
void _PyContext_Fini(PyInterpreterState *);
/* other API */
typedef struct {
PyObject_HEAD
} _PyContextTokenMissing;
#ifndef WITH_FREELISTS
// without freelists
# define PyContext_MAXFREELIST 0
#endif
#ifndef PyContext_MAXFREELIST
# define PyContext_MAXFREELIST 255
#endif
struct _Py_context_state {
#if PyContext_MAXFREELIST > 0
// List of free PyContext objects
PyContext *freelist;
int numfree;
#endif
};
struct _pycontextobject {
PyObject_HEAD
PyContext *ctx_prev;
PyHamtObject *ctx_vars;
PyObject *ctx_weakreflist;
int ctx_entered;
};
struct _pycontextvarobject {
PyObject_HEAD
PyObject *var_name;
PyObject *var_default;
PyObject *var_cached;
uint64_t var_cached_tsid;
uint64_t var_cached_tsver;
Py_hash_t var_hash;
};
struct _pycontexttokenobject {
PyObject_HEAD
PyContext *tok_ctx;
PyContextVar *tok_var;
PyObject *tok_oldval;
int tok_used;
};
#endif /* !Py_INTERNAL_CONTEXT_H */

View File

@@ -0,0 +1,26 @@
#ifndef Py_INTERNAL_DESCROBJECT_H
#define Py_INTERNAL_DESCROBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
typedef struct {
PyObject_HEAD
PyObject *prop_get;
PyObject *prop_set;
PyObject *prop_del;
PyObject *prop_doc;
PyObject *prop_name;
int getter_doc;
} propertyobject;
typedef propertyobject _PyPropertyObject;
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_DESCROBJECT_H */

View File

@@ -0,0 +1,199 @@
#ifndef Py_INTERNAL_DICT_H
#define Py_INTERNAL_DICT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_dict_state.h"
#include "pycore_runtime.h" // _PyRuntime
/* runtime lifecycle */
extern void _PyDict_Fini(PyInterpreterState *interp);
/* other API */
typedef struct {
/* Cached hash code of me_key. */
Py_hash_t me_hash;
PyObject *me_key;
PyObject *me_value; /* This field is only meaningful for combined tables */
} PyDictKeyEntry;
typedef struct {
PyObject *me_key; /* The key must be Unicode and have hash. */
PyObject *me_value; /* This field is only meaningful for combined tables */
} PyDictUnicodeEntry;
extern PyDictKeysObject *_PyDict_NewKeysForClass(void);
extern PyObject *_PyDict_FromKeys(PyObject *, PyObject *, PyObject *);
/* Gets a version number unique to the current state of the keys of dict, if possible.
* Returns the version number, or zero if it was not possible to get a version number. */
extern uint32_t _PyDictKeys_GetVersionForCurrentState(
PyInterpreterState *interp, PyDictKeysObject *dictkeys);
extern size_t _PyDict_KeysSize(PyDictKeysObject *keys);
/* _Py_dict_lookup() returns the index of the entry, which can be used like
* DK_ENTRIES(dk)[index]. Returns -1 when no entry is found, and -3 when the
* compare raises an error.
*/
extern Py_ssize_t _Py_dict_lookup(PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject **value_addr);
extern Py_ssize_t _PyDict_LookupIndex(PyDictObject *, PyObject *);
extern Py_ssize_t _PyDictKeys_StringLookup(PyDictKeysObject* dictkeys, PyObject *key);
extern PyObject *_PyDict_LoadGlobal(PyDictObject *, PyDictObject *, PyObject *);
/* Consumes references to key and value */
extern int _PyDict_SetItem_Take2(PyDictObject *op, PyObject *key, PyObject *value);
extern int _PyObjectDict_SetItem(PyTypeObject *tp, PyObject **dictptr, PyObject *name, PyObject *value);
extern PyObject *_PyDict_Pop_KnownHash(PyObject *, PyObject *, Py_hash_t, PyObject *);
#define DKIX_EMPTY (-1)
#define DKIX_DUMMY (-2) /* Used internally */
#define DKIX_ERROR (-3)
#define DKIX_KEY_CHANGED (-4) /* Used internally */
typedef enum {
DICT_KEYS_GENERAL = 0,
DICT_KEYS_UNICODE = 1,
DICT_KEYS_SPLIT = 2
} DictKeysKind;
/* See dictobject.c for actual layout of DictKeysObject */
struct _dictkeysobject {
Py_ssize_t dk_refcnt;
/* Size of the hash table (dk_indices). It must be a power of 2. */
uint8_t dk_log2_size;
/* Size of the hash table (dk_indices) by bytes. */
uint8_t dk_log2_index_bytes;
/* Kind of keys */
uint8_t dk_kind;
/* Version number -- Reset to 0 by any modification to keys */
uint32_t dk_version;
/* Number of usable entries in dk_entries. */
Py_ssize_t dk_usable;
/* Number of used entries in dk_entries. */
Py_ssize_t dk_nentries;
/* Actual hash table of dk_size entries. It holds indices in dk_entries,
or DKIX_EMPTY(-1) or DKIX_DUMMY(-2).
Indices must be: 0 <= index < USABLE_FRACTION(dk_size).
The size in bytes of an index depends on dk_size:
- 1 byte if dk_size <= 0xff (char*)
- 2 bytes if dk_size <= 0xffff (int16_t*)
- 4 bytes if dk_size <= 0xffffffff (int32_t*)
- 8 bytes otherwise (int64_t*)
Dynamically sized, SIZEOF_VOID_P is minimum. */
char dk_indices[]; /* char is required to avoid strict aliasing. */
/* "PyDictKeyEntry or PyDictUnicodeEntry dk_entries[USABLE_FRACTION(DK_SIZE(dk))];" array follows:
see the DK_ENTRIES() macro */
};
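/* Example (illustrative): a fresh table with dk_log2_size == 3 has
   DK_SIZE == 8 slots and uses 1-byte indices, so dk_log2_index_bytes == 3
   (8 bytes of dk_indices in total) and the entry array starts at
   dk_indices + 8, exactly what _DK_ENTRIES() below computes. */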
/* This must be no more than 250, for the prefix size to fit in one byte. */
#define SHARED_KEYS_MAX_SIZE 30
#define NEXT_LOG2_SHARED_KEYS_MAX_SIZE 6
/* Layout of dict values:
*
* The PyObject *values are preceded by an array of bytes holding
* the insertion order and size.
* [-1] = prefix size. [-2] = used size. size[-2-n...] = insertion order.
*/
struct _dictvalues {
PyObject *values[1];
};
#define DK_LOG_SIZE(dk) _Py_RVALUE((dk)->dk_log2_size)
#if SIZEOF_VOID_P > 4
#define DK_SIZE(dk) (((int64_t)1)<<DK_LOG_SIZE(dk))
#else
#define DK_SIZE(dk) (1<<DK_LOG_SIZE(dk))
#endif
static inline void* _DK_ENTRIES(PyDictKeysObject *dk) {
int8_t *indices = (int8_t*)(dk->dk_indices);
size_t index = (size_t)1 << dk->dk_log2_index_bytes;
return (&indices[index]);
}
static inline PyDictKeyEntry* DK_ENTRIES(PyDictKeysObject *dk) {
assert(dk->dk_kind == DICT_KEYS_GENERAL);
return (PyDictKeyEntry*)_DK_ENTRIES(dk);
}
static inline PyDictUnicodeEntry* DK_UNICODE_ENTRIES(PyDictKeysObject *dk) {
assert(dk->dk_kind != DICT_KEYS_GENERAL);
return (PyDictUnicodeEntry*)_DK_ENTRIES(dk);
}
#define DK_IS_UNICODE(dk) ((dk)->dk_kind != DICT_KEYS_GENERAL)
#define DICT_VERSION_INCREMENT (1 << DICT_MAX_WATCHERS)
#define DICT_VERSION_MASK (DICT_VERSION_INCREMENT - 1)
#define DICT_NEXT_VERSION(INTERP) \
((INTERP)->dict_state.global_version += DICT_VERSION_INCREMENT)
void
_PyDict_SendEvent(int watcher_bits,
PyDict_WatchEvent event,
PyDictObject *mp,
PyObject *key,
PyObject *value);
static inline uint64_t
_PyDict_NotifyEvent(PyInterpreterState *interp,
PyDict_WatchEvent event,
PyDictObject *mp,
PyObject *key,
PyObject *value)
{
assert(Py_REFCNT((PyObject*)mp) > 0);
int watcher_bits = mp->ma_version_tag & DICT_VERSION_MASK;
if (watcher_bits) {
_PyDict_SendEvent(watcher_bits, event, mp, key, value);
return DICT_NEXT_VERSION(interp) | watcher_bits;
}
return DICT_NEXT_VERSION(interp);
}
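/* Illustrative example: with DICT_MAX_WATCHERS == 8, the low 8 bits of
   ma_version_tag carry one flag per registered watcher and the version
   proper lives in the remaining high bits. If watcher slot 2 is active
   (watcher_bits == 0x04), _PyDict_NotifyEvent() above fires the callback
   and returns the bumped version with bit 2 still set, so later
   modifications keep notifying that watcher. */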
extern PyObject *_PyObject_MakeDictFromInstanceAttributes(PyObject *obj, PyDictValues *values);
extern PyObject *_PyDict_FromItems(
PyObject *const *keys, Py_ssize_t keys_offset,
PyObject *const *values, Py_ssize_t values_offset,
Py_ssize_t length);
static inline void
_PyDictValues_AddToInsertionOrder(PyDictValues *values, Py_ssize_t ix)
{
assert(ix < SHARED_KEYS_MAX_SIZE);
uint8_t *size_ptr = ((uint8_t *)values)-2;
int size = *size_ptr;
assert(size+2 < ((uint8_t *)values)[-1]);
size++;
size_ptr[-size] = (uint8_t)ix;
*size_ptr = size;
}
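/* Worked example (illustrative): inserting values at key indices 0 and
   then 3 into an empty values array leaves the prefix bytes as:
   ((uint8_t *)values)[-1] == prefix size, [-2] == 2 (used size),
   [-3] == 0 (first insertion) and [-4] == 3 (second insertion).
   The assert above guarantees the order bytes never overrun the prefix. */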
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_DICT_H */

View File

@@ -0,0 +1,50 @@
#ifndef Py_INTERNAL_DICT_STATE_H
#define Py_INTERNAL_DICT_STATE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#ifndef WITH_FREELISTS
// without freelists
# define PyDict_MAXFREELIST 0
#endif
#ifndef PyDict_MAXFREELIST
# define PyDict_MAXFREELIST 80
#endif
#define DICT_MAX_WATCHERS 8
struct _Py_dict_state {
/* Global counter used to set the ma_version_tag field of dictionaries.
* It is incremented each time that a dictionary is created and each
* time that a dictionary is modified. */
uint64_t global_version;
uint32_t next_keys_version;
#if PyDict_MAXFREELIST > 0
/* Dictionary reuse scheme to save calls to malloc and free */
PyDictObject *free_list[PyDict_MAXFREELIST];
PyDictKeysObject *keys_free_list[PyDict_MAXFREELIST];
int numfree;
int keys_numfree;
#endif
PyDict_WatchCallback watchers[DICT_MAX_WATCHERS];
};
#define _dict_state_INIT \
{ \
.next_keys_version = 2, \
}
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_DICT_STATE_H */

View File

@@ -0,0 +1,73 @@
#ifndef Py_INTERNAL_DTOA_H
#define Py_INTERNAL_DTOA_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_pymath.h" // _PY_SHORT_FLOAT_REPR
#if _PY_SHORT_FLOAT_REPR == 1
typedef uint32_t ULong;
struct
Bigint {
struct Bigint *next;
int k, maxwds, sign, wds;
ULong x[1];
};
#ifdef Py_USING_MEMORY_DEBUGGER
struct _dtoa_state {
int _not_used;
};
#define _dtoa_interp_state_INIT(INTERP) \
{0}
#else // !Py_USING_MEMORY_DEBUGGER
/* The size of the Bigint freelist */
#define Bigint_Kmax 7
#ifndef PRIVATE_MEM
#define PRIVATE_MEM 2304
#endif
#define Bigint_PREALLOC_SIZE \
((PRIVATE_MEM+sizeof(double)-1)/sizeof(double))
struct _dtoa_state {
/* p5s is a linked list of powers of 5 of the form 5**(2**i), i >= 2 */
// XXX This should be freed during runtime fini.
struct Bigint *p5s;
struct Bigint *freelist[Bigint_Kmax+1];
double preallocated[Bigint_PREALLOC_SIZE];
double *preallocated_next;
};
#define _dtoa_state_INIT(INTERP) \
{ \
.preallocated_next = (INTERP)->dtoa.preallocated, \
}
#endif // !Py_USING_MEMORY_DEBUGGER
/* These functions are used by modules compiled as C extension like math:
they must be exported. */
PyAPI_FUNC(double) _Py_dg_strtod(const char *str, char **ptr);
PyAPI_FUNC(char *) _Py_dg_dtoa(double d, int mode, int ndigits,
int *decpt, int *sign, char **rve);
PyAPI_FUNC(void) _Py_dg_freedtoa(char *s);
#endif // _PY_SHORT_FLOAT_REPR == 1
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_DTOA_H */

View File

@@ -0,0 +1,25 @@
#ifndef Py_EMSCRIPTEN_SIGNAL_H
#define Py_EMSCRIPTEN_SIGNAL_H
#if defined(__EMSCRIPTEN__)
void
_Py_CheckEmscriptenSignals(void);
void
_Py_CheckEmscriptenSignalsPeriodically(void);
#define _Py_CHECK_EMSCRIPTEN_SIGNALS() _Py_CheckEmscriptenSignals()
#define _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY() _Py_CheckEmscriptenSignalsPeriodically()
extern int Py_EMSCRIPTEN_SIGNAL_HANDLING;
#else
#define _Py_CHECK_EMSCRIPTEN_SIGNALS()
#define _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY()
#endif // defined(__EMSCRIPTEN__)
#endif // ndef Py_EMSCRIPTEN_SIGNAL_H

View File

@@ -0,0 +1,37 @@
#ifndef Py_INTERNAL_EXCEPTIONS_H
#define Py_INTERNAL_EXCEPTIONS_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* runtime lifecycle */
extern PyStatus _PyExc_InitState(PyInterpreterState *);
extern PyStatus _PyExc_InitGlobalObjects(PyInterpreterState *);
extern int _PyExc_InitTypes(PyInterpreterState *);
extern void _PyExc_Fini(PyInterpreterState *);
/* other API */
struct _Py_exc_state {
// The dict mapping from errno codes to OSError subclasses
PyObject *errnomap;
PyBaseExceptionObject *memerrors_freelist;
int memerrors_numfree;
// The ExceptionGroup type
PyObject *PyExc_ExceptionGroup;
};
extern void _PyExc_ClearExceptionGroupType(PyInterpreterState *);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_EXCEPTIONS_H */

View File

@@ -0,0 +1,99 @@
#ifndef Py_INTERNAL_FAULTHANDLER_H
#define Py_INTERNAL_FAULTHANDLER_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#ifdef HAVE_SIGACTION
# include <signal.h>
#endif
#ifndef MS_WINDOWS
/* register() is useless on Windows, because only SIGSEGV, SIGABRT and
SIGILL can be handled by the process, and these signals can only be used
with enable(), not with register() */
# define FAULTHANDLER_USER
#endif
#ifdef HAVE_SIGACTION
/* Using an alternative stack requires sigaltstack()
and sigaction() SA_ONSTACK */
# ifdef HAVE_SIGALTSTACK
# define FAULTHANDLER_USE_ALT_STACK
# endif
typedef struct sigaction _Py_sighandler_t;
#else
typedef PyOS_sighandler_t _Py_sighandler_t;
#endif // HAVE_SIGACTION
#ifdef FAULTHANDLER_USER
struct faulthandler_user_signal {
int enabled;
PyObject *file;
int fd;
int all_threads;
int chain;
_Py_sighandler_t previous;
PyInterpreterState *interp;
};
#endif /* FAULTHANDLER_USER */
struct _faulthandler_runtime_state {
struct {
int enabled;
PyObject *file;
int fd;
int all_threads;
PyInterpreterState *interp;
#ifdef MS_WINDOWS
void *exc_handler;
#endif
} fatal_error;
struct {
PyObject *file;
int fd;
PY_TIMEOUT_T timeout_us; /* timeout in microseconds */
int repeat;
PyInterpreterState *interp;
int exit;
char *header;
size_t header_len;
/* The main thread always holds this lock. It is only released when
faulthandler_thread() is interrupted before this thread exits, or at
Python exit. */
PyThread_type_lock cancel_event;
/* released by child thread when joined */
PyThread_type_lock running;
} thread;
#ifdef FAULTHANDLER_USER
struct faulthandler_user_signal *user_signals;
#endif
#ifdef FAULTHANDLER_USE_ALT_STACK
stack_t stack;
stack_t old_stack;
#endif
};
#define _faulthandler_runtime_state_INIT \
{ \
.fatal_error = { \
.fd = -1, \
}, \
}
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_FAULTHANDLER_H */

View File

@@ -0,0 +1,292 @@
#ifndef Py_INTERNAL_FILEUTILS_H
#define Py_INTERNAL_FILEUTILS_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "Py_BUILD_CORE must be defined to include this header"
#endif
#include <locale.h> /* struct lconv */
struct _fileutils_state {
int force_ascii;
};
typedef enum {
_Py_ERROR_UNKNOWN=0,
_Py_ERROR_STRICT,
_Py_ERROR_SURROGATEESCAPE,
_Py_ERROR_REPLACE,
_Py_ERROR_IGNORE,
_Py_ERROR_BACKSLASHREPLACE,
_Py_ERROR_SURROGATEPASS,
_Py_ERROR_XMLCHARREFREPLACE,
_Py_ERROR_OTHER
} _Py_error_handler;
PyAPI_FUNC(_Py_error_handler) _Py_GetErrorHandler(const char *errors);
PyAPI_FUNC(int) _Py_DecodeLocaleEx(
const char *arg,
wchar_t **wstr,
size_t *wlen,
const char **reason,
int current_locale,
_Py_error_handler errors);
PyAPI_FUNC(int) _Py_EncodeLocaleEx(
const wchar_t *text,
char **str,
size_t *error_pos,
const char **reason,
int current_locale,
_Py_error_handler errors);
PyAPI_FUNC(char*) _Py_EncodeLocaleRaw(
const wchar_t *text,
size_t *error_pos);
PyAPI_FUNC(PyObject *) _Py_device_encoding(int);
#if defined(MS_WINDOWS) || defined(__APPLE__)
/* On Windows, the count parameter of read() is an int (bpo-9015, bpo-9611).
On macOS 10.13, read() and write() with more than INT_MAX bytes
fail with EINVAL (bpo-24658). */
# define _PY_READ_MAX INT_MAX
# define _PY_WRITE_MAX INT_MAX
#else
/* write() should truncate the input to PY_SSIZE_T_MAX bytes,
   but it's safer to do it ourselves to get portable behaviour */
# define _PY_READ_MAX PY_SSIZE_T_MAX
# define _PY_WRITE_MAX PY_SSIZE_T_MAX
#endif
#ifdef MS_WINDOWS
struct _Py_stat_struct {
uint64_t st_dev;
uint64_t st_ino;
unsigned short st_mode;
int st_nlink;
int st_uid;
int st_gid;
unsigned long st_rdev;
__int64 st_size;
time_t st_atime;
int st_atime_nsec;
time_t st_mtime;
int st_mtime_nsec;
time_t st_ctime;
int st_ctime_nsec;
time_t st_birthtime;
int st_birthtime_nsec;
unsigned long st_file_attributes;
unsigned long st_reparse_tag;
uint64_t st_ino_high;
};
#else
# define _Py_stat_struct stat
#endif
PyAPI_FUNC(int) _Py_fstat(
int fd,
struct _Py_stat_struct *status);
PyAPI_FUNC(int) _Py_fstat_noraise(
int fd,
struct _Py_stat_struct *status);
PyAPI_FUNC(int) _Py_stat(
PyObject *path,
struct stat *status);
PyAPI_FUNC(int) _Py_open(
const char *pathname,
int flags);
PyAPI_FUNC(int) _Py_open_noraise(
const char *pathname,
int flags);
PyAPI_FUNC(FILE *) _Py_wfopen(
const wchar_t *path,
const wchar_t *mode);
PyAPI_FUNC(Py_ssize_t) _Py_read(
int fd,
void *buf,
size_t count);
PyAPI_FUNC(Py_ssize_t) _Py_write(
int fd,
const void *buf,
size_t count);
PyAPI_FUNC(Py_ssize_t) _Py_write_noraise(
int fd,
const void *buf,
size_t count);
#ifdef HAVE_READLINK
PyAPI_FUNC(int) _Py_wreadlink(
const wchar_t *path,
wchar_t *buf,
/* Number of characters of 'buf' buffer
including the trailing NUL character */
size_t buflen);
#endif
#ifdef HAVE_REALPATH
PyAPI_FUNC(wchar_t*) _Py_wrealpath(
const wchar_t *path,
wchar_t *resolved_path,
/* Number of characters of 'resolved_path' buffer
including the trailing NUL character */
size_t resolved_path_len);
#endif
PyAPI_FUNC(wchar_t*) _Py_wgetcwd(
wchar_t *buf,
/* Number of characters of 'buf' buffer
including the trailing NUL character */
size_t buflen);
PyAPI_FUNC(int) _Py_get_inheritable(int fd);
PyAPI_FUNC(int) _Py_set_inheritable(int fd, int inheritable,
int *atomic_flag_works);
PyAPI_FUNC(int) _Py_set_inheritable_async_safe(int fd, int inheritable,
int *atomic_flag_works);
PyAPI_FUNC(int) _Py_dup(int fd);
PyAPI_FUNC(int) _Py_get_blocking(int fd);
PyAPI_FUNC(int) _Py_set_blocking(int fd, int blocking);
#ifdef MS_WINDOWS
PyAPI_FUNC(void*) _Py_get_osfhandle_noraise(int fd);
PyAPI_FUNC(void*) _Py_get_osfhandle(int fd);
PyAPI_FUNC(int) _Py_open_osfhandle_noraise(void *handle, int flags);
PyAPI_FUNC(int) _Py_open_osfhandle(void *handle, int flags);
#endif /* MS_WINDOWS */
// This is used after getting NULL back from Py_DecodeLocale().
#define DECODE_LOCALE_ERR(NAME, LEN) \
((LEN) == (size_t)-2) \
? _PyStatus_ERR("cannot decode " NAME) \
: _PyStatus_NO_MEMORY()
PyAPI_DATA(int) _Py_HasFileSystemDefaultEncodeErrors;
PyAPI_FUNC(int) _Py_DecodeUTF8Ex(
const char *arg,
Py_ssize_t arglen,
wchar_t **wstr,
size_t *wlen,
const char **reason,
_Py_error_handler errors);
PyAPI_FUNC(int) _Py_EncodeUTF8Ex(
const wchar_t *text,
char **str,
size_t *error_pos,
const char **reason,
int raw_malloc,
_Py_error_handler errors);
PyAPI_FUNC(wchar_t*) _Py_DecodeUTF8_surrogateescape(
const char *arg,
Py_ssize_t arglen,
size_t *wlen);
extern int
_Py_wstat(const wchar_t *, struct stat *);
PyAPI_FUNC(int) _Py_GetForceASCII(void);
/* Reset "force ASCII" mode (if it was initialized).
This function should be called when Python changes the LC_CTYPE locale,
so the "force ASCII" mode can be detected again on the new locale
encoding. */
PyAPI_FUNC(void) _Py_ResetForceASCII(void);
PyAPI_FUNC(int) _Py_GetLocaleconvNumeric(
struct lconv *lc,
PyObject **decimal_point,
PyObject **thousands_sep);
PyAPI_FUNC(void) _Py_closerange(int first, int last);
PyAPI_FUNC(wchar_t*) _Py_GetLocaleEncoding(void);
PyAPI_FUNC(PyObject*) _Py_GetLocaleEncodingObject(void);
#ifdef HAVE_NON_UNICODE_WCHAR_T_REPRESENTATION
extern int _Py_LocaleUsesNonUnicodeWchar(void);
extern wchar_t* _Py_DecodeNonUnicodeWchar(
const wchar_t* native,
Py_ssize_t size);
extern int _Py_EncodeNonUnicodeWchar_InPlace(
wchar_t* unicode,
Py_ssize_t size);
#endif
extern int _Py_isabs(const wchar_t *path);
extern int _Py_abspath(const wchar_t *path, wchar_t **abspath_p);
#ifdef MS_WINDOWS
extern int _PyOS_getfullpathname(const wchar_t *path, wchar_t **abspath_p);
#endif
extern wchar_t * _Py_join_relfile(const wchar_t *dirname,
const wchar_t *relfile);
extern int _Py_add_relfile(wchar_t *dirname,
const wchar_t *relfile,
size_t bufsize);
extern size_t _Py_find_basename(const wchar_t *filename);
PyAPI_FUNC(wchar_t*) _Py_normpath(wchar_t *path, Py_ssize_t size);
extern wchar_t *_Py_normpath_and_size(wchar_t *path, Py_ssize_t size, Py_ssize_t *length);
// The Windows Games API family does not provide these functions
// so provide our own implementations. Remove them in case they get added
// to the Games API family
#if defined(MS_WINDOWS_GAMES) && !defined(MS_WINDOWS_DESKTOP)
#include <winerror.h>
extern HRESULT PathCchSkipRoot(const wchar_t *pszPath, const wchar_t **ppszRootEnd);
#endif /* defined(MS_WINDOWS_GAMES) && !defined(MS_WINDOWS_DESKTOP) */
// Macros to protect CRT calls against instant termination when passed an
// invalid parameter (bpo-23524). IPH stands for Invalid Parameter Handler.
// Usage:
//
// _Py_BEGIN_SUPPRESS_IPH
// ...
// _Py_END_SUPPRESS_IPH
#if defined _MSC_VER && _MSC_VER >= 1900
# include <stdlib.h> // _set_thread_local_invalid_parameter_handler()
extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler;
# define _Py_BEGIN_SUPPRESS_IPH \
{ _invalid_parameter_handler _Py_old_handler = \
_set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler);
# define _Py_END_SUPPRESS_IPH \
_set_thread_local_invalid_parameter_handler(_Py_old_handler); }
#else
# define _Py_BEGIN_SUPPRESS_IPH
# define _Py_END_SUPPRESS_IPH
#endif /* _MSC_VER >= 1900 */
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_FILEUTILS_H */
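
As a usage sketch, decoding a byte string with _Py_DecodeLocaleEx() looks roughly like the following. It assumes the function's documented convention (0 on success, -1 on memory failure, -2 on a decoding error with *reason set) and that the result buffer is released with PyMem_RawFree().

wchar_t *wstr = NULL;
size_t wlen = 0;
const char *reason = NULL;
int res = _Py_DecodeLocaleEx("h\xc3\xa9llo", &wstr, &wlen, &reason,
                             0 /* use the filesystem encoding */,
                             _Py_ERROR_SURROGATEESCAPE);
if (res == 0) {
    /* use wstr[0..wlen) ... */
    PyMem_RawFree(wstr);
}
else if (res == -2) {
    /* decoding failed; 'reason' describes why */
}
else {
    /* res == -1: out of memory */
}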

View File

@@ -0,0 +1,98 @@
#ifndef Py_INTERNAL_FILEUTILS_WINDOWS_H
#define Py_INTERNAL_FILEUTILS_WINDOWS_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "Py_BUILD_CORE must be defined to include this header"
#endif
#ifdef MS_WINDOWS
#if !defined(NTDDI_WIN10_NI) || !(NTDDI_VERSION >= NTDDI_WIN10_NI)
typedef struct _FILE_STAT_BASIC_INFORMATION {
LARGE_INTEGER FileId;
LARGE_INTEGER CreationTime;
LARGE_INTEGER LastAccessTime;
LARGE_INTEGER LastWriteTime;
LARGE_INTEGER ChangeTime;
LARGE_INTEGER AllocationSize;
LARGE_INTEGER EndOfFile;
ULONG FileAttributes;
ULONG ReparseTag;
ULONG NumberOfLinks;
ULONG DeviceType;
ULONG DeviceCharacteristics;
ULONG Reserved;
LARGE_INTEGER VolumeSerialNumber;
FILE_ID_128 FileId128;
} FILE_STAT_BASIC_INFORMATION;
typedef enum _FILE_INFO_BY_NAME_CLASS {
FileStatByNameInfo,
FileStatLxByNameInfo,
FileCaseSensitiveByNameInfo,
FileStatBasicByNameInfo,
MaximumFileInfoByNameClass
} FILE_INFO_BY_NAME_CLASS;
#endif
typedef BOOL (WINAPI *PGetFileInformationByName)(
PCWSTR FileName,
FILE_INFO_BY_NAME_CLASS FileInformationClass,
PVOID FileInfoBuffer,
ULONG FileInfoBufferSize
);
static inline BOOL _Py_GetFileInformationByName(
PCWSTR FileName,
FILE_INFO_BY_NAME_CLASS FileInformationClass,
PVOID FileInfoBuffer,
ULONG FileInfoBufferSize
) {
static PGetFileInformationByName GetFileInformationByName = NULL;
static int GetFileInformationByName_init = -1;
if (GetFileInformationByName_init < 0) {
HMODULE hMod = LoadLibraryW(L"api-ms-win-core-file-l2-1-4");
GetFileInformationByName_init = 0;
if (hMod) {
GetFileInformationByName = (PGetFileInformationByName)GetProcAddress(
hMod, "GetFileInformationByName");
if (GetFileInformationByName) {
GetFileInformationByName_init = 1;
} else {
FreeLibrary(hMod);
}
}
}
if (GetFileInformationByName_init <= 0) {
SetLastError(ERROR_NOT_SUPPORTED);
return FALSE;
}
return GetFileInformationByName(FileName, FileInformationClass, FileInfoBuffer, FileInfoBufferSize);
}
static inline BOOL _Py_GetFileInformationByName_ErrorIsTrustworthy(int error)
{
switch(error) {
case ERROR_FILE_NOT_FOUND:
case ERROR_PATH_NOT_FOUND:
case ERROR_NOT_READY:
case ERROR_BAD_NET_NAME:
case ERROR_BAD_NETPATH:
case ERROR_BAD_PATHNAME:
case ERROR_INVALID_NAME:
case ERROR_FILENAME_EXCED_RANGE:
return TRUE;
case ERROR_NOT_SUPPORTED:
return FALSE;
}
return FALSE;
}
#endif
#endif
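
A usage sketch for the pair of helpers above, with a hypothetical path: query basic stat information by name, trust "not found"-style errors only when the error code is trustworthy, and otherwise fall back to the handle-based API.

FILE_STAT_BASIC_INFORMATION info;
if (_Py_GetFileInformationByName(L"C:\\some\\path", FileStatBasicByNameInfo,
                                 &info, sizeof(info))) {
    /* use info.FileAttributes, info.EndOfFile, ... */
}
else if (_Py_GetFileInformationByName_ErrorIsTrustworthy(GetLastError())) {
    /* the file genuinely does not exist (or the name is invalid) */
}
else {
    /* e.g. ERROR_NOT_SUPPORTED: fall back to CreateFileW() +
       GetFileInformationByHandle() */
}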

View File

@@ -0,0 +1,71 @@
#ifndef Py_INTERNAL_FLOATOBJECT_H
#define Py_INTERNAL_FLOATOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* runtime lifecycle */
extern void _PyFloat_InitState(PyInterpreterState *);
extern PyStatus _PyFloat_InitTypes(PyInterpreterState *);
extern void _PyFloat_Fini(PyInterpreterState *);
extern void _PyFloat_FiniType(PyInterpreterState *);
/* other API */
enum _py_float_format_type {
_py_float_format_unknown,
_py_float_format_ieee_big_endian,
_py_float_format_ieee_little_endian,
};
struct _Py_float_runtime_state {
enum _py_float_format_type float_format;
enum _py_float_format_type double_format;
};
#ifndef WITH_FREELISTS
// without freelists
# define PyFloat_MAXFREELIST 0
#endif
#ifndef PyFloat_MAXFREELIST
# define PyFloat_MAXFREELIST 100
#endif
struct _Py_float_state {
#if PyFloat_MAXFREELIST > 0
/* Special free list
free_list is a singly-linked list of available PyFloatObjects,
linked via abuse of their ob_type members. */
int numfree;
PyFloatObject *free_list;
#endif
};
void _PyFloat_ExactDealloc(PyObject *op);
PyAPI_FUNC(void) _PyFloat_DebugMallocStats(FILE* out);
/* Format the object based on the format_spec, as defined in PEP 3101
(Advanced String Formatting). */
PyAPI_FUNC(int) _PyFloat_FormatAdvancedWriter(
_PyUnicodeWriter *writer,
PyObject *obj,
PyObject *format_spec,
Py_ssize_t start,
Py_ssize_t end);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_FLOATOBJECT_H */
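
A minimal sketch of how a free list "linked via abuse of ob_type" (as the comment above puts it) can be popped; the real allocator is in Objects/floatobject.c, so treat this purely as an illustration of the data structure.

static PyFloatObject *
float_freelist_pop(struct _Py_float_state *state)
{
#if PyFloat_MAXFREELIST > 0
    PyFloatObject *op = state->free_list;
    if (op != NULL) {
        /* the "next" link is smuggled through ob_type */
        state->free_list = (PyFloatObject *)Py_TYPE(op);
        state->numfree--;
    }
    return op;
#else
    return NULL;
#endif
}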

View File

@@ -0,0 +1,120 @@
#ifndef Py_INTERNAL_CFG_H
#define Py_INTERNAL_CFG_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_opcode_utils.h"
#include "pycore_compile.h"
typedef struct {
int i_opcode;
int i_oparg;
_PyCompilerSrcLocation i_loc;
struct _PyCfgBasicblock_ *i_target; /* target block (if jump instruction) */
struct _PyCfgBasicblock_ *i_except; /* target block when exception is raised */
} _PyCfgInstruction;
typedef struct {
int id;
} _PyCfgJumpTargetLabel;
typedef struct {
struct _PyCfgBasicblock_ *handlers[CO_MAXBLOCKS+1];
int depth;
} _PyCfgExceptStack;
typedef struct _PyCfgBasicblock_ {
/* Each basicblock in a compilation unit is linked via b_list in the
       reverse order in which the blocks are allocated. b_list points to the next
block in this list, not to be confused with b_next, which is next by
control flow. */
struct _PyCfgBasicblock_ *b_list;
/* The label of this block if it is a jump target, -1 otherwise */
_PyCfgJumpTargetLabel b_label;
/* Exception stack at start of block, used by assembler to create the exception handling table */
_PyCfgExceptStack *b_exceptstack;
/* pointer to an array of instructions, initially NULL */
_PyCfgInstruction *b_instr;
/* If b_next is non-NULL, it is a pointer to the next
block reached by normal control flow. */
struct _PyCfgBasicblock_ *b_next;
/* number of instructions used */
int b_iused;
/* length of instruction array (b_instr) */
int b_ialloc;
/* Used by add_checks_for_loads_of_unknown_variables */
uint64_t b_unsafe_locals_mask;
/* Number of predecessors that a block has. */
int b_predecessors;
/* depth of stack upon entry of block, computed by stackdepth() */
int b_startdepth;
/* instruction offset for block, computed by assemble_jump_offsets() */
int b_offset;
/* Basic block is an exception handler that preserves lasti */
unsigned b_preserve_lasti : 1;
/* Used by compiler passes to mark whether they have visited a basic block. */
unsigned b_visited : 1;
/* b_except_handler is used by the cold-detection algorithm to mark exception targets */
unsigned b_except_handler : 1;
/* b_cold is true if this block is not perf critical (like an exception handler) */
unsigned b_cold : 1;
/* b_warm is used by the cold-detection algorithm to mark blocks which are definitely not cold */
unsigned b_warm : 1;
} _PyCfgBasicblock;
int _PyBasicblock_InsertInstruction(_PyCfgBasicblock *block, int pos, _PyCfgInstruction *instr);
typedef struct cfg_builder_ {
/* The entryblock, at which control flow begins. All blocks of the
CFG are reachable through the b_next links */
_PyCfgBasicblock *g_entryblock;
/* Pointer to the most recently allocated block. By following
b_list links, you can reach all allocated blocks. */
_PyCfgBasicblock *g_block_list;
/* pointer to the block currently being constructed */
_PyCfgBasicblock *g_curblock;
/* label for the next instruction to be placed */
_PyCfgJumpTargetLabel g_current_label;
} _PyCfgBuilder;
int _PyCfgBuilder_UseLabel(_PyCfgBuilder *g, _PyCfgJumpTargetLabel lbl);
int _PyCfgBuilder_Addop(_PyCfgBuilder *g, int opcode, int oparg, _PyCompilerSrcLocation loc);
int _PyCfgBuilder_Init(_PyCfgBuilder *g);
void _PyCfgBuilder_Fini(_PyCfgBuilder *g);
_PyCfgInstruction* _PyCfg_BasicblockLastInstr(const _PyCfgBasicblock *b);
int _PyCfg_OptimizeCodeUnit(_PyCfgBuilder *g, PyObject *consts, PyObject *const_cache,
int code_flags, int nlocals, int nparams, int firstlineno);
int _PyCfg_Stackdepth(_PyCfgBasicblock *entryblock, int code_flags);
void _PyCfg_ConvertPseudoOps(_PyCfgBasicblock *entryblock);
int _PyCfg_ResolveJumps(_PyCfgBuilder *g);
static inline int
basicblock_nofallthrough(const _PyCfgBasicblock *b) {
_PyCfgInstruction *last = _PyCfg_BasicblockLastInstr(b);
return (last &&
(IS_SCOPE_EXIT_OPCODE(last->i_opcode) ||
IS_UNCONDITIONAL_JUMP_OPCODE(last->i_opcode)));
}
#define BB_NO_FALLTHROUGH(B) (basicblock_nofallthrough(B))
#define BB_HAS_FALLTHROUGH(B) (!basicblock_nofallthrough(B))
PyCodeObject *
_PyAssemble_MakeCodeObject(_PyCompile_CodeUnitMetadata *u, PyObject *const_cache,
PyObject *consts, int maxdepth, _PyCompile_InstructionSequence *instrs,
int nlocalsplus, int code_flags, PyObject *filename);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CFG_H */
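
The two links described above support two distinct traversals; a minimal sketch (assuming a populated _PyCfgBuilder *g):

/* Visit every allocated block, in allocation order, via b_list. */
for (_PyCfgBasicblock *b = g->g_block_list; b != NULL; b = b->b_list) {
    for (int i = 0; i < b->b_iused; i++) {
        _PyCfgInstruction *instr = &b->b_instr[i];
        (void)instr;  /* inspect i_opcode / i_oparg here */
    }
}
/* Versus following normal control flow from the entry block via b_next. */
for (_PyCfgBasicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
    /* blocks in execution-layout order */
}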

View File

@@ -0,0 +1,27 @@
#ifndef Py_INTERNAL_FORMAT_H
#define Py_INTERNAL_FORMAT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* Format codes
* F_LJUST '-'
* F_SIGN '+'
* F_BLANK ' '
* F_ALT '#'
* F_ZERO '0'
*/
#define F_LJUST (1<<0)
#define F_SIGN (1<<1)
#define F_BLANK (1<<2)
#define F_ALT (1<<3)
#define F_ZERO (1<<4)
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_FORMAT_H */
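
A sketch of how such flag bits are typically accumulated while scanning a printf-style format, so that e.g. "-#05" yields F_LJUST | F_ALT | F_ZERO; the parser below is illustrative, not the one CPython uses.

static int
parse_flags(const char **pfmt)
{
    int flags = 0;
    for (;; (*pfmt)++) {
        switch (**pfmt) {
        case '-': flags |= F_LJUST; continue;
        case '+': flags |= F_SIGN;  continue;
        case ' ': flags |= F_BLANK; continue;
        case '#': flags |= F_ALT;   continue;
        case '0': flags |= F_ZERO;  continue;
        }
        break;  /* first non-flag character ends the scan */
    }
    return flags;
}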

View File

@@ -0,0 +1,281 @@
#ifndef Py_INTERNAL_FRAME_H
#define Py_INTERNAL_FRAME_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdbool.h>
#include <stddef.h>
#include "pycore_code.h" // STATS
/* See Objects/frame_layout.md for an explanation of the frame stack
* including explanation of the PyFrameObject and _PyInterpreterFrame
* structs. */
struct _frame {
PyObject_HEAD
PyFrameObject *f_back; /* previous frame, or NULL */
struct _PyInterpreterFrame *f_frame; /* points to the frame data */
PyObject *f_trace; /* Trace function */
int f_lineno; /* Current line number. Only valid if non-zero */
char f_trace_lines; /* Emit per-line trace events? */
char f_trace_opcodes; /* Emit per-opcode trace events? */
char f_fast_as_locals; /* Have the fast locals of this frame been converted to a dict? */
/* The frame data, if this frame object owns the frame */
PyObject *_f_frame_data[1];
};
extern PyFrameObject* _PyFrame_New_NoTrack(PyCodeObject *code);
/* other API */
typedef enum _framestate {
FRAME_CREATED = -2,
FRAME_SUSPENDED = -1,
FRAME_EXECUTING = 0,
FRAME_COMPLETED = 1,
FRAME_CLEARED = 4
} PyFrameState;
enum _frameowner {
FRAME_OWNED_BY_THREAD = 0,
FRAME_OWNED_BY_GENERATOR = 1,
FRAME_OWNED_BY_FRAME_OBJECT = 2,
FRAME_OWNED_BY_CSTACK = 3,
};
typedef struct _PyInterpreterFrame {
PyCodeObject *f_code; /* Strong reference */
struct _PyInterpreterFrame *previous;
PyObject *f_funcobj; /* Strong reference. Only valid if not on C stack */
PyObject *f_globals; /* Borrowed reference. Only valid if not on C stack */
PyObject *f_builtins; /* Borrowed reference. Only valid if not on C stack */
PyObject *f_locals; /* Strong reference, may be NULL. Only valid if not on C stack */
PyFrameObject *frame_obj; /* Strong reference, may be NULL. Only valid if not on C stack */
// NOTE: This is not necessarily the last instruction started in the given
// frame. Rather, it is the code unit *prior to* the *next* instruction. For
// example, it may be an inline CACHE entry, an instruction we just jumped
// over, or (in the case of a newly-created frame) a totally invalid value:
_Py_CODEUNIT *prev_instr;
int stacktop; /* Offset of TOS from localsplus */
/* The return_offset determines where a `RETURN` should go in the caller,
* relative to `prev_instr`.
* It is only meaningful to the callee,
* so it needs to be set in any CALL (to a Python function)
* or SEND (to a coroutine or generator).
* If there is no callee, then it is meaningless. */
uint16_t return_offset;
char owner;
/* Locals and stack */
PyObject *localsplus[1];
} _PyInterpreterFrame;
#define _PyInterpreterFrame_LASTI(IF) \
((int)((IF)->prev_instr - _PyCode_CODE((IF)->f_code)))
static inline PyObject **_PyFrame_Stackbase(_PyInterpreterFrame *f) {
return f->localsplus + f->f_code->co_nlocalsplus;
}
static inline PyObject *_PyFrame_StackPeek(_PyInterpreterFrame *f) {
assert(f->stacktop > f->f_code->co_nlocalsplus);
assert(f->localsplus[f->stacktop-1] != NULL);
return f->localsplus[f->stacktop-1];
}
static inline PyObject *_PyFrame_StackPop(_PyInterpreterFrame *f) {
assert(f->stacktop > f->f_code->co_nlocalsplus);
f->stacktop--;
return f->localsplus[f->stacktop];
}
static inline void _PyFrame_StackPush(_PyInterpreterFrame *f, PyObject *value) {
f->localsplus[f->stacktop] = value;
f->stacktop++;
}
#define FRAME_SPECIALS_SIZE ((int)((sizeof(_PyInterpreterFrame)-1)/sizeof(PyObject *)))
static inline int
_PyFrame_NumSlotsForCodeObject(PyCodeObject *code)
{
/* This function needs to remain in sync with the calculation of
* co_framesize in Tools/build/deepfreeze.py */
assert(code->co_framesize >= FRAME_SPECIALS_SIZE);
return code->co_framesize - FRAME_SPECIALS_SIZE;
}
void _PyFrame_Copy(_PyInterpreterFrame *src, _PyInterpreterFrame *dest);
/* Consumes reference to func and locals.
Does not initialize frame->previous, which happens
when frame is linked into the frame stack.
*/
static inline void
_PyFrame_Initialize(
_PyInterpreterFrame *frame, PyFunctionObject *func,
PyObject *locals, PyCodeObject *code, int null_locals_from)
{
frame->f_funcobj = (PyObject *)func;
frame->f_code = (PyCodeObject *)Py_NewRef(code);
frame->f_builtins = func->func_builtins;
frame->f_globals = func->func_globals;
frame->f_locals = locals;
frame->stacktop = code->co_nlocalsplus;
frame->frame_obj = NULL;
frame->prev_instr = _PyCode_CODE(code) - 1;
frame->return_offset = 0;
frame->owner = FRAME_OWNED_BY_THREAD;
for (int i = null_locals_from; i < code->co_nlocalsplus; i++) {
frame->localsplus[i] = NULL;
}
}
/* Gets the pointer to the locals array
* that precedes this frame.
*/
static inline PyObject**
_PyFrame_GetLocalsArray(_PyInterpreterFrame *frame)
{
return frame->localsplus;
}
/* Fetches the stack pointer, and sets stacktop to -1.
Having stacktop <= 0 ensures that invalid
values are not visible to the cycle GC.
We choose -1 rather than 0 to assist debugging. */
static inline PyObject**
_PyFrame_GetStackPointer(_PyInterpreterFrame *frame)
{
PyObject **sp = frame->localsplus + frame->stacktop;
frame->stacktop = -1;
return sp;
}
static inline void
_PyFrame_SetStackPointer(_PyInterpreterFrame *frame, PyObject **stack_pointer)
{
frame->stacktop = (int)(stack_pointer - frame->localsplus);
}
/* Determine whether a frame is incomplete.
* A frame is incomplete if it is part way through
* creating cell objects or a generator or coroutine.
*
* Frames on the frame stack are incomplete until the
* first RESUME instruction.
* Frames owned by a generator are always complete.
*/
static inline bool
_PyFrame_IsIncomplete(_PyInterpreterFrame *frame)
{
return frame->owner != FRAME_OWNED_BY_GENERATOR &&
frame->prev_instr < _PyCode_CODE(frame->f_code) + frame->f_code->_co_firsttraceable;
}
static inline _PyInterpreterFrame *
_PyFrame_GetFirstComplete(_PyInterpreterFrame *frame)
{
while (frame && _PyFrame_IsIncomplete(frame)) {
frame = frame->previous;
}
return frame;
}
static inline _PyInterpreterFrame *
_PyThreadState_GetFrame(PyThreadState *tstate)
{
return _PyFrame_GetFirstComplete(tstate->cframe->current_frame);
}
/* For use by _PyFrame_GetFrameObject
Do not call directly. */
PyFrameObject *
_PyFrame_MakeAndSetFrameObject(_PyInterpreterFrame *frame);
/* Gets the PyFrameObject for this frame, lazily
* creating it if necessary.
 * Returns a borrowed reference */
static inline PyFrameObject *
_PyFrame_GetFrameObject(_PyInterpreterFrame *frame)
{
assert(!_PyFrame_IsIncomplete(frame));
PyFrameObject *res = frame->frame_obj;
if (res != NULL) {
return res;
}
return _PyFrame_MakeAndSetFrameObject(frame);
}
/* Clears all references in the frame.
* If take is non-zero, then the _PyInterpreterFrame frame
* may be transferred to the frame object it references
* instead of being cleared. Either way
* the caller no longer owns the references
* in the frame.
* take should be set to 1 for heap allocated
* frames like the ones in generators and coroutines.
*/
void
_PyFrame_ClearExceptCode(_PyInterpreterFrame * frame);
int
_PyFrame_Traverse(_PyInterpreterFrame *frame, visitproc visit, void *arg);
PyObject *
_PyFrame_GetLocals(_PyInterpreterFrame *frame, int include_hidden);
int
_PyFrame_FastToLocalsWithError(_PyInterpreterFrame *frame);
void
_PyFrame_LocalsToFast(_PyInterpreterFrame *frame, int clear);
static inline bool
_PyThreadState_HasStackSpace(PyThreadState *tstate, int size)
{
assert(
(tstate->datastack_top == NULL && tstate->datastack_limit == NULL)
||
(tstate->datastack_top != NULL && tstate->datastack_limit != NULL)
);
return tstate->datastack_top != NULL &&
size < tstate->datastack_limit - tstate->datastack_top;
}
extern _PyInterpreterFrame *
_PyThreadState_PushFrame(PyThreadState *tstate, size_t size);
void _PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame *frame);
/* Pushes a frame without checking for space.
* Must be guarded by _PyThreadState_HasStackSpace()
* Consumes reference to func. */
static inline _PyInterpreterFrame *
_PyFrame_PushUnchecked(PyThreadState *tstate, PyFunctionObject *func, int null_locals_from)
{
CALL_STAT_INC(frames_pushed);
PyCodeObject *code = (PyCodeObject *)func->func_code;
_PyInterpreterFrame *new_frame = (_PyInterpreterFrame *)tstate->datastack_top;
tstate->datastack_top += code->co_framesize;
assert(tstate->datastack_top < tstate->datastack_limit);
_PyFrame_Initialize(new_frame, func, NULL, code, null_locals_from);
return new_frame;
}
static inline
PyGenObject *_PyFrame_GetGenerator(_PyInterpreterFrame *frame)
{
assert(frame->owner == FRAME_OWNED_BY_GENERATOR);
size_t offset_in_gen = offsetof(PyGenObject, gi_iframe);
return (PyGenObject *)(((char *)frame) - offset_in_gen);
}
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_FRAME_H */
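
Putting the helpers together, the calling convention they imply looks roughly like this sketch (error handling and the real eval-loop wiring omitted; the cleanup order mirrors the comments above, but this is not the actual call path):

PyCodeObject *code = (PyCodeObject *)func->func_code;
if (!_PyThreadState_HasStackSpace(tstate, code->co_framesize)) {
    /* slow path: _PyThreadState_PushFrame() grows the data stack */
}
else {
    /* consumes a reference to func */
    _PyInterpreterFrame *frame = _PyFrame_PushUnchecked(tstate, func, 0);
    /* ... store arguments into localsplus and run the frame ... */
    PyCodeObject *co = frame->f_code;
    _PyFrame_ClearExceptCode(frame);  /* drops everything but f_code */
    Py_DECREF(co);
    _PyThreadState_PopFrame(tstate, frame);
}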

View File

@@ -0,0 +1,26 @@
#ifndef Py_INTERNAL_FUNCTION_H
#define Py_INTERNAL_FUNCTION_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#define FUNC_MAX_WATCHERS 8
struct _py_func_state {
uint32_t next_version;
};
extern PyFunctionObject* _PyFunction_FromConstructor(PyFrameConstructor *constr);
extern uint32_t _PyFunction_GetVersionForCurrentState(PyFunctionObject *func);
extern PyObject *_Py_set_function_type_params(
PyThreadState* unused, PyObject *func, PyObject *type_params);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_FUNCTION_H */

View File

@@ -0,0 +1,211 @@
#ifndef Py_INTERNAL_GC_H
#define Py_INTERNAL_GC_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* GC information is stored BEFORE the object structure. */
typedef struct {
// Pointer to next object in the list.
// 0 means the object is not tracked
uintptr_t _gc_next;
// Pointer to previous object in the list.
// Lowest two bits are used for flags documented later.
uintptr_t _gc_prev;
} PyGC_Head;
static inline PyGC_Head* _Py_AS_GC(PyObject *op) {
return (_Py_CAST(PyGC_Head*, op) - 1);
}
#define _PyGC_Head_UNUSED PyGC_Head
/* True if the object is currently tracked by the GC. */
static inline int _PyObject_GC_IS_TRACKED(PyObject *op) {
PyGC_Head *gc = _Py_AS_GC(op);
return (gc->_gc_next != 0);
}
#define _PyObject_GC_IS_TRACKED(op) _PyObject_GC_IS_TRACKED(_Py_CAST(PyObject*, op))
/* True if the object may be tracked by the GC in the future, or already is.
This can be useful to implement some optimizations. */
static inline int _PyObject_GC_MAY_BE_TRACKED(PyObject *obj) {
if (!PyObject_IS_GC(obj)) {
return 0;
}
if (PyTuple_CheckExact(obj)) {
return _PyObject_GC_IS_TRACKED(obj);
}
return 1;
}
/* Bit flags for _gc_prev */
/* Bit 0 is set when tp_finalize is called */
#define _PyGC_PREV_MASK_FINALIZED (1)
/* Bit 1 is set when the object is in generation which is GCed currently. */
#define _PyGC_PREV_MASK_COLLECTING (2)
/* The (N-2) most significant bits contain the real address. */
#define _PyGC_PREV_SHIFT (2)
#define _PyGC_PREV_MASK (((uintptr_t) -1) << _PyGC_PREV_SHIFT)
// The lowest bit of _gc_next is used for flags only during collection;
// outside the collector it is always 0.
static inline PyGC_Head* _PyGCHead_NEXT(PyGC_Head *gc) {
uintptr_t next = gc->_gc_next;
return _Py_CAST(PyGC_Head*, next);
}
static inline void _PyGCHead_SET_NEXT(PyGC_Head *gc, PyGC_Head *next) {
gc->_gc_next = _Py_CAST(uintptr_t, next);
}
// The lowest two bits of _gc_prev are used for the _PyGC_PREV_MASK_* flags.
static inline PyGC_Head* _PyGCHead_PREV(PyGC_Head *gc) {
uintptr_t prev = (gc->_gc_prev & _PyGC_PREV_MASK);
return _Py_CAST(PyGC_Head*, prev);
}
static inline void _PyGCHead_SET_PREV(PyGC_Head *gc, PyGC_Head *prev) {
uintptr_t uprev = _Py_CAST(uintptr_t, prev);
assert((uprev & ~_PyGC_PREV_MASK) == 0);
gc->_gc_prev = ((gc->_gc_prev & ~_PyGC_PREV_MASK) | uprev);
}
static inline int _PyGCHead_FINALIZED(PyGC_Head *gc) {
return ((gc->_gc_prev & _PyGC_PREV_MASK_FINALIZED) != 0);
}
static inline void _PyGCHead_SET_FINALIZED(PyGC_Head *gc) {
gc->_gc_prev |= _PyGC_PREV_MASK_FINALIZED;
}
static inline int _PyGC_FINALIZED(PyObject *op) {
PyGC_Head *gc = _Py_AS_GC(op);
return _PyGCHead_FINALIZED(gc);
}
static inline void _PyGC_SET_FINALIZED(PyObject *op) {
PyGC_Head *gc = _Py_AS_GC(op);
_PyGCHead_SET_FINALIZED(gc);
}
/* GC runtime state */
/* If we change this, we need to change the default value in the
signature of gc.collect. */
#define NUM_GENERATIONS 3
/*
NOTE: about untracking of mutable objects.
Certain types of container cannot participate in a reference cycle, and
so do not need to be tracked by the garbage collector. Untracking these
objects reduces the cost of garbage collections. However, determining
which objects may be untracked is not free, and the costs must be
weighed against the benefits for garbage collection.
There are two possible strategies for when to untrack a container:
i) When the container is created.
ii) When the container is examined by the garbage collector.
Tuples containing only immutable objects (integers, strings etc, and
recursively, tuples of immutable objects) do not need to be tracked.
The interpreter creates a large number of tuples, many of which will
not survive until garbage collection. It is therefore not worthwhile
to untrack eligible tuples at creation time.
Instead, all tuples except the empty tuple are tracked when created.
During garbage collection it is determined whether any surviving tuples
can be untracked. A tuple can be untracked if all of its contents are
already not tracked. Tuples are examined for untracking in all garbage
collection cycles. It may take more than one cycle to untrack a tuple.
Dictionaries containing only immutable objects also do not need to be
tracked. Dictionaries are untracked when created. If a tracked item is
inserted into a dictionary (either as a key or value), the dictionary
becomes tracked. During a full garbage collection (all generations),
the collector will untrack any dictionaries whose contents are not
tracked.
 The module provides the Python function is_tracked(obj), which returns
the CURRENT tracking status of the object. Subsequent garbage
collections may change the tracking status of the object.
Untracking of certain containers was introduced in issue #4688, and
the algorithm was refined in response to issue #14775.
*/
struct gc_generation {
PyGC_Head head;
int threshold; /* collection threshold */
int count; /* count of allocations or collections of younger
generations */
};
/* Running stats per generation */
struct gc_generation_stats {
/* total number of collections */
Py_ssize_t collections;
/* total number of collected objects */
Py_ssize_t collected;
/* total number of uncollectable objects (put into gc.garbage) */
Py_ssize_t uncollectable;
};
struct _gc_runtime_state {
/* List of objects that still need to be cleaned up, singly linked
* via their gc headers' gc_prev pointers. */
PyObject *trash_delete_later;
/* Current call-stack depth of tp_dealloc calls. */
int trash_delete_nesting;
/* Is automatic collection enabled? */
int enabled;
int debug;
/* linked lists of container objects */
struct gc_generation generations[NUM_GENERATIONS];
PyGC_Head *generation0;
/* a permanent generation which won't be collected */
struct gc_generation permanent_generation;
struct gc_generation_stats generation_stats[NUM_GENERATIONS];
/* true if we are currently running the collector */
int collecting;
/* list of uncollectable objects */
PyObject *garbage;
/* a list of callbacks to be invoked when collection is performed */
PyObject *callbacks;
/* This is the number of objects that survived the last full
collection. It approximates the number of long lived objects
tracked by the GC.
(by "full collection", we mean a collection of the oldest
generation). */
Py_ssize_t long_lived_total;
/* This is the number of objects that survived all "non-full"
       collections, and are waiting to undergo a full collection for
the first time. */
Py_ssize_t long_lived_pending;
};
extern void _PyGC_InitState(struct _gc_runtime_state *);
extern Py_ssize_t _PyGC_CollectNoFail(PyThreadState *tstate);
// Functions to clear types free lists
extern void _PyTuple_ClearFreeList(PyInterpreterState *interp);
extern void _PyFloat_ClearFreeList(PyInterpreterState *interp);
extern void _PyList_ClearFreeList(PyInterpreterState *interp);
extern void _PyDict_ClearFreeList(PyInterpreterState *interp);
extern void _PyAsyncGen_ClearFreeLists(PyInterpreterState *interp);
extern void _PyContext_ClearFreeList(PyInterpreterState *interp);
extern void _Py_ScheduleGC(PyInterpreterState *interp);
extern void _Py_RunGC(PyThreadState *tstate);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_GC_H */
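
The _gc_prev packing above (address in the high bits, two flag bits at the bottom) means the flag helpers and the pointer helpers can coexist in one word. A small illustration, under the same invariants the header asserts (op is a GC-tracked object, prev is a suitably aligned PyGC_Head*; both are hypothetical here):

PyGC_Head *gc = _Py_AS_GC(op);       /* the header precedes the object */
_PyGCHead_SET_FINALIZED(gc);         /* sets bit 0 only */
_PyGCHead_SET_PREV(gc, prev);        /* touches only the high bits */
assert(_PyGCHead_FINALIZED(gc));     /* bit 0 survived the pointer update */
assert(_PyGCHead_PREV(gc) == prev);  /* address recovered via the mask */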

View File

@@ -0,0 +1,49 @@
#ifndef Py_INTERNAL_GENOBJECT_H
#define Py_INTERNAL_GENOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
extern PyObject *_PyGen_yf(PyGenObject *);
extern PyObject *_PyCoro_GetAwaitableIter(PyObject *o);
extern PyObject *_PyAsyncGenValueWrapperNew(PyThreadState *state, PyObject *);
/* runtime lifecycle */
extern void _PyAsyncGen_Fini(PyInterpreterState *);
/* other API */
#ifndef WITH_FREELISTS
// without freelists
# define _PyAsyncGen_MAXFREELIST 0
#endif
#ifndef _PyAsyncGen_MAXFREELIST
# define _PyAsyncGen_MAXFREELIST 80
#endif
struct _Py_async_gen_state {
#if _PyAsyncGen_MAXFREELIST > 0
/* Freelists boost performance 6-10%; they also reduce memory
fragmentation, as _PyAsyncGenWrappedValue and PyAsyncGenASend
       are short-lived objects that are instantiated for every
__anext__() call. */
struct _PyAsyncGenWrappedValue* value_freelist[_PyAsyncGen_MAXFREELIST];
int value_numfree;
struct PyAsyncGenASend* asend_freelist[_PyAsyncGen_MAXFREELIST];
int asend_numfree;
#endif
};
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_GENOBJECT_H */

View File

@@ -0,0 +1,22 @@
#ifndef Py_INTERNAL_PYGETOPT_H
#define Py_INTERNAL_PYGETOPT_H
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
extern int _PyOS_opterr;
extern Py_ssize_t _PyOS_optind;
extern const wchar_t *_PyOS_optarg;
extern void _PyOS_ResetGetOpt(void);
typedef struct {
const wchar_t *name;
int has_arg;
int val;
} _PyOS_LongOption;
extern int _PyOS_GetOpt(Py_ssize_t argc, wchar_t * const *argv, int *longindex);
#endif /* !Py_INTERNAL_PYGETOPT_H */
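
A hypothetical sketch of the parsing loop this API implies (the recognized options come from an internal table, so 'c' below is just an example short option):

int longindex = -1;
int c;
_PyOS_ResetGetOpt();
while ((c = _PyOS_GetOpt(argc, argv, &longindex)) != -1) {
    if (c == 'c') {
        const wchar_t *command = _PyOS_optarg;  /* the option's argument */
        (void)command;
    }
    /* unrecognized options are reported according to _PyOS_opterr */
}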

View File

@@ -0,0 +1,50 @@
#ifndef Py_INTERNAL_GIL_H
#define Py_INTERNAL_GIL_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_atomic.h" /* _Py_atomic_address */
#include "pycore_condvar.h" /* PyCOND_T */
#ifndef Py_HAVE_CONDVAR
# error You need either a POSIX-compatible or a Windows system!
#endif
/* Enable if you want to force the switching of threads at least
every `interval`. */
#undef FORCE_SWITCHING
#define FORCE_SWITCHING
struct _gil_runtime_state {
/* microseconds (the Python API uses seconds, though) */
unsigned long interval;
/* Last PyThreadState holding / having held the GIL. This helps us
know whether anyone else was scheduled after we dropped the GIL. */
_Py_atomic_address last_holder;
/* Whether the GIL is already taken (-1 if uninitialized). This is
atomic because it can be read without any lock taken in ceval.c. */
_Py_atomic_int locked;
/* Number of GIL switches since the beginning. */
unsigned long switch_number;
/* This condition variable allows one or several threads to wait
until the GIL is released. In addition, the mutex also protects
the above variables. */
PyCOND_T cond;
PyMUTEX_T mutex;
#ifdef FORCE_SWITCHING
/* This condition variable helps the GIL-releasing thread wait for
a GIL-awaiting thread to be scheduled and take the GIL. */
PyCOND_T switch_cond;
PyMUTEX_T switch_mutex;
#endif
};
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_GIL_H */
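
A simplified sketch of the take/drop handshake the struct supports; the real code (ceval_gil.c) also handles timeouts, switch_number bookkeeping and FORCE_SWITCHING, and this assumes the PyMUTEX_LOCK/PyCOND_WAIT helpers from the condition-variable layer.

/* Take the GIL. */
PyMUTEX_LOCK(&gil->mutex);
while (_Py_atomic_load_relaxed(&gil->locked)) {
    PyCOND_WAIT(&gil->cond, &gil->mutex);   /* wait for a holder to drop it */
}
_Py_atomic_store_relaxed(&gil->locked, 1);
_Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
PyMUTEX_UNLOCK(&gil->mutex);

/* Drop the GIL and wake one waiter. */
PyMUTEX_LOCK(&gil->mutex);
_Py_atomic_store_relaxed(&gil->locked, 0);
PyCOND_SIGNAL(&gil->cond);
PyMUTEX_UNLOCK(&gil->mutex);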

View File

@@ -0,0 +1,99 @@
#ifndef Py_INTERNAL_GLOBAL_OBJECTS_H
#define Py_INTERNAL_GLOBAL_OBJECTS_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_gc.h" // PyGC_Head
#include "pycore_global_strings.h" // struct _Py_global_strings
#include "pycore_hamt.h" // PyHamtNode_Bitmap
#include "pycore_context.h" // _PyContextTokenMissing
#include "pycore_typeobject.h" // pytype_slotdef
// These would be in pycore_long.h if it weren't for an include cycle.
#define _PY_NSMALLPOSINTS 257
#define _PY_NSMALLNEGINTS 5
// Only immutable objects should be considered runtime-global.
// All others must be per-interpreter.
#define _Py_GLOBAL_OBJECT(NAME) \
_PyRuntime.static_objects.NAME
#define _Py_SINGLETON(NAME) \
_Py_GLOBAL_OBJECT(singletons.NAME)
struct _Py_static_objects {
struct {
/* Small integers are preallocated in this array so that they
* can be shared.
* The integers that are preallocated are those in the range
* -_PY_NSMALLNEGINTS (inclusive) to _PY_NSMALLPOSINTS (exclusive).
*/
PyLongObject small_ints[_PY_NSMALLNEGINTS + _PY_NSMALLPOSINTS];
PyBytesObject bytes_empty;
struct {
PyBytesObject ob;
char eos;
} bytes_characters[256];
struct _Py_global_strings strings;
_PyGC_Head_UNUSED _tuple_empty_gc_not_used;
PyTupleObject tuple_empty;
_PyGC_Head_UNUSED _hamt_bitmap_node_empty_gc_not_used;
PyHamtNode_Bitmap hamt_bitmap_node_empty;
_PyContextTokenMissing context_token_missing;
} singletons;
};
#define _Py_INTERP_CACHED_OBJECT(interp, NAME) \
(interp)->cached_objects.NAME
struct _Py_interp_cached_objects {
PyObject *interned_strings;
/* AST */
PyObject *str_replace_inf;
/* object.__reduce__ */
PyObject *objreduce;
PyObject *type_slots_pname;
pytype_slotdef *type_slots_ptrs[MAX_EQUIV];
/* TypeVar and related types */
PyTypeObject *generic_type;
PyTypeObject *typevar_type;
PyTypeObject *typevartuple_type;
PyTypeObject *paramspec_type;
PyTypeObject *paramspecargs_type;
PyTypeObject *paramspeckwargs_type;
};
#define _Py_INTERP_STATIC_OBJECT(interp, NAME) \
(interp)->static_objects.NAME
#define _Py_INTERP_SINGLETON(interp, NAME) \
_Py_INTERP_STATIC_OBJECT(interp, singletons.NAME)
struct _Py_interp_static_objects {
struct {
int _not_used;
// hamt_empty is here instead of global because of its weakreflist.
_PyGC_Head_UNUSED _hamt_empty_gc_not_used;
PyHamtObject hamt_empty;
PyBaseExceptionObject last_resort_memory_error;
} singletons;
};
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_GLOBAL_OBJECTS_H */
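
A sketch of how a cached small int can be fetched through the singleton macros above (the real helper lives in Objects/longobject.c); slot 0 holds -_PY_NSMALLNEGINTS, so the index is shifted:

static PyObject *
get_small_int(Py_ssize_t value)
{
    assert(-_PY_NSMALLNEGINTS <= value && value < _PY_NSMALLPOSINTS);
    PyLongObject *v = &_Py_SINGLETON(small_ints)[_PY_NSMALLNEGINTS + value];
    return Py_NewRef((PyObject *)v);
}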

File diff suppressed because it is too large

View File

@@ -0,0 +1,787 @@
#ifndef Py_INTERNAL_GLOBAL_STRINGS_H
#define Py_INTERNAL_GLOBAL_STRINGS_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
// The data structure & init here are inspired by Tools/build/deepfreeze.py.
// All field names generated by ASCII_STR() have a common prefix,
// to help avoid collisions with keywords, macros, etc.
#define STRUCT_FOR_ASCII_STR(LITERAL) \
struct { \
PyASCIIObject _ascii; \
uint8_t _data[sizeof(LITERAL)]; \
}
#define STRUCT_FOR_STR(NAME, LITERAL) \
STRUCT_FOR_ASCII_STR(LITERAL) _py_ ## NAME;
#define STRUCT_FOR_ID(NAME) \
STRUCT_FOR_ASCII_STR(#NAME) _py_ ## NAME;
// XXX Order by frequency of use?
/* The following is auto-generated by Tools/build/generate_global_objects.py. */
struct _Py_global_strings {
struct {
STRUCT_FOR_STR(anon_dictcomp, "<dictcomp>")
STRUCT_FOR_STR(anon_genexpr, "<genexpr>")
STRUCT_FOR_STR(anon_lambda, "<lambda>")
STRUCT_FOR_STR(anon_listcomp, "<listcomp>")
STRUCT_FOR_STR(anon_module, "<module>")
STRUCT_FOR_STR(anon_setcomp, "<setcomp>")
STRUCT_FOR_STR(anon_string, "<string>")
STRUCT_FOR_STR(anon_unknown, "<unknown>")
STRUCT_FOR_STR(close_br, "}")
STRUCT_FOR_STR(dbl_close_br, "}}")
STRUCT_FOR_STR(dbl_open_br, "{{")
STRUCT_FOR_STR(dbl_percent, "%%")
STRUCT_FOR_STR(defaults, ".defaults")
STRUCT_FOR_STR(dot, ".")
STRUCT_FOR_STR(dot_locals, ".<locals>")
STRUCT_FOR_STR(empty, "")
STRUCT_FOR_STR(generic_base, ".generic_base")
STRUCT_FOR_STR(json_decoder, "json.decoder")
STRUCT_FOR_STR(kwdefaults, ".kwdefaults")
STRUCT_FOR_STR(list_err, "list index out of range")
STRUCT_FOR_STR(newline, "\n")
STRUCT_FOR_STR(open_br, "{")
STRUCT_FOR_STR(percent, "%")
STRUCT_FOR_STR(shim_name, "<shim>")
STRUCT_FOR_STR(type_params, ".type_params")
STRUCT_FOR_STR(utf_8, "utf-8")
} literals;
struct {
STRUCT_FOR_ID(CANCELLED)
STRUCT_FOR_ID(FINISHED)
STRUCT_FOR_ID(False)
STRUCT_FOR_ID(JSONDecodeError)
STRUCT_FOR_ID(PENDING)
STRUCT_FOR_ID(Py_Repr)
STRUCT_FOR_ID(TextIOWrapper)
STRUCT_FOR_ID(True)
STRUCT_FOR_ID(WarningMessage)
STRUCT_FOR_ID(_)
STRUCT_FOR_ID(_WindowsConsoleIO)
STRUCT_FOR_ID(__IOBase_closed)
STRUCT_FOR_ID(__abc_tpflags__)
STRUCT_FOR_ID(__abs__)
STRUCT_FOR_ID(__abstractmethods__)
STRUCT_FOR_ID(__add__)
STRUCT_FOR_ID(__aenter__)
STRUCT_FOR_ID(__aexit__)
STRUCT_FOR_ID(__aiter__)
STRUCT_FOR_ID(__all__)
STRUCT_FOR_ID(__and__)
STRUCT_FOR_ID(__anext__)
STRUCT_FOR_ID(__annotations__)
STRUCT_FOR_ID(__args__)
STRUCT_FOR_ID(__asyncio_running_event_loop__)
STRUCT_FOR_ID(__await__)
STRUCT_FOR_ID(__bases__)
STRUCT_FOR_ID(__bool__)
STRUCT_FOR_ID(__buffer__)
STRUCT_FOR_ID(__build_class__)
STRUCT_FOR_ID(__builtins__)
STRUCT_FOR_ID(__bytes__)
STRUCT_FOR_ID(__call__)
STRUCT_FOR_ID(__cantrace__)
STRUCT_FOR_ID(__class__)
STRUCT_FOR_ID(__class_getitem__)
STRUCT_FOR_ID(__classcell__)
STRUCT_FOR_ID(__classdict__)
STRUCT_FOR_ID(__classdictcell__)
STRUCT_FOR_ID(__complex__)
STRUCT_FOR_ID(__contains__)
STRUCT_FOR_ID(__copy__)
STRUCT_FOR_ID(__ctypes_from_outparam__)
STRUCT_FOR_ID(__del__)
STRUCT_FOR_ID(__delattr__)
STRUCT_FOR_ID(__delete__)
STRUCT_FOR_ID(__delitem__)
STRUCT_FOR_ID(__dict__)
STRUCT_FOR_ID(__dictoffset__)
STRUCT_FOR_ID(__dir__)
STRUCT_FOR_ID(__divmod__)
STRUCT_FOR_ID(__doc__)
STRUCT_FOR_ID(__enter__)
STRUCT_FOR_ID(__eq__)
STRUCT_FOR_ID(__exit__)
STRUCT_FOR_ID(__file__)
STRUCT_FOR_ID(__float__)
STRUCT_FOR_ID(__floordiv__)
STRUCT_FOR_ID(__format__)
STRUCT_FOR_ID(__fspath__)
STRUCT_FOR_ID(__ge__)
STRUCT_FOR_ID(__get__)
STRUCT_FOR_ID(__getattr__)
STRUCT_FOR_ID(__getattribute__)
STRUCT_FOR_ID(__getinitargs__)
STRUCT_FOR_ID(__getitem__)
STRUCT_FOR_ID(__getnewargs__)
STRUCT_FOR_ID(__getnewargs_ex__)
STRUCT_FOR_ID(__getstate__)
STRUCT_FOR_ID(__gt__)
STRUCT_FOR_ID(__hash__)
STRUCT_FOR_ID(__iadd__)
STRUCT_FOR_ID(__iand__)
STRUCT_FOR_ID(__ifloordiv__)
STRUCT_FOR_ID(__ilshift__)
STRUCT_FOR_ID(__imatmul__)
STRUCT_FOR_ID(__imod__)
STRUCT_FOR_ID(__import__)
STRUCT_FOR_ID(__imul__)
STRUCT_FOR_ID(__index__)
STRUCT_FOR_ID(__init__)
STRUCT_FOR_ID(__init_subclass__)
STRUCT_FOR_ID(__instancecheck__)
STRUCT_FOR_ID(__int__)
STRUCT_FOR_ID(__invert__)
STRUCT_FOR_ID(__ior__)
STRUCT_FOR_ID(__ipow__)
STRUCT_FOR_ID(__irshift__)
STRUCT_FOR_ID(__isabstractmethod__)
STRUCT_FOR_ID(__isub__)
STRUCT_FOR_ID(__iter__)
STRUCT_FOR_ID(__itruediv__)
STRUCT_FOR_ID(__ixor__)
STRUCT_FOR_ID(__le__)
STRUCT_FOR_ID(__len__)
STRUCT_FOR_ID(__length_hint__)
STRUCT_FOR_ID(__lltrace__)
STRUCT_FOR_ID(__loader__)
STRUCT_FOR_ID(__lshift__)
STRUCT_FOR_ID(__lt__)
STRUCT_FOR_ID(__main__)
STRUCT_FOR_ID(__matmul__)
STRUCT_FOR_ID(__missing__)
STRUCT_FOR_ID(__mod__)
STRUCT_FOR_ID(__module__)
STRUCT_FOR_ID(__mro_entries__)
STRUCT_FOR_ID(__mul__)
STRUCT_FOR_ID(__name__)
STRUCT_FOR_ID(__ne__)
STRUCT_FOR_ID(__neg__)
STRUCT_FOR_ID(__new__)
STRUCT_FOR_ID(__newobj__)
STRUCT_FOR_ID(__newobj_ex__)
STRUCT_FOR_ID(__next__)
STRUCT_FOR_ID(__notes__)
STRUCT_FOR_ID(__or__)
STRUCT_FOR_ID(__orig_class__)
STRUCT_FOR_ID(__origin__)
STRUCT_FOR_ID(__package__)
STRUCT_FOR_ID(__parameters__)
STRUCT_FOR_ID(__path__)
STRUCT_FOR_ID(__pos__)
STRUCT_FOR_ID(__pow__)
STRUCT_FOR_ID(__prepare__)
STRUCT_FOR_ID(__qualname__)
STRUCT_FOR_ID(__radd__)
STRUCT_FOR_ID(__rand__)
STRUCT_FOR_ID(__rdivmod__)
STRUCT_FOR_ID(__reduce__)
STRUCT_FOR_ID(__reduce_ex__)
STRUCT_FOR_ID(__release_buffer__)
STRUCT_FOR_ID(__repr__)
STRUCT_FOR_ID(__reversed__)
STRUCT_FOR_ID(__rfloordiv__)
STRUCT_FOR_ID(__rlshift__)
STRUCT_FOR_ID(__rmatmul__)
STRUCT_FOR_ID(__rmod__)
STRUCT_FOR_ID(__rmul__)
STRUCT_FOR_ID(__ror__)
STRUCT_FOR_ID(__round__)
STRUCT_FOR_ID(__rpow__)
STRUCT_FOR_ID(__rrshift__)
STRUCT_FOR_ID(__rshift__)
STRUCT_FOR_ID(__rsub__)
STRUCT_FOR_ID(__rtruediv__)
STRUCT_FOR_ID(__rxor__)
STRUCT_FOR_ID(__set__)
STRUCT_FOR_ID(__set_name__)
STRUCT_FOR_ID(__setattr__)
STRUCT_FOR_ID(__setitem__)
STRUCT_FOR_ID(__setstate__)
STRUCT_FOR_ID(__sizeof__)
STRUCT_FOR_ID(__slotnames__)
STRUCT_FOR_ID(__slots__)
STRUCT_FOR_ID(__spec__)
STRUCT_FOR_ID(__str__)
STRUCT_FOR_ID(__sub__)
STRUCT_FOR_ID(__subclasscheck__)
STRUCT_FOR_ID(__subclasshook__)
STRUCT_FOR_ID(__truediv__)
STRUCT_FOR_ID(__trunc__)
STRUCT_FOR_ID(__type_params__)
STRUCT_FOR_ID(__typing_is_unpacked_typevartuple__)
STRUCT_FOR_ID(__typing_prepare_subst__)
STRUCT_FOR_ID(__typing_subst__)
STRUCT_FOR_ID(__typing_unpacked_tuple_args__)
STRUCT_FOR_ID(__warningregistry__)
STRUCT_FOR_ID(__weaklistoffset__)
STRUCT_FOR_ID(__weakref__)
STRUCT_FOR_ID(__xor__)
STRUCT_FOR_ID(_abc_impl)
STRUCT_FOR_ID(_abstract_)
STRUCT_FOR_ID(_active)
STRUCT_FOR_ID(_annotation)
STRUCT_FOR_ID(_anonymous_)
STRUCT_FOR_ID(_argtypes_)
STRUCT_FOR_ID(_as_parameter_)
STRUCT_FOR_ID(_asyncio_future_blocking)
STRUCT_FOR_ID(_blksize)
STRUCT_FOR_ID(_bootstrap)
STRUCT_FOR_ID(_check_retval_)
STRUCT_FOR_ID(_dealloc_warn)
STRUCT_FOR_ID(_feature_version)
STRUCT_FOR_ID(_fields_)
STRUCT_FOR_ID(_finalizing)
STRUCT_FOR_ID(_find_and_load)
STRUCT_FOR_ID(_fix_up_module)
STRUCT_FOR_ID(_flags_)
STRUCT_FOR_ID(_get_sourcefile)
STRUCT_FOR_ID(_handle_fromlist)
STRUCT_FOR_ID(_initializing)
STRUCT_FOR_ID(_io)
STRUCT_FOR_ID(_is_text_encoding)
STRUCT_FOR_ID(_length_)
STRUCT_FOR_ID(_limbo)
STRUCT_FOR_ID(_lock_unlock_module)
STRUCT_FOR_ID(_loop)
STRUCT_FOR_ID(_needs_com_addref_)
STRUCT_FOR_ID(_pack_)
STRUCT_FOR_ID(_restype_)
STRUCT_FOR_ID(_showwarnmsg)
STRUCT_FOR_ID(_shutdown)
STRUCT_FOR_ID(_slotnames)
STRUCT_FOR_ID(_strptime_datetime)
STRUCT_FOR_ID(_swappedbytes_)
STRUCT_FOR_ID(_type_)
STRUCT_FOR_ID(_uninitialized_submodules)
STRUCT_FOR_ID(_warn_unawaited_coroutine)
STRUCT_FOR_ID(_xoptions)
STRUCT_FOR_ID(a)
STRUCT_FOR_ID(abs_tol)
STRUCT_FOR_ID(access)
STRUCT_FOR_ID(add)
STRUCT_FOR_ID(add_done_callback)
STRUCT_FOR_ID(after_in_child)
STRUCT_FOR_ID(after_in_parent)
STRUCT_FOR_ID(aggregate_class)
STRUCT_FOR_ID(alias)
STRUCT_FOR_ID(append)
STRUCT_FOR_ID(arg)
STRUCT_FOR_ID(argdefs)
STRUCT_FOR_ID(args)
STRUCT_FOR_ID(arguments)
STRUCT_FOR_ID(argv)
STRUCT_FOR_ID(as_integer_ratio)
STRUCT_FOR_ID(ast)
STRUCT_FOR_ID(attribute)
STRUCT_FOR_ID(authorizer_callback)
STRUCT_FOR_ID(autocommit)
STRUCT_FOR_ID(b)
STRUCT_FOR_ID(backtick)
STRUCT_FOR_ID(base)
STRUCT_FOR_ID(before)
STRUCT_FOR_ID(big)
STRUCT_FOR_ID(binary_form)
STRUCT_FOR_ID(block)
STRUCT_FOR_ID(bound)
STRUCT_FOR_ID(buffer)
STRUCT_FOR_ID(buffer_callback)
STRUCT_FOR_ID(buffer_size)
STRUCT_FOR_ID(buffering)
STRUCT_FOR_ID(buffers)
STRUCT_FOR_ID(bufsize)
STRUCT_FOR_ID(builtins)
STRUCT_FOR_ID(byteorder)
STRUCT_FOR_ID(bytes)
STRUCT_FOR_ID(bytes_per_sep)
STRUCT_FOR_ID(c)
STRUCT_FOR_ID(c_call)
STRUCT_FOR_ID(c_exception)
STRUCT_FOR_ID(c_return)
STRUCT_FOR_ID(cached_statements)
STRUCT_FOR_ID(cadata)
STRUCT_FOR_ID(cafile)
STRUCT_FOR_ID(call)
STRUCT_FOR_ID(call_exception_handler)
STRUCT_FOR_ID(call_soon)
STRUCT_FOR_ID(cancel)
STRUCT_FOR_ID(capath)
STRUCT_FOR_ID(category)
STRUCT_FOR_ID(cb_type)
STRUCT_FOR_ID(certfile)
STRUCT_FOR_ID(check_same_thread)
STRUCT_FOR_ID(clear)
STRUCT_FOR_ID(close)
STRUCT_FOR_ID(closed)
STRUCT_FOR_ID(closefd)
STRUCT_FOR_ID(closure)
STRUCT_FOR_ID(co_argcount)
STRUCT_FOR_ID(co_cellvars)
STRUCT_FOR_ID(co_code)
STRUCT_FOR_ID(co_consts)
STRUCT_FOR_ID(co_exceptiontable)
STRUCT_FOR_ID(co_filename)
STRUCT_FOR_ID(co_firstlineno)
STRUCT_FOR_ID(co_flags)
STRUCT_FOR_ID(co_freevars)
STRUCT_FOR_ID(co_kwonlyargcount)
STRUCT_FOR_ID(co_linetable)
STRUCT_FOR_ID(co_name)
STRUCT_FOR_ID(co_names)
STRUCT_FOR_ID(co_nlocals)
STRUCT_FOR_ID(co_posonlyargcount)
STRUCT_FOR_ID(co_qualname)
STRUCT_FOR_ID(co_stacksize)
STRUCT_FOR_ID(co_varnames)
STRUCT_FOR_ID(code)
STRUCT_FOR_ID(command)
STRUCT_FOR_ID(comment_factory)
STRUCT_FOR_ID(compile_mode)
STRUCT_FOR_ID(consts)
STRUCT_FOR_ID(context)
STRUCT_FOR_ID(contravariant)
STRUCT_FOR_ID(cookie)
STRUCT_FOR_ID(copy)
STRUCT_FOR_ID(copyreg)
STRUCT_FOR_ID(coro)
STRUCT_FOR_ID(count)
STRUCT_FOR_ID(covariant)
STRUCT_FOR_ID(cwd)
STRUCT_FOR_ID(d)
STRUCT_FOR_ID(data)
STRUCT_FOR_ID(database)
STRUCT_FOR_ID(decode)
STRUCT_FOR_ID(decoder)
STRUCT_FOR_ID(default)
STRUCT_FOR_ID(defaultaction)
STRUCT_FOR_ID(delete)
STRUCT_FOR_ID(depth)
STRUCT_FOR_ID(detect_types)
STRUCT_FOR_ID(deterministic)
STRUCT_FOR_ID(device)
STRUCT_FOR_ID(dict)
STRUCT_FOR_ID(dictcomp)
STRUCT_FOR_ID(difference_update)
STRUCT_FOR_ID(digest)
STRUCT_FOR_ID(digest_size)
STRUCT_FOR_ID(digestmod)
STRUCT_FOR_ID(dir_fd)
STRUCT_FOR_ID(discard)
STRUCT_FOR_ID(dispatch_table)
STRUCT_FOR_ID(displayhook)
STRUCT_FOR_ID(dklen)
STRUCT_FOR_ID(doc)
STRUCT_FOR_ID(dont_inherit)
STRUCT_FOR_ID(dst)
STRUCT_FOR_ID(dst_dir_fd)
STRUCT_FOR_ID(duration)
STRUCT_FOR_ID(e)
STRUCT_FOR_ID(eager_start)
STRUCT_FOR_ID(effective_ids)
STRUCT_FOR_ID(element_factory)
STRUCT_FOR_ID(encode)
STRUCT_FOR_ID(encoding)
STRUCT_FOR_ID(end)
STRUCT_FOR_ID(end_lineno)
STRUCT_FOR_ID(end_offset)
STRUCT_FOR_ID(endpos)
STRUCT_FOR_ID(entrypoint)
STRUCT_FOR_ID(env)
STRUCT_FOR_ID(errors)
STRUCT_FOR_ID(event)
STRUCT_FOR_ID(eventmask)
STRUCT_FOR_ID(exc_type)
STRUCT_FOR_ID(exc_value)
STRUCT_FOR_ID(excepthook)
STRUCT_FOR_ID(exception)
STRUCT_FOR_ID(existing_file_name)
STRUCT_FOR_ID(exp)
STRUCT_FOR_ID(extend)
STRUCT_FOR_ID(extra_tokens)
STRUCT_FOR_ID(facility)
STRUCT_FOR_ID(factory)
STRUCT_FOR_ID(false)
STRUCT_FOR_ID(family)
STRUCT_FOR_ID(fanout)
STRUCT_FOR_ID(fd)
STRUCT_FOR_ID(fd2)
STRUCT_FOR_ID(fdel)
STRUCT_FOR_ID(fget)
STRUCT_FOR_ID(file)
STRUCT_FOR_ID(file_actions)
STRUCT_FOR_ID(filename)
STRUCT_FOR_ID(fileno)
STRUCT_FOR_ID(filepath)
STRUCT_FOR_ID(fillvalue)
STRUCT_FOR_ID(filters)
STRUCT_FOR_ID(final)
STRUCT_FOR_ID(find_class)
STRUCT_FOR_ID(fix_imports)
STRUCT_FOR_ID(flags)
STRUCT_FOR_ID(flush)
STRUCT_FOR_ID(follow_symlinks)
STRUCT_FOR_ID(format)
STRUCT_FOR_ID(frequency)
STRUCT_FOR_ID(from_param)
STRUCT_FOR_ID(fromlist)
STRUCT_FOR_ID(fromtimestamp)
STRUCT_FOR_ID(fromutc)
STRUCT_FOR_ID(fset)
STRUCT_FOR_ID(func)
STRUCT_FOR_ID(future)
STRUCT_FOR_ID(generation)
STRUCT_FOR_ID(genexpr)
STRUCT_FOR_ID(get)
STRUCT_FOR_ID(get_debug)
STRUCT_FOR_ID(get_event_loop)
STRUCT_FOR_ID(get_loop)
STRUCT_FOR_ID(get_source)
STRUCT_FOR_ID(getattr)
STRUCT_FOR_ID(getstate)
STRUCT_FOR_ID(gid)
STRUCT_FOR_ID(globals)
STRUCT_FOR_ID(groupindex)
STRUCT_FOR_ID(groups)
STRUCT_FOR_ID(handle)
STRUCT_FOR_ID(hash_name)
STRUCT_FOR_ID(header)
STRUCT_FOR_ID(headers)
STRUCT_FOR_ID(hi)
STRUCT_FOR_ID(hook)
STRUCT_FOR_ID(id)
STRUCT_FOR_ID(ident)
STRUCT_FOR_ID(ignore)
STRUCT_FOR_ID(imag)
STRUCT_FOR_ID(importlib)
STRUCT_FOR_ID(in_fd)
STRUCT_FOR_ID(incoming)
STRUCT_FOR_ID(indexgroup)
STRUCT_FOR_ID(inf)
STRUCT_FOR_ID(infer_variance)
STRUCT_FOR_ID(inheritable)
STRUCT_FOR_ID(initial)
STRUCT_FOR_ID(initial_bytes)
STRUCT_FOR_ID(initial_value)
STRUCT_FOR_ID(initval)
STRUCT_FOR_ID(inner_size)
STRUCT_FOR_ID(input)
STRUCT_FOR_ID(insert_comments)
STRUCT_FOR_ID(insert_pis)
STRUCT_FOR_ID(instructions)
STRUCT_FOR_ID(intern)
STRUCT_FOR_ID(intersection)
STRUCT_FOR_ID(is_running)
STRUCT_FOR_ID(isatty)
STRUCT_FOR_ID(isinstance)
STRUCT_FOR_ID(isoformat)
STRUCT_FOR_ID(isolation_level)
STRUCT_FOR_ID(istext)
STRUCT_FOR_ID(item)
STRUCT_FOR_ID(items)
STRUCT_FOR_ID(iter)
STRUCT_FOR_ID(iterable)
STRUCT_FOR_ID(iterations)
STRUCT_FOR_ID(join)
STRUCT_FOR_ID(jump)
STRUCT_FOR_ID(keepends)
STRUCT_FOR_ID(key)
STRUCT_FOR_ID(keyfile)
STRUCT_FOR_ID(keys)
STRUCT_FOR_ID(kind)
STRUCT_FOR_ID(kw)
STRUCT_FOR_ID(kw1)
STRUCT_FOR_ID(kw2)
STRUCT_FOR_ID(lambda)
STRUCT_FOR_ID(last)
STRUCT_FOR_ID(last_exc)
STRUCT_FOR_ID(last_node)
STRUCT_FOR_ID(last_traceback)
STRUCT_FOR_ID(last_type)
STRUCT_FOR_ID(last_value)
STRUCT_FOR_ID(latin1)
STRUCT_FOR_ID(leaf_size)
STRUCT_FOR_ID(len)
STRUCT_FOR_ID(length)
STRUCT_FOR_ID(level)
STRUCT_FOR_ID(limit)
STRUCT_FOR_ID(line)
STRUCT_FOR_ID(line_buffering)
STRUCT_FOR_ID(lineno)
STRUCT_FOR_ID(listcomp)
STRUCT_FOR_ID(little)
STRUCT_FOR_ID(lo)
STRUCT_FOR_ID(locale)
STRUCT_FOR_ID(locals)
STRUCT_FOR_ID(logoption)
STRUCT_FOR_ID(loop)
STRUCT_FOR_ID(mapping)
STRUCT_FOR_ID(match)
STRUCT_FOR_ID(max_length)
STRUCT_FOR_ID(maxdigits)
STRUCT_FOR_ID(maxevents)
STRUCT_FOR_ID(maxmem)
STRUCT_FOR_ID(maxsplit)
STRUCT_FOR_ID(maxvalue)
STRUCT_FOR_ID(memLevel)
STRUCT_FOR_ID(memlimit)
STRUCT_FOR_ID(message)
STRUCT_FOR_ID(metaclass)
STRUCT_FOR_ID(metadata)
STRUCT_FOR_ID(method)
STRUCT_FOR_ID(mod)
STRUCT_FOR_ID(mode)
STRUCT_FOR_ID(module)
STRUCT_FOR_ID(module_globals)
STRUCT_FOR_ID(modules)
STRUCT_FOR_ID(mro)
STRUCT_FOR_ID(msg)
STRUCT_FOR_ID(mycmp)
STRUCT_FOR_ID(n)
STRUCT_FOR_ID(n_arg)
STRUCT_FOR_ID(n_fields)
STRUCT_FOR_ID(n_sequence_fields)
STRUCT_FOR_ID(n_unnamed_fields)
STRUCT_FOR_ID(name)
STRUCT_FOR_ID(name_from)
STRUCT_FOR_ID(namespace_separator)
STRUCT_FOR_ID(namespaces)
STRUCT_FOR_ID(narg)
STRUCT_FOR_ID(ndigits)
STRUCT_FOR_ID(new_file_name)
STRUCT_FOR_ID(new_limit)
STRUCT_FOR_ID(newline)
STRUCT_FOR_ID(newlines)
STRUCT_FOR_ID(next)
STRUCT_FOR_ID(nlocals)
STRUCT_FOR_ID(node_depth)
STRUCT_FOR_ID(node_offset)
STRUCT_FOR_ID(ns)
STRUCT_FOR_ID(nstype)
STRUCT_FOR_ID(nt)
STRUCT_FOR_ID(null)
STRUCT_FOR_ID(number)
STRUCT_FOR_ID(obj)
STRUCT_FOR_ID(object)
STRUCT_FOR_ID(offset)
STRUCT_FOR_ID(offset_dst)
STRUCT_FOR_ID(offset_src)
STRUCT_FOR_ID(on_type_read)
STRUCT_FOR_ID(onceregistry)
STRUCT_FOR_ID(only_keys)
STRUCT_FOR_ID(oparg)
STRUCT_FOR_ID(opcode)
STRUCT_FOR_ID(open)
STRUCT_FOR_ID(opener)
STRUCT_FOR_ID(operation)
STRUCT_FOR_ID(optimize)
STRUCT_FOR_ID(options)
STRUCT_FOR_ID(order)
STRUCT_FOR_ID(origin)
STRUCT_FOR_ID(out_fd)
STRUCT_FOR_ID(outgoing)
STRUCT_FOR_ID(overlapped)
STRUCT_FOR_ID(owner)
STRUCT_FOR_ID(p)
STRUCT_FOR_ID(pages)
STRUCT_FOR_ID(parent)
STRUCT_FOR_ID(password)
STRUCT_FOR_ID(path)
STRUCT_FOR_ID(pattern)
STRUCT_FOR_ID(peek)
STRUCT_FOR_ID(persistent_id)
STRUCT_FOR_ID(persistent_load)
STRUCT_FOR_ID(person)
STRUCT_FOR_ID(pi_factory)
STRUCT_FOR_ID(pid)
STRUCT_FOR_ID(policy)
STRUCT_FOR_ID(pos)
STRUCT_FOR_ID(pos1)
STRUCT_FOR_ID(pos2)
STRUCT_FOR_ID(posix)
STRUCT_FOR_ID(print_file_and_line)
STRUCT_FOR_ID(priority)
STRUCT_FOR_ID(progress)
STRUCT_FOR_ID(progress_handler)
STRUCT_FOR_ID(progress_routine)
STRUCT_FOR_ID(proto)
STRUCT_FOR_ID(protocol)
STRUCT_FOR_ID(ps1)
STRUCT_FOR_ID(ps2)
STRUCT_FOR_ID(query)
STRUCT_FOR_ID(quotetabs)
STRUCT_FOR_ID(r)
STRUCT_FOR_ID(raw)
STRUCT_FOR_ID(read)
STRUCT_FOR_ID(read1)
STRUCT_FOR_ID(readable)
STRUCT_FOR_ID(readall)
STRUCT_FOR_ID(readinto)
STRUCT_FOR_ID(readinto1)
STRUCT_FOR_ID(readline)
STRUCT_FOR_ID(readonly)
STRUCT_FOR_ID(real)
STRUCT_FOR_ID(reducer_override)
STRUCT_FOR_ID(registry)
STRUCT_FOR_ID(rel_tol)
STRUCT_FOR_ID(release)
STRUCT_FOR_ID(reload)
STRUCT_FOR_ID(repl)
STRUCT_FOR_ID(replace)
STRUCT_FOR_ID(reserved)
STRUCT_FOR_ID(reset)
STRUCT_FOR_ID(resetids)
STRUCT_FOR_ID(return)
STRUCT_FOR_ID(reverse)
STRUCT_FOR_ID(reversed)
STRUCT_FOR_ID(s)
STRUCT_FOR_ID(salt)
STRUCT_FOR_ID(sched_priority)
STRUCT_FOR_ID(scheduler)
STRUCT_FOR_ID(seek)
STRUCT_FOR_ID(seekable)
STRUCT_FOR_ID(selectors)
STRUCT_FOR_ID(self)
STRUCT_FOR_ID(send)
STRUCT_FOR_ID(sep)
STRUCT_FOR_ID(sequence)
STRUCT_FOR_ID(server_hostname)
STRUCT_FOR_ID(server_side)
STRUCT_FOR_ID(session)
STRUCT_FOR_ID(setcomp)
STRUCT_FOR_ID(setpgroup)
STRUCT_FOR_ID(setsid)
STRUCT_FOR_ID(setsigdef)
STRUCT_FOR_ID(setsigmask)
STRUCT_FOR_ID(setstate)
STRUCT_FOR_ID(shape)
STRUCT_FOR_ID(show_cmd)
STRUCT_FOR_ID(signed)
STRUCT_FOR_ID(size)
STRUCT_FOR_ID(sizehint)
STRUCT_FOR_ID(skip_file_prefixes)
STRUCT_FOR_ID(sleep)
STRUCT_FOR_ID(sock)
STRUCT_FOR_ID(sort)
STRUCT_FOR_ID(sound)
STRUCT_FOR_ID(source)
STRUCT_FOR_ID(source_traceback)
STRUCT_FOR_ID(src)
STRUCT_FOR_ID(src_dir_fd)
STRUCT_FOR_ID(stacklevel)
STRUCT_FOR_ID(start)
STRUCT_FOR_ID(statement)
STRUCT_FOR_ID(status)
STRUCT_FOR_ID(stderr)
STRUCT_FOR_ID(stdin)
STRUCT_FOR_ID(stdout)
STRUCT_FOR_ID(step)
STRUCT_FOR_ID(steps)
STRUCT_FOR_ID(store_name)
STRUCT_FOR_ID(strategy)
STRUCT_FOR_ID(strftime)
STRUCT_FOR_ID(strict)
STRUCT_FOR_ID(strict_mode)
STRUCT_FOR_ID(string)
STRUCT_FOR_ID(sub_key)
STRUCT_FOR_ID(symmetric_difference_update)
STRUCT_FOR_ID(tabsize)
STRUCT_FOR_ID(tag)
STRUCT_FOR_ID(target)
STRUCT_FOR_ID(target_is_directory)
STRUCT_FOR_ID(task)
STRUCT_FOR_ID(tb_frame)
STRUCT_FOR_ID(tb_lasti)
STRUCT_FOR_ID(tb_lineno)
STRUCT_FOR_ID(tb_next)
STRUCT_FOR_ID(tell)
STRUCT_FOR_ID(template)
STRUCT_FOR_ID(term)
STRUCT_FOR_ID(text)
STRUCT_FOR_ID(threading)
STRUCT_FOR_ID(throw)
STRUCT_FOR_ID(timeout)
STRUCT_FOR_ID(times)
STRUCT_FOR_ID(timetuple)
STRUCT_FOR_ID(top)
STRUCT_FOR_ID(trace_callback)
STRUCT_FOR_ID(traceback)
STRUCT_FOR_ID(trailers)
STRUCT_FOR_ID(translate)
STRUCT_FOR_ID(true)
STRUCT_FOR_ID(truncate)
STRUCT_FOR_ID(twice)
STRUCT_FOR_ID(txt)
STRUCT_FOR_ID(type)
STRUCT_FOR_ID(type_params)
STRUCT_FOR_ID(tz)
STRUCT_FOR_ID(tzname)
STRUCT_FOR_ID(uid)
STRUCT_FOR_ID(unlink)
STRUCT_FOR_ID(unraisablehook)
STRUCT_FOR_ID(uri)
STRUCT_FOR_ID(usedforsecurity)
STRUCT_FOR_ID(value)
STRUCT_FOR_ID(values)
STRUCT_FOR_ID(version)
STRUCT_FOR_ID(volume)
STRUCT_FOR_ID(warnings)
STRUCT_FOR_ID(warnoptions)
STRUCT_FOR_ID(wbits)
STRUCT_FOR_ID(week)
STRUCT_FOR_ID(weekday)
STRUCT_FOR_ID(which)
STRUCT_FOR_ID(who)
STRUCT_FOR_ID(withdata)
STRUCT_FOR_ID(writable)
STRUCT_FOR_ID(write)
STRUCT_FOR_ID(write_through)
STRUCT_FOR_ID(x)
STRUCT_FOR_ID(year)
STRUCT_FOR_ID(zdict)
} identifiers;
struct {
PyASCIIObject _ascii;
uint8_t _data[2];
} ascii[128];
struct {
PyCompactUnicodeObject _latin1;
uint8_t _data[2];
} latin1[128];
};
/* End auto-generated code */
#undef ID
#undef STR
#define _Py_ID(NAME) \
(_Py_SINGLETON(strings.identifiers._py_ ## NAME._ascii.ob_base))
#define _Py_STR(NAME) \
(_Py_SINGLETON(strings.literals._py_ ## NAME._ascii.ob_base))
/* _Py_DECLARE_STR() should precede all uses of _Py_STR() in a function.
This is true even if the same string has already been declared
elsewhere, even in the same file. Mismatched duplicates are detected
by Tools/scripts/generate-global-objects.py.
Pairing _Py_DECLARE_STR() with every use of _Py_STR() makes sure the
string keeps working even if the declaration is removed somewhere
else. It also makes it clear what the actual string is at every
place it is being used. */
#define _Py_DECLARE_STR(name, str)
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_GLOBAL_STRINGS_H */

View File

@@ -0,0 +1,134 @@
#ifndef Py_INTERNAL_HAMT_H
#define Py_INTERNAL_HAMT_H
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/*
A HAMT tree is shaped by the hashes of its keys. Every group of 5 bits of a
hash denotes the exact position of the key in one level of the tree. Since
we're using 32-bit hashes, we can have at most 7 such levels. If there are
two distinct keys with equal hashes, they have to occupy the same cell in
the 7th level of the tree -- so we put them in a "collision" node, which
brings the total possible tree depth to 8. Read more about the actual
layout of the HAMT tree in `hamt.c`.
This constant is used to define a data structure for storing iteration state.
*/
#define _Py_HAMT_MAX_TREE_DEPTH 8
extern PyTypeObject _PyHamt_Type;
extern PyTypeObject _PyHamt_ArrayNode_Type;
extern PyTypeObject _PyHamt_BitmapNode_Type;
extern PyTypeObject _PyHamt_CollisionNode_Type;
extern PyTypeObject _PyHamtKeys_Type;
extern PyTypeObject _PyHamtValues_Type;
extern PyTypeObject _PyHamtItems_Type;
/* other API */
#define PyHamt_Check(o) Py_IS_TYPE((o), &_PyHamt_Type)
/* Abstract tree node. */
typedef struct {
PyObject_HEAD
} PyHamtNode;
/* A HAMT immutable mapping collection. */
typedef struct {
PyObject_HEAD
PyHamtNode *h_root;
PyObject *h_weakreflist;
Py_ssize_t h_count;
} PyHamtObject;
typedef struct {
PyObject_VAR_HEAD
uint32_t b_bitmap;
PyObject *b_array[1];
} PyHamtNode_Bitmap;
/* A struct to hold the state of a depth-first traversal of the tree.
HAMT is an immutable collection. Iterators hold a strong reference
to it, and every node in the HAMT has strong references to its children.
So iterators can implement depth-first iteration with zero allocations
and zero reference count increments/decrements.
- i_nodes: an array of _Py_HAMT_MAX_TREE_DEPTH pointers to tree nodes
- i_level: the index of the current node in i_nodes
- i_pos: an array of positions within the nodes in i_nodes.
*/
typedef struct {
PyHamtNode *i_nodes[_Py_HAMT_MAX_TREE_DEPTH];
Py_ssize_t i_pos[_Py_HAMT_MAX_TREE_DEPTH];
int8_t i_level;
} PyHamtIteratorState;
/* Base iterator object.
Contains the iteration state, a pointer to the HAMT tree,
and a pointer to the 'yield function'. The latter is a simple
function that returns a key/value tuple for the 'Items' iterator,
just a key for the 'Keys' iterator, and a value for the 'Values'
iterator.
*/
typedef struct {
PyObject_HEAD
PyHamtObject *hi_obj;
PyHamtIteratorState hi_iter;
binaryfunc hi_yield;
} PyHamtIterator;
/* Create a new HAMT immutable mapping. */
PyHamtObject * _PyHamt_New(void);
/* Return a new collection based on "o", but with an additional
key/val pair. */
PyHamtObject * _PyHamt_Assoc(PyHamtObject *o, PyObject *key, PyObject *val);
/* Return a new collection based on "o", but without "key". */
PyHamtObject * _PyHamt_Without(PyHamtObject *o, PyObject *key);
/* Find "key" in the "o" collection.
Return:
- -1: An error occurred.
- 0: "key" wasn't found in "o".
- 1: "key" is in "o"; "*val" is set to its value (a borrowed ref).
*/
int _PyHamt_Find(PyHamtObject *o, PyObject *key, PyObject **val);
/* Check if "v" is equal to "w".
Return:
- 0: v != w
- 1: v == w
- -1: An error occurred.
*/
int _PyHamt_Eq(PyHamtObject *v, PyHamtObject *w);
/* Return the size of "o"; equivalent of "len(o)". */
Py_ssize_t _PyHamt_Len(PyHamtObject *o);
/* Return a Keys iterator over "o". */
PyObject * _PyHamt_NewIterKeys(PyHamtObject *o);
/* Return a Values iterator over "o". */
PyObject * _PyHamt_NewIterValues(PyHamtObject *o);
/* Return an Items iterator over "o". */
PyObject * _PyHamt_NewIterItems(PyHamtObject *o);
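/* Usage sketch (illustrative only; hamt_demo and its arguments are made up
for this example). Each _PyHamt_Assoc() returns a new mapping and leaves
the old one untouched, so the caller releases the old reference:

    static PyObject *
    hamt_demo(PyObject *key, PyObject *val)
    {
        PyHamtObject *h = _PyHamt_New();
        if (h == NULL) {
            return NULL;
        }
        PyHamtObject *h2 = _PyHamt_Assoc(h, key, val);
        Py_DECREF(h);
        if (h2 == NULL) {
            return NULL;
        }
        PyObject *found;  // borrowed reference on success
        int res = _PyHamt_Find(h2, key, &found);
        if (res < 0) {    // error: an exception is set
            Py_DECREF(h2);
            return NULL;
        }
        PyObject *result = (res == 1) ? Py_NewRef(found) : Py_NewRef(Py_None);
        Py_DECREF(h2);
        return result;
    }
*/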
#endif /* !Py_INTERNAL_HAMT_H */

View File

@@ -0,0 +1,148 @@
#ifndef Py_INTERNAL_HASHTABLE_H
#define Py_INTERNAL_HASHTABLE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* Singly linked list */
typedef struct _Py_slist_item_s {
struct _Py_slist_item_s *next;
} _Py_slist_item_t;
typedef struct {
_Py_slist_item_t *head;
} _Py_slist_t;
#define _Py_SLIST_ITEM_NEXT(ITEM) _Py_RVALUE(((_Py_slist_item_t *)(ITEM))->next)
#define _Py_SLIST_HEAD(SLIST) _Py_RVALUE(((_Py_slist_t *)(SLIST))->head)
/* _Py_hashtable: table entry */
typedef struct {
/* used by _Py_hashtable_t.buckets to link entries */
_Py_slist_item_t _Py_slist_item;
Py_uhash_t key_hash;
void *key;
void *value;
} _Py_hashtable_entry_t;
/* _Py_hashtable: prototypes */
/* Forward declaration */
struct _Py_hashtable_t;
typedef struct _Py_hashtable_t _Py_hashtable_t;
typedef Py_uhash_t (*_Py_hashtable_hash_func) (const void *key);
typedef int (*_Py_hashtable_compare_func) (const void *key1, const void *key2);
typedef void (*_Py_hashtable_destroy_func) (void *key);
typedef _Py_hashtable_entry_t* (*_Py_hashtable_get_entry_func)(_Py_hashtable_t *ht,
const void *key);
typedef struct {
// Allocate a memory block
void* (*malloc) (size_t size);
// Release a memory block
void (*free) (void *ptr);
} _Py_hashtable_allocator_t;
/* _Py_hashtable: table */
struct _Py_hashtable_t {
size_t nentries; // Total number of entries in the table
size_t nbuckets;
_Py_slist_t *buckets;
_Py_hashtable_get_entry_func get_entry_func;
_Py_hashtable_hash_func hash_func;
_Py_hashtable_compare_func compare_func;
_Py_hashtable_destroy_func key_destroy_func;
_Py_hashtable_destroy_func value_destroy_func;
_Py_hashtable_allocator_t alloc;
};
/* Hash a pointer (void*) */
PyAPI_FUNC(Py_uhash_t) _Py_hashtable_hash_ptr(const void *key);
/* Comparison using memcmp() */
PyAPI_FUNC(int) _Py_hashtable_compare_direct(
const void *key1,
const void *key2);
PyAPI_FUNC(_Py_hashtable_t *) _Py_hashtable_new(
_Py_hashtable_hash_func hash_func,
_Py_hashtable_compare_func compare_func);
PyAPI_FUNC(_Py_hashtable_t *) _Py_hashtable_new_full(
_Py_hashtable_hash_func hash_func,
_Py_hashtable_compare_func compare_func,
_Py_hashtable_destroy_func key_destroy_func,
_Py_hashtable_destroy_func value_destroy_func,
_Py_hashtable_allocator_t *allocator);
PyAPI_FUNC(void) _Py_hashtable_destroy(_Py_hashtable_t *ht);
PyAPI_FUNC(void) _Py_hashtable_clear(_Py_hashtable_t *ht);
typedef int (*_Py_hashtable_foreach_func) (_Py_hashtable_t *ht,
const void *key, const void *value,
void *user_data);
/* Call func() on each entry of the hashtable.
Iteration stops if func() returns a non-zero value; in that case, that value
is returned as the result. Otherwise, the function returns 0. */
PyAPI_FUNC(int) _Py_hashtable_foreach(
_Py_hashtable_t *ht,
_Py_hashtable_foreach_func func,
void *user_data);
PyAPI_FUNC(size_t) _Py_hashtable_size(const _Py_hashtable_t *ht);
/* Add a new entry to the hash table. The key must not already be present.
Return 0 on success, -1 on memory error. */
PyAPI_FUNC(int) _Py_hashtable_set(
_Py_hashtable_t *ht,
const void *key,
void *value);
/* Get an entry.
Return NULL if the key does not exist. */
static inline _Py_hashtable_entry_t *
_Py_hashtable_get_entry(_Py_hashtable_t *ht, const void *key)
{
return ht->get_entry_func(ht, key);
}
/* Get value from an entry.
Return NULL if the entry is not found.
Use _Py_hashtable_get_entry() to distinguish entry value equal to NULL
and entry not found. */
PyAPI_FUNC(void*) _Py_hashtable_get(_Py_hashtable_t *ht, const void *key);
/* Remove a key and its associated value without calling key and value destroy
functions.
Return the removed value if the key was found.
Return NULL if the key was not found. */
PyAPI_FUNC(void*) _Py_hashtable_steal(
_Py_hashtable_t *ht,
const void *key);
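/* Usage sketch (illustrative only; hashtable_demo and the stored value are
made up for this example). Create a pointer-keyed table, insert a value,
look it up, and destroy the table:

    static int
    hashtable_demo(void)
    {
        _Py_hashtable_t *ht = _Py_hashtable_new(_Py_hashtable_hash_ptr,
                                                _Py_hashtable_compare_direct);
        if (ht == NULL) {
            return -1;  // memory error
        }
        static int value = 42;
        // The address of `value` serves as both key and value here.
        if (_Py_hashtable_set(ht, &value, &value) < 0) {
            _Py_hashtable_destroy(ht);
            return -1;
        }
        int *got = (int *)_Py_hashtable_get(ht, &value);
        assert(got == &value);
        _Py_hashtable_destroy(ht);
        return 0;
    }
*/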
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_HASHTABLE_H */

View File

@@ -0,0 +1,183 @@
#ifndef Py_LIMITED_API
#ifndef Py_INTERNAL_IMPORT_H
#define Py_INTERNAL_IMPORT_H
#ifdef __cplusplus
extern "C" {
#endif
#include "pycore_hashtable.h" // _Py_hashtable_t
#include "pycore_time.h" // _PyTime_t
struct _import_runtime_state {
/* The builtin modules (defined in config.c). */
struct _inittab *inittab;
/* The most recent value assigned to a PyModuleDef.m_base.m_index.
This is incremented each time PyModuleDef_Init() is called,
which is just about every time an extension module is imported.
See PyInterpreterState.modules_by_index for more info. */
Py_ssize_t last_module_index;
struct {
/* A lock to guard the cache. */
PyThread_type_lock mutex;
/* The actual cache of (filename, name, PyModuleDef) for modules.
Only legacy (single-phase init) extension modules are added
and only if they support multiple initialization (m_size >= 0)
or are imported in the main interpreter.
This is initialized lazily in _PyImport_FixupExtensionObject().
Modules are added there and looked up in _imp.find_extension(). */
_Py_hashtable_t *hashtable;
} extensions;
/* Package context -- the full module name for package imports */
const char * pkgcontext;
};
struct _import_state {
/* cached sys.modules dictionary */
PyObject *modules;
/* This is the list of module objects for all legacy (single-phase init)
extension modules ever loaded in this process (i.e. imported
in this interpreter or in any other). Py_None stands in for
modules that haven't actually been imported in this interpreter.
A module's index (PyModuleDef.m_base.m_index) is used to look up
the corresponding module object for this interpreter, if any.
(See PyState_FindModule().) When any extension module
is initialized during import, its moduledef gets initialized by
PyModuleDef_Init(), and the first time that happens for each
PyModuleDef, its index gets set to the current value of
a global counter (see _PyRuntimeState.imports.last_module_index).
The entry for that index in this interpreter remains unset until
the module is actually imported here. (Py_None is used as
a placeholder.) Note that multi-phase init modules always get
an index for which there will never be a module set.
This is initialized lazily in PyState_AddModule(), which is also
where modules get added. */
PyObject *modules_by_index;
/* The importlib._bootstrap module */
PyObject *importlib;
/* override for config->use_frozen_modules (for tests)
(-1: "off", 1: "on", 0: no override) */
int override_frozen_modules;
int override_multi_interp_extensions_check;
#ifdef HAVE_DLOPEN
int dlopenflags;
#endif
PyObject *import_func;
/* The global import lock. */
struct {
PyThread_type_lock mutex;
unsigned long thread;
int level;
} lock;
/* diagnostic info in PyImport_ImportModuleLevelObject() */
struct {
int import_level;
_PyTime_t accumulated;
int header;
} find_and_load;
};
#ifdef HAVE_DLOPEN
# include <dlfcn.h>
# if HAVE_DECL_RTLD_NOW
# define _Py_DLOPEN_FLAGS RTLD_NOW
# else
# define _Py_DLOPEN_FLAGS RTLD_LAZY
# endif
# define DLOPENFLAGS_INIT .dlopenflags = _Py_DLOPEN_FLAGS,
#else
# define _Py_DLOPEN_FLAGS 0
# define DLOPENFLAGS_INIT
#endif
#define IMPORTS_INIT \
{ \
DLOPENFLAGS_INIT \
.lock = { \
.mutex = NULL, \
.thread = PYTHREAD_INVALID_THREAD_ID, \
.level = 0, \
}, \
.find_and_load = { \
.header = 1, \
}, \
}
extern void _PyImport_ClearCore(PyInterpreterState *interp);
extern Py_ssize_t _PyImport_GetNextModuleIndex(void);
extern const char * _PyImport_ResolveNameWithPackageContext(const char *name);
extern const char * _PyImport_SwapPackageContext(const char *newcontext);
extern int _PyImport_GetDLOpenFlags(PyInterpreterState *interp);
extern void _PyImport_SetDLOpenFlags(PyInterpreterState *interp, int new_val);
extern PyObject * _PyImport_InitModules(PyInterpreterState *interp);
extern PyObject * _PyImport_GetModules(PyInterpreterState *interp);
extern void _PyImport_ClearModules(PyInterpreterState *interp);
extern void _PyImport_ClearModulesByIndex(PyInterpreterState *interp);
extern int _PyImport_InitDefaultImportFunc(PyInterpreterState *interp);
extern int _PyImport_IsDefaultImportFunc(
PyInterpreterState *interp,
PyObject *func);
extern PyObject * _PyImport_GetImportlibLoader(
PyInterpreterState *interp,
const char *loader_name);
extern PyObject * _PyImport_GetImportlibExternalLoader(
PyInterpreterState *interp,
const char *loader_name);
extern PyObject * _PyImport_BlessMyLoader(
PyInterpreterState *interp,
PyObject *module_globals);
extern PyObject * _PyImport_ImportlibModuleRepr(
PyInterpreterState *interp,
PyObject *module);
extern PyStatus _PyImport_Init(void);
extern void _PyImport_Fini(void);
extern void _PyImport_Fini2(void);
extern PyStatus _PyImport_InitCore(
PyThreadState *tstate,
PyObject *sysmod,
int importlib);
extern PyStatus _PyImport_InitExternal(PyThreadState *tstate);
extern void _PyImport_FiniCore(PyInterpreterState *interp);
extern void _PyImport_FiniExternal(PyInterpreterState *interp);
#ifdef HAVE_FORK
extern PyStatus _PyImport_ReInitLock(PyInterpreterState *interp);
#endif
extern PyObject* _PyImport_GetBuiltinModuleNames(void);
struct _module_alias {
const char *name; /* ASCII encoded string */
const char *orig; /* ASCII encoded string */
};
PyAPI_DATA(const struct _frozen *) _PyImport_FrozenBootstrap;
PyAPI_DATA(const struct _frozen *) _PyImport_FrozenStdlib;
PyAPI_DATA(const struct _frozen *) _PyImport_FrozenTest;
extern const struct _module_alias * _PyImport_FrozenAliases;
PyAPI_FUNC(int) _PyImport_CheckSubinterpIncompatibleExtensionAllowed(
const char *name);
// for testing
PyAPI_FUNC(int) _PyImport_ClearExtension(PyObject *name, PyObject *filename);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_IMPORT_H */
#endif /* !Py_LIMITED_API */

View File

@@ -0,0 +1,179 @@
#ifndef Py_INTERNAL_CORECONFIG_H
#define Py_INTERNAL_CORECONFIG_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* Forward declaration */
struct pyruntimestate;
/* --- PyStatus ----------------------------------------------- */
/* Almost all errors causing Python initialization to fail */
#ifdef _MSC_VER
/* Visual Studio 2015 doesn't implement C99 __func__ in C */
# define _PyStatus_GET_FUNC() __FUNCTION__
#else
# define _PyStatus_GET_FUNC() __func__
#endif
#define _PyStatus_OK() \
(PyStatus){._type = _PyStatus_TYPE_OK,}
/* other fields are set to 0 */
#define _PyStatus_ERR(ERR_MSG) \
(PyStatus){ \
._type = _PyStatus_TYPE_ERROR, \
.func = _PyStatus_GET_FUNC(), \
.err_msg = (ERR_MSG)}
/* other fields are set to 0 */
#define _PyStatus_NO_MEMORY() _PyStatus_ERR("memory allocation failed")
#define _PyStatus_EXIT(EXITCODE) \
(PyStatus){ \
._type = _PyStatus_TYPE_EXIT, \
.exitcode = (EXITCODE)}
#define _PyStatus_IS_ERROR(err) \
((err)._type == _PyStatus_TYPE_ERROR)
#define _PyStatus_IS_EXIT(err) \
((err)._type == _PyStatus_TYPE_EXIT)
#define _PyStatus_EXCEPTION(err) \
((err)._type != _PyStatus_TYPE_OK)
#define _PyStatus_UPDATE_FUNC(err) \
do { (err).func = _PyStatus_GET_FUNC(); } while (0)
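/* Usage sketch (illustrative only; init_something is made up for this
example). Init helpers return a PyStatus and callers propagate failures
with _PyStatus_EXCEPTION(), which is true for both errors and exit
requests:

    static PyStatus
    init_something(const PyConfig *config)
    {
        if (config == NULL) {
            return _PyStatus_ERR("config must not be NULL");
        }
        // ... actual initialization work ...
        return _PyStatus_OK();
    }

    // caller:
    //     PyStatus status = init_something(config);
    //     if (_PyStatus_EXCEPTION(status)) {
    //         return status;
    //     }
*/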
/* --- PyWideStringList ------------------------------------------------ */
#define _PyWideStringList_INIT (PyWideStringList){.length = 0, .items = NULL}
#ifndef NDEBUG
PyAPI_FUNC(int) _PyWideStringList_CheckConsistency(const PyWideStringList *list);
#endif
PyAPI_FUNC(void) _PyWideStringList_Clear(PyWideStringList *list);
PyAPI_FUNC(int) _PyWideStringList_Copy(PyWideStringList *list,
const PyWideStringList *list2);
PyAPI_FUNC(PyStatus) _PyWideStringList_Extend(PyWideStringList *list,
const PyWideStringList *list2);
PyAPI_FUNC(PyObject*) _PyWideStringList_AsList(const PyWideStringList *list);
/* --- _PyArgv ---------------------------------------------------- */
typedef struct _PyArgv {
Py_ssize_t argc;
int use_bytes_argv;
char * const *bytes_argv;
wchar_t * const *wchar_argv;
} _PyArgv;
PyAPI_FUNC(PyStatus) _PyArgv_AsWstrList(const _PyArgv *args,
PyWideStringList *list);
/* --- Helper functions ------------------------------------------- */
PyAPI_FUNC(int) _Py_str_to_int(
const char *str,
int *result);
PyAPI_FUNC(const wchar_t*) _Py_get_xoption(
const PyWideStringList *xoptions,
const wchar_t *name);
PyAPI_FUNC(const char*) _Py_GetEnv(
int use_environment,
const char *name);
PyAPI_FUNC(void) _Py_get_env_flag(
int use_environment,
int *flag,
const char *name);
/* Py_GetArgcArgv() helper */
PyAPI_FUNC(void) _Py_ClearArgcArgv(void);
/* --- _PyPreCmdline ------------------------------------------------- */
typedef struct {
PyWideStringList argv;
PyWideStringList xoptions; /* "-X value" option */
int isolated; /* -I option */
int use_environment; /* -E option */
int dev_mode; /* -X dev and PYTHONDEVMODE */
int warn_default_encoding; /* -X warn_default_encoding and PYTHONWARNDEFAULTENCODING */
} _PyPreCmdline;
#define _PyPreCmdline_INIT \
(_PyPreCmdline){ \
.use_environment = -1, \
.isolated = -1, \
.dev_mode = -1}
/* Note: _PyPreCmdline_INIT sets other fields to 0/NULL */
extern void _PyPreCmdline_Clear(_PyPreCmdline *cmdline);
extern PyStatus _PyPreCmdline_SetArgv(_PyPreCmdline *cmdline,
const _PyArgv *args);
extern PyStatus _PyPreCmdline_SetConfig(
const _PyPreCmdline *cmdline,
PyConfig *config);
extern PyStatus _PyPreCmdline_Read(_PyPreCmdline *cmdline,
const PyPreConfig *preconfig);
/* --- PyPreConfig ----------------------------------------------- */
PyAPI_FUNC(void) _PyPreConfig_InitCompatConfig(PyPreConfig *preconfig);
extern void _PyPreConfig_InitFromConfig(
PyPreConfig *preconfig,
const PyConfig *config);
extern PyStatus _PyPreConfig_InitFromPreConfig(
PyPreConfig *preconfig,
const PyPreConfig *config2);
extern PyObject* _PyPreConfig_AsDict(const PyPreConfig *preconfig);
extern void _PyPreConfig_GetConfig(PyPreConfig *preconfig,
const PyConfig *config);
extern PyStatus _PyPreConfig_Read(PyPreConfig *preconfig,
const _PyArgv *args);
extern PyStatus _PyPreConfig_Write(const PyPreConfig *preconfig);
/* --- PyConfig ---------------------------------------------- */
typedef enum {
/* Py_Initialize() API: backward compatibility with Python 3.6 and 3.7 */
_PyConfig_INIT_COMPAT = 1,
_PyConfig_INIT_PYTHON = 2,
_PyConfig_INIT_ISOLATED = 3
} _PyConfigInitEnum;
PyAPI_FUNC(void) _PyConfig_InitCompatConfig(PyConfig *config);
extern PyStatus _PyConfig_Copy(
PyConfig *config,
const PyConfig *config2);
extern PyStatus _PyConfig_InitPathConfig(
PyConfig *config,
int compute_path_config);
extern PyStatus _PyConfig_InitImportConfig(PyConfig *config);
extern PyStatus _PyConfig_Read(PyConfig *config, int compute_path_config);
extern PyStatus _PyConfig_Write(const PyConfig *config,
struct pyruntimestate *runtime);
extern PyStatus _PyConfig_SetPyArgv(
PyConfig *config,
const _PyArgv *args);
PyAPI_FUNC(PyObject*) _PyConfig_AsDict(const PyConfig *config);
PyAPI_FUNC(int) _PyConfig_FromDict(PyConfig *config, PyObject *dict);
extern void _Py_DumpPathConfig(PyThreadState *tstate);
PyAPI_FUNC(PyObject*) _Py_Get_Getpath_CodeObject(void);
/* --- Function used for testing ---------------------------------- */
PyAPI_FUNC(PyObject*) _Py_GetConfigsAsDict(void);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CORECONFIG_H */

View File

@@ -0,0 +1,106 @@
#ifndef Py_INTERNAL_INSTRUMENT_H
#define Py_INTERNAL_INSTRUMENT_H
#include "pycore_bitutils.h" // _Py_popcount32
#include "pycore_frame.h"
#include "cpython/code.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PY_MONITORING_TOOL_IDS 8
/* Local events.
* These require bytecode instrumentation */
#define PY_MONITORING_EVENT_PY_START 0
#define PY_MONITORING_EVENT_PY_RESUME 1
#define PY_MONITORING_EVENT_PY_RETURN 2
#define PY_MONITORING_EVENT_PY_YIELD 3
#define PY_MONITORING_EVENT_CALL 4
#define PY_MONITORING_EVENT_LINE 5
#define PY_MONITORING_EVENT_INSTRUCTION 6
#define PY_MONITORING_EVENT_JUMP 7
#define PY_MONITORING_EVENT_BRANCH 8
#define PY_MONITORING_EVENT_STOP_ITERATION 9
#define PY_MONITORING_IS_INSTRUMENTED_EVENT(ev) \
((ev) < _PY_MONITORING_LOCAL_EVENTS)
/* Other events, mainly exceptions */
#define PY_MONITORING_EVENT_RAISE 10
#define PY_MONITORING_EVENT_EXCEPTION_HANDLED 11
#define PY_MONITORING_EVENT_PY_UNWIND 12
#define PY_MONITORING_EVENT_PY_THROW 13
#define PY_MONITORING_EVENT_RERAISE 14
/* Ancillary events */
#define PY_MONITORING_EVENT_C_RETURN 15
#define PY_MONITORING_EVENT_C_RAISE 16
typedef uint32_t _PyMonitoringEventSet;
/* Tool IDs */
/* These are defined in PEP 669 for convenience to avoid clashes */
#define PY_MONITORING_DEBUGGER_ID 0
#define PY_MONITORING_COVERAGE_ID 1
#define PY_MONITORING_PROFILER_ID 2
#define PY_MONITORING_OPTIMIZER_ID 5
/* Internal IDs used to support sys.setprofile() and sys.settrace() */
#define PY_MONITORING_SYS_PROFILE_ID 6
#define PY_MONITORING_SYS_TRACE_ID 7
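/* An event set is a bitmask indexed by the PY_MONITORING_EVENT_* numbers
above. Illustrative sketch only: enable PY_START and RAISE for the
profiler tool slot.

    _PyMonitoringEventSet events =
        (1 << PY_MONITORING_EVENT_PY_START) |
        (1 << PY_MONITORING_EVENT_RAISE);
    // e.g.: _PyMonitoring_SetEvents(PY_MONITORING_PROFILER_ID, events);
*/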
PyObject *_PyMonitoring_RegisterCallback(int tool_id, int event_id, PyObject *obj);
int _PyMonitoring_SetEvents(int tool_id, _PyMonitoringEventSet events);
extern int
_Py_call_instrumentation(PyThreadState *tstate, int event,
_PyInterpreterFrame *frame, _Py_CODEUNIT *instr);
extern int
_Py_call_instrumentation_line(PyThreadState *tstate, _PyInterpreterFrame* frame,
_Py_CODEUNIT *instr, _Py_CODEUNIT *prev);
extern int
_Py_call_instrumentation_instruction(
PyThreadState *tstate, _PyInterpreterFrame* frame, _Py_CODEUNIT *instr);
_Py_CODEUNIT *
_Py_call_instrumentation_jump(
PyThreadState *tstate, int event,
_PyInterpreterFrame *frame, _Py_CODEUNIT *instr, _Py_CODEUNIT *target);
extern int
_Py_call_instrumentation_arg(PyThreadState *tstate, int event,
_PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg);
extern int
_Py_call_instrumentation_2args(PyThreadState *tstate, int event,
_PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg0, PyObject *arg1);
extern void
_Py_call_instrumentation_exc2(PyThreadState *tstate, int event,
_PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg0, PyObject *arg1);
extern int
_Py_Instrumentation_GetLine(PyCodeObject *code, int index);
extern PyObject _PyInstrumentation_MISSING;
extern PyObject _PyInstrumentation_DISABLE;
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_INSTRUMENT_H */

View File

@@ -0,0 +1,242 @@
#ifndef Py_INTERNAL_INTERP_H
#define Py_INTERNAL_INTERP_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include <stdbool.h>
#include "pycore_ast_state.h" // struct ast_state
#include "pycore_atexit.h" // struct atexit_state
#include "pycore_atomic.h" // _Py_atomic_address
#include "pycore_ceval_state.h" // struct _ceval_state
#include "pycore_code.h" // struct callable_cache
#include "pycore_context.h" // struct _Py_context_state
#include "pycore_dict_state.h" // struct _Py_dict_state
#include "pycore_dtoa.h" // struct _dtoa_state
#include "pycore_exceptions.h" // struct _Py_exc_state
#include "pycore_floatobject.h" // struct _Py_float_state
#include "pycore_function.h" // FUNC_MAX_WATCHERS
#include "pycore_genobject.h" // struct _Py_async_gen_state
#include "pycore_gc.h" // struct _gc_runtime_state
#include "pycore_global_objects.h" // struct _Py_interp_static_objects
#include "pycore_import.h" // struct _import_state
#include "pycore_instruments.h" // _PY_MONITORING_EVENTS
#include "pycore_list.h" // struct _Py_list_state
#include "pycore_object_state.h" // struct _py_object_state
#include "pycore_obmalloc.h" // struct obmalloc_state
#include "pycore_tuple.h" // struct _Py_tuple_state
#include "pycore_typeobject.h" // struct type_cache
#include "pycore_unicodeobject.h" // struct _Py_unicode_state
#include "pycore_warnings.h" // struct _warnings_runtime_state
struct _Py_long_state {
int max_str_digits;
};
/* interpreter state */
/* PyInterpreterState holds the global state for one of the runtime's
interpreters. Typically the initial (main) interpreter is the only one.
The PyInterpreterState typedef is in Include/pytypedefs.h.
*/
struct _is {
PyInterpreterState *next;
int64_t id;
int64_t id_refcount;
int requires_idref;
PyThread_type_lock id_mutex;
/* Has been initialized to a safe state.
In order to be effective, this must be set to 0 during or right
after allocation. */
int _initialized;
int finalizing;
uint64_t monitoring_version;
uint64_t last_restart_version;
struct pythreads {
uint64_t next_unique_id;
/* The linked list of threads, newest first. */
PyThreadState *head;
/* Used in Modules/_threadmodule.c. */
long count;
/* Support for runtime thread stack size tuning.
A value of 0 means using the platform's default stack size
or the size specified by the THREAD_STACK_SIZE macro. */
/* Used in Python/thread.c. */
size_t stacksize;
} threads;
/* Reference to the _PyRuntime global variable. This field exists
to not have to pass runtime in addition to tstate to a function.
Get runtime from tstate: tstate->interp->runtime. */
struct pyruntimestate *runtime;
/* Set by Py_EndInterpreter().
Use _PyInterpreterState_GetFinalizing()
and _PyInterpreterState_SetFinalizing()
to access it, don't access it directly. */
_Py_atomic_address _finalizing;
struct _gc_runtime_state gc;
/* The following fields are here to avoid allocation during init.
The data is exposed through PyInterpreterState pointer fields.
These fields should not be accessed directly outside of init.
All other PyInterpreterState pointer fields are populated when
needed and default to NULL.
For now there are some exceptions to that rule, which require
allocation during init. These will be addressed on a case-by-case
basis. Also see _PyRuntimeState regarding the various mutex fields.
*/
// Dictionary of the sys module
PyObject *sysdict;
// Dictionary of the builtins module
PyObject *builtins;
struct _ceval_state ceval;
struct _import_state imports;
/* The per-interpreter GIL, which might not be used. */
struct _gil_runtime_state _gil;
/* ---------- IMPORTANT ---------------------------
The fields above this line are declared as early as
possible to facilitate out-of-process observability
tools. */
PyObject *codec_search_path;
PyObject *codec_search_cache;
PyObject *codec_error_registry;
int codecs_initialized;
PyConfig config;
unsigned long feature_flags;
PyObject *dict; /* Stores per-interpreter state */
PyObject *sysdict_copy;
PyObject *builtins_copy;
// Initialized to _PyEval_EvalFrameDefault().
_PyFrameEvalFunction eval_frame;
PyFunction_WatchCallback func_watchers[FUNC_MAX_WATCHERS];
// One bit is set for each non-NULL entry in func_watchers
uint8_t active_func_watchers;
Py_ssize_t co_extra_user_count;
freefunc co_extra_freefuncs[MAX_CO_EXTRA_USERS];
#ifdef HAVE_FORK
PyObject *before_forkers;
PyObject *after_forkers_parent;
PyObject *after_forkers_child;
#endif
struct _warnings_runtime_state warnings;
struct atexit_state atexit;
struct _obmalloc_state obmalloc;
PyObject *audit_hooks;
PyType_WatchCallback type_watchers[TYPE_MAX_WATCHERS];
PyCode_WatchCallback code_watchers[CODE_MAX_WATCHERS];
// One bit is set for each non-NULL entry in code_watchers
uint8_t active_code_watchers;
struct _py_object_state object_state;
struct _Py_unicode_state unicode;
struct _Py_float_state float_state;
struct _Py_long_state long_state;
struct _dtoa_state dtoa;
struct _py_func_state func_state;
/* Using a cache is very effective since typically only a single slice is
created and then deleted again. */
PySliceObject *slice_cache;
struct _Py_tuple_state tuple;
struct _Py_list_state list;
struct _Py_dict_state dict_state;
struct _Py_async_gen_state async_gen;
struct _Py_context_state context;
struct _Py_exc_state exc_state;
struct ast_state ast;
struct types_state types;
struct callable_cache callable_cache;
PyCodeObject *interpreter_trampoline;
_Py_GlobalMonitors monitors;
bool f_opcode_trace_set;
bool sys_profile_initialized;
bool sys_trace_initialized;
Py_ssize_t sys_profiling_threads; /* Count of threads with c_profilefunc set */
Py_ssize_t sys_tracing_threads; /* Count of threads with c_tracefunc set */
PyObject *monitoring_callables[PY_MONITORING_TOOL_IDS][_PY_MONITORING_EVENTS];
PyObject *monitoring_tool_names[PY_MONITORING_TOOL_IDS];
struct _Py_interp_cached_objects cached_objects;
struct _Py_interp_static_objects static_objects;
/* the initial PyInterpreterState.threads.head */
PyThreadState _initial_thread;
};
/* other API */
extern void _PyInterpreterState_Clear(PyThreadState *tstate);
static inline PyThreadState*
_PyInterpreterState_GetFinalizing(PyInterpreterState *interp) {
return (PyThreadState*)_Py_atomic_load_relaxed(&interp->_finalizing);
}
static inline void
_PyInterpreterState_SetFinalizing(PyInterpreterState *interp, PyThreadState *tstate) {
_Py_atomic_store_relaxed(&interp->_finalizing, (uintptr_t)tstate);
}
/* cross-interpreter data registry */
/* For now we use a global registry of shareable classes. An
alternative would be to add a tp_* slot for a class's
crossinterpdatafunc. It would be simpler and more efficient. */
struct _xidregitem;
struct _xidregitem {
struct _xidregitem *prev;
struct _xidregitem *next;
PyObject *cls; // weakref to a PyTypeObject
crossinterpdatafunc getdata;
};
PyAPI_FUNC(PyInterpreterState*) _PyInterpreterState_LookUpID(int64_t);
PyAPI_FUNC(int) _PyInterpreterState_IDInitref(PyInterpreterState *);
PyAPI_FUNC(int) _PyInterpreterState_IDIncref(PyInterpreterState *);
PyAPI_FUNC(void) _PyInterpreterState_IDDecref(PyInterpreterState *);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_INTERP_H */

View File

@@ -0,0 +1,32 @@
// Auto-generated by Tools/build/generate_opcode_h.py from Lib/opcode.py
/* Unary Functions: */
#define INTRINSIC_1_INVALID 0
#define INTRINSIC_PRINT 1
#define INTRINSIC_IMPORT_STAR 2
#define INTRINSIC_STOPITERATION_ERROR 3
#define INTRINSIC_ASYNC_GEN_WRAP 4
#define INTRINSIC_UNARY_POSITIVE 5
#define INTRINSIC_LIST_TO_TUPLE 6
#define INTRINSIC_TYPEVAR 7
#define INTRINSIC_PARAMSPEC 8
#define INTRINSIC_TYPEVARTUPLE 9
#define INTRINSIC_SUBSCRIPT_GENERIC 10
#define INTRINSIC_TYPEALIAS 11
#define MAX_INTRINSIC_1 11
/* Binary Functions: */
#define INTRINSIC_2_INVALID 0
#define INTRINSIC_PREP_RERAISE_STAR 1
#define INTRINSIC_TYPEVAR_WITH_BOUND 2
#define INTRINSIC_TYPEVAR_WITH_CONSTRAINTS 3
#define INTRINSIC_SET_FUNCTION_TYPE_PARAMS 4
#define MAX_INTRINSIC_2 4
typedef PyObject *(*instrinsic_func1)(PyThreadState* tstate, PyObject *value);
typedef PyObject *(*instrinsic_func2)(PyThreadState* tstate, PyObject *value1, PyObject *value2);
extern const instrinsic_func1 _PyIntrinsics_UnaryFunctions[];
extern const instrinsic_func2 _PyIntrinsics_BinaryFunctions[];

View File

@@ -0,0 +1,83 @@
#ifndef Py_INTERNAL_LIST_H
#define Py_INTERNAL_LIST_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "listobject.h" // _PyList_CAST()
/* runtime lifecycle */
extern void _PyList_Fini(PyInterpreterState *);
/* other API */
#ifndef WITH_FREELISTS
// without freelists
# define PyList_MAXFREELIST 0
#endif
/* Empty list reuse scheme to save calls to malloc and free */
#ifndef PyList_MAXFREELIST
# define PyList_MAXFREELIST 80
#endif
struct _Py_list_state {
#if PyList_MAXFREELIST > 0
PyListObject *free_list[PyList_MAXFREELIST];
int numfree;
#endif
};
#define _PyList_ITEMS(op) _Py_RVALUE(_PyList_CAST(op)->ob_item)
extern int
_PyList_AppendTakeRefListResize(PyListObject *self, PyObject *newitem);
static inline int
_PyList_AppendTakeRef(PyListObject *self, PyObject *newitem)
{
assert(self != NULL && newitem != NULL);
assert(PyList_Check(self));
Py_ssize_t len = PyList_GET_SIZE(self);
Py_ssize_t allocated = self->allocated;
assert((size_t)len + 1 < PY_SSIZE_T_MAX);
if (allocated > len) {
PyList_SET_ITEM(self, len, newitem);
Py_SET_SIZE(self, len + 1);
return 0;
}
return _PyList_AppendTakeRefListResize(self, newitem);
}
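/* Usage sketch (illustrative only; append_long is made up for this
example). _PyList_AppendTakeRef() steals the reference to newitem: on
success the list owns it, and on failure the resize helper releases it,
so the caller never decrefs it.

    static int
    append_long(PyListObject *list, long value)
    {
        PyObject *item = PyLong_FromLong(value);  // new reference
        if (item == NULL) {
            return -1;
        }
        return _PyList_AppendTakeRef(list, item);
    }
*/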
// Repeat the first len_src bytes of dest in place until len_dest bytes are
// filled, doubling the size of the copied region on each pass.
static inline void
_Py_memory_repeat(char* dest, Py_ssize_t len_dest, Py_ssize_t len_src)
{
assert(len_src > 0);
Py_ssize_t copied = len_src;
while (copied < len_dest) {
Py_ssize_t bytes_to_copy = Py_MIN(copied, len_dest - copied);
memcpy(dest + copied, dest, bytes_to_copy);
copied += bytes_to_copy;
}
}
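/* Worked example (illustrative only): with len_src == 3 and len_dest == 12,
the copied region doubles on each pass (3 -> 6 -> 12 bytes), so only two
memcpy() calls are needed instead of three pattern-sized copies.

    char buf[13] = "abc";            // pattern occupies the first 3 bytes
    _Py_memory_repeat(buf, 12, 3);   // buf now starts with "abcabcabcabc"
*/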
typedef struct {
PyObject_HEAD
Py_ssize_t it_index;
PyListObject *it_seq; /* Set to NULL when iterator is exhausted */
} _PyListIterObject;
extern PyObject *_PyList_FromArraySteal(PyObject *const *src, Py_ssize_t n);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_LIST_H */

View File

@@ -0,0 +1,258 @@
#ifndef Py_INTERNAL_LONG_H
#define Py_INTERNAL_LONG_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_global_objects.h" // _PY_NSMALLNEGINTS
#include "pycore_runtime.h" // _PyRuntime
/*
* Default int base conversion size limitation: Denial of Service prevention.
*
* Chosen such that this isn't wildly slow on modern hardware and so that
* everyone's existing deployed numpy test suite passes before
* https://github.com/numpy/numpy/issues/22098 is widely available.
*
* $ python -m timeit -s 's = "1"*4300' 'int(s)'
* 2000 loops, best of 5: 125 usec per loop
* $ python -m timeit -s 's = "1"*4300; v = int(s)' 'str(v)'
* 1000 loops, best of 5: 311 usec per loop
* (zen2 cloud VM)
*
* 4300 decimal digits fit in a ~14284-bit number.
*/
#define _PY_LONG_DEFAULT_MAX_STR_DIGITS 4300
/*
* Threshold for max digits check. For performance reasons int() and
* int.__str__() don't check values that are smaller than this
* threshold. Acts as a guaranteed minimum size limit for bignums that
* applications can expect from CPython.
*
* % python -m timeit -s 's = "1"*640; v = int(s)' 'str(int(s))'
* 20000 loops, best of 5: 12 usec per loop
*
* "640 digits should be enough for anyone." - gps
* 640 decimal digits fit in a ~2126-bit number.
*/
#define _PY_LONG_MAX_STR_DIGITS_THRESHOLD 640
#if ((_PY_LONG_DEFAULT_MAX_STR_DIGITS != 0) && \
(_PY_LONG_DEFAULT_MAX_STR_DIGITS < _PY_LONG_MAX_STR_DIGITS_THRESHOLD))
# error "_PY_LONG_DEFAULT_MAX_STR_DIGITS smaller than threshold."
#endif
/* runtime lifecycle */
extern PyStatus _PyLong_InitTypes(PyInterpreterState *);
extern void _PyLong_FiniTypes(PyInterpreterState *interp);
/* other API */
#define _PyLong_SMALL_INTS _Py_SINGLETON(small_ints)
// _PyLong_GetZero() and _PyLong_GetOne() must always be available
// _PyLong_FromUnsignedChar must always be available
#if _PY_NSMALLPOSINTS < 257
# error "_PY_NSMALLPOSINTS must be greater than or equal to 257"
#endif
// Return a borrowed reference to the zero singleton.
// The function cannot return NULL.
static inline PyObject* _PyLong_GetZero(void)
{ return (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS]; }
// Return a borrowed reference to the one singleton.
// The function cannot return NULL.
static inline PyObject* _PyLong_GetOne(void)
{ return (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS+1]; }
static inline PyObject* _PyLong_FromUnsignedChar(unsigned char i)
{
return Py_NewRef((PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS+i]);
}
PyObject *_PyLong_Add(PyLongObject *left, PyLongObject *right);
PyObject *_PyLong_Multiply(PyLongObject *left, PyLongObject *right);
PyObject *_PyLong_Subtract(PyLongObject *left, PyLongObject *right);
/* Used by Python/mystrtoul.c, _PyBytes_FromHex(),
_PyBytes_DecodeEscape(), etc. */
PyAPI_DATA(unsigned char) _PyLong_DigitValue[256];
/* Format the object based on the format_spec, as defined in PEP 3101
(Advanced String Formatting). */
PyAPI_FUNC(int) _PyLong_FormatAdvancedWriter(
_PyUnicodeWriter *writer,
PyObject *obj,
PyObject *format_spec,
Py_ssize_t start,
Py_ssize_t end);
PyAPI_FUNC(int) _PyLong_FormatWriter(
_PyUnicodeWriter *writer,
PyObject *obj,
int base,
int alternate);
PyAPI_FUNC(char*) _PyLong_FormatBytesWriter(
_PyBytesWriter *writer,
char *str,
PyObject *obj,
int base,
int alternate);
/* Long value tag bits:
* 0-1: Sign bits value = (1-sign), i.e. negative=2, positive=0, zero=1.
* 2: Reserved for immortality bit
* 3+: Unsigned digit count
*/
#define SIGN_MASK 3
#define SIGN_ZERO 1
#define SIGN_NEGATIVE 2
#define NON_SIZE_BITS 3
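/* Worked examples of the tag encoding (illustrative only):
* value 42: sign=+1, 1 digit -> lv_tag = (1 << NON_SIZE_BITS) | (1 - 1) = 8
* value -5: sign=-1, 1 digit -> lv_tag = (1 << NON_SIZE_BITS) | (1 - -1) = 10
* value 0: sign=0, 0 digits -> lv_tag = (0 << NON_SIZE_BITS) | (1 - 0) = 1
* (== SIGN_ZERO)
*/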
/* The functions _PyLong_IsCompact and _PyLong_CompactValue are defined
* in Include/cpython/longobject.h, since they need to be inline.
*
* "Compact" values have at least one bit to spare,
* so that addition and subtraction can be performed on the values
* without risk of overflow.
*
* The inline functions need tag bits.
* For readability, rather than do `#define SIGN_MASK _PyLong_SIGN_MASK`
* we define them to the numbers in both places and then assert that
* they're the same.
*/
static_assert(SIGN_MASK == _PyLong_SIGN_MASK, "SIGN_MASK does not match _PyLong_SIGN_MASK");
static_assert(NON_SIZE_BITS == _PyLong_NON_SIZE_BITS, "NON_SIZE_BITS does not match _PyLong_NON_SIZE_BITS");
/* All *compact" values are guaranteed to fit into
* a Py_ssize_t with at least one bit to spare.
* In other words, for 64 bit machines, compact
* will be signed 63 (or fewer) bit values
*/
/* Return 1 if the argument is a non-negative compact int */
static inline int
_PyLong_IsNonNegativeCompact(const PyLongObject* op) {
assert(PyLong_Check(op));
return op->long_value.lv_tag <= (1 << NON_SIZE_BITS);
}
static inline int
_PyLong_BothAreCompact(const PyLongObject* a, const PyLongObject* b) {
assert(PyLong_Check(a));
assert(PyLong_Check(b));
return (a->long_value.lv_tag | b->long_value.lv_tag) < (2 << NON_SIZE_BITS);
}
static inline bool
_PyLong_IsZero(const PyLongObject *op)
{
return (op->long_value.lv_tag & SIGN_MASK) == SIGN_ZERO;
}
static inline bool
_PyLong_IsNegative(const PyLongObject *op)
{
return (op->long_value.lv_tag & SIGN_MASK) == SIGN_NEGATIVE;
}
static inline bool
_PyLong_IsPositive(const PyLongObject *op)
{
return (op->long_value.lv_tag & SIGN_MASK) == 0;
}
static inline Py_ssize_t
_PyLong_DigitCount(const PyLongObject *op)
{
assert(PyLong_Check(op));
return op->long_value.lv_tag >> NON_SIZE_BITS;
}
/* Equivalent to _PyLong_DigitCount(op) * _PyLong_NonCompactSign(op) */
static inline Py_ssize_t
_PyLong_SignedDigitCount(const PyLongObject *op)
{
assert(PyLong_Check(op));
Py_ssize_t sign = 1 - (op->long_value.lv_tag & SIGN_MASK);
return sign * (Py_ssize_t)(op->long_value.lv_tag >> NON_SIZE_BITS);
}
static inline int
_PyLong_CompactSign(const PyLongObject *op)
{
assert(PyLong_Check(op));
assert(_PyLong_IsCompact(op));
return 1 - (op->long_value.lv_tag & SIGN_MASK);
}
static inline int
_PyLong_NonCompactSign(const PyLongObject *op)
{
assert(PyLong_Check(op));
assert(!_PyLong_IsCompact(op));
return 1 - (op->long_value.lv_tag & SIGN_MASK);
}
/* Do a and b have the same sign? */
static inline int
_PyLong_SameSign(const PyLongObject *a, const PyLongObject *b)
{
return (a->long_value.lv_tag & SIGN_MASK) == (b->long_value.lv_tag & SIGN_MASK);
}
#define TAG_FROM_SIGN_AND_SIZE(sign, size) ((1 - (sign)) | ((size) << NON_SIZE_BITS))
static inline void
_PyLong_SetSignAndDigitCount(PyLongObject *op, int sign, Py_ssize_t size)
{
assert(size >= 0);
assert(-1 <= sign && sign <= 1);
assert(sign != 0 || size == 0);
op->long_value.lv_tag = TAG_FROM_SIGN_AND_SIZE(sign, (size_t)size);
}
static inline void
_PyLong_SetDigitCount(PyLongObject *op, Py_ssize_t size)
{
assert(size >= 0);
op->long_value.lv_tag = (((size_t)size) << NON_SIZE_BITS) | (op->long_value.lv_tag & SIGN_MASK);
}
#define NON_SIZE_MASK ~((1 << NON_SIZE_BITS) - 1)
static inline void
_PyLong_FlipSign(PyLongObject *op) {
unsigned int flipped_sign = 2 - (op->long_value.lv_tag & SIGN_MASK);
op->long_value.lv_tag &= NON_SIZE_MASK;
op->long_value.lv_tag |= flipped_sign;
}
#define _PyLong_DIGIT_INIT(val) \
{ \
.ob_base = _PyObject_HEAD_INIT(&PyLong_Type) \
.long_value = { \
.lv_tag = TAG_FROM_SIGN_AND_SIZE( \
(val) == 0 ? 0 : ((val) < 0 ? -1 : 1), \
(val) == 0 ? 0 : 1), \
{ ((val) >= 0 ? (val) : -(val)) }, \
} \
}
#define _PyLong_FALSE_TAG TAG_FROM_SIGN_AND_SIZE(0, 0)
#define _PyLong_TRUE_TAG TAG_FROM_SIGN_AND_SIZE(1, 1)
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_LONG_H */

View File

@@ -0,0 +1,18 @@
#ifndef Py_INTERNAL_MEMORYOBJECT_H
#define Py_INTERNAL_MEMORYOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
PyObject *
_PyMemoryView_FromBufferProc(PyObject *v, int flags,
getbufferproc bufferproc);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_MEMORYOBJECT_H */

View File

@@ -0,0 +1,45 @@
#ifndef Py_INTERNAL_MODULEOBJECT_H
#define Py_INTERNAL_MODULEOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
typedef struct {
PyObject_HEAD
PyObject *md_dict;
PyModuleDef *md_def;
void *md_state;
PyObject *md_weaklist;
// for logging purposes after md_dict is cleared
PyObject *md_name;
} PyModuleObject;
static inline PyModuleDef* _PyModule_GetDef(PyObject *mod) {
assert(PyModule_Check(mod));
return ((PyModuleObject *)mod)->md_def;
}
static inline void* _PyModule_GetState(PyObject* mod) {
assert(PyModule_Check(mod));
return ((PyModuleObject *)mod)->md_state;
}
static inline PyObject* _PyModule_GetDict(PyObject *mod) {
assert(PyModule_Check(mod));
PyObject *dict = ((PyModuleObject *)mod)->md_dict;
// _PyModule_GetDict(mod) must not be used after calling module_clear(mod)
assert(dict != NULL);
return dict;
}
PyObject* _Py_module_getattro_impl(PyModuleObject *m, PyObject *name, int suppress);
PyObject* _Py_module_getattro(PyModuleObject *m, PyObject *name);
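/* Usage sketch (illustrative only; demo_state and demo_method are made up
for this example). A module method can recover the per-module state
allocated according to PyModuleDef.m_size:

    typedef struct { int counter; } demo_state;

    static PyObject *
    demo_method(PyObject *module, PyObject *Py_UNUSED(args))
    {
        demo_state *state = (demo_state *)_PyModule_GetState(module);
        if (state == NULL) {
            PyErr_SetString(PyExc_SystemError, "missing module state");
            return NULL;
        }
        state->counter++;
        return PyLong_FromLong(state->counter);
    }
*/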
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_MODULEOBJECT_H */

View File

@@ -0,0 +1,20 @@
// Simple namespace object interface
#ifndef Py_INTERNAL_NAMESPACE_H
#define Py_INTERNAL_NAMESPACE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
PyAPI_DATA(PyTypeObject) _PyNamespace_Type;
PyAPI_FUNC(PyObject *) _PyNamespace_New(PyObject *kwds);
#ifdef __cplusplus
}
#endif
#endif // !Py_INTERNAL_NAMESPACE_H

View File

@@ -0,0 +1,443 @@
#ifndef Py_INTERNAL_OBJECT_H
#define Py_INTERNAL_OBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include <stdbool.h>
#include "pycore_gc.h" // _PyObject_GC_IS_TRACKED()
#include "pycore_interp.h" // PyInterpreterState.gc
#include "pycore_pystate.h" // _PyInterpreterState_GET()
#include "pycore_runtime.h" // _PyRuntime
/* We need to maintain an internal copy of Py{Var}Object_HEAD_INIT to avoid
designated initializer conflicts in C++20. If we use the definition in
object.h, we will be mixing designated and non-designated initializers in
pycore objects, which is forbidden in C++20. However, if we then use
designated initializers in object.h, extensions without designated
initializer support break. Furthermore, we can't use designated initializers
in extensions since these are not supported pre-C++20. Thus, keeping an
internal copy here is the most backwards compatible solution. */
#define _PyObject_HEAD_INIT(type) \
{ \
_PyObject_EXTRA_INIT \
.ob_refcnt = _Py_IMMORTAL_REFCNT, \
.ob_type = (type) \
},
#define _PyVarObject_HEAD_INIT(type, size) \
{ \
.ob_base = _PyObject_HEAD_INIT(type) \
.ob_size = size \
},
PyAPI_FUNC(void) _Py_NO_RETURN _Py_FatalRefcountErrorFunc(
const char *func,
const char *message);
#define _Py_FatalRefcountError(message) \
_Py_FatalRefcountErrorFunc(__func__, (message))
#ifdef Py_REF_DEBUG
/* The symbol is only exposed in the API for the sake of extensions
built against the pre-3.12 stable ABI. */
PyAPI_DATA(Py_ssize_t) _Py_RefTotal;
extern void _Py_AddRefTotal(PyInterpreterState *, Py_ssize_t);
extern void _Py_IncRefTotal(PyInterpreterState *);
extern void _Py_DecRefTotal(PyInterpreterState *);
# define _Py_DEC_REFTOTAL(interp) \
interp->object_state.reftotal--
#endif
// Increment reference count by n
static inline void _Py_RefcntAdd(PyObject* op, Py_ssize_t n)
{
if (_Py_IsImmortal(op)) {
return;
}
#ifdef Py_REF_DEBUG
_Py_AddRefTotal(_PyInterpreterState_GET(), n);
#endif
op->ob_refcnt += n;
}
#define _Py_RefcntAdd(op, n) _Py_RefcntAdd(_PyObject_CAST(op), n)
static inline void _Py_SetImmortal(PyObject *op)
{
if (op) {
op->ob_refcnt = _Py_IMMORTAL_REFCNT;
}
}
#define _Py_SetImmortal(op) _Py_SetImmortal(_PyObject_CAST(op))
/* _Py_ClearImmortal() should only be used during runtime finalization. */
static inline void _Py_ClearImmortal(PyObject *op)
{
if (op) {
assert(op->ob_refcnt == _Py_IMMORTAL_REFCNT);
op->ob_refcnt = 1;
Py_DECREF(op);
}
}
#define _Py_ClearImmortal(op) \
do { \
_Py_ClearImmortal(_PyObject_CAST(op)); \
op = NULL; \
} while (0)
static inline void
_Py_DECREF_SPECIALIZED(PyObject *op, const destructor destruct)
{
if (_Py_IsImmortal(op)) {
return;
}
_Py_DECREF_STAT_INC();
#ifdef Py_REF_DEBUG
_Py_DEC_REFTOTAL(_PyInterpreterState_GET());
#endif
if (--op->ob_refcnt != 0) {
assert(op->ob_refcnt > 0);
}
else {
#ifdef Py_TRACE_REFS
_Py_ForgetReference(op);
#endif
destruct(op);
}
}
static inline void
_Py_DECREF_NO_DEALLOC(PyObject *op)
{
if (_Py_IsImmortal(op)) {
return;
}
_Py_DECREF_STAT_INC();
#ifdef Py_REF_DEBUG
_Py_DEC_REFTOTAL(_PyInterpreterState_GET());
#endif
op->ob_refcnt--;
#ifdef Py_DEBUG
if (op->ob_refcnt <= 0) {
_Py_FatalRefcountError("Expected a positive remaining refcount");
}
#endif
}
#ifdef Py_REF_DEBUG
# undef _Py_DEC_REFTOTAL
#endif
PyAPI_FUNC(int) _PyType_CheckConsistency(PyTypeObject *type);
PyAPI_FUNC(int) _PyDict_CheckConsistency(PyObject *mp, int check_content);
/* Update the Python traceback of an object. This function must be called
when a memory block is reused from a free list.
Internal function called by _Py_NewReference(). */
extern int _PyTraceMalloc_NewReference(PyObject *op);
// Fast inlined version of PyType_HasFeature()
static inline int
_PyType_HasFeature(PyTypeObject *type, unsigned long feature) {
return ((type->tp_flags & feature) != 0);
}
extern void _PyType_InitCache(PyInterpreterState *interp);
extern void _PyObject_InitState(PyInterpreterState *interp);
/* Inline functions trading binary compatibility for speed:
_PyObject_Init() is the fast version of PyObject_Init(), and
_PyObject_InitVar() is the fast version of PyObject_InitVar().
These inline functions must not be called with op=NULL. */
static inline void
_PyObject_Init(PyObject *op, PyTypeObject *typeobj)
{
assert(op != NULL);
Py_SET_TYPE(op, typeobj);
if (_PyType_HasFeature(typeobj, Py_TPFLAGS_HEAPTYPE)) {
Py_INCREF(typeobj);
}
_Py_NewReference(op);
}
static inline void
_PyObject_InitVar(PyVarObject *op, PyTypeObject *typeobj, Py_ssize_t size)
{
assert(op != NULL);
assert(typeobj != &PyLong_Type);
_PyObject_Init((PyObject *)op, typeobj);
Py_SET_SIZE(op, size);
}
/* Tell the GC to track this object.
*
* The object must not be tracked by the GC.
*
* NB: While the object is tracked by the collector, it must be safe to call
* its tp_traverse method.
*
* Internal note: interp->gc.generation0->_gc_prev doesn't have any bit flags
* because it's not an object header. So we don't use _PyGCHead_PREV() and
* _PyGCHead_SET_PREV() for it to avoid unnecessary bitwise operations.
*
* See also the public PyObject_GC_Track() function.
*/
static inline void _PyObject_GC_TRACK(
// The preprocessor removes _PyObject_ASSERT_FROM() calls if NDEBUG is defined
#ifndef NDEBUG
const char *filename, int lineno,
#endif
PyObject *op)
{
_PyObject_ASSERT_FROM(op, !_PyObject_GC_IS_TRACKED(op),
"object already tracked by the garbage collector",
filename, lineno, __func__);
PyGC_Head *gc = _Py_AS_GC(op);
_PyObject_ASSERT_FROM(op,
(gc->_gc_prev & _PyGC_PREV_MASK_COLLECTING) == 0,
"object is in generation which is garbage collected",
filename, lineno, __func__);
PyInterpreterState *interp = _PyInterpreterState_GET();
PyGC_Head *generation0 = interp->gc.generation0;
PyGC_Head *last = (PyGC_Head*)(generation0->_gc_prev);
_PyGCHead_SET_NEXT(last, gc);
_PyGCHead_SET_PREV(gc, last);
_PyGCHead_SET_NEXT(gc, generation0);
generation0->_gc_prev = (uintptr_t)gc;
}
/* Tell the GC to stop tracking this object.
*
* Internal note: This may be called while GC. So _PyGC_PREV_MASK_COLLECTING
* must be cleared. But _PyGC_PREV_MASK_FINALIZED bit is kept.
*
* The object must be tracked by the GC.
*
* See also the public PyObject_GC_UnTrack(), which accepts an object that is
* not tracked.
*/
static inline void _PyObject_GC_UNTRACK(
// The preprocessor removes _PyObject_ASSERT_FROM() calls if NDEBUG is defined
#ifndef NDEBUG
const char *filename, int lineno,
#endif
PyObject *op)
{
_PyObject_ASSERT_FROM(op, _PyObject_GC_IS_TRACKED(op),
"object not tracked by the garbage collector",
filename, lineno, __func__);
PyGC_Head *gc = _Py_AS_GC(op);
PyGC_Head *prev = _PyGCHead_PREV(gc);
PyGC_Head *next = _PyGCHead_NEXT(gc);
_PyGCHead_SET_NEXT(prev, next);
_PyGCHead_SET_PREV(next, prev);
gc->_gc_next = 0;
gc->_gc_prev &= _PyGC_PREV_MASK_FINALIZED;
}
// Macros to accept any type for the parameter, and to automatically pass
// the filename and the line number (if NDEBUG is not defined) where the
// macro is called.
#ifdef NDEBUG
# define _PyObject_GC_TRACK(op) \
_PyObject_GC_TRACK(_PyObject_CAST(op))
# define _PyObject_GC_UNTRACK(op) \
_PyObject_GC_UNTRACK(_PyObject_CAST(op))
#else
# define _PyObject_GC_TRACK(op) \
_PyObject_GC_TRACK(__FILE__, __LINE__, _PyObject_CAST(op))
# define _PyObject_GC_UNTRACK(op) \
_PyObject_GC_UNTRACK(__FILE__, __LINE__, _PyObject_CAST(op))
#endif
#ifdef Py_REF_DEBUG
extern void _PyInterpreterState_FinalizeRefTotal(PyInterpreterState *);
extern void _Py_FinalizeRefTotal(_PyRuntimeState *);
extern void _PyDebug_PrintTotalRefs(void);
#endif
#ifdef Py_TRACE_REFS
extern void _Py_AddToAllObjects(PyObject *op, int force);
extern void _Py_PrintReferences(PyInterpreterState *, FILE *);
extern void _Py_PrintReferenceAddresses(PyInterpreterState *, FILE *);
#endif
/* Return the *address* of the object's weaklist. The address may be
* dereferenced to get the current head of the weaklist. This is useful
* for iterating over the linked list of weakrefs, especially when the
* list is being modified externally (e.g. refs getting removed).
*
* The returned pointer should not be used to change the head of the list
* nor should it be used to add, remove, or swap any refs in the list.
* That is the sole responsibility of the code in weakrefobject.c.
*/
static inline PyObject **
_PyObject_GET_WEAKREFS_LISTPTR(PyObject *op)
{
if (PyType_Check(op) &&
((PyTypeObject *)op)->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) {
PyInterpreterState *interp = _PyInterpreterState_GET();
static_builtin_state *state = _PyStaticType_GetState(
interp, (PyTypeObject *)op);
return _PyStaticType_GET_WEAKREFS_LISTPTR(state);
}
// Essentially _PyObject_GET_WEAKREFS_LISTPTR_FROM_OFFSET():
Py_ssize_t offset = Py_TYPE(op)->tp_weaklistoffset;
return (PyObject **)((char *)op + offset);
}
/* This is a special case of _PyObject_GET_WEAKREFS_LISTPTR().
* Only the most fundamental lookup path is used.
* Consequently, static types should not be used.
*
* For static builtin types the returned pointer will always point
* to a NULL tp_weaklist. This is fine for any deallocation cases,
* since static types are never deallocated and static builtin types
* are only finalized at the end of runtime finalization.
*
* If the weaklist for static types is actually needed then use
* _PyObject_GET_WEAKREFS_LISTPTR().
*/
static inline PyWeakReference **
_PyObject_GET_WEAKREFS_LISTPTR_FROM_OFFSET(PyObject *op)
{
assert(!PyType_Check(op) ||
((PyTypeObject *)op)->tp_flags & Py_TPFLAGS_HEAPTYPE);
Py_ssize_t offset = Py_TYPE(op)->tp_weaklistoffset;
return (PyWeakReference **)((char *)op + offset);
}
// Fast inlined version of PyObject_IS_GC()
static inline int
_PyObject_IS_GC(PyObject *obj)
{
return (PyType_IS_GC(Py_TYPE(obj))
&& (Py_TYPE(obj)->tp_is_gc == NULL
|| Py_TYPE(obj)->tp_is_gc(obj)));
}
// Fast inlined version of PyType_IS_GC()
#define _PyType_IS_GC(t) _PyType_HasFeature((t), Py_TPFLAGS_HAVE_GC)
static inline size_t
_PyType_PreHeaderSize(PyTypeObject *tp)
{
return _PyType_IS_GC(tp) * sizeof(PyGC_Head) +
_PyType_HasFeature(tp, Py_TPFLAGS_PREHEADER) * 2 * sizeof(PyObject *);
}
void _PyObject_GC_Link(PyObject *op);
// Usage: assert(_Py_CheckSlotResult(obj, "__getitem__", result != NULL));
extern int _Py_CheckSlotResult(
PyObject *obj,
const char *slot_name,
int success);
// Test if a type supports weak references
static inline int _PyType_SUPPORTS_WEAKREFS(PyTypeObject *type) {
return (type->tp_weaklistoffset != 0);
}
extern PyObject* _PyType_AllocNoTrack(PyTypeObject *type, Py_ssize_t nitems);
extern int _PyObject_InitializeDict(PyObject *obj);
extern int _PyObject_StoreInstanceAttribute(PyObject *obj, PyDictValues *values,
PyObject *name, PyObject *value);
PyObject * _PyObject_GetInstanceAttribute(PyObject *obj, PyDictValues *values,
PyObject *name);
typedef union {
PyObject *dict;
/* Use a char* to generate a warning if directly assigning a PyDictValues */
char *values;
} PyDictOrValues;
static inline PyDictOrValues *
_PyObject_DictOrValuesPointer(PyObject *obj)
{
assert(Py_TYPE(obj)->tp_flags & Py_TPFLAGS_MANAGED_DICT);
return ((PyDictOrValues *)obj)-3;
}
static inline int
_PyDictOrValues_IsValues(PyDictOrValues dorv)
{
return ((uintptr_t)dorv.values) & 1;
}
static inline PyDictValues *
_PyDictOrValues_GetValues(PyDictOrValues dorv)
{
assert(_PyDictOrValues_IsValues(dorv));
return (PyDictValues *)(dorv.values + 1);
}
static inline PyObject *
_PyDictOrValues_GetDict(PyDictOrValues dorv)
{
assert(!_PyDictOrValues_IsValues(dorv));
return dorv.dict;
}
static inline void
_PyDictOrValues_SetValues(PyDictOrValues *ptr, PyDictValues *values)
{
ptr->values = ((char *)values) - 1;
}
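/* Tag-bit arithmetic note: PyObject pointers are at least 2-byte aligned, so
a dict pointer stored as-is has its low bit clear, while
_PyDictOrValues_SetValues() stores ((char *)values) - 1, setting the low
bit. _PyDictOrValues_IsValues() tests that bit, and
_PyDictOrValues_GetValues() adds the 1 back. */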
#define MANAGED_WEAKREF_OFFSET (((Py_ssize_t)sizeof(PyObject *))*-4)
extern PyObject ** _PyObject_ComputedDictPointer(PyObject *);
extern void _PyObject_FreeInstanceAttributes(PyObject *obj);
extern int _PyObject_IsInstanceDictEmpty(PyObject *);
PyAPI_FUNC(PyObject *) _PyObject_LookupSpecial(PyObject *, PyObject *);
/* C function call trampolines to mitigate bad function pointer casts.
*
* Typical native ABIs ignore additional arguments or fill in missing
* values with 0/NULL in function pointer casts. Compilers do not show
* warnings when a function pointer is explicitly cast to an
* incompatible type.
*
* Bad fpcasts are an issue in WebAssembly. WASM's indirect_call has strict
* function signature checks. Argument count, types, and return type must
* match.
*
* Third party code unintentionally relies on problematic fpcasts. The call
* trampoline mitigates common occurrences of bad fpcasts on Emscripten.
*/
#if defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE)
#define _PyCFunction_TrampolineCall(meth, self, args) \
_PyCFunctionWithKeywords_TrampolineCall( \
(*(PyCFunctionWithKeywords)(void(*)(void))(meth)), (self), (args), NULL)
extern PyObject* _PyCFunctionWithKeywords_TrampolineCall(
PyCFunctionWithKeywords meth, PyObject *, PyObject *, PyObject *);
#else
#define _PyCFunction_TrampolineCall(meth, self, args) \
(meth)((self), (args))
#define _PyCFunctionWithKeywords_TrampolineCall(meth, self, args, kw) \
(meth)((self), (args), (kw))
#endif // __EMSCRIPTEN__ && PY_CALL_TRAMPOLINE
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_OBJECT_H */

View File

@@ -0,0 +1,36 @@
#ifndef Py_INTERNAL_OBJECT_STATE_H
#define Py_INTERNAL_OBJECT_STATE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
struct _py_object_runtime_state {
#ifdef Py_REF_DEBUG
Py_ssize_t interpreter_leaks;
#endif
int _not_used;
};
struct _py_object_state {
#ifdef Py_REF_DEBUG
Py_ssize_t reftotal;
#endif
#ifdef Py_TRACE_REFS
/* Head of circular doubly-linked list of all objects. These are linked
* together via the _ob_prev and _ob_next members of a PyObject, which
* exist only in a Py_TRACE_REFS build.
*/
PyObject refchain;
#endif
int _not_used;
};
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_OBJECT_STATE_H */

View File

@@ -0,0 +1,698 @@
#ifndef Py_INTERNAL_OBMALLOC_H
#define Py_INTERNAL_OBMALLOC_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
typedef unsigned int pymem_uint; /* assuming >= 16 bits */
#undef uint
#define uint pymem_uint
/* An object allocator for Python.
Here is an introduction to the layers of the Python memory architecture,
showing where the object allocator is actually used (layer +2). It is
called for every object allocation and deallocation (PyObject_New/Del),
unless the object-specific allocators implement a proprietary allocation
scheme (ex.: ints use a simple free list). This is also the place where
the cyclic garbage collector operates selectively on container objects.
    Object-specific allocators
        _____   ______   ______       ________
       [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
    +3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
        _______________________________       |                           |
       [   Python's object allocator   ]      |                           |
    +2 | ####### Object memory ####### | <------ Internal buffers ------> |
        ______________________________________________________________    |
       [          Python's raw memory allocator (PyMem_ API)          ]   |
    +1 | <----- Python memory (under PyMem manager's control) ------> |   |
        __________________________________________________________________
       [    Underlying general-purpose allocator (ex: C library malloc)   ]
     0 | <------ Virtual memory allocated for the python process -------> |

       =========================================================================
        _______________________________________________________________________
       [                OS-specific Virtual Memory Manager (VMM)               ]
    -1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
        __________________________________   __________________________________
       [                                  ] [                                  ]
    -2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |
*/
/*==========================================================================*/
/* A fast, special-purpose memory allocator for small blocks, to be used
on top of a general-purpose malloc -- heavily based on previous art. */
/* Vladimir Marangozov -- August 2000 */
/*
* "Memory management is where the rubber meets the road -- if we do the wrong
* thing at any level, the results will not be good. And if we don't make the
* levels work well together, we are in serious trouble." (1)
*
* (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
* "Dynamic Storage Allocation: A Survey and Critical Review",
* in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
*/
/* #undef WITH_MEMORY_LIMITS */ /* disable mem limit checks */
/*==========================================================================*/
/*
* Allocation strategy abstract:
*
* For small requests, the allocator sub-allocates <Big> blocks of memory.
* Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
* system's allocator.
*
* Small requests are grouped in size classes spaced ALIGNMENT bytes apart
* (8 on 32-bit builds, 16 on 64-bit builds), due to the required valid
* alignment of the returned address. Requests of a particular size are
* serviced from memory pools of POOL_SIZE bytes.
* Pools are fragmented on demand and contain free lists of blocks of one
* particular size class. In other words, there is a fixed-size allocator
* for each size class. Free pools are shared by the different allocators
* thus minimizing the space reserved for a particular size class.
*
* This allocation strategy is a variant of what is known as "simple
* segregated storage based on array of free lists". The main drawback of
* simple segregated storage is that we might end up with a lot of reserved
* memory for the different free lists, which can degenerate over time. To avoid
* this, we partition each free list in pools and we share dynamically the
* reserved space between all free lists. This technique is quite efficient
* for memory intensive programs which allocate mainly small-sized blocks.
*
* For small requests (shown here for 8-byte alignment) we have the following table:
*
* Request in bytes Size of allocated block Size class idx
* ----------------------------------------------------------------
* 1-8 8 0
* 9-16 16 1
* 17-24 24 2
* 25-32 32 3
* 33-40 40 4
* 41-48 48 5
* 49-56 56 6
* 57-64 64 7
* 65-72 72 8
* ... ... ...
* 497-504 504 62
* 505-512 512 63
*
* 0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying
* allocator.
*/
/*==========================================================================*/
/*
* -- Main tunable settings section --
*/
/*
* Alignment of addresses returned to the user. 8-bytes alignment works
* on most current architectures (with 32-bit or 64-bit address buses).
* The alignment value is also used for grouping small requests in size
* classes spaced ALIGNMENT bytes apart.
*
* You shouldn't change this unless you know what you are doing.
*/
#if SIZEOF_VOID_P > 4
#define ALIGNMENT 16 /* must be 2^N */
#define ALIGNMENT_SHIFT 4
#else
#define ALIGNMENT 8 /* must be 2^N */
#define ALIGNMENT_SHIFT 3
#endif
/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((pymem_uint)(I) + 1) << ALIGNMENT_SHIFT)
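/* Editor's sketch (illustration only): how a request size maps to a size
* class index, and how INDEX2SIZE() maps it back. With ALIGNMENT == 16, a
* 20-byte request falls in class 1 and is served from 32-byte blocks.
*/
#if 0
static inline pymem_uint
demo_size_class_idx(size_t nbytes)      /* assumes 0 < nbytes <= 512 */
{
    pymem_uint idx = (pymem_uint)((nbytes - 1) >> ALIGNMENT_SHIFT);
    assert(INDEX2SIZE(idx) >= nbytes);  /* the block is large enough */
    return idx;
}
#endif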
/*
* Max size threshold below which malloc requests are considered to be
* small enough in order to use preallocated memory pools. You can tune
* this value according to your application behaviour and memory needs.
*
* Note: a size threshold of 512 guarantees that newly created dictionaries
* will be allocated from preallocated memory pools on 64-bit.
*
* The following invariants must hold:
* 1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512
* 2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
*
* Although not required, for better performance and space efficiency,
* it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
*/
#define SMALL_REQUEST_THRESHOLD 512
#define NB_SMALL_SIZE_CLASSES (SMALL_REQUEST_THRESHOLD / ALIGNMENT)
/*
* The system's VMM page size can be obtained on most unices with a
* getpagesize() call or deduced from various header files. To make
* things simpler, we assume that it is 4K, which is OK for most systems.
* It is probably better if this is the native page size, but it doesn't
* have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
* size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
* violation fault. 4K is apparently OK for all the platforms that Python
* currently targets.
*/
#define SYSTEM_PAGE_SIZE (4 * 1024)
/*
* Maximum amount of memory managed by the allocator for small requests.
*/
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MB -- more? */
#endif
#endif
#if !defined(WITH_PYMALLOC_RADIX_TREE)
/* Use radix-tree to track arena memory regions, for address_in_range().
* Enabled by default since it allows larger pool sizes. Can be disabled
* using -DWITH_PYMALLOC_RADIX_TREE=0 */
#define WITH_PYMALLOC_RADIX_TREE 1
#endif
#if SIZEOF_VOID_P > 4
/* on 64-bit platforms use larger pools and arenas if we can */
#define USE_LARGE_ARENAS
#if WITH_PYMALLOC_RADIX_TREE
/* large pools only supported if radix-tree is enabled */
#define USE_LARGE_POOLS
#endif
#endif
/*
* The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
* on a page boundary. This is a reserved virtual address space for the
* current process (obtained through a malloc()/mmap() call). In no way does
* this mean that the memory arenas will be used entirely. A malloc(<Big>) is
* usually an address range reservation for <Big> bytes, unless all pages within
* this space are referenced subsequently. So malloc'ing big blocks and not
* using them does not mean "wasting memory". It's an addressable range
* wastage...
*
* Arenas are allocated with mmap() on systems supporting anonymous memory
* mappings to reduce heap fragmentation.
*/
#ifdef USE_LARGE_ARENAS
#define ARENA_BITS 20 /* 1 MiB */
#else
#define ARENA_BITS 18 /* 256 KiB */
#endif
#define ARENA_SIZE (1 << ARENA_BITS)
#define ARENA_SIZE_MASK (ARENA_SIZE - 1)
#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif
/*
* Size of the pools used for small blocks. Must be a power of 2.
*/
#ifdef USE_LARGE_POOLS
#define POOL_BITS 14 /* 16 KiB */
#else
#define POOL_BITS 12 /* 4 KiB */
#endif
#define POOL_SIZE (1 << POOL_BITS)
#define POOL_SIZE_MASK (POOL_SIZE - 1)
#if !WITH_PYMALLOC_RADIX_TREE
#if POOL_SIZE != SYSTEM_PAGE_SIZE
# error "pool size must be equal to system page size"
#endif
#endif
#define MAX_POOLS_IN_ARENA (ARENA_SIZE / POOL_SIZE)
#if MAX_POOLS_IN_ARENA * POOL_SIZE != ARENA_SIZE
# error "arena size not an exact multiple of pool size"
#endif
/*
* -- End of tunable settings section --
*/
/*==========================================================================*/
/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uint8_t pymem_block;
/* Pool for small blocks. */
struct pool_header {
union { pymem_block *_padding;
uint count; } ref; /* number of allocated blocks */
pymem_block *freeblock; /* pool's free list head */
struct pool_header *nextpool; /* next pool of this size class */
struct pool_header *prevpool; /* previous pool "" */
uint arenaindex; /* index into arenas of base adr */
uint szidx; /* block size class index */
uint nextoffset; /* bytes to virgin block */
uint maxnextoffset; /* largest valid nextoffset */
};
typedef struct pool_header *poolp;
/* Record keeping for arenas. */
struct arena_object {
/* The address of the arena, as returned by malloc. Note that 0
* will never be returned by a successful malloc, and is used
* here to mark an arena_object that doesn't correspond to an
* allocated arena.
*/
uintptr_t address;
/* Pool-aligned pointer to the next pool to be carved off. */
pymem_block* pool_address;
/* The number of available pools in the arena: free pools + never-
* allocated pools.
*/
uint nfreepools;
/* The total number of pools in the arena, whether or not available. */
uint ntotalpools;
/* Singly-linked list of available pools. */
struct pool_header* freepools;
/* Whenever this arena_object is not associated with an allocated
* arena, the nextarena member is used to link all unassociated
* arena_objects in the singly-linked `unused_arena_objects` list.
* The prevarena member is unused in this case.
*
* When this arena_object is associated with an allocated arena
* with at least one available pool, both members are used in the
* doubly-linked `usable_arenas` list, which is maintained in
* increasing order of `nfreepools` values.
*
* Else this arena_object is associated with an allocated arena
* all of whose pools are in use. `nextarena` and `prevarena`
* are both meaningless in this case.
*/
struct arena_object* nextarena;
struct arena_object* prevarena;
};
#define POOL_OVERHEAD _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT)
#define DUMMY_SIZE_IDX 0xffff /* size class of newly cached pools */
/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE))
/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((pymem_uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
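/* Editor's sketch (illustration only): what the two macros above buy us.
* Any pointer into a pool recovers the pool's header by masking off the low
* POOL_BITS of the address, and NUMBLOCKS() is how many blocks of a size
* class fit after the header.
*/
#if 0
static int
demo_same_pool(const void *a, const void *b)
{
    return POOL_ADDR(a) == POOL_ADDR(b);   /* same 2^POOL_BITS region? */
}
#endif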
/*==========================================================================*/
/*
* Pool table -- headed, circular, doubly-linked lists of partially used pools.
This is involved. For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size ALIGNMENT, usedpools[2] to blocks of
size 2*ALIGNMENT, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.
Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed. Once carved off, a pool is in one of three states forever
after:
used == partially used, neither empty nor full
At least one block in the pool is currently allocated, and at least one
block in the pool is not currently allocated (note this implies a pool
has room for at least two blocks).
This is a pool's initial state, as a pool is created only when malloc
needs space.
The pool holds blocks of a fixed size, and is in the circular list headed
at usedpools[i] (see above). It's linked to the other used pools of the
same size class via the pool_header's nextpool and prevpool members.
If all but one block is currently allocated, a malloc can cause a
transition to the full state. If all but one block is not currently
allocated, a free can cause a transition to the empty state.
full == all the pool's blocks are currently allocated
On transition to full, a pool is unlinked from its usedpools[] list.
It's not linked to from anything then anymore, and its nextpool and
prevpool members are meaningless until it transitions back to used.
A free of a block in a full pool puts the pool back in the used state.
Then it's linked in at the front of the appropriate usedpools[] list, so
that the next allocation for its size class will reuse the freed block.
empty == all the pool's blocks are currently available for allocation
On transition to empty, a pool is unlinked from its usedpools[] list,
and linked to the front of its arena_object's singly-linked freepools list,
via its nextpool member. The prevpool member has no meaning in this case.
Empty pools have no inherent size class: the next time a malloc finds
an empty list in usedpools[], it takes the first pool off of freepools.
If the size class needed happens to be the same as the size class the pool
last had, some pool initialization can be skipped.
Block Management
Blocks within pools are again carved out as needed. pool->freeblock points to
the start of a singly-linked list of free blocks within the pool. When a
block is freed, it's inserted at the front of its pool's freeblock list. Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized. Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block. This is consistent with how
pymalloc strives, at all levels (arena, pool, and block), never to touch a
piece of memory until it's actually needed.
So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL. If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks. The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized. All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.
Major obscurity: While the usedpools vector is declared to have poolp
entries, it doesn't really. It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header. The
excruciating initialization code below fools C so that
usedpool[i+i]
"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members. The "- 2*sizeof(pymem_block *)" gibberish is
compensating for that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:
union { pymem_block *_padding;
uint count; } ref;
pymem_block *freeblock;
each of which consumes sizeof(pymem_block *) bytes. So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).
It's unclear why the usedpools setup is so convoluted. It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */
#define OBMALLOC_USED_POOLS_SIZE (2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8)
struct _obmalloc_pools {
poolp used[OBMALLOC_USED_POOLS_SIZE];
};
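/* Editor's sketch (illustration only): the "fudged-up pointer" behaviour
* described above. For size class i, used[i + i] is biased so that, viewed
* as a poolp, its nextpool/prevpool members alias the two array slots; an
* empty circular list is therefore head->nextpool == head.
*/
#if 0
static int
demo_used_list_is_empty(struct _obmalloc_pools *p, unsigned int szidx)
{
    poolp head = p->used[szidx + szidx];   /* see PTA() in the init header */
    return head->nextpool == head;
}
#endif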
/*==========================================================================
Arena management.
`arenas` is a vector of arena_objects. It contains maxarenas entries, some of
which may not be currently used (== they're arena_objects that aren't
currently associated with an allocated arena). Note that arenas proper are
separately malloc'ed.
Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,
we do try to free() arenas, and use some mild heuristic strategies to increase
the likelihood that arenas eventually can be freed.
unused_arena_objects
This is a singly-linked list of the arena_objects that are currently not
being used (no arena is associated with them). Objects are taken off the
head of the list in new_arena(), and are pushed on the head of the list in
PyObject_Free() when the arena is empty. Key invariant: an arena_object
is on this list if and only if its .address member is 0.
usable_arenas
This is a doubly-linked list of the arena_objects associated with arenas
that have pools available. These pools are either waiting to be reused,
or have not been used before. The list is sorted to have the most-
allocated arenas first (ascending order based on the nfreepools member).
This means that the next allocation will come from a heavily used arena,
which gives the nearly empty arenas a chance to be returned to the system.
In my unscientific tests this dramatically improved the number of arenas
that could be freed.
Note that an arena_object associated with an arena all of whose pools are
currently in use isn't on either list.
Changed in Python 3.8: keeping usable_arenas sorted by number of free pools
used to be done by one-at-a-time linear search when an arena's number of
free pools changed. That could, overall, consume time quadratic in the
number of arenas. That didn't really matter when there were only a few
hundred arenas (typical!), but could be a timing disaster when there were
hundreds of thousands. See bpo-37029.
Now we have a vector of "search fingers" to eliminate the need to search:
nfp2lasta[nfp] returns the last ("rightmost") arena in usable_arenas
with nfp free pools. This is NULL if and only if there is no arena with
nfp free pools in usable_arenas.
*/
/* How many arena_objects do we initially allocate?
* 16 = can allocate 16 arenas = 16 * ARENA_SIZE (4 MiB with 256 KiB arenas,
* 16 MiB with 1 MiB arenas) before growing the `arenas` vector.
*/
#define INITIAL_ARENA_OBJECTS 16
struct _obmalloc_mgmt {
/* Array of objects used to track chunks of memory (arenas). */
struct arena_object* arenas;
/* Number of slots currently allocated in the `arenas` vector. */
uint maxarenas;
/* The head of the singly-linked, NULL-terminated list of available
* arena_objects.
*/
struct arena_object* unused_arena_objects;
/* The head of the doubly-linked, NULL-terminated at each end, list of
* arena_objects associated with arenas that have pools available.
*/
struct arena_object* usable_arenas;
/* nfp2lasta[nfp] is the last arena in usable_arenas with nfp free pools */
struct arena_object* nfp2lasta[MAX_POOLS_IN_ARENA + 1];
/* Number of arenas allocated that haven't been free()'d. */
size_t narenas_currently_allocated;
/* Total number of times malloc() called to allocate an arena. */
size_t ntimes_arena_allocated;
/* High water mark (max value ever seen) for narenas_currently_allocated. */
size_t narenas_highwater;
Py_ssize_t raw_allocated_blocks;
};
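/* Editor's sketch (illustration only): the nfp2lasta invariant spelled out.
* nfp2lasta[nfp] is the rightmost arena in usable_arenas with nfp free pools,
* or NULL when no such arena exists; this is what lets the allocator keep
* usable_arenas sorted without a linear search.
*/
#if 0
static struct arena_object *
demo_last_arena_with(struct _obmalloc_mgmt *m, uint nfp)
{
    assert(nfp <= MAX_POOLS_IN_ARENA);
    return m->nfp2lasta[nfp];   /* NULL <=> no arena has nfp free pools */
}
#endif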
#if WITH_PYMALLOC_RADIX_TREE
/*==========================================================================*/
/* radix tree for tracking arena usage. If enabled, used to implement
address_in_range().
memory address bit allocation for keys
64-bit pointers, IGNORE_BITS=0 and 2^20 arena size:
15 -> MAP_TOP_BITS
15 -> MAP_MID_BITS
14 -> MAP_BOT_BITS
20 -> ideal aligned arena
----
64
64-bit pointers, IGNORE_BITS=16, and 2^20 arena size:
16 -> IGNORE_BITS
10 -> MAP_TOP_BITS
10 -> MAP_MID_BITS
8 -> MAP_BOT_BITS
20 -> ideal aligned arena
----
64
32-bit pointers and 2^18 arena size:
14 -> MAP_BOT_BITS
18 -> ideal aligned arena
----
32
*/
#if SIZEOF_VOID_P == 8
/* number of bits in a pointer */
#define POINTER_BITS 64
/* High bits of memory addresses that will be ignored when indexing into the
* radix tree. Setting this to zero is the safe default. For most 64-bit
* machines, setting this to 16 would be safe. The kernel would not give
* user-space virtual memory addresses that have significant information in
* those high bits. The main advantage to setting IGNORE_BITS > 0 is that less
* virtual memory will be used for the top and middle radix tree arrays. Those
* arrays are allocated in the BSS segment and so will typically consume real
* memory only if actually accessed.
*/
#define IGNORE_BITS 0
/* use the top and mid layers of the radix tree */
#define USE_INTERIOR_NODES
#elif SIZEOF_VOID_P == 4
#define POINTER_BITS 32
#define IGNORE_BITS 0
#else
/* Currently this code works for 64-bit or 32-bit pointers only. */
#error "obmalloc radix tree requires 64-bit or 32-bit pointers."
#endif /* SIZEOF_VOID_P */
/* arena_coverage_t members require this to be true */
#if ARENA_BITS >= 32
# error "arena size must be < 2^32"
#endif
/* the lower bits of the address that are not ignored */
#define ADDRESS_BITS (POINTER_BITS - IGNORE_BITS)
#ifdef USE_INTERIOR_NODES
/* number of bits used for MAP_TOP and MAP_MID nodes */
#define INTERIOR_BITS ((ADDRESS_BITS - ARENA_BITS + 2) / 3)
#else
#define INTERIOR_BITS 0
#endif
#define MAP_TOP_BITS INTERIOR_BITS
#define MAP_TOP_LENGTH (1 << MAP_TOP_BITS)
#define MAP_TOP_MASK (MAP_TOP_LENGTH - 1)
#define MAP_MID_BITS INTERIOR_BITS
#define MAP_MID_LENGTH (1 << MAP_MID_BITS)
#define MAP_MID_MASK (MAP_MID_LENGTH - 1)
#define MAP_BOT_BITS (ADDRESS_BITS - ARENA_BITS - 2*INTERIOR_BITS)
#define MAP_BOT_LENGTH (1 << MAP_BOT_BITS)
#define MAP_BOT_MASK (MAP_BOT_LENGTH - 1)
#define MAP_BOT_SHIFT ARENA_BITS
#define MAP_MID_SHIFT (MAP_BOT_BITS + MAP_BOT_SHIFT)
#define MAP_TOP_SHIFT (MAP_MID_BITS + MAP_MID_SHIFT)
#define AS_UINT(p) ((uintptr_t)(p))
#define MAP_BOT_INDEX(p) ((AS_UINT(p) >> MAP_BOT_SHIFT) & MAP_BOT_MASK)
#define MAP_MID_INDEX(p) ((AS_UINT(p) >> MAP_MID_SHIFT) & MAP_MID_MASK)
#define MAP_TOP_INDEX(p) ((AS_UINT(p) >> MAP_TOP_SHIFT) & MAP_TOP_MASK)
#if IGNORE_BITS > 0
/* Return the ignored part of the pointer address. Those bits should be the
* same for all valid pointers if IGNORE_BITS is set correctly.
*/
#define HIGH_BITS(p) (AS_UINT(p) >> ADDRESS_BITS)
#else
#define HIGH_BITS(p) 0
#endif
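/* Editor's sketch (illustration only): decomposing an address into radix
* tree indices. With 64-bit pointers, IGNORE_BITS == 0 and ARENA_BITS == 20,
* the top 15/15/14 bits pick the top, mid and bottom nodes and the low
* 20 bits are the offset inside an ideally aligned arena.
*/
#if 0
static void
demo_map_indices(const void *p)
{
    uintptr_t top = MAP_TOP_INDEX(p);   /* index into the root node */
    uintptr_t mid = MAP_MID_INDEX(p);   /* index into a mid node */
    uintptr_t bot = MAP_BOT_INDEX(p);   /* index into a bottom node */
    (void)top; (void)mid; (void)bot;
}
#endif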
/* This is the leaf of the radix tree. See arena_map_mark_used() for the
* meaning of these members. */
typedef struct {
int32_t tail_hi;
int32_t tail_lo;
} arena_coverage_t;
typedef struct arena_map_bot {
/* The members tail_hi and tail_lo are accessed together, so it is
* better to have them as an array of structs rather than as two
* arrays.
*/
arena_coverage_t arenas[MAP_BOT_LENGTH];
} arena_map_bot_t;
#ifdef USE_INTERIOR_NODES
typedef struct arena_map_mid {
struct arena_map_bot *ptrs[MAP_MID_LENGTH];
} arena_map_mid_t;
typedef struct arena_map_top {
struct arena_map_mid *ptrs[MAP_TOP_LENGTH];
} arena_map_top_t;
#endif
struct _obmalloc_usage {
/* The root of the radix tree. Note that by initializing it like this, the memory
* should be in the BSS. The OS will only memory map pages as the MAP_MID
* nodes get used (OS pages are demand loaded as needed).
*/
#ifdef USE_INTERIOR_NODES
arena_map_top_t arena_map_root;
/* accounting for number of used interior nodes */
int arena_map_mid_count;
int arena_map_bot_count;
#else
arena_map_bot_t arena_map_root;
#endif
};
#endif /* WITH_PYMALLOC_RADIX_TREE */
struct _obmalloc_global_state {
int dump_debug_stats;
Py_ssize_t interpreter_leaks;
};
struct _obmalloc_state {
struct _obmalloc_pools pools;
struct _obmalloc_mgmt mgmt;
struct _obmalloc_usage usage;
};
#undef uint
/* Allocate memory directly from the O/S virtual memory system,
* where supported. Otherwise fall back on malloc(). */
void *_PyObject_VirtualAlloc(size_t size);
void _PyObject_VirtualFree(void *, size_t size);
/* This function returns the number of allocated memory blocks, regardless of size */
extern Py_ssize_t _Py_GetGlobalAllocatedBlocks(void);
#define _Py_GetAllocatedBlocks() \
_Py_GetGlobalAllocatedBlocks()
extern Py_ssize_t _PyInterpreterState_GetAllocatedBlocks(PyInterpreterState *);
extern void _PyInterpreterState_FinalizeAllocatedBlocks(PyInterpreterState *);
#ifdef WITH_PYMALLOC
// Export the symbol for the 3rd party guppy3 project
PyAPI_FUNC(int) _PyObject_DebugMallocStats(FILE *out);
#endif
#ifdef __cplusplus
}
#endif
#endif // !Py_INTERNAL_OBMALLOC_H

View File

@@ -0,0 +1,73 @@
#ifndef Py_INTERNAL_OBMALLOC_INIT_H
#define Py_INTERNAL_OBMALLOC_INIT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/****************************************************/
/* the default object allocator's state initializer */
#define PTA(pools, x) \
((poolp )((uint8_t *)&(pools.used[2*(x)]) - 2*sizeof(pymem_block *)))
#define PT(p, x) PTA(p, x), PTA(p, x)
#define PT_8(p, start) \
PT(p, start), \
PT(p, start+1), \
PT(p, start+2), \
PT(p, start+3), \
PT(p, start+4), \
PT(p, start+5), \
PT(p, start+6), \
PT(p, start+7)
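/* Editor's note (illustration only): PTA(pools, x) backs the address of
* pools.used[2*x] up by 2*sizeof(pymem_block *) bytes, exactly the size of a
* pool_header's first two members (ref and freeblock). Viewed as a poolp,
* the result's nextpool and prevpool members therefore land on
* pools.used[2*x] and pools.used[2*x + 1]; PT() stores the same biased
* pointer in both slots, yielding the empty circular list the obmalloc
* header describes.
*/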
#if NB_SMALL_SIZE_CLASSES <= 8
# define _obmalloc_pools_INIT(p) \
{ PT_8(p, 0) }
#elif NB_SMALL_SIZE_CLASSES <= 16
# define _obmalloc_pools_INIT(p) \
{ PT_8(p, 0), PT_8(p, 8) }
#elif NB_SMALL_SIZE_CLASSES <= 24
# define _obmalloc_pools_INIT(p) \
{ PT_8(p, 0), PT_8(p, 8), PT_8(p, 16) }
#elif NB_SMALL_SIZE_CLASSES <= 32
# define _obmalloc_pools_INIT(p) \
{ PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24) }
#elif NB_SMALL_SIZE_CLASSES <= 40
# define _obmalloc_pools_INIT(p) \
{ PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32) }
#elif NB_SMALL_SIZE_CLASSES <= 48
# define _obmalloc_pools_INIT(p) \
{ PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32), PT_8(p, 40) }
#elif NB_SMALL_SIZE_CLASSES <= 56
# define _obmalloc_pools_INIT(p) \
{ PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32), PT_8(p, 40), PT_8(p, 48) }
#elif NB_SMALL_SIZE_CLASSES <= 64
# define _obmalloc_pools_INIT(p) \
{ PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32), PT_8(p, 40), PT_8(p, 48), PT_8(p, 56) }
#else
# error "NB_SMALL_SIZE_CLASSES should be less than 64"
#endif
#define _obmalloc_global_state_INIT \
{ \
.dump_debug_stats = -1, \
}
#define _obmalloc_state_INIT(obmalloc) \
{ \
.pools = { \
.used = _obmalloc_pools_INIT(obmalloc.pools), \
}, \
}
#ifdef __cplusplus
}
#endif
#endif // !Py_INTERNAL_OBMALLOC_INIT_H

View File

@@ -0,0 +1,587 @@
// Auto-generated by Tools/build/generate_opcode_h.py from Lib/opcode.py
#ifndef Py_INTERNAL_OPCODE_H
#define Py_INTERNAL_OPCODE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "opcode.h"
extern const uint32_t _PyOpcode_Jump[9];
extern const uint8_t _PyOpcode_Caches[256];
extern const uint8_t _PyOpcode_Deopt[256];
#ifdef NEED_OPCODE_TABLES
const uint32_t _PyOpcode_Jump[9] = {
0U,
0U,
536870912U,
135020544U,
4163U,
0U,
0U,
0U,
48U,
};
const uint8_t _PyOpcode_Caches[256] = {
[BINARY_SUBSCR] = 1,
[STORE_SUBSCR] = 1,
[UNPACK_SEQUENCE] = 1,
[FOR_ITER] = 1,
[STORE_ATTR] = 4,
[LOAD_ATTR] = 9,
[COMPARE_OP] = 1,
[LOAD_GLOBAL] = 4,
[BINARY_OP] = 1,
[SEND] = 1,
[LOAD_SUPER_ATTR] = 1,
[CALL] = 3,
};
const uint8_t _PyOpcode_Deopt[256] = {
[BEFORE_ASYNC_WITH] = BEFORE_ASYNC_WITH,
[BEFORE_WITH] = BEFORE_WITH,
[BINARY_OP] = BINARY_OP,
[BINARY_OP_ADD_FLOAT] = BINARY_OP,
[BINARY_OP_ADD_INT] = BINARY_OP,
[BINARY_OP_ADD_UNICODE] = BINARY_OP,
[BINARY_OP_INPLACE_ADD_UNICODE] = BINARY_OP,
[BINARY_OP_MULTIPLY_FLOAT] = BINARY_OP,
[BINARY_OP_MULTIPLY_INT] = BINARY_OP,
[BINARY_OP_SUBTRACT_FLOAT] = BINARY_OP,
[BINARY_OP_SUBTRACT_INT] = BINARY_OP,
[BINARY_SLICE] = BINARY_SLICE,
[BINARY_SUBSCR] = BINARY_SUBSCR,
[BINARY_SUBSCR_DICT] = BINARY_SUBSCR,
[BINARY_SUBSCR_GETITEM] = BINARY_SUBSCR,
[BINARY_SUBSCR_LIST_INT] = BINARY_SUBSCR,
[BINARY_SUBSCR_TUPLE_INT] = BINARY_SUBSCR,
[BUILD_CONST_KEY_MAP] = BUILD_CONST_KEY_MAP,
[BUILD_LIST] = BUILD_LIST,
[BUILD_MAP] = BUILD_MAP,
[BUILD_SET] = BUILD_SET,
[BUILD_SLICE] = BUILD_SLICE,
[BUILD_STRING] = BUILD_STRING,
[BUILD_TUPLE] = BUILD_TUPLE,
[CACHE] = CACHE,
[CALL] = CALL,
[CALL_BOUND_METHOD_EXACT_ARGS] = CALL,
[CALL_BUILTIN_CLASS] = CALL,
[CALL_BUILTIN_FAST_WITH_KEYWORDS] = CALL,
[CALL_FUNCTION_EX] = CALL_FUNCTION_EX,
[CALL_INTRINSIC_1] = CALL_INTRINSIC_1,
[CALL_INTRINSIC_2] = CALL_INTRINSIC_2,
[CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = CALL,
[CALL_NO_KW_BUILTIN_FAST] = CALL,
[CALL_NO_KW_BUILTIN_O] = CALL,
[CALL_NO_KW_ISINSTANCE] = CALL,
[CALL_NO_KW_LEN] = CALL,
[CALL_NO_KW_LIST_APPEND] = CALL,
[CALL_NO_KW_METHOD_DESCRIPTOR_FAST] = CALL,
[CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS] = CALL,
[CALL_NO_KW_METHOD_DESCRIPTOR_O] = CALL,
[CALL_NO_KW_STR_1] = CALL,
[CALL_NO_KW_TUPLE_1] = CALL,
[CALL_NO_KW_TYPE_1] = CALL,
[CALL_PY_EXACT_ARGS] = CALL,
[CALL_PY_WITH_DEFAULTS] = CALL,
[CHECK_EG_MATCH] = CHECK_EG_MATCH,
[CHECK_EXC_MATCH] = CHECK_EXC_MATCH,
[CLEANUP_THROW] = CLEANUP_THROW,
[COMPARE_OP] = COMPARE_OP,
[COMPARE_OP_FLOAT] = COMPARE_OP,
[COMPARE_OP_INT] = COMPARE_OP,
[COMPARE_OP_STR] = COMPARE_OP,
[CONTAINS_OP] = CONTAINS_OP,
[COPY] = COPY,
[COPY_FREE_VARS] = COPY_FREE_VARS,
[DELETE_ATTR] = DELETE_ATTR,
[DELETE_DEREF] = DELETE_DEREF,
[DELETE_FAST] = DELETE_FAST,
[DELETE_GLOBAL] = DELETE_GLOBAL,
[DELETE_NAME] = DELETE_NAME,
[DELETE_SUBSCR] = DELETE_SUBSCR,
[DICT_MERGE] = DICT_MERGE,
[DICT_UPDATE] = DICT_UPDATE,
[END_ASYNC_FOR] = END_ASYNC_FOR,
[END_FOR] = END_FOR,
[END_SEND] = END_SEND,
[EXTENDED_ARG] = EXTENDED_ARG,
[FORMAT_VALUE] = FORMAT_VALUE,
[FOR_ITER] = FOR_ITER,
[FOR_ITER_GEN] = FOR_ITER,
[FOR_ITER_LIST] = FOR_ITER,
[FOR_ITER_RANGE] = FOR_ITER,
[FOR_ITER_TUPLE] = FOR_ITER,
[GET_AITER] = GET_AITER,
[GET_ANEXT] = GET_ANEXT,
[GET_AWAITABLE] = GET_AWAITABLE,
[GET_ITER] = GET_ITER,
[GET_LEN] = GET_LEN,
[GET_YIELD_FROM_ITER] = GET_YIELD_FROM_ITER,
[IMPORT_FROM] = IMPORT_FROM,
[IMPORT_NAME] = IMPORT_NAME,
[INSTRUMENTED_CALL] = INSTRUMENTED_CALL,
[INSTRUMENTED_CALL_FUNCTION_EX] = INSTRUMENTED_CALL_FUNCTION_EX,
[INSTRUMENTED_END_FOR] = INSTRUMENTED_END_FOR,
[INSTRUMENTED_END_SEND] = INSTRUMENTED_END_SEND,
[INSTRUMENTED_FOR_ITER] = INSTRUMENTED_FOR_ITER,
[INSTRUMENTED_INSTRUCTION] = INSTRUMENTED_INSTRUCTION,
[INSTRUMENTED_JUMP_BACKWARD] = INSTRUMENTED_JUMP_BACKWARD,
[INSTRUMENTED_JUMP_FORWARD] = INSTRUMENTED_JUMP_FORWARD,
[INSTRUMENTED_LINE] = INSTRUMENTED_LINE,
[INSTRUMENTED_LOAD_SUPER_ATTR] = INSTRUMENTED_LOAD_SUPER_ATTR,
[INSTRUMENTED_POP_JUMP_IF_FALSE] = INSTRUMENTED_POP_JUMP_IF_FALSE,
[INSTRUMENTED_POP_JUMP_IF_NONE] = INSTRUMENTED_POP_JUMP_IF_NONE,
[INSTRUMENTED_POP_JUMP_IF_NOT_NONE] = INSTRUMENTED_POP_JUMP_IF_NOT_NONE,
[INSTRUMENTED_POP_JUMP_IF_TRUE] = INSTRUMENTED_POP_JUMP_IF_TRUE,
[INSTRUMENTED_RESUME] = INSTRUMENTED_RESUME,
[INSTRUMENTED_RETURN_CONST] = INSTRUMENTED_RETURN_CONST,
[INSTRUMENTED_RETURN_VALUE] = INSTRUMENTED_RETURN_VALUE,
[INSTRUMENTED_YIELD_VALUE] = INSTRUMENTED_YIELD_VALUE,
[INTERPRETER_EXIT] = INTERPRETER_EXIT,
[IS_OP] = IS_OP,
[JUMP_BACKWARD] = JUMP_BACKWARD,
[JUMP_BACKWARD_NO_INTERRUPT] = JUMP_BACKWARD_NO_INTERRUPT,
[JUMP_FORWARD] = JUMP_FORWARD,
[KW_NAMES] = KW_NAMES,
[LIST_APPEND] = LIST_APPEND,
[LIST_EXTEND] = LIST_EXTEND,
[LOAD_ASSERTION_ERROR] = LOAD_ASSERTION_ERROR,
[LOAD_ATTR] = LOAD_ATTR,
[LOAD_ATTR_CLASS] = LOAD_ATTR,
[LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN] = LOAD_ATTR,
[LOAD_ATTR_INSTANCE_VALUE] = LOAD_ATTR,
[LOAD_ATTR_METHOD_LAZY_DICT] = LOAD_ATTR,
[LOAD_ATTR_METHOD_NO_DICT] = LOAD_ATTR,
[LOAD_ATTR_METHOD_WITH_VALUES] = LOAD_ATTR,
[LOAD_ATTR_MODULE] = LOAD_ATTR,
[LOAD_ATTR_PROPERTY] = LOAD_ATTR,
[LOAD_ATTR_SLOT] = LOAD_ATTR,
[LOAD_ATTR_WITH_HINT] = LOAD_ATTR,
[LOAD_BUILD_CLASS] = LOAD_BUILD_CLASS,
[LOAD_CLOSURE] = LOAD_CLOSURE,
[LOAD_CONST] = LOAD_CONST,
[LOAD_CONST__LOAD_FAST] = LOAD_CONST,
[LOAD_DEREF] = LOAD_DEREF,
[LOAD_FAST] = LOAD_FAST,
[LOAD_FAST_AND_CLEAR] = LOAD_FAST_AND_CLEAR,
[LOAD_FAST_CHECK] = LOAD_FAST_CHECK,
[LOAD_FAST__LOAD_CONST] = LOAD_FAST,
[LOAD_FAST__LOAD_FAST] = LOAD_FAST,
[LOAD_FROM_DICT_OR_DEREF] = LOAD_FROM_DICT_OR_DEREF,
[LOAD_FROM_DICT_OR_GLOBALS] = LOAD_FROM_DICT_OR_GLOBALS,
[LOAD_GLOBAL] = LOAD_GLOBAL,
[LOAD_GLOBAL_BUILTIN] = LOAD_GLOBAL,
[LOAD_GLOBAL_MODULE] = LOAD_GLOBAL,
[LOAD_LOCALS] = LOAD_LOCALS,
[LOAD_NAME] = LOAD_NAME,
[LOAD_SUPER_ATTR] = LOAD_SUPER_ATTR,
[LOAD_SUPER_ATTR_ATTR] = LOAD_SUPER_ATTR,
[LOAD_SUPER_ATTR_METHOD] = LOAD_SUPER_ATTR,
[MAKE_CELL] = MAKE_CELL,
[MAKE_FUNCTION] = MAKE_FUNCTION,
[MAP_ADD] = MAP_ADD,
[MATCH_CLASS] = MATCH_CLASS,
[MATCH_KEYS] = MATCH_KEYS,
[MATCH_MAPPING] = MATCH_MAPPING,
[MATCH_SEQUENCE] = MATCH_SEQUENCE,
[NOP] = NOP,
[POP_EXCEPT] = POP_EXCEPT,
[POP_JUMP_IF_FALSE] = POP_JUMP_IF_FALSE,
[POP_JUMP_IF_NONE] = POP_JUMP_IF_NONE,
[POP_JUMP_IF_NOT_NONE] = POP_JUMP_IF_NOT_NONE,
[POP_JUMP_IF_TRUE] = POP_JUMP_IF_TRUE,
[POP_TOP] = POP_TOP,
[PUSH_EXC_INFO] = PUSH_EXC_INFO,
[PUSH_NULL] = PUSH_NULL,
[RAISE_VARARGS] = RAISE_VARARGS,
[RERAISE] = RERAISE,
[RESERVED] = RESERVED,
[RESUME] = RESUME,
[RETURN_CONST] = RETURN_CONST,
[RETURN_GENERATOR] = RETURN_GENERATOR,
[RETURN_VALUE] = RETURN_VALUE,
[SEND] = SEND,
[SEND_GEN] = SEND,
[SETUP_ANNOTATIONS] = SETUP_ANNOTATIONS,
[SET_ADD] = SET_ADD,
[SET_UPDATE] = SET_UPDATE,
[STORE_ATTR] = STORE_ATTR,
[STORE_ATTR_INSTANCE_VALUE] = STORE_ATTR,
[STORE_ATTR_SLOT] = STORE_ATTR,
[STORE_ATTR_WITH_HINT] = STORE_ATTR,
[STORE_DEREF] = STORE_DEREF,
[STORE_FAST] = STORE_FAST,
[STORE_FAST__LOAD_FAST] = STORE_FAST,
[STORE_FAST__STORE_FAST] = STORE_FAST,
[STORE_GLOBAL] = STORE_GLOBAL,
[STORE_NAME] = STORE_NAME,
[STORE_SLICE] = STORE_SLICE,
[STORE_SUBSCR] = STORE_SUBSCR,
[STORE_SUBSCR_DICT] = STORE_SUBSCR,
[STORE_SUBSCR_LIST_INT] = STORE_SUBSCR,
[SWAP] = SWAP,
[UNARY_INVERT] = UNARY_INVERT,
[UNARY_NEGATIVE] = UNARY_NEGATIVE,
[UNARY_NOT] = UNARY_NOT,
[UNPACK_EX] = UNPACK_EX,
[UNPACK_SEQUENCE] = UNPACK_SEQUENCE,
[UNPACK_SEQUENCE_LIST] = UNPACK_SEQUENCE,
[UNPACK_SEQUENCE_TUPLE] = UNPACK_SEQUENCE,
[UNPACK_SEQUENCE_TWO_TUPLE] = UNPACK_SEQUENCE,
[WITH_EXCEPT_START] = WITH_EXCEPT_START,
[YIELD_VALUE] = YIELD_VALUE,
};
#endif // NEED_OPCODE_TABLES
#ifdef Py_DEBUG
static const char *const _PyOpcode_OpName[267] = {
[CACHE] = "CACHE",
[POP_TOP] = "POP_TOP",
[PUSH_NULL] = "PUSH_NULL",
[INTERPRETER_EXIT] = "INTERPRETER_EXIT",
[END_FOR] = "END_FOR",
[END_SEND] = "END_SEND",
[BINARY_OP_ADD_FLOAT] = "BINARY_OP_ADD_FLOAT",
[BINARY_OP_ADD_INT] = "BINARY_OP_ADD_INT",
[BINARY_OP_ADD_UNICODE] = "BINARY_OP_ADD_UNICODE",
[NOP] = "NOP",
[BINARY_OP_INPLACE_ADD_UNICODE] = "BINARY_OP_INPLACE_ADD_UNICODE",
[UNARY_NEGATIVE] = "UNARY_NEGATIVE",
[UNARY_NOT] = "UNARY_NOT",
[BINARY_OP_MULTIPLY_FLOAT] = "BINARY_OP_MULTIPLY_FLOAT",
[BINARY_OP_MULTIPLY_INT] = "BINARY_OP_MULTIPLY_INT",
[UNARY_INVERT] = "UNARY_INVERT",
[BINARY_OP_SUBTRACT_FLOAT] = "BINARY_OP_SUBTRACT_FLOAT",
[RESERVED] = "RESERVED",
[BINARY_OP_SUBTRACT_INT] = "BINARY_OP_SUBTRACT_INT",
[BINARY_SUBSCR_DICT] = "BINARY_SUBSCR_DICT",
[BINARY_SUBSCR_GETITEM] = "BINARY_SUBSCR_GETITEM",
[BINARY_SUBSCR_LIST_INT] = "BINARY_SUBSCR_LIST_INT",
[BINARY_SUBSCR_TUPLE_INT] = "BINARY_SUBSCR_TUPLE_INT",
[CALL_PY_EXACT_ARGS] = "CALL_PY_EXACT_ARGS",
[CALL_PY_WITH_DEFAULTS] = "CALL_PY_WITH_DEFAULTS",
[BINARY_SUBSCR] = "BINARY_SUBSCR",
[BINARY_SLICE] = "BINARY_SLICE",
[STORE_SLICE] = "STORE_SLICE",
[CALL_BOUND_METHOD_EXACT_ARGS] = "CALL_BOUND_METHOD_EXACT_ARGS",
[CALL_BUILTIN_CLASS] = "CALL_BUILTIN_CLASS",
[GET_LEN] = "GET_LEN",
[MATCH_MAPPING] = "MATCH_MAPPING",
[MATCH_SEQUENCE] = "MATCH_SEQUENCE",
[MATCH_KEYS] = "MATCH_KEYS",
[CALL_BUILTIN_FAST_WITH_KEYWORDS] = "CALL_BUILTIN_FAST_WITH_KEYWORDS",
[PUSH_EXC_INFO] = "PUSH_EXC_INFO",
[CHECK_EXC_MATCH] = "CHECK_EXC_MATCH",
[CHECK_EG_MATCH] = "CHECK_EG_MATCH",
[CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = "CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS",
[CALL_NO_KW_BUILTIN_FAST] = "CALL_NO_KW_BUILTIN_FAST",
[CALL_NO_KW_BUILTIN_O] = "CALL_NO_KW_BUILTIN_O",
[CALL_NO_KW_ISINSTANCE] = "CALL_NO_KW_ISINSTANCE",
[CALL_NO_KW_LEN] = "CALL_NO_KW_LEN",
[CALL_NO_KW_LIST_APPEND] = "CALL_NO_KW_LIST_APPEND",
[CALL_NO_KW_METHOD_DESCRIPTOR_FAST] = "CALL_NO_KW_METHOD_DESCRIPTOR_FAST",
[CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS] = "CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS",
[CALL_NO_KW_METHOD_DESCRIPTOR_O] = "CALL_NO_KW_METHOD_DESCRIPTOR_O",
[CALL_NO_KW_STR_1] = "CALL_NO_KW_STR_1",
[CALL_NO_KW_TUPLE_1] = "CALL_NO_KW_TUPLE_1",
[WITH_EXCEPT_START] = "WITH_EXCEPT_START",
[GET_AITER] = "GET_AITER",
[GET_ANEXT] = "GET_ANEXT",
[BEFORE_ASYNC_WITH] = "BEFORE_ASYNC_WITH",
[BEFORE_WITH] = "BEFORE_WITH",
[END_ASYNC_FOR] = "END_ASYNC_FOR",
[CLEANUP_THROW] = "CLEANUP_THROW",
[CALL_NO_KW_TYPE_1] = "CALL_NO_KW_TYPE_1",
[COMPARE_OP_FLOAT] = "COMPARE_OP_FLOAT",
[COMPARE_OP_INT] = "COMPARE_OP_INT",
[COMPARE_OP_STR] = "COMPARE_OP_STR",
[STORE_SUBSCR] = "STORE_SUBSCR",
[DELETE_SUBSCR] = "DELETE_SUBSCR",
[FOR_ITER_LIST] = "FOR_ITER_LIST",
[FOR_ITER_TUPLE] = "FOR_ITER_TUPLE",
[FOR_ITER_RANGE] = "FOR_ITER_RANGE",
[FOR_ITER_GEN] = "FOR_ITER_GEN",
[LOAD_SUPER_ATTR_ATTR] = "LOAD_SUPER_ATTR_ATTR",
[LOAD_SUPER_ATTR_METHOD] = "LOAD_SUPER_ATTR_METHOD",
[GET_ITER] = "GET_ITER",
[GET_YIELD_FROM_ITER] = "GET_YIELD_FROM_ITER",
[LOAD_ATTR_CLASS] = "LOAD_ATTR_CLASS",
[LOAD_BUILD_CLASS] = "LOAD_BUILD_CLASS",
[LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN] = "LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN",
[LOAD_ATTR_INSTANCE_VALUE] = "LOAD_ATTR_INSTANCE_VALUE",
[LOAD_ASSERTION_ERROR] = "LOAD_ASSERTION_ERROR",
[RETURN_GENERATOR] = "RETURN_GENERATOR",
[LOAD_ATTR_MODULE] = "LOAD_ATTR_MODULE",
[LOAD_ATTR_PROPERTY] = "LOAD_ATTR_PROPERTY",
[LOAD_ATTR_SLOT] = "LOAD_ATTR_SLOT",
[LOAD_ATTR_WITH_HINT] = "LOAD_ATTR_WITH_HINT",
[LOAD_ATTR_METHOD_LAZY_DICT] = "LOAD_ATTR_METHOD_LAZY_DICT",
[LOAD_ATTR_METHOD_NO_DICT] = "LOAD_ATTR_METHOD_NO_DICT",
[LOAD_ATTR_METHOD_WITH_VALUES] = "LOAD_ATTR_METHOD_WITH_VALUES",
[RETURN_VALUE] = "RETURN_VALUE",
[LOAD_CONST__LOAD_FAST] = "LOAD_CONST__LOAD_FAST",
[SETUP_ANNOTATIONS] = "SETUP_ANNOTATIONS",
[LOAD_FAST__LOAD_CONST] = "LOAD_FAST__LOAD_CONST",
[LOAD_LOCALS] = "LOAD_LOCALS",
[LOAD_FAST__LOAD_FAST] = "LOAD_FAST__LOAD_FAST",
[POP_EXCEPT] = "POP_EXCEPT",
[STORE_NAME] = "STORE_NAME",
[DELETE_NAME] = "DELETE_NAME",
[UNPACK_SEQUENCE] = "UNPACK_SEQUENCE",
[FOR_ITER] = "FOR_ITER",
[UNPACK_EX] = "UNPACK_EX",
[STORE_ATTR] = "STORE_ATTR",
[DELETE_ATTR] = "DELETE_ATTR",
[STORE_GLOBAL] = "STORE_GLOBAL",
[DELETE_GLOBAL] = "DELETE_GLOBAL",
[SWAP] = "SWAP",
[LOAD_CONST] = "LOAD_CONST",
[LOAD_NAME] = "LOAD_NAME",
[BUILD_TUPLE] = "BUILD_TUPLE",
[BUILD_LIST] = "BUILD_LIST",
[BUILD_SET] = "BUILD_SET",
[BUILD_MAP] = "BUILD_MAP",
[LOAD_ATTR] = "LOAD_ATTR",
[COMPARE_OP] = "COMPARE_OP",
[IMPORT_NAME] = "IMPORT_NAME",
[IMPORT_FROM] = "IMPORT_FROM",
[JUMP_FORWARD] = "JUMP_FORWARD",
[LOAD_GLOBAL_BUILTIN] = "LOAD_GLOBAL_BUILTIN",
[LOAD_GLOBAL_MODULE] = "LOAD_GLOBAL_MODULE",
[STORE_ATTR_INSTANCE_VALUE] = "STORE_ATTR_INSTANCE_VALUE",
[POP_JUMP_IF_FALSE] = "POP_JUMP_IF_FALSE",
[POP_JUMP_IF_TRUE] = "POP_JUMP_IF_TRUE",
[LOAD_GLOBAL] = "LOAD_GLOBAL",
[IS_OP] = "IS_OP",
[CONTAINS_OP] = "CONTAINS_OP",
[RERAISE] = "RERAISE",
[COPY] = "COPY",
[RETURN_CONST] = "RETURN_CONST",
[BINARY_OP] = "BINARY_OP",
[SEND] = "SEND",
[LOAD_FAST] = "LOAD_FAST",
[STORE_FAST] = "STORE_FAST",
[DELETE_FAST] = "DELETE_FAST",
[LOAD_FAST_CHECK] = "LOAD_FAST_CHECK",
[POP_JUMP_IF_NOT_NONE] = "POP_JUMP_IF_NOT_NONE",
[POP_JUMP_IF_NONE] = "POP_JUMP_IF_NONE",
[RAISE_VARARGS] = "RAISE_VARARGS",
[GET_AWAITABLE] = "GET_AWAITABLE",
[MAKE_FUNCTION] = "MAKE_FUNCTION",
[BUILD_SLICE] = "BUILD_SLICE",
[JUMP_BACKWARD_NO_INTERRUPT] = "JUMP_BACKWARD_NO_INTERRUPT",
[MAKE_CELL] = "MAKE_CELL",
[LOAD_CLOSURE] = "LOAD_CLOSURE",
[LOAD_DEREF] = "LOAD_DEREF",
[STORE_DEREF] = "STORE_DEREF",
[DELETE_DEREF] = "DELETE_DEREF",
[JUMP_BACKWARD] = "JUMP_BACKWARD",
[LOAD_SUPER_ATTR] = "LOAD_SUPER_ATTR",
[CALL_FUNCTION_EX] = "CALL_FUNCTION_EX",
[LOAD_FAST_AND_CLEAR] = "LOAD_FAST_AND_CLEAR",
[EXTENDED_ARG] = "EXTENDED_ARG",
[LIST_APPEND] = "LIST_APPEND",
[SET_ADD] = "SET_ADD",
[MAP_ADD] = "MAP_ADD",
[STORE_ATTR_SLOT] = "STORE_ATTR_SLOT",
[COPY_FREE_VARS] = "COPY_FREE_VARS",
[YIELD_VALUE] = "YIELD_VALUE",
[RESUME] = "RESUME",
[MATCH_CLASS] = "MATCH_CLASS",
[STORE_ATTR_WITH_HINT] = "STORE_ATTR_WITH_HINT",
[STORE_FAST__LOAD_FAST] = "STORE_FAST__LOAD_FAST",
[FORMAT_VALUE] = "FORMAT_VALUE",
[BUILD_CONST_KEY_MAP] = "BUILD_CONST_KEY_MAP",
[BUILD_STRING] = "BUILD_STRING",
[STORE_FAST__STORE_FAST] = "STORE_FAST__STORE_FAST",
[STORE_SUBSCR_DICT] = "STORE_SUBSCR_DICT",
[STORE_SUBSCR_LIST_INT] = "STORE_SUBSCR_LIST_INT",
[UNPACK_SEQUENCE_LIST] = "UNPACK_SEQUENCE_LIST",
[LIST_EXTEND] = "LIST_EXTEND",
[SET_UPDATE] = "SET_UPDATE",
[DICT_MERGE] = "DICT_MERGE",
[DICT_UPDATE] = "DICT_UPDATE",
[UNPACK_SEQUENCE_TUPLE] = "UNPACK_SEQUENCE_TUPLE",
[UNPACK_SEQUENCE_TWO_TUPLE] = "UNPACK_SEQUENCE_TWO_TUPLE",
[SEND_GEN] = "SEND_GEN",
[169] = "<169>",
[170] = "<170>",
[CALL] = "CALL",
[KW_NAMES] = "KW_NAMES",
[CALL_INTRINSIC_1] = "CALL_INTRINSIC_1",
[CALL_INTRINSIC_2] = "CALL_INTRINSIC_2",
[LOAD_FROM_DICT_OR_GLOBALS] = "LOAD_FROM_DICT_OR_GLOBALS",
[LOAD_FROM_DICT_OR_DEREF] = "LOAD_FROM_DICT_OR_DEREF",
[177] = "<177>",
[178] = "<178>",
[179] = "<179>",
[180] = "<180>",
[181] = "<181>",
[182] = "<182>",
[183] = "<183>",
[184] = "<184>",
[185] = "<185>",
[186] = "<186>",
[187] = "<187>",
[188] = "<188>",
[189] = "<189>",
[190] = "<190>",
[191] = "<191>",
[192] = "<192>",
[193] = "<193>",
[194] = "<194>",
[195] = "<195>",
[196] = "<196>",
[197] = "<197>",
[198] = "<198>",
[199] = "<199>",
[200] = "<200>",
[201] = "<201>",
[202] = "<202>",
[203] = "<203>",
[204] = "<204>",
[205] = "<205>",
[206] = "<206>",
[207] = "<207>",
[208] = "<208>",
[209] = "<209>",
[210] = "<210>",
[211] = "<211>",
[212] = "<212>",
[213] = "<213>",
[214] = "<214>",
[215] = "<215>",
[216] = "<216>",
[217] = "<217>",
[218] = "<218>",
[219] = "<219>",
[220] = "<220>",
[221] = "<221>",
[222] = "<222>",
[223] = "<223>",
[224] = "<224>",
[225] = "<225>",
[226] = "<226>",
[227] = "<227>",
[228] = "<228>",
[229] = "<229>",
[230] = "<230>",
[231] = "<231>",
[232] = "<232>",
[233] = "<233>",
[234] = "<234>",
[235] = "<235>",
[236] = "<236>",
[INSTRUMENTED_LOAD_SUPER_ATTR] = "INSTRUMENTED_LOAD_SUPER_ATTR",
[INSTRUMENTED_POP_JUMP_IF_NONE] = "INSTRUMENTED_POP_JUMP_IF_NONE",
[INSTRUMENTED_POP_JUMP_IF_NOT_NONE] = "INSTRUMENTED_POP_JUMP_IF_NOT_NONE",
[INSTRUMENTED_RESUME] = "INSTRUMENTED_RESUME",
[INSTRUMENTED_CALL] = "INSTRUMENTED_CALL",
[INSTRUMENTED_RETURN_VALUE] = "INSTRUMENTED_RETURN_VALUE",
[INSTRUMENTED_YIELD_VALUE] = "INSTRUMENTED_YIELD_VALUE",
[INSTRUMENTED_CALL_FUNCTION_EX] = "INSTRUMENTED_CALL_FUNCTION_EX",
[INSTRUMENTED_JUMP_FORWARD] = "INSTRUMENTED_JUMP_FORWARD",
[INSTRUMENTED_JUMP_BACKWARD] = "INSTRUMENTED_JUMP_BACKWARD",
[INSTRUMENTED_RETURN_CONST] = "INSTRUMENTED_RETURN_CONST",
[INSTRUMENTED_FOR_ITER] = "INSTRUMENTED_FOR_ITER",
[INSTRUMENTED_POP_JUMP_IF_FALSE] = "INSTRUMENTED_POP_JUMP_IF_FALSE",
[INSTRUMENTED_POP_JUMP_IF_TRUE] = "INSTRUMENTED_POP_JUMP_IF_TRUE",
[INSTRUMENTED_END_FOR] = "INSTRUMENTED_END_FOR",
[INSTRUMENTED_END_SEND] = "INSTRUMENTED_END_SEND",
[INSTRUMENTED_INSTRUCTION] = "INSTRUMENTED_INSTRUCTION",
[INSTRUMENTED_LINE] = "INSTRUMENTED_LINE",
[255] = "<255>",
[SETUP_FINALLY] = "SETUP_FINALLY",
[SETUP_CLEANUP] = "SETUP_CLEANUP",
[SETUP_WITH] = "SETUP_WITH",
[POP_BLOCK] = "POP_BLOCK",
[JUMP] = "JUMP",
[JUMP_NO_INTERRUPT] = "JUMP_NO_INTERRUPT",
[LOAD_METHOD] = "LOAD_METHOD",
[LOAD_SUPER_METHOD] = "LOAD_SUPER_METHOD",
[LOAD_ZERO_SUPER_METHOD] = "LOAD_ZERO_SUPER_METHOD",
[LOAD_ZERO_SUPER_ATTR] = "LOAD_ZERO_SUPER_ATTR",
[STORE_FAST_MAYBE_NULL] = "STORE_FAST_MAYBE_NULL",
};
#endif
#define EXTRA_CASES \
case 169: \
case 170: \
case 177: \
case 178: \
case 179: \
case 180: \
case 181: \
case 182: \
case 183: \
case 184: \
case 185: \
case 186: \
case 187: \
case 188: \
case 189: \
case 190: \
case 191: \
case 192: \
case 193: \
case 194: \
case 195: \
case 196: \
case 197: \
case 198: \
case 199: \
case 200: \
case 201: \
case 202: \
case 203: \
case 204: \
case 205: \
case 206: \
case 207: \
case 208: \
case 209: \
case 210: \
case 211: \
case 212: \
case 213: \
case 214: \
case 215: \
case 216: \
case 217: \
case 218: \
case 219: \
case 220: \
case 221: \
case 222: \
case 223: \
case 224: \
case 225: \
case 226: \
case 227: \
case 228: \
case 229: \
case 230: \
case 231: \
case 232: \
case 233: \
case 234: \
case 235: \
case 236: \
case 255: \
;
#ifdef __cplusplus
}
#endif
#endif // !Py_INTERNAL_OPCODE_H

View File

@@ -0,0 +1,92 @@
#ifndef Py_INTERNAL_OPCODE_UTILS_H
#define Py_INTERNAL_OPCODE_UTILS_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_opcode.h" // _PyOpcode_Jump
#define MAX_REAL_OPCODE 254
#define IS_WITHIN_OPCODE_RANGE(opcode) \
(((opcode) >= 0 && (opcode) <= MAX_REAL_OPCODE) || \
IS_PSEUDO_OPCODE(opcode))
#define IS_JUMP_OPCODE(opcode) \
is_bit_set_in_table(_PyOpcode_Jump, opcode)
#define IS_BLOCK_PUSH_OPCODE(opcode) \
((opcode) == SETUP_FINALLY || \
(opcode) == SETUP_WITH || \
(opcode) == SETUP_CLEANUP)
#define HAS_TARGET(opcode) \
(IS_JUMP_OPCODE(opcode) || IS_BLOCK_PUSH_OPCODE(opcode))
/* opcodes that must be last in the basicblock */
#define IS_TERMINATOR_OPCODE(opcode) \
(IS_JUMP_OPCODE(opcode) || IS_SCOPE_EXIT_OPCODE(opcode))
/* opcodes which are not emitted in codegen stage, only by the assembler */
#define IS_ASSEMBLER_OPCODE(opcode) \
((opcode) == JUMP_FORWARD || \
(opcode) == JUMP_BACKWARD || \
(opcode) == JUMP_BACKWARD_NO_INTERRUPT)
#define IS_BACKWARDS_JUMP_OPCODE(opcode) \
((opcode) == JUMP_BACKWARD || \
(opcode) == JUMP_BACKWARD_NO_INTERRUPT)
#define IS_UNCONDITIONAL_JUMP_OPCODE(opcode) \
((opcode) == JUMP || \
(opcode) == JUMP_NO_INTERRUPT || \
(opcode) == JUMP_FORWARD || \
(opcode) == JUMP_BACKWARD || \
(opcode) == JUMP_BACKWARD_NO_INTERRUPT)
#define IS_SCOPE_EXIT_OPCODE(opcode) \
((opcode) == RETURN_VALUE || \
(opcode) == RETURN_CONST || \
(opcode) == RAISE_VARARGS || \
(opcode) == RERAISE)
#define IS_SUPERINSTRUCTION_OPCODE(opcode) \
((opcode) == LOAD_FAST__LOAD_FAST || \
(opcode) == LOAD_FAST__LOAD_CONST || \
(opcode) == LOAD_CONST__LOAD_FAST || \
(opcode) == STORE_FAST__LOAD_FAST || \
(opcode) == STORE_FAST__STORE_FAST)
#define LOG_BITS_PER_INT 5
#define MASK_LOW_LOG_BITS 31
static inline int
is_bit_set_in_table(const uint32_t *table, int bitindex) {
/* Is the relevant bit set in the relevant word? */
/* 512 bits fit into 9 32-bits words.
* Word is indexed by (bitindex>>ln(size of int in bits)).
* Bit within word is the low bits of bitindex.
*/
if (bitindex >= 0 && bitindex < 512) {
uint32_t word = table[bitindex >> LOG_BITS_PER_INT];
return (word >> (bitindex & MASK_LOW_LOG_BITS)) & 1;
}
else {
return 0;
}
}
#undef LOG_BITS_PER_INT
#undef MASK_LOW_LOG_BITS
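/* Editor's sketch (illustration only): the bitset in use. _PyOpcode_Jump
* packs one bit per (pseudo-)opcode, so the jump test is just a word index
* plus a shift.
*/
#if 0
static void
demo_opcode_checks(void)
{
    assert(IS_JUMP_OPCODE(JUMP_FORWARD));
    assert(!IS_JUMP_OPCODE(LOAD_FAST));
    assert(IS_TERMINATOR_OPCODE(RETURN_VALUE));   /* scope exit */
}
#endif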
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_OPCODE_UTILS_H */

View File

@@ -0,0 +1,66 @@
#ifndef Py_INTERNAL_PARSER_H
#define Py_INTERNAL_PARSER_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_ast.h" // struct _expr
#include "pycore_global_strings.h" // _Py_DECLARE_STR()
#include "pycore_pyarena.h" // PyArena
#ifdef Py_DEBUG
#define _PYPEGEN_NSTATISTICS 2000
#endif
struct _parser_runtime_state {
#ifdef Py_DEBUG
long memo_statistics[_PYPEGEN_NSTATISTICS];
#else
int _not_used;
#endif
struct _expr dummy_name;
};
_Py_DECLARE_STR(empty, "")
#define _parser_runtime_state_INIT \
{ \
.dummy_name = { \
.kind = Name_kind, \
.v.Name.id = &_Py_STR(empty), \
.v.Name.ctx = Load, \
.lineno = 1, \
.col_offset = 0, \
.end_lineno = 1, \
.end_col_offset = 0, \
}, \
}
extern struct _mod* _PyParser_ASTFromString(
const char *str,
PyObject* filename,
int mode,
PyCompilerFlags *flags,
PyArena *arena);
extern struct _mod* _PyParser_ASTFromFile(
FILE *fp,
PyObject *filename_ob,
const char *enc,
int mode,
const char *ps1,
const char *ps2,
PyCompilerFlags *flags,
int *errcode,
PyArena *arena);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PARSER_H */

View File

@@ -0,0 +1,24 @@
#ifndef Py_INTERNAL_PATHCONFIG_H
#define Py_INTERNAL_PATHCONFIG_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
PyAPI_FUNC(void) _PyPathConfig_ClearGlobal(void);
extern PyStatus _PyPathConfig_ReadGlobal(PyConfig *config);
extern PyStatus _PyPathConfig_UpdateGlobal(const PyConfig *config);
extern const wchar_t * _PyPathConfig_GetGlobalModuleSearchPath(void);
extern int _PyPathConfig_ComputeSysPath0(
const PyWideStringList *argv,
PyObject **path0);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PATHCONFIG_H */

View File

@@ -0,0 +1,64 @@
/* An arena-like memory interface for the compiler.
*/
#ifndef Py_INTERNAL_PYARENA_H
#define Py_INTERNAL_PYARENA_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
typedef struct _arena PyArena;
/* _PyArena_New() and _PyArena_Free() create a new arena and free it,
respectively. Once an arena has been created, it can be used
to allocate memory via _PyArena_Malloc(). Pointers to PyObject can
also be registered with the arena via _PyArena_AddPyObject(), and the
arena will ensure that the PyObjects stay alive at least until
_PyArena_Free() is called. When an arena is freed, all the memory it
allocated is freed, the arena releases internal references to registered
PyObject*, and none of its pointers are valid.
XXX (tim) What does "none of its pointers are valid" mean? Does it
XXX mean that pointers previously obtained via _PyArena_Malloc() are
XXX no longer valid? (That's clearly true, but not sure that's what
XXX the text is trying to say.)
_PyArena_New() returns an arena pointer. On error, it
returns a negative number and sets an exception.
XXX (tim): Not true. On error, _PyArena_New() actually returns NULL,
XXX and looks like it may or may not set an exception (e.g., if the
XXX internal PyList_New(0) returns NULL, _PyArena_New() passes that on
XXX and an exception is set; OTOH, if the internal
XXX block_new(DEFAULT_BLOCK_SIZE) returns NULL, that's passed on but
XXX an exception is not set in that case).
*/
PyAPI_FUNC(PyArena*) _PyArena_New(void);
PyAPI_FUNC(void) _PyArena_Free(PyArena *);
/* Mostly like malloc(), return the address of a block of memory spanning
* `size` bytes, or return NULL (without setting an exception) if enough
* new memory can't be obtained. Unlike malloc(0), _PyArena_Malloc() with
* size=0 does not guarantee to return a unique pointer (the pointer
* returned may equal one or more other pointers obtained from
* _PyArena_Malloc()).
* Note that pointers obtained via _PyArena_Malloc() must never be passed to
* the system free() or realloc(), or to any of Python's similar memory-
* management functions. _PyArena_Malloc()-obtained pointers remain valid
* until _PyArena_Free(ar) is called, at which point all pointers obtained
* from the arena `ar` become invalid simultaneously.
*/
PyAPI_FUNC(void*) _PyArena_Malloc(PyArena *, size_t size);
/* This routine isn't a proper arena allocation routine. It takes
* a PyObject* and records it so that it can be DECREFed when the
* arena is freed.
*/
PyAPI_FUNC(int) _PyArena_AddPyObject(PyArena *, PyObject *);
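/* Editor's sketch (illustration only): the lifecycle the comments above
* describe -- create an arena, allocate from it, register a PyObject, and
* release everything with one _PyArena_Free() call. In CPython, a successful
* _PyArena_AddPyObject() passes the caller's reference to the arena.
*/
#if 0
static int
demo_arena_use(void)
{
    PyArena *arena = _PyArena_New();
    if (arena == NULL) {
        return -1;
    }
    void *buf = _PyArena_Malloc(arena, 128);   /* lives until the free */
    PyObject *s = buf ? PyUnicode_FromString("spam") : NULL;
    if (s == NULL || _PyArena_AddPyObject(arena, s) < 0) {
        Py_XDECREF(s);   /* on failure the caller still owns s */
        _PyArena_Free(arena);
        return -1;
    }
    /* ... build ASTs etc.; buf and s stay valid here ... */
    _PyArena_Free(arena);   /* invalidates buf, releases the ref to s */
    return 0;
}
#endif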
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYARENA_H */

View File

@@ -0,0 +1,117 @@
#ifndef Py_INTERNAL_PYERRORS_H
#define Py_INTERNAL_PYERRORS_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* runtime lifecycle */
extern PyStatus _PyErr_InitTypes(PyInterpreterState *);
extern void _PyErr_FiniTypes(PyInterpreterState *);
/* other API */
static inline PyObject* _PyErr_Occurred(PyThreadState *tstate)
{
assert(tstate != NULL);
if (tstate->current_exception == NULL) {
return NULL;
}
return (PyObject *)Py_TYPE(tstate->current_exception);
}
static inline void _PyErr_ClearExcState(_PyErr_StackItem *exc_state)
{
Py_CLEAR(exc_state->exc_value);
}
PyAPI_FUNC(PyObject*) _PyErr_StackItemToExcInfoTuple(
_PyErr_StackItem *err_info);
PyAPI_FUNC(void) _PyErr_Fetch(
PyThreadState *tstate,
PyObject **type,
PyObject **value,
PyObject **traceback);
extern PyObject *
_PyErr_GetRaisedException(PyThreadState *tstate);
PyAPI_FUNC(int) _PyErr_ExceptionMatches(
PyThreadState *tstate,
PyObject *exc);
void
_PyErr_SetRaisedException(PyThreadState *tstate, PyObject *exc);
PyAPI_FUNC(void) _PyErr_Restore(
PyThreadState *tstate,
PyObject *type,
PyObject *value,
PyObject *traceback);
PyAPI_FUNC(void) _PyErr_SetObject(
PyThreadState *tstate,
PyObject *type,
PyObject *value);
PyAPI_FUNC(void) _PyErr_ChainStackItem(
_PyErr_StackItem *exc_info);
PyAPI_FUNC(void) _PyErr_Clear(PyThreadState *tstate);
PyAPI_FUNC(void) _PyErr_SetNone(PyThreadState *tstate, PyObject *exception);
PyAPI_FUNC(PyObject *) _PyErr_NoMemory(PyThreadState *tstate);
PyAPI_FUNC(void) _PyErr_SetString(
PyThreadState *tstate,
PyObject *exception,
const char *string);
PyAPI_FUNC(PyObject *) _PyErr_Format(
PyThreadState *tstate,
PyObject *exception,
const char *format,
...);
PyAPI_FUNC(void) _PyErr_NormalizeException(
PyThreadState *tstate,
PyObject **exc,
PyObject **val,
PyObject **tb);
PyAPI_FUNC(PyObject *) _PyErr_FormatFromCauseTstate(
PyThreadState *tstate,
PyObject *exception,
const char *format,
...);
PyAPI_FUNC(PyObject *) _PyExc_CreateExceptionGroup(
const char *msg,
PyObject *excs);
PyAPI_FUNC(PyObject *) _PyExc_PrepReraiseStar(
PyObject *orig,
PyObject *excs);
PyAPI_FUNC(int) _PyErr_CheckSignalsTstate(PyThreadState *tstate);
PyAPI_FUNC(void) _Py_DumpExtensionModules(int fd, PyInterpreterState *interp);
extern PyObject* _Py_Offer_Suggestions(PyObject* exception);
PyAPI_FUNC(Py_ssize_t) _Py_UTF8_Edit_Cost(PyObject *str_a, PyObject *str_b,
Py_ssize_t max_cost);
void _PyErr_FormatNote(const char *format, ...);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYERRORS_H */

View File

@@ -0,0 +1,40 @@
#ifndef Py_INTERNAL_HASH_H
#define Py_INTERNAL_HASH_H
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
struct pyhash_runtime_state {
struct {
#ifndef MS_WINDOWS
int fd;
dev_t st_dev;
ino_t st_ino;
#else
// This is a placeholder so the struct isn't empty on Windows.
int _not_used;
#endif
} urandom_cache;
};
#ifndef MS_WINDOWS
# define _py_urandom_cache_INIT \
{ \
.fd = -1, \
}
#else
# define _py_urandom_cache_INIT {0}
#endif
#define pyhash_state_INIT \
{ \
.urandom_cache = _py_urandom_cache_INIT, \
}
uint64_t _Py_KeyedHash(uint64_t, const char *, Py_ssize_t);
#endif // Py_INTERNAL_HASH_H

View File

@@ -0,0 +1,99 @@
#ifndef Py_INTERNAL_LIFECYCLE_H
#define Py_INTERNAL_LIFECYCLE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_runtime.h" // _PyRuntimeState
/* Forward declarations */
struct _PyArgv;
struct pyruntimestate;
extern int _Py_SetFileSystemEncoding(
const char *encoding,
const char *errors);
extern void _Py_ClearFileSystemEncoding(void);
extern PyStatus _PyUnicode_InitEncodings(PyThreadState *tstate);
#ifdef MS_WINDOWS
extern int _PyUnicode_EnableLegacyWindowsFSEncoding(void);
#endif
PyAPI_FUNC(void) _Py_ClearStandardStreamEncoding(void);
PyAPI_FUNC(int) _Py_IsLocaleCoercionTarget(const char *ctype_loc);
/* Various one-time initializers */
extern void _Py_InitVersion(void);
extern PyStatus _PyFaulthandler_Init(int enable);
extern PyObject * _PyBuiltin_Init(PyInterpreterState *interp);
extern PyStatus _PySys_Create(
PyThreadState *tstate,
PyObject **sysmod_p);
extern PyStatus _PySys_ReadPreinitWarnOptions(PyWideStringList *options);
extern PyStatus _PySys_ReadPreinitXOptions(PyConfig *config);
extern int _PySys_UpdateConfig(PyThreadState *tstate);
extern void _PySys_FiniTypes(PyInterpreterState *interp);
extern int _PyBuiltins_AddExceptions(PyObject * bltinmod);
extern PyStatus _Py_HashRandomization_Init(const PyConfig *);
extern PyStatus _PyTime_Init(void);
extern PyStatus _PyGC_Init(PyInterpreterState *interp);
extern PyStatus _PyAtExit_Init(PyInterpreterState *interp);
extern int _Py_Deepfreeze_Init(void);
/* Various internal finalizers */
extern int _PySignal_Init(int install_signal_handlers);
extern void _PySignal_Fini(void);
extern void _PyGC_Fini(PyInterpreterState *interp);
extern void _Py_HashRandomization_Fini(void);
extern void _PyFaulthandler_Fini(void);
extern void _PyHash_Fini(void);
extern void _PyTraceMalloc_Fini(void);
extern void _PyWarnings_Fini(PyInterpreterState *interp);
extern void _PyAST_Fini(PyInterpreterState *interp);
extern void _PyAtExit_Fini(PyInterpreterState *interp);
extern void _PyThread_FiniType(PyInterpreterState *interp);
extern void _Py_Deepfreeze_Fini(void);
extern void _PyArg_Fini(void);
extern void _Py_FinalizeAllocatedBlocks(_PyRuntimeState *);
extern PyStatus _PyGILState_Init(PyInterpreterState *interp);
extern PyStatus _PyGILState_SetTstate(PyThreadState *tstate);
extern void _PyGILState_Fini(PyInterpreterState *interp);
PyAPI_FUNC(void) _PyGC_DumpShutdownStats(PyInterpreterState *interp);
PyAPI_FUNC(PyStatus) _Py_PreInitializeFromPyArgv(
const PyPreConfig *src_config,
const struct _PyArgv *args);
PyAPI_FUNC(PyStatus) _Py_PreInitializeFromConfig(
const PyConfig *config,
const struct _PyArgv *args);
PyAPI_FUNC(wchar_t *) _Py_GetStdlibDir(void);
PyAPI_FUNC(int) _Py_HandleSystemExit(int *exitcode_p);
PyAPI_FUNC(PyObject*) _PyErr_WriteUnraisableDefaultHook(PyObject *unraisable);
PyAPI_FUNC(void) _PyErr_Print(PyThreadState *tstate);
PyAPI_FUNC(void) _PyErr_Display(PyObject *file, PyObject *exception,
PyObject *value, PyObject *tb);
PyAPI_FUNC(void) _PyErr_DisplayException(PyObject *file, PyObject *exc);
PyAPI_FUNC(void) _PyThreadState_DeleteCurrent(PyThreadState *tstate);
extern void _PyAtExit_Call(PyInterpreterState *interp);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_LIFECYCLE_H */

View File

@@ -0,0 +1,205 @@
#ifndef Py_INTERNAL_PYMATH_H
#define Py_INTERNAL_PYMATH_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* _Py_ADJUST_ERANGE1(x)
* _Py_ADJUST_ERANGE2(x, y)
* Set errno to 0 before calling a libm function, and invoke one of these
* macros after, passing the function result(s) (_Py_ADJUST_ERANGE2 is useful
* for functions returning complex results). This makes two kinds of
* adjustments to errno: (A) If it looks like the platform libm set
* errno=ERANGE due to underflow, clear errno. (B) If it looks like the
* platform libm overflowed but didn't set errno, force errno to ERANGE. In
* effect, we're trying to force a useful implementation of C89 errno
* behavior.
* Caution:
* This isn't reliable. C99 no longer requires libm to set errno under
* any exceptional condition, but does require +- HUGE_VAL return
* values on overflow. A 754 box *probably* maps HUGE_VAL to a
* double infinity, and we're cool if that's so, unless the input
* was an infinity and an infinity is the expected result. A C89
* system sets errno to ERANGE, so we check for that too. We're
* out of luck if a C99 754 box doesn't map HUGE_VAL to +Inf, or
* if the returned result is a NaN, or if a C89 box returns HUGE_VAL
* in non-overflow cases.
*/
static inline void _Py_ADJUST_ERANGE1(double x)
{
if (errno == 0) {
if (x == Py_HUGE_VAL || x == -Py_HUGE_VAL) {
errno = ERANGE;
}
}
else if (errno == ERANGE && x == 0.0) {
errno = 0;
}
}
static inline void _Py_ADJUST_ERANGE2(double x, double y)
{
if (x == Py_HUGE_VAL || x == -Py_HUGE_VAL ||
y == Py_HUGE_VAL || y == -Py_HUGE_VAL)
{
if (errno == 0) {
errno = ERANGE;
}
}
else if (errno == ERANGE) {
errno = 0;
}
}
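/* Illustrative usage sketch (not part of the original header): wrapping a
   libm call with the errno protocol described above. Assumes <errno.h> and
   <math.h> are available, which they are once Python.h has been included. */
static inline double
_py_example_wrap_exp(double x)
{
    double result;
    errno = 0;                   /* clear errno before the libm call */
    result = exp(x);
    _Py_ADJUST_ERANGE1(result);  /* normalize errno to C89 ERANGE behavior */
    return result;
}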
//--- HAVE_PY_SET_53BIT_PRECISION macro ------------------------------------
//
// The functions _Py_dg_strtod() and _Py_dg_dtoa() in Python/dtoa.c (which are
// required to support the short float repr introduced in Python 3.1) require
// that the floating-point unit that's being used for arithmetic operations on
// C doubles is set to use 53-bit precision. It also requires that the FPU
// rounding mode is round-half-to-even, but that's less often an issue.
//
// If your FPU isn't already set to 53-bit precision/round-half-to-even, and
// you want to make use of _Py_dg_strtod() and _Py_dg_dtoa(), then you should:
//
// #define HAVE_PY_SET_53BIT_PRECISION 1
//
// and also give appropriate definitions for the following three macros:
//
// * _Py_SET_53BIT_PRECISION_HEADER: any variable declarations needed to
// use the two macros below.
// * _Py_SET_53BIT_PRECISION_START: store original FPU settings, and
// set FPU to 53-bit precision/round-half-to-even
// * _Py_SET_53BIT_PRECISION_END: restore original FPU settings
//
// The macros are designed to be used within a single C function: see
// Python/pystrtod.c for an example of their use.
// Get and set x87 control word for gcc/x86
#ifdef HAVE_GCC_ASM_FOR_X87
#define HAVE_PY_SET_53BIT_PRECISION 1
// Functions defined in Python/pymath.c
extern unsigned short _Py_get_387controlword(void);
extern void _Py_set_387controlword(unsigned short);
#define _Py_SET_53BIT_PRECISION_HEADER \
unsigned short old_387controlword, new_387controlword
#define _Py_SET_53BIT_PRECISION_START \
do { \
old_387controlword = _Py_get_387controlword(); \
new_387controlword = (old_387controlword & ~0x0f00) | 0x0200; \
if (new_387controlword != old_387controlword) { \
_Py_set_387controlword(new_387controlword); \
} \
} while (0)
#define _Py_SET_53BIT_PRECISION_END \
do { \
if (new_387controlword != old_387controlword) { \
_Py_set_387controlword(old_387controlword); \
} \
} while (0)
#endif
// Get and set x87 control word for VisualStudio/x86.
// x87 is not supported on 64-bit or ARM.
#if defined(_MSC_VER) && !defined(_WIN64) && !defined(_M_ARM)
#define HAVE_PY_SET_53BIT_PRECISION 1
#include <float.h> // __control87_2()
#define _Py_SET_53BIT_PRECISION_HEADER \
unsigned int old_387controlword, new_387controlword, out_387controlword
// We use the __control87_2 function to set only the x87 control word.
// The SSE control word is unaffected.
#define _Py_SET_53BIT_PRECISION_START \
do { \
__control87_2(0, 0, &old_387controlword, NULL); \
new_387controlword = \
(old_387controlword & ~(_MCW_PC | _MCW_RC)) | (_PC_53 | _RC_NEAR); \
if (new_387controlword != old_387controlword) { \
__control87_2(new_387controlword, _MCW_PC | _MCW_RC, \
&out_387controlword, NULL); \
} \
} while (0)
#define _Py_SET_53BIT_PRECISION_END \
do { \
if (new_387controlword != old_387controlword) { \
__control87_2(old_387controlword, _MCW_PC | _MCW_RC, \
&out_387controlword, NULL); \
} \
} while (0)
#endif
// MC68881
#ifdef HAVE_GCC_ASM_FOR_MC68881
#define HAVE_PY_SET_53BIT_PRECISION 1
#define _Py_SET_53BIT_PRECISION_HEADER \
unsigned int old_fpcr, new_fpcr
#define _Py_SET_53BIT_PRECISION_START \
do { \
__asm__ ("fmove.l %%fpcr,%0" : "=g" (old_fpcr)); \
/* Set double precision / round to nearest. */ \
new_fpcr = (old_fpcr & ~0xf0) | 0x80; \
if (new_fpcr != old_fpcr) { \
__asm__ volatile ("fmove.l %0,%%fpcr" : : "g" (new_fpcr));\
} \
} while (0)
#define _Py_SET_53BIT_PRECISION_END \
do { \
if (new_fpcr != old_fpcr) { \
__asm__ volatile ("fmove.l %0,%%fpcr" : : "g" (old_fpcr)); \
} \
} while (0)
#endif
// Default definitions are empty
#ifndef _Py_SET_53BIT_PRECISION_HEADER
# define _Py_SET_53BIT_PRECISION_HEADER
# define _Py_SET_53BIT_PRECISION_START
# define _Py_SET_53BIT_PRECISION_END
#endif
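/* Illustrative sketch of the intended pattern (modeled on Python/pystrtod.c;
   the wrapper name is hypothetical, and _Py_dg_strtod() is declared in
   pycore_dtoa.h and only available when _PY_SHORT_FLOAT_REPR is 1): */
static inline double
_py_example_parse_double(const char *s, char **end)
{
    double result;
    _Py_SET_53BIT_PRECISION_HEADER;  /* declares saved FPU state, if any */
    _Py_SET_53BIT_PRECISION_START;   /* force 53-bit precision */
    result = _Py_dg_strtod(s, end);
    _Py_SET_53BIT_PRECISION_END;     /* restore the original FPU state */
    return result;
}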
//--- _PY_SHORT_FLOAT_REPR macro -------------------------------------------
// If we can't guarantee 53-bit precision, don't use the code
// in Python/dtoa.c, but fall back to standard code. This
// means that repr of a float will be long (17 significant digits).
//
// Realistically, there are two things that could go wrong:
//
// (1) doubles aren't IEEE 754 doubles, or
// (2) we're on x86 with the rounding precision set to 64-bits
// (extended precision), and we don't know how to change
// the rounding precision.
#if !defined(DOUBLE_IS_LITTLE_ENDIAN_IEEE754) && \
!defined(DOUBLE_IS_BIG_ENDIAN_IEEE754) && \
!defined(DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754)
# define _PY_SHORT_FLOAT_REPR 0
#endif
// Double rounding is symptomatic of use of extended precision on x86.
// If we're seeing double rounding, and we don't have any mechanism available
// for changing the FPU rounding precision, then don't use Python/dtoa.c.
#if defined(X87_DOUBLE_ROUNDING) && !defined(HAVE_PY_SET_53BIT_PRECISION)
# define _PY_SHORT_FLOAT_REPR 0
#endif
#ifndef _PY_SHORT_FLOAT_REPR
# define _PY_SHORT_FLOAT_REPR 1
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYMATH_H */

View File

@@ -0,0 +1,98 @@
#ifndef Py_INTERNAL_PYMEM_H
#define Py_INTERNAL_PYMEM_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pymem.h" // PyMemAllocatorName
typedef struct {
/* We tag each block with an API ID in order to tag API violations */
char api_id;
PyMemAllocatorEx alloc;
} debug_alloc_api_t;
struct _pymem_allocators {
PyThread_type_lock mutex;
struct {
PyMemAllocatorEx raw;
PyMemAllocatorEx mem;
PyMemAllocatorEx obj;
} standard;
struct {
debug_alloc_api_t raw;
debug_alloc_api_t mem;
debug_alloc_api_t obj;
} debug;
PyObjectArenaAllocator obj_arena;
};
/* Set the memory allocator of the specified domain to the default.
Save the old allocator into *old_alloc if it's non-NULL.
Return 0 on success, or return -1 if the domain is unknown. */
PyAPI_FUNC(int) _PyMem_SetDefaultAllocator(
PyMemAllocatorDomain domain,
PyMemAllocatorEx *old_alloc);
/* Special bytes broadcast into debug memory blocks at appropriate times.
Strings of these are unlikely to be valid addresses, floats, ints or
7-bit ASCII.
- PYMEM_CLEANBYTE: clean (newly allocated) memory
- PYMEM_DEADBYTE: dead (newly freed) memory
- PYMEM_FORBIDDENBYTE: untouchable bytes at each end of a block
Byte patterns 0xCB, 0xDB and 0xFB have been replaced with 0xCD, 0xDD and
0xFD to use the same values as Windows CRT debug malloc() and free().
If modified, _PyMem_IsPtrFreed() should be updated as well. */
#define PYMEM_CLEANBYTE 0xCD
#define PYMEM_DEADBYTE 0xDD
#define PYMEM_FORBIDDENBYTE 0xFD
/* Heuristic checking if a pointer value is newly allocated
(uninitialized), newly freed or NULL (is equal to zero).
The pointer is not dereferenced, only the pointer value is checked.
The heuristic relies on the debug hooks of the Python memory allocators,
which fill newly allocated memory with CLEANBYTE (0xCD) and newly freed
memory with DEADBYTE (0xDD). It also detects the "untouchable bytes"
marked with FORBIDDENBYTE (0xFD). */
static inline int _PyMem_IsPtrFreed(const void *ptr)
{
uintptr_t value = (uintptr_t)ptr;
#if SIZEOF_VOID_P == 8
return (value == 0
|| value == (uintptr_t)0xCDCDCDCDCDCDCDCD
|| value == (uintptr_t)0xDDDDDDDDDDDDDDDD
|| value == (uintptr_t)0xFDFDFDFDFDFDFDFD);
#elif SIZEOF_VOID_P == 4
return (value == 0
|| value == (uintptr_t)0xCDCDCDCD
|| value == (uintptr_t)0xDDDDDDDD
|| value == (uintptr_t)0xFDFDFDFD);
#else
# error "unknown pointer size"
#endif
}
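/* Illustrative only: how core code typically uses this heuristic in debug
   builds before dereferencing a cached pointer (assert() comes from
   <assert.h>, pulled in via Python.h). */
static inline void
_py_example_assert_alive(void *ptr)
{
    assert(ptr != NULL);
    assert(!_PyMem_IsPtrFreed(ptr));
}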
PyAPI_FUNC(int) _PyMem_GetAllocatorName(
const char *name,
PyMemAllocatorName *allocator);
/* Configure the Python memory allocators.
Pass PYMEM_ALLOCATOR_DEFAULT to use default allocators.
PYMEM_ALLOCATOR_NOT_SET does nothing. */
PyAPI_FUNC(int) _PyMem_SetupAllocators(PyMemAllocatorName allocator);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYMEM_H */

View File

@@ -0,0 +1,85 @@
#ifndef Py_INTERNAL_PYMEM_INIT_H
#define Py_INTERNAL_PYMEM_INIT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_pymem.h"
/********************************/
/* the allocators' initializers */
extern void * _PyMem_RawMalloc(void *, size_t);
extern void * _PyMem_RawCalloc(void *, size_t, size_t);
extern void * _PyMem_RawRealloc(void *, void *, size_t);
extern void _PyMem_RawFree(void *, void *);
#define PYRAW_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree}
#ifdef WITH_PYMALLOC
extern void* _PyObject_Malloc(void *, size_t);
extern void* _PyObject_Calloc(void *, size_t, size_t);
extern void _PyObject_Free(void *, void *);
extern void* _PyObject_Realloc(void *, void *, size_t);
# define PYOBJ_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free}
#else
# define PYOBJ_ALLOC PYRAW_ALLOC
#endif // WITH_PYMALLOC
#define PYMEM_ALLOC PYOBJ_ALLOC
extern void* _PyMem_DebugRawMalloc(void *, size_t);
extern void* _PyMem_DebugRawCalloc(void *, size_t, size_t);
extern void* _PyMem_DebugRawRealloc(void *, void *, size_t);
extern void _PyMem_DebugRawFree(void *, void *);
extern void* _PyMem_DebugMalloc(void *, size_t);
extern void* _PyMem_DebugCalloc(void *, size_t, size_t);
extern void* _PyMem_DebugRealloc(void *, void *, size_t);
extern void _PyMem_DebugFree(void *, void *);
#define PYDBGRAW_ALLOC(runtime) \
{&(runtime).allocators.debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree}
#define PYDBGMEM_ALLOC(runtime) \
{&(runtime).allocators.debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree}
#define PYDBGOBJ_ALLOC(runtime) \
{&(runtime).allocators.debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree}
extern void * _PyMem_ArenaAlloc(void *, size_t);
extern void _PyMem_ArenaFree(void *, void *, size_t);
#ifdef Py_DEBUG
# define _pymem_allocators_standard_INIT(runtime) \
{ \
PYDBGRAW_ALLOC(runtime), \
PYDBGMEM_ALLOC(runtime), \
PYDBGOBJ_ALLOC(runtime), \
}
#else
# define _pymem_allocators_standard_INIT(runtime) \
{ \
PYRAW_ALLOC, \
PYMEM_ALLOC, \
PYOBJ_ALLOC, \
}
#endif
#define _pymem_allocators_debug_INIT \
{ \
{'r', PYRAW_ALLOC}, \
{'m', PYMEM_ALLOC}, \
{'o', PYOBJ_ALLOC}, \
}
#define _pymem_allocators_obj_arena_INIT \
{ NULL, _PyMem_ArenaAlloc, _PyMem_ArenaFree }
#ifdef __cplusplus
}
#endif
#endif // !Py_INTERNAL_PYMEM_INIT_H

View File

@@ -0,0 +1,163 @@
#ifndef Py_INTERNAL_PYSTATE_H
#define Py_INTERNAL_PYSTATE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_runtime.h" /* PyRuntimeState */
/* Check if the current thread is the main thread.
Use _Py_IsMainInterpreter() to check if it's the main interpreter. */
static inline int
_Py_IsMainThread(void)
{
unsigned long thread = PyThread_get_thread_ident();
return (thread == _PyRuntime.main_thread);
}
static inline PyInterpreterState *
_PyInterpreterState_Main(void)
{
return _PyRuntime.interpreters.main;
}
static inline int
_Py_IsMainInterpreter(PyInterpreterState *interp)
{
return (interp == _PyInterpreterState_Main());
}
static inline int
_Py_IsMainInterpreterFinalizing(PyInterpreterState *interp)
{
return (_PyRuntimeState_GetFinalizing(interp->runtime) != NULL &&
interp == &interp->runtime->_main_interpreter);
}
static inline const PyConfig *
_Py_GetMainConfig(void)
{
PyInterpreterState *interp = _PyInterpreterState_Main();
if (interp == NULL) {
return NULL;
}
return _PyInterpreterState_GetConfig(interp);
}
/* Only handle signals on the main thread of the main interpreter. */
static inline int
_Py_ThreadCanHandleSignals(PyInterpreterState *interp)
{
return (_Py_IsMainThread() && _Py_IsMainInterpreter(interp));
}
/* Variable and static inline functions for in-line access to current thread
and interpreter state */
#if defined(HAVE_THREAD_LOCAL) && !defined(Py_BUILD_CORE_MODULE)
extern _Py_thread_local PyThreadState *_Py_tss_tstate;
#endif
PyAPI_DATA(PyThreadState *) _PyThreadState_GetCurrent(void);
/* Get the current Python thread state.
This function is unsafe: it does not check for error and it can return NULL.
The caller must hold the GIL.
See also PyThreadState_Get() and _PyThreadState_UncheckedGet(). */
static inline PyThreadState*
_PyThreadState_GET(void)
{
#if defined(HAVE_THREAD_LOCAL) && !defined(Py_BUILD_CORE_MODULE)
return _Py_tss_tstate;
#else
return _PyThreadState_GetCurrent();
#endif
}
static inline void
_Py_EnsureFuncTstateNotNULL(const char *func, PyThreadState *tstate)
{
if (tstate == NULL) {
_Py_FatalErrorFunc(func,
"the function must be called with the GIL held, "
"after Python initialization and before Python finalization, "
"but the GIL is released (the current Python thread state is NULL)");
}
}
// Call Py_FatalError() if tstate is NULL
#define _Py_EnsureTstateNotNULL(tstate) \
_Py_EnsureFuncTstateNotNULL(__func__, (tstate))
/* Get the current interpreter state.
The function is unsafe: it does not check for error and it can return NULL.
The caller must hold the GIL.
See also _PyInterpreterState_Get()
and _PyGILState_GetInterpreterStateUnsafe(). */
static inline PyInterpreterState* _PyInterpreterState_GET(void) {
PyThreadState *tstate = _PyThreadState_GET();
#ifdef Py_DEBUG
_Py_EnsureTstateNotNULL(tstate);
#endif
return tstate->interp;
}
// PyThreadState functions
PyAPI_FUNC(PyThreadState *) _PyThreadState_New(PyInterpreterState *interp);
PyAPI_FUNC(void) _PyThreadState_Bind(PyThreadState *tstate);
// We keep this around exclusively for stable ABI compatibility.
PyAPI_FUNC(void) _PyThreadState_Init(
PyThreadState *tstate);
PyAPI_FUNC(void) _PyThreadState_DeleteExcept(PyThreadState *tstate);
/* Other */
PyAPI_FUNC(PyThreadState *) _PyThreadState_Swap(
_PyRuntimeState *runtime,
PyThreadState *newts);
PyAPI_FUNC(PyStatus) _PyInterpreterState_Enable(_PyRuntimeState *runtime);
#ifdef HAVE_FORK
extern PyStatus _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime);
extern void _PySignal_AfterFork(void);
#endif
PyAPI_FUNC(int) _PyState_AddModule(
PyThreadState *tstate,
PyObject* module,
PyModuleDef* def);
PyAPI_FUNC(int) _PyOS_InterruptOccurred(PyThreadState *tstate);
#define HEAD_LOCK(runtime) \
PyThread_acquire_lock((runtime)->interpreters.mutex, WAIT_LOCK)
#define HEAD_UNLOCK(runtime) \
PyThread_release_lock((runtime)->interpreters.mutex)
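/* Illustrative sketch: walking the interpreter list under the head lock.
   PyInterpreterState's 'next' field is defined in pycore_interp.h. */
static inline Py_ssize_t
_py_example_count_interpreters(_PyRuntimeState *runtime)
{
    Py_ssize_t count = 0;
    HEAD_LOCK(runtime);
    for (PyInterpreterState *interp = runtime->interpreters.head;
         interp != NULL;
         interp = interp->next)
    {
        count++;
    }
    HEAD_UNLOCK(runtime);
    return count;
}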
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYSTATE_H */

View File

@@ -0,0 +1,81 @@
#ifndef Py_INTERNAL_PYTHREAD_H
#define Py_INTERNAL_PYTHREAD_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#ifndef _POSIX_THREADS
/* This means pthreads are not implemented in the libc headers, hence the
macro is not present in unistd.h. But they can still be implemented as an
external library (e.g. GNU pth in pthread emulation). */
# ifdef HAVE_PTHREAD_H
# include <pthread.h> /* _POSIX_THREADS */
# endif
# ifndef _POSIX_THREADS
/* Check if we're running on HP-UX and _SC_THREADS is defined. If so, then
enough of the POSIX threads package is implemented to support Python
threads.
This is valid for HP-UX 11.23 running on an ia64 system. If needed, add
a check of __ia64 to verify that we're running on an ia64 system instead
of a pa-risc system.
*/
# ifdef __hpux
# ifdef _SC_THREADS
# define _POSIX_THREADS
# endif
# endif
# endif /* _POSIX_THREADS */
#endif /* _POSIX_THREADS */
#if defined(_POSIX_THREADS) || defined(HAVE_PTHREAD_STUBS)
# define _USE_PTHREADS
#endif
#if defined(_USE_PTHREADS) && defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
// monotonic is supported statically; it doesn't mean it works at runtime.
# define CONDATTR_MONOTONIC
#endif
#if defined(HAVE_PTHREAD_STUBS)
// pthread_key
struct py_stub_tls_entry {
bool in_use;
void *value;
};
#endif
struct _pythread_runtime_state {
int initialized;
#ifdef _USE_PTHREADS
// This matches when thread_pthread.h is used.
struct {
/* NULL when pthread_condattr_setclock(CLOCK_MONOTONIC) is not supported. */
pthread_condattr_t *ptr;
# ifdef CONDATTR_MONOTONIC
/* The value to which condattr_monotonic is set. */
pthread_condattr_t val;
# endif
} _condattr_monotonic;
#endif // _USE_PTHREADS
#if defined(HAVE_PTHREAD_STUBS)
struct {
struct py_stub_tls_entry tls_entries[PTHREAD_KEYS_MAX];
} stubs;
#endif
};
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYTHREAD_H */

View File

@@ -0,0 +1,21 @@
#ifndef Py_INTERNAL_RANGE_H
#define Py_INTERNAL_RANGE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
typedef struct {
PyObject_HEAD
long start;
long step;
long len;
} _PyRangeIterObject;
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_RANGE_H */

View File

@@ -0,0 +1,215 @@
#ifndef Py_INTERNAL_RUNTIME_H
#define Py_INTERNAL_RUNTIME_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_atexit.h" // struct atexit_runtime_state
#include "pycore_atomic.h" /* _Py_atomic_address */
#include "pycore_ceval_state.h" // struct _ceval_runtime_state
#include "pycore_floatobject.h" // struct _Py_float_runtime_state
#include "pycore_faulthandler.h" // struct _faulthandler_runtime_state
#include "pycore_global_objects.h" // struct _Py_global_objects
#include "pycore_import.h" // struct _import_runtime_state
#include "pycore_interp.h" // PyInterpreterState
#include "pycore_object_state.h" // struct _py_object_runtime_state
#include "pycore_parser.h" // struct _parser_runtime_state
#include "pycore_pymem.h" // struct _pymem_allocators
#include "pycore_pyhash.h" // struct pyhash_runtime_state
#include "pycore_pythread.h" // struct _pythread_runtime_state
#include "pycore_signal.h" // struct _signals_runtime_state
#include "pycore_time.h" // struct _time_runtime_state
#include "pycore_tracemalloc.h" // struct _tracemalloc_runtime_state
#include "pycore_typeobject.h" // struct types_runtime_state
#include "pycore_unicodeobject.h" // struct _Py_unicode_runtime_ids
struct _getargs_runtime_state {
PyThread_type_lock mutex;
struct _PyArg_Parser *static_parsers;
};
/* GIL state */
struct _gilstate_runtime_state {
/* bpo-26558: Flag to disable PyGILState_Check().
If set to non-zero, PyGILState_Check() always returns 1. */
int check_enabled;
/* The single PyInterpreterState used by this process'
GILState implementation
*/
/* TODO: Given interp_main, it may be possible to kill this ref */
PyInterpreterState *autoInterpreterState;
};
/* Runtime audit hook state */
typedef struct _Py_AuditHookEntry {
struct _Py_AuditHookEntry *next;
Py_AuditHookFunction hookCFunction;
void *userData;
} _Py_AuditHookEntry;
/* Full Python runtime state */
/* _PyRuntimeState holds the global state for the CPython runtime.
That data is exposed in the internal API as a static variable (_PyRuntime).
*/
typedef struct pyruntimestate {
/* Has been initialized to a safe state.
In order to be effective, this must be set to 0 during or right
after allocation. */
int _initialized;
/* Is running Py_PreInitialize()? */
int preinitializing;
/* Is Python preinitialized? Set to 1 by Py_PreInitialize() */
int preinitialized;
/* Is Python core initialized? Set to 1 by _Py_InitializeCore() */
int core_initialized;
/* Is Python fully initialized? Set to 1 by Py_Initialize() */
int initialized;
/* Set by Py_FinalizeEx(). Only reset to NULL if Py_Initialize()
is called again.
Use _PyRuntimeState_GetFinalizing() and _PyRuntimeState_SetFinalizing()
to access it, don't access it directly. */
_Py_atomic_address _finalizing;
struct pyinterpreters {
PyThread_type_lock mutex;
/* The linked list of interpreters, newest first. */
PyInterpreterState *head;
/* The runtime's initial interpreter, which has a special role
in the operation of the runtime. It is also often the only
interpreter. */
PyInterpreterState *main;
/* next_id is an auto-numbered sequence of small
integers. It gets initialized in _PyInterpreterState_Enable(),
which is called in Py_Initialize(), and used in
PyInterpreterState_New(). A negative interpreter ID
indicates an error occurred. The main interpreter will
always have an ID of 0. Overflow results in a RuntimeError.
If that becomes a problem later then we can adjust, e.g. by
using a Python int. */
int64_t next_id;
} interpreters;
unsigned long main_thread;
/* ---------- IMPORTANT ---------------------------
The fields above this line are declared as early as
possible to facilitate out-of-process observability
tools. */
// XXX Remove this field once we have a tp_* slot.
struct _xidregistry {
PyThread_type_lock mutex;
struct _xidregitem *head;
} xidregistry;
struct _pymem_allocators allocators;
struct _obmalloc_global_state obmalloc;
struct pyhash_runtime_state pyhash_state;
struct _time_runtime_state time;
struct _pythread_runtime_state threads;
struct _signals_runtime_state signals;
/* Used for the thread state bound to the current thread. */
Py_tss_t autoTSSkey;
/* Used instead of PyThreadState.trash when there is no current tstate. */
Py_tss_t trashTSSkey;
PyWideStringList orig_argv;
struct _parser_runtime_state parser;
struct _atexit_runtime_state atexit;
struct _import_runtime_state imports;
struct _ceval_runtime_state ceval;
struct _gilstate_runtime_state gilstate;
struct _getargs_runtime_state getargs;
struct _fileutils_state fileutils;
struct _faulthandler_runtime_state faulthandler;
struct _tracemalloc_runtime_state tracemalloc;
PyPreConfig preconfig;
// Audit values must be preserved when Py_Initialize()/Py_Finalize()
// is called multiple times.
Py_OpenCodeHookFunction open_code_hook;
void *open_code_userdata;
struct {
PyThread_type_lock mutex;
_Py_AuditHookEntry *head;
} audit_hooks;
struct _py_object_runtime_state object_state;
struct _Py_float_runtime_state float_state;
struct _Py_unicode_runtime_state unicode_state;
struct _types_runtime_state types;
/* All the objects that are shared by the runtime's interpreters. */
struct _Py_static_objects static_objects;
/* The following fields are here to avoid allocation during init.
The data is exposed through _PyRuntimeState pointer fields.
These fields should not be accessed directly outside of init.
All other _PyRuntimeState pointer fields are populated when
needed and default to NULL.
For now there are some exceptions to that rule, which require
allocation during init. These will be addressed on a case-by-case
basis. Most notably, we don't pre-allocate the several mutex
(PyThread_type_lock) fields, because on Windows we only ever get
a pointer type.
*/
/* PyInterpreterState.interpreters.main */
PyInterpreterState _main_interpreter;
} _PyRuntimeState;
/* other API */
PyAPI_DATA(_PyRuntimeState) _PyRuntime;
PyAPI_FUNC(PyStatus) _PyRuntimeState_Init(_PyRuntimeState *runtime);
PyAPI_FUNC(void) _PyRuntimeState_Fini(_PyRuntimeState *runtime);
#ifdef HAVE_FORK
extern PyStatus _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime);
#endif
/* Initialize _PyRuntimeState.
Return _PyStatus_OK() on success, or a status carrying an error message
on failure. */
PyAPI_FUNC(PyStatus) _PyRuntime_Initialize(void);
PyAPI_FUNC(void) _PyRuntime_Finalize(void);
static inline PyThreadState*
_PyRuntimeState_GetFinalizing(_PyRuntimeState *runtime) {
return (PyThreadState*)_Py_atomic_load_relaxed(&runtime->_finalizing);
}
static inline void
_PyRuntimeState_SetFinalizing(_PyRuntimeState *runtime, PyThreadState *tstate) {
_Py_atomic_store_relaxed(&runtime->_finalizing, (uintptr_t)tstate);
}
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_RUNTIME_H */

View File

@@ -0,0 +1,193 @@
#ifndef Py_INTERNAL_RUNTIME_INIT_H
#define Py_INTERNAL_RUNTIME_INIT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_long.h"
#include "pycore_object.h"
#include "pycore_parser.h"
#include "pycore_pymem_init.h"
#include "pycore_obmalloc_init.h"
extern PyTypeObject _PyExc_MemoryError;
/* The static initializers defined here should only be used
in the runtime init code (in pystate.c and pylifecycle.c). */
#define _PyRuntimeState_INIT(runtime) \
{ \
.allocators = { \
.standard = _pymem_allocators_standard_INIT(runtime), \
.debug = _pymem_allocators_debug_INIT, \
.obj_arena = _pymem_allocators_obj_arena_INIT, \
}, \
.obmalloc = _obmalloc_global_state_INIT, \
.pyhash_state = pyhash_state_INIT, \
.signals = _signals_RUNTIME_INIT, \
.interpreters = { \
/* This prevents interpreters from getting created \
until _PyInterpreterState_Enable() is called. */ \
.next_id = -1, \
}, \
/* A TSS key must be initialized with Py_tss_NEEDS_INIT \
in accordance with the specification. */ \
.autoTSSkey = Py_tss_NEEDS_INIT, \
.parser = _parser_runtime_state_INIT, \
.ceval = { \
.perf = _PyEval_RUNTIME_PERF_INIT, \
}, \
.gilstate = { \
.check_enabled = 1, \
}, \
.fileutils = { \
.force_ascii = -1, \
}, \
.faulthandler = _faulthandler_runtime_state_INIT, \
.tracemalloc = _tracemalloc_runtime_state_INIT, \
.float_state = { \
.float_format = _py_float_format_unknown, \
.double_format = _py_float_format_unknown, \
}, \
.types = { \
.next_version_tag = 1, \
}, \
.static_objects = { \
.singletons = { \
.small_ints = _Py_small_ints_INIT, \
.bytes_empty = _PyBytes_SIMPLE_INIT(0, 0), \
.bytes_characters = _Py_bytes_characters_INIT, \
.strings = { \
.literals = _Py_str_literals_INIT, \
.identifiers = _Py_str_identifiers_INIT, \
.ascii = _Py_str_ascii_INIT, \
.latin1 = _Py_str_latin1_INIT, \
}, \
.tuple_empty = { \
.ob_base = _PyVarObject_HEAD_INIT(&PyTuple_Type, 0) \
}, \
.hamt_bitmap_node_empty = { \
.ob_base = _PyVarObject_HEAD_INIT(&_PyHamt_BitmapNode_Type, 0) \
}, \
.context_token_missing = { \
.ob_base = _PyObject_HEAD_INIT(&_PyContextTokenMissing_Type) \
}, \
}, \
}, \
._main_interpreter = _PyInterpreterState_INIT(runtime._main_interpreter), \
}
#define _PyInterpreterState_INIT(INTERP) \
{ \
.id_refcount = -1, \
.imports = IMPORTS_INIT, \
.obmalloc = _obmalloc_state_INIT(INTERP.obmalloc), \
.ceval = { \
.recursion_limit = Py_DEFAULT_RECURSION_LIMIT, \
}, \
.gc = { \
.enabled = 1, \
.generations = { \
/* .head is set in _PyGC_InitState(). */ \
{ .threshold = 700, }, \
{ .threshold = 10, }, \
{ .threshold = 10, }, \
}, \
}, \
.object_state = _py_object_state_INIT(INTERP), \
.dtoa = _dtoa_state_INIT(&(INTERP)), \
.dict_state = _dict_state_INIT, \
.func_state = { \
.next_version = 1, \
}, \
.types = { \
.next_version_tag = _Py_TYPE_BASE_VERSION_TAG, \
}, \
.static_objects = { \
.singletons = { \
._not_used = 1, \
.hamt_empty = { \
.ob_base = _PyObject_HEAD_INIT(&_PyHamt_Type) \
.h_root = (PyHamtNode*)&_Py_SINGLETON(hamt_bitmap_node_empty), \
}, \
.last_resort_memory_error = { \
_PyObject_HEAD_INIT(&_PyExc_MemoryError) \
}, \
}, \
}, \
._initial_thread = _PyThreadState_INIT, \
}
#define _PyThreadState_INIT \
{ \
.py_recursion_limit = Py_DEFAULT_RECURSION_LIMIT, \
.context_ver = 1, \
}
#ifdef Py_TRACE_REFS
# define _py_object_state_INIT(INTERP) \
{ \
.refchain = {&INTERP.object_state.refchain, &INTERP.object_state.refchain}, \
}
#else
# define _py_object_state_INIT(INTERP) \
{ 0 }
#endif
// global objects
#define _PyBytes_SIMPLE_INIT(CH, LEN) \
{ \
_PyVarObject_HEAD_INIT(&PyBytes_Type, (LEN)) \
.ob_shash = -1, \
.ob_sval = { (CH) }, \
}
#define _PyBytes_CHAR_INIT(CH) \
{ \
_PyBytes_SIMPLE_INIT((CH), 1) \
}
#define _PyUnicode_ASCII_BASE_INIT(LITERAL, ASCII) \
{ \
.ob_base = _PyObject_HEAD_INIT(&PyUnicode_Type) \
.length = sizeof(LITERAL) - 1, \
.hash = -1, \
.state = { \
.kind = 1, \
.compact = 1, \
.ascii = (ASCII), \
}, \
}
#define _PyASCIIObject_INIT(LITERAL) \
{ \
._ascii = _PyUnicode_ASCII_BASE_INIT((LITERAL), 1), \
._data = (LITERAL) \
}
#define INIT_STR(NAME, LITERAL) \
._py_ ## NAME = _PyASCIIObject_INIT(LITERAL)
#define INIT_ID(NAME) \
._py_ ## NAME = _PyASCIIObject_INIT(#NAME)
#define _PyUnicode_LATIN1_INIT(LITERAL, UTF8) \
{ \
._latin1 = { \
._base = _PyUnicode_ASCII_BASE_INIT((LITERAL), 0), \
.utf8 = (UTF8), \
.utf8_length = sizeof(UTF8) - 1, \
}, \
._data = (LITERAL), \
}
#include "pycore_runtime_init_generated.h"
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_RUNTIME_INIT_H */

File diff suppressed because it is too large

View File

@@ -0,0 +1,98 @@
// Define Py_NSIG constant for signal handling.
#ifndef Py_INTERNAL_SIGNAL_H
#define Py_INTERNAL_SIGNAL_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_atomic.h" // _Py_atomic_address
#include <signal.h> // NSIG
#ifdef _SIG_MAXSIG
// gh-91145: On FreeBSD, <signal.h> defines NSIG as 32: it doesn't include
// realtime signals: [SIGRTMIN,SIGRTMAX]. Use _SIG_MAXSIG instead. For
// example on x86-64 FreeBSD 13, SIGRTMAX is 126 and _SIG_MAXSIG is 128.
# define Py_NSIG _SIG_MAXSIG
#elif defined(NSIG)
# define Py_NSIG NSIG
#elif defined(_NSIG)
# define Py_NSIG _NSIG // BSD/SysV
#elif defined(_SIGMAX)
# define Py_NSIG (_SIGMAX + 1) // QNX
#elif defined(SIGMAX)
# define Py_NSIG (SIGMAX + 1) // djgpp
#else
# define Py_NSIG 64 // Use a reasonable default value
#endif
#define INVALID_FD (-1)
struct _signals_runtime_state {
volatile struct {
_Py_atomic_int tripped;
/* func is atomic to ensure that PyErr_SetInterrupt is async-signal-safe
* (even though it would probably be otherwise, anyway).
*/
_Py_atomic_address func;
} handlers[Py_NSIG];
volatile struct {
#ifdef MS_WINDOWS
/* This would be "SOCKET fd" if <winsock2.h> were always included.
It isn't, so we must cast to SOCKET where appropriate. */
volatile int fd;
#elif defined(__VXWORKS__)
int fd;
#else
sig_atomic_t fd;
#endif
int warn_on_full_buffer;
#ifdef MS_WINDOWS
int use_send;
#endif
} wakeup;
/* Speed up sigcheck() when none tripped */
_Py_atomic_int is_tripped;
/* These objects necessarily belong to the main interpreter. */
PyObject *default_handler;
PyObject *ignore_handler;
#ifdef MS_WINDOWS
/* This would be "HANDLE sigint_event" if <windows.h> were always included.
It isn't, so we must cast to HANDLE everywhere "sigint_event" is used. */
void *sigint_event;
#endif
/* True if the main interpreter thread exited due to an unhandled
* KeyboardInterrupt exception, suggesting the user pressed ^C. */
int unhandled_keyboard_interrupt;
};
#ifdef MS_WINDOWS
# define _signals_WAKEUP_INIT \
{.fd = INVALID_FD, .warn_on_full_buffer = 1, .use_send = 0}
#else
# define _signals_WAKEUP_INIT \
{.fd = INVALID_FD, .warn_on_full_buffer = 1}
#endif
#define _signals_RUNTIME_INIT \
{ \
.wakeup = _signals_WAKEUP_INIT, \
}
#ifdef __cplusplus
}
#endif
#endif // !Py_INTERNAL_SIGNAL_H

View File

@@ -0,0 +1,22 @@
#ifndef Py_INTERNAL_SLICEOBJECT_H
#define Py_INTERNAL_SLICEOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* runtime lifecycle */
extern void _PySlice_Fini(PyInterpreterState *);
extern PyObject *
_PyBuildSlice_ConsumeRefs(PyObject *start, PyObject *stop);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_SLICEOBJECT_H */

View File

@@ -0,0 +1,36 @@
#ifndef Py_INTERNAL_STRHEX_H
#define Py_INTERNAL_STRHEX_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
// Returns a str() containing the hex representation of argbuf.
PyAPI_FUNC(PyObject*) _Py_strhex(
const char* argbuf,
const Py_ssize_t arglen);
// Returns a bytes() containing the ASCII hex representation of argbuf.
PyAPI_FUNC(PyObject*) _Py_strhex_bytes(
const char* argbuf,
const Py_ssize_t arglen);
// These variants include support for a separator between every N bytes:
PyAPI_FUNC(PyObject*) _Py_strhex_with_sep(
const char* argbuf,
const Py_ssize_t arglen,
PyObject* sep,
const int bytes_per_group);
PyAPI_FUNC(PyObject*) _Py_strhex_bytes_with_sep(
const char* argbuf,
const Py_ssize_t arglen,
PyObject* sep,
const int bytes_per_group);
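/* For example (illustrative; this mirrors bytes.hex(sep, bytes_per_group)
   at the Python level): with argbuf = "\xb9\x01\xef", arglen = 3, sep = "-"
   and bytes_per_group = 1, _Py_strhex_with_sep() returns the str
   "b9-01-ef". */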
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_STRHEX_H */

View File

@@ -0,0 +1,39 @@
#ifndef Py_INTERNAL_STRUCTSEQ_H
#define Py_INTERNAL_STRUCTSEQ_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* other API */
PyAPI_FUNC(PyTypeObject *) _PyStructSequence_NewType(
PyStructSequence_Desc *desc,
unsigned long tp_flags);
extern int _PyStructSequence_InitBuiltinWithFlags(
PyInterpreterState *interp,
PyTypeObject *type,
PyStructSequence_Desc *desc,
unsigned long tp_flags);
static inline int
_PyStructSequence_InitBuiltin(PyInterpreterState *interp,
PyTypeObject *type,
PyStructSequence_Desc *desc)
{
return _PyStructSequence_InitBuiltinWithFlags(interp, type, desc, 0);
}
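/* Illustrative sketch of registering a built-in struct sequence type during
   interpreter init; every name below is hypothetical. */
static PyStructSequence_Field _py_example_fields[] = {
    {"first", "the first field"},
    {"second", "the second field"},
    {NULL, NULL},
};
static PyStructSequence_Desc _py_example_desc = {
    "example.result",           /* name */
    "An example result type.",  /* doc */
    _py_example_fields,
    2,                          /* n_in_sequence */
};
static PyTypeObject _PyExampleResult_Type;
/* ... then, somewhere in the interpreter init path:
   if (_PyStructSequence_InitBuiltin(interp, &_PyExampleResult_Type,
                                     &_py_example_desc) < 0) {
       goto error;
   }
*/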
extern void _PyStructSequence_FiniBuiltin(
PyInterpreterState *interp,
PyTypeObject *type);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_STRUCTSEQ_H */

View File

@@ -0,0 +1,158 @@
#ifndef Py_INTERNAL_SYMTABLE_H
#define Py_INTERNAL_SYMTABLE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
struct _mod; // Type defined in pycore_ast.h
typedef enum _block_type {
FunctionBlock, ClassBlock, ModuleBlock,
// Used for annotations if 'from __future__ import annotations' is active.
// Annotation blocks cannot bind names and are not evaluated.
AnnotationBlock,
// Used for generics and type aliases. These work mostly like functions
// (see PEP 695 for details). The three different blocks function identically;
// they are different enum entries only so that error messages can be more
// precise.
TypeVarBoundBlock, TypeAliasBlock, TypeParamBlock
} _Py_block_ty;
typedef enum _comprehension_type {
NoComprehension = 0,
ListComprehension = 1,
DictComprehension = 2,
SetComprehension = 3,
GeneratorExpression = 4
} _Py_comprehension_ty;
struct _symtable_entry;
struct symtable {
PyObject *st_filename; /* name of file being compiled,
decoded from the filesystem encoding */
struct _symtable_entry *st_cur; /* current symbol table entry */
struct _symtable_entry *st_top; /* symbol table entry for module */
PyObject *st_blocks; /* dict: map AST node addresses
* to symbol table entries */
PyObject *st_stack; /* list: stack of namespace info */
PyObject *st_global; /* borrowed ref to st_top->ste_symbols */
int st_nblocks; /* number of blocks used. kept for
consistency with the corresponding
compiler structure */
PyObject *st_private; /* name of current class or NULL */
PyFutureFeatures *st_future; /* module's future features that affect
the symbol table */
int recursion_depth; /* current recursion depth */
int recursion_limit; /* recursion limit */
};
typedef struct _symtable_entry {
PyObject_HEAD
PyObject *ste_id; /* int: key in ste_table->st_blocks */
PyObject *ste_symbols; /* dict: variable names to flags */
PyObject *ste_name; /* string: name of current block */
PyObject *ste_varnames; /* list of function parameters */
PyObject *ste_children; /* list of child blocks */
PyObject *ste_directives;/* locations of global and nonlocal statements */
_Py_block_ty ste_type;
int ste_nested; /* true if block is nested */
unsigned ste_free : 1; /* true if block has free variables */
unsigned ste_child_free : 1; /* true if a child block has free vars,
including free refs to globals */
unsigned ste_generator : 1; /* true if namespace is a generator */
unsigned ste_coroutine : 1; /* true if namespace is a coroutine */
_Py_comprehension_ty ste_comprehension; /* Kind of comprehension (if any) */
unsigned ste_varargs : 1; /* true if block has varargs */
unsigned ste_varkeywords : 1; /* true if block has varkeywords */
unsigned ste_returns_value : 1; /* true if namespace uses return with
an argument */
unsigned ste_needs_class_closure : 1; /* for class scopes, true if a
closure over __class__
should be created */
unsigned ste_needs_classdict : 1; /* for class scopes, true if a closure
over the class dict should be created */
unsigned ste_comp_inlined : 1; /* true if this comprehension is inlined */
unsigned ste_comp_iter_target : 1; /* true if visiting comprehension target */
unsigned ste_can_see_class_scope : 1; /* true if this block can see names bound in an
enclosing class scope */
int ste_comp_iter_expr; /* non-zero if visiting a comprehension range expression */
int ste_lineno; /* first line of block */
int ste_col_offset; /* offset of first line of block */
int ste_end_lineno; /* end line of block */
int ste_end_col_offset; /* end offset of block */
int ste_opt_lineno; /* lineno of last exec or import * */
int ste_opt_col_offset; /* offset of last exec or import * */
struct symtable *ste_table;
} PySTEntryObject;
extern PyTypeObject PySTEntry_Type;
#define PySTEntry_Check(op) Py_IS_TYPE((op), &PySTEntry_Type)
extern long _PyST_GetSymbol(PySTEntryObject *, PyObject *);
extern int _PyST_GetScope(PySTEntryObject *, PyObject *);
extern int _PyST_IsFunctionLike(PySTEntryObject *);
extern struct symtable* _PySymtable_Build(
struct _mod *mod,
PyObject *filename,
PyFutureFeatures *future);
PyAPI_FUNC(PySTEntryObject *) PySymtable_Lookup(struct symtable *, void *);
extern void _PySymtable_Free(struct symtable *);
extern PyObject* _Py_Mangle(PyObject *p, PyObject *name);
/* Flags for def-use information */
#define DEF_GLOBAL 1 /* global stmt */
#define DEF_LOCAL 2 /* assignment in code block */
#define DEF_PARAM 2<<1 /* formal parameter */
#define DEF_NONLOCAL 2<<2 /* nonlocal stmt */
#define USE 2<<3 /* name is used */
#define DEF_FREE 2<<4 /* name used but not defined in nested block */
#define DEF_FREE_CLASS 2<<5 /* free variable from class's method */
#define DEF_IMPORT 2<<6 /* assignment occurred via import */
#define DEF_ANNOT 2<<7 /* this name is annotated */
#define DEF_COMP_ITER 2<<8 /* this name is a comprehension iteration variable */
#define DEF_TYPE_PARAM 2<<9 /* this name is a type parameter */
#define DEF_COMP_CELL 2<<10 /* this name is a cell in an inlined comprehension */
#define DEF_BOUND (DEF_LOCAL | DEF_PARAM | DEF_IMPORT)
/* GLOBAL_EXPLICIT and GLOBAL_IMPLICIT are used internally by the symbol
table. GLOBAL is returned from _PyST_GetScope() for either of them.
It is stored in ste_symbols at bits 13-16.
*/
#define SCOPE_OFFSET 12
#define SCOPE_MASK (DEF_GLOBAL | DEF_LOCAL | DEF_PARAM | DEF_NONLOCAL)
#define LOCAL 1
#define GLOBAL_EXPLICIT 2
#define GLOBAL_IMPLICIT 3
#define FREE 4
#define CELL 5
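/* Illustrative: how a def-use entry is unpacked. The low bits carry the
   DEF_* flags; the scope value sits SCOPE_OFFSET bits up. This is the
   pattern behind _PyST_GetScope(). */
static inline int
_py_example_scope_of(long symbol_flags)
{
    return (int)((symbol_flags >> SCOPE_OFFSET) & SCOPE_MASK);
}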
#define GENERATOR 1
#define GENERATOR_EXPRESSION 2
// Used by symtablemodule.c
extern struct symtable* _Py_SymtableStringObjectFlags(
const char *str,
PyObject *filename,
int start,
PyCompilerFlags *flags);
int _PyFuture_FromAST(
struct _mod * mod,
PyObject *filename,
PyFutureFeatures* futures);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_SYMTABLE_H */

View File

@@ -0,0 +1,29 @@
#ifndef Py_INTERNAL_SYSMODULE_H
#define Py_INTERNAL_SYSMODULE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
PyAPI_FUNC(int) _PySys_Audit(
PyThreadState *tstate,
const char *event,
const char *argFormat,
...);
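/* Illustrative: raising an audit event with two arguments. The format
   string follows Py_BuildValue(); the call returns -1 with an exception
   set if a hook rejects the event. The event name here is hypothetical. */
static inline int
_py_example_audit(PyThreadState *tstate, PyObject *path, int flags)
{
    return _PySys_Audit(tstate, "example.open", "Oi", path, flags);
}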
/* We want minimal exposure of this function, so use extern rather than
PyAPI_FUNC() to not export the symbol. */
extern void _PySys_ClearAuditHooks(PyThreadState *tstate);
PyAPI_FUNC(int) _PySys_SetAttr(PyObject *, PyObject *);
extern int _PySys_ClearAttrString(PyInterpreterState *interp,
const char *name, int verbose);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_SYSMODULE_H */

View File

@@ -0,0 +1,25 @@
#ifndef Py_INTERNAL_TIME_H
#define Py_INTERNAL_TIME_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
struct _time_runtime_state {
#ifdef HAVE_TIMES
int ticks_per_second_initialized;
long ticks_per_second;
#else
int _not_used;
#endif
};
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_TIME_H */

View File

@@ -0,0 +1,108 @@
/* Auto-generated by Tools/build/generate_token.py */
/* Token types */
#ifndef Py_INTERNAL_TOKEN_H
#define Py_INTERNAL_TOKEN_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#undef TILDE /* Prevent clash of our definition with system macro. Ex AIX, ioctl.h */
#define ENDMARKER 0
#define NAME 1
#define NUMBER 2
#define STRING 3
#define NEWLINE 4
#define INDENT 5
#define DEDENT 6
#define LPAR 7
#define RPAR 8
#define LSQB 9
#define RSQB 10
#define COLON 11
#define COMMA 12
#define SEMI 13
#define PLUS 14
#define MINUS 15
#define STAR 16
#define SLASH 17
#define VBAR 18
#define AMPER 19
#define LESS 20
#define GREATER 21
#define EQUAL 22
#define DOT 23
#define PERCENT 24
#define LBRACE 25
#define RBRACE 26
#define EQEQUAL 27
#define NOTEQUAL 28
#define LESSEQUAL 29
#define GREATEREQUAL 30
#define TILDE 31
#define CIRCUMFLEX 32
#define LEFTSHIFT 33
#define RIGHTSHIFT 34
#define DOUBLESTAR 35
#define PLUSEQUAL 36
#define MINEQUAL 37
#define STAREQUAL 38
#define SLASHEQUAL 39
#define PERCENTEQUAL 40
#define AMPEREQUAL 41
#define VBAREQUAL 42
#define CIRCUMFLEXEQUAL 43
#define LEFTSHIFTEQUAL 44
#define RIGHTSHIFTEQUAL 45
#define DOUBLESTAREQUAL 46
#define DOUBLESLASH 47
#define DOUBLESLASHEQUAL 48
#define AT 49
#define ATEQUAL 50
#define RARROW 51
#define ELLIPSIS 52
#define COLONEQUAL 53
#define EXCLAMATION 54
#define OP 55
#define AWAIT 56
#define ASYNC 57
#define TYPE_IGNORE 58
#define TYPE_COMMENT 59
#define SOFT_KEYWORD 60
#define FSTRING_START 61
#define FSTRING_MIDDLE 62
#define FSTRING_END 63
#define COMMENT 64
#define NL 65
#define ERRORTOKEN 66
#define N_TOKENS 68
#define NT_OFFSET 256
/* Special definitions for cooperation with parser */
#define ISTERMINAL(x) ((x) < NT_OFFSET)
#define ISNONTERMINAL(x) ((x) >= NT_OFFSET)
#define ISEOF(x) ((x) == ENDMARKER)
#define ISWHITESPACE(x) ((x) == ENDMARKER || \
(x) == NEWLINE || \
(x) == INDENT || \
(x) == DEDENT)
#define ISSTRINGLIT(x) ((x) == STRING || \
(x) == FSTRING_MIDDLE)
// Symbols exported for test_peg_generator
PyAPI_DATA(const char * const) _PyParser_TokenNames[]; /* Token names */
PyAPI_FUNC(int) _PyToken_OneChar(int);
PyAPI_FUNC(int) _PyToken_TwoChars(int, int);
PyAPI_FUNC(int) _PyToken_ThreeChars(int, int, int);
#ifdef __cplusplus
}
#endif
#endif // !Py_INTERNAL_TOKEN_H

View File

@@ -0,0 +1,101 @@
#ifndef Py_INTERNAL_TRACEBACK_H
#define Py_INTERNAL_TRACEBACK_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* Write the Python traceback into the file 'fd'. For example:
Traceback (most recent call first):
File "xxx", line xxx in <xxx>
File "xxx", line xxx in <xxx>
...
File "xxx", line xxx in <xxx>
This function is written for debugging purposes only, to dump the traceback in
the worst case: after a segmentation fault, at fatal error, etc. That's why
it is very limited. Strings are truncated to 100 characters and encoded to
ASCII with backslashreplace. It doesn't write the source code, only the
function name, filename and line number of each frame. Write only the first
100 frames: if the traceback is truncated, write the line " ...".
This function is signal safe. */
PyAPI_FUNC(void) _Py_DumpTraceback(
int fd,
PyThreadState *tstate);
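/* Illustrative: dumping the current thread's traceback to stderr (fd 2)
   from a crash handler. _PyThreadState_GET() comes from pycore_pystate.h. */
static inline void
_py_example_dump_current(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    if (tstate != NULL) {
        _Py_DumpTraceback(2, tstate);  /* signal-safe, as noted above */
    }
}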
/* Write the traceback of all threads into the file 'fd'. current_thread can be
NULL.
Return NULL on success, or an error message on error.
This function is written for debugging purposes only. It calls
_Py_DumpTraceback() for each thread, and so has the same limitations. It
only writes the tracebacks of the first 100 threads, and writes "..." if
there are more threads.
If current_tstate is NULL, the function tries to get the Python thread state
of the current thread. It is not an error if the function is unable to get
the current Python thread state.
If interp is NULL, the function tries to get the interpreter state from
the current Python thread state, or from
_PyGILState_GetInterpreterStateUnsafe() in last resort.
It is better to pass NULL for interp and current_tstate; the function then
tries different options to retrieve this information.
This function is signal safe. */
PyAPI_FUNC(const char*) _Py_DumpTracebackThreads(
int fd,
PyInterpreterState *interp,
PyThreadState *current_tstate);
/* Write a Unicode object into the file descriptor fd. Encode the string to
ASCII using the backslashreplace error handler.
Do nothing if text is not a Unicode object. The function accepts a Unicode
string which is not ready (PyUnicode_WCHAR_KIND).
This function is signal safe. */
PyAPI_FUNC(void) _Py_DumpASCII(int fd, PyObject *text);
/* Format an integer as decimal into the file descriptor fd.
This function is signal safe. */
PyAPI_FUNC(void) _Py_DumpDecimal(
int fd,
size_t value);
/* Format an integer as hexadecimal with width digits into fd file descriptor.
The function is signal safe. */
PyAPI_FUNC(void) _Py_DumpHexadecimal(
int fd,
uintptr_t value,
Py_ssize_t width);
PyAPI_FUNC(PyObject*) _PyTraceBack_FromFrame(
PyObject *tb_next,
PyFrameObject *frame);
#define EXCEPTION_TB_HEADER "Traceback (most recent call last):\n"
#define EXCEPTION_GROUP_TB_HEADER "Exception Group Traceback (most recent call last):\n"
/* Write the traceback tb to file f. Prefix each line with
indent spaces followed by the margin (if it is not NULL). */
PyAPI_FUNC(int) _PyTraceBack_Print_Indented(
PyObject *tb, int indent, const char* margin,
const char *header_margin, const char *header, PyObject *f);
PyAPI_FUNC(int) _Py_WriteIndentedMargin(int, const char*, PyObject *);
PyAPI_FUNC(int) _Py_WriteIndent(int, PyObject *);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_TRACEBACK_H */

View File

@@ -0,0 +1,123 @@
#ifndef Py_INTERNAL_TRACEMALLOC_H
#define Py_INTERNAL_TRACEMALLOC_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_hashtable.h" // _Py_hashtable_t
/* Trace memory blocks allocated by PyMem_RawMalloc() */
#define TRACE_RAW_MALLOC
struct _PyTraceMalloc_Config {
/* Module initialized?
Variable protected by the GIL */
enum {
TRACEMALLOC_NOT_INITIALIZED,
TRACEMALLOC_INITIALIZED,
TRACEMALLOC_FINALIZED
} initialized;
/* Is tracemalloc tracing memory allocations?
Variable protected by the GIL */
int tracing;
/* limit of the number of frames in a traceback, 1 by default.
Variable protected by the GIL. */
int max_nframe;
};
/* Pack the frame_t structure to reduce the memory footprint on 64-bit
architectures: 12 bytes instead of 16. */
#if defined(_MSC_VER)
#pragma pack(push, 4)
#endif
struct
#ifdef __GNUC__
__attribute__((packed))
#endif
tracemalloc_frame {
/* filename cannot be NULL: "<unknown>" is used if the Python frame
filename is NULL */
PyObject *filename;
unsigned int lineno;
};
#ifdef _MSC_VER
#pragma pack(pop)
#endif
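/* Illustrative arithmetic behind the "12 bytes instead of 16" claim on a
   64-bit build: sizeof(PyObject *) == 8 plus sizeof(unsigned int) == 4
   gives 12 bytes when packed; with the default 8-byte alignment the
   struct would be padded up to 16. */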
struct tracemalloc_traceback {
Py_uhash_t hash;
/* Number of frames stored */
uint16_t nframe;
/* Total number of frames the traceback had */
uint16_t total_nframe;
struct tracemalloc_frame frames[1];
};
struct _tracemalloc_runtime_state {
struct _PyTraceMalloc_Config config;
/* Protected by the GIL */
struct {
PyMemAllocatorEx mem;
PyMemAllocatorEx raw;
PyMemAllocatorEx obj;
} allocators;
#if defined(TRACE_RAW_MALLOC)
PyThread_type_lock tables_lock;
#endif
/* Size in bytes of currently traced memory.
Protected by TABLES_LOCK(). */
size_t traced_memory;
/* Peak size in bytes of traced memory.
Protected by TABLES_LOCK(). */
size_t peak_traced_memory;
/* Hash table used as a set to intern filenames:
PyObject* => PyObject*.
Protected by the GIL */
_Py_hashtable_t *filenames;
/* Buffer to store a new traceback in traceback_new().
Protected by the GIL. */
struct tracemalloc_traceback *traceback;
/* Hash table used as a set to intern tracebacks:
traceback_t* => traceback_t*
Protected by the GIL */
_Py_hashtable_t *tracebacks;
/* pointer (void*) => trace (trace_t*).
Protected by TABLES_LOCK(). */
_Py_hashtable_t *traces;
/* domain (unsigned int) => traces (_Py_hashtable_t).
Protected by TABLES_LOCK(). */
_Py_hashtable_t *domains;
struct tracemalloc_traceback empty_traceback;
Py_tss_t reentrant_key;
};
#define _tracemalloc_runtime_state_INIT \
{ \
.config = { \
.initialized = TRACEMALLOC_NOT_INITIALIZED, \
.tracing = 0, \
.max_nframe = 1, \
}, \
.reentrant_key = Py_tss_NEEDS_INIT, \
}
#ifdef __cplusplus
}
#endif
#endif // !Py_INTERNAL_TRACEMALLOC_H

View File

@@ -0,0 +1,79 @@
#ifndef Py_INTERNAL_TUPLE_H
#define Py_INTERNAL_TUPLE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "tupleobject.h" /* _PyTuple_CAST() */
/* runtime lifecycle */
extern PyStatus _PyTuple_InitGlobalObjects(PyInterpreterState *);
extern void _PyTuple_Fini(PyInterpreterState *);
/* other API */
// PyTuple_MAXSAVESIZE - largest tuple to save on free list
// PyTuple_MAXFREELIST - maximum number of tuples of each size to save
#if defined(PyTuple_MAXSAVESIZE) && PyTuple_MAXSAVESIZE <= 0
// A build indicated that tuple freelists should not be used.
# define PyTuple_NFREELISTS 0
# undef PyTuple_MAXSAVESIZE
# undef PyTuple_MAXFREELIST
#elif !defined(WITH_FREELISTS)
# define PyTuple_NFREELISTS 0
# undef PyTuple_MAXSAVESIZE
# undef PyTuple_MAXFREELIST
#else
// We are using a freelist for tuples.
# ifndef PyTuple_MAXSAVESIZE
# define PyTuple_MAXSAVESIZE 20
# endif
# define PyTuple_NFREELISTS PyTuple_MAXSAVESIZE
# ifndef PyTuple_MAXFREELIST
# define PyTuple_MAXFREELIST 2000
# endif
#endif
struct _Py_tuple_state {
#if PyTuple_NFREELISTS > 0
/* There is one freelist for each size from 1 to PyTuple_MAXSAVESIZE.
The empty tuple is handled separately.
Each tuple stored in the array is the head of the linked list
(and the next available tuple) for that size. The actual tuple
object is used as the linked list node, with its first item
(ob_item[0]) pointing to the next node (i.e. the previous head).
Each linked list is initially NULL. */
PyTupleObject *free_list[PyTuple_NFREELISTS];
int numfree[PyTuple_NFREELISTS];
#else
char _unused; // Empty structs are not allowed.
#endif
};
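/* Illustrative sketch of the freelist "pop" described above (the real
   logic lives in Objects/tupleobject.c; error handling and statistics
   elided): */
static inline PyTupleObject *
_py_example_freelist_pop(struct _Py_tuple_state *state, Py_ssize_t size)
{
#if PyTuple_NFREELISTS > 0
    assert(1 <= size && size <= PyTuple_MAXSAVESIZE);
    Py_ssize_t index = size - 1;
    PyTupleObject *op = state->free_list[index];
    if (op != NULL) {
        /* ob_item[0] of the head links to the next free tuple. */
        state->free_list[index] = (PyTupleObject *)op->ob_item[0];
        state->numfree[index]--;
    }
    return op;
#else
    (void)state; (void)size;
    return NULL;
#endif
}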
#define _PyTuple_ITEMS(op) _Py_RVALUE(_PyTuple_CAST(op)->ob_item)
extern PyObject *_PyTuple_FromArray(PyObject *const *, Py_ssize_t);
extern PyObject *_PyTuple_FromArraySteal(PyObject *const *, Py_ssize_t);
typedef struct {
PyObject_HEAD
Py_ssize_t it_index;
PyTupleObject *it_seq; /* Set to NULL when iterator is exhausted */
} _PyTupleIterObject;
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_TUPLE_H */

View File

@@ -0,0 +1,147 @@
#ifndef Py_INTERNAL_TYPEOBJECT_H
#define Py_INTERNAL_TYPEOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#include "pycore_moduleobject.h"
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* state */
#define _Py_TYPE_BASE_VERSION_TAG (2<<16)
#define _Py_MAX_GLOBAL_TYPE_VERSION_TAG (_Py_TYPE_BASE_VERSION_TAG - 1)
struct _types_runtime_state {
/* Used to set PyTypeObject.tp_version_tag for core static types. */
// bpo-42745: next_version_tag remains shared by all interpreters
// because of static types.
unsigned int next_version_tag;
};
// Type attribute lookup cache: speed up attribute and method lookups,
// see _PyType_Lookup().
struct type_cache_entry {
unsigned int version; // initialized from type->tp_version_tag
PyObject *name; // reference to exactly a str or None
PyObject *value; // borrowed reference or NULL
};
#define MCACHE_SIZE_EXP 12
struct type_cache {
struct type_cache_entry hashtable[1 << MCACHE_SIZE_EXP];
};
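/* Illustrative: the cache is a direct-mapped table. A slot is chosen by
   mixing the type's version tag with the name's hash (the exact mixing
   lives in Objects/typeobject.c); a hit requires both stored fields to
   match. */
static inline struct type_cache_entry *
_py_example_cache_slot(struct type_cache *cache,
                       unsigned int version, Py_hash_t name_hash)
{
    unsigned int index = ((unsigned int)version ^ (unsigned int)name_hash)
                         & ((1 << MCACHE_SIZE_EXP) - 1);
    return &cache->hashtable[index];
}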
/* For now we hard-code this to a value for which we are confident
all the static builtin types will fit (for all builds). */
#define _Py_MAX_STATIC_BUILTIN_TYPES 200
typedef struct {
PyTypeObject *type;
int readying;
int ready;
// XXX tp_dict can probably be statically allocated,
// instead of dynamically and stored on the interpreter.
PyObject *tp_dict;
PyObject *tp_subclasses;
/* We never clean up weakrefs for static builtin types since
they will effectively never get triggered. However, there
are also some diagnostic uses for the list of weakrefs,
so we still keep it. */
PyObject *tp_weaklist;
} static_builtin_state;
struct types_state {
/* Used to set PyTypeObject.tp_version_tag.
It starts at _Py_MAX_GLOBAL_TYPE_VERSION_TAG + 1,
where all those lower numbers are used for core static types. */
unsigned int next_version_tag;
struct type_cache type_cache;
size_t num_builtins_initialized;
static_builtin_state builtins[_Py_MAX_STATIC_BUILTIN_TYPES];
};
/* runtime lifecycle */
extern PyStatus _PyTypes_InitTypes(PyInterpreterState *);
extern void _PyTypes_FiniTypes(PyInterpreterState *);
extern void _PyTypes_Fini(PyInterpreterState *);
/* other API */
/* Length of array of slotdef pointers used to store slots with the
same __name__. There should be at most MAX_EQUIV-1 slotdef entries with
the same __name__, for any __name__. Since that's a static property, it is
appropriate to declare fixed-size arrays for this. */
#define MAX_EQUIV 10
typedef struct wrapperbase pytype_slotdef;
static inline PyObject **
_PyStaticType_GET_WEAKREFS_LISTPTR(static_builtin_state *state)
{
assert(state != NULL);
return &state->tp_weaklist;
}
/* Like PyType_GetModuleState, but skips verification
* that type is a heap type with an associated module */
static inline void *
_PyType_GetModuleState(PyTypeObject *type)
{
assert(PyType_Check(type));
assert(type->tp_flags & Py_TPFLAGS_HEAPTYPE);
PyHeapTypeObject *et = (PyHeapTypeObject *)type;
assert(et->ht_module);
PyModuleObject *mod = (PyModuleObject *)(et->ht_module);
assert(mod != NULL);
return mod->md_state;
}
extern int _PyStaticType_InitBuiltin(PyInterpreterState *, PyTypeObject *type);
extern static_builtin_state * _PyStaticType_GetState(PyInterpreterState *, PyTypeObject *);
extern void _PyStaticType_ClearWeakRefs(PyInterpreterState *, PyTypeObject *type);
extern void _PyStaticType_Dealloc(PyInterpreterState *, PyTypeObject *);
PyAPI_FUNC(PyObject *) _PyType_GetDict(PyTypeObject *);
extern PyObject * _PyType_GetBases(PyTypeObject *type);
extern PyObject * _PyType_GetMRO(PyTypeObject *type);
extern PyObject* _PyType_GetSubclasses(PyTypeObject *);
extern int _PyType_HasSubclasses(PyTypeObject *);
// PyType_Ready() must be called if _PyType_IsReady() is false.
// See also the Py_TPFLAGS_READY flag.
static inline int
_PyType_IsReady(PyTypeObject *type)
{
return _PyType_GetDict(type) != NULL;
}
PyObject *
_Py_type_getattro_impl(PyTypeObject *type, PyObject *name, int *suppress_missing_attribute);
PyObject *
_Py_type_getattro(PyTypeObject *type, PyObject *name);
PyObject *_Py_slot_tp_getattro(PyObject *self, PyObject *name);
PyObject *_Py_slot_tp_getattr_hook(PyObject *self, PyObject *name);
PyAPI_DATA(PyTypeObject) _PyBufferWrapper_Type;
PyObject *
_PySuper_Lookup(PyTypeObject *su_type, PyObject *su_obj, PyObject *name, int *meth_found);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_TYPEOBJECT_H */

View File

@@ -0,0 +1,24 @@
#ifndef Py_INTERNAL_TYPEVAROBJECT_H
#define Py_INTERNAL_TYPEVAROBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
extern PyObject *_Py_make_typevar(PyObject *, PyObject *, PyObject *);
extern PyObject *_Py_make_paramspec(PyThreadState *, PyObject *);
extern PyObject *_Py_make_typevartuple(PyThreadState *, PyObject *);
extern PyObject *_Py_make_typealias(PyThreadState *, PyObject *);
extern PyObject *_Py_subscript_generic(PyThreadState *, PyObject *);
extern int _Py_initialize_generic(PyInterpreterState *);
extern void _Py_clear_generic_types(PyInterpreterState *);
extern PyTypeObject _PyTypeAlias_Type;
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_TYPEVAROBJECT_H */

View File

@@ -0,0 +1,34 @@
/* Unicode name database interface */
#ifndef Py_INTERNAL_UCNHASH_H
#define Py_INTERNAL_UCNHASH_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* revised ucnhash CAPI interface (exported through a "wrapper") */
#define PyUnicodeData_CAPSULE_NAME "unicodedata._ucnhash_CAPI"
typedef struct {
/* Get name for a given character code.
      Returns non-zero on success, zero on failure.
Does not set Python exceptions. */
int (*getname)(Py_UCS4 code, char* buffer, int buflen,
int with_alias_and_seq);
/* Get character code for a given name.
Same error handling as for getname(). */
int (*getcode)(const char* name, int namelen, Py_UCS4* code,
int with_named_seq);
} _PyUnicode_Name_CAPI;
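/* Illustrative sketch: a consumer imports the wrapper capsule and calls
   through the table. The helper name is an assumption; PyCapsule_Import()
   and the getcode() signature are as declared above. */
#if 0  // usage sketch only
static int
example_lookup_codepoint(const char *name, int namelen, Py_UCS4 *code)
{
    _PyUnicode_Name_CAPI *capi = (_PyUnicode_Name_CAPI *)PyCapsule_Import(
        PyUnicodeData_CAPSULE_NAME, 1);
    if (capi == NULL) {
        return -1;  /* exception set by PyCapsule_Import() */
    }
    /* getcode() does not set an exception on failure (see above). */
    return capi->getcode(name, namelen, code, 0) ? 0 : -1;
}
#endif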
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_UCNHASH_H */

View File

@@ -0,0 +1,70 @@
#ifndef Py_INTERNAL_UNICODEOBJECT_H
#define Py_INTERNAL_UNICODEOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_fileutils.h" // _Py_error_handler
#include "pycore_ucnhash.h" // _PyUnicode_Name_CAPI
void _PyUnicode_ExactDealloc(PyObject *op);
Py_ssize_t _PyUnicode_InternedSize(void);
/* runtime lifecycle */
extern void _PyUnicode_InitState(PyInterpreterState *);
extern PyStatus _PyUnicode_InitGlobalObjects(PyInterpreterState *);
extern PyStatus _PyUnicode_InitTypes(PyInterpreterState *);
extern void _PyUnicode_Fini(PyInterpreterState *);
extern void _PyUnicode_FiniTypes(PyInterpreterState *);
extern PyTypeObject _PyUnicodeASCIIIter_Type;
/* other API */
struct _Py_unicode_runtime_ids {
PyThread_type_lock lock;
// next_index value must be preserved when Py_Initialize()/Py_Finalize()
// is called multiple times: see _PyUnicode_FromId() implementation.
Py_ssize_t next_index;
};
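/* Illustrative sketch of handing out an index under the lock. The helper
   name is an assumption; the real logic is in _PyUnicode_FromId(). */
#if 0  // usage sketch only
static Py_ssize_t
example_next_id_index(struct _Py_unicode_runtime_ids *ids)
{
    PyThread_acquire_lock(ids->lock, WAIT_LOCK);
    /* next_index deliberately survives Py_Finalize(): a cached
       _Py_Identifier keeps its index across re-initialization. */
    Py_ssize_t index = ids->next_index++;
    PyThread_release_lock(ids->lock);
    return index;
}
#endif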
struct _Py_unicode_runtime_state {
struct _Py_unicode_runtime_ids ids;
};
/* fs_codec.encoding is initialized to NULL.
Later, it is set to a non-NULL string by _PyUnicode_InitEncodings(). */
struct _Py_unicode_fs_codec {
char *encoding; // Filesystem encoding (encoded to UTF-8)
    int utf8;       // 1 if encoding is "utf-8", 0 otherwise
char *errors; // Filesystem errors (encoded to UTF-8)
_Py_error_handler error_handler;
};
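/* Illustrative sketch: the utf8 flag lets callers take a dedicated UTF-8
   path without comparing encoding names on every call. The helper name and
   the use of the `errors` string are assumptions for illustration. */
#if 0  // usage sketch only
static PyObject *
example_fs_decode(struct _Py_unicode_fs_codec *fs, const char *s, Py_ssize_t n)
{
    if (fs->utf8) {
        /* fast path: encoding was recognized as "utf-8" at init time */
        return PyUnicode_DecodeUTF8(s, n, fs->errors);
    }
    return PyUnicode_Decode(s, n, fs->encoding, fs->errors);
}
#endif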
struct _Py_unicode_ids {
Py_ssize_t size;
PyObject **array;
};
struct _Py_unicode_state {
struct _Py_unicode_fs_codec fs_codec;
_PyUnicode_Name_CAPI *ucnhash_capi;
// Unicode identifiers (_Py_Identifier): see _PyUnicode_FromId()
struct _Py_unicode_ids ids;
};
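/* Illustrative sketch: resolving a cached identifier index against the
   per-interpreter array; a NULL slot means the interned string has not been
   created in this interpreter yet. Names are assumptions. */
#if 0  // usage sketch only
static PyObject *
example_lookup_id(struct _Py_unicode_ids *ids, Py_ssize_t index)
{
    if (index < ids->size && ids->array[index] != NULL) {
        return ids->array[index];  /* borrowed reference */
    }
    return NULL;  /* caller must create, intern and store the string */
}
#endif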
extern void _PyUnicode_InternInPlace(PyInterpreterState *interp, PyObject **p);
extern void _PyUnicode_ClearInterned(PyInterpreterState *interp);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_UNICODEOBJECT_H */

File diff suppressed because it is too large

View File

@@ -0,0 +1,23 @@
#ifndef Py_INTERNAL_UNIONOBJECT_H
#define Py_INTERNAL_UNIONOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
extern PyTypeObject _PyUnion_Type;
#define _PyUnion_Check(op) Py_IS_TYPE((op), &_PyUnion_Type)
extern PyObject *_Py_union_type_or(PyObject *, PyObject *);
#define _PyGenericAlias_Check(op) PyObject_TypeCheck((op), &Py_GenericAliasType)
extern PyObject *_Py_subs_parameters(PyObject *, PyObject *, PyObject *, PyObject *);
extern PyObject *_Py_make_parameters(PyObject *);
extern PyObject *_Py_union_args(PyObject *self);
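/* Illustrative sketch: distinguishing `int | str`-style unions from other
   objects with the check above, assuming _Py_union_args() returns the tuple
   of member types. The helper name is hypothetical. */
#if 0  // usage sketch only
static PyObject *
example_union_members(PyObject *annotation)
{
    if (_PyUnion_Check(annotation)) {
        return _Py_union_args(annotation);
    }
    PyErr_SetString(PyExc_TypeError, "expected a types.UnionType instance");
    return NULL;
}
#endif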
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_UNIONOBJECT_H */

View File

@@ -0,0 +1,29 @@
#ifndef Py_INTERNAL_WARNINGS_H
#define Py_INTERNAL_WARNINGS_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
struct _warnings_runtime_state {
/* Both 'filters' and 'onceregistry' can be set in warnings.py;
get_warnings_attr() will reset these variables accordingly. */
PyObject *filters; /* List */
PyObject *once_registry; /* Dict */
PyObject *default_action; /* String */
long filters_version;
};
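/* Illustrative sketch: filters_version serves as a cheap invalidation
   counter; callers snapshot it and drop cached filter-match results when it
   changes. The helper name is an assumption. */
#if 0  // usage sketch only
static int
example_filters_changed(struct _warnings_runtime_state *st, long *seen)
{
    if (st->filters_version != *seen) {
        *seen = st->filters_version;
        return 1;  /* cached match results must be recomputed */
    }
    return 0;
}
#endif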
extern int _PyWarnings_InitState(PyInterpreterState *interp);
PyAPI_FUNC(PyObject*) _PyWarnings_Init(void);
extern void _PyErr_WarnUnawaitedCoroutine(PyObject *coro);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_WARNINGS_H */