Code example #1
File: rclass.py  Project: nipengadmaster/pypy
 def __init__(self, rtyper, classdef):
     AbstractClassRepr.__init__(self, rtyper, classdef)
     if classdef is None:
         # 'object' root type
         self.vtable_type = OBJECT_VTABLE
     else:
         self.vtable_type = lltype.ForwardReference()
     self.lowleveltype = Ptr(self.vtable_type)
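This constructor only creates the placeholder: as the other snippets in this listing show, a ForwardReference is normally filled in later with become(), which is what allows self-referential low-level types. A minimal sketch of that pattern, using a hypothetical NODE type (it mirrors the recursive-struct examples further down) and only lltype calls that appear in this listing:

from pypy.rpython.lltypesystem import lltype

# Forward-declare the type so it can contain a pointer to itself.
NODE = lltype.ForwardReference()
NODE.become(lltype.Struct('NODE',
                          ('value', lltype.Signed),
                          ('next', lltype.Ptr(NODE))))

# Once become() has run, NODE behaves like any other lltype struct.
node = lltype.malloc(NODE, flavor='raw')
node.value = 1
node.next = lltype.nullptr(NODE)
lltype.free(node, flavor='raw')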
Code example #2
File: test_genc.py  Project: nipengadmaster/pypy
def test_recursive_llhelper():
    from pypy.rpython.annlowlevel import llhelper
    from pypy.rpython.lltypesystem import lltype
    from pypy.rlib.objectmodel import specialize
    from pypy.rlib.nonconst import NonConstant
    FT = lltype.ForwardReference()
    FTPTR = lltype.Ptr(FT)
    STRUCT = lltype.Struct("foo", ("bar", FTPTR))
    FT.become(lltype.FuncType([lltype.Ptr(STRUCT)], lltype.Signed))

    class A:
        def __init__(self, func, name):
            self.func = func
            self.name = name

        def _freeze_(self):
            return True

        @specialize.memo()
        def make_func(self):
            f = getattr(self, "_f", None)
            if f is not None:
                return f
            f = lambda *args: self.func(*args)
            f.c_name = self.name
            f.relax_sig_check = True
            f.__name__ = "WRAP%s" % (self.name, )
            self._f = f
            return f

        def get_llhelper(self):
            return llhelper(FTPTR, self.make_func())

    def f(s):
        if s.bar == t.bar:
            lltype.free(s, flavor="raw")
            return 1
        lltype.free(s, flavor="raw")
        return 0

    def g(x):
        return 42

    def chooser(x):
        s = lltype.malloc(STRUCT, flavor="raw")
        if x:
            s.bar = llhelper(FTPTR, a_f.make_func())
        else:
            s.bar = llhelper(FTPTR, a_g.make_func())
        return f(s)

    a_f = A(f, "f")
    a_g = A(g, "g")
    t = lltype.malloc(STRUCT, flavor="raw", immortal=True)
    t.bar = llhelper(FTPTR, a_f.make_func())
    fn = compile(chooser, [bool])
    assert fn(True)
Code example #3
 def test_recursive_struct(self):
     SX = lltype.ForwardReference()
     S1 = lltype.Struct('S1', ('p', lltype.Ptr(SX)), ('x', lltype.Signed))
     SX.become(S1)
     # a chained list
     s1 = lltype.malloc(S1, flavor='raw')
     s2 = lltype.malloc(S1, flavor='raw')
     s3 = lltype.malloc(S1, flavor='raw')
     s1.x = 111
     s2.x = 222
     s3.x = 333
     s1.p = s2
     s2.p = s3
     s3.p = lltype.nullptr(S1)
     sc1 = lltype2ctypes(s1)
     sc2 = sc1.contents.p
     sc3 = sc2.contents.p
     assert not sc3.contents.p
     assert sc1.contents.x == 111
     assert sc2.contents.x == 222
     assert sc3.contents.x == 333
     sc3.contents.x += 1
     assert s3.x == 334
     s3.x += 2
     assert sc3.contents.x == 336
     lltype.free(s1, flavor='raw')
     lltype.free(s2, flavor='raw')
     lltype.free(s3, flavor='raw')
     # a self-cycle
     s1 = lltype.malloc(S1, flavor='raw')
     s1.x = 12
     s1.p = s1
     sc1 = lltype2ctypes(s1)
     assert sc1.contents.x == 12
     assert (ctypes.addressof(sc1.contents.p.contents) == ctypes.addressof(
         sc1.contents))
     s1.x *= 5
     assert sc1.contents.p.contents.p.contents.p.contents.x == 60
     lltype.free(s1, flavor='raw')
     # a longer cycle
     s1 = lltype.malloc(S1, flavor='raw')
     s2 = lltype.malloc(S1, flavor='raw')
     s1.x = 111
     s1.p = s2
     s2.x = 222
     s2.p = s1
     sc1 = lltype2ctypes(s1)
     assert sc1.contents.x == 111
     assert sc1.contents.p.contents.x == 222
     assert (ctypes.addressof(sc1.contents.p.contents) != ctypes.addressof(
         sc1.contents))
     assert (ctypes.addressof(
         sc1.contents.p.contents.p.contents) == ctypes.addressof(
             sc1.contents))
     lltype.free(s1, flavor='raw')
     lltype.free(s2, flavor='raw')
     assert not ALLOCATED  # detects memory leaks in the test
Code example #4
def cpython_struct(name, fields, forward=None, level=1):
    configname = name.replace(' ', '__')
    if level == 1:
        config = CConfig
    else:
        config = CConfig2
    setattr(config, configname, rffi_platform.Struct(name, fields))
    if forward is None:
        forward = lltype.ForwardReference()
    TYPES[configname] = forward
    return forward
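Later examples in this listing (pystate.py, frameobject.py, funcobject.py) call this helper in two ways: either passing a pre-built ForwardReference so a pointer type can be declared first, or letting the helper create the forward reference and using its return value directly. A hedged sketch of the second, simpler form, with a hypothetical struct name and field:

# Hypothetical name and field, mirroring the calls shown further below;
# rffi.INT stands in for whatever C field type is actually needed.
MyStruct = cpython_struct("MyStruct", [("my_field", rffi.INT)])
MyStructPtr = lltype.Ptr(MyStruct)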
Code example #5
    def test_indirect_recursive_struct_more(self):
        NODE = lltype.ForwardReference()
        NODE2 = lltype.Struct('NODE2', ('ping', lltype.Ptr(NODE)))
        NODE.become(lltype.Struct('NODE', ('pong', NODE2)))

        # Building NODE2 first used to fail.
        get_ctypes_type(NODE2)

        CNODEPTR = get_ctypes_type(NODE)
        pc = CNODEPTR()
        pc.pong.ping = ctypes.pointer(pc)
        p = ctypes2lltype(lltype.Ptr(NODE), ctypes.pointer(pc))
        assert p.pong.ping == p
Code example #6
 def test_recursive_struct_more(self):
     NODE = lltype.ForwardReference()
     NODE.become(
         lltype.Struct('NODE', ('value', lltype.Signed),
                       ('next', lltype.Ptr(NODE))))
     CNODEPTR = get_ctypes_type(NODE)
     pc = CNODEPTR()
     pc.value = 42
     pc.next = ctypes.pointer(pc)
     p = ctypes2lltype(lltype.Ptr(NODE), ctypes.pointer(pc))
     assert p.value == 42
     assert p.next == p
     pc2 = lltype2ctypes(p)
     assert pc2.contents.value == 42
     assert pc2.contents.next.contents.value == 42
Code example #7
File: annlowlevel.py  Project: chyyuu/pygirl
 def graph2delayed(self, graph, FUNCTYPE=None):
     if self.rtyper.type_system.name == 'lltypesystem':
         if FUNCTYPE is None:
             FUNCTYPE = lltype.ForwardReference()
         # obscure hack: embed the name of the function in the string, so
         # that the genc database can get it even before the delayedptr
         # is really computed
         name = "delayed!%s" % (graph.name, )
         delayedptr = lltype._ptr(lltype.Ptr(FUNCTYPE), name, solid=True)
     else:
         if FUNCTYPE is None:
             FUNCTYPE = ootype.ForwardReference()
         name = "delayed!%s" % (graph.name, )
         delayedptr = ootype._forward_static_meth(FUNCTYPE, _name=name)
     self.delayedfuncs.append((delayedptr, graph))
     return delayedptr
Code example #8
File: support.py  Project: njues/Sypy
def get_chunk_manager(chunk_size=DEFAULT_CHUNK_SIZE, cache={}):
    try:
        return cache[chunk_size]
    except KeyError:
        pass

    CHUNK = lltype.ForwardReference()
    CHUNK.become(
        lltype.Struct(
            'AddressChunk', ('next', lltype.Ptr(CHUNK)),
            ('items', lltype.FixedSizeArray(llmemory.Address, chunk_size))))
    null_chunk = lltype.nullptr(CHUNK)

    class FreeList(object):
        _alloc_flavor_ = "raw"

        def __init__(self):
            self.free_list = null_chunk

        def get(self):
            if not self.free_list:
                # we zero-initialize the chunks to make the translation
                # backends happy, but we don't need to do it at run-time.
                zero = not we_are_translated()
                return lltype.malloc(CHUNK,
                                     flavor="raw",
                                     zero=zero,
                                     track_allocation=False)

            result = self.free_list
            self.free_list = result.next
            return result

        def put(self, chunk):
            if we_are_translated():
                chunk.next = self.free_list
                self.free_list = chunk
            else:
                # Don't cache the old chunks but free them immediately.
                # Helps debugging, and avoids that old chunks full of
                # addresses left behind by a test end up in genc...
                lltype.free(chunk, flavor="raw", track_allocation=False)

    unused_chunks = FreeList()
    cache[chunk_size] = unused_chunks, null_chunk
    return unused_chunks, null_chunk
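A hedged usage sketch of the pair this function returns, using only the operations defined above (get(), put() and the 'next' field of CHUNK); the variable names are illustrative:

unused_chunks, null_chunk = get_chunk_manager()   # default chunk size

chunk = unused_chunks.get()    # reuse a CHUNK from the free list, or malloc one
chunk.next = null_chunk        # link field declared in the CHUNK struct
# ... store addresses into chunk.items[0] .. chunk.items[chunk_size - 1] ...
unused_chunks.put(chunk)       # recycle it (or free it when not translated)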
Code example #9
File: rvirtualizable.py  Project: griels/pypy-sc
    def _setup_repr(self):
        llfields = []
        ACCESS = lltype.ForwardReference()
        if self.top_of_virtualizable_hierarchy:
            llfields.append(('vable_base', llmemory.Address))
            llfields.append(('vable_rti', VABLERTIPTR))
            llfields.append(('vable_access', lltype.Ptr(ACCESS)))
        InstanceRepr._setup_repr(self,
                                 llfields,
                                 hints={'virtualizable': True},
                                 adtmeths={'ACCESS': ACCESS})
        rbase = self.rbase
        accessors = []
        if self.top_of_virtualizable_hierarchy:
            if len(rbase.allinstancefields) != 1:
                raise TyperError("virtualizable class cannot have"
                                 " non-virtualizable base class with instance"
                                 " fields: %r" % self.classdef)
            redirected_fields = []

        else:
            accessors.append(('parent', rbase.ACCESS))
            redirected_fields = list(rbase.ACCESS.redirected_fields)
        name = self.lowleveltype.TO._name
        TOPPTR = self.get_top_virtualizable_type()
        self.my_redirected_fields = my_redirected_fields = {}
        for name, (mangled_name, r) in self.fields.items():
            T = r.lowleveltype
            if T is lltype.Void:
                continue
            GETTER = lltype.Ptr(lltype.FuncType([TOPPTR], T))
            SETTER = lltype.Ptr(lltype.FuncType([TOPPTR, T], lltype.Void))
            accessors.append(('get_' + mangled_name, GETTER))
            accessors.append(('set_' + mangled_name, SETTER))
            redirected_fields.append(mangled_name)
            my_redirected_fields[name] = None
        ACCESS.become(
            lltype.Struct(
                name + '_access',
                hints={'immutable': True},
                adtmeths={'redirected_fields': tuple(redirected_fields)},
                *accessors))

        self.ACCESS = ACCESS
Code example #10
 def test_indirect_recursive_struct(self):
     S2Forward = lltype.ForwardReference()
     S1 = lltype.Struct('S1', ('p', lltype.Ptr(S2Forward)))
     A2 = lltype.Array(lltype.Ptr(S1), hints={'nolength': True})
     S2 = lltype.Struct('S2', ('a', lltype.Ptr(A2)))
     S2Forward.become(S2)
     s1 = lltype.malloc(S1, flavor='raw')
     a2 = lltype.malloc(A2, 10, flavor='raw')
     s2 = lltype.malloc(S2, flavor='raw')
     s2.a = a2
     a2[5] = s1
     s1.p = s2
     ac2 = lltype2ctypes(a2, normalize=False)
     sc1 = ac2.contents.items[5]
     sc2 = sc1.contents.p
     assert (ctypes.addressof(sc2.contents.a.contents) == ctypes.addressof(
         ac2.contents))
     lltype.free(s1, flavor='raw')
     lltype.free(a2, flavor='raw')
     lltype.free(s2, flavor='raw')
     assert not ALLOCATED  # detects memory leaks in the test
Code example #11
File: minimarkpage.py  Project: njues/Sypy
from pypy.rlib.rarithmetic import LONG_BIT, r_uint
from pypy.rlib.objectmodel import we_are_translated
from pypy.rlib.debug import ll_assert

WORD = LONG_BIT // 8
NULL = llmemory.NULL
WORD_POWER_2 = {32: 2, 64: 3}[LONG_BIT]
assert 1 << WORD_POWER_2 == WORD

# Terminology: the memory is subdivided into "arenas" containing "pages".
# A page contains a number of allocated objects, called "blocks".

# The actual allocation occurs in whole arenas, which are then subdivided
# into pages.  For each arena we allocate one of the following structures:

ARENA_PTR = lltype.Ptr(lltype.ForwardReference())
ARENA = lltype.Struct(
    'ArenaReference',
    # -- The address of the arena, as returned by malloc()
    ('base', llmemory.Address),
    # -- The number of free and the total number of pages in the arena
    ('nfreepages', lltype.Signed),
    ('totalpages', lltype.Signed),
    # -- A chained list of free pages in the arena.  Ends with NULL.
    ('freepages', llmemory.Address),
    # -- A linked list of arenas.  See below.
    ('nextarena', ARENA_PTR),
)
ARENA_PTR.TO.become(ARENA)
ARENA_NULL = lltype.nullptr(ARENA)
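A short sketch, assuming nothing beyond the struct defined above, of how a single ArenaReference record could be allocated raw and linked into a list through 'nextarena':

arena = lltype.malloc(ARENA, flavor='raw')   # one header record per arena
arena.base = llmemory.NULL                   # would hold the malloc()ed arena
arena.nfreepages = 0
arena.totalpages = 0
arena.freepages = llmemory.NULL              # no free pages chained yet
arena.nextarena = ARENA_NULL                 # end of the arena linked list
lltype.free(arena, flavor='raw')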
Code example #12
File: RSDL.py  Project: griels/pypy-sc
from pypy.rlib.rsdl.constants import _constants
from pypy.rlib.rsdl.eci import get_rsdl_compilation_info
from pypy.rlib.objectmodel import we_are_translated
import py
import sys

# ------------------------------------------------------------------------------

eci = get_rsdl_compilation_info()

def external(name, args, result):
    return rffi.llexternal(name, args, result, compilation_info=eci)

# ------------------------------------------------------------------------------

RectPtr             = lltype.Ptr(lltype.ForwardReference())
SurfacePtr          = lltype.Ptr(lltype.ForwardReference())
PixelFormatPtr      = lltype.Ptr(lltype.ForwardReference())
EventPtr            = lltype.Ptr(lltype.ForwardReference())
KeyboardEventPtr    = lltype.Ptr(lltype.ForwardReference())
MouseButtonEventPtr = lltype.Ptr(lltype.ForwardReference())
MouseMotionEventPtr = lltype.Ptr(lltype.ForwardReference())
KeyPtr              = lltype.Ptr(lltype.ForwardReference())
RWopsPtr            = lltype.Ptr(lltype.ForwardReference())

# ------------------------------------------------------------------------------

class CConfig:
    _compilation_info_ = eci

    Uint8  = platform.SimpleType('Uint8',  rffi.INT)
Code example #13
File: pystate.py  Project: nipengadmaster/pypy
from pypy.module.cpyext.api import (cpython_api, generic_cpy_call, CANNOT_FAIL,
                                    CConfig, cpython_struct)
from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, make_ref
from pypy.rpython.lltypesystem import rffi, lltype

PyInterpreterStateStruct = lltype.ForwardReference()
PyInterpreterState = lltype.Ptr(PyInterpreterStateStruct)
cpython_struct("PyInterpreterState", [('next', PyInterpreterState)],
               PyInterpreterStateStruct)
PyThreadState = lltype.Ptr(
    cpython_struct("PyThreadState", [
        ('interp', PyInterpreterState),
        ('dict', PyObject),
    ]))


@cpython_api([], PyThreadState, error=CANNOT_FAIL)
def PyEval_SaveThread(space):
    """Release the global interpreter lock (if it has been created and thread
    support is enabled) and reset the thread state to NULL, returning the
    previous thread state.  If the lock has been created,
    the current thread must have acquired it.  (This function is available even
    when thread support is disabled at compile time.)"""
    state = space.fromcache(InterpreterState)
    if rffi.aroundstate.before:
        rffi.aroundstate.before()
    tstate = state.swap_thread_state(space, lltype.nullptr(PyThreadState.TO))
    return tstate


@cpython_api([PyThreadState], lltype.Void)
Code example #14
            if( !VirtualQueryEx(GetCurrentProcess(), &fopen, &mi, sizeof(mi)) )
                return 0;

            return (HMODULE)mi.AllocationBase;
        }
        '''],
        separate_module_files = [libffidir.join('ffi.c'),
                                 libffidir.join('prep_cif.c'),
                                 libffidir.join('win32.c'),
                                 libffidir.join('pypy_ffi.c'),
                                 ],
        export_symbols = ['ffi_call', 'ffi_prep_cif', 'ffi_prep_closure',
                          'get_libc_handle'],
        )

FFI_TYPE_P = lltype.Ptr(lltype.ForwardReference())
FFI_TYPE_PP = rffi.CArrayPtr(FFI_TYPE_P)

class CConfig:
    _compilation_info_ = eci

    RTLD_LOCAL = rffi_platform.DefinedConstantInteger('RTLD_LOCAL')
    RTLD_GLOBAL = rffi_platform.DefinedConstantInteger('RTLD_GLOBAL')
    RTLD_NOW = rffi_platform.DefinedConstantInteger('RTLD_NOW')

    FFI_OK = rffi_platform.ConstantInteger('FFI_OK')
    FFI_BAD_TYPEDEF = rffi_platform.ConstantInteger('FFI_BAD_TYPEDEF')
    FFI_DEFAULT_ABI = rffi_platform.ConstantInteger('FFI_DEFAULT_ABI')
    if _MS_WINDOWS:
        FFI_STDCALL = rffi_platform.ConstantInteger('FFI_STDCALL')
Code example #15
def customtrace(obj, prev):
    stackletrootwalker = get_stackletrootwalker()
    return stackletrootwalker.next(obj, prev)


SUSPSTACK = lltype.GcStruct('SuspStack', ('handle', _c.handle),
                            ('anchor', llmemory.Address),
                            rtti=True)
NULL_SUSPSTACK = lltype.nullptr(SUSPSTACK)
CUSTOMTRACEFUNC = lltype.FuncType([llmemory.Address, llmemory.Address],
                                  llmemory.Address)
customtraceptr = llhelper(lltype.Ptr(CUSTOMTRACEFUNC), customtrace)
lltype.attachRuntimeTypeInfo(SUSPSTACK, customtraceptr=customtraceptr)

ASM_FRAMEDATA_HEAD_PTR = lltype.Ptr(lltype.ForwardReference())
ASM_FRAMEDATA_HEAD_PTR.TO.become(
    lltype.Struct('ASM_FRAMEDATA_HEAD', ('prev', ASM_FRAMEDATA_HEAD_PTR),
                  ('next', ASM_FRAMEDATA_HEAD_PTR)))
alternateanchor = lltype.malloc(ASM_FRAMEDATA_HEAD_PTR.TO, immortal=True)
alternateanchor.prev = alternateanchor
alternateanchor.next = alternateanchor

FUNCNOARG_P = lltype.Ptr(lltype.FuncType([], _c.handle))
pypy_asm_stackwalk2 = rffi.llexternal('pypy_asm_stackwalk',
                                      [FUNCNOARG_P, ASM_FRAMEDATA_HEAD_PTR],
                                      _c.handle,
                                      sandboxsafe=True,
                                      _nowrapper=True)

Code example #16
    PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs)
from pypy.module.cpyext.slotdefs import (
    slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function)
from pypy.interpreter.buffer import Buffer
from pypy.interpreter.error import OperationError
from pypy.rlib.rstring import rsplit
from pypy.rlib.objectmodel import specialize
from pypy.module.__builtin__.abstractinst import abstract_issubclass_w
from pypy.module.__builtin__.interp_classobj import W_ClassObject
from pypy.rlib import jit

WARN_ABOUT_MISSING_SLOT_FUNCTIONS = False

PyType_Check, PyType_CheckExact = build_type_checkers("Type", "w_type")

PyHeapTypeObjectStruct = lltype.ForwardReference()
PyHeapTypeObject = lltype.Ptr(PyHeapTypeObjectStruct)
PyHeapTypeObjectFields = (
    ("ht_type", PyTypeObject),
    ("ht_name", PyObject),
    ("as_number", PyNumberMethods),
    ("as_mapping", PyMappingMethods),
    ("as_sequence", PySequenceMethods),
    ("as_buffer", PyBufferProcs),
    )
cpython_struct("PyHeapTypeObject", PyHeapTypeObjectFields, PyHeapTypeObjectStruct,
               level=2)

class W_GetSetPropertyEx(GetSetProperty):
    def __init__(self, getset, w_type):
        self.getset = getset
Code example #17
File: frameobject.py  Project: njues/Sypy
from pypy.rpython.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import (cpython_api, bootstrap_function,
                                    PyObjectFields, cpython_struct,
                                    CANNOT_FAIL)
from pypy.module.cpyext.pyobject import (PyObject, Py_DecRef, make_ref,
                                         from_ref, track_reference,
                                         make_typedescr, get_typedescr)
from pypy.module.cpyext.state import State
from pypy.module.cpyext.pystate import PyThreadState
from pypy.module.cpyext.funcobject import PyCodeObject
from pypy.interpreter.pyframe import PyFrame
from pypy.interpreter.pycode import PyCode
from pypy.interpreter.pytraceback import PyTraceback

PyFrameObjectStruct = lltype.ForwardReference()
PyFrameObject = lltype.Ptr(PyFrameObjectStruct)
PyFrameObjectFields = (PyObjectFields + (
    ("f_code", PyCodeObject),
    ("f_globals", PyObject),
    ("f_lineno", rffi.INT),
))
cpython_struct("PyFrameObject", PyFrameObjectFields, PyFrameObjectStruct)


@bootstrap_function
def init_frameobject(space):
    make_typedescr(PyFrame.typedef,
                   basestruct=PyFrameObject.TO,
                   attach=frame_attach,
                   dealloc=frame_dealloc,
                   realize=frame_realize)
Code example #18
##   allocate a PyStringObject structure, and a buffer with the specified
##   size, but the reference won't be stored in the global map; there is no
##   corresponding object in pypy.  When from_ref() or Py_INCREF() is called,
##   the pypy string is created, and added to the global map of tracked
##   objects.  The buffer is then supposed to be immutable.
##
## - _PyString_Resize() works only on not-yet-pypy'd strings, and returns a
##   similar object.
##
## - PyString_Size() doesn't need to force the object.
##
## - There could be an (expensive!) check in from_ref() that the buffer still
##   corresponds to the pypy gc-managed string.
##

PyStringObjectStruct = lltype.ForwardReference()
PyStringObject = lltype.Ptr(PyStringObjectStruct)
PyStringObjectFields = PyObjectFields + \
    (("buffer", rffi.CCHARP), ("size", Py_ssize_t))
cpython_struct("PyStringObject", PyStringObjectFields, PyStringObjectStruct)


@bootstrap_function
def init_stringobject(space):
    "Type description of PyStringObject"
    make_typedescr(space.w_str.instancetypedef,
                   basestruct=PyStringObject.TO,
                   attach=string_attach,
                   dealloc=string_dealloc,
                   realize=string_realize)
Code example #19
File: cdatetime.py  Project: njues/Sypy
    w_type = space.getattr(w_datetime, space.wrap("time"))
    datetimeAPI.c_TimeType = rffi.cast(
        PyTypeObjectPtr, make_ref(space, w_type))

    w_type = space.getattr(w_datetime, space.wrap("timedelta"))
    datetimeAPI.c_DeltaType = rffi.cast(
        PyTypeObjectPtr, make_ref(space, w_type))

    return datetimeAPI

PyDateTime_Date = PyObject
PyDateTime_Time = PyObject
PyDateTime_DateTime = PyObject

PyDeltaObjectStruct = lltype.ForwardReference()
cpython_struct("PyDateTime_Delta", PyObjectFields, PyDeltaObjectStruct)
PyDateTime_Delta = lltype.Ptr(PyDeltaObjectStruct)

# Check functions

def make_check_function(func_name, type_name):
    @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL)
    @func_renamer(func_name)
    def check(space, w_obj):
        try:
            return space.is_true(
                space.appexec([w_obj], """(obj):
                    from datetime import %s as datatype
                    return isinstance(obj, datatype)
                    """ % (type_name,)))
Code example #20
from pypy.rpython.lltypesystem import rffi, lltype
from pypy.interpreter.error import OperationError
from pypy.module.cpyext.api import (cpython_api, cpython_struct,
                                    build_type_checkers, bootstrap_function,
                                    PyObject, PyObjectFields, CONST_STRING,
                                    CANNOT_FAIL, Py_ssize_t)
from pypy.module.cpyext.pyobject import (make_typedescr, track_reference,
                                         RefcountState, from_ref)
from pypy.rlib.rarithmetic import r_uint, intmask, LONG_TEST
from pypy.objspace.std.intobject import W_IntObject
import sys

PyIntObjectStruct = lltype.ForwardReference()
PyIntObject = lltype.Ptr(PyIntObjectStruct)
PyIntObjectFields = PyObjectFields + \
    (("ob_ival", rffi.LONG),)
cpython_struct("PyIntObject", PyIntObjectFields, PyIntObjectStruct)


@bootstrap_function
def init_intobject(space):
    "Type description of PyIntObject"
    make_typedescr(space.w_int.instancetypedef,
                   basestruct=PyIntObject.TO,
                   realize=int_realize)


def int_realize(space, obj):
    intval = rffi.cast(lltype.Signed, rffi.cast(PyIntObject, obj).c_ob_ival)
    w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type))
    w_obj = space.allocate_instance(W_IntObject, w_type)
Code example #21
File: _rsocket_rffi.py  Project: xx312022850/pypy
]
for name, default in constants_w_defaults:
    setattr(CConfig, name, platform.DefinedConstantInteger(name))

# types
if _MSVC:
    socketfd_type = rffi.UINT
else:
    socketfd_type = rffi.INT

CConfig.uint16_t = platform.SimpleType('uint16_t', rffi.USHORT)
CConfig.uint32_t = platform.SimpleType('uint32_t', rffi.UINT)
CConfig.size_t = platform.SimpleType('size_t', rffi.INT)
CConfig.ssize_t = platform.SimpleType('ssize_t', rffi.INT)
CConfig.socklen_t = platform.SimpleType('socklen_t', rffi.INT)
sockaddr_ptr = lltype.Ptr(lltype.ForwardReference())
addrinfo_ptr = lltype.Ptr(lltype.ForwardReference())

# struct types
CConfig.sockaddr = platform.Struct(
    'struct sockaddr', [('sa_family', rffi.INT),
                        ('sa_data', rffi.CFixedArray(rffi.CHAR, 1))])
CConfig.in_addr = platform.Struct('struct in_addr', [('s_addr', rffi.UINT)])
CConfig.in6_addr = platform.Struct('struct in6_addr', [])
CConfig.sockaddr_in = platform.Struct('struct sockaddr_in',
                                      [('sin_family', rffi.INT),
                                       ('sin_port', rffi.USHORT),
                                       ('sin_addr', CConfig.in_addr)])

CConfig.sockaddr_in6 = platform.Struct('struct sockaddr_in6',
                                       [('sin6_family', rffi.INT),
Code example #22
from pypy.rpython.lltypesystem import lltype, rffi
from pypy.module.cpyext.api import (cpython_api, cpython_struct, PyObject,
                                    build_type_checkers)
from pypy.module.cpyext.floatobject import PyFloat_AsDouble
from pypy.objspace.std.complexobject import W_ComplexObject
from pypy.interpreter.error import OperationError

PyComplex_Check, PyComplex_CheckExact = build_type_checkers("Complex")

Py_complex_t = lltype.ForwardReference()
Py_complex_ptr = lltype.Ptr(Py_complex_t)
Py_complex_fields = (("real", rffi.DOUBLE), ("imag", rffi.DOUBLE))
cpython_struct("Py_complex", Py_complex_fields, Py_complex_t)


@cpython_api([lltype.Float, lltype.Float], PyObject)
def PyComplex_FromDoubles(space, real, imag):
    return space.newcomplex(real, imag)


@cpython_api([PyObject], lltype.Float, error=-1)
def PyComplex_RealAsDouble(space, w_obj):
    if space.is_true(space.isinstance(w_obj, space.w_complex)):
        assert isinstance(w_obj, W_ComplexObject)
        return w_obj.realval
    else:
        return space.float_w(w_obj)


@cpython_api([PyObject], lltype.Float, error=-1)
def PyComplex_ImagAsDouble(space, w_obj):
Code example #23
File: funcobject.py  Project: nipengadmaster/pypy
from pypy.interpreter.error import OperationError
from pypy.interpreter.function import Function, Method
from pypy.interpreter.pycode import PyCode
from pypy.interpreter import pycode

CODE_FLAGS = dict(
    CO_OPTIMIZED   = 0x0001,
    CO_NEWLOCALS   = 0x0002,
    CO_VARARGS     = 0x0004,
    CO_VARKEYWORDS = 0x0008,
    CO_NESTED      = 0x0010,
    CO_GENERATOR   = 0x0020,
)
ALL_CODE_FLAGS = unrolling_iterable(CODE_FLAGS.items())

PyFunctionObjectStruct = lltype.ForwardReference()
PyFunctionObject = lltype.Ptr(PyFunctionObjectStruct)
PyFunctionObjectFields = PyObjectFields + \
    (("func_name", PyObject),)
cpython_struct("PyFunctionObject", PyFunctionObjectFields, PyFunctionObjectStruct)

PyCodeObjectStruct = lltype.ForwardReference()
PyCodeObject = lltype.Ptr(PyCodeObjectStruct)
PyCodeObjectFields = PyObjectFields + \
    (("co_name", PyObject),
     ("co_flags", rffi.INT),
     ("co_argcount", rffi.INT),
    )
cpython_struct("PyCodeObject", PyCodeObjectFields, PyCodeObjectStruct)

@bootstrap_function
Code example #24
File: interp_pyexpat.py  Project: njues/Sypy
else:
    libname = 'expat'
eci = ExternalCompilationInfo(
    libraries=[libname],
    library_dirs=platform.preprocess_library_dirs([]),
    includes=['expat.h'],
    include_dirs=platform.preprocess_include_dirs([]),
    )

eci = rffi_platform.configure_external_library(
    libname, eci,
    [dict(prefix='expat-',
          include_dir='lib', library_dir='win32/bin/release'),
     ])

XML_Content_Ptr = lltype.Ptr(lltype.ForwardReference())
XML_Parser = rffi.COpaquePtr(typedef='XML_Parser')

xml_error_list = [
    "XML_ERROR_NO_MEMORY",
    "XML_ERROR_SYNTAX",
    "XML_ERROR_NO_ELEMENTS",
    "XML_ERROR_INVALID_TOKEN",
    "XML_ERROR_UNCLOSED_TOKEN",
    "XML_ERROR_PARTIAL_CHAR",
    "XML_ERROR_TAG_MISMATCH",
    "XML_ERROR_DUPLICATE_ATTRIBUTE",
    "XML_ERROR_JUNK_AFTER_DOC_ELEMENT",
    "XML_ERROR_PARAM_ENTITY_REF",
    "XML_ERROR_UNDEFINED_ENTITY",
    "XML_ERROR_RECURSIVE_ENTITY_REF",
Code example #25
File: rclass.py  Project: nipengadmaster/pypy
#      }

# The type of the instances is:
#
#     struct object {       // for the root class
#         struct object_vtable* typeptr;
#     }
#
#     struct X {
#         struct Y super;   // inlined
#         ...               // extra instance attributes
#     }
#
# there's also a nongcobject

OBJECT_VTABLE = lltype.ForwardReference()
CLASSTYPE = Ptr(OBJECT_VTABLE)
OBJECT = GcStruct('object', ('typeptr', CLASSTYPE),
                  hints={
                      'immutable': True,
                      'shouldntbenull': True,
                      'typeptr': True
                  },
                  rtti=True)
OBJECTPTR = Ptr(OBJECT)
OBJECT_VTABLE.become(
    Struct(
        'object_vtable',
        #('parenttypeptr', CLASSTYPE),
        ('subclassrange_min', Signed),
        ('subclassrange_max', Signed),
Code example #26
class MarkSweepGC(GCBase):
    HDR = lltype.ForwardReference()
    HDRPTR = lltype.Ptr(HDR)
    # need to maintain a linked list of malloced objects, since we used the
    # systems allocator and can't walk the heap
    HDR.become(
        lltype.Struct('header', ('typeid16', llgroup.HALFWORD),
                      ('mark', lltype.Bool), ('flags', lltype.Char),
                      ('next', HDRPTR)))
    typeid_is_in_field = 'typeid16'
    withhash_flag_is_in_field = 'flags', FL_WITHHASH

    POOL = lltype.GcStruct('gc_pool')
    POOLPTR = lltype.Ptr(POOL)

    POOLNODE = lltype.ForwardReference()
    POOLNODEPTR = lltype.Ptr(POOLNODE)
    POOLNODE.become(
        lltype.Struct('gc_pool_node', ('linkedlist', HDRPTR),
                      ('nextnode', POOLNODEPTR)))

    # the following values override the default arguments of __init__ when
    # translating to a real backend.
    TRANSLATION_PARAMS = {'start_heap_size': 8 * 1024 * 1024}  # XXX adjust

    def __init__(self, config, start_heap_size=4096, **kwds):
        self.param_start_heap_size = start_heap_size
        GCBase.__init__(self, config, **kwds)

    def setup(self):
        GCBase.setup(self)
        self.heap_usage = 0  # at the end of the latest collection
        self.bytes_malloced = 0  # since the latest collection
        self.bytes_malloced_threshold = self.param_start_heap_size
        self.total_collection_time = 0.0
        self.malloced_objects = lltype.nullptr(self.HDR)
        self.malloced_objects_with_finalizer = lltype.nullptr(self.HDR)
        # these are usually only the small bits of memory that make a
        # weakref object
        self.objects_with_weak_pointers = lltype.nullptr(self.HDR)
        # pools, for x_swap_pool():
        #   'curpool' is the current pool, lazily allocated (i.e. NULL means
        #   the current POOL object is not yet malloc'ed).  POOL objects are
        #   usually at the start of a linked list of objects, via the HDRs.
        #   The exception is 'curpool' whose linked list of objects is in
        #   'self.malloced_objects' instead of in the header of 'curpool'.
        #   POOL objects are never in the middle of a linked list themselves.
        # XXX a likely cause for the current problems with pools is:
        # not all objects live in malloced_objects, some also live in
        # malloced_objects_with_finalizer and objects_with_weak_pointers
        self.curpool = lltype.nullptr(self.POOL)
        #   'poolnodes' is a linked list of all such linked lists.  Each
        #   linked list will usually start with a POOL object, but it can
        #   also contain only normal objects if the POOL object at the head
        #   was already freed.  The objects in 'malloced_objects' are not
        #   found via 'poolnodes'.
        self.poolnodes = lltype.nullptr(self.POOLNODE)
        self.collect_in_progress = False
        self.prev_collect_end_time = 0.0

    def maybe_collect(self):
        if self.bytes_malloced > self.bytes_malloced_threshold:
            self.collect()

    def write_malloc_statistics(self, typeid16, size, result, varsize):
        pass

    def write_free_statistics(self, typeid16, result):
        pass

    def malloc_fixedsize(self,
                         typeid16,
                         size,
                         has_finalizer=False,
                         is_finalizer_light=False,
                         contains_weakptr=False):
        self.maybe_collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        try:
            tot_size = size_gc_header + size
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced + usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid16 = typeid16
        hdr.mark = False
        hdr.flags = '\x00'
        if has_finalizer:
            hdr.next = self.malloced_objects_with_finalizer
            self.malloced_objects_with_finalizer = hdr
        elif contains_weakptr:
            hdr.next = self.objects_with_weak_pointers
            self.objects_with_weak_pointers = hdr
        else:
            hdr.next = self.malloced_objects
            self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced
        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc typeid', typeid16,
        #                 '->', llmemory.cast_adr_to_int(result))
        self.write_malloc_statistics(typeid16, tot_size, result, False)
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)

    malloc_fixedsize._dont_inline_ = True

    def malloc_fixedsize_clear(self,
                               typeid16,
                               size,
                               has_finalizer=False,
                               is_finalizer_light=False,
                               contains_weakptr=False):
        self.maybe_collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        try:
            tot_size = size_gc_header + size
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced + usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        raw_memclear(result, tot_size)
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid16 = typeid16
        hdr.mark = False
        hdr.flags = '\x00'
        if has_finalizer:
            hdr.next = self.malloced_objects_with_finalizer
            self.malloced_objects_with_finalizer = hdr
        elif contains_weakptr:
            hdr.next = self.objects_with_weak_pointers
            self.objects_with_weak_pointers = hdr
        else:
            hdr.next = self.malloced_objects
            self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced
        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc typeid', typeid16,
        #                 '->', llmemory.cast_adr_to_int(result))
        self.write_malloc_statistics(typeid16, tot_size, result, False)
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)

    malloc_fixedsize_clear._dont_inline_ = True

    def malloc_varsize(self, typeid16, length, size, itemsize,
                       offset_to_length):
        self.maybe_collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        try:
            fixsize = size_gc_header + size
            varsize = ovfcheck(itemsize * length)
            tot_size = ovfcheck(fixsize + varsize)
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced + usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        (result + size_gc_header + offset_to_length).signed[0] = length
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid16 = typeid16
        hdr.mark = False
        hdr.flags = '\x00'
        hdr.next = self.malloced_objects
        self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced

        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc_varsize length', length,
        #                 'typeid', typeid16,
        #                 '->', llmemory.cast_adr_to_int(result))
        self.write_malloc_statistics(typeid16, tot_size, result, True)
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)

    malloc_varsize._dont_inline_ = True

    def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                             offset_to_length):
        self.maybe_collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        try:
            fixsize = size_gc_header + size
            varsize = ovfcheck(itemsize * length)
            tot_size = ovfcheck(fixsize + varsize)
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced + usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        raw_memclear(result, tot_size)
        (result + size_gc_header + offset_to_length).signed[0] = length
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid16 = typeid16
        hdr.mark = False
        hdr.flags = '\x00'
        hdr.next = self.malloced_objects
        self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced

        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc_varsize length', length,
        #                 'typeid', typeid16,
        #                 '->', llmemory.cast_adr_to_int(result))
        self.write_malloc_statistics(typeid16, tot_size, result, True)
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)

    malloc_varsize_clear._dont_inline_ = True

    def collect(self, gen=0):
        # 1. mark from the roots, and also the objects that objects-with-del
        #    point to (using the list of malloced_objects_with_finalizer)
        # 2. walk the list of objects-without-del and free the ones not marked
        # 3. walk the list of objects-with-del and for the ones not marked:
        #    call __del__, move the object to the list of object-without-del
        import time
        debug_start("gc-collect")
        start_time = time.time()
        self.collect_in_progress = True
        size_gc_header = self.gcheaderbuilder.size_gc_header
        ##        llop.debug_view(lltype.Void, self.malloced_objects, self.poolnodes,
        ##                        size_gc_header)

        # push the roots on the mark stack
        objects = self.AddressStack()  # mark stack
        self._mark_stack = objects
        self.root_walker.walk_roots(
            MarkSweepGC._mark_root,  # stack roots
            MarkSweepGC._mark_root,  # static in prebuilt non-gc structures
            MarkSweepGC._mark_root)  # static in prebuilt gc objects

        # from this point onwards, no more mallocs should be possible
        old_malloced = self.bytes_malloced
        self.bytes_malloced = 0
        curr_heap_size = 0
        freed_size = 0

        # mark objects reachable by objects with a finalizer, but not those
        # themselves. add their size to curr_heap_size, since they always
        # survive the collection
        hdr = self.malloced_objects_with_finalizer
        while hdr:
            next = hdr.next
            typeid = hdr.typeid16
            gc_info = llmemory.cast_ptr_to_adr(hdr)
            obj = gc_info + size_gc_header
            if not hdr.mark:
                self.add_reachable_to_stack(obj, objects)
            addr = llmemory.cast_ptr_to_adr(hdr)
            size = self.fixed_size(typeid)
            if self.is_varsize(typeid):
                length = (obj +
                          self.varsize_offset_to_length(typeid)).signed[0]
                size += self.varsize_item_sizes(typeid) * length
            estimate = raw_malloc_usage(size_gc_header + size)
            curr_heap_size += estimate
            hdr = next

        # mark things on the mark stack and put their descendants onto the
        # stack until the stack is empty
        while objects.non_empty():  #mark
            curr = objects.pop()
            gc_info = curr - size_gc_header
            hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
            if hdr.mark:
                continue
            self.add_reachable_to_stack(curr, objects)
            hdr.mark = True
        objects.delete()
        # also mark self.curpool
        if self.curpool:
            gc_info = llmemory.cast_ptr_to_adr(self.curpool) - size_gc_header
            hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
            hdr.mark = True
        # go through the list of objects containing weak pointers
        # and kill the links if they go to dead objects
        # if the object itself is not marked, free it
        hdr = self.objects_with_weak_pointers
        surviving = lltype.nullptr(self.HDR)
        while hdr:
            typeid = hdr.typeid16
            next = hdr.next
            addr = llmemory.cast_ptr_to_adr(hdr)
            size = self.fixed_size(typeid)
            estimate = raw_malloc_usage(size_gc_header + size)
            if hdr.mark:
                offset = self.weakpointer_offset(typeid)
                hdr.mark = False
                gc_info = llmemory.cast_ptr_to_adr(hdr)
                weakref_obj = gc_info + size_gc_header
                pointing_to = (weakref_obj + offset).address[0]
                if pointing_to:
                    gc_info_pointing_to = pointing_to - size_gc_header
                    hdr_pointing_to = llmemory.cast_adr_to_ptr(
                        gc_info_pointing_to, self.HDRPTR)
                    # pointed to object will die
                    # XXX what to do if the object has a finalizer which resurrects
                    # the object?
                    if not hdr_pointing_to.mark:
                        (weakref_obj + offset).address[0] = NULL
                hdr.next = surviving
                surviving = hdr
                curr_heap_size += estimate
            else:
                gc_info = llmemory.cast_ptr_to_adr(hdr)
                weakref_obj = gc_info + size_gc_header
                self.write_free_statistics(typeid, weakref_obj)
                freed_size += estimate
                raw_free(addr)
            hdr = next
        self.objects_with_weak_pointers = surviving
        # sweep: delete objects without del if they are not marked
        # unmark objects without del that are marked
        firstpoolnode = lltype.malloc(self.POOLNODE, flavor='raw')
        firstpoolnode.linkedlist = self.malloced_objects
        firstpoolnode.nextnode = self.poolnodes
        prevpoolnode = lltype.nullptr(self.POOLNODE)
        poolnode = firstpoolnode
        while poolnode:  #sweep
            ppnext = llmemory.cast_ptr_to_adr(poolnode)
            ppnext += llmemory.offsetof(self.POOLNODE, 'linkedlist')
            hdr = poolnode.linkedlist
            while hdr:  #sweep
                typeid = hdr.typeid16
                next = hdr.next
                addr = llmemory.cast_ptr_to_adr(hdr)
                size = self.fixed_size(typeid)
                if self.is_varsize(typeid):
                    length = (addr + size_gc_header +
                              self.varsize_offset_to_length(typeid)).signed[0]
                    size += self.varsize_item_sizes(typeid) * length
                estimate = raw_malloc_usage(size_gc_header + size)
                if hdr.mark:
                    hdr.mark = False
                    ppnext.address[0] = addr
                    ppnext = llmemory.cast_ptr_to_adr(hdr)
                    ppnext += llmemory.offsetof(self.HDR, 'next')
                    curr_heap_size += estimate
                else:
                    gc_info = llmemory.cast_ptr_to_adr(hdr)
                    obj = gc_info + size_gc_header
                    self.write_free_statistics(typeid, obj)
                    freed_size += estimate
                    raw_free(addr)
                hdr = next
            ppnext.address[0] = llmemory.NULL
            next = poolnode.nextnode
            if not poolnode.linkedlist and prevpoolnode:
                # completely empty node
                prevpoolnode.nextnode = next
                lltype.free(poolnode, flavor='raw')
            else:
                prevpoolnode = poolnode
            poolnode = next
        self.malloced_objects = firstpoolnode.linkedlist
        self.poolnodes = firstpoolnode.nextnode
        lltype.free(firstpoolnode, flavor='raw')
        #llop.debug_view(lltype.Void, self.malloced_objects, self.malloced_objects_with_finalizer, size_gc_header)

        end_time = time.time()
        compute_time = start_time - self.prev_collect_end_time
        collect_time = end_time - start_time

        garbage_collected = old_malloced - (curr_heap_size - self.heap_usage)

        if (collect_time * curr_heap_size >
                0.02 * garbage_collected * compute_time):
            self.bytes_malloced_threshold += self.bytes_malloced_threshold / 2
        if (collect_time * curr_heap_size <
                0.005 * garbage_collected * compute_time):
            self.bytes_malloced_threshold /= 2

        # Use at least as much memory as current live objects.
        if curr_heap_size > self.bytes_malloced_threshold:
            self.bytes_malloced_threshold = curr_heap_size

        # Cap at 1/4 GB
        self.bytes_malloced_threshold = min(self.bytes_malloced_threshold,
                                            256 * 1024 * 1024)
        self.total_collection_time += collect_time
        self.prev_collect_end_time = end_time
        debug_print("  malloced since previous collection:", old_malloced,
                    "bytes")
        debug_print("  heap usage at start of collection: ",
                    self.heap_usage + old_malloced, "bytes")
        debug_print("  freed:                             ", freed_size,
                    "bytes")
        debug_print("  new heap usage:                    ", curr_heap_size,
                    "bytes")
        debug_print("  total time spent collecting:       ",
                    self.total_collection_time, "seconds")
        debug_print("  collecting time:                   ", collect_time)
        debug_print("  computing time:                    ", collect_time)
        debug_print("  new threshold:                     ",
                    self.bytes_malloced_threshold)
        ##        llop.debug_view(lltype.Void, self.malloced_objects, self.poolnodes,
        ##                        size_gc_header)
        assert self.heap_usage + old_malloced == curr_heap_size + freed_size

        self.heap_usage = curr_heap_size
        hdr = self.malloced_objects_with_finalizer
        self.malloced_objects_with_finalizer = lltype.nullptr(self.HDR)
        last = lltype.nullptr(self.HDR)
        while hdr:
            next = hdr.next
            if hdr.mark:
                hdr.next = lltype.nullptr(self.HDR)
                if not self.malloced_objects_with_finalizer:
                    self.malloced_objects_with_finalizer = hdr
                else:
                    last.next = hdr
                hdr.mark = False
                last = hdr
            else:
                obj = llmemory.cast_ptr_to_adr(hdr) + size_gc_header
                finalizer = self.getfinalizer(hdr.typeid16)
                # make malloced_objects_with_finalizer consistent
                # for the sake of a possible collection caused by finalizer
                if not self.malloced_objects_with_finalizer:
                    self.malloced_objects_with_finalizer = next
                else:
                    last.next = next
                hdr.next = self.malloced_objects
                self.malloced_objects = hdr
                #llop.debug_view(lltype.Void, self.malloced_objects, self.malloced_objects_with_finalizer, size_gc_header)
                finalizer(obj, llmemory.NULL)
                if not self.collect_in_progress:  # another collection was caused?
                    debug_print("outer collect interrupted "
                                "by recursive collect")
                    debug_stop("gc-collect")
                    return
                if not last:
                    if self.malloced_objects_with_finalizer == next:
                        self.malloced_objects_with_finalizer = lltype.nullptr(
                            self.HDR)
                    else:
                        # now it gets annoying: finalizer caused a malloc of something
                        # with a finalizer
                        last = self.malloced_objects_with_finalizer
                        while last.next != next:
                            last = last.next
                        last.next = lltype.nullptr(self.HDR)
                else:
                    last.next = lltype.nullptr(self.HDR)
            hdr = next
        self.collect_in_progress = False
        debug_stop("gc-collect")

    def _mark_root(self, root):  # 'root' is the address of the GCPTR
        gcobjectaddr = root.address[0]
        self._mark_stack.append(gcobjectaddr)

    def _mark_root_and_clear_bit(self, root):
        gcobjectaddr = root.address[0]
        self._mark_stack.append(gcobjectaddr)
        size_gc_header = self.gcheaderbuilder.size_gc_header
        gc_info = gcobjectaddr - size_gc_header
        hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
        hdr.mark = False

    STAT_HEAP_USAGE = 0
    STAT_BYTES_MALLOCED = 1
    STATISTICS_NUMBERS = 2

    def get_type_id(self, obj):
        size_gc_header = self.gcheaderbuilder.size_gc_header
        gc_info = obj - size_gc_header
        hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
        return hdr.typeid16

    def add_reachable_to_stack(self, obj, objects):
        self.trace(obj, self._add_reachable, objects)

    def _add_reachable(pointer, objects):
        obj = pointer.address[0]
        objects.append(obj)

    _add_reachable = staticmethod(_add_reachable)

    def statistics(self, index):
        # no memory allocation here!
        if index == self.STAT_HEAP_USAGE:
            return self.heap_usage
        if index == self.STAT_BYTES_MALLOCED:
            return self.bytes_malloced
        return -1

    def init_gc_object(self, addr, typeid):
        hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
        hdr.typeid16 = typeid
        hdr.mark = False
        hdr.flags = '\x00'

    def init_gc_object_immortal(self, addr, typeid, flags=0):
        # prebuilt gc structures always have the mark bit set
        # ignore flags
        hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
        hdr.typeid16 = typeid
        hdr.mark = True
        hdr.flags = '\x00'

    # experimental support for thread cloning
    def x_swap_pool(self, newpool):
        raise NotImplementedError("old operation deprecated")

    def x_clone(self, clonedata):
        raise NotImplementedError("old operation deprecated")

    def identityhash(self, obj):
        obj = llmemory.cast_ptr_to_adr(obj)
        hdr = self.header(obj)
        if ord(hdr.flags) & FL_WITHHASH:
            obj += self.get_size(obj)
            return obj.signed[0]
        else:
            return llmemory.cast_adr_to_int(obj)
Code example #27

build_exported_objects()


def get_structtype_for_ctype(ctype):
    from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr
    from pypy.module.cpyext.cdatetime import PyDateTime_CAPI
    return {
        "PyObject*": PyObject,
        "PyTypeObject*": PyTypeObjectPtr,
        "PyDateTime_CAPI*": lltype.Ptr(PyDateTime_CAPI)
    }[ctype]


PyTypeObject = lltype.ForwardReference()
PyTypeObjectPtr = lltype.Ptr(PyTypeObject)
# It is important that these PyObjects are allocated in a raw fashion
# Thus we cannot save a forward pointer to the wrapped object
# So we need a forward and backward mapping in our State instance
PyObjectStruct = lltype.ForwardReference()
PyObject = lltype.Ptr(PyObjectStruct)
PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr))
PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), )
cpython_struct('PyObject', PyObjectFields, PyObjectStruct)
PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields)
PyVarObject = lltype.Ptr(PyVarObjectStruct)

Py_buffer = cpython_struct(
    "Py_buffer",
    (
Code example #28
File: RMix.py  Project: purepython/pypy
if sys.platform == 'darwin':
    eci = ExternalCompilationInfo(
        includes=['SDL_mixer.h'],
        frameworks=['SDL_mixer'],
        include_dirs=['/Library/Frameworks/SDL_Mixer.framework/Headers'])
else:
    eci = ExternalCompilationInfo(
        includes=['SDL_mixer.h'],
        libraries=['SDL_mixer'],
    )

eci = eci.merge(RSDL.eci)
eci = eci.merge(eci)
eci = eci.merge(eci)

ChunkPtr = lltype.Ptr(lltype.ForwardReference())


class CConfig:
    _compilation_info_ = eci

    Chunk = platform.Struct('Mix_Chunk', [('allocated', rffi.INT),
                                          ('abuf', RSDL.Uint8P),
                                          ('alen', RSDL.Uint32),
                                          ('volume', RSDL.Uint8)])


globals().update(platform.configure(CConfig))

ChunkPtr.TO.become(Chunk)
Code example #29
File: unicodeobject.py  Project: purepython/pypy
                                    cpython_struct, CONST_STRING,
                                    CONST_WSTRING)
from pypy.module.cpyext.pyerrors import PyErr_BadArgument
from pypy.module.cpyext.pyobject import (PyObject, PyObjectP, Py_DecRef,
                                         make_ref, from_ref, track_reference,
                                         make_typedescr, get_typedescr)
from pypy.module.cpyext.stringobject import PyString_Check
from pypy.module.sys.interp_encoding import setdefaultencoding
from pypy.objspace.std import unicodeobject, unicodetype
from pypy.rlib import runicode
from pypy.tool.sourcetools import func_renamer
import sys

## See comment in stringobject.py.

PyUnicodeObjectStruct = lltype.ForwardReference()
PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct)
PyUnicodeObjectFields = (PyObjectFields + (("buffer", rffi.CWCHARP),
                                           ("size", Py_ssize_t)))
cpython_struct("PyUnicodeObject", PyUnicodeObjectFields, PyUnicodeObjectStruct)


@bootstrap_function
def init_unicodeobject(space):
    make_typedescr(space.w_unicode.instancetypedef,
                   basestruct=PyUnicodeObject.TO,
                   attach=unicode_attach,
                   dealloc=unicode_dealloc,
                   realize=unicode_realize)

Code example #30
class MarkSweepGC(GCBase):
    HDR = lltype.ForwardReference()
    HDRPTR = lltype.Ptr(HDR)
    # need to maintain a linked list of malloced objects, since we used the
    # systems allocator and can't walk the heap
    HDR.become(
        lltype.Struct('header', ('typeid16', rffi.USHORT),
                      ('mark', lltype.Bool), ('flags', lltype.Char),
                      ('next', HDRPTR)))
    typeid_is_in_field = 'typeid16'
    withhash_flag_is_in_field = 'flags', FL_WITHHASH

    POOL = lltype.GcStruct('gc_pool')
    POOLPTR = lltype.Ptr(POOL)

    POOLNODE = lltype.ForwardReference()
    POOLNODEPTR = lltype.Ptr(POOLNODE)
    POOLNODE.become(
        lltype.Struct('gc_pool_node', ('linkedlist', HDRPTR),
                      ('nextnode', POOLNODEPTR)))

    # the following values override the default arguments of __init__ when
    # translating to a real backend.
    TRANSLATION_PARAMS = {'start_heap_size': 8 * 1024 * 1024}  # XXX adjust

    def __init__(self,
                 config,
                 chunk_size=DEFAULT_CHUNK_SIZE,
                 start_heap_size=4096):
        self.param_start_heap_size = start_heap_size
        GCBase.__init__(self, config, chunk_size)

    def setup(self):
        GCBase.setup(self)
        self.heap_usage = 0  # at the end of the latest collection
        self.bytes_malloced = 0  # since the latest collection
        self.bytes_malloced_threshold = self.param_start_heap_size
        self.total_collection_time = 0.0
        self.malloced_objects = lltype.nullptr(self.HDR)
        self.malloced_objects_with_finalizer = lltype.nullptr(self.HDR)
        # these are usually only the small bits of memory that make a
        # weakref object
        self.objects_with_weak_pointers = lltype.nullptr(self.HDR)
        # pools, for x_swap_pool():
        #   'curpool' is the current pool, lazily allocated (i.e. NULL means
        #   the current POOL object is not yet malloc'ed).  POOL objects are
        #   usually at the start of a linked list of objects, via the HDRs.
        #   The exception is 'curpool' whose linked list of objects is in
        #   'self.malloced_objects' instead of in the header of 'curpool'.
        #   POOL objects are never in the middle of a linked list themselves.
        # XXX a likely cause for the current problems with pools is:
        # not all objects live in malloced_objects, some also live in
        # malloced_objects_with_finalizer and objects_with_weak_pointers
        self.curpool = lltype.nullptr(self.POOL)
        #   'poolnodes' is a linked list of all such linked lists.  Each
        #   linked list will usually start with a POOL object, but it can
        #   also contain only normal objects if the POOL object at the head
        #   was already freed.  The objects in 'malloced_objects' are not
        #   found via 'poolnodes'.
        self.poolnodes = lltype.nullptr(self.POOLNODE)
        self.collect_in_progress = False
        self.prev_collect_end_time = 0.0

    def maybe_collect(self):
        if self.bytes_malloced > self.bytes_malloced_threshold:
            self.collect()

    def write_malloc_statistics(self, typeid16, size, result, varsize):
        pass

    def write_free_statistics(self, typeid16, result):
        pass

    def malloc_fixedsize(self,
                         typeid16,
                         size,
                         can_collect,
                         has_finalizer=False,
                         contains_weakptr=False):
        if can_collect:
            self.maybe_collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        try:
            tot_size = size_gc_header + size
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced + usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid16 = typeid16
        hdr.mark = False
        hdr.flags = '\x00'
        if has_finalizer:
            hdr.next = self.malloced_objects_with_finalizer
            self.malloced_objects_with_finalizer = hdr
        elif contains_weakptr:
            hdr.next = self.objects_with_weak_pointers
            self.objects_with_weak_pointers = hdr
        else:
            hdr.next = self.malloced_objects
            self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced
        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc typeid', typeid16,
        #                 '->', llmemory.cast_adr_to_int(result))
        self.write_malloc_statistics(typeid16, tot_size, result, False)
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)

    malloc_fixedsize._dont_inline_ = True

    def malloc_fixedsize_clear(self,
                               typeid16,
                               size,
                               can_collect,
                               has_finalizer=False,
                               contains_weakptr=False):
        if can_collect:
            self.maybe_collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        try:
            tot_size = size_gc_header + size
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced + usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        raw_memclear(result, tot_size)
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid16 = typeid16
        hdr.mark = False
        hdr.flags = '\x00'
        if has_finalizer:
            hdr.next = self.malloced_objects_with_finalizer
            self.malloced_objects_with_finalizer = hdr
        elif contains_weakptr:
            hdr.next = self.objects_with_weak_pointers
            self.objects_with_weak_pointers = hdr
        else:
            hdr.next = self.malloced_objects
            self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced
        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc typeid', typeid16,
        #                 '->', llmemory.cast_adr_to_int(result))
        self.write_malloc_statistics(typeid16, tot_size, result, False)
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)

    malloc_fixedsize_clear._dont_inline_ = True

    def malloc_varsize(self, typeid16, length, size, itemsize,
                       offset_to_length, can_collect):
        if can_collect:
            self.maybe_collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        try:
            fixsize = size_gc_header + size
            varsize = ovfcheck(itemsize * length)
            tot_size = ovfcheck(fixsize + varsize)
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced + usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        (result + size_gc_header + offset_to_length).signed[0] = length
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid16 = typeid16
        hdr.mark = False
        hdr.flags = '\x00'
        hdr.next = self.malloced_objects
        self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced

        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc_varsize length', length,
        #                 'typeid', typeid16,
        #                 '->', llmemory.cast_adr_to_int(result))
        self.write_malloc_statistics(typeid16, tot_size, result, True)
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)

    malloc_varsize._dont_inline_ = True

    def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                             offset_to_length, can_collect):
        if can_collect:
            self.maybe_collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        try:
            fixsize = size_gc_header + size
            varsize = ovfcheck(itemsize * length)
            tot_size = ovfcheck(fixsize + varsize)
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced + usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        raw_memclear(result, tot_size)
        (result + size_gc_header + offset_to_length).signed[0] = length
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid16 = typeid16
        hdr.mark = False
        hdr.flags = '\x00'
        hdr.next = self.malloced_objects
        self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced

        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc_varsize length', length,
        #                 'typeid', typeid16,
        #                 '->', llmemory.cast_adr_to_int(result))
        self.write_malloc_statistics(typeid16, tot_size, result, True)
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)

    malloc_varsize_clear._dont_inline_ = True

    def collect(self, gen=0):
        # 1. mark from the roots, and also the objects that objects-with-del
        #    point to (using the list of malloced_objects_with_finalizer)
        # 2. walk the list of objects-without-del and free the ones not marked
        # 3. walk the list of objects-with-del and, for the ones not marked,
        #    call __del__ and move the object to the list of objects-without-del
        # (a minimal pure-Python sketch of the mark/sweep phases follows the class)
        import time
        from pypy.rpython.lltypesystem.lloperation import llop
        debug_start("gc-collect")
        start_time = time.time()
        self.collect_in_progress = True
        size_gc_header = self.gcheaderbuilder.size_gc_header
        ##        llop.debug_view(lltype.Void, self.malloced_objects, self.poolnodes,
        ##                        size_gc_header)

        # push the roots on the mark stack
        objects = self.AddressStack()  # mark stack
        self._mark_stack = objects
        self.root_walker.walk_roots(
            MarkSweepGC._mark_root,  # stack roots
            MarkSweepGC._mark_root,  # static in prebuilt non-gc structures
            MarkSweepGC._mark_root)  # static in prebuilt gc objects

        # from this point onwards, no more mallocs should be possible
        old_malloced = self.bytes_malloced
        self.bytes_malloced = 0
        curr_heap_size = 0
        freed_size = 0

        # mark objects reachable by objects with a finalizer, but not those
        # themselves. add their size to curr_heap_size, since they always
        # survive the collection
        hdr = self.malloced_objects_with_finalizer
        while hdr:
            next = hdr.next
            typeid = hdr.typeid16
            gc_info = llmemory.cast_ptr_to_adr(hdr)
            obj = gc_info + size_gc_header
            if not hdr.mark:
                self.add_reachable_to_stack(obj, objects)
            addr = llmemory.cast_ptr_to_adr(hdr)
            size = self.fixed_size(typeid)
            if self.is_varsize(typeid):
                length = (obj +
                          self.varsize_offset_to_length(typeid)).signed[0]
                size += self.varsize_item_sizes(typeid) * length
            estimate = raw_malloc_usage(size_gc_header + size)
            curr_heap_size += estimate
            hdr = next

        # mark things on the mark stack and push their descendants onto the
        # stack until the stack is empty
        while objects.non_empty():  #mark
            curr = objects.pop()
            gc_info = curr - size_gc_header
            hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
            if hdr.mark:
                continue
            self.add_reachable_to_stack(curr, objects)
            hdr.mark = True
        objects.delete()
        # also mark self.curpool
        if self.curpool:
            gc_info = llmemory.cast_ptr_to_adr(self.curpool) - size_gc_header
            hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
            hdr.mark = True
        # go through the list of objects containing weak pointers
        # and kill the links if they go to dead objects
        # if the object itself is not marked, free it
        hdr = self.objects_with_weak_pointers
        surviving = lltype.nullptr(self.HDR)
        while hdr:
            typeid = hdr.typeid16
            next = hdr.next
            addr = llmemory.cast_ptr_to_adr(hdr)
            size = self.fixed_size(typeid)
            estimate = raw_malloc_usage(size_gc_header + size)
            if hdr.mark:
                offset = self.weakpointer_offset(typeid)
                hdr.mark = False
                gc_info = llmemory.cast_ptr_to_adr(hdr)
                weakref_obj = gc_info + size_gc_header
                pointing_to = (weakref_obj + offset).address[0]
                if pointing_to:
                    gc_info_pointing_to = pointing_to - size_gc_header
                    hdr_pointing_to = llmemory.cast_adr_to_ptr(
                        gc_info_pointing_to, self.HDRPTR)
                    # the pointed-to object will die
                    # XXX what to do if the object has a finalizer which resurrects
                    # the object?
                    if not hdr_pointing_to.mark:
                        (weakref_obj + offset).address[0] = NULL
                hdr.next = surviving
                surviving = hdr
                curr_heap_size += estimate
            else:
                gc_info = llmemory.cast_ptr_to_adr(hdr)
                weakref_obj = gc_info + size_gc_header
                self.write_free_statistics(typeid, weakref_obj)
                freed_size += estimate
                raw_free(addr)
            hdr = next
        self.objects_with_weak_pointers = surviving
        # sweep: delete objects without del if they are not marked
        # unmark objects without del that are marked
        firstpoolnode = lltype.malloc(self.POOLNODE, flavor='raw')
        firstpoolnode.linkedlist = self.malloced_objects
        firstpoolnode.nextnode = self.poolnodes
        prevpoolnode = lltype.nullptr(self.POOLNODE)
        poolnode = firstpoolnode
        while poolnode:  #sweep
            ppnext = llmemory.cast_ptr_to_adr(poolnode)
            ppnext += llmemory.offsetof(self.POOLNODE, 'linkedlist')
            hdr = poolnode.linkedlist
            while hdr:  #sweep
                typeid = hdr.typeid16
                next = hdr.next
                addr = llmemory.cast_ptr_to_adr(hdr)
                size = self.fixed_size(typeid)
                if self.is_varsize(typeid):
                    length = (addr + size_gc_header +
                              self.varsize_offset_to_length(typeid)).signed[0]
                    size += self.varsize_item_sizes(typeid) * length
                estimate = raw_malloc_usage(size_gc_header + size)
                if hdr.mark:
                    hdr.mark = False
                    ppnext.address[0] = addr
                    ppnext = llmemory.cast_ptr_to_adr(hdr)
                    ppnext += llmemory.offsetof(self.HDR, 'next')
                    curr_heap_size += estimate
                else:
                    gc_info = llmemory.cast_ptr_to_adr(hdr)
                    obj = gc_info + size_gc_header
                    self.write_free_statistics(typeid, obj)
                    freed_size += estimate
                    raw_free(addr)
                hdr = next
            ppnext.address[0] = llmemory.NULL
            next = poolnode.nextnode
            if not poolnode.linkedlist and prevpoolnode:
                # completely empty node
                prevpoolnode.nextnode = next
                lltype.free(poolnode, flavor='raw')
            else:
                prevpoolnode = poolnode
            poolnode = next
        self.malloced_objects = firstpoolnode.linkedlist
        self.poolnodes = firstpoolnode.nextnode
        lltype.free(firstpoolnode, flavor='raw')
        #llop.debug_view(lltype.Void, self.malloced_objects, self.malloced_objects_with_finalizer, size_gc_header)

        end_time = time.time()
        compute_time = start_time - self.prev_collect_end_time
        collect_time = end_time - start_time

        garbage_collected = old_malloced - (curr_heap_size - self.heap_usage)

        if (collect_time * curr_heap_size >
                0.02 * garbage_collected * compute_time):
            self.bytes_malloced_threshold += self.bytes_malloced_threshold / 2
        if (collect_time * curr_heap_size <
                0.005 * garbage_collected * compute_time):
            self.bytes_malloced_threshold /= 2
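        # Worked example of the heuristic above (numbers are made up): if the
        # collection took 0.1s over a 10MB surviving heap after 2.0s of mutator
        # time that produced 5MB of garbage, then 0.1*10 > 0.02*5*2.0, so the
        # threshold grows by 50%, trading memory for less frequent collections.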

        # Use at least as much memory as the current live objects.
        if curr_heap_size > self.bytes_malloced_threshold:
            self.bytes_malloced_threshold = curr_heap_size

        # Cap at 1/4 GB
        self.bytes_malloced_threshold = min(self.bytes_malloced_threshold,
                                            256 * 1024 * 1024)
        self.total_collection_time += collect_time
        self.prev_collect_end_time = end_time
        debug_print("  malloced since previous collection:", old_malloced,
                    "bytes")
        debug_print("  heap usage at start of collection: ",
                    self.heap_usage + old_malloced, "bytes")
        debug_print("  freed:                             ", freed_size,
                    "bytes")
        debug_print("  new heap usage:                    ", curr_heap_size,
                    "bytes")
        debug_print("  total time spent collecting:       ",
                    self.total_collection_time, "seconds")
        debug_print("  collecting time:                   ", collect_time)
        debug_print("  computing time:                    ", compute_time)
        debug_print("  new threshold:                     ",
                    self.bytes_malloced_threshold)
        ##        llop.debug_view(lltype.Void, self.malloced_objects, self.poolnodes,
        ##                        size_gc_header)
        assert self.heap_usage + old_malloced == curr_heap_size + freed_size

        self.heap_usage = curr_heap_size
        hdr = self.malloced_objects_with_finalizer
        self.malloced_objects_with_finalizer = lltype.nullptr(self.HDR)
        last = lltype.nullptr(self.HDR)
        while hdr:
            next = hdr.next
            if hdr.mark:
                hdr.next = lltype.nullptr(self.HDR)
                if not self.malloced_objects_with_finalizer:
                    self.malloced_objects_with_finalizer = hdr
                else:
                    last.next = hdr
                hdr.mark = False
                last = hdr
            else:
                obj = llmemory.cast_ptr_to_adr(hdr) + size_gc_header
                finalizer = self.getfinalizer(hdr.typeid16)
                # make malloced_objects_with_finalizer consistent
                # for the sake of a possible collection caused by finalizer
                if not self.malloced_objects_with_finalizer:
                    self.malloced_objects_with_finalizer = next
                else:
                    last.next = next
                hdr.next = self.malloced_objects
                self.malloced_objects = hdr
                #llop.debug_view(lltype.Void, self.malloced_objects, self.malloced_objects_with_finalizer, size_gc_header)
                finalizer(obj)
                if not self.collect_in_progress:  # another collection was caused?
                    debug_print("outer collect interrupted "
                                "by recursive collect")
                    debug_stop("gc-collect")
                    return
                if not last:
                    if self.malloced_objects_with_finalizer == next:
                        self.malloced_objects_with_finalizer = lltype.nullptr(
                            self.HDR)
                    else:
                        # now it gets annoying: finalizer caused a malloc of something
                        # with a finalizer
                        last = self.malloced_objects_with_finalizer
                        while last.next != next:
                            last = last.next
                        last.next = lltype.nullptr(self.HDR)
                else:
                    last.next = lltype.nullptr(self.HDR)
            hdr = next
        self.collect_in_progress = False
        debug_stop("gc-collect")

    def _mark_root(self, root):  # 'root' is the address of the GCPTR
        gcobjectaddr = root.address[0]
        self._mark_stack.append(gcobjectaddr)

    def _mark_root_and_clear_bit(self, root):
        gcobjectaddr = root.address[0]
        self._mark_stack.append(gcobjectaddr)
        size_gc_header = self.gcheaderbuilder.size_gc_header
        gc_info = gcobjectaddr - size_gc_header
        hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
        hdr.mark = False

    STAT_HEAP_USAGE = 0
    STAT_BYTES_MALLOCED = 1
    STATISTICS_NUMBERS = 2

    def get_type_id(self, obj):
        size_gc_header = self.gcheaderbuilder.size_gc_header
        gc_info = obj - size_gc_header
        hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
        return hdr.typeid16

    def add_reachable_to_stack(self, obj, objects):
        self.trace(obj, self._add_reachable, objects)

    def _add_reachable(pointer, objects):
        obj = pointer.address[0]
        objects.append(obj)

    _add_reachable = staticmethod(_add_reachable)

    def statistics(self, index):
        # no memory allocation here!
        if index == self.STAT_HEAP_USAGE:
            return self.heap_usage
        if index == self.STAT_BYTES_MALLOCED:
            return self.bytes_malloced
        return -1

    def init_gc_object(self, addr, typeid):
        hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
        hdr.typeid16 = typeid
        hdr.mark = False
        hdr.flags = '\x00'

    def init_gc_object_immortal(self, addr, typeid, flags=0):
        # prebuilt gc structures always have the mark bit set
        # ignore flags
        hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
        hdr.typeid16 = typeid
        hdr.mark = True
        hdr.flags = '\x00'

    # experimental support for thread cloning
    def x_swap_pool(self, newpool):
        # Set newpool as the current pool (create one if newpool == NULL).
        # All malloc'ed objects are put into the current pool; this is a
        # way to separate objects depending on when they were allocated.
        size_gc_header = self.gcheaderbuilder.size_gc_header
        # invariant: each POOL GcStruct is at the _front_ of a linked list
        # of malloced objects.
        oldpool = self.curpool
        #llop.debug_print(lltype.Void, 'x_swap_pool',
        #                 lltype.cast_ptr_to_int(oldpool),
        #                 lltype.cast_ptr_to_int(newpool))
        if not oldpool:
            # make a fresh pool object, which is automatically inserted at the
            # front of the current list
            oldpool = lltype.malloc(self.POOL)
            addr = llmemory.cast_ptr_to_adr(oldpool)
            addr -= size_gc_header
            hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
            # put this new POOL object in the poolnodes list
            node = lltype.malloc(self.POOLNODE, flavor="raw")
            node.linkedlist = hdr
            node.nextnode = self.poolnodes
            self.poolnodes = node
        else:
            # manually insert oldpool at the front of the current list
            addr = llmemory.cast_ptr_to_adr(oldpool)
            addr -= size_gc_header
            hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
            hdr.next = self.malloced_objects

        newpool = lltype.cast_opaque_ptr(self.POOLPTR, newpool)
        if newpool:
            # newpool is at the front of the new linked list to install
            addr = llmemory.cast_ptr_to_adr(newpool)
            addr -= size_gc_header
            hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
            self.malloced_objects = hdr.next
            # invariant: now that objects in the hdr.next list are accessible
            # through self.malloced_objects, make sure they are not accessible
            # via poolnodes (which has a node pointing to newpool):
            hdr.next = lltype.nullptr(self.HDR)
        else:
            # start a fresh new linked list
            self.malloced_objects = lltype.nullptr(self.HDR)
        self.curpool = newpool
        return lltype.cast_opaque_ptr(X_POOL_PTR, oldpool)

    def x_clone(self, clonedata):
        # Recursively clone the gcobject and everything it points to,
        # directly or indirectly -- but stops at objects that are not
        # in the specified pool.  A new pool is built to contain the
        # copies, and the 'gcobjectptr' and 'pool' fields of clonedata
        # are adjusted to refer to the result.

        # install a new pool into which all the mallocs go
        curpool = self.x_swap_pool(lltype.nullptr(X_POOL))

        size_gc_header = self.gcheaderbuilder.size_gc_header
        oldobjects = self.AddressStack()
        # if no pool specified, use the current pool as the 'source' pool
        oldpool = clonedata.pool or curpool
        oldpool = lltype.cast_opaque_ptr(self.POOLPTR, oldpool)
        addr = llmemory.cast_ptr_to_adr(oldpool)
        addr -= size_gc_header

        hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
        hdr = hdr.next  # skip the POOL object itself
        while hdr:
            next = hdr.next
            # mark all objects from malloced_list
            hdr.flags = chr(ord(hdr.flags) | FL_CURPOOL)
            hdr.next = lltype.nullptr(self.HDR)  # abused to point to the copy
            oldobjects.append(llmemory.cast_ptr_to_adr(hdr))
            hdr = next

        # a stack of addresses of places that still point to old objects
        # and that may need to be fixed to point to a new copy
        stack = self.AddressStack()
        stack.append(
            llmemory.cast_ptr_to_adr(clonedata) +
            llmemory.offsetof(X_CLONE, 'gcobjectptr'))
        while stack.non_empty():
            gcptr_addr = stack.pop()
            oldobj_addr = gcptr_addr.address[0]
            if not oldobj_addr:
                continue  # pointer is NULL
            oldhdr = llmemory.cast_adr_to_ptr(oldobj_addr - size_gc_header,
                                              self.HDRPTR)
            if not (ord(oldhdr.flags) & FL_CURPOOL):
                continue  # ignore objects that were not in the malloced_list
            newhdr = oldhdr.next  # abused to point to the copy
            if not newhdr:
                typeid = oldhdr.typeid16
                size = self.fixed_size(typeid)
                # XXX! collect() at the beginning if the free heap is low
                if self.is_varsize(typeid):
                    itemsize = self.varsize_item_sizes(typeid)
                    offset_to_length = self.varsize_offset_to_length(typeid)
                    length = (oldobj_addr + offset_to_length).signed[0]
                    newobj = self.malloc_varsize(typeid, length, size,
                                                 itemsize, offset_to_length,
                                                 False)
                    size += length * itemsize
                else:
                    newobj = self.malloc_fixedsize(typeid, size, False)
                    length = -1

                newobj_addr = llmemory.cast_ptr_to_adr(newobj)

                #llop.debug_print(lltype.Void, 'clone',
                #                 llmemory.cast_adr_to_int(oldobj_addr),
                #                 '->', llmemory.cast_adr_to_int(newobj_addr),
                #                 'typeid', typeid,
                #                 'length', length)

                newhdr_addr = newobj_addr - size_gc_header
                newhdr = llmemory.cast_adr_to_ptr(newhdr_addr, self.HDRPTR)

                saved_id = newhdr.typeid16  # XXX hack needed for genc
                saved_flg1 = newhdr.mark
                saved_flg2 = newhdr.flags
                saved_next = newhdr.next  # where size_gc_header == 0
                raw_memcopy(oldobj_addr, newobj_addr, size)
                newhdr.typeid16 = saved_id
                newhdr.mark = saved_flg1
                newhdr.flags = saved_flg2
                newhdr.next = saved_next

                offsets = self.offsets_to_gc_pointers(typeid)
                i = 0
                while i < len(offsets):
                    pointer_addr = newobj_addr + offsets[i]
                    stack.append(pointer_addr)
                    i += 1

                if length > 0:
                    offsets = self.varsize_offsets_to_gcpointers_in_var_part(
                        typeid)
                    itemlength = self.varsize_item_sizes(typeid)
                    offset = self.varsize_offset_to_variable_part(typeid)
                    itembaseaddr = newobj_addr + offset
                    i = 0
                    while i < length:
                        item = itembaseaddr + itemlength * i
                        j = 0
                        while j < len(offsets):
                            pointer_addr = item + offsets[j]
                            stack.append(pointer_addr)
                            j += 1
                        i += 1

                oldhdr.next = newhdr
            newobj_addr = llmemory.cast_ptr_to_adr(newhdr) + size_gc_header
            gcptr_addr.address[0] = newobj_addr
        stack.delete()

        # re-create the original linked list
        next = lltype.nullptr(self.HDR)
        while oldobjects.non_empty():
            hdr = llmemory.cast_adr_to_ptr(oldobjects.pop(), self.HDRPTR)
            hdr.flags = chr(ord(hdr.flags) & ~FL_CURPOOL)  # reset the flag
            hdr.next = next
            next = hdr
        oldobjects.delete()

        # consistency check
        addr = llmemory.cast_ptr_to_adr(oldpool)
        addr -= size_gc_header
        hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
        assert hdr.next == next

        # build the new pool object collecting the new objects, and
        # reinstall the pool that was current at the beginning of x_clone()
        clonedata.pool = self.x_swap_pool(curpool)

    def identityhash(self, obj):
        obj = llmemory.cast_ptr_to_adr(obj)
        hdr = self.header(obj)
        if ord(hdr.flags) & FL_WITHHASH:
            obj += self.get_size(obj)
            return obj.signed[0]
        else:
            return llmemory.cast_adr_to_int(obj)
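
The three phases listed at the top of collect() can be illustrated with a tiny pure-Python sketch (an illustration of the algorithm only, not of the RPython code above; all names are made up, and finalizers and weakrefs are left out):

class Node:
    """Toy heap object: 'children' plays the role of the traced GC pointers."""
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)
        self.mark = False

def mark_and_sweep(roots, heap):
    # phase 1: mark everything reachable from the roots, using an explicit
    # stack instead of recursion (like the AddressStack mark stack above)
    stack = list(roots)
    while stack:
        obj = stack.pop()
        if obj.mark:
            continue
        obj.mark = True
        stack.extend(obj.children)
    # phase 2: sweep -- keep marked objects (clearing the mark for the next
    # collection) and drop everything else
    survivors = []
    for obj in heap:
        if obj.mark:
            obj.mark = False
            survivors.append(obj)
    return survivors

# usage: 'c' is unreachable and is swept, 'a' and 'b' survive
b = Node("b")
a = Node("a", [b])
c = Node("c")
assert [o.name for o in mark_and_sweep([a], [a, b, c])] == ["a", "b"]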