Example #1
 def lock(self, TYPE=_c.sockaddr):
     """Return self.addr_p, cast as a pointer to TYPE.  Must call unlock()!
     """
     if not (self.minlen <= self.addrlen <= self.maxlen):
         raise RSocketError("invalid address")
     return rffi.cast(lltype.Ptr(TYPE), self.addr_p)
Example #2
 def compute_result_annotation(self, s_str):
     from pypy.rpython.lltypesystem.rstr import STR, UNICODE
     # 'strtype' (str or unicode) is presumably bound in the enclosing
     # registration scope; it is not defined in this excerpt.
     if strtype is str:
         return annmodel.lltype_to_annotation(lltype.Ptr(STR))
     else:
         return annmodel.lltype_to_annotation(lltype.Ptr(UNICODE))
Example #3
                           threadsafe=False)


if _POSIX:
    cConfig.timeval.__name__ = "_timeval"
    timeval = cConfig.timeval

CLOCKS_PER_SEC = cConfig.CLOCKS_PER_SEC
clock_t = cConfig.clock_t
tm = cConfig.tm
glob_buf = lltype.malloc(tm, flavor='raw', zero=True, immortal=True)

if cConfig.has_gettimeofday:
    c_gettimeofday = external('gettimeofday', [rffi.VOIDP, rffi.VOIDP],
                              rffi.INT)
TM_P = lltype.Ptr(tm)
c_clock = external('clock', [rffi.TIME_TP], clock_t)
c_time = external('time', [rffi.TIME_TP], rffi.TIME_T)
c_ctime = external('ctime', [rffi.TIME_TP], rffi.CCHARP)
c_gmtime = external('gmtime', [rffi.TIME_TP], TM_P)
c_mktime = external('mktime', [TM_P], rffi.TIME_T)
c_asctime = external('asctime', [TM_P], rffi.CCHARP)
c_localtime = external('localtime', [rffi.TIME_TP], TM_P)
if _POSIX:
    c_tzset = external('tzset', [], lltype.Void)
if _WIN:
    win_eci = ExternalCompilationInfo(
        includes=["time.h"],
        post_include_bits=[
            "long pypy_get_timezone();", "int pypy_get_daylight();",
            "char** pypy_get_tzname();"
Example #4
            lst.append(ll_object)
            seen[ll_object] = ll_object
    page = LLRefTrackerPage(lst, size_gc_header)
    # auto-expand one level, for now
    auto_expand = 1
    for i in range(auto_expand):
        page = page.content()
        for ll_object in lst[1:]:
            for name, value in page.enum_content(ll_object):
                if not isinstance(value, str) and value not in seen:
                    lst.append(value)
                    seen[value] = value
        page = page.newpage(lst)
    page.display()


if __name__ == '__main__':
    try:
        sys.path.remove(os.getcwd())
    except ValueError:
        pass
    T = lltype.GcArray(lltype.Signed)
    S = lltype.GcForwardReference()
    S.become(
        lltype.GcStruct('S', ('t', lltype.Ptr(T)), ('next', lltype.Ptr(S))))
    s = lltype.malloc(S)
    s.next = lltype.malloc(S)
    s.next.t = lltype.malloc(T, 5)
    s.next.t[1] = 123
    track(s)
Example #5
from pypy.rpython.lltypesystem.llmemory import raw_memcopy, raw_memclear
from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage
from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
from pypy.rpython.memory.support import get_address_stack
from pypy.rpython.memory.gcheader import GCHeaderBuilder
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rlib.objectmodel import free_non_gc_object
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rlib.rarithmetic import ovfcheck
from pypy.rpython.memory.gc.base import GCBase


import sys, os

X_POOL = lltype.GcOpaqueType('gc.pool')
X_POOL_PTR = lltype.Ptr(X_POOL)
X_CLONE = lltype.GcStruct('CloneData', ('gcobjectptr', llmemory.GCREF),
                                       ('pool',        X_POOL_PTR))
X_CLONE_PTR = lltype.Ptr(X_CLONE)

DEBUG_PRINT = False
memoryError = MemoryError()
class MarkSweepGC(GCBase):
    _alloc_flavor_ = "raw"

    HDR = lltype.ForwardReference()
    HDRPTR = lltype.Ptr(HDR)
    # need to maintain a linked list of malloced objects, since we use the
    # system's allocator and can't walk the heap
    HDR.become(lltype.Struct('header', ('typeid', lltype.Signed),
                                       ('next', HDRPTR)))
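
The ForwardReference/Ptr pair above is how a structure gets a pointer field to its own, not-yet-defined type. A minimal standalone sketch of the same pattern, with illustrative names (NODE, 'value'):

from pypy.rpython.lltypesystem import lltype

NODE = lltype.ForwardReference()
NODEPTR = lltype.Ptr(NODE)
# become() fills in the forward reference once the full layout is known
NODE.become(lltype.Struct('node', ('value', lltype.Signed), ('next', NODEPTR)))

node = lltype.malloc(NODE, flavor='raw')   # plain Struct => raw allocation, must be freed
node.value = 7
node.next = lltype.nullptr(NODE)
assert lltype.typeOf(node) == NODEPTR
lltype.free(node, flavor='raw')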
Example #6
def push_field(self, num, value):
    ptr = rffi.ptradd(self.ll_buffer, self.shape.ll_positions[num])
    TP = lltype.typeOf(value)
    T = lltype.Ptr(rffi.CArray(TP))
    rffi.cast(T, ptr)[0] = value
Example #7
@cpython_api([], rffi.CCHARP)
def Py_GetVersion(space):
    """Return the version of this Python interpreter.  This is a
    string that looks something like

    "1.5 (\#67, Dec 31 1997, 22:34:28) [GCC 2.7.2.2]"

    The first word (up to the first space character) is the current
    Python version; the first three characters are the major and minor
    version separated by a period.  The returned string points into
    static storage; the caller should not modify its value.  The value
    is available to Python code as sys.version."""
    return space.fromcache(State).get_version()

@cpython_api([lltype.Ptr(lltype.FuncType([], lltype.Void))], rffi.INT_real, error=-1)
def Py_AtExit(space, func_ptr):
    """Register a cleanup function to be called by Py_Finalize().  The cleanup
    function will be called with no arguments and should return no value.  At
    most 32 cleanup functions can be registered.  When the registration is
    successful, Py_AtExit() returns 0; on failure, it returns -1.  The cleanup
    function registered last is called first. Each cleanup function will be
    called at most once.  Since Python's internal finalization will have
    completed before the cleanup function, no Python APIs should be called by
    func."""
    from pypy.module import cpyext
    w_module = space.getbuiltinmodule('cpyext')
    module = space.interp_w(cpyext.Module, w_module)
    try:
        module.register_atexit(func_ptr)
    except ValueError:
Example #8
from pypy.rpython.lltypesystem import rffi, lltype
from pypy.rpython.tool import rffi_platform
from pypy.rpython.extfunc import register_external
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.module._minimal_curses import interp_curses
from pypy.translator.tool.cbuild import ExternalCompilationInfo

eci = ExternalCompilationInfo(
    includes=['curses.h', 'term.h'],
    libraries=['curses'],
)

rffi_platform.verify_eci(eci)

INT = rffi.INT
INTP = lltype.Ptr(lltype.Array(INT, hints={'nolength': True}))
c_setupterm = rffi.llexternal('setupterm', [rffi.CCHARP, INT, INTP],
                              INT,
                              compilation_info=eci)
c_tigetstr = rffi.llexternal('tigetstr', [rffi.CCHARP],
                             rffi.CCHARP,
                             compilation_info=eci)
c_tparm = rffi.llexternal(
    'tparm', [rffi.CCHARP, INT, INT, INT, INT, INT, INT, INT, INT, INT, INT],
    rffi.CCHARP,
    compilation_info=eci)

ERR = rffi.CConstant('ERR', lltype.Signed)
OK = rffi.CConstant('OK', lltype.Signed)

Example #9
 def gc_shadowstackref_context(gcref):
     ssref = lltype.cast_opaque_ptr(lltype.Ptr(SHADOWSTACKREF), gcref)
     return ssref.context
Example #10
 def malloc(self, TYPE, n=None):
     addr = self.gc.malloc(self.get_type_id(TYPE), n)
     return llmemory.cast_adr_to_ptr(addr, lltype.Ptr(TYPE))
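
This helper wraps the GC's raw malloc and retypes the returned address. The address/pointer round-trip can be sketched standalone (S and its field 'x' are illustrative names):

from pypy.rpython.lltypesystem import lltype, llmemory

S = lltype.GcStruct('S', ('x', lltype.Signed))
s = lltype.malloc(S)
adr = llmemory.cast_ptr_to_adr(s)                          # pointer -> address
assert llmemory.cast_adr_to_ptr(adr, lltype.Ptr(S)) == s   # address -> typed pointer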
Example #11
 def constant_func(self, name, inputtypes, rettype, graph, **kwds):
     FUNC_TYPE = lltype.FuncType(inputtypes, rettype)
     fn_ptr = lltype.functionptr(FUNC_TYPE, name, graph=graph, **kwds)
     return Constant(fn_ptr, lltype.Ptr(FUNC_TYPE))
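
constant_func builds a flow-graph Constant typed as a pointer to the FuncType. The same Ptr(FuncType) relationship can be seen in isolation with a plain _callable instead of a graph (a sketch; the name 'inc' and the lambda are illustrative):

from pypy.rpython.lltypesystem import lltype

FUNC = lltype.FuncType([lltype.Signed], lltype.Signed)
inc = lltype.functionptr(FUNC, 'inc', _callable=lambda x: x + 1)
assert lltype.typeOf(inc) == lltype.Ptr(FUNC)
assert inc(41) == 42   # _callable makes the pointer directly callable from Python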
Example #12
instantiating them and asking them to allocate memory by calling their
methods directly.  The tests need to maintain by hand what the GC should
see as the list of roots (stack and prebuilt objects).
"""

# XXX VERY INCOMPLETE, low coverage

import py
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.memory.gctypelayout import TypeLayoutBuilder

ADDR_ARRAY = lltype.Array(llmemory.Address)
S = lltype.GcForwardReference()
S.become(lltype.GcStruct('S',
                         ('x', lltype.Signed),
                         ('prev', lltype.Ptr(S)),
                         ('next', lltype.Ptr(S))))
RAW = lltype.Struct('RAW', ('p', lltype.Ptr(S)), ('q', lltype.Ptr(S)))
VAR = lltype.GcArray(lltype.Ptr(S))
VARNODE = lltype.GcStruct('VARNODE', ('a', lltype.Ptr(VAR)))


class DirectRootWalker(object):

    def __init__(self, tester):
        self.tester = tester

    def walk_roots(self, collect_stack_root,
                   collect_static_in_prebuilt_nongc,
                   collect_static_in_prebuilt_gc):
        gc = self.tester.gc
Example #13
from pypy.interpreter.error import OperationError, operationerrfmt
from pypy.interpreter.function import BuiltinFunction, Method, StaticMethod
from pypy.rpython.lltypesystem import rffi, lltype
from pypy.module.cpyext.pyobject import (PyObject, from_ref, make_ref,
                                         make_typedescr, Py_DecRef)
from pypy.module.cpyext.api import (generic_cpy_call, cpython_api, PyObject,
                                    cpython_struct, METH_KEYWORDS, METH_O,
                                    CONST_STRING, METH_CLASS, METH_STATIC,
                                    METH_COEXIST, METH_NOARGS, METH_VARARGS,
                                    build_type_checkers, PyObjectFields,
                                    bootstrap_function)
from pypy.module.cpyext.pyerrors import PyErr_Occurred
from pypy.rlib.objectmodel import we_are_translated

PyCFunction_typedef = rffi.COpaquePtr(typedef='PyCFunction')
PyCFunction = lltype.Ptr(lltype.FuncType([PyObject, PyObject], PyObject))
PyCFunctionKwArgs = lltype.Ptr(
    lltype.FuncType([PyObject, PyObject, PyObject], PyObject))

PyMethodDef = cpython_struct('PyMethodDef', [
    ('ml_name', rffi.CCHARP),
    ('ml_meth', PyCFunction_typedef),
    ('ml_flags', rffi.INT_real),
    ('ml_doc', rffi.CCHARP),
])

PyCFunctionObjectStruct = cpython_struct(
    'PyCFunctionObject', PyObjectFields + (
        ('m_ml', lltype.Ptr(PyMethodDef)),
        ('m_self', PyObject),
        ('m_module', PyObject),
Example #14
def test_llhelper(monkeypatch):
    """Show how to get function pointers used in type slots"""
    FT = lltype.FuncType([], lltype.Signed)
    FTPTR = lltype.Ptr(FT)

    def make_wrapper(space, func):
        def wrapper():
            return func(space)

        return wrapper

    monkeypatch.setattr(pypy.module.cpyext.api, 'make_wrapper', make_wrapper)

    @specialize.memo()
    def get_tp_function(space, typedef):
        @cpython_api([], lltype.Signed, error=-1, external=False)
        def slot_tp_function(space):
            return typedef.value

        api_func = slot_tp_function.api_func
        return lambda: llhelper(api_func.functype, api_func.get_wrapper(space))

    class Space:
        _cache = {}

        @specialize.memo()
        def fromcache(self, key):
            try:
                return self._cache[key]
            except KeyError:
                result = self._cache[key] = self.build(key)
                return result

        def _freeze_(self):
            return True

    class TypeDef:
        def __init__(self, value):
            self.value = value

        def _freeze_(self):
            return True

    class W_Type:
        def __init__(self, typedef):
            self.instancetypedef = typedef

        def _freeze_(self):
            try:
                del self.funcptr
            except AttributeError:
                pass
            return False

    w_type1 = W_Type(TypeDef(123))
    w_type2 = W_Type(TypeDef(456))
    space = Space()

    def run(x):
        if x:
            w_type = w_type1
        else:
            w_type = w_type2
        typedef = w_type.instancetypedef
        w_type.funcptr = get_tp_function(space, typedef)()
        return w_type.funcptr()

    fn = compile(run, [bool])
    assert fn(True) == 123
    assert fn(False) == 456
Example #15
def ll_weakref_deref(wref):
    plink = llmemory.cast_weakrefptr_to_ptr(lltype.Ptr(WEAKLINK), wref)
    return plink[0]
Example #16
 def gc_shadowstackref_destroy(gcref):
     ssref = lltype.cast_opaque_ptr(lltype.Ptr(SHADOWSTACKREF), gcref)
     shadow_stack_pool.destroy(ssref)
Example #17
                        ]
for name, default in constants_w_defaults:
    setattr(CConfig, name, platform.DefinedConstantInteger(name))

# types
if _MSVC:
    socketfd_type = rffi.UINT
else:
    socketfd_type = rffi.INT

CConfig.uint16_t = platform.SimpleType('uint16_t', rffi.USHORT)
CConfig.uint32_t = platform.SimpleType('uint32_t', rffi.UINT)
CConfig.size_t = platform.SimpleType('size_t', rffi.INT)
CConfig.ssize_t = platform.SimpleType('ssize_t', rffi.INT)
CConfig.socklen_t = platform.SimpleType('socklen_t', rffi.INT)
sockaddr_ptr = lltype.Ptr(lltype.ForwardReference())
addrinfo_ptr = lltype.Ptr(lltype.ForwardReference())

# struct types
CConfig.sockaddr = platform.Struct('struct sockaddr',
                                   [('sa_family', rffi.INT),
                                    ('sa_data', rffi.CFixedArray(rffi.CHAR, 1))])
CConfig.in_addr = platform.Struct('struct in_addr',
                                         [('s_addr', rffi.UINT)])
CConfig.in6_addr = platform.Struct('struct in6_addr',
                                          [])
CConfig.sockaddr_in = platform.Struct('struct sockaddr_in',
                                        [('sin_family', rffi.INT),
                                         ('sin_port',   rffi.USHORT),
                                         ('sin_addr',   CConfig.in_addr)])
Example #18
 def gc_save_current_state_away(gcref, ncontext):
     ssref = lltype.cast_opaque_ptr(lltype.Ptr(SHADOWSTACKREF), gcref)
     shadow_stack_pool.save_current_state_away(ssref, ncontext)
Example #19
def cast_pos(self, i, ll_t):
    pos = rffi.ptradd(self.ll_buffer, self.shape.ll_positions[i])
    TP = lltype.Ptr(rffi.CArray(ll_t))
    return rffi.cast(TP, pos)[0]
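
cast_pos is the read-side twin of push_field in Example #6: both compute a raw address with rffi.ptradd and reinterpret it through lltype.Ptr(rffi.CArray(T)). A self-contained sketch of the same idiom on a raw buffer (the buffer size and the value 42 are arbitrary):

from pypy.rpython.lltypesystem import lltype, rffi

SIZE = rffi.sizeof(lltype.Signed)
buf = lltype.malloc(rffi.CCHARP.TO, SIZE, flavor='raw')   # raw char buffer
ARRAY_P = lltype.Ptr(rffi.CArray(lltype.Signed))
rffi.cast(ARRAY_P, buf)[0] = 42                           # write through the cast
assert rffi.cast(ARRAY_P, buf)[0] == 42                   # read it back
lltype.free(buf, flavor='raw')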
Example #20
 def gc_restore_state_from(gcref):
     ssref = lltype.cast_opaque_ptr(lltype.Ptr(SHADOWSTACKREF), gcref)
     shadow_stack_pool.restore_state_from(ssref)
Example #21
 def tracing_before_residual_call(self, gcref):
     if not self.is_virtual_ref(gcref):
         return
     vref = lltype.cast_opaque_ptr(lltype.Ptr(self.JIT_VIRTUAL_REF), gcref)
     assert vref.virtual_token == self.TOKEN_NONE
     vref.virtual_token = self.TOKEN_TRACING_RESCALL
Example #22
    ht.delete(key)
    return True


@api_func(lltype.Bool, [ht_ptr, zval_ptr])
def hash_prepend(ptr, ptr2):
    ht = _cast_ptr_to_ht(ptr)
    zval = _cast_ptr_to_zval(ptr2)
    ht.prepend(zval)
    return True


_api_struct_args = [(fn_name, fn_ll_type)
                    for fn_name, _, fn_ll_type, _, _ in _API_FUNCS]
api_type = lltype.Struct('API', *_api_struct_args)
api_ptr_type = lltype.Ptr(api_type)

_API_FUNCS_iterable = unrolling_iterable(_API_FUNCS)


def _get_api_ptr():
    api_ptr = rffi.make(api_type)
    for fn_name, fn, fn_ll_type, _, _ in _API_FUNCS_iterable:
        setattr(api_ptr, fn_name, rffi.llhelper(fn_ll_type, fn))
    return api_ptr


# FIXME: use actual rffi types
_type_dict = {
    lltype.Signed: 'long',
    zval_ptr: 'zval',
Example #23
def rundemo(entrypoint, *args):
    view = conftest.option.view
    seed = demo_conftest.option.randomseed
    benchmark = bench_conftest.option.benchmark

    logfile = str(udir.join('%s.log' % (entrypoint.__name__, )))
    try:
        os.unlink(logfile)
    except OSError:
        pass
    os.environ['PYPYJITLOG'] = logfile

    if benchmark:
        py.test.skip("benchmarking: working in progress")
        arglist = ', '.join(['a%d' % i for i in range(len(args))])
        miniglobals = {
            'Benchmark': bench_conftest.Benchmark,
            'original_entrypoint': entrypoint
        }
        exec py.code.Source("""
            def benchmark_runner(%s):
                bench = Benchmark()
                while 1:
                    res = original_entrypoint(%s)
                    if bench.stop():
                        break
                return res
        """ % (arglist, arglist)).compile() in miniglobals
        entrypoint = miniglobals['benchmark_runner']

    nb_args = len(args)  # XXX ints only for now
    if machine_code_dumper:
        machine_code_dumper._freeze_()  # clean up state
    rgenop = RGenOp()
    gv_entrypoint = rcompile(rgenop,
                             entrypoint, [int] * nb_args,
                             random_seed=seed)
    if machine_code_dumper:
        machine_code_dumper._freeze_()  # clean up state

    print
    print 'Random seed value: %d' % (seed, )
    print

    print 'Running %s(%s)...' % (entrypoint.__name__, ', '.join(map(
        repr, args)))
    expected = entrypoint(*args)
    print 'Python ===>', expected
    F1 = lltype.FuncType([lltype.Signed] * nb_args, lltype.Signed)
    fp = RGenOp.get_python_callable(lltype.Ptr(F1), gv_entrypoint)
    res = runfp(fp, *args)
    print '%-6s ===>' % RGenOp.__name__, res
    print
    if res != expected:
        raise AssertionError("expected return value is %s, got %s\nseed = %s" %
                             (expected, res, seed))

    if view and machine_code_dumper:
        from pypy.jit.codegen.i386.viewcode import World
        world = World()
        world.parse(open(logfile))
        world.show()
Example #24
def define_ptr(type_name):
    type = lltype.OpaqueType(type_name)
    ptr = lltype.Ptr(type)
    null = lltype.nullptr(ptr.TO)
    return type, ptr, null
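
A possible way to call define_ptr (the type name 'lua_State' is purely illustrative): it hands back the opaque type, its pointer type, and a ready-made NULL of that pointer type.

from pypy.rpython.lltypesystem import lltype

LUA_STATE, LUA_STATE_P, NULL_STATE = define_ptr('lua_State')
assert lltype.typeOf(NULL_STATE) == LUA_STATE_P
assert not NULL_STATE      # null pointers are falsy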
Example #25
class MarkSweepGC(GCBase):
    _alloc_flavor_ = "raw"

    HDR = lltype.ForwardReference()
    HDRPTR = lltype.Ptr(HDR)
    # need to maintain a linked list of malloced objects, since we use the
    # system's allocator and can't walk the heap
    HDR.become(lltype.Struct('header', ('typeid', lltype.Signed),
                                       ('next', HDRPTR)))

    POOL = lltype.GcStruct('gc_pool')
    POOLPTR = lltype.Ptr(POOL)

    POOLNODE = lltype.ForwardReference()
    POOLNODEPTR = lltype.Ptr(POOLNODE)
    POOLNODE.become(lltype.Struct('gc_pool_node', ('linkedlist', HDRPTR),
                                                  ('nextnode', POOLNODEPTR)))

    # the following values override the default arguments of __init__ when
    # translating to a real backend.
    TRANSLATION_PARAMS = {'start_heap_size': 8*1024*1024} # XXX adjust

    def __init__(self, chunk_size=DEFAULT_CHUNK_SIZE, start_heap_size=4096):
        self.heap_usage = 0          # at the end of the latest collection
        self.bytes_malloced = 0      # since the latest collection
        self.bytes_malloced_threshold = start_heap_size
        self.total_collection_time = 0.0
        self.AddressStack = get_address_stack(chunk_size)
        self.malloced_objects = lltype.nullptr(self.HDR)
        self.malloced_objects_with_finalizer = lltype.nullptr(self.HDR)
        # these are usually only the small bits of memory that make a
        # weakref object
        self.objects_with_weak_pointers = lltype.nullptr(self.HDR)
        self.gcheaderbuilder = GCHeaderBuilder(self.HDR)
        # pools, for x_swap_pool():
        #   'curpool' is the current pool, lazily allocated (i.e. NULL means
        #   the current POOL object is not yet malloc'ed).  POOL objects are
        #   usually at the start of a linked list of objects, via the HDRs.
        #   The exception is 'curpool' whose linked list of objects is in
        #   'self.malloced_objects' instead of in the header of 'curpool'.
        #   POOL objects are never in the middle of a linked list themselves.
        # XXX a likely cause for the current problems with pools is:
        # not all objects live in malloced_objects, some also live in
        # malloced_objects_with_finalizer and objects_with_weak_pointers
        self.curpool = lltype.nullptr(self.POOL)
        #   'poolnodes' is a linked list of all such linked lists.  Each
        #   linked list will usually start with a POOL object, but it can
        #   also contain only normal objects if the POOL object at the head
        #   was already freed.  The objects in 'malloced_objects' are not
        #   found via 'poolnodes'.
        self.poolnodes = lltype.nullptr(self.POOLNODE)
        self.collect_in_progress = False
        self.prev_collect_end_time = 0.0

    def maybe_collect(self):
        if self.bytes_malloced > self.bytes_malloced_threshold:
            self.collect()

    def write_malloc_statistics(self, typeid, size, result, varsize):
        pass

    def write_free_statistics(self, typeid, result):
        pass

    def malloc_fixedsize(self, typeid, size, can_collect, has_finalizer=False,
                         contains_weakptr=False):
        if can_collect:
            self.maybe_collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        try:
            tot_size = size_gc_header + size
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced+usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid = typeid << 1
        if has_finalizer:
            hdr.next = self.malloced_objects_with_finalizer
            self.malloced_objects_with_finalizer = hdr
        elif contains_weakptr:
            hdr.next = self.objects_with_weak_pointers
            self.objects_with_weak_pointers = hdr
        else:
            hdr.next = self.malloced_objects
            self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced
        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc typeid', typeid,
        #                 '->', llmemory.cast_adr_to_int(result))
        self.write_malloc_statistics(typeid, tot_size, result, False)
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
    malloc_fixedsize._dont_inline_ = True

    def malloc_fixedsize_clear(self, typeid, size, can_collect,
                               has_finalizer=False, contains_weakptr=False):
        if can_collect:
            self.maybe_collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        try:
            tot_size = size_gc_header + size
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced+usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        raw_memclear(result, tot_size)
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid = typeid << 1
        if has_finalizer:
            hdr.next = self.malloced_objects_with_finalizer
            self.malloced_objects_with_finalizer = hdr
        elif contains_weakptr:
            hdr.next = self.objects_with_weak_pointers
            self.objects_with_weak_pointers = hdr
        else:
            hdr.next = self.malloced_objects
            self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced
        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc typeid', typeid,
        #                 '->', llmemory.cast_adr_to_int(result))
        self.write_malloc_statistics(typeid, tot_size, result, False)
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
    malloc_fixedsize_clear._dont_inline_ = True

    def malloc_varsize(self, typeid, length, size, itemsize, offset_to_length,
                       can_collect, has_finalizer=False):
        if can_collect:
            self.maybe_collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        try:
            fixsize = size_gc_header + size
            varsize = ovfcheck(itemsize * length)
            tot_size = ovfcheck(fixsize + varsize)
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced+usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        (result + size_gc_header + offset_to_length).signed[0] = length
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid = typeid << 1
        if has_finalizer:
            hdr.next = self.malloced_objects_with_finalizer
            self.malloced_objects_with_finalizer = hdr
        else:
            hdr.next = self.malloced_objects
            self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced
            
        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc_varsize length', length,
        #                 'typeid', typeid,
        #                 '->', llmemory.cast_adr_to_int(result))
        self.write_malloc_statistics(typeid, tot_size, result, True)
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
    malloc_varsize._dont_inline_ = True

    def malloc_varsize_clear(self, typeid, length, size, itemsize,
                             offset_to_length, can_collect,
                             has_finalizer=False):
        if can_collect:
            self.maybe_collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        try:
            fixsize = size_gc_header + size
            varsize = ovfcheck(itemsize * length)
            tot_size = ovfcheck(fixsize + varsize)
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced+usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        raw_memclear(result, tot_size)        
        (result + size_gc_header + offset_to_length).signed[0] = length
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid = typeid << 1
        if has_finalizer:
            hdr.next = self.malloced_objects_with_finalizer
            self.malloced_objects_with_finalizer = hdr
        else:
            hdr.next = self.malloced_objects
            self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced
            
        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc_varsize length', length,
        #                 'typeid', typeid,
        #                 '->', llmemory.cast_adr_to_int(result))
        self.write_malloc_statistics(typeid, tot_size, result, True)
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
    malloc_varsize_clear._dont_inline_ = True

    def collect(self):
        # 1. mark from the roots, and also the objects that objects-with-del
        #    point to (using the list of malloced_objects_with_finalizer)
        # 2. walk the list of objects-without-del and free the ones not marked
        # 3. walk the list of objects-with-del and for the ones not marked:
        #    call __del__, move the object to the list of object-without-del
        import time
        from pypy.rpython.lltypesystem.lloperation import llop
        if DEBUG_PRINT:
            llop.debug_print(lltype.Void, 'collecting...')
        start_time = time.time()
        self.collect_in_progress = True
        size_gc_header = self.gcheaderbuilder.size_gc_header
##        llop.debug_view(lltype.Void, self.malloced_objects, self.poolnodes,
##                        size_gc_header)

        # push the roots on the mark stack
        objects = self.AddressStack() # mark stack
        self._mark_stack = objects
        self.root_walker.walk_roots(
            MarkSweepGC._mark_root,  # stack roots
            MarkSweepGC._mark_root,  # static in prebuilt non-gc structures
            MarkSweepGC._mark_root)  # static in prebuilt gc objects

        # from this point onwards, no more mallocs should be possible
        old_malloced = self.bytes_malloced
        self.bytes_malloced = 0
        curr_heap_size = 0
        freed_size = 0

        # mark objects reachable by objects with a finalizer, but not those
        # themselves. add their size to curr_heap_size, since they always
        # survive the collection
        hdr = self.malloced_objects_with_finalizer
        while hdr:
            next = hdr.next
            typeid = hdr.typeid >> 1
            gc_info = llmemory.cast_ptr_to_adr(hdr)
            obj = gc_info + size_gc_header
            if not hdr.typeid & 1:
                self.add_reachable_to_stack(obj, objects)
            addr = llmemory.cast_ptr_to_adr(hdr)
            size = self.fixed_size(typeid)
            if self.is_varsize(typeid):
                length = (obj + self.varsize_offset_to_length(typeid)).signed[0]
                size += self.varsize_item_sizes(typeid) * length
            estimate = raw_malloc_usage(size_gc_header + size)
            curr_heap_size += estimate
            hdr = next

        # mark things on the mark stack and put their descendants onto the
        # stack until the stack is empty
        while objects.non_empty():  #mark
            curr = objects.pop()
            gc_info = curr - size_gc_header
            hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
            if hdr.typeid & 1:
                continue
            self.add_reachable_to_stack(curr, objects)
            hdr.typeid = hdr.typeid | 1
        objects.delete()
        # also mark self.curpool
        if self.curpool:
            gc_info = llmemory.cast_ptr_to_adr(self.curpool) - size_gc_header
            hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
            hdr.typeid = hdr.typeid | 1
        # go through the list of objects containing weak pointers
        # and kill the links if they go to dead objects
        # if the object itself is not marked, free it
        hdr = self.objects_with_weak_pointers
        surviving = lltype.nullptr(self.HDR)
        while hdr:
            typeid = hdr.typeid >> 1
            next = hdr.next
            addr = llmemory.cast_ptr_to_adr(hdr)
            size = self.fixed_size(typeid)
            estimate = raw_malloc_usage(size_gc_header + size)
            if hdr.typeid & 1:
                typeid = hdr.typeid >> 1
                offset = self.weakpointer_offset(typeid)
                hdr.typeid = hdr.typeid & (~1)
                gc_info = llmemory.cast_ptr_to_adr(hdr)
                weakref_obj = gc_info + size_gc_header
                pointing_to = (weakref_obj + offset).address[0]
                if pointing_to:
                    gc_info_pointing_to = pointing_to - size_gc_header
                    hdr_pointing_to = llmemory.cast_adr_to_ptr(
                        gc_info_pointing_to, self.HDRPTR)
                    # pointed to object will die
                    # XXX what to do if the object has a finalizer which resurrects
                    # the object?
                    if not hdr_pointing_to.typeid & 1:
                        (weakref_obj + offset).address[0] = NULL
                hdr.next = surviving
                surviving = hdr
                curr_heap_size += estimate
            else:
                gc_info = llmemory.cast_ptr_to_adr(hdr)
                weakref_obj = gc_info + size_gc_header
                self.write_free_statistics(typeid, weakref_obj)
                freed_size += estimate
                raw_free(addr)
            hdr = next
        self.objects_with_weak_pointers = surviving
        # sweep: delete objects without del if they are not marked
        # unmark objects without del that are marked
        firstpoolnode = lltype.malloc(self.POOLNODE, flavor='raw')
        firstpoolnode.linkedlist = self.malloced_objects
        firstpoolnode.nextnode = self.poolnodes
        prevpoolnode = lltype.nullptr(self.POOLNODE)
        poolnode = firstpoolnode
        while poolnode:   #sweep
            ppnext = llmemory.cast_ptr_to_adr(poolnode)
            ppnext += llmemory.offsetof(self.POOLNODE, 'linkedlist')
            hdr = poolnode.linkedlist
            while hdr:  #sweep
                typeid = hdr.typeid >> 1
                next = hdr.next
                addr = llmemory.cast_ptr_to_adr(hdr)
                size = self.fixed_size(typeid)
                if self.is_varsize(typeid):
                    length = (addr + size_gc_header + self.varsize_offset_to_length(typeid)).signed[0]
                    size += self.varsize_item_sizes(typeid) * length
                estimate = raw_malloc_usage(size_gc_header + size)
                if hdr.typeid & 1:
                    hdr.typeid = hdr.typeid & (~1)
                    ppnext.address[0] = addr
                    ppnext = llmemory.cast_ptr_to_adr(hdr)
                    ppnext += llmemory.offsetof(self.HDR, 'next')
                    curr_heap_size += estimate
                else:
                    gc_info = llmemory.cast_ptr_to_adr(hdr)
                    obj = gc_info + size_gc_header
                    self.write_free_statistics(typeid, obj)
                    freed_size += estimate
                    raw_free(addr)
                hdr = next
            ppnext.address[0] = llmemory.NULL
            next = poolnode.nextnode
            if not poolnode.linkedlist and prevpoolnode:
                # completely empty node
                prevpoolnode.nextnode = next
                lltype.free(poolnode, flavor='raw')
            else:
                prevpoolnode = poolnode
            poolnode = next
        self.malloced_objects = firstpoolnode.linkedlist
        self.poolnodes = firstpoolnode.nextnode
        lltype.free(firstpoolnode, flavor='raw')
        #llop.debug_view(lltype.Void, self.malloced_objects, self.malloced_objects_with_finalizer, size_gc_header)

        end_time = time.time()
        compute_time = start_time - self.prev_collect_end_time
        collect_time = end_time - start_time

        garbage_collected = old_malloced - (curr_heap_size - self.heap_usage)

        if (collect_time * curr_heap_size >
            0.02 * garbage_collected * compute_time): 
            self.bytes_malloced_threshold += self.bytes_malloced_threshold / 2
        if (collect_time * curr_heap_size <
            0.005 * garbage_collected * compute_time):
            self.bytes_malloced_threshold /= 2

        # Use at least as much memory as current live objects.
        if curr_heap_size > self.bytes_malloced_threshold:
            self.bytes_malloced_threshold = curr_heap_size

        # Cap at 1/4 GB
        self.bytes_malloced_threshold = min(self.bytes_malloced_threshold,
                                            256 * 1024 * 1024)
        self.total_collection_time += collect_time
        self.prev_collect_end_time = end_time
        if DEBUG_PRINT:
            llop.debug_print(lltype.Void,
                             "  malloced since previous collection:",
                             old_malloced, "bytes")
            llop.debug_print(lltype.Void,
                             "  heap usage at start of collection: ",
                             self.heap_usage + old_malloced, "bytes")
            llop.debug_print(lltype.Void,
                             "  freed:                             ",
                             freed_size, "bytes")
            llop.debug_print(lltype.Void,
                             "  new heap usage:                    ",
                             curr_heap_size, "bytes")
            llop.debug_print(lltype.Void,
                             "  total time spent collecting:       ",
                             self.total_collection_time, "seconds")
            llop.debug_print(lltype.Void,
                             "  collecting time:                   ",
                             collect_time)
            llop.debug_print(lltype.Void,
                             "  computing time:                    ",
                             compute_time)
            llop.debug_print(lltype.Void,
                             "  new threshold:                     ",
                             self.bytes_malloced_threshold)
##        llop.debug_view(lltype.Void, self.malloced_objects, self.poolnodes,
##                        size_gc_header)
        assert self.heap_usage + old_malloced == curr_heap_size + freed_size

        self.heap_usage = curr_heap_size
        hdr = self.malloced_objects_with_finalizer
        self.malloced_objects_with_finalizer = lltype.nullptr(self.HDR)
        last = lltype.nullptr(self.HDR)
        while hdr:
            next = hdr.next
            if hdr.typeid & 1:
                hdr.next = lltype.nullptr(self.HDR)
                if not self.malloced_objects_with_finalizer:
                    self.malloced_objects_with_finalizer = hdr
                else:
                    last.next = hdr
                hdr.typeid = hdr.typeid & (~1)
                last = hdr
            else:
                obj = llmemory.cast_ptr_to_adr(hdr) + size_gc_header
                finalizer = self.getfinalizer(hdr.typeid >> 1)
                # make malloced_objects_with_finalizer consistent
                # for the sake of a possible collection caused by finalizer
                if not self.malloced_objects_with_finalizer:
                    self.malloced_objects_with_finalizer = next
                else:
                    last.next = next
                hdr.next = self.malloced_objects
                self.malloced_objects = hdr
                #llop.debug_view(lltype.Void, self.malloced_objects, self.malloced_objects_with_finalizer, size_gc_header)
                finalizer(obj)
                if not self.collect_in_progress: # another collection was caused?
                    llop.debug_print(lltype.Void, "outer collect interrupted "
                                                  "by recursive collect")
                    return
                if not last:
                    if self.malloced_objects_with_finalizer == next:
                        self.malloced_objects_with_finalizer = lltype.nullptr(self.HDR)
                    else:
                        # now it gets annoying: finalizer caused a malloc of something
                        # with a finalizer
                        last = self.malloced_objects_with_finalizer
                        while last.next != next:
                            last = last.next
                        last.next = lltype.nullptr(self.HDR)
                else:
                    last.next = lltype.nullptr(self.HDR)
            hdr = next
        self.collect_in_progress = False

    def _mark_root(self, root):   # 'root' is the address of the GCPTR
        gcobjectaddr = root.address[0]
        self._mark_stack.append(gcobjectaddr)

    def _mark_root_and_clear_bit(self, root):
        gcobjectaddr = root.address[0]
        self._mark_stack.append(gcobjectaddr)
        size_gc_header = self.gcheaderbuilder.size_gc_header
        gc_info = gcobjectaddr - size_gc_header
        hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
        hdr.typeid = hdr.typeid & (~1)

    STAT_HEAP_USAGE     = 0
    STAT_BYTES_MALLOCED = 1
    STATISTICS_NUMBERS  = 2

    def get_type_id(self, obj):
        size_gc_header = self.gcheaderbuilder.size_gc_header
        gc_info = obj - size_gc_header
        hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
        return hdr.typeid >> 1

    def add_reachable_to_stack(self, obj, objects):
        self.trace(obj, self._add_reachable, objects)

    def _add_reachable(pointer, objects):
        obj = pointer.address[0]
        if obj:
            objects.append(obj)
    _add_reachable = staticmethod(_add_reachable)

    def statistics(self, index):
        # no memory allocation here!
        if index == self.STAT_HEAP_USAGE:
            return self.heap_usage
        if index == self.STAT_BYTES_MALLOCED:
            return self.bytes_malloced
        return -1

    def init_gc_object(self, addr, typeid):
        hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
        hdr.typeid = typeid << 1

    def init_gc_object_immortal(self, addr, typeid, flags=0):
        # prebuilt gc structures always have the mark bit set
        # ignore flags
        hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
        hdr.typeid = (typeid << 1) | 1

    # experimental support for thread cloning
    def x_swap_pool(self, newpool):
        # Set newpool as the current pool (create one if newpool == NULL).
        # All malloc'ed objects are put into the current pool;this is a
        # way to separate objects depending on when they were allocated.
        size_gc_header = self.gcheaderbuilder.size_gc_header
        # invariant: each POOL GcStruct is at the _front_ of a linked list
        # of malloced objects.
        oldpool = self.curpool
        #llop.debug_print(lltype.Void, 'x_swap_pool',
        #                 lltype.cast_ptr_to_int(oldpool),
        #                 lltype.cast_ptr_to_int(newpool))
        if not oldpool:
            # make a fresh pool object, which is automatically inserted at the
            # front of the current list
            oldpool = lltype.malloc(self.POOL)
            addr = llmemory.cast_ptr_to_adr(oldpool)
            addr -= size_gc_header
            hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
            # put this new POOL object in the poolnodes list
            node = lltype.malloc(self.POOLNODE, flavor="raw")
            node.linkedlist = hdr
            node.nextnode = self.poolnodes
            self.poolnodes = node
        else:
            # manually insert oldpool at the front of the current list
            addr = llmemory.cast_ptr_to_adr(oldpool)
            addr -= size_gc_header
            hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
            hdr.next = self.malloced_objects

        newpool = lltype.cast_opaque_ptr(self.POOLPTR, newpool)
        if newpool:
            # newpool is at the front of the new linked list to install
            addr = llmemory.cast_ptr_to_adr(newpool)
            addr -= size_gc_header
            hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
            self.malloced_objects = hdr.next
            # invariant: now that objects in the hdr.next list are accessible
            # through self.malloced_objects, make sure they are not accessible
            # via poolnodes (which has a node pointing to newpool):
            hdr.next = lltype.nullptr(self.HDR)
        else:
            # start a fresh new linked list
            self.malloced_objects = lltype.nullptr(self.HDR)
        self.curpool = newpool
        return lltype.cast_opaque_ptr(X_POOL_PTR, oldpool)

    def x_clone(self, clonedata):
        # Recursively clone the gcobject and everything it points to,
        # directly or indirectly -- but stops at objects that are not
        # in the specified pool.  A new pool is built to contain the
        # copies, and the 'gcobjectptr' and 'pool' fields of clonedata
        # are adjusted to refer to the result.
        CURPOOL_FLAG = sys.maxint // 2 + 1

        # install a new pool into which all the mallocs go
        curpool = self.x_swap_pool(lltype.nullptr(X_POOL))

        size_gc_header = self.gcheaderbuilder.size_gc_header
        oldobjects = self.AddressStack()
        # if no pool specified, use the current pool as the 'source' pool
        oldpool = clonedata.pool or curpool
        oldpool = lltype.cast_opaque_ptr(self.POOLPTR, oldpool)
        addr = llmemory.cast_ptr_to_adr(oldpool)
        addr -= size_gc_header

        hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
        hdr = hdr.next   # skip the POOL object itself
        while hdr:
            next = hdr.next
            hdr.typeid |= CURPOOL_FLAG   # mark all objects from malloced_list
            hdr.next = lltype.nullptr(self.HDR)  # abused to point to the copy
            oldobjects.append(llmemory.cast_ptr_to_adr(hdr))
            hdr = next

        # a stack of addresses of places that still points to old objects
        # and that must possibly be fixed to point to a new copy
        stack = self.AddressStack()
        stack.append(llmemory.cast_ptr_to_adr(clonedata)
                     + llmemory.offsetof(X_CLONE, 'gcobjectptr'))
        while stack.non_empty():
            gcptr_addr = stack.pop()
            oldobj_addr = gcptr_addr.address[0]
            if not oldobj_addr:
                continue   # pointer is NULL
            oldhdr = llmemory.cast_adr_to_ptr(oldobj_addr - size_gc_header,
                                              self.HDRPTR)
            typeid = oldhdr.typeid
            if not (typeid & CURPOOL_FLAG):
                continue   # ignore objects that were not in the malloced_list
            newhdr = oldhdr.next      # abused to point to the copy
            if not newhdr:
                typeid = (typeid & ~CURPOOL_FLAG) >> 1
                size = self.fixed_size(typeid)
                # XXX! collect() at the beginning if the free heap is low
                if self.is_varsize(typeid):
                    itemsize = self.varsize_item_sizes(typeid)
                    offset_to_length = self.varsize_offset_to_length(typeid)
                    length = (oldobj_addr + offset_to_length).signed[0]
                    newobj = self.malloc_varsize(typeid, length, size,
                                                 itemsize, offset_to_length,
                                                 False)
                    size += length*itemsize
                else:
                    newobj = self.malloc_fixedsize(typeid, size, False)
                    length = -1

                newobj_addr = llmemory.cast_ptr_to_adr(newobj)

                #llop.debug_print(lltype.Void, 'clone',
                #                 llmemory.cast_adr_to_int(oldobj_addr),
                #                 '->', llmemory.cast_adr_to_int(newobj_addr),
                #                 'typeid', typeid,
                #                 'length', length)

                newhdr_addr = newobj_addr - size_gc_header
                newhdr = llmemory.cast_adr_to_ptr(newhdr_addr, self.HDRPTR)

                saved_id   = newhdr.typeid    # XXX hack needed for genc
                saved_next = newhdr.next      # where size_gc_header == 0
                raw_memcopy(oldobj_addr, newobj_addr, size)
                newhdr.typeid = saved_id
                newhdr.next   = saved_next

                offsets = self.offsets_to_gc_pointers(typeid)
                i = 0
                while i < len(offsets):
                    pointer_addr = newobj_addr + offsets[i]
                    stack.append(pointer_addr)
                    i += 1

                if length > 0:
                    offsets = self.varsize_offsets_to_gcpointers_in_var_part(
                        typeid)
                    itemlength = self.varsize_item_sizes(typeid)
                    offset = self.varsize_offset_to_variable_part(typeid)
                    itembaseaddr = newobj_addr + offset
                    i = 0
                    while i < length:
                        item = itembaseaddr + itemlength * i
                        j = 0
                        while j < len(offsets):
                            pointer_addr = item + offsets[j]
                            stack.append(pointer_addr)
                            j += 1
                        i += 1

                oldhdr.next = newhdr
            newobj_addr = llmemory.cast_ptr_to_adr(newhdr) + size_gc_header
            gcptr_addr.address[0] = newobj_addr
        stack.delete()

        # re-create the original linked list
        next = lltype.nullptr(self.HDR)
        while oldobjects.non_empty():
            hdr = llmemory.cast_adr_to_ptr(oldobjects.pop(), self.HDRPTR)
            hdr.typeid &= ~CURPOOL_FLAG   # reset the flag
            hdr.next = next
            next = hdr
        oldobjects.delete()

        # consistency check
        addr = llmemory.cast_ptr_to_adr(oldpool)
        addr -= size_gc_header
        hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
        assert hdr.next == next

        # build the new pool object collecting the new objects, and
        # reinstall the pool that was current at the beginning of x_clone()
        clonedata.pool = self.x_swap_pool(curpool)
Example #26
]
for name, default in constants_w_defaults:
    setattr(CConfig, name, platform.DefinedConstantInteger(name))

# types
if _MS_WINDOWS:
    socketfd_type = rffi.UINT
else:
    socketfd_type = rffi.INT

CConfig.uint16_t = platform.SimpleType('uint16_t', rffi.USHORT)
CConfig.uint32_t = platform.SimpleType('uint32_t', rffi.UINT)
CConfig.size_t = platform.SimpleType('size_t', rffi.INT)
CConfig.ssize_t = platform.SimpleType('ssize_t', rffi.INT)
CConfig.socklen_t = platform.SimpleType('socklen_t', rffi.INT)
sockaddr_ptr = lltype.Ptr(lltype.ForwardReference())
addrinfo_ptr = lltype.Ptr(lltype.ForwardReference())

# struct types
CConfig.sockaddr = platform.Struct(
    'struct sockaddr', [('sa_family', rffi.INT),
                        ('sa_data', rffi.CFixedArray(rffi.CHAR, 1))])
CConfig.in_addr = platform.Struct('struct in_addr', [('s_addr', rffi.UINT)])
CConfig.in6_addr = platform.Struct('struct in6_addr', [])
CConfig.sockaddr_in = platform.Struct('struct sockaddr_in',
                                      [('sin_family', rffi.INT),
                                       ('sin_port', rffi.USHORT),
                                       ('sin_addr', CConfig.in_addr)])

CConfig.sockaddr_in6 = platform.Struct('struct sockaddr_in6',
                                       [('sin6_family', rffi.INT),
Example #27
 def expand(s_TYPE, *args_s):
     assert isinstance(s_TYPE, annmodel.SomePBC)
     assert s_TYPE.is_constant()
     return lltype.Ptr(s_TYPE.const)
Example #28
class BoehmGCTransformer(GCTransformer):
    malloc_zero_filled = True
    FINALIZER_PTR = lltype.Ptr(lltype.FuncType([llmemory.Address],
                                               lltype.Void))

    def __init__(self, translator, inline=False):
        super(BoehmGCTransformer, self).__init__(translator, inline=inline)
        self.finalizer_funcptrs = {}

        atomic_mh = mallocHelpers()
        atomic_mh.allocate = lambda size: llop.boehm_malloc_atomic(
            llmemory.Address, size)
        ll_malloc_fixedsize_atomic = atomic_mh._ll_malloc_fixedsize

        mh = mallocHelpers()
        mh.allocate = lambda size: llop.boehm_malloc(llmemory.Address, size)
        c_realloc = rffi.llexternal('GC_REALLOC', [rffi.VOIDP, rffi.INT],
                                    rffi.VOIDP)

        def _realloc(ptr, size):
            return llmemory.cast_ptr_to_adr(
                c_realloc(rffi.cast(rffi.VOIDP, ptr), size))

        mh.realloc = _realloc
        ll_malloc_fixedsize = mh._ll_malloc_fixedsize

        # XXX, do we need/want an atomic version of this function?
        ll_malloc_varsize_no_length = mh.ll_malloc_varsize_no_length
        ll_malloc_varsize = mh.ll_malloc_varsize

        ll_realloc = mh.ll_realloc

        if self.translator:
            self.malloc_fixedsize_ptr = self.inittime_helper(
                ll_malloc_fixedsize, [lltype.Signed], llmemory.Address)
            self.malloc_fixedsize_atomic_ptr = self.inittime_helper(
                ll_malloc_fixedsize_atomic, [lltype.Signed], llmemory.Address)
            self.malloc_varsize_no_length_ptr = self.inittime_helper(
                ll_malloc_varsize_no_length, [lltype.Signed] * 3,
                llmemory.Address,
                inline=False)
            self.malloc_varsize_ptr = self.inittime_helper(ll_malloc_varsize,
                                                           [lltype.Signed] * 4,
                                                           llmemory.Address,
                                                           inline=False)
            self.weakref_create_ptr = self.inittime_helper(ll_weakref_create,
                                                           [llmemory.Address],
                                                           llmemory.WeakRefPtr,
                                                           inline=False)
            self.weakref_deref_ptr = self.inittime_helper(
                ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address)
            self.realloc_ptr = self.inittime_helper(
                ll_realloc, [llmemory.Address] + [lltype.Signed] * 4,
                llmemory.Address)
            self.mixlevelannotator.finish()  # for now
            self.mixlevelannotator.backend_optimize()

    def push_alive_nopyobj(self, var, llops):
        pass

    def pop_alive_nopyobj(self, var, llops):
        pass

    def _can_realloc(self):
        return True

    def perform_realloc(self, hop, v_ptr, v_newlgt, c_const_size, c_item_size,
                        c_lengthofs, c_grow):
        args = [
            self.realloc_ptr, v_ptr, v_newlgt, c_const_size, c_item_size,
            c_lengthofs
        ]
        return hop.genop('direct_call', args, resulttype=llmemory.Address)

    def gct_fv_gc_malloc(self, hop, flags, TYPE, c_size):
        # XXX same behavior for zero=True: in theory that's wrong
        if TYPE._is_atomic():
            funcptr = self.malloc_fixedsize_atomic_ptr
        else:
            funcptr = self.malloc_fixedsize_ptr
        v_raw = hop.genop("direct_call", [funcptr, c_size],
                          resulttype=llmemory.Address)
        finalizer_ptr = self.finalizer_funcptr_for_type(TYPE)
        if finalizer_ptr:
            c_finalizer_ptr = Constant(finalizer_ptr, self.FINALIZER_PTR)
            hop.genop("boehm_register_finalizer", [v_raw, c_finalizer_ptr])
        return v_raw

    def gct_fv_gc_malloc_varsize(self, hop, flags, TYPE, v_length,
                                 c_const_size, c_item_size,
                                 c_offset_to_length):
        # XXX same behavior for zero=True: in theory that's wrong
        if c_offset_to_length is None:
            v_raw = hop.genop("direct_call", [
                self.malloc_varsize_no_length_ptr, v_length, c_const_size,
                c_item_size
            ],
                              resulttype=llmemory.Address)
        else:
            v_raw = hop.genop("direct_call", [
                self.malloc_varsize_ptr, v_length, c_const_size, c_item_size,
                c_offset_to_length
            ],
                              resulttype=llmemory.Address)
        return v_raw

    def finalizer_funcptr_for_type(self, TYPE):
        if TYPE in self.finalizer_funcptrs:
            return self.finalizer_funcptrs[TYPE]

        rtti = get_rtti(TYPE)
        if rtti is not None and hasattr(rtti._obj, 'destructor_funcptr'):
            destrptr = rtti._obj.destructor_funcptr
            DESTR_ARG = lltype.typeOf(destrptr).TO.ARGS[0]
        else:
            destrptr = None
            DESTR_ARG = None

        if type_contains_pyobjs(TYPE):
            if destrptr:
                raise Exception("can't mix PyObjects and __del__ with Boehm")

            static_body = '\n'.join(
                _static_deallocator_body_for_type('v', TYPE))
            d = {
                'pop_alive': LLTransformerOp(self.pop_alive),
                'PTR_TYPE': lltype.Ptr(TYPE),
                'cast_adr_to_ptr': llmemory.cast_adr_to_ptr
            }
            src = ("def ll_finalizer(addr):\n"
                   "    v = cast_adr_to_ptr(addr, PTR_TYPE)\n"
                   "%s\n") % (static_body, )
            exec src in d
            fptr = self.annotate_finalizer(d['ll_finalizer'],
                                           [llmemory.Address], lltype.Void)
        elif destrptr:
            EXC_INSTANCE_TYPE = self.translator.rtyper.exceptiondata.lltype_of_exception_value

            def ll_finalizer(addr):
                exc_instance = llop.gc_fetch_exception(EXC_INSTANCE_TYPE)
                v = llmemory.cast_adr_to_ptr(addr, DESTR_ARG)
                ll_call_destructor(destrptr, v)
                llop.gc_restore_exception(lltype.Void, exc_instance)

            fptr = self.annotate_finalizer(ll_finalizer, [llmemory.Address],
                                           lltype.Void)
        else:
            fptr = lltype.nullptr(self.FINALIZER_PTR.TO)

        self.finalizer_funcptrs[TYPE] = fptr
        return fptr

    def gct_weakref_create(self, hop):
        v_instance, = hop.spaceop.args
        v_addr = hop.genop("cast_ptr_to_adr", [v_instance],
                           resulttype=llmemory.Address)
        v_wref = hop.genop("direct_call", [self.weakref_create_ptr, v_addr],
                           resulttype=llmemory.WeakRefPtr)
        hop.cast_result(v_wref)

    def gct_weakref_deref(self, hop):
        v_wref, = hop.spaceop.args
        v_addr = hop.genop("direct_call", [self.weakref_deref_ptr, v_wref],
                           resulttype=llmemory.Address)
        hop.cast_result(v_addr)

    def gct_gc_id(self, hop):
        # this is the logic from the HIDE_POINTER macro in <gc/gc.h>
        v_int = hop.genop('cast_ptr_to_int', [hop.spaceop.args[0]],
                          resulttype=lltype.Signed)
        hop.genop('int_invert', [v_int], resultvar=hop.spaceop.result)
Example #29
def test_get_array_descr():
    U = lltype.Struct('U')
    T = lltype.GcStruct('T')
    A1 = lltype.GcArray(lltype.Char)
    A2 = lltype.GcArray(lltype.Ptr(T))
    A3 = lltype.GcArray(lltype.Ptr(U))
    A4 = lltype.GcArray(lltype.Float)
    A5 = lltype.GcArray(
        lltype.Struct('x', ('v', lltype.Signed), ('k', lltype.Signed)))
    A6 = lltype.GcArray(lltype.SingleFloat)
    #
    c0 = GcCache(False)
    descr1 = get_array_descr(c0, A1)
    descr2 = get_array_descr(c0, A2)
    descr3 = get_array_descr(c0, A3)
    descr4 = get_array_descr(c0, A4)
    descr5 = get_array_descr(c0, A5)
    descr6 = get_array_descr(c0, A6)
    assert isinstance(descr1, ArrayDescr)
    assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char))
    assert descr1.flag == FLAG_UNSIGNED
    assert descr2.flag == FLAG_POINTER
    assert descr3.flag == FLAG_UNSIGNED
    assert descr4.flag == FLAG_FLOAT
    assert descr5.flag == FLAG_STRUCT
    assert descr6.flag == FLAG_UNSIGNED

    #
    def get_alignment(code):
        # Retrieve default alignment for the compiler/platform
        return struct.calcsize('l' + code) - struct.calcsize(code)

    assert descr1.basesize == get_alignment('c')
    assert descr2.basesize == get_alignment('p')
    assert descr3.basesize == get_alignment('p')
    assert descr4.basesize == get_alignment('d')
    assert descr5.basesize == get_alignment('f')
    assert descr1.lendescr.offset == 0
    assert descr2.lendescr.offset == 0
    assert descr3.lendescr.offset == 0
    assert descr4.lendescr.offset == 0
    assert descr5.lendescr.offset == 0
    assert descr1.itemsize == rffi.sizeof(lltype.Char)
    assert descr2.itemsize == rffi.sizeof(lltype.Ptr(T))
    assert descr3.itemsize == rffi.sizeof(lltype.Ptr(U))
    assert descr4.itemsize == rffi.sizeof(lltype.Float)
    assert descr5.itemsize == rffi.sizeof(lltype.Signed) * 2
    assert descr6.itemsize == rffi.sizeof(lltype.SingleFloat)
    #
    CA = rffi.CArray(lltype.Signed)
    descr = get_array_descr(c0, CA)
    assert descr.flag == FLAG_SIGNED
    assert descr.basesize == 0
    assert descr.lendescr is None
    CA = rffi.CArray(lltype.Ptr(lltype.GcStruct('S')))
    descr = get_array_descr(c0, CA)
    assert descr.flag == FLAG_POINTER
    assert descr.basesize == 0
    assert descr.lendescr is None
    CA = rffi.CArray(lltype.Ptr(lltype.Struct('S')))
    descr = get_array_descr(c0, CA)
    assert descr.flag == FLAG_UNSIGNED
    assert descr.basesize == 0
    assert descr.lendescr is None
    CA = rffi.CArray(lltype.Float)
    descr = get_array_descr(c0, CA)
    assert descr.flag == FLAG_FLOAT
    assert descr.basesize == 0
    assert descr.lendescr is None
    CA = rffi.CArray(rffi.FLOAT)
    descr = get_array_descr(c0, CA)
    assert descr.flag == FLAG_UNSIGNED
    assert descr.basesize == 0
    assert descr.itemsize == rffi.sizeof(lltype.SingleFloat)
    assert descr.lendescr is None
Example #30
        include_dirs=include_dirs,
        includes=['Python.h', 'stdarg.h'],
        compile_extra=['-DPy_BUILD_CORE'],
    )


class CConfig2:
    _compilation_info_ = CConfig._compilation_info_


class CConfig_constants:
    _compilation_info_ = CConfig._compilation_info_


VA_LIST_P = rffi.VOIDP  # rffi.COpaquePtr('va_list')
CONST_STRING = lltype.Ptr(lltype.Array(lltype.Char, hints={'nolength': True}),
                          use_cache=False)
CONST_WSTRING = lltype.Ptr(lltype.Array(lltype.UniChar,
                                        hints={'nolength': True}),
                           use_cache=False)
assert CONST_STRING is not rffi.CCHARP
assert CONST_STRING == rffi.CCHARP
assert CONST_WSTRING is not rffi.CWCHARP
assert CONST_WSTRING == rffi.CWCHARP

# FILE* interface
FILEP = rffi.COpaquePtr('FILE')
fopen = rffi.llexternal('fopen', [CONST_STRING, CONST_STRING], FILEP)
fclose = rffi.llexternal('fclose', [FILEP], rffi.INT)
fwrite = rffi.llexternal('fwrite',
                         [rffi.VOIDP, rffi.SIZE_T, rffi.SIZE_T, FILEP],
                         rffi.SIZE_T)