Example no. 1
def ll_weakref_create(targetaddr):
    # Allocate the link with boehm_malloc_atomic so the collector does not
    # scan it for pointers (the link must not keep the target alive), then
    # register it as a disappearing link so Boehm clears the slot when the
    # target is collected.
    link = llop.boehm_malloc_atomic(llmemory.Address, sizeof_weakreflink)
    if not link:
        raise MemoryError
    plink = llmemory.cast_adr_to_ptr(link, lltype.Ptr(WEAKLINK))
    plink[0] = targetaddr
    llop.boehm_disappearing_link(lltype.Void, link, targetaddr)
    return llmemory.cast_ptr_to_weakrefptr(plink)
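The helper above relies on two supporting definitions that the example does not show. A minimal sketch of what they could look like (an assumption for illustration, not taken from the example itself): the link is a single GC-managed, address-sized slot, which is why it can be written with plink[0].

from rpython.rtyper.lltypesystem import lltype, llmemory

# Assumed supporting definitions (hypothetical, for illustration only):
# the weakref "link" is one address-sized slot that Boehm will clear
# when the target object is collected.
WEAKLINK = lltype.FixedSizeArray(llmemory.Address, 1)
sizeof_weakreflink = llmemory.sizeof(WEAKLINK)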
Example no. 2
def ll_weakref_create(target_gcref):
    if revdb:
        plink = llop.revdb_weakref_create(lltype.Ptr(WEAKLINK),
                                          target_gcref)
    else:
        link = llop.boehm_malloc_atomic(llmemory.Address,
                                        sizeof_weakreflink)
        if not link:
            raise MemoryError
        plink = llmemory.cast_adr_to_ptr(link, lltype.Ptr(WEAKLINK))
        plink.addr = llmemory.cast_ptr_to_adr(target_gcref)
        llop.boehm_disappearing_link(lltype.Void, link, target_gcref)
    return llmemory.cast_ptr_to_weakrefptr(plink)
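This variant writes plink.addr instead of indexing plink[0], so here WEAKLINK is presumably a structure with an addr field rather than a one-element array. A hypothetical sketch of such a layout (the real definition may carry extra fields or hints):

# Hypothetical WEAKLINK layout matching the plink.addr access above.
WEAKLINK = lltype.Struct('WEAKLINK', ('addr', llmemory.Address))
sizeof_weakreflink = llmemory.sizeof(WEAKLINK)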
Example no. 3
    def __init__(self, translator, inline=False):
        super(BoehmGCTransformer, self).__init__(translator, inline=inline)
        self.finalizer_funcptrs = {}

        atomic_mh = mallocHelpers()
        atomic_mh.allocate = lambda size: llop.boehm_malloc_atomic(
            llmemory.Address, size)
        ll_malloc_fixedsize_atomic = atomic_mh._ll_malloc_fixedsize

        mh = mallocHelpers()
        mh.allocate = lambda size: llop.boehm_malloc(llmemory.Address, size)
        ll_malloc_fixedsize = mh._ll_malloc_fixedsize

        # XXX, do we need/want an atomic version of this function?
        ll_malloc_varsize_no_length = mh.ll_malloc_varsize_no_length
        ll_malloc_varsize = mh.ll_malloc_varsize

        HDRPTR = lltype.Ptr(self.HDR)

        def ll_identityhash(addr):
            # Compute the identity hash lazily: read the cached value from
            # the object's GC header and fill it in on first use.
            obj = llmemory.cast_adr_to_ptr(addr, HDRPTR)
            h = obj.hash
            if h == 0:
                obj.hash = h = ~llmemory.cast_adr_to_int(addr)
            return h

        if self.translator:
            self.malloc_fixedsize_ptr = self.inittime_helper(
                ll_malloc_fixedsize, [lltype.Signed], llmemory.Address)
            self.malloc_fixedsize_atomic_ptr = self.inittime_helper(
                ll_malloc_fixedsize_atomic, [lltype.Signed], llmemory.Address)
            self.malloc_varsize_no_length_ptr = self.inittime_helper(
                ll_malloc_varsize_no_length, [lltype.Signed] * 3,
                llmemory.Address,
                inline=False)
            self.malloc_varsize_ptr = self.inittime_helper(ll_malloc_varsize,
                                                           [lltype.Signed] * 4,
                                                           llmemory.Address,
                                                           inline=False)
            self.weakref_create_ptr = self.inittime_helper(ll_weakref_create,
                                                           [llmemory.Address],
                                                           llmemory.WeakRefPtr,
                                                           inline=False)
            self.weakref_deref_ptr = self.inittime_helper(
                ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address)
            self.identityhash_ptr = self.inittime_helper(ll_identityhash,
                                                         [llmemory.Address],
                                                         lltype.Signed,
                                                         inline=False)
            self.mixlevelannotator.finish()  # for now
            self.mixlevelannotator.backend_optimize()
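The constructor binds mh.allocate to the raw Boehm allocation operation and then registers mh._ll_malloc_fixedsize as the actual helper; that helper itself is not shown in the example. A rough reconstruction of the pattern it presumably follows (allocate, then translate a NULL result into MemoryError; a sketch, not the project's verbatim code):

def _ll_malloc_fixedsize(size):
    # Sketch only: call whatever mh.allocate was bound to (here
    # llop.boehm_malloc or llop.boehm_malloc_atomic) and raise
    # MemoryError if the Boehm allocator returned NULL.
    result = mh.allocate(size)
    if not result:
        raise MemoryError
    return result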
Example no. 4
    def __init__(self, translator, inline=False):
        super(BoehmGCTransformer, self).__init__(translator, inline=inline)
        self.finalizer_funcptrs = {}

        atomic_mh = mallocHelpers()
        atomic_mh.allocate = lambda size: llop.boehm_malloc_atomic(llmemory.Address, size)
        ll_malloc_fixedsize_atomic = atomic_mh._ll_malloc_fixedsize

        mh = mallocHelpers()
        mh.allocate = lambda size: llop.boehm_malloc(llmemory.Address, size)
        ll_malloc_fixedsize = mh._ll_malloc_fixedsize

        # XXX, do we need/want an atomic version of this function?
        ll_malloc_varsize_no_length = mh.ll_malloc_varsize_no_length
        ll_malloc_varsize = mh.ll_malloc_varsize

        HDRPTR = lltype.Ptr(self.HDR)

        def ll_identityhash(addr):
            obj = llmemory.cast_adr_to_ptr(addr, HDRPTR)
            h = obj.hash
            if h == 0:
                obj.hash = h = ~llmemory.cast_adr_to_int(addr)
            return h

        if self.translator:
            self.malloc_fixedsize_ptr = self.inittime_helper(
                ll_malloc_fixedsize, [lltype.Signed], llmemory.Address)
            self.malloc_fixedsize_atomic_ptr = self.inittime_helper(
                ll_malloc_fixedsize_atomic, [lltype.Signed], llmemory.Address)
            self.malloc_varsize_no_length_ptr = self.inittime_helper(
                ll_malloc_varsize_no_length, [lltype.Signed]*3, llmemory.Address, inline=False)
            self.malloc_varsize_ptr = self.inittime_helper(
                ll_malloc_varsize, [lltype.Signed]*4, llmemory.Address, inline=False)
            if self.translator.config.translation.rweakref:
                self.weakref_create_ptr = self.inittime_helper(
                    ll_weakref_create, [llmemory.Address], llmemory.WeakRefPtr,
                    inline=False)
                self.weakref_deref_ptr = self.inittime_helper(
                    ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address)
            self.identityhash_ptr = self.inittime_helper(
                ll_identityhash, [llmemory.Address], lltype.Signed,
                inline=False)
            self.mixlevelannotator.finish()   # for now
            self.mixlevelannotator.backend_optimize()

        self.finalizer_triggers = []
        self.finalizer_queue_indexes = {}    # {fq: index}
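The two weakref helpers are registered together, but only the create side appears in these examples. A sketch of what the deref side might look like, mirroring the casts done by the ll_weakref_create of Example no. 1 and assuming the one-element-array WEAKLINK layout (an assumption, not the project's verbatim code):

def ll_weakref_deref(wref):
    # Undo the cast done at creation time and read back the target
    # address; Boehm has already cleared the slot if the target died,
    # so a NULL address means the weakref is dead.
    plink = llmemory.cast_weakrefptr_to_ptr(lltype.Ptr(WEAKLINK), wref)
    return plink[0]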
Example no. 5
    def __init__(self, translator, inline=False):
        super(BoehmGCTransformer, self).__init__(translator, inline=inline)
        self.finalizer_funcptrs = {}

        atomic_mh = mallocHelpers(gckind='gc')
        atomic_mh.allocate = lambda size: llop.boehm_malloc_atomic(
            llmemory.GCREF, size)
        ll_malloc_fixedsize_atomic = atomic_mh._ll_malloc_fixedsize

        mh = mallocHelpers(gckind='gc')
        mh.allocate = lambda size: llop.boehm_malloc(llmemory.GCREF, size)
        ll_malloc_fixedsize = mh._ll_malloc_fixedsize

        # XXX, do we need/want an atomic version of this function?
        ll_malloc_varsize_no_length = mh.ll_malloc_varsize_no_length
        ll_malloc_varsize = mh.ll_malloc_varsize

        fields = []
        if translator and translator.config.translation.reverse_debugger:
            fields.append(("hash", lltype.Signed))
            fields.append(("uid", lltype.SignedLongLong))
        hints = {'hints': {'gcheader': True}}
        self.HDR = lltype.Struct("header", *fields, **hints)

        if self.translator:
            self.malloc_fixedsize_ptr = self.inittime_helper(
                ll_malloc_fixedsize, [lltype.Signed], llmemory.GCREF)
            self.malloc_fixedsize_atomic_ptr = self.inittime_helper(
                ll_malloc_fixedsize_atomic, [lltype.Signed], llmemory.GCREF)
            self.malloc_varsize_no_length_ptr = self.inittime_helper(
                ll_malloc_varsize_no_length, [lltype.Signed] * 3,
                llmemory.GCREF,
                inline=False)
            self.malloc_varsize_ptr = self.inittime_helper(ll_malloc_varsize,
                                                           [lltype.Signed] * 4,
                                                           llmemory.GCREF,
                                                           inline=False)
            if self.translator.config.translation.rweakref:
                (ll_weakref_create, ll_weakref_deref, self.WEAKLINK,
                 self.convert_weakref_to) = build_weakref(
                     self.translator.config)
                self.weakref_create_ptr = self.inittime_helper(
                    ll_weakref_create, [llmemory.GCREF],
                    llmemory.WeakRefPtr,
                    inline=False)
                self.weakref_deref_ptr = self.inittime_helper(
                    ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.GCREF)

            if not translator.config.translation.reverse_debugger:

                def ll_identityhash(addr):
                    h = ~llmemory.cast_adr_to_int(addr)
                    return h

                self.identityhash_ptr = self.inittime_helper(
                    ll_identityhash, [llmemory.Address],
                    lltype.Signed,
                    inline=False)
                self.NO_HEADER = True

            self.mixlevelannotator.finish()  # for now
            self.mixlevelannotator.backend_optimize()

        self.finalizer_triggers = []
        self.finalizer_queue_indexes = {}  # {fq: index}
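Each __init__ variant above also registers ll_malloc_varsize_no_length and ll_malloc_varsize from mallocHelpers, taking three and four Signed arguments respectively; those helpers are not shown either. A hedged reconstruction of the usual pattern, assuming the arguments are (length, size, itemsize) plus a length-field offset, that the size arithmetic is overflow-checked, and that the helpers return raw addresses as in the first two variants:

from rpython.rlib.rarithmetic import ovfcheck

def ll_malloc_varsize_no_length(length, size, itemsize):
    # Sketch: total size = fixed part + length * item size, with the
    # multiplication and addition checked for overflow.
    try:
        varsize = ovfcheck(itemsize * length)
        tot_size = ovfcheck(size + varsize)
    except OverflowError:
        raise MemoryError
    result = mh.allocate(tot_size)
    if not result:
        raise MemoryError
    return result

def ll_malloc_varsize(length, size, itemsize, lengthoffset):
    # Sketch: allocate as above, then store the length at the given
    # offset inside the freshly allocated object.
    result = ll_malloc_varsize_no_length(length, size, itemsize)
    (result + lengthoffset).signed[0] = length
    return result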