def __init__(self, translator, inline=False):
    super(BoehmGCTransformer, self).__init__(translator, inline=inline)
    self.finalizer_funcptrs = {}

    atomic_mh = mallocHelpers()
    atomic_mh.allocate = lambda size: llop.boehm_malloc_atomic(llmemory.Address, size)
    ll_malloc_fixedsize_atomic = atomic_mh._ll_malloc_fixedsize

    mh = mallocHelpers()
    mh.allocate = lambda size: llop.boehm_malloc(llmemory.Address, size)
    ll_malloc_fixedsize = mh._ll_malloc_fixedsize
    # XXX, do we need/want an atomic version of this function?
    ll_malloc_varsize_no_length = mh.ll_malloc_varsize_no_length
    ll_malloc_varsize = mh.ll_malloc_varsize

    if self.translator:
        self.malloc_fixedsize_ptr = self.inittime_helper(
            ll_malloc_fixedsize, [lltype.Signed], llmemory.Address)
        self.malloc_fixedsize_atomic_ptr = self.inittime_helper(
            ll_malloc_fixedsize_atomic, [lltype.Signed], llmemory.Address)
        self.malloc_varsize_no_length_ptr = self.inittime_helper(
            ll_malloc_varsize_no_length, [lltype.Signed] * 3,
            llmemory.Address, inline=False)
        self.malloc_varsize_ptr = self.inittime_helper(
            ll_malloc_varsize, [lltype.Signed] * 4, llmemory.Address,
            inline=False)
        self.weakref_create_ptr = self.inittime_helper(
            ll_weakref_create, [llmemory.Address], llmemory.WeakRefPtr,
            inline=False)
        self.weakref_deref_ptr = self.inittime_helper(
            ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address)
        self.mixlevelannotator.finish()   # for now
        self.mixlevelannotator.backend_optimize()
def ll_malloc_fixedsize(size, finalizer):
    result = llop.boehm_malloc(llmemory.Address, size)
    # a NULL result from the Boehm allocator means we are out of memory
    if not result:
        raise memoryError
    if finalizer:  # XXX runtime check here is bad?
        # tell the collector to call 'finalizer' when this object is collected
        llop.boehm_register_finalizer(lltype.Void, result, finalizer)
    return result
def ll_malloc_varsize_no_length(length, size, itemsize):
    # compute the total allocation size with overflow checking
    try:
        varsize = ovfcheck(itemsize * length)
        tot_size = ovfcheck(size + varsize)
    except OverflowError:
        raise memoryError
    result = llop.boehm_malloc(llmemory.Address, tot_size)
    if not result:
        raise memoryError
    return result
def __init__(self, translator, inline=False):
    super(BoehmGCTransformer, self).__init__(translator, inline=inline)
    self.finalizer_funcptrs = {}

    atomic_mh = mallocHelpers()
    atomic_mh.allocate = lambda size: llop.boehm_malloc_atomic(llmemory.Address, size)
    ll_malloc_fixedsize_atomic = atomic_mh._ll_malloc_fixedsize

    mh = mallocHelpers()
    mh.allocate = lambda size: llop.boehm_malloc(llmemory.Address, size)
    # resizing of GC-allocated blocks goes through Boehm's GC_REALLOC
    c_realloc = rffi.llexternal('GC_REALLOC', [rffi.VOIDP, rffi.INT],
                                rffi.VOIDP, sandboxsafe=True)
    def _realloc(ptr, size):
        return llmemory.cast_ptr_to_adr(
            c_realloc(rffi.cast(rffi.VOIDP, ptr), size))
    mh.realloc = _realloc
    ll_malloc_fixedsize = mh._ll_malloc_fixedsize
    # XXX, do we need/want an atomic version of this function?
    ll_malloc_varsize_no_length = mh.ll_malloc_varsize_no_length
    ll_malloc_varsize = mh.ll_malloc_varsize
    ll_realloc = mh.ll_realloc

    # the identity hash is computed lazily from the object's address
    # and cached in its GC header
    HDRPTR = lltype.Ptr(self.HDR)
    def ll_identityhash(addr):
        obj = llmemory.cast_adr_to_ptr(addr, HDRPTR)
        h = obj.hash
        if h == 0:
            obj.hash = h = ~llmemory.cast_adr_to_int(addr)
        return h

    if self.translator:
        self.malloc_fixedsize_ptr = self.inittime_helper(
            ll_malloc_fixedsize, [lltype.Signed], llmemory.Address)
        self.malloc_fixedsize_atomic_ptr = self.inittime_helper(
            ll_malloc_fixedsize_atomic, [lltype.Signed], llmemory.Address)
        self.malloc_varsize_no_length_ptr = self.inittime_helper(
            ll_malloc_varsize_no_length, [lltype.Signed] * 3,
            llmemory.Address, inline=False)
        self.malloc_varsize_ptr = self.inittime_helper(
            ll_malloc_varsize, [lltype.Signed] * 4, llmemory.Address,
            inline=False)
        self.weakref_create_ptr = self.inittime_helper(
            ll_weakref_create, [llmemory.Address], llmemory.WeakRefPtr,
            inline=False)
        self.weakref_deref_ptr = self.inittime_helper(
            ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address)
        self.realloc_ptr = self.inittime_helper(
            ll_realloc, [llmemory.Address] + [lltype.Signed] * 4,
            llmemory.Address)
        self.identityhash_ptr = self.inittime_helper(
            ll_identityhash, [llmemory.Address], lltype.Signed,
            inline=False)
        self.mixlevelannotator.finish()   # for now
        self.mixlevelannotator.backend_optimize()
def __init__(self, translator, inline=False):
    super(BoehmGCTransformer, self).__init__(translator, inline=inline)
    self.finalizer_funcptrs = {}

    atomic_mh = mallocHelpers()
    atomic_mh.allocate = lambda size: llop.boehm_malloc_atomic(
        llmemory.Address, size)
    ll_malloc_fixedsize_atomic = atomic_mh._ll_malloc_fixedsize

    mh = mallocHelpers()
    mh.allocate = lambda size: llop.boehm_malloc(llmemory.Address, size)
    ll_malloc_fixedsize = mh._ll_malloc_fixedsize
    # XXX, do we need/want an atomic version of this function?
    ll_malloc_varsize_no_length = mh.ll_malloc_varsize_no_length
    ll_malloc_varsize = mh.ll_malloc_varsize

    HDRPTR = lltype.Ptr(self.HDR)
    def ll_identityhash(addr):
        obj = llmemory.cast_adr_to_ptr(addr, HDRPTR)
        h = obj.hash
        if h == 0:
            obj.hash = h = ~llmemory.cast_adr_to_int(addr)
        return h

    if self.translator:
        self.malloc_fixedsize_ptr = self.inittime_helper(
            ll_malloc_fixedsize, [lltype.Signed], llmemory.Address)
        self.malloc_fixedsize_atomic_ptr = self.inittime_helper(
            ll_malloc_fixedsize_atomic, [lltype.Signed], llmemory.Address)
        self.malloc_varsize_no_length_ptr = self.inittime_helper(
            ll_malloc_varsize_no_length, [lltype.Signed] * 3,
            llmemory.Address, inline=False)
        self.malloc_varsize_ptr = self.inittime_helper(
            ll_malloc_varsize, [lltype.Signed] * 4, llmemory.Address,
            inline=False)
        self.weakref_create_ptr = self.inittime_helper(
            ll_weakref_create, [llmemory.Address], llmemory.WeakRefPtr,
            inline=False)
        self.weakref_deref_ptr = self.inittime_helper(
            ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address)
        self.identityhash_ptr = self.inittime_helper(
            ll_identityhash, [llmemory.Address], lltype.Signed,
            inline=False)
        self.mixlevelannotator.finish()   # for now
        self.mixlevelannotator.backend_optimize()
def __init__(self, translator, inline=False):
    super(BoehmGCTransformer, self).__init__(translator, inline=inline)
    self.finalizer_funcptrs = {}

    atomic_mh = mallocHelpers()
    atomic_mh.allocate = lambda size: llop.boehm_malloc_atomic(
        llmemory.Address, size)
    ll_malloc_fixedsize_atomic = atomic_mh._ll_malloc_fixedsize

    mh = mallocHelpers()
    mh.allocate = lambda size: llop.boehm_malloc(llmemory.Address, size)
    c_realloc = rffi.llexternal('GC_REALLOC', [rffi.VOIDP, rffi.INT],
                                rffi.VOIDP)
    def _realloc(ptr, size):
        return llmemory.cast_ptr_to_adr(
            c_realloc(rffi.cast(rffi.VOIDP, ptr), size))
    mh.realloc = _realloc
    ll_malloc_fixedsize = mh._ll_malloc_fixedsize
    # XXX, do we need/want an atomic version of this function?
    ll_malloc_varsize_no_length = mh.ll_malloc_varsize_no_length
    ll_malloc_varsize = mh.ll_malloc_varsize
    ll_realloc = mh.ll_realloc

    if self.translator:
        self.malloc_fixedsize_ptr = self.inittime_helper(
            ll_malloc_fixedsize, [lltype.Signed], llmemory.Address)
        self.malloc_fixedsize_atomic_ptr = self.inittime_helper(
            ll_malloc_fixedsize_atomic, [lltype.Signed], llmemory.Address)
        self.malloc_varsize_no_length_ptr = self.inittime_helper(
            ll_malloc_varsize_no_length, [lltype.Signed] * 3,
            llmemory.Address, inline=False)
        self.malloc_varsize_ptr = self.inittime_helper(
            ll_malloc_varsize, [lltype.Signed] * 4, llmemory.Address,
            inline=False)
        self.weakref_create_ptr = self.inittime_helper(
            ll_weakref_create, [llmemory.Address], llmemory.WeakRefPtr,
            inline=False)
        self.weakref_deref_ptr = self.inittime_helper(
            ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address)
        self.realloc_ptr = self.inittime_helper(
            ll_realloc, [llmemory.Address] + [lltype.Signed] * 4,
            llmemory.Address)
        self.mixlevelannotator.finish()   # for now
        self.mixlevelannotator.backend_optimize()