def __new__(meta, type_name, bases, members):
    """Create a user-defined GraphBLAS type class.

    Builds a per-class cffi interface from the ``members`` field
    declarations, registers the struct as a GrB_Type, maps it to a
    numba record dtype, and binds the ``*_UDT`` GraphBLAS C entry
    points onto the class.

    Classes that declare ``base`` are abstract and skip registration.
    """
    if members.get('base'):
        # Abstract base: no cffi struct or GraphBLAS type to register.
        return super().__new__(meta, type_name, bases, members)
    m = members['members']
    cls_ffi = members['ffi'] = FFI()
    cls_ffi.cdef(build_udt_def(type_name, m))
    t = ffi.new('GrB_Type*')
    _check(lib.GrB_Type_new(t, cls_ffi.sizeof(type_name)))
    # Register the struct with numba so jitted code can treat it as a
    # record dtype.
    cffi_support.map_type(cls_ffi.typeof(type_name), use_record_dtype=True)
    members['gb_type'] = t[0]
    cls = super().__new__(meta, type_name, bases, members)
    cls.member_def = list(map(methodcaller('split'), m))
    cls.C = type_name
    cls.ptr = type_name + '*'
    # UDT classes always use the *_UDT variants of the C API.
    # (Removed an unused `get = partial(getattr, lib)` local.)
    cls.Matrix_setElement = lib.GrB_Matrix_setElement_UDT
    cls.Matrix_extractElement = lib.GrB_Matrix_extractElement_UDT
    cls.Matrix_extractTuples = lib.GrB_Matrix_extractTuples_UDT
    cls.Matrix_assignScalar = lib.GrB_Matrix_assign_UDT
    cls.Vector_setElement = lib.GrB_Vector_setElement_UDT
    cls.Vector_extractElement = lib.GrB_Vector_extractElement_UDT
    cls.Vector_extractTuples = lib.GrB_Vector_extractTuples_UDT
    cls.Vector_assignScalar = lib.GrB_Vector_assign_UDT
    cls.Scalar_setElement = lib.GxB_Scalar_setElement_UDT
    cls.Scalar_extractElement = lib.GxB_Scalar_extractElement_UDT
    cls.identity = cls.from_value(cls.identity)
    # Default any missing operator hooks to a NULL pointer.
    for op_name in ('eq_op', 'add_op', 'mult_op'):
        if not hasattr(cls, op_name):
            setattr(cls, op_name, cls.ffi.NULL)
    return cls
def test_unsupport_bitsize(self):
    """map_type must reject struct fields declared with bitfields."""
    ffi = self.get_ffi()
    with self.assertRaises(ValueError) as raises:
        cffi_support.map_type(ffi.typeof('error'), use_record_dtype=True)
    # When bitsize is provided, bitshift defaults to 0.
    self.assertEqual(
        str(raises.exception),
        "field 'bits' has bitshift, this is not supported",
    )
def __new__(meta, type_name, bases, attrs):
    """Create a GraphBLAS type class (built-in or user-defined).

    If ``attrs`` contains ``members``, a cffi struct is compiled and
    registered as a GrB_Type (``base_name`` becomes ``'UDT'``);
    otherwise the class wraps a built-in type and uses the core ffi.
    The ``base_name`` selects which suffixed C API entry points
    (``GrB_*_setElement_<base_name>`` etc.) are bound onto the class.

    Classes that declare ``base`` are abstract and skip all of this.
    """
    if attrs.get('base', False):
        return super().__new__(meta, type_name, bases, attrs)
    if 'members' in attrs:
        m = attrs['members']
        cls_ffi = FFI()
        cls_ffi.cdef(build_udt_def(type_name, m))
        t = core_ffi.new('GrB_Type*')
        _check(lib.GrB_Type_new(t, cls_ffi.sizeof(type_name)))
        # Make the struct usable as a numba record dtype.
        cffi_support.map_type(cls_ffi.typeof(type_name), use_record_dtype=True)
        attrs['ffi'] = cls_ffi
        attrs['gb_type'] = t[0]
        attrs['C'] = type_name
        attrs['member_def'] = list(map(methodcaller('split'), m))
        attrs['base_name'] = 'UDT'
    else:
        attrs['ffi'] = core_ffi
    # (Removed an unused `gb_type_name = type_name` local.)
    cls = super().__new__(meta, type_name, bases, attrs)
    # Allow reverse lookup from a GrB_Type handle back to the class.
    meta._gb_type_map[cls.gb_type] = cls
    cls.ptr = cls.C + '*'
    cls.zero = getattr(cls, 'zero', core_ffi.NULL)
    cls.one = getattr(cls, 'one', core_ffi.NULL)
    get = partial(getattr, lib)
    cls.base_name = base_name = getattr(cls, 'base_name', cls.__name__)
    # Bind the suffixed C API entry points for this type.
    for attr_name, fmt in (
            ('Monoid_new', 'GrB_Monoid_new_{}'),
            ('Matrix_setElement', 'GrB_Matrix_setElement_{}'),
            ('Matrix_extractElement', 'GrB_Matrix_extractElement_{}'),
            ('Matrix_extractTuples', 'GrB_Matrix_extractTuples_{}'),
            ('Matrix_assignScalar', 'GrB_Matrix_assign_{}'),
            ('Vector_setElement', 'GrB_Vector_setElement_{}'),
            ('Vector_extractElement', 'GrB_Vector_extractElement_{}'),
            ('Vector_extractTuples', 'GrB_Vector_extractTuples_{}'),
            ('Vector_assignScalar', 'GrB_Vector_assign_{}'),
            ('Scalar_setElement', 'GxB_Scalar_setElement_{}'),
            ('Scalar_extractElement', 'GxB_Scalar_extractElement_{}'),
    ):
        setattr(cls, attr_name, get(fmt.format(base_name)))
    return cls
def test_cfunc_callback(self):
    """Round-trip a struct array through a cfunc and compare with numpy."""
    ffi = self.get_ffi()
    struct_type = ffi.typeof('big_struct')
    record_type = cffi_support.map_type(struct_type, use_record_dtype=True)
    sig = cffi_support.map_type(ffi.typeof('myfunc'), use_record_dtype=True)

    @njit
    def calc(base):
        total = 0
        for idx in range(base.size):
            rec = base[idx]
            total += rec.i1 * rec.f2 / rec.d3
            total += base[idx].af4.sum()
        return total

    @cfunc(sig)
    def foo(ptr, n):
        return calc(carray(ptr, n))

    # Populate three records through cffi.
    raw = ffi.new('big_struct[3]')
    view = ffi.cast('big_struct*', raw)
    for idx in range(3):
        view[idx].i1 = idx * 123
        view[idx].f2 = idx * 213
        view[idx].d3 = (1 + idx) * 213
        for j in range(9):
            view[idx].af4[j] = idx * 10 + j

    # Call the cfunc through its ctypes wrapper on the raw address.
    addr = int(ffi.cast('size_t', view))
    got = foo.ctypes(addr, 3)

    # The same buffer, viewed as a numpy structured array.
    arr = np.ndarray(
        buffer=ffi.buffer(raw),
        dtype=numpy_support.as_dtype(record_type),
        shape=3,
    )
    self.assertEqual(got, calc(arr))
def test_sin_function(self, flags=enable_pyobj_flags):
    """Compiled cffi sin wrapper matches the pure-Python version."""
    sig = cffi_support.map_type(ffi.typeof(cffi_sin))
    self.assertEqual(len(sig.args), 1)
    self.assertEqual(sig.args[0], types.double)
    pyfunc = use_cffi_sin
    cfunc = compile_isolated(pyfunc, [types.double], flags=flags).entry_point
    for value in (-1.2, -1, 0, 0.1):
        self.assertPreciseEqual(pyfunc(value), cfunc(value))
def test_cffi_sin_function(self, flags=enable_pyobj_flags):
    """Compiled c_sin wrapper matches the pure-Python version."""
    sig = cffi_support.map_type(ffi.typeof(c_sin))
    self.assertEqual(len(sig.args), 1)
    self.assertEqual(sig.args[0], types.double)
    pyfunc = use_cffi_sin
    cfunc = compile_isolated(pyfunc, [types.double], flags=flags).entry_point
    for value in (-1.2, -1, 0, 0.1):
        self.assertEqual(pyfunc(value), cfunc(value))
def test_type_parsing(self):
    """map_type translates struct and function typedefs into numba types."""
    ffi = self.get_ffi()

    # Struct typedef -> Record type with the declared fields.
    rec_type = cffi_support.map_type(
        ffi.typeof('big_struct'), use_record_dtype=True)
    self.assertIsInstance(rec_type, types.Record)
    self.assertEqual(len(rec_type), 4)
    expected_fields = {
        'i1': types.int32,
        'f2': types.float32,
        'd3': types.float64,
        'af4': types.NestedArray(dtype=types.float32, shape=(9,)),
    }
    for field, expected in expected_fields.items():
        self.assertEqual(rec_type.typeof(field), expected)

    # Function typedef -> Signature over the record pointer.
    sig = cffi_support.map_type(ffi.typeof('myfunc'), use_record_dtype=True)
    self.assertIsInstance(sig, typing.Signature)
    self.assertEqual(sig.args[0], types.CPointer(rec_type))
    self.assertEqual(sig.args[1], types.uintp)
    self.assertEqual(sig.return_type, types.float64)
def __set_name__(self, cls, name):
    """Compile ``self.func`` into a C binary operator when assigned on *cls*."""
    op_label = self.func.__name__
    type_label = cls.__name__
    # Declare the operator's C signature on the class ffi, then map it
    # to a numba signature with record arguments.
    cls.ffi.cdef(build_binop_def(type_label, op_label, boolean))
    c_sig = cffi_support.map_type(
        cls.ffi.typeof(binop_name(type_label, op_label)),
        use_record_dtype=True,
    )
    jitted = jit(self.func, nopython=True)

    @cfunc(c_sig)
    def wrapper(z_ptr, x_ptr, y_ptr):
        # View each pointer as a one-element record array; the jitted
        # function receives the records and writes its result through z.
        out = carray(z_ptr, 1)[0]
        left = carray(x_ptr, 1)[0]
        right = carray(y_ptr, 1)[0]
        jitted(out, left, right)

    self.op = BinaryOp(op_label, type_label, wrapper, cls, boolean)
    setattr(cls, op_label, self.op)
def test_type_map(self):
    """map_type on the cffi sin function yields a one-double signature."""
    sig = cffi_support.map_type(mod.ffi.typeof(mod.cffi_sin))
    self.assertEqual(len(sig.args), 1)
    self.assertEqual(sig.args[0], types.double)
typedef struct my_struct { int i1; float f2; double d3; float af4[7]; // arrays are supported } my_struct; /* Define a callback function */ typedef double (*my_func)(my_struct*, size_t); """ ffi = FFI() ffi.cdef(src) # Get the function signature from *my_func* sig = cffi_support.map_type(ffi.typeof('my_func'), use_record_dtype=True) # Make the cfunc from numba import cfunc, carray @cfunc(sig) def foo(ptr, n): base = carray(ptr, n) # view pointer as an array of my_struct tmp = 0 for i in range(n): tmp += base[i].i1 * base[i].f2 / base[i].d3 tmp += base[i].af4.sum() # nested arrays are like normal numpy array return tmp