def test_verify_struct(): ffi = FFI() ffi.cdef("""struct foo_s { int b; short a; ...; }; struct bar_s { struct foo_s *f; };""") lib = verify(ffi, 'test_verify_struct', """struct foo_s { short a; int b; }; struct bar_s { struct foo_s *f; };""") ffi.typeof("struct bar_s *") p = ffi.new("struct foo_s *", {'a': -32768, 'b': -2147483648}) assert p.a == -32768 assert p.b == -2147483648 py.test.raises(OverflowError, "p.a -= 1") py.test.raises(OverflowError, "p.b -= 1") q = ffi.new("struct bar_s *", {'f': p}) assert q.f == p # assert ffi.offsetof("struct foo_s", "a") == 0 assert ffi.offsetof("struct foo_s", "b") == 4 assert ffi.offsetof(u+"struct foo_s", u+"b") == 4 # py.test.raises(TypeError, ffi.addressof, p) assert ffi.addressof(p[0]) == p assert ffi.typeof(ffi.addressof(p[0])) is ffi.typeof("struct foo_s *") assert ffi.typeof(ffi.addressof(p, "b")) is ffi.typeof("int *") assert ffi.addressof(p, "b")[0] == p.b
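# The test above (and several below) call a verify() helper from cffi's own test
# suite.  Outside the test suite, the closest equivalent is cffi's out-of-line
# API mode; the sketch below is illustrative only (the module name "_example"
# and the build-at-import flow are assumptions, not part of the original test).
from cffi import FFI

ffibuilder = FFI()
ffibuilder.cdef("struct foo_s { int b; short a; ...; };")   # '...' lets the C compiler fix the layout
ffibuilder.set_source("_example", "struct foo_s { short a; int b; };")
ffibuilder.compile(verbose=True)          # builds the _example extension module

from _example import ffi, lib             # then use ffi exactly as in the test above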
def test_vararg(self): if not sys.platform.startswith('linux'): py.test.skip("probably no symbol 'stderr' in the lib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" int fprintf(void *, const char *format, ...); void *stderr; """) ffi.C = ffi.dlopen(None) with FdWriteCapture() as fd: ffi.C.fprintf(ffi.C.stderr, b"hello with no arguments\n") ffi.C.fprintf(ffi.C.stderr, b"hello, %s!\n", ffi.new("char[]", b"world")) ffi.C.fprintf(ffi.C.stderr, ffi.new("char[]", b"hello, %s!\n"), ffi.new("char[]", b"world2")) ffi.C.fprintf(ffi.C.stderr, b"hello int %d long %ld long long %lld\n", ffi.cast("int", 42), ffi.cast("long", 84), ffi.cast("long long", 168)) ffi.C.fprintf(ffi.C.stderr, b"hello %p\n", ffi.NULL) res = fd.getvalue() assert res == (b"hello with no arguments\n" b"hello, world!\n" b"hello, world2!\n" b"hello int 42 long 84 long long 168\n" b"hello (nil)\n")
def test_explicit_cdecl_stdcall(self): if sys.platform != 'win32': py.test.skip("Windows-only test") if self.Backend is CTypesBackend: py.test.skip("not with the ctypes backend") win64 = (sys.maxsize > 2**32) # ffi = FFI(backend=self.Backend()) ffi.cdef(""" BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); """) m = ffi.dlopen("Kernel32.dll") tp = ffi.typeof(m.QueryPerformanceFrequency) assert str(tp) == "<ctype 'int(*)(long long *)'>" # ffi = FFI(backend=self.Backend()) ffi.cdef(""" BOOL __cdecl QueryPerformanceFrequency(LONGLONG *lpFrequency); """) m = ffi.dlopen("Kernel32.dll") tpc = ffi.typeof(m.QueryPerformanceFrequency) assert tpc is tp # ffi = FFI(backend=self.Backend()) ffi.cdef(""" BOOL WINAPI QueryPerformanceFrequency(LONGLONG *lpFrequency); """) m = ffi.dlopen("Kernel32.dll") tps = ffi.typeof(m.QueryPerformanceFrequency) if win64: assert tps is tpc else: assert tps is not tpc assert str(tps) == "<ctype 'int(__stdcall *)(long long *)'>" # ffi = FFI(backend=self.Backend()) ffi.cdef("typedef int (__cdecl *fnc_t)(int);") ffi.cdef("typedef int (__stdcall *fns_t)(int);") tpc = ffi.typeof("fnc_t") tps = ffi.typeof("fns_t") assert str(tpc) == "<ctype 'int(*)(int)'>" if win64: assert tps is tpc else: assert str(tps) == "<ctype 'int(__stdcall *)(int)'>" # fnc = ffi.cast("fnc_t", 0) fns = ffi.cast("fns_t", 0) ffi.new("fnc_t[]", [fnc]) if not win64: py.test.raises(TypeError, ffi.new, "fnc_t[]", [fns]) py.test.raises(TypeError, ffi.new, "fns_t[]", [fnc]) ffi.new("fns_t[]", [fns])
def test_simple():
    ffi = FFI()
    # Cannot load header directly
    # cffi does not parse directives yet
    header = '''int cpp_test(int argc, char **argv);
    '''
    ffi.cdef(header)
    lib = ffi.dlopen('libcpptest.so')
    args = sys.argv
    argv_keepalive = [ffi.new("char[]", arg) for arg in args]
    argv = ffi.new("char *[]", argv_keepalive)
    lib.cpp_test(len(args), argv)
def test_memmove(self): ffi = FFI() p = ffi.new("short[]", [-1234, -2345, -3456, -4567, -5678]) ffi.memmove(p, p + 1, 4) assert list(p) == [-2345, -3456, -3456, -4567, -5678] p[2] = 999 ffi.memmove(p + 2, p, 6) assert list(p) == [-2345, -3456, -2345, -3456, 999] ffi.memmove(p + 4, ffi.new("char[]", b"\x71\x72"), 2) if sys.byteorder == 'little': assert list(p) == [-2345, -3456, -2345, -3456, 0x7271] else: assert list(p) == [-2345, -3456, -2345, -3456, 0x7172]
def test_char16_t(self): ffi = FFI() x = ffi.new("char16_t[]", 5) assert len(x) == 5 and ffi.sizeof(x) == 10 x[2] = u+'\u1324' assert x[2] == u+'\u1324' y = ffi.new("char16_t[]", u+'\u1234\u5678') assert len(y) == 3 assert list(y) == [u+'\u1234', u+'\u5678', u+'\x00'] assert ffi.string(y) == u+'\u1234\u5678' z = ffi.new("char16_t[]", u+'\U00012345') assert len(z) == 3 assert list(z) == [u+'\ud808', u+'\udf45', u+'\x00'] assert ffi.string(z) == u+'\U00012345'
def test_memmove_buffer(self): import array ffi = FFI() a = array.array('H', [10000, 20000, 30000]) p = ffi.new("short[]", 5) ffi.memmove(p, a, 6) assert list(p) == [10000, 20000, 30000, 0, 0] ffi.memmove(p + 1, a, 6) assert list(p) == [10000, 10000, 20000, 30000, 0] b = array.array('h', [-1000, -2000, -3000]) ffi.memmove(b, a, 4) assert b.tolist() == [10000, 20000, -3000] assert a.tolist() == [10000, 20000, 30000] p[0] = 999 p[1] = 998 p[2] = 997 p[3] = 996 p[4] = 995 ffi.memmove(b, p, 2) assert b.tolist() == [999, 20000, -3000] ffi.memmove(b, p + 2, 4) assert b.tolist() == [997, 996, -3000] p[2] = -p[2] p[3] = -p[3] ffi.memmove(b, p + 2, 6) assert b.tolist() == [-997, -996, 995]
def test_verify_anonymous_struct_with_star_typedef(): ffi = FFI() ffi.cdef("typedef struct { int a; long b; } *foo_t;") verify(ffi, 'test_verify_anonymous_struct_with_star_typedef', "typedef struct { int a; long b; } *foo_t;") p = ffi.new("foo_t", {'b': 42}) assert p.b == 42
def test_verify_anonymous_struct_with_typedef(): ffi = FFI() ffi.cdef("typedef struct { int a; long b; ...; } foo_t;") verify(ffi, 'test_verify_anonymous_struct_with_typedef', "typedef struct { long b; int hidden, a; } foo_t;") p = ffi.new("foo_t *", {'b': 42}) assert p.b == 42 assert repr(p).startswith("<cdata 'foo_t *' ")
def test_open_array_in_struct(): ffi = FFI() ffi.cdef("struct foo_s { int b; int a[]; };") verify(ffi, 'test_open_array_in_struct', "struct foo_s { int b; int a[]; };") assert ffi.sizeof("struct foo_s") == 4 p = ffi.new("struct foo_s *", [5, [10, 20, 30]]) assert p.a[2] == 30
def test_passing_array(self): ffi = FFI(backend=self.Backend()) ffi.cdef(""" int strlen(char[]); """) ffi.C = ffi.dlopen(None) p = ffi.new("char[]", b"hello") res = ffi.C.strlen(p) assert res == 5
def test_strchr(self): ffi = FFI(backend=self.Backend()) ffi.cdef(""" char *strchr(const char *s, int c); """) ffi.C = ffi.dlopen(None) p = ffi.new("char[]", b"hello world!") q = ffi.C.strchr(p, ord('w')) assert ffi.string(q) == b"world!"
def test_char32_t(self): ffi = FFI() x = ffi.new("char32_t[]", 5) assert len(x) == 5 and ffi.sizeof(x) == 20 x[3] = u+'\U00013245' assert x[3] == u+'\U00013245' y = ffi.new("char32_t[]", u+'\u1234\u5678') assert len(y) == 3 assert list(y) == [u+'\u1234', u+'\u5678', u+'\x00'] py_uni = u+'\U00012345' z = ffi.new("char32_t[]", py_uni) assert len(z) == 2 assert list(z) == [py_uni, u+'\x00'] # maybe a 2-unichars string assert ffi.string(z) == py_uni if len(py_uni) == 1: # 4-bytes unicodes in Python s = ffi.new("char32_t[]", u+'\ud808\udf00') assert len(s) == 3 assert list(s) == [u+'\ud808', u+'\udf00', u+'\x00']
def test_struct_array_no_length(self): ffi = FFI() ffi.cdef("struct foo_s { int x; int a[]; };") p = ffi.new("struct foo_s *", [100, [200, 300, 400]]) assert p.x == 100 assert ffi.typeof(p.a) is ffi.typeof("int *") # no length available assert p.a[0] == 200 assert p.a[1] == 300 assert p.a[2] == 400
def test_dotdotdot_length_of_array_field(): ffi = FFI() ffi.cdef("struct foo_s { int a[...]; int b[...]; };") verify(ffi, 'test_dotdotdot_length_of_array_field', "struct foo_s { int a[42]; int b[11]; };") assert ffi.sizeof("struct foo_s") == (42 + 11) * 4 p = ffi.new("struct foo_s *") assert p.a[41] == p.b[10] == 0 py.test.raises(IndexError, "p.a[42]") py.test.raises(IndexError, "p.b[11]")
def test_verify():
    ffi = FFI()
    header = open("foo.h").read()
    ffi.cdef(header)
    source = open("foo.c").read()
    lib_dir = os.path.abspath(".")
    lib = ffi.verify(source, include_dirs=[lib_dir])
    varsp = ffi.new("foo_s*")
    print(varsp[0].a, varsp[0].b)
    lib.test(varsp)
    print(varsp[0].a, varsp[0].b)
def test_struct_array_guess_length_3(): ffi = FFI() ffi.cdef("struct foo_s { int a[][...]; };") lib = verify(ffi, 'test_struct_array_guess_length_3', "struct foo_s { int x; int a[5][7]; int y; };") assert ffi.sizeof('struct foo_s') == 37 * ffi.sizeof('int') s = ffi.new("struct foo_s *") assert ffi.typeof(s.a) == ffi.typeof("int(*)[7]") assert s.a[4][6] == 0 py.test.raises(IndexError, 's.a[4][7]') assert ffi.typeof(s.a[0]) == ffi.typeof("int[7]")
def test_memmove_readonly_readwrite(self): ffi = FFI() p = ffi.new("signed char[]", 5) ffi.memmove(p, b"abcde", 3) assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0] ffi.memmove(p, bytearray(b"ABCDE"), 2) assert list(p) == [ord("A"), ord("B"), ord("c"), 0, 0] py.test.raises((TypeError, BufferError), ffi.memmove, b"abcde", p, 3) ba = bytearray(b"xxxxx") ffi.memmove(dest=ba, src=p, n=3) assert ba == bytearray(b"ABcxx")
def test_cannot_instantiate_manually(self): ffi = FFI() ct = type(ffi.typeof("void *")) py.test.raises(TypeError, ct) py.test.raises(TypeError, ct, ffi.NULL) for cd in [type(ffi.cast("void *", 0)), type(ffi.new("char[]", 3)), type(ffi.gc(ffi.NULL, lambda x: None))]: py.test.raises(TypeError, cd) py.test.raises(TypeError, cd, ffi.NULL) py.test.raises(TypeError, cd, ffi.typeof("void *"))
def test_incomplete_struct_as_arg(): ffi = FFI() ffi.cdef("struct foo_s { int x; ...; }; int f(int, struct foo_s);") lib = verify(ffi, "test_incomplete_struct_as_arg", "struct foo_s { int a, x, z; };\n" "int f(int b, struct foo_s s) { return s.x * b; }") s = ffi.new("struct foo_s *", [21]) assert s.x == 21 assert ffi.sizeof(s[0]) == 12 assert ffi.offsetof(ffi.typeof(s), 'x') == 4 assert lib.f(2, s[0]) == 42 assert ffi.typeof(lib.f) == ffi.typeof("int(*)(int, struct foo_s)")
def test_struct_by_value(self): if self.module is None: py.test.skip("fix the auto-generation of the tiny test lib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" typedef struct { long x; long y; } POINT; typedef struct { long left; long top; long right; long bottom; } RECT; long left, top, right, bottom; RECT ReturnRect(int i, RECT ar, RECT* br, POINT cp, RECT dr, RECT *er, POINT fp, RECT gr); """) ownlib = ffi.dlopen(self.module) rect = ffi.new('RECT[1]') pt = ffi.new('POINT[1]') pt[0].x = 15 pt[0].y = 25 rect[0].left = ownlib.left rect[0].right = ownlib.right rect[0].top = ownlib.top rect[0].bottom = ownlib.bottom for i in range(4): ret = ownlib.ReturnRect(i, rect[0], rect, pt[0], rect[0], rect, pt[0], rect[0]) assert ret.left == ownlib.left assert ret.right == ownlib.right assert ret.top == ownlib.top assert ret.bottom == ownlib.bottom
class RangeLib:
    def __init__(self, config_file):
        self.ffi = FFI()
        self.ffi.cdef("""
    typedef void easy_lr ;  // avoid exposing the struct internals, fake it as void
    easy_lr* range_easy_create(const char* config_file);
    const char ** range_easy_expand(easy_lr* elr, const char * c_range);
    const char * range_easy_eval(easy_lr* elr, const char * c_range);
    char * range_easy_compress(easy_lr* elr, const char ** c_nodes);
    int range_easy_destroy(easy_lr* elr);
        """)
        self.rangelib_ffi = self.ffi.dlopen("libcrange.so")
        self.elr = self.rangelib_ffi.range_easy_create(self.ffi.new("char[]", config_file))

    def __charpp_to_native(self, arg):
        i = 0
        arr = []
        while arg[i] != self.ffi.NULL:
            arr.append(self.ffi.string(arg[i]))
            i = i + 1
        return arr

    def expand(self, c_range):
        ret = self.rangelib_ffi.range_easy_expand(self.elr, self.ffi.new("char[]", c_range))
        return self.__charpp_to_native(ret)

    def compress(self, nodes):
        # Use a list comprehension rather than map(): on Python 3, map() returns
        # an iterator, which has no append() method.
        char_arg = [self.ffi.new("char[]", x) for x in nodes]
        char_arg.append(self.ffi.NULL)
        ret = self.rangelib_ffi.range_easy_compress(self.elr, self.ffi.new("char*[]", char_arg))
        return self.ffi.string(ret)

    def eval(self, c_range):
        ret = self.rangelib_ffi.range_easy_eval(self.elr, self.ffi.new("char[]", c_range))
        return self.ffi.string(ret)

    def __del__(self):
        self.rangelib_ffi.range_easy_destroy(self.elr)
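# Minimal usage sketch for RangeLib (illustrative only): assumes libcrange.so is
# installed and that "/etc/range/range.conf" is a valid config file; the range
# expression and node names are made up.  On Python 3 the strings handed to
# ffi.new("char[]", ...) above would additionally need to be bytes.
rl = RangeLib("/etc/range/range.conf")
hosts = rl.expand("%all-hosts")                    # list of byte strings
merged = rl.compress(["host1", "host2", "host3"])
print(hosts, merged)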
def main_verbose(): """ Pass a numpy array to a C function and get a numpy array back out More Verbose, but valid """ ffi = FFI() ffi.cdef("void copy(float *in, float *out, int len);") C = ffi.dlopen("libcopy.so") float_in = ffi.new("float[16]") float_out = ffi.new("float[16]") arr_in = 42 * np.ones(16, dtype=np.float32) float_in[0:16] = arr_in[0:16] C.copy(float_in, float_out, 16) arr_out = np.frombuffer(ffi.buffer(float_out, 16*4), dtype = np.float32) print(arr_out) return
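# The name main_verbose() hints at a terser variant: instead of copying element
# by element, hand cffi the raw pointers of the numpy arrays.  Sketch only --
# it assumes the same libcopy.so and 'copy' prototype as above and that both
# arrays are C-contiguous float32 (the pointer cast relies on that).
def main_terse():
    ffi = FFI()
    ffi.cdef("void copy(float *in, float *out, int len);")
    C = ffi.dlopen("libcopy.so")

    arr_in = 42 * np.ones(16, dtype=np.float32)
    arr_out = np.zeros(16, dtype=np.float32)
    # numpy's .ctypes.data is the integer address of the buffer; cast it for cffi
    C.copy(ffi.cast("float *", arr_in.ctypes.data),
           ffi.cast("float *", arr_out.ctypes.data),
           16)
    print(arr_out)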
def test_windows_stdcall(self): if sys.platform != 'win32': py.test.skip("Windows-only test") if self.Backend is CTypesBackend: py.test.skip("not with the ctypes backend") ffi = FFI(backend=self.Backend()) ffi.cdef(""" BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); """) m = ffi.dlopen("Kernel32.dll") p_freq = ffi.new("LONGLONG *") res = m.QueryPerformanceFrequency(p_freq) assert res != 0 assert p_freq[0] != 0
def test_function_with_struct_argument(self): if sys.platform == 'win32': py.test.skip("no 'inet_ntoa'") if (self.Backend is CTypesBackend and '__pypy__' in sys.builtin_module_names): py.test.skip("ctypes limitation on pypy") ffi = FFI(backend=self.Backend()) ffi.cdef(""" struct in_addr { unsigned int s_addr; }; char *inet_ntoa(struct in_addr in); """) ffi.C = ffi.dlopen(None) ina = ffi.new("struct in_addr *", [0x04040404]) a = ffi.C.inet_ntoa(ina[0]) assert ffi.string(a) == b'4.4.4.4'
def test_misdeclared_field_1(): ffi = FFI() ffi.cdef("struct foo_s { int a[5]; };") try: verify(ffi, 'test_misdeclared_field_1', "struct foo_s { int a[6]; };") except VerificationError: pass # ok, fail during compilation already (e.g. C++) else: assert ffi.sizeof("struct foo_s") == 24 # found by the actual C code p = ffi.new("struct foo_s *") # lazily build the fields and boom: e = py.test.raises(ffi.error, "p.a") assert str(e.value).startswith("struct foo_s: wrong size for field 'a' " "(cdef says 20, but C compiler says 24)")
def test_address_of_function_with_struct():
    ffi = FFI()
    ffi.cdef("struct foo_s { int x; }; long myfunc(struct foo_s);")
    lib = verify(ffi, "test_addressof_function_with_struct", """
        struct foo_s { int x; };
        long myfunc(struct foo_s input) {   /* return type must match the 'long' in the cdef */
            return (input.x + 42);
        }
    """)
    s = ffi.new("struct foo_s *", [5])[0]
    assert lib.myfunc(s) == 47
    assert not isinstance(lib.myfunc, ffi.CData)
    assert ffi.typeof(lib.myfunc) == ffi.typeof("long(*)(struct foo_s)")
    addr = ffi.addressof(lib, 'myfunc')
    assert addr(s) == 47
    assert isinstance(addr, ffi.CData)
    assert ffi.typeof(addr) == ffi.typeof("long(*)(struct foo_s)")
def test_macro_var_callback(): ffi = FFI() ffi.cdef("int my_value; int *(*get_my_value)(void);") lib = verify(ffi, 'test_macro_var_callback', "int *(*get_my_value)(void);\n" "#define my_value (*get_my_value())") # values = ffi.new("int[50]") def it(): for i in range(50): yield i it = it() # @ffi.callback("int *(*)(void)") def get_my_value(): for nextvalue in it: return values + nextvalue lib.get_my_value = get_my_value # values[0] = 41 assert lib.my_value == 41 # [0] p = ffi.addressof(lib, 'my_value') # [1] assert p == values + 1 assert p[-1] == 41 assert p[+1] == 0 lib.my_value = 42 # [2] assert values[2] == 42 assert p[-1] == 41 assert p[+1] == 42 # # if get_my_value raises or returns nonsense, the exception is printed # to stderr like with any callback, but then the C expression 'my_value' # expand to '*NULL'. We assume here that '&my_value' will return NULL # without segfaulting, and check for NULL when accessing the variable. @ffi.callback("int *(*)(void)") def get_my_value(): raise LookupError lib.get_my_value = get_my_value py.test.raises(ffi.error, getattr, lib, 'my_value') py.test.raises(ffi.error, setattr, lib, 'my_value', 50) py.test.raises(ffi.error, ffi.addressof, lib, 'my_value') @ffi.callback("int *(*)(void)") def get_my_value(): return "hello" lib.get_my_value = get_my_value py.test.raises(ffi.error, getattr, lib, 'my_value') e = py.test.raises(ffi.error, setattr, lib, 'my_value', 50) assert str(e.value) == "global variable 'my_value' is at address NULL"
def test_include_4(): ffi1 = FFI() ffi1.cdef("typedef struct { int x; } mystruct_t;") verify(ffi1, "test_include_4_parent", "typedef struct { int x; } mystruct_t;") ffi = FFI() ffi.include(ffi1) ffi.cdef("mystruct_t *ff4(mystruct_t *);") lib = verify(ffi, "test_include_4", "typedef struct {int x; } mystruct_t; //usually from a #include\n" "mystruct_t *ff4(mystruct_t *p) { p->x += 42; return p; }") p = ffi.new("mystruct_t *", [10]) q = lib.ff4(p) assert q == p assert p.x == 52 assert ffi1.typeof("mystruct_t") is ffi.typeof("mystruct_t")
def test_struct_array_no_length(self): ffi = FFI() ffi.cdef("struct foo_s { int x; int a[]; };") p = ffi.new("struct foo_s *", [100, [200, 300, 400]]) assert p.x == 100 assert ffi.typeof(p.a) is ffi.typeof("int[]") assert len(p.a) == 3 # length recorded assert p.a[0] == 200 assert p.a[1] == 300 assert p.a[2] == 400 assert list(p.a) == [200, 300, 400] q = ffi.cast("struct foo_s *", p) assert q.x == 100 assert ffi.typeof(q.a) is ffi.typeof("int *") # no length recorded py.test.raises(TypeError, len, q.a) assert q.a[0] == 200 assert q.a[1] == 300 assert q.a[2] == 400 py.test.raises(TypeError, list, q.a)
def symbols(headers): ffi = FFI() ffi.cdef(headers) nanomsg = ffi.dlopen('nanomsg') lines = [] for i in range(1024): val = ffi.new('int*') name = nanomsg.nn_symbol(i, val) if name == ffi.NULL: break name = ffi.string(name) name = name[3:] if name.startswith('NN_') else name lines.append('%s = %s' % (name, val[0])) return '\n'.join(lines) + '\n'
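# Usage sketch for symbols(): the helper only needs nn_symbol, whose public
# nanomsg prototype is given below.  The header text here is an assumption for
# illustration, not taken from the original code.
nanomsg_header = "const char *nn_symbol (int i, int *value);"
print(symbols(nanomsg_header))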
def __init__( s, vcd_file='' ): # initialize FFI, define the exposed interface ffi = FFI() ffi.cdef(''' typedef struct {{ // Exposed port interface {port_decls} // Verilator model void * model; }} V{model_name}_t; V{model_name}_t * create_model( const char * ); void destroy_model( V{model_name}_t *); void eval( V{model_name}_t * ); ''') # set vcd_file attribute, give verilator_vcd_file a slightly # different name so PyMTL .vcd and Verilator .vcd can coexist s.vcd_file = vcd_file verilator_vcd_file = vcd_file if vcd_file: filen, ext = os.path.splitext( vcd_file ) verilator_vcd_file = '{{}}.verilator{{}}{{}}'.format(filen, s.id_, ext) # import the shared library containing the model and construct it s._ffi = ffi.dlopen('./{lib_file}') s._m = s._ffi.create_model( ffi.new("char[]", verilator_vcd_file) ) # dummy class to emulate PortBundles class BundleProxy( PortBundle ): flip = False # define the port interface {port_defs} # increment instance count {model_name}.id_ += 1
def test_include_5(): ffi1 = FFI() ffi1.cdef("typedef struct { int x[2]; int y; } *mystruct_p;") verify(ffi1, "test_include_5_parent", "typedef struct { int x[2]; int y; } *mystruct_p;") ffi = FFI() ffi.include(ffi1) ffi.cdef("mystruct_p ff5(mystruct_p);") lib = verify(ffi, "test_include_5", "typedef struct {int x[2]; int y; } *mystruct_p; //usually #include\n" "mystruct_p ff5(mystruct_p p) { p->x[1] += 42; return p; }") assert ffi.alignof(ffi.typeof("mystruct_p").item) == 4 assert ffi1.typeof("mystruct_p") is ffi.typeof("mystruct_p") p = ffi.new("mystruct_p", [[5, 10], -17]) q = lib.ff5(p) assert q == p assert p.x[0] == 5 assert p.x[1] == 52 assert p.y == -17 assert ffi.alignof(ffi.typeof(p[0])) == 4
def test_modify_struct_value(self): if self.module is None: py.test.skip("fix the auto-generation of the tiny test lib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" typedef struct { long left; long top; long right; long bottom; } RECT; void modify_struct_value(RECT r); """) lib = ffi.dlopen(self.module) s = ffi.new("RECT *", [11, 22, 33, 44]) lib.modify_struct_value(s[0]) assert s.left == 11 assert s.top == 22 assert s.right == 33 assert s.bottom == 44
def as_native_time_series(ffi: FFI, data: TimeSeriesLike) -> OwningCffiNativeHandle: ptr = ffi.new("multi_regular_time_series_data*") tsg = get_native_tsgeom(ffi, data) ptr.time_series_geometry = tsg.obj if isinstance(data, xr.DataArray): ensemble_size = len(data.coords[ENSEMBLE_DIMNAME].values) np_data = data.values elif isinstance(data, pd.Series): ensemble_size = 1 np_data = data.values elif isinstance(data, pd.DataFrame): ensemble_size = data.shape[1] np_data = data.values.transpose() else: raise TypeError("Not recognised as a type of time series: " + str(type(data))) ptr.ensemble_size = ensemble_size num_data = two_d_np_array_double_to_native(ffi, np_data) ptr.numeric_data = num_data.ptr result = OwningCffiNativeHandle(ptr) result.keepalive = [tsg, num_data] return result
from cffi import FFI import subprocess # build with # g++ -dynamiclib -DSTAND_ALONE -o queue.dylib queue.c header = "queue.h" ffi = FFI() ffi.cdef( subprocess.Popen( ['cc', '-E', '-DQUEUE_API=', '-DQUEUE_NO_INCLUDES', header], stdout=subprocess.PIPE).communicate()[0]) lib = ffi.dlopen('queue.dylib') size = 13 buf = ffi.new("uint32_t[]", size) q = ffi.new("queue_t *") lib.queue_init(q, buf, size) for i in range(45): lib.queue_put(q, i) val = ffi.new("uint32_t *") vals = [] has_elems = True while has_elems: if lib.queue_get(q, val): vals.append(val[0]) else: has_elems = False
class BlackHoleSolver(object): """ The main solver class. """ BLACK_HOLE_SOLVER__SUCCESS = 0 BLACK_HOLE_SOLVER__END = 9 BLACK_HOLE_SOLVER__OUT_OF_ITERS = 10 # TEST:$num_befs_weights=5; BHS__BLACK_HOLE__BITS_PER_COL = 2 BHS__BLACK_HOLE__MAX_NUM_CARDS_IN_COL = 3 BHS__BLACK_HOLE__NUM_COLUMNS = 17 def __init__(self, ffi=None, lib=None): self.user = None if ffi: self.ffi = ffi self.lib = lib else: self.ffi = FFI() self.lib = self.ffi.dlopen("libblack_hole_solver." + ("dll" if ( platform.system() == 'Windows') else "so.1")) if not self.lib: self.ffi = None raise ImportError("Could not find shared library") self.ffi.cdef(''' typedef struct { unsigned long nothing; } black_hole_solver_instance_t; int black_hole_solver_create( black_hole_solver_instance_t **ret_instance); int black_hole_solver_read_board( black_hole_solver_instance_t *instance, const char *board_string, int *error_line_number, unsigned int num_columns, unsigned int max_num_cards_in_col, unsigned int bits_per_column); int black_hole_solver_set_max_iters_limit( black_hole_solver_instance_t *instance, unsigned long limit); int black_hole_solver_enable_place_queens_on_kings( black_hole_solver_instance_t *instance, bool enabled_status); int black_hole_solver_enable_wrap_ranks( black_hole_solver_instance_t *instance, bool enabled_status); int black_hole_solver_enable_rank_reachability_prune( black_hole_solver_instance_t *instance, bool enabled_status); #define BLACK_HOLE_SOLVER__API__REQUIRES_SETUP_CALL 1 int black_hole_solver_config_setup( black_hole_solver_instance_t *instance); int black_hole_solver_setup( black_hole_solver_instance_t *instance); int black_hole_solver_run( black_hole_solver_instance_t *instance); int black_hole_solver_recycle( black_hole_solver_instance_t *instance); int black_hole_solver_free( black_hole_solver_instance_t *instance); void black_hole_solver_init_solution_moves( black_hole_solver_instance_t *instance); int black_hole_solver_get_next_move( black_hole_solver_instance_t *instance, int *col_idx_ptr, int *card_rank_ptr, int *card_suit_ptr /* Will return H=0, C=1, D=2, S=3 */ ); unsigned long black_hole_solver_get_num_states_in_collection( black_hole_solver_instance_t *instance); unsigned long black_hole_solver_get_iterations_num( black_hole_solver_instance_t *instance); int black_hole_solver_get_current_solution_board( black_hole_solver_instance_t *instance, char *output); const char *black_hole_solver_get_lib_version(void); ''') self._user_container = self.ffi.new('black_hole_solver_instance_t * *') self._error_on_line = self.ffi.new('int *') self._col_idx_ptr = self.ffi.new('int *') self._card_rank_ptr = self.ffi.new('int *') self._card_suit_ptr = self.ffi.new('int *') assert 0 == self.lib.black_hole_solver_create(self._user_container) self.user = self._user_container[0] self.lib.black_hole_solver_enable_rank_reachability_prune(self.user, 1) def new_bhs_user_handle(self): return self.__class__(ffi=self.ffi, lib=self.lib) def ret_code_is_suspend(self, ret_code): """docstring for ret_code_is_suspend""" return ret_code == self.BLACK_HOLE_SOLVER__OUT_OF_ITERS def get_next_move(self): """ Returns the next move or None if they were all retrieved. 
""" if len(self._moves): return self._moves.pop(0) return None def input_cmd_line(self, cmd_line_args): return {'last_arg': 0, 'cmd_line_args_len': len(cmd_line_args)} def __del__(self): if self.user: self.lib.black_hole_solver_free(self.user) self.user = None self._user_container = None self._error_on_line = None self._col_idx_ptr = None self._card_rank_ptr = None self._card_suit_ptr = None NUM_COLUMNS = {'black_hole': 17, 'all_in_a_row': 13, 'golf': 7} MAX_NUM_CARDS_IN_COL = {'black_hole': 3, 'all_in_a_row': 4, 'golf': 5} BITS_PER_COL = {'black_hole': 2, 'all_in_a_row': 3, 'golf': 3} def read_board(self, board, game_type, place_queens_on_kings, wrap_ranks): """ game_type is either 'golf' , 'black_hole' or 'all_in_a_row' """ self.lib.black_hole_solver_enable_wrap_ranks(self.user, wrap_ranks) self.lib.black_hole_solver_enable_place_queens_on_kings( self.user, place_queens_on_kings) self.lib.black_hole_solver_config_setup(self.user) ret = self.lib.black_hole_solver_read_board( self.user, bytes(board, 'UTF-8'), self._error_on_line, self.NUM_COLUMNS[game_type], self.MAX_NUM_CARDS_IN_COL[game_type], self.BITS_PER_COL[game_type], ) assert ret == 0 assert 0 == self.lib.black_hole_solver_setup(self.user) return ret def resume_solution(self): ret = self.lib.black_hole_solver_run(self.user) if ret == self.BLACK_HOLE_SOLVER__SUCCESS: self.lib.black_hole_solver_init_solution_moves(self.user) def wrap(): return self.lib.black_hole_solver_get_next_move( self.user, self._col_idx_ptr, self._card_rank_ptr, self._card_suit_ptr, ) _moves = [] ret_code = wrap() while ret_code == self.BLACK_HOLE_SOLVER__SUCCESS: _moves.append( BlackHoleSolverMove(column_idx=self._col_idx_ptr[0])) ret_code = wrap() assert ret_code == self.BLACK_HOLE_SOLVER__END self._moves = _moves return ret def limit_iterations(self, max_iters): self.lib.black_hole_solver_set_max_iters_limit(self.user, max_iters) def get_num_times(self): return self.lib.black_hole_solver_get_iterations_num(self.user) def get_num_states_in_collection(self): return self.lib.black_hole_solver_get_num_states_in_collection( self.user) def recycle(self): return self.lib.black_hole_solver_recycle(self.user)
def test_unpack(self):
    ffi = FFI()
    p = ffi.new("char[]", b"abc\x00def")
    assert ffi.unpack(p + 1, 7) == b"bc\x00def\x00"
    p = ffi.new("int[]", [-123456789])
    assert ffi.unpack(p, 1) == [-123456789]
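# Sketch (not part of the test above): ffi.unpack() is length-driven -- it reads
# exactly n items and does not stop at '\x00', unlike ffi.string().  For non-char
# pointers it returns a Python list of the items.
ffi = FFI()
f = ffi.new("float[]", [1.5, 2.5, 3.5])
assert ffi.unpack(f, 3) == [1.5, 2.5, 3.5]
s = ffi.new("char[]", b"ab\x00cd")
assert ffi.unpack(s, 5) == b"ab\x00cd"   # keeps reading past the embedded NUL
assert ffi.string(s) == b"ab"            # ffi.string() stops at the first NUL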
#!/usr/bin/env python import os import sys from cffi import FFI ffi = FFI() ffi.cdef(''' void count_records(char *); ''') if sys.platform == 'darwin': prefix = 'lib' ext = 'dylib' elif sys.platform == 'win32': prefix = '' ext = 'dll' else: prefix = 'lib' ext = 'so' dylib_file = 'rust/cffi/target/release/{}cffi_rust_csv_parser.{}'.format( prefix, ext) path = os.path.dirname(os.path.abspath(__file__)) CAPI = ffi.dlopen(os.path.join(path, dylib_file)) c_filename = ffi.new('char[]', sys.argv[1]) CAPI.count_records(c_filename)
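# On Python 3, sys.argv[1] is a str while ffi.new('char[]', ...) wants bytes.
# A variant of the last two lines above that works on both Python 2 and 3 (sketch):
c_filename = ffi.new('char[]', sys.argv[1].encode('utf-8'))
CAPI.count_records(c_filename)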
ffi = FFI()
libtyro = ffi.dlopen('../../target/debug/libtyro.so')
print('Loaded lib {0}'.format(libtyro))

# Describe the data type and function prototypes to cffi.
ffi.cdef('''
typedef void** Tyro;
void hello(int a);
Tyro new_tyro(void);
int add_100(Tyro, int);
void drop_tyro(Tyro);
''')

# Allocate a Tyro handle and initialize it from the library.
tyro = ffi.new('Tyro', libtyro.new_tyro())

print('Calling add_100 via cffi')
# Interesting variation: passing invalid arguments to add_100 will trigger
# a cffi type-checking exception, e.g.:
# libtyro.add_100(tyro[0], "not an int")
libtyro.hello(5)
addend = libtyro.add_100(tyro[0], 55)
libtyro.hello(addend)
libtyro.drop_tyro(tyro[0])
from cffi import FFI

ffi = FFI()

## I got a Py string, want to process in C, and get back
## a Py string.  Roundtrip over C.  This will incur 2 copies.
N = 1000
s_in = b"\x00" * N
buf = ffi.new("char[]", s_in)
print(len(buf))

## => Do something in C with buf

#s_out = ffi.buffer(buf)[:-1]              # ffi will append a \0 octet, eat it!
s_out = ffi.buffer(buf, len(buf) - 1)[:]   # ffi will append a \0 octet, eat it!
buf = None  # GC of underlying mem
assert len(s_in) == len(s_out)
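# When the two copies matter, cffi can also expose an existing Python buffer to C
# without copying.  Sketch, assuming a reasonably recent cffi: a bytes object is
# read-only, so use a bytearray if the C side must write into it.
data = bytearray(b"\x00" * N)
cbuf = ffi.from_buffer(data)      # <cdata 'char[]'> sharing data's memory, no copy
## => Do something in C with cbuf
s_out2 = bytes(data)              # the Python side sees any in-place modifications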
class _PcapFfi(object): """ This class represents the low-level interface to the libpcap library. It encapsulates all the cffi calls and C/Python conversions, as well as translation of errors and error codes to PcapExceptions. It is intended to be used as a singleton class through the PcapDumper and PcapLiveDevice classes, below. """ _instance = None __slots__ = ['_ffi', '_libpcap', '_interfaces', '_windows'] def __init__(self): """ Assumption: this class is instantiated once in the main thread before any other threads have a chance to try instantiating it. """ if _PcapFfi._instance: raise Exception("Can't initialize this class more than once!") _PcapFfi._instance = self self._windows = False self._ffi = FFI() self._ffi.cdef(cc, override=True) self._ffi.cdef(cc_packed, override=True, packed=1) if sys.platform == 'darwin': libname = 'libpcap.dylib' elif "win" in sys.platform[:3]: libname = 'wpcap.dll' # winpcap self._windows = True else: # if not macOS (darwin) or windows, assume we're on # some unix-based system and try for libpcap.so libname = 'libpcap.so' try: self._libpcap = self._ffi.dlopen(libname) except Exception as e: raise PcapException("Error opening libpcap: {}".format(e)) self._interfaces = [] self.discoverdevs() @staticmethod def instance(): if not _PcapFfi._instance: _PcapFfi._instance = _PcapFfi() return _PcapFfi._instance @property def version(self): return self._ffi.string(self._libpcap.pcap_lib_version()) def discoverdevs(self): """ Find all the pcap-eligible devices on the local system. """ if len(self._interfaces): raise PcapException("Device discovery should only be done once.") ppintf = self._ffi.new("pcap_if_t * *") errbuf = self._ffi.new("char []", 128) rv = self._libpcap.pcap_findalldevs(ppintf, errbuf) if rv: raise PcapException("pcap_findalldevs returned failure: {}".format( self._ffi.string(errbuf))) pintf = ppintf[0] tmp = pintf pindex = 0 while tmp != self._ffi.NULL: xname = self._ffi.string( tmp.name) # "internal name"; still stored as bytes object xname = xname.decode('ascii', 'ignore') if self._windows: ext_name = "port{}".format(pindex) else: ext_name = xname pindex += 1 if tmp.description == self._ffi.NULL: xdesc = ext_name else: xdesc = self._ffi.string(tmp.description) xdesc = xdesc.decode('ascii', 'ignore') # NB: on WinPcap, only loop flag is set isloop = (tmp.flags & 0x1) == 0x1 isup = (tmp.flags & 0x2) == 0x2 isrunning = (tmp.flags & 0x4) == 0x4 xif = PcapInterface(ext_name, xname, xdesc, isloop, isup, isrunning) self._interfaces.append(xif) tmp = tmp.next self._libpcap.pcap_freealldevs(pintf) @property def devices(self): return self._interfaces @property def lib(self): return self._libpcap @property def ffi(self): return self._ffi def _process_packet(self, xdev, header, packet, nroots): # MPLS header mpls = self._ffi.new("union mpls *") # IP header iph = self._ffi.new("struct nfstream_iphdr *") # IPv6 header iph6 = self._ffi.new("struct nfstream_ipv6hdr *") # lengths and offsets eth_offset = 0 radio_len = 0 fc = 0 type = 0 wifi_len = 0 pyld_eth_len = 0 check = 0 ip_offset = 0 ip_len = 0 frag_off = 0 vlan_id = 0 proto = 0 time = 0 time = (header.tv_sec * TICK_RESOLUTION) + (header.tv_usec / (1000000 / TICK_RESOLUTION)) datalink_type = self._libpcap.pcap_datalink(xdev) datalink_check = True while datalink_check: datalink_check = False if Dlt(datalink_type) == Dlt.DLT_NULL: tmp_dlt_null = self._ffi.cast('struct pp_32 *', packet + eth_offset) if int(ntohs(tmp_dlt_null.value)) == 2: type = 0x0800 else: type = 0x86dd ip_offset = 4 + eth_offset elif Dlt( 
datalink_type ) == Dlt.DLT_PPP_SERIAL: # Cisco PPP in HDLC - like framing - 50 chdlc = self._ffi.cast('struct nfstream_chdlc *', packet + eth_offset) ip_offset = self._ffi.sizeof( 'struct nfstream_chdlc') # CHDLC_OFF = 4 type = ntohs(chdlc.proto_code) elif (Dlt(datalink_type) == Dlt.DLT_C_HDLC) or ( Dlt(datalink_type) == Dlt.DLT_PPP): # Cisco PPP - 9 or 104 chdlc = self._ffi.cast('struct nfstream_chdlc *', packet + eth_offset) # CHDLC_OFF = 4 ip_offset = self._ffi.sizeof( 'struct nfstream_chdlc') # CHDLC_OFF = 4 type = ntohs(chdlc.proto_code) elif Dlt(datalink_type ) == Dlt.DLT_EN10MB: # IEEE 802.3 Ethernet - 1 */ ethernet = self._ffi.cast('struct nfstream_ethhdr *', packet + eth_offset) ip_offset = self._ffi.sizeof( 'struct nfstream_ethhdr') + eth_offset check = ntohs(ethernet.h_proto) if check <= 1500: pyld_eth_len = check elif check >= 1536: type = check if pyld_eth_len != 0: llc = self._ffi.cast('struct nfstream_llc_header_snap *', packet + ip_offset) if (llc.dsap == 0xaa) or ( llc.ssap == 0xaa): # check for LLC layer with SNAP ext type = llc.snap.proto_ID ip_offset += 8 elif (llc.dsap == 0x42) or (llc.ssap == 0x42): # No SNAP ext return None elif Dlt(datalink_type ) == Dlt.DLT_LINUX_SLL: # Linux Cooked Capture - 113 type = (packet[eth_offset + 14] << 8) + packet[eth_offset + 15] ip_offset = 16 + eth_offset elif Dlt( datalink_type ) == Dlt.DLT_IEEE802_11_RADIO: # Radiotap link - layer - 127 radiotap = self._ffi.cast('struct nfstream_radiotap_header *', packet + eth_offset) radio_len = radiotap.len if (radiotap.flags & 0x50) == 0x50: # Check Bad FCS presence return None # Calculate 802.11 header length(variable) wifi = self._ffi.cast('struct nfstream_wifi_header *', packet + (eth_offset + radio_len)) fc = wifi.fc # Check wifi data presence if fcf_type(fc) == 0x2: if (fcf_to_ds(fc) and fcf_from_ds(fc) == 0x0) or ( fcf_to_ds(fc) == 0x0 and fcf_from_ds(fc)): wifi_len = 26 # + 4 byte fcs else: pass # Check ether_type from LLC llc = self._ffi.cast( 'struct nfstream_llc_header_snap *', packet + (eth_offset + wifi_len + radio_len)) if llc.dsap == 0xaa: type = ntohs(llc.snap.proto_ID) # Set IP header offset ip_offset = wifi_len + radio_len + self._ffi.sizeof( 'struct nfstream_llc_header_snap') + eth_offset elif Dlt(datalink_type) == Dlt.DLT_RAW: ip_offset = 0 eth_offset = 0 else: return None ether_type_check = True while ether_type_check: ether_type_check = False if type == 0x8100: vlan_id = ((packet[ip_offset] << 8) + packet[ip_offset + 1]) & 0xFFF type = (packet[ip_offset + 2] << 8) + packet[ip_offset + 3] ip_offset += 4 while type == 0x8100 and ip_offset < header.caplen: # Double tagging for 802.1Q vlan_id = ((packet[ip_offset] << 8) + packet[ip_offset + 1]) & 0xFFF type = ( packet[ip_offset + 2] << 8) + packet[ip_offset + 3] ip_offset += 4 ether_type_check = True elif (type == 0x8847) or (type == 0x8848): tmp_u32 = self._ffi.cast('struct pp_32 *', packet + ip_offset) mpls.u32 = int(ntohl(tmp_u32.value)) type = 0x0800 ip_offset += 4 while not mpls.mpls.s: tmp_u32_loop = self._ffi.cast('struct pp_32 *', packet + ip_offset) mpls.u32 = int(ntohl(tmp_u32_loop.value)) ip_offset += 4 ether_type_check = True elif type == 0x8864: type = 0x0800 ip_offset += 8 ether_type_check = True else: pass ip_check = True while ip_check: ip_check = False # Check and set IP header size and total packet length iph = self._ffi.cast('struct nfstream_iphdr *', packet + ip_offset) # Just work on Ethernet packets that contain IP if (type == 0x0800) and (header.caplen >= ip_offset): frag_off = ntohs(iph.frag_off) if 
header.caplen < header.len: pass if iph.version == 4: ip_len = iph.ihl * 4 iph6 = self._ffi.NULL if iph.protocol == 41: # IPPROTO_IPV6 ip_offset += ip_len ip_check = True if (frag_off & 0x1FFF) != 0: return None elif iph.version == 6: iph6 = self._ffi.cast('struct nfstream_ipv6hdr *', packet + ip_offset) ip_len = self._ffi.sizeof('struct nfstream_ipv6hdr') if iph6.ip6_hdr.ip6_un1_nxt == 60: # IPv6 destination option options = self._ffi.cast('uint8_t *', packet + (ip_offset + ip_len)) ip_len += 8 * (options[1] + 1) iph = self._ffi.NULL else: return None l4_offset = 0 ipsize = 0 src_addr = 0 dst_addr = 0 l4_packet_len = 0 version = 0 nfstream_hash = 0 if iph6 == self._ffi.NULL: version = 4 l4_packet_len = ntohs(iph.tot_len) - (iph.ihl * 4) ipsize = header.caplen - ip_offset proto = iph.protocol src_addr = ntohl(iph.saddr) dst_addr = ntohl(iph.daddr) nfstream_hash += iph.saddr + iph.daddr + proto + vlan_id else: version = 6 src_addr = ntohl(iph6.ip6_src.u6_addr.u6_addr32[0]) << 96 | ntohl( iph6.ip6_src.u6_addr.u6_addr32[1]) << 64 | ntohl( iph6.ip6_src.u6_addr.u6_addr32[2]) << 32 | ntohl( iph6.ip6_src.u6_addr.u6_addr32[3]) dst_addr = ntohl(iph6.ip6_dst.u6_addr.u6_addr32[0]) << 96 | ntohl( iph6.ip6_dst.u6_addr.u6_addr32[1]) << 64 | ntohl( iph6.ip6_dst.u6_addr.u6_addr32[2]) << 32 | ntohl( iph6.ip6_dst.u6_addr.u6_addr32[3]) proto = iph6.ip6_hdr.ip6_un1_nxt if proto == 60: options = self._ffi.cast( 'uint8_t *', iph6) + self._ffi.sizeof('struct nfstream_ipv6hdr') proto = options[0] l4_packet_len = ntohs(iph6.ip6_hdr.ip6_un1_plen) nfstream_hash += ( iph6.ip6_src.u6_addr.u6_addr32[2] + iph6.ip6_src.u6_addr.u6_addr32[3]) + ( iph6.ip6_dst.u6_addr.u6_addr32[2] + iph6.ip6_dst.u6_addr.u6_addr32[3]) + proto + vlan_id if version == 4: if ipsize < 20: return None if ((iph.ihl * 4) > ipsize) or (ipsize < ntohs(iph.tot_len)): return None l4_offset = iph.ihl * 4 l3 = self._ffi.cast('uint8_t *', iph) else: l4_offset = self._ffi.sizeof('struct nfstream_ipv6hdr') l3 = self._ffi.cast('uint8_t *', iph6) l4 = self._ffi.cast('uint8_t *', l3) + l4_offset syn, cwr, ece, urg, ack, psh, rst, fin = 0, 0, 0, 0, 0, 0, 0, 0 if (proto == 6 ) and l4_packet_len >= self._ffi.sizeof('struct nfstream_tcphdr'): tcph = self._ffi.cast('struct nfstream_tcphdr *', l4) sport = int(ntohs(tcph.source)) dport = int(ntohs(tcph.dest)) syn = int(tcph.syn) cwr = int(tcph.cwr) ece = int(tcph.ece) urg = int(tcph.urg) ack = int(tcph.ack) psh = int(tcph.psh) rst = int(tcph.rst) fin = int(tcph.fin) elif (proto == 17) and l4_packet_len >= self._ffi.sizeof( 'struct nfstream_udphdr'): udph = self._ffi.cast('struct nfstream_udphdr *', l4) sport = int(ntohs(udph.source)) dport = int(ntohs(udph.dest)) else: sport = 0 dport = 0 nfstream_hash += sport + dport if version == 4: return NFPacket(time=int(time), capture_length=header.caplen, length=header.len, nfhash=nfstream_hash, ip_src=src_addr, ip_dst=dst_addr, src_port=sport, dst_port=dport, protocol=proto, vlan_id=vlan_id, version=version, tcpflags=tcpflags(syn=syn, cwr=cwr, ece=ece, urg=urg, ack=ack, psh=psh, rst=rst, fin=fin), raw=bytes(xffi.buffer(iph, ipsize)), root_idx=nfstream_hash % nroots) else: return NFPacket(time=int(time), capture_length=header.caplen, length=header.len, nfhash=nfstream_hash, ip_src=src_addr, ip_dst=dst_addr, src_port=sport, dst_port=dport, protocol=proto, vlan_id=vlan_id, version=version, tcpflags=tcpflags(syn=syn, cwr=cwr, ece=ece, urg=urg, ack=ack, psh=psh, rst=rst, fin=fin), raw=bytes(xffi.buffer(iph6, header.len - ip_offset)), root_idx=nfstream_hash % nroots) def 
_recv_packet(self, xdev, nroots=1): phdr = self._ffi.new("struct pcap_pkthdr **") pdata = self._ffi.new("unsigned char **") rv = self._libpcap.pcap_next_ex(xdev, phdr, pdata) if rv == 1: return self._process_packet(xdev, phdr[0], pdata[0], nroots) elif rv == 0: # timeout; nothing to return return 0 elif rv == -1: # error on receive; raise an exception s = self._ffi.string(self._libpcap.pcap_geterr(xdev)) raise PcapException("Error receiving packet: {}".format(s)) elif rv == -2: # reading from savefile, but none left return -2
def new_date_time_to_second(ffi: FFI) -> OwningCffiNativeHandle:
    ptr = ffi.new("date_time_to_second*")
    return OwningCffiNativeHandle(ptr)
def datetime_to_dtts(ffi: FFI, dt: datetime) -> OwningCffiNativeHandle:
    ptr = ffi.new("date_time_to_second*")
    _copy_datetime_to_dtts(dt, ptr)
    return OwningCffiNativeHandle(ptr)
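# _copy_datetime_to_dtts() is not shown in this collection; a plausible sketch is
# below, assuming the date_time_to_second struct exposes year/month/day/hour/
# minute/second fields (the field names are assumptions -- adjust to the actual cdef).
def _copy_datetime_to_dtts(dt: datetime, ptr) -> None:
    ptr.year = dt.year
    ptr.month = dt.month
    ptr.day = dt.day
    ptr.hour = dt.hour
    ptr.minute = dt.minute
    ptr.second = dt.second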
def as_charptr(ffi: FFI, x: str, wrap=False) -> CffiData:
    x = ffi.new("char[]", as_bytes(x))
    if wrap:
        return OwningCffiNativeHandle(x)
    else:
        return x
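# For completeness, the inverse conversion is a one-liner with ffi.string(); the
# helper name as_str is hypothetical (not part of the original code).
def as_str(ffi: FFI, ptr) -> str:
    return ffi.string(ptr).decode("utf-8")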
class FED(object): def __init__(self): self.ffi = FFI() # List of the functions in the library self.ffi.cdef(""" int fed_tau_by_steps(int n, float tau_max, int reordering, float **tau); int fed_tau_by_cycle_time(float t, float tau_max, int reordering, float **tau); int fed_tau_by_process_time(float T, int M, float tau_max, int reordering, float **tau); float fed_max_cycle_time_by_steps(int n, float tau_max); float fed_max_process_time_by_steps(int n, int M, float tau_max); int fastjac_relax_params(int n, float omega_max, int reordering, float **omega); """) # Load library self.fedlib = self.ffi.dlopen("fedfjlib//fed//fed.so") def fed_tau_by_steps(self, n, tau_max, reordering): """ Allocates an array of n time steps and fills it with FED time step sizes, such that the maximal stopping time for this cycle is obtained. RETURNS n if everything is ok, or 0 on failure. Input: n : Desired number of internal steps tau_max : Stability limit for explicit (0.5^Dim) reordering : Reordering flag Output: tau : Time step widths (allocated inside) """ tau = self.ffi.new("float **") # Declare the output result = self.fedlib.fed_tau_by_steps( n, tau_max, reordering, tau) # Call the function written in C return list(tau[0][0:result]) def fed_tau_by_cycle_time(self, t, tau_max, reordering): """ Allocates an array of the least number of time steps such that a certain stopping time per cycle can be obtained, and fills it with the respective FED time step sizes. RETURNS number of time steps per cycle, or 0 on failure. Input: t : Desired cycle stopping time tau_max : Stability limit for explicit (0.5^Dim) reordering : Reordering flag Output: tau : Time step widths (allocated inside) """ tau = self.ffi.new("float **") result = self.fedlib.fed_tau_by_cycle_time(t, tau_max, reordering, tau) return list(tau[0][0:result]) def fed_tau_by_process_time(self, T, M, tau_max, reordering): """ Allocates an array of the least number of time steps such that a certain stopping time for the whole process can be obtained, and fills it with the respective FED time step sizes for one cycle. RETURNS number of time steps per cycle, or 0 on failure. Input: T : Desired process stopping time M : Desired number of cycles tau_max : Stability limit for explicit (0.5^Dim) reordering : Reordering flag Output: tau : Time step widths (allocated inside) """ tau = self.ffi.new("float **") result = self.fedlib.fed_tau_by_process_time(T, M, tau_max, reordering, tau) return list(tau[0][0:result]) def fed_max_cycle_time_by_steps(self, n, tau_max): """ Computes the maximal cycle time that can be obtained using a certain number of steps. This corresponds to the cycle time that arises from a tau array which has been created using fed_tau_by_steps. RETURNS cycle time t Input: n : Number of steps per FED cycle tau_max : Stability limit for explicit (0.5^Dim) Output: t : Cycle time t """ return self.fedlib.fed_max_cycle_time_by_steps(n, tau_max) def fed_max_process_time_by_steps(self, n, M, tau_max): """ Computes the maximal process time that can be obtained using a certain number of steps. This corresponds to the cycle time that arises from a tau array which has been created using fed_tau_by_steps. 
RETURNS cycle time t Input: n : Number of steps per FED cycle M : Number of cycles tau_max : Stability limit for explicit (0.5^Dim) Output: t : Cycle time t """ return self.fedlib.fed_max_process_time_by_steps(n, M, tau_max) def fastjac_relax_params(self, n, omega_max, reordering): """ Allocates an array of n relaxation parameters and fills it with the FED based parameters for Fast-Jacobi. RETURNS n if everything is ok, or 0 on failure. Input: n : Cycle length omega_max : Stability limit for Jacobi over-relax. reordering : Reordering flag Output: omega : Relaxation parameters (allocated inside) """ omega = self.ffi.new("float **") # Declare the output result = self.fedlib.fastjac_relax_params( n, omega_max, reordering, omega) # Call the function written in C return list(omega[0][0:result])
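# Usage sketch for the FED wrapper above (illustrative values): assumes the
# relative path to fed.so hard-coded in __init__ resolves from the current
# working directory.
fed = FED()
taus = fed.fed_tau_by_steps(n=10, tau_max=0.25, reordering=1)
print(len(taus), sum(taus))
# Per the docstrings above, the summed step sizes correspond to the maximal
# cycle time obtainable with 10 steps:
print(fed.fed_max_cycle_time_by_steps(10, 0.25))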
class H264Decoder: def __init_ffi(self): self.ffi = FFI() self.ffi.cdef(''' // AVCODEC enum PixelFormat { PIX_FMT_YUV420P, PIX_FMT_RGB24, ... }; void avcodec_register_all(void); struct AVPacket { ...; uint8_t *data; int size; ...; }; void av_init_packet(struct AVPacket *pkt); enum AVCodecID { AV_CODEC_ID_H264, ... }; struct AVCodec *avcodec_find_decoder(enum AVCodecID id); struct AVCodecContext *avcodec_alloc_context3(struct AVCodec *codec); int avcodec_open2(struct AVCodecContext *avctx, struct AVCodec *codec, struct AVDictionary **options); struct AVFrame { uint8_t *data[8]; int linesize[8]; ...; int key_frame; ...; }; struct AVFrame *avcodec_alloc_frame(void); int avcodec_decode_video2(struct AVCodecContext *avctx, struct AVFrame *picture, int *got_picture_ptr, struct AVPacket *avpkt); int avcodec_close(struct AVCodecContext *avctx); void av_free(void *ptr); int avpicture_get_size(enum PixelFormat pix_fmt, int width, int height); int avpicture_fill(struct AVPicture *picture, uint8_t *ptr, int pix_fmt, int width, int height); // SWSCALE #define SWS_BILINEAR ... #define SWS_FAST_BILINEAR ... struct SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat, int dstW, int dstH, enum PixelFormat dstFormat, int flags, struct SwsFilter *srcFilter, struct SwsFilter *dstFilter, const double *param); int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[]); void sws_freeContext(struct SwsContext *c); ''') self.ns = self.ffi.verify(source=''' #include <libavcodec/avcodec.h> #include <libswscale/swscale.h> ''', libraries=['avcodec', 'swscale']) def __init_avcodec(self): self.ns.avcodec_register_all() self.av_packet = self.ffi.new('struct AVPacket *') self.ns.av_init_packet(self.av_packet) self.codec = self.ns.avcodec_find_decoder(self.ns.AV_CODEC_ID_H264) if not self.codec: raise Exception('avcodec_alloc_context3') self.context = self.ns.avcodec_alloc_context3(self.codec) if not self.context: raise Exception('avcodec_alloc_context3') if self.ns.avcodec_open2(self.context, self.codec, self.ffi.NULL) < 0: raise Exception('avcodec_open2') self.frame = self.ns.avcodec_alloc_frame() if not self.frame: raise Exception('avcodec_alloc_frame') self.got_frame = self.ffi.new('int *') self.out_frame = self.ns.avcodec_alloc_frame() def __init__(self): self.out_buffer, self.sws_context = None, None self.__init_ffi() self.__init_avcodec() self.update_dimensions() def close(self): self.ns.sws_freeContext(self.sws_context) self.ns.av_free(self.out_frame) self.ns.avcodec_close(self.context) self.ns.av_free(self.context) self.ns.av_free(self.frame) def update_dimensions(self): if self.sws_context is not None: self.ns.sws_freeContext(self.sws_context) self.sws_context = self.ns.sws_getContext( constants.WII_VIDEO_WIDTH, constants.WII_VIDEO_HEIGHT, self.ns.PIX_FMT_YUV420P, constants.WII_VIDEO_WIDTH, constants.WII_VIDEO_HEIGHT, self.ns.PIX_FMT_RGB24, self.ns.SWS_FAST_BILINEAR, self.ffi.NULL, self.ffi.NULL, self.ffi.NULL) bytes_req = self.ns.avpicture_get_size(self.ns.PIX_FMT_RGB24, constants.WII_VIDEO_WIDTH, constants.WII_VIDEO_HEIGHT) self.out_buffer = self.ffi.new('uint8_t [%i]' % bytes_req) self.ns.avpicture_fill( self.ffi.cast('struct AVPicture *', self.out_frame), self.out_buffer, self.ns.PIX_FMT_RGB24, constants.WII_VIDEO_WIDTH, constants.WII_VIDEO_HEIGHT) def get_image_buffer(self, encoded_nalu): in_data = self.ffi.new('uint8_t []', encoded_nalu) self.av_packet.data = in_data 
self.av_packet.size = len(encoded_nalu) length = self.ns.avcodec_decode_video2(self.context, self.frame, self.got_frame, self.av_packet) if length < 0: raise Exception('avcodec_decode_video2') elif length != self.av_packet.size: raise Exception('expected to decode a single complete frame') elif self.got_frame[0]: # print 'keyframe:', s.frame.key_frame # convert from YUV to RGB self.ns.sws_scale(self.sws_context, self.frame.data, self.frame.linesize, 0, constants.WII_VIDEO_HEIGHT, self.out_frame.data, self.out_frame.linesize) image_buffer = \ self.ffi.buffer(self.out_frame.data[0], self.out_frame.linesize[0] * constants.WII_VIDEO_HEIGHT) return image_buffer
def get_raw_buffer(buf): """Convert a C buffer into a Python byte sequence""" return ffi.buffer(buf)[:] class VoidPointer(object): """Model a newly allocated pointer to void""" def __init__(self): self._pp = ffi.new("void *[1]") def get(self): return self._pp[0] def address_of(self): return self._pp Array = ffi.new("uint8_t[1]").__class__.__bases__ backend = "cffi" except ImportError: from ctypes import (CDLL, c_void_p, byref, c_ulong, c_ulonglong, c_size_t, create_string_buffer) from ctypes.util import find_library from _ctypes import Array null_pointer = None def load_lib(name, cdecl): import platform bits, linkage = platform.architecture() if "." not in name and not linkage.startswith("Win"):
def test_vi_C_interface(): try: from cffi import FFI cffi_is_present = True except: cffi_is_present = False return if cffi_is_present: h = 1e-5 T = 1.0 t = 0.0 theta = 1.0 gamma = 1.0 g = 9.81 kappa = 0.4 xk = np.array((1., 10.)) ffi = FFI() ffi.cdef('void set_cstruct(uintptr_t p_env, void* p_struct);') ffi.cdef('''typedef struct { int id; double* xk; double h; double theta; double gamma; double g; double kappa; unsigned int f_eval; unsigned int nabla_eval; } data; ''') data_struct = ffi.new('data*') data_struct.id = -1 # to avoid freeing the data in the destructor data_struct.xk = ffi.cast('double *', xk.ctypes.data) data_struct.h = h data_struct.theta = theta data_struct.gamma = gamma data_struct.g = g data_struct.kappa = kappa vi = SN.VI(2) import siconos D = ffi.dlopen(siconos.__path__[0] + '/_numerics.so') D.set_cstruct(vi.get_env_as_long(), ffi.cast('void*', data_struct)) vi.set_compute_F_and_nabla_F_as_C_functions('ZhuravlevIvanov.so', 'compute_F', 'compute_nabla_F') lambda_ = np.zeros((2, )) xkp1 = np.zeros((2, )) SO = SN.SolverOptions(vi, SN.SICONOS_VI_BOX_QI) lb = np.array((-1.0, -1.0)) ub = np.array((1.0, 1.0)) vi.set_box_constraints(lb, ub) N = int(T / h + 10) print(N) SO.dparam[0] = 1e-24 SO.iparam[0] = 100 SO.iparam[2] = 1 SO.iparam[3] = 0 SO.iparam[4] = 5 signs = np.empty((N, 2)) sol = np.empty((N, 2)) sol[0, :] = xk k = 0 #SN.numerics_set_verbose(3) while t <= T: k += 1 info = SN.variationalInequality_box_newton_QiLSA( vi, lambda_, xkp1, SO) #print('iter {:} ; solver iter = {:} ; prec = {:}'.format(k, SO.iparam[1], SO.dparam[1])) if info > 0: print(lambda_) # vi_function(2, signs[k-1, :], xkp1) lambda_[0] = -np.sign(xkp1[0]) lambda_[1] = -np.sign(xkp1[1]) if np.abs(xk[0]) < 1e-10: lambda_[0] = 0.01 if np.abs(xk[1]) < 1e-10: lambda_[1] = 0.01 print('ok lambda') print(lambda_) info = SN.variationalInequality_box_newton_QiLSA( vi, lambda_, xkp1, SO) print('iter {:} ; solver iter = {:} ; prec = {:}'.format( k, SO.iparam[1], SO.dparam[1])) if info > 0: print('VI solver failed ! info = {:}'.format(info)) print(xk) print(lambda_) print(xkp1) kaboom() # else: # print('iter {:} ; solver iter = {:} ; prec = {:}'.format(k, SO.iparam[1], SO.dparam[1])) # vi_function(2, lambda_, xkp1) sol[k, 0:2] = xkp1 np.copyto(xk, xkp1, casting='no') signs[k, 0:2] = lambda_ t = k * h
class RQObject(object): _cdefs = ''' typedef uint64_t RaptorQ_OTI_Common_Data; typedef uint32_t RaptorQ_OTI_Scheme_Specific_Data; typedef enum { NONE = 0, ENC_8 = 1, ENC_16 = 2, ENC_32 = 3, ENC_64 = 4, DEC_8 = 5, DEC_16 = 6, DEC_32 = 7, DEC_64 = 8 } RaptorQ_type; struct RaptorQ_ptr; struct RaptorQ_ptr* RaptorQ_Enc ( const RaptorQ_type type, void *data, const uint64_t size, const uint16_t min_subsymbol_size, const uint16_t symbol_size, const size_t max_memory); struct RaptorQ_ptr* RaptorQ_Dec ( const RaptorQ_type type, const RaptorQ_OTI_Common_Data common, const RaptorQ_OTI_Scheme_Specific_Data scheme); // Encoding RaptorQ_OTI_Common_Data RaptorQ_OTI_Common (struct RaptorQ_ptr *enc); RaptorQ_OTI_Scheme_Specific_Data RaptorQ_OTI_Scheme (struct RaptorQ_ptr *enc); uint16_t RaptorQ_symbol_size (struct RaptorQ_ptr *ptr); uint8_t RaptorQ_blocks (struct RaptorQ_ptr *ptr); uint32_t RaptorQ_block_size (struct RaptorQ_ptr *ptr, const uint8_t sbn); uint16_t RaptorQ_symbols (struct RaptorQ_ptr *ptr, const uint8_t sbn); uint32_t RaptorQ_max_repair (struct RaptorQ_ptr *enc, const uint8_t sbn); size_t RaptorQ_precompute_max_memory (struct RaptorQ_ptr *enc); void RaptorQ_precompute ( struct RaptorQ_ptr *enc, const uint8_t threads, const bool background); uint64_t RaptorQ_encode_id ( struct RaptorQ_ptr *enc, void **data, const uint64_t size, const uint32_t id); uint64_t RaptorQ_encode ( struct RaptorQ_ptr *enc, void **data, const uint64_t size, const uint32_t esi, const uint8_t sbn); uint32_t RaptorQ_id (const uint32_t esi, const uint8_t sbn); // Decoding uint64_t RaptorQ_bytes (struct RaptorQ_ptr *dec); uint64_t RaptorQ_decode ( struct RaptorQ_ptr *dec, void **data, const size_t size); uint64_t RaptorQ_decode_block ( struct RaptorQ_ptr *dec, void **data, const size_t size, const uint8_t sbn); bool RaptorQ_add_symbol_id ( struct RaptorQ_ptr *dec, void **data, const uint32_t size, const uint32_t id); bool RaptorQ_add_symbol ( struct RaptorQ_ptr *dec, void **data, const uint32_t size, const uint32_t esi, const uint8_t sbn); // General: free memory void RaptorQ_free (struct RaptorQ_ptr **ptr); void RaptorQ_free_block (struct RaptorQ_ptr *ptr, const uint8_t sbn); ''' _ctx = None data_size_div, _rq_type, _rq_blk = 4, 32, 'uint32_t' def __init__(self): self._ffi = FFI() self._ffi.cdef(self._cdefs) # self.ffi.set_source('_rq', '#include <RaptorQ/cRaptorQ.h>') self._lib = self._ffi.dlopen( 'libRaptorQ.so') # ABI mode for simplicity self.rq_types = (['NONE', None] + list('ENC_{}'.format(2**n) for n in xrange(3, 7)) + list('DEC_{}'.format(2**n) for n in xrange(3, 7))) self._rq_blk_size = self.data_size_div def rq_type_val(self, v, pre): if isinstance(v, int) or v.isdigit(): v = '{}_{}'.format(pre, v).upper() else: v = bytes(v).upper() assert v in self.rq_types, [v, self.rq_types] return getattr(self._lib, v) def __getattr__(self, k): if k.startswith('rq_'): if not self._ctx: raise RuntimeError( 'ContextManager not initialized or already freed') return ft.partial(getattr(self._lib, 'RaptorQ_{}'.format(k[3:])), self._ctx) return self.__getattribute__(k) def open(self): self._ctx = self._ctx_init[0](*self._ctx_init[1]) return self._ctx def close(self): if self._ctx: ptr = self._ffi.new('struct RaptorQ_ptr **') ptr[0] = self._ctx self._lib.RaptorQ_free(ptr) self._ctx = None def __enter__(self): self.open() return self def __exit__(self, *err): self.close() def __del__(self): self.close() def sym_id(self, esi, sbn): return self._lib.RaptorQ_id(esi, sbn) _sym_n = None def _sym_buff(self, init=None): if not self._sym_n: 
self._sym_n = self.symbol_size / self._rq_blk_size buff = self._ffi.new('{}[]'.format(self._rq_blk), self._sym_n) buff_ptr = self._ffi.new('void **', buff) buff_raw = self._ffi.buffer(buff) if init: buff_raw[:] = init return buff_ptr, lambda: bytes(buff_raw)
class LibWwqParseCFFI(LibWwqLyParseBase): def __init__(self): super(LibWwqParseCFFI, self).__init__() from cffi import FFI self.ffi = FFI() self.lib = self.ffi.dlopen(self.lib_path) self.ffi.cdef(""" char * get_uuid(); char * get_name(); int parse(char * c,int length,char **result,int *result_length); int free_str(char * c); void* atomic_int64_init(); int atomic_int64_destroy(void* ptr); int64_t atomic_int64_get(void* ptr); int64_t atomic_int64_set(void* ptr, int64_t val); int64_t atomic_int64_add(void* ptr, int64_t val); int64_t atomic_int64_sub(void* ptr, int64_t val); int64_t atomic_int64_and(void* ptr, int64_t val); int64_t atomic_int64_or(void* ptr, int64_t val); int64_t atomic_int64_xor(void* ptr, int64_t val); typedef union epoll_data { void* ptr; int fd; uint32_t u32; uint64_t u64; uintptr_t sock; /* Windows specific */ void* hnd; /* Windows specific */ } epoll_data_t; typedef struct { uint32_t events; /* Epoll events and flags */ epoll_data_t data; /* User data variable */ } epoll_event ; void* epoll_create(int size); void* epoll_create1(int flags); int epoll_close(void* ephnd); int epoll_ctl(void* ephnd, int op, uintptr_t sock, epoll_event* event); int epoll_wait(void* ephnd, epoll_event* events, int maxevents, int timeout); """) self.lib.__class__.__repr__ = lambda s: "<%s object at 0x%016X>" % (s.__class__.__name__, id(s)) logging.debug("successful load lib %s" % self.lib) weakref.finalize(self, lambda: logging.debug("%s released" % self.lib) if self.ffi.dlclose(self.lib) or 1 else None) def get_uuid(self) -> bytes: return self.ffi.string(self.lib.get_uuid()) def get_name(self) -> bytes: return self.ffi.string(self.lib.get_name()) def lib_parse(self, byte_str: bytes) -> bytes: length = self.ffi.cast("int", len(byte_str)) result_length = self.ffi.new("int *") result_p = self.ffi.new("char **") # p = self.ffi.new("char []", byte_str) p = self.ffi.from_buffer(byte_str) self.lib.parse(p, length, result_p, result_length) result = self.ffi.unpack(result_p[0], result_length[0]) self.lib.free_str(result_p[0]) return result
def test_struct_with_typedef(self): ffi = FFI() ffi.cdef("typedef struct { float x; } foo_t;") p = ffi.new("foo_t *", [5.2]) assert repr(p).startswith("<cdata 'foo_t *' ")
import sys extension_suffixes = [] if sys.version_info[0] == 2: import imp for ext, mod, typ in imp.get_suffixes(): if typ == imp.C_EXTENSION: extension_suffixes.append(ext) else: from importlib import machinery extension_suffixes = machinery.EXTENSION_SUFFIXES try: from cffi import FFI ffi = FFI() null_pointer = ffi.NULL uint8_t_type = ffi.typeof(ffi.new("const uint8_t*")) def load_lib(name, cdecl): """Load a shared library and return a handle to it. @name, either an absolute path or the name of a library in the system search path. @cdecl, the C function declarations. """ lib = ffi.dlopen(name) ffi.cdef(cdecl) return lib def c_ulong(x):
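For completeness, a minimal sketch of load_lib() in use. libc's strlen is only an example symbol, and the library name shown is the glibc soname on Linux:

# dlopen() happens before cdef(), which is fine in ABI mode: symbols are
# resolved lazily when the attribute is first accessed.
libc = load_lib("libc.so.6", "size_t strlen(const char *s);")
assert libc.strlen(b"hello") == 5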
class TestLocationdLib(unittest.TestCase): def setUp(self): header = '''typedef ...* Localizer_t; Localizer_t localizer_init(); void localizer_get_message_bytes(Localizer_t localizer, uint64_t logMonoTime, bool inputsOK, bool sensorsOK, bool gpsOK, char *buff, size_t buff_size); void localizer_handle_msg_bytes(Localizer_t localizer, const char *data, size_t size);''' self.ffi = FFI() self.ffi.cdef(header) self.lib = self.ffi.dlopen(LIBLOCATIOND_PATH) self.localizer = self.lib.localizer_init() self.buff_size = 2048 self.msg_buff = self.ffi.new(f'char[{self.buff_size}]') def localizer_handle_msg(self, msg_builder): bytstr = msg_builder.to_bytes() self.lib.localizer_handle_msg_bytes(self.localizer, self.ffi.from_buffer(bytstr), len(bytstr)) def localizer_get_msg(self, t=0, inputsOK=True, sensorsOK=True, gpsOK=True): self.lib.localizer_get_message_bytes( self.localizer, t, inputsOK, sensorsOK, gpsOK, self.ffi.addressof(self.msg_buff, 0), self.buff_size) return log.Event.from_bytes(self.ffi.buffer(self.msg_buff), nesting_limit=self.buff_size // 8) def test_liblocalizer(self): msg = messaging.new_message('liveCalibration') msg.liveCalibration.validBlocks = random.randint(1, 10) msg.liveCalibration.rpyCalib = [random.random() for _ in range(3)] self.localizer_handle_msg(msg) liveloc = self.localizer_get_msg() self.assertTrue(liveloc is not None) def test_device_fell(self): msg = messaging.new_message('sensorEvents', 1) msg.sensorEvents[0].sensor = 1 msg.sensorEvents[0].type = 1 msg.sensorEvents[0].init('acceleration') msg.sensorEvents[0].acceleration.v = [10.0, 0.0, 0.0] # zero with gravity self.localizer_handle_msg(msg) ret = self.localizer_get_msg() self.assertTrue(ret.liveLocationKalman.deviceStable) msg = messaging.new_message('sensorEvents', 1) msg.sensorEvents[0].sensor = 1 msg.sensorEvents[0].type = 1 msg.sensorEvents[0].init('acceleration') msg.sensorEvents[0].acceleration.v = [50.1, 0.0, 0.0] # more than 40 m/s**2 self.localizer_handle_msg(msg) ret = self.localizer_get_msg() self.assertFalse(ret.liveLocationKalman.deviceStable) def test_posenet_spike(self): for _ in range(SENSOR_DECIMATION): msg = messaging.new_message('carState') msg.carState.vEgo = 6.0 # more than 5 m/s self.localizer_handle_msg(msg) ret = self.localizer_get_msg() self.assertTrue(ret.liveLocationKalman.posenetOK) for _ in range(20 * VISION_DECIMATION): # size of hist_old msg = messaging.new_message('cameraOdometry') msg.cameraOdometry.rot = [0.0, 0.0, 0.0] msg.cameraOdometry.rotStd = [0.0, 0.0, 0.0] msg.cameraOdometry.trans = [0.0, 0.0, 0.0] msg.cameraOdometry.transStd = [2.0, 0.0, 0.0] self.localizer_handle_msg(msg) for _ in range(20 * VISION_DECIMATION): # size of hist_new msg = messaging.new_message('cameraOdometry') msg.cameraOdometry.rot = [0.0, 0.0, 0.0] msg.cameraOdometry.rotStd = [0.0, 0.0, 0.0] msg.cameraOdometry.trans = [0.0, 0.0, 0.0] msg.cameraOdometry.transStd = [8.1, 0.0, 0.0] # more than 4 times larger self.localizer_handle_msg(msg) ret = self.localizer_get_msg() self.assertFalse(ret.liveLocationKalman.posenetOK)
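The test class relies on module-level names (LIBLOCATIOND_PATH, SENSOR_DECIMATION, VISION_DECIMATION, messaging, log) defined elsewhere in the original module; with those in place it runs under the standard unittest entry point:

if __name__ == "__main__":
    unittest.main()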
typedef struct { const char *data; GoInt len; } GoString; void Add(const char *host); void Remove(const char *host); const char *Hash(const char *key); void SetReplica(GoInt replica); void Cfree(void *p); """) lib = ffi.dlopen("./consistent.so") print(lib.Add, lib.Remove, lib.Hash, lib.Cfree) host1 = ffi.new("char[]", b"host1") lib.Add(host1) key = ffi.new("char[]", b"test_key") host = lib.Hash(key) print(ffi.string(host)) lib.Cfree(host) # print("awesome.Add(12,99) = %d" % lib.Add(12,99)) # print("awesome.Cosine(1) = %f" % lib.Cosine(1)) # # data = ffi.new("GoInt[]", [74,4,122,9,12]) # nums = ffi.new("GoSlice*", {'data':data, 'len':5, 'cap':5}) # lib.Sort(nums[0]) # print("awesome.Sort(74,4,122,9,12) = %s" % [ # ffi.cast("GoInt*", nums.data)[i] # for i in range(nums.len)])
from cffi import FFI import numpy as np ffi = FFI() chi2 = ffi.dlopen('./_chi2.so') ffi.cdef(""" int chi2(double m, double b, double *x, double *y, double *yerr, int N, double* result); """) if __name__ == '__main__': mu_y = 0. sig_y = 5. mu_x = 0.21 sig_x = 3. N = int(1000) m = float(1.) b = float(0.) x = np.random.normal(mu_x, sig_x, N).astype('float64') x_p = ffi.cast('double *', x.ctypes.data) y = np.random.normal(mu_y, sig_y, N).astype('float64') y_p = ffi.cast('double *', y.ctypes.data) yerr = np.array([sig_y] * N, 'float64') yerr_p = ffi.cast('double *', yerr.ctypes.data) result = ffi.new('double *') chi2.chi2(m, b, x_p, y_p, yerr_p, N, result) print(result[0])
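The ctypes.data casts above work because the arrays are contiguous float64. An equivalent, arguably more cffi-idiomatic way to hand the same numpy buffers to the C function is ffi.from_buffer(), sketched here with the same variable names:

# Borrow the numpy buffers directly instead of going through .ctypes.data.
x_p = ffi.cast('double *', ffi.from_buffer(x))
y_p = ffi.cast('double *', ffi.from_buffer(y))
yerr_p = ffi.cast('double *', ffi.from_buffer(yerr))
chi2.chi2(m, b, x_p, y_p, yerr_p, N, result)
print(result[0])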
class QmixPump(object): """ Qmix pump interface. """ def __init__(self, index, name='', external_valves=None, restore_drive_pos_counter=False, auto_enable=True): """ Parameters ---------- index : int Index of the pump to access. It is related with the config files. First pump has ``index=0``, second has ``index=1`` and so on. Takes precedence over the `name` parameter. name : str The name of the pump. restore_drive_pos_counter : bool Whether to restore the pump drive position counter from the pyqmix config file. auto_enable : bool Whether to enable (i.e., activate) the pump on object instantiation. """ cfg = config.read_config() dll_dir = cfg.get('qmix_dll_dir', None) dll_filename = 'labbCAN_Pump_API.dll' dll_path = find_dll(dll_dir=dll_dir, dll_filename=dll_filename) if dll_path is None: msg = 'Could not find the Qmix SDK DLL %s.' % dll_filename raise RuntimeError(msg) else: self.dll_path = dll_path self._ffi = FFI() self._ffi.cdef(PUMP_HEADER) self._dll = self._ffi.dlopen(self.dll_path) self.index = index self._name = name self._handle = self._ffi.new('dev_hdl *', 0) self._call('LCP_GetPumpHandle', self.index, self._handle) self._flow_rate_max = self._ffi.new('double *') self._p_fill_level = self._ffi.new('double *') self._p_dosed_volume = self._ffi.new('double *') self._p_flow_rate = self._ffi.new('double *') self._p_flow_prefix = self._ffi.new('int *') self._p_flow_volume_unit = self._ffi.new('int *') self._p_flow_time_unit = self._ffi.new('int *') self._p_volume_prefix = self._ffi.new('int *') self._p_volume_unit = self._ffi.new('int *') self._p_volume_max = self._ffi.new('double *') # Syringe dimensions. self._p_inner_diameter_mm = self._ffi.new('double *') self._p_max_piston_stroke_mm = self._ffi.new('double *') self._p_drive_pos_counter = self._ffi.new('long *') self._valve_handle = self._ffi.new('dev_hdl *', 0) self._call('LCP_GetValveHandle', self._handle[0], self._valve_handle) self.valve = QmixValve(handle=self._valve_handle) if self.is_in_fault_state: self.clear_fault_state() if not self.is_enabled: self.enable() if external_valves is None: self.ext_valves = dict() else: self.ext_valves = external_valves self.auto_enable = auto_enable if self.auto_enable: self.enable() try: # Try to restore settings from configuration file. pump_config = cfg['pumps'][self.index] # We get back CommentedOrderedMap's, so convert to dicts. volume_unit = dict(pump_config['volume_unit']) flow_unit = dict(pump_config['flow_unit']) syringe_params = dict(pump_config['syringe_params']) name = pump_config['name'] if restore_drive_pos_counter: drive_pos_counter = pump_config['drive_pos_counter'] else: drive_pos_counter = self.drive_pos_counter if self._name == '': self._name = name self.volume_unit = volume_unit self.flow_unit = flow_unit self.syringe_params = syringe_params self.drive_pos_counter = drive_pos_counter except KeyError: # Write default values to configuration file. config.add_pump(self.index) config.set_pump_name(self.index, self._name) config.set_pump_drive_pos_counter(self.index, self.drive_pos_counter) self.set_flow_unit() self.set_volume_unit() self.set_syringe_params_by_type('50 mL glass') atexit.register(self.save_drive_pos_counter) def _call(self, func_name, *args): func = getattr(self._dll, func_name) r = func(*args) return CHK(r) @property def name(self): return self._name @name.setter def name(self, name): self._name = name config.set_pump_name(self.index, name) @property def is_enabled(self): """ Query if pump drive is enabled. 
Only if the pump drive is enabled it is possible to pump fluid. Returns ------- int 1 - Pump drive is enabled, pumping is possible 0 - Pump drive is disabled - pump head is free running """ return self._call('LCP_IsEnabled', self._handle[0]) def enable(self): """ Enable the pump. """ self._call('LCP_Enable', self._handle[0]) def disable(self): """ Deactivate the pump drive. """ return self._call('LCP_Disable', self._handle[0]) @property def is_in_fault_state(self): """ Check if pump is in a fault state. If the device is in fault state then it is necessary to call :func:`qmix.QmixPump.clear_fault_state``, followed by a call to :func:`QmixPump.enable` to enable the pump. Returns ------- int 1 - Pump is in fault state 0 - Pump is not in fault state """ return self._call('LCP_IsInFaultState', self._handle[0]) def clear_fault_state(self): """ Clear fault condition. Clears the last fault and resets the device to an error-free state. If `qmix.QmixPump.is_in_fault_state` indicates that device is in fault state, then this method may resolve this problem. If the device is still in fault state after this method was called, we have to assume that a serious failure occurred. """ self._call('LCP_ClearFault', self._handle[0]) @property def is_calibration_finished(self): """ Check if calibration is finished still ongoing. Returns ------- bool True - Device calibration has finished/was perfomed False - Device is calibrating """ r = self._call('LCP_IsCalibrationFinished', self._handle[0]) if r == 0: return False else: return True def calibrate(self, wait_until_done=False): """ Executes a reference move for a syringe pump. .. warning:: Executing the calibration move with a syringe fitted on the device may cause damage to the syringe. Parameters ---------- wait_until_done : bool Whether to block further program execution until done. """ self._call('LCP_SyringePumpCalibrate', self._handle[0]) if wait_until_done: while not self.is_calibration_finished: time.sleep(0.0005) @property def n_pumps(self): """ The number of dosing units. Returns ------- int Number of detected pump devices """ return self._call('LCP_GetNoOfPumps') def set_volume_unit(self, prefix='milli', unit='litres'): """ Set the default volume unit. All parameters of subsequent dosing method calls are given in this new unit. Parameters ---------- prefix : str The prefix of the SIunit: ``centi``, ``deci``, ``mircro``, ``milli``, ``unit``. unit : str The volume unit identifier: ``litres``. """ self._call('LCP_SetVolumeUnit', self._handle[0], getattr(self._dll, prefix.upper()), getattr(self._dll, unit.upper())) config.set_pump_volume_unit(self.index, prefix=prefix, unit=unit) def get_volume_unit(self): """ Return the currently set default volume unit. Returns ------- OrderedDict A dictionary with the keys `prefix` and `unit`. """ self._call('LCP_GetVolumeUnit', self._handle[0], self._p_volume_prefix, self._p_volume_unit) if self._p_volume_prefix[0] == self._dll.MICRO: prefix = 'micro' elif self._p_volume_prefix[0] == self._dll.MILLI: prefix = 'milli' elif self._p_volume_prefix[0] == self._dll.CENTI: prefix = 'centi' elif self._p_volume_prefix[0] == self._dll.DECI: prefix = 'deci' else: raise RuntimeError('Invalid volume unit prefix retrieved.') if self._p_volume_unit[0] == self._dll.LITRES: unit = 'litres' else: raise RuntimeError('Invalid flow volume unit retrieved.') return OrderedDict([('prefix', prefix), ('unit', unit)]) @property def volume_unit(self): """ The currently set default volume unit. 
Returns ------- OrderedDict A dictionary with the keys `prefix` and `unit`. """ return self.get_volume_unit() @volume_unit.setter def volume_unit(self, volume_unit): self.set_volume_unit(**volume_unit) @property def volume_max(self): self._call('LCP_GetVolumeMax', self._handle[0], self._p_volume_max) return self._p_volume_max[0] def set_flow_unit(self, prefix='milli', volume_unit='litres', time_unit='per_second'): """ Set the flow unit for a certain pump. The flow unit defines the unit to be used for all flow values passed to API functions or retrieved from API functions. Parameters ---------- prefix : str The prefix of the SI unit: ``centi``, ``deci``, ``milli``, ``micro``. volume_unit : str The volume unit identifier: ``litres``. time_unit : str The time unit (denominator) of the velocity unit: ``per_hour``, ``per_minute``, ``per_second``. """ self._call('LCP_SetFlowUnit', self._handle[0], getattr(self._dll, prefix.upper()), getattr(self._dll, volume_unit.upper()), getattr(self._dll, time_unit.upper())) config.set_pump_flow_unit(self.index, prefix=prefix, volume_unit=volume_unit, time_unit=time_unit) def get_flow_unit(self): """ Return the currently set flow unit. Returns ------- OrderedDict A dictionary with the keys `prefix`, `volume_unit`, and `time_unit`. """ self._call('LCP_GetFlowUnit', self._handle[0], self._p_flow_prefix, self._p_flow_volume_unit, self._p_flow_time_unit) if self._p_flow_prefix[0] == self._dll.MICRO: prefix = 'micro' elif self._p_flow_prefix[0] == self._dll.MILLI: prefix = 'milli' elif self._p_flow_prefix[0] == self._dll.CENTI: prefix = 'centi' elif self._p_flow_prefix[0] == self._dll.DECI: prefix = 'deci' else: raise RuntimeError('Invalid flow unit prefix retrieved.') if self._p_flow_volume_unit[0] == self._dll.LITRES: volume_unit = 'litres' else: raise RuntimeError('Invalid flow volume unit retrieved.') if self._p_flow_time_unit[0] == self._dll.PER_SECOND: time_unit = 'per_second' elif self._p_flow_time_unit[0] == self._dll.PER_MINUTE: time_unit = 'per_minute' elif self._p_flow_time_unit[0] == self._dll.PER_HOUR: time_unit = 'per_hour' else: raise RuntimeError('Invalid flow time unit retrieved.') return OrderedDict([('prefix', prefix), ('volume_unit', volume_unit), ('time_unit', time_unit)]) @property def flow_unit(self): """ The currently set flow unit. Returns ------- OrderedDict A dictionary with the keys `prefix`, `volume_unit`, and `time_unit`. """ return self.get_flow_unit() @flow_unit.setter def flow_unit(self, flow_unit): self.set_flow_unit(**flow_unit) def set_syringe_params(self, inner_diameter_mm=32.5735, max_piston_stroke_mm=60): """ Set syringe properties. If you change the syringe in one device, you need to setup the new syringe parameters to get proper conversion of flow rate und volume units. Parameters ---------- inner_diameter_mm : float Inner diameter of the syringe tube in millimetres. max_piston_stroke_mm : float The maximum piston stroke defines the maximum position the piston can be moved to before it slips out of the syringe tube. The maximum piston stroke limits the maximum travel range of the syringe pump pusher. """ self._call('LCP_SetSyringeParam', self._handle[0], inner_diameter_mm, max_piston_stroke_mm) config.set_pump_syringe_params( self.index, inner_diameter_mm=inner_diameter_mm, max_piston_stroke_mm=max_piston_stroke_mm) def set_syringe_params_by_type(self, syringe_type='50 mL glass'): """ Convenience method to set syringe parameters based on syringe type. 
Parameters ---------- syringe_type : string Any of `25 mL glass` and `50 mL glass`. Notes ----- This method simply looks up pre-defined syringe parameters (inner diameter and max. piston stroke), and passes these parameters to :func:~`pyqmix.QmixPump.set_syringe_params`. """ if syringe_type not in syringes.keys(): raise ValueError('Unknown syringe type.') else: syringe = syringes[syringe_type] self.set_syringe_params(**syringe) def get_syringe_params(self): """ Get the currently set syringe properties. Returns ------- OrderedDict Returns a dictionary with the keys `inner_diameter_mm` and `max_piston_stroke_mm`. """ self._call('LCP_GetSyringeParam', self._handle[0], self._p_inner_diameter_mm, self._p_max_piston_stroke_mm) return OrderedDict([ ('inner_diameter_mm', self._p_inner_diameter_mm[0]), ('max_piston_stroke_mm', self._p_max_piston_stroke_mm[0]) ]) @property def syringe_params(self): """ The currently set syringe properties. Returns ------- OrderedDict Returns a dictionary with the keys `inner_diameter_mm` and `max_piston_stroke_mm`. """ return self.get_syringe_params() @syringe_params.setter def syringe_params(self, params): self.set_syringe_params(**params) @property def max_flow_rate(self): """ Maximum flow rate for the current dosing unit configuration. The maximum flow rate depends on the mechanical configuration of the dosing unit (gear) and on the syringe configuration. If larger syringes are used then larger flow rates are realizable. Returns ------- float The maximum flow rate in configured SI unit """ self._call('LCP_GetFlowRateMax', self._handle[0], self._flow_rate_max) return self._flow_rate_max[0] def aspirate(self, volume, flow_rate, wait_until_done=False, switch_valve_when_done=False): """ Aspirate a certain volume with the specified flow rate. Parameters ---------- volume : float > 0 The volume to aspirate in physical units. flow_rate : float > 0 The flow rate to use to aspirate the volume, negative flow rates are invalid. wait_until_done : bool Whether to block until done. switch_valve_when_done : bool If set to ``True``, it switches valve to dispense position after the aspiration is finished. Implies `wait_until_done=True`. Raises ------ ValueError If the specified volume or flow rate are non-positive, or if the fill level at the end of the aspiration procedure would exceed the syringe volume. Notes ----- This method switches the valve to aspiration position before the actual aspiration begins. """ if volume <= 0: raise ValueError('Volume must be positive.') if flow_rate <= 0: raise ValueError('Flow rate must be positive.') if self.fill_level + volume > self.volume_max: msg = 'Aspiration would exceed syringe volume.' raise ValueError(msg) if switch_valve_when_done: wait_until_done = True self.valve.switch_position(self.valve.aspirate_pos) self._call('LCP_Aspirate', self._handle[0], volume, flow_rate) if wait_until_done: # Wait until pumping has actually started. while not self.is_pumping: time.sleep(0.0005) # Now wait until the pumping has finished. while self.is_pumping: time.sleep(0.0005) if switch_valve_when_done: self.valve.switch_position(self.valve.dispense_pos) def dispense(self, volume, flow_rate, wait_until_done=False, switch_valve_when_done=False): """ Dispense a certain volume with a certain flow rate. It also switches the valve in position 0 (green led off). Parameters ---------- volume : float > 0 The volume to dispense in physical units. flow_rate : float > 0 The flow rate to use to dispense the volume, negative flow rates are invalid. 
wait_until_done : bool Whether to halt program execution until done. switch_valve_when_done : bool If set to ``True``, it switches valve to aspirate position after the dispense is finished. Implies `wait_until_done=True`. Raises ------ ValueError If the specified volume or flow rate are non-positive, or if the syringe is currently not sufficiently filled to dispense the desired volume. Notes ----- This method switches the valve to dispense position before the actual aspiration begins. """ if volume <= 0: raise ValueError('Volume must be positive.') if flow_rate <= 0: raise ValueError('Flow rate must be positive.') if self.fill_level < volume: msg = 'Current syringe fill level is insufficient.' raise ValueError(msg) if switch_valve_when_done: wait_until_done = True self.valve.switch_position(self.valve.dispense_pos) self._call('LCP_Dispense', self._handle[0], volume, flow_rate) if wait_until_done: # Wait until pumping has actually started. while not self.is_pumping: time.sleep(0.0005) # Now wait until the pumping has finished. while self.is_pumping: time.sleep(0.0005) if switch_valve_when_done: self.valve.switch_position(self.valve.aspirate_pos) def set_fill_level(self, level, flow_rate, wait_until_done=False, switch_valve_when_done=False): """ Pumps fluid with the given flow rate until the requested fill level is reached. Depending on the requested fill level given in ``level`` parameter this function may cause aspiration or dispension of fluid. If it aspirates it switches the valve in position 1. 0 if it dispenses. Parameters ---------- level : float => 0 The requested fill level. A level of 0 indicates a completely empty syringe. flow_rate : float > 0 The flow rate to use for pumping. wait_until_done : bool Whether to halt program execution until done. switch_valve_when_done : bool If set to ``True``, it switches valve to dispense position after the aspiration is finished. Implies `wait_until_done=True`. Raises ------ ValueError If specified target fill level is negative, or if flow rate is non-positive. """ if level < 0: raise ValueError('Target level must be >= 0.') if flow_rate <= 0: raise ValueError('Flow rate must be positive.') if switch_valve_when_done: wait_until_done = True # Switch the valves to inlet or outlet position, depending on # whether we are going to aspirate or to dispense. if level < self.get_fill_level(): self.valve.switch_position(self.valve.dispense_pos) else: self.valve.switch_position(self.valve.aspirate_pos) self._call('LCP_SetFillLevel', self._handle[0], level, flow_rate) if wait_until_done: # Wait until pumping has actually started. while not self.is_pumping: time.sleep(0.0005) # Now wait until the pumping has finished. while self.is_pumping: time.sleep(0.0005) if switch_valve_when_done: self.valve.switch_position(self.valve.aspirate_pos) def generate_flow(self, flow_rate, wait_until_done=False, switch_valve_when_done=False): """ Generate a continuous flow. If it aspirates it switches the valve in position 1. 0 if it dispenses. Parameters ---------- flow_rate : float != 0 A positive flow rate indicates dispensing and a negative flow rate indicates aspiration. wait_until_done : bool Whether to halt program execution until done. switch_valve_when_done : bool If set to ``True``, it switches valve to dispense position after the aspiration is finished. Implies `wait_until_done=True`. Raises ------ ValueError If a flow rate of zero is specified. 
""" if flow_rate == 0: raise ValueError('Flow rate must be non-zero.') if switch_valve_when_done: wait_until_done = True if flow_rate > 0: self.valve.switch_position(self.valve.dispense_pos) else: self.valve.switch_position(self.valve.aspirate_pos) self._call('LCP_GenerateFlow', self._handle[0], flow_rate) if wait_until_done: # Wait until pumping has actually started. while not self.is_pumping: time.sleep(0.0005) # Now wait until the pumping has finished. while self.is_pumping: time.sleep(0.0005) if switch_valve_when_done: self.valve.switch_position(self.valve.aspirate_pos) def fill(self, flow_rate, wait_until_done=False, switch_valve_when_done=False): """ Fill the syringe. Parameters ---------- flow_rate : float > 0 The flow rate to use. wait_until_done : bool Whether to halt program execution until done. switch_valve_when_done : bool If set to ``True``, it switches valve to dispense position after the aspiration is finished. Implies `wait_until_done=True`. Raises ------ ValueError If the specified flow rate is non-positive. Notes ----- This is a convenience method that simply passes the specified parameters to :func:~`pyqmix.QmixPump.generate_flow`. Note that `flow_rate` is multiplied by `-1` to ensure the syringe is being filled. """ if flow_rate <= 0: raise ValueError('Flow rate must be positive.') if switch_valve_when_done: wait_until_done = True self.generate_flow(-flow_rate, wait_until_done=wait_until_done, switch_valve_when_done=switch_valve_when_done) def empty(self, flow_rate, wait_until_done=False, switch_valve_when_done=False): """ Empty the syringe. Parameters ---------- flow_rate : float > 0 The flow rate to use. wait_until_done : bool Whether to halt program execution until done. switch_valve_when_done : bool If set to ``True``, it switches valve to dispense position after the aspiration is finished. Implies `wait_until_done=True`. Raises ------ ValueError If the specified flow rate is non-positive. Notes ----- This is a convenience method that simply passes the specified parameters to :func:~`pyqmix.QmixPump.generate_flow`. """ if flow_rate <= 0: raise ValueError('Flow rate must be positive.') if switch_valve_when_done: wait_until_done = True self.generate_flow(flow_rate, wait_until_done=wait_until_done, switch_valve_when_done=switch_valve_when_done) def stop(self): """ Immediately stop pumping. """ self._call('LCP_StopPumping', self._handle[0]) def stop_all_pumps(self): """ Immediately stop all pumps. """ self._call('LCP_StopAllPumps') @property def dosed_volume(self): """ Get the already dosed volume. Returns ------- float The already dosed volume """ self._call('LCP_GetDosedVolume', self._handle[0], self._p_dosed_volume) return self._p_dosed_volume[0] def get_fill_level(self): """ Returns the current fill level of the pump. Returns ------- float The current fill level of the syringe """ self._call('LCP_GetFillLevel', self._handle[0], self._p_fill_level) return self._p_fill_level[0] @property def fill_level(self): """ Returns the current fill level of the pump. Notes ----- This is identical to a call to :func:~`pyqmix.QmixPump.get_fill_level`. """ return self.get_fill_level() @property def current_flow_rate(self): """ Read the current flow rate. This does not assess the actual current flow rate. Instead, this method simply returns the cached (desired) flow rate value. 
Returns ------- float The current flow rate demand value """ self._call('LCP_GetFlowIs', self._handle[0], self._p_flow_rate) return self._p_flow_rate[0] @property def is_pumping(self): """ Check if device is currently stopped or dosing. Returns ------- bool `True` if pumping, `False` otherwise. """ r = self._call('LCP_IsPumping', self._handle[0]) return bool(r) @property def has_valve(self): """ Check if the pump has a valve assigned. Returns ------- bool `True` if a valve is present, `False` otherwise. """ r = self._call('LCP_HasValve', self._handle[0]) return bool(r) @property def valve_handle(self): """ Returns the valve handle of the pump valve. Returns ------- int Handle to valve device, or 0 if no valve is associated """ self._call('LCP_GetValveHandle', self._handle[0], self._valve_handle) return self._valve_handle[0] def add_external_valve(self, valve, name): self.ext_valves[name] = valve def remove_external_valve(self, name): del self.ext_valves[name] @property def drive_pos_counter(self): """ Current drive position counter of the pump. The position counter gets reset to zero when the pump system is powered off. To avoid having to recalibrate the system (i.e., doing a reference move, which requires removal of the syringes), this function may be used to save the current drive position counter to the configuration file, from where it can be safely restored once the system is powered on again. Returns ------- int The current value of the drive position counter. """ self._call('LCP_GetDrivePosCnt', self._handle[0], self._p_drive_pos_counter) return self._p_drive_pos_counter[0] @drive_pos_counter.setter def drive_pos_counter(self, value): value = int(value) self._call('LCP_RestoreDrivePosCnt', self._handle[0], value) def save_drive_pos_counter(self): """ Save the current drive position counter to the configuration file. """ config.set_pump_drive_pos_counter(self.index, self.drive_pos_counter)
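A hedged end-to-end sketch of the pump class above. It assumes the Qmix bus/device configuration has already been opened elsewhere (as pyqmix requires) and that pump index 0 exists in that configuration; volumes and rates follow the units set below:

pump = QmixPump(index=0, name='reagent')
pump.set_flow_unit(prefix='milli', volume_unit='litres', time_unit='per_second')
pump.set_volume_unit(prefix='milli', unit='litres')
pump.set_syringe_params_by_type('50 mL glass')

# Draw up 5 mL at 0.5 mL/s, then push it back out.
pump.aspirate(volume=5, flow_rate=0.5,
              wait_until_done=True, switch_valve_when_done=True)
pump.dispense(volume=5, flow_rate=0.5, wait_until_done=True)
pump.stop_all_pumps()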
print("Pass a cffi built structure to the VE as argument on the stack.") print("Sum the elements and multiply with a factor. Correct result is 30.") print("\n") p = veo.VeoProc(0) lib = p.load_library(os.getcwd() + "/libvetest6.so") c = p.open_context() ffi = FFI() ffi.cdef(""" struct abc { int a, b, c; }; """) abc = ffi.new("struct abc *") abc.a = 1 abc.b = 2 abc.c = 3 # we'll pass the struct * as a void * lib.multeach.args_type("void *", "int") lib.multeach.ret_type("int") req = lib.multeach(c, veo.OnStack(ffi.buffer(abc)), 5) r = req.wait_result() print("result = %r" % r) del p print("finished")
class QuadraticMarchingCubes: def __init__(self): self.ffi = FFI() with open("Src/exported_routines.h") as header: header_str = header.read() cstr = "" ignore = False for line in header_str.splitlines(): if (line.startswith("#if")): ignore = True if (ignore == False): cstr += line if (line.startswith("#end")): ignore = False self.ffi.cdef(cstr) correctWorkingDirectory = os.getcwd() libname_start = correctWorkingDirectory + "/build/libquadratic_iso" if (platform.system() == "Darwin"): if os.path.exists(libname_start + ".dylib"): libname = libname_start + ".dylib" else: libname = libname_start + "d.dylib" elif (platform.system() == "Windows"): libname = correctWorkingDirectory + "/build/Debug/quadratic_iso.dll" else: if os.path.exists(libname_start + ".so"): libname = libname_start + ".so" else: libname = libname_start + "d.so" self.isosurf = self.ffi.dlopen(libname) os.chdir(os.getcwd()) print(self.isosurf) def run(self, isovalue, np_sdf_data, dim, path=None): #Allocate the maximum possible amount of memory used for buffers #passed into the C++ code. np_tris = np.zeros((dim * dim * dim, 3), dtype=np.int32) np_verts = np.zeros((dim * dim * dim, 3), dtype=np.float32) ffi_vert_count = self.ffi.new("int*") ffi_tri_count = self.ffi.new("int*") #Run the C++ code. self.isosurf.run_quadratic_mc( self.ffi.cast("int", dim), self.ffi.cast("float*", np_sdf_data.ctypes.data), self.ffi.cast("float", isovalue), self.ffi.cast("float*", np_verts.ctypes.data), ffi_vert_count, self.ffi.cast("int*", np_tris.ctypes.data), ffi_tri_count) #Trim off unused memory np_verts = np_verts[:ffi_vert_count[0], :] np_tris = np_tris[:ffi_tri_count[0], :] if path is not None: with open(path, "w") as f: f.write("# OBJ file\n") for i in range(ffi_vert_count[0]): f.write( f"v {np_verts[i,0]:3.4f} {np_verts[i,1]:3.4f} {np_verts[i,2]:3.4f}\n" ) for i in range(ffi_tri_count[0]): f.write( f"f {np_tris[i,0]+1:d} {np_tris[i,1]+1:d} {np_tris[i,2]+1:d}\n" ) return np_verts, np_tris
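A small smoke test for the extractor above, using a sphere signed-distance field. It assumes the script runs from the project root (so Src/exported_routines.h and the build/ library exist where the constructor expects them); the grid size and radius are arbitrary:

import numpy as np

dim = 32
xs = np.linspace(-0.5, 0.5, dim, dtype=np.float32)
X, Y, Z = np.meshgrid(xs, xs, xs, indexing="ij")
sdf = (np.sqrt(X**2 + Y**2 + Z**2) - 0.3).astype(np.float32)  # sphere, r=0.3

mc = QuadraticMarchingCubes()
verts, tris = mc.run(0.0, sdf, dim, path="sphere.obj")
print(verts.shape, tris.shape)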
class RangeLib(object): def __init__(self, config_file): self.ffi = FFI() self.ffi.cdef(""" typedef void easy_lr ; // avoid exposing the struct internals, fake it as void easy_lr* range_easy_create(const char* config_file); const char ** range_easy_expand(easy_lr* elr, const char * c_range); const char * range_easy_eval(easy_lr* elr, const char * c_range); char * range_easy_compress(easy_lr* elr, const char ** c_nodes); int range_easy_destroy(easy_lr* elr); void free(void *ptr); """) self.rangelib_ffi = self.ffi.dlopen("libcrange.so") self.libc_ffi = self.ffi.dlopen("libc.so.6") self.elr = self.rangelib_ffi.range_easy_create(self.ffi.new("char[]", config_file)) def __charpp_to_native(self, arg): i = 0 arr = [] while arg[i] != self.ffi.NULL: x = self.ffi.string(arg[i]) self.libc_ffi.free(arg[i]) arr.append(x) i += 1 self.libc_ffi.free(arg) return arr def expand(self, c_range): ret = self.rangelib_ffi.range_easy_expand(self.elr, self.ffi.new("char[]", c_range)) x = self.__charpp_to_native(ret) return x def compress(self, nodes): char_arg = [ self.ffi.new("char[]", x) for x in nodes ] char_arg.append(self.ffi.NULL) retptr = self.rangelib_ffi.range_easy_compress(self.elr, self.ffi.new("char*[]", char_arg)) ret = self.ffi.string(retptr) self.libc_ffi.free(retptr) return ret def eval(self, c_range): retptr = self.rangelib_ffi.range_easy_eval(self.elr, self.ffi.new("char[]", c_range)) ret = self.ffi.string(retptr) self.libc_ffi.free(retptr) return ret def __del__(self): self.rangelib_ffi.range_easy_destroy(self.elr)
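Illustrative usage of the wrapper above. The config path and range expressions are site-specific placeholders; note that the class passes Python strings straight into char[] arguments, so as written it targets Python 2 (on Python 3 the arguments would need to be bytes):

rl = RangeLib("/etc/libcrange/range.conf")
nodes = rl.expand("%my-cluster")              # -> list of node names
value = rl.eval("%my-cluster & %other-cluster")
compact = rl.compress(["host1", "host2", "host3"])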