def setUpClass(cls):
    """Shared per-test-class setup.

    Probes internet access once (cached on SharedState) and, on Windows,
    loads kernel32/ws2_32 through cffi once (also cached on SharedState),
    then mirrors the cached values onto the class.
    """
    # Reset everything back to the default values first.
    cls.ffi = None
    cls.kernel32 = None
    cls.ws2_32 = None
    cls.HAS_INTERNET = None

    # First run and this test case requires internet access. Determine
    # if we have access to the internet then cache the value.
    if cls.REQUIRES_INTERNET and SharedState.HAS_INTERNET is None:
        original_timeout = socket.getdefaulttimeout()
        # Short timeout so unreachable hosts fail fast.
        socket.setdefaulttimeout(1)
        try:
            for hostname in cls.INTERNET_HOSTS:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                try:
                    sock.connect((hostname, cls.INTERNET_PORT))
                    SharedState.HAS_INTERNET = True
                    break
                # pylint: disable=broad-except
                except Exception:  # pragma: no cover
                    # Any failure just means "try the next host".
                    pass
                finally:
                    sock.close()
            else:  # pragma: no cover
                # Loop exhausted without a successful connect().
                SharedState.HAS_INTERNET = False
        finally:
            socket.setdefaulttimeout(original_timeout)

    # Windows only: load the system DLLs needed for error-code tests.
    if os.name == "nt" and SharedState.ffi is None:
        try:
            ffi = FFI()
            ffi.set_unicode(True)
            ffi.cdef(dedent("""
            // kernel32 functions
            DWORD GetLastError(void);
            void SetLastError(DWORD);

            // ws2_32 functions
            void WSASetLastError(int);
            int WSAGetLastError(void);
            """))
            SharedState.ffi = ffi
            SharedState.kernel32 = ffi.dlopen("kernel32")
            SharedState.ws2_32 = ffi.dlopen("ws2_32")
        # pylint: disable=broad-except
        except Exception as error:  # pragma: no cover
            if os.name == "nt":
                # Store the exception itself so later code can see why
                # loading failed.
                # pylint: disable=redefined-variable-type
                SharedState.ffi = error

    cls.HAS_INTERNET = SharedState.HAS_INTERNET
    cls.ffi = SharedState.ffi
    cls.kernel32 = SharedState.kernel32
    cls.ws2_32 = SharedState.ws2_32
def test_explicit_cdecl_stdcall(self):
    """__cdecl is the default calling convention; __stdcall (WINAPI) only
    produces a distinct function type on 32-bit Windows — on win64 both
    spellings collapse to the same type."""
    if sys.platform != 'win32':
        py.test.skip("Windows-only test")
    if self.Backend is CTypesBackend:
        py.test.skip("not with the ctypes backend")
    win64 = (sys.maxsize > 2**32)
    #
    ffi = FFI(backend=self.Backend())
    ffi.cdef("""
        BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency);
    """)
    m = ffi.dlopen("Kernel32.dll")
    tp = ffi.typeof(m.QueryPerformanceFrequency)
    assert str(tp) == "<ctype 'int(*)(long long *)'>"
    #
    ffi = FFI(backend=self.Backend())
    ffi.cdef("""
        BOOL __cdecl QueryPerformanceFrequency(LONGLONG *lpFrequency);
    """)
    m = ffi.dlopen("Kernel32.dll")
    tpc = ffi.typeof(m.QueryPerformanceFrequency)
    # explicit __cdecl is the same (interned) type as the default
    assert tpc is tp
    #
    ffi = FFI(backend=self.Backend())
    ffi.cdef("""
        BOOL WINAPI QueryPerformanceFrequency(LONGLONG *lpFrequency);
    """)
    m = ffi.dlopen("Kernel32.dll")
    tps = ffi.typeof(m.QueryPerformanceFrequency)
    if win64:
        # only one calling convention exists on win64
        assert tps is tpc
    else:
        assert tps is not tpc
        assert str(tps) == "<ctype 'int(__stdcall *)(long long *)'>"
    #
    ffi = FFI(backend=self.Backend())
    ffi.cdef("typedef int (__cdecl *fnc_t)(int);")
    ffi.cdef("typedef int (__stdcall *fns_t)(int);")
    tpc = ffi.typeof("fnc_t")
    tps = ffi.typeof("fns_t")
    assert str(tpc) == "<ctype 'int(*)(int)'>"
    if win64:
        assert tps is tpc
    else:
        assert str(tps) == "<ctype 'int(__stdcall *)(int)'>"
    #
    fnc = ffi.cast("fnc_t", 0)
    fns = ffi.cast("fns_t", 0)
    ffi.new("fnc_t[]", [fnc])
    if not win64:
        # mixing conventions in an array initializer must be rejected
        # on 32-bit
        py.test.raises(TypeError, ffi.new, "fnc_t[]", [fns])
        py.test.raises(TypeError, ffi.new, "fns_t[]", [fnc])
    ffi.new("fns_t[]", [fns])
class BGPDump:
    """Context-manager wrapper around libbgpdump for reading BGP dump files.

    Usage:
        with BGPDump(path) as dump:
            for entry in dump:
                ...
    """

    def __init__(self, filename):
        # Python-2 `unicode` paths are coerced to byte strings for the C API.
        if isinstance(filename, unicode):
            filename = str(filename)
        self.filename = filename
        self.ffi = None
        self.libc = None
        self.libbgpdump = None
        self.handle = None

    def __enter__(self):
        """Load libbgpdump in ABI mode and open the dump file."""
        self.ffi = FFI()
        lib_dir = resource_filename('bgpdumpy', 'lib') or ''
        self.libc = self.ffi.dlopen(None)
        self.libbgpdump = self.ffi.dlopen(
            os.path.join(lib_dir, 'libbgpdump.so'))
        self.ffi.cdef(CTypes)
        self.handle = self.libbgpdump.bgpdump_open_dump(self.filename)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Close the dump and drop every library reference."""
        self.libbgpdump.bgpdump_close_dump(self.handle)
        del self.libbgpdump
        self.libbgpdump = None
        del self.libc
        self.libc = None
        del self.ffi
        self.ffi = None

    def __iter__(self):
        """Yield BGPEntry objects until EOF; each C record is freed after
        the consumer is done with it."""
        while self.handle.eof != 1:
            record = self.libbgpdump.bgpdump_read_next(self.handle)
            # sometimes we get NULL back for no apparent reason
            if record == self.ffi.NULL:
                continue
            try:
                yield BGPEntry(self, record)
            finally:
                self.libbgpdump.bgpdump_free_mem(record)

    @property
    def version(self):
        """Version string reported by the loaded libbgpdump."""
        return self.ffi.string(self.libbgpdump.bgpdump_version())
def test_dlopen_filename(self):
    """dlopen() must accept both a full path and a bare soname for libm."""
    libpath = ctypes.util.find_library(lib_m)
    if not libpath:
        py.test.skip("%s not found" % lib_m)
    ffi = FFI(backend=self.Backend())
    ffi.cdef("""
        double cos(double x);
    """)
    # full path
    lib = ffi.dlopen(libpath)
    assert lib.cos(1.23) == math.cos(1.23)
    # bare filename
    lib = ffi.dlopen(os.path.basename(libpath))
    assert lib.cos(1.23) == math.cos(1.23)
def test_function_pointer(self):
    """Callbacks and casts both produce function-pointer cdata; the repr
    drops `const` from argument types."""
    ffi = FFI(backend=self.Backend())
    def cb(charp):
        # const char * shows up as plain 'char *' in the cdata repr
        assert repr(charp).startswith("<cdata 'char *' 0x")
        return 42
    fptr = ffi.callback("int(*)(const char *txt)", cb)
    # every callback() call wraps a fresh cdata object, so != holds
    assert fptr != ffi.callback("int(*)(const char *)", cb)
    assert repr(fptr) == "<cdata 'int(*)(char *)' calling %r>" % (cb,)
    res = fptr(b"Hello")
    assert res == 42
    #
    if not sys.platform.startswith('linux'):
        py.test.skip("probably no symbol 'stderr' in the lib")
    ffi.cdef("""
        int fputs(const char *, void *);
        void *stderr;
    """)
    ffi.C = ffi.dlopen(None)
    fptr = ffi.cast("int(*)(const char *txt, void *)", ffi.C.fputs)
    # casting an existing function keeps its address, so == holds
    assert fptr == ffi.C.fputs
    assert repr(fptr).startswith("<cdata 'int(*)(char *, void *)' 0x")
    with FdWriteCapture() as fd:
        fptr(b"world\n", ffi.C.stderr)
    res = fd.getvalue()
    assert res == b'world\n'
def test_missing_function(self):
    """A cdef'd function absent from the library is simply not an attribute."""
    ffi = FFI(backend=self.Backend())
    ffi.cdef("""
        int nonexistent();
    """)
    lib = ffi.dlopen(lib_m)
    assert not hasattr(lib, 'nonexistent')
def generate_tgl_update():
    """Allocate and populate a `struct tgl_update_callback *` cdata wired to
    the module-level `_tgl_upd_*` callback functions, with C printf used for
    logging.  Returns the cdata struct pointer.
    """
    from cffi import FFI
    # Local FFI only used to reach libc's printf for the logprintf slot.
    ffi_ = FFI()
    ffi_.cdef("""int printf(const char *format, ...);""")
    C = ffi_.dlopen(None)
    # NOTE(review): `cb` is allocated with the module-level `ffi` (which
    # knows struct tgl_update_callback), not the local `ffi_` above —
    # presumably intentional; confirm `ffi` is the tgl-aware instance.
    cb = ffi.new('struct tgl_update_callback *')
    cb.new_msg = _tgl_upd_new_msg_cb
    cb.marked_read = _tgl_upd_marked_read_cb
    cb.logprintf = C.printf
    cb.type_notification = _tgl_upd_type_notification_cb
    cb.type_in_chat_notification = _tgl_upd_type_in_chat_notification_cb
    cb.type_in_secret_chat_notification = _tgl_upd_type_in_secret_chat_notification_cb
    cb.status_notification = _tgl_upd_status_notification_cb
    cb.user_registered = _tgl_upd_user_registered_cb
    cb.user_activated = _tgl_upd_user_activated_cb
    cb.new_authorization = _tgl_upd_new_authorization_cb
    cb.chat_update = _tgl_upd_chat_update_cb
    cb.user_update = _tgl_upd_user_update_cb
    cb.secret_chat_update = _tgl_upd_secret_chat_update_cb
    cb.msg_receive = _tgl_upd_msg_receive_cb
    cb.our_id = _tgl_upd_our_id_cb
    cb.notification = _tgl_upd_notification_cb
    cb.user_status_update = _tgl_upd_user_status_update_cb
    #use the default implementation
    #cb.create_print_name = _tgl_upd_create_print_name_cb
    return cb
def _init_api():
    """Locate libssh and load it through cffi in ABI mode.

    Returns:
        tuple: (ffi, lib) — the FFI instance and the opened library handle.

    Raises:
        exceptions.PystasshException: when libssh cannot be located.
    """
    lib_name = ctypes.util.find_library("ssh")
    if not lib_name:
        raise exceptions.PystasshException("libssh not found, please visit https://www.libssh.org/get-it/")

    ffi = FFI()
    lib = ffi.dlopen(lib_name)
    # Minimal subset of the libssh API used by this package.
    declarations = """
        void* ssh_new();
        int ssh_options_set(void*, int, char*);
        int ssh_connect(void*);
        int ssh_disconnect(void*);
        int ssh_is_connected(void*);
        char* ssh_get_error(void*);

        int ssh_userauth_password(void*, char*, char*);
        int ssh_userauth_autopubkey(void*, char*);

        void* ssh_channel_new(void*);
        int ssh_channel_open_session(void*);
        int ssh_channel_is_open(void*);
        void ssh_channel_free(void*);
        int ssh_channel_request_exec(void*, char*);
        int ssh_channel_request_pty(void*);
        int ssh_channel_request_shell(void*);
        int ssh_channel_get_exit_status(void*);
        int ssh_channel_read(void*, char*, int, int);
        int ssh_channel_send_eof(void*);
    """
    ffi.cdef(declarations)
    return ffi, lib
def test_no_args():
    """A zero-argument declaration maps to '<func (), <int>, False>'."""
    ffi = FFI(backend=FakeBackend())
    ffi.cdef("""
        int foo(void);
    """)
    lib = ffi.dlopen(None)
    assert lib.foo.BType == '<func (), <int>, False>'
def test_simple():
    """cdef + dlopen exposes sin() with the expected name and BType."""
    ffi = FFI(backend=FakeBackend())
    ffi.cdef("double sin(double x);")
    lib = ffi.dlopen(lib_m)
    sin_func = lib.sin  # should be a callable on real backends
    assert sin_func.name == 'sin'
    assert sin_func.BType == '<func (<double>), <double>, False>'
def test_pipe():
    """An array parameter (int[2]) decays to a pointer in the BType."""
    ffi = FFI(backend=FakeBackend())
    ffi.cdef("int pipe(int pipefd[2]);")
    lib = ffi.dlopen(None)
    pipe_func = lib.pipe
    assert pipe_func.name == 'pipe'
    assert pipe_func.BType == '<func (<pointer to <int>>), <int>, False>'
def test_vararg():
    """A trailing '...' sets the varargs flag (last BType field) to True."""
    ffi = FFI(backend=FakeBackend())
    ffi.cdef("short foo(int, ...);")
    lib = ffi.dlopen(None)
    vararg_func = lib.foo
    assert vararg_func.name == 'foo'
    assert vararg_func.BType == '<func (<int>), <short>, True>'
def init():
    '''Initialize the module.

    Recreates the standard container's home/cache directories, rebinds /dev
    into the container via libc mount(2), and resets StdChal class state.
    '''
    with StackContext(Privilege.fileaccess):
        # Wipe and recreate the working directories; missing dirs are fine.
        try:
            shutil.rmtree('container/standard/home')
        except FileNotFoundError:
            pass
        os.mkdir('container/standard/home', mode=0o771)
        try:
            shutil.rmtree('container/standard/cache')
        except FileNotFoundError:
            pass
        os.mkdir('container/standard/cache', mode=0o771)

    # Call mount(2)/umount(2) straight from libc via cffi ABI mode.
    ffi = FFI()
    ffi.cdef('''int mount(const char source[], const char target[],
        const char filesystemtype[], unsigned long mountflags,
        const void *data);''')
    ffi.cdef('''int umount(const char *target);''')
    libc = ffi.dlopen('libc.so.6')
    with StackContext(Privilege.fullaccess):
        # Best-effort: return codes are ignored; a failing umount just
        # means the bind mount was not present yet.
        libc.umount(b'container/standard/dev')
        libc.mount(b'/dev', b'container/standard/dev', b'', MS_BIND, \
            ffi.NULL)

    StdChal.null_fd = os.open('/dev/null', os.O_RDWR | os.O_CLOEXEC)
    StdChal.build_cache = {}
    StdChal.build_cache_refcount = {}
def test_msobox_function_interface_on_ffcn_so_calling_ffcn( temp_mf_so_from_mf_f_file ): """.""" # path to shared library so_path = str(temp_mf_so_from_mf_f_file) # print "so_path: ", so_path # load shared library as module module = import_shared_library(so_path) # initialize foreign function interface for library header = """ void ffcn_(double *f, double *t, double *x, double *p, double *u); void ffcn_d_xpu_v_( double *f, double *f_d, double *t, double *x, double *x_d, double *p, double *p_d, double *u, double *u_d, int *nbdirs ); """ # open shared library ffi = FFI() ffi.cdef(header) module = ffi.dlopen(so_path) # function declaration and dimensions func = { "type": "ffcn", "name": "ffcn", "args": ["f", "t", "x", "p", "u"], "deriv": [] } dims = {"f": 5, "t": 1, "x": 5, "p": 5, "u": 4} # create function ffcn = Function(module, dims, func, ffi=ffi, verbose=False) # define input values t = numpy.random.random(dims["t"]) x = numpy.random.random(dims["x"]) p = numpy.random.random(dims["p"]) u = numpy.random.random(dims["u"]) # define output variables desired = numpy.zeros(dims["f"]) actual = numpy.zeros(dims["f"]) # call functions ffcn(actual, t, x, p, u) ffcn_py(desired, t, x, p, u) # compare values print "" print "actual: ", actual print "desired: ", desired print "error: ", lg.norm(desired - actual) assert_allclose(actual, desired)
def test_vararg(self):
    """fprintf varargs: extra arguments must carry explicit C types
    (ffi.cast / ffi.new), since no prototype information exists for them."""
    if not sys.platform.startswith('linux'):
        py.test.skip("probably no symbol 'stderr' in the lib")
    ffi = FFI(backend=self.Backend())
    ffi.cdef("""
        int fprintf(void *, const char *format, ...);
        void *stderr;
    """)
    ffi.C = ffi.dlopen(None)
    with FdWriteCapture() as fd:
        ffi.C.fprintf(ffi.C.stderr, b"hello with no arguments\n")
        # bytes are accepted directly for char* varargs
        ffi.C.fprintf(ffi.C.stderr,
                      b"hello, %s!\n", ffi.new("char[]", b"world"))
        ffi.C.fprintf(ffi.C.stderr,
                      ffi.new("char[]", b"hello, %s!\n"),
                      ffi.new("char[]", b"world2"))
        # integer varargs need explicit casts to fix their C type
        ffi.C.fprintf(ffi.C.stderr,
                      b"hello int %d long %ld long long %lld\n",
                      ffi.cast("int", 42),
                      ffi.cast("long", 84),
                      ffi.cast("long long", 168))
        ffi.C.fprintf(ffi.C.stderr, b"hello %p\n", ffi.NULL)
    res = fd.getvalue()
    assert res == (b"hello with no arguments\n"
                   b"hello, world!\n"
                   b"hello, world2!\n"
                   b"hello int 42 long 84 long long 168\n"
                   b"hello (nil)\n")
def load_inline_module():
    """
    Create an inline module, return the corresponding ffi and dll objects.
    """
    from cffi import FFI

    # We can't rely on libc availability on Windows anymore, so we use our
    # own compiled wrappers (see https://bugs.python.org/issue23606).

    defs = """
    double _numba_test_sin(double x);
    double _numba_test_cos(double x);
    int foo(int a, int b, int c);
    """

    source = """
    static int foo(int a, int b, int c)
    {
        return a + b * c;
    }
    """

    interface = FFI()
    interface.cdef(defs)
    # Load the _helperlib namespace
    from numba import _helperlib
    dll = interface.dlopen(_helperlib.__file__)
    return interface, dll
def solve_it(input_data):
    """Solve a knapsack instance with the native libknap solver.

    Args:
        input_data: text whose first line is "<nitems> <capacity>" and whose
            remaining non-empty lines are "<value> <weight>" pairs.

    Returns:
        str: two lines — "<objective> 1" then the space-separated 0/1
        take-vector of length nitems.
    """
    # Parse "value weight" pairs; blank lines are skipped.
    arr = [(int(n[0]), int(n[1]))
           for n in [line.split() for line in input_data.split("\n") if line]]
    nitems, capacity = arr[0]
    values, weights = zip(*arr[1:])

    ffi = FFI()
    ffi.cdef("""
        typedef enum {false, true} bool;
        typedef struct {
            bool success;
            int value;
            bool *route;
        } Result;
        Result run(int *values, int *weights, int nitems, int capacity);
    """)
    lib = ffi.dlopen("libknap.so")
    # cffi converts the Python int sequences into temporary C int arrays.
    # (Removed dead local `fpath = "data/ks_4_0"` — it was never used.)
    res = lib.run(values, weights, nitems, capacity)

    out = "%d 1\n" % res.value
    out += " ".join(str(res.route[i]) for i in range(nitems))
    return out
def netscape_spki_from_b64(b64):
    """Converts a base64 encoded Netscape SPKI DER to a crypto.NetscapeSPKI.

    PyOpenSSL does not yet support doing that by itself, so some work around
    through FFI and "internals-patching" trickery is required to perform this
    conversion. https://github.com/pyca/pyopenssl/issues/177 tracks the issue
    upstream.

    Raises:
        ValueError: if the base64 payload is not a valid SPKI structure.
    """
    # BUG FIX: the guard used to test for attribute 'NETSCAPE_SPKI_b64_decode',
    # which is never set (the wrapper is stored as '.func'), so the cffi
    # setup and dlopen re-ran on every call.  Test the attribute that is
    # actually stored.
    if not hasattr(netscape_spki_from_b64, 'func'):
        from cffi import FFI as CFFI
        from OpenSSL._util import ffi as _sslffi, lib as _ssllib
        cffi = CFFI()
        cffi.cdef('void* NETSCAPE_SPKI_b64_decode(const char *str, int len);')
        lib = cffi.dlopen('libssl.so')

        def wrapper(b64, lib=lib):
            if isinstance(b64, str):
                b64 = b64.encode('ascii')
            b64_ptr = _sslffi.new('char[]', b64)
            spki_obj = lib.NETSCAPE_SPKI_b64_decode(b64_ptr, len(b64))
            if spki_obj == cffi.NULL:
                raise ValueError("Invalid SPKI base64")

            # keep the input buffer alive for as long as the SPKI object
            def free(spki_obj, ref=b64_ptr):
                _ssllib.NETSCAPE_SPKI_free(spki_obj)
            return _sslffi.gc(spki_obj, free)

        netscape_spki_from_b64.func = wrapper

    ret = crypto.NetscapeSPKI()
    ret._spki = netscape_spki_from_b64.func(b64)
    return ret
def test_void_star_accepts_string(self):
    """A bytes object may be passed where 'const void *' is expected."""
    ffi = FFI(backend=self.Backend())
    ffi.cdef("""int strlen(const void *);""")
    needs_dlopen_none()
    lib = ffi.dlopen(None)
    assert lib.strlen(b"hello") == 5
def __init__(self, file_path, headers_path):
    """Open the shared library at *file_path* in cffi ABI mode and, when
    *headers_path* is given, keep its lines on self.headers."""
    self.ffi = FFI()
    self.lib = self.ffi.dlopen(file_path)
    self.headers = []
    if headers_path is not None:
        with open(headers_path) as header_file:
            self.headers = header_file.readlines()
def load(self):
    """Build a fresh FFI that includes self.ffi, declare the accumulated
    cdef lines, dlopen the main program, and attach registered callbacks
    (each under its '_'-prefixed name).  Stores the handle on self.lib."""
    combined = FFI()
    combined.include(self.ffi)
    combined.cdef('\n'.join(self.cdef))
    lib = combined.dlopen(None)
    for cb_name, cb_func in self.callbacks.items():
        setattr(lib, '_' + cb_name, cb_func)
    self.lib = lib
def loadCffi(libname, cdef_text, libpath):
    """Load <libpath>/<libname>.so in cffi ABI mode, caching per libname.

    Args:
        libname: library basename without the ".so" suffix.
        cdef_text: C declarations to feed to ffi.cdef().
        libpath: directory containing the shared object.

    Returns:
        tuple: (lib, ffi) for the requested library.

    BUG FIX: the original returned the cached (lib, ffi) pair on a cache
    hit but fell off the end (returning None) on the first, cache-filling
    call.  Both paths now return the tuple.
    """
    if libname in _ft_cffi:
        return _ft_cffi[libname]

    _ffi = FFI()
    _ffi.cdef(cdef_text)
    sofile = libpath + "/" + libname + ".so"
    _lib = _ffi.dlopen(sofile)
    _ft_cffi[libname] = (_lib, _ffi)
    return _ft_cffi[libname]
def test_override():
    """Redeclaring a function raises FFIError unless override=True."""
    ffi = FFI(backend=FakeBackend())
    lib = ffi.dlopen(None)
    ffi.cdef("int foo(void);")
    # conflicting redeclaration is rejected by default...
    py.test.raises(FFIError, ffi.cdef, "long foo(void);")
    assert lib.foo.BType == '<func (), <int>, False>'
    # ...but accepted when explicitly overridden
    ffi.cdef("long foo(void);", override=True)
    assert lib.foo.BType == '<func (), <long>, False>'
def test_sin(self):
    """Calling libm's sin() through dlopen matches math.sin."""
    ffi = FFI(backend=self.Backend())
    ffi.cdef("""
        double sin(double x);
    """)
    lib = ffi.dlopen(lib_m)
    assert lib.sin(1.23) == math.sin(1.23)
def test_unsigned_char_star_accepts_string(self):
    """A bytes object may be passed where 'unsigned char *' is expected."""
    if self.Backend is CTypesBackend:
        py.test.skip("not supported by the ctypes backend")
    ffi = FFI(backend=self.Backend())
    ffi.cdef("""int strlen(unsigned char *);""")
    lib = ffi.dlopen(None)
    assert lib.strlen(b"hello") == 5
def test_dlopen_flags(self):
    """dlopen() accepts explicit RTLD_* flags and still resolves symbols."""
    ffi = FFI(backend=self.Backend())
    ffi.cdef("""
        double cos(double x);
    """)
    lib = ffi.dlopen(lib_m, ffi.RTLD_LAZY | ffi.RTLD_LOCAL)
    assert lib.cos(1.23) == math.cos(1.23)
def test_sin_no_return_value(self):
    # check that 'void'-returning functions work too
    ffi = FFI(backend=self.Backend())
    ffi.cdef("""
        void sin(double x);
    """)
    lib = ffi.dlopen(lib_m)
    result = lib.sin(1.23)
    assert result is None
def test_strchr(self):
    """strchr() returns a pointer into the argument buffer."""
    ffi = FFI(backend=self.Backend())
    ffi.cdef("""
        char *strchr(const char *s, int c);
    """)
    ffi.C = ffi.dlopen(None)
    buf = ffi.new("char[]", b"hello world!")
    found = ffi.C.strchr(buf, ord('w'))
    assert ffi.string(found) == b"world!"
def test_function_typedef(self):
    """A function declared through a typedef'd function type is callable."""
    ffi = FFI(backend=self.Backend())
    ffi.cdef("""
        typedef double func_t(double);
        func_t sin;
    """)
    lib = ffi.dlopen(lib_m)
    assert lib.sin(1.23) == math.sin(1.23)
"libpicosat.so", os.path.join(BASE_DIR, "libpicosat.so"), # TODO: Ideally, supporting multiple OS should boild down to trying to # open the right shared library, e.g.: # "libpicosat.dylib", # os.path.join(BASE_DIR, "libpicosat.dylib"), # "picosat.dll", # os.path.join(BASE_DIR, "libpicosat.dylib"), ] ffi = FFI() ffi.cdef(PICOSAT_MINIMAL_H) picosat = None for libname in NAMES: try: picosat = ffi.dlopen(libname) except OSError: picosat = None if picosat: break if not picosat: raise ImportError("Cannot find %s." % libname) if __name__ == "__main__": # Demo v_cdata = picosat.picosat_version() # Picosat Version print(ffi.string(v_cdata)) # Defines work as expected print(picosat.PICOSAT_SATISFIABLE) pico = picosat.picosat_init()
libfile = '' if 'linux' in platform.lower(): if os_is_64_bit: libfile = os.path.join(pathlib, 'cbc-c-linux-x86-64.so') elif platform.lower().startswith('win'): if os_is_64_bit: libfile = os.path.join(pathlib, 'cbc-c-windows-x86-64.dll') else: libfile = os.path.join(pathlib, 'cbc-c-windows-x86-32.dll') elif platform.lower().startswith('darwin') or platform.lower().startswith( 'macos'): if os_is_64_bit: libfile = os.path.join(pathlib, 'cbc-c-darwin-x86-64.dylib') if not libfile: raise Exception("You operating system/platform is not supported") cbclib = ffi.dlopen(libfile) has_cbc = True except Exception: has_cbc = False print('cbc not found') if has_cbc: ffi.cdef(""" typedef int(*cbc_progress_callback)(void *model, int phase, int step, const char *phaseName, double seconds, double lb, double ub,
lib_path = find_library("gurobi{}{}".format( major_ver, minor_ver)) if lib_path is not None: break if lib_path is not None: break if lib_path is None: raise Exception("""Gurobi not found. Plase check if the Gurobi dynamic loadable library is reachable or define the environment variable GUROBI_HOME indicating the gurobi installation path. """) ffi = FFI() grblib = ffi.dlopen(lib_path) print("gurobi version {}.{} found".format(major_ver, minor_ver)) except Exception: raise ImportError CData = ffi.CData os_is_64_bit = maxsize > 2**32 INF = float("inf") MAX_NAME_SIZE = 512 # for variables and constraints ffi.cdef(""" typedef struct _GRBmodel GRBmodel; typedef struct _GRBenv GRBenv; typedef int(*gurobi_callback)(GRBmodel *model, void *cbdata, int where, void *usrdata);
class CVecEnv: """ An environment instance created by an EnvLib, uses the VecEnv interface. https://github.com/openai/baselines/blob/master/baselines/common/vec_env/__init__.py Args: num_envs: number of environments to create lib_dir: a folder containing either lib{name}.so (Linux), lib{name}.dylib (Mac), or {name}.dll (Windows) lib_name: name of the library (minus the lib part) c_func_defs: list of cdefs that are passed to FFI in order to define custom functions that can then be called with env.call_func() options: options to pass to the libenv_make() call for this environment debug: if set to True, check array data to make sure it matches the provided spaces reuse_arrays: reduce allocations by using the same numpy arrays for each reset(), step(), and render() call """ class C_Space: def __init__(self, name: str, is_discrete: bool, shape: tuple, dtype: type, limits: tuple) -> None: self.name = name self.is_discrete = is_discrete self.shape = shape self.dtype = dtype self.limits = limits def to_libenv_space(self, ffi, c_lib): c_space = ffi.new("struct libenv_space *") assert (len(self.name) < c_lib.LIBENV_MAX_NAME_LEN - 1), "length of space name is too long" c_space.name = self.name.encode("utf8") c_space.type = c_lib.LIBENV_SPACE_TYPE_DISCRETE if self.is_discrete else c_lib.LIBENV_SPACE_TYPE_BOX c_space.dtype = CVecEnv._var_to_libenv(ffi, c_lib, self.dtype) for i, dim in enumerate(self.shape): c_space.shape[i] = dim c_space.ndim = len(self.shape) if c_space.dtype == c_lib.LIBENV_DTYPE_UINT8: c_space.low.uint8 = self.limits[0] c_space.high.uint8 = self.limits[1] elif c_space.dtype == c_lib.LIBENV_DTYPE_INT32: c_space.low.int32 = self.limits[0] c_space.high.int32 = self.limits[1] elif c_space.dtype == c_lib.LIBENV_DTYPE_FLOAT32: c_space.low.float32 = self.limits[0] c_space.high.float32 = self.limits[1] else: assert False, "Unknown dtype! This should never happen!" 
return c_space def __init__( self, num_envs: int, lib_dir: str, lib_name: str = "env", c_func_defs: Optional[List[str]] = None, options: Optional[Dict] = None, debug: bool = False, reuse_arrays: bool = False, additional_info_spaces: list = None, additional_obs_spaces: list = None, ) -> None: self._debug = debug self._reuse_arrays = reuse_arrays if options is None: options = {} options = copy.deepcopy(options) if platform.system() == "Linux": lib_filename = f"lib{lib_name}.so" elif platform.system() == "Darwin": lib_filename = f"lib{lib_name}.dylib" elif platform.system() == "Windows": lib_filename = f"{lib_name}.dll" else: raise Exception(f"unrecognized platform {platform.system()}") if c_func_defs is None: c_func_defs = [] # load cdef for libenv.h libenv_cdef = "" with open(os.path.join(SCRIPT_DIR, "libenv.h")) as f: inside_cdef = False for line in f: if line.startswith("// BEGIN_CDEF"): inside_cdef = True elif line.startswith("// END_CDEF"): inside_cdef = False elif line.startswith("#if") or line.startswith("#endif"): continue if inside_cdef: line = line.replace("LIBENV_API", "") libenv_cdef += line self._ffi = FFI() self._ffi.cdef(libenv_cdef) for cdef in c_func_defs: self._ffi.cdef(cdef) self._lib_path = os.path.join(lib_dir, lib_filename) assert os.path.exists( self._lib_path), f"lib not found at {self._lib_path}" # unclear if this is necessary, but nice to not have symbols conflict if possible dlopen_flags = ( self._ffi.RTLD_NOW | self._ffi.RTLD_LOCAL # pylint: disable=no-member ) if platform.system() == "Linux": dlopen_flags |= self._ffi.RTLD_DEEPBIND # pylint: disable=no-member self._c_lib = self._ffi.dlopen(name=self._lib_path, flags=dlopen_flags) # dlclose will be called automatically when the library goes out of scope # https://cffi.readthedocs.io/en/latest/cdef.html#ffi-dlopen-loading-libraries-in-abi-mode # on mac os x, the library may not always be unloaded when you expect # https://developer.apple.com/videos/play/wwdc2017/413/?time=1776 # 
loading/unloading the library all the time can be slow # it may be useful to keep a reference to an environment (and thus the c_lib object) # to avoid this happening self._options = options self._state = STATE_NEEDS_RESET c_options, self._options_keepalives = self._convert_options( self._ffi, self._c_lib, options) self._c_env = self._c_lib.libenv_make(num_envs, c_options[0]) if additional_obs_spaces is not None: for space in additional_obs_spaces: print(f"adding obs space: {space}") self._c_lib.libenv_add_space( self._c_env, self._c_lib.LIBENV_SPACES_OBSERVATION, space.to_libenv_space(self._ffi, self._c_lib)) if additional_info_spaces is not None: for space in additional_info_spaces: print(f"adding info space: {space}") self._c_lib.libenv_add_space( self._c_env, self._c_lib.LIBENV_SPACES_INFO, space.to_libenv_space(self._ffi, self._c_lib)) self.reward_range = (float("-inf"), float("inf")) self.spec = None self.num_envs = num_envs self.observation_space = self._get_spaces( self._c_lib.LIBENV_SPACES_OBSERVATION) self._action_space = self._get_spaces(self._c_lib.LIBENV_SPACES_ACTION) self._info_space = self._get_spaces(self._c_lib.LIBENV_SPACES_INFO) self._render_space = self._get_spaces(self._c_lib.LIBENV_SPACES_RENDER) # allocate buffers self._observations, self._observation_buffers = self._allocate_dict_space( self.num_envs, self.observation_space) # we only use dict spaces for consistency, but action is always a single space # the private version is the dict space, while the public version is a single space assert len(self._action_space.spaces ) == 1, "action space can only be 1 element" assert list(self._action_space.spaces.keys())[0] == ACTION_KEY self.action_space = self._action_space.spaces[ACTION_KEY] dict_actions, self._action_buffers = self._allocate_dict_space( self.num_envs, self._action_space) self._actions = dict_actions[ACTION_KEY] self._renders, self._renders_buffers = self._allocate_dict_space( self.num_envs, self._render_space) self.metadata = { 
"render.modes": list(self._render_space.spaces.keys()) } self._infos, self._infos_buffers = self._allocate_dict_space( self.num_envs, self._info_space) self._rews, self._rews_buffer = self._allocate_array( self.num_envs, np.dtype("float32")) self._dones, self._dones_buffer = self._allocate_array( self.num_envs, np.dtype("bool")) assert np.dtype("bool").itemsize == 1 c_step = self._ffi.new("struct libenv_step *") c_step.obs = self._observation_buffers # cast the pointer to the buffer to avoid a warning from cffi c_step.rews = self._ffi.cast( self._ffi.typeof(c_step.rews).cname, self._rews_buffer) c_step.dones = self._ffi.cast( self._ffi.typeof(c_step.dones).cname, self._dones_buffer) c_step.infos = self._infos_buffers self._c_step = c_step self.closed = False self.viewer = None def __repr__(self): return f"<CVecEnv lib_path={self._lib_path} options={self._options}>" def _numpy_aligned(self, shape, dtype, align=64): """ Allocate an aligned numpy array, based on https://github.com/numpy/numpy/issues/5312#issuecomment-299533915 """ n_bytes = np.prod(shape) * dtype.itemsize arr = np.zeros(n_bytes + (align - 1), dtype=np.uint8) data_align = arr.ctypes.data % align offset = 0 if data_align == 0 else (align - data_align) view = arr[offset:offset + n_bytes].view(dtype) return view.reshape(shape) def _allocate_dict_space( self, num_envs: int, dict_space: gym.spaces.Dict ) -> Tuple[collections.OrderedDict, Any]: """ Allocate arrays for a space, returns an OrderedDict of numpy arrays along with a backing bytearray """ result = collections.OrderedDict() # type: collections.OrderedDict length = len(dict_space.spaces) * num_envs buffers = self._ffi.new(f"void *[{length}]") for space_idx, (name, space) in enumerate(dict_space.spaces.items()): actual_shape = (num_envs, ) + space.shape arr = self._numpy_aligned(shape=actual_shape, dtype=space.dtype) result[name] = arr for env_idx in range(num_envs): buffers[space_idx * num_envs + env_idx] = self._ffi.from_buffer(arr.data[env_idx:]) 
return result, buffers def _allocate_array(self, num_envs: int, dtype: np.dtype) -> Tuple[np.ndarray, Any]: arr = self._numpy_aligned(shape=(num_envs, ), dtype=dtype) return arr, self._ffi.from_buffer(arr.data) @staticmethod def _convert_options(ffi: Any, c_lib: Any, options: Dict) -> Any: """ Convert a dictionary to libenv_options """ keepalives = ( [] ) # add variables to here to keep them alive after this function returns c_options = ffi.new("struct libenv_options *") c_option_array = ffi.new("struct libenv_option[%d]" % len(options)) for i, (k, v) in enumerate(options.items()): name = str(k).encode("utf8") assert (len(name) < c_lib.LIBENV_MAX_NAME_LEN - 1), "length of options key is too long" if isinstance(v, bytes): c_data = ffi.new("char[]", v) dtype = c_lib.LIBENV_DTYPE_UINT8 count = len(v) elif isinstance(v, str): c_data = ffi.new("char[]", v.encode("utf8")) dtype = c_lib.LIBENV_DTYPE_UINT8 count = len(v) elif isinstance(v, bool): c_data = ffi.new("uint8_t*", v) dtype = c_lib.LIBENV_DTYPE_UINT8 count = 1 elif isinstance(v, int): assert -2**31 < v < 2**31 c_data = ffi.new("int32_t*", v) dtype = c_lib.LIBENV_DTYPE_INT32 count = 1 elif isinstance(v, float): c_data = ffi.new("float*", v) dtype = c_lib.LIBENV_DTYPE_FLOAT32 count = 1 elif isinstance(v, np.ndarray): c_data = ffi.new("char[]", v.tobytes()) if v.dtype == np.dtype("uint8"): dtype = c_lib.LIBENV_DTYPE_UINT8 elif v.dtype == np.dtype("int32"): dtype = c_lib.LIBENV_DTYPE_INT32 elif v.dtype == np.dtype("float32"): dtype = c_lib.LIBENV_DTYPE_FLOAT32 else: assert False, f"unsupported type {v.dtype}" count = v.size else: assert False, f"unsupported value {v} for option {k}" c_option_array[i].name = name c_option_array[i].dtype = dtype c_option_array[i].count = count c_option_array[i].data = c_data keepalives.append(c_data) keepalives.append(c_option_array) c_options.items = c_option_array c_options.count = len(options) return c_options, keepalives @staticmethod def _check_arrays(arrays: Dict[str, 
np.ndarray], num_envs: int, dict_space: gym.spaces.Dict) -> None: """ Make sure each array in arrays matches the given dict_space """ for name, space in dict_space.spaces.items(): arr = arrays[name] assert isinstance(arr, np.ndarray) expected_shape = (num_envs, ) + space.shape assert ( arr.shape == expected_shape ), f"array is invalid shape expected={expected_shape} received={arr.shape}" assert ( arr.dtype == space.dtype ), f"array has invalid dtype expected={space.dtype} received={arr.dtype}" if isinstance(space, gym.spaces.Box): # we only support single low/high values assert (len(np.unique(space.low)) == 1 and len(np.unique(space.high)) == 1) low = np.min(space.low) high = np.max(space.high) elif isinstance(space, gym.spaces.Discrete): low = 0 high = space.n - 1 else: assert False, "unrecognized space type" assert np.min(arr) >= low, ( '"%s" has values below space lower bound, %f < %f' % (name, np.min(arr), low)) assert np.max(arr) <= high, ( '"%s" has values above space upper bound, %f > %f' % (name, np.max(arr), high)) def _maybe_copy_ndarray(self, obj: np.ndarray) -> np.ndarray: """ Copy a single numpy array if reuse_arrays is False, otherwise just return the object """ if self._reuse_arrays: return obj else: return obj.copy() def _maybe_copy_dict(self, obj: Dict[str, Any]) -> Dict[str, Any]: """ Copy a list of dicts of numpy arrays if reuse_arrays is False, otherwise just return the object """ if self._reuse_arrays: return obj else: result = {} for name, arr in obj.items(): result[name] = arr.copy() return result def _get_spaces(self, c_name: Any) -> gym.spaces.Dict: """ Get a c space and convert to a gym space """ count = self._c_lib.libenv_get_spaces(self._c_env, c_name, self._ffi.NULL) if count == 0: return gym.spaces.Dict([]) c_spaces = self._ffi.new("struct libenv_space[%d]" % count) self._c_lib.libenv_get_spaces(self._c_env, c_name, c_spaces) # convert to gym spaces spaces = [] for i in range(count): c_space = c_spaces[i] name = 
self._ffi.string(c_space.name).decode("utf8") shape = [] for j in range(c_space.ndim): shape.append(c_space.shape[j]) if c_space.dtype == self._c_lib.LIBENV_DTYPE_UINT8: dtype = np.dtype("uint8") low = c_space.low.uint8 high = c_space.high.uint8 elif c_space.dtype == self._c_lib.LIBENV_DTYPE_INT32: dtype = np.dtype("int32") low = c_space.low.int32 high = c_space.high.int32 elif c_space.dtype == self._c_lib.LIBENV_DTYPE_FLOAT32: dtype = np.dtype("float32") low = c_space.low.float32 high = c_space.high.float32 else: assert False, "unknown dtype" if c_space.type == self._c_lib.LIBENV_SPACE_TYPE_BOX: space = gym.spaces.Box(shape=shape, low=low, high=high, dtype=dtype) elif c_space.type == self._c_lib.LIBENV_SPACE_TYPE_DISCRETE: assert shape == [1], "discrete space must have scalar shape" assert low == 0 and high > 0, "discrete low/high bounds are incorrect" space = gym.spaces.Discrete(n=high + 1) space.dtype = dtype else: assert False, "unknown space type" spaces.append((name, space)) # c spaces are aways a single-layer Dict space return gym.spaces.Dict(spaces) def reset(self) -> Dict[str, np.ndarray]: """ Reset the environment and return the first observation """ self._state = STATE_WAIT_ACT self._c_lib.libenv_reset(self._c_env, self._c_step) return self._maybe_copy_dict(self._observations) def step_async(self, actions: np.ndarray) -> None: """ Asynchronously take an action in the environment, doesn't return anything. 
""" assert self._state == STATE_WAIT_ACT self._state = STATE_WAIT_WAIT if self._debug: self._check_arrays({"action": actions}, self.num_envs, self._action_space) self._actions[:] = actions self._c_lib.libenv_step_async(self._c_env, self._action_buffers, self._c_step) def step_wait( self ) -> Tuple[Dict[str, np.ndarray], np.ndarray, np.ndarray, List[Dict[str, Any]]]: """ Step the environment, returns (obs, rews, dones, infos) """ assert self._state == STATE_WAIT_WAIT self._state = STATE_WAIT_ACT self._c_lib.libenv_step_wait(self._c_env) if self._debug: self._check_arrays(self._observations, self.num_envs, self.observation_space) infos = [{} for _ in range(self.num_envs)] # type: List[Dict] for key, values in self._infos.items(): for env_idx in range(self.num_envs): v = values[env_idx] if v.shape == (1, ): # extract scalar values v = v[0] infos[env_idx][key] = v return ( self._maybe_copy_dict(self._observations), self._maybe_copy_ndarray(self._rews), self._maybe_copy_ndarray(self._dones), infos, ) def step( self, actions: np.ndarray ) -> Tuple[Dict[str, np.ndarray], np.ndarray, np.ndarray, List[Dict[str, Any]]]: """ Step the environment, combines step_async() and step_wait() and makes this interface mostly compatible with the normal gym interface """ self.step_async(actions) return self.step_wait() def render(self, mode: str = "human") -> Union[bool, np.ndarray]: """ Render the environment. mode='human' tells the environment to render in some human visible way and returns True if the user user visible window created by the environment is still open Other modes are up to the environment, but end up returning a numpy array of some kind that will be tiled as if it were an image by this code. Call get_images() instead if that is not the behavior you want. 
""" if (mode == "human" and "human" not in self.metadata["render.modes"] and "rgb_array" in self.metadata["render.modes"]): # fallback human mode viewer = self.get_viewer() self._render(mode="rgb_array") images = self._maybe_copy_ndarray(self._renders["rgb_array"]) viewer.imshow(self._tile_images(images)) return viewer.isopen isopen = self._render(mode=mode) if mode == "human": return isopen else: images = self._maybe_copy_ndarray(self._renders[mode]) return self._tile_images(images) def _render(self, mode) -> Union[bool, np.ndarray]: """Internal render method, returns is_open and updates the buffers in self._render""" assert self._state == STATE_WAIT_ACT assert mode in self.metadata["render.modes"], "unsupported render mode" c_mode = self._ffi.new("char[]", mode.encode("utf8")) # only pass the render buffers for the selected mode space_idx = list(self._render_space.spaces.keys()).index(mode) render_buffers = self._renders_buffers[space_idx * self.num_envs:(space_idx + 1) * self.num_envs] return self._c_lib.libenv_render(self._c_env, c_mode, render_buffers) def _tile_images(self, images: np.ndarray) -> np.ndarray: """Tile a set of NHWC images""" num_images, height, width, chans = images.shape width_images = int(np.ceil(np.sqrt(num_images))) height_images = int(np.ceil(float(num_images) / width_images)) result = np.zeros( (height_images * height, width_images * width, chans), dtype=np.uint8) for col in range(width_images): for row in range(height_images): idx = row * width_images + col if idx >= len(images): continue result[row * height:(row + 1) * height, col * width:(col + 1) * width, :, ] = images[idx] return result @staticmethod def _var_to_libenv(ffi, c_lib, var): _t, _v = (var, None) if (type(var) == type) else (type(var), var) print(f"hmm... 
{_t} {_v}") if issubclass(_t, bytes): dtype = c_lib.LIBENV_DTYPE_UINT8 if _v is None: return dtype return dtype, ffi.new("char[]", _v), len(_v) if issubclass(_t, str): dtype = c_lib.LIBENV_DTYPE_UINT8 if _v is None: return dtype return dtype, ffi.new("char[]", _v.encode("utf8")), len(_v) if issubclass(_t, bool): dtype = c_lib.LIBENV_DTYPE_UINT8 if _v is None: return dtype return dtype, ffi.new("uint8_t*", _v), 1 if issubclass(_t, int): dtype = c_lib.LIBENV_DTYPE_INT32 if _v is None: return dtype assert -2**31 < _v < 2**31 return dtype, ffi.new("int32_t*", _v), 1 if issubclass(_t, float): dtype = c_lib.LIBENV_DTYPE_FLOAT32 if _v is None: return dtype return dtype, ffi.new("float*", _v), 1 if issubclass(_t, np.ndarray): assert _v is not None, f"{_t} can have different dtypes. Use a specific instance." if _v.dtype == np.dtype("uint8"): dtype = c_lib.LIBENV_DTYPE_UINT8 elif _v.dtype == np.dtype("int32"): dtype = c_lib.LIBENV_DTYPE_INT32 elif _v.dtype == np.dtype("float32"): dtype = c_lib.LIBENV_DTYPE_FLOAT32 else: assert False, f"unsupported type {_v.dtype}" return dtype, ffi.new("char[]", _v.tobytes()), _v.size assert False, f"unsupported value {var}" @staticmethod def _var_from_libenv(): pass def get_images(self) -> np.ndarray: """ Get rendered images from the environments, if supported. 
        The returned array's shape will be (num_envs, height, width, num_colors)
        """
        self._render(mode="rgb_array")
        return self._maybe_copy_ndarray(self._renders["rgb_array"])

    def get_viewer(self):
        """Get the viewer instance being used by render()"""
        if self.viewer is None:
            # imported lazily so headless use never pulls in a GUI toolkit
            from gym.envs.classic_control import rendering
            self.viewer = rendering.SimpleImageViewer()
        return self.viewer

    def close_extras(self):
        """
        Override this to close environment-specific resources without having to override close()

        This method is guaranteed to only be called once, unlike close()
        """

    def close(self) -> None:
        """Close the environment and free any resources associated with it"""
        # make close() idempotent; __del__ may run after an explicit close()
        if not hasattr(self, "closed") or self.closed:
            return

        self.closed = True
        self._c_lib.libenv_close(self._c_env)
        # drop cffi handles so the shared library and keepalive buffers
        # can be garbage collected
        self._c_lib = None
        self._ffi = None
        self._options_keepalives = None
        if self.viewer is not None:
            self.viewer.close()
        self.close_extras()

    def call_func(self, name: str, *args: Any) -> Any:
        """
        Call a function of the libenv declared in c_func_defs
        """
        return getattr(self._c_lib, name)(*args)

    @property
    def unwrapped(self):
        # mirror gym's unwrapped chaining when this object wraps another venv
        if hasattr(self, "venv"):
            return self.venv.unwrapped  # pylint: disable=no-member
        else:
            return self

    def seed(self, seed=None):
        """
        Seed the environment, this isn't used by VecEnvs but is part of the Gym Env API
        """

    def __del__(self):
        self.close()
import os
import sys

from cffi import FFI

# Declare the one exported symbol we need from the helper DLL.
FFI_ = FFI()
FFI_.cdef('extern int AddIntegers(int a, int b);')

# The DLL ships alongside this file under fplib/.
libs_path = os.path.join(os.path.dirname(__file__), 'fplib')
dll_path = os.path.join(libs_path, "test_interop.dll")

# Mode 1 requests lazy symbol binding.
LIB = FFI_.dlopen(dll_path, 1)
dir(LIB)

blah = LIB.AddIntegers(1, 5)
print(blah)
# cffi ABI-mode equivalent of the older ctypes approach
# (cdll.LoadLibrary + argtypes/restype declarations).
from cffi import FFI

ffi = FFI()
lib = ffi.dlopen('./libunit.so')

# Declaring the signature after dlopen is fine in ABI mode: symbols are
# resolved when the attribute is first accessed.
ffi.cdef('''
int add(int x, int y);
''')

print("2+2 = ", lib.add(2, 2))
class FFIWrapper(ModelRunner):
    """cffi ABI-mode wrapper around the compiled model shared library."""

    def __init__(self, force_build=False, template=None, mhash=None):
        # mhash identifies an already-built library; otherwise (re)build
        # from the template.
        if mhash is not None:
            self.template = template
            self._init_ffi(mhash)
        else:
            self.reload(force_build, template)

    def _invalidate(self):
        # Drop every cffi handle so the shared library can be rebuilt
        # and reloaded without stale references keeping it mapped.
        import gc
        gc.collect()
        self.ffi = None
        self.awralib = None
        self.forcing = None
        self.outputs = None
        self.initial_states = None
        self.final_states = None
        self.parameters = None
        self.spatial = None
        self.hruspatial = None
        self.hruparams = None
        self.hypso = None
        gc.collect()

    def _init_ffi(self, mhash):
        # Key lists describing the model's state/hypsometry variables.
        from .support import get_input_meta
        _imeta = get_input_meta()
        self._STATE_KEYS = list(_imeta['STATES_CELL'])
        self._STATE_KEYS_HRU = list(_imeta['STATES_HRU'])
        self._HYPSO_KEYS = list(_imeta['INPUTS_HYPSO'])

        from cffi import FFI
        self.ffi = FFI()
        header_fn = filename_for_hash(mhash, '.h')
        lib_fn = filename_for_hash(mhash, '.so')
        # The generated header doubles as the cdef source.
        with open(header_fn, 'r') as fh:
            self.ffi.cdef(fh.read())
        self.awralib = self.ffi.dlopen(lib_fn)

        # Allocate the C structs the model entry point expects; the
        # two-element arrays hold one slot per HRU.
        self.forcing = self.ffi.new("Forcing*")
        self.outputs = self.ffi.new("Outputs*")
        self.initial_states = self.ffi.new("States *")
        self.final_states = self.ffi.new("States *")
        self.parameters = self.ffi.new("Parameters *")
        self.spatial = self.ffi.new("Spatial *")
        self.hruspatial = self.ffi.new("HRUSpatial[2]")
        self.hruparams = self.ffi.new("HRUParameters[2]")
        self.hypso = self.ffi.new("Hypsometry *")

    def reload(self, force_build=False, template=None):
        self._invalidate()
        if template is None:
            from .settings import DEFAULT_TEMPLATE
            template = DEFAULT_TEMPLATE
        # validate_or_rebuild returns the hash of a usable build.
        mhash = validate_or_rebuild(template, force=force_build)
        self.template = template
        self._init_ffi(mhash)

    def _cast(self, ndarr, to_type=np.float64, promote=True):
        '''
        Ensures inputs are in correct datatypes for model.
''' if not ndarr.flags['C_CONTIGUOUS']: ndarr = ndarr.flatten() if ndarr.dtype != to_type: if promote: ndarr = ndarr.astype(to_type) else: raise Exception("Incorrect dtype", ndarr.dtype, to_type) self._temp_cast.append(ndarr) typestr = TYPEMAP[to_type] return self.ffi.cast(typestr, ndarr.ctypes.data) def _promote(self, v, shape): if isinstance(v, Number): out = np.empty(shape, dtype=np.float64) out[...] = v self._temp_cast.append(out) return out else: return v def _promote_except(self, v, shape): if isinstance(v, Number): raise Exception("Scalar %s supplied for spatial value" % v) else: return v def run_over_dimensions(self, inputs, dims): return self.run_from_mapping(inputs, dims['time'], dims['cell']) def run_from_mapping(self, mapping, timesteps, cells): #forcing_np = {} #forcealive = [] self._temp_cast = [] promote = self._promote #for k in forcing_args: for k in self.template['INPUTS_FORCING']: nval = promote(mapping[k], ( timesteps, cells, )) self.forcing.__setattr__(k, self._cast(nval)) outputs_np = {} #outputs_hru_np = [] ALL_OUTPUTS = self.template['OUTPUTS_AVG'] + self.template[ 'OUTPUTS_CELL'] for k in ALL_OUTPUTS: outputs_np[k] = arr = np.empty((timesteps, cells)) self.outputs.__setattr__(k, self._cast(arr)) for hru in range(2): for k in self.template['OUTPUTS_HRU']: full_k = k + '_hrusr' if hru is 0 else k + '_hrudr' outputs_np[full_k] = arr = np.empty((timesteps, cells)) self.outputs.hru[hru].__setattr__(k, self._cast(arr)) outputs_np['final_states'] = {} for k in self._STATE_KEYS: nval = promote(mapping['init_' + k], (cells, )) self.initial_states.__setattr__(k, self._cast(nval)) outputs_np['final_states'][k] = sval = np.empty((cells, )) self.final_states.__setattr__(k, self._cast(sval)) for k in self._STATE_KEYS_HRU: nval0 = promote(mapping['init_' + k + '_hrusr'], (cells, )) self.initial_states.hru[0].__setattr__(k, self._cast(nval0)) nval1 = promote(mapping['init_' + k + '_hrudr'], (cells, )) self.initial_states.hru[1].__setattr__(k, 
self._cast(nval1)) outputs_np['final_states'][k + '_hrusr'] = srval = np.empty( (cells, )) outputs_np['final_states'][k + '_hrudr'] = drval = np.empty( (cells, )) self.final_states.hru[0].__setattr__(k, self._cast(srval)) self.final_states.hru[1].__setattr__(k, self._cast(drval)) for k in self.template['INPUTS_SCALAR']: rs = mapping[k] self.parameters.__setattr__(k, rs) for k in self.template['INPUTS_SCALAR_HRU']: self.hruparams[0].__setattr__(k, mapping[k + '_hrusr']) self.hruparams[1].__setattr__(k, mapping[k + '_hrudr']) for k in self.template['INPUTS_SPATIAL']: nval = promote(mapping[k], (cells, )) self.spatial.__setattr__(k, self._cast(nval)) for k in self.template['INPUTS_SPATIAL_HRU']: self.hruspatial[0].__setattr__( k, self._cast(promote(mapping[k + '_hrusr'], (cells, )))) self.hruspatial[1].__setattr__( k, self._cast(promote(mapping[k + '_hrudr'], (cells, )))) for k in self._HYPSO_KEYS: nval = mapping[k] if k == 'height': #+++ Right now our hypso grids present data in the opposite order to the model's expectation nval = nval.T.astype(np.float64).flatten() self._temp_cast.append(nval) self.hypso.__setattr__(k, self._cast(nval)) self.awralib.awral(self.forcing[0],self.outputs[0],self.initial_states[0],self.final_states[0],\ self.parameters[0],self.spatial[0],self.hypso[0],self.hruparams,self.hruspatial,timesteps,cells) self._temp_cast = [] return outputs_np
from cffi import FFI

# Declare the single exported function, then load the debug build in ABI mode.
ffi = FFI()
ffi.cdef("""
int dummy();
""")
lib = ffi.dlopen("../target/debug/dummy.dll")

result = lib.dummy()
print(result)
    int (* yajl_start_array)(void * ctx);
    int (* yajl_end_array)(void * ctx);
} yajl_callbacks;

int yajl_version(void);
yajl_handle yajl_alloc(const yajl_callbacks *callbacks, yajl_alloc_funcs *afs, void *ctx);
int yajl_config(yajl_handle h, yajl_option opt, ...);
yajl_status yajl_parse(yajl_handle hand, const unsigned char *jsonText, size_t jsonTextLength);
yajl_status yajl_complete_parse(yajl_handle hand);
unsigned char* yajl_get_error(yajl_handle hand, int verbose, const unsigned char *jsonText, size_t jsonTextLength);
void yajl_free_error(yajl_handle hand, unsigned char * str);
void yajl_free(yajl_handle handle);
""")

yajl = ffi.dlopen('yajl')

# yajl encodes its version as major*10000 + minor*100 + micro
major, rest = divmod(yajl.yajl_version(), 10000)
minor, micro = divmod(rest, 100)
if major < 2:
    raise YAJLImportError('YAJL version %s.x required, found %s.%s.%s' %
                          (2, major, minor, micro))

# status codes returned by yajl_parse / yajl_complete_parse
YAJL_OK = 0
YAJL_CANCELLED = 1
YAJL_INSUFFICIENT_DATA = 2
YAJL_ERROR = 3

# constants defined in yajl_parse.h
YAJL_ALLOW_COMMENTS = 1
YAJL_MULTIPLE_VALUES = 8
@author: chral """ from cffi import FFI import numpy as np from fparser.two.parser import ParserFactory from fparser.two.utils import walk_ast from fparser.two.Fortran2003 import ( Subroutine_Stmt, Dummy_Arg_List, Type_Declaration_Stmt, Entity_Decl, Name, Intrinsic_Type_Spec, Entity_Decl, Explicit_Shape_Spec ) ffi = FFI() libuq = ffi.dlopen('/home/calbert/code/uqp/libuq.so') #libuq = ffi.dlopen('/Users/ert/code/uqp/libuq.so') #%% from fffi import fortran_module, fdef, f2003_parser, debug from fparser.common.readfortran import FortranStringReader mod_index = fortran_module(libuq, 'mod_index') code = """ subroutine test(nfterme, npterme, np, mmax) integer, dimension(0:np) :: jterme, jtermo integer :: jterme2(0:np), jterme3(4) integer nfterme, npterme, mmax integer np end subroutine
from cffi import FFI


def processSlice(id, sliceSize, delta):
    # Thin top-level forwarder so it can be pickled and submitted to
    # worker processes; the real work happens in the C library.
    return processSliceModule.processSlice(id, sliceSize, delta)


def execute(processCount):
    """Estimate pi with n rectangles split across processCount workers."""
    n = 1000000000
    delta = 1.0 / n
    startTime = time()
    sliceSize = n // processCount
    with ProcessPoolExecutor(max_workers=processCount) as executor:
        futures = [
            executor.submit(processSlice, worker, sliceSize, delta)
            for worker in range(processCount)
        ]
        pi = 4.0 * delta * sum(future.result() for future in futures)
    elapseTime = time() - startTime
    out(__file__, pi, n, elapseTime, processCount)


if __name__ == '__main__':
    ffi = FFI()
    ffi.cdef('double processSlice(int, int, double);')
    processSliceModule = ffi.dlopen('processSlice_library_d.so')
    execute(1)
    execute(2)
    execute(8)
    execute(32)
from cffi import FFI import os.path import weakref # Call functions from the standard lib C library ffi = FFI() ffi.cdef(""" typedef unsigned int gid_t; typedef unsigned int uid_t; typedef int pid_t; gid_t getgid(void); uid_t getuid(void); pid_t getpid(void); """) libc = ffi.dlopen(None) print("getuid() = {}".format(libc.getuid())) print("getgid() = {}".format(libc.getgid())) print("getpid() = {}".format(libc.getpid())) # Use ffi.verify to compile code ffi = FFI() ffi.cdef('const size_t LONG_SIZE;') libv = ffi.verify('const size_t LONG_SIZE = sizeof(long);') print("sizeof(long) = {}".format(libv.LONG_SIZE)) # Add C definitions from header file, without any preprocessor macro ffi = FFI() current_dir = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(current_dir, 'cffi_example.h'), 'r') as fhead: cdefs = [
import numpy as np import mdtraj as md from cffi import FFI ffi = FFI() ffi.cdef("void dotsphere(int density, double* points);") ffi.cdef( "void vdw_surface(double* coordinates, char* elements, int n_elements, double scale_factor, double density, double* out, int* n_out);" ) C = ffi.dlopen('dotsphere.so') n_points = 72 points = np.empty((n_points, 3)) C.dotsphere(len(points), ffi.cast('double*', points.ctypes.data)) def plot_vertices(vertices): """Plot a set of vertices in 3D for debugging Example ------- >>> plot_vertices(dotsphere1(100)) """ import matplotlib.pyplot as pp from mpl_toolkits.mplot3d import Axes3D fig = pp.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(vertices[:, 0], vertices[:, 1], vertices[:, 2], c='k',
#! /usr/bin/env python2
from cffi import FFI

# Load the compiled Rust cdylib in ABI mode; no cdef declarations are
# required until a function is actually called.
ffi = FFI()
rust_component = ffi.dlopen("target/debug/librust_component.so")
# yuk if _is_windows: _glib_libname = 'libglib-2.0-0.dll' _gobject_libname = 'libgobject-2.0-0.dll' _vips_libname = 'libvips-42.dll' elif _is_mac: _glib_libname = None _vips_libname = 'libvips.42.dylib' _gobject_libname = 'libgobject-2.0.dylib' else: _glib_libname = None _vips_libname = 'libvips.so.42' _gobject_libname = 'libgobject-2.0.so.0' # possibly use ctypes.util.find_library() to locate the lib? gobject_lib = ffi.dlopen(_gobject_libname) vips_lib = ffi.dlopen(_vips_libname) if _glib_libname: glib_lib = ffi.dlopen(_glib_libname) else: glib_lib = gobject_lib logger.debug('Loaded lib %s', vips_lib) logger.debug('Loaded lib %s', gobject_lib) ffi.cdef(''' int vips_init (const char* argv0); int vips_version (int flag); ''') if vips_lib.vips_init(sys.argv[0].encode()) != 0:
tangram_model* model, const char* input, const char* options, char** output ); int tangram_model_id( tangram_model* model, char** output ); int tangram_string_free(const char* ptr); int tangram_model_free(tangram_model* model); """) operating_system = sys.platform cpu = platform.machine() if (operating_system == "linux" or operating_system == "linux2") and (cpu == 'x86_64' or cpu == 'AMD64'): library_path = "libtangram/linux_amd64/libtangram.so" elif operating_system == "darwin" and (cpu == 'x86_64' or cpu == 'AMD64'): library_path = "libtangram/macos_amd64/libtangram.dylib" elif operating_system == "win32" and (cpu == 'x86_64' or cpu == 'AMD64'): library_path = "libtangram/windows_amd64/tangram.dll" else: raise Exception( 'tangram-python does not yet support your combination of operating system and CPU architecture. Want support for your platform? Get in touch at [email protected].' ) libtangram_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), library_path) libtangram = ffi.dlopen(libtangram_path)
__all__ = ['PCO_Camera'] ffi = FFI() with open(os.path.join(os.path.dirname(__file__), '_pco', 'clean.h')) as f: ffi.cdef(f.read()) ffi.cdef(""" #define WAIT_OBJECT_0 0x00L #define WAIT_ABANDONED 0x80L #define WAIT_TIMEOUT 0x102L #define WAIT_FAILED 0xFFFFFFFF #define INFINITE 0xFFFFFFFF DWORD WaitForSingleObject(HANDLE hHandle, DWORD dwMilliseconds); BOOL ResetEvent(HANDLE hEvent); """) lib = ffi.dlopen('SC2_Cam.dll') winlib = ffi.dlopen('Kernel32.dll') def get_error_text(ret_code): pbuf = errortext.ffi.new('char[]', 1024) errortext.lib.PCO_GetErrorText( errortext.ffi.cast('unsigned int', ret_code), pbuf, len(pbuf)) return errortext.ffi.string(pbuf) class NicePCO(NiceLib): def _ret_wrap(code): if code != 0: e = Error(get_error_text(code)) e.code = code & 0xFFFFFFFF
class RQObject(object): _cdefs = ''' typedef uint64_t RaptorQ_OTI_Common_Data; typedef uint32_t RaptorQ_OTI_Scheme_Specific_Data; typedef enum { NONE = 0, ENC_8 = 1, ENC_16 = 2, ENC_32 = 3, ENC_64 = 4, DEC_8 = 5, DEC_16 = 6, DEC_32 = 7, DEC_64 = 8 } RaptorQ_type; struct RaptorQ_ptr; struct RaptorQ_ptr* RaptorQ_Enc ( const RaptorQ_type type, void *data, const uint64_t size, const uint16_t min_subsymbol_size, const uint16_t symbol_size, const size_t max_memory); struct RaptorQ_ptr* RaptorQ_Dec ( const RaptorQ_type type, const RaptorQ_OTI_Common_Data common, const RaptorQ_OTI_Scheme_Specific_Data scheme); // Encoding RaptorQ_OTI_Common_Data RaptorQ_OTI_Common (struct RaptorQ_ptr *enc); RaptorQ_OTI_Scheme_Specific_Data RaptorQ_OTI_Scheme (struct RaptorQ_ptr *enc); uint16_t RaptorQ_symbol_size (struct RaptorQ_ptr *ptr); uint8_t RaptorQ_blocks (struct RaptorQ_ptr *ptr); uint32_t RaptorQ_block_size (struct RaptorQ_ptr *ptr, const uint8_t sbn); uint16_t RaptorQ_symbols (struct RaptorQ_ptr *ptr, const uint8_t sbn); uint32_t RaptorQ_max_repair (struct RaptorQ_ptr *enc, const uint8_t sbn); size_t RaptorQ_precompute_max_memory (struct RaptorQ_ptr *enc); void RaptorQ_precompute ( struct RaptorQ_ptr *enc, const uint8_t threads, const bool background); uint64_t RaptorQ_encode_id ( struct RaptorQ_ptr *enc, void **data, const uint64_t size, const uint32_t id); uint64_t RaptorQ_encode ( struct RaptorQ_ptr *enc, void **data, const uint64_t size, const uint32_t esi, const uint8_t sbn); uint32_t RaptorQ_id (const uint32_t esi, const uint8_t sbn); // Decoding uint64_t RaptorQ_bytes (struct RaptorQ_ptr *dec); uint64_t RaptorQ_decode ( struct RaptorQ_ptr *dec, void **data, const size_t size); uint64_t RaptorQ_decode_block ( struct RaptorQ_ptr *dec, void **data, const size_t size, const uint8_t sbn); bool RaptorQ_add_symbol_id ( struct RaptorQ_ptr *dec, void **data, const uint32_t size, const uint32_t id); bool RaptorQ_add_symbol ( struct RaptorQ_ptr *dec, void **data, const uint32_t size, 
const uint32_t esi, const uint8_t sbn); // General: free memory void RaptorQ_free (struct RaptorQ_ptr **ptr); void RaptorQ_free_block (struct RaptorQ_ptr *ptr, const uint8_t sbn); ''' _ctx = None data_size_div, _rq_type, _rq_blk = 4, 32, 'uint32_t' def __init__(self): self._ffi = FFI() self._ffi.cdef(self._cdefs) # self.ffi.set_source('_rq', '#include <RaptorQ/cRaptorQ.h>') lib_name = ctypes.util.find_library( 'RaptorQ') # newer cffi should not do that automatically self._lib = self._ffi.dlopen(lib_name) # ABI mode for simplicity self.rq_types = (['NONE', None] + list('ENC_{}'.format(2**n) for n in xrange(3, 7)) + list('DEC_{}'.format(2**n) for n in xrange(3, 7))) self._rq_blk_size = self.data_size_div def rq_type_val(self, v, pre): if isinstance(v, int) or v.isdigit(): v = '{}_{}'.format(pre, v).upper() else: v = bytes(v).upper() assert v in self.rq_types, [v, self.rq_types] return getattr(self._lib, v) def __getattr__(self, k): if k.startswith('rq_'): if not self._ctx: raise RuntimeError( 'ContextManager not initialized or already freed') return ft.partial(getattr(self._lib, 'RaptorQ_{}'.format(k[3:])), self._ctx) return self.__getattribute__(k) def open(self): self._ctx = self._ctx_init[0](*self._ctx_init[1]) return self._ctx def close(self): if self._ctx: ptr = self._ffi.new('struct RaptorQ_ptr **') ptr[0] = self._ctx self._lib.RaptorQ_free(ptr) self._ctx = None def __enter__(self): self.open() return self def __exit__(self, *err): self.close() def __del__(self): self.close() def sym_id(self, esi, sbn): return self._lib.RaptorQ_id(esi, sbn) _sym_n = None def _sym_buff(self, init=None): if not self._sym_n: self._sym_n = self.symbol_size / self._rq_blk_size buff = self._ffi.new('{}[]'.format(self._rq_blk), self._sym_n) buff_ptr = self._ffi.new('void **', buff) buff_raw = self._ffi.buffer(buff) if init: buff_raw[:] = init return buff_ptr, lambda: bytes(buff_raw)
typedef void wrap_amdsysfs_handle; wrap_amdsysfs_handle* wrap_amdsysfs_create(); int wrap_amdsysfs_destroy(wrap_amdsysfs_handle* sysfsh); int wrap_amdsysfs_get_gpucount(wrap_amdsysfs_handle* sysfsh, int* gpucount); int wrap_amdsysfs_get_tempC(wrap_amdsysfs_handle* sysfsh, int index, unsigned int* tempC); int wrap_amdsysfs_get_fanpcnt(wrap_amdsysfs_handle* sysfsh, int index, unsigned int* fanpcnt); int wrap_amdsysfs_get_power_usage(wrap_amdsysfs_handle* sysfsh, int index, unsigned int* milliwatts); int wrap_amdsysfs_get_gpu_pci(wrap_amdsysfs_handle* sysfsh, int index, char* pcibuf, int bufsize); int wrap_amdsysfs_get_vid_pid_subsysid(wrap_amdsysfs_handle* sysfsh, int index, char* buf, int bufsize); int wrap_amdsysfs_get_clock(wrap_amdsysfs_handle* sysfsh, int index, unsigned int *baseCoreClock, unsigned int *baseMemoryClock, unsigned int *coreClock, unsigned int *memoryClock); ''') if platform.system() == 'Linux': lib = ffi.dlopen('./gpumon/libgpumon.so') nvHandle = lib.wrap_nvml_create() amdHandle = lib.wrap_adl_create() fsHandle = lib.wrap_amdsysfs_create() else: nvHandle = None amdHandle = None fsHandle = None def nvmlGetGpuCount(): gpuCount = 0 if nvHandle: count = ffi.new("int*", 0) lib.wrap_nvml_get_gpucount(nvHandle, count) gpuCount = count[0]
from cffi import FFI

ffi = FFI()
ffi.cdef("""
int printf(const char *format, ...);
""")

# dlopen(None) exposes the whole C namespace of the running process.
C = ffi.dlopen(None)

# Owned C buffer, the moral equivalent of: char arg[] = "world";
arg = ffi.new("char[]", b"world")
C.printf(b"hi there, %s.\n", arg)
def test_dlopen():
    # ABI-mode declaration of libm's sin(), checked against Python's math.sin.
    ffi = FFI()
    ffi.cdef("double sin(double x);")
    libm = ffi.dlopen("m")  # unicode literal
    result = libm.sin(1.23)
    assert result == math.sin(1.23)
class Sim3D(object):
    """
    Simulation object which contains information about fault-tolerant
    (2 + 1D) surface code simulations, extensible to other 2D codes in
    principle.

    Methods follow a common pattern from my other simulation classes:
        __init__:  set simulation parameters
        run:       use parameters to populate sample list by Monte Carlo
        save:      pickle parameters and derived statistics
    """
    def __init__(self, d, n_meas, gate_error_model, use_blossom=True):
        """
        Produces a simulation object which can then be run, examined
        and saved.

        Inputs:

        d:                 code distance. Must be an integer greater than 1.

        n_meas:            number of measurement rounds. Must be an integer
                           greater than 1.

        gate_error_model:  specification of which errors occur at which
                           timesteps. For an arbitrary model, input a list
                           of lists which is the same length as the
                           extractor of the SCLayout object, less one ().
                           A sample from each PauliErrorModel specified in
                           element `j` will be applied after timestep `j`
                           of the extractor. Two special error models,
                           'pq' and 'fowler' are permitted, they
                           correspond to internal models replicating the
                           3DZ2RPGM and the Fowler/Stephens/Groszkowski
                           paper.

        use_blossom:       Boolean that says whether to use C++ (if True)
                           or NetworkX (if False).
        """
        if not(isinstance(d, int)) or (d <= 1):
            raise ValueError("d must be an integer at least 2, "
                             "{} entered.".format(d))
        if not(isinstance(n_meas, int)) or (n_meas <= 1):
            raise ValueError("n_meas must be an integer at least 2, "
                             "{} entered.".format(n_meas))
        self.d = d
        self.layout = cm.SCLayoutClass.SCLayout(d)
        self.n_meas = n_meas

        #pre-fab error models
        #TODO: Set up default error models for often-studied simulations
        if gate_error_model[0] == 'pq':
            p, q = gate_error_model[1:]
            gate_error_model = pq_model(self.layout.extractor(), p, q)
        elif gate_error_model[0] == 'fowler':
            p = gate_error_model[1]
            gate_error_model = fowler_model(self.layout.extractor(), p)
        else:
            #check that the gate_error_model is a list of lists of
            #`PauliErrorModel`s:
            if not isinstance(gate_error_model, Iterable):
                raise ValueError("Input gate_error_model is not "
                                 "iterable:\n{}".format(gate_error_model))
            for idx, elem in enumerate(gate_error_model):
                if not isinstance(elem, Iterable):
                    raise ValueError(("Element {} of gate_error_model "
                                      "is not iterable:\n{}").format(idx, elem))
                elif any(not(isinstance(_, em.PauliErrorModel)) for _ in elem):
                    raise ValueError(("Element {} of gate_error_model"
                                      " contains non-error_model elements:\n{}"
                                      ).format(idx, elem))

        self.gate_error_model = gate_error_model

        #extra derived properties
        # tally of observed logical error letters, incremented by run()
        self.errors = {'I': 0, 'X': 0, 'Z': 0, 'Y': 0}
        self.extractor = self.layout.extractor()  #convenience
        self.use_blossom = use_blossom
        if self.use_blossom:
            # ABI-mode load of the C++ blossom matching library
            self.ffi = FFI()
            self.blossom = self.ffi.dlopen(blossom_path)
            self.ffi.cdef(cdef_str)
        else:
            self.ffi = None
            self.blossom = None

    def history(self, final_perfect_rnd=True):
        """
        Produces a list of sparse_pauli.Paulis that track the error
        through the `n_meas` measurement rounds.
        """
        #ancillas (for restricting error to data bits)
        ancs = {self.layout.map[anc]
                for anc in sum(self.layout.ancillas.values(), [])}
        err_hist = []
        synd_hist = {'X': [], 'Z': []}
        #perfect (quiescent state) initialization
        err = sp.Pauli()
        for meas_dx in range(self.n_meas):
            #just the ones
            synd = {'X': set(), 'Z': set()}
            #run circuit
            for stp, mdl in zip(self.extractor, self.gate_error_model):
                #run timestep, then sample
                new_synds, err = cm.apply_step(stp, err)
                err *= product(_.sample() for _ in mdl)
                for ki in synd.keys():
                    synd[ki] |= new_synds[ki][1]
            #last round of circuit, because there are n-1 errs, n gates
            new_synds, err = cm.apply_step(self.extractor[-1], err)
            for ki in synd.keys():
                synd[ki] |= new_synds[ki][1]
            # remove remaining errors on ancilla qubits before append
            # (they contain no information)
            err.prep(ancs)
            for key in 'XZ':
                synd_hist[key].append(synd[key])
            err_hist.append(err)
        if final_perfect_rnd:
            # append one noiseless round: syndromes are read directly off
            # the final error's commutation with each stabiliser
            synd = {'X': set(), 'Z': set()}
            for ki, val in synd.items():
                for idx, stab in self.layout.stabilisers()[ki].items():
                    if err.com(stab) == 1:
                        val |= {idx}
            for key in 'XZ':
                synd_hist[key].append(synd[key])
        return err_hist, synd_hist

    def correction(self, synds, metric=None, bdy_info=None):
        """
        Given a set of recorded syndromes, returns a correction by
        minimum-weight perfect matching.

        In order to 'make room' for correlated decoders, X and Z
        syndromes are passed in as a single object, and any splitting
        is performed inside this method.

        Also, a single correction Pauli is returned.

        metric should be a function, so you'll have to wrap a matrix in
        table-lookup if you want to use one.

        bdy_info is a function that takes a flip and returns the
        distance to the closest boundary point
        """
        n = self.layout.n
        x = self.layout.map.inv
        if metric is None:
            metric = lambda flp_1, flp_2: self.manhattan_metric(flp_1, flp_2)
        if bdy_info is None:
            bdy_info = lambda flp: self.manhattan_bdy_tpl(flp)
        flip_idxs = flat_flips(synds, n)
        # Note: 'X' syndromes are XXXX stabiliser measurement results.
        corr = sp.Pauli()
        for stab in 'XZ':
            matching = mwpm(flip_idxs[stab], metric, bdy_info,
                            self.use_blossom, self.ffi, self.blossom)
            # X-type stabiliser flips are corrected with Z paulis and
            # vice versa
            err = 'X' if stab == 'Z' else 'Z'
            for u, v in matching:
                # int endpoints are real syndrome flips; non-int ones are
                # boundary vertices
                if isinstance(u, int) & isinstance(v, int):
                    corr *= self.layout.path_pauli(x[u % n], x[v % n], err)
                elif isinstance(u, int) ^ isinstance(v, int):
                    vert = u if isinstance(u, int) else v
                    bdy_pt = bdy_info(vert)[1]
                    corr *= self.layout.path_pauli(bdy_pt, x[vert % n], err)
                else:
                    pass #both boundary points, no correction
        #TODO: Optional return syndrome correction, test inferred syndrome error rate
        return corr

    def logical_error(self, final_error, corr):
        """
        Given an error and a correction, multiplies them and returns a
        single letter recording the resulting logical error (may be I,
        X, Y or Z)
        """
        anticom_dict = {
                        (0, 0): 'I',
                        (0, 1): 'X',
                        (1, 0): 'Z',
                        (1, 1): 'Y'
                        }
        x_bar, z_bar = self.layout.logicals()
        loop = final_error * corr
        # test that the loop commutes with the stabilisers at the end
        for ltr in 'XZ':
            for stab in self.layout.stabilisers()[ltr].values():
                if loop.com(stab) == 1:
                    raise RuntimeError("final error * correction anticommutes with stabilisers")
        x_com, z_com = x_bar.com(loop), z_bar.com(loop)
        return anticom_dict[(x_com, z_com)]

    def run(self, n_trials, progress=True, metric=None, bdy_info=None,
            final_perfect_rnd=True):
        """
        Repeats the following cycle `n_trials` times:
        + Generate a list of 'n_meas' cumulative errors
        + determine syndromes by checking stabilisers
        + make those syndromes into a graph with boundary vertices
        + match on that graph
        + check for a logical error by testing anticommutation with the
          logical paulis
        """
        if progress:
            bar = pb.ProgressBar()
            trials = bar(range(n_trials))
        else:
            trials = range(n_trials)
        for trial in trials:
            err_hist, synd_hist = self.history(final_perfect_rnd)
            corr = self.correction(synd_hist, metric, bdy_info)
            log = self.logical_error(err_hist[-1], corr)
            self.errors[log] += 1
        pass

    def manhattan_metric(self, flip_a, flip_b, diag=False):
        """
        Mostly for testing/demonstration, returns a function that takes
        you straight from flip _indices_ to edge weights for the
        NetworkX maximum-weight matching.

        I'm making a note here about whether to take the minimum between
        two cases for timelike syndrome weights. Given that the first
        round of syndromes count as flips, and the last round is
        perfect, I'm only going to use paths "timelike between flips",
        and I won't give a pair a weight by taking each syndrome flip
        out to the time boundary, just the space boundaries.

        If diag is set to True, I'm going to take all diagonal edges to
        be weight-one.
        """
        n = self.layout.n
        crds = self.layout.map.inv
        # split into (round, idx) pairs:
        vert_a, idx_a = divmod(flip_a, n)
        vert_b, idx_b = divmod(flip_b, n)
        # horizontal distance between syndromes, from decoding_2d
        horz_dist = pair_dist(crds[idx_a], crds[idx_b])
        # vertical
        vert_dist = abs(vert_a - vert_b)
        if diag:
            vert_dist = max(0, vert_dist - horz_dist)
        # negated: NetworkX maximises the total matching weight
        return -(horz_dist + vert_dist)

    def manhattan_bdy_tpl(self, flp):
        """
        copypaste from decoding_2d.Sim2D.
        Returns an edge weight for NetworkX maximum-weight matching,
        hence the minus sign.
        """
        crds = self.layout.map.inv
        horz_dx = flp % self.layout.n
        crd = crds[horz_dx]
        min_dist = 4 * self.d #any impossibly large value will do
        err_type = 'Z' if crd in self.layout.x_ancs() else 'X'
        for pt in self.layout.boundary_points(err_type):
            new_dist = pair_dist(crd, pt)
            if new_dist < min_dist:
                min_dist, close_pt = new_dist, pt
        return -min_dist, close_pt
from ctypes.util import find_library from cffi import FFI from semantic_version import Version ffi = FFI() ffi.cdef(''' const char* drafter_version_string(void); ''') drafter_library = find_library('drafter') if not drafter_library: raise ImportError('Draughtsman require drafter to be installed') drafter = ffi.dlopen(drafter_library) def get_drafter_version(): output = drafter.drafter_version_string() string = ffi.string(output).decode('utf-8') return string.replace('v', '') def drafter4_parse_blueprint_to(blueprint: str, generate_source_map: bool = False) -> str: source = ffi.new('char []', blueprint.encode('utf-8')) output = ffi.new('char **') parse_options = ffi.new('drafter_parse_options *', [False]) serialize_options = ffi.new('drafter_serialize_options *', [generate_source_map, 1]) result = drafter.drafter_parse_blueprint_to(source, output,
# # Poezio is free software: you can redistribute it and/or modify # it under the terms of the zlib license. See the COPYING file. '''This is a template module just for instruction. And poopt.''' from typing import List, Tuple # CFFI codepath. from cffi import FFI ffi = FFI() ffi.cdef(""" typedef long wchar_t; int wcwidth(wchar_t c); """) libc = ffi.dlopen(None) # Cython codepath. #cdef extern from "wchar.h": # ctypedef Py_UCS4 wchar_t # int wcwidth(wchar_t c) # Just checking if the return value is -1. In some (all?) implementations, # wcwidth("😆") returns -1 while it should return 2. In these cases, we # return 1 instead because this is by far the most probable real value. # Since the string is received from python, and the unicode character is # extracted with mbrtowc(), and supposing these two compononents are not # bugged, and since poezio’s code should never pass '\t', '\n' or their # friends, a return value of -1 from wcwidth() is considered to be a bug in # wcwidth() (until proven otherwise). xwcwidth() is here to work around
    const char* def_val;
    const Signal *sigs;
} Val;

typedef struct {
    const char* name;
    size_t num_msgs;
    const Msg *msgs;
    const Val *vals;
    size_t num_vals;
} DBC;

void* can_init(int bus, const char* dbc_name, size_t num_message_options,
               const MessageParseOptions* message_options,
               size_t num_signal_options,
               const SignalParseOptions* signal_options,
               bool sendcan, const char* tcp_addr);
void can_update(void* can, uint64_t sec, bool wait);
size_t can_query(void* can, uint64_t sec, bool *out_can_valid,
                 size_t out_values_size, SignalValue* out_values);
const DBC* dbc_lookup(const char* dbc_name);
void* canpack_init(const char* dbc_name);
uint64_t canpack_pack(void* inst, uint32_t address, size_t num_vals,
                      const SignalPackValue *vals, int counter);
""")

# ABI-mode load of the compiled CAN/DBC helper library (path computed above).
libdbc = ffi.dlopen(libdbc_fn)
cmph_uint32 cmph_search(cmph_t *mphf, const char *key, cmph_uint32 keylen); void _cmph_setup_py_logger(void (*_py_logger)(int, char*)); cmph_io_adapter_t *cmph_io_function_adapter(char*(*readfn)(), void(*rewindfn)(), void(*destroyfn)(), cmph_uint32(*keylenfn)(), cmph_uint32 nkeys); void cmph_io_function_adapter_destroy(cmph_io_adapter_t * adapter); """) __VERSION__ = '0.3.0' ffi.C = ffi.dlopen(None) path = dirname(__file__) sources = [relpath(src) for src in glob(pthjoin(path, '*.c'))] @ffi.callback("void(int, char*)") def _cmph_py_logger(level, message): log_fn = { 0: logger.error, 1: logger.warn, 2: logger.info, }.get(level, logger.debug) log_fn(ffi.string(message).strip())
#!/usr/bin/env python3
"""Report the longest common-substring length among pairs of sample texts.

Loads a native helper (``substrings.so``) through cffi's ABI mode and calls
its ``substr2(s1, s2)`` function on every pair of the sample byte strings,
printing the maximum value returned.
"""
from cffi import FFI
import itertools

ffi = FFI()
lib = ffi.dlopen('./substrings.so')
print('Loaded lib {0}'.format(lib))

# Describe the data type and function prototype to cffi.
ffi.cdef('''
uint32_t substr2(char* s1, char* s2);
''')

strings = [
    b'The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.',
    b'When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.',
    b'To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.',
    b'Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.',
]

# Generator expression: no need to materialize an intermediate list just to
# take its maximum.
print(max(lib.substr2(x, y) for x, y in itertools.combinations(strings, 2)))
"""Smoke-test script for the rabe CP-ABE Rust library via cffi ABI mode."""
from cffi import FFI

ffi = FFI()
ffi.cdef("""
struct CpAbeCiphertext;
struct CpAbeContext;
struct CpAbeSecretKey;

struct CpAbeContext* rabe_bsw_context_create();
void rabe_bsw_context_destroy(struct CpAbeContext* ctx);
struct CpAbeSecretKey* rabe_bsw_keygen(const struct CpAbeContext* ctx,
                                       const char* attributes);
void rabe_bsw_keygen_destroy(void* sk);
int32_t rabe_bsw_encrypt(const void* pk, char* policy,
                         char* pt, int32_t pt_len,
                         char** ct, int32_t *ct_len);
int32_t rabe_bsw_decrypt(const struct CpAbeSecretKey* sk,
                         const char* ct, uint32_t ct_len,
                         char** pt_buf, uint32_t *pt_len);
""")

# NOTE(review): hard-coded developer path; consider ctypes.util.find_library
# or an environment variable for portability.
C = ffi.dlopen("/home/spenq/Source/rabe/target/debug/librabe.so")

ctx = C.rabe_bsw_context_create()
print(ctx)

sk = C.rabe_bsw_keygen(ctx, b"[ \"test1\", \"test2\", \"test3\" ]")
print(sk)

pt = b"testing123"
# Bug fix: rabe_bsw_encrypt is declared above with six parameters, but the
# original call passed only four, which raises a TypeError in cffi before
# the C function is ever reached. Allocate the two ciphertext
# out-parameters and pass them through.
ct = ffi.new("char **")
ct_len = ffi.new("int32_t *")
output = C.rabe_bsw_encrypt(ctx, b"{\"OR\": [{\"ATT\": \"A\"}, {\"ATT\": \"B\"}]}",
                            pt, len(pt), ct, ct_len)
print(output)
"""cffi ABI-mode bindings for the lateral MPC solver library.

Importing this module builds ``libcommampc.so`` (via ``make``) in the
module's own directory and then loads it.
"""
import os
import sys
import subprocess
from cffi import FFI

# Directory containing this file; the shared library lives beside it.
mpc_dir = os.path.dirname(os.path.abspath(__file__))
libmpc_fn = os.path.join(mpc_dir, "libcommampc.so")

# Build the native solver at import time. stdout is redirected to stderr so
# the make output does not pollute this process's stdout stream.
subprocess.check_call(["make", "-j4"], stdout=sys.stderr, cwd=mpc_dir)

ffi = FFI()
ffi.cdef("""
typedef struct {
    double x, y, psi, delta, t;
} state_t;

typedef struct {
    double x[20];
    double y[20];
    double psi[20];
    double delta[20];
} log_t;

void init(double steer_rate_cost);
int run_mpc(state_t * x0, log_t * solution,
            double l_poly[4], double r_poly[4], double p_poly[4],
            double l_prob, double r_prob, double p_prob,
            double curvature_factor, double v_ref, double lane_width);
""")

libmpc = ffi.dlopen(libmpc_fn)
void set_rt_torque_last(int t);
bool get_cruise_engaged_prev(void);
bool get_vehicle_moving(void);
int get_hw_type(void);
void set_timer(uint32_t t);

int safety_rx_hook(CAN_FIFOMailBox_TypeDef *to_send);
int safety_tx_hook(CAN_FIFOMailBox_TypeDef *to_push);
int safety_fwd_hook(int bus_num, CAN_FIFOMailBox_TypeDef *to_fwd);
int set_safety_hooks(uint16_t mode, int16_t param);

void init_tests(void);

void init_tests_honda(void);
void set_honda_fwd_brake(bool);
void set_honda_alt_brake_msg(bool);
void set_honda_bosch_long(bool c);
int get_honda_hw(void);

void init_tests_chrysler(void);

bool get_subaru_global(void);

void init_tests_nissan(void);
void set_nissan_desired_angle_last(int t);
""")

# ABI-mode load of the compiled safety-model test harness (path set above).
libpandasafety = ffi.dlopen(libpandasafety_fn)
class RangeLib(object):
    """cffi wrapper around libcrange's "easy" API.

    All strings crossing the C boundary are byte strings; values returned by
    expand()/compress()/eval() are bytes as produced by ffi.string().
    """

    def __init__(self, config_file):
        """Load libcrange/libc and create an easy_lr handle from config_file."""
        self.ffi = FFI()
        self.ffi.cdef("""
typedef void easy_lr ; // avoid exposing the struct internals, fake it as void
easy_lr* range_easy_create(const char* config_file);
const char ** range_easy_expand(easy_lr* elr, const char * c_range);
const char * range_easy_eval(easy_lr* elr, const char * c_range);
char * range_easy_compress(easy_lr* elr, const char ** c_nodes);
int range_easy_destroy(easy_lr* elr);

void free(void *ptr);
""")
        self.rangelib_ffi = self.ffi.dlopen("libcrange.so")
        # libc is loaded explicitly so memory returned by libcrange can be
        # released with the matching allocator's free().
        self.libc_ffi = self.ffi.dlopen("libc.so.6")
        self.elr = self.rangelib_ffi.range_easy_create(
            self.ffi.new("char[]", config_file))

    def __charpp_to_native(self, arg):
        """Convert a NULL-terminated C char** into a list of bytes.

        Frees every element and the array itself via libc free(), so the
        caller owns `arg` and must not use it afterwards.
        """
        i = 0
        arr = []
        while arg[i] != self.ffi.NULL:
            x = self.ffi.string(arg[i])
            self.libc_ffi.free(arg[i])
            arr.append(x)
            i += 1
        self.libc_ffi.free(arg)
        return arr

    def expand(self, c_range):
        """Expand a range expression into the list of member node names."""
        ret = self.rangelib_ffi.range_easy_expand(
            self.elr, self.ffi.new("char[]", c_range))
        return self.__charpp_to_native(ret)

    def compress(self, nodes):
        """Compress a list of node names into a compact range expression."""
        char_arg = [self.ffi.new("char[]", x) for x in nodes]
        char_arg.append(self.ffi.NULL)  # the C side expects NULL termination
        retptr = self.rangelib_ffi.range_easy_compress(
            self.elr, self.ffi.new("char*[]", char_arg))
        ret = self.ffi.string(retptr)
        self.libc_ffi.free(retptr)
        return ret

    def eval(self, c_range):
        """Evaluate a range expression and return the result as bytes."""
        retptr = self.rangelib_ffi.range_easy_eval(
            self.elr, self.ffi.new("char[]", c_range))
        ret = self.ffi.string(retptr)
        self.libc_ffi.free(retptr)
        return ret

    def __del__(self):
        # Bug fix: if __init__ raised before self.elr was assigned (e.g. a
        # dlopen failure), the original unconditional access raised a
        # spurious AttributeError during garbage collection.
        elr = getattr(self, "elr", None)
        if elr is not None:
            self.rangelib_ffi.range_easy_destroy(elr)