def _guess_type(typedef, verify_args, verify_kwargs, assume_pointer=True):
    """Empirically determine a cdef-compatible C type for *typedef*.

    Tries, in order: pointer-sized (returns the opaque '... *'), arithmetic
    (compiles probe code to learn size and signedness, then returns the
    smallest matching integer type), and finally falls back to an opaque
    struct when nothing compiles.

    verify_args[0] is a C preamble prepended to every probe; the remaining
    verify_args / verify_kwargs are forwarded to FFI.verify() unchanged.
    Raises TypeError for an arithmetic type whose size matches no known
    integer type.
    """
    if assume_pointer and _is_pointer_sized(typedef, verify_args, verify_kwargs):
        return '... *'
    # OK, it's not a pointer, check if it's an arithmetic type
    ffi = FFI()
    ffi.cdef("size_t type_size();")
    try:
        # This probe only compiles if 'typedef' can declare a variable and
        # be the target of a cast from an integer literal, i.e. if it is an
        # arithmetic type.
        lib = ffi.verify(
            verify_args[0] + '\n' + '''
            size_t type_size() {
                ''' + typedef + ''' foo = (''' + typedef + ''') 1;
                return sizeof(foo);
            }
            ''',
            *verify_args[1:], **verify_kwargs
        )
        size = lib.type_size()
        # OK, it's an arithmetic type, is it signed or unsigned
        ffi = FFI()
        ffi.cdef("size_t type_size();")
        try:
            # ((T) -1 < 0) evaluates to 1 for a signed T, making the array
            # length -1 -- a compile error.  So a VerificationError here
            # means "signed"; a successful compile (zero-length array, a
            # GCC extension) means "unsigned".
            lib = ffi.verify(
                verify_args[0] + '\n' + '''
                size_t type_size() {
                    char arr[((''' + typedef + ''') -1 < 0) * -1];
                    return sizeof(''' + typedef + ''');
                }
                ''',
                *verify_args[1:], **verify_kwargs
            )
            size = lib.type_size()
        except VerificationError:
            # It's a signed type
            unsigned = ''
        else:
            unsigned = 'unsigned '
        # Now we know it's an arithmetic type, what's the best size
        if size <= ffi.sizeof(unsigned + 'char'):
            return unsigned + 'char'
        if size <= ffi.sizeof(unsigned + 'short'):
            return unsigned + 'short'
        if size <= ffi.sizeof(unsigned + 'int'):
            return unsigned + 'int'
        if size <= ffi.sizeof(unsigned + 'long'):
            return unsigned + 'long'
        if size <= ffi.sizeof(unsigned + 'long long'):
            return unsigned + 'long long'
        if size == ffi.sizeof('void *'):
            return '... *'
        else:
            raise TypeError("Can't figure out the type of {0} with size {1}!".format(typedef, size))
    except VerificationError:
        # it's some kind of struct
        return 'struct { ...; }'
def _is_verifiable(cdef, verify_args, verify_kwargs):
    """Return True when *cdef* both parses and compiles successfully.

    Any cdef parse error or compilation failure is treated as "not
    verifiable" rather than propagated.
    """
    checker = FFI()
    try:
        checker.cdef(cdef)
        checker.verify(*verify_args, **verify_kwargs)
    except (VerificationError, CDefError):
        return False
    return True
def _is_defined(define, verify_args, verify_kwargs):
    """Return True when the C preprocessor macro *define* is defined.

    Uses cffi's '#define NAME ...' probe; a compilation failure means the
    macro does not exist in the probed headers.
    """
    probe = FFI()
    probe.cdef("#define {0} ...".format(define))
    try:
        probe.verify(*verify_args, **verify_kwargs)
    except VerificationError:
        return False
    return True
def _run_callback_in_thread():
    """Regression check: invoke a Python cffi callback from a C pthread.

    Compiles a helper whose exported function spawns a pthread; that thread
    calls the supplied callback twice.  The Python side then polls until
    both invocations have been recorded, asserting on timeout.
    """
    ffi = FFI()
    ffi.cdef("""
        typedef int (*mycallback_func_t)(int, int);
        int threaded_ballback_test(mycallback_func_t mycb);
    """)
    lib = ffi.verify("""
        #include <pthread.h>
        typedef int (*mycallback_func_t)(int, int);
        void *my_wait_function(void *ptr) {
            mycallback_func_t cbfunc = (mycallback_func_t)ptr;
            cbfunc(10, 10);
            cbfunc(12, 15);
            return NULL;
        }
        int threaded_ballback_test(mycallback_func_t mycb) {
            pthread_t thread;
            pthread_create(&thread, NULL, my_wait_function, (void*)mycb);
            return 0;
        }
    """, extra_compile_args=['-pthread'])
    seen = []

    @ffi.callback('int(*)(int,int)')
    def mycallback(x, y):
        # Sleep briefly so the C thread's calls overlap the polling loop.
        time.sleep(0.022)
        seen.append((x, y))
        return 0

    lib.threaded_ballback_test(mycallback)
    # Poll for up to ~3 seconds (300 * 10ms) for both invocations to land.
    count = 300
    while len(seen) != 2:
        time.sleep(0.01)
        count -= 1
        assert count > 0, "timeout"
    assert seen == [(10, 10), (12, 15)]
class Binding(object):
    """Lazy cffi binding over the module-level ``cdef``/``source`` pair.

    Compilation is deferred until the first access of :attr:`lib` (or an
    explicit :meth:`verify` call).
    """

    def __init__(self, extra_objects=None, include_dirs=None, libraries=None):
        self.ffi = FFI()
        self.ffi.cdef(cdef)
        self._lib = None
        # Normalize the optional build parameters, defaulting to linking
        # against the system "fuzzy" library.
        self._extra_objects = [] if extra_objects is None else extra_objects
        self._include_dirs = [] if include_dirs is None else include_dirs
        self._libraries = ["fuzzy"] if libraries is None else libraries

    def verify(self):
        """Compile and load the extension module, caching it on self."""
        self._lib = self.ffi.verify(
            source,
            ext_package="ssdeep",
            extra_objects=self._extra_objects,
            include_dirs=self._include_dirs,
            modulename=_create_modulename(cdef, source, __version__),
            libraries=self._libraries,
        )

    @property
    def lib(self):
        """The compiled library, built on first use."""
        if self._lib is None:
            self.verify()
        return self._lib
def __init__(self, depth, alphabet_size=2):
    """Build a context-tree node structure of the given depth.

    Compiled libraries are cached per alphabet size in the class-level
    `_lib_cache`, because each alphabet size requires recompiling the C
    sources with the literal token ALPHABET_SIZE substituted into both
    the header (`ctwnode_h`) and implementation (`ctwnode_c`) templates.
    """
    if alphabet_size not in self._lib_cache:
        ffi = FFI()
        ffi.cdef(re.sub("ALPHABET_SIZE", str(alphabet_size), ctwnode_h))
        self._lib_cache[alphabet_size] = ffi.verify(
            re.sub("ALPHABET_SIZE", str(alphabet_size), ctwnode_c))
    self.lib = self._lib_cache[alphabet_size]
    self.depth = depth
    # Root of the native context-tree structure allocated by the C library.
    self.tree = self.lib.ctwnode_new()
def test_tag(self):
    """verify(tag=...) must embed the tag in the generated module filename."""
    ffi = FFI()
    ffi.cdef("/* %s test_tag */ double test1tag(double x);" % self)
    c_source = "double test1tag(double x) { return x - 42.0; }"
    compiled = ffi.verify(c_source, force_generic_engine=self.generic,
                          tag='xxtest_tagxx')
    # Sanity-check the compiled function, then the tagged filename.
    assert compiled.test1tag(143) == 101.0
    assert '_cffi_xxtest_tagxx_' in ffi.verifier.modulefilename
def test_verify():
    """Compile foo.h/foo.c with cffi and call test().

    (Python 2 module: uses print statements.)
    """
    ffi = FFI()
    # Best to keep both the header and the source as plain C files: that
    # guarantees they compile on their own and makes debugging easier.
    header = open("foo.h").read()
    ffi.cdef(header)
    source = open("foo.c").read()
    # Required -- without this the compiler cannot find the header at
    # compile time.
    lib_dir = os.path.abspath(".")
    lib = ffi.verify(source, include_dirs=[lib_dir])
    # test() returns a struct, so several values can be returned at once.
    print lib.test().a
    print lib.test().b
def check(self, source, expected_ofs_y, expected_align, expected_size):
    """Check bitfield layout of ``struct s1 { <source> }`` against the compiler.

    Compares cffi's computed offset-of-y / alignment / size with values
    measured by the C compiler itself, then round-trips representative
    values through every named bitfield via the compiled helper
    (delegating each value check to self._fieldcheck).
    """
    # NOTE: 'expected_*' is the numbers expected from GCC.
    # The numbers expected from MSVC are not explicitly written
    # in this file, and will just be taken from the compiler.
    ffi = FFI()
    ffi.cdef("struct s1 { %s };" % source)
    ctype = ffi.typeof("struct s1")
    # verify the information with gcc
    ffi1 = FFI()
    ffi1.cdef(
        """
        static const int Gofs_y, Galign, Gsize;
        struct s1 *try_with_value(int fieldnum, long long value);
        """
    )
    # Only named fields with a positive bit size can be assigned to.
    fnames = [name for name, cfield in ctype.fields
              if name and cfield.bitsize > 0]
    # One switch case per assignable field, indexed by position in fnames.
    setters = ["case %d: s.%s = value; break;" % iname
               for iname in enumerate(fnames)]
    lib = ffi1.verify(
        """
        struct s1 { %s };
        struct sa { char a; struct s1 b; };
        #define Gofs_y  offsetof(struct s1, y)
        #define Galign  offsetof(struct sa, b)
        #define Gsize   sizeof(struct s1)
        struct s1 *try_with_value(int fieldnum, long long value)
        {
            static struct s1 s;
            memset(&s, 0, sizeof(s));
            switch (fieldnum) { %s }
            return &s;
        }
        """
        % (source, " ".join(setters))
    )
    if sys.platform == "win32":
        # On MSVC we simply adopt the compiler's own numbers.
        expected_ofs_y = lib.Gofs_y
        expected_align = lib.Galign
        expected_size = lib.Gsize
    else:
        assert (lib.Gofs_y, lib.Galign, lib.Gsize) == (
            expected_ofs_y, expected_align, expected_size)
    # the real test follows
    assert ffi.offsetof("struct s1", "y") == expected_ofs_y
    assert ffi.alignof("struct s1") == expected_align
    assert ffi.sizeof("struct s1") == expected_size
    # compare the actual storage of the two
    for name, cfield in ctype.fields:
        if cfield.bitsize < 0 or not name:
            continue
        # Casting -1 tells us whether the field's underlying type is signed.
        if int(ffi.cast(cfield.type, -1)) == -1:
            # signed
            min_value = -(1 << (cfield.bitsize - 1))
            max_value = (1 << (cfield.bitsize - 1)) - 1
        else:
            min_value = 0
            max_value = (1 << cfield.bitsize) - 1
        for t in [1, 2, 4, 8, 16, 128, 2813, 89728, 981729,
                  -1, -2, -4, -8, -16, -128, -2813, -89728, -981729]:
            if min_value <= t <= max_value:
                self._fieldcheck(ffi, lib, fnames, name, t)
def make_ffi(**verifier_args):
    """Build an FFI and compiled lib for the ext_package tests.

    NOTE(review): relies on `self`, `KEY`, `targetpackage` and
    `ext_package` from the enclosing scope -- this is a closure defined
    inside a test method, not a free-standing function.
    """
    ffi = FFI()
    # Embedding the varying parameters in a comment forces a distinct
    # compiled module per combination instead of reusing a cached one.
    ffi.cdef("/* %s, %s, %s */" % (KEY, targetpackage, ext_package))
    ffi.cdef("double test1iarm(double x);")
    csrc = "double test1iarm(double x) { return x * 42.0; }"
    lib = ffi.verify(csrc, force_generic_engine=self.generic,
                     ext_package=ext_package, **verifier_args)
    return ffi, lib
def load_constants():
    """Scan linux/filter.h for numeric #defines and export them as globals.

    Populates the module globals `ffi` and `C`, appends each discovered
    constant name to `__all__`, and binds its compiled value as a module
    global after resolving it with cffi's '#define NAME ...' mechanism.
    """
    import re
    global ffi
    global C
    constants = []
    # Match simple numeric macros like '#define FOO 0x1'; names starting
    # with '_' are skipped by the pattern, and non-numeric replacement
    # values are filtered out by the int() probe below.
    for match in re.finditer(r"^\s*#\s*define\s+([^_][A-Z_]*)\s+([0-9bXx]+).*$",
                             open("/usr/include/linux/filter.h", "r").read(),
                             re.MULTILINE):
        try:
            int(match.group(2), base=0)
        except ValueError:
            pass
        else:
            constant = match.group(1)
            __all__.append(constant)
            constants.append(constant)
    from cffi import FFI
    ffi = FFI()
    cdef_lines = """
    /* struct sock_filter { ...; }; */
    struct sock_filter {   /* Filter block */
        uint16_t code;     /* Actual filter code */
        uint8_t jt;        /* Jump true */
        uint8_t jf;        /* Jump false */
        uint32_t k;        /* Generic multiuse field */
    };

    /* struct sock_fprog { ...; }; */
    struct sock_fprog {    /* Required for SO_ATTACH_FILTER. */
        unsigned short len;        /* Number of filter blocks */
        struct sock_filter *filter;
    };

    void *malloc(size_t size);
    void free(void *ptr);
    """
    # Let cffi resolve each discovered macro's value at compile time.
    for constant in constants:
        cdef_lines += "#define %s ...\n" % constant
    ffi.cdef(cdef_lines)
    C = ffi.verify("""
    #include <linux/filter.h>
    #include <stdlib.h>
    """)
    for constant in constants:
        globals()[constant] = getattr(C, constant)
def test_verifier_object_from_ffi(self):
    """After verify(), ffi.verifier is a Verifier whose source file holds csrc."""
    ffi = FFI()
    ffi.cdef("double sin(double x);")
    c_source = "/*6%s*/\n#include <math.h>" % self
    compiled = ffi.verify(c_source, force_generic_engine=self.generic)
    assert compiled.sin(12.3) == math.sin(12.3)
    assert isinstance(ffi.verifier, Verifier)
    # The generated source file must embed the C source we passed in.
    with open(ffi.verifier.sourcefilename, 'r') as f:
        assert c_source in f.read()
def test_verify():
    """Compile foo.h/foo.c with cffi and call test() twice.

    (Python 2 module: uses print statements.)
    """
    ffi = FFI()
    header = open("foo.h").read()
    ffi.cdef(header)
    source = open("foo.c").read()
    # The header directory must be on the include path or compilation of
    # the generated module fails.
    lib_dir = os.path.abspath(".")
    lib = ffi.verify(source, include_dirs=[lib_dir])
    print lib.test()  # result: 1
    print lib.test()
def get_version():
    """Return the runtime libzmq version as a (major, minor, patch) tuple.

    Compiles a tiny probe module binding zmq_version() and calls it with
    three int out-parameters.
    """
    ffi_check = FFI()
    ffi_check.cdef("void zmq_version(int *major, int *minor, int *patch);")
    C_check_version = ffi_check.verify("#include <zmq.h>", libraries=["c", "zmq"])
    # FIX: allocate the out-parameters with the same FFI instance that
    # compiled the probe, instead of depending on a module-global `ffi`
    # (which may not exist yet when this runs, and mixes cdata owners).
    major = ffi_check.new("int*")
    minor = ffi_check.new("int*")
    patch = ffi_check.new("int*")
    C_check_version.zmq_version(major, minor, patch)
    return (int(major[0]), int(minor[0]), int(patch[0]))
def get_sizes():
    """Return C typedef lines aliasing each POSIX type to a uintN_t of its size.

    The sizes are measured by the C compiler through cffi's
    '#define NAME ...' constant-resolution mechanism.
    """
    type_names = ('dev_t', 'mode_t', 'nlink_t', 'ino_t', 'uid_t', 'gid_t',
                  'off_t', 'blkcnt_t', 'blksize_t', 'time_t')
    ffi = FFI()
    probe_lines = ['#define SIZE_OF_{} ...\n'.format(name) for name in type_names]
    ffi.cdef(''.join(probe_lines))
    source_lines = ['#define SIZE_OF_{} sizeof({})\n'.format(name, name)
                    for name in type_names]
    lib = ffi.verify('#include <sys/types.h>\n' + ''.join(source_lines))
    typedefs = ['typedef uint{}_t {};\n'.format(getattr(lib, 'SIZE_OF_' + name) * 8, name)
                for name in type_names]
    return ''.join(typedefs)
def test_verify():
    """Compile foo.h/foo.c, then let C fill in a foo_s struct.

    (Python 2 module: uses print statements.)
    """
    ffi = FFI()
    header = open("foo.h").read()
    ffi.cdef(header)
    source = open("foo.c").read()
    # The header directory must be on the include path for compilation.
    lib_dir = os.path.abspath(".")
    lib = ffi.verify(source, include_dirs=[lib_dir])
    # Allocate a zero-initialized struct; print its fields before and after
    # test() mutates it through the pointer.
    varsp = ffi.new("foo_s*")
    print varsp[0].a, varsp[0].b
    lib.test(varsp)
    print varsp[0].a, varsp[0].b
def zmq_version_info():
    """Return the runtime libzmq version as a (major, minor, patch) tuple."""
    ffi_check = FFI()
    ffi_check.cdef('void zmq_version(int *major, int *minor, int *patch);')
    C_check_version = ffi_check.verify('#include <zmq.h>', libraries=['c', 'zmq'])
    # FIX: allocate the out-parameters with the local `ffi_check` instead of
    # a module-global `ffi` -- avoids a NameError if the global isn't set up
    # yet and keeps the cdata owned by the FFI that compiled the probe.
    major = ffi_check.new('int*')
    minor = ffi_check.new('int*')
    patch = ffi_check.new('int*')
    C_check_version.zmq_version(major, minor, patch)
    return (int(major[0]), int(minor[0]), int(patch[0]))
def test_modulename(self):
    """verify(modulename=...) must name both generated files after it."""
    ffi = FFI()
    ffi.cdef("/* %s test_modulename */ double test1foo(double x);" % self)
    c_source = "double test1foo(double x) { return x - 63.0; }"
    module_name = 'xxtest_modulenamexx%d' % (self.generic,)
    compiled = ffi.verify(c_source, force_generic_engine=self.generic,
                          modulename=module_name)
    assert compiled.test1foo(143) == 80.0
    # Both the generated .c file and the shared object carry the name.
    so_suffix = _get_so_suffixes()[0]
    expected_c = os.path.join(ffi.verifier.tmpdir, module_name + '.c')
    expected_so = os.path.join(ffi.verifier.tmpdir, module_name + so_suffix)
    assert ffi.verifier.sourcefilename == expected_c
    assert ffi.verifier.modulefilename == expected_so
def verify(self, source='', **kwargs):
    """Compile like FFI.verify(), additionally wiring up Python exports.

    For every (name, export) registered in self._pyexports, a global
    function-pointer variable declaration is prepended to *source*.  After
    compilation, a Python-side callback is created for each export and
    attached to the returned lib under *name*; a reference is also kept on
    export.cb so the callback object is not garbage-collected.
    """
    extras = []
    pyexports = sorted(self._pyexports.items())
    for name, export in pyexports:
        # Declare the callback variable in C with the exported type.
        callback_var = self.getctype(export.tp, name)
        extras.append("%s;" % callback_var)
    extras.append(source)
    source = '\n'.join(extras)
    lib = FFI.verify(self, source, **kwargs)
    for name, export in pyexports:
        cb = self.callback(export.tp, export.func)
        export.cb = cb  # keep alive: cffi callbacks die when collected
        setattr(lib, name, cb)
    return lib
def load_constants():
    """Collect seccomp/audit constants and syscall numbers via cffi.

    Scans linux/seccomp.h for numeric #defines, reads the per-architecture
    syscall list from <machine>_syscalls.txt next to this file, then lets
    cffi resolve every constant's value at compile time and binds each one
    as a module global (header-derived names are also added to __all__).
    """
    global ffi
    constants = ['AUDIT_ARCH_I386', 'AUDIT_ARCH_X86_64', 'AUDIT_ARCH_ARM']
    # Match simple numeric macros like '#define FOO 0x1'; non-numeric
    # values are filtered out by the int() probe.
    for match in re.finditer(r"^\s*#\s*define\s+([^_][A-Z_]*)\s+([0-9bXx]+).*$",
                             open("/usr/include/linux/seccomp.h", "r").read(),
                             re.MULTILINE):
        try:
            int(match.group(2), base=0)
        except ValueError:
            pass
        else:
            constant = match.group(1)
            __all__.append(constant)
            constants.append(constant)
    syscalls_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 os.uname()[4] + '_syscalls.txt')
    for syscall in open(syscalls_file, 'r'):
        # we don't export the syscalls because there's too many of them, SYSCALL
        # below will automatically convert their string names, and we export
        # syscall_by_name
        constants.append('__NR_' + syscall.strip())
    from cffi import FFI
    ffi = FFI()
    cdef_lines = """
    /* struct seccomp_data { ...; }; */
    struct seccomp_data {
        int nr;
        uint32_t arch;
        uint64_t instruction_pointer;
        uint64_t args[6];
    };
    """
    # Ask cffi to resolve each collected macro at compile time.
    for constant in constants:
        cdef_lines += "#define %s ...\n" % constant
    ffi.cdef(cdef_lines)
    C = ffi.verify("""
    #include <linux/seccomp.h>
    #include <linux/audit.h>
    #include <linux/unistd.h>
    """)
    for constant in constants:
        globals()[constant] = getattr(C, constant)
def get_time_t_type():
    """Return the fixed-width typedef name matching this platform's time_t.

    Measures sizeof(time_t) with a small compiled probe; raises ValueError
    for any size other than 4 or 8 bytes.
    """
    probe_ffi = FFI()
    probe_ffi.cdef('''#define SIZE_OF_TIME_T ...''')
    probe_lib = probe_ffi.verify('''
    #include <sys/types.h>
    #define SIZE_OF_TIME_T sizeof(time_t)
    ''')
    size = probe_lib.SIZE_OF_TIME_T
    if size == 4:
        return 'int32_t'
    if size == 8:
        return 'int64_t'
    raise ValueError('time_t different size than expected')
def read_core(name):
    """Load a libretro core shared library *name* via cffi.

    Extracts the interface declarations from ../libretro.h, sanitizes them
    so cffi's cdef parser accepts them, compiles against the real header,
    and returns (ffi, core).  Returns (None, None) if the core's runtime
    API version does not match the header's RETRO_API_VERSION.
    """
    ffi = FFI()
    with open("../libretro.h", "r") as file:
        iface = file.read()
    # Keep only the section between RETRO_API_VERSION and the C++ guard.
    startpos = iface.find('#define RETRO_API_VERSION')
    iface = iface[startpos : iface.find('#ifdef __cplusplus', startpos)]
    iface = iface.replace("INT_MAX", "...")  # Force it to ask the C compiler for this one.
    iface = iface.replace("/*", "\n/*")  # Fix bugs if a comment starts on a define.
    iface = re.sub(r"#define +[A-Z0-9_]+\(.*", "", iface)  # Delete RETRO_DEVICE_SUBCLASS.
    iface = re.sub(r"(#define +[A-Z0-9_]+) .*", r"\1 ...", iface)  # Nuke the rest of the defines. Most of them make the parser barf.
    ffi.cdef(iface)
    core = ffi.verify("""#include "libretro.h" """,
                      include_dirs=[".."], library_dirs=[".."],
                      libraries=[name])
    if core.retro_api_version() != core.RETRO_API_VERSION:
        return (None, None)
    return (ffi, core)
def _is_pointer_sized(typedef, verify_args, verify_kwargs):
    """Return True when sizeof(*typedef*) equals sizeof(void *).

    verify_args[0] is prepended as a C preamble; the remaining args and
    kwargs are forwarded to FFI.verify().  A compilation failure means
    *typedef* is not usable here and yields False.
    """
    ffi = FFI()
    ffi.cdef("int isptr();")
    probe_source = (
        verify_args[0] + '\n' + '''
        int isptr() {
            return (sizeof(''' + typedef + ''') == sizeof(void *));
        }
        '''
    )
    try:
        lib = ffi.verify(probe_source, *verify_args[1:], **verify_kwargs)
        return lib.isptr() != 0
    except VerificationError:
        # if the above fails to compile, 'typedef' is not a pointer type
        return False
def zmq_version_info():
    """Return the runtime libzmq version as a (major, minor, patch) tuple.

    Build flags (libraries, include/library dirs) come from the saved
    compiler configuration.
    """
    ffi_check = FFI()
    ffi_check.cdef('void zmq_version(int *major, int *minor, int *patch);')
    cfg = load_compiler_config()
    C_check_version = ffi_check.verify('#include <zmq.h>',
                                       libraries=cfg['libraries'],
                                       include_dirs=cfg['include_dirs'],
                                       library_dirs=cfg['library_dirs'],
                                       runtime_library_dirs=cfg['runtime_library_dirs'],
                                       )
    # FIX: use the local `ffi_check` for the out-parameters instead of a
    # module-global `ffi`, which may not be initialized when this is called.
    major = ffi_check.new('int*')
    minor = ffi_check.new('int*')
    patch = ffi_check.new('int*')
    C_check_version.zmq_version(major, minor, patch)
    return (int(major[0]), int(minor[0]), int(patch[0]))
def main():
    """Program main.

    Compiles the tetaQuad.h integration helpers with cffi, evaluates the
    angle theta for b in [0, 100) with step 0.5 both numerically (C
    integral) and analytically, printing each pair, then plots both
    curves with matplotlib.
    """
    ffi = FFI()
    ffi.cdef("""
        double integral_to_infinite(double a, double b, double E);
        double to_degrees(double radians);
    """)
    # C = ffi.dlopen(None)
    lib = ffi.verify("""
    #include "tetaQuad.h"
    """, include_dirs=["."], extra_compile_args=["-O3"])
    var_e = ffi.cast("double", 0.1)  # parameter E of the integrand
    var_a = ffi.cast("double", 0.0)  # lower integration bound a
    points_x = []
    points_y = []    # numeric theta values
    points_y_a = []  # analytic theta values
    print("Calculus may take some time...")
    for var_b in range_f(0.0, 100, 0.5):
        points_x.append(var_b)
        rad = lib.integral_to_infinite(var_a, var_b, var_e)
        theta = lib.to_degrees(abs(rad))
        # NOTE(review): range_f, analytic_evaluation, degrees, plt and
        # mpatches come from elsewhere in this module's imports.
        analytic = degrees(analytic_evaluation(var_b, var_e))
        print("b = {}\ttheta = {}\tanalytic = {}".format(
            float(var_b), theta, analytic))
        points_y.append(theta)
        points_y_a.append(analytic)
    plt.ylabel('teta')
    plt.xlabel('b')
    # plt.plot(points_x, points_y, 'r--')
    plt.plot(points_x, points_y, 'ro')
    plt.plot(points_x, points_y_a, 'b--')
    # plt.plot(points_x, points_y_a, 'bo')
    red_line = mpatches.Patch(color='red', label='theta value')
    blue_line = mpatches.Patch(color='blue', label='analytic value')
    plt.legend(handles=[red_line, blue_line])
    plt.grid(True)
    plt.show()
def test_extension_object(self):
    """get_extension() must reflect sources, module name and define_macros."""
    ffi = FFI()
    ffi.cdef("double sin(double x);")
    c_source = '/*7%s*/' % self + '''
    #include <math.h>
    #ifndef TEST_EXTENSION_OBJECT
    # error "define_macros missing"
    #endif
    '''
    compiled = ffi.verify(c_source,
                          define_macros=[('TEST_EXTENSION_OBJECT', '1')],
                          force_generic_engine=self.generic)
    assert compiled.sin(12.3) == math.sin(12.3)
    verifier = ffi.verifier
    ext = verifier.get_extension()
    assert 'distutils.extension.Extension' in str(ext.__class__)
    assert ext.sources == [maybe_relative_path(verifier.sourcefilename)]
    assert ext.name == verifier.get_module_name()
    assert ext.define_macros == [('TEST_EXTENSION_OBJECT', '1')]
def test_extension_object_extra_sources(self):
    """Extra .c files passed via sources= must appear in the extension."""
    ffi = FFI()
    ffi.cdef("double test1eoes(double x);")
    # Write the actual implementation into a separate source file; the main
    # csrc only carries the declaration.
    extra_source = str(udir.join('extension_extra_sources.c'))
    with open(extra_source, 'w') as f:
        f.write('double test1eoes(double x) { return x * 6.0; }\n')
    csrc = '/*9%s*/' % self + '''
    double test1eoes(double x);   /* or #include "extra_sources.h" */
    '''
    lib = ffi.verify(csrc, sources=[extra_source],
                     force_generic_engine=self.generic)
    assert lib.test1eoes(7.0) == 42.0
    v = ffi.verifier
    ext = v.get_extension()
    assert 'distutils.extension.Extension' in str(ext.__class__)
    # The generated source comes first, then the extra source file.
    assert ext.sources == [maybe_relative_path(v.sourcefilename), extra_source]
    assert ext.name == v.get_module_name()
def test_simple():
    """Compile prog.c with cffi and call its main() as a library function.

    (Python 2 module: uses a print statement.)
    """
    ffi = FFI()
    ffi.cdef("int main(void);")
    try:
        with open('prog.c') as r:
            src = r.read()
        # The tmpdir needs to set to '.' otherwise headers are not found
        libdir = os.path.abspath('.')
        lib = ffi.verify(
            src,
            include_dirs=[libdir],
            library_dirs=[libdir],
            runtime_library_dirs=[libdir],
            libraries=['test1', 'test2', 'test3'])
        # The main function is here treated as common library function
        lib.main()
    except Exception as inst:
        # useful for printing gcc compiling errors in ffi.verify
        print inst
def compile(self):
    """Compile the node graph into a single native function.

    Allocates the image buffers, emits C code for every node into a Code
    object, compiles everything in a throwaway temp directory via cffi,
    and stores the entry point on self.compiled_func (the function returns
    a VX status code).
    """
    for d in self.images:
        d.alloc()
    imgs = ''.join(d.cdeclaration for d in self.images)
    # Small helpers shared by the generated node code.
    head = '''
        long clamp(long val, long min_val, long max_val) {
            if (val < min_val) return min_val;
            if (val > max_val) return max_val;
            return val;
        }
        long subsample(long val) {
            return val & (~1);
        }
    '''
    code = Code(imgs + "\n")
    code.includes.add('#include <math.h>')
    code.includes.add('#include <VX/vx.h>')
    for n in self.nodes:
        assert not n.optimized_out
        n.compile(code)
    ffi = FFI()
    ffi.cdef("int func(void);")
    if self.show_source:
        print str(code)
    inc = '\n'.join(code.includes) + '\n'
    # Build in a fresh temporary directory so repeated compiles of the
    # same graph never collide; always clean it up afterwards.
    tmpdir = mkdtemp()
    mydir = os.path.dirname(os.path.abspath(__file__))
    vxdir = os.path.join(mydir, 'inc', 'headers')
    try:
        lib = ffi.verify(inc + head + "int func(void) {" + str(code) + "return VX_SUCCESS;}",
                         extra_compile_args=["-O3", "-march=native", "-std=c99",
                                             "-I" + mydir, "-I" + vxdir],
                         extra_link_args=code.extra_link_args,
                         tmpdir=tmpdir)
    finally:
        rmtree(tmpdir)
    self.compiled_func = lib.func
def compile_library(installation=False):
    """Compile the package's C library with cffi and return (ffi, lib).

    With installation=True, builds from the bundled .c sources against the
    installed API header; otherwise only the runtime API header is used
    (no extra sources or include dirs).
    """
    if installation:
        api_header_path = installation_api_header_path
        sources = glob.glob(os.path.join(src_directory, '*.c'))
        include_dirs = [src_directory]
    else:
        api_header_path = runtime_api_header_path
        sources = []
        include_dirs = []
    # FIX: close the header file deterministically instead of leaking the
    # handle from a bare open(...).read().
    with open(api_header_path) as header_file:
        api_header = header_file.read()
    ffi = FFI()
    ffi.cdef(api_header)
    print("Compiling with", sources)
    lib = ffi.verify(
        api_header,
        ext_package=package,
        sources=sources,
        include_dirs=include_dirs)
    return ffi, lib
refs = [None] * len(l) for i in range(len(l)): if not is_string(l[i]): raise TypeError("Value must be a string") s = ffi.new('char []', to_str(l[i])) refs[i] = s strings[i] = s arr.strings = strings arr.count = len(l) return arr, refs dir_path = path.dirname(path.abspath(inspect.getfile(inspect.currentframe()))) decl_path = path.join(dir_path, 'decl.h') with codecs.open(decl_path, 'r', 'utf-8') as header: ffi.cdef(header.read()) # if LIBGIT2 exists, set build and link against that version libgit2_path = getenv('LIBGIT2') include_dirs = [] library_dirs = [] if libgit2_path: include_dirs = [path.join(libgit2_path, 'include')] library_dirs = [path.join(libgit2_path, 'lib')] C = ffi.verify("#include <git2.h>", libraries=["git2"], include_dirs=include_dirs, library_dirs=library_dirs)
from __future__ import print_function from cffi import FFI, VerificationError ffi = FFI() ffi.cdef( """ int launch_activate_socket(const char *name, int **fds, size_t *cnt); """ ) try: lib = ffi.verify( """ #include <launch.h> """, tag=__name__.replace(".", "_") ) except VerificationError as ve: raise ImportError(ve) def launchActivateSocket(name): fdList = [] fds = ffi.new('int **') count = ffi.new('size_t *') result = lib.launch_activate_socket(name, fds, count) if result == 0: for i in xrange(count[0]): fdList.append(fds[0][i])
int kds__contains(struct keyds *k, void *key, int key_len); void kds__setup_query(struct query_context *q, struct keyds *k); int kds__lookup(struct query_context *q, char *key, int n); void kds__teardown_query(struct query_context *q); """ ffi.cdef(KDS_STRUCTS_CDEF) ffi.cdef(KDS_API_CDEF) folder_path = os.path.dirname(__file__) if not folder_path: folder_path = os.getcwd() common_path = os.path.abspath(os.path.join(folder_path, "../common")) KDS_SO = ffi.verify("""#include "kds.h" """, libraries=["kds"], library_dirs=[folder_path], runtime_library_dirs=[folder_path], include_dirs=[folder_path, common_path], extra_compile_args=["-std=c99"]) class KeyTable(object): def __init__(self, dirname, fieldname, mode, dumper=None, loader=None): self.dirname = dirname self.fieldname = fieldname #self.path = os.path.join(dirname, '{}.db'.format(self.field)) self.mode = mode foo = ffi.new("struct keyds *k") self.c_kds_data = ffi.gc(foo, KDS_SO.kds__close) self.flushed = False self.dumper = dumper self.loader = loader
} result_t; void iplus1_init(); struct iplus1_lang_t* iplus1_get_lang(char*); char** iplus1_lang_parse(iplus1_lang_t*, char*); void iplus1_lang_parse_free(char**); result_t* iplus1_parse_full(char* nlang, char* tlang, char** str); void iplus1_parse_full_free(result_t*); """) # TODO: figure where exactly these headers will be going iplus1 = ffi.verify(""" #include "iplus1/iplus1.h" #include "iplus1/lang.h" #include "iplus1/tree.h" #include "iplus1/user.h" #include "iplus1/parse.h" """, libraries=["iplus1", 'hiredis'], runtime_library_dirs=["."], include_dirs=["./src"]) iplus1.iplus1_init() def lang_parse(langcode, sent): lang = iplus1.iplus1_get_lang(langcode) if lang == ffi.NULL: return [] words = iplus1.iplus1_lang_parse(lang, sent) word_list = []
#!/usr/bin/env python3 import functools import os from pytss.tspi_exceptions import * from cffi import FFI, VerificationError INTERFACE_H = os.path.dirname(os.path.abspath(__file__)) + '/interface.h' __all__ = ["ffi", "lib"] # Setup CFFI with libtspi ffi = FFI() ffi.cdef(open(INTERFACE_H, 'r').read()) tss_lib = ffi.verify('#include <trousers/tss.h>', libraries=['tspi']) def wrap_libtspi_func(func): @functools.wraps(func) def wrapper(*args, **kwargs): ret = func(*args, **kwargs) if ret == 0: return True if (ret & 0x3000): ret = ret & ~0x3000 if ret == tss_lib.TSS_E_FAIL: raise TSS_E_FAIL elif ret == tss_lib.TSS_E_BAD_PARAMETER: raise TSS_E_BAD_PARAMETER elif ret == tss_lib.TSS_E_INTERNAL_ERROR: raise TSS_E_INTERNAL_ERROR elif ret == tss_lib.TSS_E_OUTOFMEMORY:
# This file is part of bedup. # # bedup is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # bedup is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with bedup. If not, see <http://www.gnu.org/licenses/>. from cffi import FFI import os ffi = FFI() ffi.cdef(''' int syncfs(int fd); ''') lib = ffi.verify(''' #include <unistd.h> ''', ext_package='bedup') def syncfs(fd): if lib.syncfs(fd) != 0: raise IOError(ffi.errno, os.strerror(ffi.errno), fd)
if os.path.isdir(incdirs[0]): break config_ffi = FFI() config_ffi.cdef(""" #define TK_HEX_VERSION ... #define HAVE_WIDE_INT_TYPE ... """) config_lib = config_ffi.verify(""" #include <tk.h> #define TK_HEX_VERSION ((TK_MAJOR_VERSION << 24) | \ (TK_MINOR_VERSION << 16) | \ (TK_RELEASE_LEVEL << 8) | \ (TK_RELEASE_SERIAL << 0)) #ifdef TCL_WIDE_INT_TYPE #define HAVE_WIDE_INT_TYPE 1 #else #define HAVE_WIDE_INT_TYPE 0 #endif """, include_dirs=incdirs, libraries=linklibs, library_dirs=libdirs) TK_HEX_VERSION = config_lib.TK_HEX_VERSION HAVE_LIBTOMMATH = int((0x08050208 <= TK_HEX_VERSION < 0x08060000) or (0x08060200 <= TK_HEX_VERSION)) HAVE_WIDE_INT_TYPE = config_lib.HAVE_WIDE_INT_TYPE tkffi = FFI()
from cffi import FFI

# Minimal cffi demo: declare add(), compile it inline with verify(), call it.
ffi = FFI()
ffi.cdef("""
int add(int, int);
""")
C = ffi.verify("""
int add(int a, int b)
{
    return a + b;
}
""")
print(C.add(21, 21))  # prints 42
import os.path
from cffi import FFI

# Binding for the bundled 2-D phase-unwrapping C implementation.
_ffi = FFI()
_ffi.cdef("""
void unwrap2D(
    float* wrapped_image,
    float* unwrapped_image,
    unsigned char* input_mask,
    int image_width,
    int image_height,
    int wrap_around_x,
    int wrap_around_y);
""")

_current_directory = os.path.dirname(__file__)
_lib = _ffi.verify('#include "unwrap2D.c"', ext_package="unwrap",
                   include_dirs=[_current_directory])
_unwrap2D = _lib.unwrap2D


def unwrap2D(array, mask, unwrapped_array, wrap_around_x, wrap_around_y):
    """Unwrap the 2-D phase *array* in place into *unwrapped_array*.

    array / unwrapped_array are float32 numpy arrays; mask marks pixels to
    ignore.  Width is taken from shape[1] and height from shape[0].
    """
    _unwrap2D(_ffi.cast("float *", array.ctypes.data),
              _ffi.cast("float *", unwrapped_array.ctypes.data),
              # FIX: cast to "unsigned char *" to match the cdef's
              # 'unsigned char* input_mask' parameter -- a plain "char *"
              # cdata is rejected by strict cffi pointer-type checking.
              _ffi.cast("unsigned char *", mask.ctypes.data),
              array.shape[1],
              array.shape[0],
              wrap_around_x,
              wrap_around_y)
from cffi import FFI, VerificationError ffi = FFI() definitions = """ typedef unsigned char uuid_t[16]; int mbr_check_service_membership(const uuid_t user, const char* servicename, int* ismember); int mbr_user_name_to_uuid(const char* name, uuid_t uu); int mbr_group_name_to_uuid(const char* name, uuid_t uu); """ ffi.cdef(definitions) try: lib = ffi.verify(definitions, libraries=[], tag=__name__.replace(".", "_")) except VerificationError as ve: raise ImportError(ve) def checkSACL(userOrGroupName, serviceName): """ Check to see if a given user or group is a member of an OS X Server service's access group. If userOrGroupName is an empty string, we want to know if unauthenticated access is allowed for the given service. @param userOrGroupName: the name of the user or group @type userOrGroupName: C{unicode} @param serviceName: the name of the service (e.g. calendar, addressbook) @type serviceName: C{str}
extern int lo_tell(PGconn *conn, int fd); extern int lo_lseek(PGconn *conn, int fd, int offset, int whence); extern int lo_close(PGconn *conn, int fd); extern int lo_unlink(PGconn *conn, Oid lobjId); extern int lo_export(PGconn *conn, Oid lobjId, const char *filename); extern int lo_truncate(PGconn *conn, int fd, size_t len); ''') libpq = ffi.verify(''' #include <postgres_ext.h> #include <libpq-fe.h> ''', libraries=['pq'], library_dirs=[ '/usr/pgsql-9.1/lib/', ], include_dirs=[ '/usr/include/postgresql/', '/usr/pgsql-9.1/include/', ], ext_package='psycopg2cffi') # imported from postgres/src/include/postgres_ext.h libpq.PG_DIAG_SEVERITY = ord('S') libpq.PG_DIAG_SQLSTATE = ord('C') libpq.PG_DIAG_MESSAGE_PRIMARY = ord('M') libpq.PG_DIAG_MESSAGE_DETAIL = ord('D') libpq.PG_DIAG_MESSAGE_HINT = ord('H') libpq.PG_DIAG_STATEMENT_POSITION = ord('P')
uint8_t flags; }; int iw_get_ext(int skfd, const char * ifname, int request, struct iwreq * pwrq); int iw_set_ext(int skfd, const char * ifname, int request, struct iwreq * pwrq); """) ccall = ffi.verify(""" #include <sys/socket.h> #include <iwlib.h> #include <linux/wireless.h> """) def switch_to(channel, iface): C = ccall AF_INET = 2 SOCK_DGRAM = 2 SHUT_RDWR = 2 SIOCSIWFREQ = 0x8B04 SIOCGIWFREQ = 0x8B05 def channel_to_freqency(chan): if chan < 0 or chan > 14: return 0
class NFCT(object): _instance = None def __new__(cls): if not cls._instance: cls._instance = super(NFCT, cls).__new__(cls) return cls._instance def __init__(self): global _cdef, _clibs_includes, _clibs_link self.ffi = FFI() self.ffi.cdef(_cdef) self.libnfct = self.ffi.verify(_clibs_includes, libraries=list(_clibs_link)) self.libnfct_cache = dict() _cdef = _clibs_includes = _clibs_link = None def _ffi_call(self, func, args, no_check=False, check_gt0=False, check_notnull=False): '''Call lib function through cffi, checking return value and raising error, if necessary. Checks if return is >0 by default.''' res = func(*args) if no_check\ or (check_gt0 and res > 0)\ or (check_notnull and res)\ or res >= 0: return res errno_ = self.ffi.errno raise NFCTError(errno_, os.strerror(errno_)) def __getattr__(self, k): if not (k.startswith('nfct_') or k.startswith('c_')): return super(NFCT, self).__getattr__(k) if k.startswith('c_'): k = k[2:] if k not in self.libnfct_cache: func = getattr(self.libnfct, k) self.libnfct_cache[k] = lambda *a, **kw: self._ffi_call( func, a, **kw) return self.libnfct_cache[k] def generator(self, events=None, output_flags=None, handle_sigint=True): '''Generator that yields: - on first iteration - netlink fd that can be poll'ed or integrated into some event loop (twisted, gevent, ...). Also, that is the point where uid/gid/caps can be dropped. - on all subsequent iterations it does recv() on that fd, yielding XML representation of the captured conntrack event. Keywords: events: mask for event types to capture - or'ed NFNLGRP_CONNTRACK_* flags, None = all. output_flags: which info will be in resulting xml - or'ed NFCT_OF_* flags, None = set all. 
handle_sigint: add SIGINT handler to process it gracefully.''' if events is None: events = (self.libnfct.NFNLGRP_CONNTRACK_NEW | self.libnfct.NFNLGRP_CONNTRACK_UPDATE | self.libnfct.NFNLGRP_CONNTRACK_DESTROY) if output_flags is None: output_flags = (self.libnfct.NFCT_OF_TIME | self.libnfct.NFCT_OF_ID | self.libnfct.NFCT_OF_SHOW_LAYER3 | self.libnfct.NFCT_OF_TIMESTAMP) handle = self.nfct_open(self.libnfct.NFNL_SUBSYS_NONE, events, check_notnull=True) cb_results = list() xml_buff_size = 2048 # ipv6 events are ~1k xml_buff = self.ffi.new('char[]', xml_buff_size) @self.ffi.callback('nfct_callback') def recv_callback(handler, msg_type, ct_struct, data): try: size = self.nfct_snprintf(xml_buff, xml_buff_size, ct_struct, msg_type, self.libnfct.NFCT_O_XML, output_flags, check_gt0=True) assert size <= xml_buff_size, size # make sure xml fits data = self.ffi.buffer(xml_buff, size)[:] cb_results.append(data) except: cb_results.append(StopIteration) # breaks the generator raise return self.libnfct.NFCT_CB_STOP # to yield processed data from generator if handle_sigint: global _sigint_raise _sigint_raise = False def sigint_handler(sig, frm): global _sigint_raise _sigint_raise = True cb_results.append(StopIteration) sigint_handler = signal.signal(signal.SIGINT, sigint_handler) def break_check(val): if val is StopIteration: raise val() return val self.nfct_callback_register2(handle, self.libnfct.NFCT_T_ALL, recv_callback, self.ffi.NULL) try: peek = break_check((yield self.nfct_fd(handle) )) # yield fd for poll() on first iteration while True: if peek: peek = break_check((yield NFWouldBlock)) # poll/recv is required continue # No idea how many times callback will be used here self.nfct_catch(handle) if handle_sigint and _sigint_raise: raise KeyboardInterrupt() # Yield individual events for result in cb_results: break_check(result) peek = break_check((yield result)) cb_results = list() finally: if handle_sigint: signal.signal(signal.SIGINT, sigint_handler) 
self.nfct_callback_unregister2(handle, no_check=True) self.nfct_close(handle)
"cld2/internal/cld_generated_score_quad_octa_2.cc" ] ] _full_sources = _core_sources + _full_table _lite_sources = _core_sources + _lite_table _include_dirs = [ relpath(inc) for inc in ["cld2/public", "cld2/internal", "cld2"] ] if platform.system() == 'Windows': _include_dirs.append(relpath("msinttypes")) _full_cld2 = _full_ffi.verify('#include <binding_decls.h>', sources=_full_sources, include_dirs=_include_dirs, extra_compile_args=_COMPILER_ARGS) _lite_cld2 = _lite_ffi.verify('#include <binding_decls.h>', sources=_lite_sources, include_dirs=_include_dirs, extra_compile_args=_COMPILER_ARGS) def __establish_languages(ffi, cld): to_ret = [] _lingus = cld.cld_languages() _codes = cld.cld_langcodes() for i in six.moves.xrange(cld.cld_num_languages()): lingus = ffi.string(_lingus[i])
void issue_barrier(); """) lib = ffi.verify(""" long long_add_and_fetch(long *v, long l) { return __sync_add_and_fetch(v, l); }; long long_sub_and_fetch(long *v, long l) { return __sync_sub_and_fetch(v, l); }; long long_bool_compare_and_swap(long *v, long o, long n) { return __sync_bool_compare_and_swap(v, o, n); }; long long_value_compare_and_swap(long *v, long o, long n) { return __sync_val_compare_and_swap(v, o, n); }; void issue_barrier() { __sync_synchronize(); }; """) @total_ordering class AtomicLong(object):
INTERCEPTION_FILTER_MOUSE_BUTTON_4_UP, INTERCEPTION_FILTER_MOUSE_BUTTON_5_DOWN, INTERCEPTION_FILTER_MOUSE_BUTTON_5_UP, INTERCEPTION_FILTER_MOUSE_WHEEL, INTERCEPTION_FILTER_MOUSE_HWHEEL, INTERCEPTION_FILTER_MOUSE_MOVE, ... }; enum InterceptionMouseFlag { INTERCEPTION_MOUSE_MOVE_RELATIVE, INTERCEPTION_MOUSE_MOVE_ABSOLUTE, INTERCEPTION_MOUSE_VIRTUAL_DESKTOP, INTERCEPTION_MOUSE_ATTRIBUTES_CHANGED, INTERCEPTION_MOUSE_MOVE_NOCOALESCE, INTERCEPTION_MOUSE_TERMSRV_SRC_SHADOW, ... }; ''') _is_64bits = sys.maxsize > 2**32 lib = ffi.verify( ''' #include "interception.h" ''', libraries=['interception64' if _is_64bits else 'interception'], tmpdir='.')
def check(self, source, expected_ofs_y, expected_align, expected_size):
    """Verify cffi's bitfield layout of ``struct s1 { <source> };``.

    Parameters:
        source: C text of the struct's field declarations (bitfields).
        expected_ofs_y: offset of field 'y' as computed by GCC.
        expected_align: alignment of struct s1 as computed by GCC.
        expected_size: sizeof(struct s1) as computed by GCC.
    """
    # NOTE: 'expected_*' is the numbers expected from GCC.
    # The numbers expected from MSVC are not explicitly written
    # in this file, and will just be taken from the compiler.
    ffi = FFI()
    ffi.cdef("struct s1 { %s };" % source)
    ctype = ffi.typeof("struct s1")
    # verify the information with gcc
    ffi1 = FFI()
    ffi1.cdef("""
        static const int Gofs_y, Galign, Gsize;
        struct s1 *try_with_value(int fieldnum, long long value);
    """)
    # Only named fields with a positive bit size can be assigned to;
    # anonymous and zero-width bitfields are padding only.
    fnames = [name for name, cfield in ctype.fields
              if name and cfield.bitsize > 0]
    # One C 'case' per settable field: field index -> assignment.
    setters = ['case %d: s.%s = value; break;' % iname
               for iname in enumerate(fnames)]
    lib = ffi1.verify("""
        struct s1 { %s };
        struct sa { char a; struct s1 b; };
        #define Gofs_y offsetof(struct s1, y)
        #define Galign offsetof(struct sa, b)
        #define Gsize sizeof(struct s1)
        struct s1 *try_with_value(int fieldnum, long long value)
        {
            static struct s1 s;
            memset(&s, 0, sizeof(s));
            switch (fieldnum) { %s }
            return &s;
        }
    """ % (source, ' '.join(setters)))
    if sys.platform == 'win32':
        # MSVC may lay bitfields out differently from GCC: take the
        # compiler's own numbers instead of the GCC expectations.
        expected_ofs_y = lib.Gofs_y
        expected_align = lib.Galign
        expected_size = lib.Gsize
    else:
        assert (lib.Gofs_y, lib.Galign, lib.Gsize) == (
            expected_ofs_y, expected_align, expected_size)
    # the real test follows
    assert ffi.offsetof("struct s1", "y") == expected_ofs_y
    assert ffi.alignof("struct s1") == expected_align
    assert ffi.sizeof("struct s1") == expected_size
    # compare the actual storage of the two
    for name, cfield in ctype.fields:
        # skip non-bitfield members and anonymous padding bitfields
        if cfield.bitsize < 0 or not name:
            continue
        if int(ffi.cast(cfield.type, -1)) == -1:  # signed
            min_value = -(1 << (cfield.bitsize - 1))
            max_value = (1 << (cfield.bitsize - 1)) - 1
        else:
            min_value = 0
            max_value = (1 << cfield.bitsize) - 1
        # Probe a spread of magnitudes in both signs; only in-range
        # values are stored and cross-checked against the C compiler.
        for t in [1, 2, 4, 8, 16, 128, 2813, 89728, 981729,
                  -1, -2, -4, -8, -16, -128, -2813, -89728, -981729]:
            if min_value <= t <= max_value:
                self._fieldcheck(ffi, lib, fnames, name, t)
"""CFFI binding for the C ``bisect_left`` helper in bisect.c."""
import os

from cffi import FFI

ffi = FFI()
ffi.cdef("int bisect_left(int arr[], int r, int x);")

# Directory containing bisect.c.
# NOTE(review): this absolute path is machine-specific; if bisect.c is
# shipped with the package, derive the path from __file__ instead.
file_dir = os.path.abspath('/home/cwh/coding/TrackViz/util')
lib = ffi.verify("#include <bisect.c>", include_dirs=[file_dir], libraries=[])

if __name__ == '__main__':
    # Bug fixes for Python 3 compatibility (behavior unchanged on
    # Python 2): `print` is a function, and cffi cannot convert a
    # py3 `range` object to `int[]` -- pass a real list.
    a = list(range(10))
    print(lib.bisect_left(a, len(a) - 1, 5))
class H264Decoder:
    """Decode H.264 NAL units into RGB24 frames via cffi bindings to
    libavcodec/libswscale.

    Frame dimensions are fixed to constants.WII_VIDEO_WIDTH/HEIGHT.
    """

    def __init_ffi(self):
        # Declare only the subset of the ffmpeg API the decoder needs.
        self.ffi = FFI()
        self.ffi.cdef('''
        // AVCODEC
        enum PixelFormat { PIX_FMT_YUV420P, PIX_FMT_RGB24, ... };
        void avcodec_register_all(void);
        struct AVPacket { ...; uint8_t *data; int size; ...; };
        void av_init_packet(struct AVPacket *pkt);
        enum AVCodecID { AV_CODEC_ID_H264, ... };
        struct AVCodec *avcodec_find_decoder(enum AVCodecID id);
        struct AVCodecContext *avcodec_alloc_context3(struct AVCodec *codec);
        int avcodec_open2(struct AVCodecContext *avctx, struct AVCodec *codec,
                          struct AVDictionary **options);
        struct AVFrame { uint8_t *data[8]; int linesize[8]; ...;
                         int key_frame; ...; };
        struct AVFrame *avcodec_alloc_frame(void);
        int avcodec_decode_video2(struct AVCodecContext *avctx,
                                  struct AVFrame *picture,
                                  int *got_picture_ptr,
                                  struct AVPacket *avpkt);
        int avcodec_close(struct AVCodecContext *avctx);
        void av_free(void *ptr);
        int avpicture_get_size(enum PixelFormat pix_fmt, int width, int height);
        int avpicture_fill(struct AVPicture *picture, uint8_t *ptr,
                           int pix_fmt, int width, int height);

        // SWSCALE
        #define SWS_BILINEAR ...
        #define SWS_FAST_BILINEAR ...
        struct SwsContext *sws_getContext(int srcW, int srcH,
                                          enum PixelFormat srcFormat,
                                          int dstW, int dstH,
                                          enum PixelFormat dstFormat,
                                          int flags,
                                          struct SwsFilter *srcFilter,
                                          struct SwsFilter *dstFilter,
                                          const double *param);
        int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],
                      const int srcStride[], int srcSliceY, int srcSliceH,
                      uint8_t *const dst[], const int dstStride[]);
        void sws_freeContext(struct SwsContext *c);
        ''')
        self.ns = self.ffi.verify(source='''
        #include <libavcodec/avcodec.h>
        #include <libswscale/swscale.h>
        ''', libraries=['avcodec', 'swscale'])

    def __init_avcodec(self):
        # Register codecs and build a decoding context for H.264.
        self.ns.avcodec_register_all()
        self.av_packet = self.ffi.new('struct AVPacket *')
        self.ns.av_init_packet(self.av_packet)
        self.codec = self.ns.avcodec_find_decoder(self.ns.AV_CODEC_ID_H264)
        if not self.codec:
            # Bug fix: the original raised 'avcodec_alloc_context3' here,
            # misreporting which call failed.
            raise Exception('avcodec_find_decoder')
        self.context = self.ns.avcodec_alloc_context3(self.codec)
        if not self.context:
            raise Exception('avcodec_alloc_context3')
        if self.ns.avcodec_open2(self.context, self.codec, self.ffi.NULL) < 0:
            raise Exception('avcodec_open2')
        self.frame = self.ffi.NULL  # placeholder until allocation below
        self.frame = self.ns.avcodec_alloc_frame()
        if not self.frame:
            raise Exception('avcodec_alloc_frame')
        self.got_frame = self.ffi.new('int *')
        # out_frame holds the RGB conversion target (filled lazily by
        # update_dimensions via avpicture_fill).
        self.out_frame = self.ns.avcodec_alloc_frame()

    def __init__(self):
        self.out_buffer, self.sws_context = None, None
        self.__init_ffi()
        self.__init_avcodec()
        self.update_dimensions()

    def close(self):
        """Release all ffmpeg resources owned by this decoder."""
        self.ns.sws_freeContext(self.sws_context)
        self.ns.av_free(self.out_frame)
        self.ns.avcodec_close(self.context)
        self.ns.av_free(self.context)
        self.ns.av_free(self.frame)

    def update_dimensions(self):
        """(Re)create the YUV420P -> RGB24 scaler and output buffer."""
        if self.sws_context is not None:
            self.ns.sws_freeContext(self.sws_context)
        self.sws_context = self.ns.sws_getContext(
            constants.WII_VIDEO_WIDTH, constants.WII_VIDEO_HEIGHT,
            self.ns.PIX_FMT_YUV420P,
            constants.WII_VIDEO_WIDTH, constants.WII_VIDEO_HEIGHT,
            self.ns.PIX_FMT_RGB24,
            self.ns.SWS_FAST_BILINEAR,
            self.ffi.NULL, self.ffi.NULL, self.ffi.NULL)
        bytes_req = self.ns.avpicture_get_size(
            self.ns.PIX_FMT_RGB24,
            constants.WII_VIDEO_WIDTH, constants.WII_VIDEO_HEIGHT)
        # Keep a reference to out_buffer: ffi.new memory is freed when
        # the Python object is garbage-collected.
        self.out_buffer = self.ffi.new('uint8_t [%i]' % bytes_req)
        self.ns.avpicture_fill(
            self.ffi.cast('struct AVPicture *', self.out_frame),
            self.out_buffer, self.ns.PIX_FMT_RGB24,
            constants.WII_VIDEO_WIDTH, constants.WII_VIDEO_HEIGHT)

    def get_image_buffer(self, encoded_nalu):
        """Decode one encoded NAL unit.

        Returns an RGB24 ffi buffer for the decoded frame, or None when
        the decoder consumed the packet without completing a frame.
        Raises Exception on decode errors or partial packet consumption.
        """
        in_data = self.ffi.new('uint8_t []', encoded_nalu)
        self.av_packet.data = in_data
        self.av_packet.size = len(encoded_nalu)
        length = self.ns.avcodec_decode_video2(
            self.context, self.frame, self.got_frame, self.av_packet)
        if length < 0:
            raise Exception('avcodec_decode_video2')
        elif length != self.av_packet.size:
            raise Exception('expected to decode a single complete frame')
        elif self.got_frame[0]:
            # convert from YUV to RGB
            self.ns.sws_scale(
                self.sws_context, self.frame.data, self.frame.linesize,
                0, constants.WII_VIDEO_HEIGHT,
                self.out_frame.data, self.out_frame.linesize)
            image_buffer = self.ffi.buffer(
                self.out_frame.data[0],
                self.out_frame.linesize[0] * constants.WII_VIDEO_HEIGHT)
            return image_buffer
static inline __s32 i2c_smbus_read_byte_data(int file, __u8 command); static inline __s32 i2c_smbus_write_byte_data(int file, __u8 command, __u8 value); static inline __s32 i2c_smbus_read_word_data(int file, __u8 command); static inline __s32 i2c_smbus_write_word_data(int file, __u8 command, __u16 value); static inline __s32 i2c_smbus_process_call(int file, __u8 command, __u16 value); //static inline __s32 i2c_smbus_read_block_data(int file, __u8 command, __u8 *values) //static inline __s32 i2c_smbus_write_block_data(int file, __u8 command, __u8 length, const __u8 *values) """) include_dir = os.path.join(os.path.dirname(__file__), '..', 'include') SMBUS = ffi.verify(""" #include <sys/types.h> #include <linux/i2c-dev.h> """, ext_package='smbus', include_dirs=[include_dir]) class SMBus(object): """SMBus([bus]) -> SMBus Return a new SMBus object that is (optionally) connected to the specified I2C device interface. """ _fd = -1 _addr = -1 _pec = 0 # compat mode, enables some features that are not compatible with the # original smbusmodule.c _compat = False
C1 = ffi2.verify(""" #include<stdio.h> double sum1(double** mat, int x, int y, int nrow, int ncol){ int i,j,k; double tmpsm = 0; double mxval = 0; for (i=0; i<x-ncol+1; i++){ for (j=0; j<y; j++){ tmpsm =0; //printf("val %f",mat[i][j]); for (k=0; k<ncol; k++){ tmpsm += mat[i+k][j]; } //printf("tmpsm %d ",(int)tmpsm); mat[i][j] = tmpsm; } } for (i=0; i<x; i++){ for (j=0; j<y-nrow+1; j++){ tmpsm =0; //printf("val2 %f",mat[i][j]); for (k=0; k<nrow; k++){ tmpsm += mat[i][j+k]; } //printf("tmpsm2 %d ",(int)tmpsm); mat[i][j] = tmpsm; } } for (i=0; i<x-ncol+1; i++){ for (j=0; j<y-nrow+1; j++){ if(mat[i][j]>mxval) { mxval = mat[i][j]; //printf("mxval %d",(int)mxval); } } } return(mxval); } """)
class NFLOG(object): _instance = None def __new__(cls): if not cls._instance: cls._instance = super(NFLOG, cls).__new__(cls) return cls._instance def __init__(self): global _cdef, _clibs_includes, _clibs_link self.ffi = FFI() self.ffi.cdef(_cdef) self.libnflog = self.ffi.verify(_clibs_includes, libraries=list(_clibs_link)) self.libnflog_cache = dict() _cdef = _clibs_includes = _clibs_link = None def _ffi_call( self, func, args, no_check=False, check_gt0=False, check_notnull=False ): '''Call libnflog function through cffi, checking return value and raising error, if necessary. Checks if return is >0 by default.''' res = func(*args) if no_check\ or (check_gt0 and res > 0)\ or (check_notnull and res)\ or res >= 0: return res errno_ = self.ffi.errno raise NFLogError(errno_, os.strerror(errno_)) def __getattr__(self, k): if not (k.startswith('nflog_') or k.startswith('c_')): return super(NFLOG, self).__getattr__(k) if k.startswith('c_'): k = k[2:] if k not in self.libnflog_cache: func = getattr(self.libnflog, k) self.libnflog_cache[k] = lambda *a,**kw: self._ffi_call(func, a, **kw) return self.libnflog_cache[k] def generator( self, qids, pf=(socket.AF_INET, socket.AF_INET6), qthresh=None, timeout=None, nlbufsiz=None, buff_size=None, extra_attrs=None, handle_overflows=True ): '''Generator that yields: - on first iteration - netlink fd that can be poll'ed or integrated into some event loop (twisted, gevent, ...). Also, that is the point where uid/gid/caps can be dropped. - on all subsequent iterations it does recv() on that fd, returning either None (if no packet can be assembled yet) or captured packet payload. 
qids: nflog group ids to bind to (nflog_bind_group) Keywords: pf: address families to pass to nflog_bind_pf extra_attrs: metadata to extract from captured packets, returned in a list after packet payload, in the same order nlbufsiz (bytes): set size of netlink socket buffer for the created queues qthresh (packets): set the maximum amount of logs in buffer for each group timeout (seconds): set the maximum time to push log buffer for this group buff_size (bytes): size of the batch to fetch from libnflog to process in python (default: min(nlbufsiz, 1 MiB)) handle_overflows: supress ENOBUFS NFLogError on queue overflows (but do log warnings, default: True)''' handle = self.nflog_open(check_notnull=True) for pf in (pf if not isinstance(pf, int) else [pf]): self.nflog_unbind_pf(handle, pf) self.nflog_bind_pf(handle, pf) if isinstance(extra_attrs, bytes): extra_attrs = [extra_attrs] cb_results = list() @self.ffi.callback('nflog_callback') def recv_callback( qh, nfmsg, nfad, data, extra_attrs=extra_attrs, ts_slot=self.ffi.new('struct timeval *'), pkt_slot=self.ffi.new('char **'), ts_err_mask=frozenset([0, errno.EAGAIN]), result=None ): try: pkt_len = self.nflog_get_payload(nfad, pkt_slot) result = self.ffi.buffer(pkt_slot[0], pkt_len)[:] if extra_attrs: result = [result] for attr in extra_attrs: if attr == 'len': result.append(pkt_len) elif attr == 'ts': # Fails quite often (EAGAIN, SUCCESS, ...), not sure why try: self.nflog_get_timestamp(nfad, ts_slot) except NFLogError as err: if err.errno not in ts_err_mask: raise result.append(None) else: result.append(ts_slot.tv_sec + ts_slot.tv_usec * 1e-6) else: raise NotImplementedError('Unknown nflog attribute: {}'.format(attr)) cb_results.append(result) except: cb_results.append(StopIteration) # breaks the generator raise return 0 for qid in (qids if not isinstance(qids, int) else [qids]): qh = self.nflog_bind_group(handle, qid, check_notnull=True) self.nflog_set_mode(qh, self.libnflog.NFULNL_COPY_PACKET, 0xffff) if qthresh: 
self.nflog_set_qthresh(qh, qthresh) if timeout: self.nflog_set_timeout(qh, int(timeout * 100)) if nlbufsiz: self.nflog_set_nlbufsiz(qh, nlbufsiz) self.nflog_callback_register(qh, recv_callback, self.ffi.NULL) fd = self.nflog_fd(handle) if not buff_size: if nlbufsiz: buff_size = min(nlbufsiz, 1*2**20) else: buff_size = 1*2**20 buff = self.ffi.new('char[]', buff_size) peek = yield fd # yield fd for poll() on first iteration while True: if peek: peek = yield NFWouldBlock # poll/recv is required continue # Receive/process netlink data, which may contain multiple packets try: nlpkt_size = self.c_recv(fd, buff, buff_size, 0) except NFLogError as err: if handle_overflows and err.errno == errno.ENOBUFS: log.warn( 'nlbufsiz seem' ' to be insufficient to hold unprocessed packets,' ' consider raising it via corresponding function keyword' ) continue raise self.nflog_handle_packet(handle, buff, nlpkt_size, no_check=True) # yield individual L3 packets for result in cb_results: if result is StopIteration: raise result peek = yield result cb_results = list()
def inner(self, data): f(self, data) return self.getInitialState() return inner ffi = FFI() incl_dir = os.path.dirname(__file__) ffi.cdef(open(os.path.join(incl_dir, 'txpg2.h')).read()) lib = ffi.verify(""" #include "txpg2.h" """, include_dirs=['.'], sources=['txpg2.c'], relative_to=__file__, extra_compile_args=['-std=c99']) class PostgresProtocol(StatefulProtocol): def getInitialState(self): return (self.getHeader, 5) def getHandler(self, tag): handlers = { 'R': self.handle_Authentication, 'S': self.handle_ParameterStatus, 'K': self.handle_BackendKeyData, 'Z': self.handle_ReadyForQuery,
signed long long cpref__le_ref(struct cpref__stream *stream, signed long long ref); """ ffi.cdef(CPREF_STRUCT) ffi.cdef(CPREF_API) folder_path = os.path.dirname(__file__) so_path = os.path.abspath(os.path.join(folder_path, '../build/libs')) headers_path = os.path.abspath(os.path.join(folder_path, '../build/headers')) c_file_path = os.path.abspath(os.path.join(folder_path, '../src/pulp_db/kds')) # How do we unit test static functions. We cheat and include thus not static. CPREF_SO = ffi.verify("""#include "cpref.c" """, libraries=["ttrie"], library_dirs=[so_path], runtime_library_dirs=[so_path], include_dirs=[c_file_path, headers_path], extra_compile_args=["-std=c99", "-Wno-unused-function"]) W_L = 2000 PY_DATA = [x for x in range(W_L)] C_DATA = ffi.new("signed long long [{}]".format(len(PY_DATA)), PY_DATA) R_L = 1800 if not os.path.exists('data/cpref'): os.makedirs('data/cpref') def cpref_write(file_path, data, length): cp_write = ffi.new("struct cpref__obj *c")
from cffi import FFI

# Minimal cffi demo: look up root's passwd entry through libc's
# getpwuid() and check the account name.
ffi = FFI()
ffi.cdef("""
// some declarations from the man page
struct passwd { char *pw_name; ...; };
struct passwd *getpwuid(int uid);
""")
C = ffi.verify("""
// passed to the real C compiler
#include <sys/types.h>
#include <pwd.h>
""", libraries=[])  # or a list of libraries to link with

# uid 0 is root on any POSIX system.
p = C.getpwuid(0)
print(p)
pw_name = ffi.string(p.pw_name)
print(pw_name)
assert pw_name == b'root'  # on Python 3: b'root'
import pkgutil

from cffi import FFI

__all__ = ('lib', 'ffi')


def _read_c_source(resource):
    """Return a package-bundled C source file as ASCII text."""
    return pkgutil.get_data('posix_spawn', resource).decode('ascii')


# The declarations come from c/cdef.h; the implementation compiled by
# ffi.verify() comes from c/verify.h.
ffi = FFI()
ffi.cdef(_read_c_source('c/cdef.h'))
lib = ffi.verify(_read_c_source('c/verify.h'))
# r""" # void FOO_set_prop1(FOO* self, int v){ # self->prop1 = v; # } # """) try: Clib = None Clib = ffi.verify(r''' typedef struct FOO FOO; struct FOO { int prop1; int prop2; int prop3; int prop4; int prop5; int prop6; int prop7; int prop8; int prop9; int prop10; }; void FOO_set_prop1(FOO* self, int v){ self->prop1 = v; }; ''') except Exception as exp: print('ERROR CFFI: %s' % traceback.format_exc()) # ffi.compile() print("CLib", Clib) def bench_host_creation_with_attr():
# cffi prototypes for the id-encoding helpers under test.  They are
# declared `static` in C; see the note below on how they become callable.
MDS_API = """
static void encode_id(unsigned char table_type, unsigned int table_i, unsigned long long row_i, unsigned char id[5]);
static void decode_id(const unsigned char id[5], unsigned char *table_type, unsigned int *table_i, unsigned long long *row_i);
static bool power_of_two_or_zero(unsigned int i);
"""
ffi.cdef(TTRIE_STRUCT)
ffi.cdef(MDS_API)
#TTRIE_SO = ffi.dlopen("libttrie.so")
# How do we unit test static functions. We cheat and include thus not static.
TTRIE_SO = ffi.verify("""#include "tabletrie.c" """,
                      libraries=["ttrie"],
                      library_dirs=["."],
                      include_dirs=[".", "../first_trie"],
                      extra_compile_args=["-std=c99"])

# (table_type, table_i, row_i) triples used as encode/decode fixtures.
id_raw = namedtuple('id_raw', ['table_type', 'table_i', 'row_i'])
DATA = [id_raw(0, 1, 2),
        id_raw(0, 128, 2),
        id_raw(0, 100000, 2),
        id_raw(0, 1, 255),
        id_raw(1, 0, 1),
        id_raw(2, 0, 2),
        id_raw(3, 0, 4),
        id_raw(4, 0, 8),
        ]
struct redisReader { ...; }; struct redisReader *redisReaderCreate(void); void redisReaderFree(struct redisReader *r); int redisReaderFeed(struct redisReader *r, const char *buf, size_t len); int redisReaderGetReply(struct redisReader *r, redisReply **reply); void freeReplyObject(void *reply); """) hiredis = ffi.verify(""" #include "hiredis.h" """, sources=[ "hiredis/hiredis.c", "hiredis/net.c", "hiredis/sds.c", ], include_dirs=["hiredis"]) class HiredisError(Exception): pass class NoReply(HiredisError): pass class RedisReader(object):
#define MAP_FIXED ... #define MAP_GROWSDOWN ... #define MAP_HUGETLB ... #define MAP_LOCKED ... #define MAP_NONBLOCK ... #define MAP_NORESERVE ... #define MAP_POPULATE ... #define MAP_STACK ... void *mmap(void *addr, size_t length, int prot, int flags, int fd, size_t offset); void *offset(void *mapped, size_t offset); """) try: C = ffi.verify(""" #include <sys/mman.h> void *offset(void *mapped, size_t offset) { return (void*)((char*)mapped + offset); } """) globals().update({n: getattr(C, n) for n in dir(C)}) except: PROT_READ = PROT_WRITE = MAP_SHARED = PROT_NONE = MAP_PRIVATE = -1 def mmap(addr=ffi.NULL, length=0, prot=PROT_NONE, flags=MAP_PRIVATE, fd=0, offset=0, buffer=True):