Example #1
0
def main_verbose():
    """Pass a numpy array to a C function and get a numpy array back.

    More verbose than the minimal version, but valid.
    """
    ffi = FFI()
    ffi.cdef("void copy(float *in, float *out, int len);")
    C = ffi.dlopen("libcopy.so")

    # Allocate the C-owned input and output buffers (16 floats each).
    c_in = ffi.new("float[16]")
    c_out = ffi.new("float[16]")

    # Fill the input buffer from a numpy array of 42s, element-slice copy.
    source = 42 * np.ones(16, dtype=np.float32)
    c_in[0:16] = source[0:16]

    C.copy(c_in, c_out, 16)

    # Wrap the output buffer (16 floats x 4 bytes) as a numpy array
    # without copying the underlying memory.
    result = np.frombuffer(ffi.buffer(c_out, 16 * 4), dtype=np.float32)

    print(result)
def invoke(func, *args):
    """Call a library function and fail loudly on a nonzero return code.

    func: the C function (from the dlopen'd library) to call.
    *args: arguments forwarded to it.
    Returns None on success; raises RuntimeError carrying the library's
    error message when the call reports failure.
    """
    retcode = func(*args)
    if retcode != 0:
        # Translate the numeric code into a readable message before
        # raising.  The original bare `raise` here was a bug: with no
        # active exception it itself errors out with an unrelated message.
        msg = lib.SpcGetErrMsg(retcode)
        print(msg)
        raise RuntimeError(msg)

# --- Smart-card session driven through the vendor's Spc* library ---
# (Python 2 script: note the print statements.)

# Initialize the library environment; invoke() fails loudly on error.
invoke(lib.SpcInitEnvEx)

# Authenticate with an 8-digit PIN (length passed separately).
invoke(lib.SpcVerifyPIN,ffi.new("char[]","88888888"),8)

# Fetch the card holder's user name: a 250-byte output buffer plus an
# in/out length word initialized to the buffer capacity.
userName = ffi.new("char[250]")
lenUserName = ffi.new("unsigned int*")
lenUserName[0] = 250

invoke(lib.SpcGetCardUserInfo,userName, lenUserName)
print "Actual len:",len(userName)," ret len: ",lenUserName[0]
print ffi.string(userName)

# Fetch the environment certificate into a 4 KiB buffer, using the same
# in/out length convention as above.
cert = ffi.new("uint8_t[4096]")
lenCert = ffi.new("unsigned int*")
lenCert[0] = 4096
invoke(lib.SpcGetEnvCert,cert, lenCert)
print "Cert Len:",lenCert[0]
print bytes(ffi.buffer(cert,lenCert[0]))


# Tear down the library environment (return code not checked here).
lib.SpcClearEnv()

Example #3
0
class _PcapFfi(object):
    """
    This class represents the low-level interface to the libpcap library.
    It encapsulates all the cffi calls and C/Python conversions, as well
    as translation of errors and error codes to PcapExceptions.  It is
    intended to be used as a singleton class through the PcapDumper
    and PcapLiveDevice classes, below.
    """

    _instance = None
    __slots__ = ["_ffi", "_libpcap", "_interfaces", "_windoze"]

    def __init__(self):
        """
        Assumption: this class is instantiated once in the main thread before
        any other threads have a chance to try instantiating it.
        """
        if _PcapFfi._instance:
            raise Exception("Can't initialize this class more than once!")

        _PcapFfi._instance = self
        self._windoze = False

        self._ffi = FFI()
        self._ffi.cdef(
            """
        struct pcap;
        typedef struct pcap pcap_t;
        struct pcap_dumper;
        typedef struct pcap_dumper pcap_dumper_t;
        struct pcap_addr {
            struct pcap_addr *next;
            struct sockaddr *addr;
            struct sockaddr *netmask;
            struct sockaddr *broadaddr;
            struct sockaddr *dstaddr;
        };
        typedef struct pcap_addr pcap_addr_t;
        struct pcap_if {
            struct pcap_if *next;
            char *name;
            char *description;
            pcap_addr_t *addresses;
            int flags;
        };
        typedef struct pcap_if pcap_if_t;

        int pcap_findalldevs(pcap_if_t **, char *);
        void pcap_freealldevs(pcap_if_t *);

        struct pcap_pkthdr {
            unsigned long tv_sec;
            unsigned long tv_usec;
            unsigned int caplen;
            unsigned int len;
        };

        struct pcap_stat {
            unsigned int recv;
            unsigned int drop;
            unsigned int ifdrop;
        };

        pcap_t *pcap_open_dead(int, int);
        pcap_dumper_t *pcap_dump_open(pcap_t *, const char *);
        void pcap_dump_close(pcap_dumper_t *);
        void pcap_dump(pcap_dumper_t *, struct pcap_pkthdr *, unsigned char *);

        // live capture
        pcap_t *pcap_create(const char *, char *); // source, errbuf
        pcap_t *pcap_open_live(const char *, int, int, int, char *);
        pcap_t *pcap_open_offline(const char *fname, char *errbuf);
        int pcap_set_snaplen(pcap_t *, int); // 0 on success
        int pcap_snapshot(pcap_t *);
        int pcap_set_promisc(pcap_t *, int); // 0 on success
        int pcap_set_buffer_size(pcap_t *, int); // 0 on success
        int pcap_datalink(pcap_t *);
        int pcap_setnonblock(pcap_t *, int, char *); // 0 on success
        int pcap_getnonblock(pcap_t *, char *);
        int pcap_next_ex(pcap_t *, struct pcap_pkthdr **, const unsigned char **);
        int pcap_activate(pcap_t *);
        void pcap_close(pcap_t *);
        int pcap_get_selectable_fd(pcap_t *);
        int pcap_sendpacket(pcap_t *, const unsigned char *, int);
        char *pcap_geterr(pcap_t *);
        char *pcap_lib_version();
        int pcap_stats(pcap_t *, struct pcap_stat *);

        struct bpf_insn;
        struct bpf_program {
            unsigned int bf_len;
            struct bpf_insn *bf_insns;
        };
        int pcap_setfilter(pcap_t *, struct bpf_program *);
        int pcap_compile(pcap_t *, struct bpf_program *,
            const char *, int, unsigned int);
        void pcap_freecode(struct bpf_program *);
        """
        )
        # Load the platform-appropriate shared-library name.
        if sys.platform == "darwin":
            self._libpcap = self._ffi.dlopen("libpcap.dylib")  # standard libpcap
        elif sys.platform == "linux":
            self._libpcap = self._ffi.dlopen("libpcap.so")  # standard libpcap
        elif sys.platform == "win32":
            self._libpcap = self._ffi.dlopen("wpcap.dll")  # winpcap
            self._windoze = True
        else:
            raise PcapException("Don't know how to locate libpcap on this platform: {}".format(sys.platform))
        self._interfaces = []
        self.discoverdevs()

    @staticmethod
    def instance():
        # Lazily create the singleton on first access.
        if not _PcapFfi._instance:
            _PcapFfi._instance = _PcapFfi()
        return _PcapFfi._instance

    @property
    def version(self):
        """Version string of the loaded libpcap, as a bytes object."""
        return self._ffi.string(self._libpcap.pcap_lib_version())

    def discoverdevs(self):
        """
        Find all the pcap-eligible devices on the local system.
        """
        if len(self._interfaces):
            raise PcapException("Device discovery should only be done once.")

        ppintf = self._ffi.new("pcap_if_t * *")
        # libpcap requires error buffers of at least PCAP_ERRBUF_SIZE
        # (256) bytes; the previous 128-byte buffer risked an overflow
        # when the library wrote a long error message.
        errbuf = self._ffi.new("char []", 256)
        rv = self._libpcap.pcap_findalldevs(ppintf, errbuf)
        if rv:
            raise PcapException("pcap_findalldevs returned failure: {}".format(self._ffi.string(errbuf)))
        pintf = ppintf[0]
        tmp = pintf
        pindex = 0
        # Walk the singly-linked pcap_if_t list the library returned.
        while tmp != self._ffi.NULL:
            xname = self._ffi.string(tmp.name)  # "internal name"; still stored as bytes object
            xname = xname.decode("ascii", "ignore")

            if self._windoze:
                ext_name = "port{}".format(pindex)
            else:
                ext_name = xname
            pindex += 1

            if tmp.description == self._ffi.NULL:
                xdesc = ext_name
            else:
                xdesc = self._ffi.string(tmp.description)
                xdesc = xdesc.decode("ascii", "ignore")

            # NB: on WinPcap, only loop flag is set
            isloop = (tmp.flags & 0x1) == 0x1
            isup = (tmp.flags & 0x2) == 0x2
            isrunning = (tmp.flags & 0x4) == 0x4

            xif = Interface(ext_name, xname, xdesc, isloop, isup, isrunning)

            self._interfaces.append(xif)
            tmp = tmp.next
        # Hand the list back to libpcap; names/descriptions were copied.
        self._libpcap.pcap_freealldevs(pintf)

    @property
    def devices(self):
        """All Interface objects discovered at construction time."""
        return self._interfaces

    def open_dumper(self, outfile, dltype=Dlt.DLT_EN10MB, snaplen=65535):
        """Open *outfile* for writing packets; return a PcapDev wrapper."""
        pcap = self._libpcap.pcap_open_dead(dltype.value, snaplen)
        xoutfile = self._ffi.new("char []", bytes(outfile, "ascii"))
        pcapdump = self._libpcap.pcap_dump_open(pcap, xoutfile)
        # pcap_dump_open returns NULL on failure (e.g. unwritable path);
        # previously an unusable handle was silently returned.
        if pcapdump == self._ffi.NULL:
            raise PcapException(
                "Failed to open dump file {}: {}".format(outfile, self._ffi.string(self._libpcap.pcap_geterr(pcap)))
            )
        dl = self._libpcap.pcap_datalink(pcap)
        snaplen = self._libpcap.pcap_snapshot(pcap)
        return PcapDev(Dlt(dl), 0, snaplen, self.version, pcapdump)

    def close_dumper(self, pcapdump):
        """Flush and close a dumper previously returned by open_dumper."""
        self._libpcap.pcap_dump_close(pcapdump)

    def write_packet(self, dumper, pkt, ts=None):
        """Append one raw packet (bytes) to *dumper* with timestamp *ts*.

        ts defaults to the current time.  A ts of exactly 0.0 (the epoch)
        is honored; the previous falsy test (`if not ts`) clobbered it.
        """
        pkthdr = self._ffi.new("struct pcap_pkthdr *")
        if ts is None:
            ts = time()
        pkthdr.tv_sec = int(ts)
        pkthdr.tv_usec = int(1000000 * (ts - int(ts)))
        pkthdr.caplen = len(pkt)
        pkthdr.len = len(pkt)
        xpkt = self._ffi.new("char []", pkt)
        self._libpcap.pcap_dump(dumper, pkthdr, xpkt)

    def open_pcap_file(self, filename):
        """Open a savefile for reading; return a PcapDev wrapper."""
        errbuf = self._ffi.new("char []", 256)  # PCAP_ERRBUF_SIZE
        pcap = self._libpcap.pcap_open_offline(bytes(filename, "ascii"), errbuf)
        if pcap == self._ffi.NULL:
            raise PcapException(
                "Failed to open pcap file for reading: {}: {}".format(filename, self._ffi.string(errbuf))
            )

        dl = self._libpcap.pcap_datalink(pcap)
        try:
            dl = Dlt(dl)
        except ValueError:
            raise PcapException("Don't know how to handle datalink type {}".format(dl))
        return PcapDev(dl, 0, 0, self.version, pcap)

    def open_live(self, device, snaplen=65535, promisc=1, to_ms=100, nonblock=True):
        """Open a live capture on *device* (external name from devices)."""
        errbuf = self._ffi.new("char []", 256)  # PCAP_ERRBUF_SIZE
        # Map the external device name back to the libpcap-internal name.
        internal_name = None
        for dev in self._interfaces:
            if dev.name == device:
                internal_name = dev.internal_name
                break
        if internal_name is None:
            raise Exception("No such device {} exists.".format(device))

        pcap = self._libpcap.pcap_open_live(bytes(internal_name, "ascii"), snaplen, promisc, to_ms, errbuf)
        if pcap == self._ffi.NULL:
            raise PcapException("Failed to open live device {}: {}".format(internal_name, self._ffi.string(errbuf)))

        if nonblock:
            rv = self._libpcap.pcap_setnonblock(pcap, 1, errbuf)
            if rv != 0:
                raise PcapException(
                    "Error setting pcap device in nonblocking state: {}".format(self._ffi.string(errbuf))
                )

        # gather what happened
        nblock = self._libpcap.pcap_getnonblock(pcap, errbuf)
        snaplen = self._libpcap.pcap_snapshot(pcap)
        dl = self._libpcap.pcap_datalink(pcap)
        try:
            dl = Dlt(dl)
        except ValueError:
            raise PcapException("Don't know how to handle datalink type {}".format(dl))
        return PcapDev(dl, nblock, snaplen, self.version, pcap)

    def close_live(self, pcap):
        """Close a live-capture handle."""
        self._libpcap.pcap_close(pcap)

    def get_select_fd(self, xpcap):
        """Return a selectable fd for the handle, or -1 if unsupported."""
        try:
            return self._libpcap.pcap_get_selectable_fd(xpcap)
        except Exception:
            # Narrowed from a bare except: still best-effort, but no
            # longer swallows KeyboardInterrupt/SystemExit.
            return -1

    def send_packet(self, xpcap, xbuffer):
        """Inject a raw packet; return True on success, raise on failure."""
        if not isinstance(xbuffer, bytes):
            raise PcapException("Packets to be sent via libpcap must be serialized as a bytes object")
        xlen = len(xbuffer)
        rv = self._libpcap.pcap_sendpacket(xpcap, xbuffer, xlen)
        if rv == 0:
            return True
        s = self._ffi.string(self._libpcap.pcap_geterr(xpcap))
        raise PcapException("Error sending packet: {}".format(s))

    def recv_packet(self, xpcap):
        """Fetch the next packet; return a PcapPacket, or None on
        timeout / end-of-savefile.  Raises PcapException on error."""
        phdr = self._ffi.new("struct pcap_pkthdr **")
        pdata = self._ffi.new("unsigned char **")
        rv = self._libpcap.pcap_next_ex(xpcap, phdr, pdata)
        if rv == 1:
            # Copy out of libpcap's buffer before the next call reuses it.
            rawpkt = bytes(self._ffi.buffer(pdata[0], phdr[0].caplen))
            ts = float("{}.{}".format(phdr[0].tv_sec, phdr[0].tv_usec))
            return PcapPacket(ts, phdr[0].caplen, phdr[0].len, rawpkt)
        elif rv == 0:
            # timeout; nothing to return
            return None
        elif rv == -1:
            # error on receive; raise an exception
            s = self._ffi.string(self._libpcap.pcap_geterr(xpcap))
            raise PcapException("Error receiving packet: {}".format(s))
        elif rv == -2:
            # reading from savefile, but none left
            return None

    def set_filter(self, xpcap, filterstr):
        """Compile *filterstr* (BPF syntax) and install it on the handle."""
        bpf = self._ffi.new("struct bpf_program *")
        cfilter = self._ffi.new("char []", bytes(filterstr, "ascii"))
        compile_result = self._libpcap.pcap_compile(xpcap.pcap, bpf, cfilter, 0, 0xFFFFFFFF)
        if compile_result < 0:
            # get error, raise exception (nothing to free: compile failed)
            s = self._ffi.string(self._libpcap.pcap_geterr(xpcap.pcap))
            raise PcapException("Error compiling filter expression: {}".format(s))

        try:
            sf_result = self._libpcap.pcap_setfilter(xpcap.pcap, bpf)
            if sf_result < 0:
                # get error, raise exception
                s = self._ffi.string(self._libpcap.pcap_geterr(xpcap.pcap))
                raise PcapException("Error setting filter on pcap handle: {}".format(s))
        finally:
            # Free the compiled program even when setfilter fails; the
            # old code leaked it on the error path.
            self._libpcap.pcap_freecode(bpf)

    def stats(self, xpcap):
        """Return PcapStats(recv, drop, ifdrop) for the capture handle."""
        pstat = self._ffi.new("struct pcap_stat *")
        rv = self._libpcap.pcap_stats(xpcap, pstat)
        if rv == 0:
            return PcapStats(pstat.recv, pstat.drop, pstat.ifdrop)
        else:
            s = self._ffi.string(self._libpcap.pcap_geterr(xpcap))
            raise PcapException("Error getting stats: {}".format(s))
Example #4
0
class Sentech(object):
    """Thin CFFI wrapper around the Sentech StCamD camera DLL (Windows).

    Loads the StCamD.dll matching the interpreter's architecture, parses
    a stripped vendor header for the cdefs, and exposes the StTrg_* API
    as Python methods.  Calls go through ``self.library_handler``; the
    opaque camera handle lives in ``self.camera_handle``.
    """

    def __init__(self):
        # Wide-character (UTF-16) mode so the *W API variants can be used.
        self.ffi = FFI()
        self.ffi.set_unicode(True)

        # dlopen'd DLL handle; None until init_api() succeeds.
        self.library_handler = None
        # Opaque camera handle from StTrg_Open(); None when closed.
        self.camera_handle = None

        # Call-timing guards (see is_prohibited_call_timing).
        self.is_transferring_image = False
        self.is_inside_callback_function = False

        self.init_api()

    def init_api(self):
        """Read the vendor header into ffi.cdef and dlopen the DLL."""
        include_file_path = self.get_include_file_path()
        with open(include_file_path, 'r') as include_file:
            logging.info("Reading the include file")
            self.ffi.cdef(include_file.read())

        dll_file_path = self.get_dll_file_path()
        self.library_handler = self.ffi.dlopen(dll_file_path)

    def get_dll_file_path(self):
        """Return the path of the StCamD.dll matching this interpreter's
        architecture (x64 or x86); raise RuntimeError if undeterminable."""
        if platform.architecture()[0] == '64bit':
            dll_file_path = get_current_module_path(__file__, "../../../lib/camera/x64/StCamD.dll")
            logging.debug("64 bit platform found")
        elif platform.architecture()[0] == '32bit':
            dll_file_path = get_current_module_path(__file__, "../../../lib/camera/x86/StCamD.dll")
            logging.debug("32 bit platform found")
        else:
            message = "Cannot determine platform architecture: 32-bit or 64-bit."
            logging.error(message)
            raise RuntimeError(message)

        logging.debug("dll_file_path: {:s}".format(dll_file_path))
        return dll_file_path

    def get_include_file_path(self):
        """Return the path of the stripped vendor header used for cdef."""
        include_file_path = get_current_module_path(__file__, "../../../include/camera/StCamD_stripped.h")
        logging.info("include_file_path: {:s}".format(include_file_path))
        return include_file_path

    def get_api_version(self):
        """Return the DLL *file* version as (major, minor, 0, build).

        Uses the first pair of DWORDs from StTrg_GetDllVersion; failures
        are logged but a (possibly zeroed) tuple is still returned.
        """
        if self.library_handler is None:
            logging.info("No library handle found, try to initialize the api")
            self.init_api()

        file_version_ms = self.ffi.new("PDWORD", 0)
        file_version_ls = self.ffi.new("PDWORD", 0)
        _product_version_ms = self.ffi.new("PDWORD", 0)
        _product_version_ls = self.ffi.new("PDWORD", 0)

        status = self.library_handler.StTrg_GetDllVersion(file_version_ms, file_version_ls, _product_version_ms,
                                                          _product_version_ls)

        if not status:
            logging.error("Cannot get the dll version")

        # Unpack the MS dword as 4 bytes; byte order here yields
        # (minor, _, major, _) — little-endian layout of HIWORD/LOWORD.
        s = Struct("4B")
        logging.debug(s.unpack(self.ffi.buffer(file_version_ms, 4)))
        minor, _, major, _ = s.unpack(self.ffi.buffer(file_version_ms, 4))
        build = file_version_ls[0]
        logging.debug(file_version_ls[0])

        api_version = (major, minor, 0, build)

        return api_version

    def get_api_file_version(self):
        """Return the DLL *product* version as (major, minor, 0, build).

        Mirror of get_api_version but reads the second pair of DWORDs
        from StTrg_GetDllVersion.
        """
        if self.library_handler is None:
            logging.info("No library handle found, try to initialize the api")
            self.init_api()

        _file_version_ms = self.ffi.new("PDWORD", 0)
        _file_version_ls = self.ffi.new("PDWORD", 0)
        product_version_ms = self.ffi.new("PDWORD", 0)
        product_version_ls = self.ffi.new("PDWORD", 0)

        status = self.library_handler.StTrg_GetDllVersion(_file_version_ms, _file_version_ls, product_version_ms,
                                                          product_version_ls)

        if not status:
            logging.error("Cannot get the ddl version")

        s = Struct("4B")
        logging.debug(s.unpack(self.ffi.buffer(product_version_ms, 4)))
        minor, _, major, _ = s.unpack(self.ffi.buffer(product_version_ms, 4))
        build = product_version_ls[0]
        logging.debug(product_version_ls[0])

        api_file_version = (major, minor, 0, build)

        return api_file_version

    def get_camera_version(self):
        """Return (usb_vendor_id, usb_product_id, fpga_version,
        firmware_version) WORD values from the connected camera."""
        self._find_camera()

        usb_vendor_id = self.ffi.new("PWORD", 0)
        usb_product_id = self.ffi.new("PWORD", 0)
        fpga_version = self.ffi.new("PWORD", 0)
        firmwre_version = self.ffi.new("PWORD", 0)

        status = self.library_handler.StTrg_GetCameraVersion(self.camera_handle, usb_vendor_id, usb_product_id,
                                                             fpga_version, firmwre_version)

        if not status:
            logging.error("Cannot get the ddl version")

        logging.info(usb_vendor_id[0])
        logging.info(usb_product_id[0])
        logging.info(fpga_version[0])
        logging.info(firmwre_version[0])

        return usb_vendor_id[0], usb_product_id[0], fpga_version[0], firmwre_version[0]

    def open_camera(self):
        """Open the camera via StTrg_Open and store the opaque handle.

        On failure the handle compares equal to -1 (see
        camera_handle_value); this is logged but not raised.
        """
        if self.library_handler is None:
            logging.info("No library handle found, try to initialize the api")
            self.init_api()

        self.camera_handle = self.library_handler.StTrg_Open()

        if self.camera_handle_value == -1:
            logging.error("Cannot open the camera")
            logging.info(self.camera_handle)

    def close_camera(self):
        """Close the camera if open; no-op when not open or when called
        from inside the image callback (closing there is unsafe)."""
        if self.library_handler is None:
            logging.info("No library handle found, the camera is not open")
            return

        if self.camera_handle is None or self.camera_handle_value == -1:
            logging.info("No camera handle found, the camera is not open")
            self.camera_handle = None
            return

        if self.is_inside_callback_function:
            logging.warning("Inside the callback function, cannot close the camera.")
            return

        self.library_handler.StTrg_Close(self.camera_handle)

        self.camera_handle = None

    def get_product_name(self):
        """Return the camera's product name as a Python string."""
        self._find_camera()

        product_name = ""

#        product_name_buffer = self.ffi.new("PSTR[]", 256)
#        buffer_size = len(product_name_buffer)
#        status = self.library_handler.StTrg_GetProductNameA(self.camera_handle, product_name_buffer, buffer_size)
#        if not status:
#            logging.error("Cannot get the GetProductNameA")
#        logging.info(product_name_buffer[0])
#        logging.info(product_name_buffer)
#        #logging.info(self.ffi.string(product_name_buffer))
#        logging.info(buffer_size)

        # NOTE(review): ffi.new("PWSTR") allocates a single element, yet
        # buffer_size advertises 256 to the DLL — looks like a potential
        # overflow; a wchar_t[256] allocation seems intended. TODO confirm
        # against the StCamD API before relying on this.
        product_name_buffer = self.ffi.new("PWSTR")
        buffer_size = 256
        status = self.library_handler.StTrg_GetProductNameW(self.camera_handle, product_name_buffer, buffer_size)
        if not status:
            logging.error("Cannot get the GetProductNameW")
        logging.info(product_name_buffer[0])
        logging.info(self.ffi.string(product_name_buffer))
        logging.info(buffer_size)

        product_name = self.ffi.string(product_name_buffer)

        return product_name

    def has_function(self, camera_function_id):
        """Return True if the camera supports the given function id."""
        self._find_camera()

        function_availability = self.ffi.new("BOOL*", False)
        status = self.library_handler.StTrg_HasFunction(self.camera_handle, camera_function_id, function_availability)
        if not status:
            logging.error("Cannot get the GetProductNameW")
        logging.info(camera_function_id)
        logging.info(function_availability[0])

        return bool(function_availability[0])

    def _find_camera(self):
        """Ensure the DLL is loaded and a camera handle is open,
        initializing/opening lazily as needed."""
        if self.library_handler is None:
            logging.info("No library handle found, try to initialize the api")
            self.init_api()
        if self.library_handler is not None and (self.camera_handle is None or self.camera_handle_value == -1):
            logging.info("No camera handle found, try to open the camera")
            self.open_camera()

    def check_all_functions(self):
        """Probe a fixed list of known function ids; return a dict of
        {function_id: availability_bool}."""
        camera_function_ids = [0, 1, 2, 4, 5, 7, 8, 15, 17, 18, 21, 22, 23, 24, 25, 26, 27, 28, 29, 55, 56, 57, 58, 59,
                               60, 61, 62, 63, 68, 69, 72, 131, 256, 257, 258, 259, 260, 261,
                               0x00050006, 0x00090004, 0x00090005, 0x00090009, 0x0009000A, 0x0009000C, 0x0009000F,
                               0x000A000C]

        function_availabilities = {}

        for camera_function_id in camera_function_ids:
            function_availability = self.has_function(camera_function_id)
            function_availabilities[camera_function_id] = function_availability

        return function_availabilities

    def print_available_functions(self):
        """Print only the function ids the camera reports as available."""
        function_availabilities = self.check_all_functions()

        for key, value in function_availabilities.items():
            if value:
                print("{:d} -> {:s}".format(key, str(value)))

    def get_color_array(self):
        """Return the camera's color array type as a ColorArray value."""
        self._find_camera()

        color_array = self.ffi.new("PWORD", 0)
        status = self.library_handler.StTrg_GetColorArray(self.camera_handle, color_array)

        if not status:
            logging.error("Cannot get the color array")

        logging.info(color_array[0])

        return ColorArray(color_array[0])

    def get_camera_user_id(self):
        """Return (camera_id, camera_name) stored on the camera."""
        self._find_camera()

        camera_id = self.ffi.new("PDWORD", 0)
        # NOTE(review): as in get_product_name, ffi.new("PWSTR") allocates
        # a single element while buffer_size claims 250 — TODO confirm.
        if six.PY3:
            camera_name_buffer = self.ffi.new("PWSTR")
            buffer_size = 250
            status = self.library_handler.StTrg_ReadCameraUserIDW(self.camera_handle, camera_id, camera_name_buffer, buffer_size)
        elif six.PY2:
            #camera_name_buffer = self.ffi.new("PSTR")
            camera_name_buffer = self.ffi.new("PWSTR")
            buffer_size = 250
            #status = self.library_handler.StTrg_ReadCameraUserIDA(self.camera_handle, camera_id, camera_name_buffer, buffer_size)
            status = self.library_handler.StTrg_ReadCameraUserIDW(self.camera_handle, camera_id, camera_name_buffer, buffer_size)
        if not status:
            logging.error("Cannot get the camera user ID")

        logging.info(camera_id[0])
        logging.info(camera_name_buffer[0])
        logging.info(self.ffi.string(camera_name_buffer))
        logging.info(buffer_size)

        camera_name = self.ffi.string(camera_name_buffer)

        return camera_id[0], camera_name

    def is_prohibited_call_timing(self):
        """True while transferring an image or inside the DLL callback —
        times at which some API calls must not be made."""
        prohibited = self.is_transferring_image or self.is_inside_callback_function
        return prohibited

    def read_setting_file(self, setting_file_path):
        """Load camera settings from a vendor setting file."""
        if self.is_prohibited_call_timing():
            logging.warning("Prohibited function call timing, read_setting_file.")
            return

        self._find_camera()

        # NOTE(review): setting_file_path[0] passes only the FIRST
        # character of the path to ffi.new; passing the whole string
        # looks intended — TODO confirm and test against the DLL.
        if six.PY3:
            setting_file_path_buffer = self.ffi.new("PCWSTR", setting_file_path[0])
            status = self.library_handler.StTrg_ReadSettingFileW(self.camera_handle, setting_file_path_buffer)
        elif six.PY2:
            setting_file_path_buffer = self.ffi.new("PCSTR", setting_file_path[0])
            status = self.library_handler.StTrg_ReadSettingFileA(self.camera_handle, setting_file_path_buffer)
        if not status:
            logging.error("Cannot get the read setting file: {:s}".format(setting_file_path))
        logging.info(setting_file_path_buffer[0])
        logging.info(self.ffi.string(setting_file_path_buffer))

    def write_setting_file(self, setting_file_path):
        """Save the current camera settings to a vendor setting file."""
        if self.is_prohibited_call_timing():
            logging.warning("Prohibited function call timing, write_setting_file.")
            return

        self._find_camera()

        # NOTE(review): same first-character-only concern as
        # read_setting_file — TODO confirm.
        if six.PY3:
            setting_file_path_buffer = self.ffi.new("PCWSTR", setting_file_path[0])
            status = self.library_handler.StTrg_WriteSettingFileW(self.camera_handle, setting_file_path_buffer)
        elif six.PY2:
            setting_file_path_buffer = self.ffi.new("PCSTR", setting_file_path[0])
            status = self.library_handler.StTrg_WriteSettingFileA(self.camera_handle, setting_file_path_buffer)
        if not status:
            logging.error("Cannot get the write setting file: {:s}".format(setting_file_path))

        logging.info(setting_file_path_buffer[0])
        logging.info(self.ffi.string(setting_file_path_buffer))

    def get_available_scan_mode(self):
        """Return the camera's enabled scan-mode bitmask as a hex string."""
        self._find_camera()

        enable_scan_mode = self.ffi.new("PWORD", 0)
        status = self.library_handler.StTrg_GetEnableScanMode(self.camera_handle, enable_scan_mode)
        if not status:
            logging.error("Cannot get the available scan mode")

        logging.info(enable_scan_mode[0])

        return hex(enable_scan_mode[0])

    def get_scan_mode(self):
        """Return (ScanMode, offset_x, offset_y, width, height)."""
        self._find_camera()

        scan_mode = self.ffi.new("PWORD", 0)
        offset_x = self.ffi.new("PDWORD", 0)
        offset_y = self.ffi.new("PDWORD", 0)
        width = self.ffi.new("PDWORD", 0)
        height = self.ffi.new("PDWORD", 0)
        status = self.library_handler.StTrg_GetScanMode(self.camera_handle, scan_mode, offset_x, offset_y, width, height)
        if not status:
            logging.error("Cannot get the scan mode")

        logging.info(scan_mode[0])
        logging.info(offset_x[0])
        logging.info(offset_y[0])
        logging.info(width[0])
        logging.info(height[0])

        return ScanMode(scan_mode[0]), offset_x[0], offset_y[0], width[0], height[0]

    def set_scan_mode(self, scan_mode, offset_x, offset_y, width, height):
        """Set the scan mode/ROI; no-op at prohibited call timing."""
        if self.is_prohibited_call_timing():
            logging.warning("Prohibited function call timing, set_scan_mode.")
            return

        self._find_camera()

        status = self.library_handler.StTrg_SetScanMode(self.camera_handle, scan_mode.value, offset_x, offset_y, width, height)
        if not status:
            logging.error("Cannot set the scan mode")

    @property
    def camera_handle_value(self):
        """The camera handle cast to a Python int (None when no handle);
        -1 indicates StTrg_Open failed."""
        if self.camera_handle is None:
            return None
        else:
            camera_handle_value = int(self.ffi.cast('int', self.camera_handle))
            return camera_handle_value
Example #5
0
class SGXInterface:
    def __init__(self):
        """Set up the cffi interface to the SGX / BarbiE shared libraries.

        Parses the C declarations from the bundled ``sgx.h``, compiles the
        ``_sgx_interface`` extension module in this package's directory,
        and loads the SGX runtime libraries plus the BarbiE client/server
        libraries.  Instantiation therefore performs compilation and
        dynamic linking as side effects.
        """
        self.ffi = FFI()
        dir_path = os.path.dirname(os.path.realpath(__file__))
        with open(os.path.join(dir_path, "sgx.h")) as stream:
            self.ffi.cdef(stream.read())

        self.ffi.set_source(
            "_sgx_interface",
            """
            #include "sgx_eid.h"
            #include "sgx_key_exchange.h"
            #include "common.h"
            #include "network_ra.h"
            #include "barbie_server.h"
            #include "barbie_client.h"
            #include "ra_client.h"
            #include "ra_server.h"
            #include <stdbool.h>
            #include "service_provider.h"
            """,
            include_dirs=['/usr/include', '/opt/intel/sgxsdk/include'],
            library_dirs=['/usr/local/lib', '/opt/intel/sgxsdk/lib64/'],
            libraries=["sample_libcrypto", "BarbiE_Client", "BarbiE_Server"])

        self.ffi.compile(tmpdir=dir_path)

        # RTLD_GLOBAL exports these libraries' symbols so that the BarbiE
        # libraries loaded just below can resolve against them.
        libuae = self.ffi.dlopen("sgx_uae_service", self.ffi.RTLD_GLOBAL)
        liburts = self.ffi.dlopen("sgx_urts", self.ffi.RTLD_GLOBAL)
        libcrypto = self.ffi.dlopen("sample_libcrypto", self.ffi.RTLD_GLOBAL)

        self.barbie_s = self.ffi.dlopen("BarbiE_Server", self.ffi.RTLD_LAZY)
        self.barbie_c = self.ffi.dlopen("BarbiE_Client", self.ffi.RTLD_LAZY)
        # Byte sizes of the IV and MAC buffers used by the crypto wrappers.
        self.iv = 12
        self.mac = 16

    def init_env_variables(self):
        """Load KEY=VALUE pairs from /opt/BarbiE/env.properties into os.environ.

        Lines without '=' are ignored.  Only the first '=' on a line is
        treated as the separator, so values may themselves contain '='.
        """
        separator = "="
        with open("/opt/BarbiE/env.properties") as f:
            for line in f:
                if separator in line:
                    # maxsplit=1: a line like "a=b=c" must not raise a
                    # too-many-values ValueError on unpacking.
                    name, value = line.split(separator, 1)
                    os.environ[name.strip()] = value.strip()

    def init_enclave(self, target_lib):
        """Create an enclave via target_lib and return its sgx_enclave_id_t value."""
        try:
            enclave_id_ptr = self.ffi.new("sgx_enclave_id_t *")
            target_lib.initialize_enclave(enclave_id_ptr)
            return enclave_id_ptr[0]
        except Exception as e:
            raise Exception("Error in initializing enclave!", e)

    def generate_key_pair(self, key_dir):
        """Load or create a NIST P-256 key pair stored under key_dir.

        The pair is persisted as public_key.pem / private_key.pem; existing
        files are reused.  Returns (pub_key, priv_key) where each is a
        base64-encoded, NUL-terminated, byte-reversed hex string in the
        layout expected by the enclave code.

        Fixes: file handles are now closed deterministically ('with'), and
        the hex/reverse/concatenate dance is done once in a helper instead
        of four hand-rolled loops with quadratic string building.
        """
        pub_key_path = os.path.join(key_dir, "public_key.pem")
        priv_key_path = os.path.join(key_dir, "private_key.pem")
        if not os.path.exists(pub_key_path) and not os.path.exists(
                priv_key_path):
            priv_key = SigningKey.generate(curve=NIST256p)
            pub_key = priv_key.get_verifying_key()
            with open(priv_key_path, "w") as f:
                f.write(priv_key.to_pem())
            with open(pub_key_path, "w") as f:
                f.write(pub_key.to_pem())
        else:
            with open(priv_key_path) as f:
                priv_key = SigningKey.from_pem(f.read())
            with open(pub_key_path) as f:
                pub_key = VerifyingKey.from_pem(f.read())

        def _reversed_hex(raw):
            # Hex-encode, split into byte pairs, reverse to little-endian,
            # and join — one pass instead of loop-and-concatenate.
            hx = raw.encode('hex')
            pairs = [hx[i:i + 2] for i in range(0, len(hx), 2)]
            pairs.reverse()
            return "".join(pairs)

        pk64 = pub_key.to_string()
        half = len(pk64) // 2
        pk_x, pk_y = pk64[:half], pk64[half:]

        pub_key = base64.b64encode(
            _reversed_hex(pk_x) + _reversed_hex(pk_y) + '\0')
        priv_key = base64.b64encode(
            _reversed_hex(priv_key.to_string()) + '\0')
        return pub_key, priv_key

    def get_crt(self, resp_crt=None):
        """Split a PEM chain containing two certificates.

        Returns (first_cert, second_cert), each re-terminated with the
        '-----END CERTIFICATE-----' footer line.
        """
        footer = '-----END CERTIFICATE-----\n'
        parts = resp_crt.split(footer)
        return parts[0] + footer, parts[1] + footer

    def verify_certificate(self, crt=None, cacrt=None):
        """Validate certificate crt against the CA certificate cacrt.

        The intermediate and the leaf are added to a trust store, then the
        CA cert itself is verified against that store.  Returns True on
        success; raises a wrapped exception on any validation failure.
        """
        try:
            leaf = load_certificate(FILETYPE_PEM, crt)
            intermediate = load_certificate(FILETYPE_PEM, cacrt)
            to_validate = load_certificate(FILETYPE_PEM, cacrt)
            store = X509Store()
            store.add_cert(intermediate)
            store.add_cert(leaf)
            ctx = X509StoreContext(store, to_validate)
            if ctx.verify_certificate() is None:
                print("Certificate verification Passed on Client side")
                return True
            raise Exception(
                "Certificate Verification Failed on Client side")
        except Exception as e:
            raise Exception("Certificate Validation Failed on Client side", e)

    def verify_signature(self, crt=None, sign=None, resp_body=None):
        """Verify the base64 signature sign over resp_body with SHA-256.

        The public key is extracted from certificate crt.  Returns True on
        success; raises a wrapped exception when verification fails.
        """
        try:
            signer_cert = load_certificate(FILETYPE_PEM, crt)
            pem_pub_key = dump_publickey(FILETYPE_PEM, signer_cert.get_pubkey())
            holder = X509()
            holder.set_pubkey(load_publickey(FILETYPE_PEM, pem_pub_key))
            if verify(holder, base64.b64decode(sign), resp_body,
                      'sha256') is None:
                print("Signature verification Passed on Client side")
                return True
        except Exception as e:
            raise Exception("Signature verification Failed on Client side", e)

    def gen_msg0(self, target_lib, spid=None):
        """Generate attestation msg0 via target_lib.

        Returns (ret, msg0) where msg0 is the base64-encoded
        ra_samp_msg0_request_header_t written by the library.

        Fix: removed the unused sgx_ra_context_t allocation (dead local).
        """
        try:
            p_req0 = self.ffi.new("ra_samp_msg0_request_header_t **")
            ret = target_lib.gen_msg0(p_req0, spid)
            msg0 = base64.b64encode(self.ffi.buffer(p_req0[0]))
            return ret, msg0
        except Exception as e:
            raise Exception("Error in generating msg0", e)

    def proc_msg0(self, target_lib, msg0, spid=None, client_verify_ias=False):
        """Process attestation msg0 and create the network context.

        msg0 is base64-encoded.  Returns (ret, p_net_ctxt); p_net_ctxt is
        the opaque void** context consumed by later msg1/msg3 processing.
        """
        try:
            if spid is None:
                spid = self.ffi.NULL
            raw_msg0 = self.ffi.from_buffer(base64.b64decode(msg0))
            p_net_ctxt = self.ffi.new("void **")
            ret = target_lib.proc_msg0(raw_msg0, p_net_ctxt, spid,
                                       client_verify_ias)
            return ret, p_net_ctxt
        except Exception as e:
            raise Exception("Error in processing msg0", e)

    def gen_msg1(self, target_lib, enclave_id, pub_key):
        """Generate attestation msg1 for the given enclave.

        pub_key, when provided, is a base64 public key bound into the
        request; otherwise NULL is passed.  Returns (ra_context, msg1_b64).
        """
        try:
            key = self.ffi.NULL
            if pub_key is not None:
                key = self.ffi.new("char[]", base64.b64decode(pub_key))

            p_ctxt = self.ffi.new("sgx_ra_context_t *")
            p_req1 = self.ffi.new("ra_samp_msg1_request_header_t **")
            target_lib.gen_msg1(enclave_id, p_ctxt, p_req1, key)
            msg1 = base64.b64encode(self.ffi.buffer(p_req1[0]))
            return p_ctxt[0], msg1
        except Exception as e:
            raise Exception("Error in generating msg1", e)

    def proc_msg1_gen_msg2(self, target_lib, msg1, p_net_ctxt, priv_key):
        """Process msg1 against the network context and produce msg2.

        priv_key, when provided, is a base64 private key; otherwise NULL is
        passed.  Returns msg2 base64-encoded.
        """
        try:
            key = self.ffi.NULL
            if priv_key is not None:
                key = self.ffi.new("char[]", base64.b64decode(priv_key))

            raw_msg1 = self.ffi.from_buffer(base64.b64decode(msg1))
            pp_resp1 = self.ffi.new("ra_samp_msg1_response_header_t **")
            target_lib.proc_msg1(raw_msg1, p_net_ctxt, pp_resp1, key)
            return base64.b64encode(self.ffi.buffer(pp_resp1[0]))
        except Exception as e:
            raise Exception("Error in generating msg2", e)

    def proc_msg2_gen_msg3(self,
                           target_lib,
                           enclave_id,
                           msg2,
                           p_ctxt,
                           ias_crt=None,
                           client_verify_ias=False,
                           server_verify_ias=False):
        """Process attestation msg2 and generate msg3.

        msg2 is base64-encoded; ias_crt is an optional IAS certificate
        path/blob passed through to the library.  Returns a 4-tuple
        (msg3_b64, ias_cert, ias_signature, ias_body); raises when the
        library reports IAS verification failure.
        """
        try:
            if ias_crt is None:
                ias_crt = self.ffi.NULL
            msg2 = self.ffi.from_buffer(base64.b64decode(msg2))
            pp_req2 = self.ffi.new("ra_samp_msg3_request_header_t **")
            # Fixed-size output buffers the library fills with the IAS
            # certificate chain, signature and response body.
            resp_crt = self.ffi.new("uint8_t[]", 4000)
            resp_sign = self.ffi.new("uint8_t[]", 500)
            resp_body = self.ffi.new("uint8_t[]", 1200)
            # At least one side must check the IAS report; default to server.
            if not server_verify_ias and not client_verify_ias:
                server_verify_ias = True
            status = target_lib.gen_msg3(enclave_id, p_ctxt, msg2, pp_req2,
                                         ias_crt, client_verify_ias,
                                         server_verify_ias, resp_crt,
                                         resp_sign, resp_body)
            # status == 3 is the library's IAS-verification-failure code.
            if status != 3:
                msg3 = base64.b64encode(self.ffi.buffer(pp_req2[0]))
            else:
                raise Exception("IAS verification failed")
            return (msg3, self.ffi.string(resp_crt),
                    self.ffi.string(resp_sign), self.ffi.string(resp_body))
        except Exception as e:
            raise Exception("Error in generating msg3", e)

    def proc_msg3_gen_msg4(self,
                           target_lib,
                           enclave_id,
                           s_msg3,
                           p_net_ctxt,
                           sealed_sk,
                           c_msg3,
                           project_id=None,
                           owner_mr_e=None,
                           ias_crt=None,
                           client_verify_ias=False):
        """Process server/client msg3 blobs and generate the final msg4.

        s_msg3 / c_msg3 are base64-encoded msg3 blobs; sealed_sk, when
        given, is a Secret whose .value is the base64 sealed session key to
        stash in the network context.  Returns msg4 base64-encoded; raises
        when the library reports IAS failure (status 3).
        """
        try:
            if ias_crt is None:
                ias_crt = self.ffi.NULL
            if owner_mr_e is None:
                owner_mr_e = self.ffi.NULL
            else:
                owner_mr_e = self.ffi.from_buffer(base64.b64decode(owner_mr_e))
            s_msg3 = self.ffi.from_buffer(base64.b64decode(s_msg3))
            c_msg3 = self.ffi.from_buffer(base64.b64decode(c_msg3))
            if sealed_sk is None:
                sealed_len = 0
                sealed_sk = self.ffi.NULL
            else:
                sealed_len = sealed_sk.length
                sealed_sk = self.ffi.from_buffer(
                    base64.b64decode(sealed_sk.value))
            if project_id is None:
                project_id_len = 0
                project_id = self.ffi.NULL
            else:
                project_id_len = len(project_id)
            pp_resp2 = self.ffi.new("ra_samp_msg3_response_header_t **")
            target_lib.set_enclave(p_net_ctxt, enclave_id)
            target_lib.set_secret(p_net_ctxt, sealed_sk, sealed_len,
                                  self.ffi.NULL, 0)
            status = target_lib.proc_msg3(s_msg3, p_net_ctxt, pp_resp2, c_msg3,
                                          project_id, owner_mr_e, ias_crt,
                                          client_verify_ias)
            #Initially using 177 length of msg4 but
            #after adding project id to msg4 using (209 + project id length) for msg4
            # NOTE(review): the buffer is actually read as 417 + project id
            # length, which no longer matches the 209 mentioned above —
            # confirm against the current C msg4 header definition.
            if status != 3:
                msg4 = base64.b64encode(
                    self.ffi.buffer(pp_resp2[0], (417 + project_id_len)))
            else:
                raise Exception("IAS call failed")
            return msg4
        except Exception as e:
            raise Exception("Error in generating msg4", e)

    def legacy_proc_msg3_gen_msg4(self,
                                  target_lib,
                                  msg3,
                                  p_net_ctxt,
                                  project_id=None,
                                  owner_mr_e=None,
                                  ias_crt=None,
                                  client_verify_ias=False):
        """Process msg3 and generate msg4 without a stored session secret.

        Legacy variant of proc_msg3_gen_msg4: clears the secret in the
        network context and passes NULL as the client msg3.  msg3 is
        base64-encoded.  Returns msg4 base64-encoded; raises when the
        library reports IAS failure (status 3).
        """
        try:
            if ias_crt is None:
                ias_crt = self.ffi.NULL
            if owner_mr_e is None:
                owner_mr_e = self.ffi.NULL
            else:
                owner_mr_e = self.ffi.from_buffer(base64.b64decode(owner_mr_e))
            if project_id is None:
                project_id_len = 0
                project_id = self.ffi.NULL
            else:
                project_id_len = len(project_id)
            msg3 = self.ffi.from_buffer(base64.b64decode(msg3))
            pp_resp2 = self.ffi.new("ra_samp_msg3_response_header_t **")
            # No sealed session key in the legacy flow: clear the secret.
            target_lib.set_secret(p_net_ctxt, self.ffi.NULL, 0, self.ffi.NULL,
                                  0)
            status = target_lib.proc_msg3(msg3, p_net_ctxt, pp_resp2,
                                          self.ffi.NULL, project_id,
                                          owner_mr_e, ias_crt,
                                          client_verify_ias)
            #Initially using 177 length of msg4 but
            #after adding project id to msg4 using (209 + project id length) for msg4
            # msg4 is read as a fixed 417-byte header plus the project id.
            if status != 3:
                msg4 = base64.b64encode(
                    self.ffi.buffer(pp_resp2[0], (417 + project_id_len)))
            else:
                raise Exception("IAS call failed")
            return msg4
        except Exception as e:
            raise Exception("Error in generating msg4", e)

    def proc_msg4(self, target_lib, enclave_id, msg4, p_ctxt, sealed_nonse):
        """Process msg4, completing remote attestation for this context.

        msg4 is base64-encoded; sealed_nonse is a Secret whose .value is
        the base64 sealed nonce.  Returns (status, secret2_b64) where
        secret2_b64 is the base64 sealed session secret produced by the
        enclave.  The RA context is closed before returning.
        """
        try:
            plain_sk_len = 16
            # Size the output buffer for a sealed blob of a 16-byte key.
            sealed_len = target_lib.get_sealed_data_len(
                enclave_id, 0, plain_sk_len)
            sealed_nonse = self.ffi.from_buffer(
                base64.b64decode(sealed_nonse.value))
            #Length is 0 as this will not be output variable
            sealed_nonse_len = 0
            msg4 = self.ffi.from_buffer(base64.b64decode(msg4))
            sealed_secret2 = self.ffi.new("uint8_t[]", sealed_len)
            status = target_lib.proc_ra(enclave_id, p_ctxt, msg4, sealed_nonse,
                                        sealed_nonse_len, sealed_secret2,
                                        sealed_len)
            secret2_buf = base64.b64encode(self.ffi.buffer(sealed_secret2))
            target_lib.close_ra(enclave_id, p_ctxt)
            return status, secret2_buf
        except Exception as e:
            raise Exception(
                "Error in prcessing msg4 and retrieving sealed session key", e)

    def get_dh_key(self, target_lib, enclave_id, msg4, p_ctxt):
        """Derive the DH session key from msg4 and return it sealed.

        Returns (status, dh_buf) where dh_buf is the base64-encoded sealed
        key blob produced by the enclave.  The RA context is deliberately
        left open (close_ra was commented out in the original flow).
        """
        try:
            raw_msg4 = self.ffi.from_buffer(base64.b64decode(msg4))
            plain_sk_len = 16
            sealed_len = target_lib.get_sealed_data_len(enclave_id, 0,
                                                        plain_sk_len)
            sealed_dh = self.ffi.new("uint8_t[]", sealed_len)
            status = target_lib.get_dh_key(
                enclave_id, p_ctxt, raw_msg4, sealed_dh,
                self.ffi.cast("uint32_t", sealed_len))
            dh_buf = base64.b64encode(self.ffi.buffer(sealed_dh))
            return status, dh_buf
        except Exception as e:
            raise Exception("Error in get_dh_key", e)

    def get_project_id(self, target_lib, enclave_id, msg4, p_ctxt):
        """Extract the project id embedded in msg4.

        msg4 is base64-encoded.  Returns (proj_id, proj_id_len) where
        proj_id is a uint8_t[] cdata buffer of proj_id_len bytes.

        Fix: removed the dead `ffi.cast("uint32_t", 0)` assignment that was
        immediately overwritten by the library call.
        """
        try:
            msg4 = self.ffi.from_buffer(base64.b64decode(msg4))
            # Ask the enclave for the length first, then size the buffer.
            proj_id_len = target_lib.get_project_id_len(
                enclave_id, p_ctxt, msg4)
            proj_id = self.ffi.new("uint8_t[]", proj_id_len)
            # Status is ignored, matching the original behavior.
            status = target_lib.get_project_id(enclave_id, p_ctxt, msg4,
                                               proj_id)
            return proj_id, proj_id_len
        except Exception as e:
            raise Exception("Error in geting project id", e)

    def get_sk(self, target_lib, p_net_ctx, enc_sk):
        #todo extract iv and mac, call target_lib.get_sk and return plain sk
        """Unwrap the DH-encrypted session key and return it as a Secret.

        enc_sk is base64(iv) + base64(mac) + base64(ciphertext); the
        prefixes are 16 and 24 base64 characters (12-byte IV, 16-byte MAC).
        """
        try:
            b64_iv = 16
            b64_mac = 24
            iv = self.ffi.from_buffer(base64.b64decode(enc_sk[:b64_iv]))
            mac = self.ffi.from_buffer(
                base64.b64decode(enc_sk[b64_iv:(b64_iv + b64_mac)]))
            dh_sk = self.ffi.from_buffer(
                base64.b64decode(enc_sk[(b64_iv + b64_mac):]))
            plain_sk = self.ffi.new("uint8_t[]", 16)
            status = target_lib.get_sk(p_net_ctx, plain_sk, 16, dh_sk, iv, mac)
            # NOTE(review): ffi.string stops at the first NUL byte, so a
            # binary key containing 0x00 is truncated here;
            # ffi.buffer(plain_sk, 16)[:] would return all 16 bytes —
            # confirm which is intended.
            return Secret(self.ffi.string(plain_sk, 16), 16)
        except Exception as e:
            raise Exception("Error in get_sk", e)

    def generate_key(self, target_lib, enclave_id, key_len):
        """Have the enclave generate a key_len-byte key and seal it.

        Returns a Secret holding the base64 sealed blob and the sealed
        length.  To size a plaintext buffer for a sealed blob the library
        also offers get_add_mac_len / get_encrypted_len (unused here).
        """
        try:
            sealed_len = target_lib.get_sealed_data_len(enclave_id, 0, key_len)
            sealed_key = self.ffi.new("uint8_t[]", sealed_len)
            target_lib.crypto_generate_key(enclave_id, key_len, sealed_key,
                                           sealed_len)
            sealed_b64 = base64.b64encode(self.ffi.buffer(sealed_key))
            return Secret(sealed_b64, sealed_len)
        except Exception as e:
            raise Exception("Error in generating key", e)

    def provision_kek(self,
                      target_lib,
                      enclave_id,
                      sealed_sk,
                      sk_kek,
                      project_id=None):
        """Unwrap a KEK encrypted under the session key and reseal it.

        sealed_sk is a Secret holding the base64 sealed session key;
        sk_kek is base64(iv) + base64(mac) + base64(encrypted KEK).
        Returns the resealed KEK base64-encoded.
        """
        try:
            if project_id is None:
                project_id = self.ffi.NULL
                proj_id_len = 0
            else:
                proj_id_len = len(project_id)
            # base64 lengths of the 12-byte IV and 16-byte MAC prefixes.
            b64_iv = 16
            b64_mac = 24
            sealed_len = sealed_sk.length
            sealed_sk = self.ffi.from_buffer(base64.b64decode(sealed_sk.value))
            iv = self.ffi.from_buffer(base64.b64decode(sk_kek[:b64_iv]))
            mac = self.ffi.from_buffer(
                base64.b64decode(sk_kek[b64_iv:(b64_iv + b64_mac)]))
            sk_kek = self.ffi.from_buffer(
                base64.b64decode(sk_kek[(b64_iv + b64_mac):]))
            plain_kek_len = len(sk_kek)
            # Size the output buffer for a sealed blob of the plaintext KEK.
            sealed_kek_len = target_lib.get_sealed_data_len(
                enclave_id, 0, plain_kek_len)
            sealed_kek = self.ffi.new("uint8_t[]", sealed_kek_len)
            target_lib.crypto_provision_kek(enclave_id, sealed_sk, sealed_len,
                                            sk_kek, plain_kek_len, iv, mac,
                                            sealed_kek, sealed_kek_len,
                                            project_id, proj_id_len)
            return base64.b64encode(self.ffi.buffer(sealed_kek))
        except Exception as e:
            raise Exception("Error in provisioning of kek", e)

    def legacy_encrypt(self, target_lib, plain_sk, secret):
        """Encrypt secret with the plaintext key plain_sk via
        crypto_legacy_encrypt.

        plain_sk and secret are Secret objects.  Returns base64(iv) +
        base64(mac) + base64(ciphertext), using the instance-wide IV/MAC
        buffer sizes.
        """
        try:
            iv_buf = self.ffi.new("uint8_t[]", self.iv)
            mac_buf = self.ffi.new("uint8_t[]", self.mac)
            cipher_buf = self.ffi.new("uint8_t[]", secret.length)
            target_lib.crypto_legacy_encrypt(plain_sk.value, plain_sk.length,
                                             secret.value, secret.length,
                                             cipher_buf, iv_buf, mac_buf)
            encoded = [base64.b64encode(self.ffi.buffer(buf))
                       for buf in (iv_buf, mac_buf, cipher_buf)]
            return "".join(encoded)
        except Exception as e:
            raise Exception("ERROR: Encryption of the secret failed!", e)

    def legacy_decrypt(self, plain_sk, enc_secret):
        """AES-GCM decrypt enc_secret with the plaintext key plain_sk.

        enc_secret is base64(iv) + base64(mac) + base64(ciphertext).
        Returns the plaintext base64-encoded.
        """
        try:
            # base64 lengths of the 12-byte IV and 16-byte MAC prefixes.
            b64_iv = 16
            b64_mac = 24
            iv = base64.b64decode(enc_secret[:b64_iv])
            mac = base64.b64decode(enc_secret[b64_iv:(b64_iv + b64_mac)])
            enc_secret = base64.b64decode(enc_secret[(b64_iv + b64_mac):])
            cipher = AES.new(plain_sk, AES.MODE_GCM, iv)
            dec_secret = cipher.decrypt(enc_secret)
            # NOTE(review): the GCM tag is never checked (cipher.verify(mac)
            # is commented out), so ciphertext tampering goes undetected —
            # confirm this is intentional.
            #cipher.verify(mac)
            return base64.b64encode(dec_secret)
        except Exception as e:
            raise Exception("ERROR: Legacy Decryption of the secret failed!",
                            e)

    def encrypt(self, target_lib, enclave_id, sealed_sk, secret):
        """Encrypt secret inside the enclave using the sealed key sealed_sk.

        sealed_sk is a Secret holding the base64 sealed key; secret is a
        Secret with the plaintext.  Returns base64(iv) + base64(mac) +
        base64(ciphertext).
        """
        try:
            iv_buf = self.ffi.new("uint8_t[]", self.iv)
            mac_buf = self.ffi.new("uint8_t[]", self.mac)
            sealed_len = sealed_sk.length
            sealed_key = self.ffi.from_buffer(base64.b64decode(sealed_sk.value))
            cipher_buf = self.ffi.new("uint8_t[]", secret.length)
            target_lib.crypto_encrypt(enclave_id, sealed_key, sealed_len,
                                      secret.value, secret.length, cipher_buf,
                                      iv_buf, mac_buf)
            encoded = [base64.b64encode(self.ffi.buffer(buf))
                       for buf in (iv_buf, mac_buf, cipher_buf)]
            return "".join(encoded)
        except Exception as e:
            raise Exception("ERROR: Encryption of the secret failed!", e)

    def decrypt(self, target_lib, enclave_id, sealed_sk, enc_secret):
        """Decrypt enc_secret inside the enclave using the sealed key.

        enc_secret is base64(iv) + base64(mac) + base64(ciphertext);
        sealed_sk is a Secret holding the base64 sealed key.  Returns the
        plaintext base64-encoded.
        """
        try:
            # base64 lengths of the 12-byte IV and 16-byte MAC prefixes.
            b64_iv, b64_mac = 16, 24
            iv = self.ffi.from_buffer(base64.b64decode(enc_secret[:b64_iv]))
            mac = self.ffi.from_buffer(
                base64.b64decode(enc_secret[b64_iv:(b64_iv + b64_mac)]))
            cipher_text = self.ffi.from_buffer(
                base64.b64decode(enc_secret[(b64_iv + b64_mac):]))
            length = len(cipher_text)
            sealed_len = sealed_sk.length
            sealed_key = self.ffi.from_buffer(base64.b64decode(sealed_sk.value))
            plain = self.ffi.new("uint8_t[]", length)
            target_lib.crypto_decrypt(enclave_id, sealed_key, sealed_len,
                                      plain, length, cipher_text, iv, mac,
                                      self.ffi.NULL, 0)
            return base64.b64encode(self.ffi.buffer(plain))
        except Exception as e:
            raise Exception("ERROR: Decryption of the secret failed!", e)

    def transport(self,
                  target_lib,
                  enclave_id,
                  sealed_kek,
                  sealed_sk,
                  project_id=None):
        """Re-wrap a sealed secret under a sealed KEK for transport.

        sealed_kek and sealed_sk are Secret objects holding base64 sealed
        blobs.  Returns base64(iv) + base64(mac) + base64(wrapped secret).
        """
        try:
            if project_id is None:
                project_id = self.ffi.NULL
                proj_id_len = 0
            else:
                proj_id_len = len(project_id)
            iv = self.ffi.new("uint8_t[]", self.iv)
            mac = self.ffi.new("uint8_t[]", self.mac)
            sealed_kek_len = sealed_kek.length
            sealed_kek = self.ffi.from_buffer(
                base64.b64decode(sealed_kek.value))
            sealed_sk_len = sealed_sk.length
            sealed_sk = self.ffi.from_buffer(base64.b64decode(sealed_sk.value))
            # The plaintext size of the sealed secret sets the output size.
            sk_len = target_lib.get_encrypted_len(enclave_id, sealed_sk,
                                                  sealed_sk_len)
            kek_sk = self.ffi.new("uint8_t[]", sk_len)
            target_lib.crypto_transport_secret(enclave_id, sealed_kek,
                                               sealed_kek_len, sealed_sk,
                                               sealed_sk_len, kek_sk, sk_len,
                                               iv, mac, project_id,
                                               proj_id_len)
            return base64.b64encode(self.ffi.buffer(iv)) + base64.b64encode(
                self.ffi.buffer(mac)) + base64.b64encode(
                    self.ffi.buffer(kek_sk))
        except Exception as e:
            raise Exception("Error in transporting the secret", e)

    #no need for target lib, server action only
    def kek_encrypt(self,
                    enclave_id,
                    kek_sk,
                    sealed_kek,
                    sk_secret,
                    project_id=None):
        """Server-side: re-encrypt a session-key-wrapped secret under the KEK.

        kek_sk and sk_secret are each base64(iv) + base64(mac) +
        base64(ciphertext); sealed_kek is a Secret holding the base64
        sealed KEK.  Returns the KEK-encrypted secret in the same
        iv + mac + ciphertext layout (the sk_secret iv/mac buffers are
        reused for the output encoding).
        """
        try:
            if project_id is None:
                project_id = self.ffi.NULL
                proj_id_len = 0
            else:
                proj_id_len = len(project_id)
            # base64 lengths of the 12-byte IV and 16-byte MAC prefixes.
            b64_iv = 16
            b64_mac = 24
            iv1 = self.ffi.from_buffer(base64.b64decode(kek_sk[:b64_iv]))
            mac1 = self.ffi.from_buffer(
                base64.b64decode(kek_sk[b64_iv:(b64_iv + b64_mac)]))
            kek_sk = self.ffi.from_buffer(
                base64.b64decode(kek_sk[(b64_iv + b64_mac):]))
            sealed_kek_len = sealed_kek.length
            sealed_kek = self.ffi.from_buffer(
                base64.b64decode(sealed_kek.value))
            iv = self.ffi.from_buffer(base64.b64decode(sk_secret[:b64_iv]))
            mac = self.ffi.from_buffer(
                base64.b64decode(sk_secret[b64_iv:(b64_iv + b64_mac)]))
            sk_secret = self.ffi.from_buffer(
                base64.b64decode(sk_secret[(b64_iv + b64_mac):]))
            length = len(sk_secret)
            kek_secret = self.ffi.new("uint8_t[]", length)
            # NOTE(review): project_id is passed through str() here, unlike
            # the sibling methods which pass it as-is — confirm intentional.
            self.barbie_s.crypto_store_secret(enclave_id, kek_sk, len(kek_sk),
                                              iv1, mac1, sealed_kek,
                                              sealed_kek_len, sk_secret,
                                              length,
                                              kek_secret, length, iv, mac,
                                              str(project_id), proj_id_len)
            return base64.b64encode(self.ffi.buffer(iv)) + base64.b64encode(
                self.ffi.buffer(mac)) + base64.b64encode(
                    self.ffi.buffer(kek_secret))
        except Exception as e:
            raise Exception("Error in encrypting the secret with kek", e)

    #no need for target lib, server action only
    def kek_decrypt(self,
                    enclave_id,
                    kek_sk,
                    sealed_kek,
                    kek_secret,
                    project_id=None):
        """Server-side: decrypt a KEK-encrypted secret back to the session key wrapping.

        kek_sk and kek_secret are each base64(iv) + base64(mac) +
        base64(ciphertext); sealed_kek is a Secret holding the base64
        sealed KEK.  Returns the session-key-encrypted secret in the same
        iv + mac + ciphertext layout (the kek_secret iv/mac buffers are
        reused for the output encoding).
        """
        try:
            if project_id is None:
                project_id = self.ffi.NULL
                proj_id_len = 0
            else:
                proj_id_len = len(project_id)
            # base64 lengths of the 12-byte IV and 16-byte MAC prefixes.
            b64_iv = 16
            b64_mac = 24
            iv1 = self.ffi.from_buffer(base64.b64decode(kek_sk[:b64_iv]))
            mac1 = self.ffi.from_buffer(
                base64.b64decode(kek_sk[b64_iv:(b64_iv + b64_mac)]))
            kek_sk = self.ffi.from_buffer(
                base64.b64decode(kek_sk[(b64_iv + b64_mac):]))
            sealed_kek_len = sealed_kek.length
            sealed_kek = self.ffi.from_buffer(
                base64.b64decode(sealed_kek.value))
            iv = self.ffi.from_buffer(base64.b64decode(kek_secret[:b64_iv]))
            mac = self.ffi.from_buffer(
                base64.b64decode(kek_secret[b64_iv:(b64_iv + b64_mac)]))
            kek_secret = self.ffi.from_buffer(
                base64.b64decode(kek_secret[(b64_iv + b64_mac):]))
            length = len(kek_secret)
            sk_secret = self.ffi.new("uint8_t[]", length)
            # NOTE(review): project_id is passed through str() here, unlike
            # the sibling methods which pass it as-is — confirm intentional.
            self.barbie_s.crypto_get_secret(enclave_id, kek_sk, len(kek_sk),
                                            iv1, mac1, sealed_kek,
                                            sealed_kek_len, kek_secret, length,
                                            sk_secret, length, iv, mac,
                                            str(project_id), proj_id_len)
            return base64.b64encode(self.ffi.buffer(iv)) + base64.b64encode(
                self.ffi.buffer(mac)) + base64.b64encode(
                    self.ffi.buffer(sk_secret))
        except Exception as e:
            raise Exception("Error in decrypting the secret with kek", e)

    def compare_secret(self, target_lib, secret1, secret2, secret_len):
        """Compare two base64-encoded secrets via the library's crypto_cmp.

        Returns True when the first secret_len bytes are equal.
        """
        try:
            buf1 = self.ffi.from_buffer(base64.b64decode(secret1))
            buf2 = self.ffi.from_buffer(base64.b64decode(secret2))
            return target_lib.crypto_cmp(buf1, buf2, secret_len) == 0
        except Exception as e:
            raise Exception("Error in comparing the secrets", e)

    def compare_sealed_secret(self, target_lib, enclave_id, secret1, secret2):
        """Compare two sealed secrets inside the enclave.

        secret1 / secret2 are base64-encoded sealed blobs.  Returns True
        when the enclave reports them equal (crypto_sealed_cmp == 0).

        Fix: this method was defined twice; the first copy (with the
        misspelled 'encalve_id' parameter) was dead code, silently shadowed
        by the second.  The duplicate has been removed.
        """
        try:
            secret1 = self.ffi.from_buffer(base64.b64decode(secret1))
            secret2 = self.ffi.from_buffer(base64.b64decode(secret2))
            if target_lib.crypto_sealed_cmp(enclave_id, secret1, len(secret1),
                                            secret2, len(secret2)) == 0:
                return True
            return False
        except Exception as e:
            raise Exception("Error in comparing the sealed secrets", e)

    def destroy_enclave(self, target_lib, enclave_id):
        """Tear down the enclave identified by enclave_id via target_lib."""
        try:
            target_lib.destroy_enclave(enclave_id)
        except Exception as err:
            raise Exception("Error in destroying enclave!", err)

    def write_buffer_to_file(self, filename, buff):
        """Write buff to <module dir>/filename, overwriting any existing file."""
        try:
            target = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), filename)
            with open(target, 'w') as out:
                out.write(buff)
        except Exception as e:
            raise Exception("Error writing buffer to file!", e)

    def read_buffer_from_file(self, filename):
        """Return the contents of <module dir>/filename, or None when absent."""
        try:
            base_dir = os.path.dirname(os.path.realpath(__file__))
            source = os.path.join(base_dir, filename)
            if not os.path.exists(os.path.join(base_dir, source)):
                return None
            with open(source, 'r') as fh:
                return fh.read()
        except Exception as e:
            raise Exception("Error reading buffer from file!", e)

    def get_mr_enclave(self, msg3):
        """Extract the 32-byte MRENCLAVE from msg3; returns it base64-encoded."""
        try:
            raw_msg3 = self.ffi.from_buffer(base64.b64decode(msg3))
            mr_e = self.barbie_s.get_mr_e(raw_msg3)
            return base64.b64encode(self.ffi.buffer(mr_e, 32))
        except Exception as e:
            raise Exception("Error in retrieveing mr enclave", e)

    def get_mr_signer(self, msg3):
        """Extract the 32-byte MRSIGNER from msg3; returns it base64-encoded."""
        try:
            raw_msg3 = self.ffi.from_buffer(base64.b64decode(msg3))
            mr_s = self.barbie_s.get_mr_s(raw_msg3)
            return base64.b64encode(self.ffi.buffer(mr_s, 32))
        except Exception as e:
            raise Exception("Error in retrieveing mr signer", e)

    def get_report_sha256(self, target_lib, msg3):
        """Compute the 32-byte report SHA-256 from msg3; returns it base64-encoded."""
        try:
            raw_msg3 = self.ffi.from_buffer(base64.b64decode(msg3))
            digest = self.ffi.new("uint8_t []", 32)
            target_lib.get_report_sha256(raw_msg3, digest)
            return base64.b64encode(self.ffi.buffer(digest))
        except Exception as e:
            raise Exception("Error getting SHA256", e)

    def test_legacy_client(self):
        """Smoke-test the legacy (non-attested) encrypt/decrypt round trip.

        Fix: the original 'finally' referenced enclave_id unconditionally,
        raising NameError (and masking the real error) when init_enclave
        failed; the enclave is now only destroyed if it was created.
        """
        enclave_id = None
        try:
            secret = "This-Is-My-Private-Secret"
            plain_secret = Secret(secret, len(secret))

            enclave_id = self.init_enclave(self.barbie_s)

            # Simulate the server-side KEK with a freshly sealed 16-byte key.
            sealed_kek = self.generate_key(self.barbie_s, enclave_id, 16)

            enc_secret = self.encrypt(self.barbie_s, enclave_id, sealed_kek,
                                      plain_secret)

            r_secret = self.decrypt(self.barbie_s, enclave_id, sealed_kek,
                                    enc_secret)
            r_secret = base64.b64decode(r_secret)

            if r_secret == secret:
                print("Legacy Client : Secret Management done!")
            else:
                print("Legacy Client : Secret Management failed!")

        finally:
            if enclave_id is not None:
                self.destroy_enclave(self.barbie_s, enclave_id)

    def test_sgx_client_wo_sgx_hw(self, spid=None, crt_path=None, kdir=None):
        try:

            pub_key, priv_key = self.generate_key_pair(kdir)
            s_eid = self.init_enclave(self.barbie_s)

            plain_sk = Secret("", len(""))

            #Perform attestation
            ret, msg0 = self.gen_msg0(self.barbie_s, spid)

            p_ctxt, msg1 = self.gen_msg1(self.barbie_s, s_eid, pub_key)
            print "gen_msg1 returned: " + msg1

            ret, p_net_ctxt = self.proc_msg0(self.barbie_c, msg0, spid, False)
            msg2 = self.proc_msg1_gen_msg2(self.barbie_c, msg1, p_net_ctxt,
                                           priv_key)
            print "send_msg1_recv_msg2 returned: " + msg2

            msg3, crt, sig, resp_body = self.proc_msg2_gen_msg3(
                self.barbie_s, s_eid, msg2, p_ctxt, crt_path, False)
            print "proc_msg2_gen_msg3 returned: " + msg3

            msg4 = self.legacy_proc_msg3_gen_msg4(self.barbie_c, msg3,
                                                  p_net_ctxt, "sgx_wo_hw",
                                                  None, crt_path, False)
            print "send_msg3_recv_msg4 returned: " + str(msg4)

            status, s_dh = self.get_dh_key(self.barbie_s, s_eid, msg4, p_ctxt)
            print "get_dh_key returned: " + str(status)

            proj_id, proj_id_size = self.get_project_id(
                self.barbie_s, s_eid, msg4, p_ctxt)

            s_sk = self.generate_key(self.barbie_s, s_eid, 16)
            plain_kek_len = 16
            sealed_len = self.barbie_s.get_sealed_data_len(
                s_eid, 0, plain_kek_len)
            dh_sk = self.transport(self.barbie_s, s_eid,
                                   Secret(s_dh, sealed_len), s_sk, None)
            plain_sk = self.get_sk(self.barbie_c, p_net_ctxt, dh_sk)
            #status, plain_sk = self.get_sk(self.barbie_c, p_net_ctxt, 16, dh_sk)
            #status, sk = self.proc_msg4(self.barbie_s, s_eid, msg4, p_ctxt)
            #sealed_sk = Secret(sk, sealed_len)

            #Perform kek provisioning
            kek = "yek etyb neetxis"
            plain_kek = Secret(kek, len(kek))

            sk_kek = self.legacy_encrypt(self.barbie_c, plain_sk, plain_kek)

            kek = self.provision_kek(self.barbie_s, s_eid, s_sk, sk_kek, None)
            plain_kek_len = 16
            sealed_len = self.barbie_s.get_sealed_data_len(
                s_eid, 0, plain_kek_len)
            sealed_kek = Secret(kek, sealed_len)

            kek_sk = self.transport(self.barbie_c, s_eid, sealed_kek, s_sk,
                                    proj_id)

            #Perform secret management
            secret = "my-private-secret"
            plain_secret = Secret(secret, len(secret))

            sk_secret = self.legacy_encrypt(self.barbie_c, plain_sk,
                                            plain_secret)

            kek_secret = self.kek_encrypt(s_eid, kek_sk, sealed_kek, sk_secret,
                                          "sgx_wo_hw")

            rec = self.kek_decrypt(s_eid, kek_sk, sealed_kek, kek_secret,
                                   "sgx_wo_hw")

            if self.compare_secret(self.barbie_c, rec[40:], sk_secret[40:],
                                   plain_secret.length):
                print "SGX Aware Client Without SGX hardware : Secret Management done!"
            else:
                print "SGX Aware Cliwnt Without SGX hardware : Secret Management failed!"

        finally:
            self.destroy_enclave(self.barbie_s, s_eid)
Example #6
0
	unsigned short reserve3;    // Reserved 3
	union rtdata3 {             // Monitor data 3 .
		POSE pos3;              // XYZ type [mm/rad] .
		JOINT jnt3;             // JOINT type [mm/rad] .
		PULSE pls3;             // PULSE type [mm/rad] or Integer type [% / non-unit].
		long lng3[8];           // Integer type [% / non-unit] .
	} dat3;
} MXTCMD;
""")

if __name__ == '__main__':
    # Demo sender: transmit a single MXTCMD struct over UDP to a local
    # controller (port 10000).
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # NOTE(review): SO_REUSEADDR has no effect here — the socket is never
    # bound; presumably copied from a server example.  Confirm before removal.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    data = ffi.new("MXTCMD *")  # zero-initialized command struct (see cdef above)
    data.Command = 1
    # ffi.buffer exposes the raw struct bytes; send them as one datagram.
    sock.sendto(ffi.buffer(data), ("localhost", 10000))

#class JointServer(DatagramProtocol):
#    _mxt_cmd = ffi.new("MXTCMD *")
#    _counter = 0
#    _last_jnt = [random() * 3.14 for i in xrange(6)]
#    #_last_jnt = [0 for i in xrange(6)]

#    def datagramReceived(self, datagram, address):
#        #print "Received from address: " + str(address)
#        #print str(datagram)
#        ffi.buffer(self._mxt_cmd)[:] = datagram
#        #self._counter += 1
#        #if self._counter > 1000:
#        #    self._counter = 0
#        #    self._last_jnt = [random() * 3.14 for i in xrange(6)]
class _PcapFfi(object):
    '''
    This class represents the low-level interface to the libpcap library.
    It encapsulates all the cffi calls and C/Python conversions, as well
    as translation of errors and error codes to PcapExceptions.  It is
    intended to be used as a singleton class through the PcapDumper
    and PcapLiveDevice classes, below.

    Fixes relative to the previous revision:
      * error buffers are allocated at 256 bytes (PCAP_ERRBUF_SIZE);
        128 bytes risked an overrun inside libpcap;
      * packet timestamps are computed arithmetically instead of via
        string formatting, which mis-rendered microsecond values
        below 100000 (e.g. usec=5 became 0.5 s);
      * the compiled BPF program is always freed in set_filter().
    '''
    _instance = None
    __slots__ = ['_ffi', '_libpcap', '_interfaces', '_windoze']

    def __init__(self):
        '''
        Assumption: this class is instantiated once in the main thread before
        any other threads have a chance to try instantiating it.
        '''
        if _PcapFfi._instance:
            raise Exception("Can't initialize this class more than once!")

        _PcapFfi._instance = self
        self._windoze = False

        self._ffi = FFI()
        self._ffi.cdef('''
        struct pcap;
        typedef struct pcap pcap_t;
        struct pcap_dumper;
        typedef struct pcap_dumper pcap_dumper_t;
        struct pcap_addr {
            struct pcap_addr *next;
            struct sockaddr *addr;
            struct sockaddr *netmask;
            struct sockaddr *broadaddr;
            struct sockaddr *dstaddr;
        };
        typedef struct pcap_addr pcap_addr_t;
        struct pcap_if {
            struct pcap_if *next;
            char *name;
            char *description;
            pcap_addr_t *addresses;
            int flags;
        };
        typedef struct pcap_if pcap_if_t;

        int pcap_findalldevs(pcap_if_t **, char *);
        void pcap_freealldevs(pcap_if_t *);

        struct pcap_pkthdr {
            unsigned long tv_sec;
            unsigned long tv_usec;
            unsigned int caplen;
            unsigned int len;
        };

        struct pcap_stat {
            unsigned int recv;
            unsigned int drop;
            unsigned int ifdrop;
        };

        pcap_t *pcap_open_dead(int, int);
        pcap_dumper_t *pcap_dump_open(pcap_t *, const char *);
        void pcap_dump_close(pcap_dumper_t *);
        void pcap_dump(pcap_dumper_t *, struct pcap_pkthdr *, unsigned char *);

        // live capture
        pcap_t *pcap_create(const char *, char *); // source, errbuf
        pcap_t *pcap_open_live(const char *, int, int, int, char *);
        pcap_t *pcap_open_offline(const char *fname, char *errbuf);
        int pcap_set_snaplen(pcap_t *, int); // 0 on success
        int pcap_snapshot(pcap_t *);
        int pcap_set_promisc(pcap_t *, int); // 0 on success
        int pcap_set_buffer_size(pcap_t *, int); // 0 on success
        int pcap_datalink(pcap_t *);
        int pcap_setnonblock(pcap_t *, int, char *); // 0 on success
        int pcap_getnonblock(pcap_t *, char *); 
        int pcap_next_ex(pcap_t *, struct pcap_pkthdr **, const unsigned char **);
        int pcap_activate(pcap_t *);
        void pcap_close(pcap_t *);
        int pcap_get_selectable_fd(pcap_t *);
        int pcap_sendpacket(pcap_t *, const unsigned char *, int);
        char *pcap_geterr(pcap_t *);
        char *pcap_lib_version();
        int pcap_stats(pcap_t *, struct pcap_stat *);

        struct bpf_insn;
        struct bpf_program {
            unsigned int bf_len;
            struct bpf_insn *bf_insns;
        };
        int pcap_setfilter(pcap_t *, struct bpf_program *);
        int pcap_compile(pcap_t *, struct bpf_program *,
            const char *, int, unsigned int);
        void pcap_freecode(struct bpf_program *);
        ''')
        # Load the platform-appropriate libpcap flavor (ABI mode).
        if sys.platform == 'darwin':
            self._libpcap = self._ffi.dlopen('libpcap.dylib') # standard libpcap
        elif sys.platform == 'linux':
            self._libpcap = self._ffi.dlopen('libpcap.so') # standard libpcap
        elif sys.platform == 'win32':
            self._libpcap = self._ffi.dlopen('wpcap.dll') # winpcap
            self._windoze = True
        else:
            raise PcapException("Don't know how to locate libpcap on this platform: {}".format(sys.platform))
        self._interfaces = []
        self.discoverdevs()

    @staticmethod
    def instance():
        '''Return the singleton instance, creating it on first use.'''
        if not _PcapFfi._instance:
            _PcapFfi._instance = _PcapFfi()
        return _PcapFfi._instance

    @property
    def version(self):
        '''Return the libpcap version banner (as a bytes object).'''
        return self._ffi.string(self._libpcap.pcap_lib_version())

    def discoverdevs(self):
        '''
        Find all the pcap-eligible devices on the local system.
        '''
        if len(self._interfaces):
            raise PcapException("Device discovery should only be done once.")

        ppintf = self._ffi.new("pcap_if_t * *")
        # libpcap requires PCAP_ERRBUF_SIZE (256) bytes for error buffers.
        errbuf = self._ffi.new("char []", 256)
        rv = self._libpcap.pcap_findalldevs(ppintf, errbuf)
        if rv:
            raise PcapException("pcap_findalldevs returned failure: {}".format(self._ffi.string(errbuf)))
        pintf = ppintf[0]
        tmp = pintf
        pindex = 0
        while tmp != self._ffi.NULL:
            xname = self._ffi.string(tmp.name) # "internal name"; still stored as bytes object
            xname = xname.decode('ascii', 'ignore')

            # On Windows the internal device names are unwieldy; expose a
            # stable "portN" alias instead.
            if self._windoze:
                ext_name = "port{}".format(pindex)
            else:
                ext_name = xname
            pindex += 1

            if tmp.description == self._ffi.NULL:
                xdesc = ext_name
            else:
                xdesc = self._ffi.string(tmp.description)
                xdesc = xdesc.decode('ascii', 'ignore')

            # NB: on WinPcap, only loop flag is set
            isloop = (tmp.flags & 0x1) == 0x1
            isup = (tmp.flags & 0x2) == 0x2
            isrunning = (tmp.flags & 0x4) == 0x4

            xif = Interface(ext_name, xname, xdesc, isloop, isup, isrunning)

            self._interfaces.append(xif)
            tmp = tmp.next
        self._libpcap.pcap_freealldevs(pintf)

    @property
    def devices(self):
        '''List of Interface objects discovered at startup.'''
        return self._interfaces

    def open_dumper(self, outfile, dltype=Dlt.DLT_EN10MB, snaplen=65535):
        '''
        Open *outfile* for writing packets in pcap savefile format.
        Returns a PcapDev wrapping the dumper handle.
        '''
        pcap = self._libpcap.pcap_open_dead(dltype.value, snaplen)
        xoutfile = self._ffi.new("char []", bytes(outfile, 'ascii'))
        pcapdump = self._libpcap.pcap_dump_open(pcap, xoutfile)
        dl = self._libpcap.pcap_datalink(pcap)
        snaplen = self._libpcap.pcap_snapshot(pcap)
        return PcapDev(Dlt(dl), 0, snaplen, self.version, pcapdump)

    def close_dumper(self, pcapdump):
        '''Flush and close a dumper handle from open_dumper().'''
        self._libpcap.pcap_dump_close(pcapdump)

    def write_packet(self, dumper, pkt, ts=None):
        '''
        Append one packet (bytes) to a dump file, stamped with *ts*
        (seconds since the epoch; defaults to now).
        '''
        pkthdr = self._ffi.new("struct pcap_pkthdr *")
        if not ts:
            ts = time()
        pkthdr.tv_sec = int(ts)
        pkthdr.tv_usec = int(1000000 * (ts - int(ts)))
        pkthdr.caplen = len(pkt)
        pkthdr.len = len(pkt)
        xpkt = self._ffi.new("char []", pkt)
        self._libpcap.pcap_dump(dumper, pkthdr, xpkt)

    def open_pcap_file(self, filename):
        '''Open an existing pcap savefile for reading; returns a PcapDev.'''
        errbuf = self._ffi.new("char []", 256)
        pcap = self._libpcap.pcap_open_offline(bytes(filename, 'ascii'), errbuf)
        if pcap == self._ffi.NULL:
            raise PcapException("Failed to open pcap file for reading: {}: {}".format(filename, self._ffi.string(errbuf)))

        dl = self._libpcap.pcap_datalink(pcap)
        try:
            dl = Dlt(dl)
        except ValueError as e:
            raise PcapException("Don't know how to handle datalink type {}".format(dl))
        return PcapDev(dl, 0, 0, self.version, pcap)

    def open_live(self, device, snaplen=65535, promisc=1, to_ms=100, nonblock=True):
        '''
        Open a live capture on *device* (external name); returns a PcapDev.
        '''
        errbuf = self._ffi.new("char []", 256)
        internal_name = None
        for dev in self._interfaces:
            if dev.name == device:
                internal_name = dev.internal_name
                break
        if internal_name is None:
            raise Exception("No such device {} exists.".format(device))

        pcap = self._libpcap.pcap_open_live(bytes(internal_name, 'ascii'), snaplen, promisc, to_ms, errbuf)
        if pcap == self._ffi.NULL:
            raise PcapException("Failed to open live device {}: {}".format(internal_name, self._ffi.string(errbuf)))

        if nonblock:
            rv = self._libpcap.pcap_setnonblock(pcap, 1, errbuf)
            if rv != 0:
                raise PcapException("Error setting pcap device in nonblocking state: {}".format(self._ffi.string(errbuf)))

        # gather what happened
        nblock = self._libpcap.pcap_getnonblock(pcap, errbuf)
        snaplen = self._libpcap.pcap_snapshot(pcap)
        dl = self._libpcap.pcap_datalink(pcap)
        try:
            dl = Dlt(dl)
        except ValueError as e:
            raise PcapException("Don't know how to handle datalink type {}".format(dl))
        return PcapDev(dl, nblock, snaplen, self.version, pcap)

    def close_live(self, pcap):
        '''Close a live capture handle.'''
        self._libpcap.pcap_close(pcap)

    def get_select_fd(self, xpcap):
        '''Return a selectable fd for the capture, or -1 if unsupported.'''
        try:
            return self._libpcap.pcap_get_selectable_fd(xpcap)
        except:
            return -1

    def send_packet(self, xpcap, xbuffer):
        '''Inject *xbuffer* (bytes) on the device; True on success.'''
        if not isinstance(xbuffer, bytes):
            raise PcapException("Packets to be sent via libpcap must be serialized as a bytes object")
        xlen = len(xbuffer)
        rv = self._libpcap.pcap_sendpacket(xpcap, xbuffer, xlen)
        if rv == 0:
            return True
        s = self._ffi.string(self._libpcap.pcap_geterr(xpcap))
        raise PcapException("Error sending packet: {}".format(s))

    def recv_packet(self, xpcap):
        '''
        Receive one packet; returns a PcapPacket, or None on timeout or
        end of savefile.  Raises PcapException on receive error.
        '''
        phdr = self._ffi.new("struct pcap_pkthdr **")
        pdata = self._ffi.new("unsigned char **")
        rv = self._libpcap.pcap_next_ex(xpcap, phdr, pdata)
        if rv == 1:
            rawpkt = bytes(self._ffi.buffer(pdata[0], phdr[0].caplen))
            # Compute the timestamp arithmetically; the previous
            # string-based "{sec}.{usec}" formatting was wrong whenever
            # tv_usec had fewer than six digits.
            ts = phdr[0].tv_sec + phdr[0].tv_usec / 1000000.0
            return PcapPacket(ts, phdr[0].caplen, phdr[0].len, rawpkt)
        elif rv == 0:
            # timeout; nothing to return
            return None
        elif rv == -1:
            # error on receive; raise an exception
            s = self._ffi.string(self._libpcap.pcap_geterr(xpcap))
            raise PcapException("Error receiving packet: {}".format(s))
        elif rv == -2:
            # reading from savefile, but none left
            return None

    def set_filter(self, xpcap, filterstr):
        '''
        Compile *filterstr* into a BPF program and install it on the
        handle wrapped by *xpcap*.
        '''
        bpf = self._ffi.new("struct bpf_program *")
        cfilter = self._ffi.new("char []", bytes(filterstr, 'ascii'))
        compile_result = self._libpcap.pcap_compile(xpcap.pcap, bpf, cfilter, 0, 0xffffffff)
        if compile_result < 0:
            # get error, raise exception
            s = self._ffi.string(self._libpcap.pcap_geterr(xpcap.pcap))
            raise PcapException("Error compiling filter expression: {}".format(s))

        try:
            sf_result = self._libpcap.pcap_setfilter(xpcap.pcap, bpf)
            if sf_result < 0:
                # get error, raise exception
                s = self._ffi.string(self._libpcap.pcap_geterr(xpcap.pcap))
                raise PcapException("Error setting filter on pcap handle: {}".format(s))
        finally:
            # Free the compiled program even if installation failed.
            self._libpcap.pcap_freecode(bpf)

    def stats(self, xpcap):
        '''Return capture statistics as a PcapStats tuple.'''
        pstat = self._ffi.new("struct pcap_stat *")
        rv = self._libpcap.pcap_stats(xpcap, pstat)
        if rv == 0:
            return PcapStats(pstat.recv, pstat.drop, pstat.ifdrop)
        else:
            s = self._ffi.string(self._libpcap.pcap_geterr(xpcap))
            raise PcapException("Error getting stats: {}".format(s))
Example #8
0
class RQObject(object):
    """Thin cffi wrapper around the RaptorQ (RFC 6330) C library.

    NOTE(review): subclasses are expected to provide ``_ctx_init`` (a
    (callable, args) pair used by :meth:`open`) and ``symbol_size`` —
    neither is defined here; confirm against the subclass definitions.
    Instances are context managers so the underlying RaptorQ_ptr is
    always released.
    """

    _cdefs = '''
		typedef uint64_t RaptorQ_OTI_Common_Data;
		typedef uint32_t RaptorQ_OTI_Scheme_Specific_Data;

		typedef enum {
			NONE = 0,
			ENC_8 = 1, ENC_16 = 2, ENC_32 = 3, ENC_64 = 4,
			DEC_8 = 5, DEC_16 = 6, DEC_32 = 7, DEC_64 = 8
		} RaptorQ_type;

		struct RaptorQ_ptr;

		struct RaptorQ_ptr* RaptorQ_Enc (
			const RaptorQ_type type,
			void *data,
			const uint64_t size,
			const uint16_t min_subsymbol_size,
			const uint16_t symbol_size,
			const size_t max_memory);

		struct RaptorQ_ptr* RaptorQ_Dec (
			const RaptorQ_type type,
			const RaptorQ_OTI_Common_Data common,
			const RaptorQ_OTI_Scheme_Specific_Data scheme);

		// Encoding

		RaptorQ_OTI_Common_Data RaptorQ_OTI_Common (struct RaptorQ_ptr *enc);
		RaptorQ_OTI_Scheme_Specific_Data RaptorQ_OTI_Scheme (struct RaptorQ_ptr *enc);

		uint16_t RaptorQ_symbol_size (struct RaptorQ_ptr *ptr);
		uint8_t RaptorQ_blocks (struct RaptorQ_ptr *ptr);
		uint32_t RaptorQ_block_size (struct RaptorQ_ptr *ptr, const uint8_t sbn);
		uint16_t RaptorQ_symbols (struct RaptorQ_ptr *ptr, const uint8_t sbn);
		uint32_t RaptorQ_max_repair (struct RaptorQ_ptr *enc, const uint8_t sbn);
		size_t RaptorQ_precompute_max_memory (struct RaptorQ_ptr *enc);

		void RaptorQ_precompute (
			struct RaptorQ_ptr *enc,
			const uint8_t threads,
			const bool background);

		uint64_t RaptorQ_encode_id (
			struct RaptorQ_ptr *enc,
			void **data,
			const uint64_t size,
			const uint32_t id);
		uint64_t RaptorQ_encode (
			struct RaptorQ_ptr *enc,
			void **data,
			const uint64_t size,
			const uint32_t esi,
			const uint8_t sbn);
		uint32_t RaptorQ_id (const uint32_t esi, const uint8_t sbn);

		// Decoding

		uint64_t RaptorQ_bytes (struct RaptorQ_ptr *dec);

		uint64_t RaptorQ_decode (
			struct RaptorQ_ptr *dec,
			void **data,
			const size_t size);
		uint64_t RaptorQ_decode_block (
			struct RaptorQ_ptr *dec,
			void **data,
			const size_t size,
			const uint8_t sbn);

		bool RaptorQ_add_symbol_id (
			struct RaptorQ_ptr *dec,
			void **data,
			const uint32_t size,
			const uint32_t id);
		bool RaptorQ_add_symbol (
			struct RaptorQ_ptr *dec,
			void **data,
			const uint32_t size,
			const uint32_t esi,
			const uint8_t sbn);

		// General: free memory

		void RaptorQ_free (struct RaptorQ_ptr **ptr);
		void RaptorQ_free_block (struct RaptorQ_ptr *ptr, const uint8_t sbn);
	'''

    # Opaque RaptorQ_ptr context; set by open(), cleared by close().
    _ctx = None

    # Data is handled in 4-byte units as uint32_t blocks (32-bit variant).
    data_size_div, _rq_type, _rq_blk = 4, 32, 'uint32_t'

    def __init__(self):
        """Parse the cdefs and dlopen libRaptorQ in ABI mode."""
        self._ffi = FFI()
        self._ffi.cdef(self._cdefs)
        # self.ffi.set_source('_rq', '#include <RaptorQ/cRaptorQ.h>')
        lib_name = ctypes.util.find_library('RaptorQ')  # newer cffi should not do that automatically
        self._lib = self._ffi.dlopen(lib_name)  # ABI mode for simplicity
        # Index-aligned names for the RaptorQ_type enum values above.
        self.rq_types = (['NONE', None]
                         + list('ENC_{}'.format(2 ** n) for n in range(3, 7))
                         + list('DEC_{}'.format(2 ** n) for n in range(3, 7)))
        self._rq_blk_size = self.data_size_div

    def rq_type_val(self, v, pre):
        """Map *v* (e.g. 32 or 'enc_32') to the C enum value, prefixing
        numeric specs with *pre* ('ENC' or 'DEC')."""
        if isinstance(v, int) or v.isdigit():
            v = '{}_{}'.format(pre, v).upper()
        else:
            v = bytes(v).upper()
        assert v in self.rq_types, [v, self.rq_types]
        return getattr(self._lib, v)

    def __getattr__(self, k):
        """Proxy ``rq_*`` attributes to ``RaptorQ_*`` C functions bound to
        the currently open context."""
        if k.startswith('rq_'):
            if not self._ctx: raise RuntimeError('ContextManager not initialized or already freed')
            return ft.partial(getattr(self._lib, 'RaptorQ_{}'.format(k[3:])), self._ctx)
        return self.__getattribute__(k)

    def open(self):
        """Create the RaptorQ context from the subclass-supplied
        ``_ctx_init`` (callable, args) pair."""
        self._ctx = self._ctx_init[0](*self._ctx_init[1])
        return self._ctx

    def close(self):
        """Free the RaptorQ context (idempotent)."""
        if self._ctx:
            ptr = self._ffi.new('struct RaptorQ_ptr **')
            ptr[0] = self._ctx
            self._lib.RaptorQ_free(ptr)
            self._ctx = None

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, *err):
        self.close()

    def __del__(self):
        self.close()

    def sym_id(self, esi, sbn):
        """Pack (esi, sbn) into a single symbol id via the C helper."""
        return self._lib.RaptorQ_id(esi, sbn)

    _sym_n = None  # cached number of blocks per symbol

    def _sym_buff(self, init=None):
        """Allocate one symbol buffer; returns (void** handle, reader).

        NOTE(review): ``symbol_size / self._rq_blk_size`` relies on
        integer division — Python 2 semantics; under Python 3 this would
        produce a float.  Confirm intended interpreter.
        """
        if not self._sym_n: self._sym_n = self.symbol_size / self._rq_blk_size
        buff = self._ffi.new('{}[]'.format(self._rq_blk), self._sym_n)
        buff_ptr = self._ffi.new('void **', buff)
        buff_raw = self._ffi.buffer(buff)
        if init: buff_raw[:] = init
        return buff_ptr, lambda: bytes(buff_raw)
Example #9
0
class SGXInterface:

    def __init__(self):
        # Build the SGX cffi bindings from sgx.h, compile the extension in
        # place, then load the runtime libraries and the BarbiE client/server
        # enclave wrappers.
        LOG.info("SGX Interface initialized")

        self.ffi = FFI()
        dir_path = os.path.dirname(os.path.realpath(__file__))
        # All C declarations live in sgx.h next to this module.
        with open(os.path.join(dir_path,"sgx.h")) as stream:
            self.ffi.cdef(stream.read())

        self.ffi.set_source("_sgx_interface",
            """
            #include "sgx_eid.h"
            #include "sgx_key_exchange.h"
            #include "common.h"
            #include "network_ra.h"
            #include "barbie_server.h"
            #include "barbie_client.h"
            #include "ra_client.h"
            #include "ra_server.h"
            #include <stdbool.h>
            #include "service_provider.h"
            """,
            include_dirs=['/usr/include', '/opt/intel/sgxsdk/include'],
            library_dirs=['/usr/local/lib', '/opt/intel/sgxsdk/lib64/'],
            libraries=["sample_libcrypto", "BarbiE_Client", "BarbiE_Server"])

        # Compile the generated extension next to this file.
        self.ffi.compile(tmpdir=dir_path)

        # SDK runtime libs are loaded RTLD_GLOBAL so their symbols are
        # visible to the BarbiE libraries loaded below.
        libuae = self.ffi.dlopen("sgx_uae_service", self.ffi.RTLD_GLOBAL)
        liburts = self.ffi.dlopen("sgx_urts", self.ffi.RTLD_GLOBAL)
        libcrypto = self.ffi.dlopen("sample_libcrypto", self.ffi.RTLD_GLOBAL)

        self.barbie_s = self.ffi.dlopen("BarbiE_Server", self.ffi.RTLD_LAZY)
        self.barbie_c = self.ffi.dlopen("BarbiE_Client", self.ffi.RTLD_LAZY)
        # IV/MAC sizes in bytes used by the sealing format (12/16 —
        # consistent with AES-GCM; confirm against the C side).
        self.iv = 12
        self.mac = 16
        # Maps numeric service-provider status codes to symbolic names.
        self.error_dict = {'0':'SP_OK', '1':'SP_UNSUPPORTED_EXTENDED_EPID_GROUP', '2':'SP_INTEGRITY_FAILED', '3':'SP_QUOTE_VERIFICATION_FAILED', '4':'SP_IAS_FAILED', '5':'SP_INTERNAL_ERROR', '6':'SP_PROTOCOL_ERROR', '7':'SP_QUOTE_VERSION_ERROR', '8':'SP_SPID_SET_ERROR'}

    def init_env_variables(self):
        """Load KEY=VALUE pairs from the BarbiE properties file into
        ``os.environ``.

        Splits on the first ``=`` only, so values that themselves contain
        ``=`` (e.g. base64 padding) no longer raise ValueError.
        """
        separator = "="
        with open("/opt/BarbiE/env.properties") as f:
            for line in f:
                if separator in line:
                    name, value = line.split(separator, 1)
                    os.environ[name.strip()] = value.strip()

    def get_spid(self):
        """Return the ``IAS_SPID`` value from the BarbiE properties file,
        or ``None`` when the key is absent.

        Splits on the first ``=`` only so values containing ``=`` are
        preserved intact (the old unbounded split raised ValueError).
        """
        separator = "="
        spid = None
        with open("/opt/BarbiE/env.properties") as f:
            for line in f:
                if separator in line:
                    name, value = line.split(separator, 1)
                    if name.strip() == "IAS_SPID":
                        spid = value.strip()
                        return spid

    def get_ias_crt(self):
        """Return the ``IAS_CRT_PATH`` value from the BarbiE properties
        file, or ``None`` when the key is absent.

        Splits on the first ``=`` only so values containing ``=`` are
        preserved intact (the old unbounded split raised ValueError).
        """
        separator = "="
        ias_crt = None
        with open("/opt/BarbiE/env.properties") as f:
            for line in f:
                if separator in line:
                    name, value = line.split(separator, 1)
                    if name.strip() == "IAS_CRT_PATH":
                        ias_crt = value.strip()
                        return ias_crt

    def get_ias_enable(self):
        """Return True iff ``IAS_ENABLED`` equals the literal ``'True'``
        in the BarbiE properties file; ``None`` when the key is absent.

        Splits on the first ``=`` only so values containing ``=`` are
        preserved intact (the old unbounded split raised ValueError).
        """
        separator = "="
        with open("/opt/BarbiE/env.properties") as f:
            for line in f:
                if separator in line:
                    name, value = line.split(separator, 1)
                    if name.strip() == "IAS_ENABLED":
                        # Any value other than the literal 'True' is False.
                        return value.strip() == 'True'

    def init_enclave(self, target_lib):
        """Create an SGX enclave via *target_lib* and return its id.

        Best-effort: returns -1 when enclave creation fails.
        """
        try:
            enclave_id_ptr = self.ffi.new("sgx_enclave_id_t *")
            status = target_lib.initialize_enclave(enclave_id_ptr)
            return enclave_id_ptr[0]
        except Exception as e:
            LOG.error("Error in initializing enclave!")
            return -1

    def get_crt(self, resp_crt=None):
        """Split a PEM chain of two certificates into its parts.

        :param resp_crt: PEM text containing two concatenated certificates.
        :returns: tuple of (first certificate including its END marker,
                  second certificate followed by a trailing newline).
        """
        marker = '-----END CERTIFICATE-----\n'
        parts = resp_crt.split(marker)
        return parts[0] + marker, parts[1] + "\n"

    def verify_certificate(self, crt=None, cacrt=None):
        """Validate *crt* against the CA certificate *cacrt*.

        Builds an OpenSSL X509 store containing both certificates and
        verifies the CA certificate against it.  Returns True on success;
        raises on any failure.
        """
        try:
            cert = load_certificate(FILETYPE_PEM, crt)
            ca_cert = load_certificate(FILETYPE_PEM, cacrt)
            cert_to_check = load_certificate(FILETYPE_PEM, cacrt)
            trust_store = X509Store()
            trust_store.add_cert(ca_cert)
            trust_store.add_cert(cert)
            ctx = X509StoreContext(trust_store, cert_to_check)
            # verify_certificate() returns None when the chain checks out.
            if ctx.verify_certificate() is None:
                LOG.info("Certificate verification Passed on Server side")
                return True
            raise Exception("Certificate Verification Failed on Server side")
        except Exception as e:
            LOG.error(str(e))
            raise Exception("Certificate Validation Failed on Server side", e)

    def verify_signature(self, crt=None, sign=None, resp_body=None):
        """Check the base64 signature *sign* over *resp_body* using the
        public key contained in certificate *crt*.

        Returns True on success; raises on any verification failure.
        """
        try:
            signer_cert = load_certificate(FILETYPE_PEM, crt)
            pem_pub = dump_publickey(FILETYPE_PEM, signer_cert.get_pubkey())
            pkey = load_publickey(FILETYPE_PEM, pem_pub)
            holder = X509()
            holder.set_pubkey(pkey)
            # pyOpenSSL's verify() returns None on success, raises otherwise.
            if verify(holder, base64.b64decode(sign), resp_body, 'sha256') is None:
                LOG.info("Signature verification Passed on Server side")
                return True
        except Exception as e:
            LOG.error(str(e))
            raise Exception("Signature verification Failed on Server side", e)

    def generate_key_pair(self):
        """Load (or create on first use) a NIST P-256 key pair and return it
        in the byte layout the SGX C code expects.

        Reads ``KEY_PAIR_DIR`` from the BarbiE properties file; PEM files are
        created there when missing.  Returns (public key, private key), each
        base64-encoded, little-endian and NUL-terminated.

        NOTE(review): this method is Python-2-only — it relies on integer
        ``/`` division of ``len(pk64)`` and on ``str.encode('hex')``.
        """
        separator = "="
        key_dir = None
        with open("/opt/BarbiE/env.properties") as f:
            for line in f:
                if separator in line:
                    name, value = line.split(separator)
                    if name.strip() == "KEY_PAIR_DIR":
                        key_dir = value.strip()
        pub_key_path = os.path.join(key_dir, "public_key.pem")
        priv_key_path = os.path.join(key_dir, "private_key.pem")
        if not os.path.exists(pub_key_path):
            # First run: create and persist a fresh P-256 key pair.
            priv_key = SigningKey.generate(curve=NIST256p)
            pub_key = priv_key.get_verifying_key()
            open(priv_key_path,"w").write(priv_key.to_pem())
            open(pub_key_path,"w").write(pub_key.to_pem())
        else:
            priv_key = SigningKey.from_pem(open(priv_key_path).read())
            pub_key = VerifyingKey.from_pem(open(pub_key_path).read())

        # Public key is the 64-byte X||Y point; split into coordinates.
        pk64 = pub_key.to_string()
        pk_x, pk_y = pk64[:len(pk64)/2], pk64[len(pk64)/2:]
        hex_priv_key = priv_key.to_string()
        hex_sk = hex_priv_key.encode('hex')

        pk_x = pk_x.encode('hex')
        pk_y = pk_y.encode('hex')
        # Chop the hex strings into per-byte pairs so byte order can be
        # reversed (big-endian PEM -> little-endian as the enclave expects).
        hex_priv_key_out = [hex_sk[i:i + 2]for i in range(0, len(hex_sk), 2)]
        pk_x_out = [pk_x[i:i + 2] for i in range(0,len(pk_x), 2)]
        pk_y_out = [pk_y[i:i + 2] for i in range(0,len(pk_y), 2)]

        pk_x_out.reverse()
        pk_y_out.reverse()

        # Reassemble as little-endian X followed by little-endian Y.
        pub_key = ""
        for i in range(len(pk_x_out)):
            pub_key = pub_key + pk_x_out[i]
        for i in range(len(pk_y_out)):
            pub_key = pub_key + pk_y_out[i]
        hex_priv_key_out.reverse()
        priv_key = ""
        for i in range(len(hex_priv_key_out)):
            priv_key = priv_key + hex_priv_key_out[i]

        # NUL-terminate for the C side before base64 encoding.
        pub_key = base64.b64encode(pub_key + '\0')
        priv_key = base64.b64encode(priv_key + '\0')

        return pub_key , priv_key

    def gen_msg0(self, target_lib, spid=None):
        """Generate attestation msg0 via *target_lib*.

        :param spid: service provider ID, or None for ffi.NULL.
        :returns: tuple of (status code, base64-encoded msg0).
        """
        try:
            if spid is None:
                spid = self.ffi.NULL
            # NOTE(review): p_ctxt is allocated but never used here.
            p_ctxt = self.ffi.new("sgx_ra_context_t *")
            req0_ptr = self.ffi.new("ra_samp_msg0_request_header_t **")
            ret = target_lib.gen_msg0(req0_ptr, spid)
            return ret, base64.b64encode(self.ffi.buffer(req0_ptr[0]))
        except Exception as e:
            LOG.error("Error in generating msg0")
            raise e

    def proc_msg0(self, target_lib, msg0, spid=None, client_verify_ias=False):
        """Process attestation msg0 and create the network context.

        :returns: tuple of (status code, network context pointer).
        :raises Exception: when the server-side SPID is not configured.
        """
        try:
            if spid is None:
                spid = self.ffi.NULL
            raw_msg0 = self.ffi.from_buffer(base64.b64decode(msg0))
            net_ctxt_ptr = self.ffi.new("void **")
            status = target_lib.proc_msg0(raw_msg0, net_ctxt_ptr, spid,
                                          client_verify_ias)
            # Guard clause instead of the original if/else.
            if self.error_dict[str(status)] == 'SP_SPID_SET_ERROR':
                raise Exception("SPID not set server side")
            return status, net_ctxt_ptr
        except Exception as e:
            LOG.error("Error in processing msg0")
            raise e

    def gen_msg1(self, target_lib, enclave_id, pub_key):
        """Generate attestation msg1 for *enclave_id*.

        :param pub_key: base64-encoded public key, or None for ffi.NULL.
        :returns: tuple of (ra context value, base64-encoded msg1).
        """
        try:
            if pub_key is not None:
                key = self.ffi.new("char[]", base64.b64decode(pub_key))
            else:
                key = self.ffi.NULL

            ctxt_ptr = self.ffi.new("sgx_ra_context_t *")
            req1_ptr = self.ffi.new("ra_samp_msg1_request_header_t **")
            target_lib.gen_msg1(enclave_id, ctxt_ptr, req1_ptr, key)
            msg1 = base64.b64encode(self.ffi.buffer(req1_ptr[0]))
            return ctxt_ptr[0], msg1
        except Exception as e:
            LOG.error("Error in generating msg1")
            raise e

    def proc_msg1_gen_msg2(self, target_lib, msg1, p_net_ctxt, priv_key):
        """Process msg1 on the network context and produce msg2.

        :param priv_key: base64-encoded private key, or None for ffi.NULL.
        :returns: base64-encoded msg2.
        """
        try:
            if priv_key is not None:
                key = self.ffi.new("char[]", base64.b64decode(priv_key))
            else:
                key = self.ffi.NULL

            raw_msg1 = self.ffi.from_buffer(base64.b64decode(msg1))
            resp1_ptr = self.ffi.new("ra_samp_msg1_response_header_t **")
            target_lib.proc_msg1(raw_msg1, p_net_ctxt, resp1_ptr, key)
            return base64.b64encode(self.ffi.buffer(resp1_ptr[0]))
        except Exception as e:
            LOG.error("Error in generating msg2")
            raise e

    def proc_msg2_gen_msg3(self, target_lib, enclave_id, msg2, p_ctxt, ias_crt=None, client_verify_ias=False, server_verify_ias=True):
        """Process msg2 and produce msg3 plus the IAS response artifacts.

        :returns: tuple of (base64 msg3, IAS certificate string,
                  signature string, response body string).
        :raises Exception: when IAS quote verification fails.
        """
        try:
            if ias_crt is None:
                ias_crt = self.ffi.NULL
            raw_msg2 = self.ffi.from_buffer(base64.b64decode(msg2))
            req2_ptr = self.ffi.new("ra_samp_msg3_request_header_t **")
            # Fixed-size output buffers for the IAS artifacts.
            resp_crt = self.ffi.new("uint8_t[]", 4000)
            resp_sign = self.ffi.new("uint8_t[]", 500)
            resp_body = self.ffi.new("uint8_t[]", 1200)
            status = target_lib.gen_msg3(enclave_id, p_ctxt, raw_msg2,
                                         req2_ptr, ias_crt, client_verify_ias,
                                         server_verify_ias, resp_crt,
                                         resp_sign, resp_body)
            if self.error_dict[str(status)] == 'SP_QUOTE_VERIFICATION_FAILED':
                raise Exception("IAS verification failed")
            msg3 = base64.b64encode(self.ffi.buffer(req2_ptr[0]))
            return (msg3, self.ffi.string(resp_crt),
                    self.ffi.string(resp_sign), self.ffi.string(resp_body))
        except Exception as e:
            LOG.error("Error in generating msg3")
            raise e

    def proc_msg3_gen_msg4(self, target_lib, enclave_id, msg3, p_net_ctxt, sealed_sk, project_id=None, owner_mr_e=None, ias_crt=None, client_verify_ias=False, sealed_key2=None):
        """Process attestation msg3 and generate msg4 for the client.

        Installs the sealed secrets on the network context, verifies msg3
        (optionally against IAS) and returns the base64-encoded msg4.

        NOTE(review): owner_mr_e and sealed_key2 are decoded
        unconditionally; passing None for either would raise — confirm
        callers always supply them.
        """
        try:
            if ias_crt is None:
                ias_crt = self.ffi.NULL
            owner_mr_e = self.ffi.from_buffer(base64.b64decode(owner_mr_e))
            msg3 = self.ffi.from_buffer(base64.b64decode(msg3))
            if sealed_sk is None:
                sealed_len = 0
                sealed_sk = self.ffi.NULL
            else:
                sealed_len = sealed_sk.length
                sealed_sk = self.ffi.from_buffer(base64.b64decode(sealed_sk.value))
            if project_id is None:
                project_id_len = 0
                project_id = self.ffi.NULL
            else:
                project_id_len = len(project_id)
            sealed_key2_len = sealed_key2.length
            sealed_key2 = self.ffi.from_buffer(base64.b64decode(sealed_key2.value))
            pp_resp2 = self.ffi.new("ra_samp_msg3_response_header_t **")
            # Bind the enclave and sealed secrets to the network context
            # before processing msg3.
            target_lib.set_enclave(p_net_ctxt, enclave_id)
            target_lib.set_secret(p_net_ctxt, sealed_sk, sealed_len, sealed_key2, sealed_key2_len)
            status = target_lib.proc_msg3(msg3, p_net_ctxt, pp_resp2, self.ffi.NULL, project_id, owner_mr_e, ias_crt, client_verify_ias)
            #Initially using 177 length of msg4 but
            #after adding project id to msg4 using (209 + project id length) for msg4
            error = self.error_dict[str(status)]
            if(error != 'SP_QUOTE_VERIFICATION_FAILED'):
                # 417 = fixed msg4 header size; presumably grew beyond the
                # 209 noted above — confirm against the C struct layout.
                msg4 = base64.b64encode(self.ffi.buffer(pp_resp2[0],(417 + project_id_len)))
            else:
                raise Exception("IAS verification failed")
            return msg4
        except Exception as e:
            LOG.error("Error in generating msg4")
            raise e

    def ma_proc_msg4(self, target_lib, enclave_id, s_msg4, s_p_ctxt, c_msg3, c_p_net_ctxt, s_mk, mk_sk, policy_dict, ias_crt, client_verify_ias, project_id_len):
        """Mutual-attestation step: consume server msg4 and client msg3,
        producing the client msg4 plus the (sealed) master key and mk_sk blob.

        When s_mk/mk_sk are given, their decoded contents are passed in as
        inputs; otherwise fresh output buffers are allocated and the native
        side fills them.

        :returns: (Secret(sealed_mk), mk_sk blob "b64(iv)+b64(mac)+b64(ct)",
                   base64 c_msg4)
        :raises Exception: when the native call reports a non-zero status
        """
        try:
            plain_sk_len = 16
            # Base64-encoded sizes of the iv (12 raw bytes) and mac (16 raw
            # bytes) prefixes inside the combined blobs.
            b64_iv = 16
            b64_mac = 24
            if s_mk and mk_sk:
                LOG.info("Using existing buffers")
                sealed_len = s_mk.length
                sealed_mk = self.ffi.from_buffer(base64.b64decode(s_mk.value))
                # Blob layout: [b64 iv][b64 mac][b64 ciphertext]
                iv = self.ffi.from_buffer(base64.b64decode(mk_sk[:b64_iv]))
                mac = self.ffi.from_buffer(base64.b64decode(mk_sk[b64_iv:(b64_iv + b64_mac)]))
                mk_sk = self.ffi.from_buffer(base64.b64decode(mk_sk[(b64_iv + b64_mac):]))
                mk_sk_len = len(mk_sk)
            else:
                LOG.info("Creating new buffers")
                iv = self.ffi.new("uint8_t[]", self.iv)
                mac = self.ffi.new("uint8_t[]", self.mac)
                mk_sk = self.ffi.new("uint8_t[]", plain_sk_len)
                sealed_len = target_lib.get_sealed_data_len(enclave_id, 0, plain_sk_len)
                sealed_mk = self.ffi.new("uint8_t[]", sealed_len)
                #Set sealed len zero to let native side know this is output variable
                # NOTE(review): unlike new_proc_ra(), sealed_len is NOT reset to
                # 0 here despite the comment above -- confirm which convention
                # ma_proc_ra() expects.
                mk_sk_len = plain_sk_len
            if policy_dict:
                policy = policy_dict['policy']
                attribute = policy_dict['attribute']
                iv1 = self.ffi.from_buffer(base64.b64decode(attribute[:b64_iv]))
                mac1 = self.ffi.from_buffer(base64.b64decode(attribute[b64_iv:(b64_iv + b64_mac)]))
                attribute = self.ffi.from_buffer(base64.b64decode(attribute[(b64_iv + b64_mac):]))
                attribute_len = len(attribute)
            else:
                policy = 0
                attribute = self.ffi.NULL
                attribute_len = 0
                iv1 = self.ffi.NULL
                mac1 = self.ffi.NULL

            s_msg4 = self.ffi.from_buffer(base64.b64decode(s_msg4))
            c_msg3 = self.ffi.from_buffer(base64.b64decode(c_msg3))
            pp_resp2 = self.ffi.new("ra_samp_msg3_response_header_t **")
            status = target_lib.ma_proc_ra(enclave_id, s_msg4, s_p_ctxt, c_msg3, c_p_net_ctxt, pp_resp2, sealed_mk, sealed_len, mk_sk, mk_sk_len, iv, mac, ias_crt, client_verify_ias, policy, attribute, attribute_len, iv1, mac1)
            if status == 0:
                # 417 is the fixed msg4 size before the appended project id
                # (see proc_msg3_gen_msg4).
                c_msg4 = base64.b64encode(self.ffi.buffer(pp_resp2[0],(417 + project_id_len)))
                sealed_mk = base64.b64encode(self.ffi.buffer(sealed_mk))
                mk_sk = base64.b64encode(self.ffi.buffer(iv)) + base64.b64encode(self.ffi.buffer(mac)) + base64.b64encode(self.ffi.buffer(mk_sk))
                sealed_mk_len = target_lib.get_sealed_data_len(enclave_id, 0, plain_sk_len)
                return Secret(sealed_mk, sealed_mk_len), mk_sk, c_msg4
            else:
                raise Exception("Error getting sealed mk and mk_sk")
        except Exception as e:
            LOG.error("Error in ma_proc_msg4")
            raise e

    def proc_msg4(self, target_lib, enclave_id, msg4, p_ctxt, sha2_client, sha2_server):
        """Process attestation msg4 and retrieve the sealed session key.

        Closes the attestation context (close_ra) on success.

        :param target_lib: cffi library handle exposing proc_ra()/close_ra()
        :param enclave_id: id of the local enclave
        :param msg4: base64-encoded msg4 from the peer
        :param p_ctxt: opaque attestation context pointer
        :param sha2_client: base64-encoded client report hash
        :param sha2_server: base64-encoded server report hash
        :returns: (native status code, base64 sealed session key)
        """
        try:
            sha2_client = self.ffi.from_buffer(base64.b64decode(sha2_client))
            sha2_server = self.ffi.from_buffer(base64.b64decode(sha2_server))
            msg4 = self.ffi.from_buffer(base64.b64decode(msg4))
            plain_sk_len = 16
            # Ask the native side how large the sealed blob for a 16-byte key is.
            secret1_len = target_lib.get_sealed_data_len(enclave_id, 0, plain_sk_len)
            sealed_secret1 = self.ffi.new("uint8_t[]", secret1_len)
            status = target_lib.proc_ra(enclave_id, p_ctxt, msg4, sealed_secret1,
                                        secret1_len, self.ffi.NULL, 0)
            secret1_buf = base64.b64encode(self.ffi.buffer(sealed_secret1))
            target_lib.close_ra(enclave_id, p_ctxt)
            return status, secret1_buf
        except Exception as e:
            # Fixed typo in the original log message ("prcessing").
            LOG.error("Error in processing msg4 and retrieving sealed session key")
            raise e

    def new_proc_ra(self, target_lib, enclave_id, msg4, p_ctxt, s_mk, mk_sk):
        """Process msg4 and obtain the sealed master key, mk_sk and dh_sk blobs.

        When s_mk/mk_sk are supplied their decoded contents are inputs;
        otherwise output buffers are allocated and both length arguments are
        set to 0 to signal "output" to the native side.

        :returns: (Secret(sealed_mk, 576), mk_sk blob, dh_sk blob) where each
                  blob is b64(iv) + b64(mac) + b64(ciphertext)
        :raises Exception: when the native call reports a non-zero status
        """
        try:
            msg4 = self.ffi.from_buffer(base64.b64decode(msg4))
            plain_sk_len = 16
            # Base64-encoded sizes of the iv/mac prefixes in the blobs.
            b64_iv = 16
            b64_mac = 24
            if s_mk and mk_sk:
                LOG.info("Using existing buffers")
                sealed_len = s_mk.length
                sealed_mk = self.ffi.from_buffer(base64.b64decode(s_mk.value))
                iv = self.ffi.from_buffer(base64.b64decode(mk_sk[:b64_iv]))
                mac = self.ffi.from_buffer(base64.b64decode(mk_sk[b64_iv:(b64_iv + b64_mac)]))
                mk_sk = self.ffi.from_buffer(base64.b64decode(mk_sk[(b64_iv + b64_mac):]))
                mk_sk_len = len(mk_sk)
            else:
                LOG.info("Creating new buffers")
                iv = self.ffi.new("uint8_t[]", self.iv)
                mac = self.ffi.new("uint8_t[]", self.mac)
                mk_sk = self.ffi.new("uint8_t[]", plain_sk_len)
                sealed_len = target_lib.get_sealed_data_len(enclave_id, 0, plain_sk_len)
                sealed_mk = self.ffi.new("uint8_t[]", sealed_len)
                #Set sealed len zero to let native side know this is output variable
                sealed_len = 0
                mk_sk_len = 0
            iv1 = self.ffi.new("uint8_t[]", self.iv)
            mac1 = self.ffi.new("uint8_t[]", self.mac)
            dh_sk = self.ffi.new("uint8_t[]", plain_sk_len)
            dh_sk_len = plain_sk_len
            status = target_lib.new_proc_ra(enclave_id, p_ctxt, msg4, sealed_mk, sealed_len, mk_sk, mk_sk_len, iv, mac, dh_sk, dh_sk_len, iv1, mac1)
            if status == 0:
                sealed_mk = base64.b64encode(self.ffi.buffer(sealed_mk))
                mk_sk = base64.b64encode(self.ffi.buffer(iv)) + base64.b64encode(self.ffi.buffer(mac)) + base64.b64encode(self.ffi.buffer(mk_sk))
                dh_sk = base64.b64encode(self.ffi.buffer(iv1)) + base64.b64encode(self.ffi.buffer(mac1)) + base64.b64encode(self.ffi.buffer(dh_sk))
                # NOTE(review): 576 is hard-coded; it presumably equals
                # get_sealed_data_len(enclave_id, 0, 16) -- confirm and consider
                # computing it like ma_proc_msg4() does.
                return Secret(sealed_mk, 576), mk_sk, dh_sk
            else:
                raise Exception("Error getting sealed mk, mk_sk and dh_sk")
        except Exception as e:
            LOG.error("Error in new_proc_ra")
            raise e

    def get_dh_key(self, target_lib, enclave_id, msg4, p_ctxt):
        """Extract the sealed Diffie-Hellman key from attestation msg4.

        :param target_lib: cffi library handle exposing get_dh_key()
        :param enclave_id: id of the local enclave
        :param msg4: base64-encoded msg4
        :param p_ctxt: opaque attestation context pointer
        :returns: (native status code, base64 sealed DH key)
        """
        try:
            msg4 = self.ffi.from_buffer(base64.b64decode(msg4))
            plain_sk_len = 16
            # Sealed-blob size for a 16-byte key, as reported by the enclave.
            sealed_len = target_lib.get_sealed_data_len(enclave_id, 0, plain_sk_len)
            sealed_dh = self.ffi.new("uint8_t[]", sealed_len)
            status = target_lib.get_dh_key(enclave_id, p_ctxt, msg4, sealed_dh, self.ffi.cast("uint32_t", sealed_len))
            dh_buf = base64.b64encode(self.ffi.buffer(sealed_dh))
            # Context is intentionally left open; proc_msg4() closes it.
            #target_lib.close_ra(enclave_id, p_ctxt)
            return status, dh_buf
        except Exception as e:
            LOG.error("Error in get_dh_key")
            raise e

    def get_project_id(self, target_lib, enclave_id, msg4, p_ctxt):
        """Extract the project id embedded in attestation msg4.

        :param target_lib: cffi library handle exposing get_project_id()/..._len()
        :param enclave_id: id of the local enclave
        :param msg4: base64-encoded msg4
        :param p_ctxt: opaque attestation context pointer
        :returns: (cffi uint8_t[] project id buffer, its length)
        """
        try:
            msg4 = self.ffi.from_buffer(base64.b64decode(msg4))
            # Query the length first so the output buffer is sized exactly.
            # (The original pre-assigned a dead ffi.cast("uint32_t", 0) that was
            # immediately overwritten.)
            proj_id_len = target_lib.get_project_id_len(enclave_id, p_ctxt, msg4)
            proj_id = self.ffi.new("uint8_t []", proj_id_len)
            status = target_lib.get_project_id(enclave_id, p_ctxt, msg4, proj_id)
            return proj_id, proj_id_len
        except Exception as e:
            LOG.error("Error in getting project id")
            raise e

    def convert_to_python_data(self, project_id=None):
        """Convert a NUL-terminated cffi char buffer into a native Python string."""
        return self.ffi.string(project_id)

    def get_sk(self, target_lib, p_net_ctx, enc_sk):
        """Decrypt the DH-encrypted session key blob and return it as a Secret.

        :param target_lib: cffi library handle exposing get_sk()
        :param p_net_ctx: opaque network attestation context
        :param enc_sk: blob laid out as b64(iv) + b64(mac) + b64(ciphertext)
        :returns: Secret holding the 16-byte plaintext session key
        """
        try:
            # Base64-encoded sizes of the iv (12 raw bytes) and mac (16 raw
            # bytes) prefixes.
            b64_iv = 16
            b64_mac = 24
            iv = self.ffi.from_buffer(base64.b64decode(enc_sk[:b64_iv]))
            mac = self.ffi.from_buffer(base64.b64decode(enc_sk[b64_iv:(b64_iv + b64_mac)]))
            dh_sk = self.ffi.from_buffer(base64.b64decode(enc_sk[(b64_iv + b64_mac):]))
            plain_sk = self.ffi.new("uint8_t[]", 16)
            status = target_lib.get_sk(p_net_ctx, plain_sk, 16, dh_sk, iv, mac)
            # BUGFIX: ffi.string() stops at the first NUL byte, silently
            # truncating any random key that contains 0x00; copy the full
            # 16 bytes instead.
            return Secret(self.ffi.buffer(plain_sk, 16)[:], 16)
        except Exception as e:
            LOG.error("Error in get_sk")
            raise e

    def generate_key(self, target_lib, enclave_id, key_len):
        """Generate a random key of *key_len* bytes inside the enclave.

        :param target_lib: cffi library handle exposing crypto_generate_key()
        :param enclave_id: id of the local enclave
        :param key_len: plaintext key length in bytes
        :returns: Secret(base64 sealed key, sealed blob length)
        """
        try:
            # Sealed-blob size for a key_len-byte plaintext, per the enclave.
            sealed_len = target_lib.get_sealed_data_len(enclave_id, 0, key_len)
            sealed_key = self.ffi.new("uint8_t[]", sealed_len)
            target_lib.crypto_generate_key(enclave_id, key_len, sealed_key, sealed_len)
            #use these api's to determine required plain text buffer given a sealed buffer
            #add mac always 0 for now
            #add_mac_len = target_lib.get_add_mac_len(enclave_id, sealed_key, sealed_len)
            #plain_len = target_lib.get_encrypted_len(enclave_id, sealed_key, sealed_len)
            return Secret(base64.b64encode(self.ffi.buffer(sealed_key)), sealed_len)
        except Exception as e:
            LOG.error("Error in generating key")
            raise e

    def get_kek(self, target_lib, enclave_id, s_mk, mk_sk, sk_kek, project_id, project_id_len):
        """Unwrap the sk-encrypted KEK and reseal it under the enclave.

        :param s_mk: Secret holding the sealed master key
        :param mk_sk: blob b64(iv) + b64(mac) + b64(mk-encrypted session key)
        :param sk_kek: blob b64(iv) + b64(mac) + b64(sk-encrypted KEK)
        :param project_id: project id buffer/string forwarded to the enclave
        :param project_id_len: its length
        :returns: Secret(base64 sealed KEK, sealed KEK blob length)
        :raises Exception: when the native call reports a non-zero status
        """
        try:
            plain_sk_len = 16
            # Base64-encoded sizes of the iv/mac prefixes in the blobs.
            b64_iv = 16
            b64_mac = 24
            sealed_len = s_mk.length
            sealed_mk = self.ffi.from_buffer(base64.b64decode(s_mk.value))
            iv = self.ffi.from_buffer(base64.b64decode(mk_sk[:b64_iv]))
            mac = self.ffi.from_buffer(base64.b64decode(mk_sk[b64_iv:(b64_iv + b64_mac)]))
            mk_sk = self.ffi.from_buffer(base64.b64decode(mk_sk[(b64_iv + b64_mac):]))
            mk_sk_len = plain_sk_len

            iv1 = self.ffi.from_buffer(base64.b64decode(sk_kek[:b64_iv]))
            mac1 = self.ffi.from_buffer(base64.b64decode(sk_kek[b64_iv:(b64_iv + b64_mac)]))
            sk_kek = self.ffi.from_buffer(base64.b64decode(sk_kek[(b64_iv + b64_mac):]))
            sk_kek_len = plain_sk_len

            sealed_kek_len = target_lib.get_sealed_data_len(enclave_id, 0, plain_sk_len)
            sealed_kek = self.ffi.new("uint8_t[]", sealed_kek_len)

            status = target_lib.get_kek(enclave_id, sealed_mk, sealed_len, mk_sk, mk_sk_len, iv, mac, sk_kek, sk_kek_len, iv1, mac1, sealed_kek, sealed_kek_len, project_id, project_id_len)
            if status != 0:
                raise Exception("Error in getting sealed kek")
            # BUGFIX: report the sealed KEK buffer's own length, not the sealed
            # master-key length (they only coincide because both seal 16-byte
            # keys).
            return Secret(base64.b64encode(self.ffi.buffer(sealed_kek)), sealed_kek_len)
        except Exception as e:
            LOG.error("Error in getting sealed kek")
            raise e

    def secret_encrypt(self, target_lib, enclave_id, s_mk, mk_sk, sk_secret, project_id, project_id_len):
        """Re-encrypt a session-key-encrypted secret under the master key.

        :param s_mk: Secret holding the sealed master key
        :param mk_sk: blob b64(iv) + b64(mac) + b64(mk-encrypted session key)
        :param sk_secret: blob b64(iv) + b64(mac) + b64(sk-encrypted secret)
        :returns: mk-encrypted blob b64(iv2) + b64(mac2) + b64(ciphertext)
        :raises Exception: when the native call reports a non-zero status
        """
        try:
            plain_sk_len = 16
            # Base64-encoded sizes of the iv/mac prefixes in the blobs.
            b64_iv = 16
            b64_mac = 24
            sealed_len = s_mk.length
            sealed_mk = self.ffi.from_buffer(base64.b64decode(s_mk.value))
            iv = self.ffi.from_buffer(base64.b64decode(mk_sk[:b64_iv]))
            mac = self.ffi.from_buffer(base64.b64decode(mk_sk[b64_iv:(b64_iv + b64_mac)]))
            mk_sk = self.ffi.from_buffer(base64.b64decode(mk_sk[(b64_iv + b64_mac):]))
            mk_sk_len = plain_sk_len

            iv1 = self.ffi.from_buffer(base64.b64decode(sk_secret[:b64_iv]))
            mac1 = self.ffi.from_buffer(base64.b64decode(sk_secret[b64_iv:(b64_iv + b64_mac)]))
            sk_secret = self.ffi.from_buffer(base64.b64decode(sk_secret[(b64_iv + b64_mac):]))
            sk_secret_len = len(sk_secret)

            # Output buffers: ciphertext is the same length as the input.
            mk_secret = self.ffi.new("uint8_t[]", sk_secret_len)
            iv2 = self.ffi.new("uint8_t[]", self.iv)
            mac2 = self.ffi.new("uint8_t[]", self.mac)

            status = target_lib.secret_encrypt(enclave_id, sealed_mk, sealed_len, mk_sk, mk_sk_len, iv, mac, sk_secret, sk_secret_len, iv1, mac1, mk_secret, sk_secret_len, iv2, mac2, project_id, project_id_len)
            if status != 0:
                raise Exception("Error in getting mk encrypted secret")
            return base64.b64encode(self.ffi.buffer(iv2)) + base64.b64encode(self.ffi.buffer(mac2)) + base64.b64encode(self.ffi.buffer(mk_secret))
        except Exception as e:
            LOG.error("Error in getting mk encrypted secret")
            raise e

    def secret_decrypt(self, target_lib, enclave_id, s_mk, mk_sk, mk_secret, project_id, project_id_len):
        """Re-encrypt a master-key-encrypted secret back under the session key.

        Inverse of secret_encrypt().

        :param s_mk: Secret holding the sealed master key
        :param mk_sk: blob b64(iv) + b64(mac) + b64(mk-encrypted session key)
        :param mk_secret: blob b64(iv) + b64(mac) + b64(mk-encrypted secret)
        :returns: sk-encrypted blob b64(iv2) + b64(mac2) + b64(ciphertext)
        :raises Exception: when the native call reports a non-zero status
        """
        try:
            plain_sk_len = 16
            # Base64-encoded sizes of the iv/mac prefixes in the blobs.
            b64_iv = 16
            b64_mac = 24
            sealed_len = s_mk.length
            sealed_mk = self.ffi.from_buffer(base64.b64decode(s_mk.value))
            iv = self.ffi.from_buffer(base64.b64decode(mk_sk[:b64_iv]))
            mac = self.ffi.from_buffer(base64.b64decode(mk_sk[b64_iv:(b64_iv + b64_mac)]))
            mk_sk = self.ffi.from_buffer(base64.b64decode(mk_sk[(b64_iv + b64_mac):]))
            mk_sk_len = plain_sk_len

            iv1 = self.ffi.from_buffer(base64.b64decode(mk_secret[:b64_iv]))
            mac1 = self.ffi.from_buffer(base64.b64decode(mk_secret[b64_iv:(b64_iv + b64_mac)]))
            mk_secret = self.ffi.from_buffer(base64.b64decode(mk_secret[(b64_iv + b64_mac):]))
            mk_secret_len = len(mk_secret)

            # Output buffers: ciphertext is the same length as the input.
            sk_secret = self.ffi.new("uint8_t[]", mk_secret_len)
            iv2 = self.ffi.new("uint8_t[]", self.iv)
            mac2 = self.ffi.new("uint8_t[]", self.mac)

            status = target_lib.secret_decrypt(enclave_id, sealed_mk, sealed_len, mk_sk, mk_sk_len, iv, mac, mk_secret, mk_secret_len, iv1, mac1, sk_secret, mk_secret_len, iv2, mac2, project_id, project_id_len)
            if status != 0:
                raise Exception("Error in getting sk encrypted secret")
            return base64.b64encode(self.ffi.buffer(iv2)) + base64.b64encode(self.ffi.buffer(mac2)) + base64.b64encode(self.ffi.buffer(sk_secret))
        except Exception as e:
            LOG.error("Error in getting sk encrypted secret")
            raise e

    def provision_kek(self, target_lib, enclave_id, sealed_sk, sk_kek, project_id=None):
        """Decrypt a session-key-encrypted KEK inside the enclave and seal it.

        :param sealed_sk: Secret holding the sealed session key
        :param sk_kek: blob b64(iv) + b64(mac) + b64(sk-encrypted KEK)
        :param project_id: optional project id (NULL when absent)
        :returns: base64-encoded sealed KEK
        :raises Exception: when the native call reports a non-zero status
        """
        try:
            if project_id is None:
                project_id = self.ffi.NULL
                proj_id_len = 0
            else:
                proj_id_len = len(project_id)
            # Base64-encoded sizes of the iv/mac prefixes in the blob.
            b64_iv = 16
            b64_mac = 24
            sealed_len = sealed_sk.length
            sealed_sk = self.ffi.from_buffer(base64.b64decode(sealed_sk.value))
            iv = self.ffi.from_buffer(base64.b64decode(sk_kek[:b64_iv]))
            mac = self.ffi.from_buffer(base64.b64decode(sk_kek[b64_iv:(b64_iv + b64_mac)]))
            sk_kek = self.ffi.from_buffer(base64.b64decode(sk_kek[(b64_iv + b64_mac):]))
            plain_kek_len = len(sk_kek)
            sealed_kek_len = target_lib.get_sealed_data_len(enclave_id, 0, plain_kek_len)
            sealed_kek = self.ffi.new("uint8_t[]", sealed_kek_len)
            status = target_lib.crypto_provision_kek(enclave_id, sealed_sk, sealed_len, sk_kek, plain_kek_len, iv, mac, sealed_kek, sealed_kek_len, project_id, proj_id_len)
            if status != 0:
                raise Exception("Error in decrypting secret")
            return base64.b64encode(self.ffi.buffer(sealed_kek))
        except Exception as e:
            LOG.error("Error in provisioning of kek")
            raise e

    def legacy_encrypt(self, target_lib, plain_sk, secret):
        """Encrypt *secret* with a plaintext key (no enclave sealing involved).

        :param target_lib: cffi library handle exposing crypto_legacy_encrypt()
        :param plain_sk: Secret holding the plaintext key
        :param secret: Secret holding the plaintext to encrypt
        :returns: blob b64(iv) + b64(mac) + b64(ciphertext)
        """
        try:
            # Output buffers the native call fills in.
            out_iv = self.ffi.new("uint8_t[]", self.iv)
            out_mac = self.ffi.new("uint8_t[]", self.mac)
            ciphertext = self.ffi.new("uint8_t[]", secret.length)
            target_lib.crypto_legacy_encrypt(plain_sk.value, plain_sk.length, secret.value, secret.length, ciphertext, out_iv, out_mac)
            encoded = [base64.b64encode(self.ffi.buffer(part)) for part in (out_iv, out_mac, ciphertext)]
            return encoded[0] + encoded[1] + encoded[2]
        except Exception as e:
            LOG.error("ERROR: Encryption of the secret failed!")
            raise e

    def encrypt(self, target_lib, enclave_id, sealed_sk, secret):
        """Encrypt *secret* inside the enclave under the sealed session key.

        :param target_lib: cffi library handle exposing crypto_encrypt()
        :param enclave_id: id of the local enclave
        :param sealed_sk: Secret holding the sealed session key
        :param secret: Secret holding the plaintext to encrypt
        :returns: blob b64(iv) + b64(mac) + b64(ciphertext)
        """
        try:
            # Output buffers the native call fills in.
            out_iv = self.ffi.new("uint8_t[]", self.iv)
            out_mac = self.ffi.new("uint8_t[]", self.mac)
            sealed_len = sealed_sk.length
            sealed_buf = self.ffi.from_buffer(base64.b64decode(sealed_sk.value))
            ciphertext = self.ffi.new("uint8_t[]", secret.length)
            target_lib.crypto_encrypt(enclave_id, sealed_buf, sealed_len, secret.value, secret.length, ciphertext, out_iv, out_mac)
            encoded = [base64.b64encode(self.ffi.buffer(part)) for part in (out_iv, out_mac, ciphertext)]
            return encoded[0] + encoded[1] + encoded[2]
        except Exception as e:
            LOG.error("ERROR: Encryption of the secret failed!")
            raise e

    def decrypt(self, target_lib, enclave_id, sealed_sk, enc_secret):
        """Decrypt an iv/mac-prefixed blob inside the enclave.

        :param target_lib: cffi library handle exposing crypto_decrypt()
        :param enclave_id: id of the local enclave
        :param sealed_sk: Secret holding the sealed session key
        :param enc_secret: blob b64(iv) + b64(mac) + b64(ciphertext)
        :returns: base64-encoded plaintext
        """
        try:
            # Base64-encoded sizes of the iv (12 raw bytes) and mac (16 raw
            # bytes) prefixes.
            b64_iv = 16
            b64_mac = 24
            iv = self.ffi.from_buffer(base64.b64decode(enc_secret[:b64_iv]))
            mac = self.ffi.from_buffer(base64.b64decode(enc_secret[b64_iv:(b64_iv + b64_mac)]))
            enc_secret = self.ffi.from_buffer(base64.b64decode(enc_secret[(b64_iv + b64_mac):]))
            length = len(enc_secret)
            sealed_len = sealed_sk.length
            sealed_sk = self.ffi.from_buffer(base64.b64decode(sealed_sk.value))
            # Plaintext has the same length as the ciphertext.
            secret = self.ffi.new("uint8_t[]", length)
            target_lib.crypto_decrypt(enclave_id, sealed_sk, sealed_len, secret, length, enc_secret, iv, mac, self.ffi.NULL, 0)
            return base64.b64encode(self.ffi.buffer(secret))
        except Exception as e:
            LOG.error("ERROR: Decryption of the secret failed!")
            raise e

    def transport(self, target_lib, enclave_id, sealed_kek, sealed_sk, project_id=None):
        """Re-encrypt a sealed secret under a sealed KEK for transport.

        :param target_lib: cffi library handle exposing crypto_transport_secret()
        :param enclave_id: id of the local enclave
        :param sealed_kek: Secret holding the sealed key-encryption key
        :param sealed_sk: Secret holding the sealed secret to transport
        :param project_id: optional project id (NULL when absent)
        :returns: blob b64(iv) + b64(mac) + b64(kek-encrypted secret)
        """
        try:
            if project_id is None:
                project_id = self.ffi.NULL
                proj_id_len = 0
            else:
                proj_id_len = len(project_id)
            # Output buffers the native call fills in.
            iv = self.ffi.new("uint8_t[]", self.iv)
            mac = self.ffi.new("uint8_t[]", self.mac)
            sealed_kek_len = sealed_kek.length
            sealed_kek = self.ffi.from_buffer(base64.b64decode(sealed_kek.value))
            sealed_sk_len = sealed_sk.length
            sealed_sk = self.ffi.from_buffer(base64.b64decode(sealed_sk.value))
            # Plaintext size of the sealed secret, as reported by the enclave.
            sk_len = target_lib.get_encrypted_len(enclave_id, sealed_sk, sealed_sk_len)
            kek_sk = self.ffi.new("uint8_t[]", sk_len)
            target_lib.crypto_transport_secret(enclave_id, sealed_kek, sealed_kek_len, sealed_sk, sealed_sk_len, kek_sk, sk_len, iv, mac, project_id, proj_id_len)
            return base64.b64encode(self.ffi.buffer(iv)) + base64.b64encode(self.ffi.buffer(mac)) + base64.b64encode(self.ffi.buffer(kek_sk))
        except Exception as e:
            LOG.error("Error in transporting the secret")
            raise e

    #no need for target lib, server action only
    def kek_encrypt(self, enclave_id, kek_sk, sealed_kek, sk_secret, project_id=None):
        """Re-encrypt a session-key-encrypted secret under the KEK (server side).

        :param enclave_id: id of the server enclave
        :param kek_sk: blob b64(iv) + b64(mac) + b64(kek-encrypted session key)
        :param sealed_kek: Secret holding the sealed KEK
        :param sk_secret: blob b64(iv) + b64(mac) + b64(sk-encrypted secret)
        :param project_id: optional project id (NULL when absent)
        :returns: blob b64(iv) + b64(mac) + b64(kek-encrypted secret)
        """
        try:
            if project_id is None:
                project_id = self.ffi.NULL
                proj_id_len = 0
            else:
                proj_id_len = len(project_id)
            # Base64-encoded sizes of the iv/mac prefixes in the blobs.
            b64_iv = 16
            b64_mac = 24
            iv1 = self.ffi.from_buffer(base64.b64decode(kek_sk[:b64_iv]))
            mac1 = self.ffi.from_buffer(base64.b64decode(kek_sk[b64_iv:(b64_iv + b64_mac)]))
            kek_sk = self.ffi.from_buffer(base64.b64decode(kek_sk[(b64_iv + b64_mac):]))
            sealed_kek_len = sealed_kek.length
            sealed_kek = self.ffi.from_buffer(base64.b64decode(sealed_kek.value))
            iv = self.ffi.from_buffer(base64.b64decode(sk_secret[:b64_iv]))
            mac = self.ffi.from_buffer(base64.b64decode(sk_secret[b64_iv:(b64_iv + b64_mac)]))
            sk_secret = self.ffi.from_buffer(base64.b64decode(sk_secret[(b64_iv + b64_mac):]))
            length = len(sk_secret)
            kek_secret = self.ffi.new("uint8_t[]", length)
            # BUGFIX: pass project_id directly. Wrapping it in str() turned
            # ffi.NULL into its repr string ("<cdata ...>") instead of a real
            # NULL pointer; sibling methods (transport, provision_kek) pass it
            # unmodified.
            self.barbie_s.crypto_store_secret(enclave_id, kek_sk, len(kek_sk), iv1, mac1, sealed_kek, sealed_kek_len, sk_secret, length, kek_secret, length, iv, mac, project_id, proj_id_len)
            return base64.b64encode(self.ffi.buffer(iv)) + base64.b64encode(self.ffi.buffer(mac)) + base64.b64encode(self.ffi.buffer(kek_secret))
        except Exception as e:
            LOG.error("Error in encrypting the secret with kek")
            raise e

    #no need for target lib, server action only
    def kek_decrypt(self, enclave_id, kek_sk, sealed_kek, kek_secret, project_id=None):
        """Re-encrypt a KEK-encrypted secret back under the session key (server side).

        Inverse of kek_encrypt().

        :param enclave_id: id of the server enclave
        :param kek_sk: blob b64(iv) + b64(mac) + b64(kek-encrypted session key)
        :param sealed_kek: Secret holding the sealed KEK
        :param kek_secret: blob b64(iv) + b64(mac) + b64(kek-encrypted secret)
        :param project_id: optional project id (NULL when absent)
        :returns: blob b64(iv) + b64(mac) + b64(sk-encrypted secret)
        """
        try:
            if project_id is None:
                project_id = self.ffi.NULL
                proj_id_len = 0
            else:
                proj_id_len = len(project_id)
            # Base64-encoded sizes of the iv/mac prefixes in the blobs.
            b64_iv = 16
            b64_mac = 24
            iv1 = self.ffi.from_buffer(base64.b64decode(kek_sk[:b64_iv]))
            mac1 = self.ffi.from_buffer(base64.b64decode(kek_sk[b64_iv:(b64_iv + b64_mac)]))
            kek_sk = self.ffi.from_buffer(base64.b64decode(kek_sk[(b64_iv + b64_mac):]))
            sealed_kek_len = sealed_kek.length
            sealed_kek = self.ffi.from_buffer(base64.b64decode(sealed_kek.value))
            iv = self.ffi.from_buffer(base64.b64decode(kek_secret[:b64_iv]))
            mac = self.ffi.from_buffer(base64.b64decode(kek_secret[b64_iv:(b64_iv + b64_mac)]))
            kek_secret = self.ffi.from_buffer(base64.b64decode(kek_secret[(b64_iv + b64_mac):]))
            length = len(kek_secret)
            sk_secret = self.ffi.new("uint8_t[]", length)
            # BUGFIX: pass project_id directly instead of str(project_id); see
            # kek_encrypt() -- str(ffi.NULL) produced a repr string rather than
            # a NULL pointer.
            self.barbie_s.crypto_get_secret(enclave_id, kek_sk, len(kek_sk), iv1, mac1, sealed_kek, sealed_kek_len, kek_secret, length, sk_secret, length, iv, mac, project_id, proj_id_len)
            return base64.b64encode(self.ffi.buffer(iv)) + base64.b64encode(self.ffi.buffer(mac)) + base64.b64encode(self.ffi.buffer(sk_secret))
        except Exception as e:
            LOG.error("Error in decrypting the secret with kek")
            raise e

    def get_mk_mr_list(self, target_lib, enclave_id, mk_sk, sealed_mk, sk_mr_list, project_id, project_id_len, mk_mr_list=None):
        """Convert a session-key-encrypted MR list into a master-key-encrypted one.

        Optionally merges with an existing mk-encrypted MR list.

        :param mk_sk: blob b64(iv) + b64(mac) + b64(mk-encrypted session key)
        :param sealed_mk: Secret holding the sealed master key
        :param sk_mr_list: blob b64(iv) + b64(mac) + b64(sk-encrypted MR list)
        :param mk_mr_list: optional existing mk-encrypted MR list blob
        :returns: blob b64(iv) + b64(mac) + b64(mk-encrypted MR list)
        """
        try:
            # Base64-encoded sizes of the iv/mac prefixes in the blobs.
            b64_iv = 16
            b64_mac = 24
            iv1 = self.ffi.from_buffer(base64.b64decode(mk_sk[:b64_iv]))
            mac1 = self.ffi.from_buffer(base64.b64decode(mk_sk[b64_iv:(b64_iv + b64_mac)]))
            mk_sk = self.ffi.from_buffer(base64.b64decode(mk_sk[(b64_iv + b64_mac):]))
            sealed_mk_len = sealed_mk.length
            sealed_mk = self.ffi.from_buffer(base64.b64decode(sealed_mk.value))
            iv2 = self.ffi.from_buffer(base64.b64decode(sk_mr_list[:b64_iv]))
            mac2 = self.ffi.from_buffer(base64.b64decode(sk_mr_list[b64_iv:(b64_iv + b64_mac)]))
            sk_mr_list = self.ffi.from_buffer(base64.b64decode(sk_mr_list[(b64_iv + b64_mac):]))
            # Output iv/mac for the re-encrypted list.
            iv = self.ffi.new("uint8_t[]", self.iv)
            mac = self.ffi.new("uint8_t[]", self.mac)
            if mk_mr_list is None:
                mk_mr_list = self.ffi.NULL
                iv3 = self.ffi.NULL
                mac3 = self.ffi.NULL
                mk_mr_list_len = 0
            else:
                iv3 = self.ffi.from_buffer(base64.b64decode(mk_mr_list[:b64_iv]))
                mac3 = self.ffi.from_buffer(base64.b64decode(mk_mr_list[b64_iv:(b64_iv + b64_mac)]))
                mk_mr_list = self.ffi.from_buffer(base64.b64decode(mk_mr_list[(b64_iv + b64_mac):]))
                mk_mr_list_len = len(mk_mr_list)
            new_mk_mr_list = self.ffi.new("uint8_t[]", len(sk_mr_list))
            sk_mr_list_len = len(sk_mr_list)
            target_lib.get_mk_mr_list(enclave_id, sealed_mk, sealed_mk_len, mk_sk, sk_mr_list, sk_mr_list_len, project_id, len(project_id), mk_mr_list, mk_mr_list_len, new_mk_mr_list, iv1, mac1, iv2, mac2, iv3, mac3, iv, mac)

            return base64.b64encode(self.ffi.buffer(iv)) + base64.b64encode(self.ffi.buffer(mac)) + base64.b64encode(self.ffi.buffer(new_mk_mr_list))
        except Exception as e:
            # BUGFIX: "..." + e raised TypeError (str + Exception) inside the
            # handler, masking the original error; use lazy %-formatting.
            LOG.error("Error generating mk_mr_list: %s", e)
            raise e

    def get_sk_data(self, target_lib, enclave_id, mk_sk, sealed_mk, mk_data, project_id, project_id_len):
        """Convert master-key-encrypted data into session-key-encrypted data.

        :param mk_sk: blob b64(iv) + b64(mac) + b64(mk-encrypted session key)
        :param sealed_mk: Secret holding the sealed master key
        :param mk_data: blob b64(iv) + b64(mac) + b64(mk-encrypted data)
        :returns: blob b64(iv) + b64(mac) + b64(sk-encrypted data)
        """
        try:
            # Base64-encoded sizes of the iv/mac prefixes in the blobs.
            b64_iv = 16
            b64_mac = 24
            iv1 = self.ffi.from_buffer(base64.b64decode(mk_sk[:b64_iv]))
            mac1 = self.ffi.from_buffer(base64.b64decode(mk_sk[b64_iv:(b64_iv + b64_mac)]))
            mk_sk = self.ffi.from_buffer(base64.b64decode(mk_sk[(b64_iv + b64_mac):]))
            sealed_mk_len = sealed_mk.length
            sealed_mk = self.ffi.from_buffer(base64.b64decode(sealed_mk.value))
            iv2 = self.ffi.from_buffer(base64.b64decode(mk_data[:b64_iv]))
            mac2 = self.ffi.from_buffer(base64.b64decode(mk_data[b64_iv:(b64_iv + b64_mac)]))
            mk_data = self.ffi.from_buffer(base64.b64decode(mk_data[(b64_iv + b64_mac):]))
            mk_data_len = len(mk_data)
            # Output iv/mac plus a ciphertext buffer the same size as the input.
            iv = self.ffi.new("uint8_t[]", self.iv)
            mac = self.ffi.new("uint8_t[]", self.mac)
            sk_data = self.ffi.new("uint8_t[]", len(mk_data))
            target_lib.get_sk_data(enclave_id, sealed_mk, sealed_mk_len, mk_sk, mk_data, mk_data_len, project_id, len(project_id), sk_data, iv1, mac1, iv2, mac2, iv, mac)

            return base64.b64encode(self.ffi.buffer(iv)) + base64.b64encode(self.ffi.buffer(mac)) + base64.b64encode(self.ffi.buffer(sk_data))
        except Exception as e:
            # BUGFIX: "..." + e raised TypeError (str + Exception) inside the
            # handler, masking the original error; use lazy %-formatting.
            LOG.error("Error generating sk_data: %s", e)
            raise e

    def compare_secret(self, target_lib, secret1, secret2, secret_len):
        """Compare the first *secret_len* bytes of two base64-encoded secrets.

        Delegates the byte comparison to the native crypto_cmp().

        :returns: True when the secrets match, False otherwise
        """
        try:
            buf_one = self.ffi.from_buffer(base64.b64decode(secret1))
            buf_two = self.ffi.from_buffer(base64.b64decode(secret2))
            return target_lib.crypto_cmp(buf_one, buf_two, secret_len) == 0
        except Exception as e:
            LOG.error("Error in comparing the secrets")
            raise e

    def compare_sealed_secret(self, target_lib, encalve_id, secret1, secret2):
        """Compare two base64-encoded sealed secrets inside the enclave.

        :returns: True when the unsealed secrets match, False otherwise
        :raises Exception: wrapping any underlying failure
        """
        try:
            blob_one = self.ffi.from_buffer(base64.b64decode(secret1))
            blob_two = self.ffi.from_buffer(base64.b64decode(secret2))
            result = target_lib.crypto_sealed_cmp(encalve_id, blob_one, len(blob_one), blob_two, len(blob_two))
            return result == 0
        except Exception as e:
            raise Exception("Error in comparing the sealed secrets", e)

    def destroy_enclave(self, target_lib, enclave_id):
        """Tear down the enclave. Failures are logged and deliberately swallowed
        so cleanup never propagates an error to the caller."""
        try:
            target_lib.destroy_enclave(enclave_id)
        except Exception:
            # Best-effort teardown: log and continue (original kept raise
            # commented out on purpose).
            LOG.error("Error in destroying enclave!")

    def write_buffer_to_file(self, filename, buff):
        """Write *buff* into *filename*, resolved relative to this module's
        directory."""
        try:
            target = os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)
            with open(target, 'w') as out:
                out.write(buff)
        except Exception as e:
            LOG.error("Error writing buffer to file!")
            raise e

    def read_buffer_from_file(self, filename):
        """Return the contents of *filename*, resolved relative to this
        module's directory.

        Returns None (implicitly) when the file does not exist.
        """
        try:
            dir_path = os.path.dirname(os.path.realpath(__file__))
            read_file = os.path.join(dir_path, filename)
            # read_file is already an absolute path, so one join suffices (the
            # original redundantly joined dir_path a second time).
            if os.path.exists(read_file):
                with open(read_file, 'r') as f:
                    return f.read()
        except Exception as e:
            LOG.error("Error reading buffer from file!")
            raise e

    def get_mr_enclave(self, msg3):
        """Extract the 32-byte MRENCLAVE measurement from attestation msg3.

        :param msg3: base64-encoded msg3
        :returns: base64-encoded 32-byte MRENCLAVE value
        """
        try:
            msg3 = self.ffi.from_buffer(base64.b64decode(msg3))
            mr_e = self.barbie_s.get_mr_e(msg3)
            # Read exactly 32 bytes: the measurement is fixed-size binary and
            # must not rely on NUL termination.
            return base64.b64encode(self.ffi.buffer(mr_e, 32))
        except Exception as e:
            # Fixed typo in the original log message ("retrieveing").
            LOG.error("Error in retrieving mr enclave")
            raise e

    def get_mr_signer(self, msg3):
        """Extract the 32-byte MRSIGNER measurement from attestation msg3.

        :param msg3: base64-encoded msg3
        :returns: base64-encoded 32-byte MRSIGNER value
        """
        try:
            msg3 = self.ffi.from_buffer(base64.b64decode(msg3))
            mr_s = self.barbie_s.get_mr_s(msg3)
            # Read exactly 32 bytes: the measurement is fixed-size binary and
            # must not rely on NUL termination.
            return base64.b64encode(self.ffi.buffer(mr_s, 32))
        except Exception as e:
            # Fixed typo in the original log message ("retrieveing").
            LOG.error("Error in retrieving mr signer")
            raise e

    def get_report_sha256(self, target_lib, msg3):
        """Return the base64-encoded SHA-256 report hash computed from msg3."""
        try:
            raw_msg3 = self.ffi.from_buffer(base64.b64decode(msg3))
            digest = self.ffi.new("uint8_t []", 32)
            target_lib.get_report_sha256(raw_msg3, digest)
            return base64.b64encode(self.ffi.buffer(digest))
        except Exception as e:
            LOG.error("Error in calculating SHA256")
            raise e

    def test_legacy_client(self):
        """End-to-end self-test of legacy (non-attested) secret management:
        generate a sealed key, encrypt a secret under it, decrypt it back and
        verify the round trip. Always destroys the enclave afterwards."""
        try:

            #plain_secret = "my-private-secre"
            secret = "This-Is-My-Private-Secret"
            plain_secret = Secret(secret, len(secret))

            enclave_id = self.init_enclave(self.barbie_s)

            #To simulate KEK of server side
            sealed_kek = self.generate_key(self.barbie_s, enclave_id, 16)

            enc_secret = self.encrypt(self.barbie_s, enclave_id, sealed_kek, plain_secret)

            r_secret = self.decrypt(self.barbie_s, enclave_id, sealed_kek, enc_secret)
            r_secret = base64.b64decode(r_secret)

            if r_secret == secret:
                print "Legacy Client : Secret Management done!"
            else:
                print "Legacy Client : Secret Management failed!"

        finally:
            # Always release the enclave, even when the round trip fails.
            self.destroy_enclave(self.barbie_s, enclave_id)


    def test_sgx_client_wo_sgx_hw(self, spid=None, crt_path=None):
        try:
            s_eid = self.init_enclave(self.barbie_s)

            plain_sk = Secret("", len(""))

            #Perform attestation
            ret, msg0 = self.gen_msg0(self.barbie_s, spid)

            p_ctxt, msg1 = self.gen_msg1(self.barbie_s, s_eid)
            print "gen_msg1 returned: " + msg1

            ret, p_net_ctxt = self.proc_msg0(self.barbie_c, msg0, spid, False)
            msg2 = self.proc_msg1_gen_msg2(self.barbie_c, msg1, p_net_ctxt)
            print "send_msg1_recv_msg2 returned: " + msg2

            msg3, crt, sig, resp_body = self.proc_msg2_gen_msg3(self.barbie_s, s_eid, msg2, p_ctxt, crt_path, False)
            print "proc_msg2_gen_msg3 returned: " + msg3

            msg4 = self.legacy_proc_msg3_gen_msg4(self.barbie_c, msg3, p_net_ctxt, plain_sk , "sgx_wo_hw", crt_path, False)
            print "send_msg3_recv_msg4 returned: " + str(msg4)

            status, s_dh = self.get_dh_key(self.barbie_s, s_eid, msg4, p_ctxt)
            print "get_dh_key returned: " + str(status)

            proj_id, proj_id_len = self.get_project_id(self.barbie_s, s_eid, msg4, p_ctxt)

            s_sk = self.generate_key(self.barbie_s, s_eid, 16)
            plain_kek_len = 16
            sealed_len = self.barbie_s.get_sealed_data_len( s_eid, 0, plain_kek_len)
            dh_sk = self.transport(self.barbie_s, s_eid, Secret(s_dh, sealed_len), s_sk ,None)
            plain_sk = self.get_sk(self.barbie_c, p_net_ctxt, dh_sk)
            #status, plain_sk = self.get_sk(self.barbie_c, p_net_ctxt, 16, dh_sk)
            #status, sk = self.proc_msg4(self.barbie_s, s_eid, msg4, p_ctxt)
            #sealed_sk = Secret(sk, sealed_len)

            #Perform kek provisioning
            kek = "yek etyb neetxis"
            plain_kek = Secret(kek, len(kek))

            sk_kek = self.legacy_encrypt(self.barbie_c, plain_sk, plain_kek)

            kek = self.provision_kek(self.barbie_s, s_eid, s_sk, sk_kek, None)
            plain_kek_len = 16
            sealed_len = self.barbie_s.get_sealed_data_len(s_eid, 0, plain_kek_len)
            sealed_kek = Secret(kek, sealed_len)

            kek_sk = self.transport(self.barbie_c, s_eid, sealed_kek, s_sk, proj_id)

            #Perform secret management
            secret = "my-private-secret"
            plain_secret = Secret(secret, len(secret))

            sk_secret = self.legacy_encrypt(self.barbie_c, plain_sk, plain_secret)

            kek_secret = self.kek_encrypt(s_eid, kek_sk, sealed_kek, sk_secret, "sgx_wo_hw")

            rec = self.kek_decrypt(s_eid, kek_sk, sealed_kek, kek_secret, "sgx_wo_hw")

            if self.compare_secret(self.barbie_c, rec[40:], sk_secret[40:], plain_secret.length):
                print "SGX Aware Client Without SGX hardware : Secret Management done!"
            else:
                print "SGX Aware Cliwnt Without SGX hardware : Secret Management failed!"

        finally:
            self.destroy_enclave(self.barbie_s, s_eid)
Example #10
0
class Gringotts(object):
    """Thin cffi wrapper around the libGringotts encryption library.

    By default the constructor dlopens ``libgringotts.so`` (``.dll`` on
    Windows) and declares the library's public C API.  Alternatively an
    already-initialized ffi/lib pair can be injected, so several wrapper
    instances can share a single loaded library (see new_fcs_user_handle).
    """

    def __init__(self, ffi=None, lib=None):
        if ffi:
            # Reuse an already-initialized FFI and library handle.
            self.ffi = ffi
            self.lib = lib
        else:
            self.ffi = FFI()
            self.lib = self.ffi.dlopen("libgringotts." + ("dll" if (
                platform.system() == 'Windows') else "so"))
            self.ffi.cdef('''
/*  libGringotts - generic data encoding (crypto+compression) library
 *  (c) 2002, Germano Rizzo <*****@*****.**>
 *
 *  libgringotts.h - general header file for libgringotts
 *  Author: Germano Rizzo
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU Library General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

// #include <sys/types.h>

// if you feel a wee bit confused please
// read the manual, tipically found at
// /usr/share/doc/libgringotts-<version>/manual.htm

// TYPEDEFS & ENUMERATIONS

//encryption algorithms
typedef enum
{
    GRG_RIJNDAEL_128 = 0x00,    //00000000
    GRG_AES = 0x00,        //alias for GRG_RIJNDAEL_128
    GRG_SERPENT = 0x10,    //00010000 (default)
    GRG_TWOFISH = 0x20,    //00100000
    GRG_CAST_256 = 0x30,    //00110000
    GRG_SAFERPLUS = 0x40,    //01000000
    GRG_LOKI97 = 0x50,    //01010000
    GRG_3DES = 0x60,    //01100000
    GRG_RIJNDAEL_256 = 0x70    //01110000
}
grg_crypt_algo;

//hashing algorithms
typedef enum
{
    GRG_SHA1 = 0x00,    //00000000
    GRG_RIPEMD_160 = 0x08    //00001000 (default)
}
grg_hash_algo;

//compression algorithm
typedef enum
{
    GRG_ZLIB = 0x00,    //00000000 (default)
    GRG_BZIP = 0x04        //00000100
}
grg_comp_algo;

//compression level
typedef enum
{
    GRG_LVL_NONE = 0x00,    //00000000
    GRG_LVL_FAST = 0x01,    //00000001
    GRG_LVL_GOOD = 0x02,    //00000010
    GRG_LVL_BEST = 0x03    //00000011 (default)
}
grg_comp_ratio;

//security level
typedef enum
{
    GRG_SEC_NORMAL,        //default
    GRG_SEC_PARANOIA
}
grg_security_lvl;

// ERROR CODES

//I/O Ok
#define GRG_OK                            0

//I/O Errors
//error codes in writing
#define GRG_WRITE_COMP_ERR                -2
#define GRG_WRITE_ENC_INIT_ERR            -4
#define GRG_WRITE_FILE_ERR                -6
//unused since 1.2.1 (don't use!)        -8
#define GRG_TMP_NOT_WRITEABLE            -10

//error codes in reading
#define GRG_READ_FILE_ERR                -1
#define GRG_READ_MMAP_ERR                -19
#define GRG_READ_MAGIC_ERR                -3
#define GRG_READ_CRC_ERR                -5
#define GRG_READ_PWD_ERR                -7
#define GRG_READ_ENC_INIT_ERR            -9
//unused since 1.2.1 (don't use!)        -11
#define GRG_READ_UNSUPPORTED_VERSION    -13
#define GRG_READ_COMP_ERR                -15
#define GRG_TMP_NOT_YET_WRITTEN            -17

//error codes in file shredding
#define    GRG_SHRED_CANT_OPEN_FILE        -51
#define GRG_SHRED_YET_LINKED            -52
#define GRG_SHRED_CANT_MMAP                -53

//generic error codes
#define GRG_MEM_ALLOCATION_ERR            -71
#define GRG_ARGUMENT_ERR                -72

typedef struct _grg_context *GRG_CTX;
typedef struct _grg_key *GRG_KEY;
typedef struct _grg_tmpfile *GRG_TMPFILE;

// General purpose functions

char *grg_get_version (void);
unsigned int grg_get_int_version (void);

// Security related functions

unsigned char *grg_rnd_seq (const GRG_CTX gctx, const unsigned int size);
void grg_rnd_seq_direct (const GRG_CTX gctx, unsigned char *toOverwrite,
    const unsigned int size);
unsigned char grg_rnd_chr (const GRG_CTX gctx);
void grg_free (const GRG_CTX gctx, void *alloc_data, const long dim);
double grg_ascii_pwd_quality (const char *pwd, const long pwd_len);
double grg_file_pwd_quality (const char *pwd_path);

// libGringotts context (GRG_CTX) related functions

GRG_CTX grg_context_initialize (const char *header,
                const grg_crypt_algo crypt_algo, const grg_hash_algo hash_algo,
                const grg_comp_algo comp_algo, const grg_comp_ratio comp_lvl,
                const grg_security_lvl sec_lvl);
GRG_CTX grg_context_initialize_defaults (const char *header);
void grg_context_free (GRG_CTX gctx);

grg_crypt_algo grg_ctx_get_crypt_algo (const GRG_CTX gctx);
grg_hash_algo grg_ctx_get_hash_algo (const GRG_CTX gctx);
grg_comp_algo grg_ctx_get_comp_algo (const GRG_CTX gctx);
grg_comp_ratio grg_ctx_get_comp_ratio (const GRG_CTX gctx);
grg_security_lvl grg_ctx_get_security_lvl (const GRG_CTX gctx);

void grg_ctx_set_crypt_algo (GRG_CTX gctx, const grg_crypt_algo crypt_algo);
void grg_ctx_set_hash_algo (GRG_CTX gctx, const grg_hash_algo hash_algo);
void grg_ctx_set_comp_algo (GRG_CTX gctx, const grg_comp_algo comp_algo);
void grg_ctx_set_comp_ratio (GRG_CTX gctx, const grg_comp_ratio comp_ratio);
void grg_ctx_set_security_lvl (GRG_CTX gctx,
                   const grg_security_lvl sec_level);

unsigned int grg_get_key_size_static (const grg_crypt_algo crypt_algo);
unsigned int grg_get_key_size (const GRG_CTX gctx);
unsigned int grg_get_block_size_static (const grg_crypt_algo crypt_algo);
unsigned int grg_get_block_size (const GRG_CTX gctx);

// libGringotts keyholder (GRG_KEY) related functions

GRG_KEY grg_key_gen (const char *pwd, const int pwd_len);
GRG_KEY grg_key_clone (const GRG_KEY src);
int grg_key_compare (const GRG_KEY k1, const GRG_KEY k2);
void grg_key_free (const GRG_CTX gctx, GRG_KEY key);

// File encryption/decryption functions
int grg_validate_file (const GRG_CTX gctx, const char *path);
int grg_update_gctx_from_file (GRG_CTX gctx, const char *path);
int grg_decrypt_file (const GRG_CTX gctx, const GRG_KEY keystruct,
              const char *path, unsigned char **origData,
              long *origDim);
int grg_encrypt_file (const GRG_CTX gctx, const GRG_KEY keystruct,
              const char *path,
              const unsigned char *origData, const long origDim);

// Their "direct" versions, requiring a file descriptor instead of a path
int grg_validate_file_direct (const GRG_CTX gctx, const int fd);
int grg_update_gctx_from_file_direct (GRG_CTX gctx, const int fd);
int grg_decrypt_file_direct (const GRG_CTX gctx, const GRG_KEY keystruct,
                 const int fd, unsigned char **origData,
                 long *origDim);
int grg_encrypt_file_direct (const GRG_CTX gctx, const GRG_KEY keystruct,
                 const int fd, const unsigned char *origData,
                 const long origDim);

// Memory encryption/decryption functions
int grg_validate_mem (const GRG_CTX gctx, const void *mem, const long memDim);
int grg_update_gctx_from_mem (GRG_CTX gctx, const void *mem,
                  const long memDim);
int grg_decrypt_mem (const GRG_CTX gctx, const GRG_KEY keystruct,
             const void *mem, const long memDim,
             unsigned char **origData, long *origDim);
int grg_encrypt_mem (const GRG_CTX gctx, const GRG_KEY keystruct, void **mem,
             long *memDim, const unsigned char *origData,
             const long origDim);

// Encrypted temporary files functions
GRG_TMPFILE grg_tmpfile_gen (const GRG_CTX gctx);
int grg_tmpfile_write (const GRG_CTX gctx, GRG_TMPFILE tf,
               const unsigned char *data, const long data_len);
int grg_tmpfile_read (const GRG_CTX gctx, const GRG_TMPFILE tf,
              unsigned char **data, long *data_len);
void grg_tmpfile_close (const GRG_CTX gctx, GRG_TMPFILE tf);

// Miscellaneous file functions
unsigned char *grg_encode64 (const unsigned char *in,
                 const int inlen, unsigned int *outlen);
unsigned char *grg_decode64 (const unsigned char *in,
                 const int inlen, unsigned int *outlen);

int grg_file_shred (const char *path, const int npasses);

''')

    def new_fcs_user_handle(self):
        """Return a new wrapper sharing this instance's ffi and library."""
        return self.__class__(ffi=self.ffi, lib=self.lib)

    def test1(self):
        """Smoke-test: decrypt ./tests/data/rindolf.grg with the password
        "rindolf24" and assert the plaintext contains the expected body.

        NOTE: relies on ``self.unittest`` being attached by the caller
        (it is never set in __init__) — TODO confirm the harness does so.
        """
        gctx = self.lib.grg_context_initialize_defaults("GRG".encode('ascii'))
        key = self.lib.grg_key_gen("rindolf24".encode('utf-8'), -1)
        print(key)
        fd = os.open("./tests/data/rindolf.grg", os.O_RDONLY)
        print(fd)
        try:
            # Out-parameters: the library allocates the plaintext buffer and
            # reports its length.
            text = self.ffi.new('unsigned char * *', self.ffi.NULL)
            length = self.ffi.new('long *', 0)
            errcode = self.lib.grg_decrypt_file_direct(gctx, key, fd, text, length)
            print(errcode)
            text_str = text[0]
            # Copy the plaintext into Python-owned bytes before freeing the
            # library-owned buffer.
            text_buf = bytes(self.ffi.buffer(text_str, length[0]))
            self.lib.grg_free(gctx, text_str, -1)
        finally:
            # BUG FIX: the descriptor and key leaked if decryption raised.
            os.close(fd)
            self.lib.grg_key_free(gctx, key)

        self.unittest.assertTrue(
            "<body>Go forth.</body>" in text_buf.decode('utf-8'))
        print(text_buf.decode('utf-8'))
        print(gctx)
        self.lib.grg_context_free(gctx)
Example #11
0
from cffi import FFI
ffi = FFI()
ffi.cdef("""
    typedef struct {
        unsigned char r, g, b, alpha;
        bool exclude_me;
        int* a;
    } pixel_t;
""")

# Rough per-image size estimate: 4 color/flag bytes + padded bool + pointer.
# (Single-argument print() form works under both Python 2 and 3.)
print(800*600*(4+4+8))

image = ffi.new("pixel_t[]", 800*600)
# BUG FIX: per the cffi ownership rules, the object returned by ffi.new()
# owns its memory and is freed as soon as the last *Python* reference to it
# goes away.  Assigning it directly into a struct field kept no Python
# reference, so the int[5] was collected immediately, leaving image[100].a
# dangling.  Keep an explicit reference alive for as long as it is used.
a_keepalive = ffi.new("int[]", 5)
image[100].a = a_keepalive

#f = open('data', 'rb')     # binary mode -- important
#f.readinto(ffi.buffer(image))
#f.close()

image[100].r = 255
image[100].g = 192
image[100].b = 128
image[100].exclude_me = True
image[100].a[3] = 23

# Dump the raw struct array; the context manager guarantees the file is
# closed even if the write fails.
with open('data', 'wb') as f:
    f.write(ffi.buffer(image))
Example #12
0
def _convert_map(map_data_path, multiplayer_path, bitmaps_pc_path,
                 bitmaps_ce_path, sounds_pc_path, sounds_ce_path, destination,
                 combustion_lib_path):
    """Convert a map with the combustion dynamic library.

    Loads the library's C declarations from ``combustion.h``, wraps the map
    and resource files in cffi buffers, asks the library for the converted
    map's size, runs the conversion, and writes the result to
    ``destination``.

    Raises:
        Exception: if the library reports a zero-length converted map.
    """
    # Initialize the FFI
    ffi = FFI()
    # Use the c header (context manager so the handle is closed promptly)
    with open(os.path.join(os.path.dirname(__file__),
                           'combustion.h')) as header:
        ffi.cdef(header.read())
    # Import the dynamic library
    libcombustion_r = ffi.dlopen(combustion_lib_path)

    map_data = _create_read_only_buffer_tuple(ffi, map_data_path)

    # @todo multiplayer_path is not used yet; pass an empty buffer for now.
    multiplayer = (ffi.from_buffer(ctypes.create_string_buffer(0)), 0)

    bitmaps_pc = _create_read_only_buffer_tuple(ffi, bitmaps_pc_path)
    sounds_pc = _create_read_only_buffer_tuple(ffi, sounds_pc_path)
    bitmaps_ce = _create_read_only_buffer_tuple(ffi, bitmaps_ce_path)
    sounds_ce = _create_read_only_buffer_tuple(ffi, sounds_ce_path)

    converted_map_len = libcombustion_r.convert_map_cd_len(
        map_data[0],
        map_data[1],
        multiplayer[0],
        multiplayer[1],
        bitmaps_pc[0],
        bitmaps_pc[1],
        bitmaps_ce[0],
        bitmaps_ce[1],
        sounds_pc[0],
        sounds_pc[1],
        sounds_ce[0],
        sounds_ce[1],
        # This last argument is needed to ensure that cffi passes the rest of the arguments correctly.
        # I don't know why.
        0)

    # BUG FIX: `is 0` compared object identity (and warns on Python 3.8+);
    # also `map_name` was an undefined name, so raising here crashed with a
    # NameError instead of the intended message.
    if converted_map_len == 0:
        raise Exception("ERROR!! No data to write to {}!".format(destination))

    map_buffer = _create_writable_buffer_tuple(ffi, converted_map_len)

    converted_map_len = libcombustion_r.convert_map_cd(
        map_buffer[1],
        map_buffer[2],
        map_data[0],
        map_data[1],
        multiplayer[0],
        multiplayer[1],
        bitmaps_pc[0],
        bitmaps_pc[1],
        bitmaps_ce[0],
        bitmaps_ce[1],
        sounds_pc[0],
        sounds_pc[1],
        sounds_ce[0],
        sounds_ce[1],
        # This last argument is needed to ensure that cffi passes the rest of the arguments correctly.
        # I don't know why.
        0)

    # Write the converted map; `with` closes the file even on error.
    with open(destination, 'wb+') as buffer_file:
        buffer_file.write(ffi.buffer(map_buffer[0]))
Example #13
0
from cffi import FFI
ffi = FFI()
ffi.cdef("""
typedef struct {
unsigned char r, g, b;
} pixel_t;
""")
image = ffi.new("pixel_t[]", 800*600)
# binary mode -- important: readinto() fills the raw struct array in place.
# BUG FIX: use context managers so the files are closed even if the
# read/write raises (the original leaked the handles on error).
with open('data', 'rb') as f:
    f.readinto(ffi.buffer(image))

image[100].r = 255
image[100].g = 192
image[100].b = 128
with open('data', 'wb') as f:
    f.write(ffi.buffer(image))
        void (*data_deallocator)(void* data, size_t length);
    } TF_Buffer;

    TF_Buffer* TF_GetAllOpList();
    
    void TF_DeleteBuffer(TF_Buffer*);
""")

print("Loading the library")
TF = ffi.dlopen('../../../tensorflow/lib/libtensorflow.so')

print("Fetching operations list")
ops = TF.TF_GetAllOpList()

opList = op_def_pb2.OpList()
opList.ParseFromString(ffi.buffer(ops.data, ops.length)[:])


def has_tensor_list(op):
    """True if any input or output argument of *op* is a tensor list
    (i.e. carries a ``type_list_attr`` or a ``number_attr``)."""
    for argument in chain(op.input_arg, op.output_arg):
        if argument.type_list_attr or argument.number_attr:
            return True
    return False


def has_supported_types(op):
    for arg in chain(op.input_arg, op.output_arg):
        if arg.type:
            if arg.type not in typetags.keys():
                return False
        else:
            type_attr = next(
                filter(lambda attr: attr.name == arg.type_attr, op.attr))
Example #15
0
class _PcapFfi(object):
    '''
    This class represents the low-level interface to the libpcap library.
    It encapsulates all the cffi calls and C/Python conversions, as well
    as translation of errors and error codes to PcapExceptions.  It is
    intended to be used as a singleton class through the PcapDumper
    and PcapLiveDevice classes, below.
    '''
    _instance = None
    __slots__ = ['_ffi', '_libpcap','_interfaces','_windoze']

    def __init__(self):
        '''
        Assumption: this class is instantiated once in the main thread before
        any other threads have a chance to try instantiating it.
        '''
        if _PcapFfi._instance:
            raise Exception("Can't initialize this class more than once!")

        _PcapFfi._instance = self
        self._windoze = False

        self._ffi = FFI()
        self._ffi.cdef('''
        struct pcap;
        typedef struct pcap pcap_t;
        struct pcap_dumper;
        typedef struct pcap_dumper pcap_dumper_t;
        struct pcap_addr {
            struct pcap_addr *next;
            struct sockaddr *addr;
            struct sockaddr *netmask;
            struct sockaddr *broadaddr;
            struct sockaddr *dstaddr;
        };
        typedef struct pcap_addr pcap_addr_t;
        struct pcap_if {
            struct pcap_if *next;
            char *name;
            char *description;
            pcap_addr_t *addresses;
            int flags;
        };
        typedef struct pcap_if pcap_if_t;

        int pcap_findalldevs(pcap_if_t **, char *);
        void pcap_freealldevs(pcap_if_t *);

        struct pcap_pkthdr {
            long tv_sec;
            long tv_usec;
            unsigned int caplen;
            unsigned int len;
        };

        struct pcap_stat {
            unsigned int recv;
            unsigned int drop;
            unsigned int ifdrop;
        };

        typedef void (*pcap_handler)(unsigned char *, 
                                     const struct pcap_pkthdr *,
                                     const unsigned char *);

        pcap_t *pcap_open_dead(int, int);
        pcap_dumper_t *pcap_dump_open(pcap_t *, const char *);
        void pcap_dump_close(pcap_dumper_t *);
        void pcap_dump(pcap_dumper_t *, struct pcap_pkthdr *, unsigned char *);

        // live capture
        pcap_t *pcap_create(const char *, char *); 
        pcap_t *pcap_open_live(const char *, int, int, int, char *);
        pcap_t *pcap_open_offline(const char *fname, char *errbuf);
        int pcap_set_snaplen(pcap_t *, int);
        int pcap_snapshot(pcap_t *);
        int pcap_set_promisc(pcap_t *, int);

        int pcap_set_timeout(pcap_t *, int);
        int pcap_set_buffer_size(pcap_t *, int);

        int pcap_set_tstamp_precision(pcap_t *, int);
        int pcap_get_tstamp_precision(pcap_t *);
        int pcap_set_tstamp_type(pcap_t *, int);
        int pcap_list_tstamp_types(pcap_t *, int **);
        void pcap_free_tstamp_types(int *);

        int pcap_setdirection(pcap_t *, int); 
        int pcap_datalink(pcap_t *);
        int pcap_setnonblock(pcap_t *, int, char *); 
        int pcap_getnonblock(pcap_t *, char *); 
        int pcap_set_immediate_mode(pcap_t *, int);
        int pcap_next_ex(pcap_t *, struct pcap_pkthdr **, const unsigned char **);
        int pcap_dispatch(pcap_t *, int, pcap_handler, unsigned char *);
        int pcap_loop(pcap_t *, int, pcap_handler, unsigned char *);
        void pcap_breakloop(pcap_t *);
        int pcap_activate(pcap_t *);
        void pcap_close(pcap_t *);
        int pcap_get_selectable_fd(pcap_t *);
        int pcap_sendpacket(pcap_t *, const unsigned char *, int);
        char *pcap_geterr(pcap_t *);
        char *pcap_lib_version();
        int pcap_stats(pcap_t *, struct pcap_stat *);

        struct bpf_insn;
        struct bpf_program {
            unsigned int bf_len;
            struct bpf_insn *bf_insns;
        };
        int pcap_setfilter(pcap_t *, struct bpf_program *);
        int pcap_compile(pcap_t *, struct bpf_program *,
            const char *, int, unsigned int);
        void pcap_freecode(struct bpf_program *);
        ''')
        if sys.platform == 'darwin':
            libname = 'libpcap.dylib'
        elif sys.platform == 'win32':
            libname = 'wpcap.dll' # winpcap
            self._windoze = True
        else:
            # if not macOS (darwin) or windows, assume we're on
            # some unix-based system and try for libpcap.so
            libname = 'libpcap.so'

        try:
            self._libpcap = self._ffi.dlopen(libname)
        except Exception as e:
            raise PcapException("Error opening libpcap: {}".format(e))

        self._interfaces = []
        self.discoverdevs()

    @staticmethod
    def instance():
        '''Return the singleton _PcapFfi, creating it on first use.'''
        if not _PcapFfi._instance:
            _PcapFfi._instance = _PcapFfi()
        return _PcapFfi._instance

    @property
    def version(self):
        '''libpcap version string (as returned by pcap_lib_version; bytes).'''
        return self._ffi.string(self._libpcap.pcap_lib_version())

    def discoverdevs(self):
        '''
        Find all the pcap-eligible devices on the local system.
        '''
        if len(self._interfaces):
            raise PcapException("Device discovery should only be done once.")
            
        ppintf = self._ffi.new("pcap_if_t * *")
        errbuf = self._ffi.new("char []", 128)
        rv = self._libpcap.pcap_findalldevs(ppintf, errbuf)
        if rv:
            raise PcapException("pcap_findalldevs returned failure: {}".format(self._ffi.string(errbuf)))
        pintf = ppintf[0]
        tmp = pintf
        pindex = 0
        # Walk the linked list of pcap_if_t structs.
        while tmp != self._ffi.NULL:
            xname = self._ffi.string(tmp.name) # "internal name"; still stored as bytes object
            xname = xname.decode('ascii', 'ignore')

            if self._windoze:
                ext_name = "port{}".format(pindex)
            else:
                ext_name = xname
            pindex += 1

            if tmp.description == self._ffi.NULL:
                xdesc = ext_name
            else:
                xdesc = self._ffi.string(tmp.description)
                xdesc = xdesc.decode('ascii', 'ignore')

            # NB: on WinPcap, only loop flag is set
            isloop = (tmp.flags & 0x1) == 0x1
            isup = (tmp.flags & 0x2) == 0x2
            isrunning = (tmp.flags & 0x4) == 0x4

            # JS: I've observed that the isup and isrunning flags
            # are patently false on some systems.  As a result, we
            # blindly include all interfaces, regardless of their
            # reported status (though we still include status flags
            # in the interface object).
            # if isup and isrunning:
            xif = PcapInterface(ext_name, xname, xdesc, isloop, isup, isrunning)
            self._interfaces.append(xif)

            tmp = tmp.next
        self._libpcap.pcap_freealldevs(pintf)

    @property 
    def devices(self):
        '''List of PcapInterface objects found by discoverdevs().'''
        return self._interfaces

    @property
    def lib(self):
        '''The raw dlopen'ed libpcap handle.'''
        return self._libpcap

    @property
    def ffi(self):
        '''The FFI object used for all conversions.'''
        return self._ffi

    def _recv_packet(self, xdev):
        '''
        Read one packet from the open pcap handle xdev.  Returns a
        PcapPacket, or None on timeout / end of savefile; raises
        PcapException on a receive error.
        '''
        phdr = self._ffi.new("struct pcap_pkthdr **")
        pdata = self._ffi.new("unsigned char **")
        rv = self._libpcap.pcap_next_ex(xdev, phdr, pdata)
        if rv == 1:
            rawpkt = bytes(self._ffi.buffer(pdata[0], phdr[0].caplen))
            #dt = datetime.fromtimestamp(phdr[0].tv_sec)
            # BUG FIX: 'xffi' was an undefined name (NameError on every
            # received packet); the FFI object lives on self._ffi.
            usec = int(self._ffi.cast("int", phdr[0].tv_usec))
            #ts = dt.replace(microsecond=usec)
            ts = float("{:d}.{:06d}".format(phdr[0].tv_sec, usec))
            return PcapPacket(ts, phdr[0].caplen, phdr[0].len, rawpkt)
        elif rv == 0:
            # timeout; nothing to return
            return None
        elif rv == -1:
            # error on receive; raise an exception
            # BUG FIX: 'xpcap' was an undefined name; the handle passed in
            # is xdev.
            s = self._ffi.string(self._libpcap.pcap_geterr(xdev))
            raise PcapException("Error receiving packet: {}".format(s)) 
        elif rv == -2:
            # reading from savefile, but none left
            return None

    def _set_filter(self, xdev, filterstr):
        '''
        Compile filterstr into a BPF program and install it on xdev.
        Raises PcapException if compilation or installation fails.
        '''
        bpf = self._ffi.new("struct bpf_program *")
        cfilter = self._ffi.new("char []", bytes(filterstr, 'ascii'))
        compile_result = self._libpcap.pcap_compile(xdev, bpf, cfilter, 0, 0xffffffff)
        if compile_result < 0:
            # get error, raise exception (nothing was compiled, so there is
            # nothing to free yet)
            s = self._ffi.string(self._libpcap.pcap_geterr(xdev))
            raise PcapException("Error compiling filter expression: {}".format(s)) 

        try:
            sf_result = self._libpcap.pcap_setfilter(xdev, bpf)
            if sf_result < 0:
                # get error, raise exception
                s = self._ffi.string(self._libpcap.pcap_geterr(xdev))
                raise PcapException("Error setting filter on pcap handle: {}".format(s)) 
        finally:
            # BUG FIX: the compiled program leaked when pcap_setfilter
            # failed; free it on both the success and failure paths.
            self._libpcap.pcap_freecode(bpf)
class NFCT(object):
	'''Singleton cffi wrapper around libnetfilter_conntrack.

	Attribute access for names starting with "nfct_" (or "c_") is proxied
	to the underlying C library with return-value checking; see
	__getattr__ and _ffi_call.
	'''

	# Singleton instance storage (see __new__).
	_instance = None

	def __new__(cls):
		# Classic singleton: every NFCT() call returns the same object.
		if not cls._instance:
			cls._instance = super(NFCT, cls).__new__(cls)
		return cls._instance

	def __init__(self):
		global _cdef, _clibs_includes, _clibs_link
		self.ffi = FFI()
		self.ffi.cdef(_cdef)
		self.libnfct = self.ffi.verify(_clibs_includes, libraries=list(_clibs_link))
		self.libnfct_cache = dict()
		# Drop the module-level source strings once the library is compiled.
		# NOTE(review): __init__ re-runs on every NFCT() call even though
		# __new__ returns the singleton, so a second construction would pass
		# these cleared globals to cdef() -- confirm callers build it once.
		_cdef = _clibs_includes = _clibs_link = None


	def _ffi_call( self, func, args,
			no_check=False, check_gt0=False, check_notnull=False ):
		'''Call lib function through cffi,
				checking return value and raising error, if necessary.
			Checks if return is >0 by default.'''
		res = func(*args)
		# Success criteria are or'ed; on failure, map the ffi errno to a
		# NFCTError with the OS error message.
		if no_check\
			or (check_gt0 and res > 0)\
			or (check_notnull and res)\
			or res >= 0: return res
		errno_ = self.ffi.errno
		raise NFCTError(errno_, os.strerror(errno_))

	def __getattr__(self, k):
		# Proxy nfct_*/c_* attribute access to the C library, caching one
		# checked wrapper (via _ffi_call) per function name.
		if not (k.startswith('nfct_') or k.startswith('c_')):
			return super(NFCT, self).__getattr__(k)
		if k.startswith('c_'): k = k[2:]
		if k not in self.libnfct_cache:
			func = getattr(self.libnfct, k)
			self.libnfct_cache[k] = lambda *a,**kw: self._ffi_call(func, a, **kw)
		return self.libnfct_cache[k]


	def generator(self, events=None, output_flags=None, handle_sigint=True):
		'''Generator that yields:
				- on first iteration - netlink fd that can be poll'ed
					or integrated into some event loop (twisted, gevent, ...).
					Also, that is the point where uid/gid/caps can be dropped.
				- on all subsequent iterations it does recv() on that fd,
					yielding XML representation of the captured conntrack event.
			Keywords:
				events: mask for event types to capture
					- or'ed NFNLGRP_CONNTRACK_* flags, None = all.
				output_flags: which info will be in resulting xml
					- or'ed NFCT_OF_* flags, None = set all.
				handle_sigint: add SIGINT handler to process it gracefully.'''

		if events is None:
			events = (
				self.libnfct.NFNLGRP_CONNTRACK_NEW |
				self.libnfct.NFNLGRP_CONNTRACK_UPDATE |
				self.libnfct.NFNLGRP_CONNTRACK_DESTROY )
		if output_flags is None:
			output_flags = (
				self.libnfct.NFCT_OF_TIME |
				self.libnfct.NFCT_OF_ID |
				self.libnfct.NFCT_OF_SHOW_LAYER3 |
				self.libnfct.NFCT_OF_TIMESTAMP )

		handle = self.nfct_open(
			self.libnfct.NFNL_SUBSYS_NONE, events, check_notnull=True )

		# Events rendered by the C callback are queued here until the
		# generator can yield them.
		cb_results = list()
		xml_buff_size = 2048 # ipv6 events are ~1k
		# One reusable buffer for every event's XML rendering.
		xml_buff = self.ffi.new('char[]', xml_buff_size)

		@self.ffi.callback('nfct_callback')
		def recv_callback(handler, msg_type, ct_struct, data):
			try:
				size = self.nfct_snprintf( xml_buff, xml_buff_size, ct_struct,
					msg_type, self.libnfct.NFCT_O_XML, output_flags, check_gt0=True )
				assert size <= xml_buff_size, size # make sure xml fits
				data = self.ffi.buffer(xml_buff, size)[:]
				cb_results.append(data)
			except:
				cb_results.append(StopIteration) # breaks the generator
				raise
			return self.libnfct.NFCT_CB_STOP # to yield processed data from generator

		if handle_sigint:
			# Install a SIGINT handler that queues a StopIteration sentinel;
			# the previous handler is kept and restored in the finally block.
			global _sigint_raise
			_sigint_raise = False
			def sigint_handler(sig, frm):
				global _sigint_raise
				_sigint_raise = True
				cb_results.append(StopIteration)
			sigint_handler = signal.signal(signal.SIGINT, sigint_handler)

		self.nfct_callback_register2(
			handle, self.libnfct.NFCT_T_ALL, recv_callback, self.ffi.NULL )
		try:
			peek = yield self.nfct_fd(handle) # yield fd for poll() on first iteration
			while True:
				if peek:
					peek = yield NFWouldBlock # poll/recv is required
					continue
				# No idea how many times callback will be used here
				self.nfct_catch(handle)
				if handle_sigint and _sigint_raise: raise KeyboardInterrupt()
				# Yield individual events
				for result in cb_results:
					if result is StopIteration: raise result()
					peek = yield result
				cb_results = list()

		finally:
			if handle_sigint: signal.signal(signal.SIGINT, sigint_handler)
			self.nfct_callback_unregister2(handle, no_check=True)
			self.nfct_close(handle)
Example #17
0
class CTC(object):
    """cffi wrapper around the warp-ctc connectionist temporal
    classification (CTC) loss shared library.

    Provides helpers to size the warp-ctc scratch workspace and to run
    the loss/gradient computation on either the CPU or GPU backend.
    """
    def __init__(self, on_device='cpu', blank_label=0):
        # Load the warp-ctc shared library and declare its C API.
        libpath = get_ctc_lib()
        self.ffi = FFI()
        self.ffi.cdef(ctc_header())
        self.ctclib = self.ffi.dlopen(libpath)

        supported_devices = ['cpu', 'gpu']
        if on_device not in supported_devices:
            print("the requested device {} is not supported".format(
                on_device), file=sys.stderr)
            sys.exit(1)
        # BUG FIX: `on_device is 'cpu'` compared object identity, which is
        # not guaranteed for equal strings; compare by value.
        assign_device = 0 if on_device == 'cpu' else 1

        self.options = self.ffi.new('ctcOptions*',
                                    {"loc": assign_device,
                                     "blank_label": blank_label})[0]
        self.size_in_bytes = self.ffi.new("size_t*")
        self.nout = None
        self.bsz = None

    def get_buf_size(self, ptr_to_buf):
        """Size in bytes of the element type a cffi pointer points to."""
        return self.ffi.sizeof(self.ffi.getctype(
                               self.ffi.typeof(ptr_to_buf).item))

    def buf_ref_from_array(self, arr):
        """cffi buffer reference over an array's raw device/host pointer
        (`arr` must expose .ptr and .nbytes)."""
        return self.ffi.from_buffer(
            self.ffi.buffer(self.ffi.cast('void*', arr.ptr), arr.nbytes))

    def buf_ref_from_ptr(self, ptr, size):
        """cffi buffer reference over `size` bytes at raw pointer `ptr`."""
        return self.ffi.from_buffer(self.ffi.buffer(ptr, size))

    def get_gpu_workspace_size(self, lbl_lens, utt_lens, nout, bsz):
        """Query warp-ctc for the GPU scratch size (bytes) and remember
        the layer width (`nout`) and batch size (`bsz`) for later calls."""
        self.nout = nout
        self.bsz = bsz
        _lbl_lens = self.ffi.cast("int*", lbl_lens.ravel().ctypes.data)
        _utt_lens = self.ffi.cast("int*", utt_lens.ravel().ctypes.data)

        status = self.ctclib.get_workspace_size(_lbl_lens,
                                                _utt_lens,
                                                self.nout,
                                                self.bsz,
                                                self.options,
                                                self.size_in_bytes)
        # BUG FIX: `status is 0` relied on CPython small-int identity;
        # use equality.
        assert status == 0, "get_workspace_size() in warp-ctc failed"

        return self.size_in_bytes[0]

    def bind_to_gpu(self, acts, grads, lbls, lbl_lens, utt_lens,
                    costs, workspace, scratch_size, stream):
        """Run compute_ctc_loss on the GPU backend over device buffers.

        Requires get_gpu_workspace_size() to have been called first so
        self.nout/self.bsz are set.
        """
        if stream is None:
            # No CUDA stream supplied: pass a null pointer wrapped in a
            # buffer reference of pointer size.
            stream_ptr = self.ffi.cast('void*', 0)
            stream_buf_size = self.ffi.sizeof(self.ffi.new_handle(stream))
            stream_buf = self.buf_ref_from_ptr(stream_ptr, stream_buf_size)
        else:
            stream_buf = self.ffi.cast("void*", stream.handle)

        self.options.stream = stream_buf

        flat_dims = np.prod(acts.shape)
        assert np.prod(grads.shape) == flat_dims

        acts_buf = self.ffi.cast("float*",
                                 self.buf_ref_from_array(acts))
        grads_buf = self.ffi.cast("float*",
                                  self.buf_ref_from_array(grads))
        costs_buf = self.ffi.cast("float*",
                                  self.buf_ref_from_array(costs))

        warp_labels = self.ffi.cast("int*", lbls.ravel().ctypes.data)
        warp_label_lens = self.ffi.cast("int*", lbl_lens.ravel().ctypes.data)
        warp_input_lens = self.ffi.cast("int*", utt_lens.ravel().ctypes.data)

        workspace_buf = self.buf_ref_from_ptr(
            self.ffi.cast('void*', workspace), int(scratch_size))

        ctc_status = self.ctclib.compute_ctc_loss(acts_buf,
                                                  grads_buf,
                                                  warp_labels,
                                                  warp_label_lens,
                                                  warp_input_lens,
                                                  self.nout,
                                                  self.bsz,
                                                  costs_buf,
                                                  workspace_buf,
                                                  self.options)

        # BUG FIX: `is 0` -> `== 0` (identity vs. equality).
        assert ctc_status == 0, "warp-ctc run failed"

    def bind_to_cpu(self, acts, lbls, utt_lens, lbl_lens, grads, costs,
                    n_threads=1):
        """Run compute_ctc_loss on the CPU backend over host numpy arrays,
        copying gradients and costs back into `grads` and `costs`."""
        self.options.num_threads = n_threads
        _, self.bsz, self.nout = acts.shape
        flat_dims = np.prod(acts.shape)
        assert np.prod(grads.shape) == flat_dims

        acts_buf = self.ffi.cast("float*", acts.ctypes.data)
        grads_buf = self.ffi.cast("float*", grads.ctypes.data)
        costs_buf = self.ffi.cast("float*", costs.ctypes.data)

        warp_grads_buf_size = flat_dims * self.get_buf_size(grads_buf)
        warp_costs_buf_size = self.bsz * self.get_buf_size(costs_buf)

        warp_labels = self.ffi.cast("int*", lbls.ravel().ctypes.data)
        warp_label_lens = self.ffi.cast("int*", lbl_lens.ravel().ctypes.data)
        warp_input_lens = self.ffi.cast("int*", utt_lens.ravel().ctypes.data)
        status = self.ctclib.get_workspace_size(warp_label_lens,
                                                warp_input_lens,
                                                self.nout,
                                                self.bsz,
                                                self.options,
                                                self.size_in_bytes)

        # BUG FIX: `is 0` -> `== 0` (identity vs. equality).
        assert status == 0, "get_workspace_size() in warp-ctc failed"

        # TODO: workspace is a variable size buffer whose size is
        # determined during each call, so we can't initialize ahead
        # of time. Can we avoid this?
        workspace = self.ffi.new("char[]", self.size_in_bytes[0])

        ctc_status = self.ctclib.compute_ctc_loss(acts_buf,
                                                  grads_buf,
                                                  warp_labels,
                                                  warp_label_lens,
                                                  warp_input_lens,
                                                  self.nout,
                                                  self.bsz,
                                                  costs_buf,
                                                  workspace,
                                                  self.options)

        # transfer grads and costs back without copying
        self.ffi.memmove(grads, grads_buf, warp_grads_buf_size)
        grads = grads.reshape((acts.shape))
        self.ffi.memmove(costs, costs_buf, warp_costs_buf_size)

        # BUG FIX: `is 0` -> `== 0` (identity vs. equality).
        assert ctc_status == 0, "warp-ctc run failed"
Example #18
0
	unsigned short reserve3;    // Reserved 3
	union rtdata3 {             // Monitor data 3 .
		POSE pos3;              // XYZ type [mm/rad] .
		JOINT jnt3;             // JOINT type [mm/rad] .
		PULSE pls3;             // PULSE type [mm/rad] or Integer type [% / non-unit].
		long lng3[8];           // Integer type [% / non-unit] .
	} dat3;
} MXTCMD;
""")

if __name__ == '__main__':
    # Smoke test: build a single MXTCMD packet and send it over UDP.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Zero-initialized MXTCMD struct allocated by cffi.
    data = ffi.new("MXTCMD *")
    data.Command = 1
    # ffi.buffer() exposes the struct's raw bytes, so the packet is sent
    # exactly as laid out in C.
    sock.sendto(ffi.buffer(data), ("localhost", 10000))

#class JointServer(DatagramProtocol):
#    _mxt_cmd = ffi.new("MXTCMD *")
#    _counter = 0
#    _last_jnt = [random() * 3.14 for i in xrange(6)]
#    #_last_jnt = [0 for i in xrange(6)]
 
#    def datagramReceived(self, datagram, address):
#        #print "Received from address: " + str(address)
#        #print str(datagram)
#        ffi.buffer(self._mxt_cmd)[:] = datagram
#        #self._counter += 1
#        #if self._counter > 1000:
#        #    self._counter = 0 
#        #    self._last_jnt = [random() * 3.14 for i in xrange(6)]
Example #19
0
# Demo: pass a C struct to a Vector Engine kernel via pyveo + cffi.
print "Sum the elements and multiply with a factor. Correct result is 30."
print "\n"

# Start a VE process on node 0, load the test library and look up the kernel.
p = veo.VeoProc(0)
lib = p.load_library(os.getcwd() + "/libvetest6.so")
f = lib.find_function("multeach")
c = p.open_context()

# Declare the struct layout so cffi can allocate and serialize it.
ffi = FFI()
ffi.cdef("""
    struct abc {
        int a, b, c;
    };
    """)

# (1 + 2 + 3) * 5 == 30, matching the banner printed above.
abc = ffi.new("struct abc *")
abc.a = 1
abc.b = 2
abc.c = 3

# we'll pass the struct * as a void *
f.args_type("void *", "int")
f.ret_type("int")

# OnStack wraps the struct's raw bytes for the VE call; the request is
# asynchronous, wait_result() blocks for the int return value.
req = f(c, veo.OnStack(ffi.buffer(abc)), 5)
r = req.wait_result()
print "result = %r" % r

del p
print "finished"
Example #20
0
    i += 1

plot_name = 'tran1'
all_vectors = ngspice_shared.ngSpice_AllVecs(plot_name.encode('utf8'))

# Walk the NULL-terminated array of vector names returned by ngspice and
# dump each vector's metadata and contents.
i = 0
while all_vectors[i] != ffi.NULL:
    vector_name = ffi.string(all_vectors[i])
    name = '.'.join((plot_name, vector_name.decode('utf8')))
    vector_info = ngspice_shared.ngGet_Vec_Info(name.encode('utf8'))
    length = vector_info.v_length
    print("vector[{}] {} type {} flags {} length {}".format(
        i, vector_name, vector_info.v_type, vector_info.v_flags, length))
    if vector_info.v_compdata == ffi.NULL:
        print("  real data")
        # Zero-copy view over the C double array (8 bytes per value).
        real_array = np.frombuffer(ffi.buffer(vector_info.v_realdata,
                                              length * 8),
                                   dtype=np.float64)
        print(real_array)
    else:
        print("  complex data")
        for k in range(length):
            value = vector_info.v_compdata[k]
            print("  [{}] {} + i {}".format(k, value.cx_real,
                                            value.cx_imag))
    i += 1
Example #21
0
    lib.ftrScanCloseDevice(hDevice)
else:
    print('Image size is ', ImageSize.nImageSize)
    pBuffer = lib.malloc(ImageSize.nImageSize)
    print('Please put your finger on the scanner:\n')
    while (1):
        if lib.ftrScanIsFingerPresent(hDevice, ffi.NULL):
            break
        for i in range(0, 100):
            pass  #sleep
    print('Capturing fingerprint ......\n')
    while (1):
        if (lib.ftrScanGetFrame(hDevice, pBuffer, ffi.NULL)):
            print('Done!\nWriting to file......\n')
            #TODO: write_bmp_file( pBuffer, ImageSize.nWidth, ImageSize.nHeight)
            #print(ImageSize.nWidth)
            #print(ImageSize.nHeight)
            #pimg = ffi.new('unsigned char[]')
            pimg = ffi.cast('unsigned char [320][480]', pBuffer)
            result = Image.frombuffer(
                'L', (ImageSize.nWidth, ImageSize.nHeight),
                ffi.buffer(pBuffer, ImageSize.nImageSize), 'raw')
            result.save('out.bmp')
            break
        else:
            PrintErrorMessage(lib.ftrScanGetLastError())
            for i in range(0, 100):
                pass  #sleep

    lib.ftrScanCloseDevice(hDevice)
Example #22
0
from cffi import FFI

# Demo: read a raw RGB dump straight into C memory, poke one pixel, write
# the whole buffer back out.
ffi = FFI()
ffi.cdef("""
    typedef struct {
        unsigned char r, g, b;
    } pixel_t;
""")
# 800x600 framebuffer of packed 3-byte pixels, contiguous in C memory.
image = ffi.new("pixel_t[]", 800*600)

# Binary mode is essential; 'with' guarantees the handle is closed even if
# readinto() raises (the original leaked the handle on error).
with open('data', 'rb') as f:
    f.readinto(ffi.buffer(image))

# Mutate pixel #100 in place.
image[100].r = 255
image[100].g = 192
image[100].b = 128

with open('data', 'wb') as f:
    f.write(ffi.buffer(image))
Example #23
0
from cffi import FFI

ffi = FFI()

## I got a Py string, want to process in C, and get back
## a Py string. Roundtrip over C. This will incur 2 copies.

N = 1000
s_in = "\x00" * N
buf = ffi.new("char[]", s_in)
print len(buf)

## => Do something in C with buf

#s_out = ffi.buffer(buf)[:-1] # ffi will append a \0 octet, eat it!
s_out = ffi.buffer(buf, len(buf) - 1)[:] # ffi will append a \0 octet, eat it!
buf = None # GC of underlying mem

assert(len(s_in), len(s_out))
Example #24
0
class NFCT(object):  # pylint: disable=too-few-public-methods
    """Singleton cffi wrapper around libnetfilter_conntrack.

    Compiles the C declarations held in the module-level ``_cdef`` /
    ``_clibs_includes`` / ``_clibs_link`` globals exactly once, then exposes
    the library's ``nfct_*`` functions as attributes with automatic
    return-value checking (see ``_ffi_call``).
    """

    _instance = None
    # Size (bytes) of the scratch buffer that receives XML-rendered events.
    BUF_SIZE = 102400000

    def __new__(cls):
        # Classic singleton: reuse the one compiled FFI/library instance.
        if not cls._instance:
            cls._instance = super(NFCT, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        """Compile the cdefs and link libnetfilter_conntrack (first call only).

        NOTE(review): __init__ runs on every ``NFCT()`` call even though
        __new__ returns the singleton; a second call would hit the globals
        after they are cleared to None below — confirm callers construct once.
        """
        log.debug("Creating an instance of NFCT", level=4)
        global _cdef, _clibs_includes, _clibs_link  # pylint: disable=global-statement
        self.ffi = FFI()
        self.ffi.cdef(_cdef)
        self.libnfct = self.ffi.verify(_clibs_includes,
                                       libraries=list(_clibs_link))
        self.libnfct_cache = dict()
        # Release the (large) C source strings once compiled.
        _cdef = _clibs_includes = _clibs_link = None

    def _ffi_call(self,
                  func,
                  args,
                  no_check=False,
                  check_gt0=False,
                  check_notnull=False):  # pylint: disable=too-many-arguments
        """Invoke *func* and translate failure return codes into NFCTError."""
        # Call lib function through cffi,
        # checking return value and raising error, if necessary.
        # Checks if return is >0 by default.
        res = func(*args)
        if no_check or (check_gt0 and res > 0) or (check_notnull
                                                   and res) or res >= 0:  # pylint: disable=too-many-boolean-expressions
            return res
        errno_ = self.ffi.errno
        if errno_ == 105:
            # ENOBUFS: tolerated (event flood) — returns None in this case.
            log.debug("skipping [Errno 105] No buffer space available")
        else:
            raise NFCTError(errno_, os.strerror(errno_))

    def __getattr__(self, k):
        """Resolve ``nfct_*`` / ``c_*`` names to checked library calls (cached)."""
        if not (k.startswith('nfct_') or k.startswith('c_')):
            return super(NFCT, self).__getattr__(k)
        if k.startswith('c_'):
            k = k[2:]
        if k not in self.libnfct_cache:
            func = getattr(self.libnfct, k)
            self.libnfct_cache[k] = lambda *a, **kw: self._ffi_call(
                func, a, **kw)
        return self.libnfct_cache[k]

    def generator(self, events=None, output_flags=None):
        """Yield a pollable netlink fd first, then XML conntrack events."""
        # Generator that yields:
        # 		- on first iteration - netlink fd that can be poll'ed
        # 			or integrated into some event loop (twisted, gevent, ...).
        # 			Also, that is the point where uid/gid/caps can be dropped.
        # 		- on all subsequent iterations it does recv() on that fd,
        # 			yielding XML representation of the captured conntrack event.
        # Keywords:
        # 		events: mask for event types to capture
        # 			- or'ed NFNLGRP_CONNTRACK_* flags, None = all.
        # 		output_flags: which info will be in resulting xml
        # 			- or'ed NFCT_OF_* flags, None = set all.
        # 		handle_sigint: add SIGINT handler to process it gracefully.
        # NOTE(review): no handle_sigint parameter exists on this method —
        # the comment above appears to predate the current signature.
        log.debug("Starting the NFCT generator", level=4)
        if events is None:
            events = (self.libnfct.NFCT_T_DESTROY | self.libnfct.NFCT_T_NEW
                      | self.libnfct.NFCT_T_UPDATE)

        if output_flags is None:
            output_flags = (self.libnfct.NFCT_OF_TIME | self.libnfct.NFCT_OF_ID
                            | self.libnfct.NFCT_OF_SHOW_LAYER3
                            | self.libnfct.NFCT_OF_TIMESTAMP)

        handle = self.nfct_open(self.libnfct.NFNL_SUBSYS_NONE,
                                events,
                                check_notnull=True)

        cb_results = list()
        xml_buff_size = self.BUF_SIZE  # ipv6 events are ~1k
        xml_buff = self.ffi.new('char[]', xml_buff_size)

        @self.ffi.callback('nfct_callback')
        def recv_callback(handler, msg_type, ct_struct, data):  # pylint: disable=unused-argument, redefined-outer-name
            # Renders one conntrack event to XML and queues it; any failure
            # queues StopIteration so the generator loop terminates.
            try:
                size = self.nfct_snprintf(xml_buff,
                                          xml_buff_size,
                                          ct_struct,
                                          msg_type,
                                          self.libnfct.NFCT_O_XML,
                                          output_flags,
                                          check_gt0=True)
                assert size <= xml_buff_size, size  # make sure xml fits
                log.debug("Obtained data from the buffer %s", data, level=10)
                data = self.ffi.buffer(xml_buff, size)[:]
                cb_results.append(data)
            except:
                cb_results.append(StopIteration)  # breaks the generator
                raise
            return self.libnfct.NFCT_CB_STOP  # to yield processed data from generator

        def break_check(val):
            # NOTE(review): raising StopIteration inside a generator becomes
            # RuntimeError under PEP 479 (Python 3.7+) — confirm intended.
            if val is StopIteration:
                raise val()
            return val

        self.nfct_callback_register2(handle, self.libnfct.NFCT_T_ALL,
                                     recv_callback, self.ffi.NULL)
        try:
            peek = break_check((yield self.nfct_fd(handle)
                                ))  # yield fd for poll() on first iteration
            while True:
                if peek:
                    peek = break_check((yield
                                        NFWouldBlock))  # poll/recv is required
                    continue
                # No idea how many times callback will be used here
                self.nfct_catch(handle)
                # Yield individual events
                for result in cb_results:
                    break_check(result)
                    peek = break_check((yield result))
                cb_results = list()

        finally:
            # Always detach the callback and close the netlink handle.
            self.nfct_callback_unregister2(handle, no_check=True)
            self.nfct_close(handle)
Example #25
0
class RQObject(object):
	"""cffi ABI-mode wrapper around the libRaptorQ C API.

	``_cdefs`` mirrors cRaptorQ.h; the library is dlopen'ed in __init__.
	Attribute access of the form ``rq_<name>`` auto-binds the current
	context pointer (``_ctx``) as the first argument of ``RaptorQ_<name>``.
	NOTE(review): ``_ctx_init`` used by __enter__ is not defined here —
	presumably supplied by a subclass (encoder/decoder); confirm.
	"""

	_cdefs = '''
		typedef uint64_t RaptorQ_OTI_Common_Data;
		typedef uint32_t RaptorQ_OTI_Scheme_Specific_Data;

		typedef enum {
			NONE = 0,
			ENC_8 = 1, ENC_16 = 2, ENC_32 = 3, ENC_64 = 4,
			DEC_8 = 5, DEC_16 = 6, DEC_32 = 7, DEC_64 = 8
		} RaptorQ_type;

		struct RaptorQ_ptr;

		struct RaptorQ_ptr* RaptorQ_Enc (
			const RaptorQ_type type,
			void *data,
			const uint64_t size,
			const uint16_t min_subsymbol_size,
			const uint16_t symbol_size,
			const size_t max_memory);

		struct RaptorQ_ptr* RaptorQ_Dec (
			const RaptorQ_type type,
			const RaptorQ_OTI_Common_Data common,
			const RaptorQ_OTI_Scheme_Specific_Data scheme);

		// Encoding

		RaptorQ_OTI_Common_Data RaptorQ_OTI_Common (struct RaptorQ_ptr *enc);
		RaptorQ_OTI_Scheme_Specific_Data RaptorQ_OTI_Scheme (struct RaptorQ_ptr *enc);

		uint16_t RaptorQ_symbol_size (struct RaptorQ_ptr *ptr);
		uint8_t RaptorQ_blocks (struct RaptorQ_ptr *ptr);
		uint32_t RaptorQ_block_size (struct RaptorQ_ptr *ptr, const uint8_t sbn);
		uint16_t RaptorQ_symbols (struct RaptorQ_ptr *ptr, const uint8_t sbn);
		uint32_t RaptorQ_max_repair (struct RaptorQ_ptr *enc, const uint8_t sbn);
		size_t RaptorQ_precompute_max_memory (struct RaptorQ_ptr *enc);

		void RaptorQ_precompute (
			struct RaptorQ_ptr *enc,
			const uint8_t threads,
			const bool background);

		uint64_t RaptorQ_encode_id (
			struct RaptorQ_ptr *enc,
			void **data,
			const uint64_t size,
			const uint32_t id);
		uint64_t RaptorQ_encode (
			struct RaptorQ_ptr *enc,
			void **data,
			const uint64_t size,
			const uint32_t esi,
			const uint8_t sbn);
		uint32_t RaptorQ_id (const uint32_t esi, const uint8_t sbn);

		// Decoding

		uint64_t RaptorQ_bytes (struct RaptorQ_ptr *dec);

		uint64_t RaptorQ_decode (
			struct RaptorQ_ptr *dec,
			void **data,
			const size_t size);
		uint64_t RaptorQ_decode_block (
			struct RaptorQ_ptr *dec,
			void **data,
			const size_t size,
			const uint8_t sbn);

		bool RaptorQ_add_symbol_id (
			struct RaptorQ_ptr *dec,
			void **data,
			const uint32_t size,
			const uint32_t id);
		bool RaptorQ_add_symbol (
			struct RaptorQ_ptr *dec,
			void **data,
			const uint32_t size,
			const uint32_t esi,
			const uint8_t sbn);

		// General: free memory

		void RaptorQ_free (struct RaptorQ_ptr **ptr);
		void RaptorQ_free_block (struct RaptorQ_ptr *ptr, const uint8_t sbn);
	'''
	# Current encoder/decoder context pointer; None until __enter__.
	_ctx = None

	# Bytes per symbol word, word width in bits, and the matching C type.
	data_size_div, _rq_type, _rq_blk = 4, 32, 'uint32_t'

	def __init__(self):
		"""Load libRaptorQ via dlopen and build the RaptorQ_type name table."""
		self._ffi = FFI()
		self._ffi.cdef(self._cdefs)
		# self.ffi.set_source('_rq', '#include <RaptorQ/cRaptorQ.h>')
		self._lib = self._ffi.dlopen('libRaptorQ.so') # ABI mode for simplicity
		self.rq_types = ( ['NONE', None]
			+ list('ENC_{}'.format(2**n) for n in xrange(3, 7))
			+ list('DEC_{}'.format(2**n) for n in xrange(3, 7)) )
		self._rq_blk_size = self.data_size_div

	def rq_type_val(self, v, pre):
		"""Map a width (e.g. 32 or '32') to the C enum value, e.g. ENC_32."""
		if isinstance(v, int) or v.isdigit(): v = '{}_{}'.format(pre, v).upper()
		else: v = bytes(v).upper()
		assert v in self.rq_types, [v, self.rq_types]
		return getattr(self._lib, v)

	def __getattr__(self, k):
		# rq_foo -> RaptorQ_foo with the context pointer pre-bound.
		if k.startswith('rq_'):
			if not self._ctx: raise RuntimeError('ContextManager not initialized or already freed')
			return ft.partial(getattr(self._lib, 'RaptorQ_{}'.format(k[3:])), self._ctx)
		return self.__getattribute__(k)

	def free(self):
		"""Release the C context (idempotent; RaptorQ_free needs a ptr-to-ptr)."""
		if self._ctx:
			ptr = self._ffi.new('struct RaptorQ_ptr **')
			ptr[0] = self._ctx
			self._lib.RaptorQ_free(ptr)
			self._ctx = None

	def __enter__(self):
		self._ctx = self._ctx_init[0](*self._ctx_init[1])
		return self

	def __exit__(self, err_t, err, err_tb): self.free()
	def __del__(self): self.free()


	def sym_id(self, esi, sbn): return self._lib.RaptorQ_id(esi, sbn)

	# Cached symbol length in words; computed lazily in _sym_buff.
	_sym_n = None
	def _sym_buff(self, init=None):
		"""Allocate one symbol buffer; return (void** ptr, bytes-getter)."""
		if not self._sym_n: self._sym_n = self.symbol_size / self._rq_blk_size
		buff = self._ffi.new('{}[]'.format(self._rq_blk), self._sym_n)
		buff_ptr = self._ffi.new('void **', buff)
		buff_raw = self._ffi.buffer(buff)
		if init: buff_raw[:] = init
		return buff_ptr, lambda: bytes(buff_raw)
Example #26
0
class TestLocationdLib(unittest.TestCase):
    """Exercise liblocationd's C ABI (init / handle msg / get msg) via cffi."""

    def setUp(self):
        """Open the shared library, create a localizer and an output buffer."""
        header = '''typedef ...* Localizer_t;
Localizer_t localizer_init();
void localizer_get_message_bytes(Localizer_t localizer, uint64_t logMonoTime, bool inputsOK, bool sensorsOK, bool gpsOK, char *buff, size_t buff_size);
void localizer_handle_msg_bytes(Localizer_t localizer, const char *data, size_t size);'''

        self.ffi = FFI()
        self.ffi.cdef(header)
        self.lib = self.ffi.dlopen(LIBLOCATIOND_PATH)

        self.localizer = self.lib.localizer_init()

        # Fixed-size scratch buffer the C side serializes capnp messages into.
        self.buff_size = 2048
        self.msg_buff = self.ffi.new(f'char[{self.buff_size}]')

    def localizer_handle_msg(self, msg_builder):
        """Serialize a capnp builder and feed the raw bytes to the localizer."""
        bytstr = msg_builder.to_bytes()
        self.lib.localizer_handle_msg_bytes(self.localizer,
                                            self.ffi.from_buffer(bytstr),
                                            len(bytstr))

    def localizer_get_msg(self,
                          t=0,
                          inputsOK=True,
                          sensorsOK=True,
                          gpsOK=True):
        """Ask the localizer for its current state, decoded as a log.Event."""
        self.lib.localizer_get_message_bytes(
            self.localizer, t, inputsOK, sensorsOK, gpsOK,
            self.ffi.addressof(self.msg_buff, 0), self.buff_size)
        return log.Event.from_bytes(self.ffi.buffer(self.msg_buff),
                                    nesting_limit=self.buff_size // 8)

    def test_liblocalizer(self):
        """Smoke test: a calibration message round-trips without crashing."""
        msg = messaging.new_message('liveCalibration')
        msg.liveCalibration.validBlocks = random.randint(1, 10)
        msg.liveCalibration.rpyCalib = [random.random() / 10 for _ in range(3)]

        self.localizer_handle_msg(msg)
        liveloc = self.localizer_get_msg()
        self.assertTrue(liveloc is not None)

    @unittest.skip("temporarily disabled due to false positives")
    def test_device_fell(self):
        """deviceStable should flip once acceleration exceeds ~40 m/s^2."""
        msg = messaging.new_message('sensorEvents', 1)
        msg.sensorEvents[0].sensor = 1
        msg.sensorEvents[0].timestamp = msg.logMonoTime
        msg.sensorEvents[0].type = 1
        msg.sensorEvents[0].init('acceleration')
        msg.sensorEvents[0].acceleration.v = [10.0, 0.0,
                                              0.0]  # zero with gravity
        self.localizer_handle_msg(msg)

        ret = self.localizer_get_msg()
        self.assertTrue(ret.liveLocationKalman.deviceStable)

        msg = messaging.new_message('sensorEvents', 1)
        msg.sensorEvents[0].sensor = 1
        msg.sensorEvents[0].timestamp = msg.logMonoTime
        msg.sensorEvents[0].type = 1
        msg.sensorEvents[0].init('acceleration')
        msg.sensorEvents[0].acceleration.v = [50.1, 0.0,
                                              0.0]  # more than 40 m/s**2
        self.localizer_handle_msg(msg)

        ret = self.localizer_get_msg()
        self.assertFalse(ret.liveLocationKalman.deviceStable)

    def test_posenet_spike(self):
        """posenetOK should drop when odometry stddev jumps while moving."""
        for _ in range(SENSOR_DECIMATION):
            msg = messaging.new_message('carState')
            msg.carState.vEgo = 6.0  # more than 5 m/s
            self.localizer_handle_msg(msg)

        ret = self.localizer_get_msg()
        self.assertTrue(ret.liveLocationKalman.posenetOK)

        for _ in range(20 * VISION_DECIMATION):  # size of hist_old
            msg = messaging.new_message('cameraOdometry')
            msg.cameraOdometry.rot = [0.0, 0.0, 0.0]
            msg.cameraOdometry.rotStd = [0.1, 0.1, 0.1]
            msg.cameraOdometry.trans = [0.0, 0.0, 0.0]
            msg.cameraOdometry.transStd = [2.0, 0.1, 0.1]
            self.localizer_handle_msg(msg)

        for _ in range(20 * VISION_DECIMATION):  # size of hist_new
            msg = messaging.new_message('cameraOdometry')
            msg.cameraOdometry.rot = [0.0, 0.0, 0.0]
            msg.cameraOdometry.rotStd = [1.0, 1.0, 1.0]
            msg.cameraOdometry.trans = [0.0, 0.0, 0.0]
            msg.cameraOdometry.transStd = [10.1, 0.1,
                                           0.1]  # more than 4 times larger
            self.localizer_handle_msg(msg)

        ret = self.localizer_get_msg()
        self.assertFalse(ret.liveLocationKalman.posenetOK)
print ar, len(ar), list_from_ar

# char simple example — len(x) is 6: cffi appends a trailing NUL byte.
x = ffi.new("char[]", "hello")
print x, len(x)
print ffi.string(x)

# filling double array simple
# (NOTE(review): 'lenght' is a typo kept throughout; renaming would also
# change the printed text, so it is left as-is here.)
lenght = 8
print 'Double array lenght %d' % lenght
arp = ffi.new('double[]', lenght)
lib.testArray(arp, lenght)
for i in xrange(lenght):
    print 'arp[i]= %f' % arp[i]
print 'Double array %s ! ' % str(arp)

# Returning array of doubles from C
# C allocates the buffer and reports its size via the out-parameters.
buffer_pointer, buffer_size = ffi.new('double **'), ffi.new('mysize *')
lib.save_to_DoubleBuffer(buffer_pointer, buffer_size, ffi.new('char[]', b'junk_msg'))
mybytes = ffi.buffer(buffer_pointer[0], buffer_size[0])
mybytes = mybytes[:]
print 'printing returned doubles %s as string. TODO How to convert them?' % mybytes
print 'buffer %d' % buffer_size[0]

# Returning char array (string) from C
buffer_pointer, buffer_size = ffi.new('mychar **'), ffi.new('mysize *')
lib.save_to_buffer(buffer_pointer, buffer_size, ffi.new('char[]', b'Ondra'))
mybytes = ffi.string(buffer_pointer[0], buffer_size[0])
print 'printing returned char %s' % mybytes
print 'string %d' % buffer_size[0]
Example #28
0
class NFLOG(object):
    """Singleton cffi wrapper around libnetfilter_log (NFLOG packet capture).

    Compiles the module-level ``_cdef`` / ``_clibs_includes`` /
    ``_clibs_link`` globals once and exposes the library's ``nflog_*``
    functions as attributes with automatic return-value checking.
    """

    _instance = None

    def __new__(cls):
        # Classic singleton: reuse the one compiled FFI/library instance.
        if not cls._instance:
            cls._instance = super(NFLOG, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        """Compile the cdefs and link libnetfilter_log (first call only)."""
        global _cdef, _clibs_includes, _clibs_link
        self.ffi = FFI()
        self.ffi.cdef(_cdef)
        self.libnflog = self.ffi.verify(_clibs_includes,
                                        libraries=list(_clibs_link))
        self.libnflog_cache = dict()
        # Release the (large) C source strings once compiled.
        _cdef = _clibs_includes = _clibs_link = None

    def _ffi_call(self,
                  func,
                  args,
                  no_check=False,
                  check_gt0=False,
                  check_notnull=False):
        '''Call libnflog function through cffi,
                checking return value and raising error, if necessary.
            Checks if return is >0 by default.'''
        res = func(*args)
        if no_check\
            or (check_gt0 and res > 0)\
            or (check_notnull and res)\
            or res >= 0:
            return res
        errno_ = self.ffi.errno
        raise NFLogError(errno_, os.strerror(errno_))

    def __getattr__(self, k):
        """Resolve ``nflog_*`` / ``c_*`` names to checked library calls (cached)."""
        if not (k.startswith('nflog_') or k.startswith('c_')):
            return super(NFLOG, self).__getattr__(k)
        if k.startswith('c_'): k = k[2:]
        if k not in self.libnflog_cache:
            func = getattr(self.libnflog, k)
            self.libnflog_cache[k] = lambda *a, **kw: self._ffi_call(
                func, a, **kw)
        return self.libnflog_cache[k]

    def generator(self,
                  qids,
                  pf=(socket.AF_INET, socket.AF_INET6),
                  qthresh=None,
                  timeout=None,
                  nlbufsiz=None,
                  buff_size=None,
                  extra_attrs=None,
                  handle_overflows=True):
        '''Generator that yields:
                - on first iteration - netlink fd that can be poll'ed
                    or integrated into some event loop (twisted, gevent, ...).
                    Also, that is the point where uid/gid/caps can be dropped.
                - on all subsequent iterations it does recv() on that fd,
                    returning either None (if no packet can be assembled yet)
                    or captured packet payload.
            qids: nflog group ids to bind to (nflog_bind_group)
            Keywords:
                pf: address families to pass to nflog_bind_pf
                extra_attrs: metadata to extract from captured packets,
                    returned in a list after packet payload, in the same order
                nlbufsiz (bytes): set size of netlink socket buffer for the created queues
                qthresh (packets): set the maximum amount of logs in buffer for each group
                timeout (seconds): set the maximum time to push log buffer for this group
                buff_size (bytes): size of the batch to fetch
                    from libnflog to process in python (default: min(nlbufsiz, 1 MiB))
                handle_overflows: supress ENOBUFS NFLogError on
                    queue overflows (but do log warnings, default: True)'''

        handle = self.nflog_open(check_notnull=True)

        # Rebind the requested address families (unbind first, as required).
        for pf in (pf if not isinstance(pf, int) else [pf]):
            self.nflog_unbind_pf(handle, pf)
            self.nflog_bind_pf(handle, pf)

        if isinstance(extra_attrs, bytes): extra_attrs = [extra_attrs]

        cb_results = list()

        # Default arguments below are deliberate: they act as per-callback
        # scratch allocations (timeval slot, payload pointer slot), created
        # once when the callback is defined.
        @self.ffi.callback('nflog_callback')
        def recv_callback(qh,
                          nfmsg,
                          nfad,
                          data,
                          extra_attrs=extra_attrs,
                          ts_slot=self.ffi.new('struct timeval *'),
                          pkt_slot=self.ffi.new('char **'),
                          ts_err_mask=frozenset([0, errno.EAGAIN]),
                          result=None):
            try:
                pkt_len = self.nflog_get_payload(nfad, pkt_slot)
                result = self.ffi.buffer(pkt_slot[0], pkt_len)[:]
                if extra_attrs:
                    result = [result]
                    for attr in extra_attrs:
                        if attr == 'len': result.append(pkt_len)
                        elif attr == 'ts':
                            # Fails quite often (EAGAIN, SUCCESS, ...), not sure why
                            try:
                                self.nflog_get_timestamp(nfad, ts_slot)
                            except NFLogError as err:
                                if err.errno not in ts_err_mask: raise
                                result.append(None)
                            else:
                                result.append(ts_slot.tv_sec +
                                              ts_slot.tv_usec * 1e-6)
                        elif attr == 'msg_packet_hwhdr':
                            hwhdr = self.nflog_get_msg_packet_hwhdr(
                                nfad, no_check=True)
                            hwhdr_len = self.nflog_get_msg_packet_hwhdrlen(
                                nfad)
                            result.append(self.ffi.buffer(hwhdr, hwhdr_len)[:])
                        elif attr == 'prefix':
                            prefix = self.nflog_get_prefix(nfad, no_check=True)
                            result.append(self.ffi.string(prefix)[:])
                        elif attr == 'indev':
                            indev = self.nflog_get_indev(nfad, no_check=True)
                            result.append(indev)
                        elif attr == 'physindev':
                            physindev = self.nflog_get_physindev(nfad,
                                                                 no_check=True)
                            result.append(physindev)
                        elif attr == 'outdev':
                            outdev = self.nflog_get_outdev(nfad, no_check=True)
                            result.append(outdev)
                        elif attr == 'physoutdev':
                            physoutdev = self.nflog_get_physoutdev(
                                nfad, no_check=True)
                            result.append(physoutdev)
                        else:
                            raise NotImplementedError(
                                'Unknown nflog attribute: {}'.format(attr))
                cb_results.append(result)
            except:
                cb_results.append(StopIteration)  # breaks the generator
                raise
            return 0

        # Bind every requested group and attach the callback to each.
        for qid in (qids if not isinstance(qids, int) else [qids]):
            qh = self.nflog_bind_group(handle, qid, check_notnull=True)
            self.nflog_set_mode(qh, self.libnflog.NFULNL_COPY_PACKET, 0xffff)
            if qthresh: self.nflog_set_qthresh(qh, qthresh)
            if timeout: self.nflog_set_timeout(qh, int(timeout * 100))
            if nlbufsiz: self.nflog_set_nlbufsiz(qh, nlbufsiz)
            self.nflog_callback_register(qh, recv_callback, self.ffi.NULL)

        fd = self.nflog_fd(handle)
        if not buff_size:
            if nlbufsiz: buff_size = min(nlbufsiz, 1 * 2**20)
            else: buff_size = 1 * 2**20
        buff = self.ffi.new('char[]', buff_size)

        peek = yield fd  # yield fd for poll() on first iteration
        while True:
            if peek:
                peek = yield NFWouldBlock  # poll/recv is required
                continue

            # Receive/process netlink data, which may contain multiple packets
            try:
                nlpkt_size = self.c_recv(fd, buff, buff_size, 0)
            except NFLogError as err:
                if handle_overflows and err.errno == errno.ENOBUFS:
                    log.warn(
                        'nlbufsiz seem'
                        ' to be insufficient to hold unprocessed packets,'
                        ' consider raising it via corresponding function keyword'
                    )
                    continue
                raise
            self.nflog_handle_packet(handle, buff, nlpkt_size, no_check=True)

            # yield individual L3 packets
            for result in cb_results:
                if result is StopIteration: raise result
                peek = yield result
            cb_results = list()
Example #29
0
def runKernel(opt):
    """Configure the FPGA through ERT command buffers and run the test kernel.

    Allocates two device buffers, fills them with test patterns, sends one
    ERT_CONFIGURE command followed by `count` ERT_START_CU commands, then
    syncs the output buffer back and compares it against the reference.

    :param opt: parsed options object (project-defined) carrying the device
                handle, CU base address, memory bank index and ERT flag.
    :return: 1 on any failure; calls sys.exit() on a data mismatch.
    """
    count = 1024
    DATA_SIZE = sizeof(c_int64) * count

    ffi = FFI()  # create the FFI obj

    # Two device buffers: bo1 receives kernel output, bo2 holds the input.
    boHandle1 = xclAllocBO(opt.handle, DATA_SIZE, xclBOKind.XCL_BO_DEVICE_RAM,
                           opt.first_mem)
    boHandle2 = xclAllocBO(opt.handle, DATA_SIZE, xclBOKind.XCL_BO_DEVICE_RAM,
                           opt.first_mem)

    bo1 = xclMapBO(opt.handle, boHandle1, True)
    bo2 = xclMapBO(opt.handle, boHandle2, True)

    bo1_fp = ffi.cast("FILE *", bo1)
    bo2_fp = ffi.cast("FILE *", bo2)

    # bo1: constant marker pattern.
    bo1_arr = np.array([0x586C0C6C for _ in range(count)])
    ffi.memmove(bo1_fp, ffi.from_buffer(bo1_arr), count * 5)

    # bo2: stringified squares.  list(map(...)) so NumPy sees a real sequence
    # on Python 3 (np.array(map(...)) would produce a 0-d object array).
    int_arr = np.array([i * i for i in range(count)])
    bo2_arr = np.array(list(map(str, int_arr.astype(int))))

    ffi.memmove(bo2_fp, ffi.from_buffer(bo2_arr), count * 7)

    # bufReference: expected device result i*i + i*16, joined into one string.
    int_arr_2 = np.array([i * i + i * 16 for i in range(count)])
    str_arr = np.array(list(map(str, int_arr_2)))
    buf = ffi.from_buffer(str_arr)
    bufReference = ''.join(buf)

    if xclSyncBO(opt.handle, boHandle1,
                 xclBOSyncDirection.XCL_BO_SYNC_BO_TO_DEVICE, DATA_SIZE, 0):
        return 1

    if xclSyncBO(opt.handle, boHandle2,
                 xclBOSyncDirection.XCL_BO_SYNC_BO_TO_DEVICE, DATA_SIZE, 0):
        return 1

    p = xclBOProperties()
    bo1devAddr = p.paddr if not (xclGetBOProperties(opt.handle, boHandle1,
                                                    p)) else -1
    bo2devAddr = p.paddr if not (xclGetBOProperties(opt.handle, boHandle2,
                                                    p)) else -1

    # '==' rather than 'is': identity comparison with ints is unreliable.
    if bo1devAddr == -1 or bo2devAddr == -1:
        return 1

    # Allocate the exec_bo
    execHandle = xclAllocBO(opt.handle, DATA_SIZE,
                            xclBOKind.XCL_BO_SHARED_VIRTUAL, (1 << 31))
    execData = xclMapBO(opt.handle, execHandle, True)  # returns mmap()
    c_f = ffi.cast("FILE *", execData)

    if execData == ffi.NULL:
        print("execData is NULL")

    print("Construct the exe buf cmd to configure FPGA")

    ecmd = ert_configure_cmd()
    ecmd.m_uert.m_cmd_struct.state = 1  # ERT_CMD_STATE_NEW
    ecmd.m_uert.m_cmd_struct.opcode = 2  # ERT_CONFIGURE

    ecmd.slot_size = 1024
    ecmd.num_cus = 1
    ecmd.cu_shift = 16
    ecmd.cu_base_addr = opt.cu_base_addr

    ecmd.m_features.ert = opt.ert
    if opt.ert:
        ecmd.m_features.cu_dma = 1
        ecmd.m_features.cu_isr = 1

    # CU -> base address mapping
    ecmd.data[0] = opt.cu_base_addr
    ecmd.m_uert.m_cmd_struct.count = 5 + ecmd.num_cus

    sz = sizeof(ert_configure_cmd)
    ffi.memmove(c_f, ecmd, sz)
    print("Send the exec command and configure FPGA (ERT)")

    # Send the command.
    if xclExecBuf(opt.handle, execHandle):
        print("Unable to issue xclExecBuf")
        return 1

    print("Wait until the command finish")

    while xclExecWait(opt.handle, 1000) != 0:
        print(".")

    print("Construct the exec command to run the kernel on FPGA")
    # BUG FIX: '%' must apply to the format string, not to print()'s return.
    print("Due to the 1D OpenCL group size, the kernel must be launched %d times"
          % count)

    # construct the exec buffer cmd to start the kernel
    for idx in range(count):
        start_cmd = ert_start_kernel_cmd()
        # '//' keeps these as ints (py3 '/' yields floats, which break both
        # ctypes array sizing and list indexing).
        rsz = XSIMPLE_CONTROL_ADDR_FOO_DATA // 4 + 2  # regmap array size
        new_data = ((start_cmd.data._type_) * rsz)()
        start_cmd.m_uert.m_start_cmd_struct.state = 1  # ERT_CMD_STATE_NEW
        start_cmd.m_uert.m_start_cmd_struct.opcode = 0  # ERT_START_CU
        start_cmd.m_uert.m_start_cmd_struct.count = 1 + rsz
        start_cmd.cu_mask = 0x1

        new_data[XSIMPLE_CONTROL_ADDR_AP_CTRL] = 0x0
        new_data[XSIMPLE_CONTROL_ADDR_GROUP_ID_X_DATA // 4] = idx
        new_data[XSIMPLE_CONTROL_ADDR_S1_DATA //
                 4] = bo1devAddr & 0xFFFFFFFF  # output
        new_data[XSIMPLE_CONTROL_ADDR_S2_DATA //
                 4] = bo2devAddr & 0xFFFFFFFF  # input
        new_data[XSIMPLE_CONTROL_ADDR_FOO_DATA // 4] = 0x10  # foo

        # Copy the command header, then append the register map behind it.
        ffi.memmove(c_f, start_cmd, 2 * sizeof(c_uint32))

        tmp_buf = ffi.buffer(
            c_f, 2 * sizeof(c_uint32) + (len(new_data) * sizeof(c_uint32)))
        data_ptr = ffi.from_buffer(tmp_buf)
        ffi.memmove(data_ptr + 2 * sizeof(c_uint32), new_data,
                    len(new_data) * sizeof(c_uint32))

        if xclExecBuf(opt.handle, execHandle):
            print("Unable to issue xclExecBuf")
            return 1

        print("Wait until the command finish")

        while xclExecWait(opt.handle, 100) != 0:
            print("reentering wait... \n")

    # get the output xclSyncBO
    print("Get the output data from the device")
    if xclSyncBO(opt.handle, boHandle1,
                 xclBOSyncDirection.XCL_BO_SYNC_BO_FROM_DEVICE, DATA_SIZE, 0):
        return 1
    rd_buf = ffi.buffer(bo1_fp, count * 7)
    print("RESULT: ")
    # print(rd_buf[:] + "\n")

    if bufReference != rd_buf[:]:
        print("FAILED TEST")
        print("Value read back does not match value written")
        sys.exit()
Example #30
0
class PynqCapture(VideoInput):
    """VideoInput backend capturing frames from the PYNQ board's HDMI input.

    Captured frames are exposed as numpy uint8 arrays that are zero-copy
    views over the HDMI VDMA framebuffers (mapped via cffi).
    """

    # override
    def _enumerate_sources_func(self):
        """Return the list of selectable capture sources."""
        if IkaUtils.isWindows():
            return self._videoinput_wrapper.get_device_list()
        return ['Device Enumeration not supported']

    # override
    def _initialize_driver_func(self):
        # OpenCV File doesn't need pre-initialization.
        self._cleanup_driver_func()

    # override
    def _cleanup_driver_func(self):
        """Stop HDMI in/out and drop all cffi/framebuffer references."""
        self.lock.acquire()
        try:
            self.ffi = None

            # Drop the numpy views into the VDMA framebuffers *before*
            # stopping the hardware that backs them.  (The original
            # looped "del fb", which only unbinds the loop variable and
            # left the stale views in self.framebuffer.)
            self.framebuffer = None

            if self.hdmi_out is not None:
                self.hdmi_out.stop()
                self.hdmi_out = None

            if self.hdmi_in is not None:
                self.hdmi_in.stop()
                self.hdmi_in = None

            self.reset()
        finally:
            self.lock.release()

    # override
    def _is_active_func(self):
        # Active iff the HDMI input driver has been started successfully.
        return (self.hdmi_in is not None)

    # override
    def _select_device_by_index_func(self, source):
        """Start HDMI capture and map its framebuffers as numpy arrays.

        Returns self.is_active() so callers can check whether the device
        actually came up.
        """
        self._cleanup_driver_func()
        self.lock.acquire()
        try:
            self.ffi = FFI()
            self.hdmi_in = HDMI('in', init_timeout=10)
            self.hdmi_in.start()

            # TODO: under development (HDMI passthrough output)
            if False and self._enable_output:
                self.hdmi_out = HDMI('out', frame_list=self.hdmi_in.frame_list)
                mode = self._select_output_mode(self.hdmi_in.frame_width(), self.hdmi_in.frame_height())
                self.hdmi_out.mode(mode)

            # Give the input a moment to settle before querying geometry.
            time.sleep(1)

            if self.hdmi_out is not None:
                self.hdmi_out.start()

            self.hdmi_in_geom = \
                (self.hdmi_in.frame_width(), self.hdmi_in.frame_height())

            # Wrap each VDMA frame store as an (H, W, 3) uint8 numpy view
            # (3 == sizeof(RGB)); no copy is made.
            self.framebuffer = []
            for i in range(video.VDMA_DICT['NUM_FSTORES']):
                pointer = self.ffi.cast('uint8_t *', self.hdmi_in.frame_addr(i))
                buffer_size = self.hdmi_in_geom[0] * self.hdmi_in_geom[1] * 3
                _bf = self.ffi.buffer(pointer, buffer_size)
                bf = np.frombuffer(_bf, np.uint8).reshape(
                    self.hdmi_in_geom[1], self.hdmi_in_geom[0], 3)
                self.framebuffer.append(bf)

            IkaUtils.dprint('%s: resolution %dx%d' % (self, self.hdmi_in_geom[0], self.hdmi_in_geom[1]))

        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt and
            # SystemExit still propagate.  On any driver error, log the
            # traceback and reset to the inactive state.
            print(traceback.format_exc())
            self.hdmi_in = None
            self.hdmi_out = None
            self.framebuffer = None
            self.ffi = None

        finally:
            self.lock.release()

        self.systime_base = time.time()
        return self.is_active()

    # override
    def _select_device_by_name_func(self, source):
        """Resolve a source name to an index and delegate to the index path."""
        IkaUtils.dprint('%s: Select device by name "%s"' % (self, source))

        try:
            index = self.enumerate_sources().index(source)
        except ValueError:
            IkaUtils.dprint('%s: Input "%s" not found' % (self, source))
            return False

        IkaUtils.dprint('%s: "%s" -> %d' % (self, source, index))
        self._select_device_by_index_func(index)

    # override
    def _get_current_timestamp_func(self):
        # Milliseconds elapsed since the device was (re)selected.
        return int((time.time() - self.systime_base) * 1000)

    # override
    def _read_frame_func(self):
        """Grab one frame using the configured capture mode (1, 2, or default)."""
        t1 = time.time()
        if self._mode == 1 and hasattr(self.hdmi_in, 'frame_raw2'):
            # Modified version of PYNQ library has faster capture function.
            frame = self.hdmi_in.frame_raw2()
        elif self._mode == 2:
            # Zero-copy: return the numpy view of the current VDMA frame store.
            index = self.hdmi_in.frame_index()
            self.hdmi_in.frame_index_next()
            frame = self.framebuffer[index]
        else:
            # This function is supported in original version, but 10X slow.
            # NOTE(review): assumes a 1920x1080 raw frame cropped to 1280x720
            # regardless of the negotiated input geometry -- confirm.
            frame_raw = self.hdmi_in.frame_raw()
            frame = np.frombuffer(frame_raw, dtype=np.uint8)
            frame = frame.reshape(1080, 1920, 3)
            frame = frame[0:720, 0:1280, :]
        t2 = time.time()
        if self._debug:
            print('read_frame_func: %6.6f' % (t2 - t1))
        return frame

    def _select_output_mode(self, width, height):
        """Map a (width, height) pair to the HDMI output mode index.

        Raises:
            Exception: if the frame size has no corresponding output mode.
        """
        if width == 640 and height == 480:
            return 0
        if width == 800 and height == 600:
            return 1
        if width == 1280 and height == 720:
            return 2
        if width == 1280 and height == 1024:
            return 3
        if width == 1920 and height == 1080:
            return 4
        raise Exception("Specific output frame size not supported: %dx%d" % (width, height))

    def __init__(self, enable_output=False, debug=False, mode=2):
        """Create the capture object; no hardware is touched until a device
        is selected.

        Args:
            enable_output: reserved for HDMI passthrough (under development).
            debug: print per-frame capture timing when True.
            mode: capture strategy used by _read_frame_func (2 = zero-copy).
        """
        self.hdmi_in = None
        self.hdmi_out = None
        self.ffi = None
        self.framebuffer = None
        self._enable_output = enable_output
        self._debug = debug
        self._mode = mode

        IkaUtils.dprint(
            '%s: debug %s enable_output %s mode %s' %
            (self, self._debug, self._enable_output, self._mode))

        super(PynqCapture, self).__init__()
Example #31
0
from pynq import Overlay
# Load the FPGA bitstream containing the filter accelerator.
ol = Overlay("biepincs.bit")
ol.download()
ol.bitstream.timestamp
from pynq.drivers import DMA
import numpy as np
from cffi import FFI
ffi = FFI()
W = 50   # square image side in pixels (result is reshaped to (W, W) below)
CH = 3   # bytes per input pixel
# Two DMA channels on the same AXI DMA controller at 0x40400000.
# NOTE(review): the second argument presumably selects the direction
# (1 = device-to-host "out", 0 = host-to-device "in") -- confirm against
# the pynq.drivers.DMA API.
dmaOut = DMA(0x40400000, 1)
dmaIn = DMA(0x40400000, 0)
dmaIn.create_buf(W * W * CH)   # 1 threshold byte + image fits here (see image_filter)
dmaOut.create_buf(W * W)       # single-channel result
# Raw uint8 pointers into the DMA buffers for byte-level access via cffi.
pointIn = ffi.cast("uint8_t *", dmaIn.get_buf())
pointOut = ffi.cast("uint8_t *", dmaOut.get_buf())
# Python-readable view over the output DMA buffer (W*W bytes).
c_buffer = ffi.buffer(pointOut, W * W)


def image_filter(th, image_in):
    """Run the hardware image filter on *image_in* with threshold *th*.

    The input DMA buffer layout is one threshold byte followed by the
    W*W*CH image bytes; the hardware writes a W*W single-channel result
    into the output DMA buffer, returned as a (W, W) uint8 array.

    NOTE(review): assumes image_in is a W x W x CH uint8 array
    (e.g. an OpenCV BGR image) -- confirm against callers.
    """
    pointIn[0] = th
    # Copy so from_buffer() sees a private, contiguous buffer.
    image = image_in.copy()
    pointerToCvimage = ffi.cast("uint8_t *", ffi.from_buffer(image))
    # Stage threshold (byte 0) + image (bytes 1..) into the input DMA buffer.
    ffi.memmove(pointIn + 1, pointerToCvimage, W * W * CH)
    dmaOut.transfer(W * W, 1)
    dmaIn.transfer(W * W * CH, 0)
    # NOTE(review): only the output channel is waited on; presumably the
    # output transfer cannot complete before the input transfer has --
    # confirm, otherwise a dmaIn.wait() may be needed here.
    dmaOut.wait()
    result = np.frombuffer(c_buffer, count=W * W, dtype=np.uint8)
    result = result.reshape(W, W)
    return result
Example #32
0
# Walk the NULL-terminated array of vector names for this plot and dump
# each vector's metadata plus its real- or complex-valued data.
# (Python 2 syntax: print statements, xrange; i is initialized above.)
while (True):
    if all_vectors[i] == ffi.NULL:
        break  # end of the NULL-terminated name array
    else:
        vector_name = ffi.string(all_vectors[i])
        # Vectors are addressed as "<plot>.<vector>" in the ngspice API.
        vector_info = ngspice_shared.ngGet_Vec_Info('.'.join((plot_name, vector_name)))
        length = vector_info.v_length
        print "vector[{}] {} type {} flags {} length {}".format(i,
                                                                vector_name,
                                                                vector_info.v_type,
                                                                vector_info.v_flags,
                                                                length)
        if vector_info.v_compdata == ffi.NULL:
            print "  real data"
            # for k in xrange(length):
            #     print "  [{}] {}".format(k, vector_info.v_realdata[k])
            # Zero-copy view over the C double array (8 bytes per sample).
            real_array = np.frombuffer(ffi.buffer(vector_info.v_realdata, length*8), dtype=np.float64)
            print real_array
        else:
            print "  complex data"
            for k in xrange(length):
                value = vector_info.v_compdata[k]
                print "  [{}] {} + i {}".format(k, value.cx_real, value.cx_imag)
    i += 1

####################################################################################################
# 
# End
# 
####################################################################################################
Example #33
0
# Create an ISO 19794 fingerprint template from the raw image and write
# it to the output file.  Every SGFPM_* call returns 0 on success.
r = lib.SGFPM_SetTemplateFormat(fpm[0], lib.TEMPLATE_FORMAT_ISO19794)
if r != 0:
    raise Exception("SGFPM_SetTemplateFormat() returned {}".format(r))

# Query the worst-case template size so the output buffer can be allocated.
max_size = ffi.new("DWORD*")
r = lib.SGFPM_GetMaxTemplateSize(fpm[0], max_size)
if r != 0:
    raise Exception("SGFPM_GetMaxTemplateSize() returned {}".format(r))

fp_info = ffi.new(
    "SGFingerInfo*", {"FingerNumber": lib.SG_FINGPOS_UK, "ViewNumber": 0, "ImpressionType": lib.SG_IMPTYPE_LP, "ImageQuality": 0}
)
# Pass the numpy image's underlying buffer directly to the C API.
raw_image = ffi.cast('BYTE*', raw.ctypes.data)
t = ffi.new("BYTE[]", max_size[0])
r = lib.SGFPM_CreateTemplate(fpm[0], fp_info, raw_image, t)
if r != 0:
    raise Exception("SGFPM_CreateTemplate() returned {}".format(r))

# The generated template is usually smaller than the maximum; ask for
# its actual size so only the meaningful bytes are written out.
size = ffi.new("DWORD*")
r = lib.SGFPM_GetTemplateSize(fpm[0], t, size)
if r != 0:
    raise Exception("SGFPM_GetTemplateSize() returned {}".format(r))

data = ffi.buffer(t)[0 : size[0]]
lib.SGFPM_Terminate(fpm[0])

# Context manager guarantees the file is closed even if write() raises
# (the original opened/closed manually and could leak the handle).
with open(args.output_file, 'wb') as of:
    of.write(data)
class NFLOG(object):
    """Singleton cffi wrapper around libnflog (netfilter packet logging)."""

    # Shared singleton slot; __new__ always returns this instance once set.
    _instance = None

    def __new__(cls):
        if not cls._instance:
            cls._instance = super(NFLOG, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        """Compile/load libnflog via cffi and prepare the call cache.

        NOTE(review): __init__ re-runs on every NFLOG() call even though
        __new__ returns the singleton, and the module-level source globals
        are cleared below -- a second construction would fail; confirm
        single-construction use is intended.
        """
        global _cdef, _clibs_includes, _clibs_link
        self.ffi = FFI()
        self.ffi.cdef(_cdef)
        self.libnflog = self.ffi.verify(_clibs_includes, libraries=list(_clibs_link))
        self.libnflog_cache = dict()
        # Release the (potentially large) source strings once compiled.
        _cdef = _clibs_includes = _clibs_link = None

    def _ffi_call(self, func, args, no_check=False, check_gt0=False, check_notnull=False):
        """Call a libnflog function through cffi, checking the return value
        and raising NFLogError (with errno) on failure.

        By default any return >= 0 is accepted; check_gt0 requires > 0,
        check_notnull requires a truthy (non-NULL) result, and no_check
        skips validation entirely."""
        res = func(*args)
        if no_check or (check_gt0 and res > 0) or (check_notnull and res) or res >= 0:
            return res
        errno_ = self.ffi.errno
        raise NFLogError(errno_, os.strerror(errno_))

    def __getattr__(self, k):
        # Expose library symbols as methods: "nflog_*" names map directly,
        # "c_*" names have the prefix stripped (e.g. c_recv -> recv).
        # Wrappers are cached and route through _ffi_call for error checks.
        if not (k.startswith("nflog_") or k.startswith("c_")):
            return super(NFLOG, self).__getattr__(k)
        if k.startswith("c_"):
            k = k[2:]
        if k not in self.libnflog_cache:
            func = getattr(self.libnflog, k)
            self.libnflog_cache[k] = lambda *a, **kw: self._ffi_call(func, a, **kw)
        return self.libnflog_cache[k]

    def generator(
        self,
        qids,
        pf=(socket.AF_INET, socket.AF_INET6),
        qthresh=None,
        timeout=None,
        nlbufsiz=None,
        buff_size=None,
        extra_attrs=None,
        handle_overflows=True,
    ):
        """Generator that yields captured packets.

        On the first iteration it yields the netlink fd, which can be
        poll'ed or integrated into an event loop (twisted, gevent, ...);
        that is also the point where uid/gid/caps can be dropped.  On all
        subsequent iterations it does recv() on that fd, yielding either
        None (if no packet can be assembled yet) or the captured packet
        payload.

        qids: nflog group ids to bind to (nflog_bind_group).

        Keywords:
            pf: address families to pass to nflog_bind_pf.
            extra_attrs: metadata to extract from captured packets,
                returned in a list after packet payload, in the same order.
            nlbufsiz (bytes): size of the netlink socket buffer for the
                created queues.
            qthresh (packets): maximum amount of logs buffered per group.
            timeout (seconds): maximum time to push the log buffer for
                this group.
            buff_size (bytes): size of the batch to fetch from libnflog
                to process in python (default: min(nlbufsiz, 1 MiB)).
            handle_overflows: suppress ENOBUFS NFLogError on queue
                overflows (but do log warnings, default: True)."""

        handle = self.nflog_open(check_notnull=True)

        # Re-bind each requested address family (unbind first to clear
        # any existing binding for that family).
        for pf in pf if not isinstance(pf, int) else [pf]:
            self.nflog_unbind_pf(handle, pf)
            self.nflog_bind_pf(handle, pf)

        if isinstance(extra_attrs, bytes):
            extra_attrs = [extra_attrs]

        # Packets assembled by the C callback are accumulated here and
        # drained after each nflog_handle_packet() call below.
        cb_results = list()

        # C callback invoked by libnflog once per packet; the extra
        # default arguments act as pre-allocated per-callback storage.
        @self.ffi.callback("nflog_callback")
        def recv_callback(
            qh,
            nfmsg,
            nfad,
            data,
            extra_attrs=extra_attrs,
            ts_slot=self.ffi.new("struct timeval *"),
            pkt_slot=self.ffi.new("char **"),
            ts_err_mask=frozenset([0, errno.EAGAIN]),
            result=None,
        ):
            try:
                pkt_len = self.nflog_get_payload(nfad, pkt_slot)
                # [:] copies the payload bytes out of the C buffer.
                result = self.ffi.buffer(pkt_slot[0], pkt_len)[:]
                if extra_attrs:
                    result = [result]
                    for attr in extra_attrs:
                        if attr == "len":
                            result.append(pkt_len)
                        elif attr == "ts":
                            # Fails quite often (EAGAIN, SUCCESS, ...), not sure why
                            try:
                                self.nflog_get_timestamp(nfad, ts_slot)
                            except NFLogError as err:
                                if err.errno not in ts_err_mask:
                                    raise
                                result.append(None)
                            else:
                                result.append(ts_slot.tv_sec + ts_slot.tv_usec * 1e-6)
                        else:
                            raise NotImplementedError("Unknown nflog attribute: {}".format(attr))
                cb_results.append(result)
            except:
                cb_results.append(StopIteration)  # breaks the generator
                raise
            return 0

        # Bind each group and configure its copy mode / buffering knobs.
        for qid in qids if not isinstance(qids, int) else [qids]:
            qh = self.nflog_bind_group(handle, qid, check_notnull=True)
            self.nflog_set_mode(qh, self.libnflog.NFULNL_COPY_PACKET, 0xFFFF)
            if qthresh:
                self.nflog_set_qthresh(qh, qthresh)
            if timeout:
                # libnflog takes the timeout in 1/100ths of a second.
                self.nflog_set_timeout(qh, int(timeout * 100))
            if nlbufsiz:
                self.nflog_set_nlbufsiz(qh, nlbufsiz)
            self.nflog_callback_register(qh, recv_callback, self.ffi.NULL)

        fd = self.nflog_fd(handle)
        if not buff_size:
            if nlbufsiz:
                buff_size = min(nlbufsiz, 1 * 2 ** 20)
            else:
                buff_size = 1 * 2 ** 20
        buff = self.ffi.new("char[]", buff_size)

        peek = yield fd  # yield fd for poll() on first iteration
        while True:
            if peek:
                peek = yield NFWouldBlock  # poll/recv is required
                continue

                # Receive/process netlink data, which may contain multiple packets
            try:
                nlpkt_size = self.c_recv(fd, buff, buff_size, 0)
            except NFLogError as err:
                if handle_overflows and err.errno == errno.ENOBUFS:
                    log.warn(
                        "nlbufsiz seem"
                        " to be insufficient to hold unprocessed packets,"
                        " consider raising it via corresponding function keyword"
                    )
                    continue
                raise
            # Dispatches recv_callback once per packet in the batch.
            self.nflog_handle_packet(handle, buff, nlpkt_size, no_check=True)

            # yield individual L3 packets
            for result in cb_results:
                if result is StopIteration:
                    # NOTE(review): raising StopIteration inside a generator
                    # is converted to RuntimeError by PEP 479 (Python 3.7+)
                    # -- confirm this still "breaks the generator" as intended.
                    raise result
                peek = yield result
            cb_results = list()
Example #35
0
	OozMem OozDecompressBundleAlloc(uint8_t const* src_data, size_t src_size);
""")

ooz = ffi.dlopen("oozlib.dll")

# Tiny CLI: "block <file> <uncompressed_size>" or "bundle <file>".
# Decompressed output is written to stdout as raw bytes.
cmd = sys.argv[1]
if cmd == 'block':
    # Raw blocks do not store their size, so the caller must supply it.
    filename = sys.argv[2]
    uncompressed_size = int(sys.argv[3])
    with open(filename, 'rb') as f:
        data = f.read()
        unpacked_data = ffi.new("uint8_t[]", uncompressed_size)
        unpacked_size = ooz.OozDecompressBlock(data, len(data), unpacked_data,
                                               uncompressed_size)
        if unpacked_size != uncompressed_size:
            # BUG FIX: was printf(...), which raises NameError in Python.
            print("Could not decompress block", file=sys.stderr)
            exit(1)
        sys.stdout.buffer.write(ffi.buffer(unpacked_data))

elif cmd == 'bundle':
    # Bundles carry their own size; the library allocates the output
    # buffer, which must be released via OozMemFree.
    filename = sys.argv[2]
    with open(filename, 'rb') as f:
        data = f.read()
        bundle_mem = ooz.OozDecompressBundleAlloc(data, len(data))
        if bundle_mem:
            size = ooz.OozMemSize(bundle_mem)
            sys.stdout.buffer.write(ffi.buffer(bundle_mem, size))
            ooz.OozMemFree(bundle_mem)
        else:
            print("Could not decompress bundle", file=sys.stderr)
            exit(1)