Ejemplo n.º 1
0
    def make_img(self):
        """Assemble the file-system image and write it to './hard_disk'.

        Layout: padding gap, superblock, maps/inode blocks (initialized in
        __init__), then a root directory holding '.' and '..' entries; the
        completed in-memory buffer is finally dumped to disk.
        """
        disk_sz = os.path.getsize(self.disk)
        # although it is append at end, for they are all zero, so it's same
        # TODO produce padding file and disk separately
        super_gap = self.fill_gap(disk_sz)
        print(super_gap, disk_sz)
        print(super_gap + disk_sz)
        self.init_super(super_gap, disk_sz)
        # it is done in __init__
        # self.init_map()
        # self.init_inode_block()
        # init root directory
        # write '.' '..' in block, ie a dir
        inode_o, block_o = self.make_file(FileType.DIR, disk_sz)

        # '.' entry: points back at the root directory's own inode.
        buf1 = bytearray(sizeof(DirEntry))
        current = DirEntry.from_buffer(buf1)
        current.filename = b'.'
        current.inode_off = inode_o + disk_sz
        self.write_to_file(inode_o, block_o, buf1)

        # '..' entry: root is its own parent, so it reuses the same inode.
        buf2 = bytearray(sizeof(DirEntry))
        father = DirEntry.from_buffer(buf2)
        father.filename = b'..'
        father.inode_off = inode_o + disk_sz
        self.write_to_file(inode_o, block_o + sizeof(current), buf2)

        # Persist the fully built image buffer.
        with open('./hard_disk', 'wb') as img:
            img.write(bytearray(self.buf))
Ejemplo n.º 2
0
    def make_img(self):
        """Build the file-system image in memory and dump it to './hard_disk'.

        Steps: compute the padding gap, write the superblock, create the
        root directory file, fill in its '.' and '..' DirEntry records, and
        write the resulting buffer out.
        """
        disk_sz = os.path.getsize(self.disk)
        # although it is append at end, for they are all zero, so it's same
        # TODO produce padding file and disk separately
        super_gap = self.fill_gap(disk_sz)
        print(super_gap, disk_sz)
        print(super_gap + disk_sz)
        self.init_super(super_gap, disk_sz)
        # it is done in __init__
        # self.init_map()
        # self.init_inode_block()
        # init root directory
        # write '.' '..' in block, ie a dir
        inode_o, block_o = self.make_file(FileType.DIR, disk_sz)

        # '.' entry references the root's own inode.
        buf1 = bytearray(sizeof(DirEntry))
        current = DirEntry.from_buffer(buf1)
        current.filename = b'.'
        current.inode_off = inode_o + disk_sz
        self.write_to_file(inode_o, block_o, buf1)

        # '..' entry: root has no parent, so it points at itself too.
        buf2 = bytearray(sizeof(DirEntry))
        father = DirEntry.from_buffer(buf2)
        father.filename = b'..'
        father.inode_off = inode_o + disk_sz
        self.write_to_file(inode_o, block_o + sizeof(current), buf2)

        # Write the finished image buffer to disk.
        with open('./hard_disk', 'wb') as img:
            img.write(bytearray(self.buf))
Ejemplo n.º 3
0
def inet_ntop(address_family, packed_ip, encoding="UTF-8"):
    """Convert a packed IPv4/IPv6 address to its string form on Windows.

    Fallback for socket.inet_ntop implemented via WSAAddressToStringA.

    :param address_family: socket.AF_INET or socket.AF_INET6
    :param packed_ip: 4-byte (IPv4) or 16-byte (IPv6) packed address
    :param encoding: codec used to decode the ANSI result buffer
    :raises socket.error: on wrong packed length, unknown family, or
        WSAAddressToStringA failure
    """
    addr = sockaddr()
    addr.sa_family = address_family
    addr_size = c_int(sizeof(addr))
    ip_string = create_string_buffer(128)
    # BUGFIX: the in/out length argument must describe the *output* string
    # buffer, not the sockaddr struct; using sizeof(addr) here could make
    # WSAAddressToStringA fail or truncate long IPv6 representations.
    ip_string_size = c_int(sizeof(ip_string))

    if address_family == socket.AF_INET:
        if len(packed_ip) != sizeof(addr.ipv4_addr):
            raise socket.error('packed IP wrong length for inet_ntop')
        memmove(addr.ipv4_addr, packed_ip, 4)
    elif address_family == socket.AF_INET6:
        if len(packed_ip) != sizeof(addr.ipv6_addr):
            raise socket.error('packed IP wrong length for inet_ntop')
        memmove(addr.ipv6_addr, packed_ip, 16)
    else:
        raise socket.error('unknown address family')

    # Non-zero return signals failure; FormatError() renders GetLastError().
    if WSAAddressToStringA(byref(addr),
                           addr_size,
                           None,
                           ip_string,
                           byref(ip_string_size)) != 0:
        raise socket.error(FormatError())

    # On success ip_string_size includes the terminating NUL; strip it.
    return (ip_string[:ip_string_size.value - 1]).decode(encoding)
Ejemplo n.º 4
0
    def __call__(self, input, u, v):
        """Run the OpenCL kernel over *input* with flow fields *u* and *v*.

        Uploads the three input arrays asynchronously, binds them plus a
        fresh output buffer as kernel args 0-3, launches the kernel, and
        reads the result back into a new Array.

        NOTE(review): assumes input/u/v expose `.data` ndarrays of
        compatible shapes -- confirm against callers. `input` shadows the
        builtin of the same name.
        """
        output = zeros_like(input.data)
        events = []
        # Non-blocking uploads; completion events are collected so all
        # transfers can be awaited at once before the kernel launch.
        in_buf, in_evt = buffer_from_ndarray(self.queue, input.data,
                                             blocking=False)
        events.append(in_evt)
        self.kernel.setarg(0, in_buf, sizeof(cl_mem))

        u_buf, u_evt = buffer_from_ndarray(self.queue, u.data, blocking=False)
        events.append(u_evt)
        self.kernel.setarg(1, u_buf, sizeof(cl_mem))

        v_buf, v_evt = buffer_from_ndarray(self.queue, v.data, blocking=False)
        events.append(v_evt)
        self.kernel.setarg(2, v_buf, sizeof(cl_mem))

        out_buf, out_evt = buffer_from_ndarray(self.queue, output,
                                               blocking=False)
        events.append(out_evt)
        self.kernel.setarg(3, out_buf, sizeof(cl_mem))
        # All uploads must have completed before the kernel may run.
        clWaitForEvents(*events)
        evt = clEnqueueNDRangeKernel(self.queue, self.kernel, self.global_size)
        evt.wait()
        # Copy the device result back into the host `output` array.
        _, evt = buffer_to_ndarray(self.queue, out_buf, output)
        evt.wait()
        return Array(unique_name(), output)
    def test_set_short_out_of_range(self):
        """Writing a short at any out-of-range offset must raise ValueError.

        Covers: offset exactly past the end, offset one byte before the end
        (the two-byte write would straddle the boundary), and a negative
        offset.
        """
        uc = UserConfiguration()
        bad_offsets = (
            sizeof(UserConfiguration),
            sizeof(UserConfiguration) - 1,
            -1,
        )
        for bad_offset in bad_offsets:
            with self.assertRaises(ValueError):
                uc._set_short_at(bad_offset, 1)
Ejemplo n.º 6
0
    def test_set_short_out_of_range(self):
        """_set_short_at must reject offsets where a 2-byte write would fall
        outside the structure: past the end, straddling the end, or negative.
        """
        uc = UserConfiguration()
        with self.assertRaises(ValueError):
            uc._set_short_at(sizeof(UserConfiguration), 1)

        # One byte before the end still straddles the boundary.
        with self.assertRaises(ValueError):
            uc._set_short_at(sizeof(UserConfiguration) - 1, 1)

        with self.assertRaises(ValueError):
            uc._set_short_at(-1, 1)
Ejemplo n.º 7
0
    def __call__(self, im):
        """Apply the OpenCL kernel to image *im* and return a new Array.

        Uploads im.data to the device, binds it as arg 0 and a freshly
        created device buffer as arg 1, runs the kernel, then copies the
        result back to host memory.
        """
        output = zeros_like(im.data)
        in_buf, evt = buffer_from_ndarray(self.queue, im.data, blocking=False)
        evt.wait()
        self.kernel.setarg(0, in_buf, sizeof(cl_mem))

        # Output-only buffer: nothing needs to be uploaded for it.
        out_buf = clCreateBuffer(self.context, output.nbytes)
        self.kernel.setarg(1, out_buf, sizeof(cl_mem))
        evt = clEnqueueNDRangeKernel(self.queue, self.kernel, self.global_size)
        evt.wait()
        _, evt = buffer_to_ndarray(self.queue, out_buf, output)
        evt.wait()
        # Drop buffer references promptly to release device memory.
        del in_buf
        del out_buf
        return Array(unique_name(), output)
Ejemplo n.º 8
0
def create_unicode_buffer(init, size=None):
    """create_unicode_buffer(aString) -> character array
    create_unicode_buffer(anInteger) -> character array
    create_unicode_buffer(aString, anInteger) -> character array
    """
    if isinstance(init, str):
        if size is None:
            if sizeof(c_wchar) == 2:
                # UTF-16 build: characters outside the BMP (> U+FFFF) need a
                # surrogate pair, i.e. two wchar_t each; +1 for trailing NUL.
                size = 1 + sum(2 if ord(ch) > 0xFFFF else 1 for ch in init)
            else:
                # UCS-4 build: one wchar_t per character; +1 for trailing NUL.
                size = len(init) + 1
        _sys.audit("ctypes.create_unicode_buffer", init, size)
        result = (c_wchar * size)()
        result.value = init
        return result
    if isinstance(init, int):
        _sys.audit("ctypes.create_unicode_buffer", None, init)
        return (c_wchar * init)()
    raise TypeError(init)
    def _callback(self, message):
        """Handle one taskstats netlink message (Python 2 code).

        Unwraps the AGGR container attribute, then prints the dead task's
        pid/tgid and dumps its version-1 Taskstats structure.
        """
        nlhdr = message.hdr()
        if not nlhdr.valid_hdr():
            raise Exception('Internal error')
        for attr in nlhdr.hdr():
            attr_type = attr.type()

            # The kernel nests the per-task payload inside an AGGR container.
            if attr_type not in (TASKSTATS_TYPE_AGGR_PID, TASKSTATS_TYPE_AGGR_TGID):
                raise Exception('Nested (outer) attr is of invalid type', attr_type)

            for attr in attr.attributes():
                attr_type = attr.type()

                if attr_type == TASKSTATS_TYPE_PID:
                    print 'Dead pid is:', attr.u32
                    continue

                if attr_type == TASKSTATS_TYPE_TGID:
                    print 'Dead tgid is:', attr.u32
                    continue

                if attr_type == TASKSTATS_TYPE_STATS:
                    # Guard against a truncated payload before overlaying the
                    # version-1 Taskstats structure onto it.
                    length = attr.len()
                    size = sizeof(Taskstats_version_1)
                    if length < size:
                        raise ValueError('Not enought data to build structure. Required at least %d, passed %d', size,
                                         length)
                    data = attr.data()
                    #noinspection PyUnresolvedReferences
                    info = Taskstats_version_1.from_address(data)
                    info.dump()
                    continue

                raise Exception('Unknown type in inner attributes', attr_type)
            print '-' * 80
Ejemplo n.º 10
0
    def test_mmap_address(self):
        """A ctypes object built from a mmap's base address must share
        memory with the mmap buffer: writes through either side are visible
        through the other. (Python 2 code: str assigned into mmap.)
        """
        import mmap
        import _multiprocessing

        # This is a bit faster than importing ctypes
        import _ctypes
        class c_double(_ctypes._SimpleCData):
            _type_ = "d"
        sizeof_double = _ctypes.sizeof(c_double)

        buf = mmap.mmap(-1, 300)
        buf[0:300] = '\0' * 300

        # Get the address of shared memory
        address, length = _multiprocessing.address_of_buffer(buf)
        assert length == 300

        # build a ctypes object from it
        var = c_double.from_address(address)
        assert buf[0:sizeof_double] == '\0' * sizeof_double
        assert var.value == 0

        # check that both objects share the same memory
        var.value = 123.456
        assert buf[0:sizeof_double] != '\0' * sizeof_double
        buf[0:sizeof_double] = '\0' * sizeof_double
        assert var.value == 0
Ejemplo n.º 11
0
    def to_c_mech(self):
        """
        Create the Param structure, then convert the data into byte arrays.

        Fills a CK_PRF_KDF_PARAMS from self.params (keys: 'prf_type',
        'label', 'context', 'counter', 'encoding_scheme') and attaches it
        to the mechanism prepared by the superclass.

        :return: :class:`~pypkcs11.cryptoki.CK_MECHANISM`

        """
        super(PRFKDFDeriveMechanism, self).to_c_mech()
        params = CK_PRF_KDF_PARAMS()
        params.prfType = self.params['prf_type']
        # Absent label/context are encoded as empty data with zero length.
        if self.params['label'] is None:
            label = ''
            label_len = 0
        else:
            label, label_len = to_byte_array(self.params['label'])
        if self.params['context'] is None:
            context = ''
            context_len = 0
        else:
            context, context_len = to_byte_array(self.params['context'])
        # Counter defaults to 1 when the caller did not provide one.
        if self.params['counter'] is None:
            counter = 1
        else:
            counter = self.params['counter']
        ul_encoding_scheme = self.params['encoding_scheme']

        params.pLabel = cast(label, CK_BYTE_PTR)
        params.ulLabelLen = label_len
        params.pContext = cast(context, CK_BYTE_PTR)
        params.ulContextLen = context_len
        params.ulCounter = counter
        params.ulEncodingScheme = ul_encoding_scheme
        # NOTE(review): the mech keeps only a raw pointer to the local
        # `params` -- presumably callers consume the mech before this frame's
        # objects can be collected; confirm lifetime handling.
        self.mech.pParameter = cast(pointer(params), c_void_p)
        self.mech.ulParameterLen = CK_ULONG(sizeof(params))
        return self.mech
 def _set_short_at(self, offset, value):
     """Write *value* as a little-endian unsigned short at byte *offset*.

     :raises ValueError: if the two-byte write would fall outside this
         ctypes structure (negative offset or past the end).
     """
     if not 0 <= offset <= sizeof(self) - 2:
         raise ValueError
     packed = struct.pack('<H', value)
     memmove(addressof(self) + offset, packed, 2)
Ejemplo n.º 13
0
def _check_size(typ, typecode = None):
    """Verify that ctypes and the struct module agree on the size of *typ*.

    A mismatch would indicate a misconfigured libffi, so it is reported as
    a SystemError rather than a normal exception.

    :param typ: a ctypes simple type
    :param typecode: struct format character; defaults to typ._type_
    :raises SystemError: when sizeof(typ) != struct.calcsize(typecode)
    """
    from struct import calcsize
    code = typ._type_ if typecode is None else typecode
    got = sizeof(typ)
    expected = calcsize(code)
    if got != expected:
        raise SystemError('sizeof(%s) wrong: %d instead of %d' % (typ, got, expected))
Ejemplo n.º 14
0
    def _receive_async(self, callback=None, bufsize=Defaults.PACKET_BUFFER_SIZE):
        """
        Receives a diverted packet that matched the filter passed to the handle constructor asynchronously.

        The remapped function is WinDivertRecvEx:

        BOOL WinDivertRecvEx(
            __in HANDLE handle,
            __out PVOID pPacket,
            __in UINT packetLen,
            __in UINT64 flags,
            __out_opt PWINDIVERT_ADDRESS pAddr,
            __out_opt UINT *recvLen,
            __inout_opt LPOVERLAPPED lpOverlapped
        );

        For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_recv_ex
        """
        if not hasattr(self._lib, "WinDivertRecvEx"):
            raise MethodUnsupportedException("Async receive is not supported with this version of WinDivert")

        # FuturePacket owns the packet buffer, address struct, recv-length
        # slot and OVERLAPPED structure used by the async call below.
        future = FuturePacket(self._handle, callback=callback, bufsize=bufsize)

        retcode = self._lib.WinDivertRecvEx(self._handle, byref(future.packet), sizeof(future.packet), 0,
                                            byref(future.address),
                                            byref(future.recv_len),
                                            byref(future.overlapped))
        last_error = GetLastError()
        # For overlapped I/O, FALSE + ERROR_IO_PENDING is the expected
        # "operation queued" outcome; anything else is a real failure.
        if not retcode and last_error == ErrorCodes.ERROR_IO_PENDING:
            return future.get_result()
        else:
            raise AsyncCallFailedException(
                "Async receive failed with retcode %d and LastError %d" % (retcode, last_error))
Ejemplo n.º 15
0
    def test_mmap_address(self):
        """Check that a ctypes object mapped onto a mmap's address shares
        the same memory as the mmap buffer. (Python 2 code: str into mmap.)
        """
        import mmap
        import _multiprocessing

        # This is a bit faster than importing ctypes
        import _ctypes

        class c_double(_ctypes._SimpleCData):
            _type_ = "d"

        sizeof_double = _ctypes.sizeof(c_double)

        buf = mmap.mmap(-1, 300)
        buf[0:300] = '\0' * 300

        # Get the address of shared memory
        address, length = _multiprocessing.address_of_buffer(buf)
        assert length == 300

        # build a ctypes object from it
        var = c_double.from_address(address)
        assert buf[0:sizeof_double] == '\0' * sizeof_double
        assert var.value == 0

        # check that both objects share the same memory
        var.value = 123.456
        assert buf[0:sizeof_double] != '\0' * sizeof_double
        buf[0:sizeof_double] = '\0' * sizeof_double
        assert var.value == 0
Ejemplo n.º 16
0
 def _set_short_at(self, offset, value):
     """Write *value* at byte *offset* as a little-endian unsigned short.

     :raises ValueError: if the 2-byte write would not fit inside this
         ctypes structure (negative offset or past the end).
     """
     # Highest offset at which two bytes still fit.
     max_offset = sizeof(self) - 2
     if offset < 0 or offset > max_offset:
         raise ValueError
     addr = addressof(self) + offset
     value_string = struct.pack('<H', value)
     memmove(addr, value_string, 2)
Ejemplo n.º 17
0
def _check_size(typ, typecode=None):
    """Raise SystemError if ctypes' sizeof(typ) disagrees with the struct
    module's calcsize for the corresponding format character.

    :param typ: a ctypes simple type
    :param typecode: struct format character; defaults to typ._type_
    """
    from struct import calcsize
    if typecode is None:
        typecode = typ._type_
    actual, required = sizeof(typ), calcsize(typecode)
    if actual != required:
        raise SystemError("sizeof(%s) wrong: %d instead of %d" % \
                          (typ, actual, required))
Ejemplo n.º 18
0
 def __init__(self, *args, **kwargs):
     """Parse the sample header eagerly so the elapsed run time is known
     before particle generation."""
     super(OptaaSampleDataParticle, self).__init__(*args, **kwargs)
     # for playback, we want to obtain the elapsed run time prior to generating
     # the particle, so we'll go ahead and parse the header on object creation
     self.header = OptaaSampleHeader.from_string(self.raw_data)
     # Each wavelength carries 4 big-endian unsigned shorts; the payload
     # starts immediately after the fixed-size header.
     self.data = struct.unpack_from(
         '>%dH' % (self.header.num_wavelengths * 4), self.raw_data,
         sizeof(self.header))
     # Elapsed run time is split across two 16-bit header fields.
     self.elapsed = (self.header.time_high << 16) + self.header.time_low
Ejemplo n.º 19
0
def _check_size(typ, typecode=None):
    """Raise SystemError when sizeof(typ) differs from struct.calcsize."""
    # Check if sizeof(ctypes_type) against struct.calcsize.  This
    # should protect somewhat against a misconfigured libffi.
    from struct import calcsize
    if typecode is None:
        # Most _type_ codes are the same as used in struct
        typecode = typ._type_
    actual, required = sizeof(typ), calcsize(typecode)
    if actual != required:
        raise SystemError("sizeof(%s) wrong: %d instead of %d" % \
                          (typ, actual, required))
Ejemplo n.º 20
0
def _check_size(typ, typecode=None):
    """Cross-check a ctypes type's size against the struct module."""
    # Check if sizeof(ctypes_type) against struct.calcsize.  This
    # should protect somewhat against a misconfigured libffi.
    from struct import calcsize
    if typecode is None:
        # Most _type_ codes are the same as used in struct
        typecode = typ._type_
    actual, required = sizeof(typ), calcsize(typecode)
    if actual != required:
        raise SystemError("sizeof(%s) wrong: %d instead of %d" % \
                          (typ, actual, required))
Ejemplo n.º 21
0
 def process_inputs(self, *args):
     """Upload each input array to an OpenCL device buffer and bind it as
     the corresponding kernel argument; return the list of device buffers.

     Accepts project Array wrappers (unwrapped via .data) or raw ndarrays.
     """
     events = []
     processed = []
     # Every kernel parameter is a cl_mem handle.
     self.kernel.argtypes = tuple(cl_mem for _ in args)
     for index, arg in enumerate(args):
         if isinstance(arg, types.common.Array):
             arg = arg.data
         # Non-blocking upload; all transfer events are awaited together.
         buf, evt = buffer_from_ndarray(self.queue, arg, blocking=False)
         processed.append(buf)
         events.append(evt)
         self.kernel.setarg(index, buf, sizeof(cl_mem))
     clWaitForEvents(*events)
     return processed
Ejemplo n.º 22
0
    def to_c_mech(self):
        """
        Create the Param structure, then convert the data into byte arrays.

        :return: :class:`~pypkcs11.cryptoki.CK_MECHANISM`
        """
        super(EcdsaBipDeriveMechanism, self).to_c_mech()
        params = DYCK_DERIVE_ECDSA_BIP_PARAMS()
        # BIP32-style derivation parameters: hardened flag + child index.
        params.hardened = CK_BBOOL(self.params['hardened'])
        params.ulChildNumber = CK_ULONG(self.params['ulChildNumber'])
        # NOTE(review): the mech keeps a raw pointer to the local `params`;
        # confirm the caller consumes the mech before it can be collected.
        self.mech.pParameter = cast(pointer(params), c_void_p)
        self.mech.ulParameterLen = CK_ULONG(sizeof(params))
        return self.mech
Ejemplo n.º 23
0
def inet_ntop(address_family, packed_ip, encoding="UTF-8"):
    """Convert a packed IPv4/IPv6 address to a string (Windows fallback).

    Implemented on top of WSAAddressToStringA.

    :param address_family: socket.AF_INET or socket.AF_INET6
    :param packed_ip: 4-byte (IPv4) or 16-byte (IPv6) packed address
    :param encoding: codec used to decode the ANSI result buffer
    :raises socket.error: on wrong packed length, unknown family, or API
        failure
    """
    addr = sockaddr()
    addr.sa_family = address_family
    addr_size = c_int(sizeof(addr))
    ip_string = create_string_buffer(128)
    # BUGFIX: WSAAddressToStringA's last argument is the in/out length of
    # the *string* buffer; passing sizeof(addr) (the sockaddr struct) could
    # fail or truncate long IPv6 representations.
    ip_string_size = c_int(sizeof(ip_string))

    if address_family == socket.AF_INET:
        if len(packed_ip) != sizeof(addr.ipv4_addr):
            raise socket.error('packed IP wrong length for inet_ntop')
        memmove(addr.ipv4_addr, packed_ip, 4)
    elif address_family == socket.AF_INET6:
        if len(packed_ip) != sizeof(addr.ipv6_addr):
            raise socket.error('packed IP wrong length for inet_ntop')
        memmove(addr.ipv6_addr, packed_ip, 16)
    else:
        raise socket.error('unknown address family')

    # Non-zero return signals failure; FormatError() renders GetLastError().
    if WSAAddressToStringA(byref(addr), addr_size, None, ip_string,
                           byref(ip_string_size)) != 0:
        raise socket.error(FormatError())

    # On success ip_string_size includes the terminating NUL; strip it.
    return (ip_string[:ip_string_size.value - 1]).decode(encoding)
Ejemplo n.º 24
0
    def __call__(self, im, num_powers, border):
        """Compute *num_powers* stacked power-images of *im* on the device.

        Runs self.kernel once to populate the device output buffer, then
        runs self.kernel2 once per power index over that buffer, and
        finally copies the (num_powers, *im.shape) float32 result back.

        NOTE(review): `border` is accepted but never referenced here --
        confirm whether the kernels consume it elsewhere or it is dead.
        """
        out_shape = [num_powers] + list(im.shape)
        output = np.empty(out_shape, dtype=np.float32)

        in_buf, evt = buffer_from_ndarray(self.queue, im.data, blocking=False)
        evt.wait()
        self.kernel.setarg(0, in_buf, sizeof(cl_mem))

        # Output-only device buffer; no host upload required.
        out_buf = clCreateBuffer(self.queue.context, output.nbytes)
        self.kernel.setarg(1, out_buf, sizeof(cl_mem))

        evt = clEnqueueNDRangeKernel(self.queue, self.kernel, self.global_size)
        evt.wait()

        self.kernel2.setarg(0, out_buf, sizeof(cl_mem))

        # Second kernel processes the shared buffer once per power index.
        for power in range(num_powers):
            self.kernel2.setarg(1, power, sizeof(cl_int))
            evt = clEnqueueNDRangeKernel(self.queue, self.kernel2, self.global_size)
            evt.wait()

        _, evt = buffer_to_ndarray(self.queue, out_buf, output)
        evt.wait()
        return Array(unique_name(), output)
Ejemplo n.º 25
0
def inet_pton(address_family, ip_string, encoding="UTF-8"):
    """Parse an IPv4/IPv6 address string into its packed byte form.

    Windows fallback for socket.inet_pton, built on WSAStringToAddressA.

    :param address_family: socket.AF_INET or socket.AF_INET6
    :param ip_string: textual address, encoded with *encoding* for the
        ANSI Windows API
    :raises socket.error: on parse failure or an unknown address family
    """
    addr = sockaddr()
    addr.sa_family = address_family
    addr_size = c_int(sizeof(addr))

    status = WSAStringToAddressA(ip_string.encode(encoding),
                                 address_family,
                                 None,
                                 byref(addr),
                                 byref(addr_size))
    if status != 0:
        raise socket.error(FormatError())

    if address_family == socket.AF_INET:
        return string_at(addr.ipv4_addr, 4)
    if address_family == socket.AF_INET6:
        return string_at(addr.ipv6_addr, 16)

    raise socket.error('unknown address family')
Ejemplo n.º 26
0
def inet_pton(address_family, ip_string, encoding="UTF-8"):
    """Convert an address string to packed bytes via WSAStringToAddressA
    (Windows fallback for socket.inet_pton).

    :param address_family: socket.AF_INET or socket.AF_INET6
    :param ip_string: textual address; encoded with *encoding* for the
        ANSI Windows API
    :raises socket.error: on parse failure or unknown address family
    """
    addr = sockaddr()
    addr.sa_family = address_family
    # In/out: on return holds the number of bytes actually written.
    addr_size = c_int(sizeof(addr))

    if WSAStringToAddressA(ip_string.encode(encoding),
                           address_family,
                           None,
                           byref(addr),
                           byref(addr_size)) != 0:
        raise socket.error(FormatError())

    if address_family == socket.AF_INET:
        return string_at(addr.ipv4_addr, 4)
    if address_family == socket.AF_INET6:
        return string_at(addr.ipv6_addr, 16)

    raise socket.error('unknown address family')
Ejemplo n.º 27
0
def trigger_picker(ind,
                   station,
                   channel,
                   trigger_type,
                   use_filter,
                   freqmin,
                   freqmax,
                   init_level,
                   stop_level,
                   sta,
                   lta=0):
    """Subscribe to one station/channel data stream over ZeroMQ and feed
    every received packet into a TriggerWrapper picker. Runs forever.

    :param ind: numeric index identifying this picker instance
    :param station: station name (converted to wire format via prep_name)
    :param channel: channel name (converted via prep_ch)
    :param trigger_type: picker algorithm, forwarded to TriggerWrapper
    :param use_filter: whether the picker should band-pass filter
    :param freqmin: filter low corner, forwarded to TriggerWrapper
    :param freqmax: filter high corner, forwarded to TriggerWrapper
    :param init_level: trigger-on threshold
    :param stop_level: trigger-off threshold
    :param sta: short-term average window, forwarded to TriggerWrapper
    :param lta: long-term average window, forwarded to TriggerWrapper
    """
    #trigger_index_s = ('%02d' % ind).encode()
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    socket.connect('tcp://localhost:' + str(Port.proxy.value))
    station_bin = prep_name(station)
    # Restrict the subscription to this station+channel only.
    socket.setsockopt(
        zmq.SUBSCRIBE,
        Subscription.intern.value + station_bin + prep_ch(channel))

    #socket_trigger = context.socket(zmq.PUB)
    #socket_trigger.connect('tcp://localhost:' + str(Port.multi.value))

    #data_trigger = None
    #trigger_on = False
    #filter = None
    trigger_wrapper = None
    while True:
        #sleep(.1)
        # The first byte is the subscription topic marker; strip it.
        raw_data = socket.recv()[1:]
        header = ChHeader()
        header_size = sizeof(ChHeader)
        # Overlay the fixed-size C header onto the packet prefix.
        BytesIO(raw_data[:header_size]).readinto(header)
        sampling_rate = header.sampling_rate
        # header.ns holds nanoseconds; convert to seconds for UTCDateTime.
        starttime = UTCDateTime(header.ns / 10**9)
        data = np.frombuffer(raw_data[header_size:], dtype='float')
        # Lazily create the picker once the sampling rate is known from
        # the first packet.
        if not trigger_wrapper:
            trigger_wrapper = TriggerWrapper(context, ind, trigger_type,
                                             sampling_rate, use_filter,
                                             freqmin, freqmax, init_level,
                                             stop_level, sta, lta)
        trigger_wrapper.pick(starttime, data)
Ejemplo n.º 28
0
    def _receive_async(self,
                       callback=None,
                       bufsize=Defaults.PACKET_BUFFER_SIZE):
        """
        Receives a diverted packet that matched the filter passed to the handle constructor asynchronously.

        The remapped function is WinDivertRecvEx:

        BOOL WinDivertRecvEx(
            __in HANDLE handle,
            __out PVOID pPacket,
            __in UINT packetLen,
            __in UINT64 flags,
            __out_opt PWINDIVERT_ADDRESS pAddr,
            __out_opt UINT *recvLen,
            __inout_opt LPOVERLAPPED lpOverlapped
        );

        For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_recv_ex
        """
        if not hasattr(self._lib, "WinDivertRecvEx"):
            raise MethodUnsupportedException(
                "Async receive is not supported with this version of WinDivert"
            )

        # FuturePacket owns the packet buffer, address struct, recv-length
        # slot and OVERLAPPED structure used by the async call below.
        future = FuturePacket(self._handle, callback=callback, bufsize=bufsize)

        retcode = self._lib.WinDivertRecvEx(self._handle, byref(future.packet),
                                            sizeof(future.packet), 0,
                                            byref(future.address),
                                            byref(future.recv_len),
                                            byref(future.overlapped))
        last_error = GetLastError()
        # For overlapped I/O, FALSE + ERROR_IO_PENDING is the expected
        # "operation queued" outcome; anything else is a real failure.
        if not retcode and last_error == ErrorCodes.ERROR_IO_PENDING:
            return future.get_result()
        else:
            raise AsyncCallFailedException(
                "Async receive failed with retcode %d and LastError %d" %
                (retcode, last_error))
Ejemplo n.º 29
0
    def to_c_mech(self):
        """
        Create the Param structure, then convert the data into byte arrays.

        Builds a CK_ECDH1_DERIVE_PARAMS from self.params (keys: 'kdf',
        'sharedData', 'publicData') and attaches it to the mechanism
        prepared by the superclass.

        :return: :class:`~pycryptoki.cryptoki.CK_MECHANISM`
        """
        super(ECDH1DeriveMechanism, self).to_c_mech()
        params = CK_ECDH1_DERIVE_PARAMS()
        params.kdf = self.params['kdf']
        # Absent shared data is encoded as a NULL pointer with zero length.
        if self.params['sharedData'] is None:
            shared_data = None
            shared_data_len = 0
        else:
            shared_data, shared_data_len = to_byte_array(self.params['sharedData'])
        params.pSharedData = cast(shared_data, CK_BYTE_PTR)
        params.ulSharedDataLen = shared_data_len
        public_data, public_data_len = to_byte_array(self.params['publicData'])
        params.pPublicData = cast(public_data, CK_BYTE_PTR)
        params.ulPublicDataLen = public_data_len
        self.mech.pParameter = cast(pointer(params), c_void_p)
        # NOTE(review): sibling mechanisms set `ulParameterLen`; confirm
        # `usParameterLen` is the intended field name for this struct.
        self.mech.usParameterLen = CK_ULONG(sizeof(params))
        return self.mech
Ejemplo n.º 30
0
    def draw_player(self, glow_index: int, health: Optional[int] = 0):
        """Colour a player's glow according to health and enable ESP glow.

        :param glow_index: index into the game's GlowObjectDefinition array
        :param health: player health (0-100); ``None`` is treated as 0
        """
        glow_offset = glow_index * sizeof(GlowObjectDefinition)
        glow_object_definition = self._rpm(glow_offset, GlowObjectDefinition)

        # BUGFIX: health is Optional[int]; comparing None with >= would
        # raise TypeError, so normalise None to the documented default.
        if health is None:
            health = 0

        # Colour buckets: full health, healthy, and hurt.
        if health == 100:
            rgba = (17, 0, 255, 1)
        elif health >= 50:
            rgba = (72, 255, 0, 1)
        else:
            rgba = (255, 0, 0, 1)
        (glow_object_definition.r,
         glow_object_definition.g,
         glow_object_definition.b,
         glow_object_definition.a) = rgba

        # Render the glow only when the player is occluded (through walls).
        glow_object_definition.m_bRenderWhenOccluded = 1
        glow_object_definition.m_bRenderWhenUnoccluded = 0

        self._wpm(glow_offset, bytes(glow_object_definition))
Ejemplo n.º 31
0
    def parse_packet(self, *args):
        """
        Parses a raw packet into a higher level object.
        Args could be a tuple or two different values. In each case the first one is the raw data and the second
        is the meta about the direction and interface to use.

        The function remapped is WinDivertHelperParsePacket:
        Parses a raw packet (e.g. from WinDivertRecv()) into the various packet headers
        and/or payloads that may or may not be present.

        BOOL WinDivertHelperParsePacket(
            __in PVOID pPacket,
            __in UINT packetLen,
            __out_opt PWINDIVERT_IPHDR *ppIpHdr,
            __out_opt PWINDIVERT_IPV6HDR *ppIpv6Hdr,
            __out_opt PWINDIVERT_ICMPHDR *ppIcmpHdr,
            __out_opt PWINDIVERT_ICMPV6HDR *ppIcmpv6Hdr,
            __out_opt PWINDIVERT_TCPHDR *ppTcpHdr,
            __out_opt PWINDIVERT_UDPHDR *ppUdpHdr,
            __out_opt PVOID *ppData,
            __out_opt UINT *pDataLen
        );

        For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_helper_parse_packet
        """
        if len(args) == 1:
            # Maybe this is a poor way to check the type, but it should work
            if hasattr(args[0], "__iter__") and not hasattr(args[0], "strip"):
                raw_packet, meta = args[0]
            else:
                raw_packet, meta = args[0], None
        elif len(args) == 2:
            raw_packet, meta = args[0], args[1]
        else:
            raise ValueError("Wrong number of arguments passed to parse_packet")

        packet_len = len(raw_packet)
        # Consider everything else not part of headers as payload
        # payload = ctypes.c_void_p(0)
        payload_len = c_uint(0)
        # One pre-allocated header struct per possible protocol; the C call
        # fills in only the pointers that apply to this packet.
        ip_hdr, ipv6_hdr = pointer(IpHeader()), pointer(Ipv6Header())
        icmp_hdr, icmpv6_hdr = pointer(IcmpHeader()), pointer(Icmpv6Header())
        tcp_hdr, udp_hdr = pointer(TcpHeader()), pointer(UdpHeader())
        headers = (ip_hdr, ipv6_hdr, icmp_hdr, icmpv6_hdr, tcp_hdr, udp_hdr)

        self._lib.WinDivertHelperParsePacket(
            raw_packet,
            packet_len,
            byref(ip_hdr),
            byref(ipv6_hdr),
            byref(icmp_hdr),
            byref(icmpv6_hdr),
            byref(tcp_hdr),
            byref(udp_hdr),
            None,
            byref(payload_len),
        )
        # headers_len = sum(ctypes.sizeof(hdr.contents) for hdr in headers if hdr)
        # headers_len = sum((getattr(hdr.contents, "HdrLength", 0) * 4) for hdr in headers if hdr)

        # clean headers, consider just those that are not None (!=NULL)
        headers = [hdr.contents for hdr in headers if hdr]

        # Collect per-header option bytes: anything beyond the fixed struct
        # size up to the on-wire header length.
        headers_opts = []
        offset = 0
        for header in headers:
            if hasattr(header, "HdrLength"):
                # HdrLength is expressed in 32-bit words on the wire.
                header_len = getattr(header, "HdrLength", 0) * 4
                opt_len = header_len - sizeof(header)
                if opt_len:
                    opt = raw_packet[offset + header_len - opt_len : offset + header_len]
                    headers_opts.append(opt)
                else:
                    headers_opts.append("")
            else:
                headers_opts.append("")
                header_len = sizeof(header)
            offset += header_len

        # Everything after the headers is treated as payload.
        return CapturedPacket(
            payload=raw_packet[offset:],
            raw_packet=raw_packet,
            headers=[HeaderWrapper(hdr, opt, self.encoding) for hdr, opt in zip(headers, headers_opts)],
            meta=meta,
            encoding=self.encoding,
        )
Ejemplo n.º 32
0
    def _query_process_info(self, handle, read_peb=True):
        """Gets an extended proc info.

        Parameters
        -----------

        handle: HANDLE
            handle to process for which the info
            should be acquired
        read_peb: boolean
            true in case the process PEB should be read

        Returns
        -------
        dict
            keys: name, comm (command line), parent_pid; empty on failure
        """
        pbi_buff = malloc(sizeof(PROCESS_BASIC_INFORMATION))
        status = zw_query_information_process(handle,
                                              PROCESS_BASIC_INFO,
                                              pbi_buff,
                                              sizeof(PROCESS_BASIC_INFORMATION),
                                              byref(ULONG()))

        info = {}

        if status == STATUS_SUCCESS:
            pbi = cast(pbi_buff, POINTER(PROCESS_BASIC_INFORMATION))
            ppid = pbi.contents.inherited_from_unique_process_id
            if read_peb:
                # read the PEB to get the process parameters.
                # Because the PEB structure resides
                # in the address space of another process
                # we must read the memory block in order
                # to access the structure's fields
                peb_addr = pbi.contents.peb_base_address
                peb_buff = read_process_memory(handle, peb_addr, sizeof(PEB))
                if peb_buff:
                    peb = cast(peb_buff, POINTER(PEB))
                    # read the RTL_USER_PROCESS_PARAMETERS struct
                    # which contains the command line and the image
                    # name of the process
                    pp = peb.contents.process_parameters
                    pp_buff = read_process_memory(handle,
                                                  pp,
                                                  sizeof(RTL_USER_PROCESS_PARAMETERS))
                    if pp_buff:
                        pp = cast(pp_buff, POINTER(RTL_USER_PROCESS_PARAMETERS))

                        comm = pp.contents.command_line.buffer
                        comm_len = pp.contents.command_line.length
                        exe = pp.contents.image_path_name.buffer
                        exe_len = pp.contents.image_path_name.length

                        # these memory reads are required
                        # to copy the command line and image name buffers
                        cb = read_process_memory(handle, comm, comm_len)
                        eb = read_process_memory(handle, exe, exe_len)

                        if cb and eb:
                            # cast the buffers to
                            # UNICODE strings
                            comm = cast(cb, c_wchar_p).value
                            exe = cast(eb, c_wchar_p).value

                            # the image name contains the full path
                            # split the string to get the exec name
                            name = exe[exe.rfind('\\') + 1:]
                            info = ddict(name=name,
                                         comm=comm,
                                         parent_pid=ppid)
                            free(cb)
                            free(eb)
                        free(pp_buff)

                    free(peb_buff)
            else:
                # query only the process image file name
                exe = ctypes.create_unicode_buffer(MAX_PATH)
                size = DWORD(MAX_PATH)
                name = None
                status = query_full_process_image_name(handle,
                                                       0,
                                                       exe,
                                                       byref(size))
                if status:
                    exe = exe.value
                    name = exe[exe.rfind('\\') + 1:]
                info = ddict(name=name if name else NA,
                             comm=exe if type(exe) is str else None,
                             parent_pid=ppid)
        # Always release the PROCESS_BASIC_INFORMATION buffer.
        if pbi_buff:
            free(pbi_buff)

        return info
Ejemplo n.º 33
0
 def __init__(self, *args, **kwargs):
     """Construct the particle and eagerly parse the sample header.

     For playback the elapsed run time must be known before the particle
     is generated, so the header is decoded at construction time rather
     than lazily.
     """
     super(OptaaSampleDataParticle, self).__init__(*args, **kwargs)
     self.header = OptaaSampleHeader.from_string(self.raw_data)
     # Each wavelength contributes 4 big-endian unsigned shorts
     # (cref, aref, csig, asig), packed immediately after the header.
     value_count = self.header.num_wavelengths * 4
     self.data = struct.unpack_from('>%dH' % value_count, self.raw_data, sizeof(self.header))
     # Elapsed run time (ms since power-up) is stored as two 16-bit halves.
     self.elapsed = (self.header.time_high << 16) + self.header.time_low
Ejemplo n.º 34
0
        self.init_super(super_gap, disk_sz)
        # it is done in __init__
        # self.init_map()
        # self.init_inode_block()
        # init root directory
        # write '.' '..' in block, ie a dir
        inode_o, block_o = self.make_file(FileType.DIR, disk_sz)

        buf1 = bytearray(sizeof(DirEntry))
        current = DirEntry.from_buffer(buf1)
        current.filename = b'.'
        current.inode_off = inode_o + disk_sz
        self.write_to_file(inode_o, block_o, buf1)

        buf2 = bytearray(sizeof(DirEntry))
        father = DirEntry.from_buffer(buf2)
        father.filename = b'..'
        father.inode_off = inode_o + disk_sz
        self.write_to_file(inode_o, block_o + sizeof(current), buf2)

        with open('./hard_disk', 'wb') as img:
            img.write(bytearray(self.buf))


if __name__ == '__main__':
    # Sanity-check the on-disk struct sizes before building the image:
    # directory entries and inodes must be 32- and 128-byte aligned.
    assert sizeof(DirEntry) % 32 == 0
    assert sizeof(Inode) % 128 == 0
    image_builder = MkImg('../disk.img', 2 ** 12, 2 ** 10, 1)
    image_builder.make_img()
Ejemplo n.º 35
0
# On Windows we must sniff at the IP level; elsewhere we can listen
# for ICMP directly.
if os.name == 'nt':
    socket_protocol = socket.IPPROTO_IP
else:
    socket_protocol = socket.IPPROTO_ICMP

sniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket_protocol)
sniffer.bind((host, 0))
# Include the IP header in captured packets.
sniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
if os.name == 'nt':
    # Windows needs promiscuous mode enabled explicitly.
    sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)

try:
    while True:
        # 65535 is the maximum IP datagram size (fixes the common
        # 65565 typo; the extra bytes were harmless but wrong).
        raw_buffer = sniffer.recvfrom(65535)[0]
        # The fixed IP header occupies the first 20 bytes.
        ip_header = IP(raw_buffer[0:20])
        print(
            f'Protocol: {ip_header.protocol} {ip_header.src_address} -> {ip_header.dst_address}'
        )

        if ip_header.protocol == 'ICMP':
            # IHL counts 32-bit words, so multiply by 4 to get the
            # byte offset where the ICMP header starts.
            offset = ip_header.ihl * 4
            buf = raw_buffer[offset:offset + sizeof(ICMP)]

            icmp_header = ICMP(buf)

            print(f'ICMP -> Type: {icmp_header.type} Code: {icmp_header.code}')

except KeyboardInterrupt:
    # Restore the NIC to normal mode on Windows before exiting.
    if os.name == 'nt':
        sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
Ejemplo n.º 36
0
    def _readBG(self, file_name):
        """Stream a continuous background DAQ scan to a text file.

        Starts a continuous, background, scaled-data scan on the configured
        channels and repeatedly copies chunks out of the UL circular buffer
        into *file_name* (comma-separated, one row per scan across the
        channels) until ``self.acquiringBG`` goes False or a buffer overrun
        is detected.

        Bug fixed: the original mixed ``self.board_num`` (used to start the
        scan) with a bare ``board_num`` in every ``get_status`` /
        ``stop_background`` call, which is undefined in this method's scope.

        Parameters
        ----------
        file_name : str
            Path of the output file to write samples to.
        """
        # The size of the UL buffer to create, in seconds.
        buffer_size_seconds = 2

        low_chan = 0
        high_chan = 1
        num_chans = high_chan - low_chan + 1

        # Create a circular buffer that can hold buffer_size_seconds worth of
        # data, or at least 10 points (this may need to be adjusted to prevent
        # a buffer overrun).
        points_per_channel = max(self.__rate * buffer_size_seconds, 10)

        ul_buffer_count = points_per_channel * num_chans

        # When handling the buffer, we will read 1/100 of the buffer at a time
        # (the original comment said 1/10, contradicting the code).
        write_chunk_size = int(ul_buffer_count / 100)

        if self.useExtClock:
            scan_options = ScanOptions.BACKGROUND | ScanOptions.CONTINUOUS | ScanOptions.SCALEDATA | ScanOptions.EXTCLOCK
        else:
            scan_options = ScanOptions.BACKGROUND | ScanOptions.CONTINUOUS | ScanOptions.SCALEDATA

        memhandle = ul.scaled_win_buf_alloc(ul_buffer_count)

        # Temporary storage for one chunk of scaled (double) samples.
        write_chunk_array = (c_double * write_chunk_size)()

        # Check if the buffer was successfully allocated.
        if not memhandle:
            print("Failed to allocate memory.")
            return

        try:
            # Start the scan.
            ul.daq_in_scan(board_num=self.board_num,
                           chan_list=self.channels['Number'],
                           chan_type_list=self.channels['Type'],
                           gain_list=self.channels['Gain'],
                           chan_count=len(self.channels['Number']),
                           rate=self.__rate,
                           pretrig_count=0,
                           total_count=ul_buffer_count,
                           memhandle=memhandle,
                           options=scan_options)

            status = Status.IDLE
            # Wait for the scan to start fully.
            while status == Status.IDLE:
                status, _, _ = ul.get_status(self.board_num,
                                             FunctionType.DAQIFUNCTION)

            # Create a file for storing the data.
            with open(file_name, 'w') as f:
                # Start the write loop.
                prev_count = 0
                prev_index = 0
                write_ch_num = low_chan
                keepReading = True
                while status != Status.IDLE and keepReading:
                    # Get the latest counts.
                    status, curr_count, _ = ul.get_status(
                        self.board_num, FunctionType.DAQIFUNCTION)

                    new_data_count = curr_count - prev_count

                    # Check for a buffer overrun before copying the data, so
                    # that no attempts are made to copy more than a full buffer
                    # of data.
                    if new_data_count > ul_buffer_count:
                        # Print an error and stop writing.
                        ul.stop_background(self.board_num,
                                           FunctionType.DAQIFUNCTION)
                        print("A buffer overrun occurred")
                        break

                    # Check if a chunk is available.
                    if new_data_count > write_chunk_size:
                        wrote_chunk = True

                        # Check if the data wraps around the end of the UL
                        # buffer. Multiple copy operations will be required.
                        if prev_index + write_chunk_size > ul_buffer_count - 1:
                            first_chunk_size = ul_buffer_count - prev_index
                            second_chunk_size = (write_chunk_size -
                                                 first_chunk_size)

                            # Copy the first chunk of data to the
                            # write_chunk_array.
                            ul.scaled_win_buf_to_array(memhandle,
                                                       write_chunk_array,
                                                       prev_index,
                                                       first_chunk_size)

                            # Create a pointer to the location in
                            # write_chunk_array where we want to copy the
                            # remaining data.
                            second_chunk_pointer = cast(
                                addressof(write_chunk_array) +
                                first_chunk_size * sizeof(c_double),
                                POINTER(c_double))

                            # Copy the second chunk of data to the
                            # write_chunk_array.
                            ul.scaled_win_buf_to_array(memhandle,
                                                       second_chunk_pointer, 0,
                                                       second_chunk_size)
                        else:
                            # Copy the data to the write_chunk_array.
                            ul.scaled_win_buf_to_array(memhandle,
                                                       write_chunk_array,
                                                       prev_index,
                                                       write_chunk_size)

                        # Check for a buffer overrun just after copying the data
                        # from the UL buffer. This will ensure that the data was
                        # not overwritten in the UL buffer before the copy was
                        # completed. This should be done before writing to the
                        # file, so that corrupt data does not end up in it.
                        status, curr_count, _ = ul.get_status(
                            self.board_num, FunctionType.DAQIFUNCTION)
                        if curr_count - prev_count > ul_buffer_count:
                            # Print an error and stop writing.
                            ul.stop_background(self.board_num,
                                               FunctionType.DAQIFUNCTION)
                            print("A buffer overrun occurred")
                            break

                        for i in range(write_chunk_size):
                            f.write(str(write_chunk_array[i]))
                            write_ch_num += 1
                            if write_ch_num == high_chan + 1:
                                write_ch_num = low_chan
                                f.write(u'\n')
                            else:
                                f.write(',')
                    else:
                        wrote_chunk = False

                    if wrote_chunk:
                        # Advance the read position by one chunk and wrap it
                        # to the size of the UL circular buffer.
                        prev_count += write_chunk_size
                        prev_index += write_chunk_size
                        prev_index %= ul_buffer_count
                        # Check the stop flag only after writing, to avoid
                        # truncating a partially-written row.
                        if not self.acquiringBG:
                            keepReading = False
                    else:
                        # Wait a short amount of time for more data to be
                        # acquired.
                        time.sleep(0.01)

            ul.stop_background(self.board_num, FunctionType.DAQIFUNCTION)
        except ULError:
            # Best-effort acquisition: UL errors are deliberately swallowed;
            # the buffer is still freed in the finally block below.
            pass
        finally:
            # Free the buffer in a finally block to prevent errors from
            # causing a memory leak.
            ul.win_buf_free(memhandle)
Ejemplo n.º 37
0
    def parse_file(self):
        """Parse the OPTAA binary stream into metadata and instrument particles.

        Scans the stream for START_MARKER-delimited packets, validates each
        packet's checksum, emits one metadata particle per file plus one
        instrument particle per valid packet into ``self._record_buffer``,
        and reports bad checksums / unexpected data through
        ``self._exception_callback``.
        """

        position = 0
        metadata_generated = False

        packet_id_bytes = self._stream_handle.read(MARKER_SIZE)  # read the first MARKER_SIZE bytes of the file

        while packet_id_bytes:  # read() returns an empty value at EOF, ending the loop

            if packet_id_bytes == START_MARKER:  # we found the marker
                # the 2 bytes after the marker hold the big-endian packet length
                length_bytes = self._stream_handle.read(2)
                packet_length = struct.unpack('>H', length_bytes)[0]

                self._stream_handle.seek(position)  # reset to beginning of packet
                # read entire packet, including trailing checksum and pad
                packet_buffer = self._stream_handle.read(packet_length + SIZE_CHECKSUM + SIZE_PAD)

                # first check that the packet passes the checksum
                expected_checksum = struct.unpack_from('>H', packet_buffer, packet_length)[0]

                # 16-bit byte-sum over the packet, excluding checksum and pad
                actual_checksum = sum(bytearray(packet_buffer[:-(SIZE_CHECKSUM + SIZE_PAD)])) & 0xFFFF

                if actual_checksum == expected_checksum:

                    # unpack the header part of the packet using BigEndianStructure utility
                    packet_header = OptaaSampleHeader.from_string(packet_buffer)
                    # unpack the rest of the packet data now that we have num_wavelengths
                    # (4 unsigned shorts per wavelength: cref, aref, csig, asig)
                    packet_data = struct.unpack_from('>%dH' % (packet_header.num_wavelengths*4),
                                                     packet_buffer, sizeof(packet_header))

                    cref = packet_data[::4]  # slice the data up using a step of 4
                    aref = packet_data[1::4]
                    csig = packet_data[2::4]
                    asig = packet_data[3::4]

                    # Extract the number of milliseconds since power-up.
                    elapsed_milli = (packet_header.time_high << 16) + packet_header.time_low
                    time_since_power_up = elapsed_milli / 1000.0

                    if not metadata_generated:  # generate 1 metadata particle per file

                        serial_number = (packet_header.serial_number_high << 16) + packet_header.serial_number_low
                        # package up the metadata parameters for the particle to decode
                        metadata = {
                            Keys.START_DATE: self.start_date,
                            Keys.PACKET_TYPE: packet_header.packet_type,
                            Keys.METER_TYPE: packet_header.meter_type,
                            Keys.SERIAL_NUMBER: serial_number
                        }

                        # create the metadata particle
                        meta_particle = self._extract_sample(self.metadata_particle_class,
                                                             None, metadata, internal_timestamp=self.ntp_time)
                        self._record_buffer.append(meta_particle)

                        # Adjust the ntp_time at power-up by
                        # the time since power-up of the first record.
                        self.ntp_time -= time_since_power_up

                        metadata_generated = True

                    # package up the instrument data parameters for the particle to decode
                    instrument_data = {Keys.A_REFERENCE_DARK_COUNTS: packet_header.a_reference_dark_counts,
                                       Keys.PRESSURE_COUNTS: packet_header.pressure_counts,
                                       Keys.A_SIGNAL_DARK_COUNTS: packet_header.a_signal_dark_counts,
                                       Keys.EXTERNAL_TEMP_RAW: packet_header.external_temp_raw,
                                       Keys.INTERNAL_TEMP_RAW: packet_header.internal_temp_raw,
                                       Keys.C_REFERENCE_DARK_COUNTS: packet_header.c_reference_dark_counts,
                                       Keys.C_SIGNAL_DARK_COUNTS: packet_header.c_signal_dark_counts,
                                       Keys.ELAPSED_RUN_TIME: elapsed_milli,
                                       Keys.NUM_WAVELENGTHS: packet_header.num_wavelengths,
                                       Keys.C_REFERENCE_COUNTS: cref,
                                       Keys.A_REFERENCE_COUNTS: aref,
                                       Keys.C_SIGNAL_COUNTS: csig,
                                       Keys.A_SIGNAL_COUNTS: asig
                                       }

                    # create the instrument particle
                    data_particle = self._extract_sample(self.instrument_particle_class,
                                                         None, instrument_data, internal_timestamp=self.ntp_time + time_since_power_up)
                    self._record_buffer.append(data_particle)

                else:  # bad checksum
                    self._exception_callback(RecoverableSampleException(
                        'Checksum error.  Actual %d vs Expected %d' %
                        (actual_checksum, expected_checksum)))

            else:  # packet_id did not match, unexpected data

                self._exception_callback(UnexpectedDataException(
                    'Invalid OPTAA Packet ID found, checking next 4 bytes'))

            position = self._stream_handle.tell()  # set the new file position
            packet_id_bytes = self._stream_handle.read(MARKER_SIZE)  # read the next MARKER_SIZE bytes of the file
Ejemplo n.º 38
0
    def _query_process_info(self, handle, read_peb=True):
        """Gets an extended proc info.

        Parameters
        -----------

        handle: HANDLE
            handle to process for which the info
            should be acquired
        read_peb: boolean
            true in case the process PEB should be read

        Returns
        -------
        dict
            with `name`, `comm` and `parent_pid` keys, or an
            empty dict when the basic information query fails

        """
        pbi_buff = malloc(sizeof(PROCESS_BASIC_INFORMATION))
        status = zw_query_information_process(
            handle, PROCESS_BASIC_INFO, pbi_buff,
            sizeof(PROCESS_BASIC_INFORMATION), byref(ULONG()))

        info = {}

        if status == STATUS_SUCCESS:
            pbi = cast(pbi_buff, POINTER(PROCESS_BASIC_INFORMATION))
            ppid = pbi.contents.inherited_from_unique_process_id
            if read_peb:
                # read the PEB to get the process parameters.
                # Because the PEB structure resides
                # in the address space of another process
                # we must read the memory block in order
                # to access the structure's fields
                peb_addr = pbi.contents.peb_base_address
                peb_buff = read_process_memory(handle, peb_addr, sizeof(PEB))
                if peb_buff:
                    peb = cast(peb_buff, POINTER(PEB))
                    # read the RTL_USER_PROCESS_PARAMETERS struct
                    # which contains the command line and the image
                    # name of the process
                    pp = peb.contents.process_parameters
                    pp_buff = read_process_memory(
                        handle, pp, sizeof(RTL_USER_PROCESS_PARAMETERS))
                    if pp_buff:
                        pp = cast(pp_buff,
                                  POINTER(RTL_USER_PROCESS_PARAMETERS))

                        comm = pp.contents.command_line.buffer
                        comm_len = pp.contents.command_line.length
                        exe = pp.contents.image_path_name.buffer
                        exe_len = pp.contents.image_path_name.length

                        # these memory reads are required
                        # to copy the command line and image name buffers
                        cb = read_process_memory(handle, comm, comm_len)
                        eb = read_process_memory(handle, exe, exe_len)

                        if cb and eb:
                            # cast the buffers to
                            # UNICODE strings
                            comm = cast(cb, c_wchar_p).value
                            exe = cast(eb, c_wchar_p).value

                            # the image name contains the full path
                            # split the string to get the exec name
                            name = exe[exe.rfind('\\') + 1:]
                            info = ddict(name=name, comm=comm, parent_pid=ppid)
                            free(cb)
                            free(eb)
                        free(pp_buff)

                    free(peb_buff)
            else:
                # query only the process image file name
                exe = ctypes.create_unicode_buffer(MAX_PATH)
                size = DWORD(MAX_PATH)
                name = None
                status = query_full_process_image_name(handle, 0, exe,
                                                       byref(size))
                if status:
                    exe = exe.value
                    name = exe[exe.rfind('\\') + 1:]
                # BUGFIX: when the query fails, `exe` is still the raw ctypes
                # buffer - report None instead of leaking the buffer object
                # (matches the guarded variant of this method elsewhere)
                info = ddict(name=name if name else NA,
                             comm=exe if isinstance(exe, str) else None,
                             parent_pid=ppid)
        if pbi_buff:
            free(pbi_buff)

        return info
Ejemplo n.º 39
0
def resend(conn_str, rules, pem, pet):
    """Re-broadcast buffered signal data around rule-triggered events.

    Subscribes to parameter/signal messages from the local proxy and to
    rule/test trigger events.  Signal packets are buffered; while a trigger
    is active, or while the packet time is within the post-event window,
    buffered and incoming packets are broadcast through an NJSP stream
    server.  Old buffer entries beyond the pre-event window are dropped.

    NOTE(review): parameter semantics inferred from usage -- `pem` appears
    to be the pre-event window in seconds (buffer trimming uses `dt - pem`)
    and `pet` the post-event window in seconds (`trigger_time + pet`);
    confirm against the callers.

    :param conn_str: connection string whose port part is used for the
        NJSP stream server
    :param rules: iterable of rule indices to subscribe to
    :param pem: pre-event buffer window (seconds, presumably)
    :param pet: post-event forwarding window (seconds, presumably)
    """
    context = zmq.Context()

    socket_sub = context.socket(zmq.SUB)
    conn_str_sub = 'tcp://localhost:' + str(Port.proxy.value)
    socket_sub.connect(conn_str_sub)
    socket_sub.setsockopt(zmq.SUBSCRIBE, Subscription.parameters.value)
    socket_sub.setsockopt(zmq.SUBSCRIBE, Subscription.signal.value)

    socket_confirm = context.socket(zmq.PUB)
    socket_confirm.connect('tcp://localhost:' + str(Port.multi.value))

    # only the port is taken from conn_str; the server binds all interfaces
    str_parts = conn_str.split(':')[-2:]
    host = ''
    port = int(str_parts[-1])
    print('port:', port)
    logger.debug('create stream_server, host:%s, port %d' % (host, port))
    stream_server = None

    socket_rule = context.socket(zmq.SUB)
    socket_rule.connect(conn_str_sub)
    socket_rule.setsockopt(zmq.SUBSCRIBE, Subscription.test.value + b'03')
    for rule_index in rules:
        rule_index_s = '%02d' % rule_index
        socket_rule.setsockopt(zmq.SUBSCRIBE,
                               Subscription.rule.value + rule_index_s.encode())

    # trigger is a counter: incremented per trigger event, decremented
    # per detrigger event; data is forwarded while it is non-zero
    trigger = 0
    buf = []
    pet_time = UTCDateTime(0)
    while True:
        #logger.debug('resender loop')
        # drain any pending trigger/detrigger event (non-blocking)
        try:
            bin_data = socket_rule.recv(zmq.NOBLOCK)
            test = bin_data[:1] == Subscription.test.value
            if test:
                logger.debug('test rule event')
                trigger_data = b'0'
                if buf:
                    trigger_time, _ = buf[-1]
                else:
                    trigger_time = None
            else:
                logger.debug('rule event')
                trigger_data = bin_data[3:4]
                # last 8 bytes carry the event time in nanoseconds
                trigger_time = UTCDateTime(
                    int.from_bytes(bin_data[-8:], byteorder='big') / 10**9)
            if test:
                if trigger == 0:
                    logger.info('detrigger test event')
                    if trigger_time:
                        pet_time = trigger_time + pet
                    else:
                        pet_time = None
            else:
                logger.debug('trigger_data:' + str(trigger_data))
                if trigger_data == b'1':
                    trigger += 1
                    if buf:
                        logger.info('buf item dt:' + str(buf[0][0]))
                else:
                    logger.debug('inner detriggering')
                    if trigger > 0:
                        logger.debug('decrement trigger counter')
                        trigger -= 1
                    else:
                        logger.warning('unexpected detriggering')
            if trigger == 1 and not test:
                logger.info('triggered\ntrigger time:' + str(trigger_time) +
                            '\npem time:' + str(trigger_time - pem) +
                            '\ntrigger:' + str(bin_data[1:3]))
            if trigger == 0:
                logger.info('detriggered , test:' + str(test) +
                            '\ndetrigger time:' + str(trigger_time) +
                            '\npet time:' + str(trigger_time + pet) +
                            '\ntrigger:' + str(bin_data[1:3]))
            if not buf:
                logger.warning('buf is empty')
            logger.debug('trigger:' + str(trigger))
        except zmq.ZMQError:
            # no rule event pending; fall through to the data path
            pass

        socket_confirm.send(Subscription.confirm.value + b'1')
        if not socket_sub.poll(3000):
            logger.info('no signal or params data')
            continue
        raw_data = socket_sub.recv()
        #print('raw_data recvd:' + str(raw_data))
        subscription = raw_data[0]
        if subscription != 1:
            logger.debug('subscription:' + str(raw_data[0]))
        if raw_data[:1] == Subscription.parameters.value:
            logger.debug('parameters received in resender')
            #exit(1)
            init_packet = json.loads(raw_data[1:].decode())
            if stream_server is None:
                stream_server = NJSP_STREAMSERVER((host, port), init_packet)
            # merge any new stream definition into the server's init packet
            stream_name = list(init_packet['parameters']['streams'].keys())[0]
            if stream_name not in \
                    stream_server.init_packet['parameters']['streams']:
                stream_server.init_packet['parameters']['streams'][stream_name] = \
                    init_packet['parameters']['streams'][stream_name]
                stream_server.load_init_packet(stream_server.init_packet)
                stream_server.init_data = stream_server.NJSP_PROTOCOL_IDENTIFIER + \
                                          stream_server.encode_hdr_and_json(stream_server.init_packet)
            continue
        # signal path: a CustomHeader precedes the JSON payload
        resent_data = raw_data[1:]
        custom_header = CustomHeader()
        header_size = sizeof(CustomHeader)
        BytesIO(resent_data[:header_size]).readinto(custom_header)
        #memmove(addressof(custom_header), resent_data[:header_size], header_size)
        # logger.debug('custom header received:' + str(custom_header))
        # header carries the packet time in nanoseconds
        dt = UTCDateTime(custom_header.ns / 10**9)
        #logger.debug('dt:' + str(dt))
        # logger.debug('wait binary data')
        bdata = resent_data[header_size:]
        json_data = json.loads(bdata.decode())
        streams = json_data['streams']
        stream_name = list(streams.keys())[0]
        data_dic = streams[stream_name]['samples']
        # samples arrive base64-encoded; decode them in place
        for ch in data_dic:
            ch_data = data_dic[ch]
            data_dic[ch] = base64.decodebytes(ch_data.encode())
        # logger.debug('binary data received')
        #logger.debug('dt:' + str(UTCDateTime(dt)) + ' bdata len:' + str(len(bdata)))
        if not pet_time or trigger:
            #logger.debug('pet time is None')
            pet_time = dt + pet
        if dt < pet_time or trigger:
            #logger.debug('dt:' + str(dt) + ', pet time:' + str(pet_time) + ', trigger:' + str(trigger))
            # if buf:
            #     logger.debug('clear buf, trigger:' + str(trigger))
            # flush buffered pre-event data first, then the current packet
            while buf:
                logger.debug('send data to output from buf, dt:' +
                             str(buf[0][0]))
                if stream_server:
                    logger.debug('broadcast_data...')
                    stream_server.broadcast_data(buf[0][1])
                else:
                    logger.debug('stream_server:' + str(stream_server))
                #stream_server.send(buf[0][1])
                # logger.debug('buf item dt:' + str(buf[0][0]))
                buf = buf[1:]
            logger.debug('send regular data, dt' + str(dt))
            #logger.debug('send data to output')
            if stream_server:
                logger.debug('broadcast_data...')
                stream_server.broadcast_data(json_data)
            else:
                logger.debug('stream_server:' + str(stream_server))
        else:
            #logger.debug('append to buf with dt:' + str(dt))
            buf.append((dt, json_data))
        if buf:
            #logger.debug('buf start:' + str(buf[0][0]))
            # trim entries older than the pre-event window; the buf[3:]
            # guard only trims while more than 3 entries remain
            dt_begin = buf[0][0]
            while dt_begin < dt - pem and buf[3:]:
                # logger.debug('delete from buf with dt:' + str(buf[0][0]) + '\ncurrent pem:' +
                #              str(dt-pem) + '\ncurrent buf:' + str(buf[0][0]) + '-' + str(buf[-1][0]))
                buf = buf[1:]
                dt_begin = buf[0][0]
Ejemplo n.º 40
0
    def parse_file(self):

        position = 0
        metadata_generated = False

        packet_id_bytes = self._stream_handle.read(
            MARKER_SIZE)  # read the first four bytes of the file

        while packet_id_bytes:  # will be None when EOF is found

            if packet_id_bytes == START_MARKER:  # we found the marker
                length_bytes = self._stream_handle.read(2)
                packet_length = struct.unpack('>H', length_bytes)[0]

                self._stream_handle.seek(
                    position)  # reset to beginning of packet
                # read entire packet
                packet_buffer = self._stream_handle.read(packet_length +
                                                         SIZE_CHECKSUM +
                                                         SIZE_PAD)

                # first check that the packet passes the checksum
                expected_checksum = struct.unpack_from('>H', packet_buffer,
                                                       packet_length)[0]

                actual_checksum = sum(
                    bytearray(
                        packet_buffer[:-(SIZE_CHECKSUM + SIZE_PAD)])) & 0xFFFF

                if actual_checksum == expected_checksum:

                    # unpack the header part of the packet using BigEndianStructure utility
                    packet_header = OptaaSampleHeader.from_string(
                        packet_buffer)
                    # unpack the rest of the packet data now that we have num_wavelengths
                    packet_data = struct.unpack_from(
                        '>%dH' % (packet_header.num_wavelengths * 4),
                        packet_buffer, sizeof(packet_header))

                    cref = packet_data[::
                                       4]  # slice the data up using a step of 4
                    aref = packet_data[1::4]
                    csig = packet_data[2::4]
                    asig = packet_data[3::4]

                    # Extract the number of milliseconds since power-up.
                    elapsed_milli = (
                        packet_header.time_high << 16) + packet_header.time_low
                    time_since_power_up = elapsed_milli / 1000.0

                    if not metadata_generated:  # generate 1 metadata particle per file

                        serial_number = (packet_header.serial_number_high <<
                                         16) + packet_header.serial_number_low
                        # package up the metadata parameters for the particle to decode
                        metadata = {
                            Keys.START_DATE: self.start_date,
                            Keys.PACKET_TYPE: packet_header.packet_type,
                            Keys.METER_TYPE: packet_header.meter_type,
                            Keys.SERIAL_NUMBER: serial_number
                        }

                        # create the metadata particle
                        meta_particle = self._extract_sample(
                            self.metadata_particle_class, None, metadata,
                            self.ntp_time)
                        self._record_buffer.append(meta_particle)

                        # Adjust the ntp_time at power-up by
                        # the time since power-up of the first record.
                        self.ntp_time -= time_since_power_up

                        metadata_generated = True

                    # package up the instrument data parameters for the particle to decode
                    instrument_data = {
                        Keys.A_REFERENCE_DARK_COUNTS:
                        packet_header.a_reference_dark_counts,
                        Keys.PRESSURE_COUNTS: packet_header.pressure_counts,
                        Keys.A_SIGNAL_DARK_COUNTS:
                        packet_header.a_signal_dark_counts,
                        Keys.EXTERNAL_TEMP_RAW:
                        packet_header.external_temp_raw,
                        Keys.INTERNAL_TEMP_RAW:
                        packet_header.internal_temp_raw,
                        Keys.C_REFERENCE_DARK_COUNTS:
                        packet_header.c_reference_dark_counts,
                        Keys.C_SIGNAL_DARK_COUNTS:
                        packet_header.c_signal_dark_counts,
                        Keys.ELAPSED_RUN_TIME: elapsed_milli,
                        Keys.NUM_WAVELENGTHS: packet_header.num_wavelengths,
                        Keys.C_REFERENCE_COUNTS: cref,
                        Keys.A_REFERENCE_COUNTS: aref,
                        Keys.C_SIGNAL_COUNTS: csig,
                        Keys.A_SIGNAL_COUNTS: asig
                    }

                    # create the instrument particle
                    data_particle = self._extract_sample(
                        self.instrument_particle_class, None, instrument_data,
                        self.ntp_time + time_since_power_up)
                    self._record_buffer.append(data_particle)

                else:  # bad checksum
                    self._exception_callback(
                        RecoverableSampleException(
                            'Checksum error.  Actual %d vs Expected %d' %
                            (actual_checksum, expected_checksum)))

            else:  # packet_id did not match, unexpected data

                self._exception_callback(
                    UnexpectedDataException(
                        'Invalid OPTAA Packet ID found, checking next 4 bytes')
                )

            position = self._stream_handle.tell()  # set the new file position
            packet_id_bytes = self._stream_handle.read(
                MARKER_SIZE)  # read the next two bytes of the file
Ejemplo n.º 41
0
    def start_scan(self):
        """Run a continuous background analog-input scan and stream the
        samples into timestamped WAV files.

        Starts a BACKGROUND/CONTINUOUS UL scan, then loops while the scan is
        running: chunks are copied out of the circular UL buffer into
        ``self.write_chunk_array`` (handling wrap-around with two copies),
        shifted from unsigned 16-bit counts to signed int16 PCM, appended to
        the current SoundFile, and emitted via ``chunk_signal``.  A fresh
        file is opened every ``self.points_to_write`` samples.  The UL
        buffer is freed and ``finished_signal`` emitted on exit.

        NOTE(review): if ``ul.a_in_scan`` raises before ``temp_file`` is
        created, the ``finally`` block's ``temp_file.close()`` raises
        NameError — confirm and guard if that path is reachable.
        """
        # Set filename
        self.file_name = window.rec_settings.FolderLabel.text() + '/' + window.rec_settings.NamePrefixLabel.text() + \
            datetime.datetime.now().strftime("_%Y_%m_%d_%H%M%S") + \
            '.wav'

        try:
            # Start the scan
            ul.a_in_scan(
                self.board_num, self.low_chan, self.high_chan, self.ul_buffer_count,
                self.rate, self.ai_range, self.memhandle, self.scan_options)

            self.status = Status.IDLE
            # Wait for the scan to start fully
            while(self.status == Status.IDLE):
                self.status, _, _ = ul.get_status(
                    self.board_num, FunctionType.AIFUNCTION)

            # Create a file for storing the data
            # PYSOUNDFILE MODULE
            temp_file = SoundFile(self.file_name, 'w+', self.rate, 1, 'PCM_16')
            # with SoundFile(self.file_name, 'w', self.rate, 1, 'PCM_16') as f:
            #     print('abro', self.file_name)
            # WAVE MODULE
            # with wave.open('wavemod' + self.file_name, 'w') as f:
            #     f.setnchannels(1)
            #     f.setsampwidth(2)
            #     f.setframerate(self.rate)

            # Start the write loop
            prev_count = 0
            prev_index = 0
            write_ch_num = self.low_chan

            while self.status != Status.IDLE:
                # Get the latest counts
                self.status, curr_count, _ = ul.get_status(
                    self.board_num, FunctionType.AIFUNCTION)

                new_data_count = curr_count - prev_count

                # Check for a buffer overrun before copying the data, so
                # that no attempts are made to copy more than a full buffer
                # of data
                if new_data_count > self.ul_buffer_count:
                    # Print an error and stop writing
                    # QtGui.QMessageBox.information(self, "Error", "A buffer overrun occurred")
                    ul.stop_background(self.board_num, FunctionType.AIFUNCTION)
                    print("A buffer overrun occurred")  # TODO: replace with a critical message box
                    break  # TODO: find a replacement for breaking out here

                # Check if a chunk is available
                if new_data_count > self.write_chunk_size:
                    self.wrote_chunk = True
                    # Copy the current data to a new array

                    # Check if the data wraps around the end of the UL
                    # buffer. Multiple copy operations will be required.
                    if prev_index + self.write_chunk_size > self.ul_buffer_count - 1:
                        first_chunk_size = self.ul_buffer_count - prev_index
                        second_chunk_size = (
                            self.write_chunk_size - first_chunk_size)

                        # Copy the first chunk of data to the write_chunk_array
                        ul.win_buf_to_array(
                            self.memhandle, self.write_chunk_array, prev_index,
                            first_chunk_size)

                        # Create a pointer to the location in
                        # write_chunk_array where we want to copy the
                        # remaining data
                        second_chunk_pointer = cast(
                            addressof(self.write_chunk_array) + first_chunk_size
                            * sizeof(c_ushort), POINTER(c_ushort))

                        # Copy the second chunk of data to the
                        # write_chunk_array
                        ul.win_buf_to_array(
                            self.memhandle, second_chunk_pointer,
                            0, second_chunk_size)
                    else:
                        # Copy the data to the write_chunk_array
                        ul.win_buf_to_array(
                            self.memhandle, self.write_chunk_array, prev_index,
                            self.write_chunk_size)

                    # Check for a buffer overrun just after copying the data
                    # from the UL buffer. This will ensure that the data was
                    # not overwritten in the UL buffer before the copy was
                    # completed. This should be done before writing to the
                    # file, so that corrupt data does not end up in it.
                    self.status, curr_count, _ = ul.get_status(
                        self.board_num, FunctionType.AIFUNCTION)
                    # Option 1: original (high values)
                    if curr_count - prev_count > self.ul_buffer_count:
                        # Print an error and stop writing
                        ul.stop_background(self.board_num, FunctionType.AIFUNCTION)
                        print("BUFFER OVERRUN")
                        QtGui.QMessageBox.critical(self, "Warning", "A buffer overrun occurred")
                        break
                        # TODO: find a way to keep the program from closing here:

                    for i in range(self.write_chunk_size):

                        # option 1: shift unsigned 16-bit counts to signed PCM
                        self.chunk_ls.append(self.write_chunk_array[i]-32768)

                    # option 4
                    self.chunk_np = np.asarray(self.chunk_ls, dtype=np.int16)
                    # resampled_chunk = samplerate.resample(self.chunk_np, 44100. /
                    #                                       float(self.rate), 'sinc_best')
                    # resampled_chunk = resampy.resample(self.chunk_np, self.rate, 44100)

                    temp_file.write(self.chunk_np)
                    # self.chunk_signal.emit(self.chunk_ls)
                    # self.file_ls.extend(self.chunk_ls)
                    self.chunk_ls = []


                else:
                    self.wrote_chunk = False

                if self.wrote_chunk:
                    self.chunk_signal.emit(self.chunk_np)
                    # Increment prev_count by the chunk size
                    prev_count += self.write_chunk_size
                    # Increment prev_index by the chunk size
                    prev_index += self.write_chunk_size
                    # Wrap prev_index to the size of the UL buffer
                    prev_index %= self.ul_buffer_count

                    if prev_count % self.points_to_write == 0:
                        # Roll over to a fresh timestamped output file.
                        # self.file_signal.emit(self.file_np)
                        # self.write_wav_file(self.file_ls
                        temp_file.close()
                        self.file_name = window.rec_settings.FolderLabel.text() + '/' + window.rec_settings.NamePrefixLabel.text() + \
                            datetime.datetime.now().strftime("_%Y_%m_%d_%H%M%S") + \
                            '.wav'
                        temp_file = SoundFile(self.file_name, 'w', self.rate, 1, 'PCM_16')
                else:
                    # Wait a short amount of time for more data to be
                    # acquired.
                    time.sleep(0.1)
        except ULError as e:
            print('except')
            # QtGui.QMessageBox.critical(window, 'Error', 'Please restart program')
            self.print_ul_error(e)  # TODO: review this function and adapt it to PyQt
        finally:
            # Free the buffer in a finally block to prevent errors from causing
            # a memory leak.
            temp_file.close()
            ul.stop_background(self.board_num, FunctionType.AIFUNCTION)
            ul.win_buf_free(self.memhandle)
            self.finished_signal.emit()
Ejemplo n.º 42
0
    def parse_packet(self, *args):
        """
        Parses a raw packet into a higher level object.
        Args could be a tuple or two different values. In each case the first one is the raw data and the second
        is the meta about the direction and interface to use.

        The function remapped is WinDivertHelperParsePacket:
        Parses a raw packet (e.g. from WinDivertRecv()) into the various packet headers
        and/or payloads that may or may not be present.

        BOOL WinDivertHelperParsePacket(
            __in PVOID pPacket,
            __in UINT packetLen,
            __out_opt PWINDIVERT_IPHDR *ppIpHdr,
            __out_opt PWINDIVERT_IPV6HDR *ppIpv6Hdr,
            __out_opt PWINDIVERT_ICMPHDR *ppIcmpHdr,
            __out_opt PWINDIVERT_ICMPV6HDR *ppIcmpv6Hdr,
            __out_opt PWINDIVERT_TCPHDR *ppTcpHdr,
            __out_opt PWINDIVERT_UDPHDR *ppUdpHdr,
            __out_opt PVOID *ppData,
            __out_opt UINT *pDataLen
        );

        For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_helper_parse_packet

        :return: a CapturedPacket wrapping the payload, raw bytes, parsed
            headers (with their options) and the passed-in meta.
        """
        if len(args) == 1:
            #Maybe this is a poor way to check the type, but it should work
            if hasattr(args[0], "__iter__") and not hasattr(args[0], "strip"):
                raw_packet, meta = args[0]
            else:
                raw_packet, meta = args[0], None
        elif len(args) == 2:
            raw_packet, meta = args[0], args[1]
        else:
            raise ValueError(
                "Wrong number of arguments passed to parse_packet")

        packet_len = len(raw_packet)
        # Consider everything else not part of headers as payload
        # payload = ctypes.c_void_p(0)
        payload_len = c_uint(0)
        ip_hdr, ipv6_hdr = pointer(IpHeader()), pointer(Ipv6Header())
        icmp_hdr, icmpv6_hdr = pointer(IcmpHeader()), pointer(Icmpv6Header())
        tcp_hdr, udp_hdr = pointer(TcpHeader()), pointer(UdpHeader())
        headers = (ip_hdr, ipv6_hdr, icmp_hdr, icmpv6_hdr, tcp_hdr, udp_hdr)

        # Each pointer is passed byref, so the driver rewrites it in place;
        # headers absent from the packet come back as NULL pointers.
        self._lib.WinDivertHelperParsePacket(raw_packet, packet_len,
                                             byref(ip_hdr), byref(ipv6_hdr),
                                             byref(icmp_hdr),
                                             byref(icmpv6_hdr), byref(tcp_hdr),
                                             byref(udp_hdr), None,
                                             byref(payload_len))
        #headers_len = sum(ctypes.sizeof(hdr.contents) for hdr in headers if hdr)
        #headers_len = sum((getattr(hdr.contents, "HdrLength", 0) * 4) for hdr in headers if hdr)

        # clean headers, consider just those that are not None (!=NULL)
        # (a NULL ctypes pointer is falsy, so `if hdr` drops absent headers)
        headers = [hdr.contents for hdr in headers if hdr]

        # Collect each header's trailing options bytes (if any) while
        # accumulating the total header length as the payload offset.
        headers_opts = []
        offset = 0
        for header in headers:
            if hasattr(header, "HdrLength"):
                # HdrLength is expressed in 32-bit words, hence * 4.
                header_len = getattr(header, "HdrLength", 0) * 4
                # Anything beyond the fixed struct size is options data.
                opt_len = header_len - sizeof(header)
                if opt_len:
                    opt = raw_packet[offset + header_len - opt_len:offset +
                                     header_len]
                    headers_opts.append(opt)
                else:
                    headers_opts.append('')
            else:
                headers_opts.append('')
                header_len = sizeof(header)
            offset += header_len

        return CapturedPacket(payload=raw_packet[offset:],
                              raw_packet=raw_packet,
                              headers=[
                                  HeaderWrapper(hdr, opt, self.encoding)
                                  for hdr, opt in zip(headers, headers_opts)
                              ],
                              meta=meta,
                              encoding=self.encoding)
Ejemplo n.º 43
0
# ctypes fundamental floating/integer types: each _SimpleCData subclass maps
# a struct-module type code to a C scalar; _check_size asserts the ctypes
# size agrees with struct.calcsize for that code.
_check_size(c_float)


class c_double(_SimpleCData):
    # 'd': C double
    _type_ = 'd'


_check_size(c_double)


class c_longdouble(_SimpleCData):
    # 'g': C long double
    _type_ = 'g'


# Where long double is no wider than double, alias the two types.
if sizeof(c_longdouble) == sizeof(c_double):
    c_longdouble = c_double
# Where long and long long have the same width, reuse c_long/c_ulong.
if _calcsize('l') == _calcsize('q'):
    c_longlong = c_long
    c_ulonglong = c_ulong
else:

    class c_longlong(_SimpleCData):
        _type_ = 'q'

    _check_size(c_longlong)

    class c_ulonglong(_SimpleCData):
        _type_ = 'Q'

    _check_size(c_ulonglong)
Ejemplo n.º 44
0
# 0x2: u16: The length, in bytes, of the segment, in the file. A value of zero
#           indicates that the segment length is 64K, unless the selector
#           offset is also zero.
# 0x4: u16: flags (see above)
# 0x6: minimum allocation size of the segment in bytes
class SegmentTableEntryStruct(Structure):
    """ctypes mapping of one on-disk NE segment-table entry.

    Packed little-endian layout of four u16 fields — offset, length,
    flags, min_alloc_size — as described in the comments above.
    """
    _pack_ = 1  # match the on-disk layout exactly; no compiler padding
    _fields_ = [
        ("offset", c_ushort),
        ("length", c_ushort),
        ("flags", c_ushort),
        ("min_alloc_size", c_ushort)
    ]


# Size in bytes of one on-disk segment-table entry.
STE_SZ = sizeof(SegmentTableEntryStruct)


def is_data_seg(ste_flags: int) -> bool:
    """Return True if the segment-table flags mark a data segment.

    The original tested ``(flags & bit) == 1``, which is only correct while
    the data-segment flag happens to be bit 0 (its value in the NE format).
    Testing the masked value for non-zero keeps the check correct for any
    flag bit and matches the usual bitmask idiom.
    """
    return bool(ste_flags & SegmentTableEntryFlags.STEF_DATA_SEG.value)


def is_code_seg(ste_flags: int) -> bool:
    """Return True when the flags describe a code (i.e. non-data) segment."""
    data_bit = ste_flags & SegmentTableEntryFlags.STEF_DATA_SEG.value
    return data_bit == 0


def seg_has_reloc(ste_flags: int) -> bool:
    """Return True if the segment has relocation records attached.

    Bug fix: the original compared ``(flags & STEF_HAS_RELOC.value) == 1``,
    which can only ever be True if the relocation flag is bit 0.  In the NE
    format the relocation-info flag is 0x0100, so the masked value is either
    0 or 0x0100 and the function always returned False.  Testing for
    non-zero gives the intended result for any flag bit.
    """
    return bool(ste_flags & SegmentTableEntryFlags.STEF_HAS_RELOC.value)


class SegmentTableEntry(object):
Ejemplo n.º 45
0
# ctypes fundamental floating/integer types: each _SimpleCData subclass maps
# a struct-module type code to a C scalar; _check_size asserts the ctypes
# size agrees with struct.calcsize for that code.
_check_size(c_float)


class c_double(_SimpleCData):
    # "d": C double
    _type_ = "d"


_check_size(c_double)


class c_longdouble(_SimpleCData):
    # "g": C long double
    _type_ = "g"


# Where long double is no wider than double, alias the two types.
if sizeof(c_longdouble) == sizeof(c_double):
    c_longdouble = c_double

if _calcsize("l") == _calcsize("q"):
    # if long and long long have the same size, make c_longlong an alias for c_long
    c_longlong = c_long
    c_ulonglong = c_ulong
else:

    class c_longlong(_SimpleCData):
        _type_ = "q"

    _check_size(c_longlong)

    class c_ulonglong(_SimpleCData):
        _type_ = "Q"
Ejemplo n.º 46
0
    if _os.name == "nt":
        GetLastError = windll.kernel32.GetLastError
    else:
        GetLastError = windll.coredll.GetLastError

    def WinError(code=None, descr=None):
        if code is None:
            code = GetLastError()
        if descr is None:
            descr = FormatError(code).strip()
        return WindowsError(code, descr)

# POINTER(None) resolves to the generic void pointer type.
_pointer_type_cache[None] = c_void_p

# Pick an unsigned integer type with the same width as a pointer for size_t.
if sizeof(c_uint) == sizeof(c_void_p):
    c_size_t = c_uint
elif sizeof(c_ulong) == sizeof(c_void_p):
    c_size_t = c_ulong

# functions

from _ctypes import _memmove_addr, _memset_addr, _string_at_addr, _cast_addr

# Wrap the raw C addresses exported by _ctypes as callable foreign functions.
## void *memmove(void *, const void *, size_t);
memmove = CFUNCTYPE(c_void_p, c_void_p, c_void_p, c_size_t)(_memmove_addr)

## void *memset(void *, int, size_t)
memset = CFUNCTYPE(c_void_p, c_void_p, c_int, c_size_t)(_memset_addr)

def PYFUNCTYPE(restype, *argtypes):
Ejemplo n.º 47
0
 def _getval(self):
     # Read the raw value from this instance's ctypes buffer and return it
     # with its bytes swapped (opposite endianness).
     # NOTE(review): `swap_bytes` and `name` are free variables from an
     # enclosing scope not visible here — confirm at the definition site.
     return swap_bytes(self._buffer[0], sizeof(self), name, 'get')
Ejemplo n.º 48
0
def check_buf(buf, direction):
    """Walk a captured byte stream and pretty-print each message found.

    Recognizes, front to back: 4-byte big-endian length-prefixed XML
    plists, 16-byte-header framed XML plists, length-prefixed binary
    plists (``bplist00``) and DTX messages (magic ``y[=\\x1f``).  Any
    other data is reported as encrypted/unknown and stops the walk.

    :param buf: remaining raw bytes; consumed message by message.
    :param direction: label printed alongside each decoded message.
    :return: None; output goes to ``print``/``logging``.
    """
    while buf:
        cursor = 0
        if buf[4:].startswith(b'<?xml'):
            # 4-byte big-endian length prefix followed by an XML plist.
            cursor = buf[0:4]
            if not cursor or len(cursor) != 4:
                return None
            cursor = struct.unpack('>L', cursor)[0]
            payload = buf[4:4 + cursor]
            cursor += 4  # advance past prefix + payload
            if not payload:
                return None
            # Strip the HardwarePlatform blob before parsing the plist.
            payload = HARDWARE_PLATFORM_SUB(
                '', payload.decode('utf-8')).encode('utf-8')
            logging.debug(f'PlistByte:{payload}')
            data = plistlib.loads(payload)
            print(direction, 'PlistData', data)
        elif buf[16:].startswith(b'<?xml'):
            # Framed message: u32 total length, then u32 version/resp/tag
            # triple, then the XML plist body.
            cursor = buf[0:4]
            cursor = struct.unpack('I', cursor)[0]
            body = buf[4:cursor]
            version, resp, tag = struct.unpack('III', body[:0xc])
            if version == 1:
                data = body[0xc:]
                logging.debug(f'PlistByte:{data}')
                data = plistlib.loads(data)
                print(direction, 'PlistData:', data)

        elif buf[4:].startswith(b'bplist00'):
            # 4-byte big-endian length prefix followed by a binary plist.
            cursor = buf[0:4]
            if not cursor or len(cursor) != 4:
                return None
            cursor = struct.unpack('>L', cursor)[0]
            payload = buf[4:4 + cursor]
            cursor += 4
            if not payload:
                return None
            logging.debug(f'PlistByte:{payload}')
            data = plistlib.loads(payload)
            print(direction, 'PlistData', data)

        elif buf[:4] == b'y[=\x1f':
            # DTX (Instruments) message: fixed header plus `length` bytes.
            try:
                _message_header = DTXMessageHeader.from_buffer_copy(
                    buf[0:sizeof(DTXMessageHeader)])
                cursor = _message_header.length + sizeof(DTXMessageHeader)
                logging.debug(f'DTXByte:{buf[:cursor]}')
                p = DTXMessage.from_bytes(buf[:cursor])
                header = p.get_selector()
                if header:
                    print(
                        f'接收 DTX Data: header:{selector_to_pyobject(p._selector)} body:{get_auxiliary_text(p)}'
                    )
                else:
                    print(direction, 'DTX buf:', buf[:cursor])
            except Exception as E:
                # Malformed DTX frame — dump it rather than abort the walk.
                print(direction, 'ErrorBuf:', buf)
        else:
            # Unrecognized (likely encrypted) data — nothing more to parse.
            print(direction, 'EncryptBuf', buf)
            return

        # cursor == 0 means the branch taken did not consume anything.
        if not cursor:
            return
        buf = buf[cursor:]
Ejemplo n.º 49
0
def run_example():
    """Continuously scan analog-input channels in the background and stream
    scaled readings to ``scan_data.csv``.

    Sizes a circular UL buffer, starts a BACKGROUND|CONTINUOUS|SCALEDATA
    scan, then repeatedly copies one tenth of the buffer at a time
    (handling wrap-around with two copy operations) into a local array and
    appends the values to the CSV, one column per channel.  Stops after
    ``num_buffers_to_write`` buffers' worth of points, or on overrun or UL
    error.  The UL buffer is always freed in the ``finally`` block.
    """
    board_num = 0
    rate = 100
    file_name = 'scan_data.csv'

    # The size of the UL buffer to create, in seconds
    buffer_size_seconds = 2
    # The number of buffers to write. After this number of UL buffers are
    # written to file, the example will be stopped.
    num_buffers_to_write = 5

    if use_device_detection:
        ul.ignore_instacal()
        if not util.config_first_detected_device(board_num):
            print("Could not find device.")
            return

    ai_props = AnalogInputProps(board_num)
    if (ai_props.num_ai_chans < 1 or
            not ScanOptions.SCALEDATA in ai_props.supported_scan_options):
        util.print_unsupported_example(board_num)
        return

    low_chan = 0
    high_chan = min(3, ai_props.num_ai_chans - 1)
    num_chans = high_chan - low_chan + 1

    # Create a circular buffer that can hold buffer_size_seconds worth of
    # data, or at least 10 points (this may need to be adjusted to prevent
    # a buffer overrun)
    points_per_channel = max(rate * buffer_size_seconds, 10)

    # Some hardware requires that the total_count is an integer multiple
    # of the packet size. For this case, calculate a points_per_channel
    # that is equal to or just above the points_per_channel selected
    # which matches that requirement.
    if ai_props.continuous_requires_packet_size_multiple:
        packet_size = ai_props.packet_size
        remainder = points_per_channel % packet_size
        if remainder != 0:
            points_per_channel += packet_size - remainder

    ul_buffer_count = points_per_channel * num_chans

    # Write the UL buffer to the file num_buffers_to_write times.
    points_to_write = ul_buffer_count * num_buffers_to_write

    # When handling the buffer, we will read 1/10 of the buffer at a time
    write_chunk_size = int(ul_buffer_count / 10)

    ai_range = ai_props.available_ranges[0]

    scan_options = (ScanOptions.BACKGROUND | ScanOptions.CONTINUOUS |
                    ScanOptions.SCALEDATA)

    memhandle = ul.scaled_win_buf_alloc(ul_buffer_count)

    # Allocate an array of doubles temporary storage of the data
    write_chunk_array = (c_double * write_chunk_size)()

    # Check if the buffer was successfully allocated
    if not memhandle:
        print("Failed to allocate memory.")
        return

    try:
        # Start the scan
        ul.a_in_scan(
            board_num, low_chan, high_chan, ul_buffer_count,
            rate, ai_range, memhandle, scan_options)

        status = Status.IDLE
        # Wait for the scan to start fully
        while(status == Status.IDLE):
            status, _, _ = ul.get_status(
                board_num, FunctionType.AIFUNCTION)

        # Create a file for storing the data
        with open(file_name, 'w') as f:
            print('Writing data to ' + file_name, end='')

            # Write a header to the file
            for chan_num in range(low_chan, high_chan + 1):
                f.write('Channel ' + str(chan_num) + ',')
            f.write(u'\n')

            # Start the write loop
            prev_count = 0
            prev_index = 0
            write_ch_num = low_chan
            while status != Status.IDLE:
                # Get the latest counts
                status, curr_count, _ = ul.get_status(
                    board_num, FunctionType.AIFUNCTION)

                new_data_count = curr_count - prev_count

                # Check for a buffer overrun before copying the data, so
                # that no attempts are made to copy more than a full buffer
                # of data
                if new_data_count > ul_buffer_count:
                    # Print an error and stop writing
                    ul.stop_background(board_num, FunctionType.AIFUNCTION)
                    print("A buffer overrun occurred")
                    break

                # Check if a chunk is available
                if new_data_count > write_chunk_size:
                    wrote_chunk = True
                    # Copy the current data to a new array

                    # Check if the data wraps around the end of the UL
                    # buffer. Multiple copy operations will be required.
                    if prev_index + write_chunk_size > ul_buffer_count - 1:
                        first_chunk_size = ul_buffer_count - prev_index
                        second_chunk_size = (
                            write_chunk_size - first_chunk_size)

                        # Copy the first chunk of data to the
                        # write_chunk_array
                        ul.scaled_win_buf_to_array(
                            memhandle, write_chunk_array, prev_index,
                            first_chunk_size)

                        # Create a pointer to the location in
                        # write_chunk_array where we want to copy the
                        # remaining data
                        second_chunk_pointer = cast(
                            addressof(write_chunk_array) + first_chunk_size
                            * sizeof(c_double), POINTER(c_double))

                        # Copy the second chunk of data to the
                        # write_chunk_array
                        ul.scaled_win_buf_to_array(
                            memhandle, second_chunk_pointer,
                            0, second_chunk_size)
                    else:
                        # Copy the data to the write_chunk_array
                        ul.scaled_win_buf_to_array(
                            memhandle, write_chunk_array, prev_index,
                            write_chunk_size)

                    # Check for a buffer overrun just after copying the data
                    # from the UL buffer. This will ensure that the data was
                    # not overwritten in the UL buffer before the copy was
                    # completed. This should be done before writing to the
                    # file, so that corrupt data does not end up in it.
                    status, curr_count, _ = ul.get_status(
                        board_num, FunctionType.AIFUNCTION)
                    if curr_count - prev_count > ul_buffer_count:
                        # Print an error and stop writing
                        ul.stop_background(board_num, FunctionType.AIFUNCTION)
                        print("A buffer overrun occurred")
                        break

                    # Emit one CSV cell per sample, starting a new row each
                    # time the channel counter wraps past high_chan.
                    for i in range(write_chunk_size):
                        f.write(str(write_chunk_array[i]) + ',')
                        write_ch_num += 1
                        if write_ch_num == high_chan + 1:
                            write_ch_num = low_chan
                            f.write(u'\n')
                else:
                    wrote_chunk = False

                if wrote_chunk:
                    # Increment prev_count by the chunk size
                    prev_count += write_chunk_size
                    # Increment prev_index by the chunk size
                    prev_index += write_chunk_size
                    # Wrap prev_index to the size of the UL buffer
                    prev_index %= ul_buffer_count

                    if prev_count >= points_to_write:
                        break
                    print('.', end='')
                else:
                    # Wait a short amount of time for more data to be
                    # acquired.
                    time.sleep(0.1)

        ul.stop_background(board_num, FunctionType.AIFUNCTION)
    except ULError as e:
        util.print_ul_error(e)
    finally:
        print('Done')

        # Free the buffer in a finally block to prevent errors from causing
        # a memory leak.
        ul.win_buf_free(memhandle)

        if use_device_detection:
            ul.release_daq_device(board_num)
Ejemplo n.º 50
0
    _type_ = 'f'


# ctypes fundamental floating/integer types: each _SimpleCData subclass maps
# a struct-module type code to a C scalar; _check_size asserts the ctypes
# size agrees with struct.calcsize for that code.
_check_size(c_float)

class c_double(_SimpleCData):
    # 'd': C double
    _type_ = 'd'


_check_size(c_double)

class c_longdouble(_SimpleCData):
    # 'g': C long double
    _type_ = 'g'


# Where long double is no wider than double, alias the two types.
if sizeof(c_longdouble) == sizeof(c_double):
    c_longdouble = c_double
# Where long and long long have the same width, reuse c_long/c_ulong.
if _calcsize('l') == _calcsize('q'):
    c_longlong = c_long
    c_ulonglong = c_ulong
else:

    class c_longlong(_SimpleCData):
        _type_ = 'q'


    _check_size(c_longlong)

    class c_ulonglong(_SimpleCData):
        _type_ = 'Q'
Ejemplo n.º 51
0
 def _setval(self, value):
     d = result()
     d.value = value
     self._buffer[0] = swap_bytes(d.value, sizeof(self), name, 'set')