Code Example #1
File: __init__.py Project: socialcube/chloride
 def digest(self):
     state = SHA512.State()
     pointer = ctypes.pointer(state)
     ctypes.memmove(pointer, self._pointer, SHA512.State.SIZE)
     digest = ctypes.create_string_buffer(SHA512.SIZE)
     _sha512_final(pointer, digest)
     return Digest(digest.raw)
Code Example #2
    def _decode_audio_packet(self):
        packet = self._packet
        size_out = ctypes.c_int(len(self._audio_buffer))

        audio_packet_ptr = ctypes.cast(packet.data, ctypes.c_void_p)
        audio_packet_size = packet.size

        while True:

            used = av.avbin_decode_audio(self._audio_stream,
                audio_packet_ptr, audio_packet_size,
                self._audio_buffer, size_out)

            if used < 0:
                self._audio_packet_size = 0
                break

            audio_packet_ptr.value += used
            audio_packet_size -= used

            if size_out.value <= 0:
                continue

            # XXX how did this ever work?  replaced with copy below
            # buffer = ctypes.string_at(self._audio_buffer, size_out)

            # XXX to actually copy the data.. but it never used to crash, so
            # maybe I'm missing something
            buffer = ctypes.create_string_buffer(size_out.value)
            ctypes.memmove(buffer, self._audio_buffer, len(buffer))
            buffer = buffer.raw

            duration = float(len(buffer)) / self.audio_format.bytes_per_second
            self._audio_packet_timestamp = \
                timestamp = timestamp_from_avbin(packet.timestamp)
            return AudioData(buffer, len(buffer), timestamp, duration, []) 
Code Example #3
def read_struct(li, struct):
    s = struct()
    slen = ctypes.sizeof(s)
    bytes = li.read(slen)
    fit = min(len(bytes), slen)
    ctypes.memmove(ctypes.addressof(s), bytes, fit)
    return s
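A minimal usage sketch of read_struct (the Header layout and sample bytes are invented for illustration; any file-like object with a read() method works). Because the helper clamps the copy to min(len(bytes), slen), a short read simply leaves the trailing fields zeroed:

import ctypes
import io

class Header(ctypes.Structure):
    _fields_ = [("magic", ctypes.c_uint32), ("version", ctypes.c_uint16)]

# 6 input bytes vs. sizeof(Header) == 8 (alignment padding): the tail stays zero
li = io.BytesIO(b"\x7fELF\x02\x00")
hdr = read_struct(li, Header)
print(hex(hdr.magic), hdr.version)  # 0x464c457f 2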
Code Example #4
File: __init__.py Project: socialcube/chloride
 def digest(self):
     state = BLAKE2B.State()
     pointer = ctypes.pointer(state)
     ctypes.memmove(pointer, self._pointer, BLAKE2B.State.SIZE)
     digest = ctypes.create_string_buffer(self._size)
     _blake2b_final(pointer, digest, self._size)
     return Digest(digest.raw)
Code Example #5
def get_struct(str_, off, struct):
    s = struct()
    slen = ctypes.sizeof(s)
    bytes = str_[off:off+slen]
    fit = min(len(bytes), slen)
    ctypes.memmove(ctypes.addressof(s), bytes, fit)
    return s
Code Example #6
File: windows.py Project: Itay4/pandas
    def copy_windows(text):
        # This function is heavily based on
        # http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard
        with window() as hwnd:
            # http://msdn.com/ms649048
            # If an application calls OpenClipboard with hwnd set to NULL,
            # EmptyClipboard sets the clipboard owner to NULL;
            # this causes SetClipboardData to fail.
            # => We need a valid hwnd to copy something.
            with clipboard(hwnd):
                safeEmptyClipboard()

                if text:
                    # http://msdn.com/ms649051
                    # If the hMem parameter identifies a memory object,
                    # the object must have been allocated using the
                    # function with the GMEM_MOVEABLE flag.
                    count = wcslen(text) + 1
                    handle = safeGlobalAlloc(GMEM_MOVEABLE,
                                             count * sizeof(c_wchar))
                    locked_handle = safeGlobalLock(handle)

                    ctypes.memmove(c_wchar_p(locked_handle), c_wchar_p(text),
                                   count * sizeof(c_wchar))

                    safeGlobalUnlock(handle)
                    safeSetClipboardData(CF_UNICODETEXT, handle)
Code Example #7
File: test_driver.py Project: esc/numba
    def test_allocate(self):
        regions = self.gpu.regions
        # At least one region
        self.assertGreater(len(regions), 0)
        # Find kernel argument regions
        kernarg_regions = list()
        for r in regions:
            if r.supports(enums.HSA_REGION_GLOBAL_FLAG_KERNARG):
                kernarg_regions.append(r)

        self.assertGreater(len(kernarg_regions), 0)
        # Test allocating at the kernel argument region
        kernarg_region = kernarg_regions[0]
        nelem = 10
        ptr = kernarg_region.allocate(ctypes.sizeof(ctypes.c_float) * nelem)
        self.assertNotEqual(ctypes.addressof(ptr), 0,
                            "pointer must not be NULL")
        # Test writing to it
        src = np.random.random(nelem).astype(np.float32)
        ctypes.memmove(ptr, src.ctypes.data, src.nbytes)

        ref = (ctypes.c_float * nelem).from_address(ptr.value)
        for i in range(src.size):
            self.assertEqual(ref[i], src[i])
        roc.hsa_memory_free(ptr)
Code Example #8
File: __init__.py Project: rashadkm/grass_cmake
    def get_row(self, row, row_buffer=None):
        """This method returns the row using:

            * the read mode and
            * `rowcache` method

        :param row: the number of the row to obtain
        :type row: int
        :param row_buffer: Specify the Buffer object that will be instantiated
        :type row_buffer: Buffer object

            >>> elev = RasterRowIO(test_raster_name)
            >>> elev.open('r')
            >>> for row in elev:
            ...     row
            Buffer([11, 21, 31, 41], dtype=int32)
            Buffer([12, 22, 32, 42], dtype=int32)
            Buffer([13, 23, 33, 43], dtype=int32)
            Buffer([14, 24, 34, 44], dtype=int32)

            >>> elev.close()

        """
        if row_buffer is None:
            row_buffer = Buffer((self._cols,), self.mtype)
        rowio_buf = librowio.Rowio_get(ctypes.byref(self.rowio.c_rowio), row)
        ctypes.memmove(row_buffer.p, rowio_buf, self.rowio.row_size)
        return row_buffer
Code Example #9
File: csoundmagics.py Project: fggp/ctcsound
 def fillTable(self, num, arr):
     """Fill a table with GEN2 using the data in arr.
     
     If the table did not exist, it will be created. If the table existed
     but had the wrong size, it will be resized.
     """
     if type(arr) != np.ndarray and type(arr) != list and type(arr) != tuple:
         raise TypeError("Argument is not array, list, or tuple")
     if type(arr) == np.ndarray and arr.ndim > 1:
         raise TypeError("Only one dimensional arrays are valid")
     
     if self._clientAddr:
         data = ', '.join(map(str, arr))
         data = 'gitemp ftgen {}, 0, {}, -2, '.format(num, len(arr)) + data
         self.sendCode(data)
         return
     
     p = np.array(arr).astype(ctcsound.MYFLT)
     table = self.table(num)
     if (type(table) == np.ndarray) and (table.size == p.size):
         pass
     else:
         if type(table) == np.ndarray:
             self._debugPrint("Resizing table", num, "from", table.size, "to", p.size)
         else:
             self._debugPrint("Creating table ", num)
         self.makeTable(num, p.size, -2, 0)
         table = self.table(num)
     src = p.ctypes.data_as(ctypes.POINTER(ctcsound.MYFLT))
     dest = table.ctypes.data_as(ctypes.POINTER(ctcsound.MYFLT))
     ctypes.memmove(dest, src, p.size*self._myfltSize)
Code Example #10
File: encoders.py Project: GMercat/picamera
 def _add_exif_tag(self, tag, value):
     # Format the tag and value into an appropriate bytes string, encoded
     # with the Exif encoding (ASCII)
     if isinstance(tag, str):
         tag = tag.encode(self.exif_encoding)
     if isinstance(value, str):
         value = value.encode(self.exif_encoding)
     elif isinstance(value, datetime.datetime):
         value = value.strftime('%Y:%m:%d %H:%M:%S').encode(self.exif_encoding)
     # MMAL_PARAMETER_EXIF_T is a variable sized structure, hence all the
     # mucking about with string buffers here...
     buf = ct.create_string_buffer(
         ct.sizeof(mmal.MMAL_PARAMETER_EXIF_T) + len(tag) + len(value) + 1)
     mp = ct.cast(buf, ct.POINTER(mmal.MMAL_PARAMETER_EXIF_T))
     mp[0].hdr.id = mmal.MMAL_PARAMETER_EXIF
     mp[0].hdr.size = len(buf)
     if (b'=' in tag or b'\x00' in value):
         data = tag + value
         mp[0].keylen = len(tag)
         mp[0].value_offset = len(tag)
         mp[0].valuelen = len(value)
     else:
         data = tag + b'=' + value
     ct.memmove(mp[0].data, data, len(data))
     mmal_check(
         mmal.mmal_port_parameter_set(self.output_port, mp[0].hdr),
         prefix="Failed to set Exif tag %s" % tag)
Code Example #11
File: pf.py Project: luserx0/sshuttle
    def query_nat(self, family, proto, src_ip, src_port, dst_ip, dst_port):
        [proto, family, src_port, dst_port] = [
            int(v) for v in [proto, family, src_port, dst_port]]

        packed_src_ip = socket.inet_pton(family, src_ip)
        packed_dst_ip = socket.inet_pton(family, dst_ip)

        assert len(packed_src_ip) == len(packed_dst_ip)
        length = len(packed_src_ip)

        pnl = self.pfioc_natlook()
        pnl.proto = proto
        pnl.direction = self.PF_OUT
        pnl.af = family
        memmove(addressof(pnl.saddr), packed_src_ip, length)
        memmove(addressof(pnl.daddr), packed_dst_ip, length)
        self._add_natlook_ports(pnl, src_port, dst_port)

        ioctl(pf_get_dev(), self.DIOCNATLOOK,
              (c_char * sizeof(pnl)).from_address(addressof(pnl)))

        ip = socket.inet_ntop(
            pnl.af, (c_char * length).from_address(addressof(pnl.rdaddr)).raw)
        port = socket.ntohs(self._get_natlook_port(pnl.rdxport))
        return (ip, port)
Code Example #12
File: system.py Project: ankitmodi/Projects
def _set_argv(process_name):
  """
  Overwrites our argv in a similar fashion to how it's done in C with:
  strcpy(argv[0], "new_name");
  """

  if Py_GetArgcArgv is None:
    return

  global _PROCESS_NAME

  # both gets the current process name and initializes _MAX_NAME_LENGTH

  current_name = get_process_name()

  argv, argc = ctypes.c_int(0), argc_t()
  Py_GetArgcArgv(argv, ctypes.pointer(argc))

  if len(process_name) > _MAX_NAME_LENGTH:
    raise IOError("Can't rename process to something longer than our initial name (this would overwrite memory used for the env)")

  # space we need to clear
  zero_size = max(len(current_name), len(process_name))

  ctypes.memset(argc.contents, 0, zero_size + 1)  # null terminate the string's end
  ctypes.memmove(argc.contents, process_name, len(process_name))
  _PROCESS_NAME = process_name
Code Example #13
File: winutils.py Project: PR3SID3NT3/pydivert
def inet_ntop(address_family, packed_ip, encoding="UTF-8"):
    addr = sockaddr()
    addr.sa_family = address_family
    addr_size = c_int(sizeof(addr))
    ip_string = create_string_buffer(128)
    ip_string_size = c_int(sizeof(ip_string))

    if address_family == socket.AF_INET:
        if len(packed_ip) != sizeof(addr.ipv4_addr):
            raise socket.error('packed IP wrong length for inet_ntop')
        memmove(addr.ipv4_addr, packed_ip, 4)
    elif address_family == socket.AF_INET6:
        if len(packed_ip) != sizeof(addr.ipv6_addr):
            raise socket.error('packed IP wrong length for inet_ntop')
        memmove(addr.ipv6_addr, packed_ip, 16)
    else:
        raise socket.error('unknown address family')

    if WSAAddressToStringA(byref(addr),
                           addr_size,
                           None,
                           ip_string,
                           byref(ip_string_size)) != 0:
        raise socket.error(FormatError())

    return (ip_string[:ip_string_size.value - 1]).decode(encoding)
Code Example #14
def _encode(func, image, quality):
    """
    Encode the image with the given quality using the given encoding
    function

    :param func: The encoding function
    :param image: The image to be encoded
    :param quality: The encode quality factor

    :type function: function
    :type image: BitmapHandler
    :type quality: float
    """
    # Call encode function
    data = str(image.bitmap)
    width = c_int(image.width)
    height = c_int(image.height)
    stride = c_int(image.stride)
    q_factor = c_float(quality)
    output_p = c_void_p()

    size = func(data, width, height, stride, q_factor, byref(output_p))

    # Check return size
    if size == 0:
        raise EncodeError

    # Convert output
    output = create_string_buffer(size)

    memmove(output, output_p, size)

    return WebPHandler(bytearray(output), image.width, image.height)
Code Example #15
    def go(self, target, outfile, infile, stderr=sys.stderr, timeout=None,
           pre_proc_started=do_nothing, post_proc_started=do_nothing):

        crashed = [False]
        hung = [False]

        # clean the SHM buffer in case we reuse the SHMInstrumentation object
        ctypes.memmove(self.trace_bytes_addr, self.empty_trace_bytes_addr,
                       MAP_SIZE)

        pre_proc_started()

        # support cStringIO - if we can't get the file number, we'll use pipes
        # instead of forwarding the fd to the subprocess
        try:
            infile_fileno = infile.fileno()
        except AttributeError:
            infile_fileno = None
        except io.UnsupportedOperation:
            infile_fileno = None
        p_stdin = infile if infile_fileno is not None else subprocess.PIPE

        p = [None]
        if timeout is not None:

            def kill_process(p, hung):
                if p[0]:
                    p[0].kill()
                    hung[0] = True
                else:
                    raise RuntimeError("Race condition at p[0].kill")
            timer = threading.Timer(timeout, lambda: kill_process(p, hung))

        p[0] = subprocess.Popen(target, stdin=p_stdin, stderr=stderr,
                                env={'__AFL_SHM_ID': str(self.shm_id)})
        if timeout is not None:
            timer.start()

        if p_stdin == subprocess.PIPE:
            p[0].stdin.write(infile.read())
            p[0].stdin.close()

        p[0].wait()

        if timeout is not None:
            timer.cancel()
        post_proc_started()

        if p[0].returncode < 0 and p[0].returncode != -9:
            crashed[0] = p[0].returncode

        trace_bytes = ctypes.string_at(ctypes.c_void_p(self.trace_bytes_addr),
                                       MAP_SIZE)

        # an equivalent of classify_counts() from afl-fuzz.
        # no idea what it does really.
        trace_bytes = ''.join(map(lambda c: count_class_lookup[ord(c)],
                                  trace_bytes))

        return trace_bytes, crashed[0], hung[0]
Code Example #16
File: __init__.py Project: bitcraft/pyglet
    def write(self, audio_data, length):
        # Pass audio_data=None to write silence
        if length == 0:
            return 0

        self.lock()

        p1 = ctypes.c_void_p()
        l1 = lib.DWORD()
        p2 = ctypes.c_void_p()
        l2 = lib.DWORD()
        self._buffer.Lock(self._write_cursor_ring, length,
                          ctypes.byref(p1), l1, ctypes.byref(p2), l2, 0)
        assert length == l1.value + l2.value

        if audio_data:
            ctypes.memmove(p1, audio_data.data, l1.value)
            audio_data.consume(l1.value, self.source_group.audio_format)
            if l2.value:
                ctypes.memmove(p2, audio_data.data, l2.value)
                audio_data.consume(l2.value, self.source_group.audio_format)
        else:
            ctypes.memset(p1, 0, l1.value)
            if l2.value:
                ctypes.memset(p2, 0, l2.value)
        self._buffer.Unlock(p1, l1, p2, l2)

        self._write_cursor += length
        self._write_cursor_ring += length
        self._write_cursor_ring %= self._buffer_size
        self.unlock()
Code Example #17
File: texture.py Project: adamlwgriffiths/GLETools
    def __init__(self, width, height, format=GL_RGBA, filter=GL_LINEAR, unit=GL_TEXTURE0, data=None, mipmap=0, clamp=False):
        if format in self.float_targets:
            if not gl_info.have_extension('GL_ARB_texture_float'):
                raise ExtensionMissing('no floating point texture support (GL_ARB_texture_float)')

        Context.__init__(self)
        self.clamp = clamp
        self.mipmap = mipmap
        self.width = width
        self.height = height
        self.format = format
        self.filter = filter
        self.unit = unit
        spec = self.spec = self.specs[format]
        self.buffer_type = (spec.type.obj * (width * height * spec.channels.count))
        id = self.id = gen_texture()
        if data:
            if isinstance(data, str):
                pointer = cast(c_char_p(data), c_void_p)
                source = self.buffer_type.from_address(pointer.value)
                target = self.buffer_type()
                memmove(target, source, sizeof(source))
                self.buffer = target
            else:
                self.buffer = self.buffer_type(*data)

        else:
            self._buffer = None

        self.update()
        self.display = self.make_display()
Code Example #18
def DerefPointer(pointer, processID):
	try:
		if ctypes.addressof(pointer.contents) != 0:
			pointedAtType = type(pointer.contents)
			typeInstanceInLocalMem = pointedAtType()
			contents = ReadMem(ctypes.addressof(pointer.contents), ctypes.sizeof(pointer.contents), None, processID)

			fit = min(len(contents), ctypes.sizeof(typeInstanceInLocalMem))

			# Copy the bytes read from the remote process into the local instance
			ctypes.memmove(ctypes.addressof(typeInstanceInLocalMem), contents, fit)

			return typeInstanceInLocalMem
		else:
			return None
	except Exception:
		return None
Code Example #19
File: controller.py Project: JohnHubcr/fibratus
 def logger_name(self, logger_name):
     name_len = len(logger_name) + 1
     if self.max_string_len < name_len:
         raise ArgumentError("Logger name %s is too long" % logger_name)
     props = self._props
     logger = c_wchar_p(addressof(props.contents) + props.contents.logger_name_offset)
     memmove(logger, c_wchar_p(logger_name), sizeof(c_wchar) * name_len)
Code Example #20
    def consume(self, bytes, audio_format):
        '''Remove some data from beginning of packet.  All events are
        cleared.'''
        self.events = ()
        if bytes == self.length:
            self.data = None
            self.length = 0
            self.timestamp += self.duration
            self.duration = 0.
            return
        elif bytes == 0:
            return

        if not isinstance(self.data, str):
            # XXX Create a string buffer for the whole packet then
            #     chop it up.  Could do some pointer arith here and
            #     save a bit of data pushing, but my guess is this is
            #     faster than fudging around with ctypes (and easier).
            data = ctypes.create_string_buffer(self.length)
            ctypes.memmove(data, self.data, self.length)
            self.data = data
        self.data = self.data[bytes:]
        self.length -= bytes
        self.duration -= bytes / float(audio_format.bytes_per_second)
        self.timestamp += bytes / float(audio_format.bytes_per_second)
Code Example #21
File: utils.py Project: mciepluc/cocotb
def unpack(ctypes_obj, string, bytes=None):
    """Unpack a python string into a ctypes structure

    Args:
        ctypes_obj (ctypes.Structure):  ctypes structure to unpack into

        string (str):  String to copy over the ctypes_obj memory space

    Kwargs:
        bytes: Number of bytes to copy

    Raises:
        ValueError, MemoryError

    If the length of the string is not the correct size for the memory
    footprint of the ctypes structure then the bytes keyword argument must
    be used
    """
    if bytes is None:
        if len(string) != ctypes.sizeof(ctypes_obj):
            raise ValueError("Attempt to unpack a string of size %d into a \
                struct of size %d" % (len(string), ctypes.sizeof(ctypes_obj)))
        bytes = len(string)

    if bytes > ctypes.sizeof(ctypes_obj):
        raise MemoryError("Attempt to unpack %d bytes over an object \
                        of size %d" % (bytes, ctypes.sizeof(ctypes_obj)))

    ctypes.memmove(ctypes.addressof(ctypes_obj), string, bytes)
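A minimal usage sketch of unpack (the Point structure and byte string are invented for illustration, not part of cocotb):

import ctypes

class Point(ctypes.Structure):
    _fields_ = [("x", ctypes.c_int32), ("y", ctypes.c_int32)]

p = Point()
# 8 bytes matches sizeof(Point) exactly, so no explicit byte count is needed
unpack(p, b"\x01\x00\x00\x00\x02\x00\x00\x00")
assert (p.x, p.y) == (1, 2)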
Code Example #22
File: util.py Project: datastax/python-driver
    def inet_ntop(address_family, packed_ip):
        if address_family == socket.AF_INET:
            return socket.inet_ntoa(packed_ip)

        addr = sockaddr()
        addr.sa_family = address_family
        addr_size = ctypes.c_int(ctypes.sizeof(addr))
        ip_string = ctypes.create_string_buffer(128)
        ip_string_size = ctypes.c_int(ctypes.sizeof(ip_string))

        if address_family == socket.AF_INET6:
            if len(packed_ip) != ctypes.sizeof(addr.ipv6_addr):
                raise socket.error('packed IP wrong length for inet_ntop')
            ctypes.memmove(addr.ipv6_addr, packed_ip, 16)
        else:
            raise socket.error('unknown address family')

        if WSAAddressToStringA(
                ctypes.byref(addr),
                addr_size,
                None,
                ip_string,
                ctypes.byref(ip_string_size)
        ) != 0:
            raise socket.error(ctypes.FormatError())

        return ip_string[:ip_string_size.value - 1]
Code Example #23
File: __init__.py Project: aquynh/capstone
 def __init__(self, cs, all_info):
     self._raw = copy_ctypes(all_info)
     self._cs = cs
     if self._cs._detail and self._raw.id != 0:
         # save detail
         self._raw.detail = ctypes.pointer(all_info.detail._type_())
         ctypes.memmove(ctypes.byref(self._raw.detail[0]), ctypes.byref(all_info.detail[0]), ctypes.sizeof(type(all_info.detail[0])))
Code Example #24
File: fuse.py Project: 0x08dev/archive
 def read(self, path, result, size, offset, file_info):
     data = self.operations.read(path.decode('utf-8'), size, offset, file_info.contents.fh)
     if not data:
         return 0
     data = ctypes.create_string_buffer(data[:size], size)
     ctypes.memmove(result, data, size)
     return size
Code Example #25
File: array_type.py Project: lucciano/parakeet
  def to_python(self, obj):
    """
    For now, to avoid dealing with the messiness of ownership, we just always
    copy data on the way out of Parakeet
    """

    shape = self.shape_t.to_python(obj.shape.contents)

    elt_size = self.elt_type.nbytes
    strides_in_elts = self.strides_t.to_python(obj.strides.contents)
    strides_in_bytes = tuple([s * elt_size for s in strides_in_elts])

    base_ptr = obj.data

    nbytes = obj.total_elts * elt_size

    dest_buf = PyBuffer_New(nbytes)
    dest_ptr, _ = buffer_info(dest_buf, self.ptr_t.ctypes_repr)
    ctypes.memmove(dest_ptr, base_ptr, nbytes)

    
    return np.ndarray(shape, dtype = self.elt_type.dtype,
                      buffer = dest_buf,
                      strides = strides_in_bytes,
                      offset = obj.offset * elt_size)
Code Example #26
File: sodium.py Project: socialcube/binarize
 def digest(self):
     state = SHA512.State()
     pointer = ctypes.pointer(state)
     ctypes.memmove(pointer, self._pointer, SHA512.State.size)
     digest = ctypes.create_string_buffer(SHA512.size)
     _libsodium.crypto_hash_sha512_final(pointer, digest)
     return Digest(digest.raw)
Code Example #27
File: freetype.py Project: cajlarsson/village
    def __init__(self, data):
        self.buffer = (ctypes.c_byte * len(data))()
        ctypes.memmove(self.buffer, data, len(data))

        ft_library = ft_get_library()
        self.face = FT_Face()
        r = FT_New_Memory_Face(ft_library, 
            self.buffer, len(self.buffer), 0, self.face)
        if r != 0:
            raise base.FontException('Could not load font data')

        self.name = self.face.contents.family_name
        self.bold = self.face.contents.style_flags & FT_STYLE_FLAG_BOLD != 0
        self.italic = self.face.contents.style_flags & FT_STYLE_FLAG_ITALIC != 0

        # Replace Freetype's generic family name with TTF/OpenType specific
        # name if we can find one; there are some instances where Freetype
        # gets it wrong.
        if self.face.contents.face_flags & FT_FACE_FLAG_SFNT:
            name = FT_SfntName()
            for i in range(FT_Get_Sfnt_Name_Count(self.face)):
                result = FT_Get_Sfnt_Name(self.face, i, name)
                if result != 0:
                    continue
                if not (name.platform_id == TT_PLATFORM_MICROSOFT and
                        name.encoding_id == TT_MS_ID_UNICODE_CS):
                    continue
                if name.name_id == TT_NAME_ID_FONT_FAMILY:
                    string = string_at(name.string, name.string_len)
                    self.name = string.decode('utf-16be', 'ignore')
Code Example #28
File: xlib.py Project: dwizard/Pokemon-Game
    def get_modes(self):
        if not _have_xf86vmode:
            return []

        if self._xinerama:
            # If Xinerama/TwinView is enabled, xf86vidmode's modelines
            # correspond to metamodes, which don't distinguish one screen from
            # another.  XRandR (broken) or NV (complicated) extensions needed.
            return []

        count = ctypes.c_int()
        info_array = \
            ctypes.POINTER(ctypes.POINTER(xf86vmode.XF86VidModeModeInfo))()
        xf86vmode.XF86VidModeGetAllModeLines(
            self.display._display, self.display.x_screen, count, info_array)

        # Copy modes out of list and free list
        modes = []
        for i in range(count.value):
            info = xf86vmode.XF86VidModeModeInfo()
            ctypes.memmove(ctypes.byref(info), 
                           ctypes.byref(info_array.contents[i]), 
                           ctypes.sizeof(info))
            modes.append(XlibScreenMode(self, info))
            if info.privsize:
                xlib.XFree(info.private)
        xlib.XFree(info_array)

        return modes
Code Example #29
File: __init__.py Project: pombreda/agilepyfs
 def WriteFile(self, path, buffer, nBytesToWrite, nBytesWritten, offset, info):
     path = normpath(path)
     fh = info.contents.Context
     (file,_,lock) = self._get_file(fh)
     lock.acquire()
     try:
         errno = self._check_lock(path,offset,nBytesToWrite,info)
         if errno:
             return errno
         #  This may be called after Cleanup, meaning we
         #  need to re-open the file.
         if file.closed:
             file = self.fs.open(path,file.mode)
             self._rereg_file(info.contents.Context,file)
         if info.contents.WriteToEndOfFile:
             file.seek(0,os.SEEK_END)
         else:
             file.seek(offset)
         data = ctypes.create_string_buffer(nBytesToWrite)
         ctypes.memmove(data,buffer,nBytesToWrite)
         file.write(data.raw)
         nBytesWritten[0] = len(data.raw)
         try:
             size_written = self._files_size_written[path][fh]
         except KeyError:
             pass
         else:
             if offset + nBytesWritten[0] > size_written:
                 new_size_written = offset + nBytesWritten[0]
                 self._files_size_written[path][fh] = new_size_written
     finally:
         lock.release()
Code Example #30
File: encode.py Project: svn2github/Xpra
def _lossless(func, image):
    """
    Encode the image losslessly using the given encoding
    function

    :param func: The encoding function
    :param image: The image to be encoded

    :type function: function
    :type image: BitmapHandler
    """
    # Call encode function
    data = str(image.bitmap)
    width = c_int(image.width)
    height = c_int(image.height)
    stride = c_int(image.stride)
    output_p = c_void_p()

    size = func(data, width, height, stride, byref(output_p))

    # Check return size
    if size == 0:
        raise EncodeError

    # Convert output
    output = create_string_buffer(size)

    memmove(output, output_p, size)

    return WebPHandler(bytearray(output), image.width, image.height)
Code Example #31
File: vertexbuffer.py Project: fos/fos-pyglet
 def set_data(self, data):
     super(MappableVertexBufferObject, self).set_data(data)
     ctypes.memmove(self.data, data, self.size)
     self._dirty_min = 0
     self._dirty_max = self.size
Code Example #32
 def __bytes__(self):
     buf = ctypes.create_string_buffer(len(self))
     data = core.BNGetDataBufferContents(self.handle)
     assert data is not None, "core.BNGetDataBufferContents returned None"
     ctypes.memmove(buf, data, len(self))
     return buf.raw
Code Example #33
File: eigenmat.py Project: jormansa/deepnet
 def __init__(self, mat):
   self.mat = eigenmat()
   ct.memmove(ct.pointer(self.mat), ct.pointer(mat), ct.sizeof(self.mat))
   self.mat.is_trans = 1
   self.p_mat = ct.pointer(self.mat)
   self.T = mat
Code Example #34
def _ctype_copy(addr, var, width):
    ctypes.memmove(addr, ctypes.addressof(var), width)
    return addr + width
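A minimal sketch of serializing two scalars back-to-back with _ctype_copy (the buffer layout is invented for illustration; the caller is responsible for sizing the destination):

import ctypes

buf = ctypes.create_string_buffer(8)
addr = ctypes.addressof(buf)
addr = _ctype_copy(addr, ctypes.c_uint32(0xDEADBEEF), 4)  # bytes 0..3
addr = _ctype_copy(addr, ctypes.c_float(1.5), 4)          # bytes 4..7
print(buf.raw.hex())  # efbeadde0000c03f (little-endian)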
Code Example #35
def writeScalarArray(x, offset, v):
  ct.memmove(ct.addressof(x.contents)+int(offset)*ct.sizeof(v), ct.addressof(v), ct.sizeof(v))
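A minimal sketch of writeScalarArray, assuming x is a ctypes pointer whose target begins an array of v's type (the example array is invented for illustration):

import ctypes as ct

arr = (ct.c_double * 4)()
x = ct.pointer(arr)                       # x.contents is the array storage
writeScalarArray(x, 2, ct.c_double(3.5))  # writes the element at index 2
assert arr[2] == 3.5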
Code Example #36
File: ads.py Project: mosri/pyads
    def parse_notification(
        self, notification, plc_datatype, timestamp_as_filetime=False
    ):
        # type: (Any, Type, bool) -> (int, int, Any)
        """Parse a notification.

        Convert the data of the NotificationHeader into the fitting Python type.

        :param notification: The notification we receive from the PLC, whose data
        is to be converted. This can be any basic PLC datatype or a `ctypes.Structure`.
        :param plc_datatype: The PLC datatype that needs to be converted. This can
        be any basic PLC datatype or a `ctypes.Structure`.
        :param timestamp_as_filetime: Whether the notification timestamp should be returned
        as `datetime.datetime` (False) or Windows `FILETIME` as originally transmitted
        via ADS (True). Be aware that the precision of `datetime.datetime` is limited to
        microseconds, while FILETIME allows for 100 ns. This may be relevant when using
        task cycle times such as 62.5 µs. Default: False.

        :rtype: (int, int, Any)
        :returns: notification handle, timestamp, value

        **Usage**:

        >>> import pyads
        >>> from ctypes import sizeof
        >>>
        >>> # Connect to the local TwinCAT PLC
        >>> plc = pyads.Connection('127.0.0.1.1.1', 851)
        >>> tag = {"GVL.myvalue": pyads.PLCTYPE_INT}
        >>>
        >>> # Create callback function that prints the value
        >>> def mycallback(notification, data):
        >>>     data_type = tag[data]
        >>>     handle, timestamp, value = plc.parse_notification(notification, data_type)
        >>>     print(value)
        >>>
        >>> with plc:
        >>>     # Add notification with default settings
        >>>     attr = pyads.NotificationAttrib(sizeof(pyads.PLCTYPE_INT))
        >>>
        >>>     handles = plc.add_device_notification("GVL.myvalue", attr, mycallback)
        >>>
        >>>     # Remove notification
        >>>     plc.del_device_notification(handles)
        """
        contents = notification.contents
        data_size = contents.cbSampleSize
        # Get dynamically sized data array
        data = (c_ubyte * data_size).from_address(
            addressof(contents) + SAdsNotificationHeader.data.offset
        )

        if plc_datatype == PLCTYPE_STRING:
            # read only until null-termination character
            value = bytearray(data).split(b"\0", 1)[0].decode("utf-8")

        elif plc_datatype is not None and issubclass(plc_datatype, Structure):
            value = plc_datatype()
            fit_size = min(data_size, sizeof(value))
            memmove(addressof(value), addressof(data), fit_size)

        elif plc_datatype is not None and issubclass(plc_datatype, Array):
            if data_size == sizeof(plc_datatype):
                value = list(plc_datatype.from_buffer_copy(bytes(data)))
            else:
                # invalid size
                value = None

        elif plc_datatype not in DATATYPE_MAP:
            value = bytearray(data)

        else:
            value = struct.unpack(DATATYPE_MAP[plc_datatype], bytearray(data))[0]

        if timestamp_as_filetime:
            timestamp = contents.nTimeStamp
        else:
            timestamp = filetime_to_dt(contents.nTimeStamp)

        return contents.hNotification, timestamp, value
Code Example #37
File: vertexbuffer.py Project: fos/fos-pyglet
 def set_data_region(self, data, start, length):
     ctypes.memmove(self.data_ptr + start, data, length)
     self._dirty_min = min(start, self._dirty_min)
     self._dirty_max = max(start + length, self._dirty_max)
Code Example #38
File: vertexbuffer.py Project: fos/fos-pyglet
 def set_data_region(self, data, start, length):
     ctypes.memmove(self.ptr + start, data, length)
Code Example #39
 def write_variable(self, BType, name, value):
     new_ctypes_obj = BType._to_ctypes(value)
     ctypes_obj = BType._ctype.in_dll(self.cdll, name)
     ctypes.memmove(ctypes.addressof(ctypes_obj),
                    ctypes.addressof(new_ctypes_obj),
                    ctypes.sizeof(BType._ctype))
Code Example #40
File: vertexbuffer.py Project: fos/fos-pyglet
 def resize(self, size):
     array = (ctypes.c_byte * size)()
     ctypes.memmove(array, self.array, min(size, self.size))
     self.size = size
     self.array = array
     self.ptr = ctypes.cast(self.array, ctypes.c_void_p).value
Code Example #41
File: win32structures.py Project: yuyobit/pywinauto
def _construct(typ, buf):
    #print "construct", (typ, buf)
    obj = typ.__new__(typ)
    ctypes.memmove(ctypes.addressof(obj), buf, len(buf))
    return obj
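A round-trip sketch of _construct (the POINT structure is invented for illustration; note that typ.__new__ bypasses __init__, so only the raw bytes matter):

import ctypes

class POINT(ctypes.Structure):
    _fields_ = [("x", ctypes.c_long), ("y", ctypes.c_long)]

raw = bytes(POINT(3, 4))     # serialize an instance to raw bytes
pt = _construct(POINT, raw)  # rebuild a new instance from those bytes
assert (pt.x, pt.y) == (3, 4)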
Code Example #42
File: vertexbuffer.py Project: fos/fos-pyglet
 def set_data(self, data):
     ctypes.memmove(self.ptr, data, self.size)
Code Example #43
File: loader.py Project: pwaller/PeachPy
 def copy_code(self, code_segment):
     import ctypes
     ctypes.memmove(self.code_address, ctypes.c_char_p(bytes(code_segment)),
                    len(code_segment))
Code Example #44
def main(args):
    # Parse command line options
    args.filename.sort()
    global MAX_QUEUE_DEPTH
    MAX_QUEUE_DEPTH = min([args.queue_depth, 10])

    # Find out where the source is if needed
    if args.source is not None:
        if args.ra is None or args.dec is None:
            tempRA, tempDec, tempService = resolveTarget('PSR ' + args.source)
            print("%s resolved to %s, %s using '%s'" %
                  (args.source, tempRA, tempDec, tempService))
            out = input('=> Accept? [Y/n] ')
            if out == 'n' or out == 'N':
                sys.exit()
            else:
                args.ra = tempRA
                args.dec = tempDec

    else:
        args.source = "None"

    if args.ra is None:
        args.ra = "00:00:00.00"
    if args.dec is None:
        args.dec = "+00:00:00.0"
    args.ra = str(args.ra)
    args.dec = str(args.dec)

    # FFT length
    LFFT = args.nchan

    # Sub-integration block size
    nsblk = args.nsblk

    DM = float(args.DM)

    startTimes = []
    nFrames = []
    for filename in args.filename:
        idf = DRXFile(filename)
        o = 0

        # Find out how many frame sets are in each file
        srate = idf.get_info('sample_rate')
        beampols = idf.get_info('nbeampol')
        tunepol = beampols
        nFramesFile = idf.get_info('nframe')

        # Offset, if needed
        o = 0
        if args.skip != 0.0:
            o = idf.offset(args.skip)
        nFramesFile -= int(o * srate / 4096) * tunepol
        nFrames.append(nFramesFile // tunepol)

        # Get the start time of the file
        startTimes.append(idf.get_info('start_time_samples'))

        # Validate
        try:
            if srate != srateOld:
                raise RuntimeError(
                    "Sample rate change detected in this set of files")
        except NameError:
            srateOld = srate

        # Done
        idf.close()

    ttSkip = int(fS / srate * 4096)
    spSkip = int(fS / srate)
    frameOffsets = []
    sampleOffsets = []
    tickOffsets = []
    siCountMax = []
    for filename, startTime, nFrame in zip(args.filename, startTimes, nFrames):
        diff = max(startTimes) - startTime
        frameOffsets.append(diff // ttSkip)
        diff = diff - frameOffsets[-1] * ttSkip
        sampleOffset = diff // spSkip
        sampleOffsets.append(sampleOffset)
        if sampleOffsets[-1] == 4096:
            frameOffsets[-1] += 1
            sampleOffsets[-1] %= 4096
        if args.subsample_correction:
            tickOffsets.append(
                max(startTimes) - (startTime + frameOffsets[-1] * ttSkip +
                                   sampleOffsets[-1] * spSkip))
        else:
            tickOffsets.append(0)

        nFrame = nFrame - frameOffsets[-1] - 1
        nSubints = nFrame // (nsblk * LFFT // 4096)
        siCountMax.append(nSubints)
    siCountMax = min(siCountMax)

    print("Proposed File Time Alignment:")
    residualOffsets = []
    for filename, startTime, frameOffset, sampleOffset, tickOffset in zip(
            args.filename, startTimes, frameOffsets, sampleOffsets,
            tickOffsets):
        tStartNow = startTime
        tStartAfter = startTime + frameOffset * ttSkip + int(
            sampleOffset * fS / srate) + tickOffset
        residualOffset = max(startTimes) - tStartAfter
        print("  %s with %i frames, %i samples, %i ticks" %
              (os.path.basename(filename), frameOffset, sampleOffset,
               tickOffset))
        print("    before: %i" % tStartNow)
        print("    after:  %i" % tStartAfter)
        print("      residual: %i" % residualOffset)

        residualOffsets.append(residualOffset)
    print("Minimum Residual: %i ticks (%.1f ns)" %
          (min(residualOffsets), min(residualOffsets) * (1e9 / fS)))
    print("Maximum Residual: %i ticks (%.1f ns)" %
          (max(residualOffsets), max(residualOffsets) * (1e9 / fS)))
    if not args.yes:
        out = input('=> Accept? [Y/n] ')
        if out == 'n' or out == 'N':
            sys.exit()
    else:
        print("=> Accepted via the command line")
    print(" ")

    # Setup the processing constraints
    if (not args.no_summing):
        polNames = 'I'
        nPols = 1
        reduceEngine = CombineToIntensity
    elif args.stokes:
        polNames = 'IQUV'
        nPols = 4
        reduceEngine = CombineToStokes
    elif args.circular:
        polNames = 'LLRR'
        nPols = 2
        reduceEngine = CombineToCircular
    else:
        polNames = 'XXYY'
        nPols = 2
        reduceEngine = CombineToLinear

    if args.four_bit_data:
        OptimizeDataLevels = OptimizeDataLevels4Bit
    else:
        OptimizeDataLevels = OptimizeDataLevels8Bit

    for c, filename, frameOffset, sampleOffset, tickOffset in zip(
            range(len(args.filename)), args.filename, frameOffsets,
            sampleOffsets, tickOffsets):
        idf = DRXFile(filename)

        # Find out how many frame sets are in each file
        srate = idf.get_info('sample_rate')
        beampols = idf.get_info('nbeampol')
        tunepol = beampols
        nFramesFile = idf.get_info('nframe')

        # Offset, if needed
        o = 0
        if args.skip != 0.0:
            o = idf.offset(args.skip)
        nFramesFile -= int(o * srate / 4096) * tunepol

        # Additional seek for timetag alignment across the files
        o += idf.offset(frameOffset * 4096 / srate)

        ## Date
        tStart = idf.get_info(
            'start_time') + sampleOffset * spSkip / fS + tickOffset / fS
        beginDate = tStart.datetime
        beginTime = beginDate
        mjd = tStart.mjd
        mjd_day = int(mjd)
        mjd_sec = (mjd - mjd_day) * 86400
        if args.output is None:
            args.output = "drx_%05d_%s" % (mjd_day, args.source.replace(
                ' ', ''))

        ## Tuning frequencies
        central_freq1 = idf.get_info('freq1')
        central_freq2 = idf.get_info('freq2')
        beam = idf.get_info('beam')

        ## Coherent Dedispersion Setup
        timesPerFrame = numpy.arange(4096, dtype=numpy.float64) / srate
        spectraFreq1 = numpy.fft.fftshift(
            numpy.fft.fftfreq(LFFT, d=1.0 / srate)) + central_freq1
        spectraFreq2 = numpy.fft.fftshift(
            numpy.fft.fftfreq(LFFT, d=1.0 / srate)) + central_freq2

        # File summary
        print("Input Filename: %s (%i of %i)" %
              (filename, c + 1, len(args.filename)))
        print("Date of First Frame: %s (MJD=%f)" % (str(beginDate), mjd))
        print("Tune/Pols: %i" % tunepol)
        print("Tunings: %.1f Hz, %.1f Hz" % (central_freq1, central_freq2))
        print("Sample Rate: %i Hz" % srate)
        print("Sample Time: %f s" % (LFFT / srate, ))
        print("Sub-block Time: %f s" % (LFFT / srate * nsblk, ))
        print("Frames: %i (%.3f s)" %
              (nFramesFile, 4096.0 * nFramesFile / srate / tunepol))
        print("---")
        print("Using FFTW Wisdom? %s" % useWisdom)
        print("DM: %.4f pc / cm^3" % DM)
        print("Samples Needed: %i, %i to %i, %i" %
              (get_coherent_sample_size(central_freq1 - srate / 2,
                                        1.0 * srate / LFFT, DM),
               get_coherent_sample_size(central_freq2 - srate / 2,
                                        1.0 * srate / LFFT, DM),
               get_coherent_sample_size(central_freq1 + srate / 2,
                                        1.0 * srate / LFFT, DM),
               get_coherent_sample_size(central_freq2 + srate / 2,
                                        1.0 * srate / LFFT, DM)))

        # Parameter validation
        if get_coherent_sample_size(central_freq1 - srate / 2,
                                    1.0 * srate / LFFT, DM) > nsblk:
            raise RuntimeError(
                "Too few samples for coherent dedispersion.  Considering increasing the number of channels."
            )
        elif get_coherent_sample_size(central_freq2 - srate / 2,
                                      1.0 * srate / LFFT, DM) > nsblk:
            raise RuntimeError(
                "Too few samples for coherent dedispersion.  Considering increasing the number of channels."
            )

        # Adjust the time for the padding used for coherent dedispersion
        print("MJD shifted by %.3f ms to account for padding" %
              (nsblk * LFFT / srate * 1000.0, ))
        beginDate = idf.get_info('start_time') + nsblk * LFFT / srate
        beginTime = beginDate.datetime
        mjd = beginDate.mjd

        # Create the output PSRFITS file(s)
        pfu_out = []
        for t in range(1, 2 + 1):
            ## Basic structure and bounds
            pfo = pfu.psrfits()
            pfo.basefilename = "%s_b%it%i" % (args.output, beam, t)
            pfo.filenum = 0
            pfo.tot_rows = pfo.N = pfo.T = pfo.status = pfo.multifile = 0
            pfo.rows_per_file = 32768

            ## Frequency, bandwidth, and channels
            if t == 1:
                pfo.hdr.fctr = central_freq1 / 1e6
            else:
                pfo.hdr.fctr = central_freq2 / 1e6
            pfo.hdr.BW = srate / 1e6
            pfo.hdr.nchan = LFFT
            pfo.hdr.df = srate / 1e6 / LFFT
            pfo.hdr.dt = LFFT / srate

            ## Metadata about the observation/observatory/pulsar
            pfo.hdr.observer = "writePsrfits2Multi.py"
            pfo.hdr.source = args.source
            pfo.hdr.fd_hand = 1
            pfo.hdr.nbits = 4 if args.four_bit_data else 8
            pfo.hdr.nsblk = nsblk
            pfo.hdr.ds_freq_fact = 1
            pfo.hdr.ds_time_fact = 1
            pfo.hdr.npol = nPols
            pfo.hdr.summed_polns = 1 if (not args.no_summing) else 0
            pfo.hdr.obs_mode = "SEARCH"
            pfo.hdr.telescope = "LWA"
            pfo.hdr.frontend = "LWA"
            pfo.hdr.backend = "DRX"
            pfo.hdr.project_id = "Pulsar"
            pfo.hdr.ra_str = args.ra
            pfo.hdr.dec_str = args.dec
            pfo.hdr.poln_type = "LIN" if not args.circular else "CIRC"
            pfo.hdr.poln_order = polNames
            pfo.hdr.date_obs = str(beginTime.strftime("%Y-%m-%dT%H:%M:%S"))
            pfo.hdr.MJD_epoch = pfu.get_ld(mjd)

            ## Coherent dedispersion information
            pfo.hdr.chan_dm = DM

            ## Setup the subintegration structure
            pfo.sub.tsubint = pfo.hdr.dt * pfo.hdr.nsblk
            pfo.sub.bytes_per_subint = pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk * pfo.hdr.nbits // 8
            pfo.sub.dat_freqs = pfu.malloc_doublep(
                pfo.hdr.nchan * 8)  # 8-bytes per double @ LFFT channels
            pfo.sub.dat_weights = pfu.malloc_floatp(
                pfo.hdr.nchan * 4)  # 4-bytes per float @ LFFT channels
            pfo.sub.dat_offsets = pfu.malloc_floatp(
                pfo.hdr.nchan * pfo.hdr.npol *
                4)  # 4-bytes per float @ LFFT channels per pol.
            pfo.sub.dat_scales = pfu.malloc_floatp(
                pfo.hdr.nchan * pfo.hdr.npol *
                4)  # 4-bytes per float @ LFFT channels per pol.
            if args.four_bit_data:
                pfo.sub.data = pfu.malloc_ucharp(
                    pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk
                )  # 1-byte per unsigned char @ (LFFT channels x pols. x nsblk sub-integrations) samples
                pfo.sub.rawdata = pfu.malloc_ucharp(
                    pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk // 2
                )  # 4-bits per nibble @ (LFFT channels x pols. x nsblk sub-integrations) samples
            else:
                pfo.sub.rawdata = pfu.malloc_ucharp(
                    pfo.hdr.nchan * pfo.hdr.npol * pfo.hdr.nsblk
                )  # 1-byte per unsigned char @ (LFFT channels x pols. x nsblk sub-integrations) samples

            ## Create and save it for later use
            pfu.psrfits_create(pfo)
            pfu_out.append(pfo)

        freqBaseMHz = numpy.fft.fftshift(numpy.fft.fftfreq(
            LFFT, d=1.0 / srate)) / 1e6
        for i in range(len(pfu_out)):
            # Define the frequencies available in the file (in MHz)
            pfu.convert2_double_array(pfu_out[i].sub.dat_freqs,
                                      freqBaseMHz + pfu_out[i].hdr.fctr, LFFT)

            # Define which part of the spectra are good (1) or bad (0).  All channels
            # are good except for the two outermost.
            pfu.convert2_float_array(pfu_out[i].sub.dat_weights,
                                     numpy.ones(LFFT), LFFT)
            pfu.set_float_value(pfu_out[i].sub.dat_weights, 0, 0)
            pfu.set_float_value(pfu_out[i].sub.dat_weights, LFFT - 1, 0)

            # Define the data scaling (default is a scale of one and an offset of zero)
            pfu.convert2_float_array(pfu_out[i].sub.dat_offsets,
                                     numpy.zeros(LFFT * nPols), LFFT * nPols)
            pfu.convert2_float_array(pfu_out[i].sub.dat_scales,
                                     numpy.ones(LFFT * nPols), LFFT * nPols)

        # Speed things along, the data need to be processed in units of 'nsblk'.
        # Find out how many frames per tuning/polarization that corresponds to.
        chunkSize = nsblk * LFFT // 4096
        chunkTime = LFFT / srate * nsblk

        # Frequency arrays for use with the phase rotator
        freq1 = central_freq1 + numpy.fft.fftshift(
            numpy.fft.fftfreq(LFFT, d=1.0 / srate))
        freq2 = central_freq2 + numpy.fft.fftshift(
            numpy.fft.fftfreq(LFFT, d=1.0 / srate))

        # Calculate the SK limits for weighting
        if (not args.no_sk_flagging):
            skLimits = kurtosis.get_limits(4.0, 1.0 * nsblk)

            GenerateMask = lambda x: ComputeSKMask(x, skLimits[0], skLimits[1])
        else:

            def GenerateMask(x):
                flag = numpy.ones((4, LFFT), dtype=numpy.float32)
                flag[:, 0] = 0.0
                flag[:, -1] = 0.0
                return flag

        # Create the progress bar so that we can keep up with the conversion.
        pbar = progress.ProgressBarPlus(max=siCountMax, span=52)

        # Pre-read the first frame so that we have something to pad with, if needed
        if sampleOffset != 0:
            # Pre-read the first frame
            readT, t, dataPrev = idf.read(4096 / srate)

        # Go!
        rdr = threading.Thread(target=reader,
                               args=(idf, chunkTime, readerQ),
                               kwargs={'core': 0})
        rdr.setDaemon(True)
        rdr.start()

        # Unpack - Previous data
        incoming = getFromQueue(readerQ)
        siCount, t, rawdata = incoming
        rawSpectraPrev = PulsarEngineRaw(rawdata, LFFT)

        # Unpack - Current data
        incoming = getFromQueue(readerQ)
        siCount, t, rawdata = incoming
        rawSpectra = PulsarEngineRaw(rawdata, LFFT)

        # Main Loop
        incoming = getFromQueue(readerQ)
        while incoming[0] is not None:
            ## Unpack
            siCount, t, rawdata = incoming

            ## Check to see where we are
            if siCount > siCountMax:
                ### Looks like we are done, allow the reader to finish
                incoming = getFromQueue(readerQ)
                continue

            ## Apply the sample offset
            if sampleOffset != 0:
                try:
                    dataComb[:, :4096] = dataPrev
                except NameError:
                    dataComb = numpy.zeros(
                        (rawdata.shape[0], rawdata.shape[1] + 4096),
                        dtype=rawdata.dtype)
                    dataComb[:, :4096] = dataPrev
                dataComb[:, 4096:] = rawdata
                dataPrev = dataComb[:, -4096:]
                rawdata[...] = dataComb[:, sampleOffset:sampleOffset +
                                        4096 * chunkSize]

            ## FFT
            try:
                rawSpectraNext = PulsarEngineRaw(rawdata, LFFT, rawSpectraNext)
            except NameError:
                rawSpectraNext = PulsarEngineRaw(rawdata, LFFT)

            ## Apply the sub-sample offset as a phase rotation
            if tickOffset != 0:
                PhaseRotator(rawSpectra, freq1, freq2, tickOffset / fS,
                             rawSpectra)

            ## S-K flagging
            flag = GenerateMask(rawSpectra)
            weight1 = numpy.where(flag[:2, :].sum(axis=0) == 0, 0,
                                  1).astype(numpy.float32)
            weight2 = numpy.where(flag[2:, :].sum(axis=0) == 0, 0,
                                  1).astype(numpy.float32)
            ff1 = 1.0 * (LFFT - weight1.sum()) / LFFT
            ff2 = 1.0 * (LFFT - weight2.sum()) / LFFT

            ## Dedisperse
            try:
                rawSpectraDedispersed = MultiChannelCD(
                    rawSpectra, spectraFreq1, spectraFreq2, 1.0 * srate / LFFT,
                    DM, rawSpectraPrev, rawSpectraNext, rawSpectraDedispersed)
            except NameError:
                rawSpectraDedispersed = MultiChannelCD(
                    rawSpectra, spectraFreq1, spectraFreq2, 1.0 * srate / LFFT,
                    DM, rawSpectraPrev, rawSpectraNext)

            ## Update the state variables used to get the CD process continuous
            rawSpectraPrev[...] = rawSpectra
            rawSpectra[...] = rawSpectraNext

            ## Detect power
            try:
                redData = reduceEngine(rawSpectraDedispersed, redData)
            except NameError:
                redData = reduceEngine(rawSpectraDedispersed)

            ## Optimal data scaling
            try:
                bzero, bscale, bdata = OptimizeDataLevels(
                    redData, LFFT, bzero, bscale, bdata)
            except NameError:
                bzero, bscale, bdata = OptimizeDataLevels(redData, LFFT)

            ## Polarization mangling
            bzero1 = bzero[:nPols, :].T.ravel()
            bzero2 = bzero[nPols:, :].T.ravel()
            bscale1 = bscale[:nPols, :].T.ravel()
            bscale2 = bscale[nPols:, :].T.ravel()
            bdata1 = bdata[:nPols, :].T.ravel()
            bdata2 = bdata[nPols:, :].T.ravel()

            ## Write the spectra to the PSRFITS files
            for j, sp, bz, bs, wt in zip(range(2), (bdata1, bdata2),
                                         (bzero1, bzero2), (bscale1, bscale2),
                                         (weight1, weight2)):
                ## Time
                pfu_out[j].sub.offs = (pfu_out[j].tot_rows) * pfu_out[
                    j].hdr.nsblk * pfu_out[j].hdr.dt + pfu_out[
                        j].hdr.nsblk * pfu_out[j].hdr.dt / 2.0

                ## Data
                ptr, junk = sp.__array_interface__['data']
                if args.four_bit_data:
                    ctypes.memmove(
                        int(pfu_out[j].sub.data), ptr,
                        pfu_out[j].hdr.nchan * nPols * pfu_out[j].hdr.nsblk)
                else:
                    ctypes.memmove(
                        int(pfu_out[j].sub.rawdata), ptr,
                        pfu_out[j].hdr.nchan * nPols * pfu_out[j].hdr.nsblk)

                ## Zero point
                ptr, junk = bz.__array_interface__['data']
                ctypes.memmove(int(pfu_out[j].sub.dat_offsets), ptr,
                               pfu_out[j].hdr.nchan * nPols * 4)

                ## Scale factor
                ptr, junk = bs.__array_interface__['data']
                ctypes.memmove(int(pfu_out[j].sub.dat_scales), ptr,
                               pfu_out[j].hdr.nchan * nPols * 4)

                ## SK
                ptr, junk = wt.__array_interface__['data']
                ctypes.memmove(int(pfu_out[j].sub.dat_weights), ptr,
                               pfu_out[j].hdr.nchan * 4)

                ## Save
                pfu.psrfits_write_subint(pfu_out[j])

            ## Update the progress bar and remaining time estimate
            pbar.inc()
            sys.stdout.write('%5.1f%% %5.1f%% %s %2i\r' %
                             (ff1 * 100, ff2 * 100, pbar.show(), len(readerQ)))
            sys.stdout.flush()

            ## Fetch another one
            incoming = getFromQueue(readerQ)

        rdr.join()
        if sampleOffset != 0:
            del dataComb
        del rawSpectra
        del redData
        del bzero
        del bscale
        del bdata

        # Update the progress bar with the total time used but only if we have
        # reached the end of the file
        if incoming[1]:
            pbar.amount = pbar.max
        sys.stdout.write('              %s %2i\n' %
                         (pbar.show(), len(readerQ)))
        sys.stdout.flush()

        # And close out the files
        for pfo in pfu_out:
            pfu.psrfits_close(pfo)
Code Example #45
 def n2c_array(a,aa):
     ##  aa should already have been allocated!
     memmove(aa,a.ctypes.data,4*a.size)
     return aa
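A minimal sketch of n2c_array (illustrative; note the hard-coded 4 assumes a 4-byte dtype such as float32, and aa must already be allocated large enough):

import ctypes
import numpy as np

a = np.arange(4, dtype=np.float32)
aa = (ctypes.c_float * int(a.size))()  # pre-allocated destination
n2c_array(a, aa)
assert list(aa) == [0.0, 1.0, 2.0, 3.0]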
Code Example #46
File: loader.py Project: pwaller/PeachPy
 def copy_data(self, data_segment):
     import ctypes
     ctypes.memmove(self.data_address, ctypes.c_char_p(bytes(data_segment)),
                    len(data_segment))
Code Example #47
    def __Parse__(self, name = None):
        if name == None:
            print "Need to filename"
            return None

        f = open(name,'rb')
        buff = f.read()
        f.close()

        self.filesize = len(buff)

        _DosHdr_size = ctypes.sizeof(IMAGE_DOS_HEADER)
        _FileHdr_size = ctypes.sizeof(IMAGE_FILE_HEADER)
        _Opt32Hdr_size = ctypes.sizeof(IMAGE_OPTIONAL_HEADER32)
        _Opt64Hdr_size = ctypes.sizeof(IMAGE_OPTIONAL_HEADER64)

        _Dos_hdr = IMAGE_DOS_HEADER()
        _File_hdr = IMAGE_FILE_HEADER()
        _Opt32_hdr = IMAGE_OPTIONAL_HEADER32()
        _Opt64_hdr = IMAGE_OPTIONAL_HEADER64()

        #Read Dos Header
        ctypes.memmove(ctypes.addressof(_Dos_hdr),buff, _DosHdr_size)
        self.__Convert__(IMAGE_DOS_HEADER(), _Dos_hdr, 1)

        if self.DOS_HEADER['e_magic'] != self.__dos_sig__:
            print 'DOS signature does not match'
            return None


        off = self.DOS_HEADER['e_lfanew']

        if off >= self.filesize :
            print 'malformed PE Structure'
            return None

        #temp = struct.unpack( '<L',buff[off:off+4] )[0]
        temp = self.__GetDword__(buff,off)
        if temp != self.__nt_sig__:
            print 'PE signature does not match'
            return None

        off += 4
        ctypes.memmove(ctypes.addressof(_File_hdr), buff[off:], _FileHdr_size)
        self.__Convert__(IMAGE_FILE_HEADER(), _File_hdr, 2)

        #offset to Optional header
        off += _FileHdr_size

        magic = self.__GetWord__(buff, off)
        if magic == self.__opt_x32__:
            self.Is32Bit = True
        elif magic == self.__opt_x64__:
            self.Is32Bit = False
        else:
            print('unknown optional header magic')
            return None

        if self.Is32Bit:
            ctypes.memmove(ctypes.addressof(_Opt32_hdr), buff[off:], _Opt32Hdr_size)
            self.__Convert__(IMAGE_OPTIONAL_HEADER32(), _Opt32_hdr, 3)
            Imsi = off + _Opt32Hdr_size
        else:
            ctypes.memmove(ctypes.addressof(_Opt64_hdr), buff[off:], _Opt64Hdr_size)
            self.__Convert__(IMAGE_OPTIONAL_HEADER64(), _Opt64_hdr, 4)
            Imsi = off + _Opt64Hdr_size


        for i in range(0, self.OPTIONAL_HEADER['NumberOfRvaAndSizes']):
            self.__DATA_DIRECTORY_Struct__['VirtualAddress'] = self.__GetDword__(buff, Imsi)
            self.__DATA_DIRECTORY_Struct__['Size'] = self.__GetDword__(buff, Imsi+4)
            self.DATA_DIRECTORY.append(self.__DATA_DIRECTORY_Struct__.copy())  #do not ref copy
            Imsi += 8

        off = off + self.FILE_HEADER['SizeOfOptionalHeader']

        for i in range(0, self.FILE_HEADER['NumberOfSections']):
            self.__SECTION_HEADER_Struct__['Name'] = buff[off:off+8]
            self.__SECTION_HEADER_Struct__['VirtualSize'] = self.__GetDword__(buff, off+8)
            self.__SECTION_HEADER_Struct__['RVA'] = self.__GetDword__(buff, off+12)
            self.__SECTION_HEADER_Struct__['SizeOfRawData'] = self.__GetDword__(buff, off+16)
            self.__SECTION_HEADER_Struct__['PointerToRawData'] = self.__GetDword__(buff, off+20)
            self.__SECTION_HEADER_Struct__['PointerToRelocations'] = self.__GetDword__(buff, off+24)
            self.__SECTION_HEADER_Struct__['PointerToLinenumbers'] = self.__GetDword__(buff, off+28)
            self.__SECTION_HEADER_Struct__['NumberOfRelocations'] = self.__GetWord__(buff, off+32)
            self.__SECTION_HEADER_Struct__['NumberOfLinenumbers'] = self.__GetWord__(buff, off+34)
            self.__SECTION_HEADER_Struct__['Characteristics'] = self.__GetDword__(buff, off+36)
            self.SECTION_HEADER.append(self.__SECTION_HEADER_Struct__.copy())
            off += 40

        return
Code example #48
def unpackStructure(s, b):
    result = s()
    ctypes.memmove(ctypes.addressof(result), b, ctypes.sizeof(s))
    return result
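A minimal usage sketch for unpackStructure (the Header structure and payload below are hypothetical, not from the original snippet):

import ctypes

class Header(ctypes.LittleEndianStructure):
    _pack_ = 1
    _fields_ = [('magic', ctypes.c_uint32), ('length', ctypes.c_uint16)]

payload = b'\x7fELF' + (258).to_bytes(2, 'little')  # exactly sizeof(Header) bytes
hdr = unpackStructure(Header, payload)
assert hdr.magic == 0x464c457f and hdr.length == 258

Note that memmove reads ctypes.sizeof(s) bytes from b, so the buffer must be at least that long; passing a shorter bytes object reads past its end.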
Code example #49
def ImageGrab():
    # Assumes these imports at module level: ctypes, sys, win32clipboard,
    # and WORD, DWORD, LONG from ctypes.wintypes.
    BI_BITFIELDS = 3  # BITMAPINFOHEADER biCompression value for bitfield DIBs
    class BITMAPFILEHEADER(ctypes.Structure):
        _pack_ = 1  # structure field byte alignment
        _fields_ = [
            ('bfType', WORD),  # file type ("BM")
            ('bfSize', DWORD),  # file size in bytes
            ('bfReserved1', WORD),  # must be zero
            ('bfReserved2', WORD),  # must be zero
            ('bfOffBits', DWORD),  # byte offset to the pixel array
        ]

    SIZEOF_BITMAPFILEHEADER = ctypes.sizeof(BITMAPFILEHEADER)

    class BITMAPINFOHEADER(ctypes.Structure):
        _pack_ = 1  # structure field byte alignment
        _fields_ = [('biSize', DWORD), ('biWidth', LONG), ('biHeight', LONG),
                    ('biPlanes', WORD), ('biBitCount', WORD),
                    ('biCompression', DWORD), ('biSizeImage', DWORD),
                    ('biXPelsPerMeter', LONG), ('biYPelsPerMeter', LONG),
                    ('biClrUsed', DWORD), ('biClrImportant', DWORD)]

    SIZEOF_BITMAPINFOHEADER = ctypes.sizeof(BITMAPINFOHEADER)

    win32clipboard.OpenClipboard()
    try:
        if win32clipboard.IsClipboardFormatAvailable(win32clipboard.CF_DIB):
            data = win32clipboard.GetClipboardData(win32clipboard.CF_DIB)
        else:
            raise RuntimeError('clipboard does not contain an image in DIB format')
    finally:
        win32clipboard.CloseClipboard()

    bmih = BITMAPINFOHEADER()
    ctypes.memmove(ctypes.pointer(bmih), data, SIZEOF_BITMAPINFOHEADER)

    if bmih.biCompression != BI_BITFIELDS:  # RGBA?
        print('unsupported compression type {}'.format(bmih.biCompression))
        sys.exit(1)

    bmfh = BITMAPFILEHEADER()
    ctypes.memset(ctypes.pointer(bmfh), 0,
                  SIZEOF_BITMAPFILEHEADER)  # zero structure
    bmfh.bfType = ord('B') | (ord('M') << 8)
    bmfh.bfSize = SIZEOF_BITMAPFILEHEADER + len(data)  # file size
    SIZEOF_COLORTABLE = 0
    bmfh.bfOffBits = SIZEOF_BITMAPFILEHEADER + \
        SIZEOF_BITMAPINFOHEADER + SIZEOF_COLORTABLE

    bmp_filename = 'clipboard.bmp'
    with open(bmp_filename, 'wb') as bmp_file:
        bmp_file.write(bmfh)
        bmp_file.write(data)

    #print('file "{}" created from clipboard image'.format(bmp_filename))

    return f'./{bmp_filename}'
Code example #50
 def c2n_array(a, m, n=1):
     ## scipy needs the array in Fortran order (4 bytes per float32 element)
     aa = empty((m, n), dtype=float32, order='F')
     memmove(aa.ctypes.data, a, 4 * (m * n))
     return aa
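Taken together with n2c_array from Code example #45, these two helpers are mirror images: one copies a float32 NumPy array into an already-allocated C buffer, the other copies a C buffer back into a Fortran-ordered NumPy array. A self-contained round-trip sketch under the same 4-bytes-per-float32 assumption (the buffer and array names here are mine):

import ctypes
from ctypes import memmove
from numpy import arange, empty, float32

m, n = 3, 2
a = arange(m * n, dtype=float32).reshape(m, n, order='F')
buf = (ctypes.c_float * (m * n))()            # the preallocated 'aa' of n2c_array
memmove(buf, a.ctypes.data, 4 * a.size)       # NumPy -> C direction
back = empty((m, n), dtype=float32, order='F')
memmove(back.ctypes.data, buf, 4 * (m * n))   # C -> NumPy direction
assert (back == a).all()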
Code example #51
File: clientsocket.py Project: warrior6/socketpro
 def _makeGroups_(pGroup, count):  #pGroup = POINTER(c_uint)
     groups = (c_int * count)()
     memmove(addressof(groups), pGroup, count * 4)
     return list(groups)
Code example #52
 def draw_color_frame(self, frame, target_surface):
     target_surface.lock()
     address = self._kinect.surface_as_array(target_surface.get_buffer())
     ctypes.memmove(address, frame.ctypes.data, frame.size)
     del address
     target_surface.unlock()
Code example #53
def on_get_pub_key(user_id, id_length, key_buffer, key_buffer_length,
                   user_data):
    real_user_id = ctypes.string_at(user_id, id_length)
    pub_key = user_data[0].get_pub_key_by_id(real_user_id)
    ctypes.memmove(key_buffer, pub_key, len(pub_key))
    return 0
Code example #54
File: clientsocket.py Project: warrior6/socketpro
 def _prepareBytes(message, size):
     bytes = (c_char * size)()
     memmove(addressof(bytes), message, size)
     return bytes
Code example #55
def generate_ports(xml_ports, hash_engine):
    """
    Create a list of SPI flash port objects from a parsed XML list

    :param xml_ports: List of parsed XML of ports to be included in PCD
    :param hash_engine: Hashing engine used to generate the port hashes

    :return: Ports buffer, number of ports, list of port ToC entries, list of port hashes
    """

    if xml_ports is None or len(xml_ports) < 1:
        return None, 0, None, None

    ports = []
    toc_entries = []
    hashes = []
    num_ports = len(xml_ports)
    ports_len = 0

    class pcd_port(ctypes.LittleEndianStructure):
        _pack_ = 1
        _fields_ = [('port_id', ctypes.c_ubyte),
                    ('port_flags', ctypes.c_ubyte), ('policy', ctypes.c_ubyte),
                    ('pulse_interval', ctypes.c_ubyte),
                    ('spi_frequency_hz', ctypes.c_uint)]

    for id, port in xml_ports.items():
        spi_freq = int(
            manifest_common.get_key_from_dict(port, "spi_freq", "Port"))
        reset_ctrl = int(
            manifest_common.get_key_from_dict(port, "reset_ctrl", "Port"))
        flash_mode = int(
            manifest_common.get_key_from_dict(port, "flash_mode", "Port"))
        policy = int(manifest_common.get_key_from_dict(port, "policy", "Port"))
        runtime_verification = int(
            manifest_common.get_key_from_dict(port, "runtime_verification",
                                              "Port"))
        watchdog_monitoring = int(
            manifest_common.get_key_from_dict(port, "watchdog_monitoring",
                                              "Port"))
        pulse_interval = int(
            manifest_common.get_key_from_dict(port, "pulse_interval", "Port"))

        port_flags = (watchdog_monitoring << 5) | (runtime_verification << 4) | \
            (flash_mode << 2) | reset_ctrl

        port_buf = pcd_port(int(id), port_flags, policy, pulse_interval,
                            spi_freq)
        port_toc_entry = manifest_common.manifest_toc_entry(
            manifest_common.PCD_V2_SPI_FLASH_PORT_TYPE_ID,
            manifest_common.PCD_V2_ROT_TYPE_ID, 1, 0, 0,
            ctypes.sizeof(pcd_port))
        port_hash = manifest_common.generate_hash(port_buf, hash_engine)

        ports.append(port_buf)
        toc_entries.append(port_toc_entry)
        hashes.append(port_hash)

        ports_len += ctypes.sizeof(port_buf)

    ports_buf = (ctypes.c_ubyte * ports_len)()
    offset = 0

    for port in ports:
        port_len = ctypes.sizeof(port)
        ctypes.memmove(
            ctypes.addressof(ports_buf) + offset, ctypes.addressof(port),
            port_len)

        offset += port_len

    return ports_buf, num_ports, toc_entries, hashes
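The final loop above is a general pattern for concatenating heterogeneous ctypes structures into one contiguous byte buffer. A stripped-down sketch of just that packing step (ElemA and ElemB are hypothetical stand-ins for the PCD elements):

import ctypes

class ElemA(ctypes.LittleEndianStructure):
    _pack_ = 1
    _fields_ = [('id', ctypes.c_ubyte), ('flags', ctypes.c_ubyte)]

class ElemB(ctypes.LittleEndianStructure):
    _pack_ = 1
    _fields_ = [('freq', ctypes.c_uint)]

elements = [ElemA(1, 0x12), ElemB(16000000)]
total = sum(ctypes.sizeof(e) for e in elements)
buf = (ctypes.c_ubyte * total)()

offset = 0
for e in elements:
    ctypes.memmove(ctypes.addressof(buf) + offset, ctypes.addressof(e),
                   ctypes.sizeof(e))
    offset += ctypes.sizeof(e)

assert bytes(buf) == b'\x01\x12' + (16000000).to_bytes(4, 'little')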
Code example #56
File: win32structures.py Project: zwxf0112/pywinauto
def _construct(typ, buf):
    obj = typ.__new__(typ)
    memmove(addressof(obj), buf, len(buf))
    return obj
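Because _construct goes through typ.__new__, the structure's __init__ is never called and the buffer contents alone define the object, which is exactly what you want when rehydrating a previously captured struct. A small sketch (the POINT structure here is defined for illustration):

import ctypes
from ctypes import addressof, memmove

class POINT(ctypes.Structure):
    _fields_ = [('x', ctypes.c_long), ('y', ctypes.c_long)]

original = POINT(10, -3)
snapshot = bytes(original)           # the raw struct bytes
clone = _construct(POINT, snapshot)  # rebuild without running __init__
assert (clone.x, clone.y) == (10, -3)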
Code example #57
def accessory_task():
    print("Looking for device with VID 0x%0.4x" % DEVICE_VID)
    dev = usb.core.find(idVendor=DEVICE_VID)  #find device

    if dev is None:  # device not connected
        raise ValueError("No compatible device found with VID 0x%0.4x" %
                         DEVICE_VID)

    print("compatible device found with VID 0x%0.4x" % DEVICE_VID)

    if dev.idProduct in ACCESSORY_PID:
        print("device is in accessory mode, PID 0x%0.4x" % dev.idProduct)

    else:
        print("device is not in accessory mode yet")
        accessory(dev)  #find device again, send accessory info to device
        dev = usb.core.find(idVendor=ACCESSORY_VID)
        if dev is None:
            print("dev is None")
            raise ValueError("No compatible device not found")

        if dev.idProduct in ACCESSORY_PID:
            print("device is in accessory mode")
        else:
            print("dev.idProduct in ACCESSORY_PID == False")
            raise ValueError(
                "it looks like the device doesn't support Accessory mode or the app is not installed."
            )

    # dev.set_configuration()
    # Check for working properly again: even if the Android device is already
    # in accessory mode, setting the configuration will result in the
    # UsbManager starting an "accessory connected" intent, and hence a small
    # delay is required before communication works properly.

    print("setting up...")

    # tries = 5
    # while True:
    #     try:
    #         if tries <= 0:
    #             raise ValueError("it looks like it really failed. bye")
    #         dev.set_configuration()
    #         break
    #     except:
    #         print("Failed to set up. retrying...")
    #         tries -= 1
    #         time.sleep(1)
    print("OK")

    time.sleep(1)

    dev = usb.core.find(idVendor=ACCESSORY_VID)
    if dev is None:
        dev = usb.core.find(idVendor=DEVICE_VID)

    if dev is None:
        raise ValueError(
            "device set to accessory mode but VID %04X not found" % DEVICE_VID)

    print("Getting file descriptor")
    cfg = dev.get_active_configuration()  # get file descriptor, make environment for IO
    if_num = cfg[(0, 0)].bInterfaceNumber
    #	alternate_setting = usb.control.get_interface(dev, if_num)
    #	intf = usb.util.find_descriptor(cfg, bInterfaceNumber = if_num, bAlternateSetting=alternate_setting)
    intf = usb.util.find_descriptor(cfg, bInterfaceNumber=if_num)
    #	ep_out = usb.util.find_descriptor(
    #		intf,
    #		custom_match = \
    #		lambda e: \
    #			usb.util.endpoint_direction(e.bEndpointAddress) == \
    #			usb.util.ENDPOINT_OUT
    #	)

    ep_in = usb.util.find_descriptor(
        intf,
        custom_match=lambda e:
            usb.util.endpoint_direction(e.bEndpointAddress) ==
            usb.util.ENDPOINT_IN)

    #	print("Starting writer thread")
    #	writer_thread = threading.Thread(target = writer, args = (ep_out, ))
    #	writer_thread.start() #writing what user want in thread
    #	endpoint = dev[0][(0,0)][0]
    #	length = -1

    memory = sysv_ipc.SharedMemory(9527)

    print("Shared Memory, reading from android")
    while True:  # read data from android into the shared memory segment
        try:
            data = ep_in.read(BUFF_SIZE, timeout=0)
            ctypes.memmove(memory.address, data.buffer_info()[0], len(data))
        except usb.core.USBError:
            print("failed to read IN transfer")
            break


    # writer_thread.join()
    print("exiting application")
Code example #58
 def copyData(self, address):
     """
     Uses the C memmove function to copy data from an address in memory
     into memory allocated for the numpy array of this object.
     """
     ctypes.memmove(self.np_array.ctypes.data, address, self.size)
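The same copy, sketched outside the class so it can be run directly (the source buffer and sizes are illustrative; in the method above, self.size must match the destination array's byte count):

import ctypes
import numpy as np

src = (ctypes.c_ubyte * 8)(*range(8))   # stands in for the foreign buffer at 'address'
np_array = np.empty(8, dtype=np.uint8)  # destination owned by NumPy
ctypes.memmove(np_array.ctypes.data, ctypes.addressof(src), np_array.nbytes)
assert list(np_array) == list(range(8))

memmove does no bounds checking, so the destination must already be allocated with at least that many bytes.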
Code example #59
File: bad_connect.py Project: yiyuanliu/daos
    def test_connect(self):
        """
        Pass bad parameters to pool connect

        :avocado: tags=all,pool,full_regression,tiny,badconnect
        """
        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []

        modelist = self.params.get("mode", '/run/connecttests/connectmode/*/')
        connectmode = modelist[0]
        expected_for_param.append(modelist[1])

        setlist = self.params.get("setname",
                                  '/run/connecttests/connectsetnames/*/')
        connectset = setlist[0]
        expected_for_param.append(setlist[1])

        uuidlist = self.params.get("uuid", '/run/connecttests/UUID/*/')
        connectuuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        # if any parameter is FAIL then the test should FAIL, in this test
        # virtually everyone should FAIL since we are testing bad parameters
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        puuid = (ctypes.c_ubyte * 16)()
        pgroup = ctypes.create_string_buffer(0)
        # initialize a python pool object then create the underlying
        # daos storage
        self.pool = TestPool(self.context, self.get_dmg_command())
        self.pool.get_params(self)
        self.pool.create()
        # save this uuid since we might trash it as part of the test
        ctypes.memmove(puuid, self.pool.pool.uuid, 16)

        # trash the pool group value
        pgroup = self.pool.pool.group
        if connectset == 'NULLPTR':
            self.pool.pool.group = None

        # trash the UUID value in various ways
        if connectuuid == 'NULLPTR':
            self.pool.pool.uuid = None
        if connectuuid == 'JUNK':
            self.pool.pool.uuid[4] = 244

        try:
            self.pool.connect(1 << connectmode)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except TestFail as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result in ['PASS']:
                self.fail("Test was expected to pass but it failed.\n")

        # cleanup the pool
        finally:
            if self.pool is not None and self.pool.pool.attached == 1:
                # restore values in case we trashed them during test
                self.pool.pool.group = pgroup
                if self.pool.pool.uuid is None:
                    self.pool.pool.uuid = (ctypes.c_ubyte * 16)()
                ctypes.memmove(self.pool.pool.uuid, puuid, 16)
                print("pool uuid after restore {}".format(
                    self.pool.pool.get_uuid_str()))
Code example #60
if ports is not None:
    pcd_len += ctypes.sizeof(ports)
    elements_list.append(ports)
    toc_list.extend(ports_toc_entries)
    hash_list.extend(ports_hash)

toc = manifest_common.generate_toc(hash_engine, hash_type, toc_list, hash_list)
toc_len = ctypes.sizeof(toc)
pcd_len += toc_len

manifest_header.length = pcd_len + manifest_header.sig_length

pcd_buf = (ctypes.c_ubyte * pcd_len)()
offset = 0

ctypes.memmove(
    ctypes.addressof(pcd_buf) + offset, ctypes.addressof(manifest_header),
    manifest_header_len)
offset += manifest_header_len

ctypes.memmove(
    ctypes.addressof(pcd_buf) + offset, ctypes.addressof(toc), toc_len)
offset += toc_len

for element in elements_list:
    element_len = ctypes.sizeof(element)
    ctypes.memmove(
        ctypes.addressof(pcd_buf) + offset, ctypes.addressof(element),
        element_len)

    offset += element_len