コード例 #1
0
ファイル: info.py プロジェクト: carriercomm/TorrentInfo
    def _get_udp_connection(self, host, port):
        """Perform the UDP tracker handshake (BEP 15 connect request).

        Sends a connect packet to (host, port) over self._socket and
        returns the 64-bit connection id on success, or None when the
        tracker times out or the reply is shorter than 16 bytes.

        Raises ValueError on a transaction-id mismatch and RuntimeError
        when the tracker answers with the error action.
        """
        # build connection request payload
        # NOTE(review): randrange() already returns an int; int() is redundant.
        transaction_id = int(randrange(0, 255))
        buff = struct.pack('!q', CONNECTION_ID)    # presumably the protocol magic id — verify
        buff += struct.pack('!i', ACTION_CONNECT)
        buff += struct.pack('!i', transaction_id)

        # send payload and get response
        self._socket.sendto(buff, (host, port))
        try:
            response = self._socket.recv(2048)
        except socket.timeout:
            # TODO: issue warning here
            print "tracker down: %s" % host
            return None
        # A valid connect response is 16 bytes: action, txn id, connection id.
        if len(response) < 16:
            # TODO: issue warning here
            print "wrong response length"
            return None

        # extract response information
        resp_action, resp_transaction_id = struct.unpack_from('!ii', response, 0)
        if transaction_id != resp_transaction_id:
            # TODO: issue warning instead
            raise ValueError('Transaction IDs do not match (req=%d resp=%d)' % (transaction_id, resp_transaction_id))
        if resp_action == ACTION_ERROR:
            # NOTE(review): '!s' unpacks a single byte, so only the first
            # character of the tracker's error message is reported.
            error = struct.unpack_from('!s', response, 8)[0]
            # TODO: issue warning instead
            raise RuntimeError('Unable to setup a connection: %s' % error)

        elif resp_action == ACTION_CONNECT:
            connection_id = struct.unpack_from('!q', response, 8)[0]
            return connection_id
        return None
コード例 #2
0
ファイル: adcpt_m_wvs.py プロジェクト: mcworden/mi-dataset-1
    def _parse_hpr_time_series(self, offset, rules):
        """
        Decode the Heading/Pitch/Roll time series data type from the raw
        binary record and append the resulting particle values.
        """
        # rules is a sequence of (name, struct-format) pairs.
        names, fmts = zip(*rules)
        hpr_num_name, beam_angle_name, spare_name, hpr_time_names = names
        hpr_num_fmt, beam_angle_fmt, spare_fmt, hpr_time_fmt = fmts

        # Read the sample count and the single beam-angle value; the spare
        # field is never unpacked, only sized.
        head_fmt = '<' + hpr_num_fmt + beam_angle_fmt
        hpr_num_data, beam_angle_data = struct.unpack_from(head_fmt, self.raw_data, offset)

        # The time-series array starts after the two scalars and the spare.
        array_offset = offset
        for fixed_fmt in (hpr_num_fmt, beam_angle_fmt, spare_fmt):
            array_offset += struct.calcsize(fixed_fmt)
        count = hpr_num_data * HPR_TIME_SERIES_ARRAY_SIZE
        hpr_time_list_data = struct.unpack_from(
            '<%s%s' % (count, hpr_time_fmt), self.raw_data, array_offset)

        # Reshape to rows-per-sample, then transpose so each series
        # (heading, pitch, roll) becomes its own list, per the IDD spec.
        transformed_hpr_time_data = numpy.array(hpr_time_list_data).reshape(
            (hpr_num_data, HPR_TIME_SERIES_ARRAY_SIZE)).transpose().tolist()

        # Collect the scalar particles, then one particle per series.
        particles = [
            {DataParticleKey.VALUE_ID: hpr_num_name, DataParticleKey.VALUE: hpr_num_data},
            {DataParticleKey.VALUE_ID: beam_angle_name, DataParticleKey.VALUE: beam_angle_data},
        ]
        for series_idx in (HEADING_TIME_SERIES_IDX, PITCH_TIME_SERIES_IDX, ROLL_TIME_SERIES_IDX):
            particles.append({DataParticleKey.VALUE_ID: hpr_time_names[series_idx],
                              DataParticleKey.VALUE: transformed_hpr_time_data[series_idx]})
        self.final_result.extend(particles)
コード例 #3
0
ファイル: annotations.py プロジェクト: lukegb/Jawa
    def unpack(self, info):
        """Parse one ElementValue from *info*; return the bytes consumed."""
        # The first byte is the value tag selecting how the payload reads.
        self._tag, = unpack_from('>c', info)
        skip = 1

        if self._tag in b'BCDFIJSZs':
            # Primitive constants and strings: one constant-pool index.
            self._value, = unpack_from('>H', info[skip:])
            skip += 2
        elif self._tag == b'e':
            # Enum constant: a pair of constant-pool indices.
            self._value = unpack_from('>HH', info[skip:])
            skip += 4
        elif self._tag == b'c':
            # Class literal: one constant-pool index.
            self._value, = unpack_from('>H', info[skip:])
            skip += 2
        elif self._tag == b'@':
            # Nested annotation value.
            nested = RuntimeVisibleAnnotation(self._cf)
            skip += nested.unpack(info[skip:])
            self._value = nested
        elif self._tag == b'[':
            # Array value: element count, then that many ElementValues.
            count, = unpack_from('>H', info[skip:])
            skip += 2
            elements = []
            for _ in range(count):
                element = ElementValue(self._cf)
                skip += element.unpack(info[skip:])
                elements.append(element)
            self._value = elements
        else:
            raise ValueError("Unknown ElementValue tag {}".format(self._tag))

        return skip
コード例 #4
0
def script_GetOp(bytes):
    """Iterate over a Bitcoin script, yielding (opcode, pushed_data, next_index).

    Python 2 code: *bytes* is a raw script str; ``vch`` is the pushed
    payload for push opcodes, otherwise None.
    """
    i = 0
    while i < len(bytes):
        vch = None
        opcode = ord(bytes[i])
        i += 1
        # Extended two-byte opcodes: high byte first, low byte follows.
        if opcode >= opcodes.OP_SINGLEBYTE_END:
            opcode <<= 8
            opcode |= ord(bytes[i])
            i += 1

        if opcode <= opcodes.OP_PUSHDATA4:
            # Push opcodes: the size is either the opcode value itself or
            # an explicit 1/2/4-byte little-endian length that follows.
            nSize = opcode
            if opcode == opcodes.OP_PUSHDATA1:
                nSize = ord(bytes[i])
                i += 1
            elif opcode == opcodes.OP_PUSHDATA2:
                (nSize,) = struct.unpack_from('<H', bytes, i)
                i += 2
            elif opcode == opcodes.OP_PUSHDATA4:
                (nSize,) = struct.unpack_from('<I', bytes, i)
                i += 4
            # NOTE(review): no bounds check — a truncated script yields a
            # short vch and an index past len(bytes); compare the guarded
            # variant of this function later in this file.
            vch = bytes[i:i+nSize]
            i += nSize

        yield (opcode, vch, i)
コード例 #5
0
ファイル: windows.py プロジェクト: kartikeyap/grr
  def Run(self, args):
    """Query a memory driver device and reply with its memory run layout.

    Opens the device at args.path, issues INFO_IOCTRL to obtain the CR3
    value and the number of memory runs, then reads each (start, length)
    run pair and sends a MemoryInformation reply.
    """
    # This action might crash the box so we need to flush the transaction log.
    self.SyncTransactionLog()

    # Do any initialization we need to do.
    logging.debug("Querying device %s", args.path)

    # NOTE(review): the handle is never closed; consider CloseHandle.
    fd = win32file.CreateFile(
        args.path,
        win32file.GENERIC_READ | win32file.GENERIC_WRITE,
        win32file.FILE_SHARE_READ | win32file.FILE_SHARE_WRITE,
        None,
        win32file.OPEN_EXISTING,
        win32file.FILE_ATTRIBUTE_NORMAL,
        None)

    # Info block: CR3 (Q), an ignored field (Q), then the run count (l)
    # -- presumably; verify against the driver's ioctl definition.
    data = win32file.DeviceIoControl(fd, INFO_IOCTRL, "", 1024, None)
    fmt_string = "QQl"
    cr3, _, number_of_runs = struct.unpack_from(fmt_string, data)

    result = rdfvalue.MemoryInformation(
        cr3=cr3,
        device=rdfvalue.PathSpec(
            path=args.path,
            pathtype=rdfvalue.PathSpec.PathType.MEMORY))

    # Each run is a (start, length) pair of 64-bit values (16 bytes).
    offset = struct.calcsize(fmt_string)
    for x in range(number_of_runs):
      start, length = struct.unpack_from("QQ", data, x * 16 + offset)
      result.runs.Append(offset=start, length=length)

    self.SendReply(result)
コード例 #6
0
    def load_label_data_set(self, label_data_dir):
        """Load an MNIST-style label file.

        Returns the labels reshaped to a (label_count, 1) ndarray.
        """
        logging.info("load label set from {0}.".format(label_data_dir))

        with open(label_data_dir, "rb") as label_file:
            raw = label_file.read()

        # File header: big-endian magic number followed by the label count.
        header = struct.unpack_from('>II', raw, 0)
        logging.info("head:{0}".format(header))

        num_labels = header[1]
        logging.info("img_num:{0}".format(num_labels))

        payload_offset = struct.calcsize('>II')
        logging.info("offset:{0}".format(payload_offset))

        # One unsigned byte per label.
        label_fmt = '>' + str(num_labels) + "B"
        logging.info("img_num_string:{0}".format(label_fmt))

        labels = struct.unpack_from(label_fmt, raw, payload_offset)
        labels = np.reshape(labels, [num_labels, 1])
        logging.info("len(all_label_2d_ndarray):{0}".format(len(labels)))
        logging.info("type(all_label_2d_ndarray):{0}".format(type(labels)))

        logging.info("all_label_2d_ndarray[0]:{0}".format(labels[0]))
        logging.info("type(all_label_2d_ndarray[0]):{0}".format(type(labels[0])))

        logging.info("all_label_2d_ndarray[0][0]:{0}".format(labels[0][0]))
        logging.info("type(all_label_2d_ndarray[0][0]):{0}".format(type(labels[0][0])))

        logging.info("Load label finished.")
        return labels
コード例 #7
0
ファイル: extfs.py プロジェクト: leyyer/exfsh
 def __init__(self, idx, buf):
     """Parse an ext2-style on-disk inode structure (inode number *idx*)."""
     self.inode_no = idx
     # Python 2 `buffer` view over the raw bytes for zero-copy unpacking.
     sb = buffer(bytearray(buf))
     sz = 0
     # Fixed head: mode, uid, size, atime/ctime/mtime/dtime, gid,
     # link count, block count, flags, osd1 — all little-endian.
     fmt = "<2H5I2H3I"
     (self.i_mode,
      self.i_uid,
      self.i_size,
      self.i_atime,
      self.i_ctime,
      self.i_mtime,
      self.i_dtime,
      self.i_gid,
      self.i_links_count,
      self.i_blocks,
      self.i_flags,
      self.i_osd1) = struct.unpack_from(fmt, sb, sz)
     sz += struct.calcsize(fmt)
     # 15 block pointers: 12 direct, then single/double/triple indirect.
     fmt = "<15I"
     self.i_block = struct.unpack_from(fmt, sb, sz)
     sz += struct.calcsize(fmt)
     # Tail: generation (typo 'i_gneration' kept — callers may rely on it),
     # file/dir ACL block, fragment address, 12-byte OS-dependent area.
     fmt = "<4I12s"
     (self.i_gneration,
      self.i_file_acl,
      self.i_dir_acl,
      self.i_faddr,
      self.i_osd2) = struct.unpack_from(fmt, sb, sz)
コード例 #8
0
ファイル: containers.py プロジェクト: j-howell/calibre
 def __init__(self, data):
     """Parse a container record header from raw record bytes *data*.

     All multi-byte fields are big-endian. When an EXTH block is present
     at offset 48 the EXTH header and the container title are decoded.
     """
     self.ident = data[:4]
     self.record_size, self.type, self.count, self.encoding = unpack_from(b'>IHHI', data, 4)
     # Map known codepage ids to codec names; unknown ids keep their repr
     # so a later decode fails loudly rather than silently.
     self.encoding = {
             1252 : 'cp1252',
             65001: 'utf-8',
         }.get(self.encoding, repr(self.encoding))
     # Eight 32-bit fields at offset 16; only some have known meanings.
     rest = list(unpack_from(b'>IIIIIIII', data, 16))
     self.num_of_resource_records = rest[2]
     self.num_of_non_dummy_resource_records = rest[3]
     self.offset_to_href_record = rest[4]
     self.unknowns1 = rest[:2]
     self.unknowns2 = rest[5]
     self.header_length = rest[6]
     self.title_length = rest[7]
     self.resources = []
     self.hrefs = []
     if data[48:52] == b'EXTH':
         self.exth = EXTHHeader(data[48:])
         # The title immediately follows the EXTH block.
         self.title = data[48 + self.exth.length:][:self.title_length].decode(self.encoding)
         self.is_image_container = self.exth[539] == 'application/image'
     else:
         # Sentinel string keeps debug output readable when EXTH is absent.
         self.exth = ' No EXTH header present '
         self.title = ''
         self.is_image_container = False
     self.bytes_after_exth = data[self.header_length + self.title_length:]
     # Count the NUL padding bytes trailing the header/title area.
     self.null_bytes_after_exth = len(self.bytes_after_exth) - len(self.bytes_after_exth.replace(b'\0', b''))
コード例 #9
0
def script_GetOp(bytes):
    """Iterate over a Bitcoin script, yielding (opcode, pushed_data, next_index).

    Python 2 code. Bounds-checked variant: a push whose declared size
    runs past the end of the script yields a "_INVALID_"-prefixed payload
    and stops at the end of the input.
    """
    i = 0
    while i < len(bytes):
        vch = None
        opcode = ord(bytes[i])
        i += 1

        if opcode <= opcodes.OP_PUSHDATA4:
            # Push opcodes: the size is either the opcode value itself or
            # an explicit 1/2/4-byte little-endian length that follows.
            nSize = opcode
            if opcode == opcodes.OP_PUSHDATA1:
                nSize = ord(bytes[i])
                i += 1
            elif opcode == opcodes.OP_PUSHDATA2:
                (nSize,) = struct.unpack_from('<H', bytes, i)
                i += 2
            elif opcode == opcodes.OP_PUSHDATA4:
                (nSize,) = struct.unpack_from('<I', bytes, i)
                i += 4
            if i+nSize > len(bytes):
              # Truncated push: flag it and consume the rest of the script.
              vch = "_INVALID_"+bytes[i:]
              i = len(bytes)
            else:
             vch = bytes[i:i+nSize]
             i += nSize

        yield (opcode, vch, i)
コード例 #10
0
ファイル: BodyBugg.py プロジェクト: greenthrall/BB_Sync_Test
def ReadRecord(d, offset=0x0):
    """Decode one data record from buffer *d* (Python 2 code).

    The first byte is the record id; the payload layout is looked up in
    the module-level RecPack / RecRepeat tables. *offset* is only used
    in the assertion message.

    Returns (id, total_bytes_consumed, decoded_values); for the
    end-of-data ids (0xff and 0x4) the last two elements are None.
    """
    id = d[0]
    d=d[1:] # Eat id
    if id == 0xff or id == 0x4: # Normal end of Data
        return id, None, None
    sztotal = 1 
    assert RecPack.has_key(id), "Unknown record ID %i at offset %i" % (id, offset)
    if RecRepeat.has_key(id):
        # Repeated record: an init header (format RecRepeat[id][1]) then
        # RecRepeat[id][0] fixed-size entries in format RecPack[id].
        sz = struct.calcsize(RecPack[id])
        # NOTE(review): the decoded 'init' header values are never used.
        init=struct.unpack_from(RecRepeat[id][1], d)
        szinit=struct.calcsize(RecRepeat[id][1])
        d=d[szinit:]
        sztotal += szinit
        res=[]
        for i in range(0, RecRepeat[id][0]):
            res.append(struct.unpack_from(RecPack[id], d))
            d=d[sz:]
            sztotal += sz
    elif type(RecPack[id]) == str:
        # Simple record: a single struct format string.
        sz = struct.calcsize(RecPack[id])
        res = struct.unpack_from(RecPack[id], d)
        sztotal += sz
    elif type(RecPack[id]) == int: # 12-bit field array
        # A padding byte 0xFF may be present
        # NOTE(review): if RecPack[id] is neither str nor int, 'res' is
        # unbound and the final return raises NameError.
        sz = RecPack[id] - 1
        res = ReadPacked12Bit(d[:sz])
        sztotal += sz
    return id, sztotal, res
コード例 #11
0
ファイル: __init__.py プロジェクト: dansteingart/dm4reader
def _read_tag_data_info(dmfile):
    tag_array_length = struct.unpack_from('>Q', dmfile.read(8))[0] #DM4 specifies this property as always big endian
    format_str = '>' + tag_array_length * 'q' #Big endian signed long
     
    tag_array_types = struct.unpack_from(format_str, dmfile.read(8*tag_array_length))
    
    return (tag_array_length,  tag_array_types)
コード例 #12
0
ファイル: dbparse.py プロジェクト: chudooder/osutools
def parseString(db, offset):
    """Read a length-prefixed string at *offset* in buffer *db* (Python 2).

    Layout: a presence byte (0x00 = absent, 0x0b = present), then a
    ULEB128-encoded byte length, then that many UTF-8 bytes.

    Returns (decoded_string, offset_past_the_field). Any other presence
    byte falls through and implicitly returns None.
    """
    existence = unpack_from('b', db, offset)[0]
    if existence == 0x00:
        return ("", offset+1)
    elif existence == 0x0b:
        # decode ULEB128: 7 payload bits per byte, high bit = continue
        length = 0
        shift = 0
        offset += 1
        while True:
            val = unpack_from('B', db, offset)[0]
            length |= ((val & 0x7F) << shift)
            offset += 1
            if (val & (1 << 7)) == 0:
                break
            shift += 7

        string = unpack_from(str(length)+'s', db, offset)[0]
        offset += length

        # The offset still advances past the bytes even when decoding
        # fails, so the caller stays in sync with the buffer.
        unic = u''
        try:
            unic = unicode(string, 'utf-8')
        except UnicodeDecodeError:
            print "Could not parse UTF-8 string, returning empty string."

        return (unic, offset)
コード例 #13
0
 def sec_info(self, secnum):
     """Return (start_offset, flags, next_offset) for section *secnum*.

     Section records live at offset 78 of the data, eight bytes apiece
     as big-endian (offset, flags) pairs; the final section is bounded
     by the end of the data.
     """
     record_offset = 78 + (secnum * 8)
     start_offset, flgval = struct.unpack_from('>2L', self.datain, record_offset)
     if secnum == self.num_sections:
         next_offset = len(self.datain)
     else:
         # The next record's start offset bounds this section.
         next_offset = struct.unpack_from('>2L', self.datain, record_offset + 8)[0]
     return start_offset, flgval, next_offset
コード例 #14
0
ファイル: l2tpclient.py プロジェクト: ut0mt8/l2tpclient
    def parse_resp(self,buf):
        """Parse an incoming L2TP packet header (Python 2 code).

        Walks the optional length / sequence / offset fields as announced
        by the control-flag bits, then, for data packets, converts the
        payload to async framing and writes it to the pppd fd. Returns
        None on a bad version; otherwise falls through with no explicit
        return value.
        """

        header_offset = 2
        avp_offset = 8
        nr = 0
        ns = 0

        # read the header: the first 16 bits carry flags + version.
        (cflag,) = struct.unpack_from('!H', buf)

        # Individual flag bits, picked out of the binary-string rendering:
        # packet type, length-present, sequence-present, offset-present,
        # priority, and the 4-bit protocol version.
        cflag_bin = int2bin(cflag,16)
        ptype = cflag_bin[0]
        blen = cflag_bin[1]
        sbit = cflag_bin[4]
        obit = cflag_bin[6]
        pbit = cflag_bin[7]
        ver  = cflag_bin[12:16]

        if self.debug:
            print "<- l2tp packet dump"
            print "<-: l2tp cflag bits : %s|%s|%s|%s|%s|%s" % (ptype, blen, sbit, obit, pbit, ver)

        if ver != '0010': # L2TP version must be 2
            print '!! Not an valid l2tp packet : discarding'
            return None

        # Optional total-length field.
        if blen == '1':
            (plen,) = struct.unpack_from('!H', buf, offset=header_offset)
            if self.debug:
                print "<-: l2tp length : %d" % plen
            header_offset += 2

        # Tunnel and session ids are always present.
        (tid, sid) = struct.unpack_from('!HH', buf, offset=header_offset)
        if self.debug:
            print "<-: l2tp tunnel_id : %d, session_id : %d" % (tid, sid)
        header_offset += 4

        # Optional Ns/Nr sequence numbers.
        if sbit == '1':
            (ns, nr) = struct.unpack_from('!HH', buf, offset=header_offset)
            if self.debug:
                print "<-: l2tp ns : %d, nr : %d" % (ns, nr)
            header_offset += 4
            avp_offset += 4

        # Optional payload-offset field.
        if obit == '1':
            (offset_size, offset_pad) = struct.unpack_from('!HH', buf, offset=header_offset)
            if self.debug:
                print "<-: l2tp offset_size : %d, offset_pad : %d" % (offset_size, offset_pad)
            header_offset += 4
            avp_offset += 4

        if ptype == '0': # data packet
            # write to pppd
            data = buf[header_offset:]
            try:
                async_buf = self.pppd_sync_to_async(data)
                pty._writen(self.pppd_fd, async_buf)
            except OSError, se:
                # EAGAIN/EINTR are transient; anything else is re-raised.
                if se.args[0] not in (errno.EAGAIN, errno.EINTR):
                    raise
コード例 #15
0
ファイル: cbor.py プロジェクト: DisposaBoy/GoSublime
def _tag_aux(fp, tb):
    """Decode a CBOR initial byte *tb*, reading any additional-info bytes
    from *fp*.

    Returns (major_type, info_bits, aux_value, bytes_read); aux_value is
    None for the variable-length marker.
    """
    bytes_read = 1
    tag = tb & CBOR_TYPE_MASK
    tag_aux = tb & CBOR_INFO_BITS

    if tag_aux <= 23:
        # Small values are carried directly in the info bits.
        return tag, tag_aux, tag_aux, bytes_read
    if tag_aux == CBOR_VAR_FOLLOWS:
        # Variable-length marker: no auxiliary value to read.
        return tag, tag_aux, None, bytes_read

    # Fixed-width auxiliary value follows: map info code -> (size, format).
    widths = {
        CBOR_UINT8_FOLLOWS: (1, "!B"),
        CBOR_UINT16_FOLLOWS: (2, "!H"),
        CBOR_UINT32_FOLLOWS: (4, "!I"),
        CBOR_UINT64_FOLLOWS: (8, "!Q"),
    }
    assert tag_aux in widths, "bogus tag {0:02x}".format(tb)
    size, fmt = widths[tag_aux]
    aux = struct.unpack_from(fmt, fp.read(size), 0)[0]
    bytes_read += size
    return tag, tag_aux, aux, bytes_read
コード例 #16
0
def load_mnist(im_path, lb_path):
    """Load an MNIST/IDX image file and its matching label file.

    Args:
        im_path: path to an IDX3 image file (magic 2051).
        lb_path: path to an IDX1 label file (magic 2049).

    Returns:
        [ims, numRows, numColumns, lbs] where ims is an
        (numImages, numRows*numColumns) float array of pixel values and
        lbs a (numLabels,) float array of labels.

    Raises:
        NameError: if either file's magic number is wrong (exception type
            kept for backward compatibility with existing callers).
    """
    # -- images ---------------------------------------------------------
    # `with` guarantees the handle is closed (it was leaked before).
    with open(im_path, 'rb') as binfile:
        buf = binfile.read()
    index = 0
    magic, numImages, numRows, numColumns = \
        struct.unpack_from('>IIII', buf, index)
    index += struct.calcsize('>IIII')
    if magic != 2051:
        raise NameError('MNIST TRAIN-IMAGE INCCORECT!')
    # Derive the per-image format from the header instead of the
    # hard-coded 28*28 ('>784B'), so any IDX image size loads correctly.
    pixels_per_image = numRows * numColumns
    im_fmt = '>%dB' % pixels_per_image
    im_size = struct.calcsize(im_fmt)
    ims = np.zeros([numImages, pixels_per_image])
    for i in range(numImages):
        ims[i, :] = struct.unpack_from(im_fmt, buf, index)
        index += im_size
    # -- labels ---------------------------------------------------------
    with open(lb_path, 'rb') as binfile:
        buf = binfile.read()
    index = 0
    magic, numLabels = struct.unpack_from('>II', buf, index)
    index += struct.calcsize('>II')
    if magic != 2049:
        raise NameError('MNIST TRAIN-LABEL INCORRECT!')
    lbs = np.zeros(numLabels)
    lbs[:] = struct.unpack_from('>' + str(numLabels) + 'B', buf, index)
    return [ims, numRows, numColumns, lbs]
コード例 #17
0
    def load_image_data_set(self, img_data_dir):
        """Load an MNIST-style image file.

        Returns the pixels reshaped to an (img_num, width, height) ndarray.
        """
        logging.info("Load image data set from {0}.".format(img_data_dir))


        with open(img_data_dir, "rb") as image_file:
            raw = image_file.read()

        # Header: four big-endian uint32 values — the magic number, the
        # image count, image width and image height.
        header = struct.unpack_from('>IIII', raw, 0)
        logging.info("head:{0}".format(header))

        header_size = struct.calcsize('>IIII')
        num_images, width, height = header[1], header[2], header[3]
        logging.info("magic_num:{0}".format(header_size))
        logging.info("img_num:{0}".format(num_images))
        logging.info("img_width:{0}".format(width))
        logging.info("img_height:{0}".format(height))

        # One unsigned byte per pixel, e.g. '>47040000B' for 60000*28*28.
        total_pixels = num_images * width * height
        pixel_fmt = '>' + str(total_pixels) + 'B'
        logging.info("all_img_bit_string:{0}".format(pixel_fmt))

        pixels = struct.unpack_from(pixel_fmt, raw, header_size)
        return np.reshape(pixels, [num_images, width, height])
コード例 #18
0
ファイル: tcp_chat_client.py プロジェクト: delco225/C2W
 def receptionListUser(self,packet,status):
     """Decode a user-list packet and push the list to the client UI.

     Header ">LH": the H is the message length; entries start at offset
     8. Each entry packs one byte (name length << 1 | room-status bit),
     a 2-byte user id, then the user name bytes — presumably; verify
     against the wire protocol spec. Status bit 1 = main room.
     """
     self.listUser=[]
     lengthMsg=struct.unpack_from(">LH",packet)[1]
     print "00000000000000000000on reçoit la list0000000000000000000"
     lengthData=lengthMsg-8
     offset=8
     i=0
     while i< lengthData:
         # First byte packs the name length (upper 7 bits) and the
         # room-status flag (low bit).
         lengthNameUser_Dispo=struct.unpack_from(">B",packet,offset)[0]
         lengthNameUser=lengthNameUser_Dispo>>1
         S=lengthNameUser_Dispo&1
         Data=">BH"+ str(lengthNameUser) + "s"
         userName= struct.unpack_from(Data,packet,offset)[2]
         userId=struct.unpack_from(Data,packet,offset)[1]
         if (S==1):
             ROOM=ROOM_IDS.MAIN_ROOM
         else:
             ROOM=ROOM_IDS.MOVIE_ROOM
         self.listUser.append((userName,ROOM))
         self.listUser_Id.append((userName,userId))
         # Entry size: 1 length/status byte + 2 id bytes + name bytes.
         i+=(lengthNameUser+3)
         offset+=(lengthNameUser+3)
     liste=[]
     if status==userStatus['waitingMainRoomUserList'] or status==userStatus['mainRoom']:
         liste=self.listUser
     elif status==userStatus['waitingfMovieRoomUserList'] or status==userStatus['movieRoom']:
         print "******************on entre dans cette boucle************************"
         # In a movie room every user is listed under this room's name.
         i=0
         while i<len(self.listUser):
             liste.append((self.listUser[i][0],self.RoomName)) # there used to be self.thisRoomName here???
             i+=1
     if status==userStatus['mainRoom'] or status==userStatus['movieRoom']:
             self.clientProxy.setUserListONE(liste)
             print "§!§§§§§§§§§§§§§§§update ok "
コード例 #19
0
ファイル: extfs.py プロジェクト: leyyer/exfsh
 def __read_block15(self, size):
     """Read data via the triple-indirect block pointer (i_block[14]).

     Python 2 integer division ('/') is relied on throughout. Returns
     the list of blocks read; empty when the current position is below
     the triple-indirect range or the pointer is unset.
     """
     # Lowest file offset served by the triple-indirect tree:
     # 12 direct blocks plus one full double-indirect range.
     low = self.blksize * ((self.blksize / 4) ** 2) + self.blksize * 12
     blk = []
     # Number of 4-byte block pointers that fit in one block.
     ndx = self.blksize / 4
     if self._cur_pos >= low:
         if self.inode.i_block[14] == 0:
             return blk
         # Position relative to the start of the triple-indirect range,
         # split into per-level indices and an in-block remainder.
         cpos = self._cur_pos - low
         n3 = cpos / self.blksize
         r = cpos % self.blksize
         n2 = n3 / ndx
         n1 = n2 / ndx
         n0 = n1 / ndx
         fmt = "<%dI" % ndx
         b15 = struct.unpack_from(fmt, buffer(bytearray(self.filesys.read_block(self.inode.i_block[14]))))
         for x in b15[n0:]:
             assert(x > 0)
             c15 = struct.unpack_from(fmt, buffer(bytearray(self.filesys.read_block(x))))
             for y in c15[n1:]:
                 d15 = struct.unpack_from(fmt, buffer(bytearray(self.filesys.read_block(y))))
                 bk = self._do_read(d15[n2:], r, size)
                 # After the first partial read, continue from offset 0
                 # at the start of each subsequent subtree.
                 r = 0
                 n2 = 0
                 blk += bk
                 size -= len(bk)
                 if size == 0:
                     break
             n1 = 0
             if size == 0:
                 break
     return blk
コード例 #20
0
	def parse(self, input):
		"""Parse a zipped depot manifest (Python 2: StringIO).

		*input* is a zip archive with a single member 'z' whose payload
		holds three length-prefixed protobuf sections — payload,
		metadata, signature — each introduced by a magic/length pair.
		"""
		zip_buffer = StringIO.StringIO(input)
		with zipfile.ZipFile(zip_buffer, 'r') as zip:
			payload = zip.read('z')

		# Section 1: content payload.
		magic, payload_len = struct.unpack_from('<II', payload)

		if magic != DepotManifest.PROTOBUF_PAYLOAD_MAGIC:
			raise Exception("Expecting protobuf payload")

		self.payload = content_manifest_pb2.ContentManifestPayload()
		self.payload.ParseFromString(payload[8:8+payload_len])

		# Section 2: metadata, immediately after the payload section.
		pos_1 = 8+payload_len
		magic, meta_len = struct.unpack_from('<II', payload[pos_1:])

		if magic != DepotManifest.PROTOBUF_METADATA_MAGIC:
			raise Exception("Expecting protobuf metadata")

		self.metadata = content_manifest_pb2.ContentManifestMetadata()
		self.metadata.ParseFromString(payload[8+pos_1:8+pos_1+meta_len])

		# Section 3: signature, immediately after the metadata section.
		pos_2 = 8+pos_1+meta_len
		magic, sig_len = struct.unpack_from('<II', payload[pos_2:])

		if magic != DepotManifest.PROTOBUF_SIGNATURE_MAGIC:
			raise Exception("Expecting protobuf signature")

		self.signature = content_manifest_pb2.ContentManifestSignature()
		self.signature.ParseFromString(payload[8+pos_2:8+pos_2+sig_len])
コード例 #21
0
ファイル: spi_descriptor.py プロジェクト: abazhaniuk/chipsec
def get_spi_regions(fd):
    """Parse the SPI flash descriptor in *fd* and return the region table.

    Returns a list of (index, name, flreg, base, limit, notused) tuples,
    or None when the signature or descriptor-size checks fail.
    """
    # The flash descriptor signature must sit exactly at offset 0x10.
    if fd.find(SPI_FLASH_DESCRIPTOR_SIGNATURE) != 0x10:
        return None

    flmap0 = struct.unpack_from('=I', fd[0x14:0x18])[0]
    # Flash Region Base Address (bits [23:16])
    frba = (flmap0 & 0x00FF0000) >> 12
    # Number of Regions (bits [26:24]); decoded but not used below.
    nr = ((flmap0 & 0xFF000000) >> 24) & 0x7

    flregs = []
    for r in range(spi.SPI_REGION_NUMBER_IN_FD):
        flreg_off = frba + r * 4
        flreg = struct.unpack_from('=I', fd[flreg_off:flreg_off + 0x4])[0]
        base, limit = spi.get_SPI_region(flreg)
        # A region whose base exceeds its limit is unused.
        flregs.append((r, spi.SPI_REGION_NAMES[r], flreg, base, limit, base > limit))

    # Sanity-check the descriptor region itself.
    fd_region = flregs[spi.FLASH_DESCRIPTOR]
    fd_size = fd_region[4] - fd_region[3] + 1
    if fd_region[5] or fd_size != SPI_FLASH_DESCRIPTOR_SIZE:
        return None

    return flregs
コード例 #22
0
ファイル: wfastcgi.py プロジェクト: jsschultz/PTVS
def read_encoded_int(content, offset):
    """Decode a FastCGI variable-length integer at *offset* in *content*.

    Values below 0x80 occupy a single byte; otherwise four big-endian
    bytes are used with the top bit as a marker that must be cleared.

    Returns (offset_past_the_field, value).
    """
    first = struct.unpack_from('>B', content, offset)[0]
    if first >= 0x80:
        # Four-byte form: strip the continuation marker from bit 31.
        value = struct.unpack_from('>I', content, offset)[0] & 0x7FFFFFFF
        return offset + 4, value
    return offset + 1, first
コード例 #23
0
 def _parse(self, data):
     """Extract DEX header fields: magic, checksum, signature, plus the
     size/offset pairs of the string, type, class-def and method tables.
     Byte order is the platform's native order, as in the original."""
     assert 104 <= len(data)
     self.magic, self.checksum, self.signature = struct.unpack_from("8sI20s", data, 0)
     # Each id table is described by a (size, file offset) uint32 pair.
     table_slots = (("string_ids", 56), ("type_ids", 64),
                    ("class_defs", 96), ("method_ids", 88))
     for prefix, header_off in table_slots:
         size, start = struct.unpack_from("II", data, header_off)
         setattr(self, prefix + "_size", size)
         setattr(self, prefix + "_off", start)
コード例 #24
0
ファイル: test_struct.py プロジェクト: M31MOTH/cpython
    def test_unpack_from(self):
        """Exercise Struct.unpack_from and struct.unpack_from over bytes
        and bytearray buffers: default/explicit offsets, every in-range
        offset, out-of-range offsets raising, and keyword arguments."""
        test_string = b'abcd01234'
        fmt = '4s'
        s = struct.Struct(fmt)
        for cls in (bytes, bytearray):
            data = cls(test_string)
            # Bound-method form.
            self.assertEqual(s.unpack_from(data), (b'abcd',))
            self.assertEqual(s.unpack_from(data, 2), (b'cd01',))
            self.assertEqual(s.unpack_from(data, 4), (b'0123',))
            for off in range(6):
                self.assertEqual(s.unpack_from(data, off), (data[off:off + 4],))
            # Offsets leaving fewer than 4 bytes must raise struct.error.
            for off in range(6, len(test_string) + 1):
                self.assertRaises(struct.error, s.unpack_from, data, off)
            # Module-level function form.
            self.assertEqual(struct.unpack_from(fmt, data), (b'abcd',))
            self.assertEqual(struct.unpack_from(fmt, data, 2), (b'cd01',))
            self.assertEqual(struct.unpack_from(fmt, data, 4), (b'0123',))
            for off in range(6):
                self.assertEqual(struct.unpack_from(fmt, data, off), (data[off:off + 4],))
            for off in range(6, len(test_string) + 1):
                self.assertRaises(struct.error, struct.unpack_from, fmt, data, off)

        # keyword arguments
        self.assertEqual(s.unpack_from(buffer=test_string, offset=2),
                         (b'cd01',))
コード例 #25
0
ファイル: adcpt_m_wvs.py プロジェクト: mcworden/mi-dataset-1
    def _parse_directional_spectrum(self, offset, rules):
        """
        Decode the Directional Spectrum data type from the raw binary
        record and append the resulting particle values.
        """
        # rules is a sequence of (name, struct-format) pairs.
        names, fmts = zip(*rules)
        num_freq_name, num_dir_name, good_name, dat_name = names
        num_freq_fmt, num_dir_fmt, good_fmt, dat_fmt = fmts

        # Scalars first: frequency count, direction count, 'good' value.
        scalar_fmt = '<%s%s%s' % (num_freq_fmt, num_dir_fmt, good_fmt)
        num_freq_data, num_dir_data, dspec_good_data = struct.unpack_from(
            scalar_fmt, self.raw_data, offset)

        # The spectrum array follows immediately after the three scalars.
        array_offset = offset + struct.calcsize(num_freq_fmt) + \
            struct.calcsize(num_dir_fmt) + struct.calcsize(good_fmt)
        dspec_dat_list_data = struct.unpack_from(
            '<%s%s' % (num_freq_data * num_dir_data, dat_fmt),
            self.raw_data, array_offset)

        # Reshape the flat array to (num_freq, num_dir) per the IDD spec.
        transformed_dat_data = numpy.array(dspec_dat_list_data).reshape(
            (num_freq_data, num_dir_data)).tolist()

        # Add to the collected parameter data.
        self.final_result.extend((
            {DataParticleKey.VALUE_ID: num_freq_name, DataParticleKey.VALUE: num_freq_data},
            {DataParticleKey.VALUE_ID: num_dir_name, DataParticleKey.VALUE: num_dir_data},
            {DataParticleKey.VALUE_ID: good_name, DataParticleKey.VALUE: dspec_good_data},
            {DataParticleKey.VALUE_ID: dat_name, DataParticleKey.VALUE: transformed_dat_data}))
コード例 #26
0
ファイル: gcap.py プロジェクト: GeekOfWires/gcapy
    def _decode_var_string(self, data):
        """Decode a variable-length string field.

        The first byte packs the string type (low 6 bits, currently all
        decoded identically) and a width code for the size field in the
        top 2 bits: 0 -> 1-byte size, 1 -> 2 bytes, 2 -> 4 bytes.

        Returns (string_bytes, offset_of_next_field).
        """
        first_byte = struct.unpack_from("B", data)[0]

        # At the moment, all strings are treated the same
        # regardless of type.
        string_type = first_byte & ~0xc0
        width_code = (first_byte & 0xc0) >> 6

        # width code -> (struct format for the size field, its byte width)
        size_formats = {0: ("B", 1), 1: ("H", 2), 2: ("I", 4)}
        if width_code not in size_formats:
            raise GCAPFormatError("unsupported variable string type")

        fmt, width = size_formats[width_code]
        size = struct.unpack_from(fmt, data, 1)[0]
        payload_start = 1 + width
        nextPointer = payload_start + size
        stringOut = data[payload_start:payload_start + size]

        return (stringOut, nextPointer)
コード例 #27
0
def get_images(filename):
    """Read an IDX image file and return quantized images as an ndarray.

    Each pixel is bucketed into five levels: 0 stays 0, [1,64) -> 1,
    [64,128) -> 2, [128,192) -> 3 and [192,256) -> 4.

    Relies on the module-level format constants ``big_endian``,
    ``four_bytes`` and ``picture_bytes``.

    Returns an (num_images, pixels_per_image) numpy array.
    """
    # `with` releases the OS file handle even if parsing below fails.
    with open(filename, 'rb') as bin_file:
        buf = bin_file.read()  # the whole file is read into memory
    index = 0
    magic, num_images, num_rows, num_colums = struct.unpack_from(big_endian + four_bytes, buf, index)

    # calcsize of the combined format already covers all four header
    # fields, so a single addition advances past the whole header.
    index += struct.calcsize(big_endian + four_bytes)
    print(num_images)

    images = []  # temp images as tuples converted to lists
    per_image = struct.calcsize(big_endian + picture_bytes)
    for x in range(num_images):
        im = list(struct.unpack_from(big_endian + picture_bytes, buf, index))
        index += per_image
        for i in range(len(im)):
            # BUG FIX: the original used a chain of independent `if`
            # statements whose final `else` reset every value below 192
            # back to 0; `elif` keeps each bucket assignment intact.
            if im[i] >= 1 and im[i] < 64:
                im[i] = 1
            elif im[i] >= 64 and im[i] < 128:
                im[i] = 2
            elif im[i] >= 128 and im[i] < 192:
                im[i] = 3
            elif im[i] >= 192 and im[i] < 256:
                im[i] = 4
            else:
                im[i] = 0
        images.append(im)
    a = np.array(images)
    return a
コード例 #28
0
ファイル: desktop.py プロジェクト: milkypostman/caw
    def _get_desktops(self, *args):
        """Fetch the desktop count and names from the X root window.

        Issues both GetProperty requests before reading either reply so
        the round trips overlap, then refreshes the widget.
        """
        conn = self.parent.connection
        scr = self.parent.screen
        # _NET_NUMBER_OF_DESKTOPS: a single CARDINAL value.
        totalc = conn.core.GetProperty(0,
                scr.root,
                self._NET_NUMBER_OF_DESKTOPS,
                xproto.Atom.CARDINAL,
                0,
                12)

        # _NET_DESKTOP_NAMES: NUL-separated UTF-8 name list.
        namesc = conn.core.GetProperty(0,
                scr.root,
                self._NET_DESKTOP_NAMES,
                self.UTF8_STRING,
                0,
                32)

        totalr = totalc.reply()
        self.num_desktops = struct.unpack_from("I", totalr.value.buf())[0]

        namesr = namesc.reply()
        # Split on NUL separators, dropping the trailing terminator.
        self.desktops = struct.unpack_from("%ds" % namesr.value_len,
                namesr.value.buf())[0].strip("\x00").split("\x00")

        self._update()
コード例 #29
0
ファイル: mobi_unpack.py プロジェクト: aron/mobi-to-readmill
 def __init__(self, filename, perm):
         """Open a PalmDB/MOBI file and read its section offset table.

         Python 2 code (`file` builtin). self.ident holds the 8-byte
         type/creator identifier; self.sections holds each section's
         start offset plus a 0xfffffff end sentinel.
         """
         self.f = file(filename, perm)
         header = self.f.read(78)
         self.ident = header[0x3C:0x3C+8]
         # Section count lives at header offset 76 (big-endian uint16).
         self.num_sections, = struct.unpack_from('>H', header, 76)
         sections = self.f.read(self.num_sections*8)
         # Records are (offset, flags) pairs; [::2] keeps only offsets.
         self.sections = struct.unpack_from('>%dL' % (self.num_sections*2), sections, 0)[::2] + (0xfffffff, )
コード例 #30
0
ファイル: info.py プロジェクト: carriercomm/TorrentInfo
    def _get_udp_scrape_data(self, host, port, connection_id):
        """Scrape the UDP tracker (BEP 15) for this torrent's swarm stats.

        Returns a (seeds, complete, leeches) tuple, or None on timeout or
        a short reply. Raises ValueError on a transaction-id mismatch and
        RuntimeError when the tracker reports an error action.
        """
        # build scrape request payload
        # NOTE(review): randrange() already returns an int; int() is redundant.
        transaction_id = int(randrange(0, 255))
        buff = struct.pack('!q', connection_id)
        buff += struct.pack('!i', ACTION_SCRAPE)
        buff += struct.pack('!i', transaction_id)
        buff += struct.pack('!20s', self.torrent_hash)

        # send payload and get response
        self._socket.sendto(buff, (host, port))
        try:
            response = self._socket.recv(2048)
        except socket.timeout:
            return None
        # A scrape reply needs at least action + txn id + one stats triple.
        if len(response) < 20:
            # TODO: issue warning here
            print "wrong response length"
            return None

        # extract response information
        resp_action, resp_transaction_id = struct.unpack_from('!ii', response, 0)
        if transaction_id != resp_transaction_id:
            # TODO: issue warning instead
            raise ValueError('Transaction IDs do not match (req=%d resp=%d)' % (transaction_id, resp_transaction_id))
        if resp_action == ACTION_ERROR:
            # NOTE(review): '!s' unpacks only the first byte of the
            # tracker's error message.
            error = struct.unpack_from('!s', response, 8)[0]
            # TODO: issue warning instead
            raise RuntimeError('Unable to get scrape data: %s' % error)

        elif resp_action == ACTION_SCRAPE:
            seeds, complete, leeches = struct.unpack_from('!iii', response, 8)
            return seeds, complete, leeches