def print_info(file_array):
    # Decode and print one 16-byte MBR partition-table entry (Python 2:
    # print statements, single-byte str indexing).
    def get_type(num):
        # Map the partition-type byte to a human-readable name.
        # NOTE(review): raises KeyError for any type id not listed here.
        type = {
            0: "Unknown or empty",
            1: "12-bit FAT",
            4: "16-bit FAT (<32MB)",
            5: "Extended MS-DOS Partition",
            6: "FAT-16 (32MB to 2GB)",
            7: "NTFS",
            11: "FAT-32 (CHS)",
            12: "FAT-32 (LBA)",
            14: "FAT-16 (LBA)"
        }
        type_conv = type[num]
        return type_conv
    # Byte 4 of the entry is the partition-type id.
    tmp = unpack("<B", file_array[4])
    type = get_type(tmp[0])
    # Bytes 8-11: little-endian LBA of the partition's first sector.
    firstSectorAddress = unpack("<L", file_array[8:12])
    # Bytes 12-15: number of sectors in the partition.
    numSectors = unpack("<L", file_array[12:])
    print "Partition Type: \t", type
    print "Initial Sector: \t", firstSectorAddress[0]
    # Sector size assumed to be 512 bytes.
    print "Size of partition: \t", 512*numSectors[0], "bytes"
    return numSectors[0]
def parse(self):
    # Parse self.rbuff: a network-order field count (!L), then one type
    # code (!B) per field, then fixed-width records of length-prefixed
    # field values until self.len bytes are consumed (Python 2 strings).
    rpos = 0
    records = []
    field_num, = unpack('!L', self.rbuff[rpos:rpos + 4])
    rpos += 4
    field_types = []
    for i in xrange(field_num):
        type, = unpack('!B', self.rbuff[rpos:rpos + 1])
        field_types.append(type)
        rpos += 1
    while rpos < self.len:
        record = []
        for i in xrange(field_num):
            # Each value is prefixed by its byte length (!L).
            data_len, = unpack('!L', self.rbuff[rpos:rpos + 4])
            rpos += 4
            if data_len > 0:
                c_fmt = str(data_len) + 's'
                field_value, = unpack(c_fmt, self.rbuff[rpos:rpos + data_len])
                rpos += data_len
                # A single NUL byte encodes the empty string.
                if data_len == 1 and field_value == '\0':
                    record.append("")
                else:
                    record.append(field_value)
            else:
                # Zero length encodes NULL.
                record.append(None)
        records.append(record)
    return field_types, records
def do_response(self):
    # Read one response frame header, check it matches our request id,
    # then dispatch on the response code: 400-599 are error frames,
    # CLIENT_STATUS_MULTI_STATUS fans out to per-id sub-responses.
    rbuff = self.client.recv(FSIZE)
    magic_code, resp_code, id, reverse, length = unpack(FMT, rbuff)
    self.client.logger.debug("return resp_code[%d] length[%d],id[%d]" % (resp_code, length, id))
    if id != self.id:
        raise response_exception("id is error!")
    if length > 0:
        # Read the frame payload that follows the header.
        rbuff = self.client.recv(length)
    if 400 <= resp_code < 600:
        # Error frame: first 4 bytes of the payload are the error code.
        rpos = 0
        error_code, = unpack('!L', rbuff[rpos:rpos + 4])
        if resp_code == CLIENT_STATUS_DB_ERROR:
            raise response_exception("mysql return code [%d]" % error_code)
        if error_code < len(ERROR_MSG):
            raise response_exception(ERROR_MSG[error_code])
        else:
            raise response_exception("unkown error code [%d]" % error_code)
    elif resp_code == CLIENT_STATUS_MULTI_STATUS:
        # Batched request: collect one sub-response per pending id.
        ret = []
        for i in xrange(len(self.ids)):
            ret.append(self.do_single_reponse(i))
        return ret
    else:
        raise response_exception("unknown response code [%d]" % resp_code)
def do_single_reponse(self, idx):
    # Accumulate ACCEPT frames for request self.ids[idx] until an OK
    # frame arrives, then parse the concatenated payload. (Method name
    # typo "reponse" is kept: callers depend on it.)
    databuff = ""
    data_len = 0
    while True:
        rbuff = self.client.recv(FSIZE)
        magic_code, resp_code, id, reverse, length = unpack(FMT, rbuff)
        self.client.logger.debug("return resp_code[%d] length[%d],id[%d]" % (resp_code, length, id))
        if id != self.ids[idx]:
            raise response_exception("id is error!")
        rbuff = self.client.recv(length)
        if 400 <= resp_code < 600:
            # Error frame: first 4 bytes of the payload are the error code.
            rpos = 0
            error_code, = unpack('!L', rbuff[rpos:rpos + 4])
            if resp_code == CLIENT_STATUS_DB_ERROR:
                raise response_exception("mysql return code [%d]" % error_code)
            if error_code < len(ERROR_MSG):
                raise response_exception(ERROR_MSG[error_code])
            else:
                raise response_exception("unkown error code [%d]" % error_code)
        elif resp_code == CLIENT_STATUS_ACCEPT:
            # Partial payload: keep collecting frames.
            databuff += rbuff
            data_len += length
        elif resp_code == CLIENT_STATUS_OK:
            # Final frame: append it and parse the whole buffer.
            databuff += rbuff
            data_len += length
            self.client.logger.debug("return data_length[%d]" % data_len)
            return response(databuff, data_len).parse()
        else:
            raise response_exception("unknown response code [%d]" % resp_code)
def get_packet():
    # Sniff raw IP packets on the local interface and accumulate byte
    # counts for connections belonging to tracked processes.
    # SIO_RCVALL/RCVALL_ON are Windows-only socket options.
    host = socket.gethostbyname(socket.gethostname())
    s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP)
    s.bind((host, 0))
    # Configure the socket: include IP headers and enable promiscuous mode.
    s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
    s.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
    # net_data["unknow"] = 0
    while True:
        # Read one packet from the socket (includes our own traffic).
        buffer = s.recvfrom(65565)
        # Extract ports and IPs from the raw packet buffer.
        # NOTE(review): offsets 20-23 assume a 20-byte IP header (IHL=5)
        # and a TCP/UDP payload — confirm for options-bearing packets.
        port = _struct.unpack('HH', buffer[0][20:24])
        src_ip = "%d.%d.%d.%d" % _struct.unpack('BBBB', buffer[0][12:16])
        dest_ip = "%d.%d.%d.%d" % _struct.unpack('BBBB', buffer[0][16:20])
        src_port = socket.htons(port[0])
        dest_port = socket.htons(port[1])
        data_len = len(buffer[0])
        key = "%s:%d-%s:%d" % (src_ip, src_port, dest_ip, dest_port)
        # NOTE(review): both counters are reset on every packet, so only
        # the last packet's size is ever recorded — looks like a bug;
        # confirm intent before relying on these values.
        net_data['process_speed'] = 0
        net_data['total_speed'] = 0
        if key in process_connections:
            net_data['process_speed'] += data_len
            net_data['total_speed'] += data_len
def do_single_reponse(self,idx):
    # Collect the response frames for request self.ids[idx]: ACCEPT
    # frames carry partial payload, OK terminates and triggers parsing,
    # 400-599 raise. (Name typo "reponse" kept for callers.)
    databuff = ""
    data_len = 0
    while True:
        rbuff = self.client.recv(FSIZE)
        magic_code, resp_code, id, reverse, length = unpack(FMT, rbuff)
        self.client.logger.debug("return resp_code[%d] length[%d],id[%d]" % (resp_code, length, id))
        if id != self.ids[idx]:
            raise response_exception("id is error!")
        rbuff = self.client.recv(length)
        if 400 <= resp_code < 600:
            # Error frame: leading 4 bytes are the error code.
            rpos = 0
            error_code, = unpack('!L', rbuff[rpos:rpos + 4])
            if resp_code == CLIENT_STATUS_DB_ERROR:
                raise response_exception("mysql return code [%d]" % error_code)
            if error_code < len(ERROR_MSG):
                raise response_exception(ERROR_MSG[error_code])
            else:
                raise response_exception("unkown error code [%d]" % error_code)
        elif resp_code == CLIENT_STATUS_ACCEPT:
            # More frames follow; keep accumulating.
            databuff += rbuff
            data_len += length
        elif resp_code == CLIENT_STATUS_OK:
            # Terminal frame: parse everything gathered so far.
            databuff += rbuff
            data_len += length
            self.client.logger.debug("return data_length[%d]" % data_len)
            return response(databuff, data_len).parse()
        else:
            raise response_exception("unknown response code [%d]" % resp_code)
def main(n):
    """Micro-benchmark driver: unpack a packed 32-bit int n times.

    Returns the number of iterations actually performed (n, or 0 when
    n <= 0).
    """
    import _struct as struct
    import array
    packed = array.array('B', struct.pack('i', 42))
    done = 0
    while done < n:
        done += 1
        struct.unpack('i', packed)  # ID: unpack
    return done
def main(n):
    """Micro-benchmark driver: unpack a packed little-endian 32-bit int
    n times.

    Returns the number of iterations performed (n, or 0 when n <= 0).
    """
    import _struct as struct
    import array
    # bugfix: the 'c' typecode was removed in Python 3; 'B' holds the
    # same packed bytes and struct.unpack accepts either buffer on
    # Python 2 as well.
    a = array.array('B', struct.pack('i', 42))
    i = 0
    while i < n:
        i += 1
        struct.unpack('<i', a)  # ID: unpack
    return i
def test_passphrase(self, buffer, offset):
    # Try to interpret `buffer` as a TrueCrypt cached-passphrase record:
    # a <L length followed by a 65-byte NUL-padded passphrase string
    # (Python 2). Prints "offset:passphrase" on a match.
    if offset % 4 != 0:
        # Candidate records are 4-byte aligned; skip the rest.
        return
    # Extract the supposed length of the passphrase
    # and what should be the passphrase data.
    length,raw_data = unpack("<L65s", buffer)
    # The passphrase is stored in a C String so the
    # last character has to be null.
    maxLength = TC_PASSWORD_LENGTH - 1
    # Volatility version 2.0 throws errors if the string
    # is a single space or a tab (still printable).
    if length > maxLength or length < 2:
        return
    passphrase = raw_data[:length]
    # Reject if the text is not printable or the padding after the
    # claimed length is not all NUL bytes.
    if not is_printable(passphrase) or \
       not all_zero_chars(raw_data[length:]):
        return
    #print "Found TrueCrypt passphrase \"%s\" at 0x%x" \
    #    % (passphrase, offset)
    # To let a user pipe the passwords
    print "%d:%s" % (offset+4,passphrase)
def packager(self):
    """Attempt to create a datapoint using the byte stream input.

    This is a coroutine: feed bytes in one at a time via send(). It
    yields None while a packet is incomplete and yields a
    (timestamp, bitmask, (d0, d1, d2, d3)) tuple once a complete,
    in-range packet framed by the 0xAA delimiter has been assembled.
    """
    by = yield None  # get next byte; None because packet is not complete
    while 1:
        # Scan forward to the 0xAA frame delimiter.
        while by != 0xAA:
            by = yield None
        # Have delimiter, so collect data.
        tstamp = self.timestamp()
        # Collect 4 x 4-byte big-endian signed values.
        bys = bytearray()
        for _ibyte in range(4 * 4):
            b = yield None
            # bugfix: was bys.insert(b) — bytearray.insert requires an
            # index and raised TypeError on every byte.
            bys.append(b)
        # Check validity of the data in case we are out of sync with the
        # stream (a data byte of 0xAA mistaken for the delimiter).
        # bugfix: was d = [] followed by d[0], d[1], d[2], d[3] = ...,
        # which raised IndexError on the empty list.
        d = unpack(">iiii", bytes(bys))
        if any(dc < -20000 or dc > 20000 for dc in d):
            # Out of range, therefore the packet is invalid: resync.
            # bugfix: the original `continue` only advanced the inner
            # for-loop over values instead of restarting the delimiter
            # scan.
            by = yield None
            continue
        bitmask = 15  # 4 bits set: all 4 data values are valid
        by = yield (tstamp, bitmask, tuple(d))
def deleted_content(csa, file):
    """Return the first 16 bytes of content of the deleted file.

    csa is the byte offset of the file's content start within `file`.
    """
    with open(file, 'rb') as f:
        f.seek(csa)
        raw = f.read(16)  # first 16 bytes of the deleted file's content
    (chunk,) = unpack("16s", raw)
    return chunk
def __call__(self, value, ctx):
    """Deserialize an 8-byte big-endian signed integer.

    None passes through unchanged; malformed input raises
    SerializationError.
    """
    if value is None:
        return None
    try:
        unpacked = _struct.unpack('>q', value)
    except _struct.error as e:
        raise SerializationError(str(e))
    return unpacked[0]
def sha_update(sha_info, buffer):
    # Feed `buffer` (a Python 2 byte string) into the SHA state dict:
    # update the 64-bit bit count, complete any pending partial block,
    # transform each full block, and stash the trailing remainder.
    count = len(buffer)
    buffer_idx = 0
    # Add count*8 bits to the low counter word, carrying into the high word.
    clo = (sha_info['count_lo'] + (count << 3)) & 0xffffffff
    if clo < sha_info['count_lo']:
        sha_info['count_hi'] += 1
    sha_info['count_lo'] = clo
    sha_info['count_hi'] += (count >> 29)
    if sha_info['local']:
        # Partial block left over from a previous call: top it up.
        i = SHA_BLOCKSIZE - sha_info['local']
        if i > count:
            i = count
        # copy buffer
        for x in enumerate(buffer[buffer_idx:buffer_idx + i]):
            sha_info['data'][sha_info['local'] + x[0]] = struct.unpack(
                'B', x[1])[0]
        count -= i
        buffer_idx += i
        sha_info['local'] += i
        if sha_info['local'] == SHA_BLOCKSIZE:
            sha_transform(sha_info)
            sha_info['local'] = 0
        else:
            # Still not a full block; wait for more input.
            return
    while count >= SHA_BLOCKSIZE:
        # copy buffer — one full block at a time.
        sha_info['data'] = [
            struct.unpack('B', c)[0]
            for c in buffer[buffer_idx:buffer_idx + SHA_BLOCKSIZE]
        ]
        count -= SHA_BLOCKSIZE
        buffer_idx += SHA_BLOCKSIZE
        sha_transform(sha_info)
    # copy buffer — keep the trailing partial block for the next call.
    pos = sha_info['local']
    sha_info['data'][pos:pos + count] = [
        struct.unpack('B', c)[0]
        for c in buffer[buffer_idx:buffer_idx + count]
    ]
    sha_info['local'] = count
def cmd_done_evt_handler(param):
    """Record an HCI Command-Complete event and wake all waiters.

    param[1:3] is the little-endian opcode; the remainder is the status
    payload stored in the shared done_events table under that opcode.
    """
    cmd_done_condition.acquire()
    opcode, = unpack("<H", param[1:3])
    done_events[opcode] = param[3:]
    # Post the event into the table, then notify everyone blocked in wait().
    cmd_done_condition.notify_all()
    cmd_done_condition.release()
def test_struct_bool():
    """Packing a falsy object with '?' must round-trip to False under
    every byte-order prefix."""
    import _struct
    for prefix in ("<", ">", "!", "=", ""):
        fmt = prefix + '?'
        packed_value = _struct.pack(fmt, {})
        values = _struct.unpack(fmt, packed_value)
        AreEqual(len(values), 1)
        AssertFalse(values[0])
def deleted_file_info(root_directory, root_directory_size):
    # Scan 32-byte FAT directory entries for the first deleted entry
    # (first byte 0xE5), print its metadata and return its starting
    # cluster (Python 2 print statements and str indexing).
    index = 0
    file_name = []
    for i in range(0, root_directory_size):
        tmp = unpack("<B", root_directory[index])
        if tmp[0] == 229:
            # 0xE5 marks a deleted entry: 8.3 name, cluster, size fields.
            file_name = unpack("<11s", root_directory[index:index+11])
            starting_cluster = unpack("<H", root_directory[(index + 26):(index + 28)])
            file_size = unpack("<I", root_directory[(index + 28):(index + 32)])
            print "File Name:\t\t", file_name[0]
            print "Size of the file: \t", file_size[0]
            print "Initial Cluster: \t", starting_cluster[0]
            return starting_cluster[0]
        else:
            # Not deleted: advance to the next 32-byte directory entry.
            index += 32
def deleted_file_info(root_directory, root_directory_size):
    # Walk the FAT root directory in 32-byte entries; on the first
    # deleted entry (lead byte 0xE5) print name/size/cluster and return
    # the starting cluster (Python 2).
    index = 0
    file_name = []
    for i in range(0, root_directory_size):
        tmp = unpack("<B", root_directory[index])
        if tmp[0] == 229:
            # Deleted entry found: decode 8.3 name, cluster and size.
            file_name = unpack("<11s", root_directory[index:index + 11])
            starting_cluster = unpack(
                "<H", root_directory[(index + 26):(index + 28)])
            file_size = unpack("<I", root_directory[(index + 28):(index + 32)])
            print "File Name:\t", file_name[0]
            print "Size of the file: \t", file_size[0]
            print "Initial Cluster: \t", starting_cluster[0]
            return starting_cluster[0]
        else:
            # Skip to the next directory entry.
            index += 32
def get_root_directory(initial_sector, file):
    """Read the FAT root-directory region and return it as a byte string.

    The root directory starts after the reserved area and the FAT
    copies, all measured in 512-byte sectors from the partition start.
    """
    vbr = volume_boot_sector(initial_sector, file)  # parse the VBR once
    root_directory_offset = (get_size_reserved_area(vbr)
                             + get_fat_area(vbr)
                             + initial_sector) * 512
    root_dir_size = get_size_root_dir(vbr) * 512
    with open(file, 'rb') as f:
        f.seek(root_directory_offset)  # go to the root directory sector
        # bugfix: readline() stops at the first newline byte, silently
        # truncating binary data; read() returns exactly root_dir_size
        # bytes (and then unpack's size check holds).
        root_dir_array = f.read(root_dir_size)
    fmt = str(root_dir_size) + 's'
    tmp = unpack(fmt, root_dir_array)
    return tmp[0]
def sha_update(sha_info, buffer):
    # Absorb `buffer` (Python 2 byte string) into the SHA state: bump
    # the 64-bit message bit counter, finish any pending partial block,
    # process whole blocks, and save the remainder in sha_info['local'].
    count = len(buffer)
    buffer_idx = 0
    # Low word accumulates count*8 bits; detect wrap to carry into hi.
    clo = (sha_info['count_lo'] + (count << 3)) & 0xffffffff
    if clo < sha_info['count_lo']:
        sha_info['count_hi'] += 1
    sha_info['count_lo'] = clo
    sha_info['count_hi'] += (count >> 29)
    if sha_info['local']:
        # Fill up the partially-buffered block from the previous call.
        i = SHA_BLOCKSIZE - sha_info['local']
        if i > count:
            i = count
        # copy buffer
        for x in enumerate(buffer[buffer_idx:buffer_idx+i]):
            sha_info['data'][sha_info['local']+x[0]] = struct.unpack('B', x[1])[0]
        count -= i
        buffer_idx += i
        sha_info['local'] += i
        if sha_info['local'] == SHA_BLOCKSIZE:
            sha_transform(sha_info)
            sha_info['local'] = 0
        else:
            # Block still incomplete; nothing more to do.
            return
    while count >= SHA_BLOCKSIZE:
        # copy buffer — process one complete block per iteration.
        sha_info['data'] = [struct.unpack('B',c)[0] for c in buffer[buffer_idx:buffer_idx + SHA_BLOCKSIZE]]
        count -= SHA_BLOCKSIZE
        buffer_idx += SHA_BLOCKSIZE
        sha_transform(sha_info)
    # copy buffer — stash the tail for the next update call.
    pos = sha_info['local']
    sha_info['data'][pos:pos+count] = [struct.unpack('B',c)[0] for c in buffer[buffer_idx:buffer_idx + count]]
    sha_info['local'] = count
def read(self, tp, qty=1):
    # Read the next value(s) from this buffer starting at self.poss.
    # tp is a (format_char, byte_size) pair from `tipo`; tipo.string is
    # special-cased as a '\0\0\0'-terminated string.
    if tp == tipo.string:
        sep = '\0\0\0'
        i = self.poss
        # Advance the cursor past the string and its terminator.
        self.poss += self[self.poss:].find(sep) + len(sep)
        # Drop NUL bytes from the slice before the str() conversion.
        return str(
            Bytes(filter(lambda a: a != 0, self[i:self.poss - len(sep)])))
    else:
        # Fixed-size read: advance the cursor, then unpack little-endian
        # from the region just skipped.
        self.poss += (tp[1] * qty)
        r = unpack('<' + (tp[0] * qty),
                   self[self.poss - (tp[1] * qty):self.poss])
        # Single values are returned bare; multiples as a tuple.
        return r if qty > 1 else r[0]
def load_traffic(self, path):
    # Load simulated traffic from <path>/simulation_traffic_data.bin.
    # Per-vehicle record layout: six floats (v, x, y, heading, length,
    # width), an int-prefixed type string, then an int count of route
    # entries, each an int-prefixed string. Returns {str(id): {...}},
    # or None when no path is given.
    if path is not None:
        traffic = {}
        with open(path + '/simulation_traffic_data.bin', 'rb') as traffic_data:
            fmt = '6f'
            buffer = traffic_data.read(struct.calcsize(fmt))
            # print(fmt),
            id = 0
            while len(buffer) > 0:
                # Read the vehicle pose (six float fields).
                v, x, y, heading, length, width = struct.unpack(
                    fmt, buffer)
                # Read the vehicle type (string field).
                fmt = 'i'
                name_length = struct.unpack(
                    fmt, traffic_data.read(struct.calcsize(fmt)))[0]  # length of the type name
                # print(fmt),
                fmt = str(name_length) + 's'
                type = struct.unpack(
                    fmt, traffic_data.read(struct.calcsize(fmt)))[0]
                # print(fmt),
                # Read the vehicle route (list of strings).
                route = []
                fmt = 'i'
                name_length = struct.unpack(
                    fmt, traffic_data.read(struct.calcsize(fmt)))[0]  # number of route entries
                # print(name_length),
                for i in range(name_length):
                    fmt = 'i'
                    route_length = struct.unpack(
                        fmt, traffic_data.read(
                            struct.calcsize(fmt)))[0]  # length of this route name
                    # print(fmt),
                    fmt = str(route_length) + 's'
                    route.append(
                        struct.unpack(
                            fmt, traffic_data.read(
                                struct.calcsize(fmt)))[0].decode())
                    # print(fmt),
                # NOTE(review): the integer keys look like protocol field
                # ids; confirm their meaning against the consumer.
                traffic[str(id)] = {
                    64: v,
                    66: (x, y),
                    67: heading,
                    68: length,
                    77: width,
                    79: type.decode(),
                    87: route
                }
                id += 1
                # Try to read the next vehicle's pose block.
                fmt = '6f'
                buffer = traffic_data.read(struct.calcsize(fmt))
                # print(fmt),
        return traffic
    else:
        return None
def get_root_directory(partition_sector, file):
    """Read the FAT root-directory region and return it as a byte string.

    The root directory follows the reserved area and the FAT copies,
    all measured in 512-byte sectors from the start of the partition.
    """
    vbr = volume_boot_sector(partition_sector, file)  # parse the VBR once
    root_directory_offset = (get_size_reserved_area(vbr)
                             + get_fat_area(vbr)
                             + partition_sector) * 512
    # bugfix: the format string referenced undefined `initial_sector`;
    # this function's parameter is `partition_sector` (NameError at
    # runtime in the original).
    root_dir_size = get_size_root_dir(vbr) * 512
    with open(file, 'rb') as f:
        f.seek(root_directory_offset)  # go to the root directory sector
        # bugfix: readline() stops at the first newline byte and can
        # truncate binary data; read() returns exactly root_dir_size bytes.
        root_dir_array = f.read(root_dir_size)
    fmt = str(root_dir_size) + 's'
    tmp = unpack(fmt, root_dir_array)
    return tmp[0]
def send_cmd(self, ocf, ogf, args):
    # Send one HCI command over the USB control endpoint and block
    # until the matching Command-Complete event arrives; raises on a
    # non-zero HCI status byte.
    # for now only supporting USB
    opcode = (ocf << 10) | ogf
    # HCI command packet: little-endian opcode, parameter length, params.
    msg = pack("<HB", opcode, len(args)) + args
    self.dev.ctrl_transfer(0x20, 0x00, 0x00, 0x00, msg)
    # wait for an event
    ret_status = None
    while ret_status is None:
        cmd_done_condition.acquire()
        cmd_done_condition.wait()
        # The event thread posts completions into done_events by opcode.
        if opcode in done_events:
            ret_status = done_events[opcode]
            del done_events[opcode]
        cmd_done_condition.release()
    # First status byte is the HCI error code; 0 means success.
    err_code = unpack("<B", ret_status[0:1])[0]
    if err_code != 0:
        raise Exception("HCI command returned %d" % err_code)
def b32encode(s):
    """Encode a string using Base32.

    s is the string to encode.  The encoded string is returned.
    (Python 2: operates on byte strings; uses module-level _b32tab and
    EMPTYSTRING.)
    """
    parts = []
    # Base32 works on 40-bit (5-byte) quanta.
    quanta, leftover = divmod(len(s), 5)
    # Pad the last quantum with zero bits if necessary
    if leftover:
        s += ('\0' * (5 - leftover))
        quanta += 1
    for i in range(quanta):
        # c1 and c2 are 16 bits wide, c3 is 8 bits wide.  The intent of this
        # code is to process the 40 bits in units of 5 bits.  So we take the 1
        # leftover bit of c1 and tack it onto c2.  Then we take the 2 leftover
        # bits of c2 and tack them onto c3.  The shifts and masks are intended
        # to give us values of exactly 5 bits in width.
        c1, c2, c3 = struct.unpack('!HHB', s[i * 5:(i + 1) * 5])
        c2 += (c1 & 1) << 16  # 17 bits wide
        c3 += (c2 & 3) << 8  # 10 bits wide
        parts.extend([
            _b32tab[c1 >> 11],  # bits 1 - 5
            _b32tab[(c1 >> 6) & 0x1f],  # bits 6 - 10
            _b32tab[(c1 >> 1) & 0x1f],  # bits 11 - 15
            _b32tab[c2 >> 12],  # bits 16 - 20 (1 - 5)
            _b32tab[(c2 >> 7) & 0x1f],  # bits 21 - 25 (6 - 10)
            _b32tab[(c2 >> 2) & 0x1f],  # bits 26 - 30 (11 - 15)
            _b32tab[c3 >> 5],  # bits 31 - 35 (1 - 5)
            _b32tab[c3 & 0x1f],  # bits 36 - 40 (1 - 5)
        ])
    encoded = EMPTYSTRING.join(parts)
    # Adjust for any leftover partial quanta
    if leftover == 1:
        return encoded[:-6] + '======'
    elif leftover == 2:
        return encoded[:-4] + '===='
    elif leftover == 3:
        return encoded[:-3] + '==='
    elif leftover == 4:
        return encoded[:-1] + '='
    return encoded
def __init__(self, data):
    """Parse a raw IPv4 header from `data`; the payload after the
    header goes into self.data."""
    header_fields = unpack('!BBHHHBBH4s4s', data[0:20])
    # Low nibble of the first byte: header length in 32-bit words.
    self.ihl = header_fields[0] & 0b1111
    header_length = self.ihl * 4
    # Protocol number (e.g. 6 = TCP, 17 = UDP).
    self.protocol = header_fields[6]
    # Source and destination addresses as dotted-quad strings.
    self.source_addr = socket.inet_ntoa(header_fields[8])
    self.destination_addr = socket.inet_ntoa(header_fields[9])
    self.data = data[header_length:]
def acl_read_thread(dev):
    # Continuously read ACL data packets from HCI USB endpoint 0x82 and
    # dispatch each payload to the per-connection L2CAP parser
    # (Python 2; runs as a daemon thread over the global hci_device).
    global hci_device
    while True:
        #time.sleep(1)
        #hci_device.ctrl_transfer(0x20, 0x00, 0x00, 0x00, msg_empy)
        #hci_device.ctrl_transfer(0x20, 0x00, 0x00, 0x00, msg_empy)
        try:
            packet = (hci_device.read(0x82, 256 + 2, timeout=0)).tostring()
            # ACL header: little-endian connection handle and length.
            (handle, length) = unpack("<HH", packet[0:4])
            handle &= 0xFFFF
            payload = packet[4:4 + length]
            # Lazily create one l2cap parser per connection handle.
            if not handle in l2cap_handles:
                l2cap_handles[handle] = l2cap.l2cap(handle)
            l2cap_handles[handle].process_packet(payload)
        except:
            # NOTE(review): bare except logs and re-raises, so any error
            # (including KeyboardInterrupt) kills the thread.
            print "exception ..."
            raise
def __init__(self, data):
    """Parse a raw TCP header from `data`; the payload after the header
    goes into self.data (wrapped in Bytes)."""
    header_fields = unpack('!HHLLHHHH', data[0:20])
    # Data offset: top 4 bits of the 13th/14th header bytes, in words.
    self.offset = header_fields[4] >> 12
    header_length = self.offset * 4
    self.data = Bytes(data[header_length:])
def packsize(self, sbinary):
    """Decode a network-order unsigned int from exactly
    self.SBIN_SIZE bytes; raise PackerDecodeError otherwise."""
    if not sbinary or len(sbinary) != self.SBIN_SIZE:
        raise PackerDecodeError('Error size')
    return struct.unpack('!I', sbinary)[0]
def _floatconstants(): nan, = struct.unpack('>d', b'\x7f\xf8\x00\x00\x00\x00\x00\x00') inf, = struct.unpack('>d', b'\x7f\xf0\x00\x00\x00\x00\x00\x00') return nan, inf, -inf
def get_fat_area(vbr):
    """Total FAT-area size in sectors.

    vbr is the raw volume boot record; byte 16 holds the number of FAT
    copies and bytes 22-23 the sectors per FAT (little-endian).
    """
    # bugfix: vbr[16] yields an int on Python 3 and unpack rejects it;
    # a one-byte slice works on both Python 2 and 3.
    fat_copies = unpack("<B", vbr[16:17])
    fat_area = unpack("<H", vbr[22:24])
    return fat_area[0] * fat_copies[0]
def process_packet(self, packet):
    """Route one L2CAP frame to its channel.

    The 4-byte header is (<HH): payload length, then channel id; the
    payload is forwarded to self.channels[cid].
    """
    length, cid = unpack("<HH", packet[0:4])
    body = packet[4:4 + length]
    self.channels[cid].process_packet(body)
def unpackInt32(self, payload):
    """Decode a 4-byte little-endian unsigned integer from payload."""
    (value,) = _struct.unpack('<I', payload)
    return value
if __name__ == "__main__":
    # Pack four 16-bit servo values, send them over the serial port,
    # then echo whatever the device sends back.
    mode = 0
    angle1 = 45
    angle2 = 50
    angle3 = 55
    msg2 = stc.pack("HHHH", mode, angle1, angle2, angle3)
    ser = serial.Serial('COM3', 115200, timeout=1)
    ser.flush()
    print("connected")
    print(stc.unpack("HHHH", msg2))
    ser.write(msg2)
    print("sent")
    while True:
        if ser.in_waiting > 0:
            line = ser.readline()
            line = line.decode("utf-8").rstrip()
            #print(sys.getsizeof(line))
            #msg = stc.unpack("hhh", line)
            print(line)
    # NOTE(review): ser.close() is unreachable after `while True`; the
    # original (collapsed) indentation is ambiguous — confirm where the
    # close was meant to go.
    ser.close()
def _floatconstants(): nan = struct.unpack('>d', b'\x7f\xf8\x00\x00\x00\x00\x00\x00') inf = struct.unpack('>d', b'\x7f\xf0\x00\x00\x00\x00\x00\x00') nan = nan[0] inf = inf[0] return nan, inf, -inf
def unpackInt16(self, payload):
    """Decode a 2-byte little-endian unsigned integer from payload."""
    (value,) = _struct.unpack('<H', payload)
    return value
def get_size_root_dir(vbr):
    """Root-directory size in 512-byte sectors.

    Bytes 17-18 of the volume boot record hold the maximum number of
    root-directory entries; each entry is 32 bytes.
    """
    tmp = unpack("<H", vbr[17:19])
    # bugfix: `/` is float division on Python 3; `//` keeps the integer
    # sector count the original Python 2 code produced.
    return tmp[0]*32//512
def get_first_sector_address(file_array):
    """Little-endian 32-bit LBA of the partition's first sector,
    taken from bytes 8-11 of the partition-table entry."""
    (address,) = unpack("<L", file_array[8:12])
    return address
def get_n_clusters_per_sector(vbr):
    """Sectors-per-cluster value stored at byte 13 of the volume boot
    record. (Name kept for callers even though it reads backwards.)
    """
    # bugfix: vbr[13] yields an int on Python 3 and unpack rejects it;
    # a one-byte slice works on both Python 2 and 3.
    tmp = unpack("<B", vbr[13:14])
    return tmp[0]
def get_size_root_dir(vbr):
    """Root-directory size in 512-byte sectors.

    Bytes 17-18 of the volume boot record hold the maximum number of
    32-byte root-directory entries.
    """
    tmp = unpack("<H", vbr[17:19])
    # bugfix: `/` is float division on Python 3; `//` preserves the
    # integer result the original Python 2 code produced.
    return tmp[0] * 32 // 512
def bite2IP(data):
    """Format the first four bytes of `data` as a dotted-quad IPv4
    string, e.g. b'\\xc0\\xa8\\x00\\x01' -> '192.168.0.1'."""
    # bugfix: indexing a bytes object yields ints on Python 3, so
    # unpack('!B', data[0]) raised TypeError there; one unpack of a
    # 4-byte slice works on Python 2 and 3 alike.
    octets = unpack('!4B', data[:4])
    return '%d.%d.%d.%d' % octets
def get_size_reserved_area(vbr):
    """Reserved-area size in sectors, from bytes 14-15 of the volume
    boot record (little-endian)."""
    (reserved,) = unpack("<H", vbr[14:16])
    return reserved
def get_n_sectors_per_cluster(vbr):
    """Sectors-per-cluster value stored at byte 13 of the volume boot
    record."""
    # bugfix: vbr[13] yields an int on Python 3 and unpack rejects it;
    # a one-byte slice works on both Python 2 and 3.
    tmp = unpack("<B", vbr[13:14])
    return tmp[0]
def ReadMemoryEx(ptr, fmt):
    """Read struct.calcsize(fmt) bytes at ptr via ReadMemory and unpack
    them with fmt; return None when ptr is null or the read fails."""
    if not ptr:
        return None
    raw = ReadMemory(ptr, struct.calcsize(fmt))
    if not raw:
        return None
    return struct.unpack(fmt, raw)
def chk_attr_Datatype(self, attr_dataType, attr_data):
    """Convert an Android binary-XML attribute value per its type byte.

    Returns an int for booleans/ints, "#XXXXXXXX" / "0xXXXXXXXX"
    strings for colors and attribute references, the unpack tuple for
    TYPE_FLOAT, a float for TYPE_FRACTION, a string-pool lookup for
    TYPE_STRING, 0 for TYPE_NULL, and the raw bytes for unknown types.
    """
    TYPE_NULL = b"\x00"
    TYPE_REFERENCE = b"\x01"
    TYPE_ATTRIBUTE = b"\x02"
    TYPE_STRING = b"\x03"
    TYPE_FLOAT = b"\x04"
    TYPE_DIMENSION = b"\x05"
    TYPE_FRACTION = b"\x06"
    TYPE_FIRST_INT = b"\x10"
    TYPE_INT_DEC = b"\x10"
    TYPE_INT_HEX = b"\x11"
    TYPE_INT_BOOLEAN = b"\x12"
    TYPE_FIRST_COLOR_INT = b"\x1c"
    TYPE_INT_COLOR_ARGB8 = b"\x1c"
    TYPE_INT_COLOR_RGB8 = b"\x1d"
    TYPE_INT_COLOR_ARGB4 = b"\x1e"
    TYPE_INT_COLOR_RGB4 = b"\x1f"
    TYPE_LAST_COLOR_INT = b"\x1f"
    TYPE_LAST_INT = b"\x1f"
    if TYPE_INT_BOOLEAN == attr_dataType:
        # Boolean: all-zero data is False (0), anything else True (1).
        if attr_data == b"\x00\x00\x00\x00":
            return 0
        else:
            return 1
    # bugfix: the original tested `(A or B or ...) == attr_dataType`,
    # which only ever compared the first constant of each chain; a
    # membership test checks every color / int type byte.
    elif attr_dataType in (TYPE_FIRST_COLOR_INT, TYPE_INT_COLOR_ARGB8,
                           TYPE_INT_COLOR_RGB8, TYPE_INT_COLOR_ARGB4,
                           TYPE_INT_COLOR_RGB4, TYPE_LAST_COLOR_INT):
        val = "#%08X" % self.bytesToint(attr_data)
        return val
    elif attr_dataType in (TYPE_INT_DEC, TYPE_FIRST_INT, TYPE_LAST_INT):
        return self.bytesToint(attr_data)
    elif TYPE_INT_HEX == attr_dataType:
        return self.bytesToint(attr_data)
    elif TYPE_FLOAT == attr_dataType:
        return _struct.unpack('f', attr_data)
    elif TYPE_FRACTION == attr_dataType:
        # Fraction is stored as a float scaled by 2^31 - 1.
        val1 = _struct.unpack('f', attr_data)[0]
        val2 = 0x7fffffff
        return float(val1 / val2)
    elif TYPE_STRING == attr_dataType:
        return self.getString(attr_data)
    elif TYPE_ATTRIBUTE == attr_dataType:
        val = "0x%08X" % self.bytesToint(attr_data)
        return val
    elif TYPE_NULL == attr_dataType:
        return 0
    else:
        # Unknown type byte: hand back the raw value unchanged.
        return attr_data
sendData = _struct.pack("!H8sb5sb", 1, "test.jpg", 0, "octet", 0) # 发送下载文件请求数据到指定服务器 sendAddr = (ip, 69) udpSocket.sendto(sendData, sendAddr) p_num = 0 recvFile = '' while True: recvData, recvAddr = udpSocket.recvfrom(1024) recvDataLen = len(recvData) cmdTuple = _struct.unpack("!HH", recvData[:4]) cmd = cmdTuple[0] currentPackNum = cmdTuple[1] if cmd == 3: # 是否为数据包 # 如果是第一次接收到数据,那么就创建文件 if currentPackNum == 1: recvFile = open("test.jpg", "a") # 包编号是否和上次相等 if p_num + 1 == currentPackNum: recvFile.write(recvData[4:]) p_num += 1 print("(%d)次接收到的数据" % p_num)
def deleted_content(csa, file):
    """Return the first 16 bytes of content of the deleted file.

    csa is the byte offset of the content start within `file`.
    """
    handle = open(file, 'rb')
    try:
        handle.seek(csa)
        raw = handle.read(16)  # first 16 bytes of the deleted file
    finally:
        handle.close()
    return unpack("16s", raw)[0]