class VirtioNetworkHeader(object):
    """In-memory representation of a virtio network device header.

    Packs six numeric fields into a native-order ``B B H H H H`` layout
    (2 one-byte fields followed by 4 two-byte fields, 10 bytes total).
    """

    # Native byte order/alignment: flags, gso_type (1 byte each),
    # then header_len, gso_size, csum_start, csum_offset (2 bytes each).
    data_format = 'B B H H H H'

    def __init__(self, flags=0, gso_type=0, header_len=0, gso_size=0,
                 csum_start=0, csum_offset=0):
        self.flags = flags
        self.gso_type = gso_type
        self.header_len = header_len
        self.gso_size = gso_size
        self.csum_start = csum_start
        self.csum_offset = csum_offset
        # Pre-compiled layout shared by to_buffer().
        self.struct = Struct(self.data_format)

    def to_buffer(self, buffer, offset=0):
        """Pack all header fields into *buffer* starting at *offset*."""
        self.struct.pack_into(buffer, offset,
                              self.flags, self.gso_type, self.header_len,
                              self.gso_size, self.csum_start, self.csum_offset)

    def __len__(self):
        return self.byte_size()

    @staticmethod
    def byte_size():
        """Return the packed size of the header in bytes.

        Fixed: previously hard-coded to 10, which could silently drift if
        ``data_format`` ever changed; now derived from the format itself
        (still 10 for 'B B H H H H').
        """
        return Struct(VirtioNetworkHeader.data_format).size
def purify_extra_data(mm, offset, length):
    """Blank out identifying ZIP extra-field data in-place.

    Walks the extra-field records in buffer *mm* between *offset* and
    *offset + length*. Timestamp (0x5455) and Unix UID/GID (0x7875)
    records are overwritten with a 0xFFFF marker header and 0xff filler.

    Returns True when every record was recognized, False on the first
    unknown record id (leaving the remainder untouched).
    """
    extra_header_struct = Struct("<HH")  # 0. id  # 1. length
    STRIPZIP_OPTION_HEADER = 0xFFFF
    EXTENDED_TIME_DATA = 0x5455
    # Some sort of extended time data, see
    # ftp://ftp.info-zip.org/pub/infozip/src/zip30.zip ./proginfo/extrafld.txt
    # fallthrough
    UNIX_EXTRA_DATA = 0x7875
    # Unix extra data; UID / GID stuff, see
    # ftp://ftp.info-zip.org/pub/infozip/src/zip30.zip ./proginfo/extrafld.txt
    mlen = offset + length
    while offset < mlen:
        values = list(extra_header_struct.unpack_from(mm, offset))
        _, header_length = values
        extra_struct = Struct("<HH" + "B" * header_length)
        values = list(extra_struct.unpack_from(mm, offset))
        header_id, header_length, rest = values[0], values[1], values[2:]
        if header_id in (EXTENDED_TIME_DATA, UNIX_EXTRA_DATA):
            values[0] = STRIPZIP_OPTION_HEADER
            # Fix: was `xrange`, which only exists on Python 2; the sibling
            # implementations of this routine already use `range`.
            for i in range(2, len(values)):
                values[i] = 0xff
            extra_struct.pack_into(mm, offset, *values)
        elif header_id != STRIPZIP_OPTION_HEADER:
            # Unknown record: refuse rather than corrupt data we don't understand.
            return False
        offset += extra_header_struct.size + header_length
    return True
def pack_struct(self, struct_obj: struct.Struct, elem_len: int,
                data: bytearray, data_offset: int, array_len: int):
    """
    Calls the pack_into method of the struct_obj object with some added
    intelligence.

    Packs ``self.value`` (or ``self.value[self.key]`` when the value is a
    dict) into *data* starting at *data_offset*. When *array_len* > 0 the
    value must be a list and each element is packed at successive
    *elem_len* strides; otherwise the scalar value is packed once.

    Returns the offset just past the packed data.
    Raises RuntimeError when a dict value has no key, ValueError on a
    non-list or too-short list.
    """
    if isinstance(self.value, dict):
        if self.key is None:
            raise RuntimeError('key must not be None')
        value = self.value[self.key]
    else:
        value = self.value
    if array_len > 0:
        if not isinstance(value, list):
            raise ValueError('value must be a list, got {}'.format(
                str(value)))
        # BUG FIX: previously checked len(self.value) and reported
        # repr(self.value). When self.value is a dict, that measured the
        # dict's key count rather than the extracted list, so a short list
        # slipped through and crashed with IndexError in the pack loop.
        if len(value) < array_len:
            raise ValueError(
                'Not enough elements in list value list {0}. Expected {1} items'
                .format(repr(value), array_len))
        for i in range(array_len):
            struct_obj.pack_into(data, data_offset, value[i])
            data_offset += elem_len
    else:
        struct_obj.pack_into(data, data_offset, value)
        data_offset += elem_len
    return data_offset
def test_struct_translations(self):
    """Pack two scaled (i, i, i, f, f) records and compare the native dump
    against the expected fixture."""
    component_struct = Struct('iiiff')
    record_size = component_struct.size
    raw = ctypes.create_string_buffer(record_size * 2)
    # Record j holds the base tuple (1, 2, 3, 4, 5) scaled by j+1.
    for scale in (1, 2):
        component_struct.pack_into(raw, record_size * (scale - 1),
                                   scale, 2 * scale, 3 * scale,
                                   4 * scale, 5 * scale)
    self.assertEqual(test_dump_dist_comp(raw), EXPECTED_3)
def pack_pa_stream(self, tcbuffer, message, message_data_type): """ Converts the TCBuffer object and message into Policy Agent stream. This function takes the parameters such as request size, request id, request status and packs them into a structure format as required by VRTM server. """ # Make byte array of size equal to length of xml string + 12 Bytes header # for C to understand how much data is needed to be read. buffer_ = array('B', '\0' * (tcbuffer.get_m_reqSize() + TCBuffer.SIZE)) # Make structure object in Python indicating template for size and data types # of xml string & parameters to be sent to VRTM server. struct_obj = Struct( self.__format(tcbuffer.get_m_reqSize(), message_data_type)) arguments = [0] + tcbuffer.list() + [message] # Pack structure object with list of parameters(req_id, req_length, req_status) and xml string characters. struct_obj.pack_into(buffer_, *arguments) return buffer_
def purify_extra_data(mm, offset, length, compressed_size=0):
    """Scrub identifying ZIP extra-field records in-place.

    Iterates the extra-field records of buffer ``mm`` in the window
    [offset, offset + length). Timestamp (0x5455) and Unix UID/GID
    (0x7875) records are replaced by a 0xFFFF marker header followed by
    0xff filler bytes. A ZIP64 record (0x0001) supplies an 8-byte
    compressed size that overrides the caller-provided one.

    Returns the (possibly updated) compressed size.
    """
    header_fmt = Struct("<HH")  # (id, length)
    STRIPZIP_OPTION_HEADER = 0xFFFF
    EXTENDED_TIME_DATA = 0x5455
    # Extended time data, see
    # ftp://ftp.info-zip.org/pub/infozip/src/zip30.zip ./proginfo/extrafld.txt
    UNIX_EXTRA_DATA = 0x7875
    # Unix extra data; UID / GID stuff, see
    # ftp://ftp.info-zip.org/pub/infozip/src/zip30.zip ./proginfo/extrafld.txt
    ZIP64_EXTRA_HEADER = 0x0001
    # ZIP64: header_id 0x0001, header_length 0x0010 [16], then an 8-byte
    # uncompressed_length and an 8-byte compressed_length. When present, the
    # 8-byte compressed length overrides the 4-byte one in canonical zips.
    zip64_fmt = Struct("<HHQQ")
    window_end = offset + length
    while offset < window_end:
        record_id, record_len = header_fmt.unpack_from(mm, offset)
        if record_id in (EXTENDED_TIME_DATA, UNIX_EXTRA_DATA):
            # Keep the original length so the record layout is preserved,
            # but mark the id and blank the payload.
            body_fmt = Struct("<HH" + "B" * record_len)
            scrubbed = [STRIPZIP_OPTION_HEADER, record_len] + [0xff] * record_len
            body_fmt.pack_into(mm, offset, *scrubbed)
        if record_id == ZIP64_EXTRA_HEADER:
            assert record_len == 16
            _, _, _, compressed_size = zip64_fmt.unpack_from(mm, offset)
        offset += header_fmt.size + record_len
    return compressed_size
def tabser(filename, body, data):
    """Serialize data to file"""
    # XXX checksums ignored
    # Fixed 64 KiB image: 12-byte header, then len(data) packed rows.
    header = Struct("!BiHBxxxB")
    row_struct = Struct(body)
    # foot = Struct("!4s") -- footer/checksum currently unused
    image = bytearray(2 ** 16)
    # Header: version 0, unix timestamp, row count, row size, reserved 0.
    header.pack_into(image, 0, 0, int(time()), len(data), row_struct.size, 0)
    position = header.size
    for record in data:
        # Each row gets a trailing 0 appended to its fields.
        row_struct.pack_into(image, position, *record, 0)
        position += row_struct.size
    print("write %d rows" % len(data))
    # offset = 2 ** 16 - foot.size
    # foot.pack_into(image, offset, bytes([0, 0, 0, 0]))
    with open(filename, "wb") as fh:
        fh.write(image)
def _build_native_component_offsets(self):
    """Build a ctypes buffer of (id, start, length) int triples, one per
    possible component id; ids with no component get (id, -1, -1)."""
    triple = Struct('iii')
    atom_ids = self.get_component_atom_ids()
    # Table is indexed by id, so it must span 0..max(id) inclusive.
    slot_count = max(atom_ids) + 1 if len(atom_ids) > 0 else 0
    raw = ctypes.create_string_buffer(triple.size * slot_count)
    for component_id in range(slot_count):
        if component_id in atom_ids:
            begin, finish = self.get_component_range(component_id)
            entry = (component_id, begin, finish - begin)
        else:
            # Gap in the id space: mark as absent.
            entry = (component_id, -1, -1)
        triple.pack_into(raw, triple.size * component_id, *entry)
    return raw
class IxgbeStruct(object):
    """Pairs a raw writable buffer with a compiled struct layout and
    offers field-level and whole-record pack/unpack helpers."""

    def __init__(self, buffer, fmt):
        self.buffer = buffer
        self.data_struct = Struct(fmt)

    def _pack_into(self, value, field_format, prefix=''):
        # The packed size of `prefix` is the byte offset of the field.
        field_offset = calcsize(prefix)
        pack_into(field_format, self.buffer, field_offset, value)

    def _unpack(self):
        return self.data_struct.unpack(self.buffer)

    def __len__(self):
        return self.data_struct.size

    def pack(self, *args):
        """Pack *args* as a full record at the start of the buffer."""
        self.data_struct.pack_into(self.buffer, 0, *args)

    @classmethod
    def byte_size(cls):
        # NOTE(review): depends on a `data_format` class attribute that this
        # base class never defines — presumably supplied by subclasses;
        # confirm before calling on IxgbeStruct itself.
        return calcsize(cls.data_format)
def encode(data):
    """Compress *data* with an LZ-style scheme using 16-bit little-endian
    flag words: each flag bit selects either a raw byte (0) or a 2-byte
    back-reference (1) whose word packs an 11-bit distance and the match
    length minus 3 in the upper 5 bits.

    Output layout: 0x01 marker byte, 16-bit uncompressed size, then
    interleaved flag words and token stream. Relies on the module-level
    `_search(data, pos, sz)` helper to find the best earlier match.
    """
    from struct import Struct
    HW = Struct("<H")  # little-endian 16-bit words (size field, flags, refs)
    cap = 0x22  # NOTE(review): 0x22 = 34 looks like the max match length; unused here — confirm
    sz = len(data)
    out = bytearray(b'\x01')
    out.extend(HW.pack(sz))
    # c = current flag word, cmds = offset of its placeholder in `out`.
    c, cmds = 0, 3
    # pos = input cursor, flag = bit mask for the next token.
    pos, flag = 0, 1
    # Reserve two bytes for the first flag word.
    out.append(0)
    out.append(0)
    while pos < sz:
        hitp, hitl = _search(data, pos, sz)
        if hitl < 3:
            # Push a raw if copying isn't possible.
            out.append(data[pos])
            pos += 1
        else:
            # Lazy matching: if deferring one byte yields a longer match,
            # emit the current byte raw and take the later match.
            tstp, tstl = _search(data, pos + 1, sz)
            if (hitl + 1) < tstl:
                out.append(data[pos])
                pos += 1
                flag <<= 1
                if flag & 0x10000:
                    # Flag word full: write it back and start a new one.
                    HW.pack_into(out, cmds, c)
                    c, flag = 0, 1
                    cmds = len(out)
                    out.append(0)
                    out.append(0)
                hitl = tstl
                hitp = tstp
            # Mark this token as a back-reference and emit (distance, length).
            c |= flag
            e = pos - hitp - 1
            pos += hitl
            hitl -= 3
            e |= hitl << 11
            out.extend(HW.pack(e))
        # Advance the flag and refill if required.
        flag <<= 1
        if flag & 0x10000:
            HW.pack_into(out, cmds, c)
            c, flag = 0, 1
            cmds = len(out)
            out.append(0)
            out.append(0)
    # If no cmds in final word, del it.
    if flag == 1:
        del out[-2:]
    else:
        HW.pack_into(out, cmds, c)
    return bytes(out)
import array
from struct import Struct

PAYLOAD_SIZE = 16

# Read the first 16 bytes of the file as a sample payload.
with open('/home/dick/bumble.yml', mode='rb') as file:
    raw = file.read(PAYLOAD_SIZE)
print(type(raw))

empty_payload = bytes(PAYLOAD_SIZE)

# Page layout: two 16-bit header fields followed by 16 single bytes.
page_meta = Struct('HH16c')
page_data = array.array('b', [0] * page_meta.size)
print(page_meta)
print(page_meta.size)

# BUG FIX: pack_into() was called with no arguments at all, which raises
# TypeError (it needs a buffer, an offset, and one value per format code).
# Pack a zero id, the actual payload length, and the (padded) payload bytes.
payload = raw.ljust(PAYLOAD_SIZE, b'\x00')[:PAYLOAD_SIZE]
page_meta.pack_into(page_data, 0, 0, len(raw),
                    *(payload[i:i + 1] for i in range(PAYLOAD_SIZE)))
print(page_data)

unpacked = page_meta.unpack_from(page_data, 0)
print(unpacked)
def _zero_zip_date_time(zip_):
    """Normalize a ZIP archive in-place via mmap: zero every last-mod
    time/date field in local file headers and central directory entries,
    and scrub identifying extra-field records.

    *zip_* is an open file object for the archive; raises NonZipFileError
    when no local file header signature is found at offset 0.
    """

    def purify_extra_data(mm, offset, length, compressed_size=0):
        # Walk extra-field records in [offset, offset+length): blank
        # timestamp / Unix UID-GID records, and pick up the 8-byte
        # compressed size from a ZIP64 record if one is present.
        extra_header_struct = Struct("<HH")
        # 0. id
        # 1. length
        STRIPZIP_OPTION_HEADER = 0xFFFF
        EXTENDED_TIME_DATA = 0x5455
        # Some sort of extended time data, see
        # ftp://ftp.info-zip.org/pub/infozip/src/zip30.zip ./proginfo/extrafld.txt
        # fallthrough
        UNIX_EXTRA_DATA = 0x7875
        # Unix extra data; UID / GID stuff, see
        # ftp://ftp.info-zip.org/pub/infozip/src/zip30.zip ./proginfo/extrafld.txt
        ZIP64_EXTRA_HEADER = 0x0001
        zip64_extra_struct = Struct("<HHQQ")
        # ZIP64.
        # When a ZIP64 extra field is present his 8byte length
        # will override the 4byte length defined in canonical zips.
        # This is in the form:
        # - 0x0001 (header_id)
        # - 0x0010 [16] (header_length)
        # - ... (8byte uncompressed_length)
        # - ... (8byte compressed_length)
        mlen = offset + length
        while offset < mlen:
            values = list(extra_header_struct.unpack_from(mm, offset))
            _, header_length = values
            extra_struct = Struct("<HH" + "B"*header_length)
            values = list(extra_struct.unpack_from(mm, offset))
            header_id, header_length, rest = values[0], values[1], values[2:]
            if header_id in (EXTENDED_TIME_DATA, UNIX_EXTRA_DATA):
                # Replace the record id with the stripzip marker and
                # fill the payload with 0xff, keeping the record length.
                values[0] = STRIPZIP_OPTION_HEADER
                for i in range(2, len(values)):
                    values[i] = 0xff
                extra_struct.pack_into(mm, offset, *values)
            if header_id == ZIP64_EXTRA_HEADER:
                assert header_length == 16
                values = list(zip64_extra_struct.unpack_from(mm, offset))
                header_id, header_length, uncompressed_size, compressed_size = values
            offset += extra_header_struct.size + header_length
        return compressed_size

    FILE_HEADER_SIGNATURE = 0x04034b50
    CENDIR_HEADER_SIGNATURE = 0x02014b50
    archive_size = os.fstat(zip_.fileno()).st_size
    signature_struct = Struct("<L")
    local_file_header_struct = Struct("<LHHHHHLLLHH")
    # 0. L signature
    # 1. H version_needed
    # 2. H gp_bits
    # 3. H compression_method
    # 4. H last_mod_time
    # 5. H last_mod_date
    # 6. L crc32
    # 7. L compressed_size
    # 8. L uncompressed_size
    # 9. H name_length
    # 10. H extra_field_length
    central_directory_header_struct = Struct("<LHHHHHHLLLHHHHHLL")
    # 0. L signature
    # 1. H version_made_by
    # 2. H version_needed
    # 3. H gp_bits
    # 4. H compression_method
    # 5. H last_mod_time
    # 6. H last_mod_date
    # 7. L crc32
    # 8. L compressed_size
    # 9. L uncompressed_size
    # 10. H file_name_length
    # 11. H extra_field_length
    # 12. H file_comment_length
    # 13. H disk_number_start
    # 14. H internal_attr
    # 15. L external_attr
    # 16. L rel_offset_local_header
    offset = 0
    mm = mmap.mmap(zip_.fileno(), 0)
    # Pass 1: local file headers + their file data.
    while offset < archive_size:
        if signature_struct.unpack_from(mm, offset) != (FILE_HEADER_SIGNATURE,):
            break
        values = list(local_file_header_struct.unpack_from(mm, offset))
        _, _, _, _, _, _, _, compressed_size, _, name_length, extra_field_length = values
        # reset last_mod_time
        values[4] = 0
        # reset last_mod_date
        values[5] = 0x21
        local_file_header_struct.pack_into(mm, offset, *values)
        offset += local_file_header_struct.size + name_length
        if extra_field_length != 0:
            # A ZIP64 extra field may override the 4-byte compressed size.
            compressed_size = purify_extra_data(mm, offset, extra_field_length, compressed_size)
        offset += compressed_size + extra_field_length
    # Pass 2: central directory entries.
    while offset < archive_size:
        if signature_struct.unpack_from(mm, offset) != (CENDIR_HEADER_SIGNATURE,):
            break
        values = list(central_directory_header_struct.unpack_from(mm, offset))
        _, _, _, _, _, _, _, _, _, _, file_name_length, extra_field_length, file_comment_length, _, _, _, _ = values
        # reset last_mod_time
        values[5] = 0
        # reset last_mod_date
        values[6] = 0x21
        central_directory_header_struct.pack_into(mm, offset, *values)
        offset += central_directory_header_struct.size + file_name_length + extra_field_length + file_comment_length
        if extra_field_length != 0:
            # Extra field sits just before the cursor after the advance above.
            purify_extra_data(mm, offset-extra_field_length, extra_field_length)
    if offset == 0:
        raise NonZipFileError(zip_.name)
class StructProcessor:
    """Wraps a struct.Struct plus a namedtuple template, with two format
    extensions: 'S'/'C' mark string fields that are transparently
    encoded/decoded as UTF-8, and **flags names fields run through a
    FlagProcessor on pack/unpack.
    """

    def __init__(self, name, format, fields, defaults=None, **flags):
        if "S" in format or "C" in format:
            # Expand the format to find which unpacked slots are strings.
            m = re.match(r"([<>!=])?(.+)", format)
            # NOTE: the comprehension rebinds `m` to each finditer match;
            # the iterable argument `m[2]` still refers to the outer match
            # (format minus the byte-order prefix) because it is evaluated
            # before the loop starts.
            fmt_items = [
                (int(m[1]) if m[1] else 1, m[2])
                for m in re.finditer(r"(\d*)([xcCbB?hHiIlLqQnNefdsSpP])", m[2])
            ]
            # 's'/'S'/'p' produce one value regardless of their count digit.
            # NOTE(review): pad bytes 'x' produce no value at all but are
            # counted here — offsets could be wrong for formats mixing 'x'
            # with 'S'/'C'; confirm if such formats are ever used.
            fmt_counts = [1 if f in "sSp" else count for count, f in fmt_items]
            fmt_offsets = list((0, *accumulate(fmt_counts)))
            is_str = [False] * fmt_offsets[-1]
            for itm, offset in zip(fmt_items, fmt_offsets[:-1]):
                is_str[offset] = itm[1] in "SC"
            self.is_str = [fields[i] for i, tf in enumerate(is_str) if tf]
            # Downgrade the custom codes to the real struct codes.
            format = format.replace("C", "c").replace("S", "s")
        else:
            self.is_str = ()
        self.struct = Struct(format)
        self.template = namedtuple(name, fields, defaults=defaults)
        # BUG FIX: this was a generator expression, which is exhausted after
        # its first iteration — default(), _pack() and _unpack() all iterate
        # it, so every call after the first silently saw no flags. Materialize
        # it as a tuple so it can be iterated any number of times.
        self.flags = tuple((k, FlagProcessor(*v)) for k, v in flags.items())

    def default(self):
        """Return a template instance with every flag field defaulted."""
        data = self.template()
        return data._replace(**{k: proc.default() for k, proc in self.flags})

    def _unpack(self, data):
        # Decode string fields and expand flag fields after a raw unpack.
        data = self.template._make(data)
        return data._replace(
            **{
                field: getattr(data, field).decode("utf-8")
                for field in self.is_str
            },
            **{k: proc.unpack(getattr(data, k)) for k, proc in self.flags},
        )

    def unpack(self, buffer):
        return self._unpack(self.struct.unpack(buffer))

    def unpack_from(self, buffer, offset=0):
        return self._unpack(self.struct.unpack_from(buffer, offset))

    def _pack(self, ntuple):
        # Collapse flag fields and encode string fields before a raw pack.
        # BUG FIX: was `ntuple[field]` with a *string* field name — namedtuple
        # indexing is positional only, so that raised TypeError; use getattr.
        return ntuple._replace(
            **{k: proc.pack(getattr(ntuple, k)) for k, proc in self.flags},
            **{field: getattr(ntuple, field).encode("utf-8") for field in self.is_str},
        )

    def pack(self, ntuple):
        return self.struct.pack(*self._pack(ntuple))

    def pack_into(self, buffer, offset, ntuple):
        self.struct.pack_into(buffer, offset, *self._pack(ntuple))

    @property
    def size(self):
        return self.struct.size
from struct import Struct

# Layout: native int, 13-byte string, float (natively aligned, 24 bytes).
struct1 = Struct('@i13sf')

# Fix: derive the buffer length and the second record's offset from
# struct1.size instead of the hard-coded 56 and 24, so the layout stays
# correct if the format string ever changes (56 also over-allocated 8 bytes).
buffer = bytearray(struct1.size * 2)
struct1.pack_into(buffer, 0, 129, b'Hello World', 4.69)
struct1.pack_into(buffer, struct1.size, 130, b'Yoo Python!', 3.28)
# Fix: label typo "Butter" -> "Buffer".
print(f"Buffer : {buffer}")
class Native_component_list(Component_list):
    """Component_list variant that can render its components into packed
    native (ctypes) buffers for consumption by C code.

    A *translator* maps one component to the tuple packed per record, a
    *preparer* pre-processes the whole component list, and *format* is the
    struct format of one record. Built buffers are cached until the list
    changes.
    """

    def __init__(self, format, translator=lambda x: x, preparer=lambda x: x):
        super(Native_component_list, self).__init__()
        # TODO remove and simplifiy!
        self.set_translator(translator)
        self.set_format(format)
        self._native_components = None
        self._native_component_offsets = None
        self.set_preparer(preparer)

    def set_translator(self, translator):
        self._translator = translator

    def set_preparer(self, preparer):
        self._preparer = preparer

    def set_format(self, format):
        self._component_struct = Struct(format)

    def _reset(self):
        # Invalidate both caches; they are rebuilt lazily on next access.
        self._native_components = None
        self._native_component_offsets = None

    def _basic_add_component(self, component):
        super(Native_component_list, self)._basic_add_component(component)
        self._reset()

    def clear(self):
        super(Native_component_list, self).clear()
        self._reset()

    def _translate_to_native_component(self, component):
        return self._translator(component)

    def _prepare_component_list(self, components):
        return self._preparer(components)

    def _build_native_components(self):
        """Pack every (prepared, translated) component into one buffer."""
        target_components = self._prepare_component_list(self._components)
        num_components = len(target_components)
        if num_components > 0:
            # Sanity check: one format code per translated field.
            format_length = len(self._component_struct.format)
            data_length = len(self._translate_to_native_component(target_components[0]))
            if format_length != data_length:
                msg = 'data and format must have the same length, got len(fomat) = %i and len(data) = %i'
                raise Exception(msg % (format_length, data_length))
        struct_size = self._component_struct.size
        raw = ctypes.create_string_buffer(struct_size * num_components)
        for i in range(num_components):
            native_component = self._translate_to_native_component(target_components[i])
            self._component_struct.pack_into(raw, struct_size * i, *native_component)
        return raw

    def get_native_components(self):
        # Fix: compare against None with `is`, not `==` (identity check;
        # `==` can misfire if an element type overloads equality).
        if self._native_components is None:
            self._native_components = self._build_native_components()
        return self._native_components

    def _build_native_component_offsets(self):
        """Build (id, start, length) int triples indexed by component id;
        ids without a component get (id, -1, -1)."""
        triple = Struct('iii')
        ids = self.get_component_atom_ids()
        num_ids = 0
        if len(ids) > 0:
            num_ids = max(ids) + 1
        struct_size = triple.size
        raw = ctypes.create_string_buffer(struct_size * num_ids)
        for component_id in range(num_ids):
            if component_id in ids:
                start, end = self.get_component_range(component_id)
                native_component = component_id, start, end - start
            else:
                native_component = component_id, -1, -1
            triple.pack_into(raw, struct_size * component_id, *native_component)
        return raw

    def get_native_component_offsets(self):
        # Fix: `is None` instead of `== None` (see get_native_components).
        if self._native_component_offsets is None:
            self._native_component_offsets = self._build_native_component_offsets()
        return self._native_component_offsets