Example #1
def unpack(file_path: Union[Path, str]):
    if isinstance(file_path, str):
        file_path = Path(file_path)
    result = {'spout': list(), 'lever': list(), 'event': dict()}
    for attr_type in ("design", "hardware"):
        result[attr_type] = toml.load(file_path.joinpath(attr_type + ".toml"))
    packet = Struct("<BIi")
    with file_path.joinpath("temp.raw").open('rb') as fp:
        for pack in packet.iter_unpack(fp.read()):
            if pack[0] == 1:
                result['spout'].append(pack[1:])
            elif pack[0] == 2:
                result['lever'].append(pack[1:])
            elif pack[0] == 3:
                event_type = event_types[pack[2]]
                if event_type in result['event']:
                    result['event'][event_type].append(pack[1])
                else:
                    result['event'][event_type] = [pack[1]]
            else:
                raise ValueError(f"wrong packet! {pack}")
        result['lever'] = np.array(result['lever'],
                                   dtype=[('timestamp', '<I'),
                                          ('value', '<i')])
        result['spout'] = np.array(result['spout'],
                                   dtype=[('timestamp', '<I'),
                                          ('value', '<i')])
        result['event'] = {
            key: np.array(value, dtype='<I')
            for key, value in result['event'].items()
        }
    return result
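To see the packet format in isolation, here is a minimal sketch of the `<BIi` stream this loader consumes (the tag values and sample payload below are made up):

from struct import Struct

packet = Struct("<BIi")  # tag (1 byte), timestamp (uint32), value (int32)
# Hypothetical stream: one spout packet (tag 1) and one lever packet (tag 2)
raw = packet.pack(1, 1000, 42) + packet.pack(2, 1005, -7)
for tag, timestamp, value in packet.iter_unpack(raw):
    print(tag, timestamp, value)
# -> 1 1000 42
# -> 2 1005 -7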
Example #2
class StatsVector:
    '''A class representing a VPP vector'''

    def __init__(self, stats, ptr, fmt):
        self.vec_start = ptr - stats.base
        self.vec_len = get_vec_len(stats, ptr - stats.base)
        self.struct = Struct(fmt)
        self.fmtlen = len(fmt)
        self.elementsize = self.struct.size
        self.statseg = stats.statseg
        self.stats = stats

        if self.vec_start + self.vec_len * self.elementsize >= stats.size:
            raise ValueError('Vector overruns stats segment')

    def __iter__(self):
        with self.stats.lock:
            return self.struct.iter_unpack(self.statseg[self.vec_start:self.vec_start +
                                                        self.elementsize*self.vec_len])

    def __getitem__(self, index):
        if index >= self.vec_len:
            raise ValueError('Index beyond end of vector')
        with self.stats.lock:
            if self.fmtlen == 1:
                return self.struct.unpack_from(self.statseg, self.vec_start +
                                               (index * self.elementsize))[0]
            return self.struct.unpack_from(self.statseg, self.vec_start +
                                           (index * self.elementsize))
Example #3
def deinterleave(data, nbytes, nsplit):
    """Deinterleaves one bytearray into nsplit many bytearrays on a nbytes basis.

    Returns a list of bytes objects.
    """
    deinterleaved = [[] for n in range(nsplit)]

    deinterleave_s = Struct('c' * nbytes)

    try:
        deinterleave_iter = deinterleave_s.iter_unpack(data)
    except error as err:
        # struct.error can signal several different problems; log and re-raise
        print('ERROR:', err, 'CLOSING', file=sys.stderr)
        raise

    # floor division, so any trailing partial group is dropped
    iterlen = len(data) // (nbytes * nsplit)
    for _ in range(iterlen):
        for sublist in deinterleaved:
            try:
                next_ = next(deinterleave_iter)
            except StopIteration:
                break
            sublist.extend(next_)

    return [b''.join(delist) for delist in deinterleaved]
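A quick usage sketch of the round-robin split (input is hypothetical):

parts = deinterleave(b'ABCDEF', nbytes=1, nsplit=3)
print(parts)  # [b'AD', b'BE', b'CF']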
Example #4
    def _tile_to_bitplanes(self, data):
        row_fmt = Struct(8 * 'c')
        row_iter = row_fmt.iter_unpack(data)

        bitplanes = [self._pixel_row_to_4bpp(row) for row in row_iter]

        return b''.join(bitplanes)
Example #5
    def _interleave(self, subtile1, subtile2):
        """Interleaves two 8x8 tiles like
        [subtile1-row1] [subtile2-row1] ...
        [subtile1-row8] [subtile2-row8]

        Returns a list of bytes objects.
        """
        interleaved = []
        interleave_fmt = Struct(4 * 'c')

        left_iter = interleave_fmt.iter_unpack(subtile1)
        right_iter = interleave_fmt.iter_unpack(subtile2)

        for i in left_iter:
            right_next = next(right_iter)
            interleaved.extend([*i, *right_next])

        return interleaved
Example #6
    def unpack(self):
        """Unpacks a 4bpp planar tile.

        Returns bytes of pixel values.
        """
        tile_fmt = Struct(32 * 'c')
        tile_iter = tile_fmt.iter_unpack(self._tile_data)

        tiles = [self._bitplanes_to_tile(b''.join(tile)) for tile in tile_iter]
        return b''.join(tiles)
Example #7
    def deinterleave_subtiles(self):
        tile_fmt = Struct(64 * 'c')
        tile_iter = tile_fmt.iter_unpack(self._tile_data)

        subtiles = []
        for subtile in tile_iter:
            subtiles.extend(self._deinterleave(b''.join(subtile)))

        deinterleaved = [subtiles[0], subtiles[2], subtiles[1], subtiles[3]]
        return Tile(self._tile_addr, b''.join(deinterleaved),
                    self._tile_dimensions)
Example #8
    def _deinterleave(self, data):
        deinterleaved = [[], []]

        deinterleave_fmt = Struct(4 * 'c')
        deinterleave_iter = deinterleave_fmt.iter_unpack(data)

        # the input is expected to hold an even number of 4-byte groups
        for i in deinterleave_iter:
            deinterleaved[0].extend(i)
            deinterleaved[1].extend(next(deinterleave_iter))

        return [b''.join(half) for half in deinterleaved]
Example #9
    def pack(self, data):
        """Converts pixel values into 4bpp and packs it for Tile use.
        Returns bytes()
        """

        tile_fmt = Struct(32 * 'c')
        tile_iter = tile_fmt.iter_unpack(data)

        bitplane_values = [
            self._tile_to_bitplanes(b''.join(tile)) for tile in tile_iter
        ]
        return b''.join(bitplane_values)
Example #10
def deinterleave(data, num_bytes):
    """Deinterleaves a bytearray.

    Returns two bytearrays.
    """
    evens = []
    odds = []
    deinterleave_s = Struct('c' * num_bytes)
    deinterleave_iter = deinterleave_s.iter_unpack(data)

    # data is expected to hold an even number of num_bytes groups
    for i in deinterleave_iter:
        evens.extend(i)
        odds.extend(next(deinterleave_iter))

    return b''.join(evens), b''.join(odds)
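For illustration, a hypothetical even/odd split; note that the input must contain an even number of `num_bytes` groups, or the bare `next()` raises StopIteration:

evens, odds = deinterleave(b'ABCD', num_bytes=1)
print(evens, odds)  # b'AC' b'BD'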
Example #11
def interleave(file1, file2, num_bytes):
    """Interleaves two bytearray buffers together.

    Returns bytes.
    """
    interleaved = []
    interleave_s = Struct('c' * num_bytes)
    file1_iter = interleave_s.iter_unpack(file1)
    file2_iter = interleave_s.iter_unpack(file2)

    for i in file1_iter:
        file2_next = next(file2_iter)
        interleave_temp = [*i, *file2_next]
        interleaved.extend(interleave_temp)

    return b''.join(interleaved)
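A sketch pairing it with the deinterleave above (hypothetical inputs; `file2` must hold at least as many groups as `file1`):

print(interleave(b'AC', b'BD', num_bytes=1))  # b'ABCD'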
Example #12
    def interleave_subtiles(self):
        """Row interleaves the 4 8x8 subtiles in a 16x16 tile.

        Returns a new packed Tile.
        """
        tile_fmt = Struct(32 * 'c')
        tile_iter = tile_fmt.iter_unpack(self._tile_data)

        subtiles = [b''.join(subtile) for subtile in tile_iter]

        top = self._interleave(subtiles[0], subtiles[2])
        bottom = self._interleave(subtiles[1], subtiles[3])

        interleaved = [*top, *bottom]
        return Tile(self._tile_addr, b''.join(interleaved),
                    self._tile_dimensions)
Example #13
def deinterleave(data, nbytes, nsplit):
    """Deinterleaves one bytearray into nsplit many bytearrays on a nbytes basis.

    Returns a list of bytes objects.
    """
    deinterleaved = [[] for n in range(nsplit)]

    deinterleave_s = Struct('c' * nbytes)
    deinterleave_iter = deinterleave_s.iter_unpack(data)

    for i in deinterleave_iter:
        deinterleaved[0].extend(i)
        for j in range(1, nsplit):
            deinterleaved[j].extend(next(deinterleave_iter))

    return [b''.join(delist) for delist in deinterleaved]
Example #14
def interleave(data, nbytes):
    """Interleaves a list of bytearrays together on a nbytes basis.

    Returns bytes.
    """
    interleave_s = Struct('c' * nbytes)
    iters = [interleave_s.iter_unpack(inter) for inter in data]

    interleaved = []
    # floor division, so any trailing partial group is dropped
    iterlen = len(data[0]) // nbytes
    for _ in range(iterlen):
        nexts = [next(iter_) for iter_ in iters]
        interleaved.extend([b''.join(val) for val in nexts])

    return b''.join(interleaved)
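A usage sketch of the n-way variant (hypothetical input), merging the three streams split apart in Example #3:

print(interleave([b'AD', b'BE', b'CF'], nbytes=1))  # b'ABCDEF'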
Example #15
def bool_ptr_map_of_coredump(coredump_path):
    struct_uint64 = Struct('=Q')

    ptr_map = []
    with MachO(coredump_path) as cdump:
        va_ranges = SortedDict((s.vmaddr, s.vmaddr + s.vmsize) for s in cdump.get_segments())

        for seg in (s for s in cdump.get_segments() if not s.initprot & VM_PROT_EXECUTE):
            # Ensure 8-byte alignment of the segment's vmaddr and vmsize
            vmaddr_8byte_aligned_up = (seg.vmaddr + 0x7) & ~0x7
            vmaddr_delta = vmaddr_8byte_aligned_up - seg.vmaddr
            vmsize_8byte_aligned_down = (seg.vmsize - vmaddr_delta) & ~0x7
            vmsize_delta = vmsize_8byte_aligned_down - seg.vmsize
            # Get 8-byte aligned segment data
            seg_data = cdump._get_data(seg.fileoff + vmaddr_delta, seg.filesize + vmsize_delta)

            for (uint64,) in struct_uint64.iter_unpack(seg_data):
                is_ptr = va_range_set_contains(va_ranges, uint64)
                ptr_map.append(is_ptr)

    return ptr_map, 8
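The masking above rounds addresses to 8-byte boundaries; a standalone check of that arithmetic with made-up values:

vmaddr, vmsize = 0x1003, 0x20  # hypothetical unaligned segment
vmaddr_up = (vmaddr + 0x7) & ~0x7      # round start up to an 8-byte boundary
delta = vmaddr_up - vmaddr             # bytes skipped at the front
vmsize_down = (vmsize - delta) & ~0x7  # round the remaining size down
assert (vmaddr_up, delta, vmsize_down) == (0x1008, 5, 0x18)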
Example #16
    def read_type_tree_blob(self):
        reader = self.reader
        number_of_nodes = reader.read_int()
        string_buffer_size = reader.read_int()

        type = f"{reader.endian}hb?IIiii"
        keys = [
            "m_Version",
            "m_Level",
            "m_IsArray",
            "m_TypeStrOffset",
            "m_NameStrOffset",
            "m_ByteSize",
            "m_Index",
            "m_MetaFlag",
        ]
        if self.header.version >= 19:
            fmt += "Q"
            keys.append("m_RefTypeHash")

        node_struct = Struct(fmt)
        struct_data = reader.read(node_struct.size * number_of_nodes)
        string_buffer_reader = EndianBinaryReader(
            reader.read(string_buffer_size), reader.endian)

        if not config.SERIALIZED_FILE_PARSE_TYPETREE:
            return [], string_buffer_reader.bytes

        type_tree = [
            TypeTreeNode(
                **dict(zip(keys, raw_node)),
                m_Type=read_string(string_buffer_reader, raw_node[3]),
                m_Name=read_string(string_buffer_reader, raw_node[4]),
            )
            for raw_node in node_struct.iter_unpack(struct_data)
        ]

        return type_tree, string_buffer_reader.bytes
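A rough sketch of the per-node record being unpacked, assuming little-endian data and the field order from the format string above (the sample values are invented):

from struct import Struct

s = Struct('<hb?IIiii')  # version, level, is-array flag, two string offsets, byte size, index, meta flag
raw = s.pack(19, 0, False, 0, 8, 4, 0, 0)
keys = ["m_Version", "m_Level", "m_IsArray", "m_TypeStrOffset",
        "m_NameStrOffset", "m_ByteSize", "m_Index", "m_MetaFlag"]
print(dict(zip(keys, s.unpack(raw))))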
Example #17
def interleave(data, nbytes):
    """Interleaves a list of bytearrays together on a nbytes basis.

    Returns bytes.
    """
    interleave_s = Struct('c' * nbytes)
    iters = []

    for inter in data:
        try:
            iters.append(interleave_s.iter_unpack(inter))
        except error as err:
            print('ERROR:', err, 'CLOSING', file=sys.stderr)
            raise

    interleaved = []
    # floor division, so any trailing partial group is dropped
    iterlen = len(data[0]) // nbytes
    for _ in range(iterlen):
        nexts = [next(iter_) for iter_ in iters]
        interleaved.extend([b''.join(val) for val in nexts])

    return b''.join(interleaved)
Example #18
class NamedStruct(object):
    """
        A type that fuses namedtuple and Struct together.
    """

    __slots__ = ('names', 'format')

    def __init__(self, name, fmt, *fields):
        self.format = Struct(fmt)
        self.names = namedtuple(name, fields)

    @classmethod
    def from_namedtuple(cls, ntuple, fmt):
        """
            Build a NamedStruct from a namedtuple

            Author: Gabriel Dube
        """
        named_struct = super(NamedStruct, cls).__new__(cls)
        named_struct.format = Struct(fmt)
        named_struct.names = ntuple

        return named_struct

    def unpack(self, data):
        x, y = len(data), self.format.size
        if (x > y) and (x % y == 0):
            return tuple(self.iter_unpack(data))

        return self.names(*self.format.unpack(data))

    def unpack_from(self, data, offset):
        return self.names(*self.format.unpack_from(data, offset))

    def iter_unpack(self, data):
        return (self.names(*d) for d in self.format.iter_unpack(data))
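A usage sketch (the `Point` layout is invented for illustration); note how `unpack` falls back to `iter_unpack` when given a whole multiple of the record size:

from struct import pack

Point = NamedStruct('Point', '<ii', 'x', 'y')
print(Point.unpack(pack('<ii', 1, 2)))          # Point(x=1, y=2)
print(Point.unpack(pack('<iiii', 1, 2, 3, 4)))  # (Point(x=1, y=2), Point(x=3, y=4))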
Example #19
class IndexSection(BaseSection):
    '''
    A section containing an index / lookup table with offsets into another
    section. (Think git packfile indexes.)
    '''
    typeid = SectionType.Index
    datatype = dict

    def __init__(self,
                 *args,
                 othersec=None,
                 othersec_idx=None,
                 keysize=32,
                 fanout=True,
                 off64=False,
                 unc_size=True,
                 varint=False,
                 endian='<',
                 **kwargs):
        # TODO: flag for whether or not there's a full fanout table
        #       (so we can skip it for small indexes)
        # TODO: flag for varint encoding of offsets/sizes
        BaseSection.__init__(self, *args, **kwargs)
        if not (othersec_idx or isinstance(othersec, BaseSection)):
            raise ValueError("expected BaseSection, got {type(othersec)}")

        # these control the output encoding and can be set/changed whenever
        self.endian = endian
        self.fanout = fanout
        self.varint = varint
        # off64 is an output encoding setting that gets set automatically
        # if a 64-bit offset/size is added
        self._off64 = off64
        # keysize and unc_size can't be changed once an index is created
        self._keysize = keysize
        self._unc_size = unc_size
        # references to the section we're an index over
        self._othersec = othersec
        self._othersec_idx = othersec_idx

        # set up
        self._key_s = Struct(f'{self._keysize}s')
        valfmt = ('Q' if off64 else 'I') * (3 if unc_size else 2)
        self._val_s = Struct(f'{self.endian}{valfmt}')
        self._fanout_s = Struct(f'{self.endian}256I')

        if unc_size:
            self.add = self.add3
        else:
            self.add = self.add2

    @staticmethod
    def parse_info(info):
        return IndexInfo.from_int(info)

    @classmethod
    def from_hdr(cls, shdr):
        info = cls.parse_info(shdr.info)
        return cls(name_idx=shdr.name,
                   flags=shdr.flags,
                   othersec_idx=info.othersec,
                   keysize=info.keysize,
                   fanout=info.fanout,
                   off64=info.off64,
                   unc_size=info.unc_size,
                   varint=bool(shdr.flags & SectionFlags.VARINT))

    @property
    def count(self):
        return len(self._data)

    @property
    def size(self):
        if self.varint:
            return (
                len(self.make_fanout()) + (self.count * self.keysize) +
                sum(len(varint_encode(i)) for v in self.values() for i in v))
        else:
            return (self._fanout_s.size + self.count *
                    (self.keysize + self._val_s.size))

    @property
    def info(self):
        return IndexInfo(keysize=self.keysize,
                         othersec=self.othersec.idx if self.othersec else 0xff,
                         fanout=self.fanout,
                         unc_size=self._unc_size,
                         off64=self._off64).to_int()

    @property
    def othersec(self):
        if self._othersec is None:
            if self._dino and self._othersec_idx:
                self._othersec = self._dino.sectab[self._othersec_idx]
        return self._othersec

    def keys(self):
        return self._data.keys()

    def values(self):
        return self._data.values()

    def items(self):
        return self._data.items()

    def get(self, key, default=None):
        return self._data.get(key, default)

    def __contains__(self, key):
        return key in self._data

    def add2(self, key, offset, size):
        self._data[self._key_s.pack(key)] = (offset, size)

    def add3(self, key, offset, size, uncsize):
        self._data[self._key_s.pack(key)] = (offset, size, uncsize)

    def remove(self, key):
        del self._data[key]

    def make_fanout(self):
        counts = Counter(k[0] for k in self.keys())
        if self.varint:
            # varint-encoded fanout just gives the counts for each byte
            return self._varint_pack(*[counts[b] for b in range(256)])
        else:
            fanout = [0] * 257
            for i in range(256):
                fanout[i + 1] = fanout[i] + counts[i]
            return self._fanout_s.pack(*fanout[1:])

    def _varint_pack(self, *values):
        return b''.join(varint_encode(i) for i in values)

    @property
    def keysize(self):
        return self._key_s.size

    def write_to(self, fobj):
        if self.count == 0:
            return 0
        dprint(f"writing index: fanout={self.fanout} varint={self.varint} "
               f"unc_size={self._unc_size} keysize={self.keysize} "
               f"count={self.count}")
        wrote = 0
        if self.fanout:
            wrote += fobj.write(self.make_fanout())
        keys, vals = zip(*(sorted(self.items())))
        dprint(f"  fanout: {wrote:7} bytes")

        prevpos = wrote
        for k in keys:
            wrote += fobj.write(self._key_s.pack(k))
        dprint(f"    keys: {wrote-prevpos:7} bytes")

        if self.varint:
            valpack = self._varint_pack
        else:
            valpack = self._val_s.pack

        prevpos = wrote
        for v in vals:
            wrote += fobj.write(valpack(*v))
        dprint(f"    vals: {wrote-prevpos:7} bytes")
        dprint(f"   total: {wrote:7} bytes")

        return wrote

    def from_file(self, fobj, size, count=0):
        # It's a little silly that we unpack this data structure into native
        # python data structures rather than using it directly, but the
        # native structures *seem* to perform better, and this is really
        # just a rapid-devel prototype anyway.
        # A real implementation of this would be native code in a library
        # that we use via the FFI or something.
        if size == 0:
            self._data = self.datatype()
            return

        dprint(f"reading index: fanout={self.fanout} varint={self.varint} "
               f"unc_size={self._unc_size} keysize={self.keysize} "
               f"count={self.count}")

        data = fobj.read(size)
        keypos = 0
        if self.fanout:
            if self.varint:
                # NOTE: varint-encoded fanout is a sequence of counts, not a
                # running count..
                fanout = []
                fv = 0
                for v, n in varint_iter_decode(data, 256):
                    fv += v
                    keypos += n
                    fanout.append(fv)
            else:
                keypos = self._fanout_s.size
                fanout = self._fanout_s.unpack(data[0:keypos])
            if count:
                assert count == fanout[-1]
            else:
                count = fanout[-1]
            dprint(f"  fanout: {keypos:7} bytes, count={fanout[-1]}")
        keylen = self.keysize * count
        valpos = keypos + keylen
        keydata = data[keypos:valpos]
        valdata = data[valpos:]
        dprint(f"    keys: {valpos-keypos:7} bytes")
        dprint(f"    vals: {len(valdata):7} bytes")
        keys = [i[0] for i in self._key_s.iter_unpack(keydata)]
        if self.varint:
            vals = [i[0] for i in varint_iter_decode(valdata)]
            n, m = divmod(len(vals), count)
            assert (m == 0), "Incorrect/corrupt index"
            vals = [tuple(vals[i:i + n]) for i in range(0, len(vals), n)]
        else:
            if (len(valdata) % self._val_s.size):
                print(
                    f"wtf: size {self._val_s.size} * count {count} != {len(valdata)}"
                )
            vals = self._val_s.iter_unpack(valdata)
        self._data = self.datatype(zip(keys, vals))
Example #20
def parser(buffer):
    # struct_pattern and event_maker are free variables from the enclosing scope
    struct = Struct(struct_pattern)
    for chunk in struct.iter_unpack(buffer):
        yield event_maker(*chunk)
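A hedged sketch of what the enclosing factory might look like (`make_parser` and the event layout are assumptions, not part of the original source):

from struct import Struct

def make_parser(struct_pattern, event_maker):
    def parser(buffer):
        struct = Struct(struct_pattern)
        for chunk in struct.iter_unpack(buffer):
            yield event_maker(*chunk)
    return parser

# Hypothetical event layout: two unsigned shorts per record
events = make_parser('<HH', lambda code, value: {'code': code, 'value': value})
print(list(events(b'\x01\x00\x02\x00')))  # [{'code': 1, 'value': 2}]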
Example #21
def extract_version1(file):
    # consts
    KEYS_COUNT = 8
    PADDING_LENGTH = 432
    PAYLOAD_SIZE = 512  # sizeof (u32) * 128

    # prepare structs
    key_struct = Struct(">LL32sLL")
    header_struct = Struct(">6sH32s32s32sLL20s32sL40s" +
                           str(key_struct.size * KEYS_COUNT) + "s" +
                           str(PADDING_LENGTH) + "x")

    # read header
    header = file.read(header_struct.size)
    assert len(header) == header_struct.size, "File contains less data than needed"

    # convert bytes into temporary header
    header = header_struct.unpack(header)
    header = TmpHeaderVersion1(*header)

    # convert bytes into temporary keys
    tmp_keys = [
        TmpKeyVersion1(*key) for key in key_struct.iter_unpack(header.keys)
    ]

    # read keys' af
    keys = []
    for key in tmp_keys:
        file.seek(key.material_offset * SECTOR_SIZE, SEEK_SET)
        af = file.read(header.key_bytes * key.stripes)
        assert len(af) == (header.key_bytes * key.stripes), "File contains less data than needed"

        key = KeyVersion1(key.active, key.iterations, key.salt, af)
        keys.append(key)

    # read payload
    file.seek(header.payload_offset * SECTOR_SIZE, SEEK_SET)
    payload = file.read(PAYLOAD_SIZE)
    assert len(payload) == PAYLOAD_SIZE, "File contains less data than needed"

    # convert into header
    header = HeaderVersion1(
        header.magic,
        header.version,
        header.cipher,
        header.mode,
        header.hash,
        payload,
        header.key_bytes * 8,
        header.digest,
        header.salt,
        header.iterations,
        header.uuid,
        keys,
    )

    # check for any active key
    for key in header.keys:
        if key.active not in [
                KeyVersion1.Active.ENABLED, KeyVersion1.Active.ENABLED_OLD
        ]:
            continue

        hash = SIGNATURE + "$".join(
            map(
                str,
                [
                    header.version,
                    header.hash,
                    header.cipher,
                    header.mode,
                    int(header.key_size),
                    key.iterations,
                    key.salt.hex(),
                    key.af.hex(),
                    header.payload.hex(),
                ],
            ))
        print(hash)
        break
    else:
        # all keys are disabled
        raise ValueError("All keys are disabled")
Example #22
def read_iter(structure: Struct,
              stream: IO[bytes]) -> Iterator[Tuple[Any, ...]]:
    return structure.iter_unpack(stream.read())
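For illustration, feeding it an in-memory stream of `<HH` records (values made up):

import io
from struct import Struct

# Two little-endian <HH records: (1, 2) and (3, 4)
stream = io.BytesIO(b'\x01\x00\x02\x00\x03\x00\x04\x00')
for record in read_iter(Struct('<HH'), stream):
    print(record)  # (1, 2) then (3, 4)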
Example #23
from struct import Struct

struct1 = Struct('@i13sf')
sendBytes = b'\x7f\x00\x00\x00Hello Struct!\x00\x00\x00\xc3\xf5H@\x80\x00\x00\x00Hello Python!\x00\x00\x00\x85\xebQ@'
originalIter = struct1.iter_unpack(sendBytes)

print('Object type :', originalIter)

for item in originalIter:
    print(item)
Example #24
def read_records(record_struct, f):
    while True:
        line = f.read(record_struct.size)
        if line == b'':
            break
        yield _decode_record(record_struct, line)


def _decode_record(record_struct, line):
    return tuple(s.decode() for s in record_struct.unpack_from(line))


def decode_record(rec):
    return tuple(s.decode() for s in rec)


if __name__ == '__main__':
    # Will throw an AssertionError if the Length variable within the control file is wrong
    check_ctl('/some/dir/to/keep.csv')

    field_widths, keep_fields = import_ctl('/some/dir/to/keep.csv')
    fmt_string = create_fmt(field_widths, keep_fields)
    record_struct = Struct(fmt_string)

    with open('/some/dir/to/fixedfield/split1_sample', 'rb') as infile:
        with open('/some/dir/to/fixedfield/split1_sample.csv', 'w',
                  newline='') as outfile:
            csv_writer = csv.writer(outfile, delimiter=',')
            for rec in record_struct.iter_unpack(
                    infile.read(record_struct.size * 10)):
                # for rec in read_records(record_struct, infile):
                csv_writer.writerow(decode_record(rec))