Example #1
def encrypt_sng(data, key):
    header = Int32ul.build(74) + Int32ul.build(3)
    iv = bytes(16)
    payload = Int32ul.build(len(data))
    payload += zlib.compress(data, zlib.Z_BEST_COMPRESSION)
    encrypted = aes_sng(key, iv).encrypt(pad(payload))[:len(payload)]
    return header + iv + encrypted + bytes(56)
Example #2
def encrypt_sng(data, key):
    header = Int32ul.build(74) + Int32ul.build(3)
    iv = bytes(16)
    payload = Int32ul.build(len(data))
    payload += zlib.compress(data, zlib.Z_BEST_COMPRESSION)
    encryptor = aes_sng(key, iv).encryptor()
    encrypted = encryptor.update(payload) + encryptor.finalize()
    return header + iv + encrypted + bytes(56)
Example #3
def _parse_hvle_block(hive_path, transaction_log_stream, log_size, expected_sequence_number):
    """

    :param hive_path:
    :param transaction_log_stream:
    :param log_size:
    :param expected_sequence_number:
    :return:
    """
    recovered_dirty_pages_count = 0
    restored_hive_buffer = BytesIO(open(hive_path, 'rb').read())

    hvle_block_start_offset = transaction_log_stream.tell()

    while hvle_block_start_offset < log_size:
        logger.info(f'Parsing hvle block at {hex(hvle_block_start_offset)}')
        with boomerang_stream(transaction_log_stream) as x:
            if x.read(4) != b'HvLE':
                logger.info('Reached a non-HvLE object, stopping')
                break

        logger.info(f'Parsing HvLE block at {hex(hvle_block_start_offset)}')
        parsed_hvle_block = TRANSACTION_LOG.parse_stream(transaction_log_stream)
        logger.info(f'Currently at start of dirty pages: {transaction_log_stream.tell()}')
        logger.info(f'seq number: {parsed_hvle_block.sequence_number}')
        logger.info(f'dirty pages: {parsed_hvle_block.dirty_pages_count}')

        if parsed_hvle_block.sequence_number == expected_sequence_number:
            logger.info(f'This hvle block holds valid dirty blocks')
            expected_sequence_number += 1

        for dirty_page_entry in parsed_hvle_block.dirty_pages_references:
            # Write the actual dirty page to the original hive
            target_offset = REGF_HEADER_SIZE + dirty_page_entry.offset
            restored_hive_buffer.seek(target_offset)
            transaction_log_stream_offset = transaction_log_stream.tell()
            dirty_page_buffer = transaction_log_stream.read(dirty_page_entry.size)
            restored_hive_buffer.write(dirty_page_buffer)
            logger.info(f'Restored {dirty_page_entry.size} bytes to offset {hex(target_offset)} '
                        f'from offset {hex(transaction_log_stream_offset)}')
            recovered_dirty_pages_count += 1

        # TODO: update hive flags from hvle to original header

        # Update sequence numbers are at offsets 4 & 8:
        restored_hive_buffer.seek(4)
        restored_hive_buffer.write(Int32ul.build(expected_sequence_number))
        restored_hive_buffer.write(Int32ul.build(expected_sequence_number))

        # Update hbins size from hvle to original header at offset 40
        restored_hive_buffer.seek(40)
        restored_hive_buffer.write(Int32ul.build(parsed_hvle_block.hive_bin_size))

        transaction_log_stream.seek(hvle_block_start_offset + parsed_hvle_block.log_size)
        hvle_block_start_offset = hvle_block_start_offset + parsed_hvle_block.log_size

    return restored_hive_buffer, recovered_dirty_pages_count
Example #4
def add_signature(infile, outfile, signature):
    """Add a signature to a PE file."""
    # First copy infile to outfile
    infile.seek(0)
    while True:
        block = infile.read(1024)
        if not block:
            break
        outfile.write(block)

    outfile.seek(0)
    pe = pefile.parse_stream(outfile)
    if not pe.optional_header.certtable_info:
        raise ValueError(
            "Can't add a signature into this file (not enough RVA sections)"
        )

    cert = certificate.build(
        {
            "size": len(signature) + 8,
            "revision": "REV2",
            "certtype": "PKCS7",
            "data": signature,
        }
    )

    # If we already have signatures, then add the new one to the end of the file
    if pe.optional_header.certtable_offset:
        certs_offset = pe.optional_header.certtable_offset
        certs_size = pe.optional_header.certtable_size + len(cert)
        old_certs_size = pe.optional_header.certtable_size
    else:
        # Figure out the end of the file
        outfile.seek(0, 2)
        certs_offset = outfile.tell()
        # Pad to 8 byte boundary
        if certs_offset % 8:
            certs_offset += 8 - (certs_offset % 8)
        certs_size = len(cert)
        old_certs_size = 0

    # Update the certificate table info
    outfile.seek(pe.optional_header.certtable_info)
    outfile.write(Int32ul.build(certs_offset))
    outfile.write(Int32ul.build(certs_size))

    # Add the signature
    outfile.seek(certs_offset + old_certs_size)
    outfile.write(cert)

    # Update the checksum
    checksum = calc_checksum(outfile, pe.optional_header.checksum_offset)
    outfile.seek(pe.optional_header.checksum_offset)
    outfile.write(Int32ul.build(checksum))
Example #5
def decrypt_sng(data, key):
    iv, data = data[8:24], data[24:-56]
    decrypted = aes_sng(key, iv).decrypt(pad(data))
    length, payload = Int32ul.parse(decrypted[:4]), decrypted[4:len(data)]
    payload = zlib.decompress(payload)
    assert len(payload) == length
    return payload
Example #6
def decrypt_sng(data, key):
    iv, data = data[8:24], data[24:]
    decryptor = aes_sng(key, iv).decryptor()
    decrypted = decryptor.update(data) + decryptor.finalize()
    length, payload = Int32ul.parse(decrypted[:4]), decrypted[4:]
    payload = zlib.decompress(payload)
    assert len(payload) == length
    return payload
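
Examples #1, #2, #5 and #6 wrap an AES layer (via the aes_sng helper, not shown here) around a simple length-prefixed zlib payload. A minimal sketch of just that inner framing, with hypothetical frame/unframe names and no cipher:

import zlib
from construct import Int32ul

def frame_payload(data: bytes) -> bytes:
    # 4-byte little-endian plaintext length, then the zlib-compressed body
    return Int32ul.build(len(data)) + zlib.compress(data, zlib.Z_BEST_COMPRESSION)

def unframe_payload(payload: bytes) -> bytes:
    length = Int32ul.parse(payload[:4])
    data = zlib.decompress(payload[4:])
    assert len(data) == length
    return data

assert unframe_payload(frame_payload(b'song data')) == b'song data'
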
Example #7
def pack_tcp(msgs, channels=None):
    buf = b''

    for msg in msgs:
        msg = pack(msg, channels)
        buf += Int32ul.build(len(msg)) + msg

    return buf
Example #8
    def _add_config_sector(self) -> None:
        wl_config_data = WLFATFS.WL_CONFIG_T_DATA.build(
            dict(start_addr=0,
                 full_mem_size=self.partition_size,
                 page_size=self.sector_size,
                 sector_size=self.sector_size,
                 updaterate=self._update_rate,
                 wr_size=16,
                 version=self._version,
                 temp_buff_size=self._temp_buff_size))

        crc = crc32(list(wl_config_data), WLFATFS.UINT32_MAX)
        wl_config_crc = Int32ul.build(crc)

        # adding three 4 byte zeros to align the structure
        wl_config = wl_config_data + wl_config_crc + Int32ul.build(
            0) + Int32ul.build(0) + Int32ul.build(0)

        self.fatfs_binary_image += (
            wl_config +
            (self.sector_size - WLFATFS.WL_CONFIG_HEADER_SIZE) * b'\xff')
Example #9
    def _encode(self, obj: Construct, context: Container, path: str) -> bytes:
        """Encodes a ``FormID`` back to bytes.

        Args:
            obj (Construct): The construct to encode
            context (Container): The contextual container to use
            path (str): The construct path

        Returns:
            bytes: The resulting encoded bytes
        """

        return Int32ul.build(obj.form_id)
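
For reference, Int32ul.build simply emits the form ID as four little-endian bytes; a quick illustrative round-trip with an arbitrary value:

from construct import Int32ul

assert Int32ul.build(0x0001F4A3) == b'\xa3\xf4\x01\x00'
assert Int32ul.parse(b'\xa3\xf4\x01\x00') == 0x0001F4A3
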
Example #10
    def _add_state_sectors(self) -> None:
        wl_state_data = WLFATFS.WL_STATE_T_DATA.build(
            dict(
                pos=0,
                max_pos=self.plain_fat_sectors + WLFATFS.DUMMY_SECTORS_COUNT,
                move_count=0,
                access_count=0,
                max_count=self._update_rate,
                block_size=self.sector_size,
                version=self._version,
                device_id=self._device_id or generate_4bytes_random(),
            ))
        crc = crc32(list(wl_state_data), WLFATFS.UINT32_MAX)
        wl_state_crc = Int32ul.build(crc)
        wl_state = wl_state_data + wl_state_crc
        self.fatfs_binary_image += WLFATFS.WL_STATE_COPY_COUNT * (
            (wl_state +
             (self.sector_size - WLFATFS.WL_STATE_HEADER_SIZE) * b'\xff') +
            (self.wl_state_sectors - 1) * self.sector_size * b'\xff')
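
Both this and the config sector in Example #8 follow the same layout: a fixed-size record, a 4-byte little-endian CRC built with Int32ul, and 0xff fill up to the sector boundary. A standalone sketch of that pattern, using binascii.crc32 and made-up sizes in place of the project's own crc32 helper and constants:

from binascii import crc32
from construct import Int32ul

SECTOR_SIZE = 4096      # assumed sector size
record = b'\x00' * 48   # stand-in for the built WL_STATE_T_DATA record

sector = record + Int32ul.build(crc32(record))
sector += (SECTOR_SIZE - len(sector)) * b'\xff'
assert len(sector) == SECTOR_SIZE
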
Example #11
    def _parse_indirect_block(stream, value):
        # This is an indirect datablock (bigger than 16344, therefore we handle it differently)
        # The value inside the vk entry actually contains a pointer to the buffers containing the data
        big_data_block_header = BIG_DATA_BLOCK.parse(value.value)

        # Go to the start of the segment offset list
        stream.seek(REGF_HEADER_SIZE +
                    big_data_block_header.offset_to_list_of_segments)
        buffer = BytesIO()

        # Read the segments sequentially until we have covered the full size of the VK value
        value_size = value.size
        while value_size > 0:
            data_segment_offset = Int32ul.parse_stream(stream)
            with boomerang_stream(stream) as tmpstream:
                tmpstream.seek(REGF_HEADER_SIZE + 4 + data_segment_offset)
                tmpbuffer = tmpstream.read(min(0x3fd8, value_size))
                value_size -= len(tmpbuffer)
                buffer.write(tmpbuffer)
        buffer.seek(0)
        return buffer.read()
Example #12
except ImportError:
    pass

RISCV_GP_REGS_COUNT = 32
PRSTATUS_SIZE = 204
PRSTATUS_OFFSET_PR_CURSIG = 12
PRSTATUS_OFFSET_PR_PID = 24
PRSTATUS_OFFSET_PR_REG = 72
ELF_GREGSET_T_SIZE = 128

PrStruct = Struct(
    Padding(PRSTATUS_OFFSET_PR_CURSIG), 'pr_cursig' / Int16ul,
    Padding(PRSTATUS_OFFSET_PR_PID - PRSTATUS_OFFSET_PR_CURSIG -
            Int16ul.sizeof()), 'pr_pid' / Int32ul,
    Padding(PRSTATUS_OFFSET_PR_REG - PRSTATUS_OFFSET_PR_PID -
            Int32ul.sizeof()), 'regs' / Int32ul[RISCV_GP_REGS_COUNT],
    Padding(PRSTATUS_SIZE - PRSTATUS_OFFSET_PR_REG - ELF_GREGSET_T_SIZE))


class RiscvMethodsMixin(BaseArchMethodsMixin):
    @staticmethod
    def get_registers_from_stack(data, grows_down):
        # type: (bytes, bool) -> Tuple[list[int], Optional[dict[int, int]]]
        regs = Int32ul[RISCV_GP_REGS_COUNT].parse(data)
        if not grows_down:
            raise ESPCoreDumpLoaderError(
                'Growing up stacks are not supported for now!')
        return regs, None

    @staticmethod
    def build_prstatus_data(tcb_addr,
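
get_registers_from_stack above relies on construct's array syntax: Int32ul[RISCV_GP_REGS_COUNT] parses (and builds) a fixed run of little-endian 32-bit words. A minimal round-trip sketch:

from construct import Int32ul

RISCV_GP_REGS_COUNT = 32
regs = list(range(RISCV_GP_REGS_COUNT))

blob = Int32ul[RISCV_GP_REGS_COUNT].build(regs)
assert len(blob) == 4 * RISCV_GP_REGS_COUNT
assert list(Int32ul[RISCV_GP_REGS_COUNT].parse(blob)) == regs
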
Example #13
class CoreProfileSessionTap(Tap):
    r"""
    Kdebug is a kernel facility for tracing events occurring on a system.
    This header defines reserved debugids, which are 32-bit values that describe
    each event:

    +----------------+----------------+----------------------------+----+
    |   Class (8)    |  Subclass (8)  |          Code (14)         |Func|
    |                |                |                            |(2) |
    +----------------+----------------+----------------------------+----+
    \_________________________________/
            ClassSubclass (CSC)
    \________________________________________________________________00_/
                                    Eventid
    \___________________________________________________________________/
                                    Debugid

    The eventid is a hierarchical ID, indicating which components an event is
    referring to.  The debugid includes an eventid and two function qualifier
    bits, to determine the structural significance of an event (whether it
    starts or ends an interval).

    This tap yields kdebug events.
    """
    IDENTIFIER = 'com.apple.instruments.server.services.coreprofilesessiontap'
    STACKSHOT_HEADER = Int32ul.build(
        int(kcdata_types_enum.KCDATA_BUFFER_BEGIN_STACKSHOT))

    def __init__(self,
                 dvt: DvtSecureSocketProxyService,
                 class_filter: int = None,
                 subclass_filter: int = None):
        """
        :param dvt: Instruments service proxy.
        :param class_filter: Event class to include.
        :param subclass_filter: Event subclass to include.
        """
        self.dvt = dvt
        self.stack_shot = None
        self._thread_map = {}
        self.uuid = str(uuid.uuid4())

        k_filter = 0xffffffff
        if class_filter is not None:
            k_filter = class_filter << KDBG_CLASS_OFFSET
        if subclass_filter is not None:
            k_filter |= subclass_filter << KDBG_SUBCLASS_OFFSET

        config = {
            'tc': [{
                'csd': 128,  # Callstack frame depth.
                'kdf2': {k_filter},  # Kdebug filter, receive all classes.
                'ta': [[3], [0], [2], [1, 1, 0]],  # Actions.
                'tk': 3,  # Kind.
                'uuid': self.uuid,
            }],  # Triggers configs
            'rp':
            100,  # Recording priority
            'bm':
            0,  # Buffer mode.
        }
        super().__init__(dvt, self.IDENTIFIER, config)

    @property
    def thread_map(self):
        return self._thread_map

    @thread_map.setter
    def thread_map(self, parsed_threadmap):
        self._thread_map = {}
        for thread in parsed_threadmap:
            self._thread_map[thread.tid] = ProcessData(thread.pid,
                                                       thread.process)

    def get_stackshot(self) -> typing.Mapping:
        """
        Get a stackshot from the tap.
        """
        if self.stack_shot is not None:
            # The stackshot is sent once per TAP creation, so we cache it.
            return self.stack_shot
        data = self._channel.receive_message()
        while not data.startswith(self.STACKSHOT_HEADER):
            data = self._channel.receive_message()
        self.stack_shot = self.parse_stackshot(data)
        return self.stack_shot

    def dump(self, out: typing.BinaryIO):
        """
        Dump data from core profile session to a file.
        :param out: File object to write data to.
        """
        while True:
            data = self._channel.receive_message()
            if data.startswith(
                    self.STACKSHOT_HEADER) or data.startswith(b'bplist'):
                # Skip non-kernel-trace data.
                continue
            print(f'Receiving trace data ({len(data)}B)')
            out.write(data)
            out.flush()

    def watch_events(self, events_count: int = -1):
        """
        Generator for kdebug events.
        The yielded event contains timestamp (uptime), args (arguments), tid (thread id), debugid, eventid, class,
        subclass, code, func_qualifier (function qualifier).
        :param events_count: Count of events to generate, -1 for unlimited generation.
        """
        events_index = 0
        while events_index != events_count:
            data = self._channel.receive_message()
            if data.startswith(b'bplist'):
                continue
            if data.startswith(self.STACKSHOT_HEADER):
                self.stack_shot = self.parse_stackshot(data)
                continue
            if data.startswith(RAW_VERSION2_BYTES):
                parsed = kperf_data.parse(data)
                self.thread_map = parsed.threadmap
                traces = parsed.traces
            else:
                traces = Array(len(data) // kd_buf.sizeof(),
                               kd_buf).parse(data)

            for event in traces:
                if events_index == events_count:
                    break
                yield event
                events_index += 1

    @staticmethod
    def parse_stackshot(data):
        parsed = kcdata.parse(data)
        # Required for removing streams from construct output.
        stackshot = clean(parsed)
        parsed_stack_shot = {}
        jsonify_parsed_stackshot(stackshot, parsed_stack_shot)
        return parsed_stack_shot[predefined_names[
            kcdata_types_enum.KCDATA_BUFFER_BEGIN_STACKSHOT]]

    def parse_event_time(self, timestamp):
        time_info = self.stack_shot['mach_timebase_info']
        offset_usec = (((timestamp - self.stack_shot['mach_absolute_time']) *
                        time_info['numer']) / (time_info['denom'] * 1000))
        return datetime.fromtimestamp(
            (self.stack_shot['usecs_since_epoch'] + offset_usec) / 1000000)
Example #14
def apply_transaction_logs(hive_path,
                           transaction_log_path,
                           restored_hive_path=None,
                           verbose=False):
    restored_hive_buffer = BytesIO(open(hive_path, 'rb').read())

    if not restored_hive_path:
        restored_hive_path = f'{hive_path}.restored'

    registry_hive = RegistryHive(hive_path)
    log_size = os.path.getsize(transaction_log_path)
    expected_sequence_number = registry_hive.header.secondary_sequence_num

    logger.info(f'Log Size: {log_size}')

    recovered_dirty_pages_count = 0
    with open(transaction_log_path, 'rb') as transaction_log:
        # Skip REGF header
        transaction_log.seek(512)
        hvle_block_start_offset = transaction_log.tell()

        while hvle_block_start_offset < log_size:
            logger.info(f'Parsing hvle block at {hvle_block_start_offset}')

            parsed_hvle_block = TRANSACTION_LOG.parse_stream(transaction_log)
            logger.info(
                f'Currently at start of dirty pages: {transaction_log.tell()}')
            logger.info(f'seq number: {parsed_hvle_block.sequence_number}')
            logger.info(f'dirty pages: {parsed_hvle_block.dirty_pages_count}')

            if parsed_hvle_block.sequence_number == expected_sequence_number:
                logger.info(f'This hvle block holds valid dirty blocks')
                expected_sequence_number += 1
            else:
                logger.info(f'This block is invalid. stopping.')
                break

            for dirty_page_entry in parsed_hvle_block.dirty_pages_references:
                # Write the actual dirty page to the original hive
                target_offset = REGF_HEADER_SIZE + dirty_page_entry.offset
                restored_hive_buffer.seek(target_offset)
                dirty_page_buffer = transaction_log.read(dirty_page_entry.size)
                restored_hive_buffer.write(dirty_page_buffer)
                logger.info(
                    f'Restored {dirty_page_entry.size} bytes to offset {target_offset}'
                )
                recovered_dirty_pages_count += 1

            # TODO: update hive flags from hvle to original header

            # Update sequence numbers are at offsets 4 & 8:
            restored_hive_buffer.seek(4)
            restored_hive_buffer.write(Int32ul.build(expected_sequence_number))
            restored_hive_buffer.write(Int32ul.build(expected_sequence_number))

            # Update hbins size from hvle to original header at offset 40
            restored_hive_buffer.seek(40)
            restored_hive_buffer.write(
                Int32ul.build(parsed_hvle_block.hive_bin_size))

            transaction_log.seek(hvle_block_start_offset +
                                 parsed_hvle_block.log_size)
            hvle_block_start_offset = hvle_block_start_offset + parsed_hvle_block.log_size

    # Write to disk the modified registry hive
    with open(restored_hive_path, 'wb') as f:
        restored_hive_buffer.seek(0)
        f.write(restored_hive_buffer.read())

    return restored_hive_path, recovered_dirty_pages_count
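
The header patching above is just in-place rewrites of fixed-offset little-endian fields (sequence numbers at offsets 4 and 8, hbins size at offset 40). A minimal sketch of the pattern on a throwaway buffer, not the real REGF layout:

from io import BytesIO
from construct import Int32ul

buf = BytesIO(bytes(64))        # stand-in for the hive header
buf.seek(4)
buf.write(Int32ul.build(7))     # primary sequence number
buf.write(Int32ul.build(7))     # secondary sequence number

buf.seek(4)
assert Int32ul.parse(buf.read(4)) == 7
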
Example #15
    def uint32(self):
        return Int32ul.parse(self.read(4))
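
This helper presumably lives on a stream-reader class whose read() returns raw bytes; a self-contained sketch with a hypothetical BytesIO-backed reader:

from io import BytesIO
from construct import Int32ul

class Reader:
    # hypothetical minimal reader, just enough to exercise uint32()
    def __init__(self, data: bytes):
        self._stream = BytesIO(data)

    def read(self, n: int) -> bytes:
        return self._stream.read(n)

    def uint32(self) -> int:
        return Int32ul.parse(self.read(4))

assert Reader(b'\x2a\x00\x00\x00').uint32() == 42
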
Example #16
    def _extract_bin_corefile(self):
        """
        Creates core dump ELF file
        """
        tcbsz_aligned = self._get_aligned_size(self.header.tcbsz)

        coredump_data_struct = Struct(
            'tasks' / GreedyRange(
                AlignedStruct(
                    4,
                    'task_header' / TaskHeader,
                    'tcb' / Bytes(self.header.tcbsz),
                    'stack' / Bytes(
                        abs_(this.task_header.stack_top -
                             this.task_header.stack_end)),
                )), 'mem_seg_headers' /
            MemSegmentHeader[self.core_src.header.segs_num])

        core_elf = ESPCoreDumpElfFile()
        notes = b''
        core_dump_info_notes = b''
        task_info_notes = b''

        coredump_data = coredump_data_struct.parse(self.core_src.data)
        for i, task in enumerate(coredump_data.tasks):
            stack_len_aligned = self._get_aligned_size(
                abs(task.task_header.stack_top - task.task_header.stack_end))
            task_status_kwargs = {
                'task_index':
                i,
                'task_flags':
                TASK_STATUS_CORRECT,
                'task_tcb_addr':
                task.task_header.tcb_addr,
                'task_stack_start':
                min(task.task_header.stack_top, task.task_header.stack_end),
                'task_stack_len':
                stack_len_aligned,
                'task_name':
                Padding(16).build(
                    {}
                )  # currently we don't have task_name, keep it as padding
            }

            # Write TCB
            try:
                if self.target_method_cls.tcb_is_sane(
                        task.task_header.tcb_addr, tcbsz_aligned):
                    core_elf.add_segment(task.task_header.tcb_addr, task.tcb,
                                         ElfFile.PT_LOAD,
                                         ElfSegment.PF_R | ElfSegment.PF_W)
                elif task.task_header.tcb_addr and self.target_method_cls.addr_is_fake(
                        task.task_header.tcb_addr):
                    task_status_kwargs[
                        'task_flags'] |= TASK_STATUS_TCB_CORRUPTED
            except ESPCoreDumpLoaderError as e:
                logging.warning(
                    'Skip TCB {} bytes @ 0x{:x}. (Reason: {})'.format(
                        tcbsz_aligned, task.task_header.tcb_addr, e))

            # Write stack
            try:
                if self.target_method_cls.stack_is_sane(
                        task_status_kwargs['task_stack_start']):
                    core_elf.add_segment(
                        task_status_kwargs['task_stack_start'], task.stack,
                        ElfFile.PT_LOAD, ElfSegment.PF_R | ElfSegment.PF_W)
                elif task_status_kwargs['task_stack_start'] \
                        and self.target_method_cls.addr_is_fake(task_status_kwargs['task_stack_start']):
                    task_status_kwargs[
                        'task_flags'] |= TASK_STATUS_TCB_CORRUPTED
                    core_elf.add_segment(
                        task_status_kwargs['task_stack_start'], task.stack,
                        ElfFile.PT_LOAD, ElfSegment.PF_R | ElfSegment.PF_W)
            except ESPCoreDumpLoaderError as e:
                logging.warning(
                    'Skip task\'s ({:x}) stack {} bytes @ 0x{:x}. (Reason: {})'
                    .format(task_status_kwargs['task_tcb_addr'],
                            task_status_kwargs['task_stack_len'],
                            task_status_kwargs['task_stack_start'], e))

            try:
                logging.debug('Stack start_end: 0x{:x} @ 0x{:x}'.format(
                    task.task_header.stack_top, task.task_header.stack_end))
                task_regs, extra_regs = self.arch_method_cls.get_registers_from_stack(
                    task.stack,
                    task.task_header.stack_end > task.task_header.stack_top)
            except Exception as e:
                raise ESPCoreDumpLoaderError(str(e))

            task_info_notes += self._build_note_section(
                'TASK_INFO', ESPCoreDumpElfFile.PT_TASK_INFO,
                EspTaskStatus.build(task_status_kwargs))
            notes += self._build_note_section(
                'CORE', ElfFile.PT_LOAD,
                self.arch_method_cls.build_prstatus_data(
                    task.task_header.tcb_addr, task_regs))

            if extra_regs and len(core_dump_info_notes) == 0:
                # actually there will be only one such note - for crashed task
                core_dump_info_notes += self._build_note_section(
                    'ESP_CORE_DUMP_INFO', ESPCoreDumpElfFile.PT_INFO,
                    Int32ul.build(self.header.ver))

                exc_regs = []
                for reg_id in extra_regs:
                    exc_regs.extend([reg_id, extra_regs[reg_id]])
                _regs = [task.task_header.tcb_addr] + exc_regs
                core_dump_info_notes += self._build_note_section(
                    'EXTRA_INFO', ESPCoreDumpElfFile.PT_EXTRA_INFO,
                    Int32ul[1 + len(exc_regs)].build(_regs))

        if self.dump_ver == self.BIN_V2:
            for header in coredump_data.mem_seg_headers:
                logging.debug('Read memory segment {} bytes @ 0x{:x}'.format(
                    header.mem_sz, header.mem_start))
                core_elf.add_segment(header.mem_start, header.data,
                                     ElfFile.PT_LOAD,
                                     ElfSegment.PF_R | ElfSegment.PF_W)

        # add notes
        try:
            core_elf.add_segment(0, notes, ElfFile.PT_NOTE, 0)
        except ESPCoreDumpLoaderError as e:
            logging.warning(
                'Skip NOTES segment {:d} bytes @ 0x{:x}. (Reason: {})'.format(
                    len(notes), 0, e))
        # add core dump info notes
        try:
            core_elf.add_segment(0, core_dump_info_notes, ElfFile.PT_NOTE, 0)
        except ESPCoreDumpLoaderError as e:
            logging.warning(
                'Skip core dump info NOTES segment {:d} bytes @ 0x{:x}. (Reason: {})'
                .format(len(core_dump_info_notes), 0, e))
        try:
            core_elf.add_segment(0, task_info_notes, ElfFile.PT_NOTE, 0)
        except ESPCoreDumpLoaderError as e:
            logging.warning(
                'Skip failed tasks info NOTES segment {:d} bytes @ 0x{:x}. (Reason: {})'
                .format(len(task_info_notes), 0, e))
        # dump core ELF
        core_elf.e_type = ElfFile.ET_CORE
        core_elf.e_machine = ESPCoreDumpElfFile.EM_XTENSA
        core_elf.dump(self.core_elf_file.name)
Example #17
    def iter_values(self, as_json=False, max_len=MAX_LEN):
        """
        Get the values of a subkey. Will raise if no values exist
        :param as_json: Whether to normalize the data as JSON or not
        :param max_len: Max length of value to return
        :return: List of values for the subkey
        """
        if not self.values_count:
            return

        # Get the offset of the values key. We skip 4 because of Cell Header
        target_offset = REGF_HEADER_SIZE + 4 + self.header.values_list_offset
        self._stream.seek(target_offset)

        for _ in range(self.values_count):
            is_corrupted = False
            try:
                vk_offset = Int32ul.parse_stream(self._stream)
            except StreamError:
                logger.info(
                    f'Skipping bad registry VK at {self._stream.tell()}')
                raise RegistryParsingException(
                    f'Bad registry VK at {self._stream.tell()}')

            with boomerang_stream(self._stream) as substream:
                actual_vk_offset = REGF_HEADER_SIZE + 4 + vk_offset
                substream.seek(actual_vk_offset)
                try:
                    vk = VALUE_KEY.parse_stream(substream)
                except (ConstError, StreamError):
                    logger.error(
                        f'Could not parse VK at {substream.tell()}, registry hive is probably corrupted.'
                    )
                    return

                value = self.read_value(vk, substream)

                if vk.name_size == 0:
                    value_name = '(default)'
                elif vk.flags.VALUE_COMP_NAME:
                    # Compressed (ASCII) value name
                    value_name = vk.name.decode('ascii', errors='replace')
                else:
                    # Unicode (UTF-16) value name
                    value_name = vk.name.decode('utf-16-le', errors='replace')
                    logger.debug(
                        f'Unicode value name identified: "{value_name}"')

                # If the value is bigger than this value, it means this is a DEVPROP structure
                # https://doxygen.reactos.org/d0/dba/devpropdef_8h_source.html
                # https://sourceforge.net/p/mingw-w64/mingw-w64/ci/668a1d3e85042c409e0c292e621b3dc0aa26177c/tree/
                # mingw-w64-headers/include/devpropdef.h?diff=dd86a3b7594dadeef9d6a37c4b6be3ca42ef7e94
                # We currently do not support these, but also wouldn't like to yield this as binary data
                # This int casting will always work because the data_type is construct's EnumIntegerString
                # TODO: Add actual parsing
                if int(vk.data_type) > 0xffff0000:
                    data_type = int(vk.data_type) & 0xffff
                    continue

                # Skip this unknown data type, research pending :)
                # TODO: Add actual parsing
                if int(vk.data_type) == 0x200000:
                    continue

                data_type = str(vk.data_type)
                if data_type in ['REG_SZ', 'REG_EXPAND', 'REG_EXPAND_SZ']:
                    if vk.data_size >= 0x80000000:
                        # data is contained in the data_offset field
                        value.size -= 0x80000000
                        actual_value = vk.data_offset
                    elif vk.data_size > 0x3fd8 and value.value[:2] == b'db':
                        data = self._parse_indirect_block(substream, value)
                        actual_value = try_decode_binary(data, as_json=as_json)
                    else:
                        actual_value = try_decode_binary(value.value,
                                                         as_json=as_json)
                elif data_type in ['REG_BINARY', 'REG_NONE']:
                    if vk.data_size >= 0x80000000:
                        # data is contained in the data_offset field
                        actual_value = vk.data_offset
                    elif vk.data_size > 0x3fd8 and value.value[:2] == b'db':
                        try:
                            actual_value = self._parse_indirect_block(
                                substream, value)

                            actual_value = try_decode_binary(
                                actual_value,
                                as_json=True) if as_json else actual_value
                        except ConstError:
                            logger.error(f'Bad value at {actual_vk_offset}')
                            continue
                    else:
                        # Return the actual data
                        actual_value = binascii.b2a_hex(value.value).decode(
                        )[:max_len] if as_json else value.value
                elif data_type == 'REG_SZ':
                    actual_value = try_decode_binary(value.value,
                                                     as_json=as_json)
                elif data_type == 'REG_DWORD':
                    # If the data size is bigger than 0x80000000, data is actually stored in the VK data offset.
                    actual_value = vk.data_offset if vk.data_size >= 0x80000000 else Int32ul.parse(
                        value.value)
                elif data_type == 'REG_QWORD':
                    actual_value = vk.data_offset if vk.data_size >= 0x80000000 else Int64ul.parse(
                        value.value)
                elif data_type == 'REG_MULTI_SZ':
                    parsed_value = GreedyRange(CString('utf-16-le')).parse(
                        value.value)
                    # Because the ListContainer object returned by Construct cannot be turned into a list,
                    # we do this trick
                    actual_value = [x for x in parsed_value if x]
                # We currently dump this as a hex string or raw bytes
                # TODO: Add actual parsing
                elif data_type in [
                        'REG_RESOURCE_REQUIREMENTS_LIST', 'REG_RESOURCE_LIST'
                ]:
                    actual_value = binascii.b2a_hex(value.value).decode(
                    )[:max_len] if as_json else value.value
                else:
                    actual_value = try_decode_binary(value.value,
                                                     as_json=as_json)
                yield Value(name=value_name,
                            value_type=str(value.value_type),
                            value=actual_value,
                            is_corrupted=is_corrupted)
Example #18
def unpack_tcp(buf, channels=None):
    while len(buf):
        size = Int32ul.parse(buf)
        msg, buf = buf[4:size + 4], buf[size + 4:]
        yield unpack(msg, channels)
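
Together with pack_tcp from Example #7, this implements simple length-prefixed framing: every message is preceded by a 4-byte little-endian length. A round-trip sketch with the per-message pack/unpack codecs stubbed out so the messages are plain bytes:

from construct import Int32ul

def pack_tcp(msgs):
    # identity codec: msgs are already bytes
    buf = b''
    for msg in msgs:
        buf += Int32ul.build(len(msg)) + msg
    return buf

def unpack_tcp(buf):
    while len(buf):
        size = Int32ul.parse(buf)
        msg, buf = buf[4:size + 4], buf[size + 4:]
        yield msg

assert list(unpack_tcp(pack_tcp([b'hello', b'world']))) == [b'hello', b'world']
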