Example #1
def encrypt_sng(data, key):
    header = Int32ul.build(74) + Int32ul.build(3)
    iv = bytes(16)
    payload = Int32ul.build(len(data))
    payload += zlib.compress(data, zlib.Z_BEST_COMPRESSION)
    encrypted = aes_sng(key, iv).encrypt(pad(payload))[:len(payload)]
    return header + iv + encrypted + bytes(56)
Example #2
def encrypt_sng(data, key):
    header = Int32ul.build(74) + Int32ul.build(3)
    iv = bytes(16)
    payload = Int32ul.build(len(data))
    payload += zlib.compress(data, zlib.Z_BEST_COMPRESSION)
    encryptor = aes_sng(key, iv).encryptor()
    encrypted = encryptor.update(payload) + encryptor.finalize()
    return header + iv + encrypted + bytes(56)
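The two variants above differ only in cipher API: Example #1 expects a PyCryptodome-style object with an encrypt() method, Example #2 the cryptography library's encryptor() interface. A minimal sketch of the aes_sng helper for the latter, assuming AES in CTR mode (an assumption; a stream mode is also what makes Example #1's pad-then-truncate pattern lossless):

from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

def aes_sng(key: bytes, iv: bytes) -> Cipher:
    # AES-CTR keystream cipher; the 16-byte zero IV above serves as the
    # initial counter block. CTR mode here is an assumption.
    return Cipher(algorithms.AES(key), modes.CTR(iv))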
Example #3
def _parse_hvle_block(hive_path, transaction_log_stream, log_size, expected_sequence_number):
    """

    :param hive_path:
    :param transaction_log_stream:
    :param log_size:
    :param expected_sequence_number:
    :return:
    """
    recovered_dirty_pages_count = 0
    with open(hive_path, 'rb') as hive_file:
        restored_hive_buffer = BytesIO(hive_file.read())

    hvle_block_start_offset = transaction_log_stream.tell()

    while hvle_block_start_offset < log_size:
        logger.info(f'Parsing hvle block at {hex(hvle_block_start_offset)}')
        with boomerang_stream(transaction_log_stream) as x:
            if x.read(4) != b'HvLE':
                logger.info('Reached a non-HvLE object, stopping')
                break

        parsed_hvle_block = TRANSACTION_LOG.parse_stream(transaction_log_stream)
        logger.info(f'Currently at start of dirty pages: {transaction_log_stream.tell()}')
        logger.info(f'seq number: {parsed_hvle_block.sequence_number}')
        logger.info(f'dirty pages: {parsed_hvle_block.dirty_pages_count}')

        if parsed_hvle_block.sequence_number == expected_sequence_number:
            logger.info('This HvLE block holds valid dirty blocks')
            expected_sequence_number += 1

        for dirty_page_entry in parsed_hvle_block.dirty_pages_references:
            # Write the actual dirty page to the original hive
            target_offset = REGF_HEADER_SIZE + dirty_page_entry.offset
            restored_hive_buffer.seek(target_offset)
            transaction_log_stream_offset = transaction_log_stream.tell()
            dirty_page_buffer = transaction_log_stream.read(dirty_page_entry.size)
            restored_hive_buffer.write(dirty_page_buffer)
            logger.info(f'Restored {dirty_page_entry.size} bytes to offset {hex(target_offset)} '
                        f'from offset {hex(transaction_log_stream_offset)}')
            recovered_dirty_pages_count += 1

        # TODO: update hive flags from hvle to original header

        # The hive's sequence numbers live at offsets 4 and 8; update both:
        restored_hive_buffer.seek(4)
        restored_hive_buffer.write(Int32ul.build(expected_sequence_number))
        restored_hive_buffer.write(Int32ul.build(expected_sequence_number))

        # Copy the hive bins size from the HvLE block into the header at offset 40
        restored_hive_buffer.seek(40)
        restored_hive_buffer.write(Int32ul.build(parsed_hvle_block.hive_bin_size))

        hvle_block_start_offset += parsed_hvle_block.log_size
        transaction_log_stream.seek(hvle_block_start_offset)

    return restored_hive_buffer, recovered_dirty_pages_count
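A hypothetical invocation sketch (file names and the sequence number are illustrative): the caller is expected to position the log stream past the 512-byte base block first, exactly as Example #9 does before running the same loop inline.

import os

with open('SYSTEM.LOG1', 'rb') as log_stream:
    log_stream.seek(512)  # skip the REGF base block
    buffer, pages_count = _parse_hvle_block('SYSTEM', log_stream,
                                            os.path.getsize('SYSTEM.LOG1'),
                                            expected_sequence_number=2)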
Example #4
def add_signature(infile, outfile, signature):
    """Add a signature to a PE file."""
    # First copy infile to outfile
    infile.seek(0)
    while True:
        block = infile.read(1024)
        if not block:
            break
        outfile.write(block)

    outfile.seek(0)
    pe = pefile.parse_stream(outfile)
    if not pe.optional_header.certtable_info:
        raise ValueError(
            "Can't add a signature into this file (not enough RVA sections)"
        )

    cert = certificate.build(
        {
            "size": len(signature) + 8,
            "revision": "REV2",
            "certtype": "PKCS7",
            "data": signature,
        }
    )

    # If we already have signatures, then add the new one to the end of the file
    if pe.optional_header.certtable_offset:
        certs_offset = pe.optional_header.certtable_offset
        certs_size = pe.optional_header.certtable_size + len(cert)
        old_certs_size = pe.optional_header.certtable_size
    else:
        # Figure out the end of the file
        outfile.seek(0, 2)
        certs_offset = outfile.tell()
        # Pad to an 8-byte boundary
        if certs_offset % 8:
            certs_offset += 8 - (certs_offset % 8)
        certs_size = len(cert)
        old_certs_size = 0

    # Update the certificate table info
    outfile.seek(pe.optional_header.certtable_info)
    outfile.write(Int32ul.build(certs_offset))
    outfile.write(Int32ul.build(certs_size))

    # Add the signature
    outfile.seek(certs_offset + old_certs_size)
    outfile.write(cert)

    # Update the checksum
    checksum = calc_checksum(outfile, pe.optional_header.checksum_offset)
    outfile.seek(pe.optional_header.checksum_offset)
    outfile.write(Int32ul.build(checksum))
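calc_checksum is not shown here; below is a hedged sketch of the standard PE checksum it presumably computes (the dword variant pefile popularized): a 32-bit carry-folding sum over the file, skipping the checksum field itself, folded to 16 bits and added to the file length.

import struct

def calc_checksum(fileobj, checksum_offset):
    fileobj.seek(0)
    data = fileobj.read()
    file_len = len(data)
    if file_len % 4:  # pad so whole dwords can be read
        data += b'\x00' * (4 - file_len % 4)
    checksum = 0
    for i in range(0, len(data), 4):
        if i == checksum_offset:  # skip the stored checksum field
            continue
        checksum += struct.unpack_from('<I', data, i)[0]
        checksum = (checksum & 0xffffffff) + (checksum >> 32)
    checksum = (checksum & 0xffff) + (checksum >> 16)
    checksum = (checksum + (checksum >> 16)) & 0xffff
    return checksum + file_len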
Example #5
def pack_tcp(msgs, channels=None):
    buf = b''

    for msg in msgs:
        msg = pack(msg, channels)
        buf += Int32ul.build(len(msg)) + msg

    return buf
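A natural companion is the inverse operation; this sketch assumes the module's pack() has an unpack() counterpart with the same (data, channels) signature:

def unpack_tcp(buf, channels=None):
    msgs = []
    offset = 0
    while offset < len(buf):
        # each record is a little-endian u32 length prefix plus payload
        length = Int32ul.parse(buf[offset:offset + 4])
        offset += 4
        msgs.append(unpack(buf[offset:offset + length], channels))
        offset += length
    return msgs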
Example #6
    def _add_config_sector(self) -> None:
        wl_config_data = WLFATFS.WL_CONFIG_T_DATA.build(
            dict(start_addr=0,
                 full_mem_size=self.partition_size,
                 page_size=self.sector_size,
                 sector_size=self.sector_size,
                 updaterate=self._update_rate,
                 wr_size=16,
                 version=self._version,
                 temp_buff_size=self._temp_buff_size))

        crc = crc32(list(wl_config_data), WLFATFS.UINT32_MAX)
        wl_config_crc = Int32ul.build(crc)

        # add three 4-byte zeros to align the structure
        wl_config = wl_config_data + wl_config_crc + Int32ul.build(
            0) + Int32ul.build(0) + Int32ul.build(0)

        self.fatfs_binary_image += (
            wl_config +
            (self.sector_size - WLFATFS.WL_CONFIG_HEADER_SIZE) * b'\xff')
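The crc32 helper takes the data as a list of ints plus an explicit initial value. Below is a hedged sketch of one common bitwise formulation (reflected CRC-32, polynomial 0xEDB88320); whether WLFATFS's helper matches this exact variant, including final-XOR behaviour, is an assumption:

def crc32(data, crc):
    # byte-at-a-time reflected CRC-32 with a caller-supplied initial value
    for byte in data:
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ (0xEDB88320 if crc & 1 else 0)
    return crc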
Example #7
    def _encode(self, obj: Construct, context: Container, path: str) -> bytes:
        """Encodes a ``FormID`` back to bytes.

        Args:
            obj (Construct): The construct to encode
            context (Container): The contextual container to use
            path (str): The construct path

        Returns:
            bytes: The resulting encoded bytes
        """

        return Int32ul.build(obj.form_id)
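For symmetry, a plausible decoding counterpart; the FormID constructor call is an assumption about the surrounding codebase, inferred from the attribute the encoder reads:

    def _decode(self, obj: bytes, context: Container, path: str):
        # Int32ul.parse yields the integer form id; wrapping it back into
        # a FormID is assumed from the .form_id accessed in _encode.
        return FormID(form_id=Int32ul.parse(obj))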
Example #8
    def _add_state_sectors(self) -> None:
        wl_state_data = WLFATFS.WL_STATE_T_DATA.build(
            dict(
                pos=0,
                max_pos=self.plain_fat_sectors + WLFATFS.DUMMY_SECTORS_COUNT,
                move_count=0,
                access_count=0,
                max_count=self._update_rate,
                block_size=self.sector_size,
                version=self._version,
                device_id=self._device_id or generate_4bytes_random(),
            ))
        crc = crc32(list(wl_state_data), WLFATFS.UINT32_MAX)
        wl_state_crc = Int32ul.build(crc)
        wl_state = wl_state_data + wl_state_crc
        self.fatfs_binary_image += WLFATFS.WL_STATE_COPY_COUNT * (
            (wl_state +
             (self.sector_size - WLFATFS.WL_STATE_HEADER_SIZE) * b'\xff') +
            (self.wl_state_sectors - 1) * self.sector_size * b'\xff')
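generate_4bytes_random is referenced above but not shown; a minimal sketch (an assumption; any 32-bit random value satisfies the call site):

import random

def generate_4bytes_random() -> int:
    # random 32-bit device id fallback
    return random.getrandbits(32)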
Example #9
def apply_transaction_logs(hive_path,
                           transaction_log_path,
                           restored_hive_path=None,
                           verbose=False):
    with open(hive_path, 'rb') as hive_file:
        restored_hive_buffer = BytesIO(hive_file.read())

    if not restored_hive_path:
        restored_hive_path = f'{hive_path}.restored'

    registry_hive = RegistryHive(hive_path)
    log_size = os.path.getsize(transaction_log_path)
    expected_sequence_number = registry_hive.header.secondary_sequence_num

    logger.info(f'Log Size: {log_size}')

    recovered_dirty_pages_count = 0
    with open(transaction_log_path, 'rb') as transaction_log:
        # Skip REGF header
        transaction_log.seek(512)
        hvle_block_start_offset = transaction_log.tell()

        while hvle_block_start_offset < log_size:
            logger.info(f'Parsing hvle block at {hvle_block_start_offset}')

            parsed_hvle_block = TRANSACTION_LOG.parse_stream(transaction_log)
            logger.info(
                f'Currently at start of dirty pages: {transaction_log.tell()}')
            logger.info(f'seq number: {parsed_hvle_block.sequence_number}')
            logger.info(f'dirty pages: {parsed_hvle_block.dirty_pages_count}')

            if parsed_hvle_block.sequence_number == expected_sequence_number:
                logger.info('This HvLE block holds valid dirty blocks')
                expected_sequence_number += 1
            else:
                logger.info('This block is invalid, stopping')
                break

            for dirty_page_entry in parsed_hvle_block.dirty_pages_references:
                # Write the actual dirty page to the original hive
                target_offset = REGF_HEADER_SIZE + dirty_page_entry.offset
                restored_hive_buffer.seek(target_offset)
                dirty_page_buffer = transaction_log.read(dirty_page_entry.size)
                restored_hive_buffer.write(dirty_page_buffer)
                logger.info(
                    f'Restored {dirty_page_entry.size} bytes to offset {target_offset}'
                )
                recovered_dirty_pages_count += 1

            # TODO: update hive flags from hvle to original header

            # The hive's sequence numbers live at offsets 4 and 8; update both:
            restored_hive_buffer.seek(4)
            restored_hive_buffer.write(Int32ul.build(expected_sequence_number))
            restored_hive_buffer.write(Int32ul.build(expected_sequence_number))

            # Copy the hive bins size from the HvLE block into the header at offset 40
            restored_hive_buffer.seek(40)
            restored_hive_buffer.write(
                Int32ul.build(parsed_hvle_block.hive_bin_size))

            hvle_block_start_offset += parsed_hvle_block.log_size
            transaction_log.seek(hvle_block_start_offset)

    # Write to disk the modified registry hive
    with open(restored_hive_path, 'wb') as f:
        restored_hive_buffer.seek(0)
        f.write(restored_hive_buffer.read())

    return restored_hive_path, recovered_dirty_pages_count
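An illustrative call (paths are hypothetical):

restored_path, recovered = apply_transaction_logs('SYSTEM', 'SYSTEM.LOG1')
print(f'Recovered {recovered} dirty pages into {restored_path}')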
Example #10
class CoreProfileSessionTap(Tap):
    r"""
    Kdebug is a kernel facility for tracing events occurring on a system.
    This header defines reserved debugids, which are 32-bit values that describe
    each event:

    +----------------+----------------+----------------------------+----+
    |   Class (8)    |  Subclass (8)  |          Code (14)         |Func|
    |                |                |                            |(2) |
    +----------------+----------------+----------------------------+----+
    \_________________________________/
            ClassSubclass (CSC)
    \________________________________________________________________00_/
                                    Eventid
    \___________________________________________________________________/
                                    Debugid

    The eventid is a hierarchical ID, indicating which components an event is
    referring to.  The debugid includes an eventid and two function qualifier
    bits, to determine the structural significance of an event (whether it
    starts or ends an interval).

    This tap yields kdebug events.
    """
    IDENTIFIER = 'com.apple.instruments.server.services.coreprofilesessiontap'
    STACKSHOT_HEADER = Int32ul.build(
        int(kcdata_types_enum.KCDATA_BUFFER_BEGIN_STACKSHOT))

    def __init__(self,
                 dvt: DvtSecureSocketProxyService,
                 class_filter: int = None,
                 subclass_filter: int = None):
        """
        :param dvt: Instruments service proxy.
        :param class_filter: Event class to include.
        :param subclass_filter: Event subclass to include.
        """
        self.dvt = dvt
        self.stack_shot = None
        self._thread_map = {}
        self.uuid = str(uuid.uuid4())

        k_filter = 0xffffffff
        if class_filter is not None:
            k_filter = class_filter << KDBG_CLASS_OFFSET
        if subclass_filter is not None:
            k_filter |= subclass_filter << KDBG_SUBCLASS_OFFSET

        config = {
            'tc': [{
                'csd': 128,  # Callstack frame depth.
                'kdf2': {k_filter},  # Kdebug filter; defaults to all classes.
                'ta': [[3], [0], [2], [1, 1, 0]],  # Actions.
                'tk': 3,  # Kind.
                'uuid': self.uuid,
            }],  # Triggers configs
            'rp': 100,  # Recording priority
            'bm': 0,  # Buffer mode.
        }
        super().__init__(dvt, self.IDENTIFIER, config)

    @property
    def thread_map(self):
        return self._thread_map

    @thread_map.setter
    def thread_map(self, parsed_threadmap):
        self._thread_map = {}
        for thread in parsed_threadmap:
            self._thread_map[thread.tid] = ProcessData(thread.pid,
                                                       thread.process)

    def get_stackshot(self) -> typing.Mapping:
        """
        Get a stackshot from the tap.
        """
        if self.stack_shot is not None:
            # The stackshot is sent once per tap creation, so we cache it.
            return self.stack_shot
        data = self._channel.receive_message()
        while not data.startswith(self.STACKSHOT_HEADER):
            data = self._channel.receive_message()
        self.stack_shot = self.parse_stackshot(data)
        return self.stack_shot

    def dump(self, out: typing.BinaryIO):
        """
        Dump data from core profile session to a file.
        :param out: File object to write data to.
        """
        while True:
            data = self._channel.receive_message()
            if data.startswith(
                    self.STACKSHOT_HEADER) or data.startswith(b'bplist'):
                # Skip anything that is not kernel trace data.
                continue
            print(f'Receiving trace data ({len(data)}B)')
            out.write(data)
            out.flush()

    def watch_events(self, events_count: int = -1):
        """
        Generator for kdebug events.
        The yielded event contains timestamp (uptime), args (arguments), tid (thread id), debugid, eventid, class,
        subclass, code, func_qualifier (function qualifier).
        :param events_count: Count of events to generate, -1 for unlimited generation.
        """
        events_index = 0
        while events_index != events_count:
            data = self._channel.receive_message()
            if data.startswith(b'bplist'):
                continue
            if data.startswith(self.STACKSHOT_HEADER):
                self.stack_shot = self.parse_stackshot(data)
                continue
            if data.startswith(RAW_VERSION2_BYTES):
                parsed = kperf_data.parse(data)
                self.thread_map = parsed.threadmap
                traces = parsed.traces
            else:
                traces = Array(len(data) // kd_buf.sizeof(),
                               kd_buf).parse(data)

            for event in traces:
                if events_index == events_count:
                    break
                yield event
                events_index += 1

    @staticmethod
    def parse_stackshot(data):
        parsed = kcdata.parse(data)
        # Required for removing streams from construct output.
        stackshot = clean(parsed)
        parsed_stack_shot = {}
        jsonify_parsed_stackshot(stackshot, parsed_stack_shot)
        return parsed_stack_shot[predefined_names[
            kcdata_types_enum.KCDATA_BUFFER_BEGIN_STACKSHOT]]

    def parse_event_time(self, timestamp):
        time_info = self.stack_shot['mach_timebase_info']
        offset_usec = (((timestamp - self.stack_shot['mach_absolute_time']) *
                        time_info['numer']) / (time_info['denom'] * 1000))
        return datetime.fromtimestamp(
            (self.stack_shot['usecs_since_epoch'] + offset_usec) / 1000000)
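A hedged usage sketch, assuming a connected DvtSecureSocketProxyService named dvt and that Tap provides context-manager entry as elsewhere in this codebase:

with CoreProfileSessionTap(dvt) as tap:
    for event in tap.watch_events(events_count=5):
        print(event.eventid, event.tid)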
Example #11
    def _extract_bin_corefile(self):
        """
        Creates core dump ELF file
        """
        tcbsz_aligned = self._get_aligned_size(self.header.tcbsz)

        coredump_data_struct = Struct(
            'tasks' / GreedyRange(
                AlignedStruct(
                    4,
                    'task_header' / TaskHeader,
                    'tcb' / Bytes(self.header.tcbsz),
                    'stack' / Bytes(
                        abs_(this.task_header.stack_top -
                             this.task_header.stack_end)),
                )),
            'mem_seg_headers' / MemSegmentHeader[self.core_src.header.segs_num],
        )

        core_elf = ESPCoreDumpElfFile()
        notes = b''
        core_dump_info_notes = b''
        task_info_notes = b''

        coredump_data = coredump_data_struct.parse(self.core_src.data)
        for i, task in enumerate(coredump_data.tasks):
            stack_len_aligned = self._get_aligned_size(
                abs(task.task_header.stack_top - task.task_header.stack_end))
            task_status_kwargs = {
                'task_index': i,
                'task_flags': TASK_STATUS_CORRECT,
                'task_tcb_addr': task.task_header.tcb_addr,
                'task_stack_start': min(task.task_header.stack_top,
                                        task.task_header.stack_end),
                'task_stack_len': stack_len_aligned,
                # currently we don't have task_name, keep it as padding
                'task_name': Padding(16).build({}),
            }

            # Write TCB
            try:
                if self.target_method_cls.tcb_is_sane(
                        task.task_header.tcb_addr, tcbsz_aligned):
                    core_elf.add_segment(task.task_header.tcb_addr, task.tcb,
                                         ElfFile.PT_LOAD,
                                         ElfSegment.PF_R | ElfSegment.PF_W)
                elif task.task_header.tcb_addr and self.target_method_cls.addr_is_fake(
                        task.task_header.tcb_addr):
                    task_status_kwargs[
                        'task_flags'] |= TASK_STATUS_TCB_CORRUPTED
            except ESPCoreDumpLoaderError as e:
                logging.warning(
                    'Skip TCB {} bytes @ 0x{:x}. (Reason: {})'.format(
                        tcbsz_aligned, task.task_header.tcb_addr, e))

            # Write stack
            try:
                if self.target_method_cls.stack_is_sane(
                        task_status_kwargs['task_stack_start']):
                    core_elf.add_segment(
                        task_status_kwargs['task_stack_start'], task.stack,
                        ElfFile.PT_LOAD, ElfSegment.PF_R | ElfSegment.PF_W)
                elif task_status_kwargs['task_stack_start'] \
                        and self.target_method_cls.addr_is_fake(task_status_kwargs['task_stack_start']):
                    task_status_kwargs[
                        'task_flags'] |= TASK_STATUS_TCB_CORRUPTED
                    core_elf.add_segment(
                        task_status_kwargs['task_stack_start'], task.stack,
                        ElfFile.PT_LOAD, ElfSegment.PF_R | ElfSegment.PF_W)
            except ESPCoreDumpLoaderError as e:
                logging.warning(
                    'Skip task\'s ({:x}) stack {} bytes @ 0x{:x}. (Reason: {})'
                    .format(task_status_kwargs['task_tcb_addr'],
                            task_status_kwargs['task_stack_len'],
                            task_status_kwargs['task_stack_start'], e))

            try:
                logging.debug('Stack start_end: 0x{:x} @ 0x{:x}'.format(
                    task.task_header.stack_top, task.task_header.stack_end))
                task_regs, extra_regs = self.arch_method_cls.get_registers_from_stack(
                    task.stack,
                    task.task_header.stack_end > task.task_header.stack_top)
            except Exception as e:
                raise ESPCoreDumpLoaderError(str(e))

            task_info_notes += self._build_note_section(
                'TASK_INFO', ESPCoreDumpElfFile.PT_TASK_INFO,
                EspTaskStatus.build(task_status_kwargs))
            notes += self._build_note_section(
                'CORE', ElfFile.PT_LOAD,
                self.arch_method_cls.build_prstatus_data(
                    task.task_header.tcb_addr, task_regs))

            if extra_regs and len(core_dump_info_notes) == 0:
                # there will be only one such note, for the crashed task
                core_dump_info_notes += self._build_note_section(
                    'ESP_CORE_DUMP_INFO', ESPCoreDumpElfFile.PT_INFO,
                    Int32ul.build(self.header.ver))

                exc_regs = []
                for reg_id in extra_regs:
                    exc_regs.extend([reg_id, extra_regs[reg_id]])
                _regs = [task.task_header.tcb_addr] + exc_regs
                core_dump_info_notes += self._build_note_section(
                    'EXTRA_INFO', ESPCoreDumpElfFile.PT_EXTRA_INFO,
                    Int32ul[1 + len(exc_regs)].build(_regs))

        if self.dump_ver == self.BIN_V2:
            for header in coredump_data.mem_seg_headers:
                logging.debug('Read memory segment {} bytes @ 0x{:x}'.format(
                    header.mem_sz, header.mem_start))
                core_elf.add_segment(header.mem_start, header.data,
                                     ElfFile.PT_LOAD,
                                     ElfSegment.PF_R | ElfSegment.PF_W)

        # add notes
        try:
            core_elf.add_segment(0, notes, ElfFile.PT_NOTE, 0)
        except ESPCoreDumpLoaderError as e:
            logging.warning(
                'Skip NOTES segment {:d} bytes @ 0x{:x}. (Reason: {})'.format(
                    len(notes), 0, e))
        # add core dump info notes
        try:
            core_elf.add_segment(0, core_dump_info_notes, ElfFile.PT_NOTE, 0)
        except ESPCoreDumpLoaderError as e:
            logging.warning(
                'Skip core dump info NOTES segment {:d} bytes @ 0x{:x}. (Reason: {})'
                .format(len(core_dump_info_notes), 0, e))
        try:
            core_elf.add_segment(0, task_info_notes, ElfFile.PT_NOTE, 0)
        except ESPCoreDumpLoaderError as e:
            logging.warning(
                'Skip failed tasks info NOTES segment {:d} bytes @ 0x{:x}. (Reason: {})'
                .format(len(task_info_notes), 0, e))
        # dump core ELF
        core_elf.e_type = ElfFile.ET_CORE
        core_elf.e_machine = ESPCoreDumpElfFile.EM_XTENSA
        core_elf.dump(self.core_elf_file.name)
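_get_aligned_size is used throughout but not shown; a minimal sketch, with the 4-byte default inferred from the AlignedStruct(4, ...) above (an assumption):

    @staticmethod
    def _get_aligned_size(size: int, align: int = 4) -> int:
        # round size up to the next multiple of align
        return (size + align - 1) & ~(align - 1)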