Example #1
import ctypes

import layout  # project-local module with the NTFS structure definitions and helpers used below


def dump_logfile(volume_path, path):
    with open(volume_path, "rb") as volume:
        bootsector = layout.NtfsBootSector.from_buffer_copy(
            volume.read(ctypes.sizeof(layout.NtfsBootSector)))

        cluster_size = bootsector.bpb.bytes_per_sector * bootsector.bpb.sectors_per_cluster

        # A negative clusters_per_mft_record encodes the record size as a
        # power of two in bytes; otherwise it is a count of clusters.
        if bootsector.clusters_per_mft_record < 0:
            file_record_size = 1 << -bootsector.clusters_per_mft_record
        else:
            file_record_size = bootsector.clusters_per_mft_record << \
                (layout.ffs(cluster_size) - 1)
            
        # $LogFile is MFT record number 2 (record 0 is $MFT, record 1 is
        # $MFTMirr), so its file record sits two records past the MFT start.
        logfile_offset = bootsector.mft_lcn * cluster_size
        logfile_offset += 2 * file_record_size

        volume.seek(logfile_offset)
        
        buffer = bytearray(file_record_size)

        volume.readinto(buffer)
        layout.dofixup(buffer, sector_size=bootsector.bpb.bytes_per_sector)

        file_record = layout.FileRecordSegmentHeader.from_buffer_copy(buffer)
        
        # find_data_stream() is a project-local helper (not shown here); it
        # locates the $DATA attribute among the file record's attributes and
        # returns an object exposing its decoded runlist.
        data_stream = find_data_stream(buffer[file_record.attr_offset:])

        with open(path, "wb") as logfile_stream:
            for vcn, lcn, length in data_stream.runlist:
                volume.seek(lcn * cluster_size)

                # Copy the run in chunks of at most 64 clusters.
                while length > 0:
                    num = min(length, 64)
                    logfile_stream.write(volume.read(num * cluster_size))
                    length -= num
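
The branch on clusters_per_mft_record is worth a worked example: NTFS stores either a cluster count or, when the value is negative, a power-of-two size in bytes, and the ffs()-based shift above is just a multiplication by the (power-of-two) cluster size. A minimal standalone sketch with illustrative values, not data read from a real volume:

def mft_record_size(clusters_per_mft_record, cluster_size):
    # Negative: the record size is 2**abs(value) bytes (typically -10 -> 1024).
    if clusters_per_mft_record < 0:
        return 1 << -clusters_per_mft_record
    # Non-negative: a plain cluster count; equivalent to the shift by
    # ffs(cluster_size) - 1 used above, since cluster_size is a power of two.
    return clusters_per_mft_record * cluster_size

assert mft_record_size(-10, 4096) == 1024   # the common 1 KiB file record
assert mft_record_size(2, 512) == 1024      # small clusters: the value is a count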
Example #2
def get_lsn_restart_blocks(logfile):
    '''Returns restart blocks.

    The block with the higher current LSN is considered to be
    valid (more up-to-date); the remaining block is used as
    a backup.

    Returns:
        tuple - (valid block, backup block)
    '''
    logfile.seek(0)

    # Inspect logfile a bit...
    sector = logfile.read(SECTOR_SIZE)

    restart_header = layout.RestartPageHeader.from_buffer_copy(sector)

    logfile.seek(0)

    if restart_header.system_page_size > (1024 * 64):
        # Sanity check: the system page size should never exceed 64 KB,
        # the maximum NTFS cluster size.
        # https://support.microsoft.com/en-us/kb/140365
        raise ValueError("invalid system page size?")

    # Read first two pages from the logfile
    pages = [
        bytearray(restart_header.system_page_size),
        bytearray(restart_header.system_page_size)
    ]

    for page in pages:
        logfile.readinto(page)
        layout.dofixup(page, sector_size=SECTOR_SIZE)

    block_first = None
    block_second = None

    if pages[0]:
        block_first = get_restart_context(pages[0])

    if pages[1]:
        block_second = get_restart_context(pages[1])

    if block_first and block_second:
        if block_first.area.current_lsn < block_second.area.current_lsn:
            # The first block has the lower LSN, so swap the blocks, since we
            # return the first block as the valid one.
            block_first, block_second = block_second, block_first
    if not block_first:
        # Swap blocks when first block is invalid.
        block_first, block_second = block_second, block_first

    return block_first, block_second
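
A hedged sketch of how the two helpers above might be wired together. The raw volume path and the output file name are illustrative, and opening \\.\C: requires administrator rights on Windows:

# Illustrative only: dump $LogFile from the C: volume, then pick the valid
# restart block out of the dumped copy.
dump_logfile(r"\\.\C:", "LogFile.bin")

with open("LogFile.bin", "rb") as logfile:
    valid, backup = get_lsn_restart_blocks(logfile)
    if valid is None:
        raise RuntimeError("neither restart page could be parsed")
    print("most recent LSN:", valid.area.current_lsn)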
Example #3
def get_log_page(self, page):
    '''Returns a page from the log, with USN fixups applied.

    `page` is the byte offset of the page within the logfile stream.
    '''
    self.logfile_stream.seek(page)
    buffer = bytearray(self.lcb.log_page_size)
    self.logfile_stream.readinto(buffer)
    return layout.dofixup(buffer)
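
layout.dofixup() itself is not shown in these snippets, but the NTFS multi-sector ("USN") fixup scheme it refers to is fixed: on disk, the last two bytes of every sector of a record are overwritten with the update sequence number, and the displaced bytes are kept in the update sequence array in the record header. A minimal sketch of that scheme, assuming the array offset and entry count have already been read from the header; it illustrates the general mechanism, not the project's actual implementation:

def apply_fixups(buffer, usa_offset, usa_count, sector_size=512):
    '''Verify and undo USN fixups in a multi-sector record (in place).

    buffer must be a mutable bytearray covering the whole record; entry 0 of
    the update sequence array is the USN itself, and entries 1..usa_count-1
    hold the original last two bytes of each protected sector.
    '''
    usn = bytes(buffer[usa_offset:usa_offset + 2])
    for i in range(1, usa_count):
        end = i * sector_size
        if bytes(buffer[end - 2:end]) != usn:
            raise ValueError("torn write: sector %d does not match the USN" % i)
        buffer[end - 2:end] = buffer[usa_offset + 2 * i:usa_offset + 2 * i + 2]
    return buffer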