コード例 #1
0
ファイル: ntfs.py プロジェクト: la-ci/RecuperaBit
 def __init__(self, pointer):
     """Initialize empty NTFS artifact collections on top of DiskScanner."""
     DiskScanner.__init__(self, pointer)
     # Sector positions of FILE records, plus records saved for a second pass
     self.found_file = set()
     self.parsed_file_review = {}
     # Sector positions of INDX records and their parsed parent/children info
     self.found_indx = set()
     self.parsed_indx = {}
     self.indx_list = None
     # Boot sector positions and every sectors-per-cluster value seen so far
     self.found_boot = []
     self.found_spc = []
コード例 #2
0
ファイル: ntfs.py プロジェクト: risusanto/RecuperaBit
 def __init__(self, pointer):
     """Set up a fresh NTFS scanner state over the generic DiskScanner."""
     DiskScanner.__init__(self, pointer)
     # Raw scan results: where interesting on-disk structures were found
     self.found_boot = []
     self.found_spc = []
     self.found_file = set()
     self.found_indx = set()
     # Caches of parsed records, filled during partition reconstruction
     self.parsed_file_review = {}
     self.parsed_indx = {}
     self.indx_list = None
コード例 #3
0
ファイル: ntfs.py プロジェクト: la-ci/RecuperaBit
    def add_from_indx_allocation(self, parsed, part):
        """Add ghost entries to part from INDEX_ALLOCATION attributes in parsed.

        This procedure requires that the beginning of the partition has already
        been discovered."""
        positions_to_reread = set()
        for attr in parsed['attributes']['$INDEX_ALLOCATION']:
            if 'runlist' not in attr:
                continue
            vcn_offset = 0
            for run in attr['runlist']:
                # Runlist offsets are relative: accumulate to get the cluster
                vcn_offset += run['offset']
                sector = vcn_offset * part.sec_per_clus + part.offset
                indx = self.parsed_indx.get(sector)
                if indx is None or indx['parent'] != parsed['record_n']:
                    continue
                # Re-read only when the INDX names files we have not seen yet
                if any(c not in part.files for c in indx['children']):
                    positions_to_reread.add(sector)

        img = DiskScanner.get_image(self)
        for sector in positions_to_reread:
            record = parse_indx_record(sectors(img, sector, INDX_size))
            self.add_indx_entries(record['entries'], part)
コード例 #4
0
    def get_content(self, partition):
        """Extract the content of the file.

        This method works by extracting the $DATA attribute.

        Returns the resident content (via str) or, for non-resident data,
        whatever content_iterator yields; returns None whenever the content
        cannot be restored (ghost file, invalid record, missing $DATA,
        compression, or unknown geometry)."""
        if self.is_ghost:
            # A ghost file has no MFT entry to read from
            logging.error(u'Cannot restore ghost file {}'.format(self))
            return None

        image = DiskScanner.get_image(partition.scanner)
        dump = sectors(image, File.get_offset(self), FILE_size)
        parsed = parse_file_record(dump)

        if not parsed['valid'] or 'attributes' not in parsed:
            logging.error(u'Invalid MFT entry for {}'.format(self))
            return None
        attrs = parsed['attributes']
        # Attributes may continue outside this record; locating them needs
        # the partition geometry (sec_per_clus)
        if ('$ATTRIBUTE_LIST' in attrs and
                partition.sec_per_clus is not None):
            _integrate_attribute_list(parsed, partition, image)
        if '$DATA' not in attrs:
            attrs['$DATA'] = []
        # Keep only the $DATA attribute(s) matching this stream name
        # (self.ads is presumably the ADS name, falsy for the main stream)
        datas = [d for d in attrs['$DATA'] if d['name'] == self.ads]
        if not len(datas):
            if not self.is_directory:
                logging.error(u'Cannot restore $DATA attribute(s) '
                              'for {}'.format(self))
            return None

        # TODO implement compressed attributes
        for d in datas:
            if d['flags'] & 0x01:
                # Flag 0x01 = compressed attribute, not supported yet
                logging.error(u'Cannot restore compressed $DATA attribute(s) '
                              'for {}'.format(self))
                return None
            elif d['flags'] & 0x4000:
                # Flag 0x4000 = encrypted attribute: proceed, but warn
                logging.warning(u'Found encrypted $DATA attribute(s) '
                                'for {}'.format(self))

        # Handle resident file content
        if len(datas) == 1 and not datas[0]['non_resident']:
            single = datas[0]
            # Resident content lives inside the record dump itself
            start = single['dump_offset'] + single['content_off']
            end = start + single['content_size']
            content = dump[start:end]
            return str(content)
        else:
            if partition.sec_per_clus is None:
                # Cannot translate cluster runs without the geometry
                logging.error(u'Cannot restore non-resident $DATA '
                              'attribute(s) for {}'.format(self))
                return None
            # NOTE(review): fragments are gathered from ALL $DATA attributes,
            # not only those matching self.ads; the length check below only
            # warns about the mismatch — confirm this is intended
            non_resident = sorted(
                (d for d in attrs['$DATA'] if d['non_resident']),
                key=lambda x: x['start_VCN']
            )
            if len(non_resident) != len(datas):
                logging.warning(
                    u'Found leftover resident $DATA attributes for '
                    '{}'.format(self)
                )
            return self.content_iterator(partition, image, non_resident)
コード例 #5
0
ファイル: ntfs.py プロジェクト: risusanto/RecuperaBit
    def add_from_indx_allocation(self, parsed, part):
        """Add ghost entries to part from INDEX_ALLOCATION attributes in parsed.

        This procedure requires that the beginning of the partition has already
        been discovered."""
        img = DiskScanner.get_image(self)
        pending = set()
        for attribute in parsed['attributes']['$INDEX_ALLOCATION']:
            try:
                runlist = attribute['runlist']
            except KeyError:
                continue
            position = 0
            for run in runlist:
                # Accumulate the relative runlist offsets into a cluster number
                position += run['offset']
                real_pos = position * part.sec_per_clus + part.offset
                if real_pos not in self.parsed_indx:
                    continue
                info = self.parsed_indx[real_pos]
                if info['parent'] != parsed['record_n']:
                    continue
                # Schedule a re-read if the INDX references unknown files
                new_children = set(info['children']) - set(part.files)
                if new_children:
                    pending.add(real_pos)

        for real_pos in pending:
            dump = sectors(img, real_pos, INDX_size)
            self.add_indx_entries(parse_indx_record(dump)['entries'], part)
コード例 #6
0
ファイル: ntfs.py プロジェクト: risusanto/RecuperaBit
    def get_content(self, partition):
        """Extract the content of the file.

        This method works by extracting the $DATA attribute.

        Returns None when the content cannot be restored; otherwise the
        resident content (via str) or the result of content_iterator for
        non-resident data."""
        if self.is_ghost:
            # Ghost files have no MFT entry on disk, so nothing can be read
            logging.error(u'Cannot restore ghost file {}'.format(self))
            return None

        image = DiskScanner.get_image(partition.scanner)
        dump = sectors(image, File.get_offset(self), FILE_size)
        parsed = parse_file_record(dump)

        if not parsed['valid'] or 'attributes' not in parsed:
            logging.error(u'Invalid MFT entry for {}'.format(self))
            return None
        attrs = parsed['attributes']
        # Pull in attributes stored outside this record; requires geometry
        if ('$ATTRIBUTE_LIST' in attrs and
                partition.sec_per_clus is not None):
            _integrate_attribute_list(parsed, partition, image)
        if '$DATA' not in attrs:
            attrs['$DATA'] = []
        # Restrict to $DATA attributes for this stream (self.ads: stream
        # name, presumably falsy for the unnamed/main stream — verify)
        datas = [d for d in attrs['$DATA'] if d['name'] == self.ads]
        if not len(datas):
            if not self.is_directory:
                logging.error(u'Cannot restore $DATA attribute(s) '
                              'for {}'.format(self))
            return None

        # TODO implement compressed attributes
        for d in datas:
            if d['flags'] & 0x01:
                # 0x01 flags the attribute as compressed (unsupported)
                logging.error(u'Cannot restore compressed $DATA attribute(s) '
                              'for {}'.format(self))
                return None
            elif d['flags'] & 0x4000:
                # 0x4000 flags encryption: warn and continue
                logging.warning(u'Found encrypted $DATA attribute(s) '
                                'for {}'.format(self))

        # Handle resident file content
        if len(datas) == 1 and not datas[0]['non_resident']:
            single = datas[0]
            # Resident data is sliced straight out of the record dump
            start = single['dump_offset'] + single['content_off']
            end = start + single['content_size']
            content = dump[start:end]
            return str(content)
        else:
            if partition.sec_per_clus is None:
                # Non-resident runs cannot be located without the geometry
                logging.error(u'Cannot restore non-resident $DATA '
                              'attribute(s) for {}'.format(self))
                return None
            # NOTE(review): this collects non-resident fragments from every
            # $DATA attribute, not just `datas`; the warning below is the
            # only guard — confirm intended
            non_resident = sorted(
                (d for d in attrs['$DATA'] if d['non_resident']),
                key=lambda x: x['start_VCN']
            )
            if len(non_resident) != len(datas):
                logging.warning(
                    u'Found leftover resident $DATA attributes for '
                    '{}'.format(self)
                )
            return self.content_iterator(partition, image, non_resident)
コード例 #7
0
ファイル: ntfs.py プロジェクト: la-ci/RecuperaBit
    def add_from_attribute_list(self, parsed, part, offset):
        """Add additional entries to part from attributes in ATTRIBUTE_LIST.

        Files with many attributes may have additional attributes not in the
        MFT entry. When this happens, it is necessary to find the other
        attributes. They may contain additional information, such as $DATA
        attributes for ADS. This procedure requires that the beginning of the
        partition has already been discovered."""
        _integrate_attribute_list(parsed, part, DiskScanner.get_image(self))

        for data_attr in parsed['attributes'].get('$DATA', []):
            stream_name = data_attr['name']
            # Only named streams (ADS) get their own file entry
            if stream_name:
                part.add_file(NTFSFile(parsed, offset, ads=stream_name))
コード例 #8
0
ファイル: ntfs.py プロジェクト: risusanto/RecuperaBit
    def add_from_attribute_list(self, parsed, part, offset):
        """Add additional entries to part from attributes in ATTRIBUTE_LIST.

        Files with many attributes may have additional attributes not in the
        MFT entry. When this happens, it is necessary to find the other
        attributes. They may contain additional information, such as $DATA
        attributes for ADS. This procedure requires that the beginning of the
        partition has already been discovered."""
        image = DiskScanner.get_image(self)
        _integrate_attribute_list(parsed, part, image)

        if '$DATA' not in parsed['attributes']:
            return
        for entry in parsed['attributes']['$DATA']:
            name = entry['name']
            if not name:
                # Unnamed stream: the main file entry already covers it
                continue
            part.add_file(NTFSFile(parsed, offset, ads=name))
コード例 #9
0
ファイル: ntfs.py プロジェクト: la-ci/RecuperaBit
    def add_from_mft_mirror(self, part):
        """Fix the first file records using the MFT mirror."""
        img = DiskScanner.get_image(self)
        if part.mftmirr_pos is None:
            return

        # Only the first 4 records are kept in $MFTMirr
        for i in xrange(4):
            existing = part.get(i)
            if existing is not None and not existing.is_ghost:
                continue
            position = part.mftmirr_pos + i * FILE_size
            parsed = parse_file_record(sectors(img, position, FILE_size))
            if not parsed['valid'] or '$FILE_NAME' not in parsed['attributes']:
                continue
            repaired = NTFSFile(parsed, position)
            part.add_file(repaired)
            logging.info(
                u'Repaired MFT entry #%s - %s in partition at offset '
                '%s from backup', repaired.index, repaired.name, part.offset)
コード例 #10
0
ファイル: ntfs.py プロジェクト: risusanto/RecuperaBit
    def add_from_mft_mirror(self, part):
        """Fix the first file records using the MFT mirror."""
        image = DiskScanner.get_image(self)
        mirror_base = part.mftmirr_pos
        if mirror_base is None:
            return

        for record_n in xrange(4):
            current = part.get(record_n)
            if current is not None and not current.is_ghost:
                # Entry already known and not a ghost: nothing to repair
                continue
            # Read the backup copy of this record from $MFTMirr
            pos = mirror_base + record_n * FILE_size
            dump = sectors(image, pos, FILE_size)
            record = parse_file_record(dump)
            if record['valid'] and '$FILE_NAME' in record['attributes']:
                node = NTFSFile(record, pos)
                part.add_file(node)
                logging.info(
                    u'Repaired MFT entry #%s - %s in partition at offset '
                    '%s from backup', node.index, node.name, part.offset
                )
コード例 #11
0
ファイル: ntfs.py プロジェクト: la-ci/RecuperaBit
    def get_partitions(self):
        """Reconstruct NTFS partitions from the artifacts found by the scan.

        Returns a dict mapping the sector offset of each MFT start to its
        NTFSPartition object. The reconstruction runs in several passes:

        1. parse every FILE record and group files by the implied MFT start;
        2. parse INDX records and remember parent/children relations;
        3. read boot sectors to fix partition geometry (offset, cluster size);
        4. repair the first MFT entries from the MFT mirror and drop bogus
           partitions generated by the mirror itself;
        5. infer geometry for partitions without a boot sector and finalize;
        6. merge partitions that are actually pieces of a fragmented MFT.
        """
        partitioned_files = {}
        img = DiskScanner.get_image(self)

        logging.info('Parsing MFT entries')
        for position in self.found_file:
            dump = sectors(img, position, FILE_size)
            try:
                parsed = parse_file_record(dump)
            except NotImplementedError:
                # A single unsupported record must not abort the whole scan
                # (consistent with the other NTFS scanning paths).
                logging.error(
                    'Problem parsing record on sector %d', position
                )
                continue
            attrs = parsed.get('attributes', {})
            if not parsed['valid'] or '$FILE_NAME' not in attrs:
                continue

            # Partition files based on corresponding entry 0
            if parsed['record_n'] is not None:
                # The MFT start is this record's position minus its index
                offset = position - parsed['record_n'] * FILE_size
                try:
                    part = partitioned_files[offset]
                except KeyError:
                    partitioned_files[offset] = NTFSPartition(self, offset)
                    part = partitioned_files[offset]
                attributes = parsed['attributes']
                if '$DATA' in attributes:
                    for attribute in attributes['$DATA']:
                        ads_name = attribute['name']
                        if ads_name:
                            # Named $DATA streams become separate ADS entries
                            part.add_file(
                                NTFSFile(parsed, position, ads=ads_name))
                # Add the file again, just in case the $DATA attributes are
                # missing.
                part.add_file(NTFSFile(parsed, position))

                # Handle information deduced from INDX records
                if '$INDEX_ROOT' in attrs:
                    self.add_from_indx_root(parsed, part)
                # Save for later use
                if '$INDEX_ALLOCATION' in attrs or '$ATTRIBUTE_LIST' in attrs:
                    self.parsed_file_review[position] = parsed
            # TODO [Future] handle files for which there is no record_number

        # Parse INDX records
        logging.info('Parsing INDX records')
        for position in self.found_indx:
            dump = sectors(img, position, INDX_size)
            parsed = parse_indx_record(dump)
            if not parsed['valid']:
                continue

            entries = parsed['entries']
            # Assume the most frequently referenced parent is the real one
            referred = (el['file_info']['parent_entry'] for el in entries)
            record_n = Counter(referred).most_common(1)[0][0]
            # Save references for future access
            self.parsed_indx[position] = {
                'parent': record_n,
                'children': set(el['record_n'] for el in entries)
            }

        indx_info = self.parsed_indx
        self.indx_list = SparseList(
            {pos: indx_info[pos]['parent']
             for pos in indx_info})

        # Extract boot record information
        logging.info('Reading boot sectors')
        for index in self.found_boot:
            dump = sectors(img, index, 1)
            parsed = unpack(dump, boot_sector_fmt)
            sec_per_clus = parsed['sectors_per_cluster']
            self.found_spc.append(sec_per_clus)
            relative = parsed['MFT_addr'] * sec_per_clus
            mirr_relative = parsed['MFTmirr_addr'] * sec_per_clus
            part = None
            # Look for matching partition, either as boot sector or backup
            # (delta == parsed['sectors'] handles the backup boot sector)
            for delta in (0, parsed['sectors']):
                index = index - delta
                address = relative + index
                # Set partition as recoverable
                if address in partitioned_files:
                    part = partitioned_files[address]
                    part.set_recoverable(True)
                    part.set_size(parsed['sectors'])
                    part.offset = index
                    part.sec_per_clus = sec_per_clus
                    part.mftmirr_pos = mirr_relative + index
                    break

        # Repair MFT if the mirror is available
        for address in list(partitioned_files):
            # This could have been deleted in a previous iteration
            if address not in partitioned_files:
                continue
            part = partitioned_files[address]
            mirrpos = part.mftmirr_pos
            if mirrpos is None:
                entry = part.get(1)  # $MFTMirr
                if entry is None:
                    continue
                else:
                    # Infer MFT mirror position from $MFTMirr's $DATA runlist
                    dump = sectors(img, entry.offset, FILE_size)
                    mirror = parse_file_record(dump)
                    if (mirror['valid'] and 'attributes' in mirror
                            and '$DATA' in mirror['attributes']):
                        datas = mirror['attributes']['$DATA']
                        if (len(datas) == 1 and datas[0]['non_resident']
                                and 'runlist' in datas[0]
                                and len(datas[0]['runlist']) > 0
                                and 'offset' in datas[0]['runlist'][0]):
                            relative = datas[0]['runlist'][0]['offset']
                            spc = part.sec_per_clus
                            if spc is None:
                                continue
                            mirrpos = relative * spc + part.offset
                            part.mftmirr_pos = mirrpos

            self.add_from_mft_mirror(part)

            # Remove bogus partitions generated by MFT mirrors
            if mirrpos in partitioned_files:
                bogus = partitioned_files[mirrpos]
                # Check if it looks like a MFT mirror
                if len(bogus.files) == 4 and max(bogus.files) < 4:
                    logging.debug(
                        'Dropping bogus NTFS partition with MFT '
                        'position %d generated by MFT mirror of '
                        'partition at offset %d', bogus.mft_pos, part.offset)
                    partitioned_files.pop(mirrpos)

        # Acquire additional information from $INDEX_ALLOCATION
        logging.info('Finding partition geometry')
        most_likely = self.most_likely_sec_per_clus()
        for address in partitioned_files:
            part = partitioned_files[address]
            if part.offset is None:
                # Find geometry by approximate string matching
                offset, sec_per_clus = self.find_boundary(
                    part, address, most_likely)
                if offset is not None:
                    part.set_recoverable(True)
                    part.offset = offset
                    part.sec_per_clus = sec_per_clus
            else:
                offset, sec_per_clus = part.offset, part.sec_per_clus
            if offset is not None:
                logging.info(
                    'Finalizing MFT reconstruction of partition at offset %i',
                    offset)
                self.finalize_reconstruction(part)

        # Merge pieces from fragmented MFT
        for address in list(partitioned_files):
            # This could have been deleted in a previous iteration
            if address not in partitioned_files:
                continue
            part = partitioned_files[address]
            entry = part.get(0)  # $MFT
            if entry is None or part.sec_per_clus is None:
                continue
            dump = sectors(img, entry.offset, FILE_size)
            parsed = parse_file_record(dump)
            if not parsed['valid'] or 'attributes' not in parsed:
                continue

            if '$ATTRIBUTE_LIST' in parsed['attributes']:
                _integrate_attribute_list(parsed, part, img)
            attrs = parsed['attributes']
            if '$DATA' not in attrs or len(attrs['$DATA']) < 1:
                continue

            if 'runlist' not in attrs['$DATA'][0]:
                continue
            runlist = attrs['$DATA'][0]['runlist']
            if len(runlist) > 1:
                logging.info(
                    'MFT for partition at offset %d is fragmented. Trying to '
                    'merge %d parts...', part.offset, len(runlist))
                clusters_pos = runlist[0]['offset']
                spc = part.sec_per_clus
                size = runlist[0]['length']
                for entry in runlist[1:]:
                    clusters_pos += entry['offset']
                    real_pos = clusters_pos * part.sec_per_clus + part.offset
                    # A fragment appears as a partition whose MFT would start
                    # where this run continues the previous fragments
                    position = real_pos - size * spc
                    if position in partitioned_files:
                        piece = partitioned_files[position]
                        if piece.offset is None or piece.offset == part.offset:
                            # Never merge when both sides hold the same record
                            # as a non-ghost file: they are distinct MFTs
                            conflicts = [
                                i for i in piece.files
                                if not piece.files[i].is_ghost and i in
                                part.files and not part.files[i].is_ghost
                            ]
                            if not len(conflicts):
                                logging.debug(
                                    'Merging partition with MFT offset %d into'
                                    ' %s (fragmented MFT)', piece.mft_pos,
                                    part)
                                # Merge the partitions
                                merge(part, piece)
                                # Remove the fragment
                                partitioned_files.pop(position)
                            else:
                                logging.debug(
                                    'NOT merging partition with MFT offset %d into'
                                    ' %s (possible fragmented MFT) due to conflicts',
                                    piece.mft_pos, part)
                    size += entry['length']

        return partitioned_files
コード例 #12
0
ファイル: ntfs.py プロジェクト: risusanto/RecuperaBit
    def get_partitions(self):
        """Get the found partitions as a dict keyed by MFT sector offset.

        Reconstruction proceeds in passes: parse FILE records and group them
        by the implied MFT start; parse INDX records; read boot sectors to
        determine geometry; repair entries from the MFT mirror and drop bogus
        mirror-generated partitions; infer missing geometry; finally merge
        pieces of fragmented MFTs."""
        partitioned_files = {}
        img = DiskScanner.get_image(self)

        logging.info('Parsing MFT entries')
        for position in self.found_file:
            dump = sectors(img, position, FILE_size)
            try:
                parsed = parse_file_record(dump)
            except NotImplementedError:
                # One unsupported record must not abort the whole scan
                logging.error(
                    'Problem parsing record on sector %d', position
                )
                continue
            attrs = parsed['attributes'] if 'attributes' in parsed else {}
            if not parsed['valid'] or '$FILE_NAME' not in attrs:
                continue

            # Partition files based on corresponding entry 0
            if parsed['record_n'] is not None:
                # MFT start = record position minus its index in the MFT
                offset = position - parsed['record_n'] * FILE_size
                try:
                    part = partitioned_files[offset]
                except KeyError:
                    partitioned_files[offset] = NTFSPartition(self, offset)
                    part = partitioned_files[offset]
                attributes = parsed['attributes']
                if '$DATA' in attributes:
                    for attribute in attributes['$DATA']:
                        ads_name = attribute['name']
                        if ads_name:
                            # Named $DATA streams become separate ADS entries
                            part.add_file(NTFSFile(parsed, position, ads=ads_name))
                """Add the file again, just in case the $DATA attributes are
                missing."""
                part.add_file(NTFSFile(parsed, position))

                # Handle information deduced from INDX records
                if '$INDEX_ROOT' in attrs:
                    self.add_from_indx_root(parsed, part)
                # Save for later use
                if '$INDEX_ALLOCATION' in attrs or '$ATTRIBUTE_LIST' in attrs:
                    self.parsed_file_review[position] = parsed
            # TODO [Future] handle files for which there is no record_number

        # Parse INDX records
        logging.info('Parsing INDX records')
        for position in self.found_indx:
            dump = sectors(img, position, INDX_size)
            parsed = parse_indx_record(dump)
            if not parsed['valid']:
                continue

            entries = parsed['entries']
            # The most frequently referenced parent is assumed to be real
            referred = (el['file_info']['parent_entry'] for el in entries)
            record_n = Counter(referred).most_common(1)[0][0]
            # Save references for future access
            self.parsed_indx[position] = {
                'parent': record_n,
                'children': set(el['record_n'] for el in entries)
            }

        indx_info = self.parsed_indx
        self.indx_list = SparseList({
            pos: indx_info[pos]['parent'] for pos in indx_info
        })

        # Extract boot record information
        logging.info('Reading boot sectors')
        for index in self.found_boot:
            dump = sectors(img, index, 1)
            parsed = unpack(dump, boot_sector_fmt)
            sec_per_clus = parsed['sectors_per_cluster']
            self.found_spc.append(sec_per_clus)
            relative = parsed['MFT_addr'] * sec_per_clus
            mirr_relative = parsed['MFTmirr_addr'] * sec_per_clus
            part = None
            # Look for matching partition, either as boot sector or backup
            # (delta == parsed['sectors'] covers the backup boot sector case)
            for delta in (0, parsed['sectors']):
                index = index - delta
                address = relative + index
                # Set partition as recoverable
                if address in partitioned_files:
                    part = partitioned_files[address]
                    part.set_recoverable(True)
                    part.set_size(parsed['sectors'])
                    part.offset = index
                    part.sec_per_clus = sec_per_clus
                    part.mftmirr_pos = mirr_relative + index
                    break

        # Repair MFT if the mirror is available
        for address in list(partitioned_files):
            # This could have been deleted in a previous iteration
            if address not in partitioned_files:
                continue
            part = partitioned_files[address]
            mirrpos = part.mftmirr_pos
            if mirrpos is None:
                entry = part.get(1)     # $MFTMirr
                if entry is None:
                    continue
                else:
                    # Infer MFT mirror position from $MFTMirr's $DATA runlist
                    dump = sectors(img, entry.offset, FILE_size)
                    mirror = parse_file_record(dump)
                    if (mirror['valid'] and 'attributes' in mirror and
                            '$DATA' in mirror['attributes']):
                        datas = mirror['attributes']['$DATA']
                        if (len(datas) == 1 and datas[0]['non_resident'] and
                                'runlist' in datas[0] and
                                len(datas[0]['runlist']) > 0 and
                                'offset' in datas[0]['runlist'][0]):
                            relative = datas[0]['runlist'][0]['offset']
                            spc = part.sec_per_clus
                            if spc is None:
                                continue
                            mirrpos = relative * spc + part.offset
                            part.mftmirr_pos = mirrpos

            self.add_from_mft_mirror(part)

            # Remove bogus partitions generated by MFT mirrors
            if mirrpos in partitioned_files:
                bogus = partitioned_files[mirrpos]
                # Check if it looks like a MFT mirror
                if len(bogus.files) == 4 and max(bogus.files) < 4:
                    logging.debug(
                        'Dropping bogus NTFS partition with MFT '
                        'position %d generated by MFT mirror of '
                        'partition at offset %d',
                        bogus.mft_pos, part.offset
                    )
                    partitioned_files.pop(mirrpos)

        # Acquire additional information from $INDEX_ALLOCATION
        logging.info('Finding partition geometry')
        most_likely = self.most_likely_sec_per_clus()
        for address in partitioned_files:
            part = partitioned_files[address]
            if part.offset is None:
                # Find geometry by approximate string matching
                offset, sec_per_clus = self.find_boundary(
                    part, address, most_likely
                )
                if offset is not None:
                    part.set_recoverable(True)
                    part.offset = offset
                    part.sec_per_clus = sec_per_clus
            else:
                offset, sec_per_clus = part.offset, part.sec_per_clus
            if offset is not None:
                logging.info(
                    'Finalizing MFT reconstruction of partition at offset %i',
                    offset
                )
                self.finalize_reconstruction(part)

        # Merge pieces from fragmented MFT
        for address in list(partitioned_files):
            # This could have been deleted in a previous iteration
            if address not in partitioned_files:
                continue
            part = partitioned_files[address]
            entry = part.get(0)     # $MFT
            if entry is None or part.sec_per_clus is None:
                continue
            dump = sectors(img, entry.offset, FILE_size)
            parsed = parse_file_record(dump)
            if not parsed['valid'] or 'attributes' not in parsed:
                continue

            if '$ATTRIBUTE_LIST' in parsed['attributes']:
                _integrate_attribute_list(parsed, part, img)
            attrs = parsed['attributes']
            if '$DATA' not in attrs or len(attrs['$DATA']) < 1:
                continue

            if 'runlist' not in attrs['$DATA'][0]:
                continue
            runlist = attrs['$DATA'][0]['runlist']
            if len(runlist) > 1:
                logging.info(
                    'MFT for partition at offset %d is fragmented. Trying to '
                    'merge %d parts...', part.offset, len(runlist)
                )
                clusters_pos = runlist[0]['offset']
                spc = part.sec_per_clus
                size = runlist[0]['length']
                for entry in runlist[1:]:
                    clusters_pos += entry['offset']
                    real_pos = clusters_pos * part.sec_per_clus + part.offset
                    # A fragment looks like a partition whose MFT would start
                    # where this run continues the earlier fragments
                    position = real_pos - size*spc
                    if position in partitioned_files:
                        piece = partitioned_files[position]
                        if piece.offset is None or piece.offset == part.offset:
                            # Do not merge if both hold the same non-ghost file
                            conflicts = [
                                i for i in piece.files if
                                not piece.files[i].is_ghost and
                                i in part.files and
                                not part.files[i].is_ghost
                            ]
                            if not len(conflicts):
                                logging.debug(
                                    'Merging partition with MFT offset %d into'
                                    ' %s (fragmented MFT)', piece.mft_pos, part
                                )
                                # Merge the partitions
                                merge(part, piece)
                                # Remove the fragment
                                partitioned_files.pop(position)
                            else:
                                logging.debug(
                                    'NOT merging partition with MFT offset %d into'
                                    ' %s (possible fragmented MFT) due to conflicts', piece.mft_pos, part
                                )
                    size += entry['length']

        return partitioned_files