Example #1
    def _fetch_vcn(self, vcn, data_run, datamodel):
        log = Helper.logger()
        file_record = self.file_record

        (n, lcn), rel_vcn = data_run

        log.debug('\t\tVCN relative to data-run: {}'.format(rel_vcn))

        bytes_per_cluster = file_record.sectors_per_cluster * file_record.bytes_per_sector
        file_offset = (lcn + rel_vcn) * bytes_per_cluster
        #size_in_bytes = n * bytes_per_cluster

        # fetch exactly one VCN (one cluster)
        # is it possible to have more than one cluster/entry? !TODO
        size_in_bytes = 1 * bytes_per_cluster

        clusters = datamodel.getStream(file_offset,
                                       file_offset + size_in_bytes)

        log.debug(
            '\t\tINDX: 0x{:04x} clusters @ LCN 0x{:04x}, @ f_offset 0x{:x}, size_in_bytes {}'
            .format(n, lcn, file_offset, size_in_bytes))

        # buffered data model
        data = DataModel.BufferDataModel(clusters, 'lcn')
        return data
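
These examples assume a buffer-backed data model exposing byte-range and little-endian integer reads. A minimal sketch of such an interface is below; the internals are reconstructed purely from the calls the examples make, so the real qiew DataModel.BufferDataModel may well differ.

import struct

class BufferDataModel(object):
    """Sketch: buffer-backed data model; method names mirror the calls above."""

    def __init__(self, data, name):
        self.data = bytearray(data)  # mutable, so fixups can patch bytes in place
        self.name = name

    def getData(self):
        return self.data

    def getDataSize(self):
        return len(self.data)

    def getStream(self, start, end):
        return self.data[start:end]

    def getBYTE(self, offset):
        return self.data[offset]

    def getWORD(self, offset):
        return struct.unpack_from('<H', self.data, offset)[0]  # little-endian u16

    def getDWORD(self, offset):
        return struct.unpack_from('<I', self.data, offset)[0]  # little-endian u32

    def getQWORD(self, offset):
        return struct.unpack_from('<Q', self.data, offset)[0]  # little-endian u64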
Example #2
    def _fixup_seq_numbers(data, update_seq_array, size_update_seq, update_seq,
                           bytes_per_sector):
        log = Helper.logger()

        size_in_bytes = data.getDataSize()

        ## apply fixup: on disk, the last WORD of every sector holds the
        ## update sequence number; restore the original WORDs saved in the
        ## update sequence array
        k = 0
        i = 0

        fixup_array = DataModel.BufferDataModel(update_seq_array, 'fixup')

        while k < size_in_bytes:
            if i >= size_update_seq:
                break

            k += bytes_per_sector
            seq = data.getWORD(k - 2)

            fixup = fixup_array.getWORD(i * 2)

            log.debug(
                '\tlast two bytes of sector: {:04x}, fixup {:04x}'.format(
                    seq, fixup))

            if seq != update_seq:
                log.debug(
                    '\tupdate sequence check failed, image may be corrupt; continuing anyway'
                )

            # patch the sector's last two bytes with the saved original WORD
            fixup_s = fixup_array.getStream(i * 2, i * 2 + 2)
            data.getData()[k - 2:k] = fixup_s
            i += 1
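
The loop above implements the NTFS update sequence (fixup) mechanism: when a record is written, the last WORD of every sector is overwritten with the update sequence number, and the original WORDs are stashed in the update sequence array; reading the record back means restoring them. A standalone sketch of that restore step, using a made-up helper rather than qiew's Helper API:

def apply_fixup(buf, update_seq_array, bytes_per_sector):
    # restore each sector's real trailing WORD from the update sequence array
    for i in range(len(update_seq_array) // 2):
        end = (i + 1) * bytes_per_sector
        buf[end - 2:end] = update_seq_array[i * 2:i * 2 + 2]
    return buf

# two 512-byte sectors whose original trailing WORDs were aa bb and cc dd
buf = apply_fixup(bytearray(1024), bytearray(b'\xaa\xbb\xcc\xdd'), 512)
assert bytes(buf[510:512]) == b'\xaa\xbb' and bytes(buf[1022:1024]) == b'\xcc\xdd'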
Example #3
    def showPermissions(self):
        if not self.w.isVisible():
            self.w.show()
            self.w.ui.tabWidget.setFocus()
            self.w.ui.tabWidget.activateWindow()
            self.w.ui.tabWidget.setCurrentIndex(0)
            self.w.ui.listPerm.setFocus()

            # open the APK (a zip) and load AndroidManifest.xml into a buffered data model
            import zipfile
            zip_file = zipfile.ZipFile(self.dataModel.source)

            newfile = zip_file.open('AndroidManifest.xml')
            source = DataModel.BufferDataModel(newfile.read(), 'AndroidManifest.xml')

            # show it in a new viewer created by the plugin factory
            q = self._parent.parent.factory()(source, 'AndroidManifest.xml')
            q.setParent(self._parent.parent,
                        QtCore.Qt.Dialog | QtCore.Qt.WindowMinimizeButtonHint)
            q.resize(900, 600)
            q.show()
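
Outside the GUI, the same load reduces to pulling one entry out of the APK (a plain zip) and wrapping its bytes. A minimal non-Qt sketch, assuming the BufferDataModel interface sketched earlier (load_apk_entry is a hypothetical helper, not qiew API):

import zipfile

def load_apk_entry(apk_path, entry_name='AndroidManifest.xml'):
    # an APK is a zip archive; read one member fully into memory
    with zipfile.ZipFile(apk_path) as zf:
        payload = zf.read(entry_name)
    return BufferDataModel(payload, entry_name)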
Example #4
    def eventFilter(self, watched, event):
        if event.type() == QtCore.QEvent.KeyPress:
            if event.key() == QtCore.Qt.Key_Return:
                L = watched.selectedItems()
                if len(L) > 0:
                    filename = str(L[0].text(0))

                    import zipfile

                    print('Loading ' + filename)
                    # read the selected entry from the zip and wrap it in a buffered data model
                    zip_file = zipfile.ZipFile(self.plugin.dataModel.source)
                    newfile = zip_file.open(filename)
                    source = DataModel.BufferDataModel(newfile.read(), filename)
                    q = self.plugin._parent.parent.factory()(source, filename)
                    q.setParent(
                        self.plugin._parent.parent,
                        QtCore.Qt.Dialog | QtCore.Qt.WindowMinimizeButtonHint)
                    q.resize(900, 600)
                    q.show()

        return False
Example #5
    def get_file_record(self, which_file_record):
        log = Helper.logger()

        log.debug(
            '==================== [File record #{}] ===================='.
            format(which_file_record))

        datarun = self._datarun_of_file_record(which_file_record)
        if datarun is None:
            # file record not found
            return None

        n, lcn, rel_record = datarun

        start_mft = lcn * self.sectors_per_cluster * self.bytes_per_sector
        mft_size_in_bytes = n * self.sectors_per_cluster * self.bytes_per_sector

        file_record_offset = start_mft + rel_record * self.file_record_size

        # offset of the record; rebased to 0 once the record is buffered
        fr = file_record_offset

        # get buffered data model
        data = DataModel.BufferDataModel(
            self.dataModel.getStream(fr, fr + self.file_record_size),
            'file_record')
        fr = 0

        magic = data.getStream(fr + 0x00, fr + 0x04)

        if magic != "FILE":
            log.debug('magic does not mach "FILE", instead: {}.'.format(magic))
            return None
            #raise NtfsError('magic should mach "FILE", offset 0x{:x}'.format(fr))

        obj = FileRecord()

        offset_update_seq = data.getWORD(fr + 0x04)
        log.debug(
            'Offset to update sequence: 0x{:0x}'.format(offset_update_seq))

        size_update_seq = data.getWORD(fr + 0x06)
        log.debug('Size in words of update sequence: 0x{:0x}'.format(
            size_update_seq))

        update_seq = data.getWORD(fr + offset_update_seq)
        log.debug('Update Sequence number: 0x{:04x}'.format(update_seq))

        # skip update seq number
        update_seq_array = data.getStream(
            fr + offset_update_seq + 2,
            fr + offset_update_seq + 2 + size_update_seq * 2)

        g = 'Update Sequence: '
        for x in update_seq_array:
            g += '{:02x} '.format(x)

        log.debug('{}'.format(g))

        # fixup things
        Helper._fixup_seq_numbers(data, update_seq_array, size_update_seq,
                                  update_seq, self.bytes_per_sector)

        off_first_attr = data.getWORD(fr + 0x14)

        flags = data.getWORD(fr + 0x16)
        log.debug('Flags: 0x{:0X}'.format(flags))

        real_size = data.getDWORD(fr + 0x18)
        log.debug('Real size of file record: 0x{:0X}'.format(real_size))

        allocated_size = data.getDWORD(fr + 0x1c)
        log.debug(
            'Allocated size of file record: 0x{:0X}'.format(allocated_size))

        file_reference = data.getQWORD(fr + 0x20)
        log.debug('File reference to the base FILE record: 0x{:0X}'.format(
            file_reference))

        next_attribute_id = data.getWORD(fr + 0x28)
        log.debug('Next Attribute Id: 0x{:0X}'.format(next_attribute_id))

        log.debug('')

        obj.off_first_attr = off_first_attr
        obj.flags = flags
        obj.real_size = real_size
        obj.allocated_size = allocated_size
        obj.file_reference = file_reference
        obj.next_attribute_id = next_attribute_id

        #save fs geometry
        obj.sectors_per_cluster = self.sectors_per_cluster
        obj.bytes_per_sector = self.bytes_per_sector

        ao = fr + off_first_attr

        log.debug('---=== attributes ===---')
        while True:
            attribute = Attribute(self.dataModel, file_record_offset + ao)

            std_attr_type = data.getDWORD(ao + 0x00)
            if std_attr_type == 0xFFFFFFFF:
                # 0xFFFFFFFF marks the end of the attribute list
                break

            # standard attribute header
            log.debug('Attribute type: {0}'.format(
                self.AttrDef.getByType(std_attr_type).name))

            attr_length = data.getDWORD(ao + 0x04)
            log.debug('Length: 0x{:0X}'.format(attr_length))

            non_resident_flag = data.getBYTE(ao + 0x08)

            attr_name_length = data.getBYTE(ao + 0x09)

            log.debug(
                'Non-resident flag: 0x{:0X}, name length: 0x{:0X}'.format(
                    non_resident_flag, attr_name_length))

            # build instance

            attribute.std_header.type = std_attr_type
            attribute.std_header.attrdef = self.AttrDef.getByType(
                std_attr_type)
            attribute.std_header.length = attr_length
            attribute.std_header.non_resident_flag = non_resident_flag
            attribute.std_header.name_length = attr_name_length

            if not non_resident_flag and not attr_name_length:
                log.debug('Attribute is: {}'.format('resident, not named'))

                offset_to_attribute = data.getWORD(ao + 0x14)
                attr_length_2 = data.getDWORD(ao + 0x10)

                log.debug(
                    'Length of the attribute: 0x{:0X}'.format(attr_length_2))
                attr_name = ''

            if not non_resident_flag and attr_name_length:
                log.debug('Attribute is: {}'.format('resident, named'))

                offset_to_attribute = data.getWORD(ao + 0x14)

                attr_name = data.getStream(ao + 0x18,
                                           ao + 0x18 + 2 * attr_name_length)
                attr_name = Helper._widechar_to_ascii(attr_name)

                log.debug('Attribute name: {0}'.format(attr_name))

                attr_length_2 = data.getDWORD(ao + 0x10)
                log.debug(
                    'Length of the attribute: 0x{:0X}'.format(attr_length_2))

            if non_resident_flag and not attr_name_length:

                log.debug('Attribute is: {}'.format('non resident, not named'))

                starting_vcn = data.getQWORD(ao + 0x10)
                last_vcn = data.getQWORD(ao + 0x18)
                log.debug('Starting VCN: 0x{:0X}, last VCN: 0x{:0X}'.format(
                    starting_vcn, last_vcn))

                attr_real_size = data.getQWORD(ao + 0x30)
                log.debug('Real size of the attribute: 0x{:0X}'.format(
                    attr_real_size))
                attr_length_2 = attr_real_size

                # offset to datarun
                offset_to_attribute = data.getWORD(ao + 0x20)
                attr_name = ''

                attribute.std_header.starting_vcn = starting_vcn
                attribute.std_header.last_vcn = last_vcn
                attribute.std_header.attr_real_size = attr_real_size

                # the run list occupies the rest of the attribute; the
                # non-resident header is 0x40 bytes
                s = data.getStream(
                    ao + offset_to_attribute,
                    ao + offset_to_attribute + attr_length - 0x40)
                data_runs = self._decode_data_runs(s)

                attribute.data_runs = data_runs

                for data_run in data_runs:
                    n, lcn = data_run

                    file_offset = lcn * self.sectors_per_cluster * self.bytes_per_sector
                    size_in_bytes = n * self.sectors_per_cluster * self.bytes_per_sector

                    log.debug(
                        '0x{:04x} clusters @ LCN 0x{:04x}, @ f_offset 0x{:x}, size_in_bytes {}'
                        .format(n, lcn, file_offset, size_in_bytes))

            if non_resident_flag and attr_name_length:
                log.debug('Attribute is: {}'.format('non resident, named'))

                starting_vcn = data.getQWORD(ao + 0x10)
                last_vcn = data.getQWORD(ao + 0x18)
                log.debug('Starting VCN: 0x{:0X}, last VCN: 0x{:0X}'.format(
                    starting_vcn, last_vcn))

                attr_name = data.getStream(ao + 0x40,
                                           ao + 0x40 + 2 * attr_name_length)
                attr_name = Helper._widechar_to_ascii(attr_name)

                log.debug('Attribute name: {0}'.format(attr_name))

                attr_real_size = data.getQWORD(ao + 0x30)
                log.debug('Real size of the attribute: 0x{:0X}'.format(
                    attr_real_size))
                attr_length_2 = attr_real_size

                attribute.std_header.starting_vcn = starting_vcn
                attribute.std_header.last_vcn = last_vcn
                attribute.std_header.attr_real_size = attr_real_size

                # offset to datarun
                offset_to_attribute = data.getWORD(ao + 0x20)

                # run list length excludes the 0x40-byte header plus the UTF-16 name
                s = data.getStream(
                    ao + offset_to_attribute, ao + offset_to_attribute +
                    attr_length - (2 * attr_name_length + 0x40))
                data_runs = self._decode_data_runs(s)

                attribute.data_runs = data_runs

                for data_run in data_runs:
                    n, lcn = data_run

                    file_offset = lcn * self.sectors_per_cluster * self.bytes_per_sector
                    size_in_bytes = n * self.sectors_per_cluster * self.bytes_per_sector

                    log.debug(
                        '0x{:04x} clusters @ LCN 0x{:04x}, @ f_offset 0x{:x}, size_in_bytes {}'
                        .format(n, lcn, file_offset, size_in_bytes))

            # populate std_header

            attribute.std_header.offset_to_attribute = offset_to_attribute
            attribute.std_header.length = attr_length_2
            attribute.std_header.name = attr_name

            ao += attr_length

            attribute.obj = AttributeType.recognize(attribute, obj)
            if attribute.obj is None:
                log.debug(
                    'Attribute {} (0x{:x}) not supported yet.'.format(
                        attribute.std_header.attrdef.name,
                        attribute.std_header.attrdef.type))
                log.debug('')

            obj.attributes.append(attribute)
            obj.attributes_dict[
                attribute.std_header.attrdef.name] = attribute.obj

        log.debug('---=== end attributes ===---')

        # postprocessing
        log.debug('postprocessing....')
        for attribute in obj.attributes:
            if attribute.obj:
                attribute.obj.postprocess()

        log.debug('')
        return obj
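
Example #5 leans on _decode_data_runs, which is not shown. In the NTFS run-list format each entry begins with a header byte: the low nibble is the byte-width of the cluster count, the high nibble is the byte-width of a signed LCN delta relative to the previous run, and a 0x00 header terminates the list. A sketch of a decoder for that layout, assuming (as the loops above suggest) that _decode_data_runs yields (cluster_count, lcn) pairs:

def decode_data_runs(stream):
    runs = []
    i = 0
    lcn = 0
    while i < len(stream) and stream[i] != 0:  # 0x00 header terminates the run list
        header = stream[i]
        len_size = header & 0x0F          # bytes encoding the cluster count
        off_size = (header >> 4) & 0x0F   # bytes encoding the LCN delta (0 => sparse run)
        i += 1
        n = int.from_bytes(bytes(stream[i:i + len_size]), 'little')
        i += len_size
        # the delta is signed and relative to the previous run's LCN
        delta = int.from_bytes(bytes(stream[i:i + off_size]), 'little', signed=True)
        i += off_size
        lcn += delta
        runs.append((n, lcn))
    return runs

# e.g. b'\x21\x18\x34\x56\x00' decodes to one run: 0x18 clusters at LCN 0x5634
assert decode_data_runs(bytearray(b'\x21\x18\x34\x56\x00')) == [(0x18, 0x5634)]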