def get_timeline_entries(record):
    entries = []
    si = record.standard_information()
    fn = record.filename_information()
    
    if si and fn:
        filename = fn.filename()
        entries.extend(create_safe_timeline_entries(si, "$SI", filename))

    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        attr_filename = attr.filename()
        entries.extend(create_safe_timeline_entries(attr, "$FN", attr_filename))

    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            fn = e.filename_information()
            fn_filename = fn.filename()
            entries.extend(create_safe_timeline_entries(fn, "INDX", fn_filename))

        for e in irh.node_header().slack_entries():
            fn = e.filename_information()
            fn_filename = fn.filename()
            entries.extend(create_safe_timeline_entries(fn, "slack-INDX", fn_filename))

    return sorted(entries, key=lambda x: x["timestamp"] or datetime.datetime(1970, 1, 1, 0, 0, 0))
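Examples #1 and #2 depend on a create_safe_timeline_entries helper that is not included in these excerpts. A minimal sketch, assuming it mirrors the four-entry pattern of Example #5 while tolerating timestamps that fail to parse (which is what the epoch fallback in the sort key suggests), might look like this:

def create_safe_timeline_entries(source_attr, source_name, path):
    # Hypothetical helper: build one timeline entry per MACB timestamp and
    # substitute None when an accessor raises, so callers can sort with an
    # epoch fallback as in Examples #1 and #2.
    entries = []
    for accessor, entry_type in [
            (source_attr.created_time, "birthed"),
            (source_attr.modified_time, "modified"),
            (source_attr.changed_time, "changed"),
            (source_attr.accessed_time, "accessed")]:
        try:
            timestamp = accessor()
        except (ValueError, OverflowError):
            timestamp = None
        entries.append({
            "timestamp": timestamp,
            "type": entry_type,
            "source": source_name,
            "path": path,
        })
    return entries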
Example #2
def get_timeline_entries(record):
    entries = []
    si = record.standard_information()
    fn = record.filename_information()

    if si and fn:
        filename = fn.filename()
        entries.extend(create_safe_timeline_entries(si, "$SI", filename))

    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        attr_filename = attr.filename()
        entries.extend(create_safe_timeline_entries(attr, "$FN",
                                                    attr_filename))

    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            fn = e.filename_information()
            fn_filename = fn.filename()
            entries.extend(
                create_safe_timeline_entries(fn, "INDX", fn_filename))

        for e in irh.node_header().slack_entries():
            fn = e.filename_information()
            fn_filename = fn.filename()
            entries.extend(
                create_safe_timeline_entries(fn, "slack-INDX", fn_filename))

    return sorted(
        entries,
        key=lambda x: x["timestamp"] or datetime.datetime(1970, 1, 1, 0, 0, 0))
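Example #3 below relies on make_standard_information_model and make_filename_information_model, neither of which appears in these excerpts. A rough sketch of the filename variant, assuming only the FilenameAttribute accessors already used elsewhere in this section, could be:

def make_filename_information_model(attr):
    # Hypothetical helper: flatten a $FN-style attribute into a plain dict;
    # Example #3 later adds "inode" and "sequence_num" keys to the result.
    if attr is None:
        return None
    return {
        "filename": str(attr.filename()),
        "flags": attr.flags(),
        "logical_size": attr.logical_size(),
        "physical_size": attr.physical_size(),
        "created": attr.created_time(),
        "modified": attr.modified_time(),
        "changed": attr.changed_time(),
        "accessed": attr.accessed_time(),
    }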
Example #3
def make_model(record, path):
    active_data = record.active_data()
    slack_data = record.slack_data()
    model = {
        "magic": record.magic(),
        "path": path,
        "inode": record.inode,
        "is_active": record.is_active(),
        "is_directory": record.is_directory(),
        "size": 0,  # updated below
        "standard_information": make_standard_information_model(
            record.standard_information()),
        "filename_information": make_filename_information_model(
            record.filename_information()),
        "owner_id": 0,  # updated below
        "security_id": 0,  # updated below
        "quota_charged": 0,  # updated below
        "usn": 0,  # updated below
        "filenames": [],
        "attributes": [],
        "indx_entries": [],
        "slack_indx_entries": [],
        "timeline": get_timeline_entries(record),
        "active_ascii_strings": ascii_strings(active_data),
        "active_unicode_strings": unicode_strings(active_data),
        "slack_ascii_strings": ascii_strings(slack_data),
        "slack_unicode_strings": unicode_strings(slack_data),
    }

    if not record.is_directory():
        data_attr = record.data_attribute()
        if data_attr and data_attr.non_resident() > 0:
            model["size"] = data_attr.data_size()
        elif record.filename_information() is not None:
            model["size"] = record.filename_information().logical_size()
        else:
            model["size"] = 0

    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        model["filenames"].append(make_filename_information_model(attr))

    for b in record.attributes():
        model["attributes"].append(make_attribute_model(b))

    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            m = make_filename_information_model(e.filename_information())
            m["inode"] = MREF(e.mft_reference())
            m["sequence_num"] = MSEQNO(e.mft_reference())
            model["indx_entries"].append(m)

        for e in irh.node_header().slack_entries():
            m = make_filename_information_model(e.filename_information())
            m["inode"] = MREF(e.mft_reference())
            m["sequence_num"] = MSEQNO(e.mft_reference())
            model["slack_indx_entries"].append(m)
    return model
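MREF and MSEQNO above are not defined in these excerpts. An NTFS file reference is a 64-bit value that packs a 48-bit MFT record number with a 16-bit sequence number, so equivalent helpers are typically just bit masks (a sketch, not necessarily this project's exact implementation):

def MREF(mft_reference):
    # Low 48 bits: the referenced MFT record (inode) number.
    return mft_reference & 0x0000FFFFFFFFFFFF


def MSEQNO(mft_reference):
    # High 16 bits: the sequence number of that record.
    return (mft_reference >> 48) & 0xFFFF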
Example #4
    def update(self, event):
        self._record_number.update(str(self._model.record().mft_record_number()))

        attributes = []
        if self._model.record().is_directory():
            attributes.append("directory")
        else:
            attributes.append("file")
        if self._model.record().is_active():
            attributes.append("active")
        else:
            attributes.append("deleted")
        if len(attributes) > 0:
            self._record_attributes_line.update(", ".join(attributes))
        else:
            self._record_attributes_line.update("<none>")

        size = 0
        if not self._model.record().is_directory():
            data_attr = self._model.record().data_attribute()
            if data_attr and data_attr.non_resident() > 0:
                size = data_attr.data_size()
            else:
                size = self._model.record().filename_information().logical_size()

        self._record_size_line.update(str(size))
        self._seq_line.update(str(self._model.record().sequence_number()))

        attributes = []
        if self._model.record().standard_information().attributes() & 0x01:
            attributes.append("readonly")
        if self._model.record().standard_information().attributes() & 0x02:
            attributes.append("hidden")
        if self._model.record().standard_information().attributes() & 0x04:
            attributes.append("system")
        if self._model.record().standard_information().attributes() & 0x08:
            attributes.append("unused-dos")
        if self._model.record().standard_information().attributes() & 0x10:
            attributes.append("directory-dos")
        if self._model.record().standard_information().attributes() & 0x20:
            attributes.append("archive")
        if self._model.record().standard_information().attributes() & 0x40:
            attributes.append("device")
        if self._model.record().standard_information().attributes() & 0x80:
            attributes.append("normal")
        if self._model.record().standard_information().attributes() & 0x100:
            attributes.append("temporary")
        if self._model.record().standard_information().attributes() & 0x200:
            attributes.append("sparse")
        if self._model.record().standard_information().attributes() & 0x400:
            attributes.append("reparse-point")
        if self._model.record().standard_information().attributes() & 0x800:
            attributes.append("compressed")
        if self._model.record().standard_information().attributes() & 0x1000:
            attributes.append("offline")
        if self._model.record().standard_information().attributes() & 0x2000:
            attributes.append("not-indexed")
        if self._model.record().standard_information().attributes() & 0x4000:
            attributes.append("encrypted")
        if self._model.record().standard_information().attributes() & 0x10000000:
            attributes.append("has-indx")
        if self._model.record().standard_information().attributes() & 0x20000000:
            attributes.append("has-view-index")
        if len(attributes) > 0:
            self._si_attributes_line.update(", ".join(attributes))
        else:
            self._si_attributes_line.update("<none>")

        crtime = self._model.record().standard_information().created_time().isoformat("T")
        self._si_created_line.update(crtime + "Z")

        mtime = self._model.record().standard_information().modified_time().isoformat("T")
        self._si_modified_line.update(mtime + "Z")

        chtime = self._model.record().standard_information().changed_time().isoformat("T")
        self._si_changed_line.update(chtime + "Z")

        atime = self._model.record().standard_information().accessed_time().isoformat("T")
        self._si_accessed_line.update(atime + "Z")

        for i in self._fn:
            self._fn[i].name_line.update("<not present>")
            self._fn[i].attributes_line.update("")
            self._fn[i].alloc_size_line.update("")
            self._fn[i].log_size_line.update("")
            self._fn[i].created_line.update("")
            self._fn[i].modified_line.update("")
            self._fn[i].changed_line.update("")
            self._fn[i].accessed_line.update("")

        for b in self._model.record().attributes():
            if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
                continue
            try:
                attr = FilenameAttribute(b.value(), 0, self)
                a = attr.filename_type()

                self._fn[a].name_line.update(str(attr.filename()))

                attributes = []
                if attr.flags() & 0x01:
                    attributes.append("readonly")
                if attr.flags() & 0x02:
                    attributes.append("hidden")
                if attr.flags() & 0x04:
                    attributes.append("system")
                if attr.flags() & 0x08:
                    attributes.append("unused-dos")
                if attr.flags() & 0x10:
                    attributes.append("directory-dos")
                if attr.flags() & 0x20:
                    attributes.append("archive")
                if attr.flags() & 0x40:
                    attributes.append("device")
                if attr.flags() & 0x80:
                    attributes.append("normal")
                if attr.flags() & 0x100:
                    attributes.append("temporary")
                if attr.flags() & 0x200:
                    attributes.append("sparse")
                if attr.flags() & 0x400:
                    attributes.append("reparse-point")
                if attr.flags() & 0x800:
                    attributes.append("compressed")
                if attr.flags() & 0x1000:
                    attributes.append("offline")
                if attr.flags() & 0x2000:
                    attributes.append("not-indexed")
                if attr.flags() & 0x4000:
                    attributes.append("encrypted")
                if attr.flags() & 0x10000000:
                    attributes.append("has-indx")
                if attr.flags() & 0x20000000:
                    attributes.append("has-view-index")
                if len(attributes) > 0:
                    self._fn[a].attributes_line.update(", ".join(attributes))
                else:
                    self._fn[a].attributes_line.update("<none>")

                self._fn[a].alloc_size_line.update(str(attr.physical_size()))
                self._fn[a].log_size_line.update(str(attr.logical_size()))

                crtime = attr.created_time().isoformat("T")
                self._fn[a].created_line.update(crtime + "Z")

                mtime = attr.modified_time().isoformat("T")
                self._fn[a].modified_line.update(mtime + "Z")

                chtime = attr.changed_time().isoformat("T")
                self._fn[a].changed_line.update(chtime + "Z")

                atime = attr.accessed_time().isoformat("T")
                self._fn[a].accessed_line.update(atime + "Z")

            except ZeroDivisionError:
                continue
        self.Layout()
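Example #6 below passes $SI and $FN flag words to a get_flags helper that is not reproduced here. Given the bit names enumerated above in Example #4, a table-driven sketch (an assumption about the helper, not a copy of it) might be:

def get_flags(flags):
    # Hypothetical helper: translate a flags word into the same names that
    # Example #4 checks bit by bit.
    names = [
        (0x01, "readonly"), (0x02, "hidden"), (0x04, "system"),
        (0x08, "unused-dos"), (0x10, "directory-dos"), (0x20, "archive"),
        (0x40, "device"), (0x80, "normal"), (0x100, "temporary"),
        (0x200, "sparse"), (0x400, "reparse-point"), (0x800, "compressed"),
        (0x1000, "offline"), (0x2000, "not-indexed"), (0x4000, "encrypted"),
        (0x10000000, "has-indx"), (0x20000000, "has-view-index"),
    ]
    return [name for mask, name in names if flags & mask]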
Example #5
def get_timeline_entries(record):
    entries = []
    entries.append({
        "timestamp": record.standard_information().created_time(),
        "type": "birthed",
        "source": "$SI",
        "path": record.filename_information().filename(),
    })
    entries.append({
        "timestamp": record.standard_information().accessed_time(),
        "type": "accessed",
        "source": "$SI",
        "path": record.filename_information().filename(),
    })
    entries.append({
        "timestamp": record.standard_information().modified_time(),
        "type": "modified",
        "source": "$SI",
        "path": record.filename_information().filename(),
    })
    entries.append({
        "timestamp": record.standard_information().changed_time(),
        "type": "changed",
        "source": "$SI",
        "path": record.filename_information().filename(),
    })

    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        entries.append({
            "timestamp": attr.created_time(),
            "type": "birthed",
            "source": "$FN",
            "path": attr.filename(),
        })
        entries.append({
            "timestamp": attr.accessed_time(),
            "type": "accessed",
            "source": "$FN",
            "path": attr.filename(),
        })
        entries.append({
            "timestamp": attr.modified_time(),
            "type": "modified",
            "source": "$FN",
            "path": attr.filename(),
        })
        entries.append({
            "timestamp": attr.changed_time(),
            "type": "changed",
            "source": "$FN",
            "path": attr.filename(),
        })

    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            entries.append({
                "timestamp": e.filename_information().created_time(),
                "type": "birthed",
                "source": "INDX",
                "path": e.filename_information().filename()
            })
            entries.append({
                "timestamp": e.filename_information().accessed_time(),
                "type": "accessed",
                "source": "INDX",
                "path": e.filename_information().filename()
            })
            entries.append({
                "timestamp": e.filename_information().modified_time(),
                "type": "modified",
                "source": "INDX",
                "path": e.filename_information().filename()
            })
            entries.append({
                "timestamp": e.filename_information().changed_time(),
                "type": "changed",
                "source": "INDX",
                "path": e.filename_information().filename()
            })

        for e in irh.node_header().slack_entries():
            entries.append({
                "timestamp": e.filename_information().created_time(),
                "type": "birthed",
                "source": "slack-INDX",
                "path": e.filename_information().filename()
            })
            entries.append({
                "timestamp": e.filename_information().accessed_time(),
                "type": "accessed",
                "source": "slack-INDX",
                "path": e.filename_information().filename()
            })
            entries.append({
                "timestamp": e.filename_information().modified_time(),
                "type": "modified",
                "source": "slack-INDX",
                "path": e.filename_information().filename()
            })
            entries.append({
                "timestamp": e.filename_information().changed_time(),
                "type": "changed",
                "source": "slack-INDX",
                "path": e.filename_information().filename()
            })
    return sorted(entries, key=lambda x: x["timestamp"])
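Examples #3 and #6 pass record buffers to ascii_strings and unicode_strings helpers that are not reproduced here. A simple regex-based sketch (the minimum run length of four characters and the exact character class are assumptions):

import re

ASCII_RE = re.compile(b"[ -~]{4,}")            # runs of printable ASCII
UNICODE_RE = re.compile(b"(?:[ -~]\x00){4,}")  # naive UTF-16LE runs


def ascii_strings(buf):
    # Hypothetical helper: return decoded printable-ASCII runs from a buffer.
    if not buf:
        return []
    return [m.group(0).decode("ascii") for m in ASCII_RE.finditer(buf)]


def unicode_strings(buf):
    # Hypothetical helper: return decoded UTF-16LE runs (ASCII subset only).
    if not buf:
        return []
    return [m.group(0).decode("utf-16le") for m in UNICODE_RE.finditer(buf)]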
Example #6
def make_model(record, path, record_buf):
    model = {
        "magic": record.magic(),
        "path": path,
        "record_num": str(record.mft_record_number()),
        "is_active": record.is_active(),
        "is_directory": record.is_directory(),
        "size": 0,  # updated below
        "flags": get_flags(record.standard_information().attributes()),
        "created": record.standard_information().created_time(),
        "modified": record.standard_information().modified_time(),
        "changed": record.standard_information().changed_time(),
        "accessed": record.standard_information().accessed_time(),
        "owner_id": 0,  # updated below
        "security_id": 0,  # updated below
        "quota_charged": 0,  # updated below
        "usn": 0,  # updated below
        "filenames": [],
        "attributes": [],
        "indx_entries": [],
        "slack_indx_entries": [],
        "timeline": get_timeline_entries(record),
        "ascii_strings": ascii_strings(record_buf),
        "unicode_strings": unicode_strings(record_buf),
        }

    if not record.is_directory():
        data_attr = record.data_attribute()
        if data_attr and data_attr.non_resident() > 0:
            model["size"] = data_attr.data_size()
        else:
            model["size"] = record.filename_information().logical_size()

    try:
        # Since the fields are sequential, we can handle an exception halfway
        # through here and ignore the remaining items; we don't have to worry
        # about individual try/except blocks.
        model["owner_id"] = record.standard_information().owner_id()
        model["security_id"] = record.standard_information().security_id()
        model["quota_charged"] = record.standard_information().quota_charged()
        model["usn"] = record.standard_information().usn()
    except StandardInformationFieldDoesNotExist:
        pass

    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        model["filenames"].append({
            "type": ["POSIX", "WIN32", "DOS 8.3", "WIN32 + DOS 8.3"][attr.filename_type()],
            "name": str(attr.filename()),
            "flags": get_flags(attr.flags()),
            "logical_size": attr.logical_size(),
            "physical_size": attr.physical_size(),
            "modified": attr.modified_time(),
            "accessed": attr.accessed_time(),
            "changed": attr.changed_time(),
            "created": attr.created_time(),
            "parent_ref": MREF(attr.mft_parent_reference()),
            "parent_seq": MSEQNO(attr.mft_parent_reference()),
        })

    for b in record.attributes():
        attribute_model = {
            "type": Attribute.TYPES[b.type()],
            "name": b.name() or "<none>",
            "flags": get_flags(attr.flags()),
            "is_resident": b.non_resident() == 0,
            "data_size": 0,
            "allocated_size": 0,
            "value_size": 0,
            "runs": [],
        }

        if b.non_resident() > 0:
            attribute_model["data_size"] = b.data_size()
            attribute_model["allocated_size"] = b.allocated_size()

            if b.allocated_size() > 0:
                for (offset, length) in b.runlist().runs():
                    attribute_model["runs"].append({
                        "offset": offset,
                        "length": length,
                    })
        else:
            attribute_model["value_size"] = b.value_length()
        model["attributes"].append(attribute_model)

    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            model["indx_entries"].append({
                "filename": e.filename_information().filename(),
                "size": e.filename_information().logical_size(),
                "modified": e.filename_information().modified_time(),
                "accessed": e.filename_information().accessed_time(),
                "changed": e.filename_information().changed_time(),
                "created": e.filename_information().created_time(),
                "record_num": MREF(e.mft_reference()),
                "sequence_num": MSEQNO(e.mft_reference()),
            })

        for e in irh.node_header().slack_entries():
            model["slack_indx_entries"].append({
                "filename": e.filename_information().filename(),
                "size": e.filename_information().logical_size(),
                "modified": e.filename_information().modified_time(),
                "accessed": e.filename_information().accessed_time(),
                "changed": e.filename_information().changed_time(),
                "created": e.filename_information().created_time(),
                "record_num": MREF(e.mft_reference()),
                "sequence_num": MSEQNO(e.mft_reference()),
            })

    return model
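The models built in Examples #3 and #6 mix datetime objects with plain values, so passing them straight to json.dumps raises a TypeError. A small usage sketch (assuming a model dict already built by make_model) that renders timestamps as ISO-8601 strings:

import datetime
import json


def _json_default(value):
    # Serialize datetime objects the same way the other examples do
    # (ISO-8601 with a trailing "Z"); reject anything else.
    if isinstance(value, datetime.datetime):
        return value.isoformat("T") + "Z"
    raise TypeError("unserializable value: %r" % (value,))


# model = make_model(record, path, record_buf)  # see Example #6
# print(json.dumps(model, default=_json_default, indent=2))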