Code Example #1
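The snippets in this listing are not self-contained: NTFS helpers such as ATTR_TYPE, FilenameAttribute, IndexRootHeader, MREF, MSEQNO, and StandardInformationFieldDoesNotExist, plus script-level helpers like create_safe_timeline_entries, the make_*_model functions, ascii_strings, unicode_strings, and format_dfxml, are defined elsewhere. The import lines below are an assumption (in the style of INDXParse's MFT module), not part of the original listing.

import datetime

# Assumed import: the NTFS parsing helpers used below are taken to come from
# the INDXParse MFT module (MFT.py).
from MFT import *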
def get_timeline_entries(record):
    entries = []
    si = record.standard_information()
    fn = record.filename_information()

    if si and fn:
        filename = fn.filename()
        entries.extend(create_safe_timeline_entries(si, "$SI", filename))

    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        attr_filename = attr.filename()
        entries.extend(create_safe_timeline_entries(attr, "$FN",
                                                    attr_filename))

    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            fn = e.filename_information()
            fn_filename = fn.filename()
            entries.extend(
                create_safe_timeline_entries(fn, "INDX", fn_filename))

        for e in irh.node_header().slack_entries():
            fn = e.filename_information()
            fn_filename = fn.filename()
            entries.extend(
                create_safe_timeline_entries(fn, "slack-INDX", fn_filename))

    return sorted(
        entries,
        key=lambda x: x["timestamp"] or datetime.datetime(1970, 1, 1, 0, 0, 0))
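get_timeline_entries depends on create_safe_timeline_entries, which the listing does not show. Below is a minimal sketch of what such a helper could look like, assuming created_time()/modified_time()/changed_time()/accessed_time() accessors on the $SI and $FN attributes; only the "timestamp" key is implied by the sort key above, the other keys are illustrative choices.

def create_safe_timeline_entries(source, source_type, filename):
    # Hypothetical sketch, not part of the original listing: wrap each
    # timestamp accessor so one malformed field does not abort timelining
    # of the whole record.
    entries = []
    for accessor, event in (("created_time", "birthed"),
                            ("modified_time", "modified"),
                            ("changed_time", "changed"),
                            ("accessed_time", "accessed")):
        try:
            timestamp = getattr(source, accessor)()
        except (AttributeError, ValueError):
            timestamp = None  # the caller's sort key tolerates None
        entries.append({
            "timestamp": timestamp,
            "type": event,
            "source": source_type,
            "filename": filename,
        })
    return entries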
Code Example #2
def make_model(record, path):
    active_data = record.active_data()
    slack_data = record.slack_data()
    model = {
        "magic": record.magic(),
        "path": path,
        "inode": record.inode,
        "is_active": record.is_active(),
        "is_directory": record.is_directory(),
        "size": 0,  # updated below
        "standard_information": make_standard_information_model(
            record.standard_information()),
        "filename_information": make_filename_information_model(
            record.filename_information()),
        "owner_id": 0,  # updated below
        "security_id": 0,  # updated below
        "quota_charged": 0,  # updated below
        "usn": 0,  # updated below
        "filenames": [],
        "attributes": [],
        "indx_entries": [],
        "slack_indx_entries": [],
        "timeline": get_timeline_entries(record),
        "active_ascii_strings": ascii_strings(active_data),
        "active_unicode_strings": unicode_strings(active_data),
        "slack_ascii_strings": ascii_strings(slack_data),
        "slack_unicode_strings": unicode_strings(slack_data),
    }
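
    # The fields marked "updated below" are never updated in the listing as
    # given. A hedged reconstruction of that step, assuming
    # $STANDARD_INFORMATION exposes owner_id()/security_id()/quota_charged()/
    # usn() accessors and raises StandardInformationFieldDoesNotExist when a
    # field is absent (as in Code Example #3):
    if record.standard_information() is not None:
        try:
            # These are optional trailing fields, so once one is missing the
            # rest are too; a single handler covers them all.
            model["owner_id"] = record.standard_information().owner_id()
            model["security_id"] = record.standard_information().security_id()
            model["quota_charged"] = \
                record.standard_information().quota_charged()
            model["usn"] = record.standard_information().usn()
        except StandardInformationFieldDoesNotExist:
            pass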

    if not record.is_directory():
        data_attr = record.data_attribute()
        if data_attr and data_attr.non_resident() > 0:
            model["size"] = data_attr.data_size()
        elif record.filename_information() is not None:
            model["size"] = record.filename_information().logical_size()
        else:
            model["size"] = 0

    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        model["filenames"].append(make_filename_information_model(attr))

    for b in record.attributes():
        model["attributes"].append(make_attribute_model(b))

    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            m = make_filename_information_model(e.filename_information())
            m["inode"] = MREF(e.mft_reference())
            m["sequence_num"] = MSEQNO(e.mft_reference())
            model["indx_entries"].append(m)

        for e in irh.node_header().slack_entries():
            m = make_filename_information_model(e.filename_information())
            m["inode"] = MREF(e.mft_reference())
            m["sequence_num"] = MSEQNO(e.mft_reference())
            model["slack_indx_entries"].append(m)
    return model
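make_model resolves each INDX entry's 64-bit MFT reference with MREF and MSEQNO. If those helpers are not supplied by the MFT module, minimal equivalents follow the standard NTFS FILE_REFERENCE layout (low 48 bits hold the record number, high 16 bits the sequence number); this sketch describes that layout and is an assumption about the helpers, not a copy of them.

def MREF(mft_reference):
    # Low 48 bits of an NTFS FILE_REFERENCE: the MFT record (inode) number.
    return mft_reference & 0x0000FFFFFFFFFFFF


def MSEQNO(mft_reference):
    # High 16 bits of an NTFS FILE_REFERENCE: the sequence number.
    return (mft_reference >> 48) & 0xFFFF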
Code Example #3
def dfxml_mft_record(mft_enumerator, record, prefix, dfxml_item):
    """
    Attach all FileObjects
      associated with a single record. This includes
      standard information, filename information,
      and any resident directory index entries.
    """
    tags = []
    if not record.is_active():
        tags.append("inactive")

    path = prefix + "\\" + mft_enumerator.get_path(record)
    si = record.standard_information()
    fn = record.filename_information()

    if not record.is_active() and not fn:
        return

    inode = record.mft_record_number()
    if record.is_directory():
        size = 0
    else:
        data_attr = record.data_attribute()
        if data_attr and data_attr.non_resident() > 0:
            size = data_attr.data_size()
        elif fn:
            size = fn.logical_size()
        else:
            size = 0

    ADSs = []  # list of (name, size)
    for attr in record.attributes():
        if attr.type() != ATTR_TYPE.DATA or len(attr.name()) == 0:
            continue
        # Use a separate variable so named-stream sizes do not clobber the
        # record-level `size` computed above, which is reused for the
        # $SI/$FN FileObjects below.
        if attr.non_resident() > 0:
            ads_size = attr.data_size()
        else:
            ads_size = attr.value_length()
        ADSs.append((attr.name(), ads_size))

    si_index = 0
    if si:
        try:
            si_index = si.security_id()
        except StandardInformationFieldDoesNotExist:
            pass

    indices = []  # list of (filename, size, reference, info)
    slack_indices = []  # list of (filename, size, reference, info)
    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        # TODO(wb): don't use IndxRootHeader
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            # tuple order matches the comment above and the format_dfxml
            # calls below: (filename, size, reference, info)
            indices.append(
                (e.filename_information().filename(),
                 e.filename_information().logical_size(),
                 e.mft_reference(),
                 e.filename_information()))

        for e in irh.node_header().slack_entries():
            slack_indices.append(
                (e.filename_information().filename(),
                 e.filename_information().logical_size(),
                 e.mft_reference(),
                 e.filename_information()))

    # si
    if si:
        try:
            fo = format_dfxml(path, size, inode, si_index, si, tags)
            dfxml_item.append(fo)
        except UnicodeEncodeError:
            print "# failed to print: %s" % (list(path))

    # fn
    if fn:
        tags = ["filename"]
        if not record.is_active():
            tags.append("inactive")
            is_active = 0
        else:
            is_active = 1
        try:
            fo = format_dfxml(path, size, inode, si_index, fn, tags)
            fo.alloc_inode = is_active  # check this
            dfxml_item.append(fo)
        except UnicodeEncodeError:
            print("# failed to print: %s" % list(path))

    # ADS, NOTE: Future implementations can work with ADS's here.
    """
    for ads in ADSs:
        tags = []
        if not record.is_active():
            tags.append("inactive")
            is_active = 0
        else:
            is_active = 1
        try:
            fo = format_dfxml(path + ":" + ads[0], ads[1], inode, si_index, si or {}, tags)
            fo.alloc_inode = is_active #Check this
            dfxml_item.append(fo)
        except UnicodeEncodeError:
            print "# failed to print: %s" % (list(path))
    """

    # INDX
    for indx in indices:
        tags = ["indx"]
        try:
            fo = format_dfxml(path + "\\" + indx[0], indx[1], MREF(indx[2]), 0,
                              indx[3], tags)
            fo.alloc_inode = 1
            dfxml_item.append(fo)
        except UnicodeEncodeError:
            print "# failed to print: %s" % (list(path))

    for indx in slack_indices:
        tags = ["indx", "slack"]
        try:
            fo = format_dfxml(path + "\\" + indx[0], indx[1], MREF(indx[2]), 0,
                              indx[3], tags)
            fo.alloc_inode = 0
            dfxml_item.append(fo)
        except UnicodeEncodeError:
            print "# failed to print: %s" % (list(path))