def get_timeline_entries(record):
    """Collect MACB timeline entries (birthed, accessed, modified, changed)
    from the $SI and $FN attributes and any resident INDX entries of a
    single MFT record."""
    entries = []
    si = record.standard_information()
    fn = record.filename_information()

    if si and fn:
        filename = fn.filename()
        entries.extend(create_safe_timeline_entries(si, "$SI", filename))

    # one set of $FN timestamps per FILENAME_INFORMATION attribute
    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        attr_filename = attr.filename()
        entries.extend(create_safe_timeline_entries(attr, "$FN", attr_filename))

    # timestamps from filenames in a resident $INDEX_ROOT, including slack entries
    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            fn = e.filename_information()
            fn_filename = fn.filename()
            entries.extend(create_safe_timeline_entries(fn, "INDX", fn_filename))

        for e in irh.node_header().slack_entries():
            fn = e.filename_information()
            fn_filename = fn.filename()
            entries.extend(create_safe_timeline_entries(fn, "slack-INDX", fn_filename))

    # entries whose timestamp could not be parsed sort as the Unix epoch
    return sorted(entries, key=lambda x: x["timestamp"] or datetime.datetime(1970, 1, 1, 0, 0, 0))
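The helper create_safe_timeline_entries is not shown on this page. A minimal sketch of what it plausibly does, assuming each timestamp accessor can raise ValueError on a corrupt value (which is why the sort key above falls back to the Unix epoch); the entry keys and type names match the expanded version in Example #8 below:

def create_safe_timeline_entry(timestamp_fn, type_, source, path):
    # assumption: a corrupt timestamp raises ValueError when decoded;
    # record None so the entry still sorts (see the epoch fallback above)
    try:
        timestamp = timestamp_fn()
    except ValueError:
        timestamp = None
    return {"timestamp": timestamp, "type": type_, "source": source, "path": path}


def create_safe_timeline_entries(attr, source, path):
    return [
        create_safe_timeline_entry(attr.created_time, "birthed", source, path),
        create_safe_timeline_entry(attr.accessed_time, "accessed", source, path),
        create_safe_timeline_entry(attr.modified_time, "modified", source, path),
        create_safe_timeline_entry(attr.changed_time, "changed", source, path),
    ]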
Example #3
def make_model(record, path):
    active_data = record.active_data()
    slack_data = record.slack_data()
    model = {
        "magic": record.magic(),
        "path": path,
        "inode": record.inode,
        "is_active": record.is_active(),
        "is_directory": record.is_directory(),
        "size": 0,  # updated below
        "standard_information": make_standard_information_model(record.standard_information()),
        "filename_information": make_filename_information_model(record.filename_information()),
        "owner_id": 0,  # updated below
        "security_id": 0,  # updated below
        "quota_charged": 0,  # updated below
        "usn": 0,  # updated below
        "filenames": [],
        "attributes": [],
        "indx_entries": [],
        "slack_indx_entries": [],
        "timeline": get_timeline_entries(record),
        "active_ascii_strings": ascii_strings(active_data),
        "active_unicode_strings": unicode_strings(active_data),
        "slack_ascii_strings": ascii_strings(slack_data),
        "slack_unicode_strings": unicode_strings(slack_data),
    }

    if not record.is_directory():
        data_attr = record.data_attribute()
        if data_attr and data_attr.non_resident() > 0:
            model["size"] = data_attr.data_size()
        elif record.filename_information() is not None:
            model["size"] = record.filename_information().logical_size()
        else:
            model["size"] = 0

    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        model["filenames"].append(make_filename_information_model(attr))

    for b in record.attributes():
        model["attributes"].append(make_attribute_model(b))

    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            m = make_filename_information_model(e.filename_information())
            m["inode"] = MREF(e.mft_reference())
            m["sequence_num"] = MSEQNO(e.mft_reference())
            model["indx_entries"].append(m)

        for e in irh.node_header().slack_entries():
            m = make_filename_information_model(e.filename_information())
            m["inode"] = MREF(e.mft_reference())
            m["sequence_num"] = MSEQNO(e.mft_reference())
            model["slack_indx_entries"].append(m)
    return model
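make_filename_information_model is likewise not shown here. A sketch, assuming its fields mirror the inline $FN dictionary built later in Example #9 (the exact field set is an assumption):

def make_filename_information_model(attr):
    # sketch: field names follow the inline $FN dictionary in Example #9;
    # guard None because record.filename_information() may be absent
    if attr is None:
        return None
    return {
        "name": attr.filename(),
        "logical_size": attr.logical_size(),
        "physical_size": attr.physical_size(),
        "created": attr.created_time(),
        "modified": attr.modified_time(),
        "changed": attr.changed_time(),
        "accessed": attr.accessed_time(),
    }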
Example #4
def dfxml_mft_record(mft_enumerator, record, prefix, dfxml_item):
    """
    Attach all FileObjects
      associated with a single record. This includes
      standard information, filename information,
      and any resident directory index entries.
    """
    tags = []
    if not record.is_active():
        tags.append("inactive")

    path = prefix + "\\" + mft_enumerator.get_path(record)
    si = record.standard_information()
    fn = record.filename_information()

    if not record.is_active() and not fn:
        return

    inode = record.mft_record_number()
    if record.is_directory():
        size = 0
    else:
        data_attr = record.data_attribute()
        if data_attr and data_attr.non_resident() > 0:
            size = data_attr.data_size()
        elif fn:
            size = fn.logical_size()
        else:
            size = 0

    ADSs = []  # list of (name, size)
    for attr in record.attributes():
        # alternate data streams are $DATA attributes with a non-empty name
        if attr.type() != ATTR_TYPE.DATA or len(attr.name()) == 0:
            continue
        # use a separate variable so the record size computed above is preserved
        if attr.non_resident() > 0:
            ads_size = attr.data_size()
        else:
            ads_size = attr.value_length()
        ADSs.append((attr.name(), ads_size))

    si_index = 0
    if si:
        try:
            si_index = si.security_id()
        except StandardInformationFieldDoesNotExist:
            pass

    indices = []  # list of (filename, size, reference, info)
    slack_indices = []  # list of (filename, size, reference, info)
    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        # TODO(wb): don't use IndexRootHeader
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            # tuple order matches the comment above: (filename, size, reference, info)
            indices.append(
                (e.filename_information().filename(),
                 e.filename_information().logical_size(),
                 e.mft_reference(),
                 e.filename_information()))

        for e in irh.node_header().slack_entries():
            slack_indices.append(
                (e.filename_information().filename(),
                 e.filename_information().logical_size(),
                 e.mft_reference(),
                 e.filename_information()))

    # si
    if si:
        try:
            fo = format_dfxml(path, size, inode, si_index, si, tags)
            dfxml_item.append(fo)
        except UnicodeEncodeError:
            print "# failed to print: %s" % (list(path))

    # fn
    if fn:
        tags = ["filename"]
        if not record.is_active():
            tags.append("inactive")
            is_active = 0
        else:
            is_active = 1
        try:
            fo = format_dfxml(path, size, inode, si_index, fn, tags)
            fo.alloc_inode = is_active  # check this
            dfxml_item.append(fo)
        except UnicodeEncodeError:
            print "# failed to print: %s" % (list(path))

    # ADS, NOTE: Future implementations can work with ADS's here.
    """
    for ads in ADSs:
        tags = []
        if not record.is_active():
            tags.append("inactive")
            is_active = 0
        else:
            is_active = 1
        try:
            fo = format_dfxml(path + ":" + ads[0], ads[1], inode, si_index, si or {}, tags)
            fo.alloc_inode = is_active #Check this
            dfxml_item.append(fo)
        except UnicodeEncodeError:
            print "# failed to print: %s" % (list(path))
    """

    # INDX
    for indx in indices:
        tags = ["indx"]
        try:
            fo = format_dfxml(path + "\\" + indx[0], indx[1], MREF(indx[2]), 0,
                              indx[3], tags)
            fo.alloc_inode = 1
            dfxml_item.append(fo)
        except UnicodeEncodeError:
            print "# failed to print: %s" % (list(path))

    for indx in slack_indices:
        tags = ["indx", "slack"]
        try:
            fo = format_dfxml(path + "\\" + indx[0], indx[1], MREF(indx[2]), 0,
                              indx[3], tags)
            fo.alloc_inode = 0
            dfxml_item.append(fo)
        except UnicodeEncodeError:
            print "# failed to print: %s" % (list(path))
Example #6
    def update(self, event):
        self._sizer.Clear()  # Note, be sure to call self.Layout() after re-add
        self.DestroyChildren()

        if not self._model.record().is_directory():
            return

        # warn when any INDEX_ALLOCATION attribute has data runs on disk
        has_runlists = False
        for attr in self._model.record().attributes():
            if attr.type() != ATTR_TYPE.INDEX_ALLOCATION:
                continue
            if attr.non_resident() != 0:
                for _ in attr.runlist().runs():
                    has_runlists = True
                    break

        if has_runlists:
            warning_panel = DiskGeometryWarningPanel(self, self._model)
            self._sizer.Add(warning_panel,
                            self.NOT_EXPAND_VERTICALLY, wx.EXPAND)

        indxroot = self._model.record().attribute(ATTR_TYPE.INDEX_ROOT)
        if indxroot and indxroot.non_resident() == 0:
            # resident indx root
            irh = IndexRootHeader(indxroot.value(), 0, False)
            for e in irh.node_header().entries():
                fn = e.filename_information()
                ir_view = wx.StaticBox(self, -1, "INDX Record Information")
                ir_view_sizer = wx.StaticBoxSizer(ir_view, wx.VERTICAL)
                ir_view_sizer.Add(LabelledLine(self, "Filename", fn.filename()),
                                  self.NOT_EXPAND_VERTICALLY, wx.EXPAND)
                ir_view_sizer.Add(LabelledLine(self, "Size (bytes)",
                                               str(fn.logical_size())),
                                  self.NOT_EXPAND_VERTICALLY, wx.EXPAND)
                ir_view_sizer.Add(LabelledLine(self, "Created",
                                               fn.created_time().isoformat("T") + "Z"),
                                  self.NOT_EXPAND_VERTICALLY, wx.EXPAND)
                ir_view_sizer.Add(LabelledLine(self, "Modified",
                                               fn.modified_time().isoformat("T") + "Z"),
                                  self.NOT_EXPAND_VERTICALLY, wx.EXPAND)
                ir_view_sizer.Add(LabelledLine(self, "Changed",
                                               fn.changed_time().isoformat("T") + "Z"),
                                  self.NOT_EXPAND_VERTICALLY, wx.EXPAND)
                ir_view_sizer.Add(LabelledLine(self, "Accessed",
                                               fn.accessed_time().isoformat("T") + "Z"),
                                  self.NOT_EXPAND_VERTICALLY, wx.EXPAND)
                self._sizer.Add(ir_view_sizer,
                                self.NOT_EXPAND_VERTICALLY, wx.ALL | wx.EXPAND)
            for e in irh.node_header().slack_entries():
                ir_view = wx.StaticBox(self, -1,
                                       "Slack INDX Record Information")
                ir_view_sizer = wx.StaticBoxSizer(ir_view, wx.VERTICAL)
                ir_view_sizer.Add(LabelledLine(self, "Filename", e.filename_information().filename()), 0, wx.EXPAND)
                self._sizer.Add(ir_view_sizer,
                                self.NOT_EXPAND_VERTICALLY,
                                wx.ALL | wx.EXPAND)
        for attr in self._model.record().attributes():
            if attr.type() != ATTR_TYPE.INDEX_ALLOCATION:
                continue
            if attr.non_resident() != 0:
                # indx allocation is non-resident
                rl_view = wx.StaticBox(self, -1, "INDX_ALLOCATION Locations")
                rl_view_sizer = wx.StaticBoxSizer(rl_view, wx.VERTICAL)

                for (offset, length) in attr.runlist().runs():
                    rl_view_sizer.Add(RunlistPanel(self, offset, length, self._model),
                                      self.NOT_EXPAND_VERTICALLY,
                                      wx.EXPAND)
                self._sizer.Add(rl_view_sizer,
                                self.NOT_EXPAND_VERTICALLY,
                                wx.ALL | wx.EXPAND)
        self.SetAutoLayout(1)
        self.SetupScrolling()
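LabelledLine is a helper from the surrounding GUI, not shown on this page. A plausible minimal version, assuming it is a one-row "label: value" panel; only the name and the (self, label, value) call signature are visible above, so the body is hypothetical:

class LabelledLine(wx.Panel):
    # hypothetical implementation: a static label beside a read-only value
    def __init__(self, parent, label, value):
        super(LabelledLine, self).__init__(parent, -1)
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(wx.StaticText(self, -1, label), 1, wx.ALIGN_CENTER_VERTICAL)
        sizer.Add(wx.TextCtrl(self, -1, value, style=wx.TE_READONLY), 1, wx.EXPAND)
        self.SetSizer(sizer)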
Example #7
def output_mft_record(mft_enumerator, record, prefix):
    tags = []
    if not record.is_active():
        tags.append("inactive")

    path = prefix + "\\" + mft_enumerator.get_path(record)
    si = record.standard_information()
    fn = record.filename_information()

    if not record.is_active() and not fn:
        return

    inode = record.mft_record_number()
    if record.is_directory():
        size = 0
    else:
        data_attr = record.data_attribute()
        if data_attr and data_attr.non_resident() > 0:
            size = data_attr.data_size()
        elif fn:
            size = fn.logical_size()
        else:
            size = 0

    ADSs = []  # list of (name, size)
    for attr in record.attributes():
        # alternate data streams are $DATA attributes with a non-empty name
        if attr.type() != ATTR_TYPE.DATA or len(attr.name()) == 0:
            continue
        # use a separate variable so the record size computed above is preserved
        if attr.non_resident() > 0:
            ads_size = attr.data_size()
        else:
            ads_size = attr.value_length()
        ADSs.append((attr.name(), ads_size))

    si_index = 0
    if si:
        try:
            si_index = si.security_id()
        except StandardInformationFieldDoesNotExist:
            pass

    indices = []  # list of (filename, size, reference, info)
    slack_indices = []  # list of (filename, size, reference, info)
    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        # TODO(wb): don't use IndexRootHeader
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            # tuple order matches the comment above: (filename, size, reference, info)
            indices.append((e.filename_information().filename(),
                            e.filename_information().logical_size(),
                            e.mft_reference(),
                            e.filename_information()))

        for e in irh.node_header().slack_entries():
            slack_indices.append((e.filename_information().filename(),
                                  e.filename_information().logical_size(),
                                  e.mft_reference(),
                                  e.filename_information()))

    # si
    if si:
        try:
            print format_bodyfile(path, size, inode, si_index, si, tags),
        except UnicodeEncodeError:
            print "# failed to print: %s" % (list(path))

    # fn
    if fn:
        tags = ["filename"]
        if not record.is_active():
            tags.append("inactive")
        try:
            print format_bodyfile(path, size, inode, si_index, fn, tags),
        except UnicodeEncodeError:
            print "# failed to print: %s" % (list(path))

    # ADS
    for ads in ADSs:
        tags = []
        if not record.is_active():
            tags.append("inactive")
        try:
            print format_bodyfile(path + ":" + ads[0], ads[1], inode, si_index, si or {}, tags),
        except UnicodeEncodeError:
            print "# failed to print: %s" % (list(path))

    # INDX
    for indx in indices:
        tags = ["indx"]
        try:
            print format_bodyfile(path + "\\" + indx[0], indx[1], MREF(indx[2]), 0, indx[3], tags),
        except UnicodeEncodeError:
            print "# failed to print: %s" % (list(path))

    for indx in slack_indices:
        tags = ["indx", "slack"]
        try:
            print format_bodyfile(path + "\\" + indx[0], indx[1], MREF(indx[2]), 0, indx[3], tags),
        except UnicodeEncodeError:
            print "# failed to print: %s" % (list(path))
Example #8
def get_timeline_entries(record):
    entries = []

    def add_entries(info, source, path):
        # one timeline entry per MACB timestamp on this structure
        for type_, timestamp in [
                ("birthed", info.created_time()),
                ("accessed", info.accessed_time()),
                ("modified", info.modified_time()),
                ("changed", info.changed_time())]:
            entries.append({
                "timestamp": timestamp,
                "type": type_,
                "source": source,
                "path": path,
            })

    # $STANDARD_INFORMATION timestamps
    add_entries(record.standard_information(), "$SI",
                record.filename_information().filename())

    # $FILE_NAME timestamps, one set per FILENAME_INFORMATION attribute
    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        add_entries(attr, "$FN", attr.filename())

    # timestamps from filenames in a resident $INDEX_ROOT
    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            add_entries(e.filename_information(), "INDX",
                        e.filename_information().filename())
        for e in irh.node_header().slack_entries():
            add_entries(e.filename_information(), "slack-INDX",
                        e.filename_information().filename())
    return sorted(entries, key=lambda x: x["timestamp"])
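For a quick look at the result, a usage sketch, assuming record was obtained from an MFT enumerator as in Example #7:

for entry in get_timeline_entries(record):
    # one tab-separated line per MACB event, oldest first
    print "%s\t%s\t%s\t%s" % (entry["timestamp"], entry["source"],
                              entry["type"], entry["path"])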
Example #9
def make_model(record, path, record_buf):
    model = {
        "magic": record.magic(),
        "path": path,
        "record_num": str(record.mft_record_number()),
        "is_active": record.is_active(),
        "is_directory": record.is_directory(),
        "size": 0,  # updated below
        "flags": get_flags(record.standard_information().attributes()),
        "created": record.standard_information().created_time(),
        "modified": record.standard_information().modified_time(),
        "changed": record.standard_information().changed_time(),
        "accessed": record.standard_information().accessed_time(),
        "owner_id": 0,  # updated below
        "security_id": 0,  # updated below
        "quota_charged": 0,  # updated below
        "usn": 0,  # updated below
        "filenames": [],
        "attributes": [],
        "indx_entries": [],
        "slack_indx_entries": [],
        "timeline": get_timeline_entries(record),
        "ascii_strings": ascii_strings(record_buf),
        "unicode_strings": unicode_strings(record_buf),
    }

    if not record.is_directory():
        data_attr = record.data_attribute()
        if data_attr and data_attr.non_resident() > 0:
            model["size"] = data_attr.data_size()
        elif record.filename_information() is not None:
            # guard: a record without a $FN attribute has no logical size here
            model["size"] = record.filename_information().logical_size()

    try:
        # since the fields are sequential, we can handle an exception halfway
        #  through here and then ignore the remaining items; we don't need
        #  individual try/excepts
        model["owner_id"] = record.standard_information().owner_id()
        model["security_id"] = record.standard_information().security_id()
        model["quota_charged"] = record.standard_information().quota_charged()
        model["usn"] = record.standard_information().usn()
    except StandardInformationFieldDoesNotExist:
        pass

    for b in record.attributes():
        if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
            continue
        attr = FilenameAttribute(b.value(), 0, record)
        model["filenames"].append({
            "type": ["POSIX", "WIN32", "DOS 8.3", "WIN32 + DOS 8.3"][attr.filename_type()],
            "name": str(attr.filename()),
            "flags": get_flags(attr.flags()),
            "logical_size": attr.logical_size(),
            "physical_size": attr.physical_size(),
            "modified": attr.modified_time(),
            "accessed": attr.accessed_time(),
            "changed": attr.changed_time(),
            "created": attr.created_time(),
            "parent_ref": MREF(attr.mft_parent_reference()),
            "parent_seq": MSEQNO(attr.mft_parent_reference()),
        })

    for b in record.attributes():
        attribute_model = {
            "type": Attribute.TYPES[b.type()],
            "name": b.name() or "<none>",
            "flags": get_flags(attr.flags()),
            "is_resident": b.non_resident() == 0,
            "data_size": 0,
            "allocated_size": 0,
            "value_size": 0,
            "runs": [],
        }

        if b.non_resident() > 0:
            attribute_model["data_size"] = b.data_size()
            attribute_model["allocated_size"] = b.allocated_size()

            if b.allocated_size() > 0:
                for (offset, length) in b.runlist().runs():
                    attribute_model["runs"].append({
                        "offset": offset,
                        "length": length,
                    })
        else:
            attribute_model["value_size"] = b.value_length()
        model["attributes"].append(attribute_model)

    indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
    if indxroot and indxroot.non_resident() == 0:
        irh = IndexRootHeader(indxroot.value(), 0, False)
        for e in irh.node_header().entries():
            model["indx_entries"].append({
                "filename": e.filename_information().filename(),
                "size": e.filename_information().logical_size(),
                "modified": e.filename_information().modified_time(),
                "accessed": e.filename_information().accessed_time(),
                "changed": e.filename_information().changed_time(),
                "created": e.filename_information().created_time(),
                "record_num": MREF(e.mft_reference()),
                "sequence_num": MSEQNO(e.mft_reference()),
            })

        for e in irh.node_header().slack_entries():
            model["slack_indx_entries"].append({
                "filename": e.filename_information().filename(),
                "size": e.filename_information().logical_size(),
                "modified": e.filename_information().modified_time(),
                "accessed": e.filename_information().accessed_time(),
                "changed": e.filename_information().changed_time(),
                "created": e.filename_information().created_time(),
                "record_num": MREF(e.mft_reference()),
                "sequence_num": MSEQNO(e.mft_reference()),
            })

    return model
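get_flags turns a file-attribute bitmask into readable names. A sketch using the standard Windows FILE_ATTRIBUTE_* bit values; the exact bit-to-name table used by the real helper is an assumption, since it presumably lives alongside the MFT module:

# standard Windows FILE_ATTRIBUTE_* bits; the name strings are assumptions
FILE_ATTRIBUTES = {
    0x0001: "readonly",
    0x0002: "hidden",
    0x0004: "system",
    0x0020: "archive",
    0x0040: "device",
    0x0080: "normal",
    0x0100: "temporary",
    0x0200: "sparse",
    0x0400: "reparse-point",
    0x0800: "compressed",
    0x1000: "offline",
    0x2000: "not-content-indexed",
    0x4000: "encrypted",
}


def get_flags(flags):
    # names of all bits set in the mask, in ascending bit order
    return [name for bit, name in sorted(FILE_ATTRIBUTES.items()) if flags & bit]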