def main():
    d = Objects.DFXMLObject("1.2.0")
    d.program = sys.argv[0]
    d.program_version = __version__
    d.command_line = " ".join(sys.argv)
    d.dc["type"] = "File system silent-change report"
    d.add_creator_library("Python", ".".join(
        map(str, sys.version_info[0:3]
            )))  #A bit of a bend, but gets the major version information out.
    d.add_creator_library("Objects.py", Objects.__version__)
    d.add_creator_library("dfxml.py", Objects.dfxml.__version__)

    current_appender = d
    tally = 0
    for (event, obj) in Objects.iterparse(args.infile):
        if event == "start":
            #Inherit namespaces
            if isinstance(obj, Objects.DFXMLObject):
                for (prefix, url) in obj.iter_namespaces():
                    d.add_namespace(prefix, url)
            #Group files by volume
            elif isinstance(obj, Objects.VolumeObject):
                d.append(obj)
                current_appender = obj
        elif event == "end":
            if isinstance(obj, Objects.VolumeObject):
                current_appender = d
            elif isinstance(obj, Objects.FileObject):
                if "_changed" not in obj.diffs:
                    if "_modified" in obj.diffs or "_renamed" in obj.diffs:
                        current_appender.append(obj)
                        tally += 1
    print(d.to_dfxml())
    _logger.info("Found %d suspiciously-changed files." % tally)
Example #3
def main():
    counter = collections.defaultdict(lambda: 0)
    prev_obj = None
    for (event, obj) in Objects.iterparse(args.input_image):
        if isinstance(obj, Objects.FileObject):
            if args.ignore_virtual_files and make_differential_dfxml.ignorable_name(obj.filename):
                continue
            counter[(obj.alloc_inode, obj.alloc_name)] += 1

            #Inspect weird data
            if args.debug and obj.alloc_inode is None and obj.alloc_name is None:
                _logger.debug("Encountered a file with all-null allocation.")
                _logger.debug("Event: %r." % event)
                _logger.debug("Previous object: %s." % ET.tostring(prev_obj.to_Element()))
                _logger.debug("Current  object: %s." % ET.tostring(obj.to_Element()))
        prev_obj = obj
    print(repr(counter))
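A hedged follow-on sketch for the example above: rendering the (alloc_inode, alloc_name) tally as a tab-separated table instead of a raw repr().  Keys may contain None, so the sort stringifies each pair; the function name is illustrative.

def print_allocation_tally(counter):
    print("alloc_inode\talloc_name\ttally")
    for key in sorted(counter.keys(), key=str):
        (alloc_inode, alloc_name) = key
        print("%r\t%r\t%d" % (alloc_inode, alloc_name, counter[key]))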
def main():
    if len(sys.argv) < 2:
        print("Usage: {} <filename.xml>".format(sys.argv[0]))
        exit(1)

    timeline = []

    for (event, obj) in Objects.iterparse(sys.argv[1]):
        #Only work on FileObjects
        if not isinstance(obj, Objects.FileObject):
            continue
        if obj.mtime is not None:
            timeline.append([obj.mtime, obj.filename, " modified"])
        if obj.crtime is not None:
            timeline.append([obj.crtime, obj.filename, " created"])
        if obj.ctime is not None:
            timeline.append([obj.ctime, obj.filename, " changed"])
        if obj.atime is not None:
            timeline.append([obj.atime, obj.filename, " accessed"])

    timeline.sort()

    for record in timeline:
        print("\t".join( map(str, record)) )
Example #6
def main():
    #Key: (annotation, changed property); value: tally
    hist = collections.defaultdict(int)
    for (event, obj) in Objects.iterparse(sys.argv[1]):
        if event != "end" or not isinstance(obj, Objects.FileObject):
            continue
        #Loop through annotations
        for anno in obj.annos:
            #Loop through diffs
            for diff in obj.diffs:
                hist[(anno, diff)] += 1

    annos = Objects.FileObject._diff_attr_names.keys()
    print("""
<table>
  <thead>
    <tr>
      <th>Property</th>
""")
    for anno in annos:
        print("      <th>%s</th>" % anno)
    print("""
    </tr>
  </thead>
  <tfoot></tfoot>
  <tbody>
""")
    for diff in sorted(Objects.FileObject._all_properties):
        if diff in Objects.FileObject._incomparable_properties:
            continue
        print("    <tr>")
        print("      <th style='text-align:left;'>%s</th>" % diff)
        for anno in annos:
            print("      <td>%d</td>" % hist[(anno, diff)])
        print("    </tr>")
    print("""
  </tbody>
</table>
""")
#!/usr/bin/env python

__version__="0.1.0"

import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import dfxml.objects as Objects

if __name__=="__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("file")
    args = parser.parse_args()

    for (event, obj) in Objects.iterparse(args.file):
        if not isinstance(obj, Objects.FileObject):
            continue
        for propname in [ "atime", "ctime", "crtime", "gid", "inode", "mtime", "uid" ]:
            if not getattr(obj, propname) is None:
                if propname == "mtime" and obj.name_type != "d":
                    continue
                raise ValueError("Found property that should have been ignored: %r on %r." % (propname, obj.filename))
def main():
    # Initialize output object.
    # TODO Upgrade to 1.3.0 on schema release.
    dobj = Objects.DFXMLObject(version="1.2.0+")
    dobj.program = sys.argv[0]
    dobj.program_version = __version__
    dobj.command_line = " ".join(sys.argv)
    dobj.dc["type"] = "Recoverability report"
    dobj.add_creator_library("Python", ".".join(
        map(str, sys.version_info[0:3]
            )))  #A bit of a bend, but gets the major version information out.
    dobj.add_creator_library("objects.py", Objects.__version__)
    dobj.add_creator_library("dfxml.py", Objects.dfxml.__version__)
    dobj.add_creator_library("intervals.py",
                             intact_byte_run_index.I.__version__)
    dobj.add_creator_library("intact_byte_run_index.py",
                             intact_byte_run_index.__version__)

    if args.disk_image_dfxml:
        disk_image_dfxml = args.disk_image_dfxml
    else:
        disk_image_dfxml = args.files_dfxml

    br_index = intact_byte_run_index.IntactByteRunIndex()

    diobj = None
    # Index the byte runs of the disk image.
    for (event, obj) in Objects.iterparse(disk_image_dfxml):
        if not isinstance(obj, Objects.DiskImageObject):
            continue
        if event != "start":
            continue
        if obj.byte_runs is None or len(obj.byte_runs) == 0:
            raise ValueError(
                "DFXML document %r does not have diskimageobject with byte runs.  Recoverability cannot be determined."
                % disk_image_dfxml)
        br_index.ingest_byte_runs(obj.byte_runs)
        diobj = obj
        break

    # Confirm initialization.
    if br_index.intervals is None:
        raise ValueError(
            "Disk image byte runs index not constructed after reading file that should have had disk image metadata: %r."
            % disk_image_dfxml)

    # Track diskimageobject.
    dobj.append(diobj)

    # The loop below will want to attach fileobjects to the closest/lowest parent in the object hierarchy.  Might be the disk image, might be the containing file system.
    appender_stack = [diobj]

    file_count_encountered = 0
    file_count_missing_byte_runs = 0
    file_count_missing_byte_run_offset = 0
    file_count_missing_byte_run_length = 0
    file_count_containment_unknown = 0
    file_count_intact = 0
    file_count_not_fully_recoverable = 0

    # Filter fileobject list, picking up file systems along the way.
    for (event, obj) in Objects.iterparse(args.files_dfxml):
        if isinstance(obj, Objects.VolumeObject):
            if event == "start":
                appender_stack[-1].append(obj)
                appender_stack.append(obj)
                continue
            elif event == "end":
                appender_stack.pop()
                continue

        if not isinstance(obj, Objects.FileObject):
            continue
        file_count_encountered += 1

        if obj.byte_runs is None or len(obj.byte_runs) == 0:
            file_count_missing_byte_runs += 1
            continue

        # This variable might be set to None within the loop through the content byte runs.
        byte_runs_contained = True
        for byte_run in obj.data_brs:
            if byte_run.img_offset is None:
                #TODO See if this can be computed from fs_offset.
                file_count_missing_byte_run_offset += 1
                byte_runs_contained = None
                break
            if byte_run.len is None:
                file_count_missing_byte_run_length += 1
                byte_runs_contained = None
                break
            byte_run_contained = br_index.is_byte_run_contained(byte_run)
            if byte_run_contained is None:
                file_count_containment_unknown += 1
                byte_runs_contained = None
                break
            elif byte_run_contained is False:
                byte_runs_contained = False
                break
        if byte_runs_contained is True:
            file_count_intact += 1
        if byte_runs_contained is False:
            file_count_not_fully_recoverable += 1
            # Record fileobject as child of diskimageobject.
            appender_stack[-1].append(obj)

    _logger.debug("file_count_encountered = %d." % file_count_encountered)
    _logger.debug("file_count_missing_byte_runs = %d." %
                  file_count_missing_byte_runs)
    _logger.debug("file_count_missing_byte_run_offset = %d." %
                  file_count_missing_byte_run_offset)
    _logger.debug("file_count_missing_byte_run_length = %d." %
                  file_count_missing_byte_run_length)
    _logger.debug("file_count_containment_unknown = %d." %
                  file_count_containment_unknown)
    _logger.debug("file_count_intact = %d." % file_count_intact)
    _logger.debug("file_count_not_fully_recoverable = %d." %
                  file_count_not_fully_recoverable)

    with open(args.out_dfxml, "w") as out_fh:
        dobj.print_dfxml(out_fh)
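The loop above relies on intact_byte_run_index answering a tri-state containment question: True (the run lies fully within acquired regions), False (some bytes fall outside), or None (cannot be determined).  A stand-in sketch of that semantic, under the assumption that the index reduces to a list of (img_offset, length) intervals of acquired data; the real module is not reproduced here.

def is_byte_run_contained_sketch(intervals, byte_run):
    #None: not enough information to decide.
    if byte_run.img_offset is None or byte_run.len is None:
        return None
    run_start = byte_run.img_offset
    run_end = run_start + byte_run.len
    for (start, length) in intervals:
        if start <= run_start and run_end <= start + length:
            return True
    return False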
#!/usr/bin/env python3

# This software was developed at the National Institute of Standards
# and Technology by employees of the Federal Government in the course
# of their official duties. Pursuant to title 17 Section 105 of the
# United States Code this software is not subject to copyright
# protection and is in the public domain. NIST assumes no
# responsibility whatsoever for its use by other parties, and makes
# no guarantees, expressed or implied, about its quality,
# reliability, or any other characteristic.
#
# We would appreciate acknowledgement if the software is used.
"""
This is a quick utility script to emit all filenames from a DFXML file.
"""

__version__ = "0.1.0"

import sys

from dfxml import objects as Objects

for (event, obj) in Objects.iterparse(sys.argv[1]):
    if not isinstance(obj, Objects.FileObject):
        continue
    print(obj.filename)
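A hedged variant sketch of the same walk, tallying files by extension instead of printing each name.  It reuses only the Objects API shown above, counts each file once on its "end" event (mirroring the histogram example earlier in this listing), and the function name is illustrative.

import collections
import os

def tally_extensions(dfxml_path):
    tally = collections.defaultdict(int)
    for (event, obj) in Objects.iterparse(dfxml_path):
        if event != "end" or not isinstance(obj, Objects.FileObject):
            continue
        if obj.filename is None:
            continue
        tally[os.path.splitext(obj.filename)[1].lower()] += 1
    return dict(tally)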
def main():

    br_index = intact_byte_run_index.IntactByteRunIndex()

    original_disk_size = None
    disk_summary_message = None
    files_summary_message = None
    file_system_tuples = []
    fileobject_tally = 0

    for (event, obj) in Objects.iterparse(args.disk_image_dfxml):
        if isinstance(obj, Objects.DiskImageObject):
            if event != "end":
                continue
            original_disk_size = obj.filesize
            bytes_unread = original_disk_size
            for br in obj.byte_runs:
                bytes_unread -= br.len
            if HAVE_HUMANFRIENDLY:
                parenthetical_friendly_filesize = " (%s)" % humanfriendly.format_size(
                    obj.filesize)
                parenthetical_friendly_bytes_unread = " (%s)" % humanfriendly.format_size(
                    bytes_unread)
            else:
                parenthetical_friendly_filesize = ""
                parenthetical_friendly_bytes_unread = ""

            disk_summary_message = "Of %s bytes%s of the original disk, %s bytes%s are not in the acquired disk image." % (
                f'{obj.filesize:n}', parenthetical_friendly_filesize,
                f'{bytes_unread:n}', parenthetical_friendly_bytes_unread)

            br_index.ingest_byte_runs(obj.byte_runs)
        elif isinstance(obj, Objects.VolumeObject):
            if event != "end":
                continue
            fs_tally = len(file_system_tuples) + 1  #Counting the newly discovered file system itself.
            # Determine file system geometry.
            if obj.byte_runs is None or len(obj.byte_runs) == 0:
                fs_img_offset = obj.partition_offset
                fs_len = obj.block_size * obj.block_count
            else:
                fs_img_offset = obj.byte_runs[0].img_offset
                fs_len = obj.byte_runs[0].len
            file_system_tuples.append(
                (fs_tally, fs_img_offset, fs_len, obj.ftype_str))
        elif isinstance(obj, Objects.FileObject):
            fileobject_tally += 1

    if fileobject_tally == 0:
        files_summary_message = "No files were listed as affected."
    elif fileobject_tally == 1:
        files_summary_message = "1 file was affected."
    else:
        files_summary_message = "%d files were affected." % fileobject_tally

    print("""\
<!doctype html>
<html>
  <head>
    <meta charset="utf-8" />
    <style type="text/css">
      table {
        border-collapse: collapse;
      }
      td {
        border: 1px solid black;
      }
      th {
        border: 1px solid black;
      }
    </style>
  </head>
  <body>
    <h1>Report of file recoverability</h1>
    <p>%s</p>
    <p>%s</p>""" % (disk_summary_message, files_summary_message))

    if fileobject_tally > 0:
        print("""\
    <table>
      <caption>Table 1. File systems</caption>
      <thead>
        <tr>
          <th>FS #</th>
          <th>Offset</th>
          <th>Length</th>
          <th>Type</th>
        </tr>
      </thead>
      <tbody>""")
        for file_system_tuple in file_system_tuples:
            print("""\
        <tr>
          <td>%d</td>
          <td>%d</td>
          <td>%d</td>
          <td>%s</td>
        </tr>""" % file_system_tuple)
        print("""\
      </tbody>
    </table>
    <table>
      <caption>Table 2. Affected files</caption>
      <thead>
        <tr>
          <th>FS #</th>
          <th>Type</th>
          <th>Size (bytes)</th>
          <th>Missing bytes</th>
          <th>Path</th>
        </tr>
      </thead>
      <tbody>""")

        last_fs_number = 0
        current_fs_number_str = ""
        for (event, obj) in Objects.iterparse(args.disk_image_dfxml):
            if isinstance(obj, Objects.VolumeObject):
                if event == "start":
                    last_fs_number += 1
                    current_fs_number_str = str(last_fs_number)
                else:
                    current_fs_number_str = ""
                continue
            elif not isinstance(obj, Objects.FileObject):
                continue

            # The remainder of this loop analyzes files.

            #TODO
            bytes_present = 0
            for byte_run in obj.data_brs:
                for filtered_run_pair in br_index.filter_byte_run(byte_run):
                    bytes_present += filtered_run_pair[1]
            bytes_missing = obj.filesize - bytes_present

            name_type = "" if obj.name_type is None else obj.name_type
            print("""\
        <tr>
          <td>%s</td>
          <td><code>%s</code></td>
          <td>%d</td>
          <td>%d</td>
          <td><code>%s</code></td>
        </tr>""" % (current_fs_number_str, name_type, obj.filesize,
                    bytes_missing, obj.filename))

        print("""\
      </tbody>
    </table>
    <p>Note that "FS #" is a simple incrementing integer defined only in this report.  A missing "FS #" indicates the input DFXML did not have the file associated with a file system.</p>
    <p>The file type code is the DFXML encoding of <code>name_type</code>.  <code>r</code> is a regular file; <code>d</code> a directory; and <code>v</code> a "virtual" file, a file that is not precisely a file in the file system, but is treated as a file by the tool that parsed the file system.</p>
    <p>The "Size" column is the size of the file according to the file system.  "Missing bytes" indicates how many bytes of the file were not captured in the disk image.</p>"""
              )
    print("""\
  </body>
</html>""")
Example #13
def make_differential_dfxml(pre, post, **kwargs):
    """
    Takes as input two paths to DFXML files.  Returns a DFXMLObject.
    @param pre String.
    @param post String.
    @param diff_mode Optional.  One of "all" or "idifference".
    @param retain_unchanged Optional.  Boolean.
    @param ignore_properties Optional.  Set.
    @param annotate_matches Optional.  Boolean.  True -> matched file objects get a "delta:matched='1'" attribute.
    @param rename_requires_hash Optional.  Boolean.  True -> all matches require matching SHA-1's, if present.
    @param ignore_filename_function Optional.  Function, string -> Boolean.  Returns True if a file name (which can be null) should be ignored.
    @param glom_byte_runs Optional.  Boolean.  Joins contiguous-region byte runs together in FileObject byte run lists.
    """

    diff_mode = kwargs.get("diff_mode", "all")
    retain_unchanged = kwargs.get("retain_unchanged", False)
    ignore_properties = kwargs.get("ignore_properties", set())
    annotate_matches = kwargs.get("annotate_matches", False)
    rename_requires_hash = kwargs.get("rename_requires_hash", False)
    ignore_filename_function = kwargs.get("ignore_filename_function",
                                          ignorable_name)
    glom_byte_runs = kwargs.get("glom_byte_runs", False)

    _expected_diff_modes = ["all", "idifference"]
    if diff_mode not in _expected_diff_modes:
        raise ValueError("Differencing mode should be in: %r." %
                         _expected_diff_modes)
    diff_mask_set = set()

    if diff_mode == "idifference":
        diff_mask_set |= set([
            "atime", "byte_runs", "crtime", "ctime", "filename", "filesize",
            "md5", "mtime", "sha1"
        ])
    _logger.debug("diff_mask_set = " + repr(diff_mask_set))

    #d: The container DFXMLObject, ultimately returned.
    d = Objects.DFXMLObject(version="1.2.0")
    if sys.argv[0] == os.path.basename(__file__):
        d.program = sys.argv[0]
        d.program_version = __version__
    d.command_line = " ".join(sys.argv)
    d.add_namespace("delta", dfxml.XMLNS_DELTA)
    d.dc["type"] = "Disk image difference set"
    d.add_creator_library("Python", ".".join(
        map(str, sys.version_info[0:3]
            )))  #A bit of a bend, but gets the major version information out.
    d.add_creator_library("Objects.py", Objects.__version__)
    d.add_creator_library("dfxml.py", Objects.dfxml.__version__)

    d.diff_file_ignores |= ignore_properties
    _logger.debug("d.diff_file_ignores = " + repr(d.diff_file_ignores))

    #The list most of this function is spent on building
    fileobjects_changed = []

    #Unmodified files; only retained if requested.
    fileobjects_unchanged = []

    #Key: (partition, inode, filename); value: FileObject
    old_fis = None
    new_fis = None

    #Key: (partition, inode, filename); value: FileObject list
    old_fis_unalloc = None
    new_fis_unalloc = None

    #Key: Partition byte offset within the disk image, paired with the file system type
    #Value: VolumeObject
    old_volumes = None
    new_volumes = None
    matched_volumes = dict()

    #Populated in distinct (offset, file system type as string) encounter order
    volumes_encounter_order = dict()

    for infile in [pre, post]:

        _logger.debug("infile = %r" % infile)
        old_fis = new_fis
        new_fis = dict()

        old_volumes = new_volumes
        new_volumes = dict()
        #Fold in the matched volumes - we're just discarding the deleted volumes
        for k in matched_volumes:
            old_volumes[k] = matched_volumes[k]
        matched_volumes = dict()

        old_fis_unalloc = new_fis_unalloc
        new_fis_unalloc = collections.defaultdict(list)

        d.sources.append(infile)

        for (i, (event, new_obj)) in enumerate(Objects.iterparse(infile)):
            if isinstance(new_obj, Objects.DFXMLObject):
                #Inherit desired properties from the source DFXMLObject.

                #Inherit namespaces
                for (prefix, url) in new_obj.iter_namespaces():
                    d.add_namespace(prefix, url)

                continue
            elif isinstance(new_obj, Objects.VolumeObject):
                if event == "end":
                    #This algorithm doesn't yet need to know when a volume is concluded.  On to the next object.
                    continue

                offset = new_obj.partition_offset
                if offset is None:
                    raise AttributeError(
                        "To perform differencing with volumes, the <volume> elements must have a <partition_offset>.  Either re-generate your DFXML with partition offsets, or run this program again with the --ignore-volumes flag."
                    )

                #Use the lower-case volume spelling
                ftype_str = _lower_ftype_str(new_obj)

                #Re-capping the general differential analysis algorithm:
                #0. If the volume is in the new list, something's gone wrong.
                if (offset, ftype_str) in new_volumes:
                    _logger.debug("new_obj.partition_offset = %r." % offset)
                    _logger.warning(
                        "Encountered a volume that starts at the same offset as another volume in the same disk image.  This analysis is based on the assumption that that doesn't happen.  Check results that depend on partition mappings."
                    )

                #1. If the volume is in the old list, pop it out of the old list - it's matched.
                if old_volumes and (offset, ftype_str) in old_volumes:
                    _logger.debug(
                        "Found a volume in post image, at offset %r." % offset)
                    old_obj = old_volumes.pop((offset, ftype_str))
                    new_obj.original_volume = old_obj
                    new_obj.compare_to_original()
                    matched_volumes[(offset, ftype_str)] = new_obj

                #2. If the volume is NOT in the old list, add it to the new list.
                else:
                    _logger.debug("Found a new volume, at offset %r." % offset)
                    new_volumes[(offset, ftype_str)] = new_obj
                    volumes_encounter_order[(offset, ftype_str)] = (
                        len(new_volumes)
                        + ((old_volumes and len(old_volumes)) or 0)
                        + len(matched_volumes)
                    )

                #3. Afterwards, the old list contains deleted volumes.

                #Record the ID
                new_obj.id = volumes_encounter_order[(offset, ftype_str)]

                #Move on to the next object
                continue
            elif not isinstance(new_obj, Objects.FileObject):
                #The rest of this loop compares only file objects.
                continue

            if ignore_filename_function(new_obj.filename):
                continue

            #Simplify byte runs if requested
            if glom_byte_runs:
                if new_obj.byte_runs:
                    temp_byte_runs = Objects.ByteRuns()
                    for run in new_obj.byte_runs:
                        temp_byte_runs.glom(run)
                    new_obj.byte_runs = temp_byte_runs

            #Normalize the partition number
            if new_obj.volume_object is None:
                new_obj.partition = None
            else:
                vo = new_obj.volume_object
                fts = _lower_ftype_str(vo)
                new_obj.partition = volumes_encounter_order[(
                    vo.partition_offset, fts)]

            #Define the identity key of this file -- affected by the --ignore argument
            _key_partition = None if "partition" in ignore_properties else new_obj.partition
            _key_inode = None if "inode" in ignore_properties else new_obj.inode
            _key_filename = None if "filename" in ignore_properties else new_obj.filename
            key = (_key_partition, _key_inode, _key_filename)

            #Ignore unallocated content comparisons until a later loop.  The unique identification of deleted files needs a little more to work.
            if not new_obj.alloc:
                new_fis_unalloc[key].append(new_obj)
                continue

            #The rest of this loop is irrelevant until the second DFXML file.
            if old_fis is None:
                new_fis[key] = new_obj
                continue

            if key in old_fis:
                #Extract the old fileobject and check for changes
                old_obj = old_fis.pop(key)
                new_obj.original_fileobject = old_obj
                new_obj.compare_to_original(file_ignores=d.diff_file_ignores)

                #_logger.debug("Diffs: %r." % _diffs)
                _diffs = new_obj.diffs - d.diff_file_ignores
                #_logger.debug("Diffs after ignore-set: %r." % _diffs)
                if diff_mask_set:
                    _diffs &= diff_mask_set
                    #_logger.debug("Diffs after mask-set: %r." % _diffs)

                if len(_diffs) > 0:
                    #_logger.debug("Remaining diffs: " + repr(_diffs))
                    fileobjects_changed.append(new_obj)
                else:
                    #Unmodified file; only keep if requested.
                    if retain_unchanged:
                        fileobjects_unchanged.append(new_obj)
            else:
                #Store the new object
                new_fis[key] = new_obj

        #The rest of the files loop is irrelevant until the second file.
        if old_fis is None:
            continue

        _logger.debug("len(old_fis) = %d" % len(old_fis))
        _logger.debug("len(old_fis_unalloc) = %d" % len(old_fis_unalloc))
        _logger.debug("len(new_fis) = %d" % len(new_fis))
        _logger.debug("len(new_fis_unalloc) = %d" % len(new_fis_unalloc))
        _logger.debug("len(fileobjects_changed) = %d" %
                      len(fileobjects_changed))

        #Identify renames - only possible if 1-to-1.  Many-to-many renames are just left as new and deleted files.
        _logger.debug("Detecting renames...")
        fileobjects_renamed = []

        def _make_name_map(d):
            """Returns a dictionary, mapping (partition, inode) -> {filename}."""
            retdict = collections.defaultdict(lambda: set())
            for (partition, inode, filename) in d.keys():
                retdict[(partition, inode)].add(filename)
            return retdict

        old_inode_names = _make_name_map(old_fis)
        new_inode_names = _make_name_map(new_fis)
        for key in new_inode_names.keys():
            (partition, inode) = key

            if len(new_inode_names[key]) != 1:
                continue
            if key not in old_inode_names:
                continue
            if len(old_inode_names[key]) != 1:
                continue
            if rename_requires_hash:
                #Peek at the set elements by doing a quite-ephemeral list cast
                old_obj = old_fis[(partition, inode,
                                   list(old_inode_names[key])[0])]
                new_obj = new_fis[(partition, inode,
                                   list(new_inode_names[key])[0])]
                if old_obj.sha1 != new_obj.sha1:
                    continue

            #Found a match if we're at this point in the loop
            old_name = old_inode_names[key].pop()
            new_name = new_inode_names[key].pop()
            old_obj = old_fis.pop((partition, inode, old_name))
            new_obj = new_fis.pop((partition, inode, new_name))
            new_obj.original_fileobject = old_obj
            new_obj.compare_to_original(file_ignores=d.diff_file_ignores)
            fileobjects_renamed.append(new_obj)
        _logger.debug("len(old_fis) -> %d" % len(old_fis))
        _logger.debug("len(new_fis) -> %d" % len(new_fis))
        _logger.debug("len(fileobjects_changed) -> %d" %
                      len(fileobjects_changed))
        _logger.debug("len(fileobjects_renamed) = %d" %
                      len(fileobjects_renamed))

        #Identify files that just changed inode number - basically, doing the rename detection again
        _logger.debug("Detecting inode number changes...")

        def _make_inode_map(d):
            """Returns a dictionary, mapping (partition, filename) -> inode."""
            retdict = dict()
            for (partition, inode, filename) in d.keys():
                if (partition, filename) in retdict:
                    _logger.warning(
                        "Multiple instances of the file path %r were found in partition %r; this violates an assumption of this program, that paths are unique within partitions."
                        % (filename, partition))
                retdict[(partition, filename)] = inode
            return retdict

        old_name_inodes = _make_inode_map(old_fis)
        new_name_inodes = _make_inode_map(new_fis)
        for key in new_name_inodes.keys():
            if key not in old_name_inodes:
                continue
            (partition, name) = key
            old_obj = old_fis.pop((partition, old_name_inodes[key], name))
            new_obj = new_fis.pop((partition, new_name_inodes[key], name))
            new_obj.original_fileobject = old_obj
            #TODO Test for what chaos ensues when filename is in the ignore list.
            new_obj.compare_to_original(file_ignores=d.diff_file_ignores)
            fileobjects_changed.append(new_obj)
        _logger.debug("len(old_fis) -> %d" % len(old_fis))
        _logger.debug("len(new_fis) -> %d" % len(new_fis))
        _logger.debug("len(fileobjects_changed) -> %d" %
                      len(fileobjects_changed))
        #And that's the end of the allocated-only, per-volume analysis.

        #We may be able to match files that aren't allocated against files we think are deleted
        _logger.debug("Detecting modifications from unallocated files...")
        fileobjects_deleted = []
        for key in new_fis_unalloc:
            #1 partition; 1 inode number; 1 name, repeated:  Too ambiguous to compare.
            if len(new_fis_unalloc[key]) != 1:
                continue

            if key in old_fis_unalloc:
                if len(old_fis_unalloc[key]) == 1:
                    #The file was unallocated in the previous image, too.
                    old_obj = old_fis_unalloc[key].pop()
                    new_obj = new_fis_unalloc[key].pop()
                    new_obj.original_fileobject = old_obj
                    new_obj.compare_to_original(
                        file_ignores=d.diff_file_ignores)
                    #The file might not have changed.  It's interesting if it did, though.

                    #Subtract the ignore set (as in the allocated-file branch above); subtracting diff_mask_set here would always empty the masked intersection below.
                    _diffs = new_obj.diffs - d.diff_file_ignores
                    #_logger.debug("Diffs: %r." % _diffs)
                    if diff_mask_set:
                        _diffs &= diff_mask_set
                        #_logger.debug("Diffs after mask-set: %r." % _diffs)
                    if len(_diffs) > 0:
                        _logger.debug("Remaining diffs: " + repr(_diffs))
                        fileobjects_changed.append(new_obj)
                    elif retain_unchanged:
                        fileobjects_unchanged.append(new_obj)
            elif key in old_fis:
                #Identified a deletion.
                old_obj = old_fis.pop(key)
                new_obj = new_fis_unalloc[key].pop()
                new_obj.original_fileobject = old_obj
                new_obj.compare_to_original(file_ignores=d.diff_file_ignores)
                fileobjects_deleted.append(new_obj)
        _logger.debug("len(old_fis) -> %d" % len(old_fis))
        _logger.debug("len(old_fis_unalloc) -> %d" % len(old_fis_unalloc))
        _logger.debug("len(new_fis) -> %d" % len(new_fis))
        _logger.debug("len(new_fis_unalloc) -> %d" % len(new_fis_unalloc))
        _logger.debug("len(fileobjects_changed) -> %d" %
                      len(fileobjects_changed))
        _logger.debug("len(fileobjects_deleted) -> %d" %
                      len(fileobjects_deleted))

        #After deletion matching is performed, one might want to look for files migrating to other partitions.
        #However, since between-volume migration creates a new deleted file, this algorithm instead ignores partition migrations.
        #AJN TODO Thinking about it a little more, I can't suss out a reason against trying this match.  It's complicated if we try looking for reallocations in new_fis, strictly from new_fis_unalloc.

        #TODO We might also want to match the unallocated objects based on metadata addresses.  Unfortunately, that requires implementation of additional byte runs, which hasn't been fully designed yet in the DFXML schema.

        #Begin output.
        #First, annotate the volume objects.
        for key in new_volumes:
            v = new_volumes[key]
            v.annos.add("new")
        for key in old_volumes:
            v = old_volumes[key]
            v.annos.add("deleted")
        for key in matched_volumes:
            v = matched_volumes[key]
            if len(v.diffs) > 0:
                v.annos.add("modified")

        #Build list of FileObject appenders, child volumes of the DFXML Document.
        #Key: Partition number, or None
        #Value: Reference to the VolumeObject corresponding with that partition number.  None -> the DFXMLObject.
        appenders = dict()
        for volume_dict in [new_volumes, matched_volumes, old_volumes]:
            for (offset, ftype_str) in volume_dict:
                veo = volumes_encounter_order[(offset, ftype_str)]
                if veo in appenders:
                    raise ValueError(
                        "This pair is already in the appenders dictionary, which was supposed to be distinct: "
                        + repr((offset, ftype_str)) + ", encounter order " +
                        str(veo) + ".")
                v = volume_dict[(offset, ftype_str)]
                appenders[veo] = v
                d.append(v)

        #Add in the default appender, the DFXML Document itself.
        appenders[None] = d

        #A file should only be considered "modified" if its contents have changed.
        content_diffs = set(["md5", "sha1", "sha256"])

        def _maybe_match_attr(obj):
            """Just adds the 'matched' annotation when called."""
            if annotate_matches:
                obj.annos.add("matched")

        #Populate DFXMLObject.
        for key in new_fis:
            #TODO If this script ever does a series of >2 DFXML files, these diff additions need to be removed for the next round.
            fi = new_fis[key]
            fi.annos.add("new")
            appenders[fi.partition].append(fi)
        for key in new_fis_unalloc:
            for fi in new_fis_unalloc[key]:
                fi.annos.add("new")
                appenders[fi.partition].append(fi)
        for fi in fileobjects_deleted:
            #Independently flag for name, content, and metadata modifications
            if len(fi.diffs - content_diffs) > 0:
                fi.annos.add("changed")
            if len(content_diffs.intersection(fi.diffs)) > 0:
                fi.annos.add("modified")
            if "filename" in fi.diffs:
                fi.annos.add("renamed")
            fi.annos.add("deleted")
            _maybe_match_attr(fi)
            appenders[fi.partition].append(fi)
        for key in old_fis:
            ofi = old_fis[key]
            nfi = Objects.FileObject()
            nfi.original_fileobject = ofi
            nfi.annos.add("deleted")
            appenders[ofi.partition].append(nfi)
        for key in old_fis_unalloc:
            for ofi in old_fis_unalloc[key]:
                nfi = Objects.FileObject()
                nfi.original_fileobject = ofi
                nfi.annos.add("deleted")
                appenders[ofi.partition].append(nfi)
        for fi in fileobjects_renamed:
            #Independently flag for content and metadata modifications
            if len(content_diffs.intersection(fi.diffs)) > 0:
                fi.annos.add("modified")
            if len(fi.diffs - content_diffs) > 0:
                fi.annos.add("changed")
            fi.annos.add("renamed")
            _maybe_match_attr(fi)
            appenders[fi.partition].append(fi)
        for fi in fileobjects_changed:
            #Independently flag for content and metadata modifications
            if len(content_diffs.intersection(fi.diffs)) > 0:
                fi.annos.add("modified")
            if len(fi.diffs - content_diffs) > 0:
                fi.annos.add("changed")
            _maybe_match_attr(fi)
            appenders[fi.partition].append(fi)
        for fi in fileobjects_unchanged:
            _maybe_match_attr(fi)
            appenders[fi.partition].append(fi)

        #Output
        return d
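A hedged usage sketch for make_differential_dfxml (file names hypothetical): build the differential document from two snapshot listings, restricting attention to the idifference property set, and print the result.

def demo_differential():
    d = make_differential_dfxml(
        "snapshot_before.dfxml",
        "snapshot_after.dfxml",
        diff_mode="idifference",
        annotate_matches=True,
    )
    print(d.to_dfxml())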
Example #14
def extract_files(image_path,
                  outdir,
                  dfxml_path=None,
                  file_predicate=is_file,
                  file_name=name_with_part_path,
                  dry_run=None,
                  out_manifest_path=None,
                  err_manifest_path=None,
                  keep_going=False):
    """
    @param file_name Unary function.  Takes a Objects.FileObject; returns the file path to which this file will be extracted, relative to outdir.  So, if outdir="extraction" and the name_with_part_path function of this module is used, the file "/Users/Administrator/ntuser.dat" in partition 1 will be extracted to "extraction/partition_1/Users/Administrator/ntuser.dat".
    """

    extraction_byte_tally = 0

    _path_for_iterparse = dfxml_path or image_path

    #Set up base manifest to track extracted files
    base_manifest = Objects.DFXMLObject(version="1.2.0")
    base_manifest.program = sys.argv[0]
    if sys.argv[0] == os.path.basename(__file__):
        base_manifest.program_version = __version__
        #Otherwise, this DFXMLObject would need to be passed back to the calling function.
    base_manifest.command_line = " ".join(sys.argv)
    base_manifest.add_namespace("extractor", XMLNS_EXTRACTOR)
    base_manifest.add_namespace("delta", dfxml.XMLNS_DELTA)
    base_manifest.sources.append(image_path)
    if dfxml_path:
        base_manifest.sources.append(dfxml_path)
    base_manifest.add_creator_library("Python", ".".join(
        map(str, sys.version_info[0:3])
    ))  #A bit of a bend, but gets the major version information out.
    base_manifest.add_creator_library("Objects.py", Objects.__version__)
    base_manifest.add_creator_library("dfxml.py", Objects.dfxml.__version__)

    #Clone base manifest to all-files' manifest and errors-only manifest
    out_manifest = None
    if out_manifest_path:
        out_manifest = copy.deepcopy(base_manifest)
    err_manifest = None
    if err_manifest_path:
        err_manifest = copy.deepcopy(base_manifest)

    for (event, obj) in Objects.iterparse(_path_for_iterparse):
        #Absolute prerequisites:
        if not isinstance(obj, Objects.FileObject):
            continue

        #Invoker prerequisites
        if not file_predicate(obj):
            continue

        extraction_entry = Objects.FileObject()
        extraction_entry.original_fileobject = obj

        #Construct path where the file will be extracted
        extraction_write_path = os.path.join(outdir, file_name(obj))

        #Extract idempotently
        if os.path.exists(extraction_write_path):
            _logger.debug(
                "Skipping already-extracted file: %r.  Extraction path already exists: %r."
                % (obj.filename, extraction_write_path))
            continue

        extraction_entry.filename = extraction_write_path

        #Set up checksum verifier
        checker = None
        checked_byte_tally = 0
        if obj.sha1:
            checker = hashlib.sha1()

        extraction_byte_tally += obj.filesize

        any_error = None
        tsk_error = None
        if not dry_run:
            extraction_write_dir = os.path.dirname(extraction_write_path)
            if not os.path.exists(extraction_write_dir):
                os.makedirs(extraction_write_dir)
            _logger.debug("Extracting to: %r." % extraction_write_path)
            with open(extraction_write_path, "wb") as extraction_write_fh:
                try:
                    for chunk in obj.extract_facet("content", image_path):
                        if checker:
                            checker.update(chunk)
                        checked_byte_tally += len(chunk)
                        extraction_write_fh.write(chunk)

                    if checked_byte_tally != obj.filesize:
                        any_error = True
                        extraction_entry.filesize = checked_byte_tally
                        extraction_entry.diffs.add("filesize")
                        _logger.error("File size mismatch on %r." %
                                      obj.filename)
                        _logger.info("Recorded filesize = %r" % obj.filesize)
                        _logger.info("Extracted bytes   = %r" %
                                     checked_byte_tally)
                    if checker and (obj.sha1 != checker.hexdigest()):
                        any_error = True
                        extraction_entry.sha1 = checker.hexdigest()
                        extraction_entry.diffs.add("sha1")
                        _logger.error("Hash mismatch on %r." % obj.filename)
                        _logger.info("Recorded SHA-1 = %r" % obj.sha1)
                        _logger.info("Computed SHA-1 = %r" %
                                     checker.hexdigest())
                        #_logger.debug("File object: %r." % obj)
                except Exception as e:
                    any_error = True
                    tsk_error = True
                    extraction_entry.error = "".join(traceback.format_stack())
                    if e.args:
                        extraction_entry.error += "\n" + str(e.args)
        if out_manifest:
            out_manifest.append(extraction_entry)
        if err_manifest and any_error:
            err_manifest.append(extraction_entry)
        if tsk_error and not keep_going:
            _logger.warning(
                "Terminating extraction loop early, due to encountered error.")
            break

    #Report
    _logger.info("Estimated extraction: %d bytes." % extraction_byte_tally)
    if out_manifest is not None:
        with open(out_manifest_path, "w") as out_manifest_fh:
            out_manifest.print_dfxml(out_manifest_fh)
    if err_manifest is not None:
        tally = 0
        for obj in err_manifest:
            if isinstance(obj, Objects.FileObject):
                tally += 1
        _logger.info("Encountered errors extracting %d files." % tally)
        with open(err_manifest_path, "w") as err_manifest_fh:
            err_manifest.print_dfxml(err_manifest_fh)
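A hedged usage sketch for extract_files (paths hypothetical): extract every file matching the default predicate from a disk image, reading a pre-computed DFXML listing and writing manifests of all and failed extractions.

def demo_extraction():
    extract_files(
        image_path="sample_image.raw",
        outdir="extraction",
        dfxml_path="sample_image.dfxml",
        out_manifest_path="extraction_manifest.dfxml",
        err_manifest_path="extraction_errors.dfxml",
        keep_going=True,
    )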
def make_differential_dfxml(pre, post, **kwargs):
    """
    Takes as input two paths to DFXML files.  Returns a DFXMLObject.
    @param pre String.
    @param post String.
    @param diff_mode Optional.  One of "all" or "idifference".
    @param retain_unchanged Optional.  Boolean.
    @param ignore_properties Optional.  Set.
    @param annotate_matches Optional.  Boolean.  True -> matched file objects get a "delta:matched='1'" attribute.
    @param rename_requires_hash Optional.  Boolean.  True -> all matches require matching SHA-1's, if present.
    @param ignore_filename_function Optional.  Function, string -> Boolean.  Returns True if a file name (which can be null) should be ignored.
    @param glom_byte_runs Optional.  Boolean.  Joins contiguous-region byte runs together in FileObject byte run lists.
    """

    diff_mode = kwargs.get("diff_mode", "all")
    retain_unchanged = kwargs.get("retain_unchanged", False)
    ignore_properties = kwargs.get("ignore_properties", set())
    annotate_matches = kwargs.get("annotate_matches", False)
    rename_requires_hash = kwargs.get("rename_requires_hash", False)
    ignore_filename_function = kwargs.get("ignore_filename_function", ignorable_name)
    glom_byte_runs = kwargs.get("glom_byte_runs", False)

    _expected_diff_modes = ["all", "idifference"]
    if diff_mode not in _expected_diff_modes:
        raise ValueError("Differencing mode should be in: %r." % _expected_diff_modes)
    diff_mask_set = set()

    if diff_mode == "idifference":
        diff_mask_set |= set([
          "atime",
          "byte_runs",
          "crtime",
          "ctime",
          "filename",
          "filesize",
          "md5",
          "mtime",
          "sha1"
        ])
    _logger.debug("diff_mask_set = " + repr(diff_mask_set))

    #d: The container DFXMLObject, ultimately returned.
    d = Objects.DFXMLObject(version="1.2.0")
    if sys.argv[0] == os.path.basename(__file__):
        d.program = sys.argv[0]
        d.program_version = __version__
    d.command_line = " ".join(sys.argv)
    d.add_namespace("delta", dfxml.XMLNS_DELTA)
    d.dc["type"] = "Disk image difference set"
    d.add_creator_library("Python", ".".join(map(str, sys.version_info[0:3]))) #A bit of a bend, but gets the major version information out.
    d.add_creator_library("Objects.py", Objects.__version__)
    d.add_creator_library("dfxml.py", Objects.dfxml.__version__)

    d.diff_file_ignores |= ignore_properties
    _logger.debug("d.diff_file_ignores = " + repr(d.diff_file_ignores))

    #The list most of this function is spent on building
    fileobjects_changed = []

    #Unmodified files; only retained if requested.
    fileobjects_unchanged = []

    #Key: (partition, inode, filename); value: FileObject
    old_fis = None
    new_fis = None

    #Key: (partition, inode, filename); value: FileObject list
    old_fis_unalloc = None
    new_fis_unalloc = None

    #Key: Partition byte offset within the disk image, paired with the file system type
    #Value: VolumeObject
    old_volumes = None
    new_volumes = None
    matched_volumes = dict()

    #Populated in distinct (offset, file system type as string) encounter order
    volumes_encounter_order = dict()

    for infile in [pre, post]:

        _logger.debug("infile = %r" % infile)
        old_fis = new_fis
        new_fis = dict()

        old_volumes = new_volumes
        new_volumes = dict()
        #Fold in the matched volumes - we're just discarding the deleted volumes
        for k in matched_volumes:
            old_volumes[k] = matched_volumes[k]
        matched_volumes = dict()

        old_fis_unalloc = new_fis_unalloc
        new_fis_unalloc = collections.defaultdict(list)

        d.sources.append(infile)

        for (i, (event, new_obj)) in enumerate(Objects.iterparse(infile)):
            if isinstance(new_obj, Objects.DFXMLObject):
                #Inherit desired properties from the source DFXMLObject.

                #Inherit namespaces
                for (prefix, url) in new_obj.iter_namespaces():
                    d.add_namespace(prefix, url)

                continue
            elif isinstance(new_obj, Objects.VolumeObject):
                if event == "end":
                    #This algorithm doesn't yet need to know when a volume is concluded.  On to the next object.
                    continue

                offset = new_obj.partition_offset
                if offset is None:
                    raise AttributeError("To perform differencing with volumes, the <volume> elements must have a <partition_offset>.  Either re-generate your DFXML with partition offsets, or run this program again with the --ignore-volumes flag.")

                #Use the lower-case volume spelling
                ftype_str = _lower_ftype_str(new_obj)

                #Re-capping the general differential analysis algorithm: 
                #0. If the volume is in the new list, something's gone wrong.
                if (offset, ftype_str) in new_volumes:
                    _logger.debug("new_obj.partition_offset = %r." % offset)
                    _logger.warning("Encountered a volume that starts at an offset as another volume, in the same disk image.  This analysis is based on the assumption that that doesn't happen.  Check results that depend on partition mappings.")

                #1. If the volume is in the old list, pop it out of the old list - it's matched.
                if old_volumes and (offset, ftype_str) in old_volumes:
                    _logger.debug("Found a volume in post image, at offset %r." % offset)
                    old_obj = old_volumes.pop((offset, ftype_str))
                    new_obj.original_volume = old_obj
                    new_obj.compare_to_original()
                    matched_volumes[(offset, ftype_str)] = new_obj

                #2. If the volume is NOT in the old list, add it to the new list.
                else:
                    _logger.debug("Found a new volume, at offset %r." % offset)
                    new_volumes[(offset, ftype_str)] = new_obj
                    volumes_encounter_order[(offset, ftype_str)] = len(new_volumes) + ((old_volumes and len(old_volumes)) or 0) + len(matched_volumes)

                #3. Afterwards, the old list contains deleted volumes.

                #Record the ID
                new_obj.id = volumes_encounter_order[(offset, ftype_str)]

                #Move on to the next object
                continue
            elif not isinstance(new_obj, Objects.FileObject):
                #The rest of this loop compares only file objects.
                continue

            if ignore_filename_function(new_obj.filename):
                continue

            #Simplify byte runs if requested
            if glom_byte_runs:
                if new_obj.byte_runs:
                    temp_byte_runs = Objects.ByteRuns()
                    for run in new_obj.byte_runs:
                        temp_byte_runs.glom(run)
                    new_obj.byte_runs = temp_byte_runs

            #Normalize the partition number
            if new_obj.volume_object is None:
                new_obj.partition = None
            else:
                vo = new_obj.volume_object
                fts = _lower_ftype_str(vo)
                new_obj.partition = volumes_encounter_order[(vo.partition_offset, fts)]

            #Define the identity key of this file -- affected by the --ignore argument
            _key_partition = None if "partition" in ignore_properties else new_obj.partition
            _key_inode = None if "inode" in ignore_properties else new_obj.inode
            _key_filename = None if "filename" in ignore_properties else new_obj.filename
            key = (_key_partition, _key_inode, _key_filename)

            #Ignore unallocated content comparisons until a later loop.  The unique identification of deleted files needs a little more to work.
            if not new_obj.alloc:
                new_fis_unalloc[key].append(new_obj)
                continue

            #The rest of this loop is irrelevant until the second DFXML file.
            if old_fis is None:
                new_fis[key] = new_obj
                continue


            if key in old_fis:
                #Extract the old fileobject and check for changes
                old_obj = old_fis.pop(key)
                new_obj.original_fileobject = old_obj
                new_obj.compare_to_original(file_ignores=d.diff_file_ignores)

                #_logger.debug("Diffs: %r." % _diffs)
                _diffs = new_obj.diffs - d.diff_file_ignores
                #_logger.debug("Diffs after ignore-set: %r." % _diffs)
                if diff_mask_set:
                    _diffs &= diff_mask_set
                    #_logger.debug("Diffs after mask-set: %r." % _diffs)

                if len(_diffs) > 0:
                    #_logger.debug("Remaining diffs: " + repr(_diffs))
                    fileobjects_changed.append(new_obj)
                else:
                    #Unmodified file; only keep if requested.
                    if retain_unchanged:
                        fileobjects_unchanged.append(new_obj)
            else:
                #Store the new object
                new_fis[key] = new_obj

        #The rest of the files loop is irrelevant until the second file.
        if old_fis is None:
            continue

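        #At this point old_fis retains only old-image files that no new-image key matched.
        #The passes below try to explain them as renames, inode-number changes, or
        #deletions before they are reported.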
        _logger.debug("len(old_fis) = %d" % len(old_fis))
        _logger.debug("len(old_fis_unalloc) = %d" % len(old_fis_unalloc))
        _logger.debug("len(new_fis) = %d" % len(new_fis))
        _logger.debug("len(new_fis_unalloc) = %d" % len(new_fis_unalloc))
        _logger.debug("len(fileobjects_changed) = %d" % len(fileobjects_changed))

        #Identify renames - only possible if 1-to-1.  Many-to-many renames are just left as new and deleted files.
        _logger.debug("Detecting renames...")
        fileobjects_renamed = []
        def _make_name_map(fis):
            """Returns a dictionary, mapping (partition, inode) -> {filename}."""
            retdict = collections.defaultdict(lambda: set())
            for (partition, inode, filename) in fis.keys():
                retdict[(partition, inode)].add(filename)
            return retdict
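        #Illustration (hypothetical): if (partition 1, inode 64) maps to the single name
        #{"a.txt"} in old_inode_names and to {"b.txt"} in new_inode_names, the loop below
        #treats the file as renamed.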
        old_inode_names = _make_name_map(old_fis)
        new_inode_names = _make_name_map(new_fis)
        for key in new_inode_names.keys():
            (partition, inode) = key

            if len(new_inode_names[key]) != 1:
                continue
            if key not in old_inode_names:
                continue
            if len(old_inode_names[key]) != 1:
                continue
            if rename_requires_hash:
                #Peek at each set's single element via an ephemeral list conversion
                old_obj = old_fis[(partition, inode, list(old_inode_names[key])[0])]
                new_obj = new_fis[(partition, inode, list(new_inode_names[key])[0])]
                if old_obj.sha1 != new_obj.sha1:
                    continue

            #Reaching this point in the loop means a one-to-one rename match was found
            old_name = old_inode_names[key].pop()
            new_name = new_inode_names[key].pop()
            old_obj = old_fis.pop((partition, inode, old_name))
            new_obj = new_fis.pop((partition, inode, new_name))
            new_obj.original_fileobject = old_obj
            new_obj.compare_to_original(file_ignores=d.diff_file_ignores)
            fileobjects_renamed.append(new_obj)
        _logger.debug("len(old_fis) -> %d" % len(old_fis))
        _logger.debug("len(new_fis) -> %d" % len(new_fis))
        _logger.debug("len(fileobjects_changed) -> %d" % len(fileobjects_changed))
        _logger.debug("len(fileobjects_renamed) = %d" % len(fileobjects_renamed))

        #Identify files that just changed inode number - basically, doing the rename detection again
        _logger.debug("Detecting inode number changes...")
        def _make_inode_map(fis):
            """Returns a dictionary, mapping (partition, filename) -> inode."""
            retdict = dict()
            for (partition, inode, filename) in fis.keys():
                if (partition, filename) in retdict:
                    _logger.warning("Multiple instances of the file path %r were found in partition %r; this violates an assumption of this program, that paths are unique within partitions." % (filename, partition))
                retdict[(partition, filename)] = inode
            return retdict
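        #Any (partition, filename) pair present in both maps at this stage must carry
        #different inode numbers (equal triples were already matched by key above), so
        #each such pair is treated as an inode-number change.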
        old_name_inodes = _make_inode_map(old_fis)
        new_name_inodes = _make_inode_map(new_fis)
        for key in new_name_inodes.keys():
            if key not in old_name_inodes:
                continue
            (partition, name) = key
            old_obj = old_fis.pop((partition, old_name_inodes[key], name))
            new_obj = new_fis.pop((partition, new_name_inodes[key], name))
            new_obj.original_fileobject = old_obj
            #TODO Test for what chaos ensues when filename is in the ignore list.
            new_obj.compare_to_original(file_ignores=d.diff_file_ignores)
            fileobjects_changed.append(new_obj)
        _logger.debug("len(old_fis) -> %d" % len(old_fis))
        _logger.debug("len(new_fis) -> %d" % len(new_fis))
        _logger.debug("len(fileobjects_changed) -> %d" % len(fileobjects_changed))
        #And that's the end of the allocated-only, per-volume analysis.

        #We may be able to match files that aren't allocated against files we think are deleted
        _logger.debug("Detecting modifications from unallocated files...")
        fileobjects_deleted = []
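        #Two cases are distinguished below: a key unallocated in both images is compared
        #for changes, while a key allocated in the old image but unallocated in the new
        #image is reported as a deletion.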
        for key in new_fis_unalloc:
            #Several unallocated files can share one (partition, inode, filename) key; such repeats are too ambiguous to compare.
            if len(new_fis_unalloc[key]) != 1:
                continue

            if key in old_fis_unalloc:
                if len(old_fis_unalloc[key]) == 1:
                    #The file was unallocated in the previous image, too.
                    old_obj = old_fis_unalloc[key].pop()
                    new_obj = new_fis_unalloc[key].pop()
                    new_obj.original_fileobject = old_obj
                    new_obj.compare_to_original(file_ignores=d.diff_file_ignores)
                    #The file might not have changed.  It's interesting if it did, though.

                    _diffs = new_obj.diffs - d.diff_file_ignores
                    #_logger.debug("Diffs: %r." % _diffs)
                    if diff_mask_set:
                        _diffs &= diff_mask_set
                        #_logger.debug("Diffs after mask-set: %r." % _diffs)
                    if len(_diffs) > 0:
                        _logger.debug("Remaining diffs: " + repr(_diffs))
                        fileobjects_changed.append(new_obj)
                    elif retain_unchanged:
                        fileobjects_unchanged.append(new_obj)
            elif key in old_fis:
                #Identified a deletion.
                old_obj = old_fis.pop(key)
                new_obj = new_fis_unalloc[key].pop()
                new_obj.original_fileobject = old_obj
                new_obj.compare_to_original(file_ignores=d.diff_file_ignores)
                fileobjects_deleted.append(new_obj)
        _logger.debug("len(old_fis) -> %d" % len(old_fis))
        _logger.debug("len(old_fis_unalloc) -> %d" % len(old_fis_unalloc))
        _logger.debug("len(new_fis) -> %d" % len(new_fis))
        _logger.debug("len(new_fis_unalloc) -> %d" % len(new_fis_unalloc))
        _logger.debug("len(fileobjects_changed) -> %d" % len(fileobjects_changed))
        _logger.debug("len(fileobjects_deleted) -> %d" % len(fileobjects_deleted))

        #After deletion matching is performed, one might want to look for files migrating to other partitions.
        #However, since between-volume migration creates a new deleted file, this algorithm instead ignores partition migrations.
        #AJN TODO On further thought, I can't find a reason against attempting this match.  It becomes complicated if we try looking for reallocations in new_fis, strictly from new_fis_unalloc.

        #TODO We might also want to match the unallocated objects based on metadata addresses.  Unfortunately, that requires implementation of additional byte runs, which hasn't been fully designed yet in the DFXML schema.

        #Begin output.
        #First, annotate the volume objects.
        for key in new_volumes:
            v = new_volumes[key]
            v.annos.add("new")
        for key in old_volumes:
            v = old_volumes[key]
            v.annos.add("deleted")
        for key in matched_volumes:
            v = matched_volumes[key]
            if len(v.diffs) > 0:
                v.annos.add("modified")

        #Build the dictionary of FileObject appenders: the child volumes of the DFXML Document.
        #Key: Partition number, or None
        #Value: Reference to the VolumeObject corresponding with that partition number.  None -> the DFXMLObject.
        appenders = dict()
        for volume_dict in [new_volumes, matched_volumes, old_volumes]:
            for (offset, ftype_str) in volume_dict:
                veo = volumes_encounter_order[(offset, ftype_str)]
                if veo in appenders:
                    raise ValueError("This pair is already in the appenders dictionary, which was supposed to be distinct: " + repr((offset, ftype_str)) + ", encounter order " + str(veo) + ".")
                v = volume_dict[(offset, ftype_str)]
                appenders[veo] = v
                d.append(v)

        #Add in the default appender, the DFXML Document itself.
        appenders[None] = d
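        #Files whose partition is None (those outside any recognized volume) therefore
        #append directly to the DFXML document itself.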

        #A file should only be considered "modified" if its contents have changed.
        content_diffs = {"md5", "sha1", "sha256"}
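        #Example (illustrative): a file whose sha1 hash differs is annotated "modified"
        #below, while a file whose only diff is mtime is annotated "changed"; a file with
        #both kinds of diffs receives both annotations.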

        def _maybe_match_attr(obj):
            """Just adds the 'matched' annotation when called."""
            if annotate_matches:
                obj.annos.add("matched")

        #Populate DFXMLObject.
        for key in new_fis:
            #TODO If this script ever does a series of >2 DFXML files, these diff additions need to be removed for the next round.
            fi = new_fis[key]
            fi.annos.add("new")
            appenders[fi.partition].append(fi)
        for key in new_fis_unalloc:
            for fi in new_fis_unalloc[key]:
                fi.annos.add("new")
                appenders[fi.partition].append(fi)
        for fi in fileobjects_deleted:
            #Independently flag for name, content, and metadata modifications
            if len(fi.diffs - content_diffs) > 0:
                fi.annos.add("changed")
            if len(content_diffs.intersection(fi.diffs)) > 0:
                fi.annos.add("modified")
            if "filename" in fi.diffs:
                fi.annos.add("renamed")
            fi.annos.add("deleted")
            _maybe_match_attr(fi)
            appenders[fi.partition].append(fi)
        for key in old_fis:
            ofi = old_fis[key]
            nfi = Objects.FileObject()
            nfi.original_fileobject = ofi
            nfi.annos.add("deleted")
            appenders[ofi.partition].append(nfi)
        for key in old_fis_unalloc:
            for ofi in old_fis_unalloc[key]:
                nfi = Objects.FileObject()
                nfi.original_fileobject = ofi
                nfi.annos.add("deleted")
                appenders[ofi.partition].append(nfi)
        for fi in fileobjects_renamed:
            #Independently flag for content and metadata modifications
            if len(content_diffs.intersection(fi.diffs)) > 0:
                fi.annos.add("modified")
            if len(fi.diffs - content_diffs) > 0:
                fi.annos.add("changed")
            fi.annos.add("renamed")
            _maybe_match_attr(fi)
            appenders[fi.partition].append(fi)
        for fi in fileobjects_changed:
            #Independently flag for content and metadata modifications
            if len(content_diffs.intersection(fi.diffs)) > 0:
                fi.annos.add("modified")
            if len(fi.diffs - content_diffs) > 0:
                fi.annos.add("changed")
            _maybe_match_attr(fi)
            appenders[fi.partition].append(fi)
        for fi in fileobjects_unchanged:
            _maybe_match_attr(fi)
            appenders[fi.partition].append(fi)

        #Output
        return d
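
#Minimal usage sketch (hedged: this excerpt does not show the enclosing function's
#name or signature; "make_differential_dfxml" and its file-name arguments are
#assumptions based on the module's conventions):
#
#    d = make_differential_dfxml("baseline.dfxml", "followup.dfxml")
#    print(d.to_dfxml())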