예제 #1
0
    def testAppendOfEncryptedSingleChunkPlusOne(self):
        """Write exactly one 512-byte chunk to an encrypted stream, reopen
        the container in random-access mode to append a single byte at the
        chunk boundary, then verify all 513 bytes read back.

        Fixes: removed a leftover debug print, removed the unused ``txt``
        buffer and an unused local URN, and replaced the deprecated
        ``assertEquals`` alias with ``assertEqual``.
        """
        version = container.Version(0, 1, "pyaff4")
        kb = keybag.PasswordWrappedKeyBag.create("secret")

        # Pass 1: create the container and write one full chunk of 'a's.
        with data_store.MemoryDataStore() as resolver:
            resolver.Set(lexicon.transient_graph, self.filename_urn,
                         lexicon.AFF4_STREAM_WRITE_MODE,
                         rdfvalue.XSDString("truncate"))

            with zip.ZipFile.NewZipFile(resolver, version,
                                        self.filename_urn) as zip_file:
                self.volume_urn = zip_file.urn
                self.image_urn = self.volume_urn.Append(self.image_name)

                self.image_urn_2 = self.image_urn.Append("2")
                with aff4_image.AFF4Image.NewAFF4Image(
                        resolver,
                        self.image_urn_2,
                        self.volume_urn,
                        type=lexicon.AFF4_ENCRYPTEDSTREAM_TYPE) as image:
                    image.DEBUG = True
                    image.setKeyBag(kb)
                    image.setKey(kb.unwrap_key("secret"))
                    image.Write(b'a' * 512)

        # Pass 2: reopen in "random" (in-place edit) mode and append one
        # byte immediately past the existing chunk.
        with data_store.MemoryDataStore() as resolver:
            resolver.Set(lexicon.transient_graph, self.filename_urn,
                         lexicon.AFF4_STREAM_WRITE_MODE,
                         rdfvalue.XSDString("random"))

            with zip.ZipFile.NewZipFile(resolver, version,
                                        self.filename_urn) as zip_file:
                self.volume_urn = zip_file.urn
                self.image_urn = self.volume_urn.Append(self.image_name)

                self.image_urn_2 = self.image_urn.Append("2")
                with aff4_image.AFF4Image.NewAFF4Image(
                        resolver,
                        self.image_urn_2,
                        self.volume_urn,
                        type=lexicon.AFF4_ENCRYPTEDSTREAM_TYPE) as image:
                    image.DEBUG = True
                    image.setKeyBag(kb)
                    image.setKey(kb.unwrap_key("secret"))
                    image.SeekWrite(512, 0)
                    image.Write(b'b')

        # Pass 3: read everything back and verify size and content.
        with data_store.MemoryDataStore() as resolver:
            with zip.ZipFile.NewZipFile(resolver, version,
                                        self.filename_urn) as zip_file:
                self.image_urn_2 = self.image_urn.Append("2")
                with resolver.AFF4FactoryOpen(self.image_urn_2) as image:
                    image.setKeyBag(kb)
                    image.DEBUG = True
                    image.setKey(kb.unwrap_key("secret"))
                    self.assertEqual(513, image.Size())
                    self.assertEqual(b'a' * 512 + b'b', image.ReadAll())
예제 #2
0
    def CreateMember(self, child_urn):
        """Create a new file member inside this directory volume.

        Args:
          child_urn: URN of the member to create; must be contained
            within this volume's URN.

        Returns:
          An open stream object for the new member (from AFF4FactoryOpen).

        Raises:
          IOError: if child_urn does not live under the volume URN.
        """
        # Check that child is a relative path in our URN.  RelativePath
        # returns the serialized child URN unchanged when the child is not
        # contained within self.urn.
        relative_path = self.urn.RelativePath(child_urn)
        if relative_path == child_urn.SerializeToString():
            raise IOError("Child URN is not within container URN.")

        # Use this filename. Note that since filesystems can not typically
        # represent files and directories as the same path component we can not
        # allow slashes in the filename. Otherwise we will fail to create
        # e.g. stream/0000000 and stream/0000000/index.
        filename = aff4_utils.member_name_for_urn(child_urn,
                                                  self.urn,
                                                  slash_ok=False)

        # We are allowed to create any files inside the directory volume.
        self.resolver.Set(child_urn, lexicon.AFF4_TYPE,
                          rdfvalue.URN(lexicon.AFF4_FILE_TYPE))
        self.resolver.Set(child_urn, lexicon.AFF4_STREAM_WRITE_MODE,
                          rdfvalue.XSDString("truncate"))
        self.resolver.Set(child_urn, lexicon.AFF4_DIRECTORY_CHILD_FILENAME,
                          rdfvalue.XSDString(filename))

        # Store the member inside our storage location.
        self.resolver.Set(
            child_urn, lexicon.AFF4_FILE_NAME,
            rdfvalue.XSDString(self.root_path + os.sep + filename))

        # Open the backing file, mark the volume dirty so metadata is
        # re-serialized on close, and record the new child.
        result = self.resolver.AFF4FactoryOpen(child_urn)
        self.MarkDirty()
        self.children.add(child_urn)

        return result
예제 #3
0
    def setUp(self):
        """Build an in-memory data store holding two attributes on the
        subject URN "hello": a plain "World" attribute and an AFF4 type.
        (Legacy 3-argument Set() API.)"""
        self.store = data_store.MemoryDataStore()
        store = self.store
        store.Set(rdfvalue.URN("hello"), rdfvalue.URN("World"),
                  rdfvalue.XSDString("foo"))
        store.Set(rdfvalue.URN("hello"), rdfvalue.URN(lexicon.AFF4_TYPE),
                  rdfvalue.XSDString("bar"))
예제 #4
0
    def setUp(self):
        """Build an in-memory data store (graph-aware 4-argument API)
        holding a snappy-compression attribute and an AFF4 type on the
        subject aff4://hello."""
        self.hello_urn = rdfvalue.URN("aff4://hello")
        self.store = data_store.MemoryDataStore()
        store = self.store
        store.Set(None, self.hello_urn,
                  rdfvalue.URN(lexicon.AFF4_IMAGE_COMPRESSION_SNAPPY),
                  rdfvalue.XSDString("foo"))
        store.Set(None, self.hello_urn, rdfvalue.URN(lexicon.AFF4_TYPE),
                  rdfvalue.XSDString("bar"))
예제 #5
0
    def testDataStore(self):
        """Get() returns the stored XSDString, and a second Set() on the
        same (subject, attribute) pair replaces the earlier value.

        Fix: replaced the deprecated ``assertEquals`` alias with
        ``assertEqual``.
        """
        result = self.store.Get(rdfvalue.URN("hello"), rdfvalue.URN("World"))
        self.assertEqual(type(result), rdfvalue.XSDString)

        self.assertEqual(result.SerializeToString(), "foo")

        self.store.Set(rdfvalue.URN("hello"), rdfvalue.URN("World"),
                       rdfvalue.XSDString("bar"))

        # In the current implementation a second Set() overwrites the previous
        # value.
        self.assertEqual(
            self.store.Get(rdfvalue.URN("hello"), rdfvalue.URN("World")),
            rdfvalue.XSDString("bar"))
예제 #6
0
    def testEditInplaceZip(self):
        """Create a zip with two stored segments, then reopen it in
        "random" mode and overwrite the first four bytes of one segment
        in place; the container size must not change.

        Fixes: removed a leftover debug ``print()`` (the backing store is
        still opened, in case that open has side effects the test relies
        on) and replaced the deprecated ``assertEquals`` alias with
        ``assertEqual``.
        """
        try:
            os.unlink(self.filename)
        except (IOError, OSError):
            pass

        with data_store.MemoryDataStore() as resolver:
            resolver.Set(lexicon.transient_graph, self.filename_urn,
                         lexicon.AFF4_STREAM_WRITE_MODE,
                         rdfvalue.XSDString("truncate"))

            with zip.ZipFile.NewZipFile(resolver, Version(1, 1, "pyaff4"),
                                        self.filename_urn) as zip_container:
                self.volume_urn = zip_container.urn

                with zip_container.CreateZipSegment("foo") as segment:
                    segment.compression_method = zip.ZIP_STORED
                    segment.Write(b'abcdefghijk')
                    segment.Flush()

                with zip_container.CreateZipSegment("bar") as segment:
                    segment.compression_method = zip.ZIP_STORED
                    segment.Write(b'alkjflajdflaksjdadflkjd')
                    segment.Flush()

                backing_store_urn = resolver.GetUnique(lexicon.transient_graph,
                                                       self.volume_urn,
                                                       lexicon.AFF4_STORED)
                with resolver.AFF4FactoryOpen(backing_store_urn):
                    pass

        # The freshly written container has a known, fixed size.
        self.assertEqual(716, os.stat(self.filename).st_size)

        with data_store.MemoryDataStore() as resolver:
            resolver.Set(lexicon.transient_graph, self.filename_urn,
                         lexicon.AFF4_STREAM_WRITE_MODE,
                         rdfvalue.XSDString("random"))

            with zip.ZipFile.NewZipFile(resolver, Version(1, 1, "pyaff4"),
                                        self.filename_urn) as zip_file:
                self.volume_urn = zip_file.urn

                with zip_file.OpenZipSegment("foo") as segment:
                    segment.SeekWrite(0, 0)
                    segment.Write(b'0000')

        # In-place editing must not grow or shrink the container.
        self.assertEqual(716, os.stat(self.filename).st_size)
예제 #7
0
    def setUp(self):
        """Create an AFF4 directory volume at self.root_path containing a
        single member holding b"Hello world".

        The member also gets an AFF4_STREAM_ORIGINAL_FILENAME attribute
        recording the path it nominally came from.
        """
        with data_store.MemoryDataStore() as resolver:
            root_urn = rdfvalue.URN.NewURNFromFilename(self.root_path)

            # "truncate" makes NewAFF4Directory start from a clean volume.
            resolver.Set(root_urn, lexicon.AFF4_STREAM_WRITE_MODE,
                         rdfvalue.XSDString("truncate"))

            with aff4_directory.AFF4Directory.NewAFF4Directory(
                    resolver, root_urn) as volume:

                segment_urn = volume.urn.Append(self.segment_name)
                with volume.CreateMember(segment_urn) as member:
                    member.Write(b"Hello world")
                    resolver.Set(
                        member.urn, lexicon.AFF4_STREAM_ORIGINAL_FILENAME,
                        rdfvalue.XSDString(self.root_path + self.segment_name))
예제 #8
0
    def render_acquisition(self, renderer):
        """Do the actual acquisition.

        Opens (or appends to) the destination AFF4 volume and copies into
        it, depending on the configured options: physical memory, the page
        file, mapped files, and/or regular files.

        Fix: the ``any(...)`` guard was called with three positional
        arguments; ``any`` takes a single iterable, so the original
        raised TypeError whenever that branch was reached.  The values
        are now wrapped in a tuple.
        """
        with renderer.open(filename=self.destination, mode="a+b") as out_fd:
            with data_store.MemoryDataStore() as resolver:
                output_urn = rdfvalue.URN.FromFileName(out_fd.name)
                mode = "truncate"
                if self.append:
                    mode = "append"
                    # Appending means we read the volume first, then add new
                    # members to it.

                resolver.Set(output_urn, lexicon.AFF4_STREAM_WRITE_MODE,
                             rdfvalue.XSDString(mode))

                with zip.ZipFile.NewZipFile(resolver, output_urn) as volume:
                    # We allow acquiring memory from a non volatile physical
                    # address space as a way of converting an image from another
                    # format to AFF4.
                    if self.session.physical_address_space:
                        if self.also_memory:
                            # Get the physical memory.
                            self.copy_physical_address_space(
                                renderer, resolver, volume)

                            # Also grab the default files on this OS.
                            self.copy_files(renderer, resolver, volume,
                                            self._default_file_globs())

                        # We only copy files if we are running on a raw device.
                        if self.session.physical_address_space.volatile:
                            if self.also_pagefile:
                                self.copy_page_file(renderer, resolver, volume)

                            if self.also_mapped_files:
                                self.copy_mapped_files(renderer, resolver,
                                                       volume)

                            # If a physical_address_space is specified, then we
                            # only allow copying files if it is volatile.
                            if self.files:
                                self.copy_files(renderer, resolver, volume,
                                                self.files)
                        elif any((self.also_pagefile, self.also_mapped_files,
                                  self.files)):
                            raise RuntimeError(
                                "Imaging options require access to live memory "
                                "but the physical address space is not "
                                "volatile. Did you mean to specify the --live "
                                "option?")

                    elif self.memory_access_options:
                        raise RuntimeError(
                            "Imaging options require access to memory but no "
                            "suitable address space was defined. Did you mean "
                            "to specify the --live option?")

                    # User can request to just acquire regular files but only if
                    # no physical_address_space is also specified.
                    elif self.files:
                        self.copy_files(renderer, resolver, volume, self.files)
예제 #9
0
    def LoadFromTurtle(self, stream):
        """Parse Turtle RDF from ``stream`` and Add() each triple to this
        store, mapping RDF literals onto pyaff4 rdfvalue types.

        Fixes: removed a dead ``dt`` local and a commented-out debug
        print.

        Args:
          stream: a file-like object containing Turtle data.  Only the
            first 1,000,000 bytes are read.
        """
        data = stream.read(1000000)
        g = rdflib.Graph()
        g.parse(data=data, format="turtle")

        for urn, attr, value in g:
            urn = rdfvalue.URN(str(urn))
            attr = rdfvalue.URN(str(attr))

            if isinstance(value, rdflib.URIRef):
                value = rdfvalue.URN(str(value))
            elif value.datatype in registry.RDF_TYPE_MAP:
                # A typed literal with a registered converter.
                value = registry.RDF_TYPE_MAP[value.datatype](str(value))
            else:
                # Default to a string literal.
                value = rdfvalue.XSDString(value)

            self.Add(urn, attr, value)

        # look for the AFF4 namespace defined in the turtle
        for (_, namespace) in g.namespace_manager.namespaces():
            if (str(namespace) == lexicon.AFF4_NAMESPACE or
                    str(namespace) == lexicon.AFF4_LEGACY_NAMESPACE):
                self.aff4NS = namespace
예제 #10
0
    def _copy_file_to_image(self, renderer, resolver, volume, filename):
        """Copy a single file into the output volume as an AFF4 image.

        Falls back to raw NTFS access when the file cannot be opened
        normally (e.g. locked system files), and skips it entirely when
        even raw access fails.

        Fixes: ``while 1`` replaced with ``while True``, and the
        deprecated ``logging.warn`` alias replaced with ``warning``.
        """
        image_urn = volume.urn.Append(utils.SmartStr(filename))
        out_fd = None
        try:
            with open(filename, "rb") as in_fd:
                with aff4_image.AFF4Image.NewAFF4Image(resolver, image_urn,
                                                       volume.urn) as out_fd:

                    renderer.format("Adding file {0}\n", filename)
                    resolver.Set(image_urn,
                                 lexicon.AFF4_STREAM_ORIGINAL_FILENAME,
                                 rdfvalue.XSDString(filename))

                    # Stream the file across in BUFFERSIZE chunks.
                    while True:
                        data = in_fd.read(self.BUFFERSIZE)
                        if not data:
                            break

                        out_fd.write(data)

        except IOError:
            try:
                self.session.logging.debug(
                    "Unable to read %s. Attempting raw access.", filename)

                # We can not just read this file, parse it from the NTFS.
                self._copy_raw_file_to_image(renderer, resolver, volume,
                                             filename)
            except IOError:
                self.session.logging.warning("Unable to read %s. Skipping.",
                                             filename)

        finally:
            # NOTE(review): out_fd was also managed by the context manager
            # above, so Close() here may run on an already-closed stream on
            # the success path -- confirm resolver.Close tolerates that.
            if out_fd:
                resolver.Close(out_fd)
예제 #11
0
    def create(self):
        """Create an encrypted AFF4 image inside a fresh zip container.

        Writes 100 copies of ``src`` into an encrypted stream keyed by the
        password "password", and records the resulting volume, crypto
        stream and image URNs on self for later use.
        """
        version = container.Version(1, 1, "pyaff4")
        with data_store.MemoryDataStore() as resolver:
            resolver.Set(lexicon.transient_graph, self.filenameA_urn,
                         lexicon.AFF4_STREAM_WRITE_MODE,
                         rdfvalue.XSDString("truncate"))

            with zip.ZipFile.NewZipFile(resolver, version,
                                        self.filenameA_urn) as zip_file:
                self.volume_urn = zip_file.urn
                image_urn = self.volume_urn.Append(self.image_name)

                self.crypto_stream_arn = image_urn

                # Use default compression.
                with aff4_image.AFF4Image.NewAFF4Image(
                        resolver,
                        image_urn,
                        self.volume_urn,
                        type=lexicon.AFF4_ENCRYPTEDSTREAM_TYPE) as image:
                    image.chunk_size = 512
                    image.chunks_per_segment = 1024

                    kb = keybag.KeyBag.create("password")
                    image.setKeyBag(kb)
                    image.setKey(kb.unwrap_key("password"))

                    # NOTE(review): ``src`` is not defined in this method;
                    # presumably a module-level test buffer -- confirm.
                    for i in range(100):
                        image.Write(src)

                    self.image_urn = image.urn
예제 #12
0
    def setUp(self):
        """Create a zip volume with three members exercising unusual
        names: a path-escaped segment, a UNC-style segment, and a name
        beginning with a period (appended unquoted).
        """
        with data_store.MemoryDataStore() as resolver:
            resolver.Set(lexicon.transient_graph, self.filename_urn,
                         lexicon.AFF4_STREAM_WRITE_MODE,
                         rdfvalue.XSDString("truncate"))

            with zip.ZipFile.NewZipFile(resolver, Version(1, 1, "pyaff4"),
                                        self.filename_urn) as zip_file:
                self.volume_urn = zip_file.urn
                # quote=False: the escaping helper already produced a
                # URL-safe path fragment.
                segment_urn = self.volume_urn.Append(
                    escaping.arnPathFragment_from_path(self.segment_name),
                    quote=False)

                with zip_file.CreateMember(segment_urn) as segment:
                    segment.Write(self.data1)

                unc_segment_urn = self.volume_urn.Append(
                    escaping.arnPathFragment_from_path(self.unc_segment_name),
                    quote=False)

                with zip_file.CreateMember(unc_segment_urn) as segment:
                    segment.Write(self.data1)

                period_start_segment_urn = self.volume_urn.Append(
                    self.period_start_segment_name, quote=False)

                with zip_file.CreateMember(
                        period_start_segment_urn) as segment:
                    segment.Write(self.data1)
예제 #13
0
    def testLargerThanBevyWrite(self):
        """Write 11 bytes through a stream whose bevy holds only 10
        (chunk_size=5, chunks_per_segment=2), forcing a bevy rollover,
        then reopen the container and verify the full contents.

        Fixes: unified on ``assertEqual`` (``assertEquals`` is a
        deprecated alias) and dropped an unused local URN.
        """
        version = container.Version(0, 1, "pyaff4")

        with data_store.MemoryDataStore() as resolver:
            resolver.Set(lexicon.transient_graph, self.filename_urn,
                         lexicon.AFF4_STREAM_WRITE_MODE,
                         rdfvalue.XSDString("truncate"))

            with zip.ZipFile.NewZipFile(resolver, version,
                                        self.filename_urn) as zip_file:
                self.volume_urn = zip_file.urn
                self.image_urn = self.volume_urn.Append(self.image_name)

                self.image_urn_2 = self.image_urn.Append("2")
                with aff4_image.AFF4Image.NewAFF4Image(
                        resolver, self.image_urn_2, self.volume_urn) as image:
                    image.chunk_size = 5
                    image.chunks_per_segment = 2
                    image.Write(b"abcdeabcdea")
                    self.assertEqual(b"abcde", image.Read(5))

        with data_store.MemoryDataStore() as resolver:
            with zip.ZipFile.NewZipFile(resolver, version,
                                        self.filename_urn) as zip_file:
                self.image_urn_2 = self.image_urn.Append("2")
                with resolver.AFF4FactoryOpen(self.image_urn_2) as image:
                    self.assertEqual(11, image.Size())
                    self.assertEqual(b"abcdeabcdea", image.ReadAll())
예제 #14
0
    def setUp(self):
        """Create a zip volume containing a twice-written segment and a
        deflate-compressed segment written via WriteStream.
        """
        # Remove leftovers from a previous run; a missing file is fine.
        try:
            os.unlink(self.filename)
        except (IOError, OSError):
            pass

        with data_store.MemoryDataStore() as resolver:
            resolver.Set(lexicon.transient_graph, self.filename_urn,
                         lexicon.AFF4_STREAM_WRITE_MODE,
                         rdfvalue.XSDString("truncate"))

            with zip.ZipFile.NewZipFile(resolver, version.aff4v10,
                                        self.filename_urn) as zip_file:
                self.volume_urn = zip_file.urn
                segment_urn = self.volume_urn.Append(self.segment_name)

                with zip_file.CreateMember(segment_urn) as segment:
                    segment.Write(self.data1)

                # Re-open the same member and write data2 after seeking;
                # presumably whence=2 means end-relative, mirroring
                # file.seek -- TODO confirm SeekWrite's convention.
                with zip_file.CreateMember(segment_urn) as segment2:
                    segment2.SeekWrite(0, 2)
                    segment2.Write(self.data2)

                streamed_urn = self.volume_urn.Append(self.streamed_segment)
                with zip_file.CreateMember(streamed_urn) as streamed:
                    streamed.compression_method = zip.ZIP_DEFLATE
                    src = io.BytesIO(self.data1)
                    streamed.WriteStream(src)
예제 #15
0
    def LoadFromTurtle(self, stream, volume_arn):
        """Parse Turtle RDF from ``stream`` into this store, scoping the
        triples to ``volume_arn``'s graph.

        Subjects typed as AFF4 images additionally get an AFF4_STORED
        triple in the transient graph pointing back at the volume so they
        can be located later.

        Fix: removed a dead ``dt`` local.
        """
        data = streams.ReadAll(stream)
        g = rdflib.Graph()
        g.parse(data=data, format="turtle")

        for urn, attr, value in g:
            urn = utils.SmartUnicode(urn)
            attr = utils.SmartUnicode(attr)
            serialized_value = value

            if isinstance(value, rdflib.URIRef):
                value = rdfvalue.URN(utils.SmartUnicode(serialized_value))
            elif value.datatype in registry.RDF_TYPE_MAP:
                # A typed literal with a registered converter.
                value = registry.RDF_TYPE_MAP[value.datatype](
                    serialized_value)
            else:
                # Default to a string literal.
                value = rdfvalue.XSDString(value)

            if attr == rdfvalue.URN(lexicon.AFF4_TYPE) and value == rdfvalue.URN(lexicon.AFF4_IMAGE_TYPE):
                self.Add(lexicon.transient_graph, urn, lexicon.AFF4_STORED, volume_arn)
            self.Add(volume_arn, urn, attr, value)

        # look for the AFF4 namespace defined in the turtle
        for (_, b) in g.namespace_manager.namespaces():
            if (str(b) == lexicon.AFF4_NAMESPACE or
                str(b) == lexicon.AFF4_LEGACY_NAMESPACE):
                self.aff4NS = b
예제 #16
0
    def LoadFromURN(self):
        """Open the backing file for this URN.

        The stream write-mode attribute selects the open flags:
        "truncate" -> "w+b" (then flipped to "append" so later re-opens
        do not wipe the file), "append" -> "a+b", anything else ->
        read-only "rb".

        Fix: the directory components were computed with
        ``os.sep.split(filename)``, which splits the *separator* string
        using the filename as the delimiter and never yields path
        components.  It now correctly splits the filename on os.sep.
        """
        flags = "rb"

        filename = self._GetFilename()
        if not filename:
            raise IOError("Unable to find storage for %s" % self.urn)

        filename = str(filename)

        # All path components except the final file name - the
        # intermediate directories we may need to create.
        directory_components = filename.split(os.sep)
        directory_components.pop(-1)

        mode = self.resolver.GetUnique(lexicon.transient_graph, self.urn,
                                       lexicon.AFF4_STREAM_WRITE_MODE)
        if mode == "truncate":
            flags = "w+b"
            # Downgrade to "append" so the next open of this URN does not
            # truncate the file again.
            self.resolver.Set(lexicon.transient_graph, self.urn,
                              lexicon.AFF4_STREAM_WRITE_MODE,
                              rdfvalue.XSDString("append"))
            self.properties.writable = True
            self._CreateIntermediateDirectories(directory_components)

        elif mode == "append":
            flags = "a+b"
            self.properties.writable = True
            self._CreateIntermediateDirectories(directory_components)

        LOGGER.info("Opening file %s", filename)
        self.fd = open(filename, flags)
        try:
            # Seek to the end to discover the current file size.
            self.fd.seek(0, 2)
            self.size = self.fd.tell()
        except IOError:
            # Not a regular seekable file (e.g. a pipe or device).
            self.properties.sizeable = False
            self.properties.seekable = False
예제 #17
0
    def LoadFromURN(self):
        """Load an AFF4Directory volume from its storage location.

        Reads the container description file to discover the volume's
        canonical URN (re-keying self.urn if it differs), then loads the
        turtle metadata and rewrites each child's AFF4_FILE_NAME to point
        into root_path on the local filesystem.

        Raises:
          IOError: "NOT_FOUND" when no storage attribute is set for this
            URN.
        """
        self.storage = self.resolver.Get(self.urn, lexicon.AFF4_STORED)
        if not self.storage:
            LOGGER.error("Unable to find storage for AFF4Directory %s",
                         self.urn)
            raise IOError("NOT_FOUND")

        # The actual filename for the root directory.
        self.root_path = self.storage.ToFilename()

        try:
            # We need to get the URN of the container before we can process
            # anything.
            with self.resolver.AFF4FactoryOpen(
                    self.storage.Append(
                        lexicon.AFF4_CONTAINER_DESCRIPTION)) as desc:
                if desc:
                    urn_string = desc.Read(1000)

                    # If the stored description disagrees with our URN,
                    # drop the old subject's triples and adopt the stored
                    # URN.
                    if (urn_string
                            and self.urn.SerializeToString() != urn_string):
                        self.resolver.DeleteSubject(self.urn)
                        self.urn.Set(urn_string)

                    # Set these triples with the new URN so we know how to open
                    # it.
                    self.resolver.Set(
                        self.urn, lexicon.AFF4_TYPE,
                        rdfvalue.URN(lexicon.AFF4_DIRECTORY_TYPE))

                    self.resolver.Set(self.urn, lexicon.AFF4_STORED,
                                      rdfvalue.URN(self.storage))

                    LOGGER.info("AFF4Directory volume found: %s", self.urn)

            # Try to load the RDF metadata file from the storage.
            with self.resolver.AFF4FactoryOpen(
                    self.storage.Append(
                        lexicon.AFF4_CONTAINER_INFO_TURTLE)) as turtle_stream:
                if turtle_stream:
                    self.resolver.LoadFromTurtle(turtle_stream)

                    # Find all the contained objects and adjust their filenames.
                    for subject in self.resolver.SelectSubjectsByPrefix(
                            self.urn):

                        child_filename = self.resolver.Get(
                            subject, lexicon.AFF4_DIRECTORY_CHILD_FILENAME)
                        if child_filename:
                            self.resolver.Set(
                                subject, lexicon.AFF4_FILE_NAME,
                                rdfvalue.XSDString(
                                    self.root_path + os.sep +
                                    child_filename.SerializeToString()))

        except IOError:
            # Description or turtle file missing -- treat as an empty or
            # freshly created volume.
            pass
예제 #18
0
    def testDataStore(self):
        """GetUnique() returns the stored XSDString, and a second Set()
        replaces it (graph-aware 4-argument API).

        Fix: replaced the deprecated ``assertEquals`` alias with
        ``assertEqual``.
        """
        result = self.store.GetUnique(
            None, self.hello_urn,
            rdfvalue.URN(lexicon.AFF4_IMAGE_COMPRESSION_SNAPPY))
        self.assertEqual(type(result), rdfvalue.XSDString)

        self.assertEqual(result.SerializeToString(), b"foo")

        self.store.Set(None, self.hello_urn,
                       rdfvalue.URN(lexicon.AFF4_IMAGE_COMPRESSION_SNAPPY),
                       rdfvalue.XSDString("bar"))

        # In the current implementation a second Set() overwrites the previous
        # value.
        self.assertEqual(
            self.store.GetUnique(
                None, self.hello_urn,
                rdfvalue.URN(lexicon.AFF4_IMAGE_COMPRESSION_SNAPPY)),
            rdfvalue.XSDString("bar"))
예제 #19
0
def addPathNamesToVolume(resolver, volume, pathnames, recursive, hashbased):
    """Add the given paths (files and/or directories) to an AFF4 volume.

    Directories are recorded as FolderImage/Image metadata triples; regular
    files are written as logical streams (optionally Rabin hash-based) while
    SHA1, MD5 and SHA256 hashes are computed, registered on the stream URN,
    and handed to the blockchain writer when one is configured.

    Args:
        resolver: data store receiving the metadata triples.
        volume: target AFF4 volume.
        pathnames: list of paths to add; it is extended in place during
            iteration to implement the recursive directory walk.
        recursive: when true, directory children are queued for addition.
        hashbased: when true, use hash-based logical streams.
    """
    for pathname in pathnames:
        if not os.path.exists(pathname):
            # BUG FIX: the original passed no argument to the %s format, so
            # the literal placeholder was printed instead of the path.
            print("Path %s not found. Skipping." % pathname)
            continue
        pathname = utils.SmartUnicode(pathname)
        print("\tAdding: %s" % pathname)
        fsmeta = logical.FSMetadata.create(pathname)
        if os.path.isdir(pathname):
            image_urn = None
            if volume.isAFF4Collision(pathname):
                # Avoid an ARN collision by minting a random URN instead.
                image_urn = rdfvalue.URN("aff4://%s" % uuid.uuid4())
            else:
                image_urn = volume.urn.Append(
                    escaping.arnPathFragment_from_path(pathname), quote=False)

            fsmeta.urn = image_urn
            fsmeta.store(resolver)
            resolver.Set(volume.urn, image_urn,
                         rdfvalue.URN(lexicon.standard11.pathName),
                         rdfvalue.XSDString(pathname))
            resolver.Add(volume.urn, image_urn,
                         rdfvalue.URN(lexicon.AFF4_TYPE),
                         rdfvalue.URN(lexicon.standard11.FolderImage))
            resolver.Add(volume.urn, image_urn,
                         rdfvalue.URN(lexicon.AFF4_TYPE),
                         rdfvalue.URN(lexicon.standard.Image))
            if recursive:
                # Appending to the list being iterated is deliberate: the
                # for loop picks up the new entries, walking the tree.
                for child in os.listdir(pathname):
                    pathnames.append(os.path.join(pathname, child))
        else:
            with open(pathname, "rb") as src:
                hasher = linear_hasher.StreamHasher(
                    src,
                    [lexicon.HASH_SHA1, lexicon.HASH_MD5, lexicon.HASH_SHA256])
                if not hashbased:
                    urn = volume.writeLogicalStream(pathname, hasher,
                                                    fsmeta.length)
                else:
                    urn = volume.writeLogicalStreamRabinHashBased(
                        pathname, hasher, fsmeta.length)
                fsmeta.urn = urn
                fsmeta.store(resolver)
                bc_writer = blockchain.BlockChainWriter.getBlockchainWriter()
                hash_dict = {}
                for h in hasher.hashes:
                    hh = hashes.newImmutableHash(h.hexdigest(),
                                                 hasher.hashToType[h])
                    resolver.Add(urn, urn, rdfvalue.URN(lexicon.standard.hash),
                                 hh)
                    hash_dict[h.name] = hh

                if bc_writer:
                    bc_writer.Set_hash(hash_dict["md5"], hash_dict["sha1"],
                                       hash_dict["sha256"])
예제 #20
0
    def _copy_file_to_image(self,
                            renderer,
                            resolver,
                            volume,
                            filename,
                            stat_entry=None):
        """Copy a single file from disk into the AFF4 volume.

        Files smaller than MAX_SIZE_FOR_SEGMENT are stored as plain zip
        members; larger files are written as AFF4Image streams with the
        configured compression. On IOError a raw NTFS read is attempted
        (Windows only); otherwise the file is skipped with a warning.

        Args:
            renderer: output renderer used for progress messages.
            resolver: data store receiving metadata about the new image.
            volume: destination AFF4 volume.
            filename: path of the file to copy.
            stat_entry: optional pre-computed os.stat() result; when None it
                is computed here and the file is skipped if stat() fails.
        """
        if stat_entry is None:
            try:
                stat_entry = os.stat(filename)
            except (OSError, IOError):
                # Cannot stat the file at all -- silently skip it.
                return

        image_urn = volume.urn.Append(utils.SmartStr(filename))
        out_fd = None
        try:
            with open(filename, "rb") as in_fd:
                renderer.format("Adding file {0}\n", filename)
                # Record the original on-disk location of the data.
                resolver.Set(image_urn, lexicon.AFF4_STREAM_ORIGINAL_FILENAME,
                             rdfvalue.XSDString(os.path.abspath(filename)))

                progress = aff4.ProgressContext(length=stat_entry.st_size)

                if stat_entry.st_size < self.MAX_SIZE_FOR_SEGMENT:
                    with volume.CreateMember(image_urn) as out_fd:
                        # Only enable compression if we are using it.
                        if (self.compression !=
                                lexicon.AFF4_IMAGE_COMPRESSION_STORED):
                            out_fd.compression_method = zip.ZIP_DEFLATE
                        out_fd.WriteStream(in_fd, progress=progress)
                else:
                    resolver.Set(image_urn, lexicon.AFF4_IMAGE_COMPRESSION,
                                 rdfvalue.URN(self.compression))

                    with aff4_image.AFF4Image.NewAFF4Image(
                            resolver, image_urn, volume.urn) as out_fd:
                        out_fd.WriteStream(in_fd, progress=progress)

        except IOError:
            try:
                # Currently we can only access NTFS filesystems.
                if self.profile.metadata("os") == "windows":
                    self.session.logging.debug(
                        "Unable to read %s. Attempting raw access.", filename)

                    # We can not just read this file, parse it from the NTFS.
                    self._copy_raw_file_to_image(renderer, resolver, volume,
                                                 filename)
            except IOError:
                self.session.logging.warn("Unable to read %s. Skipping.",
                                          filename)

        finally:
            # NOTE(review): out_fd also stays bound after a successful `with`
            # exit, so Close() is invoked on already-exited streams too --
            # presumably Close() is idempotent here; confirm in resolver.
            if out_fd:
                resolver.Close(out_fd)
예제 #21
0
    def testAbortImageStreamWithSingleBevyThenSecondStream(self):
        """An aborted image stream must leave no bevy/index segments behind,
        while a second stream written afterwards stays fully readable."""
        version = container.Version(0, 1, "pyaff4")

        # NOTE: the original declared `image_urn_3 = None` here but never
        # read it (self.image_urn_3 is used instead); the dead local has
        # been removed.

        with data_store.MemoryDataStore() as resolver:
            resolver.Set(lexicon.transient_graph, self.filename_urn,
                         lexicon.AFF4_STREAM_WRITE_MODE,
                         rdfvalue.XSDString("truncate"))

            with zip.ZipFile.NewZipFile(resolver, version,
                                        self.filename_urn) as zip_file:
                self.volume_urn = zip_file.urn
                image_urn = self.volume_urn.Append(self.image_name)

                # First stream: written then aborted -- must not persist.
                image_urn_2 = image_urn.Append("2")
                with aff4_image.AFF4Image.NewAFF4Image(
                        resolver, image_urn_2, self.volume_urn) as image:
                    image.chunk_size = 3
                    image.chunks_per_segment = 2
                    image.setCompressionMethod(
                        lexicon.AFF4_IMAGE_COMPRESSION_STORED)
                    image.Write(b"abcdefg")
                    image.Abort()

                # Second stream: written normally -- must persist.
                self.image_urn_3 = image_urn.Append("3")
                with aff4_image.AFF4Image.NewAFF4Image(
                        resolver, self.image_urn_3, self.volume_urn) as image:
                    image.chunk_size = 3
                    image.chunks_per_segment = 2
                    image.setCompressionMethod(
                        lexicon.AFF4_IMAGE_COMPRESSION_STORED)
                    image.Write(b"abcdefg")

        with data_store.MemoryDataStore() as resolver:
            with zip.ZipFile.NewZipFile(resolver, version,
                                        self.filename_urn) as zip_file:
                # The aborted stream's bevy and index segments must be gone.
                for i in range(0, 2):
                    seg_arn = image_urn_2.Append("%08d" % i)
                    idx_arn = image_urn_2.Append("%08d.index" % i)
                    self.assertFalse(zip_file.ContainsMember(seg_arn))
                    self.assertFalse(zip_file.ContainsMember(idx_arn))

                # The second stream's segments must all be present.
                for i in range(0, 2):
                    seg_arn = self.image_urn_3.Append("%08d" % i)
                    idx_arn = self.image_urn_3.Append("%08d.index" % i)
                    self.assertTrue(zip_file.ContainsMember(seg_arn))
                    self.assertTrue(zip_file.ContainsMember(idx_arn))

            with resolver.AFF4FactoryOpen(self.image_urn_3) as image:
                image.SeekRead(0, 0)
                res = image.Read(7)
                self.assertEqual(b"abcdefg", res)
        # Expected exact on-disk size of the resulting container.
        self.assertEqual(1265, os.stat(self.filename).st_size)
예제 #22
0
    def testFileBackedStream(self):
        """Run the generic stream tests against a file-backed stream.

        The scratch file lives in the platform temp directory (not a
        hard-coded "/tmp") so the test is consistent with the other
        testFileBackedStream in this suite and works on Windows.
        """
        import tempfile
        local_path = os.path.join(tempfile.gettempdir(), "test_filename.bin")
        filename = rdfvalue.URN.FromFileName(local_path)
        resolver = data_store.MemoryDataStore()
        try:
            # Truncate any pre-existing scratch file.
            resolver.Set(filename, lexicon.AFF4_STREAM_WRITE_MODE,
                         rdfvalue.XSDString("truncate"))

            with resolver.AFF4FactoryOpen(filename) as file_stream:
                self.streamTest(file_stream)
        finally:
            # Always remove the scratch file, even if the test failed.
            os.unlink(filename.Parse().path)
예제 #23
0
    def setUp(self):
        """Create a fresh container holding a single zip segment of data1."""
        with data_store.MemoryDataStore() as resolver:
            # Discard any pre-existing container file.
            write_mode = rdfvalue.XSDString("truncate")
            resolver.Set(lexicon.transient_graph, self.filename_urn,
                         lexicon.AFF4_STREAM_WRITE_MODE, write_mode)

            with zip.ZipFile.NewZipFile(resolver, version.aff4v11,
                                        self.filename_urn) as volume:
                self.volume_urn = volume.urn

                # Store the test payload as one segment of the volume.
                with volume.CreateZipSegment(self.segment_name,
                                             arn=None) as seg:
                    seg.Write(self.data1)
예제 #24
0
    def createURN(resolver, container_urn):
        """Public method to create a new writable logical AFF4 container.

        Args:
            resolver: data store that tracks write modes and metadata for
                the new container.
            container_urn: URN of the container file to (re)create.

        Returns:
            A WritableHashBasedImageContainer wrapping the new zip volume.
        """

        # "truncate" discards any existing file at container_urn.
        resolver.Set(lexicon.transient_graph, container_urn,
                     lexicon.AFF4_STREAM_WRITE_MODE,
                     rdfvalue.XSDString("truncate"))

        version = Version(1, 1, "pyaff4")
        with zip.ZipFile.NewZipFile(resolver, version,
                                    container_urn) as zip_file:
            volume_urn = zip_file.urn
            return WritableHashBasedImageContainer(version, volume_urn,
                                                   resolver, lexicon.standard)
예제 #25
0
    def __init__(self, filename=None, **kwargs):
        """Open an AFF4 volume (and a stream within it) as an address space.

        Args:
            filename: optional path/URL of the AFF4 volume; falls back to
                the session "filename" parameter.
            **kwargs: forwarded to the base address space constructor.

        Raises:
            addrspace.ASAssertionError: when the volume or stream cannot be
                located or opened.
        """
        super(AFF4AddressSpace, self).__init__(**kwargs)
        # PEP 8: compare against None with identity, not equality.
        self.as_assert(self.base is None,
                       "Must stack on another address space")

        path = filename or self.session.GetParameter("filename")
        self.as_assert(path is not None, "Filename must be specified")

        self.image = None
        self.resolver = data_store.MemoryDataStore()

        # If we have a cache directory, configure AFF4 to use it.
        try:
            cache_dir = cache.GetCacheDir(self.session)
            if cache_dir:
                self.resolver.Set(lexicon.AFF4_CONFIG_CACHE_DIR,
                                  lexicon.AFF4_FILE_NAME,
                                  rdfvalue.XSDString(
                                      os.path.join(cache_dir, "aff4_cache")))
        except IOError:
            # Best effort: an unavailable cache dir simply disables caching.
            pass

        # A map between the filename and the offset it is mapped into the
        # address space.
        self.mapped_files = {}
        try:
            volume_path, stream_path = self._LocateAFF4Volume(path)
        except IOError as e:
            self.session.logging.debug("Unable to open AFF4 image %s", e)
            raise addrspace.ASAssertionError("Unable to open AFF4 volume")

        # filename is a volume, and there is no stream specified, just autoload
        # the stream if possible.
        if not stream_path:
            try:
                self._AutoLoadAFF4Volume(volume_path)
                return
            except IOError as e:
                raise addrspace.ASAssertionError(
                    "Unable to open AFF4 volume: %s" % e)

        # If the user asked for a specific stream just load that one. Note that
        # you can still load the pagefile manually using the --pagefile
        # parameter.
        try:
            image_urn = volume_path.Append(stream_path)
            self._LoadMemoryImage(image_urn)
        except IOError as e:
            raise addrspace.ASAssertionError(
                "Unable to open AFF4 stream %s: %s" % (
                    stream_path, e))
예제 #26
0
    def testRemoveIsEmpty(self):
        """Removing the only member of a fresh container leaves it (almost)
        empty: the member is gone on reopen and the file size matches the
        expected bare-container size."""
        try:
            os.unlink(self.filename)
        except (IOError, OSError):
            # No stale file from a previous run -- nothing to do.
            pass

        with data_store.MemoryDataStore() as resolver:
            resolver.Set(lexicon.transient_graph, self.filename_urn,
                         lexicon.AFF4_STREAM_WRITE_MODE,
                         rdfvalue.XSDString("truncate"))

            with zip.ZipFile.NewZipFile(resolver, Version(1, 1, "pyaff4"),
                                        self.filename_urn) as zip_file:
                self.volume_urn = zip_file.urn
                segment_urn = self.volume_urn.Append(
                    escaping.arnPathFragment_from_path(self.segment_name),
                    quote=False)

                # Create a member, flush it into the archive, then remove it.
                with zip_file.CreateMember(segment_urn) as segment:
                    segment.Write(self.data1)
                    segment.Flush()

                zip_file.RemoveMember(segment_urn)

        with data_store.MemoryDataStore() as resolver:
            resolver.Set(lexicon.transient_graph, self.filename_urn,
                         lexicon.AFF4_STREAM_WRITE_MODE,
                         rdfvalue.XSDString("append"))

            # Reopen in append mode: the removed member must not be present.
            with zip.ZipFile.NewZipFile(resolver, Version(1, 1, "pyaff4"),
                                        self.filename_urn) as zip_file:
                self.volume_urn = zip_file.urn
                segment_urn = self.volume_urn.Append(
                    escaping.arnPathFragment_from_path(self.segment_name),
                    quote=False)
                self.assertFalse(zip_file.ContainsMember(segment_urn))

        # 518 bytes is the expected on-disk size of the emptied container.
        self.assertEquals(518, os.stat(self.filename).st_size)
예제 #27
0
    def setUp(self):
        """Build a container with two map images: one written via
        SeekWrite/Write in random order, and one copied from a helper map
        via WriteStream."""
        version = container.Version(1, 1, "pyaff4")
        with data_store.MemoryDataStore() as resolver:
            resolver.Set(lexicon.transient_graph, self.filename_urn,
                         lexicon.AFF4_STREAM_WRITE_MODE,
                         rdfvalue.XSDString("truncate"))

            with zip.ZipFile.NewZipFile(resolver, version,
                                        self.filename_urn) as zip_file:
                self.volume_urn = zip_file.urn
                self.image_urn = self.volume_urn.Append(self.image_name)

                # Write Map image sequentially (Seek/Write method).
                with aff4_map.AFF4Map.NewAFF4Map(resolver, self.image_urn,
                                                 self.volume_urn) as image:
                    # Maps are written in random order.
                    image.SeekWrite(50)
                    image.Write(b"XX - This is the position.")

                    image.SeekWrite(0)
                    image.Write(b"00 - This is the position.")

                    # We can "overwrite" data by writing the same range again.
                    image.SeekWrite(50)
                    image.Write(b"50")

                # Test the Stream method.
                with resolver.CachePut(
                        aff4_file.AFF4MemoryStream(resolver)) as source:
                    # Fill it with data.
                    source.Write(b"AAAABBBBCCCCDDDDEEEEFFFFGGGGHHHH")

                    # Make a temporary map that defines our plan.
                    helper_map = aff4_map.AFF4Map(resolver)

                    # AddRange(map_offset, target_offset, length, target).
                    helper_map.AddRange(4, 0, 4, source.urn)  # 0000AAAA
                    helper_map.AddRange(0, 12, 4, source.urn)  # DDDDAAAA
                    helper_map.AddRange(12, 16, 4,
                                        source.urn)  # DDDDAAAA0000EEEE

                    image_urn_2 = self.volume_urn.Append(
                        self.image_name).Append("streamed")

                    with aff4_map.AFF4Map.NewAFF4Map(resolver, image_urn_2,
                                                     self.volume_urn) as image:

                        # Now we create the real map by copying the temporary
                        # map stream.
                        image.WriteStream(helper_map)
예제 #28
0
    def render_acquisition(self, renderer):
        """Write the acquisition into a freshly truncated AFF4 container."""
        with renderer.open(filename=self.destination, mode="w+b") as out_fd, \
                data_store.MemoryDataStore() as resolver:
            target_urn = rdfvalue.URN.FromFileName(out_fd.name)
            # Always start from an empty container file.
            resolver.Set(target_urn, lexicon.AFF4_STREAM_WRITE_MODE,
                         rdfvalue.XSDString("truncate"))

            with zip.ZipFile.NewZipFile(resolver, target_urn) as container:
                self.copy_physical_address_space(renderer, resolver,
                                                 container)

                # We only copy files if we are running on a raw device.
                if self.session.physical_address_space.volatile:
                    self.copy_page_file(renderer, resolver, container)
                    if self.also_files:
                        self.copy_files(renderer, resolver, container)
예제 #29
0
    def QuerySubjectPredicate(self, graph, subject, predicate):
        """Yield objects matching (subject, predicate): first those from the
        wrapped store, then matches from the backing HDT index.

        Literal objects are converted via the registered RDF type map
        (falling back to XSDString); everything else becomes a URN.
        """
        for o in super(HDTAssistedDataStore,
                       self).QuerySubjectPredicate(graph, subject, predicate):
            yield o

        # PEP 8: identity comparison for None. No HDT index -> parent only.
        if self.hdt is None:
            return

        # Transient-graph data is never stored in the HDT file.
        if graph == transient_graph:
            return

        if isinstance(subject, rdfvalue.URN):
            subject = subject.SerializeToString()
        else:
            subject = utils.SmartUnicode(subject)

        if isinstance(predicate, rdfvalue.URN):
            predicate = predicate.SerializeToString()
        else:
            predicate = utils.SmartUnicode(predicate)

        # An empty object pattern matches any object.
        (triples,
         cardinality) = self.hdt.search_triples(subject, predicate, "")

        for (s, p, o) in triples:
            if o.startswith("\""):
                # A typed literal of the form "value"^^<datatype>.
                # NOTE(review): assumes every literal carries a ^^datatype
                # suffix; a plain literal would raise ValueError here --
                # confirm the HDT data only contains typed literals.
                (v, t) = o.split("^^")
                v = v.replace("\"", "")
                t = t[1:len(t) - 1]

                datatype = rdflib.URIRef(t)
                if datatype in registry.RDF_TYPE_MAP:
                    o = registry.RDF_TYPE_MAP[datatype](v)
                else:
                    # Default to a string literal.
                    o = rdfvalue.XSDString(v)
            else:
                # Non-literals (<iri>, aff4:// and bare terms) all become
                # URNs. The original had three identical branches here;
                # they are collapsed into one.
                o = rdfvalue.URN(utils.SmartUnicode(o))

            yield o
예제 #30
0
    def testFileBackedStream(self):
        """Run the generic stream tests against a file-backed zip stream."""
        filename = tempfile.gettempdir() + "/test_filename.zip"
        fileURI = rdfvalue.URN.FromFileName(filename)

        try:
            with data_store.MemoryDataStore() as resolver:
                resolver.Set(lexicon.transient_graph, fileURI,
                             lexicon.AFF4_STREAM_WRITE_MODE,
                             rdfvalue.XSDString("truncate"))

                with resolver.AFF4FactoryOpen(fileURI) as file_stream:
                    self.streamTest(file_stream)
        except Exception:
            # BUG FIX: a bare "except:" also swallowed KeyboardInterrupt and
            # SystemExit; only ordinary errors should fail the test.
            traceback.print_exc()
            self.fail()

        finally:
            # Always remove the scratch file.
            os.unlink(filename)