Example #1
    def LoadFromTurtle(self, stream, volume_arn):
        data = streams.ReadAll(stream)
        g = rdflib.Graph()
        g.parse(data=data, format="turtle")

        for urn, attr, value in g:
            urn = utils.SmartUnicode(urn)
            attr = utils.SmartUnicode(attr)
            serialized_value = value

            if isinstance(value, rdflib.URIRef):
                value = rdfvalue.URN(utils.SmartUnicode(serialized_value))
            elif value.datatype in registry.RDF_TYPE_MAP:
                value = registry.RDF_TYPE_MAP[value.datatype](
                    serialized_value)

            else:
                # Default to a string literal.
                value = rdfvalue.XSDString(value)

            if (attr == rdfvalue.URN(lexicon.AFF4_TYPE)
                    and value == rdfvalue.URN(lexicon.AFF4_IMAGE_TYPE)):
                self.Add(lexicon.transient_graph, urn, lexicon.AFF4_STORED, volume_arn)
            self.Add(volume_arn, urn, attr, value)

        # look for the AFF4 namespace defined in the turtle
        for (_, b) in g.namespace_manager.namespaces():
            if (str(b) == lexicon.AFF4_NAMESPACE or
                str(b) == lexicon.AFF4_LEGACY_NAMESPACE):
                self.aff4NS = b
Example #2
    def NewAFF4Directory(cls, resolver, version, root_urn):
        result = AFF4Directory(resolver)
        result.version = version
        result.root_path = root_urn.ToFilename()

        mode = resolver.GetUnique(lexicon.transient_graph, root_urn,
                                  lexicon.AFF4_STREAM_WRITE_MODE)
        if mode == "truncate":
            aff4_utils.RemoveDirectory(result.root_path)

        if not (os.path.isdir(result.root_path)
                or os.path.isfile(result.root_path)):
            if mode == "truncate" or mode == "append":
                aff4_utils.MkDir(result.root_path)
            else:
                raise RuntimeError("Unknown mode")

        resolver.Set(lexicon.transient_graph, result.urn, lexicon.AFF4_TYPE,
                     rdfvalue.URN(lexicon.AFF4_DIRECTORY_TYPE))

        resolver.Set(lexicon.transient_graph, result.urn, lexicon.AFF4_STORED,
                     rdfvalue.URN(root_urn))

        result.LoadFromURN()

        return resolver.CachePut(result)
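NewAFF4Directory prepares the backing directory according to the stream write mode: "truncate" removes any existing tree, and both modes create the directory if it is absent. A minimal self-contained sketch of just that decision, using os/shutil instead of aff4_utils (function name is illustrative):

    import os
    import shutil

    def prepare_root(root_path, mode):
        # "truncate" starts from an empty directory; "append" reuses it.
        if mode == "truncate" and os.path.isdir(root_path):
            shutil.rmtree(root_path)
        if not os.path.exists(root_path):
            if mode in ("truncate", "append"):
                os.makedirs(root_path)
            else:
                raise RuntimeError("Unknown mode")
        return root_path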
Example #3
    def setUp(self):
        self.store = data_store.MemoryDataStore()
        self.store.Set(rdfvalue.URN("hello"), rdfvalue.URN("World"),
                       rdfvalue.XSDString("foo"))

        self.store.Set(rdfvalue.URN("hello"), rdfvalue.URN(lexicon.AFF4_TYPE),
                       rdfvalue.XSDString("bar"))
Example #4
    def testAppend(self):
        test = rdfvalue.URN("http://www.google.com")
        aff4volume = rdfvalue.URN("aff4://volumeguid/image/0000")

        self.assertEqual(
            aff4volume.Append("index").SerializeToString(),
            "aff4://volumeguid/image/0000/index")

        self.assertEqual(
            test.Append("foobar").SerializeToString(),
            "http://www.google.com/foobar")

        self.assertEqual(
            test.Append("/foobar").SerializeToString(),
            "http://www.google.com/foobar")

        self.assertEqual(
            test.Append("..").SerializeToString(), "http://www.google.com/")

        self.assertEqual(
            test.Append("../../../..").SerializeToString(),
            "http://www.google.com/")

        self.assertEqual(
            test.Append("aa/bb/../..").SerializeToString(),
            "http://www.google.com/")

        self.assertEqual(
            test.Append("aa//../c").SerializeToString(),
            "http://www.google.com/c")

        self.assertEqual(
            test.Append("aa///////////.///./c").SerializeToString(),
            "http://www.google.com/aa/c")
Example #5
    def copy_physical_address_space(self, resolver, volume):
        """Copies the physical address space to the output volume.

        The result is a map object.
        """
        image_urn = volume.urn.Append("PhysicalMemory")
        source = self.session.physical_address_space

        # Mark the stream as a physical memory stream.
        resolver.Set(image_urn, lexicon.AFF4_CATEGORY,
                     rdfvalue.URN(lexicon.AFF4_MEMORY_PHYSICAL))

        with volume.CreateMember(
                image_urn.Append("information.yaml")) as metadata_fd:
            metadata_fd.Write(yaml_utils.encode(self.create_metadata(source)))

        yield ("Imaging Physical Memory:\n", )

        # Use an AFF4Image for the actual storage.
        map_data = image_urn.Append("data")

        # Set the compression type on the storage stream.
        resolver.Set(map_data, lexicon.AFF4_IMAGE_COMPRESSION,
                     rdfvalue.URN(self.compression))

        with aff4_map.AFF4Map.NewAFF4Map(resolver, image_urn,
                                         volume.urn) as image_stream:
            total_length = self._WriteToTarget(resolver, source, image_stream)

        yield ("Wrote {0} mb of Physical Memory to {1}\n".format(
            total_length // 1024 // 1024, image_stream.urn), )
Example #6
    def LoadFromTurtle(self, stream):
        data = stream.read(1000000)
        g = rdflib.Graph()
        g.parse(data=data, format="turtle")

        for urn, attr, value in g:
            urn = rdfvalue.URN(str(urn))
            attr = rdfvalue.URN(str(attr))

            if isinstance(value, rdflib.URIRef):
                value = rdfvalue.URN(str(value))
            elif value.datatype in registry.RDF_TYPE_MAP:
                value = registry.RDF_TYPE_MAP[value.datatype](str(value))

            else:
                # Default to a string literal.
                value = rdfvalue.XSDString(value)

            self.Add(urn, attr, value)

        # look for the AFF4 namespace defined in the turtle
        for (_, b) in g.namespace_manager.namespaces():
            if (str(b) == lexicon.AFF4_NAMESPACE or
                    str(b) == lexicon.AFF4_LEGACY_NAMESPACE):
                self.aff4NS = b
Example #7
    def Set(self, subject, attribute, value):
        subject = rdfvalue.URN(subject)
        attribute = rdfvalue.URN(attribute)
        CHECK(isinstance(value, rdfvalue.RDFValue),
              "Value must be an RDFValue")

        self.store.setdefault(str(subject), {})[str(attribute)] = value
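Set keeps exactly one value per (subject, attribute) pair; a repeated call overwrites. A minimal self-contained sketch of the same setdefault layout, with plain strings standing in for the rdfvalue types (names are illustrative, not the pyaff4 API):

    store = {}

    def set_value(subject, attribute, value):
        # One nested dict per subject; a repeated attribute is overwritten.
        store.setdefault(subject, {})[attribute] = value

    set_value("aff4://hello", "aff4:type", "bar")
    set_value("aff4://hello", "aff4:type", "baz")
    assert store == {"aff4://hello": {"aff4:type": "baz"}}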
Example #8
    def copy_physical_address_space(self, renderer, resolver, volume):
        """Copies the physical address space to the output volume."""
        image_urn = volume.urn.Append("PhysicalMemory")
        source = self.session.physical_address_space

        # Mark the stream as a physical memory stream.
        resolver.Set(image_urn, lexicon.AFF4_CATEGORY,
                     rdfvalue.URN(lexicon.AFF4_MEMORY_PHYSICAL))

        if self.compression:
            storage_urn = image_urn.Append("data")
            resolver.Set(storage_urn, lexicon.AFF4_IMAGE_COMPRESSION,
                         rdfvalue.URN(self.compression))

        with volume.CreateMember(
                image_urn.Append("information.yaml")) as metadata_fd:

            metadata_fd.Write(yaml_utils.encode(self.create_metadata(source)))

        renderer.format("Imaging Physical Memory:\n")

        with aff4_map.AFF4Map.NewAFF4Map(resolver, image_urn,
                                         volume.urn) as image_stream:

            total = 0
            last_tick = time.time()

            for run in source.get_address_ranges():
                length = run.length
                offset = run.start

                image_stream.seek(offset)

                while length > 0:
                    to_read = min(length, self.BUFFERSIZE)
                    data = source.read(offset, to_read)

                    image_stream.write(data)
                    now = time.time()

                    read_len = len(data)
                    if now > last_tick:
                        rate = read_len / (now - last_tick) / 1e6
                    else:
                        rate = 0

                    self.session.report_progress(
                        "%s: Wrote %#x (%d mb total) (%02.2d Mb/s)", source,
                        offset, total / 1e6, rate)

                    length -= read_len
                    offset += read_len
                    total += read_len
                    last_tick = now

        resolver.Close(image_stream)
        renderer.format("Wrote {0} mb of Physical Memory to {1}\n",
                        total / 1024 / 1024, image_stream.urn)
Example #9
    def AFF4FactoryOpen(self, urn, version=None):
        urn = rdfvalue.URN(urn)

        # Is the object cached?
        cached_obj = self.ObjectCache.Get(urn)
        if cached_obj:
            cached_obj.Prepare()
            #LOGGER.debug("AFF4FactoryOpen (Cached): %s" % urn)
            return cached_obj

        if self.streamFactory.isSymbolicStream(urn):
            obj = self.streamFactory.createSymbolic(urn)
        elif urn.SerializeToString().startswith("aff4:sha512"):
            # Don't use the cache for these as they are low cost
            # and they will push aside heavier weight things
            #bytestream_reference_id = self.Get(urn, urn, rdfvalue.URN(lexicon.standard.dataStream))
            #cached_obj = self.ObjectCache.Get(bytestream_reference_id)
            #if cached_obj:
            #    cached_obj.Prepare()
            #    return cached_obj
            bytestream_reference_id = self.GetUnique(lexicon.any, urn, rdfvalue.URN(lexicon.standard.dataStream))
            return aff4_map.ByteRangeARN(version, resolver=self, urn=bytestream_reference_id)
        elif isByteRangeARN(urn.SerializeToString()):
            return aff4_map.ByteRangeARN(version, resolver=self, urn=urn)
        else:
            uri_types = self.Get(lexicon.any, urn, rdfvalue.URN(lexicon.AFF4_TYPE))

            handler = None

            # TODO: this could be cleaner. RDF properties have multiple values

            if isinstance(uri_types, (list, types.GeneratorType)):
                for typ in uri_types:
                    handler = registry.AFF4_TYPE_MAP.get(typ)
                    if handler is not None:
                        break
            else:
                handler = registry.AFF4_TYPE_MAP.get(uri_types)

            if handler is None:
                # Try to instantiate the handler based on the URN scheme alone.
                components = urn.Parse()
                handler = registry.AFF4_TYPE_MAP.get(components.scheme)

            if handler is None:
                raise IOError("Unable to create object %s" % urn)

            obj = handler(resolver=self, urn=urn, version=version)
            obj.LoadFromURN()

        # Cache the object for next time.
        self.ObjectCache.Put(obj, True)

        #LOGGER.debug("AFF4FactoryOpen (new instance): %s" % urn)
        obj.Prepare()
        return obj
Example #10
    def setUp(self):
        self.hello_urn = rdfvalue.URN("aff4://hello")
        self.store = data_store.MemoryDataStore()
        self.store.Set(None, self.hello_urn,
                       rdfvalue.URN(lexicon.AFF4_IMAGE_COMPRESSION_SNAPPY),
                       rdfvalue.XSDString("foo"))

        self.store.Set(None, self.hello_urn, rdfvalue.URN(lexicon.AFF4_TYPE),
                       rdfvalue.XSDString("bar"))
Example #11
    def store(self, resolver):
        resolver.Set(self.urn, rdfvalue.URN(lexicon.AFF4_STREAM_SIZE),
                     rdfvalue.XSDInteger(self.length))
        resolver.Set(self.urn, rdfvalue.URN(lexicon.standard11.lastWritten),
                     rdfvalue.XSDDateTime(self.lastWritten))
        resolver.Set(self.urn, rdfvalue.URN(lexicon.standard11.lastAccessed),
                     rdfvalue.XSDDateTime(self.lastAccessed))
        resolver.Set(self.urn, rdfvalue.URN(lexicon.standard11.birthTime),
                     rdfvalue.XSDDateTime(self.birthTime))
Example #12
    def LoadFromURN(self):
        self.storage = self.resolver.Get(self.urn, lexicon.AFF4_STORED)
        if not self.storage:
            LOGGER.error("Unable to find storage for AFF4Directory %s",
                         self.urn)
            raise IOError("NOT_FOUND")

        # The actual filename for the root directory.
        self.root_path = self.storage.ToFilename()

        try:
            # We need to get the URN of the container before we can process
            # anything.
            with self.resolver.AFF4FactoryOpen(
                    self.storage.Append(
                        lexicon.AFF4_CONTAINER_DESCRIPTION)) as desc:
                if desc:
                    urn_string = desc.Read(1000)

                    if (urn_string
                            and self.urn.SerializeToString() != urn_string):
                        self.resolver.DeleteSubject(self.urn)
                        self.urn.Set(urn_string)

                    # Set these triples with the new URN so we know how to open
                    # it.
                    self.resolver.Set(
                        self.urn, lexicon.AFF4_TYPE,
                        rdfvalue.URN(lexicon.AFF4_DIRECTORY_TYPE))

                    self.resolver.Set(self.urn, lexicon.AFF4_STORED,
                                      rdfvalue.URN(self.storage))

                    LOGGER.info("AFF4Directory volume found: %s", self.urn)

            # Try to load the RDF metadata file from the storage.
            with self.resolver.AFF4FactoryOpen(
                    self.storage.Append(
                        lexicon.AFF4_CONTAINER_INFO_TURTLE)) as turtle_stream:
                if turtle_stream:
                    self.resolver.LoadFromTurtle(turtle_stream)

                    # Find all the contained objects and adjust their filenames.
                    for subject in self.resolver.SelectSubjectsByPrefix(
                            self.urn):

                        child_filename = self.resolver.Get(
                            subject, lexicon.AFF4_DIRECTORY_CHILD_FILENAME)
                        if child_filename:
                            self.resolver.Set(
                                subject, lexicon.AFF4_FILE_NAME,
                                rdfvalue.XSDString(
                                    self.root_path + os.sep +
                                    child_filename.SerializeToString()))

        except IOError:
            pass
Example #13
    def NewZipFile(resolver, backing_store_urn):
        result = ZipFile(resolver, urn=None)

        resolver.Set(result.urn, lexicon.AFF4_TYPE,
                     rdfvalue.URN(lexicon.AFF4_ZIP_TYPE))

        resolver.Set(result.urn, lexicon.AFF4_STORED,
                     rdfvalue.URN(backing_store_urn))

        return resolver.AFF4FactoryOpen(result.urn)
Example #14
def ingestZipfile(container_name, zipfiles, append, check_bytes):
    # TODO: check that the path exists
    start = time.time()
    with data_store.MemoryDataStore() as resolver:

        container_urn = rdfvalue.URN.FromFileName(container_name)
        urn = None

        if not os.path.exists(container_name):
            volume = container.Container.createURN(resolver, container_urn)
            print("Creating AFF4Container: file://%s <%s>" % (container_name, volume.urn))
        else:
            volume = container.Container.openURNtoContainer(container_urn, mode="+")
            print("Appending to AFF4Container: file://%s <%s>" % (container_name, volume.urn))

        resolver = volume.resolver

        with volume as volume:
            for zipfile in zipfiles:
                basefilename = os.path.basename(zipfile)
                if basefilename.endswith(".bag.zip"):
                    basefilename = basefilename[0:len(basefilename) - len(".bag.zip")]

                filename_arn = rdfvalue.URN.FromFileName(zipfile)

                # the following coaxes our ZIP implementation to treat this file
                # as a regular old zip
                result = zip.BasicZipFile(resolver, urn=None, version=version.basic_zip)
                resolver.Set(lexicon.transient_graph, result.urn, lexicon.AFF4_TYPE, rdfvalue.URN("StandardZip"))
                resolver.Set(lexicon.transient_graph, result.urn, lexicon.AFF4_STORED, rdfvalue.URN(filename_arn))

                with resolver.AFF4FactoryOpen(result.urn, version=version.basic_zip) as zip_file:
                    for member in zip_file.members:
                        info = zip_file.members[member]
                        pathname = basefilename + member.SerializeToString()[len(result.urn.SerializeToString()):]
                        print(pathname)

                        with resolver.AFF4FactoryOpen(member, version=version.aff4v10) as src:

                            hasher = linear_hasher.StreamHasher(src, [lexicon.HASH_SHA1, lexicon.HASH_MD5])
                            if volume.containsLogicalImage(pathname):
                                print("\tCollision: this ARN is already present in this volume.")
                                continue

                            urn = volume.writeLogicalStreamRabinHashBased(pathname, hasher, info.file_size, check_bytes)
                            #fsmeta.urn = urn
                            #fsmeta.store(resolver)
                            for h in hasher.hashes:
                                hh = hashes.newImmutableHash(h.hexdigest(), hasher.hashToType[h])
                                resolver.Add(container_urn, urn, rdfvalue.URN(lexicon.standard.hash), hh)

        print("Finished in %d (s)" % int(time.time() - start))
        return urn
Example #15
    def testCreateAndAppendSinglePathImage(self):
        try:
            try:
                os.unlink(self.containerName)
            except OSError:
                pass

            container_urn = rdfvalue.URN.FromFileName(self.containerName)
            resolver = data_store.MemoryDataStore()
            urn = None

            frag1path = os.path.join(self.testImagesPath, "paper-hash_based_disk_imaging_using_aff4.pdf.frag.1")

            with container.Container.createURN(resolver, container_urn) as volume:
                with open(frag1path, "rb") as src:
                    stream = linear_hasher.StreamHasher(src, [lexicon.HASH_SHA1])
                    urn = volume.writeLogicalStreamHashBased(frag1path, stream, 32768, False)
                    for h in stream.hashes:
                        hh = hashes.newImmutableHash(h.hexdigest(), stream.hashToType[h])
                        self.assertEqual("deb3fa3b60c6107aceb97f684899387c78587eae", hh.value)
                        resolver.Add(volume.urn, urn, rdfvalue.URN(lexicon.standard.hash), hh)

            frag2path = os.path.join(self.testImagesPath, "paper-hash_based_disk_imaging_using_aff4.pdf.frag.2")

            with container.Container.openURNtoContainer(container_urn, mode="+") as volume:
                with open(frag2path, "rb") as src:
                    stream = linear_hasher.StreamHasher(src, [lexicon.HASH_SHA1, lexicon.HASH_MD5 ])
                    urn = volume.writeLogicalStreamHashBased(frag2path, stream, 2*32768, False)
                    for h in stream.hashes:
                        hh = hashes.newImmutableHash(h.hexdigest(), stream.hashToType[h])
                        resolver.Add(volume.urn, urn, rdfvalue.URN(lexicon.standard.hash), hh)

            with container.Container.openURNtoContainer(container_urn) as volume:
                images = list(volume.images())
                images = sorted(images, key=lambda x: utils.SmartUnicode(x.pathName), reverse=False)
                self.assertEqual(2, len(images), "Only two logical images")

                fragmentA = escaping.member_name_for_urn(images[0].urn.value, volume.version, base_urn=volume.urn, use_unicode=True)
                fragmentB = escaping.member_name_for_urn(images[1].urn.value, volume.version, base_urn=volume.urn, use_unicode=True)

                self.assertTrue(fragmentA.endswith("paper-hash_based_disk_imaging_using_aff4.pdf.frag.1"))
                self.assertTrue(fragmentB.endswith("paper-hash_based_disk_imaging_using_aff4.pdf.frag.2"))

                hasher = linear_hasher.LinearHasher2(volume.resolver, self)
                for image in volume.images():
                    print("\t%s <%s>" % (image.name(), image.urn))
                    hasher.hash(image)

        except Exception:
            traceback.print_exc()
            self.fail()

        finally:
            #os.unlink(containerName)
            pass
Example #16
    def Set(self, graph, subject, attribute, value):
        subject = rdfvalue.URN(subject).SerializeToString()
        attribute = rdfvalue.URN(attribute).SerializeToString()
        CHECK(isinstance(value, rdfvalue.RDFValue), "Value must be an RDFValue")

        if graph == transient_graph:
            store = self.transient_store
        else:
            store = self.store

        store.setdefault(subject, {})[attribute] = value
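This overload routes each write to one of two backing dicts based on the graph argument. A compact self-contained sketch of the routing (class and sentinel names are illustrative, not the pyaff4 API):

    transient_graph = object()  # sentinel, standing in for lexicon.transient_graph

    class TwoGraphStore(object):
        def __init__(self):
            self.store = {}            # triples persisted with the container
            self.transient_store = {}  # bookkeeping triples, kept separately

        def set_value(self, graph, subject, attribute, value):
            target = self.transient_store if graph is transient_graph else self.store
            target.setdefault(subject, {})[attribute] = value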
Example #17
    def QueryPredicate(self, predicate):
        """Yields all subjects which have this predicate."""
        predicate = utils.SmartStr(predicate)
        for subject, data in six.iteritems(self.store):
            for pred, values in six.iteritems(data):
                if pred == predicate:
                    if not isinstance(values, list):
                        values = [values]
                    for value in values:
                        yield (rdfvalue.URN().UnSerializeFromString(subject),
                               rdfvalue.URN().UnSerializeFromString(predicate),
                               value)
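Note how a single stored value is wrapped in a one-element list before yielding, so callers always receive (subject, predicate, value) triples. An illustrative run over the same dict layout, with plain strings instead of RDF values (names are made up for the sketch):

    store = {"aff4://hello": {"aff4:type": ["bar", "baz"], "world": "foo"}}

    def query_predicate(predicate):
        for subject, data in store.items():
            values = data.get(predicate)
            if values is None:
                continue
            if not isinstance(values, list):
                values = [values]  # normalize single values
            for value in values:
                yield subject, predicate, value

    assert list(query_predicate("world")) == [("aff4://hello", "world", "foo")]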
Example #18
    def NewZipFile(resolver, vers, backing_store_urn):
        rdfvalue.AssertURN(backing_store_urn)
        if vers is None:
            vers = Version(0, 1, "pyaff4")
        result = ZipFile(resolver, urn=None, version=vers)

        resolver.Set(lexicon.transient_graph, result.urn, lexicon.AFF4_TYPE,
                     rdfvalue.URN(lexicon.AFF4_ZIP_TYPE))

        resolver.Set(lexicon.transient_graph, result.urn, lexicon.AFF4_STORED,
                     rdfvalue.URN(backing_store_urn))

        return resolver.AFF4FactoryOpen(result.urn, version=vers)
Example #19
    def NewAFF4Image(resolver, image_urn, volume_urn):
        with resolver.AFF4FactoryOpen(volume_urn) as volume:
            # Inform the volume that we have a new image stream contained within
            # it.
            volume.children.add(image_urn)

            resolver.Set(image_urn, lexicon.AFF4_TYPE,
                         rdfvalue.URN(lexicon.AFF4_IMAGE_TYPE))

            resolver.Set(image_urn, lexicon.AFF4_STORED,
                         rdfvalue.URN(volume_urn))

            return resolver.AFF4FactoryOpen(image_urn)
Example #20
    def _write_metadata(self):
        self.resolver.Set(self.urn, lexicon.AFF4_TYPE,
                          rdfvalue.URN(lexicon.AFF4_IMAGE_TYPE))

        self.resolver.Set(self.urn, lexicon.AFF4_IMAGE_CHUNK_SIZE,
                          rdfvalue.XSDInteger(self.chunk_size))

        self.resolver.Set(self.urn, lexicon.AFF4_IMAGE_CHUNKS_PER_SEGMENT,
                          rdfvalue.XSDInteger(self.chunks_per_segment))

        self.resolver.Set(self.urn, lexicon.AFF4_STREAM_SIZE,
                          rdfvalue.XSDInteger(self.Size()))

        self.resolver.Set(self.urn, lexicon.AFF4_IMAGE_COMPRESSION,
                          rdfvalue.URN(self.compression))
Example #21
    def Add(self, subject, attribute, value):
        subject = rdfvalue.URN(subject).SerializeToString()
        attribute = rdfvalue.URN(attribute).SerializeToString()
        CHECK(isinstance(value, rdfvalue.RDFValue), "Value must be an RDFValue")

        if attribute not in self.store.setdefault(
                subject, collections.OrderedDict()):
            self.store.get(subject)[attribute] = value
        else:
            oldvalue = self.store.get(subject)[attribute]
            if not isinstance(oldvalue, list):
                self.store.get(subject)[attribute] = [oldvalue, value]
            else:
                self.store.get(subject)[attribute].append(value)
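Unlike Set, Add accumulates: the first value is stored bare and a repeated attribute is promoted to a list. A self-contained sketch of that promotion (function and variable names are illustrative, not the pyaff4 API):

    import collections

    store = {}

    def add_value(subject, attribute, value):
        attrs = store.setdefault(subject, collections.OrderedDict())
        if attribute not in attrs:
            attrs[attribute] = value              # first value stays bare
        elif isinstance(attrs[attribute], list):
            attrs[attribute].append(value)        # third and later values
        else:
            attrs[attribute] = [attrs[attribute], value]  # promote on second

    add_value("s", "p", "a")
    add_value("s", "p", "b")
    assert store["s"]["p"] == ["a", "b"]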
Example #22
    def NewAFF4Map(resolver, image_urn, volume_urn):
        with resolver.AFF4FactoryOpen(volume_urn) as volume:
            # Inform the volume that we have a new image stream contained within
            # it.
            volume.children.add(image_urn)

            resolver.Set(volume_urn, image_urn, lexicon.AFF4_TYPE,
                         rdfvalue.URN(lexicon.AFF4_MAP_TYPE))

            resolver.Set(lexicon.transient_graph, image_urn,
                         lexicon.AFF4_STORED, rdfvalue.URN(volume_urn))

            res = resolver.AFF4FactoryOpen(image_urn)
            res.properties.writable = volume.properties.writable
            return res
Example #23
def addPathNames(container_name, pathnames, recursive, append, hashbased):
    with data_store.MemoryDataStore() as resolver:
        container_urn = rdfvalue.URN.FromFileName(container_name)
        urn = None

        if not append:
            volume = container.Container.createURN(resolver, container_urn)
            print("Creating AFF4Container: file://%s <%s>" % (container_name, volume.urn))
        else:
            volume = container.Container.openURNtoContainer(container_urn, mode="+", resolver=resolver)
            print("Appending to AFF4Container: file://%s <%s>" % (container_name, volume.urn))

        with volume as volume:
            for pathname in pathnames:
                if not os.path.exists(pathname):
                    print("Path %s not found. Skipping." % pathname)
                    continue
                pathname = utils.SmartUnicode(pathname)
                print("\tAdding: %s" % pathname)
                fsmeta = logical.FSMetadata.create(pathname)
                if os.path.isdir(pathname):
                    image_urn = None
                    if volume.isAFF4Collision(pathname):
                        image_urn = rdfvalue.URN("aff4://%s" % uuid.uuid4())
                    else:
                        image_urn = volume.urn.Append(escaping.arnPathFragment_from_path(pathname), quote=False)

                    fsmeta.urn = image_urn
                    fsmeta.store(resolver)
                    resolver.Set(volume.urn, image_urn, rdfvalue.URN(lexicon.standard11.pathName), rdfvalue.XSDString(pathname))
                    resolver.Add(volume.urn, image_urn, rdfvalue.URN(lexicon.AFF4_TYPE), rdfvalue.URN(lexicon.standard11.FolderImage))
                    resolver.Add(volume.urn, image_urn, rdfvalue.URN(lexicon.AFF4_TYPE), rdfvalue.URN(lexicon.standard.Image))
                    if recursive:
                        for child in os.listdir(pathname):
                            pathnames.append(os.path.join(pathname, child))
                else:
                    with open(pathname, "rb") as src:
                        hasher = linear_hasher.StreamHasher(src, [lexicon.HASH_SHA1, lexicon.HASH_MD5])
                        if not hashbased:
                            urn = volume.writeLogicalStream(pathname, hasher, fsmeta.length)
                        else:
                            urn = volume.writeLogicalStreamRabinHashBased(pathname, hasher, fsmeta.length)
                        fsmeta.urn = urn
                        fsmeta.store(resolver)
                        for h in hasher.hashes:
                            hh = hashes.newImmutableHash(h.hexdigest(), hasher.hashToType[h])
                            resolver.Add(urn, urn, rdfvalue.URN(lexicon.standard.hash), hh)
        return urn
Example #24
    def CreateZipSegment(self, filename):
        self.MarkDirty()
        segment_urn = aff4_utils.urn_from_member_name(filename, self.urn)

        # Is it in the cache?
        res = self.resolver.CacheGet(segment_urn)
        if res:
            return res

        self.resolver.Set(segment_urn, lexicon.AFF4_TYPE,
                          rdfvalue.URN(lexicon.AFF4_ZIP_SEGMENT_TYPE))

        self.resolver.Set(segment_urn, lexicon.AFF4_STORED, self.urn)

        #  Keep track of all the segments we issue.
        self.children.add(segment_urn)

        result = ZipFileSegment(resolver=self.resolver, urn=segment_urn)
        result.LoadFromZipFile(self)

        LOGGER.info("Creating ZipFileSegment %s",
                    result.urn.SerializeToString())

        # Add the new object to the object cache.
        return self.resolver.CachePut(result)
Example #25
    def collect(self):
        """Render a detailed description of the contents of an AFF4 volume."""
        volume_urn = rdfvalue.URN(self.plugin_args.volume)

        with self.credential_manager, self._get_aff4_volume(
                self.resolver, volume_urn, "Reading") as volume:
            if self.plugin_args.long:
                subjects = self.resolver.QuerySubject(
                    self.plugin_args.regex.pattern)
            else:
                subjects = self.interesting_streams(volume)

            for subject in sorted(subjects):
                urn = str(subject)
                filename = None
                if (self.resolver.Get(subject, lexicon.AFF4_CATEGORY) ==
                        lexicon.AFF4_MEMORY_PHYSICAL):
                    filename = "Physical Memory"
                else:
                    filename = self.resolver.Get(
                        subject, lexicon.AFF4_STREAM_ORIGINAL_FILENAME)

                if not filename:
                    filename = volume.urn.RelativePath(urn)

                type = str(self.resolver.Get(subject,
                                             lexicon.AFF4_TYPE)).split("#")[-1]

                size = self.resolver.Get(subject, lexicon.AFF4_STREAM_SIZE)
                if size is None and filename == "Physical Memory":
                    with self.resolver.AFF4FactoryOpen(urn) as fd:
                        last_range = fd.GetRanges()[-1]
                        size = last_range.map_offset + last_range.length

                yield (size, type, filename, urn)
Example #26
    def CreateMember(self, child_urn):
        # Check that child is a relative path in our URN.
        relative_path = self.urn.RelativePath(child_urn)
        if relative_path == child_urn.SerializeToString():
            raise IOError("Child URN is not within container URN.")

        # Use this filename. Note that since filesystems can not typically
        # represent files and directories as the same path component we can not
        # allow slashes in the filename. Otherwise we will fail to create
        # e.g. stream/0000000 and stream/0000000/index.
        filename = aff4_utils.member_name_for_urn(child_urn,
                                                  self.urn,
                                                  slash_ok=False)

        # We are allowed to create any files inside the directory volume.
        self.resolver.Set(child_urn, lexicon.AFF4_TYPE,
                          rdfvalue.URN(lexicon.AFF4_FILE_TYPE))
        self.resolver.Set(child_urn, lexicon.AFF4_STREAM_WRITE_MODE,
                          rdfvalue.XSDString("truncate"))
        self.resolver.Set(child_urn, lexicon.AFF4_DIRECTORY_CHILD_FILENAME,
                          rdfvalue.XSDString(filename))

        # Store the member inside our storage location.
        self.resolver.Set(
            child_urn, lexicon.AFF4_FILE_NAME,
            rdfvalue.XSDString(self.root_path + os.sep + filename))

        result = self.resolver.AFF4FactoryOpen(child_urn)
        self.MarkDirty()
        self.children.add(child_urn)

        return result
Example #27
    def reloadBevy(self, bevy_id):
        bevy_urn = self.urn.Append("%08d" % bevy_id)
        bevy_index_urn = rdfvalue.URN("%s.index" % bevy_urn)
        if LOGGER.isEnabledFor(logging.INFO):
            LOGGER.info("Reload Bevy %s", bevy_urn)
        chunks = []

        with self.resolver.AFF4FactoryOpen(bevy_urn,
                                           version=self.version) as bevy:
            bevy_index = self._parse_bevy_index(bevy)
            for i in range(0, len(bevy_index)):
                off, sz = bevy_index[i]
                bevy.SeekRead(off, 0)
                chunk = bevy.Read(sz)
                chunks.append(self.onChunkLoad(chunk, bevy_id, i))

                # trim the chunk if it is the final one and it exceeds the size of the stream
                endOfChunkAddress = (bevy_id * self.chunks_per_segment + i +
                                     1) * self.chunk_size
                if endOfChunkAddress > self.size:
                    toKeep = self.chunk_size - (endOfChunkAddress - self.size)
                    chunk = chunks[i][0:toKeep]
                    chunks[i] = chunk
                    self.cache[i] = chunk
                    bevy_index = bevy_index[0:i + 1]
                    break
        self.bevy = chunks
        self.bevy_index = bevy_index
        self.bevy_length = len(bevy_index)
        self.bevy_number = bevy_id
        self.bevy_is_loaded_from_disk = True
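The trimming logic hinges on the end offset of chunk i within bevy bevy_id, computed as (bevy_id * chunks_per_segment + i + 1) * chunk_size. A worked instance of that arithmetic with hypothetical geometry:

    # Hypothetical geometry: 32 KiB chunks, 1024 chunks per bevy.
    chunk_size, chunks_per_segment = 32 * 1024, 1024
    bevy_id, i = 2, 5

    end_of_chunk = (bevy_id * chunks_per_segment + i + 1) * chunk_size
    assert end_of_chunk == 2054 * chunk_size  # byte just past global chunk 2053

    # If the stream ends inside this chunk, keep only the bytes up to size.
    size = end_of_chunk - 100
    to_keep = chunk_size - (end_of_chunk - size)
    assert to_keep == chunk_size - 100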
Example #28
    def LoadFromURN(self):
        map_urn = self.urn.Append("map")
        map_idx_urn = self.urn.Append("idx")

        # Parse the map out of the map stream. If the stream does not exist yet
        # we just start with an empty map.
        try:
            with self.resolver.AFF4FactoryOpen(map_idx_urn) as map_idx:
                self.targets = [
                    rdfvalue.URN(utils.SmartUnicode(x))
                    for x in map_idx.Read(map_idx.Size()).splitlines()
                ]

            with self.resolver.AFF4FactoryOpen(map_urn) as map_stream:
                read_length = struct.calcsize(Range.format_str)
                while True:
                    data = map_stream.Read(read_length)
                    if not data:
                        break
                    rng = self.deserializeMapPoint(data)
                    if rng.length > 0:
                        self.tree.addi(rng.map_offset, rng.map_end, rng)

        except IOError:
            pass
Example #29
    def AFF4FactoryOpen(self, urn):
        urn = rdfvalue.URN(urn)

        # Is the object cached?
        cached_obj = self.ObjectCache.Get(urn)
        if cached_obj:
            cached_obj.Prepare()
            LOGGER.debug("AFF4FactoryOpen (Cached): %s", urn)
            return cached_obj

        type_urn = self.Get(urn, lexicon.AFF4_TYPE)
        handler = registry.AFF4_TYPE_MAP.get(type_urn)
        if handler is None:
            # Try to instantiate the handler based on the URN scheme alone.
            components = urn.Parse()
            handler = registry.AFF4_TYPE_MAP.get(components.scheme)

        if handler is None:
            raise IOError("Unable to create object %s" % urn)

        obj = handler(resolver=self, urn=urn)
        obj.LoadFromURN()

        # Cache the object for next time.
        self.ObjectCache.Put(obj, True)

        LOGGER.debug("AFF4FactoryOpen (new instance): %s", urn)
        obj.Prepare()
        return obj
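The factory dispatches on the stored aff4:type attribute and falls back to the URN scheme when no type is registered. A self-contained sketch of that two-stage lookup (the dict contents and names here are illustrative, not the real registry):

    from urllib.parse import urlparse

    AFF4_TYPE_MAP = {"aff4:zip_type": "ZipFile", "file": "FileBackedObject"}

    def lookup_handler(type_urn, urn):
        handler = AFF4_TYPE_MAP.get(type_urn)  # stage 1: stored aff4:type
        if handler is None:
            # stage 2: fall back to the URN scheme (e.g. "file").
            handler = AFF4_TYPE_MAP.get(urlparse(urn).scheme)
        if handler is None:
            raise IOError("Unable to create object %s" % urn)
        return handler

    assert lookup_handler(None, "file:///tmp/x") == "FileBackedObject"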
Example #30
    def readSegment(self, parentURI, subSegment):
        parentURI = rdfvalue.URN(parentURI)
        segment_uri = parentURI.Append(subSegment)

        with self.resolver.AFF4FactoryOpen(segment_uri) as segment:
            data = segment.Read(segment.Size())
            return data