Example #1
    def open(filename):
        try:
            cached = localcache[filename]
            return cached
        except KeyError:  # not in the cache yet; open and parse the container
            lex = Container.identify(filename)
            resolver = data_store.MemoryDataStore(lex)
            with zip.ZipFile.NewZipFile(resolver, filename) as zip_file:
                if lex == lexicon.standard:
                    image = next(resolver.QueryPredicateObject(
                        lexicon.AFF4_TYPE, lex.Image))

                    datastreams = list(
                        resolver.QuerySubjectPredicate(image, lex.dataStream))

                    for stream in datastreams:
                        if lex.map in resolver.QuerySubjectPredicate(
                                stream, lexicon.AFF4_TYPE):
                            res = resolver.AFF4FactoryOpen(stream)
                            localcache[filename] = res
                            res.parent = aff4.Image(resolver, urn=image)
                            return res

                elif lex == lexicon.scudette:
                    m = next(resolver.QueryPredicateObject(
                        lexicon.AFF4_TYPE, lex.map))
                    cat = next(resolver.QuerySubjectPredicate(
                        m, lex.category))
                    if cat == lex.memoryPhysical:
                        res = resolver.AFF4FactoryOpen(m)
                        localcache[filename] = res
                        res.parent = aff4.Image(resolver, urn=m)

                        legacyYamlInfoURI = res.urn.Append("information.yaml")
                        with resolver.AFF4FactoryOpen(legacyYamlInfoURI) as fd:
                            txt = fd.read(10000000)
                            dt = yaml.safe_load(txt)
                            print(txt)
                            try:
                                CR3 = dt["Registers"]["CR3"]
                                resolver.Add(
                                    res.parent.urn, lexicon.standard.
                                    memoryPageTableEntryOffset,
                                    rdfvalue.XSDInteger(CR3))
                                kaslr_slide = dt["kaslr_slide"]
                                resolver.Add(res.parent.urn,
                                             lexicon.standard.OSXKALSRSlide,
                                             rdfvalue.XSDInteger(kaslr_slide))

                            except (KeyError, TypeError):
                                # optional legacy fields; ignore if absent
                                pass
                        return res
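For orientation, a minimal caller sketch follows. It assumes pyaff4 is installed, that the open() above is exposed as Container.open(), and a hypothetical image path; the returned object is the map stream, with its parent Image attached as shown above.

    from pyaff4.container import Container

    # Hypothetical path; open() returns the AFF4 map stream and caches it.
    stream = Container.open("/evidence/memory.aff4")
    print(stream.parent.urn)    # the aff4.Image attached by open()
    header = stream.Read(4096)  # AFF4 streams expose Read(), as used below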
Example #2
    def _write_metadata(self):
        self.resolver.Set(self.urn, lexicon.AFF4_TYPE,
                          rdfvalue.URN(lexicon.AFF4_IMAGE_TYPE))

        self.resolver.Set(self.urn, lexicon.AFF4_IMAGE_CHUNK_SIZE,
                          rdfvalue.XSDInteger(self.chunk_size))

        self.resolver.Set(self.urn, lexicon.AFF4_IMAGE_CHUNKS_PER_SEGMENT,
                          rdfvalue.XSDInteger(self.chunks_per_segment))

        self.resolver.Set(self.urn, lexicon.AFF4_STREAM_SIZE,
                          rdfvalue.XSDInteger(self.Size()))

        self.resolver.Set(self.urn, lexicon.AFF4_IMAGE_COMPRESSION,
                          rdfvalue.URN(self.compression))
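Each image parameter above is persisted as one RDF triple. A self-contained sketch of the same three-argument Set call, assuming pyaff4's MemoryDataStore and a made-up subject URN:

    from pyaff4 import data_store, lexicon, rdfvalue

    resolver = data_store.MemoryDataStore(lexicon.standard)
    subject = rdfvalue.URN("aff4://example-image")  # made-up URN
    resolver.Set(subject, lexicon.AFF4_IMAGE_CHUNK_SIZE,
                 rdfvalue.XSDInteger(32 * 1024))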
Example #3
    def _write_metadata(self):
        volume_urn = self.resolver.GetUnique(lexicon.transient_graph, self.urn,
                                             lexicon.AFF4_STORED)
        self.resolver.Add(volume_urn, self.urn, lexicon.AFF4_TYPE,
                          rdfvalue.URN(lexicon.AFF4_IMAGE_TYPE))

        self.resolver.Set(volume_urn, self.urn, lexicon.AFF4_IMAGE_CHUNK_SIZE,
                          rdfvalue.XSDInteger(self.chunk_size))

        self.resolver.Set(volume_urn, self.urn,
                          lexicon.AFF4_IMAGE_CHUNKS_PER_SEGMENT,
                          rdfvalue.XSDInteger(self.chunks_per_segment))

        self.resolver.Set(volume_urn, self.urn, lexicon.AFF4_STREAM_SIZE,
                          rdfvalue.XSDInteger(self.Size()))

        self.resolver.Set(volume_urn, self.urn, lexicon.AFF4_IMAGE_COMPRESSION,
                          rdfvalue.URN(self.compression))
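This variant differs from Example #2 in that the data store is graph-aware: each call names the owning volume's graph first, and the type triple uses Add, presumably so a subject can carry several types. A sketch of the four-argument shape, assuming a pyaff4 version with this graph-aware store and made-up URNs:

    from pyaff4 import data_store, lexicon, rdfvalue

    resolver = data_store.MemoryDataStore(lexicon.standard)
    volume = rdfvalue.URN("aff4://volume-0")  # graph: the containing volume
    image = rdfvalue.URN("aff4://image-0")    # subject
    resolver.Add(volume, image, lexicon.AFF4_TYPE,
                 rdfvalue.URN(lexicon.AFF4_IMAGE_TYPE))
    resolver.Set(volume, image, lexicon.AFF4_STREAM_SIZE,
                 rdfvalue.XSDInteger(1024))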
Example #4
 def store(self, resolver):
     resolver.Set(self.urn, rdfvalue.URN(lexicon.AFF4_STREAM_SIZE),
                  rdfvalue.XSDInteger(self.length))
     resolver.Set(self.urn, rdfvalue.URN(lexicon.standard11.lastWritten),
                  rdfvalue.XSDDateTime(self.lastWritten))
     resolver.Set(self.urn, rdfvalue.URN(lexicon.standard11.lastAccessed),
                  rdfvalue.XSDDateTime(self.lastAccessed))
     resolver.Set(self.urn, rdfvalue.URN(lexicon.standard11.birthTime),
                  rdfvalue.XSDDateTime(self.birthTime))
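Alongside the XSDInteger size, this writer stores three timestamps. A small sketch of constructing both value types, assuming (from the attributes above) that XSDDateTime wraps a Python datetime:

    from datetime import datetime, timezone
    from pyaff4 import rdfvalue

    size = rdfvalue.XSDInteger(1440)
    # Assumption: XSDDateTime accepts a datetime, as self.lastWritten implies.
    written = rdfvalue.XSDDateTime(datetime.now(timezone.utc))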
Example #5
 def testXSDInt(self):
     i1 = rdfvalue.XSDInteger("100")
     self.assertLess(99, i1)
     self.assertEqual(100, i1)
     self.assertGreater(101, 100)
     self.assertGreater(101, i1)
     self.assertTrue(99 < i1)
     self.assertTrue(101 > i1)
     self.assertTrue(100 == i1)
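The same semantics outside unittest, as a minimal sketch assuming only that pyaff4 is installed. Note that assertLess(99, i1) evaluates 99 < i1, which Python dispatches to XSDInteger's reflected comparison:

    from pyaff4 import rdfvalue

    i1 = rdfvalue.XSDInteger("100")  # both str and int constructors appear here
    assert 99 < i1 < 101             # chained form of the assertions above
    assert i1 == 100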
Example #6
    def _write_metadata(self):
        volume_urn = self.resolver.GetUnique(lexicon.transient_graph, self.urn,
                                             lexicon.AFF4_STORED)
        self.resolver.Add(volume_urn, self.urn, lexicon.AFF4_TYPE,
                          rdfvalue.URN(lexicon.AFF4_ENCRYPTEDSTREAM_TYPE))

        self.resolver.Set(volume_urn, self.urn, lexicon.AFF4_IMAGE_CHUNK_SIZE,
                          rdfvalue.XSDInteger(self.chunk_size))

        self.resolver.Set(volume_urn, self.urn,
                          lexicon.AFF4_IMAGE_CHUNKS_PER_SEGMENT,
                          rdfvalue.XSDInteger(self.chunks_per_segment))

        self.resolver.Set(volume_urn, self.urn, lexicon.AFF4_STREAM_SIZE,
                          rdfvalue.XSDInteger(self.Size()))

        for kb in self.keybags:
            self.resolver.Add(volume_urn, self.urn, lexicon.AFF4_KEYBAG,
                              rdfvalue.URN(kb.ID))
            kb.write(self.resolver, volume_urn)
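Because the keybags are recorded with Add rather than Set, an encrypted stream can reference several of them. A reader-side sketch of enumerating those triples, assuming the graph-aware query signature used in Example #11 and made-up URNs:

    from pyaff4 import data_store, lexicon, rdfvalue

    resolver = data_store.MemoryDataStore(lexicon.standard)
    volume = rdfvalue.URN("aff4://volume-0")
    stream = rdfvalue.URN("aff4://stream-0")
    for kb_id in ("aff4://keybag-0", "aff4://keybag-1"):
        resolver.Add(volume, stream, lexicon.AFF4_KEYBAG, rdfvalue.URN(kb_id))
    for kb_urn in resolver.QuerySubjectPredicate(volume, stream,
                                                 lexicon.AFF4_KEYBAG):
        print(kb_urn)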
Example #7
    def parse_cd(self, backing_store_urn):
        with self.resolver.AFF4FactoryOpen(backing_store_urn) as backing_store:
            # Find the End of Central Directory Record - We read about 4k of
            # data and scan for the header from the end, just in case there is
            # an archive comment appended to the end.
            backing_store.Seek(-BUFF_SIZE, 2)

            ecd_real_offset = backing_store.Tell()
            buffer = backing_store.Read(BUFF_SIZE)

            end_cd, buffer_offset = EndCentralDirectory.FromBuffer(buffer)

            urn_string = None

            ecd_real_offset += buffer_offset

            # Fetch the volume comment.
            if end_cd.comment_len > 0:
                backing_store.Seek(ecd_real_offset + end_cd.sizeof())
                urn_string = backing_store.Read(end_cd.comment_len)

                LOGGER.info("Loaded AFF4 volume URN %s from zip file.",
                            urn_string)

            #if end_cd.size_of_cd == 0xFFFFFFFF:
            #    end_cd, buffer_offset = Zip64EndCD.FromBuffer(buffer)

            #LOGGER.info("Found ECD at %#x", ecd_real_offset)

            # There is a catch-22 here - before we parse the zip file we don't
            # know the volume's URN, but we need to know the URN so that
            # AFF4FactoryOpen() can open it. Therefore we start with a random
            # URN and then create a new ZipFile volume. After parsing the
            # central directory we discover our URN and can then delete the
            # old, randomly selected one.
            if urn_string and self.urn != urn_string:
                self.resolver.DeleteSubject(self.urn)
                self.urn.Set(utils.SmartUnicode(urn_string))

                # Set these triples so we know how to open the zip file again.
                self.resolver.Set(self.urn, lexicon.AFF4_TYPE,
                                  rdfvalue.URN(lexicon.AFF4_ZIP_TYPE))
                self.resolver.Set(self.urn, lexicon.AFF4_STORED,
                                  rdfvalue.URN(backing_store_urn))
                self.resolver.Set(backing_store_urn, lexicon.AFF4_CONTAINS,
                                  self.urn)

            directory_offset = end_cd.offset_of_cd
            directory_number_of_entries = end_cd.total_entries_in_cd

            # Traditional zip file - non 64 bit.
            if directory_offset > 0 and directory_offset != 0xffffffff:
                # The global difference between the zip file offsets and real
                # file offsets. This is non zero when the zip file was appended
                # to another file.
                self.global_offset = (
                    # Real ECD offset.
                    ecd_real_offset - end_cd.size_of_cd -

                    # Claimed CD offset.
                    directory_offset)

                LOGGER.info("Global offset: %#x", self.global_offset)

            # This is a 64 bit archive, find the Zip64EndCD.
            else:
                locator_real_offset = ecd_real_offset - Zip64CDLocator.sizeof()
                backing_store.Seek(locator_real_offset, 0)
                locator = Zip64CDLocator(
                    backing_store.Read(Zip64CDLocator.sizeof()))

                if not locator.IsValid():
                    raise IOError("Zip64CDLocator invalid or not supported.")

                # Although it may appear that we can use the Zip64CDLocator to
                # locate the Zip64EndCD record via its offset_of_cd field, this
                # is not quite so. If the zip file was appended to another
                # file, the offset_of_cd field is no longer valid, as it still
                # points to the old offset. In this case we also need to know
                # the global shift.
                backing_store.Seek(locator_real_offset - Zip64EndCD.sizeof(),
                                   0)

                end_cd = Zip64EndCD(backing_store.Read(Zip64EndCD.sizeof()))

                if not end_cd.IsValid():
                    LOGGER.error("Zip64EndCD magic not correct @%#x",
                                 locator_real_offset - Zip64EndCD.sizeof())
                    raise RuntimeError("Zip64EndCD magic not correct")

                directory_offset = end_cd.offset_of_cd
                directory_number_of_entries = end_cd.number_of_entries_in_volume

                # The global offset is now known:
                self.global_offset = (
                    # Real offset of the central directory.
                    locator_real_offset - Zip64EndCD.sizeof() -
                    end_cd.size_of_cd -

                    # The directory offset in zip file offsets.
                    directory_offset)

                LOGGER.info("Global offset: %#x", self.global_offset)

            # Now iterate over the directory and read all the ZipInfo structs.
            entry_offset = directory_offset
            for _ in range(directory_number_of_entries):
                backing_store.Seek(entry_offset + self.global_offset, 0)
                entry = CDFileHeader(backing_store.Read(CDFileHeader.sizeof()))

                if not entry.IsValid():
                    LOGGER.info("CDFileHeader at offset %#x invalid",
                                entry_offset)
                    raise RuntimeError()

                zip_info = ZipInfo(
                    filename=backing_store.Read(entry.file_name_length),
                    local_header_offset=entry.relative_offset_local_header,
                    compression_method=entry.compression_method,
                    compress_size=entry.compress_size,
                    file_size=entry.file_size,
                    crc32=entry.crc32,
                    lastmoddate=entry.dosdate,
                    lastmodtime=entry.dostime)

                # Zip64 local header - parse the Zip64 extended information
                # extra field. This field isn't a struct, it's a serialization.
                #if zip_info.local_header_offset < 0 or zip_info.local_header_offset == 0xffffffff:
                if entry.extra_field_len > 0:
                    extrabuf = backing_store.Read(entry.extra_field_len)

                    extra, readbytes = Zip64FileHeaderExtensibleField.FromBuffer(
                        entry, extrabuf)
                    extrabuf = extrabuf[readbytes:]

                    if extra.header_id == 1:
                        if extra.Get(
                                "relative_offset_local_header") is not None:
                            zip_info.local_header_offset = (
                                extra.Get("relative_offset_local_header"))
                        if extra.Get("file_size") is not None:
                            zip_info.file_size = extra.Get("file_size")
                        if extra.Get("compress_size") is not None:
                            zip_info.compress_size = extra.Get("compress_size")
                            #break

                LOGGER.info("Found file %s @ %#x", zip_info.filename,
                            zip_info.local_header_offset)

                # Store this information in the resolver. This allows
                # segments to be directly opened by URN.
                member_urn = aff4_utils.urn_from_member_name(
                    zip_info.filename, self.urn)

                self.resolver.Set(member_urn, lexicon.AFF4_TYPE,
                                  rdfvalue.URN(lexicon.AFF4_ZIP_SEGMENT_TYPE))

                self.resolver.Set(member_urn, lexicon.AFF4_STORED, self.urn)
                self.resolver.Set(member_urn, lexicon.AFF4_STREAM_SIZE,
                                  rdfvalue.XSDInteger(zip_info.file_size))
                self.members[member_urn] = zip_info

                # Go to the next entry.
                entry_offset += (entry.sizeof() + entry.file_name_length +
                                 entry.extra_field_len +
                                 entry.file_comment_length)
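The scan-from-the-end strategy at the top of parse_cd() can be shown with the standard library alone. A self-contained sketch, assuming a plain (non-Zip64) archive and a 64 KiB window in place of BUFF_SIZE:

    import os

    def find_ecd_offset(path, window=64 * 1024):
        """Return the file offset of the End of Central Directory record."""
        with open(path, "rb") as f:
            f.seek(0, os.SEEK_END)
            size = f.tell()
            f.seek(max(0, size - window))
            buf = f.read()
        # Scan backwards for the ECD magic, past any trailing archive comment.
        idx = buf.rfind(b"PK\x05\x06")
        if idx < 0:
            raise IOError("End of Central Directory not found")
        return size - len(buf) + idx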
Example #8
 def store(self, resolver):
     resolver.Set(self.urn, rdfvalue.URN(lexicon.size),
                  rdfvalue.XSDInteger(self.length))
      # a name is textual, so wrap it as XSDString rather than XSDInteger
      resolver.Set(self.urn, rdfvalue.URN(lexicon.name),
                   rdfvalue.XSDString(self.name))
Example #9
 def write(self, resolver, volumeARN):
     resolver.Add(volumeARN, self.ID, lexicon.AFF4_TYPE, rdfvalue.URN(lexicon.AFF4_PASSWORD_WRAPPED_KEYBAG))
     resolver.Set(volumeARN, self.ID, lexicon.AFF4_KEYSIZEBYTES, rdfvalue.XSDInteger(self.keySizeBytes))
     resolver.Set(volumeARN, self.ID, lexicon.AFF4_ITERATIONS, rdfvalue.XSDInteger(self.iterations))
     resolver.Set(volumeARN, self.ID, lexicon.AFF4_WRAPPEDKEY, rdfvalue.RDFBytes(self.wrappedKey))
     resolver.Set(volumeARN, self.ID, lexicon.AFF4_SALT, rdfvalue.RDFBytes(self.salt))
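The iterations and salt written here parameterize a password-based key derivation. As a concept-only sketch (standard-library PBKDF2, not pyaff4's actual wrapping code), this is what those two triples feed into:

    import hashlib
    import os

    salt = os.urandom(16)   # the value stored via AFF4_SALT
    iterations = 147256     # arbitrary example; stored via AFF4_ITERATIONS
    key = hashlib.pbkdf2_hmac("sha256", b"password", salt, iterations,
                              dklen=32)  # dklen matching keySizeBytes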
Example #10
 def write(self, resolver, volumeARN):
     resolver.Add(volumeARN, self.ID, lexicon.AFF4_TYPE, rdfvalue.URN(lexicon.AFF4_CERT_ENCRYPTED_KEYBAG))
     resolver.Set(volumeARN, self.ID, lexicon.AFF4_KEYSIZEBYTES, rdfvalue.XSDInteger(self.keySizeBytes))
     resolver.Set(volumeARN, self.ID, lexicon.AFF4_SERIALNUMBER, rdfvalue.XSDInteger(self.serialNumber))
     resolver.Set(volumeARN, self.ID, lexicon.AFF4_WRAPPEDKEY, rdfvalue.RDFBytes(self.wrappedKey))
     resolver.Set(volumeARN, self.ID, lexicon.AFF4_SUBJECTNAME, rdfvalue.XSDString(self.subjectName))
Example #11
    def openURNtoContainer(urn, mode=None):
            if data_store.HAS_HDT:
                resolver = data_store.HDTAssistedDataStore(lexicon.standard)
            else:
                resolver = data_store.MemoryDataStore(lexicon.standard)

            (version, lex) = Container.identifyURN(urn, resolver=resolver)

            resolver.lexicon = lex
            if mode == "+":
                resolver.Set(lexicon.transient_graph, urn, lexicon.AFF4_STREAM_WRITE_MODE,
                             rdfvalue.XSDString("random"))

            with zip.ZipFile.NewZipFile(resolver, version, urn) as zip_file:
                with resolver.AFF4FactoryOpen(zip_file.backing_store_urn) as backing_store:
                    volumeURN = zip_file.urn
                    if lex == lexicon.standard or lex == lexicon.standard11:

                        images = list(resolver.QueryPredicateObject(volumeURN, lexicon.AFF4_TYPE, lex.Image))
                        if len(images) > 0:
                            imageURN = images[0]

                            datastreams = list(resolver.QuerySubjectPredicate(volumeURN, imageURN, lex.dataStream))

                            if len(datastreams) > 0:
                                # it is a disk image or a memory image

                                for stream in datastreams:
                                    if lex.map in resolver.QuerySubjectPredicate(volumeURN, stream, lexicon.AFF4_TYPE):
                                        dataStream = resolver.AFF4FactoryOpen(stream)
                                        image = aff4.Image(resolver, urn=imageURN)
                                        dataStream.parent = image

                                        return PhysicalImageContainer(backing_store, zip_file, version, volumeURN, resolver, lex, image, dataStream)

                            else:
                                # it is a logical image
                                if version.is11():
                                    # AFF4 logical images are defined at version 1.1
                                    if mode == "+":
                                        return WritableHashBasedImageContainer(backing_store, zip_file, version, volumeURN, resolver, lex)
                                    else:
                                        return LogicalImageContainer(backing_store, zip_file, version, volumeURN, resolver, lex)
                                else:
                                    # scudette's winpmem pre-std implementation is at 1.0
                                    lex = lexicon.pmemlogical
                                    return PreStdLogicalImageContainer(backing_store, zip_file, version, volumeURN, resolver, lex)

                        else:
                            # no images
                            encryptedStreams = list(resolver.QueryPredicateObject(volumeURN, lexicon.AFF4_TYPE, lexicon.standard11.EncryptedStream))
                            if len(encryptedStreams) == 1:
                                encryptedBlockStreamARN = encryptedStreams[0]
                                return EncryptedImageContainer(backing_store, zip_file, version, volumeURN, resolver, lexicon.standard11, encryptedBlockStreamARN, mode)
                            else:
                                return LogicalImageContainer(backing_store, zip_file, version, volumeURN, resolver, lex)


                    elif lex == lexicon.scudette:
                        m = next(resolver.QueryPredicateObject(volumeURN, lexicon.AFF4_TYPE, lex.map))
                        cat = next(resolver.QuerySubjectPredicate(volumeURN, m, lex.category))
                        if cat == lex.memoryPhysical:
                            dataStream = resolver.AFF4FactoryOpen(m)

                            image = aff4.Image(resolver, urn=m)
                            dataStream.parent = image

                            legacyYamlInfoURI = dataStream.urn.Append("information.yaml")
                            try:
                                with resolver.AFF4FactoryOpen(legacyYamlInfoURI) as fd:
                                    txt = fd.read(10000000)
                                    dt = yaml.safe_load(txt)
                                    CR3 = dt["Registers"]["CR3"]
                                    resolver.Add(dataStream.parent.urn, lexicon.standard.memoryPageTableEntryOffset, rdfvalue.XSDInteger(CR3))
                                    kaslr_slide = dt["kaslr_slide"]
                                    resolver.Add(dataStream.parent.urn, lexicon.standard.OSXKALSRSlide, rdfvalue.XSDInteger(kaslr_slide))
                            except (IOError, KeyError, TypeError):
                                # legacy YAML metadata is optional
                                pass

                            return PhysicalImageContainer(backing_store, zip_file, version, volumeURN, resolver, lex, image, dataStream)
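Finally, a minimal caller sketch for openURNtoContainer(), assuming pyaff4 is installed; the file path is hypothetical, and the attribute names on the returned PhysicalImageContainer are inferred from its constructor arguments above:

    from pyaff4 import container, rdfvalue

    urn = rdfvalue.URN("file:///evidence/physical.aff4")  # hypothetical
    volume = container.Container.openURNtoContainer(urn)
    data = volume.dataStream.Read(4096)  # assumption: dataStream attribute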