    def testRawGzchunkedMultipleClients(self):
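        """Checks that the hunt timeline export yields one gzchunked ZIP
        member per client, named after the client id and FQDN."""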
        client_id_1 = db_test_utils.InitializeClient(data_store.REL_DB)
        client_id_2 = db_test_utils.InitializeClient(data_store.REL_DB)

        snapshot = rdf_objects.ClientSnapshot()
        snapshot.client_id = client_id_1
        snapshot.knowledge_base.fqdn = "foo.quux.com"
        data_store.REL_DB.WriteClientSnapshot(snapshot)

        snapshot = rdf_objects.ClientSnapshot()
        snapshot.client_id = client_id_2
        snapshot.knowledge_base.fqdn = "foo.norf.com"
        data_store.REL_DB.WriteClientSnapshot(snapshot)

        hunt_id = "A0B1D2C3E4"

        hunt_obj = rdf_hunt_objects.Hunt()
        hunt_obj.hunt_id = hunt_id
        hunt_obj.args.standard.client_ids = [client_id_1, client_id_2]
        hunt_obj.args.standard.flow_name = timeline.TimelineFlow.__name__
        hunt_obj.hunt_state = rdf_hunt_objects.Hunt.HuntState.PAUSED

        data_store.REL_DB.WriteHuntObject(hunt_obj)

        entry_1 = rdf_timeline.TimelineEntry()
        entry_1.path = "foo_1".encode("utf-8")
        entry_1.size = 13371

        entry_2 = rdf_timeline.TimelineEntry()
        entry_2.path = "foo_2".encode("utf-8")
        entry_2.size = 13372

        _WriteTimeline(client_id_1, [entry_1], hunt_id=hunt_id)
        _WriteTimeline(client_id_2, [entry_2], hunt_id=hunt_id)

        args = api_timeline.ApiGetCollectedHuntTimelinesArgs()
        args.hunt_id = hunt_id

        content = b"".join(self.handler.Handle(args).GenerateContent())
        buffer = io.BytesIO(content)

        with zipfile.ZipFile(buffer, mode="r") as archive:
            client_filename_1 = f"{client_id_1}_foo.quux.com.gzchunked"
            with archive.open(client_filename_1, mode="r") as file:
                chunks = chunked.ReadAll(file)
                entries = list(
                    rdf_timeline.TimelineEntry.DeserializeStream(chunks))
                self.assertEqual(entries, [entry_1])

            client_filename_2 = f"{client_id_2}_foo.norf.com.gzchunked"
            with archive.open(client_filename_2, mode="r") as file:
                chunks = chunked.ReadAll(file)
                entries = list(
                    rdf_timeline.TimelineEntry.DeserializeStream(chunks))
                self.assertEqual(entries, [entry_2])
Example n. 2
    def testRawGzchunkedMultipleEntries(self):
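        """Round-trips 1024 random timeline entries through the RAW_GZCHUNKED
        flow export and checks that they deserialize unchanged."""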
        entries = []

        for idx in range(1024):
            entry = rdf_timeline.TimelineEntry()
            entry.path = "/quux/thud/bar/baz/foo{}".format(idx).encode("utf-8")
            entry.size = random.randint(0, 1024)
            entries.append(entry)

        client_id = db_test_utils.InitializeClient(data_store.REL_DB)
        flow_id = _WriteTimeline(client_id, entries)

        args = api_timeline.ApiGetCollectedTimelineArgs()
        args.client_id = client_id
        args.flow_id = flow_id
        args.format = api_timeline.ApiGetCollectedTimelineArgs.Format.RAW_GZCHUNKED

        content = b"".join(self.handler.Handle(args).GenerateContent())

        buf = io.BytesIO(content)
        chunks = chunked.ReadAll(buf)
        deserialized = list(
            rdf_timeline.TimelineEntry.DeserializeStream(chunks))

        self.assertEqual(entries, deserialized)
Example n. 3
    def testMultiple(self):
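        """Checks that multiple written chunks are read back in write order."""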
        buf = io.BytesIO()
        chunked.Write(buf, b"foo")
        chunked.Write(buf, b"bar")
        chunked.Write(buf, b"quux")

        buf.seek(0, io.SEEK_SET)
        self.assertEqual(list(chunked.ReadAll(buf)), [b"foo", b"bar", b"quux"])
Example n. 4
    def testGetCollectedTimelinesGzchunked(self):
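        """Fetches hunt timelines through the API client and decodes the
        gzchunked entries from the resulting ZIP archive."""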
        client_id = db_test_utils.InitializeClient(data_store.REL_DB)
        fqdn = "foo.bar.baz"

        snapshot = rdf_objects.ClientSnapshot()
        snapshot.client_id = client_id
        snapshot.knowledge_base.fqdn = fqdn
        data_store.REL_DB.WriteClientSnapshot(snapshot)

        hunt_id = "A0B1D2C3"
        flow_id = "0A1B2D3C"

        hunt_obj = rdf_hunt_objects.Hunt()
        hunt_obj.hunt_id = hunt_id
        hunt_obj.args.standard.client_ids = [client_id]
        hunt_obj.args.standard.flow_name = timeline.TimelineFlow.__name__
        hunt_obj.hunt_state = rdf_hunt_objects.Hunt.HuntState.PAUSED
        data_store.REL_DB.WriteHuntObject(hunt_obj)

        flow_obj = rdf_flow_objects.Flow()
        flow_obj.client_id = client_id
        flow_obj.flow_id = flow_id
        flow_obj.flow_class_name = timeline.TimelineFlow.__name__
        flow_obj.create_time = rdfvalue.RDFDatetime.Now()
        flow_obj.parent_hunt_id = hunt_id
        data_store.REL_DB.WriteFlowObject(flow_obj)

        entry_1 = rdf_timeline.TimelineEntry()
        entry_1.path = "/foo/bar".encode("utf-8")
        entry_1.ino = 7890178901
        entry_1.size = 4815162342
        entry_1.atime_ns = 123 * 10**9
        entry_1.mtime_ns = 234 * 10**9
        entry_1.ctime_ns = 567 * 10**9
        entry_1.mode = 0o654

        entry_2 = rdf_timeline.TimelineEntry()
        entry_2.path = "/foo/baz".encode("utf-8")
        entry_2.ino = 8765487654
        entry_2.size = 1337
        entry_2.atime_ns = 987 * 10**9
        entry_2.mtime_ns = 876 * 10**9
        entry_2.ctime_ns = 765 * 10**9
        entry_2.mode = 0o757

        entries = [entry_1, entry_2]
        blobs = list(rdf_timeline.TimelineEntry.SerializeStream(iter(entries)))
        blob_ids = data_store.BLOBS.WriteBlobsWithUnknownHashes(blobs)

        result = rdf_timeline.TimelineResult()
        result.entry_batch_blob_ids = [
            blob_id.AsBytes() for blob_id in blob_ids
        ]

        flow_result = rdf_flow_objects.FlowResult()
        flow_result.client_id = client_id
        flow_result.flow_id = flow_id
        flow_result.payload = result

        data_store.REL_DB.WriteFlowResults([flow_result])

        buffer = io.BytesIO()

        fmt = timeline_pb2.ApiGetCollectedTimelineArgs.Format.RAW_GZCHUNKED
        self.api.Hunt(hunt_id).GetCollectedTimelines(fmt).WriteToStream(buffer)

        with zipfile.ZipFile(buffer, mode="r") as archive:
            with archive.open(f"{client_id}_{fqdn}.gzchunked",
                              mode="r") as file:
                chunks = chunked.ReadAll(file)
                entries = list(
                    rdf_timeline.TimelineEntry.DeserializeStream(chunks))
                self.assertEqual(entries, [entry_1, entry_2])
Example n. 5
    def testSingle(self):
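        """Checks that a single written chunk is read back unchanged."""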
        buf = io.BytesIO()
        chunked.Write(buf, b"foo")

        buf.seek(0, io.SEEK_SET)
        self.assertEqual(list(chunked.ReadAll(buf)), [b"foo"])
Example n. 6
    def testEmpty(self):
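        """Checks that reading an empty stream yields no chunks."""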
        buf = io.BytesIO()
        self.assertEmpty(list(chunked.ReadAll(buf)))
Example n. 7
  def testMalformedInputWithMaxChunkSizeSet(self):
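    """Checks that a chunk whose declared size exceeds max_chunk_size raises
    ChunkSizeTooBigError."""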
    buf = io.BytesIO(b"\xff" * 1024)

    with self.assertRaises(chunked.ChunkSizeTooBigError):
      list(chunked.ReadAll(buf, max_chunk_size=1024))