def testGetFilesArchiveGenerateCorrectArchive_marker(self): pass
def testGetFilesArchiveGeneratesCorrectArchive(self):
  """Verifies the flow archive layout: manifest, client info and both files."""
  client_id, flow_id = self._SetupFlowWithStatEntryResults()

  chunk_size = 1024 * 1024 * 4
  # Create two files under /foo, each backed by distinct blob contents.
  for basename, seed in [("bar1", "ab"), ("bar2", "cd")]:
    data, refs = vfs_test_lib.GenerateBlobRefs(chunk_size, seed)
    vfs_test_lib.CreateFileWithBlobRefsAndData(
        db.ClientPath.OS(client_id, ["foo", basename]), refs, data)

  archive_stream = io.BytesIO()
  self.api.Client(client_id).Flow(
      flow_id).GetFilesArchive().WriteToStream(archive_stream)
  zip_fd = zipfile.ZipFile(archive_stream)

  prefix = "%s_flow_ListProcesses_%s" % (client_id, flow_id)
  expected_names = [
      "%s/MANIFEST" % prefix,
      "%s/%s/client_info.yaml" % (prefix, client_id),
      "%s/%s/fs/os/foo/bar1" % (prefix, client_id),
      "%s/%s/fs/os/foo/bar2" % (prefix, client_id),
  ]
  self.assertCountEqual(zip_fd.namelist(), expected_names)

  # Every member of the archive must carry actual (compressed) payload.
  for entry in zip_fd.infolist():
    self.assertGreater(entry.compress_size, 0)
def testGetFilesArchiveDropsStreamingResponsesWhenSecondFileBlobIsMissing(
    self):
  """A blob missing mid-stream breaks the zip and notifies the user."""
  client_id, flow_id = self._SetupFlowWithStatEntryResults()

  blob_data, blob_refs = vfs_test_lib.GenerateBlobRefs(
      1024 * 1024 * 4, "abc")
  # Store data for only two of the three referenced blobs, so archive
  # generation fails partway through streaming.
  vfs_test_lib.CreateFileWithBlobRefsAndData(
      db.ClientPath.OS(client_id, ["foo", "bar1"]), blob_refs,
      blob_data[0:2])

  start_time = rdfvalue.RDFDatetime.Now()
  stream = io.BytesIO()
  self.api.Client(client_id).Flow(
      flow_id).GetFilesArchive().WriteToStream(stream)

  # The truncated stream must not parse as a valid zip archive.
  with self.assertRaises(zipfile.BadZipfile):
    zipfile.ZipFile(stream)

  # Check that notification was pushed indicating the failure to the user.
  notifications = list(
      self.api.GrrUser().ListPendingNotifications(
          timestamp=start_time.AsMicrosecondsSinceEpoch()))
  self.assertLen(notifications, 1)

  data = notifications[0].data
  self.assertEqual(
      data.notification_type,
      int(rdf_objects.UserNotification.Type
          .TYPE_FILE_ARCHIVE_GENERATION_FAILED))
  self.assertEqual(data.reference.type, data.reference.FLOW)
  self.assertEqual(data.reference.flow.client_id, client_id)
  self.assertEqual(data.reference.flow.flow_id, flow_id)
def testGetFilesArchiveDropsStreamingResponsesWhenSecondFileBlobIsMissing( self): blob_data, blob_refs = vfs_test_lib.GenerateBlobRefs(1024 * 1024 * 10, "01") # We write just the references, without actual data, simulating a case # when blobs were not written to the blob store for some reason. vfs_test_lib.CreateFileWithBlobRefsAndData( db.ClientPath.TSK("C.1000000000000000", ["c", "universe", "42"]), blob_refs, blob_data[:1]) zip_stream = io.BytesIO() timestamp = rdfvalue.RDFDatetime.Now() self.api.Client(client_id=self.client_id).File( "fs/tsk/c/universe").GetFilesArchive().WriteToStream(zip_stream) with self.assertRaises(zipfile.BadZipfile): zipfile.ZipFile(zip_stream) # Check that notification was pushed indicating the failure to the user. pending_notifications = list(self.api.GrrUser().ListPendingNotifications( timestamp=timestamp.AsMicrosecondsSinceEpoch())) self.assertLen(pending_notifications, 1) self.assertEqual( pending_notifications[0].data.notification_type, int(rdf_objects.UserNotification.Type .TYPE_FILE_ARCHIVE_GENERATION_FAILED)) self.assertEqual(pending_notifications[0].data.reference.type, pending_notifications[0].data.reference.VFS) self.assertEqual(pending_notifications[0].data.reference.vfs.client_id, self.client_id) self.assertEqual(pending_notifications[0].data.reference.vfs.vfs_path, "fs/tsk/c/universe")
def _WriteFile(self, client_path, blobs_range=None):
  """Writes a test file at client_path backed by a slice of blob seeds.

  Args:
    client_path: The db.ClientPath at which to create the file.
    blobs_range: Optional (start, end) pair selecting which of the seed
      characters "abcdef" to generate blobs for; defaults to an empty
      range (no blobs).

  Returns:
    A (blob_data, blob_refs) pair as produced by GenerateBlobRefs.
  """
  start, end = blobs_range if blobs_range else (0, 0)
  data, refs = vfs_test_lib.GenerateBlobRefs(self.blob_size,
                                             "abcdef"[start:end])
  vfs_test_lib.CreateFileWithBlobRefsAndData(client_path, refs, data)
  return data, refs
def testGetFilesArchiveFailsWhenFirstFileBlobIsMissing(self):
  """Archive generation fails up-front when the very first blob is absent."""
  client_id, flow_id = self._SetupFlowWithStatEntryResults()

  # Reference a single blob but never store its data in the blob store.
  _, blob_refs = vfs_test_lib.GenerateBlobRefs(10, "0")
  vfs_test_lib.CreateFileWithBlobRefsAndData(
      db.ClientPath.OS(client_id, ["foo", "bar1"]), blob_refs, [])

  output = io.BytesIO()
  with self.assertRaisesRegex(grr_api_errors.UnknownError,
                              "Could not find one of referenced blobs"):
    self.api.Client(client_id).Flow(
        flow_id).GetFilesArchive().WriteToStream(output)
def testGetBlobFailsWhenFileIsCorrupt(self):
  """GetBlob raises when the file's blob data is absent from the store."""
  # We write just the references, without actual data, simulating a case
  # when blobs were not written to the blob store for some reason.
  _, blob_refs = vfs_test_lib.GenerateBlobRefs(10, "0")
  vfs_test_lib.CreateFileWithBlobRefsAndData(
      db.ClientPath.OS("C.1000000000000000", ["c", "bin", "test"]),
      blob_refs, [])

  sink = io.BytesIO()
  with self.assertRaises(errors.UnknownError):
    self.api.Client(client_id=self.client_id).File(
        "fs/os/c/bin/test").GetBlob().WriteToStream(sink)