def testFailingOutputPluginDoesNotAffectOtherOutputPlugins(self):
  """A crashing output plugin must not prevent healthy plugins from running."""
  self.StartHunt(output_plugins=[
      rdfvalue.OutputPluginDescriptor(
          plugin_name="FailingDummyHuntOutputPlugin"),
      rdfvalue.OutputPluginDescriptor(
          plugin_name="DummyHuntOutputPlugin")
  ])

  # Processing results before any client has run is a no-op for the plugins.
  self.ProcessHuntOutputPlugins()
  self.assertEqual(DummyHuntOutputPlugin.num_calls, 0)
  self.assertEqual(DummyHuntOutputPlugin.num_responses, 0)

  self.AssignTasksToClients()
  self.RunHunt(failrate=-1)

  # The first processing pass surfaces the failing plugin's error...
  with self.assertRaises(standard.ResultsProcessingError):
    self.ProcessHuntOutputPlugins()

  # ...but repeated passes must not re-deliver already-processed results:
  # the healthy plugin still sees the batch exactly once.
  for _ in range(5):
    self.ProcessHuntOutputPlugins()
  self.assertEqual(DummyHuntOutputPlugin.num_calls, 1)
  self.assertEqual(DummyHuntOutputPlugin.num_responses, 10)
def testResultsProcessingErrorContainsDetailedFailureData(self):
  """ResultsProcessingError must map hunt urn -> plugin -> raised exceptions."""
  failing_plugin_descriptor = rdfvalue.OutputPluginDescriptor(
      plugin_name="FailingDummyHuntOutputPlugin")
  plugin_descriptor = rdfvalue.OutputPluginDescriptor(
      plugin_name="DummyHuntOutputPlugin")
  hunt_urn = self.StartHunt(
      output_plugins=[failing_plugin_descriptor, plugin_descriptor])

  # Process hunt results before any clients ran: plugins must stay idle.
  self.ProcessHuntOutputPlugins()
  self.assertEqual(DummyHuntOutputPlugin.num_calls, 0)
  self.assertEqual(DummyHuntOutputPlugin.num_responses, 0)

  self.AssignTasksToClients()
  self.RunHunt(failrate=-1)

  try:
    self.ProcessHuntOutputPlugins()
    # Fix: previously, if no error was raised, the test fell through and
    # passed vacuously because all assertions lived in the except branch.
    self.fail("ResultsProcessingError was expected but not raised.")
  except standard.ResultsProcessingError as e:
    # Exactly one hunt failed, keyed by its urn.
    self.assertEqual(len(e.exceptions_by_hunt), 1)
    self.assertTrue(hunt_urn in e.exceptions_by_hunt)
    # Only the failing plugin is recorded for that hunt, with one exception
    # carrying the plugin's original error message.
    self.assertEqual(len(e.exceptions_by_hunt[hunt_urn]), 1)
    self.assertTrue(
        failing_plugin_descriptor in e.exceptions_by_hunt[hunt_urn])
    self.assertEqual(
        len(e.exceptions_by_hunt[hunt_urn][failing_plugin_descriptor]), 1)
    self.assertEqual(
        e.exceptions_by_hunt[hunt_urn][failing_plugin_descriptor]
        [0].message, "Oh no!")
def testMultipleHuntsOutputIsProcessedCorrectly(self):
  """One processing pass services every hunt's plugins independently."""
  self.StartHunt(output_plugins=[
      rdfvalue.OutputPluginDescriptor(plugin_name="DummyHuntOutputPlugin")
  ])
  self.StartHunt(output_plugins=[
      rdfvalue.OutputPluginDescriptor(
          plugin_name="StatefulDummyHuntOutputPlugin")
  ])

  self.AssignTasksToClients()
  self.RunHunt(failrate=-1)
  self.ProcessHuntOutputPlugins()

  # Each hunt's plugin was invoked exactly once on its own results.
  self.assertListEqual(StatefulDummyHuntOutputPlugin.data, [0])
  self.assertEqual(DummyHuntOutputPlugin.num_calls, 1)
def testShowsFilesAndAllowsDownloadWhenCSVExportIsUsed(self):
  """The hunt Results tab lists CSV export files and lets them be downloaded."""
  with self.ACLChecksDisabled():
    self.client_ids = self.SetupClients(10)

    # Create hunt.
    self.CreateSampleHunt(output_plugins=[
        rdfvalue.OutputPluginDescriptor(plugin_name="CSVOutputPlugin")
    ])

    # Actually run created hunt.
    client_mock = test_lib.SampleHuntMock()
    test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

    # Make sure results are processed: run the results-processing cron flow
    # to completion so the CSV plugin has written its output files.
    flow_urn = flow.GRRFlow.StartFlow(
        flow_name="ProcessHuntResultsCronFlow", token=self.token)
    for _ in test_lib.TestFlowHelper(flow_urn, token=self.token):
      pass

  self.Open("/#main=ManageHunts")
  self.Click("css=td:contains('GenericHunt')")

  # Click the Results tab.
  self.Click("css=li[heading=Results]")
  self.WaitUntil(self.IsTextPresent,
                 "CSV output plugin writes to following files")

  # Check that displayed file can be downloaded.
  self.Click("css=.csv-output-note a:contains('ExportedFile.csv')")
  self.WaitUntil(self.FileWasDownloaded)
def testProcessHuntResultsCronFlowAbortsIfRunningTooLong(self):
  """The cron flow stops early once max_running_time is exceeded."""
  self.assertEqual(LongRunningDummyHuntOutputPlugin.num_calls, 0)

  fake_clock = [0]

  def FakeTime():
    # Strictly increasing clock, advancing a microsecond per call.
    fake_clock[0] += 1e-6
    return fake_clock[0]

  with utils.Stubber(time, "time", FakeTime):
    self.StartHunt(output_plugins=[
        rdfvalue.OutputPluginDescriptor(
            plugin_name="LongRunningDummyHuntOutputPlugin")
    ])
    self.AssignTasksToClients()
    self.RunHunt(failrate=-1)

    # LongRunningDummyHuntOutputPlugin will set the time to 100s on the
    # first run, which will effectively mean that it's running for too long.
    self.ProcessHuntOutputPlugins(
        batch_size=1, max_running_time=rdfvalue.Duration("99s"))

    # Normally 10 results with batch_size=1 mean 10 plugin calls, but the
    # stubbed clock makes the flow abort right after the first one.
    self.assertEqual(LongRunningDummyHuntOutputPlugin.num_calls, 1)
def testMultipleOutputPluginsProcessingStatusAreWrittenToStatusCollection(
    self):
  """Each processing round appends one SUCCESS status entry per plugin."""
  descriptor = rdfvalue.OutputPluginDescriptor(
      plugin_name="DummyHuntOutputPlugin")
  hunt_urn = self.StartHunt(output_plugins=[descriptor])

  # First round: only 4 clients report results.
  self.AssignTasksToClients(self.client_ids[:4])
  self.RunHunt(failrate=-1)
  self.ProcessHuntOutputPlugins()

  # Second round: the remaining 6 clients.
  self.AssignTasksToClients(self.client_ids[4:])
  self.RunHunt(failrate=-1)
  self.ProcessHuntOutputPlugins()

  hunt = aff4.FACTORY.Open(hunt_urn, token=self.token)
  status_collection = aff4.FACTORY.Open(
      hunt.output_plugins_status_collection_urn, token=self.token)
  errors_collection = aff4.FACTORY.Open(
      hunt.output_plugins_errors_collection_urn, token=self.token)

  # Two successful rounds, no errors.
  self.assertEqual(len(errors_collection), 0)
  self.assertEqual(len(status_collection), 2)

  # Order entries oldest-first by age.
  first, second = sorted(status_collection, key=lambda entry: entry.age)
  for entry in (first, second):
    self.assertEqual(entry.status, "SUCCESS")
    self.assertEqual(entry.batch_index, 0)
    self.assertEqual(entry.plugin_descriptor, descriptor)
  # Batch sizes reflect how many clients reported in each round.
  self.assertEqual(first.batch_size, 4)
  self.assertEqual(second.batch_size, 6)
def testOutputPluginsProcessingStatusIsWrittenToStatusCollection(self):
  """A successful processing round writes exactly one SUCCESS status entry."""
  descriptor = rdfvalue.OutputPluginDescriptor(
      plugin_name="DummyHuntOutputPlugin")
  hunt_urn = self.StartHunt(output_plugins=[descriptor])

  # Run the hunt on all clients, then process its output plugins.
  self.AssignTasksToClients(self.client_ids)
  self.RunHunt(failrate=-1)
  self.ProcessHuntOutputPlugins()

  hunt = aff4.FACTORY.Open(hunt_urn, token=self.token)
  status_collection = aff4.FACTORY.Open(
      hunt.output_plugins_status_collection_urn, token=self.token)
  errors_collection = aff4.FACTORY.Open(
      hunt.output_plugins_errors_collection_urn, token=self.token)

  # No failures, a single status record covering the whole batch of 10.
  self.assertEqual(len(errors_collection), 0)
  self.assertEqual(len(status_collection), 1)

  entry = status_collection[0]
  self.assertEqual(entry.status, "SUCCESS")
  self.assertEqual(entry.batch_index, 0)
  self.assertEqual(entry.batch_size, 10)
  self.assertEqual(entry.plugin_descriptor, descriptor)
def testErrorOutputPluginStatusIsAlsoWrittenToErrorsCollection(self):
  """A failing plugin shows up in both the status and errors collections."""
  failing_descriptor = rdfvalue.OutputPluginDescriptor(
      plugin_name="FailingDummyHuntOutputPlugin")
  healthy_descriptor = rdfvalue.OutputPluginDescriptor(
      plugin_name="DummyHuntOutputPlugin")
  hunt_urn = self.StartHunt(
      output_plugins=[failing_descriptor, healthy_descriptor])

  # Run the hunt and process output plugins; the processing error is
  # expected here and deliberately ignored — the collections are checked
  # below instead.
  self.AssignTasksToClients(self.client_ids)
  self.RunHunt(failrate=-1)
  try:
    self.ProcessHuntOutputPlugins()
  except standard.ResultsProcessingError:
    pass

  hunt = aff4.FACTORY.Open(hunt_urn, token=self.token)
  status_collection = aff4.FACTORY.Open(
      hunt.output_plugins_status_collection_urn, token=self.token)
  errors_collection = aff4.FACTORY.Open(
      hunt.output_plugins_errors_collection_urn, token=self.token)

  # Every plugin gets a status entry; only the failing one gets an error.
  self.assertEqual(len(errors_collection), 1)
  self.assertEqual(len(status_collection), 2)

  error = errors_collection[0]
  self.assertEqual(error.status, "ERROR")
  self.assertEqual(error.batch_index, 0)
  self.assertEqual(error.batch_size, 10)
  self.assertEqual(error.plugin_descriptor, failing_descriptor)
  self.assertEqual(error.summary, "Oh no!")

  # "DummyHuntOutputPlugin" sorts before "FailingDummyHuntOutputPlugin".
  success, failure = sorted(
      status_collection,
      key=lambda entry: entry.plugin_descriptor.plugin_name)
  self.assertEqual(success.status, "SUCCESS")
  self.assertEqual(success.batch_index, 0)
  self.assertEqual(success.batch_size, 10)
  self.assertEqual(success.plugin_descriptor, healthy_descriptor)

  self.assertEqual(failure.status, "ERROR")
  self.assertEqual(failure.batch_index, 0)
  self.assertEqual(failure.batch_size, 10)
  self.assertEqual(failure.plugin_descriptor, failing_descriptor)
  self.assertEqual(failure.summary, "Oh no!")
def testListOfCSVFilesIsNotShownWhenHuntProducedNoResults(self):
  """The CSV-files note is absent from the Results tab when there are no results."""
  with self.ACLChecksDisabled():
    self.client_ids = self.SetupClients(10)

    # Create hunt without results.
    self.CreateSampleHunt(output_plugins=[
        rdfvalue.OutputPluginDescriptor(plugin_name="CSVOutputPlugin")
    ])

  self.Open("/#main=ManageHunts")
  self.Click("css=td:contains('GenericHunt')")

  # Click the Results tab.
  self.Click("css=li[heading=Results]")
  # The (empty) results table renders, but the CSV-files note does not.
  self.WaitUntil(self.IsElementPresent, "css=table[aff4_path]")
  self.WaitUntilNot(self.IsTextPresent,
                    "CSV output plugin writes to following files")
def testOutputPluginsMaintainState(self):
  """Plugin state persists and evolves across independent processing runs."""
  self.StartHunt(output_plugins=[
      rdfvalue.OutputPluginDescriptor(
          plugin_name="StatefulDummyHuntOutputPlugin")
  ])
  self.assertListEqual(StatefulDummyHuntOutputPlugin.data, [])

  # Run the hunt and the output cron flow once per client, so the output
  # plugin gets invoked ten separate times.
  for client_id in self.client_ids[:10]:
    self.AssignTasksToClients([client_id])
    self.RunHunt(failrate=-1)
    self.ProcessHuntOutputPlugins()

  # On each call the plugin appended its persisted counter and then
  # incremented it, yielding 0..9.
  self.assertListEqual(StatefulDummyHuntOutputPlugin.data, list(range(10)))
def testOutputPluginsProcessOnlyNewResultsOnEveryRun(self):
  """Only results unseen by previous runs are handed to output plugins."""
  self.StartHunt(output_plugins=[
      rdfvalue.OutputPluginDescriptor(plugin_name="DummyHuntOutputPlugin")
  ])

  # The hunt hasn't reported any results yet, so processing is a no-op.
  self.ProcessHuntOutputPlugins()
  self.assertEqual(DummyHuntOutputPlugin.num_calls, 0)
  self.assertEqual(DummyHuntOutputPlugin.num_responses, 0)

  def ProcessRepeatedly():
    # Repeated cron runs must not re-deliver already-processed results,
    # so calling this several times should equal exactly one plugin call.
    for _ in range(5):
      self.ProcessHuntOutputPlugins()

  # First half of the clients.
  self.AssignTasksToClients(self.client_ids[:5])
  self.RunHunt(failrate=-1)
  ProcessRepeatedly()
  self.assertEqual(DummyHuntOutputPlugin.num_calls, 1)
  self.assertEqual(DummyHuntOutputPlugin.num_responses, 5)

  # Second half of the clients.
  self.AssignTasksToClients(self.client_ids[5:])
  self.RunHunt(failrate=-1)
  ProcessRepeatedly()
  self.assertEqual(DummyHuntOutputPlugin.num_calls, 2)
  self.assertEqual(DummyHuntOutputPlugin.num_responses, 10)
def testProcessHuntResultsCronFlowDoesNotAbortsIfRunningInTime(self):
  """The cron flow processes every batch while under max_running_time."""
  self.assertEqual(LongRunningDummyHuntOutputPlugin.num_calls, 0)

  fake_clock = [0]

  def FakeTime():
    # Strictly increasing clock, advancing a microsecond per call.
    fake_clock[0] += 1e-6
    return fake_clock[0]

  with utils.Stubber(time, "time", FakeTime):
    self.StartHunt(output_plugins=[
        rdfvalue.OutputPluginDescriptor(
            plugin_name="LongRunningDummyHuntOutputPlugin")
    ])
    self.AssignTasksToClients()
    self.RunHunt(failrate=-1)

    # LongRunningDummyHuntOutputPlugin will set the time to 100s on the
    # first run, which is still within the 101s budget given below.
    self.ProcessHuntOutputPlugins(
        batch_size=1, max_running_time=rdfvalue.Duration("101s"))

    # All 10 results generated in normal conditions were processed:
    # one plugin call per batch of size 1.
    self.assertEqual(LongRunningDummyHuntOutputPlugin.num_calls, 10)
def testHuntResultsArrivingWhileOldResultsAreProcessedAreHandled(self):
  """Results that arrive mid-processing are picked up on the next run."""
  self.StartHunt(output_plugins=[
      rdfvalue.OutputPluginDescriptor(plugin_name="DummyHuntOutputPlugin")
  ])

  # No results reported yet, so processing is a no-op.
  self.ProcessHuntOutputPlugins()
  self.assertEqual(DummyHuntOutputPlugin.num_calls, 0)
  self.assertEqual(DummyHuntOutputPlugin.num_responses, 0)

  def GenerateResultsWhileProcessing(_, responses):
    # Runs in place of the real plugin: while the first 5 results are
    # being processed, produce new results for the remaining 5 clients.
    self.assertEqual(len(responses), 5)
    self.AssignTasksToClients(self.client_ids[5:])
    self.RunHunt(failrate=-1)

  with utils.Stubber(DummyHuntOutputPlugin, "ProcessResponses",
                     GenerateResultsWhileProcessing):
    # Process first 5 clients.
    self.AssignTasksToClients(self.client_ids[:5])
    self.RunHunt(failrate=-1)
    self.ProcessHuntOutputPlugins()

  # The stub ran instead of the real plugin, so the counters are still 0.
  self.assertEqual(DummyHuntOutputPlugin.num_calls, 0)
  self.assertEqual(DummyHuntOutputPlugin.num_responses, 0)

  # The next processing round must pick up the results that arrived while
  # the previous round was running.
  self.ProcessHuntOutputPlugins()
  self.assertEqual(DummyHuntOutputPlugin.num_calls, 1)
  self.assertEqual(DummyHuntOutputPlugin.num_responses, 5)
def testStatsHunt(self):
  """StatsHunt schedules periodic low-priority stats collection on clients.

  Walks the hunt through several collection intervals with a fake clock:
  messages posted in the future are not delivered until time advances, and
  pausing the hunt stops new collections from being scheduled.
  """
  interval = rdfvalue.Duration(
      config_lib.CONFIG["StatsHunt.CollectionInterval"])
  batch_size = 3
  config_lib.CONFIG.Set("StatsHunt.ClientBatchSize", batch_size)

  # Make one of the clients windows
  with aff4.FACTORY.Open(
      self.client_ids[3], mode="rw", token=self.token) as win_client:
    win_client.Set(win_client.Schema.SYSTEM("Windows"))

  with test_lib.FakeTime(0, increment=0.01):
    with hunts.GRRHunt.StartHunt(
        hunt_name="StatsHunt", client_rate=0, token=self.token,
        output_plugins=[
            rdfvalue.OutputPluginDescriptor(
                plugin_name="DummyHuntOutputPlugin")
        ]) as hunt:
      hunt.Run()
      hunt_urn = hunt.urn

    # Run the hunt.
    self.AssignTasksToClients()
    client_mock = action_mocks.InterrogatedClient()
    client_mock.InitializeClient()
    test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

    # At this time the clients should not receive any messages since
    # messages are posted in the future.
    self.assertEqual(client_mock.response_count, 0)

  # Lets advance the time and re-run the hunt. The clients should now
  # receive their messages.
  with test_lib.FakeTime(10 + interval.seconds, increment=0.01):
    test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)
    self.assertEqual(client_mock.response_count, len(self.client_ids))

    # Make sure the last message was of LOW_PRIORITY (all messages should
    # be LOW_PRIORITY but we only check the last one).
    self.assertEqual(client_mock.recorded_messages[-1].priority,
                     "LOW_PRIORITY")
    # Check fastpoll was false for all messages
    self.assertFalse(
        any([x.require_fastpoll for x in client_mock.recorded_messages]))

  # Pause the hunt
  with aff4.FACTORY.OpenWithLock(hunt.urn, token=self.token) as hunt:
    hunt.GetRunner().Pause()

  # Advance time and re-run. We get the results back from last time, but
  # don't schedule any new ones because the hunt is now paused.
  with test_lib.FakeTime(20 + (interval.seconds * 2), increment=0.01):
    test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)
    self.assertEqual(client_mock.response_count, len(self.client_ids) * 2)

  # Advance time and re-run. We should have the same number of responses
  # still.
  with test_lib.FakeTime(30 + (interval.seconds * 3), increment=0.01):
    test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

  # All clients were called.
  self.assertEqual(client_mock.response_count, len(self.client_ids) * 2)

  # Check the results got written to the collection
  result_collection = aff4.FACTORY.Open(hunt_urn.Add("Results"),
                                        token=self.token)

  # The +1 is here because we write 2 responses for the single windows
  # machine (dnsconfig and interface)
  self.assertEqual(len(result_collection), (len(self.client_ids) + 1) * 2)