def testMultipleOutputPluginsProcessingStatusAreWrittenToStatusCollection(
    self):
  plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="DummyHuntOutputPlugin")
  hunt_urn = self.StartHunt(output_plugins=[plugin_descriptor])

  # Run the hunt on the first 4 clients and process output plugins.
  self.AssignTasksToClients(self.client_ids[:4])
  self.RunHunt(failrate=-1)
  self.ProcessHuntOutputPlugins()

  # Run the hunt on the last 6 clients and process output plugins.
  self.AssignTasksToClients(self.client_ids[4:])
  self.RunHunt(failrate=-1)
  self.ProcessHuntOutputPlugins()

  status_collection = implementation.GRRHunt.PluginStatusCollectionForHID(
      hunt_urn)
  errors_collection = implementation.GRRHunt.PluginErrorCollectionForHID(
      hunt_urn)

  self.assertEmpty(errors_collection)
  self.assertLen(status_collection, 2)

  items = sorted(status_collection, key=lambda x: x.age)
  self.assertEqual(items[0].status, "SUCCESS")
  self.assertEqual(items[0].batch_index, 0)
  self.assertEqual(items[0].batch_size, 4)
  self.assertEqual(items[0].plugin_descriptor, plugin_descriptor)

  self.assertEqual(items[1].status, "SUCCESS")
  self.assertEqual(items[1].batch_index, 0)
  self.assertEqual(items[1].batch_size, 6)
  self.assertEqual(items[1].plugin_descriptor, plugin_descriptor)
def Run(self):
  failing_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name=hunt_test_lib.FailingDummyHuntOutputPlugin.__name__)

  with test_lib.FakeTime(42, increment=1):
    hunt_urn = self.StartHunt(
        description="the hunt", output_plugins=[failing_descriptor])

    self.client_ids = self.SetupClients(2)
    for index, client_id in enumerate(self.client_ids):
      self.AssignTasksToClients(client_ids=[client_id])
      self.RunHunt(failrate=-1)
      with test_lib.FakeTime(100042 + index * 100):
        try:
          self.ProcessHuntOutputPlugins()
        except process_results.ResultsProcessingError:
          if flags.FLAGS.debug:
            pdb.post_mortem()

  self.Check(
      "ListHuntOutputPluginErrors",
      args=hunt_plugin.ApiListHuntOutputPluginErrorsArgs(
          hunt_id=hunt_urn.Basename(),
          plugin_id="FailingDummyHuntOutputPlugin_0"),
      replace={hunt_urn.Basename(): "H:123456"})
def Run(self):
  output_plugins = [
      rdf_output_plugin.OutputPluginDescriptor(
          plugin_name=test_plugins.DummyHuntTestOutputPlugin.__name__,
          plugin_args=test_plugins.DummyHuntTestOutputPlugin.args_type(
              filename_regex="blah!", fetch_binaries=True))
  ]

  with test_lib.FakeTime(42, increment=1):
    if data_store.RelationalDBReadEnabled("hunts"):
      hunt_id = self.CreateHunt(
          description="the hunt", output_plugins=output_plugins)
      hunt.StartHunt(hunt_id)
    else:
      hunt_urn = self.StartHunt(
          description="the hunt", output_plugins=output_plugins)
      hunt_id = hunt_urn.Basename()

    self.client_ids = self.SetupClients(2)
    for index, client_id in enumerate(self.client_ids):
      self.RunHunt(client_ids=[client_id], failrate=-1)
      with test_lib.FakeTime(100042 + index * 100):
        self.ProcessHuntOutputPlugins()

  self.Check(
      "ListHuntOutputPluginLogs",
      args=hunt_plugin.ApiListHuntOutputPluginLogsArgs(
          hunt_id=hunt_id, plugin_id="DummyHuntTestOutputPlugin_0"),
      replace={hunt_id: "H:123456"})
def Run(self):
  failing_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name=hunt_test_lib.FailingDummyHuntOutputPlugin.__name__)

  with test_lib.FakeTime(42, increment=1):
    if data_store.RelationalDBReadEnabled("hunts"):
      hunt_id = self.CreateHunt(
          description="the hunt", output_plugins=[failing_descriptor])
      hunt.StartHunt(hunt_id)
    else:
      hunt_urn = self.StartHunt(
          description="the hunt", output_plugins=[failing_descriptor])
      hunt_id = hunt_urn.Basename()

    self.client_ids = self.SetupClients(2)
    for index, client_id in enumerate(self.client_ids):
      self.RunHunt(client_ids=[client_id], failrate=-1)
      with test_lib.FakeTime(100042 + index * 100):
        try:
          self.ProcessHuntOutputPlugins()
        except process_results.ResultsProcessingError:
          if flags.FLAGS.pdb_post_mortem:
            pdb.post_mortem()

  self.Check(
      "ListHuntOutputPluginErrors",
      args=hunt_plugin.ApiListHuntOutputPluginErrorsArgs(
          hunt_id=hunt_id, plugin_id="FailingDummyHuntOutputPlugin_0"),
      replace={hunt_id: "H:123456"})
def testFlowDoesNotFailWhenOutputPluginFails(self):
  flow_id = self.RunFlow(output_plugins=[
      rdf_output_plugin.OutputPluginDescriptor(
          plugin_name="FailingDummyFlowOutputPlugin")
  ])
  flow_obj = data_store.REL_DB.ReadFlowObject(self.client_id, flow_id)
  self.assertEqual(flow_obj.flow_state, "FINISHED")
def Run(self):
  client_id = self.SetupClient(0)

  failing_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name=hunt_test_lib.FailingDummyHuntOutputPlugin.__name__)

  with test_lib.FakeTime(42):
    if data_store.RelationalDBEnabled():
      flow_id = flow_test_lib.StartAndRunFlow(
          flow_cls=flow_test_lib.DummyFlowWithSingleReply,
          client_id=client_id.Basename(),
          output_plugins=[failing_descriptor])
    else:
      flow_urn = flow.StartAFF4Flow(
          flow_name=flow_test_lib.DummyFlowWithSingleReply.__name__,
          client_id=client_id,
          output_plugins=[failing_descriptor],
          token=self.token)
      flow_id = flow_urn.Basename()
      flow_test_lib.TestFlowHelper(flow_urn, token=self.token)

  self.Check(
      "ListFlowOutputPluginErrors",
      args=flow_plugin.ApiListFlowOutputPluginErrorsArgs(
          client_id=client_id.Basename(),
          flow_id=flow_id,
          plugin_id="FailingDummyHuntOutputPlugin_0"),
      replace={flow_id: "W:ABCDEF"})
def testOutputPluginsAreCorrectlyAppliedAndTheirStatusCanBeRead(self):
  hunt_test_lib.StatefulDummyHuntOutputPlugin.data = []
  hunt_test_lib.DummyHuntOutputPlugin.num_calls = 0
  hunt_test_lib.DummyHuntOutputPlugin.num_responses = 0

  plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="DummyHuntOutputPlugin")
  hunt_id, client_ids = self._CreateAndRunHunt(
      num_clients=5,
      client_mock=hunt_test_lib.SampleHuntMock(failrate=-1),
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=self.GetFileHuntArgs(),
      output_plugins=[plugin_descriptor])

  self.assertEqual(hunt_test_lib.DummyHuntOutputPlugin.num_calls, 5)
  self.assertEqual(hunt_test_lib.DummyHuntOutputPlugin.num_responses, 5)

  logs = data_store.REL_DB.ReadHuntOutputPluginLogEntries(
      hunt_id,
      # REL_DB code uses strings for output plugin ids for consistency (as
      # all other DB ids are strings). At the moment plugin_id in the database
      # is simply an index of the plugin in Flow/Hunt.output_plugins list.
      output_plugin_id="0",
      offset=0,
      count=sys.maxsize,
      with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.LOG)
  self.assertLen(logs, 5)
  self.assertItemsEqual([l.client_id for l in logs], client_ids)
  for l in logs:
    self.assertEqual(l.hunt_id, hunt_id)
    self.assertGreater(l.timestamp, 0)
    self.assertEqual(l.message, "Processed 1 replies.")
def testProcessHuntResultCollectionsCronFlowAbortsIfRunningTooLong(self):
  self.assertEqual(hunt_test_lib.LongRunningDummyHuntOutputPlugin.num_calls, 0)

  test = [0]

  def TimeStub():
    test[0] += 1e-6
    return test[0]

  with utils.Stubber(time, "time", TimeStub):
    self.StartHunt(output_plugins=[
        rdf_output_plugin.OutputPluginDescriptor(
            plugin_name="LongRunningDummyHuntOutputPlugin")
    ])
    self.AssignTasksToClients()
    self.RunHunt(failrate=-1)

    # Max run time for the ProcessHuntResultCollectionsCronFlow is
    # 0.6*lifetime, so 165s gives 99s max run time.
    # LongRunningDummyHuntOutputPlugin will set the time to 100s on the first
    # run, which will effectively mean that it's running for too long.
    phrccf = process_results.ProcessHuntResultCollectionsCronFlow
    with utils.MultiStubber((phrccf, "lifetime", rdfvalue.Duration("165s")),
                            (phrccf, "BATCH_SIZE", 1)):
      self.ProcessHuntOutputPlugins()

    # In normal conditions, there should be 10 results generated.
    # With a batch size of 1 this should result in 10 calls to the output
    # plugin. But as we were using TimeStub, the flow should have aborted
    # after 1 call.
    self.assertEqual(hunt_test_lib.LongRunningDummyHuntOutputPlugin.num_calls,
                     1)
def Run(self):
  client_id = self.SetupClient(0)

  email_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name=email_plugin.EmailOutputPlugin.__name__,
      plugin_args=email_plugin.EmailOutputPluginArgs(
          email_address="test@localhost", emails_limit=42))

  with test_lib.FakeTime(42):
    if data_store.RelationalDBEnabled():
      flow_id = flow_test_lib.StartAndRunFlow(
          flow_cls=flow_test_lib.DummyFlowWithSingleReply,
          client_id=client_id.Basename(),
          output_plugins=[email_descriptor])
    else:
      flow_urn = flow.StartAFF4Flow(
          flow_name=flow_test_lib.DummyFlowWithSingleReply.__name__,
          client_id=client_id,
          output_plugins=[email_descriptor],
          token=self.token)
      flow_id = flow_urn.Basename()
      flow_test_lib.TestFlowHelper(flow_urn, token=self.token)

  self.Check(
      "ListFlowOutputPluginLogs",
      args=flow_plugin.ApiListFlowOutputPluginLogsArgs(
          client_id=client_id.Basename(),
          flow_id=flow_id,
          plugin_id="EmailOutputPlugin_0"),
      replace={flow_id: "W:ABCDEF"})
def testUpdatesStatsCounterOnOutputPluginFailure(self):
  plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="FailingDummyHuntOutputPlugin")

  prev_success_count = stats_collector_instance.Get().GetMetricValue(
      "hunt_results_ran_through_plugin",
      fields=["FailingDummyHuntOutputPlugin"])
  prev_errors_count = stats_collector_instance.Get().GetMetricValue(
      "hunt_output_plugin_errors", fields=["FailingDummyHuntOutputPlugin"])

  self._CreateAndRunHunt(
      num_clients=5,
      client_mock=hunt_test_lib.SampleHuntMock(failrate=-1),
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=self.GetFileHuntArgs(),
      output_plugins=[plugin_descriptor])

  success_count = stats_collector_instance.Get().GetMetricValue(
      "hunt_results_ran_through_plugin",
      fields=["FailingDummyHuntOutputPlugin"])
  errors_count = stats_collector_instance.Get().GetMetricValue(
      "hunt_output_plugin_errors", fields=["FailingDummyHuntOutputPlugin"])

  # 1 error for each client makes it 5 errors, 0 results.
  self.assertEqual(success_count - prev_success_count, 0)
  self.assertEqual(errors_count - prev_errors_count, 5)
def testOutputPluginFlushErrorIsLoggedProperly(self):
  plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="FailingInFlushDummyHuntOutputPlugin")
  hunt_id, client_ids = self._CreateAndRunHunt(
      num_clients=5,
      client_mock=hunt_test_lib.SampleHuntMock(failrate=-1),
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=self.GetFileHuntArgs(),
      output_plugins=[plugin_descriptor])

  logs = data_store.REL_DB.ReadHuntOutputPluginLogEntries(
      hunt_id,
      output_plugin_id="0",
      offset=0,
      count=sys.maxsize,
      with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.LOG)
  self.assertEmpty(logs)

  errors = data_store.REL_DB.ReadHuntOutputPluginLogEntries(
      hunt_id,
      output_plugin_id="0",
      offset=0,
      count=sys.maxsize,
      with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.ERROR)
  self.assertLen(errors, 5)
  self.assertItemsEqual([e.client_id for e in errors], client_ids)
  for e in errors:
    self.assertEqual(e.hunt_id, hunt_id)
    self.assertGreater(e.timestamp, 0)
    self.assertEqual(e.message,
                     "Error while processing 1 replies: Flush, oh no!")
def Run(self):
  client_id = self.SetupClient(0)

  email_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name=email_plugin.EmailOutputPlugin.__name__,
      plugin_args=email_plugin.EmailOutputPluginArgs(
          email_address="test@localhost", emails_limit=42))

  with test_lib.FakeTime(42):
    if data_store.RelationalDBFlowsEnabled():
      flow_id = flow.StartFlow(
          flow_cls=processes.ListProcesses,
          client_id=client_id.Basename(),
          output_plugins=[email_descriptor])
    else:
      flow_urn = flow.StartAFF4Flow(
          flow_name=processes.ListProcesses.__name__,
          client_id=client_id,
          output_plugins=[email_descriptor],
          token=self.token)
      flow_id = flow_urn.Basename()

  self.Check(
      "ListFlowOutputPlugins",
      args=flow_plugin.ApiListFlowOutputPluginsArgs(
          client_id=client_id.Basename(), flow_id=flow_id),
      replace={flow_id: "W:ABCDEF"})
def _CreateHuntFromHunt(self):
  flow_args = rdf_file_finder.FileFinderArgs(
      paths=["a/*", "b/*"],
      action=rdf_file_finder.FileFinderAction(action_type="STAT"))
  flow_runner_args = rdf_flow_runner.FlowRunnerArgs(
      flow_name=file_finder.FileFinder.__name__)
  client_rule_set = self._CreateForemanClientRuleSet()
  source_h = self.StartHunt(
      flow_args=flow_args,
      flow_runner_args=flow_runner_args,
      description="foo-description",
      client_rule_set=client_rule_set,
      paused=True)

  ref = rdf_hunts.FlowLikeObjectReference.FromHuntId(source_h)

  # Modify flow_args so that there are differences.
  flow_args.paths = ["b/*", "c/*"]
  client_rule_set.rules[0].regex.field = "FQDN"
  output_plugins = [
      rdf_output_plugin.OutputPluginDescriptor(
          plugin_name="TestOutputPluginWithArgs")
  ]

  new_h = self.StartHunt(
      flow_args=flow_args,
      flow_runner_args=flow_runner_args,
      description="bar-description",
      client_rule_set=client_rule_set,
      output_plugins=output_plugins,
      original_object=ref,
      paused=True)

  return new_h, source_h
def testOutputPluginsAreCorrectlyAppliedAndTheirStatusCanBeRead(self):
  hunt_test_lib.StatefulDummyHuntOutputPlugin.data = []
  hunt_test_lib.DummyHuntOutputPlugin.num_calls = 0
  hunt_test_lib.DummyHuntOutputPlugin.num_responses = 0

  plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="DummyHuntOutputPlugin")
  hunt_id, _ = self._CreateAndRunHunt(
      num_clients=5,
      client_mock=hunt_test_lib.SampleHuntMock(failrate=-1),
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=self.GetFileHuntArgs(),
      output_plugins=[plugin_descriptor])

  self.assertEqual(hunt_test_lib.DummyHuntOutputPlugin.num_calls, 5)
  self.assertEqual(hunt_test_lib.DummyHuntOutputPlugin.num_responses, 5)

  logs = hunt.GetHuntOutputPluginLogs(hunt_id, 0, sys.maxsize)
  self.assertLen(logs, 5)
  for l in logs:
    self.assertEqual(l.batch_size, 1)
    self.assertEqual(
        l.status,
        output_plugin.OutputPluginBatchProcessingStatus.Status.SUCCESS)
    self.assertEqual(l.plugin_descriptor, plugin_descriptor)
def testFlowDoesNotFailWhenOutputPluginFails(self):
  flow_urn = self.RunFlow(plugins=[
      rdf_output_plugin.OutputPluginDescriptor(
          plugin_name="FailingDummyFlowOutputPlugin")
  ])
  flow_obj = aff4.FACTORY.Open(flow_urn, token=self.token)
  self.assertEqual(flow_obj.context.state, "TERMINATED")
def testHuntResultsArrivingWhileOldResultsAreProcessedAreHandled(self):
  self.StartHunt(output_plugins=[
      rdf_output_plugin.OutputPluginDescriptor(
          plugin_name="DummyHuntOutputPlugin")
  ])

  # Process hunt results.
  self.ProcessHuntOutputPlugins()

  # Check that nothing has happened because the hunt hasn't reported any
  # results yet.
  self.assertEqual(hunt_test_lib.DummyHuntOutputPlugin.num_calls, 0)
  self.assertEqual(hunt_test_lib.DummyHuntOutputPlugin.num_responses, 0)

  # Generate new results while the plugin is working.
  self.num_processed = 0

  def ProcessResponsesStub(_, state, responses):
    # Add 5 more results the first time we are called.
    del state
    if not self.num_processed:
      self.AssignTasksToClients(self.client_ids[5:])
      self.RunHunt(failrate=-1)
    # Just count the total number processed - we don't care about batch size
    # at this point.
    self.num_processed += len(responses)

  with utils.Stubber(hunt_test_lib.DummyHuntOutputPlugin, "ProcessResponses",
                     ProcessResponsesStub):
    self.AssignTasksToClients(self.client_ids[:5])
    self.RunHunt(failrate=-1)
    self.ProcessHuntOutputPlugins()

  self.assertEqual(10, self.num_processed)
  del self.num_processed
def testProcessHuntResultCollectionsCronFlowDoesNotAbortIfRunningInTime(self):
  self.assertEqual(hunt_test_lib.LongRunningDummyHuntOutputPlugin.num_calls, 0)

  test = [0]

  def TimeStub():
    test[0] += 1e-6
    return test[0]

  with utils.Stubber(time, "time", TimeStub):
    self.StartHunt(output_plugins=[
        rdf_output_plugin.OutputPluginDescriptor(
            plugin_name="LongRunningDummyHuntOutputPlugin")
    ])
    self.AssignTasksToClients()
    self.RunHunt(failrate=-1)

    # Same as above, 170s lifetime gives 102s max run time which is longer
    # than 100s, the time LongRunningDummyHuntOutputPlugin will set on the
    # first run. This time, the flow will run in time.
    phrccf = process_results.ProcessHuntResultCollectionsCronFlow
    phrccj = process_results.ProcessHuntResultCollectionsCronJob
    with utils.MultiStubber((phrccf, "lifetime", rdfvalue.Duration("170s")),
                            (phrccf, "BATCH_SIZE", 1),
                            (phrccj, "lifetime", rdfvalue.Duration("170s")),
                            (phrccj, "BATCH_SIZE", 1)):
      self.ProcessHuntOutputPlugins()

    # In normal conditions, there should be 10 results generated.
    self.assertEqual(hunt_test_lib.LongRunningDummyHuntOutputPlugin.num_calls,
                     10)
def testOutputPluginsErrorsAreCorrectlyWrittenAndCanBeRead(self):
  failing_plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="FailingDummyHuntOutputPlugin")
  hunt_id, client_ids = self._CreateAndRunHunt(
      num_clients=5,
      client_mock=hunt_test_lib.SampleHuntMock(failrate=-1),
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=self.GetFileHuntArgs(),
      output_plugins=[failing_plugin_descriptor])

  errors = data_store.REL_DB.ReadHuntOutputPluginLogEntries(
      hunt_id,
      # REL_DB code uses strings for output plugin ids for consistency (as
      # all other DB ids are strings). At the moment plugin_id in the database
      # is simply an index of the plugin in Flow/Hunt.output_plugins list.
      output_plugin_id="0",
      offset=0,
      count=sys.maxsize,
      with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.ERROR)
  self.assertLen(errors, 5)
  self.assertItemsEqual([e.client_id for e in errors], client_ids)
  for e in errors:
    self.assertEqual(e.hunt_id, hunt_id)
    self.assertGreater(e.timestamp, 0)
    self.assertEqual(e.message, "Error while processing 1 replies: Oh no!")
def testUpdatesStatsCounterOnFailure(self):
  failing_plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="FailingDummyHuntOutputPlugin")
  self.StartHunt(output_plugins=[failing_plugin_descriptor])

  prev_success_count = stats_collector_instance.Get().GetMetricValue(
      "hunt_results_ran_through_plugin",
      fields=["FailingDummyHuntOutputPlugin"])
  prev_errors_count = stats_collector_instance.Get().GetMetricValue(
      "hunt_output_plugin_errors", fields=["FailingDummyHuntOutputPlugin"])

  self.AssignTasksToClients()
  self.RunHunt(failrate=-1)
  try:
    self.ProcessHuntOutputPlugins()
  except process_results.ResultsProcessingError:
    pass

  success_count = stats_collector_instance.Get().GetMetricValue(
      "hunt_results_ran_through_plugin",
      fields=["FailingDummyHuntOutputPlugin"])
  errors_count = stats_collector_instance.Get().GetMetricValue(
      "hunt_output_plugin_errors", fields=["FailingDummyHuntOutputPlugin"])

  self.assertEqual(success_count - prev_success_count, 0)
  self.assertEqual(errors_count - prev_errors_count, 1)
def testFlowLogsFailedOutputPluginProcessing(self):
  log_messages = self._RunFlowAndCollectLogs(output_plugins=[
      rdf_output_plugin.OutputPluginDescriptor(
          plugin_name="FailingDummyFlowOutputPlugin")
  ])
  self.assertIn(
      "Plugin FailingDummyFlowOutputPlugin failed to process 1 replies "
      "due to: Oh no!", log_messages)
def testMultipleHuntsOutputIsProcessedCorrectly(self):
  self.StartHunt(output_plugins=[
      rdf_output_plugin.OutputPluginDescriptor(
          plugin_name="DummyHuntOutputPlugin")
  ])
  self.StartHunt(output_plugins=[
      rdf_output_plugin.OutputPluginDescriptor(
          plugin_name="StatefulDummyHuntOutputPlugin")
  ])

  self.AssignTasksToClients()
  self.RunHunt(failrate=-1)
  self.ProcessHuntOutputPlugins()

  # Check that plugins worked correctly.
  self.assertEqual(hunt_test_lib.DummyHuntOutputPlugin.num_calls, 1)
  self.assertListEqual(hunt_test_lib.StatefulDummyHuntOutputPlugin.data, [0])
def testFlowWithOutputPluginButWithoutResultsCompletes(self):
  self.RunFlow(
      flow_cls=NoRequestParentFlow,
      output_plugins=[
          rdf_output_plugin.OutputPluginDescriptor(
              plugin_name="DummyFlowOutputPlugin")
      ])
  self.assertEqual(test_output_plugins.DummyFlowOutputPlugin.num_calls, 0)
def testFlowWithOutputPluginProcessesResultsSuccessfully(self):
  self.RunFlow(output_plugins=[
      rdf_output_plugin.OutputPluginDescriptor(
          plugin_name="DummyFlowOutputPlugin")
  ])
  self.assertEqual(test_output_plugins.DummyFlowOutputPlugin.num_calls, 1)
  self.assertEqual(test_output_plugins.DummyFlowOutputPlugin.num_responses, 1)
def testFlowLogsFailedOutputPluginProcessing(self):
  flow_urn = self.RunFlow(
      plugins=rdf_output_plugin.OutputPluginDescriptor(
          plugin_name="FailingDummyFlowOutputPlugin"))
  flow_obj = aff4.FACTORY.Open(flow_urn, token=self.token)
  log_messages = [item.log_message for item in flow_obj.GetLog()]
  self.assertIn(
      "Plugin FailingDummyFlowOutputPlugin failed to process 1 replies "
      "due to: Oh no!", log_messages)
def testFlowLogsSuccessfulOutputPluginProcessing(self):
  flow_urn = self.RunFlow(
      plugins=rdf_output_plugin.OutputPluginDescriptor(
          plugin_name="DummyFlowOutputPlugin"))
  flow_obj = aff4.FACTORY.Open(flow_urn, token=self.token)
  log_messages = [item.log_message for item in flow_obj.GetLog()]
  self.assertIn(
      "Plugin DummyFlowOutputPlugin sucessfully processed 1 flow replies.",
      log_messages)
def testWritingHuntOutputStatesForUnknownHuntRaises(self):
  state = rdf_flow_runner.OutputPluginState(
      plugin_descriptor=rdf_output_plugin.OutputPluginDescriptor(
          plugin_name="DummyHuntOutputPlugin1"),
      plugin_state={})

  with self.assertRaises(db.UnknownHuntError):
    self.db.WriteHuntOutputPluginsStates(rdf_hunt_objects.RandomHuntId(),
                                         [state])
def testFlowLogsSuccessfulOutputPluginProcessing(self):
  log_messages = self._RunFlowAndCollectLogs(output_plugins=[
      rdf_output_plugin.OutputPluginDescriptor(
          plugin_name="DummyFlowOutputPlugin")
  ])
  self.assertIn(
      "Plugin DummyFlowOutputPlugin successfully processed 1 flow replies.",
      log_messages)
def testUserChangesToCopiedFlowAreRespected(self):
  args = flows_processes.ListProcessesArgs(
      filename_regex="test[a-z]*", fetch_binaries=True)
  flow.StartAFF4Flow(
      flow_name=flows_processes.ListProcesses.__name__,
      args=args,
      client_id=self.client_id,
      output_plugins=[self.email_descriptor],
      token=self.token)

  # Navigate to client and select newly created flow.
  self.Open("/#/clients/C.0000000000000001/flows")
  self.Click("css=td:contains('ListProcesses')")

  # Open wizard and change the arguments.
  self.Click("css=button[name=copy_flow]")
  self.Type("css=label:contains('Filename Regex') ~ * input",
            "somethingElse*")
  self.Click("css=label:contains('Fetch Binaries') ~ * input[type=checkbox]")

  # Change output plugin and add another one.
  self.Click("css=label:contains('Output Plugins') ~ * button")
  self.Select(
      "css=grr-output-plugin-descriptor-form "
      "label:contains('Plugin') ~ * select:eq(0)", "DummyOutputPlugin")
  self.Type(
      "css=grr-output-plugin-descriptor-form "
      "label:contains('Filename Regex'):eq(0) ~ * input:text", "foobar!")

  self.Click("css=button:contains('Launch')")

  # Check that flows list got updated and that the new flow is selected.
  self.WaitUntil(
      self.IsElementPresent,
      "css=grr-client-flows-list tr:contains('ListProcesses'):nth(1)")
  self.WaitUntil(
      self.IsElementPresent, "css=grr-client-flows-list "
      "tr:contains('ListProcesses'):nth(0).row-selected")

  # Now open the last flow and check that it has the changes we made.
  fd = aff4.FACTORY.Open(self.client_id.Add("flows"), token=self.token)
  flows = sorted(fd.ListChildren(), key=lambda x: x.age)
  fobj = aff4.FACTORY.Open(flows[-1], token=self.token)

  self.assertEqual(
      fobj.args,
      flows_processes.ListProcessesArgs(filename_regex="somethingElse*"))
  self.assertListEqual(
      list(fobj.runner_args.output_plugins), [
          rdf_output_plugin.OutputPluginDescriptor(
              plugin_name=gui_test_lib.DummyOutputPlugin.__name__,
              plugin_args=flows_processes.ListProcessesArgs(
                  filename_regex="foobar!")),
          self.email_descriptor
      ])
def testErrorOutputPluginStatusIsAlsoWrittenToErrorsCollection(self):
  failing_plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="FailingDummyHuntOutputPlugin")
  plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="DummyHuntOutputPlugin")
  hunt_urn = self.StartHunt(
      output_plugins=[failing_plugin_descriptor, plugin_descriptor])

  # Run the hunt and process output plugins.
  self.AssignTasksToClients()
  self.RunHunt(failrate=-1)
  try:
    self.ProcessHuntOutputPlugins()
  except process_results.ResultsProcessingError:
    pass

  status_collection = implementation.GRRHunt.PluginStatusCollectionForHID(
      hunt_urn)
  errors_collection = implementation.GRRHunt.PluginErrorCollectionForHID(
      hunt_urn)

  self.assertLen(errors_collection, 1)
  self.assertLen(status_collection, 2)

  self.assertEqual(errors_collection[0].status, "ERROR")
  self.assertEqual(errors_collection[0].batch_index, 0)
  self.assertEqual(errors_collection[0].batch_size, 10)
  self.assertEqual(errors_collection[0].plugin_descriptor,
                   failing_plugin_descriptor)
  self.assertEqual(errors_collection[0].summary, "Oh no!")

  items = sorted(
      status_collection, key=lambda x: x.plugin_descriptor.plugin_name)
  self.assertEqual(items[0].status, "SUCCESS")
  self.assertEqual(items[0].batch_index, 0)
  self.assertEqual(items[0].batch_size, 10)
  self.assertEqual(items[0].plugin_descriptor, plugin_descriptor)

  self.assertEqual(items[1].status, "ERROR")
  self.assertEqual(items[1].batch_index, 0)
  self.assertEqual(items[1].batch_size, 10)
  self.assertEqual(items[1].plugin_descriptor, failing_plugin_descriptor)
  self.assertEqual(items[1].summary, "Oh no!")
def testFailingOutputPluginDoesNotAffectOtherOutputPlugins(self):
  failing_plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="FailingDummyHuntOutputPlugin")
  plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="DummyHuntOutputPlugin")
  hunt_id, _ = self._CreateAndRunHunt(
      num_clients=5,
      client_mock=hunt_test_lib.SampleHuntMock(failrate=-1),
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=self.GetFileHuntArgs(),
      output_plugins=[failing_plugin_descriptor, plugin_descriptor])

  errors = hunt.GetHuntOutputPluginErrors(hunt_id, 0, sys.maxsize)
  self.assertLen(errors, 5)

  # Check that non-failing output plugin is still correctly processed.
  logs = hunt.GetHuntOutputPluginLogs(hunt_id, 0, sys.maxsize)
  self.assertLen(logs, 5)