def Run(self):
  """Runs a canned GetFile flow and records the flow-results API call."""
  runner_args = flow_runner.FlowRunnerArgs(
      flow_name=transfer.GetFile.__name__)

  flow_args = transfer.GetFileArgs(pathspec=rdf_paths.PathSpec(
      path="/tmp/evil.txt", pathtype=rdf_paths.PathSpec.PathType.OS))

  client_mock = test_lib.SampleHuntMock()
  # Pin timestamps so the recorded API response is deterministic.
  with test_lib.FakeTime(42):
    flow_urn = flow.GRRFlow.StartFlow(client_id=self.client_id,
                                      args=flow_args,
                                      runner_args=runner_args,
                                      token=self.token)

    # Drain the flow to completion against the mocked client.
    for _ in test_lib.TestFlowHelper(flow_urn, client_mock=client_mock,
                                     client_id=self.client_id,
                                     token=self.token):
      pass

  # The flow id is random, so substitute a fixed value in the golden output.
  self.Check("GET",
             "/api/clients/%s/flows/%s/results" % (self.client_id.Basename(),
                                                   flow_urn.Basename()),
             replace={flow_urn.Basename(): "W:ABCDEF"})
def testShowsFilesAndAllowsDownloadWhenCSVExportIsUsed(self):
  """Results tab shows CSV plugin output files and lets them be downloaded."""
  with self.ACLChecksDisabled():
    self.client_ids = self.SetupClients(10)

    # Create hunt.
    self.CreateSampleHunt(output_plugins=[
        rdfvalue.OutputPlugin(plugin_name="CSVOutputPlugin")])

    # Actually run created hunt.
    client_mock = test_lib.SampleHuntMock()
    test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

    # Make sure results are processed.
    flow_urn = flow.GRRFlow.StartFlow(flow_name="ProcessHuntResultsCronFlow",
                                      token=self.token)
    for _ in test_lib.TestFlowHelper(flow_urn, token=self.token):
      pass

  self.Open("/#main=ManageHunts")
  self.Click("css=td:contains('GenericHunt')")

  # Click the Results tab.
  self.Click("css=a[renderer=HuntResultsRenderer]")
  self.WaitUntil(self.IsTextPresent,
                 "CSV output plugin writes to following files")

  # Check that displayed file can be downloaded.
  self.Click("css=.csv-output-note a:contains('ExportedFile.csv')")
  self.WaitUntil(self.FileWasDownloaded)
def RunFlow(self, flow_name=None, plugins=None, flow_args=None,
            client_mock=None):
  """Starts a flow on self.client_id, runs it to completion, returns its URN.

  Args:
    flow_name: Name of the flow to run; defaults to "GetFile".
    plugins: Optional output plugins for the flow runner.
    flow_args: Flow arguments; defaults to fetching /tmp/evil.txt.
    client_mock: Client mock serving the flow; defaults to SampleHuntMock.

  Returns:
    The URN of the completed flow.
  """
  if flow_args is None:
    flow_args = transfer.GetFileArgs(
        pathspec=rdf_paths.PathSpec(
            path="/tmp/evil.txt",
            pathtype=rdf_paths.PathSpec.PathType.OS))
  if client_mock is None:
    client_mock = test_lib.SampleHuntMock()

  runner_args = flow_runner.FlowRunnerArgs(
      flow_name=flow_name or "GetFile", output_plugins=plugins)
  flow_urn = flow.GRRFlow.StartFlow(client_id=self.client_id,
                                    args=flow_args,
                                    runner_args=runner_args,
                                    token=self.token)

  # Drain the flow to completion.
  for _ in test_lib.TestFlowHelper(flow_urn,
                                   client_mock=client_mock,
                                   client_id=self.client_id,
                                   token=self.token):
    pass

  return flow_urn
def testHuntNotifications(self):
  """This tests the Hunt notification event."""
  TestHuntListener.received_events = []

  # Set up 10 clients.
  client_ids = self.SetupClients(10)

  with hunts.GRRHunt.StartHunt(hunt_name="BrokenSampleHunt",
                               regex_rules=[
                                   rdfvalue.ForemanAttributeRegex(
                                       attribute_name="GRR client",
                                       attribute_regex="GRR")
                               ],
                               client_rate=0,
                               notification_event="TestHuntDone",
                               token=self.token) as hunt:
    hunt.GetRunner().Start()

  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    foreman.AssignTasksToClient(client_id)

  # Run the hunt.
  client_mock = test_lib.SampleHuntMock()
  test_lib.TestHuntHelper(client_mock, client_ids, check_flow_errors=False,
                          token=self.token)

  # Presumably BrokenSampleHunt/SampleHuntMock make half of the 10 clients
  # produce a notification event — TODO confirm against the mock's failrate.
  self.assertEqual(len(TestHuntListener.received_events), 5)
def testCreatorPropagation(self):
  """Hunt flows must run with the hunt creator's privileges, not the runner's."""
  # The token username must match the admin user created here; the original
  # had the placeholder "******" which matches no created user, so the
  # admin-label check could never pass.
  self.CreateAdminUser("adminuser")
  admin_token = access_control.ACLToken(username="adminuser",
                                        reason="testing")
  # Start a flow that requires admin privileges in the hunt. The
  # parameters are not valid so the flow will error out but it's
  # enough to check if the flow was actually run (i.e., it passed
  # the label test).
  with hunts.GRRHunt.StartHunt(
      hunt_name="GenericHunt",
      flow_runner_args=rdfvalue.FlowRunnerArgs(flow_name="UpdateClient"),
      flow_args=rdfvalue.UpdateClientArgs(),
      regex_rules=[
          rdfvalue.ForemanAttributeRegex(attribute_name="GRR client",
                                         attribute_regex="GRR"),
      ],
      client_rate=0,
      token=admin_token) as hunt:
    hunt.Run()

  self.CreateUser("nonadmin")
  nonadmin_token = access_control.ACLToken(username="nonadmin",
                                           reason="testing")
  self.AssignTasksToClients()

  # Run the hunt as the non-admin user; creator privileges should apply.
  client_mock = test_lib.SampleHuntMock()
  test_lib.TestHuntHelper(client_mock, self.client_ids, False,
                          nonadmin_token)

  errors = list(hunt.GetClientsErrors())
  # Make sure there are errors...
  self.assertTrue(errors)
  # but they are not UnauthorizedAccess.
  for e in errors:
    self.assertTrue("UnauthorizedAccess" not in e.backtrace)
def testShowsFilesAndAllowsDownloadWhenCSVExportIsUsed(self):
  """Results tab shows CSV plugin output files and lets them be downloaded."""
  with self.ACLChecksDisabled():
    self.client_ids = self.SetupClients(10)

    # Create hunt.
    self.CreateSampleHunt(output_plugins=[
        output_plugin.OutputPluginDescriptor(
            plugin_name=csv_plugin.CSVOutputPlugin.__name__)
    ])

    # Actually run created hunt.
    client_mock = test_lib.SampleHuntMock()
    test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

    # Make sure results are processed.
    flow_urn = flow.GRRFlow.StartFlow(
        flow_name=process_results.ProcessHuntResultCollectionsCronFlow.
        __name__,
        token=self.token)
    for _ in test_lib.TestFlowHelper(flow_urn, token=self.token):
      pass

  self.Open("/#main=ManageHunts")
  self.Click("css=td:contains('GenericHunt')")

  # Click the Results tab.
  self.Click("css=li[heading=Results]")
  self.WaitUntil(self.IsTextPresent, "Following files were written")

  # Check that displayed file can be downloaded.
  self.Click("css=a:contains('ExportedFile.csv')")
  self.WaitUntil(self.FileWasDownloaded)
def RunHunt(self, client_ids=None, iteration_limit=None, **mock_kwargs):
  """Runs the current hunt against the given (or default) clients.

  Args:
    client_ids: Clients to run against; defaults to self.client_ids.
    iteration_limit: Optional cap on worker iterations.
    **mock_kwargs: Forwarded to SampleHuntMock (e.g. failrate).
  """
  targets = client_ids or self.client_ids
  test_lib.TestHuntHelper(test_lib.SampleHuntMock(**mock_kwargs),
                          targets,
                          check_flow_errors=False,
                          iteration_limit=iteration_limit,
                          token=self.token)
def testClientLimit(self): """This tests that we can limit hunts to a number of clients.""" # Set up 10 clients. client_ids = self.SetupClients(10) with hunts.GRRHunt.StartHunt(hunt_name="SampleHunt", client_limit=5, regex_rules=[ rdfvalue.ForemanAttributeRegex( attribute_name="GRR client", attribute_regex="GRR") ], client_rate=0, token=self.token) as hunt: hunt.Run() foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token) for client_id in client_ids: foreman.AssignTasksToClient(client_id) # Run the hunt. client_mock = test_lib.SampleHuntMock() test_lib.TestHuntHelper(client_mock, client_ids, False, self.token) hunt_obj = aff4.FACTORY.Open(hunt.urn, mode="rw", age=aff4.ALL_TIMES, token=self.token) started, finished, _ = hunt_obj.GetClientsCounts() # We limited here to 5 clients. self.assertEqual(started, 5) self.assertEqual(finished, 5)
def testClientsTabShowsCompletedAndOutstandingClients(self):
  """Clients tab filters between completed and outstanding clients."""
  with self.ACLChecksDisabled():
    # Create some clients and a hunt to view.
    self.CreateSampleHunt()

    # Run the hunt on half the clients.
    finished_client_ids = self.client_ids[5:]
    outstanding_client_ids = self.client_ids[:5]
    client_mock = test_lib.SampleHuntMock()
    test_lib.TestHuntHelper(client_mock, finished_client_ids, False,
                            self.token)

  self.Open("/#main=ManageHunts")
  self.Click("css=td:contains('GenericHunt')")
  self.Click("css=li[heading=Clients]")

  # The completed view should list exactly the clients that ran.
  self.Click("css=label[name=ShowCompletedClients]")
  for client_id in finished_client_ids:
    self.WaitUntilContains(client_id.Basename(), self.GetText,
                           "css=.tab-content")

  # The outstanding view should list the clients that never ran.
  self.Click("css=label[name=ShowOutstandingClients]")
  for client_id in outstanding_client_ids:
    self.WaitUntilContains(client_id.Basename(), self.GetText,
                           "css=.tab-content")
def Run(self):
  """Records golden output for the GetHuntClientCompletionStats API call."""
  client_ids = self.SetupClients(10)
  client_mock = test_lib.SampleHuntMock()

  with test_lib.FakeTime(42):
    with self.CreateHunt(description="the hunt") as hunt_obj:
      hunt_obj.Run()

  # Complete one client at a time, each at a different fake timestamp, so
  # the completion-stats graph has several distinct data points.
  time_offset = 0
  for client_id in client_ids:
    with test_lib.FakeTime(45 + time_offset):
      self.AssignTasksToClients([client_id])
      test_lib.TestHuntHelper(client_mock, [client_id], False, self.token)
      time_offset += 10

  # The hunt id is random; substitute a fixed value in the golden output.
  replace = {hunt_obj.urn.Basename(): "H:123456"}
  self.Check("GetHuntClientCompletionStats",
             args=hunt_plugin.ApiGetHuntClientCompletionStatsArgs(
                 hunt_id=hunt_obj.urn.Basename()),
             replace=replace)
  self.Check("GetHuntClientCompletionStats",
             args=hunt_plugin.ApiGetHuntClientCompletionStatsArgs(
                 hunt_id=hunt_obj.urn.Basename(), size=4),
             replace=replace)
  self.Check("GetHuntClientCompletionStats",
             args=hunt_plugin.ApiGetHuntClientCompletionStatsArgs(
                 hunt_id=hunt_obj.urn.Basename(), size=1000),
             replace=replace)
def testBrokenHunt(self): """This tests the behavior when a hunt raises an exception.""" # Set up 10 clients. client_ids = self.SetupClients(10) with hunts.GRRHunt.StartHunt( hunt_name="BrokenSampleHunt", regex_rules=[rdf_foreman.ForemanAttributeRegex( attribute_name="GRR client", attribute_regex="GRR")], client_rate=0, token=self.token) as hunt: hunt.GetRunner().Start() foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token) for client_id in client_ids: foreman.AssignTasksToClient(client_id) # Run the hunt. client_mock = test_lib.SampleHuntMock() test_lib.TestHuntHelper(client_mock, client_ids, False, self.token) hunt_obj = aff4.FACTORY.Open(hunt.session_id, mode="rw", age=aff4.ALL_TIMES, token=self.token) started, finished, errors = hunt_obj.GetClientsCounts() self.assertEqual(started, 10) # There should be errors for the five clients where the hunt raised. self.assertEqual(errors, 5) # All of the clients that have the file should still finish eventually. self.assertEqual(finished, 5)
def testHangingClients(self): """This tests if the hunt completes when some clients hang or raise.""" # Set up 10 clients. client_ids = self.SetupClients(10) with hunts.GRRHunt.StartHunt( hunt_name="SampleHunt", regex_rules=[rdf_foreman.ForemanAttributeRegex( attribute_name="GRR client", attribute_regex="GRR")], client_rate=0, token=self.token) as hunt: hunt.GetRunner().Start() foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token) for client_id in client_ids: foreman.AssignTasksToClient(client_id) client_mock = test_lib.SampleHuntMock() # Just pass 8 clients to run, the other two went offline. test_lib.TestHuntHelper(client_mock, client_ids[1:9], False, self.token) hunt_obj = aff4.FACTORY.Open(hunt.session_id, mode="rw", age=aff4.ALL_TIMES, token=self.token) started, finished, _ = hunt_obj.GetClientsCounts() # We started the hunt on 10 clients. self.assertEqual(started, 10) # But only 8 should have finished. self.assertEqual(finished, 8)
def testProcessing(self): """This tests running the hunt on some clients.""" # Set up 10 clients. client_ids = self.SetupClients(10) with hunts.GRRHunt.StartHunt( hunt_name="SampleHunt", regex_rules=[rdf_foreman.ForemanAttributeRegex( attribute_name="GRR client", attribute_regex="GRR")], client_rate=0, token=self.token) as hunt: hunt.GetRunner().Start() foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token) for client_id in client_ids: foreman.AssignTasksToClient(client_id) # Run the hunt. client_mock = test_lib.SampleHuntMock() test_lib.TestHuntHelper(client_mock, client_ids, False, self.token) hunt_obj = aff4.FACTORY.Open( hunt.session_id, mode="r", age=aff4.ALL_TIMES, aff4_type=hunts.SampleHunt, token=self.token) started, finished, _ = hunt_obj.GetClientsCounts() self.assertEqual(started, 10) self.assertEqual(finished, 10)
def RunHunt(self, plugin_name, plugin_args):
  """Runs a GetFile hunt with the given output plugin attached.

  Args:
    plugin_name: Name of the output plugin to attach to the hunt.
    plugin_args: Arguments for that plugin.

  Returns:
    The URN of the (now stopped) hunt.
  """
  with hunts.GRRHunt.StartHunt(
      hunt_name="GenericHunt",
      flow_runner_args=rdfvalue.FlowRunnerArgs(flow_name="GetFile"),
      flow_args=rdfvalue.GetFileArgs(pathspec=rdfvalue.PathSpec(
          path="/tmp/evil.txt",
          pathtype=rdfvalue.PathSpec.PathType.OS)),
      regex_rules=[
          rdfvalue.ForemanAttributeRegex(attribute_name="GRR client",
                                         attribute_regex="GRR")
      ],
      output_plugins=[
          rdfvalue.OutputPlugin(plugin_name=plugin_name,
                                plugin_args=plugin_args)
      ],
      client_rate=0,
      token=self.token) as hunt:
    hunt.Run()
    hunt.StartClients(hunt.session_id, self.client_ids)

  # Run the hunt.
  client_mock = test_lib.SampleHuntMock()
  test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

  # Stop the hunt now.
  hunt.GetRunner().Stop()

  # Run cron flow that executes actual output plugins
  for _ in test_lib.TestFlowHelper("ProcessHuntResultsCronFlow",
                                   token=self.token):
    pass

  return hunt.urn
def SetupHuntDetailView(self, failrate=2):
  """Create some clients and a hunt to view."""
  with self.CreateSampleHunt() as hunt:
    # Record an error for one client so the detail view has error data.
    hunt.LogClientError(self.client_ids[1], "Client Error 1",
                        traceback.format_exc())

  # Run the hunt.
  test_lib.TestHuntHelper(test_lib.SampleHuntMock(failrate=failrate),
                          self.client_ids, False, self.token)
def testHuntExpiration(self):
  """This tests that hunts with a client limit terminate correctly."""
  with test_lib.FakeTime(1000):
    with hunts.GRRHunt.StartHunt(
        hunt_name="GenericHunt",
        flow_runner_args=flow_runner.FlowRunnerArgs(
            flow_name="GetFile"),
        flow_args=transfer.GetFileArgs(pathspec=rdf_paths.PathSpec(
            path="/tmp/evil.txt",
            pathtype=rdf_paths.PathSpec.PathType.OS)),
        regex_rules=[
            rdf_foreman.ForemanAttributeRegex(
                attribute_name="GRR client", attribute_regex="GRR")
        ],
        client_limit=5,
        expiry_time=rdfvalue.Duration("1000s"),
        token=self.token) as hunt:
      hunt.Run()

    # Pretend to be the foreman now and dish out hunting jobs to all the
    # clients (Note we have 10 clients here).
    foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw",
                                token=self.token)
    for client_id in self.client_ids:
      foreman.AssignTasksToClient(client_id)

    hunt_obj = aff4.FACTORY.Open(hunt.session_id, age=aff4.ALL_TIMES,
                                 token=self.token)

    self.assertEqual(hunt_obj.Get(hunt_obj.Schema.STATE), "STARTED")

    # Now advance the time such that the hunt expires.
    # NOTE(review): time.time is patched directly here; presumably the
    # enclosing FakeTime context restores it on exit — confirm.
    time.time = lambda: 5000

    # Run the hunt.
    client_mock = test_lib.SampleHuntMock()
    test_lib.TestHuntHelper(client_mock, self.client_ids,
                            check_flow_errors=False, token=self.token)

    # No client should be processed since the hunt is expired.
    started, finished, errors = hunt_obj.GetClientsCounts()
    self.assertEqual(started, 0)
    self.assertEqual(finished, 0)
    self.assertEqual(errors, 0)

    hunt_obj = aff4.FACTORY.Open(hunt.session_id, age=aff4.ALL_TIMES,
                                 token=self.token)

    # Hunts are automatically stopped when they expire.
    self.assertEqual(hunt_obj.Get(hunt_obj.Schema.STATE), "COMPLETED")
def testHuntTermination(self):
  """This tests that hunts with a client limit terminate correctly."""
  with test_lib.FakeTime(1000, increment=1e-6):
    with hunts.GRRHunt.StartHunt(
        hunt_name="GenericHunt",
        flow_runner_args=rdfvalue.FlowRunnerArgs(
            flow_name="GetFile"),
        flow_args=rdfvalue.GetFileArgs(pathspec=rdfvalue.PathSpec(
            path="/tmp/evil.txt",
            pathtype=rdfvalue.PathSpec.PathType.OS)),
        regex_rules=[
            rdfvalue.ForemanAttributeRegex(
                attribute_name="GRR client", attribute_regex="GRR")
        ],
        client_limit=5,
        client_rate=0,
        expiry_time=rdfvalue.Duration("1000s"),
        token=self.token) as hunt:
      hunt.Run()

    # Pretend to be the foreman now and dish out hunting jobs to all the
    # clients (Note we have 10 clients here).
    foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw",
                                token=self.token)
    for client_id in self.client_ids:
      foreman.AssignTasksToClient(client_id)

    # Run the hunt.
    client_mock = test_lib.SampleHuntMock()
    test_lib.TestHuntHelper(client_mock, self.client_ids,
                            check_flow_errors=False, token=self.token)

    hunt_obj = aff4.FACTORY.Open(hunt.session_id, age=aff4.ALL_TIMES,
                                 token=self.token)

    # Only 5 clients were admitted due to the client limit; of those,
    # presumably 2 error out via the mock's failure pattern — TODO confirm
    # against SampleHuntMock's failrate.
    started, finished, errors = hunt_obj.GetClientsCounts()
    self.assertEqual(started, 5)
    self.assertEqual(finished, 5)
    self.assertEqual(errors, 2)

    hunt_obj = aff4.FACTORY.Open(hunt.session_id, age=aff4.ALL_TIMES,
                                 token=self.token)

    # Hunts are automatically paused when they reach the client limit.
    self.assertEqual(hunt_obj.Get(hunt_obj.Schema.STATE), "PAUSED")
def testPausingAndRestartingDoesNotStartHuntTwiceOnTheSameClient(self):
  """This tests if the hunt completes when some clients hang or raise."""
  client_ids = self.SetupClients(10)

  client_rule_set = rdf_foreman.ForemanClientRuleSet(rules=[
      rdf_foreman.ForemanClientRule(
          rule_type=rdf_foreman.ForemanClientRule.Type.REGEX,
          regex=rdf_foreman.ForemanRegexClientRule(
              attribute_name="GRR client", attribute_regex="GRR"))
  ])

  with hunts.GRRHunt.StartHunt(hunt_name="SampleHunt",
                               client_rule_set=client_rule_set,
                               client_rate=0,
                               token=self.token) as hunt:
    hunt.GetRunner().Start()

    hunt_id = hunt.urn

  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    # Each client should get exactly one task on the first pass.
    num_tasks = foreman.AssignTasksToClient(client_id)
    self.assertEqual(num_tasks, 1)

  client_mock = test_lib.SampleHuntMock()
  test_lib.TestHuntHelper(client_mock, client_ids, False, self.token)

  # Pausing and running hunt: this leads to the fresh rules being written
  # to Foreman.RULES.
  with aff4.FACTORY.Open(hunt_id, mode="rw", token=self.token) as hunt:
    runner = hunt.GetRunner()
    runner.Pause()
    runner.Start()

  # Recreating the foreman so that it updates list of rules.
  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    num_tasks = foreman.AssignTasksToClient(client_id)
    # No tasks should be assigned as this hunt ran on all the clients
    # before.
    self.assertEqual(num_tasks, 0)
def SetupTestHuntView(self):
  """Create some clients and a hunt to view."""
  with self.CreateSampleHunt() as hunt:
    hunt.Log("TestLogLine")

    # Log an error just with some random traceback.
    hunt.LogClientError(self.client_ids[1], "Client Error 1",
                        traceback.format_exc())

  # Run the hunt.
  client_mock = test_lib.SampleHuntMock()
  test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

  hunt = aff4.FACTORY.Open(hunt.urn, token=self.token)
  all_count, _, _ = hunt.GetClientsCounts()
  # Sanity check: all 10 sample clients reported back.
  self.assertEqual(all_count, 10)
def setUp(self):
  """Creates a hunt that collects one dummy result from each of 5 clients."""
  super(ApiGetExportedHuntResultsHandlerTest, self).setUp()

  self.handler = hunt_plugin.ApiGetExportedHuntResultsHandler()

  self.hunt = hunts.GRRHunt.StartHunt(
      hunt_name=standard.GenericHunt.__name__,
      flow_runner_args=rdf_flows.FlowRunnerArgs(
          flow_name=DummyFlowWithSingleReply.__name__),
      client_rate=0,
      token=self.token)
  self.hunt.Run()

  # Run the hunt to completion on all 5 clients.
  client_ids = self.SetupClients(5)
  self.AssignTasksToClients(client_ids=client_ids)
  client_mock = test_lib.SampleHuntMock()
  test_lib.TestHuntHelper(client_mock, client_ids, token=self.token)
def SetupTestHuntView(self):
  """Create some clients and a hunt to view."""
  with self.CreateSampleHunt() as hunt:
    hunt.LogResult(self.client_ids[2], "Result 1")

    # Log an error just with some random traceback.
    hunt.LogClientError(self.client_ids[1], "Client Error 1",
                        traceback.format_exc())

  # Run the hunt.
  client_mock = test_lib.SampleHuntMock()
  test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

  hunt = aff4.FACTORY.Open(hunt.urn, token=self.token, age=aff4.ALL_TIMES)
  started = hunt.GetValuesForAttribute(hunt.Schema.CLIENTS)
  # Sanity check: the hunt was scheduled on all 10 clients.
  self.assertEqual(len(set(started)), 10)
def testShowsResultsTabForIndividualFlowsOnClients(self):
  """Per-client flows started by a hunt render a working Results tab."""
  with self.ACLChecksDisabled():
    # Create and run the hunt.
    self.CreateSampleHunt(stopped=False)
    client_mock = test_lib.SampleHuntMock(failrate=-1)
    test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

    self.GrantClientApproval(self.client_ids[0])

  self.Open("/#c=" + self.client_ids[0].Basename())
  self.Click("css=a:contains('Manage launched flows')")
  self.Click("css=grr-client-flows-list tr:contains('GetFile')")
  self.Click("css=li[heading=Results]")
  # This is to check that no exceptions happened when we tried to display
  # results.
  # TODO(user): Fail *any* test if we get a 500 in the process.
  self.WaitUntilNot(self.IsTextPresent, "Loading...")
def RunVariableGenericHunt(self):
  """Runs a VariableGenericHunt with several flow requests.

  Returns:
    The stopped hunt object.
  """
  args = standard.VariableGenericHuntArgs()
  self._AppendFlowRequest(args.flows, 1, 1)
  self._AppendFlowRequest(args.flows, 2, 2)
  self._AppendFlowRequest(args.flows, 2, 3)

  with hunts.GRRHunt.StartHunt(hunt_name="VariableGenericHunt",
                               args=args,
                               client_rate=0,
                               token=self.token) as hunt:
    hunt.Run()
    hunt.ManuallyScheduleClients()

  # Run the hunt.
  client_mock = test_lib.SampleHuntMock(failrate=100)
  test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

  with aff4.FACTORY.Open(hunt.session_id, mode="rw",
                         token=self.token) as hunt:
    hunt.Stop()

  return hunt
def setUp(self):
  """Creates a hunt and completes it one client at a time."""
  super(ApiGetExportedHuntResultsHandlerTest, self).setUp()

  self.handler = hunt_plugin.ApiGetExportedHuntResultsHandler()

  self.hunt = hunts.GRRHunt.StartHunt(
      hunt_name=standard.GenericHunt.__name__,
      flow_runner_args=rdf_flows.FlowRunnerArgs(
          flow_name=DummyFlowWithSingleReply.__name__),
      client_rate=0,
      token=self.token)
  self.hunt.Run()

  self.client_ids = self.SetupClients(5)
  # Ensure that clients are processed sequentially - this way the test won't
  # depend on the order of results in the collection (which is normally
  # random).
  for cid in self.client_ids:
    self.AssignTasksToClients(client_ids=[cid])
    client_mock = test_lib.SampleHuntMock()
    test_lib.TestHuntHelper(client_mock, [cid], token=self.token)
def Run(self):
  """Records golden output for the client-completion-stats API endpoint."""
  client_ids = self.SetupClients(10)
  client_mock = test_lib.SampleHuntMock()

  with test_lib.FakeTime(42):
    with self.CreateHunt(description="the hunt") as hunt_obj:
      hunt_obj.Run()

  # Complete one client at a time, each at a different fake timestamp, so
  # the completion-stats graph has several distinct data points.
  time_offset = 0
  for client_id in client_ids:
    with test_lib.FakeTime(45 + time_offset):
      self.AssignTasksToClients([client_id])
      test_lib.TestHuntHelper(client_mock, [client_id], False, self.token)
      time_offset += 10

  # The hunt id is random; substitute a fixed value in the golden output.
  replace = {hunt_obj.urn.Basename(): "H:123456"}
  base_url = ("/api/hunts/%s/client-completion-stats"
              "?strip_type_info=1" % hunt_obj.urn.Basename())
  self.Check("GET", base_url, replace=replace)
  self.Check("GET", base_url + "&size=4", replace=replace)
  self.Check("GET", base_url + "&size=1000", replace=replace)
def testProcessing(self): """This tests running the hunt on some clients.""" # Set up 10 clients. client_ids = self.SetupClients(10) with hunts.GRRHunt.StartHunt( hunt_name="SampleHunt", regex_rules=[rdfvalue.ForemanAttributeRegex( attribute_name="GRR client", attribute_regex="GRR")], client_rate=0, token=self.token) as hunt: with hunt.GetRunner() as runner: runner.Start() foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token) for client_id in client_ids: foreman.AssignTasksToClient(client_id) # Run the hunt. client_mock = test_lib.SampleHuntMock() test_lib.TestHuntHelper(client_mock, client_ids, False, self.token) hunt_obj = aff4.FACTORY.Open( hunt.session_id, mode="r", age=aff4.ALL_TIMES, aff4_type="SampleHunt", token=self.token) started = hunt_obj.GetValuesForAttribute(hunt_obj.Schema.CLIENTS) finished = hunt_obj.GetValuesForAttribute(hunt_obj.Schema.FINISHED) self.assertEqual(len(set(started)), 10) self.assertEqual(len(set(finished)), 10) self.DeleteClients(10)
def testResourceUsageStats(self):
  """Checks aggregated per-client resource usage stats on a finished hunt."""
  client_ids = self.SetupClients(10)

  with hunts.GRRHunt.StartHunt(
      hunt_name="GenericHunt",
      flow_runner_args=rdfvalue.FlowRunnerArgs(
          flow_name="GetFile"),
      flow_args=rdfvalue.GetFileArgs(
          pathspec=rdfvalue.PathSpec(
              path="/tmp/evil.txt",
              pathtype=rdfvalue.PathSpec.PathType.OS,
          )
      ),
      regex_rules=[rdfvalue.ForemanAttributeRegex(
          attribute_name="GRR client", attribute_regex="GRR")],
      output_plugins=[],
      client_rate=0,
      token=self.token) as hunt:
    hunt.Run()

  with aff4.FACTORY.Open(
      "aff4:/foreman", mode="rw", token=self.token) as foreman:
    for client_id in client_ids:
      foreman.AssignTasksToClient(client_id)

  client_mock = test_lib.SampleHuntMock()
  test_lib.TestHuntHelper(client_mock, client_ids, False, self.token)

  hunt = aff4.FACTORY.Open(hunt.urn, aff4_type="GenericHunt",
                           token=self.token)

  # This is called once for each state method. Each flow above runs the
  # Start and the StoreResults methods.
  # NOTE(review): the expected means/stddevs below presumably come from
  # SampleHuntMock's synthetic per-client resource numbers — confirm
  # against the mock before changing them.
  usage_stats = hunt.state.context.usage_stats
  self.assertEqual(usage_stats.user_cpu_stats.num, 10)
  self.assertTrue(math.fabs(usage_stats.user_cpu_stats.mean - 5.5) < 1e-7)
  self.assertTrue(math.fabs(usage_stats.user_cpu_stats.std -
                            2.8722813) < 1e-7)

  self.assertEqual(usage_stats.system_cpu_stats.num, 10)
  self.assertTrue(math.fabs(usage_stats.system_cpu_stats.mean - 11) < 1e-7)
  self.assertTrue(math.fabs(usage_stats.system_cpu_stats.std -
                            5.7445626) < 1e-7)

  self.assertEqual(usage_stats.network_bytes_sent_stats.num, 10)
  self.assertTrue(math.fabs(usage_stats.network_bytes_sent_stats.mean -
                            16.5) < 1e-7)
  self.assertTrue(math.fabs(usage_stats.network_bytes_sent_stats.std -
                            8.61684396) < 1e-7)

  # NOTE: Not checking histograms here. RunningStatsTest tests that mean,
  # standard deviation and histograms are calculated correctly. Therefore
  # if mean/stdev values are correct histograms should be ok as well.

  self.assertEqual(len(usage_stats.worst_performers), 10)

  # Worst performers must be sorted by total (user + system) CPU time,
  # strictly descending.
  prev = usage_stats.worst_performers[0]
  for p in usage_stats.worst_performers[1:]:
    self.assertTrue(prev.cpu_usage.user_cpu_time +
                    prev.cpu_usage.system_cpu_time >
                    p.cpu_usage.user_cpu_time +
                    p.cpu_usage.system_cpu_time)
    prev = p
def testHuntModificationWorksCorrectly(self):
  """This tests running the hunt on some clients."""
  with hunts.GRRHunt.StartHunt(
      hunt_name="GenericHunt",
      flow_runner_args=rdfvalue.FlowRunnerArgs(flow_name="GetFile"),
      flow_args=rdfvalue.GetFileArgs(
          pathspec=rdfvalue.PathSpec(
              path="/tmp/evil.txt",
              pathtype=rdfvalue.PathSpec.PathType.OS),
      ),
      regex_rules=[rdfvalue.ForemanAttributeRegex(
          attribute_name="GRR client", attribute_regex="GRR")],
      client_limit=1,
      client_rate=0,
      token=self.token) as hunt:
    hunt.Run()

  # Forget about hunt object, we'll use AFF4 for everything.
  hunt_session_id = hunt.session_id
  hunt = None

  # Pretend to be the foreman now and dish out hunting jobs to all the
  # client..
  with aff4.FACTORY.Open(
      "aff4:/foreman", mode="rw", token=self.token) as foreman:
    for client_id in self.client_ids:
      foreman.AssignTasksToClient(client_id)

  # Run the hunt.
  client_mock = test_lib.SampleHuntMock()
  test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

  # Re-open the hunt to get fresh data.
  hunt_obj = aff4.FACTORY.Open(hunt_session_id, age=aff4.ALL_TIMES,
                               ignore_cache=True, token=self.token)

  started = hunt_obj.GetValuesForAttribute(hunt_obj.Schema.CLIENTS)
  # There should be only one client, due to the limit
  self.assertEqual(len(set(started)), 1)

  # Check the hunt is paused.
  self.assertEqual(hunt_obj.Get(hunt_obj.Schema.STATE), "PAUSED")

  # Raise the client limit and restart the hunt so the remaining clients
  # can be scheduled.
  with aff4.FACTORY.Open(
      hunt_session_id, mode="rw", token=self.token) as hunt_obj:
    with hunt_obj.GetRunner() as runner:
      runner.args.client_limit = 10
      runner.Start()

  # Pretend to be the foreman now and dish out hunting jobs to all the
  # clients.
  with aff4.FACTORY.Open(
      "aff4:/foreman", mode="rw", token=self.token) as foreman:
    for client_id in self.client_ids:
      foreman.AssignTasksToClient(client_id)

  test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

  hunt_obj = aff4.FACTORY.Open(hunt_session_id, age=aff4.ALL_TIMES,
                               token=self.token)
  started = hunt_obj.GetValuesForAttribute(hunt_obj.Schema.CLIENTS)
  # After raising the limit, all 10 clients should have been scheduled.
  self.assertEqual(len(set(started)), 10)
def RunHunt(self, **mock_kwargs):
  """Runs the current hunt over self.client_ids with a mocked client.

  Args:
    **mock_kwargs: Forwarded to SampleHuntMock (e.g. failrate).
  """
  test_lib.TestHuntHelper(test_lib.SampleHuntMock(**mock_kwargs),
                          self.client_ids, False, self.token)
def testEmailPlugin(self):
  """Tests that the email output plugin sends capped notification emails."""

  def SendEmail(address, sender, title, message, **_):
    # Capture outgoing mail instead of sending it.
    self.email_messages.append(
        dict(address=address, sender=sender, title=title, message=message))

  with utils.Stubber(email_alerts, "SendEmail", SendEmail):
    self.email_messages = []
    # NOTE(review): this assignment looks redundant — the Stubber above
    # presumably already installed SendEmail; confirm and remove.
    email_alerts.SendEmail = SendEmail

    email_address = "notify@%s" % config_lib.CONFIG["Logging.domain"]
    hunt_urn = self.RunHunt(
        "EmailPlugin",
        rdfvalue.EmailPluginArgs(email=email_address, email_limit=10))
    hunt_obj = aff4.FACTORY.Open(hunt_urn, age=aff4.ALL_TIMES, mode="rw",
                                 token=self.token)
    self.client_ids = self.SetupClients(40)
    hunt_obj.StartClients(hunt_obj.session_id, self.client_ids)

    # Run the hunt.
    client_mock = test_lib.SampleHuntMock()
    test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

    # Run cron flow that executes actual output plugins
    for _ in test_lib.TestFlowHelper("ProcessHuntResultsCronFlow",
                                     token=self.token):
      pass

    # Stop the hunt now.
    hunt_obj.GetRunner().Stop()

    hunt_obj = aff4.FACTORY.Open(hunt_urn, age=aff4.ALL_TIMES,
                                 token=self.token)

    started, finished, errors = hunt_obj.GetClientsCounts()
    self.assertEqual(started, 40)
    self.assertEqual(finished, 40)
    self.assertEqual(errors, 20)

    collection = aff4.FACTORY.Open(hunt_urn.Add("Results"), mode="r",
                                   token=self.token)

    self.assertEqual(len(collection), 20)

    # Due to the limit there should only be 10 messages.
    self.assertEqual(len(self.email_messages), 10)

    for msg in self.email_messages:
      self.assertEqual(msg["address"], email_address)
      self.assertTrue(
          "%s got a new result" % hunt_obj.session_id.Add("Results")
          in msg["title"])
      self.assertTrue("fs/os/tmp/evil.txt" in msg["message"])

    # The final email should announce that further emails are suppressed.
    self.assertTrue("sending of emails will be disabled now" in
                    self.email_messages[-1]["message"])