def testProcessing(self): """This tests running the hunt on some clients.""" # Set up 10 clients. client_ids = self.SetupClients(10) client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[ foreman_rules.ForemanClientRule( rule_type=foreman_rules.ForemanClientRule.Type.REGEX, regex=foreman_rules.ForemanRegexClientRule( field="CLIENT_NAME", attribute_regex="GRR")) ]) with implementation.StartHunt(hunt_name=standard.SampleHunt.__name__, client_rule_set=client_rule_set, client_rate=0, token=self.token) as hunt: hunt.GetRunner().Start() foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token) for client_id in client_ids: foreman.AssignTasksToClient(client_id.Basename()) # Run the hunt. client_mock = hunt_test_lib.SampleHuntMock(failrate=2) hunt_test_lib.TestHuntHelper(client_mock, client_ids, False, self.token) hunt_obj = aff4.FACTORY.Open(hunt.session_id, mode="r", age=aff4.ALL_TIMES, aff4_type=standard.SampleHunt, token=self.token) started, finished, _ = hunt_obj.GetClientsCounts() self.assertEqual(started, 10) self.assertEqual(finished, 10)
def testHangingClients(self): """This tests if the hunt completes when some clients hang or raise.""" # Set up 10 clients. client_ids = self.SetupClients(10) client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[ foreman_rules.ForemanClientRule( rule_type=foreman_rules.ForemanClientRule.Type.REGEX, regex=foreman_rules.ForemanRegexClientRule( field="CLIENT_NAME", attribute_regex="GRR")) ]) with implementation.StartHunt(hunt_name=standard.SampleHunt.__name__, client_rule_set=client_rule_set, client_rate=0, token=self.token) as hunt: hunt.GetRunner().Start() foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token) for client_id in client_ids: foreman.AssignTasksToClient(client_id.Basename()) client_mock = hunt_test_lib.SampleHuntMock(failrate=2) # Just pass 8 clients to run, the other two went offline. hunt_test_lib.TestHuntHelper(client_mock, client_ids[1:9], False, self.token) hunt_obj = aff4.FACTORY.Open(hunt.session_id, mode="rw", age=aff4.ALL_TIMES, token=self.token) started, finished, _ = hunt_obj.GetClientsCounts() # We started the hunt on 10 clients. self.assertEqual(started, 10) # But only 8 should have finished. self.assertEqual(finished, 8)
def testClientLimit(self): """This tests that we can limit hunts to a number of clients.""" # Set up 10 clients. client_ids = self.SetupClients(10) client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[ foreman_rules.ForemanClientRule( rule_type=foreman_rules.ForemanClientRule.Type.REGEX, regex=foreman_rules.ForemanRegexClientRule( field="CLIENT_NAME", attribute_regex="GRR")) ]) with implementation.StartHunt(hunt_name=standard.SampleHunt.__name__, client_limit=5, client_rule_set=client_rule_set, client_rate=0, token=self.token) as hunt: hunt.Run() foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token) for client_id in client_ids: foreman.AssignTasksToClient(client_id.Basename()) # Run the hunt. client_mock = hunt_test_lib.SampleHuntMock(failrate=2) hunt_test_lib.TestHuntHelper(client_mock, client_ids, False, self.token) hunt_obj = aff4.FACTORY.Open(hunt.urn, mode="rw", age=aff4.ALL_TIMES, token=self.token) started, finished, _ = hunt_obj.GetClientsCounts() # We limited here to 5 clients. self.assertEqual(started, 5) self.assertEqual(finished, 5)
def Run(self):
  """Generates reference API output for GetHuntClientCompletionStats."""
  # Client setup differs between the relational and the legacy (AFF4)
  # data store implementations.
  if data_store.RelationalDBEnabled():
    clients = self.SetupTestClientObjects(10)
    client_ids = sorted(clients)
  else:
    client_ids = [urn.Basename() for urn in self.SetupClients(10)]

  client_mock = hunt_test_lib.SampleHuntMock(failrate=2)

  if data_store.RelationalDBEnabled():
    hunt_id = self.CreateHunt(description="the hunt")
    hunt.StartHunt(hunt_id)
  else:
    # Pin the hunt creation time so the generated stats are deterministic.
    with test_lib.FakeTime(42):
      with self.CreateHunt(description="the hunt") as hunt_obj:
        hunt_obj.Run()
        hunt_id = hunt_obj.urn.Basename()

  # Schedule and run the clients one by one at 10-second (fake time)
  # intervals so the completion-stats graph gets distinct data points.
  time_offset = 0
  for client_id in client_ids:
    with test_lib.FakeTime(45 + time_offset):
      self.AssignTasksToClients([client_id])
      hunt_test_lib.TestHuntHelper(
          client_mock, [rdf_client.ClientURN(client_id)], False, self.token)
      time_offset += 10

  # Normalize the randomly-generated hunt id so golden outputs stay stable.
  replace = {hunt_id: "H:123456"}
  self.Check(
      "GetHuntClientCompletionStats",
      args=hunt_plugin.ApiGetHuntClientCompletionStatsArgs(hunt_id=hunt_id),
      replace=replace)
  self.Check(
      "GetHuntClientCompletionStats",
      args=hunt_plugin.ApiGetHuntClientCompletionStatsArgs(
          hunt_id=hunt_id, size=4),
      replace=replace)
  self.Check(
      "GetHuntClientCompletionStats",
      args=hunt_plugin.ApiGetHuntClientCompletionStatsArgs(
          hunt_id=hunt_id, size=1000),
      replace=replace)
def setUp(self):
  """Creates a FileFinder hunt and runs it on 10 freshly set-up clients."""
  super(ApiGetHuntFilesArchiveHandlerTest, self).setUp()

  self.handler = hunt_plugin.ApiGetHuntFilesArchiveHandler()

  # A DOWNLOAD-action FileFinder over a single known test file.
  finder_args = rdf_file_finder.FileFinderArgs(
      paths=[os.path.join(self.base_path, "test.plist")],
      action=rdf_file_finder.FileFinderAction(action_type="DOWNLOAD"),
  )
  runner_args = rdf_flow_runner.FlowRunnerArgs(
      flow_name=file_finder.FileFinder.__name__)

  self.hunt = implementation.StartHunt(
      hunt_name=standard.GenericHunt.__name__,
      flow_runner_args=runner_args,
      flow_args=finder_args,
      client_rate=0,
      token=self.token)
  self.hunt.Run()

  hunt_clients = self.SetupClients(10)
  self.AssignTasksToClients(client_ids=hunt_clients)
  file_finder_mock = action_mocks.FileFinderClientMock()
  hunt_test_lib.TestHuntHelper(file_finder_mock, hunt_clients,
                               token=self.token)
def testNotifyAboutEndDoesNothingWhenFlowsRunInsideHunt(self):
  """Flows started inside a hunt must not emit end-of-flow notifications."""
  self.CreateUser(self.token.username)

  # Create a user with a custom name to make sure the name is not in the list
  # of system names and that notifications are going to be delivered.
  custom_token = access_control.ACLToken(username="******", reason="testing")
  self.CreateUser(custom_token.username)

  hunt = implementation.StartHunt(
      hunt_name=standard.GenericHunt.__name__,
      flow_runner_args=rdf_flow_runner.FlowRunnerArgs(
          flow_name=FlowWithCustomNotifyAboutEnd.__name__),  # pylint: disable=undefined-variable
      client_rate=0,
      token=custom_token)
  hunt.Run()

  hunt_clients = self.SetupClients(5)
  self.AssignTasksToClients(client_ids=hunt_clients)
  hunt_test_lib.TestHuntHelper(None, hunt_clients, token=self.token)

  # Despite the custom NotifyAboutEnd, no notifications may be delivered.
  self.assertEmpty(self.GetUserNotifications(custom_token.username))
def testBrokenHunt(self): """This tests the behavior when a hunt raises an exception.""" # Set up 10 clients. client_ids = self.SetupClients(10) client_rule_set = rdf_foreman.ForemanClientRuleSet(rules=[ rdf_foreman.ForemanClientRule( rule_type=rdf_foreman.ForemanClientRule.Type.REGEX, regex=rdf_foreman.ForemanRegexClientRule( attribute_name="GRR client", attribute_regex="GRR")) ]) with implementation.GRRHunt.StartHunt( hunt_name=BrokenSampleHunt.__name__, client_rule_set=client_rule_set, client_rate=0, token=self.token) as hunt: hunt.GetRunner().Start() foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token) for client_id in client_ids: foreman.AssignTasksToClient(client_id) # Run the hunt. client_mock = hunt_test_lib.SampleHuntMock() hunt_test_lib.TestHuntHelper(client_mock, client_ids, False, self.token) hunt_obj = aff4.FACTORY.Open( hunt.session_id, mode="rw", age=aff4.ALL_TIMES, token=self.token) started, finished, errors = hunt_obj.GetClientsCounts() self.assertEqual(started, 10) # There should be errors for the five clients where the hunt raised. self.assertEqual(errors, 5) # All of the clients that have the file should still finish eventually. self.assertEqual(finished, 5)
def SetupTestHuntView(self, client_limit=0, client_count=10):
  """Creates and runs a sample hunt, then verifies its client counts.

  Args:
    client_limit: Maximum number of clients the hunt may run on; 0 means
      unlimited.
    client_count: Number of clients to create for the hunt.
  """
  # Create some clients and a hunt to view.
  with self.CreateSampleHunt(
      client_limit=client_limit, client_count=client_count) as hunt:
    hunt.Log("TestLogLine")

    # Log an error just with some random traceback.
    hunt.LogClientError(self.client_ids[1], "Client Error 1",
                        traceback.format_exc())

  # Run the hunt.
  client_mock = hunt_test_lib.SampleHuntMock()
  hunt_test_lib.TestHuntHelper(client_mock, self.client_ids, False,
                               self.token)

  # Re-open the hunt to read the up-to-date client counters.
  hunt = aff4.FACTORY.Open(hunt.urn, token=self.token)
  all_count, _, _ = hunt.GetClientsCounts()
  if client_limit == 0:
    # No limit, so we should have all the clients
    self.assertEqual(all_count, client_count)
  else:
    self.assertEqual(all_count, min(client_count, client_limit))
def testHuntNotifications(self):
  """This tests the Hunt notification event."""
  # Reset the listener's class-level event buffer before the run.
  TestHuntListener.received_events = []

  # Set up 10 clients.
  client_ids = self.SetupClients(10)

  # Rule set matching every test client (they all report client name "GRR").
  client_rule_set = rdf_foreman.ForemanClientRuleSet(rules=[
      rdf_foreman.ForemanClientRule(
          rule_type=rdf_foreman.ForemanClientRule.Type.REGEX,
          regex=rdf_foreman.ForemanRegexClientRule(
              field="CLIENT_NAME", attribute_regex="GRR"))
  ])

  # notification_event routes hunt events to the "TestHuntDone" listener.
  with implementation.GRRHunt.StartHunt(
      hunt_name=BrokenSampleHunt.__name__,
      client_rule_set=client_rule_set,
      client_rate=0,
      notification_event="TestHuntDone",
      token=self.token) as hunt:
    hunt.GetRunner().Start()

    foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
    for client_id in client_ids:
      foreman.AssignTasksToClient(client_id.Basename())

  # Run the hunt.
  client_mock = hunt_test_lib.SampleHuntMock()
  hunt_test_lib.TestHuntHelper(
      client_mock, client_ids, check_flow_errors=False, token=self.token)

  # Exactly 5 events are expected — presumably one per client on which
  # BrokenSampleHunt errored out; verify against the listener/hunt code.
  self.assertEqual(len(TestHuntListener.received_events), 5)
def testHuntCreatorIsNotifiedWhenHuntIsStoppedDueToCrashes(self):
  """UI test: crash-limit stop produces a notification and a hunt log line."""
  with self.CreateHunt(crash_limit=3, token=self.token) as hunt:
    hunt.Run()

  # Run the hunt on 3 clients, one by one. Crash detection check happens
  # when client is scheduled, so it's important to schedule the clients
  # one by one in the test.
  for client_id in self.SetupClients(3):
    self.AssignTasksToClients([client_id])
    client_mock = flow_test_lib.CrashClientMock(client_id, token=self.token)
    hunt_test_lib.TestHuntHelper(
        client_mock, [client_id], check_flow_errors=False, token=self.token)

  self.Open("/")

  # Wait until the notification is there and show the notifications list.
  self.WaitUntilEqual("1", self.GetText,
                      "css=button[id=notification_button]")
  self.Click("css=button[id=notification_button]")

  # Click on the "hunt [id] reached the crashes limit" notification.
  self.Click("css=td:contains(Hunt %s reached the crashes limit)" %
             hunt.urn.Basename())

  # Clicking on the notification should show the hunt's overview page.
  self.WaitUntil(self.IsTextPresent, "/tmp/evil.txt")

  # Go to the logs and check that the reason for the hunt's stopping is in
  # the hunt's logs.
  # Click the Log Tab.
  self.Click("css=li[heading=Log]")
  self.WaitUntil(
      self.IsTextPresent,
      "Hunt %s reached the crashes limit of 3 and was stopped." %
      hunt.urn.Basename())
def testListHuntClients(self):
  """Checks ListHuntClients on a half-finished hunt over 5 clients."""
  hunt = implementation.GRRHunt.StartHunt(
      hunt_name=standard.GenericHunt.__name__,
      flow_runner_args=rdf_flows.FlowRunnerArgs(
          flow_name=file_finder.FileFinder.__name__),
      flow_args=rdf_file_finder.FileFinderArgs(
          paths=[os.path.join(self.base_path, "test.plist")],
          action=rdf_file_finder.FileFinderAction(action_type="DOWNLOAD"),
      ),
      client_rate=0,
      token=self.token)
  hunt.Run()

  client_ids = self.SetupClients(5)
  self.AssignTasksToClients(client_ids=client_ids)

  # iteration_limit caps processing so the hunt is left half-finished:
  # some clients should still have pending requests afterwards.
  action_mock = action_mocks.FileFinderClientMock()
  hunt_test_lib.TestHuntHelper(
      action_mock, client_ids, iteration_limit=10, token=self.token)

  result = self.handler.Handle(
      hunt_plugin.ApiListHuntClientsArgs(hunt_id=hunt.urn.Basename()),
      token=self.token)

  # TODO(user): This still uses data store internals and will fail on some
  # data stores.

  # This is not super deterministic, we start processing some
  # clients, run the hunt for a bit but there is no order to all
  # this. We should have some clients half finished though (i.e.,
  # with pending requests) and five clients in total.
  self.assertEqual(result.total_count, 5)

  clients = list(result.items)
  pending_requests = [client.pending_requests for client in clients]

  # BUG FIX: the original passed a bare generator expression to
  # assertTrue() -- `assertTrue(any(r.next_state) for r in ...)` -- which
  # is always truthy and never evaluated the per-request condition.
  # Flatten the per-client request lists and actually assert that at least
  # one pending request carries a next_state.
  self.assertTrue(
      any(request.next_state
          for requests in pending_requests
          for request in requests))
def testStartClients(self):
  """Starting a hunt on a client launches exactly one flow on it."""
  with implementation.GRRHunt.StartHunt(
      hunt_name=standard.SampleHunt.__name__, client_rate=0,
      token=self.token) as hunt:
    hunt.GetRunner().Start()

  def ClientFlowUrns():
    # Lists the flow URNs currently present under the test client.
    flows_dir = aff4.FACTORY.Open(
        self.client_id.Add("flows"), token=self.token)
    return list(flows_dir.ListChildren())

  # Nothing scheduled yet, so the client has no flows.
  self.assertEqual(ClientFlowUrns(), [])

  implementation.GRRHunt.StartClients(hunt.session_id, [self.client_id])
  hunt_test_lib.TestHuntHelper(None, [self.client_id], False, self.token)

  # One flow should have been started.
  started_flows = ClientFlowUrns()
  self.assertEqual(len(started_flows), 1)
  self.assertIn(hunt.session_id.Basename(), str(started_flows[0]))
def RunVariableGenericHunt(self):
  """Creates, runs and stops a VariableGenericHunt; returns the stopped hunt."""
  args = standard.VariableGenericHuntArgs()
  self._AppendFlowRequest(args.flows, 1, 1)
  self._AppendFlowRequest(args.flows, 2, 2)
  self._AppendFlowRequest(args.flows, 2, 3)

  with implementation.StartHunt(
      hunt_name=standard.VariableGenericHunt.__name__,
      args=args,
      client_rate=0,
      token=self.token) as hunt:
    hunt.Run()
    hunt.ManuallyScheduleClients()

  # Run the hunt.
  client_mock = hunt_test_lib.SampleHuntMock(failrate=100)
  hunt_test_lib.TestHuntHelper(client_mock, self.client_ids, False,
                               self.token)

  # NOTE: `hunt` is deliberately rebound here to a fresh, writable copy so
  # Stop() operates on (and flushes) the up-to-date hunt object.
  with aff4.FACTORY.Open(
      hunt.session_id, mode="rw", token=self.token) as hunt:
    hunt.Stop()

  return hunt
def CrashClient(client_id):
  """Schedules client_id on the hunt and simulates a crash on it."""
  self.AssignTasksToClients([client_id])
  crash_mock = flow_test_lib.CrashClientMock(client_id, token=self.token)
  hunt_test_lib.TestHuntHelper(
      crash_mock, [client_id], check_flow_errors=False, token=self.token)
def RunOnClients(client_ids, num_processes):
  """Runs the hunt on client_ids, each reporting num_processes processes."""
  # Every mocked client reports the same process num_processes times.
  fake_processes = [rdf_client.Process(pid=1, exe="a.exe")] * num_processes
  process_mock = action_mocks.ListProcessesMock(fake_processes)

  self.AssignTasksToClients(client_ids)
  hunt_test_lib.TestHuntHelper(
      process_mock, client_ids, check_flow_errors=False, token=self.token)