def testHuntIsStoppedIfTotalNetworkUsageIsTooHigh(self):
  client_ids = self.SetupClients(5)

  hunt_id = self._CreateHunt(
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      total_network_bytes_limit=5,
      args=self.GetFileHuntArgs())

  def CheckState(hunt_state, network_bytes_sent):
    hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
    self.assertEqual(hunt_obj.hunt_state, hunt_state)
    hunt_counters = data_store.REL_DB.ReadHuntCounters(hunt_id)
    self.assertEqual(hunt_counters.total_network_bytes_sent,
                     network_bytes_sent)

  self._RunHunt(
      client_ids[:2],
      client_mock=hunt_test_lib.SampleHuntMock(network_bytes_sent=2))

  # 4 is lower than the total limit. The hunt should still be running.
  CheckState(rdf_hunt_objects.Hunt.HuntState.STARTED, 4)

  self._RunHunt(
      [client_ids[2]],
      client_mock=hunt_test_lib.SampleHuntMock(network_bytes_sent=1))

  # 5 is equal to the total limit. Total network bytes sent should
  # go over the limit in order for the hunt to be stopped.
  CheckState(rdf_hunt_objects.Hunt.HuntState.STARTED, 5)

  self._RunHunt(
      [client_ids[3]],
      client_mock=hunt_test_lib.SampleHuntMock(network_bytes_sent=1))

  # 6 is greater than the total limit. The hunt should be stopped now.
  CheckState(rdf_hunt_objects.Hunt.HuntState.STOPPED, 6)

  self._RunHunt(
      [client_ids[4]],
      client_mock=hunt_test_lib.SampleHuntMock(
          network_bytes_sent=2, failrate=-1))

  self._CheckHuntStoppedNotification(
      "reached the total network bytes sent limit")
def testOutputPluginsMaintainGlobalState(self):
  plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="StatefulDummyHuntOutputPlugin")

  self.assertListEqual(hunt_test_lib.StatefulDummyHuntOutputPlugin.data, [])

  _ = self._CreateAndRunHunt(
      num_clients=5,
      client_mock=hunt_test_lib.SampleHuntMock(failrate=-1),
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=self.GetFileHuntArgs(),
      output_plugins=[plugin_descriptor])

  # Output plugins should have been called 5 times, adding an incremented
  # number to the "data" list on every call.
  self.assertListEqual(hunt_test_lib.StatefulDummyHuntOutputPlugin.data,
                       [0, 1, 2, 3, 4])
def testPausingAndRestartingDoesNotStartHuntTwiceOnTheSameClient(self):
  """Tests that pausing and restarting a hunt doesn't rerun it on a client."""
  client_ids = self.SetupClients(10)

  client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
      foreman_rules.ForemanClientRule(
          rule_type=foreman_rules.ForemanClientRule.Type.REGEX,
          regex=foreman_rules.ForemanRegexClientRule(
              field="CLIENT_NAME", attribute_regex="GRR"))
  ])

  with implementation.StartHunt(
      hunt_name=standard.SampleHunt.__name__,
      client_rule_set=client_rule_set,
      client_rate=0,
      token=self.token) as hunt:
    hunt.GetRunner().Start()
    hunt_id = hunt.urn

  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    num_tasks = foreman.AssignTasksToClient(client_id.Basename())
    self.assertEqual(num_tasks, 1)

  client_mock = hunt_test_lib.SampleHuntMock()
  hunt_test_lib.TestHuntHelper(client_mock, client_ids, False, self.token)

  # Pausing and running the hunt: this leads to fresh rules being written
  # to Foreman.RULES.
  with aff4.FACTORY.Open(hunt_id, mode="rw", token=self.token) as hunt:
    runner = hunt.GetRunner()
    runner.Pause()
    runner.Start()

  # Recreate the foreman so that it updates its list of rules.
  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    num_tasks = foreman.AssignTasksToClient(client_id.Basename())
    # No tasks should be assigned as this hunt already ran on all the
    # clients before.
    self.assertEqual(num_tasks, 0)
def testPausingAndRestartingDoesNotStartHuntTwiceOnTheSameClient(self):
  hunt_id, client_ids = self._CreateAndRunHunt(
      num_clients=10,
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=self.GetFileHuntArgs())

  for client_id in client_ids:
    flows = data_store.REL_DB.ReadAllFlowObjects(client_id=client_id)
    self.assertLen(flows, 1)

  hunt.PauseHunt(hunt_id)
  hunt.StartHunt(hunt_id)

  self._RunHunt(client_ids)

  for client_id in client_ids:
    flows = data_store.REL_DB.ReadAllFlowObjects(client_id=client_id)
    self.assertLen(flows, 1)
def testFromDictMixedRules(self):
  dct = {
      "match_mode": "MATCH_ANY",
      "rules": [
          {
              "rule_type": "OS",
              "os": {
                  "os_windows": True,
                  "os_linux": True,
              },
          },
          {
              "rule_type": "LABEL",
              "label": {
                  "label_names": ["foo", "bar"],
                  "match_mode": "MATCH_ALL",
              },
          },
      ],
  }

  rdf = foreman_rules.ForemanClientRuleSet()
  rdf.FromDict(dct)

  self.assertEqual(rdf.match_mode,
                   foreman_rules.ForemanClientRuleSet.MatchMode.MATCH_ANY)
  self.assertLen(rdf.rules, 2)

  self.assertEqual(rdf.rules[0].rule_type,
                   foreman_rules.ForemanClientRule.Type.OS)
  self.assertTrue(rdf.rules[0].os.os_windows)
  self.assertTrue(rdf.rules[0].os.os_linux)
  self.assertFalse(rdf.rules[0].os.os_darwin)

  self.assertEqual(rdf.rules[1].rule_type,
                   foreman_rules.ForemanClientRule.Type.LABEL)
  self.assertEqual(rdf.rules[1].label.label_names, ["foo", "bar"])
  self.assertEqual(rdf.rules[1].label.match_mode,
                   foreman_rules.ForemanLabelClientRule.MatchMode.MATCH_ALL)
def testBrokenHunt(self):
  """This tests the behavior when a hunt raises an exception."""
  # Set up 10 clients.
  client_ids = self.SetupClients(10)

  client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
      foreman_rules.ForemanClientRule(
          rule_type=foreman_rules.ForemanClientRule.Type.REGEX,
          regex=foreman_rules.ForemanRegexClientRule(
              field="CLIENT_NAME", attribute_regex="GRR"))
  ])

  with implementation.StartHunt(
      hunt_name=BrokenSampleHunt.__name__,
      client_rule_set=client_rule_set,
      client_rate=0,
      token=self.token) as hunt:
    hunt.GetRunner().Start()

  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    foreman.AssignTasksToClient(client_id.Basename())

  # Run the hunt.
  client_mock = hunt_test_lib.SampleHuntMock(failrate=2)
  hunt_test_lib.TestHuntHelper(client_mock, client_ids, False, self.token)

  hunt_obj = aff4.FACTORY.Open(
      hunt.session_id, mode="rw", age=aff4.ALL_TIMES, token=self.token)
  started, finished, errors = hunt_obj.GetClientsCounts()

  self.assertEqual(started, 10)
  # There should be errors for the five clients where the hunt raised.
  self.assertEqual(errors, 5)
  # All of the clients that have the file should still finish eventually.
  self.assertEqual(finished, 5)
def testUpdatesStatsCounterOnOutputPluginFailure(self):
  plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="FailingDummyHuntOutputPlugin")

  # 1 error for each client makes it 5 errors, 0 results.
  with self.assertStatsCounterDelta(
      0,
      flow_base.HUNT_RESULTS_RAN_THROUGH_PLUGIN,
      fields=["FailingDummyHuntOutputPlugin"]):
    with self.assertStatsCounterDelta(
        5,
        flow_base.HUNT_OUTPUT_PLUGIN_ERRORS,
        fields=["FailingDummyHuntOutputPlugin"]):
      self._CreateAndRunHunt(
          num_clients=5,
          client_mock=hunt_test_lib.SampleHuntMock(failrate=-1),
          client_rule_set=foreman_rules.ForemanClientRuleSet(),
          client_rate=0,
          args=self.GetFileHuntArgs(),
          output_plugins=[plugin_descriptor])
def testHuntClientRateIsAppliedCorrectly(self):
  now = rdfvalue.RDFDatetime.Now()

  _, client_ids = self._CreateAndRunHunt(
      num_clients=10,
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=1,
      args=self.GetFileHuntArgs())

  requests = data_store.REL_DB.ReadFlowProcessingRequests()
  requests.sort(key=lambda r: r.delivery_time)

  # The first request is scheduled to run immediately and has been processed
  # already.
  self.assertLen(requests, 9)
  for i, (r, client_id) in enumerate(zip(requests, client_ids[1:])):
    self.assertEqual(r.client_id, client_id)
    time_diff = r.delivery_time - (
        now + rdfvalue.Duration.From(1, rdfvalue.MINUTES) * (i + 1))
    self.assertLess(time_diff, rdfvalue.Duration.From(5, rdfvalue.SECONDS))
def testEvaluatesPositiveInMatchAllModeIfAllRuleMatch(self):
  # Instantiate a rule set that matches if all of its two
  # operating system rules match.
  rs = foreman_rules.ForemanClientRuleSet(
      match_mode=foreman_rules.ForemanClientRuleSet.MatchMode.MATCH_ALL,
      rules=[
          foreman_rules.ForemanClientRule(
              rule_type=foreman_rules.ForemanClientRule.Type.OS,
              os=foreman_rules.ForemanOsClientRule(
                  os_windows=False, os_linux=True, os_darwin=False)),
          foreman_rules.ForemanClientRule(
              rule_type=foreman_rules.ForemanClientRule.Type.OS,
              os=foreman_rules.ForemanOsClientRule(
                  os_windows=False, os_linux=True, os_darwin=True))
      ])

  client_id_lin = self.SetupClient(0, system="Linux")
  # All of the set's rules have os_linux=True, so the whole set matches
  # in the match all mode.
  self.assertTrue(
      rs.Evaluate(data_store.REL_DB.ReadClientFullInfo(client_id_lin)))
def testFailingOutputPluginDoesNotAffectOtherOutputPlugins(self):
  failing_plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="FailingDummyHuntOutputPlugin")
  plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="DummyHuntOutputPlugin")

  hunt_id, _ = self._CreateAndRunHunt(
      num_clients=5,
      client_mock=hunt_test_lib.SampleHuntMock(failrate=-1),
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=self.GetFileHuntArgs(),
      output_plugins=[failing_plugin_descriptor, plugin_descriptor])

  errors = hunt.GetHuntOutputPluginErrors(hunt_id, 0, sys.maxsize)
  self.assertLen(errors, 5)

  # Check that the non-failing output plugin is still correctly processed.
  logs = hunt.GetHuntOutputPluginLogs(hunt_id, 0, sys.maxsize)
  self.assertLen(logs, 5)
def testEvaluatesNegativeInMatchAllModeIfOnlyOneRuleMatches(self):
  # Instantiate a rule set that matches if all of its two
  # operating system rules match.
  rs = foreman_rules.ForemanClientRuleSet(
      match_mode=foreman_rules.ForemanClientRuleSet.MatchMode.MATCH_ALL,
      rules=[
          foreman_rules.ForemanClientRule(
              rule_type=foreman_rules.ForemanClientRule.Type.OS,
              os=foreman_rules.ForemanOsClientRule(
                  os_windows=False, os_linux=True, os_darwin=False)),
          foreman_rules.ForemanClientRule(
              rule_type=foreman_rules.ForemanClientRule.Type.OS,
              os=foreman_rules.ForemanOsClientRule(
                  os_windows=False, os_linux=True, os_darwin=True))
      ])

  client_id_dar = self.SetupClient(0, system="Darwin")
  # One of the set's rules has os_darwin=False, so the whole set doesn't
  # match in the match all mode.
  self.assertFalse(
      rs.Evaluate(data_store.REL_DB.ReadClientFullInfo(client_id_dar)))
def testEvaluatesPositiveInMatchAnyModeIfOneRuleMatches(self):
  # Instantiate a rule set that matches if any of its two
  # operating system rules matches.
  rs = foreman_rules.ForemanClientRuleSet(
      match_mode=foreman_rules.ForemanClientRuleSet.MatchMode.MATCH_ANY,
      rules=[
          foreman_rules.ForemanClientRule(
              rule_type=foreman_rules.ForemanClientRule.Type.OS,
              os=foreman_rules.ForemanOsClientRule(
                  os_windows=False, os_linux=True, os_darwin=False)),
          foreman_rules.ForemanClientRule(
              rule_type=foreman_rules.ForemanClientRule.Type.OS,
              os=foreman_rules.ForemanOsClientRule(
                  os_windows=False, os_linux=True, os_darwin=True))
      ])

  client_id_dar = self.SetupClient(0, system="Darwin")
  # One of the set's rules has os_darwin=True, so the whole set matches
  # in the match any mode. The client info is read the same way as in the
  # sibling match-mode tests above.
  self.assertTrue(
      rs.Evaluate(data_store.REL_DB.ReadClientFullInfo(client_id_dar)))
def testOutputPluginsErrorsAreCorrectlyWrittenAndCanBeRead(self):
  failing_plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="FailingDummyHuntOutputPlugin")

  hunt_id, _ = self._CreateAndRunHunt(
      num_clients=5,
      client_mock=hunt_test_lib.SampleHuntMock(failrate=-1),
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=self.GetFileHuntArgs(),
      output_plugins=[failing_plugin_descriptor])

  errors = hunt.GetHuntOutputPluginErrors(hunt_id, 0, sys.maxsize)
  self.assertLen(errors, 5)
  for e in errors:
    self.assertEqual(e.batch_size, 1)
    self.assertEqual(
        e.status,
        output_plugin.OutputPluginBatchProcessingStatus.Status.ERROR)
    self.assertEqual(e.plugin_descriptor, failing_plugin_descriptor)
    self.assertEqual(e.summary, "Oh no!")
def testResultsAreCorrectlyCounted(self):
  path = os.path.join(self.base_path, "*hello*")
  num_files = len(glob.glob(path))
  self.assertGreater(num_files, 1)

  hunt_args = rdf_hunt_objects.HuntArguments.Standard(
      flow_name=compatibility.GetName(file_finder.FileFinder),
      flow_args=rdf_file_finder.FileFinderArgs(
          paths=[path],
          action=rdf_file_finder.FileFinderAction(action_type="STAT"),
      ))

  hunt_id, _ = self._CreateAndRunHunt(
      num_clients=5,
      client_mock=action_mocks.FileFinderClientMock(),
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=hunt_args)

  hunt_counters = data_store.REL_DB.ReadHuntCounters(hunt_id)
  self.assertEqual(hunt_counters.num_clients_with_results, 5)
  self.assertEqual(hunt_counters.num_results, 5 * num_files)
def testHangingClientsAreCorrectlyAccountedFor(self):
  client_ids = self.SetupClients(10)

  hunt_obj = rdf_hunt_objects.Hunt(
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=self.GetFileHuntArgs())
  hunt.CreateHunt(hunt_obj)
  hunt_obj = hunt.StartHunt(hunt_obj.hunt_id)

  foreman_obj = foreman.Foreman()
  for client_id in client_ids:
    foreman_obj.AssignTasksToClient(client_id)

  client_mock = hunt_test_lib.SampleHuntMock(failrate=2)
  # Only run 8 of the 10 assigned clients; the other two "hang".
  hunt_test_lib.TestHuntHelper(client_mock, client_ids[1:9])

  hunt_counters = data_store.REL_DB.ReadHuntCounters(hunt_obj.hunt_id)
  self.assertEqual(hunt_counters.num_clients, 10)
  self.assertEqual(hunt_counters.num_successful_clients, 4)
  self.assertEqual(hunt_counters.num_failed_clients, 4)
def testEvaluatesNegativeInMatchAnyModeIfNoRuleMatches(self):
  # Instantiate a rule set that matches if any of its two
  # operating system rules matches.
  rs = foreman_rules.ForemanClientRuleSet(
      match_mode=foreman_rules.ForemanClientRuleSet.MatchMode.MATCH_ANY,
      rules=[
          foreman_rules.ForemanClientRule(
              rule_type=foreman_rules.ForemanClientRule.Type.OS,
              os=foreman_rules.ForemanOsClientRule(
                  os_windows=False, os_linux=True, os_darwin=False)),
          foreman_rules.ForemanClientRule(
              rule_type=foreman_rules.ForemanClientRule.Type.OS,
              os=foreman_rules.ForemanOsClientRule(
                  os_windows=False, os_linux=True, os_darwin=True))
      ])

  client_id_win = self.SetupClient(0, system="Windows")
  # None of the set's rules has os_windows=True, so the whole set doesn't
  # match.
  self.assertFalse(
      rs.Evaluate(data_store.REL_DB.ReadClientFullInfo(client_id_win)))
def _RunRateLimitedHunt(self, client_ids, start_time):
  client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
      foreman_rules.ForemanClientRule(
          rule_type=foreman_rules.ForemanClientRule.Type.REGEX,
          regex=foreman_rules.ForemanRegexClientRule(
              field="CLIENT_NAME", attribute_regex="GRR"))
  ])

  with implementation.StartHunt(
      hunt_name=DummyHunt.__name__,
      client_rule_set=client_rule_set,
      client_rate=1,
      token=self.token) as hunt:
    hunt.Run()

  # Pretend to be the foreman now and dish out hunting jobs to all the
  # clients.
  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    foreman.AssignTasksToClient(client_id.Basename())

  self.assertEmpty(DummyHunt.client_ids)

  # Run the hunt.
  worker_mock = worker_test_lib.MockWorker(
      check_flow_errors=True, queues=[queues.HUNTS], token=self.token)

  # One client is scheduled in the first minute.
  with test_lib.FakeTime(start_time + 2):
    worker_mock.Simulate()
  self.assertLen(DummyHunt.client_ids, 1)

  # No further clients will be scheduled until the end of the first minute.
  with test_lib.FakeTime(start_time + 59):
    worker_mock.Simulate()
  self.assertLen(DummyHunt.client_ids, 1)

  return worker_mock, hunt.urn
def testClientLimit(self):
  """This tests that we can limit hunts to a number of clients."""
  # Set up 10 clients.
  client_ids = self.SetupClients(10)

  client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
      foreman_rules.ForemanClientRule(
          rule_type=foreman_rules.ForemanClientRule.Type.REGEX,
          regex=foreman_rules.ForemanRegexClientRule(
              field="CLIENT_NAME", attribute_regex="GRR"))
  ])

  with implementation.StartHunt(
      hunt_name=standard.SampleHunt.__name__,
      client_limit=5,
      client_rule_set=client_rule_set,
      client_rate=0,
      token=self.token) as hunt:
    hunt.Run()

  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    foreman.AssignTasksToClient(client_id.Basename())

  # Run the hunt.
  client_mock = hunt_test_lib.SampleHuntMock(failrate=2)
  hunt_test_lib.TestHuntHelper(client_mock, client_ids, False, self.token)

  hunt_obj = aff4.FACTORY.Open(
      hunt.urn, mode="rw", age=aff4.ALL_TIMES, token=self.token)
  started, finished, _ = hunt_obj.GetClientsCounts()

  # We limited the hunt to 5 clients.
  self.assertEqual(started, 5)
  self.assertEqual(finished, 5)
def testHangingClients(self):
  """This tests if the hunt completes when some clients hang or raise."""
  # Set up 10 clients.
  client_ids = self.SetupClients(10)

  client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
      foreman_rules.ForemanClientRule(
          rule_type=foreman_rules.ForemanClientRule.Type.REGEX,
          regex=foreman_rules.ForemanRegexClientRule(
              field="CLIENT_NAME", attribute_regex="GRR"))
  ])

  with implementation.StartHunt(
      hunt_name=standard.SampleHunt.__name__,
      client_rule_set=client_rule_set,
      client_rate=0,
      token=self.token) as hunt:
    hunt.GetRunner().Start()

  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    foreman.AssignTasksToClient(client_id.Basename())

  client_mock = hunt_test_lib.SampleHuntMock(failrate=2)
  # Just pass 8 clients to run, the other two went offline.
  hunt_test_lib.TestHuntHelper(client_mock, client_ids[1:9], False,
                               self.token)

  hunt_obj = aff4.FACTORY.Open(
      hunt.session_id, mode="rw", age=aff4.ALL_TIMES, token=self.token)
  started, finished, _ = hunt_obj.GetClientsCounts()

  # We started the hunt on 10 clients.
  self.assertEqual(started, 10)
  # But only 8 should have finished.
  self.assertEqual(finished, 8)
def testProcessing(self):
  """This tests running the hunt on some clients."""
  # Set up 10 clients.
  client_ids = self.SetupClients(10)

  client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
      foreman_rules.ForemanClientRule(
          rule_type=foreman_rules.ForemanClientRule.Type.REGEX,
          regex=foreman_rules.ForemanRegexClientRule(
              field="CLIENT_NAME", attribute_regex="GRR"))
  ])

  with implementation.StartHunt(
      hunt_name=standard.SampleHunt.__name__,
      client_rule_set=client_rule_set,
      client_rate=0,
      token=self.token) as hunt:
    hunt.GetRunner().Start()

  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    foreman.AssignTasksToClient(client_id.Basename())

  # Run the hunt.
  client_mock = hunt_test_lib.SampleHuntMock(failrate=2)
  hunt_test_lib.TestHuntHelper(client_mock, client_ids, False, self.token)

  hunt_obj = aff4.FACTORY.Open(
      hunt.session_id,
      mode="r",
      age=aff4.ALL_TIMES,
      aff4_type=standard.SampleHunt,
      token=self.token)

  started, finished, _ = hunt_obj.GetClientsCounts()
  self.assertEqual(started, 10)
  self.assertEqual(finished, 10)
def testCopyHuntPreservesRuleType(self):
  implementation.StartHunt(
      hunt_name=standard.GenericHunt.__name__,
      description="model hunt",
      flow_runner_args=rdf_flow_runner.FlowRunnerArgs(
          flow_name=transfer.GetFile.__name__),
      flow_args=transfer.GetFileArgs(
          pathspec=rdf_paths.PathSpec(
              path="/tmp/evil.txt",
              pathtype=rdf_paths.PathSpec.PathType.TSK,
          )),
      client_rule_set=foreman_rules.ForemanClientRuleSet(rules=[
          foreman_rules.ForemanClientRule(
              rule_type=foreman_rules.ForemanClientRule.Type.OS,
              os=foreman_rules.ForemanOsClientRule(os_darwin=True))
      ]),
      token=self.token)

  self.Open("/#main=ManageHunts")
  self.Click("css=tr:contains('model hunt')")
  self.Click("css=button[name=CopyHunt]:not([disabled])")

  # Wait until the dialog appears.
  self.WaitUntil(self.IsElementPresent,
                 "css=grr-wizard-form:contains('What to run?')")

  # Click on the "Next" button.
  self.Click("css=grr-new-hunt-wizard-form button.Next")
  self.WaitUntil(self.IsElementPresent,
                 "css=grr-wizard-form:contains('Hunt parameters')")

  # Click on the "Next" button.
  self.Click("css=grr-new-hunt-wizard-form button.Next")
  self.WaitUntil(self.IsElementPresent,
                 "css=grr-wizard-form:contains('How to process results')")

  # Click on the "Next" button.
  self.Click("css=grr-new-hunt-wizard-form button.Next")
  self.WaitUntil(self.IsElementPresent,
                 "css=grr-wizard-form:contains('Where to run?')")

  self.WaitUntil(
      self.IsElementPresent, "css=grr-new-hunt-wizard-form "
      "label:contains('Os darwin') ~ * input:checked")
def testHuntFlowLogsAreCorrectlyWrittenAndCanBeRead(self):
  hunt_args = rdf_hunt_objects.HuntArguments.Standard(
      flow_name=compatibility.GetName(flow_test_lib.DummyLogFlow))

  hunt_id, client_ids = self._CreateAndRunHunt(
      num_clients=10,
      client_mock=hunt_test_lib.SampleHuntMock(failrate=-1),
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=hunt_args)

  hunt_logs = data_store.REL_DB.ReadHuntLogEntries(hunt_id, 0, sys.maxsize)
  # 4 log messages for each flow, and 2 flows run per client.
  self.assertLen(hunt_logs, 8 * len(client_ids))
  self.assertCountEqual(
      set(log.client_id for log in hunt_logs),
      [c.Basename() for c in client_ids])
  self.assertCountEqual(
      set(log.message for log in hunt_logs),
      ["First", "Second", "Third", "Fourth", "Uno", "Dos", "Tres", "Cuatro"])

  for log in hunt_logs:
    self.assertEqual(log.hunt_id, hunt_id)
def testHuntIsStoppedIfCrashNumberOverThreshold(self):
  client_ids = self.SetupClients(4)

  hunt_id = self._CreateHunt(
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      crash_limit=3,
      args=self.GetFileHuntArgs())

  client_mock = flow_test_lib.CrashClientMock()
  self._RunHunt(client_ids[:2], client_mock=client_mock)

  hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
  self.assertEqual(hunt_obj.hunt_state,
                   rdf_hunt_objects.Hunt.HuntState.STARTED)

  self._RunHunt(client_ids[2:], client_mock=client_mock)

  hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
  self.assertEqual(hunt_obj.hunt_state,
                   rdf_hunt_objects.Hunt.HuntState.STOPPED)

  self._CheckHuntStoppedNotification("reached the crashes limit")
def testPausingTheHuntChangingParametersAndStartingAgainWorksAsExpected(self):
  client_ids = self.SetupClients(2)

  hunt_id = self._CreateHunt(
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      client_limit=1,
      args=self.GetFileHuntArgs())

  self._RunHunt(client_ids[:2])
  hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
  self.assertEqual(hunt_obj.hunt_state,
                   rdf_hunt_objects.Hunt.HuntState.PAUSED)

  # There should be only one client, due to the limit.
  hunt_counters = data_store.REL_DB.ReadHuntCounters(hunt_id)
  self.assertEqual(hunt_counters.num_clients, 1)

  hunt.UpdateHunt(hunt_id, client_limit=10)
  hunt.StartHunt(hunt_id)

  self._RunHunt(client_ids[:2])
  hunt_counters = data_store.REL_DB.ReadHuntCounters(hunt_id)
  self.assertEqual(hunt_counters.num_clients, 2)
def testResourceUsageStatsAreReportedCorrectly(self):
  hunt_id, _ = self._CreateAndRunHunt(
      num_clients=10,
      client_mock=hunt_test_lib.SampleHuntMock(failrate=-1),
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=self.GetFileHuntArgs())

  hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
  usage_stats = hunt_obj.client_resources_stats

  # Values below are calculated based on SampleHuntMock's behavior.
  self.assertEqual(usage_stats.user_cpu_stats.num, 10)
  self.assertAlmostEqual(usage_stats.user_cpu_stats.mean, 5.5)
  self.assertAlmostEqual(usage_stats.user_cpu_stats.std, 2.8722813)

  self.assertEqual(usage_stats.system_cpu_stats.num, 10)
  self.assertAlmostEqual(usage_stats.system_cpu_stats.mean, 11)
  self.assertAlmostEqual(usage_stats.system_cpu_stats.std, 5.7445626)

  self.assertEqual(usage_stats.network_bytes_sent_stats.num, 10)
  self.assertAlmostEqual(usage_stats.network_bytes_sent_stats.mean, 16.5)
  self.assertAlmostEqual(usage_stats.network_bytes_sent_stats.std,
                         8.61684396)

  # NOTE: Not checking histograms here. RunningStatsTest tests that mean,
  # standard deviation and histograms are calculated correctly. Therefore,
  # if mean/stdev values are correct, histograms should be ok as well.

  self.assertLen(usage_stats.worst_performers, 10)

  prev = usage_stats.worst_performers[0]
  for p in usage_stats.worst_performers[1:]:
    self.assertGreater(
        prev.cpu_usage.user_cpu_time + prev.cpu_usage.system_cpu_time,
        p.cpu_usage.user_cpu_time + p.cpu_usage.system_cpu_time)
    prev = p
def testForemanRulesAreCorrectlyPropagatedWhenHuntStarts(self):
  client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
      foreman_rules.ForemanClientRule(
          rule_type=foreman_rules.ForemanClientRule.Type.REGEX,
          regex=foreman_rules.ForemanRegexClientRule(
              field="CLIENT_NAME", attribute_regex="HUNT")),
      foreman_rules.ForemanClientRule(
          rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
          integer=foreman_rules.ForemanIntegerClientRule(
              field="CLIENT_CLOCK",
              operator=foreman_rules.ForemanIntegerClientRule.Operator
              .GREATER_THAN,
              value=1336650631137737))
  ])

  self.assertEmpty(data_store.REL_DB.ReadAllForemanRules())

  hunt_obj = rdf_hunt_objects.Hunt(client_rule_set=client_rule_set)
  hunt_obj.args.hunt_type = hunt_obj.args.HuntType.STANDARD
  data_store.REL_DB.WriteHuntObject(hunt_obj)

  hunt_obj = hunt.StartHunt(hunt_obj.hunt_id)

  rules = data_store.REL_DB.ReadAllForemanRules()
  self.assertLen(rules, 1)
  rule = rules[0]
  self.assertEqual(rule.client_rule_set, client_rule_set)
  self.assertEqual(rule.hunt_id, hunt_obj.hunt_id)
  self.assertEqual(rule.expiration_time,
                   hunt_obj.init_start_time + hunt_obj.duration)

  # Running a second time should not change the rules any more.
  with self.assertRaises(hunt.OnlyPausedHuntCanBeStartedError):
    hunt.StartHunt(hunt_obj.hunt_id)

  rules = data_store.REL_DB.ReadAllForemanRules()
  self.assertLen(rules, 1)
def testHuntFlowLogsAreCorrectlyWrittenAndCanBeRead(self):
  hunt_args = rdf_hunt_objects.HuntArguments.Standard(
      flow_name=compatibility.GetName(flow_test_lib.DummyLogFlow))

  hunt_id, client_ids = self._CreateAndRunHunt(
      num_clients=10,
      client_mock=hunt_test_lib.SampleHuntMock(failrate=-1),
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=hunt_args)

  hunt_logs = data_store.REL_DB.ReadHuntLogEntries(hunt_id, 0, sys.maxsize)
  # 4 log messages for each flow. Note: DummyLogFlow also calls
  # DummyLogFlowChild, but child flows' logs should not be present in the
  # output.
  self.assertLen(hunt_logs, 4 * len(client_ids))
  self.assertCountEqual(
      set(log.client_id for log in hunt_logs), client_ids)

  messages_set = set(log.message for log in hunt_logs)
  self.assertCountEqual(messages_set,
                        ["First", "Second", "Third", "Fourth"])

  for nested_flow_log in ["Uno", "Dos", "Tres", "Cuatro"]:
    self.assertNotIn(nested_flow_log, messages_set)

  for log in hunt_logs:
    self.assertEqual(log.hunt_id, hunt_id)
def testOutputPluginFlushErrorIsLoggedProperly(self):
  plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="FailingInFlushDummyHuntOutputPlugin")

  hunt_id, _ = self._CreateAndRunHunt(
      num_clients=5,
      client_mock=hunt_test_lib.SampleHuntMock(failrate=-1),
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=self.GetFileHuntArgs(),
      output_plugins=[plugin_descriptor])

  logs = hunt.GetHuntOutputPluginLogs(hunt_id, 0, sys.maxsize)
  self.assertEmpty(logs)

  errors = hunt.GetHuntOutputPluginErrors(hunt_id, 0, sys.maxsize)
  self.assertLen(errors, 5)
  for e in errors:
    self.assertEqual(e.batch_size, 1)
    self.assertEqual(
        e.status,
        output_plugin.OutputPluginBatchProcessingStatus.Status.ERROR)
    self.assertEqual(e.plugin_descriptor, plugin_descriptor)
    self.assertEqual(e.summary, "Flush, oh no!")
def testOutputPluginFlushErrorIsLoggedProperly(self):
  plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name="FailingInFlushDummyHuntOutputPlugin")

  hunt_id, client_ids = self._CreateAndRunHunt(
      num_clients=5,
      client_mock=hunt_test_lib.SampleHuntMock(failrate=-1),
      client_rule_set=foreman_rules.ForemanClientRuleSet(),
      client_rate=0,
      args=self.GetFileHuntArgs(),
      output_plugins=[plugin_descriptor])

  logs = data_store.REL_DB.ReadHuntOutputPluginLogEntries(
      hunt_id,
      output_plugin_id="0",
      offset=0,
      count=sys.maxsize,
      with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.LOG)
  self.assertEmpty(logs)

  errors = data_store.REL_DB.ReadHuntOutputPluginLogEntries(
      hunt_id,
      output_plugin_id="0",
      offset=0,
      count=sys.maxsize,
      with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.ERROR)
  self.assertLen(errors, 5)
  self.assertItemsEqual([e.client_id for e in errors],
                        [cid.Basename() for cid in client_ids])
  for e in errors:
    self.assertEqual(e.hunt_id, hunt_id)
    self.assertGreater(e.timestamp, 0)
    self.assertEqual(e.message,
                     "Error while processing 1 replies: Flush, oh no!")
def testCallback(self, client_limit=None):
  """Checks that the foreman uses the callback specified in the action."""
  client_urn = self.SetupClient(0)

  client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
      foreman_rules.ForemanClientRule(
          rule_type=foreman_rules.ForemanClientRule.Type.REGEX,
          regex=foreman_rules.ForemanRegexClientRule(
              field="CLIENT_NAME", attribute_regex="GRR"))
  ])

  with implementation.StartHunt(
      hunt_name=standard.SampleHunt.__name__,
      client_rule_set=client_rule_set,
      client_limit=client_limit,
      client_rate=0,
      token=self.token) as hunt:
    hunt.GetRunner().Start()

  # Create a client that matches our regex.
  with aff4.FACTORY.Open(client_urn, mode="rw", token=self.token) as client:
    info = client.Schema.CLIENT_INFO()
    info.client_name = "GRR Monitor"
    client.Set(client.Schema.CLIENT_INFO, info)

  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  with utils.Stubber(standard.SampleHunt, "StartClients", self.Callback):
    self.called = []

    client_id = client_urn.Basename()
    foreman.AssignTasksToClient(client_id)

    self.assertLen(self.called, 1)
    self.assertEqual(self.called[0][1], [client_id])