def ParseOption(self, option, request):
  """Parse the form that is selected by option.

  The three OS options map to predefined match rules on the hunt class;
  "Label", "Regex" and "Integer" parse their respective sub-forms.
  Returns None for an unrecognized option (no final else branch).
  """
  if option == "Windows":
    return implementation.GRRHunt.MATCH_WINDOWS

  elif option == "Linux":
    return implementation.GRRHunt.MATCH_LINUX

  elif option == "OSX":
    return implementation.GRRHunt.MATCH_DARWIN

  elif option == "Label":
    # Parse the chosen label name, then build a regex matching the
    # stringified AFF4 label value and wrap it into a foreman rule.
    label_name = ClientLabelNameFormRenderer(
        descriptor=type_info.TypeInfoObject(), default="",
        prefix=self.prefix).ParseArgs(request)
    regex = rdfvalue.AFF4ObjectLabelsList.RegexForStringifiedValueMatch(
        label_name)
    return rdfvalue.ForemanAttributeRegex(attribute_name="Labels",
                                          attribute_regex=regex)

  elif option == "Regex":
    return forms.SemanticProtoFormRenderer(
        rdfvalue.ForemanAttributeRegex(),
        prefix=self.prefix).ParseArgs(request)

  elif option == "Integer":
    return forms.SemanticProtoFormRenderer(
        rdfvalue.ForemanAttributeInteger(),
        prefix=self.prefix).ParseArgs(request)
def testCreatorPropagation(self):
  """Tests that hunt flows run with the hunt creator's credentials."""
  self.CreateAdminUser("adminuser")
  admin_token = access_control.ACLToken(username="******",
                                        reason="testing")
  # Start a flow that requires admin privileges in the hunt. The
  # parameters are not valid so the flow will error out but it's
  # enough to check if the flow was actually run (i.e., it passed
  # the label test).
  with hunts.GRRHunt.StartHunt(
      hunt_name="GenericHunt",
      flow_runner_args=rdfvalue.FlowRunnerArgs(flow_name="UpdateClient"),
      flow_args=rdfvalue.UpdateClientArgs(),
      regex_rules=[
          rdfvalue.ForemanAttributeRegex(attribute_name="GRR client",
                                         attribute_regex="GRR"),
      ],
      client_rate=0,
      token=admin_token) as hunt:
    hunt.Run()

  self.CreateUser("nonadmin")
  nonadmin_token = access_control.ACLToken(username="******",
                                           reason="testing")
  self.AssignTasksToClients()

  # Process the hunt using a non-admin token: flows should still execute
  # because they inherit the (admin) hunt creator's identity.
  client_mock = test_lib.SampleHuntMock()
  test_lib.TestHuntHelper(client_mock, self.client_ids, False,
                          nonadmin_token)

  errors = list(hunt.GetClientsErrors())

  # Make sure there are errors...
  self.assertTrue(errors)

  # but they are not UnauthorizedAccess.
  for e in errors:
    self.assertTrue("UnauthorizedAccess" not in e.backtrace)
def _RunRateLimitedHunt(self, client_ids, start_time):
  """Starts a client_rate=1 DummyHunt and simulates the first minute.

  Args:
    client_ids: Clients the foreman assigns the hunt to.
    start_time: Epoch seconds used as the fake "now" for scheduling.

  Returns:
    A (worker_mock, hunt_urn) tuple so callers can continue simulating.
  """
  with hunts.GRRHunt.StartHunt(
      hunt_name="DummyHunt",
      regex_rules=[
          rdfvalue.ForemanAttributeRegex(attribute_name="GRR client",
                                         attribute_regex="GRR"),
      ],
      client_rate=1,
      token=self.token) as hunt:
    hunt.Run()

  # Pretend to be the foreman now and dish out hunting jobs to all the
  # clients..
  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    foreman.AssignTasksToClient(client_id)

  self.assertEqual(len(DummyHunt.client_ids), 0)

  # Run the hunt.
  worker_mock = test_lib.MockWorker(check_flow_errors=True,
                                    queues=queues.HUNTS,
                                    token=self.token)

  # One client is scheduled in the first minute.
  with test_lib.FakeTime(start_time + 2):
    worker_mock.Simulate()
  self.assertEqual(len(DummyHunt.client_ids), 1)

  # No further clients will be scheduled until the end of the first minute.
  with test_lib.FakeTime(start_time + 59):
    worker_mock.Simulate()
  self.assertEqual(len(DummyHunt.client_ids), 1)

  return worker_mock, hunt.urn
def testStoppingHuntMarksAllStartedFlowsAsPendingForTermination(self):
  """Stopping a hunt must mark all its started flows for termination."""
  with hunts.GRRHunt.StartHunt(
      hunt_name="GenericHunt",
      flow_runner_args=rdfvalue.FlowRunnerArgs(flow_name="InfiniteFlow"),
      regex_rules=[
          rdfvalue.ForemanAttributeRegex(attribute_name="GRR client",
                                         attribute_regex="GRR"),
      ],
      client_rate=0,
      token=self.token) as hunt:
    hunt.Run()

  self.AssignTasksToClients()

  # Run long enough for InfiniteFlows to start.
  self.RunHunt(iteration_limit=len(self.client_ids) * 2)
  self.StopHunt(hunt.urn)

  # All flows should be marked for termination now. RunHunt should raise.
  # If something is wrong with GRRFlow.MarkForTermination mechanism, then
  # this will run forever.
  self.RunHunt()

  for client_id in self.client_ids:
    flows_root = aff4.FACTORY.Open(client_id.Add("flows"),
                                   token=self.token)
    flows_list = list(flows_root.ListChildren())
    # Only one flow (issued by the hunt) is expected.
    self.assertEqual(len(flows_list), 1)

    # Each flow must have errored out with the termination backtrace.
    flow_obj = aff4.FACTORY.Open(flows_list[0], aff4_type="InfiniteFlow",
                                 token=self.token)
    self.assertEqual(flow_obj.state.context.state, "ERROR")
    self.assertEqual(flow_obj.state.context.backtrace,
                     "Parent hunt stopped.")
def testHangingClients(self): """This tests if the hunt completes when some clients hang or raise.""" # Set up 10 clients. client_ids = self.SetupClients(10) with hunts.GRRHunt.StartHunt( hunt_name="SampleHunt", regex_rules=[rdfvalue.ForemanAttributeRegex( attribute_name="GRR client", attribute_regex="GRR")], client_rate=0, token=self.token) as hunt: hunt.GetRunner().Start() foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token) for client_id in client_ids: foreman.AssignTasksToClient(client_id) client_mock = test_lib.SampleHuntMock() # Just pass 8 clients to run, the other two went offline. test_lib.TestHuntHelper(client_mock, client_ids[1:9], False, self.token) hunt_obj = aff4.FACTORY.Open(hunt.session_id, mode="rw", age=aff4.ALL_TIMES, token=self.token) started, finished, _ = hunt_obj.GetClientsCounts() # We started the hunt on 10 clients. self.assertEqual(started, 10) # But only 8 should have finished. self.assertEqual(finished, 8)
def testBrokenHunt(self): """This tests the behavior when a hunt raises an exception.""" # Set up 10 clients. client_ids = self.SetupClients(10) with hunts.GRRHunt.StartHunt( hunt_name="BrokenSampleHunt", regex_rules=[rdfvalue.ForemanAttributeRegex( attribute_name="GRR client", attribute_regex="GRR")], client_rate=0, token=self.token) as hunt: hunt.GetRunner().Start() foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token) for client_id in client_ids: foreman.AssignTasksToClient(client_id) # Run the hunt. client_mock = test_lib.SampleHuntMock() test_lib.TestHuntHelper(client_mock, client_ids, False, self.token) hunt_obj = aff4.FACTORY.Open(hunt.session_id, mode="rw", age=aff4.ALL_TIMES, token=self.token) started, finished, errors = hunt_obj.GetClientsCounts() self.assertEqual(started, 10) # There should be errors for the five clients where the hunt raised. self.assertEqual(errors, 5) # All of the clients that have the file should still finish eventually. self.assertEqual(finished, 5)
def testClientLimit(self): """This tests that we can limit hunts to a number of clients.""" # Set up 10 clients. client_ids = self.SetupClients(10) with hunts.GRRHunt.StartHunt(hunt_name="SampleHunt", client_limit=5, regex_rules=[ rdfvalue.ForemanAttributeRegex( attribute_name="GRR client", attribute_regex="GRR") ], client_rate=0, token=self.token) as hunt: hunt.Run() foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token) for client_id in client_ids: foreman.AssignTasksToClient(client_id) # Run the hunt. client_mock = test_lib.SampleHuntMock() test_lib.TestHuntHelper(client_mock, client_ids, False, self.token) hunt_obj = aff4.FACTORY.Open(hunt.urn, mode="rw", age=aff4.ALL_TIMES, token=self.token) started, finished, _ = hunt_obj.GetClientsCounts() # We limited here to 5 clients. self.assertEqual(started, 5) self.assertEqual(finished, 5)
def testProcessing(self):
  """This tests running the hunt on some clients."""
  # Set up 10 clients.
  client_ids = self.SetupClients(10)

  match_all_rule = rdfvalue.ForemanAttributeRegex(
      attribute_name="GRR client", attribute_regex="GRR")

  with hunts.GRRHunt.StartHunt(hunt_name="SampleHunt",
                               regex_rules=[match_all_rule],
                               client_rate=0,
                               token=self.token) as hunt:
    hunt.GetRunner().Start()

  # Pretend to be the foreman and assign the hunt to every client.
  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    foreman.AssignTasksToClient(client_id)

  # Run the hunt through a mock client.
  test_lib.TestHuntHelper(test_lib.SampleHuntMock(), client_ids, False,
                          self.token)

  # Re-open the hunt and check that every client started and finished.
  hunt_obj = aff4.FACTORY.Open(
      hunt.session_id, mode="r", age=aff4.ALL_TIMES,
      aff4_type="SampleHunt", token=self.token)

  started, finished, _ = hunt_obj.GetClientsCounts()
  self.assertEqual(started, 10)
  self.assertEqual(finished, 10)
def RenderOption(self, option, request, response):
  """Render the form fragment for the selected rule option.

  The three OS options share one template parameterized by system name;
  "Regex" and "Integer" render a semantic proto form for the respective
  foreman attribute type.
  """
  if option == "Windows":
    return self.RenderFromTemplate(self.match_system_template, response,
                                   system="Windows")

  elif option == "Linux":
    return self.RenderFromTemplate(self.match_system_template, response,
                                   system="Linux")

  elif option == "OSX":
    return self.RenderFromTemplate(self.match_system_template, response,
                                   system="OSX")

  elif option == "Regex":
    return self.RenderFromTemplate(
        self.form_template, response,
        form=forms.SemanticProtoFormRenderer(
            rdfvalue.ForemanAttributeRegex(),
            prefix=self.prefix).RawHTML(request))

  elif option == "Integer":
    return self.RenderFromTemplate(
        self.form_template, response,
        form=forms.SemanticProtoFormRenderer(
            rdfvalue.ForemanAttributeInteger(),
            prefix=self.prefix).RawHTML(request))
def testHuntNotifications(self):
  """This tests the Hunt notification event."""
  # Reset the listener's event log so only this test's events are counted.
  TestHuntListener.received_events = []

  # Set up 10 clients.
  client_ids = self.SetupClients(10)

  with hunts.GRRHunt.StartHunt(hunt_name="BrokenSampleHunt",
                               regex_rules=[
                                   rdfvalue.ForemanAttributeRegex(
                                       attribute_name="GRR client",
                                       attribute_regex="GRR")
                               ],
                               client_rate=0,
                               notification_event="TestHuntDone",
                               token=self.token) as hunt:
    hunt.GetRunner().Start()

  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    foreman.AssignTasksToClient(client_id)

  # Run the hunt.
  client_mock = test_lib.SampleHuntMock()
  test_lib.TestHuntHelper(client_mock, client_ids,
                          check_flow_errors=False, token=self.token)

  # 5 events expected — presumably one per erroring client, matching the
  # error count in testBrokenHunt; verify against BrokenSampleHunt.
  self.assertEqual(len(TestHuntListener.received_events), 5)
def CreateSampleHunt(self, stopped=False):
  """Creates a GetFile GenericHunt on 10 fresh clients.

  Args:
    stopped: If False (default) the hunt is started via Run(); if True it
      is created but not started.

  Returns:
    The hunt reopened from AFF4 in "rw" mode with all timestamps.
  """
  self.client_ids = self.SetupClients(10)

  with hunts.GRRHunt.StartHunt(
      hunt_name="GenericHunt",
      flow_runner_args=rdfvalue.FlowRunnerArgs(
          flow_name="GetFile"),
      flow_args=rdfvalue.GetFileArgs(
          pathspec=rdfvalue.PathSpec(
              path="/tmp/evil.txt",
              pathtype=rdfvalue.PathSpec.PathType.OS,
          )
      ),
      regex_rules=[rdfvalue.ForemanAttributeRegex(
          attribute_name="GRR client",
          attribute_regex="GRR")],
      output_plugins=[],
      client_rate=0,
      token=self.token) as hunt:
    if not stopped:
      hunt.Run()

  # Assign the hunt to every client via the foreman.
  with aff4.FACTORY.Open("aff4:/foreman", mode="rw",
                         token=self.token) as foreman:
    for client_id in self.client_ids:
      foreman.AssignTasksToClient(client_id)

  self.hunt_urn = hunt.urn
  return aff4.FACTORY.Open(hunt.urn, mode="rw", token=self.token,
                           age=aff4.ALL_TIMES)
def CreateGenericHuntWithCollection(self, values=None):
  """Creates a started GenericHunt with a pre-filled results collection.

  Args:
    values: RDF values to place in the hunt's results collection. Defaults
      to three sample RDFURNs when None.

  Returns:
    The urn of the created hunt.
  """
  self.client_ids = self.SetupClients(10)

  if values is None:
    values = [rdfvalue.RDFURN("aff4:/sample/1"),
              rdfvalue.RDFURN("aff4:/C.0000000000000001/fs/os/c/bin/bash"),
              rdfvalue.RDFURN("aff4:/sample/3")]

  with hunts.GRRHunt.StartHunt(
      hunt_name="GenericHunt",
      regex_rules=[rdfvalue.ForemanAttributeRegex(
          attribute_name="GRR client",
          attribute_regex="GRR")],
      output_plugins=[],
      token=self.token) as hunt:

    runner = hunt.GetRunner()
    runner.Start()

    # Write the values straight into the hunt's results collection,
    # bypassing actual flow execution.
    with aff4.FACTORY.Create(
        runner.context.results_collection_urn,
        aff4_type="RDFValueCollection", mode="w",
        token=self.token) as collection:
      for value in values:
        collection.Add(value)

    return hunt.urn
def RunHunt(self, plugin_name, plugin_args):
  """Runs a GetFile GenericHunt with the given output plugin attached.

  Args:
    plugin_name: Name of the output plugin to attach.
    plugin_args: Arguments for that plugin.

  Returns:
    The urn of the (now stopped) hunt, after the results-processing cron
    flow has executed the output plugins.
  """
  with hunts.GRRHunt.StartHunt(
      hunt_name="GenericHunt",
      flow_runner_args=rdfvalue.FlowRunnerArgs(flow_name="GetFile"),
      flow_args=rdfvalue.GetFileArgs(pathspec=rdfvalue.PathSpec(
          path="/tmp/evil.txt",
          pathtype=rdfvalue.PathSpec.PathType.OS)),
      regex_rules=[
          rdfvalue.ForemanAttributeRegex(attribute_name="GRR client",
                                         attribute_regex="GRR")
      ],
      output_plugins=[
          rdfvalue.OutputPlugin(plugin_name=plugin_name,
                                plugin_args=plugin_args)
      ],
      client_rate=0,
      token=self.token) as hunt:
    hunt.Run()
    # Schedule the clients directly rather than going through the foreman.
    hunt.StartClients(hunt.session_id, self.client_ids)

  # Run the hunt.
  client_mock = test_lib.SampleHuntMock()
  test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

  # Stop the hunt now.
  hunt.GetRunner().Stop()

  # Run cron flow that executes actual output plugins
  for _ in test_lib.TestFlowHelper("ProcessHuntResultsCronFlow",
                                   token=self.token):
    pass

  return hunt.urn
def testCallback(self, client_limit=None):
  """Checks that the foreman uses the callback specified in the action."""
  with hunts.GRRHunt.StartHunt(hunt_name="SampleHunt",
                               regex_rules=[
                                   rdfvalue.ForemanAttributeRegex(
                                       attribute_name="GRR client",
                                       attribute_regex="GRR")
                               ],
                               client_limit=client_limit,
                               client_rate=0,
                               token=self.token) as hunt:
    hunt.GetRunner().Start()

  # Create a client that matches our regex.
  client = aff4.FACTORY.Open(self.client_id, mode="rw", token=self.token)
  info = client.Schema.CLIENT_INFO()
  info.client_name = "GRR Monitor"
  client.Set(client.Schema.CLIENT_INFO, info)
  client.Close()

  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  # Stub out StartClients so we can record its invocations instead of
  # actually starting the hunt on the client.
  with utils.Stubber(hunts.SampleHunt, "StartClients", self.Callback):
    self.called = []

    foreman.AssignTasksToClient(client.urn)

    # The callback must have fired exactly once, with our client as the
    # second positional argument.
    self.assertEqual(len(self.called), 1)
    self.assertEqual(self.called[0][1], [client.urn])
def SetUpCrashedFlowInHunt(self):
  """Runs a SampleHunt over 10 clients whose mocks simulate crashes.

  Returns:
    The list of ClientURNs the hunt was assigned to.
  """
  client_ids = [rdfvalue.ClientURN("C.%016X" % i) for i in range(10)]
  # One crashing mock per client, keyed by the client urn. A dict
  # comprehension replaces the original dict([(k, v) for ...]) idiom.
  client_mocks = {
      client_id: test_lib.CrashClientMock(client_id, self.token)
      for client_id in client_ids
  }

  with hunts.GRRHunt.StartHunt(hunt_name="SampleHunt",
                               regex_rules=[
                                   rdfvalue.ForemanAttributeRegex(
                                       attribute_name="GRR client",
                                       attribute_regex="GRR")
                               ],
                               client_rate=0,
                               token=self.token) as hunt:
    hunt.Run()

  # Assign the hunt to every client via the foreman.
  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    foreman.AssignTasksToClient(client_id)

  test_lib.TestHuntHelperWithMultipleMocks(client_mocks, False, self.token)

  return client_ids
def CreateSampleHunt(self, token=None):
  """Creates a SampleHunt matching all GRR clients.

  Args:
    token: Optional ACL token; the test's own token is used when falsy.

  Returns:
    The session id of the newly created hunt.
  """
  effective_token = token or self.token
  match_all_rule = rdfvalue.ForemanAttributeRegex(
      attribute_name="GRR client", attribute_regex="GRR")

  with hunts.GRRHunt.StartHunt(hunt_name="SampleHunt",
                               regex_rules=[match_all_rule],
                               token=effective_token) as hunt:
    return hunt.session_id
def testRuleAdding(self):
  """Tests that starting a hunt registers its rules with the foreman."""
  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  rules = foreman.Get(foreman.Schema.RULES)
  # Make sure there are no rules yet in the foreman.
  self.assertEqual(len(rules), 0)

  hunt = hunts.GRRHunt.StartHunt(
      hunt_name="SampleHunt",
      regex_rules=[
          rdfvalue.ForemanAttributeRegex(
              attribute_name="GRR client",
              attribute_regex="HUNT")
      ],
      integer_rules=[
          rdfvalue.ForemanAttributeInteger(
              attribute_name="Clock",
              operator=rdfvalue.ForemanAttributeInteger.Operator.GREATER_THAN,
              value=1336650631137737)
      ],
      client_rate=0,
      token=self.token)

  # Push the rules to the foreman.
  with hunt:
    hunt.GetRunner().Start()

  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  rules = foreman.Get(foreman.Schema.RULES)

  # Make sure they were written correctly.
  self.assertEqual(len(rules), 1)
  rule = rules[0]

  self.assertEqual(len(rule.regex_rules), 1)
  self.assertEqual(rule.regex_rules[0].attribute_name, "GRR client")
  self.assertEqual(rule.regex_rules[0].attribute_regex, "HUNT")

  self.assertEqual(len(rule.integer_rules), 1)
  self.assertEqual(rule.integer_rules[0].attribute_name, "Clock")
  self.assertEqual(rule.integer_rules[0].operator,
                   rdfvalue.ForemanAttributeInteger.Operator.GREATER_THAN)
  self.assertEqual(rule.integer_rules[0].value, 1336650631137737)

  self.assertEqual(len(rule.actions), 1)
  self.assertEqual(rule.actions[0].hunt_name, "SampleHunt")

  # Running a second time should not change the rules any more.
  with hunt:
    hunt.GetRunner().Start()

  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  rules = foreman.Get(foreman.Schema.RULES)

  # Still just one rule.
  self.assertEqual(len(rules), 1)
def testHuntExpiration(self):
  """This tests that hunts with an expiry time terminate correctly."""
  with test_lib.FakeTime(1000):
    with hunts.GRRHunt.StartHunt(
        hunt_name="GenericHunt",
        flow_runner_args=rdfvalue.FlowRunnerArgs(
            flow_name="GetFile"),
        flow_args=rdfvalue.GetFileArgs(pathspec=rdfvalue.PathSpec(
            path="/tmp/evil.txt",
            pathtype=rdfvalue.PathSpec.PathType.OS)),
        regex_rules=[
            rdfvalue.ForemanAttributeRegex(
                attribute_name="GRR client",
                attribute_regex="GRR")
        ],
        client_limit=5,
        expiry_time=rdfvalue.Duration("1000s"),
        token=self.token) as hunt:
      hunt.Run()

    # Pretend to be the foreman now and dish out hunting jobs to all the
    # clients (Note we have 10 clients here).
    foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw",
                                token=self.token)
    for client_id in self.client_ids:
      foreman.AssignTasksToClient(client_id)

    hunt_obj = aff4.FACTORY.Open(hunt.session_id, age=aff4.ALL_TIMES,
                                 token=self.token)

    self.assertEqual(hunt_obj.Get(hunt_obj.Schema.STATE), "STARTED")

  # Now advance the time such that the hunt expires. Using FakeTime
  # instead of patching time.time directly guarantees the real clock is
  # restored even if an assertion below fails.
  with test_lib.FakeTime(5000):
    # Run the hunt.
    client_mock = test_lib.SampleHuntMock()
    test_lib.TestHuntHelper(client_mock, self.client_ids,
                            check_flow_errors=False, token=self.token)

    # No client should be processed since the hunt is expired.
    started, finished, errors = hunt_obj.GetClientsCounts()
    self.assertEqual(started, 0)
    self.assertEqual(finished, 0)
    self.assertEqual(errors, 0)

    hunt_obj = aff4.FACTORY.Open(hunt.session_id, age=aff4.ALL_TIMES,
                                 token=self.token)

    # Hunts are automatically stopped when they expire.
    self.assertEqual(hunt_obj.Get(hunt_obj.Schema.STATE), "STOPPED")
def _CreateHunt(self, token):
  """Starts a GenericHunt that fetches /tmp/evil.txt from matching clients.

  Args:
    token: The ACL token to create the hunt with.

  Returns:
    The newly started hunt object (usable as a context manager).
  """
  target_pathspec = rdfvalue.PathSpec(
      path="/tmp/evil.txt", pathtype=rdfvalue.PathSpec.PathType.OS)
  match_all_rule = rdfvalue.ForemanAttributeRegex(
      attribute_name="GRR client", attribute_regex="GRR")

  return hunts.GRRHunt.StartHunt(
      hunt_name="GenericHunt",
      flow_runner_args=rdfvalue.FlowRunnerArgs(flow_name="GetFile"),
      flow_args=rdfvalue.GetFileArgs(pathspec=target_pathspec),
      regex_rules=[match_all_rule],
      client_rate=0,
      token=token)
def testInvalidRules(self):
  """Tests the behavior when a wrong attribute name is passed in a rule."""
  bad_rule = rdfvalue.ForemanAttributeRegex(
      attribute_name="no such attribute", attribute_regex="HUNT")

  with hunts.GRRHunt.StartHunt(hunt_name="BrokenSampleHunt",
                               regex_rules=[bad_rule],
                               client_rate=0,
                               token=self.token) as hunt:
    # Starting the hunt must fail because the rule references an unknown
    # client attribute.
    runner = hunt.GetRunner()
    self.assertRaises(ValueError, runner.Start)
def Start(self):
  """Schedules an end-to-end test hunt on the configured test clients."""
  self.state.Register("hunt_id", None)
  self.state.Register("client_ids", set())
  self.state.Register("client_ids_failures", set())
  self.state.Register("client_ids_result_reported", set())

  self.state.client_ids = base.GetClientTestTargets(token=self.token)
  if not self.state.client_ids:
    self.Log("No clients to test on, define them in "
             "Test.end_to_end_client_ids")
    return

  # NOTE(review): the token is SetUID'd — presumably to bypass approval
  # checks for the cron user; confirm against access_control semantics.
  token = access_control.ACLToken(username="******",
                                  reason="Running endtoend tests.").SetUID()
  runner_args = rdfvalue.FlowRunnerArgs(flow_name="EndToEndTestFlow")

  flow_request = rdfvalue.FlowRequest(
      client_ids=self.state.client_ids,
      args=rdfvalue.EndToEndTestFlowArgs(),
      runner_args=runner_args)

  # The regex rule is intentionally unmatchable: clients are scheduled
  # manually below instead of through the foreman.
  bogus_rule = rdfvalue.ForemanAttributeRegex(
      attribute_name="System", attribute_regex="Does not match anything")

  hunt_args = rdfvalue.VariableGenericHuntArgs(flows=[flow_request])

  hunt_args.output_plugins = self.GetOutputPlugins()

  with hunts.GRRHunt.StartHunt(
      hunt_name="VariableGenericHunt",
      args=hunt_args,
      regex_rules=[bogus_rule],
      client_rate=0,
      expiry_time="1d",
      token=token) as hunt:

    self.state.hunt_id = hunt.session_id
    hunt.SetDescription("EndToEnd tests run by cron")
    hunt.Run()
    hunt.ManuallyScheduleClients(token=token)

  # Set a callback to check the results after 50 minutes. This should be
  # plenty of time for the clients to receive the hunt and run the tests, but
  # not so long that the flow lease will expire.
  wait_duration = rdfvalue.Duration(
      config_lib.CONFIG.Get("Test.end_to_end_result_check_wait"))
  completed_time = rdfvalue.RDFDatetime().Now() + wait_duration

  self.CallState(next_state="CheckResults", start_time=completed_time)
def testHuntClientRate(self):
  """Check that clients are scheduled slowly by the hunt."""
  start_time = 10

  # Set up 10 clients.
  client_ids = self.SetupClients(10)

  with test_lib.FakeTime(start_time):
    with hunts.GRRHunt.StartHunt(hunt_name="DummyHunt",
                                 regex_rules=[
                                     rdfvalue.ForemanAttributeRegex(
                                         attribute_name="GRR client",
                                         attribute_regex="GRR"),
                                 ],
                                 client_rate=1,
                                 token=self.token) as hunt:
      hunt.Run()

    # Pretend to be the foreman now and dish out hunting jobs to all the
    # clients..
    foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw",
                                token=self.token)
    for client_id in client_ids:
      foreman.AssignTasksToClient(client_id)

    self.assertEqual(len(DummyHunt.client_ids), 0)

    # Run the hunt.
    worker_mock = test_lib.MockWorker(check_flow_errors=True,
                                      token=self.token)

    # Advance the clock with FakeTime contexts (mirrors
    # _RunRateLimitedHunt) rather than patching time.time directly, so the
    # real clock is always restored even on assertion failure.

    # One client is scheduled in the first minute.
    with test_lib.FakeTime(start_time + 2):
      worker_mock.Simulate()
    self.assertEqual(len(DummyHunt.client_ids), 1)

    # No further clients will be scheduled until the end of the first
    # minute.
    with test_lib.FakeTime(start_time + 59):
      worker_mock.Simulate()
    self.assertEqual(len(DummyHunt.client_ids), 1)

    # One client will be processed every minute.
    for i in range(len(client_ids)):
      with test_lib.FakeTime(start_time + 1 + 60 * i):
        worker_mock.Simulate()
      self.assertEqual(len(DummyHunt.client_ids), i + 1)
def testHuntTermination(self):
  """This tests that hunts with a client limit terminate correctly."""
  with test_lib.FakeTime(1000):
    with hunts.GRRHunt.StartHunt(
        hunt_name="GenericHunt",
        flow_runner_args=rdfvalue.FlowRunnerArgs(flow_name="GetFile"),
        flow_args=rdfvalue.GetFileArgs(
            pathspec=rdfvalue.PathSpec(
                path="/tmp/evil.txt",
                pathtype=rdfvalue.PathSpec.PathType.OS)
        ),
        regex_rules=[rdfvalue.ForemanAttributeRegex(
            attribute_name="GRR client", attribute_regex="GRR")],
        client_limit=5,
        client_rate=0,
        expiry_time=rdfvalue.Duration("1000s"),
        token=self.token) as hunt:
      hunt.Run()

    # Pretend to be the foreman now and dish out hunting jobs to all the
    # clients (Note we have 10 clients here).
    foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw",
                                token=self.token)
    for client_id in self.client_ids:
      foreman.AssignTasksToClient(client_id)

    # Run the hunt.
    client_mock = test_lib.SampleHuntMock()
    test_lib.TestHuntHelper(client_mock, self.client_ids,
                            check_flow_errors=False, token=self.token)

    hunt_obj = aff4.FACTORY.Open(hunt.session_id, age=aff4.ALL_TIMES,
                                 token=self.token)

    started = hunt_obj.GetValuesForAttribute(hunt_obj.Schema.CLIENTS)
    finished = hunt_obj.GetValuesForAttribute(hunt_obj.Schema.FINISHED)
    errors = hunt_obj.GetValuesForAttribute(hunt_obj.Schema.ERRORS)

    # Only 5 of the 10 clients should have been scheduled (client_limit).
    self.assertEqual(len(set(started)), 5)
    self.assertEqual(len(set(finished)), 5)
    self.assertEqual(len(set(errors)), 2)

    hunt_obj = aff4.FACTORY.Open(hunt.session_id, age=aff4.ALL_TIMES,
                                 token=self.token)

    # Hunts are automatically paused when they reach the client limit.
    self.assertEqual(hunt_obj.Get(hunt_obj.Schema.STATE), "PAUSED")
def setUp(self):
  """Creates two hashed files plus a hunt whose results reference them."""
  super(TestExportHuntResultsFilesAsArchive, self).setUp()

  path1 = "aff4:/C.0000000000000000/fs/os/foo/bar/hello1.txt"
  fd = aff4.FACTORY.Create(path1, "AFF4MemoryStream", token=self.token)
  fd.Write("hello1")
  fd.Set(fd.Schema.HASH,
         rdfvalue.Hash(sha256=hashlib.sha256("hello1").digest()))
  fd.Close()

  # The second path is unicode, exercising non-ASCII filenames in the
  # export.
  path2 = u"aff4:/C.0000000000000000/fs/os/foo/bar/中国新闻网新闻中.txt"
  fd = aff4.FACTORY.Create(path2, "AFF4MemoryStream", token=self.token)
  fd.Write("hello2")
  fd.Set(fd.Schema.HASH,
         rdfvalue.Hash(sha256=hashlib.sha256("hello2").digest()))
  fd.Close()

  self.paths = [path1, path2]

  with hunts.GRRHunt.StartHunt(hunt_name="GenericHunt",
                               regex_rules=[
                                   rdfvalue.ForemanAttributeRegex(
                                       attribute_name="GRR client",
                                       attribute_regex="GRR")
                               ],
                               output_plugins=[],
                               token=self.token) as hunt:

    self.hunt_urn = hunt.urn

    runner = hunt.GetRunner()
    runner.Start()

    # Add one StatEntry per file directly to the hunt's results
    # collection, bypassing flow execution.
    with aff4.FACTORY.Create(runner.context.results_collection_urn,
                             aff4_type="RDFValueCollection", mode="w",
                             token=self.token) as collection:
      for path in self.paths:
        collection.Add(
            rdfvalue.StatEntry(
                aff4path=path,
                pathspec=rdfvalue.PathSpec(
                    path="fs/os/foo/bar/" + path.split("/")[-1],
                    pathtype=rdfvalue.PathSpec.PathType.OS)))
def testPausingAndRestartingDoesNotStartHuntTwiceOnTheSameClient(self):
  """Pausing and restarting a hunt must not re-run it on the same client."""
  client_ids = self.SetupClients(10)

  with hunts.GRRHunt.StartHunt(hunt_name="SampleHunt",
                               regex_rules=[
                                   rdfvalue.ForemanAttributeRegex(
                                       attribute_name="GRR client",
                                       attribute_regex="GRR")
                               ],
                               client_rate=0,
                               token=self.token) as hunt:
    hunt.GetRunner().Start()

    hunt_id = hunt.urn

  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    num_tasks = foreman.AssignTasksToClient(client_id)
    # Each client gets exactly one task on the first pass.
    self.assertEqual(num_tasks, 1)

  client_mock = test_lib.SampleHuntMock()
  test_lib.TestHuntHelper(client_mock, client_ids, False, self.token)

  # Pausing and running hunt: this leads to the fresh rules being written
  # to Foreman.RULES.
  with aff4.FACTORY.Open(hunt_id, mode="rw", token=self.token) as hunt:
    runner = hunt.GetRunner()
    runner.Pause()
    runner.Start()

  # Recreating the foreman so that it updates list of rules.
  foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
  for client_id in client_ids:
    num_tasks = foreman.AssignTasksToClient(client_id)
    # No tasks should be assigned as this hunt ran on all the clients
    # before.
    self.assertEqual(num_tasks, 0)
def RunHunt(self, plugin_args=None, responses=None,
            process_responses_separately=False):
  """Runs a GetFile hunt and feeds responses to a CSVOutputPlugin.

  Args:
    plugin_args: Arguments for the CSVOutputPlugin.
    responses: RDF values wrapped into GrrMessages and fed to the plugin;
      defaults to no responses.
    process_responses_separately: If True, call ProcessResponses once per
      message instead of once with the whole batch.

  Returns:
    A (hunt_urn, plugin) tuple.
  """
  if responses is None:
    responses = []

  with hunts.GRRHunt.StartHunt(
      hunt_name="GenericHunt",
      flow_runner_args=rdfvalue.FlowRunnerArgs(flow_name="GetFile"),
      flow_args=rdfvalue.GetFileArgs(pathspec=rdfvalue.PathSpec(
          path="/tmp/evil.txt",
          pathtype=rdfvalue.PathSpec.PathType.OS)),
      regex_rules=[
          rdfvalue.ForemanAttributeRegex(attribute_name="GRR client",
                                         attribute_regex="GRR"),
      ],
      client_rate=0,
      token=self.token) as hunt:

    hunt_urn = hunt.urn
    plugin_def = rdfvalue.OutputPlugin(plugin_name="CSVOutputPlugin",
                                       plugin_args=plugin_args)
    plugin = plugin_def.GetPluginForHunt(hunt)

  # We don't want to test the whole output plugins subsystem as it's
  # tested in its own tests. We only want to test logic specific to
  # ColumnIOHuntOutputPlugin.
  messages = []
  for response in responses:
    messages.append(
        rdfvalue.GrrMessage(source=self.client_id, payload=response))

  if process_responses_separately:
    for message in messages:
      plugin.ProcessResponses([message])
  else:
    plugin.ProcessResponses(messages)

  plugin.Flush()

  return (hunt_urn, plugin)
def ParseOption(self, option, request):
  """Parse the form that is selected by option.

  OS options map to predefined match rules on the hunt class; "Regex" and
  "Integer" parse a semantic proto form. Returns None for an unrecognized
  option (no final else branch).
  """
  if option == "Windows":
    return implementation.GRRHunt.MATCH_WINDOWS

  elif option == "Linux":
    return implementation.GRRHunt.MATCH_LINUX

  elif option == "OSX":
    return implementation.GRRHunt.MATCH_DARWIN

  elif option == "Regex":
    return forms.SemanticProtoFormRenderer(
        rdfvalue.ForemanAttributeRegex(),
        prefix=self.prefix).ParseArgs(request)

  elif option == "Integer":
    return forms.SemanticProtoFormRenderer(
        rdfvalue.ForemanAttributeInteger(),
        prefix=self.prefix).ParseArgs(request)
def RenderOption(self, option, request, response):
  """Render the form fragment for the selected rule option.

  The three OS options share one template parameterized by system name;
  "Label" renders a label-name picker, and "Regex"/"Integer" render a
  semantic proto form for the respective foreman attribute type.
  """
  if option == "Windows":
    return self.RenderFromTemplate(self.match_system_template, response,
                                   system="Windows")

  elif option == "Linux":
    return self.RenderFromTemplate(self.match_system_template, response,
                                   system="Linux")

  elif option == "OSX":
    return self.RenderFromTemplate(self.match_system_template, response,
                                   system="OSX")

  elif option == "Label":
    return self.RenderFromTemplate(
        self.form_template, response,
        form=ClientLabelNameFormRenderer(
            descriptor=type_info.TypeInfoObject(friendly_name="Label"),
            default="", prefix=self.prefix).RawHTML(request))

  elif option == "Regex":
    return self.RenderFromTemplate(
        self.form_template, response,
        form=forms.SemanticProtoFormRenderer(
            rdfvalue.ForemanAttributeRegex(),
            prefix=self.prefix).RawHTML(request))

  elif option == "Integer":
    return self.RenderFromTemplate(
        self.form_template, response,
        form=forms.SemanticProtoFormRenderer(
            rdfvalue.ForemanAttributeInteger(),
            prefix=self.prefix).RawHTML(request))
def testResourceUsageStats(self):
  """Tests the aggregated per-client resource usage stats of a hunt."""
  client_ids = self.SetupClients(10)

  with hunts.GRRHunt.StartHunt(
      hunt_name="GenericHunt",
      flow_runner_args=rdfvalue.FlowRunnerArgs(
          flow_name="GetFile"),
      flow_args=rdfvalue.GetFileArgs(
          pathspec=rdfvalue.PathSpec(
              path="/tmp/evil.txt",
              pathtype=rdfvalue.PathSpec.PathType.OS,
          )
      ),
      regex_rules=[rdfvalue.ForemanAttributeRegex(
          attribute_name="GRR client",
          attribute_regex="GRR")],
      output_plugins=[],
      client_rate=0,
      token=self.token) as hunt:
    hunt.Run()

  with aff4.FACTORY.Open(
      "aff4:/foreman", mode="rw", token=self.token) as foreman:
    for client_id in client_ids:
      foreman.AssignTasksToClient(client_id)

  client_mock = test_lib.SampleHuntMock()
  test_lib.TestHuntHelper(client_mock, client_ids, False, self.token)

  hunt = aff4.FACTORY.Open(hunt.urn, aff4_type="GenericHunt",
                           token=self.token)

  # This is called once for each state method. Each flow above runs the
  # Start and the StoreResults methods.
  usage_stats = hunt.state.context.usage_stats

  # Expected means/stddevs follow from SampleHuntMock's per-client usage
  # values aggregated over the 10 clients.
  self.assertEqual(usage_stats.user_cpu_stats.num, 10)
  self.assertTrue(
      math.fabs(usage_stats.user_cpu_stats.mean - 5.5) < 1e-7)
  self.assertTrue(
      math.fabs(usage_stats.user_cpu_stats.std - 2.8722813) < 1e-7)

  self.assertEqual(usage_stats.system_cpu_stats.num, 10)
  self.assertTrue(
      math.fabs(usage_stats.system_cpu_stats.mean - 11) < 1e-7)
  self.assertTrue(
      math.fabs(usage_stats.system_cpu_stats.std - 5.7445626) < 1e-7)

  self.assertEqual(usage_stats.network_bytes_sent_stats.num, 10)
  self.assertTrue(
      math.fabs(usage_stats.network_bytes_sent_stats.mean - 16.5) < 1e-7)
  self.assertTrue(
      math.fabs(usage_stats.network_bytes_sent_stats.std -
                8.61684396) < 1e-7)

  # NOTE: Not checking histograms here. RunningStatsTest tests that mean,
  # standard deviation and histograms are calculated correctly. Therefore
  # if mean/stdev values are correct histograms should be ok as well.

  self.assertEqual(len(usage_stats.worst_performers), 10)

  # Worst performers must be ordered by total (user + system) cpu time in
  # strictly decreasing order.
  prev = usage_stats.worst_performers[0]
  for p in usage_stats.worst_performers[1:]:
    self.assertTrue(
        prev.cpu_usage.user_cpu_time + prev.cpu_usage.system_cpu_time >
        p.cpu_usage.user_cpu_time + p.cpu_usage.system_cpu_time)
    prev = p
def testHuntModificationWorksCorrectly(self):
  """Tests raising a hunt's client limit and restarting it."""
  with hunts.GRRHunt.StartHunt(
      hunt_name="GenericHunt",
      flow_runner_args=rdfvalue.FlowRunnerArgs(flow_name="GetFile"),
      flow_args=rdfvalue.GetFileArgs(
          pathspec=rdfvalue.PathSpec(
              path="/tmp/evil.txt",
              pathtype=rdfvalue.PathSpec.PathType.OS),
      ),
      regex_rules=[rdfvalue.ForemanAttributeRegex(
          attribute_name="GRR client",
          attribute_regex="GRR")],
      client_limit=1,
      client_rate=0,
      token=self.token) as hunt:
    hunt.Run()

  # Forget about hunt object, we'll use AFF4 for everything.
  hunt_session_id = hunt.session_id
  hunt = None

  # Pretend to be the foreman now and dish out hunting jobs to all the
  # client..
  with aff4.FACTORY.Open(
      "aff4:/foreman", mode="rw", token=self.token) as foreman:
    for client_id in self.client_ids:
      foreman.AssignTasksToClient(client_id)

  # Run the hunt.
  client_mock = test_lib.SampleHuntMock()
  test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

  # Re-open the hunt to get fresh data.
  hunt_obj = aff4.FACTORY.Open(hunt_session_id, age=aff4.ALL_TIMES,
                               ignore_cache=True, token=self.token)

  started = hunt_obj.GetValuesForAttribute(hunt_obj.Schema.CLIENTS)

  # There should be only one client, due to the limit
  self.assertEqual(len(set(started)), 1)

  # Check the hunt is paused.
  self.assertEqual(hunt_obj.Get(hunt_obj.Schema.STATE), "PAUSED")

  # Raise the client limit and restart the hunt.
  with aff4.FACTORY.Open(
      hunt_session_id, mode="rw", token=self.token) as hunt_obj:
    with hunt_obj.GetRunner() as runner:
      runner.args.client_limit = 10
      runner.Start()

  # Pretend to be the foreman now and dish out hunting jobs to all the
  # clients.
  with aff4.FACTORY.Open(
      "aff4:/foreman", mode="rw", token=self.token) as foreman:
    for client_id in self.client_ids:
      foreman.AssignTasksToClient(client_id)

  test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

  hunt_obj = aff4.FACTORY.Open(hunt_session_id, age=aff4.ALL_TIMES,
                               token=self.token)
  started = hunt_obj.GetValuesForAttribute(hunt_obj.Schema.CLIENTS)
  # All 10 clients should have been started now that the limit was raised.
  self.assertEqual(len(set(started)), 10)