def testRunSuccess(self):
  """Duplicate test names are deduped and every result reports success."""
  args = endtoend.EndToEndTestFlowArgs(test_names=[
      "TestListDirectoryOSLinuxDarwin", "MockEndToEndTest",
      "TestListDirectoryOSLinuxDarwin"
  ])

  with test_lib.Instrument(flow.GRRFlow, "SendReply") as send_reply:
    flow_run = flow_test_lib.TestFlowHelper(
        endtoend.EndToEndTestFlow.__name__,
        self.client_mock,
        client_id=self.client_id,
        token=self.token,
        args=args)
    for _ in flow_run:
      pass

    test_results = [
        reply for _, reply in send_reply.args
        if isinstance(reply, endtoend.EndToEndTestResult)
    ]
    expected_names = ["TestListDirectoryOSLinuxDarwin", "MockEndToEndTest"]
    for result in test_results:
      self.assertTrue(result.success)
      self.assertTrue(result.test_class_name in expected_names)
      self.assertFalse(result.log)

    # We only expect 2 results because we dedup test names
    self.assertEqual(len(test_results), 2)
def testRunSuccessAndFail(self):
  """A passing and a failing test each report their own outcome."""
  args = endtoend.EndToEndTestFlowArgs()

  stubbed_classes = {
      "MockEndToEndTest": endtoend_mocks.MockEndToEndTest,
      "TestFailure": endtoend_mocks.TestFailure
  }
  with utils.Stubber(base.AutomatedTest, "classes", stubbed_classes):
    with test_lib.Instrument(flow.GRRFlow, "SendReply") as send_reply:
      flow_run = flow_test_lib.TestFlowHelper(
          endtoend.EndToEndTestFlow.__name__,
          self.client_mock,
          client_id=self.client_id,
          token=self.token,
          args=args)
      for _ in flow_run:
        pass

      test_results = [
          reply for _, reply in send_reply.args
          if isinstance(reply, endtoend.EndToEndTestResult)
      ]
      for result in test_results:
        if result.test_class_name == "MockEndToEndTest":
          # The mock test succeeds silently.
          self.assertTrue(result.success)
          self.assertFalse(result.log)
        elif result.test_class_name == "TestFailure":
          # The failing test reports failure and carries its log output.
          self.assertFalse(result.success)
          self.assertTrue("This should be logged" in result.log)

      self.assertItemsEqual(
          [result.test_class_name for result in test_results],
          ["MockEndToEndTest", "TestFailure"])
      self.assertEqual(len(test_results), 2)
def testRunBadTearDown(self):
  """A test whose tearDown raises propagates a RuntimeError to the caller."""
  args = endtoend.EndToEndTestFlowArgs(test_names=["TestBadTearDown"])
  flow_run = flow_test_lib.TestFlowHelper(
      endtoend.EndToEndTestFlow.__name__,
      self.client_mock,
      client_id=self.client_id,
      token=self.token,
      args=args)
  # Draining the helper's generator is what actually executes the flow.
  self.assertRaises(RuntimeError, list, flow_run)
def testRunBadFlow(self):
  """Test behaviour when test flow raises in Start.

  A flow that raises in its Start method will kill the EndToEndTest run.
  Protecting and reporting on this significantly complicates this code, and
  a flow raising in Start is really broken, so we allow this behaviour.
  """
  args = endtoend.EndToEndTestFlowArgs(
      test_names=["MockEndToEndTestBadFlow", "MockEndToEndTest"])
  flow_run = flow_test_lib.TestFlowHelper(
      endtoend.EndToEndTestFlow.__name__,
      self.client_mock,
      client_id=self.client_id,
      token=self.token,
      args=args)
  # Draining the helper's generator is what actually executes the flow.
  self.assertRaises(RuntimeError, list, flow_run)
def testNoApplicableTests(self):
  """Try to run linux tests on windows."""
  self.SetupClients(
      1, system="Windows", os_version="6.1.7601SP1", arch="AMD64")
  install_time = rdfvalue.RDFDatetime.Now()
  user = "******"
  userobj = rdf_client.User(username=user)
  interface = rdf_client.Interface(ifname="eth0")
  self.client = aff4.FACTORY.Create(
      self.client_id,
      aff4_grr.VFSGRRClient,
      mode="rw",
      token=self.token,
      age=aff4.ALL_TIMES)

  schema = self.client.Schema
  kb = self.client.Get(schema.KNOWLEDGE_BASE)
  kb.users.Append(userobj)

  # Populate the client with a full Windows profile so the flow's only
  # reason to fail is that no test is applicable to this platform.
  for attribute in [
      schema.HOSTNAME("hostname"),
      schema.OS_RELEASE("7"),
      schema.KERNEL("6.1.7601"),
      schema.FQDN("hostname.example.com"),
      schema.INSTALL_DATE(install_time),
      schema.KNOWLEDGE_BASE(kb),
      schema.USERNAMES([user]),
      schema.INTERFACES([interface]),
  ]:
    self.client.Set(attribute)
  self.client.Flush()

  args = endtoend.EndToEndTestFlowArgs(test_names=[
      "TestListDirectoryOSLinuxDarwin", "MockEndToEndTest",
      "TestListDirectoryOSLinuxDarwin"
  ])
  flow_run = flow_test_lib.TestFlowHelper(
      endtoend.EndToEndTestFlow.__name__,
      self.client_mock,
      client_id=self.client_id,
      token=self.token,
      args=args)
  self.assertRaises(flow.FlowError, list, flow_run)
def testEndToEndTestFailure(self):
  """A failing test yields exactly one result with success=False."""
  args = endtoend.EndToEndTestFlowArgs(test_names=["TestFailure"])

  with test_lib.Instrument(flow.GRRFlow, "SendReply") as send_reply:
    flow_run = flow_test_lib.TestFlowHelper(
        endtoend.EndToEndTestFlow.__name__,
        self.client_mock,
        client_id=self.client_id,
        token=self.token,
        args=args)
    for _ in flow_run:
      pass

    test_results = [
        reply for _, reply in send_reply.args
        if isinstance(reply, endtoend.EndToEndTestResult)
    ]
    for result in test_results:
      self.assertFalse(result.success)
      self.assertEqual(result.test_class_name, "TestFailure")
      self.assertTrue("This should be logged" in result.log)

    self.assertEqual(len(test_results), 1)
def Start(self):
  """Schedule an EndToEndTestFlow hunt on the configured test clients.

  Builds a VariableGenericHunt that runs EndToEndTestFlow on every client
  returned by base.GetClientTestTargets, schedules those clients manually,
  and then sets a delayed CheckResults callback to inspect the outcome.
  Does nothing (after logging) if no test clients are configured.
  """
  self.state.hunt_id = None
  # Per-run bookkeeping: clients that failed, and clients whose result has
  # already been reported.
  self.state.client_ids_failures = set()
  self.state.client_ids_result_reported = set()

  # TODO(user): Figure out if this can just be a set. Add a tap test to
  # meaningfully exercise this code.
  self.state.client_ids = list(
      base.GetClientTestTargets(token=self.token))

  if not self.state.client_ids:
    self.Log("No clients to test on, define them in "
             "Test.end_to_end_client_ids")
    return

  # SetUID is required to run a hunt on the configured end-to-end client
  # targets without an approval.
  token = access_control.ACLToken(
      username="******", reason="Running endtoend tests.").SetUID()
  runner_args = rdf_flows.FlowRunnerArgs(
      flow_name=flows_endtoend.EndToEndTestFlow.__name__)

  flow_request = hunts_standard.FlowRequest(
      client_ids=self.state.client_ids,
      args=flows_endtoend.EndToEndTestFlowArgs(),
      runner_args=runner_args)

  # The rule set deliberately matches nothing: clients are scheduled
  # manually below, so the foreman must never pick up additional ones.
  bogus_rule = rdf_foreman.ForemanRegexClientRule(
      attribute_name="System", attribute_regex="Does not match anything")

  client_rule_set = rdf_foreman.ForemanClientRuleSet(rules=[
      rdf_foreman.ForemanClientRule(
          rule_type=rdf_foreman.ForemanClientRule.Type.REGEX,
          regex=bogus_rule)
  ])

  hunt_args = hunts_standard.VariableGenericHuntArgs(
      flows=[flow_request])
  hunt_args.output_plugins = self.GetOutputPlugins()

  # client_rate=0 disables rate limiting so all targets start immediately.
  with hunts_implementation.GRRHunt.StartHunt(
      hunt_name=hunts_standard.VariableGenericHunt.__name__,
      args=hunt_args,
      client_rule_set=client_rule_set,
      client_rate=0,
      expiry_time="1d",
      token=token) as hunt:
    self.state.hunt_id = hunt.session_id
    hunt.SetDescription("EndToEnd tests run by cron")
    hunt.Run()
    hunt.ManuallyScheduleClients(token=token)

  # Set a callback to check the results after 50 minutes. This should be
  # plenty of time for the clients to receive the hunt and run the tests, but
  # not so long that the flow lease will expire.
  # NOTE(review): the "50 minutes" above describes the default; the actual
  # wait comes from the Test.end_to_end_result_check_wait config option.
  wait_duration = rdfvalue.Duration(
      config.CONFIG.Get("Test.end_to_end_result_check_wait"))
  completed_time = rdfvalue.RDFDatetime.Now() + wait_duration
  self.CallState(next_state="CheckResults", start_time=completed_time)