def Start(self):
  """Kick off an EndToEndTestFlow hunt against the configured test clients.

  Resolves the end-to-end client targets from config, starts a
  VariableGenericHunt that runs EndToEndTestFlow on exactly those clients
  (the foreman rule is deliberately a non-matching placeholder; clients are
  scheduled manually), and then schedules a CheckResults callback to fire
  after the configured wait period.

  Logs and returns early if no test clients are configured.
  """
  # Reset per-run bookkeeping on the flow state.
  self.state.hunt_id = None
  self.state.client_ids = set()
  self.state.client_ids_failures = set()
  self.state.client_ids_result_reported = set()
  # NOTE(review): this immediately overwrites the set() assigned above with a
  # list — the set() initialization looks redundant; confirm nothing relies on
  # the intermediate set type.
  self.state.client_ids = list(
      base.GetClientTestTargets(token=self.token))

  if not self.state.client_ids:
    self.Log("No clients to test on, define them in "
             "Test.end_to_end_client_ids")
    return

  # SetUID is required to run a hunt on the configured end-to-end client
  # targets without an approval.
  token = access_control.ACLToken(
      username="GRRWorker", reason="Running endtoend tests.").SetUID()
  runner_args = rdf_flows.FlowRunnerArgs(flow_name="EndToEndTestFlow")

  # One FlowRequest covering all target clients; default EndToEndTestFlowArgs
  # runs the full test suite on each client.
  flow_request = hunts_standard.FlowRequest(
      client_ids=self.state.client_ids,
      args=flows_endtoend.EndToEndTestFlowArgs(),
      runner_args=runner_args)

  # The hunt is only ever scheduled manually on the explicit client list
  # below, so the foreman rule is intentionally one that matches nothing —
  # it prevents the foreman from adding arbitrary clients.
  bogus_rule = rdf_foreman.ForemanRegexClientRule(
      attribute_name="System", attribute_regex="Does not match anything")

  client_rule_set = rdf_foreman.ForemanClientRuleSet(rules=[
      rdf_foreman.ForemanClientRule(
          rule_type=rdf_foreman.ForemanClientRule.Type.REGEX,
          regex=bogus_rule)
  ])

  hunt_args = hunts_standard.VariableGenericHuntArgs(
      flows=[flow_request])
  hunt_args.output_plugins = self.GetOutputPlugins()

  # client_rate=0 disables rate limiting; expiry_time="1d" bounds the hunt's
  # lifetime well beyond the result-check window below.
  with hunts.GRRHunt.StartHunt(hunt_name="VariableGenericHunt",
                               args=hunt_args,
                               client_rule_set=client_rule_set,
                               client_rate=0,
                               expiry_time="1d",
                               token=token) as hunt:
    self.state.hunt_id = hunt.session_id
    hunt.SetDescription("EndToEnd tests run by cron")
    hunt.Run()
    # Bypass the foreman (whose rule never matches) and schedule the target
    # clients directly.
    hunt.ManuallyScheduleClients(token=token)

  # Set a callback to check the results after 50 minutes. This should be
  # plenty of time for the clients to receive the hunt and run the tests, but
  # not so long that the flow lease will expire.
  wait_duration = rdfvalue.Duration(
      config_lib.CONFIG.Get("Test.end_to_end_result_check_wait"))
  completed_time = rdfvalue.RDFDatetime.Now() + wait_duration
  self.CallState(next_state="CheckResults", start_time=completed_time)
def RunEndToEndTests():
  """Run the registered end-to-end client tests against the test clients.

  Initializes the server under the test config context, resolves the target
  clients from flags/config, and for each client runs every registered
  ClientTestBase subclass matching the client's platform through a unittest
  TextTestRunner. Failures inside an individual test are logged and do not
  abort the remaining tests.

  Raises:
    ValueError: if any registered test class declares an unsupported platform.
    RuntimeError: if a client has no system_info (interrogate not finished).
  """
  runner = unittest.TextTestRunner()

  # We are running a test so let the config system know that.
  config_lib.CONFIG.AddContext("Test Context",
                               "Context applied when we run tests.")
  startup.Init()

  token = access_control.ACLToken(username="GRRWorker",
                                  reason="Running end to end client tests.")

  # Targets come from --client_ids / --hostnames flags, falling back to the
  # Test.end_to_end_client* config options inside GetClientTestTargets.
  client_id_set = base.GetClientTestTargets(
      client_ids=flags.FLAGS.client_ids,
      hostnames=flags.FLAGS.hostnames,
      checkin_duration_threshold="1h")

  # Validate platform declarations up-front so a typo fails fast, before any
  # tests run.
  for cls in base.ClientTestBase.classes.values():
    for p in cls.platforms:
      if p not in set(["Linux", "Darwin", "Windows"]):
        raise ValueError("Unsupported platform: %s in class %s" %
                         (p, cls.__name__))

  # NOTE(review): no early return here — with an empty set the MultiOpen loop
  # below simply does nothing, so only the message is printed.
  if not client_id_set:
    print(
        "No clients to test on. Define Test.end_to_end_client* config "
        "options, or pass them as parameters.")

  for client in aff4.FACTORY.MultiOpen(client_id_set, token=token):
    # Rebinds `client` from the AFF4 object to its SUMMARY attribute; all
    # later accesses (client_id, system_info) are on the summary.
    client = client.Get(client.SchemaCls.SUMMARY)

    if hasattr(client, "system_info"):
      sysinfo = client.system_info
    else:
      raise RuntimeError(
          "Unknown system type, likely waiting on interrogate"
          " to complete.")

    for cls in base.ClientTestBase.classes.values():
      # --testnames, when given, acts as an allow-list by class name.
      if flags.FLAGS.testnames and (cls.__name__
                                    not in flags.FLAGS.testnames):
        continue

      if not aff4.issubclass(cls, base.ClientTestBase):
        continue

      # Fix the call method so we can use the test runner. See doco in
      # base.ClientTestBase
      def _RealCall(testcase, *args, **kwds):
        return testcase.run(*args, **kwds)

      cls.__call__ = _RealCall

      if sysinfo.system in cls.platforms:
        print "Running %s on %s (%s: %s, %s, %s)" % (
            cls.__name__, client.client_id, sysinfo.fqdn, sysinfo.system,
            sysinfo.version, sysinfo.machine)
        try:
          # Mixin the unittest framework so we can use the test runner to run
          # the test and get nice output. We don't want to depend on unittest
          # code in the tests themselves.
          testcase = cls(client_id=client.client_id,
                         platform=sysinfo.system,
                         token=token,
                         local_client=flags.FLAGS.local_client,
                         local_worker=flags.FLAGS.local_worker)
          runner.run(testcase)
        except Exception:  # pylint: disable=broad-except
          # Best-effort: keep going so one broken test doesn't stop the run.
          logging.exception("Failed to run test %s", cls)
def RunEndToEndTests():
  """Run the registered end-to-end client tests and print a summary.

  Newer variant: initializes the server under contexts.TEST_CONTEXT, grants
  the GRREndToEndTest user the admin label (needed by the launchbinary test),
  runs every applicable ClientTestBase subclass per client through a unittest
  TextTestRunner, collects results per client, and prints an OK/FAIL summary
  at the end.

  Raises:
    ValueError: if any registered test class declares an unsupported platform.
    RuntimeError: if a client has no system_info (interrogate not finished).
  """
  runner = unittest.TextTestRunner()

  # We are running a test so let the config system know that.
  config_lib.CONFIG.AddContext(contexts.TEST_CONTEXT,
                               "Context applied when we run tests.")
  server_startup.Init()

  token = access_control.ACLToken(username="GRREndToEndTest",
                                  reason="Running end to end client tests.")

  # We need this for the launchbinary test
  with aff4.FACTORY.Create("aff4:/users/GRREndToEndTest",
                           aff4_users.GRRUser,
                           mode="rw",
                           token=token) as test_user:
    test_user.AddLabels("admin")

  # Targets come from --client_ids / --hostnames flags, falling back to the
  # Test.end_to_end_client* config options inside GetClientTestTargets.
  client_id_set = base.GetClientTestTargets(
      client_ids=flags.FLAGS.client_ids,
      hostnames=flags.FLAGS.hostnames,
      checkin_duration_threshold="1h",
      token=token)

  # Validate platform declarations up-front so a typo fails fast, before any
  # tests run.
  for cls in base.ClientTestBase.classes.values():
    for p in cls.platforms:
      if p not in set(["Linux", "Darwin", "Windows"]):
        raise ValueError("Unsupported platform: %s in class %s" %
                         (p, cls.__name__))

  # NOTE(review): no early return here — with an empty set the MultiOpen loop
  # below simply does nothing, so only the message is printed.
  if not client_id_set:
    print(
        "No clients to test on. Define Test.end_to_end_client* config "
        "options, or pass them as parameters.")

  # Maps client urn -> {test class name -> unittest result}.
  results_by_client = {}
  for client in aff4.FACTORY.MultiOpen(client_id_set, token=token):
    client_summary = client.GetSummary()

    if hasattr(client_summary, "system_info"):
      sysinfo = client_summary.system_info
    else:
      raise RuntimeError(
          "Unknown system type, likely waiting on interrogate"
          " to complete.")

    results = {}
    results_by_client[client.urn] = results
    for cls in base.ClientTestBase.classes.values():
      # --testnames, when given, acts as an allow-list by class name.
      if flags.FLAGS.testnames and (cls.__name__
                                    not in flags.FLAGS.testnames):
        continue

      if not aff4.issubclass(cls, base.ClientTestBase):
        continue

      # Abstract* classes are shared bases, not runnable tests.
      if cls.__name__.startswith("Abstract"):
        continue

      # Fix the call method so we can use the test runner. See doco in
      # base.ClientTestBase
      def _RealCall(testcase, *args, **kwds):
        return testcase.run(*args, **kwds)

      cls.__call__ = _RealCall

      if sysinfo.system in cls.platforms:
        print "Running %s on %s (%s: %s, %s, %s)" % (
            cls.__name__, client_summary.client_id, sysinfo.fqdn,
            sysinfo.system, sysinfo.version, sysinfo.machine)
        try:
          # Mixin the unittest framework so we can use the test runner to run
          # the test and get nice output. We don't want to depend on unittest
          # code in the tests themselves.
          testcase = cls(client_id=client_summary.client_id,
                         platform=sysinfo.system,
                         token=token,
                         local_client=flags.FLAGS.local_client,
                         local_worker=flags.FLAGS.local_worker)
          results[cls.__name__] = runner.run(testcase)
        except Exception:  # pylint: disable=broad-except
          # Best-effort: keep going so one broken test doesn't stop the run.
          logging.exception("Failed to run test %s", cls)

  # Print a little summary.
  for client, results in results_by_client.iteritems():
    print "Results for %s:" % client
    for testcase, result in sorted(results.items()):
      res = "[ OK ]"
      # Any recorded error or failure marks the whole testcase as FAIL.
      if result.errors or result.failures:
        res = "[ FAIL ]"
      print "%45s: %s" % (testcase, res)