def Start(self):
  """Retrieve all the clients for the AbstractClientStatsCollectors."""
  try:
    self.stats = {}

    self.BeginProcessing()

    if data_store.RelationalDBReadEnabled():
      clients = self._IterateClients()
    else:
      clients = self._IterateLegacyClients()

    processed_count = 0
    for c in clients:
      if data_store.RelationalDBReadEnabled():
        self.ProcessClientFullInfo(c)
      else:
        self.ProcessLegacyClient(c)
      processed_count += 1

      # This flow is not dead: we don't want to run out of lease time.
      self.HeartBeat()

    self.FinishProcessing()
    for fd in self.stats.values():
      fd.Close()

    logging.info("%s: processed %d clients.", self.__class__.__name__,
                 processed_count)
  except Exception as e:  # pylint: disable=broad-except
    logging.exception("Error while calculating stats: %s", e)
    raise
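# The HeartBeat() call inside the loop above renews the flow's lease once
# per processed client, so a long scan never outlives its lease. The same
# shape, distilled into a generic helper (process/heartbeat are
# hypothetical callables here, not GRR APIs):
def ProcessAllWithHeartbeat(items, process, heartbeat):
  count = 0
  for item in items:
    process(item)
    count += 1
    heartbeat()  # cheap relative to process(); keeps the lease alive
  return count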
def Handle(self, args, token=None):
  end = args.count or sys.maxint

  keywords = shlex.split(args.query)

  api_clients = []
  if data_store.RelationalDBReadEnabled():
    index = client_index.ClientIndex()

    clients = sorted(
        index.LookupClients(keywords))[args.offset:args.offset + end]
    client_infos = data_store.REL_DB.MultiReadClientFullInfo(clients)
    for client_info in client_infos.itervalues():
      api_clients.append(ApiClient().InitFromClientInfo(client_info))
  else:
    index = client_index.CreateClientIndex(token=token)

    result_urns = sorted(
        index.LookupClients(keywords))[args.offset:args.offset + end]
    result_set = aff4.FACTORY.MultiOpen(result_urns, token=token)

    for child in sorted(result_set):
      api_clients.append(ApiClient().InitFromAff4Object(child))

  return ApiSearchClientsResult(items=api_clients)
def Handle(self, args, token=None):
  if not args.timestamp:
    age = rdfvalue.RDFDatetime.Now()
  else:
    age = rdfvalue.RDFDatetime(args.timestamp)

  if data_store.RelationalDBReadEnabled():
    info = data_store.REL_DB.ReadClientFullInfo(str(args.client_id))

    if args.timestamp:
      # Assume that a snapshot for this particular timestamp exists.
      snapshots = data_store.REL_DB.ReadClientSnapshotHistory(
          str(args.client_id), timerange=(args.timestamp, args.timestamp))

      if snapshots:
        info.last_snapshot = snapshots[0]
        info.last_startup_info = snapshots[0].startup_info

    return ApiClient().InitFromClientInfo(info)
  else:
    client = aff4.FACTORY.Open(
        args.client_id.ToClientURN(),
        aff4_type=aff4_grr.VFSGRRClient,
        age=age,
        token=token)
    return ApiClient().InitFromAff4Object(client)
def setUp(self):
  super(ApprovalByLabelE2ETest, self).setUp()

  self.SetUpLegacy()
  if data_store.RelationalDBReadEnabled():
    self.SetUpRelationalDB()

  cls = (api_call_router_with_approval_checks.ApiCallRouterWithApprovalChecks)
  cls.ClearCache()

  self.approver = test_lib.ConfigOverrider({
      "API.DefaultRouter": cls.__name__,
      "ACL.approvers_config_file": os.path.join(self.base_path,
                                                "approvers.yaml")
  })
  self.approver.Start()

  # Get a fresh approval manager object and reload with test approvers.
  self.approval_manager_stubber = utils.Stubber(
      client_approval_auth, "CLIENT_APPROVAL_AUTH_MGR",
      client_approval_auth.ClientApprovalAuthorizationManager())
  self.approval_manager_stubber.Start()

  # Force creation of new APIAuthorizationManager, so that configuration
  # changes are picked up.
  api_auth_manager.APIACLInit.InitApiAuthManager()
def End(self):
  """Finalize client registration."""
  # Update summary and publish to the Discovery queue.
  if data_store.RelationalDBWriteEnabled():
    try:
      data_store.REL_DB.WriteClient(self.state.client)
    except db.UnknownClientError:
      pass

  # Open the client unconditionally: the AFF4 object is needed below to
  # update the legacy client index even when the relational read path is
  # enabled.
  client = self._OpenClient()

  if data_store.RelationalDBReadEnabled():
    summary = self.state.client.GetSummary()
    summary.client_id = self.client_id
  else:
    summary = client.GetSummary()

  self.Publish("Discovery", summary)
  self.SendReply(summary)

  # Update the client index.
  client_index.CreateClientIndex(token=self.token).AddClient(client)

  if data_store.RelationalDBWriteEnabled():
    try:
      index = client_index.ClientIndex()
      index.AddClient(self.client_id.Basename(), self.state.client)
    except db.UnknownClientError:
      # TODO(amoser): Remove after data migration.
      pass
def Evaluate(self, client_obj):
  if data_store.RelationalDBReadEnabled():
    value = self._ResolveField(self.field, client_obj)
  else:
    value = self._ResolveFieldAFF4(self.field, client_obj)

  return self.attribute_regex.Search(value)
def Run(self):
  if data_store.RelationalDBReadEnabled():
    clients = self.SetupTestClientObjects(10)
    client_ids = sorted(clients)
  else:
    client_ids = [urn.Basename() for urn in self.SetupClients(10)]

  client_mock = hunt_test_lib.SampleHuntMock()

  with test_lib.FakeTime(42):
    with self.CreateHunt(description="the hunt") as hunt_obj:
      hunt_obj.Run()

  time_offset = 0
  for client_id in client_ids:
    with test_lib.FakeTime(45 + time_offset):
      self.AssignTasksToClients([client_id])
      hunt_test_lib.TestHuntHelper(
          client_mock, [rdf_client.ClientURN(client_id)], False, self.token)
      time_offset += 10

  replace = {hunt_obj.urn.Basename(): "H:123456"}
  self.Check(
      "GetHuntClientCompletionStats",
      args=hunt_plugin.ApiGetHuntClientCompletionStatsArgs(
          hunt_id=hunt_obj.urn.Basename()),
      replace=replace)
  self.Check(
      "GetHuntClientCompletionStats",
      args=hunt_plugin.ApiGetHuntClientCompletionStatsArgs(
          hunt_id=hunt_obj.urn.Basename(), size=4),
      replace=replace)
  self.Check(
      "GetHuntClientCompletionStats",
      args=hunt_plugin.ApiGetHuntClientCompletionStatsArgs(
          hunt_id=hunt_obj.urn.Basename(), size=1000),
      replace=replace)
def Run(self):
  with test_lib.FakeTime(42):
    hunt_urn = self.StartHunt(description="the hunt")

    if data_store.RelationalDBReadEnabled():
      clients = self.SetupTestClientObjects(5)
      client_ids = sorted(clients)
    else:
      client_ids = [urn.Basename() for urn in self.SetupClients(5)]

    self.AssignTasksToClients(client_ids=client_ids)

    # Only running the hunt on a single client, as SampleMock
    # implementation is non-deterministic in terms of resources
    # usage that gets reported back to the hunt.
    client_urns = [rdf_client.ClientURN(client_ids[-1])]
    self.RunHunt(client_ids=client_urns, failrate=0)

  # Create replace dictionary.
  replace = {hunt_urn.Basename(): "H:123456"}

  self.Check(
      "ListHuntClients",
      args=hunt_plugin.ApiListHuntClientsArgs(
          hunt_id=hunt_urn.Basename(), client_status="STARTED"),
      replace=replace)
  self.Check(
      "ListHuntClients",
      args=hunt_plugin.ApiListHuntClientsArgs(
          hunt_id=hunt_urn.Basename(), client_status="OUTSTANDING"),
      replace=replace)
  self.Check(
      "ListHuntClients",
      args=hunt_plugin.ApiListHuntClientsArgs(
          hunt_id=hunt_urn.Basename(), client_status="COMPLETED"),
      replace=replace)
def Handle(self, args, token=None):
  end_time = args.end or rdfvalue.RDFDatetime.Now()
  start_time = args.start or end_time - rdfvalue.Duration("3m")
  diffs_only = args.mode == args.Mode.DIFF

  items = []

  if data_store.RelationalDBReadEnabled():
    history = data_store.REL_DB.ReadClientSnapshotHistory(
        str(args.client_id))
    for client in history[::-1]:
      # TODO(amoser): Filtering could be done at the db level and we
      # wouldn't have to read all the versions always.
      if client.timestamp < start_time or client.timestamp > end_time:
        continue

      items.append(ApiClient().InitFromClientObject(client))
  else:
    all_clients = aff4.FACTORY.OpenDiscreteVersions(
        args.client_id.ToClientURN(),
        mode="r",
        age=(start_time.AsMicroSecondsFromEpoch(),
             end_time.AsMicroSecondsFromEpoch()),
        diffs_only=diffs_only,
        token=token)

    for fd in all_clients:
      items.append(ApiClient().InitFromAff4Object(fd, include_metadata=False))

  return ApiGetClientVersionsResult(items=items)
def Handle(self, args, token=None):
  end_time = args.end or rdfvalue.RDFDatetime.Now()
  start_time = args.start or end_time - rdfvalue.Duration("3m")
  diffs_only = args.mode == args.Mode.DIFF

  items = []

  if data_store.RelationalDBReadEnabled():
    history = data_store.REL_DB.ReadClientSnapshotHistory(
        str(args.client_id), timerange=(start_time, end_time))
    for client in history[::-1]:
      items.append(ApiClient().InitFromClientObject(client))
  else:
    all_clients = aff4.FACTORY.OpenDiscreteVersions(
        args.client_id.ToClientURN(),
        mode="r",
        age=(start_time.AsMicrosecondsSinceEpoch(),
             end_time.AsMicrosecondsSinceEpoch()),
        diffs_only=diffs_only,
        token=token)

    for fd in all_clients:
      items.append(ApiClient().InitFromAff4Object(fd, include_metadata=False))

  return ApiGetClientVersionsResult(items=items)
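# The two Handle() versions above appear to be the same handler before and
# after pushing the time filter into the datastore: the first filters
# snapshots in Python (its TODO notes this), the second passes timerange=
# to ReadClientSnapshotHistory instead. A sketch of the in-memory filter
# the second version makes unnecessary (FilterSnapshotsByTime is
# illustrative, not a GRR API):
def FilterSnapshotsByTime(history, start_time, end_time):
  """Keeps snapshots whose timestamp falls within [start_time, end_time]."""
  return [s for s in history if start_time <= s.timestamp <= end_time]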
def Handle(self, args, token=None):
  if args.count:
    end = args.offset + args.count
  else:
    end = sys.maxint

  keywords = shlex.split(args.query)

  api_clients = []
  if data_store.RelationalDBReadEnabled():
    index = client_index.ClientIndex()

    # TODO(amoser): We could move the label verification into the
    # database making this method more efficient. Label restrictions
    # should be on small subsets though so this might not be worth
    # it.
    all_client_ids = set()
    for label in self.labels_whitelist:
      label_filter = ["label:" + label] + keywords
      all_client_ids.update(index.LookupClients(label_filter))

    client_infos = data_store.REL_DB.MultiReadClientFullInfo(all_client_ids)

    index = 0
    for _, client_info in sorted(client_infos.items()):
      if not self._VerifyLabels(client_info.labels):
        continue
      if index >= args.offset and index < end:
        api_clients.append(ApiClient().InitFromClientInfo(client_info))
      index += 1
      if index >= end:
        break
  else:
    index = client_index.CreateClientIndex(token=token)
    all_urns = set()
    for label in self.labels_whitelist:
      label_filter = ["label:" + label] + keywords
      all_urns.update(index.LookupClients(label_filter))

    all_objs = aff4.FACTORY.MultiOpen(
        sorted(all_urns, key=str), aff4_type=aff4_grr.VFSGRRClient,
        token=token)

    index = 0
    for client_obj in all_objs:
      if not self._CheckClientLabels(client_obj):
        continue
      if index >= args.offset and index < end:
        api_clients.append(ApiClient().InitFromAff4Object(client_obj))
      index += 1
      if index >= end:
        break

  return ApiSearchClientsResult(items=api_clients)
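# Both branches above paginate by hand: they keep items whose running
# index i satisfies offset <= i < end, with end = offset + count (or
# sys.maxint when count is unset), breaking early once the window is
# filled. The same window, distilled into a plain helper (Paginate is
# illustrative, not a GRR API):
def Paginate(items, offset, count=None):
  end = offset + count if count else float("inf")
  return [x for i, x in enumerate(items) if offset <= i < end]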
def ProcessMessage(self, message=None, event=None):
  """Handle a startup event."""
  _ = event

  # We accept unauthenticated messages so there are no errors but we don't
  # store the results.
  if (message.auth_state !=
      rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED):
    return

  client_id = message.source
  new_si = message.payload
  drift = rdfvalue.Duration("5m")

  if data_store.RelationalDBReadEnabled():
    current_si = data_store.REL_DB.ReadClientStartupInfo(client_id.Basename())

    # We write the updated record if the client_info has any changes
    # or the boot time is more than 5 minutes different.
    if (not current_si or current_si.client_info != new_si.client_info or
        not current_si.boot_time or
        abs(current_si.boot_time - new_si.boot_time) > drift):
      data_store.REL_DB.WriteClientStartupInfo(client_id.Basename(), new_si)
  else:
    changes = False
    with aff4.FACTORY.Create(
        client_id, aff4_grr.VFSGRRClient, mode="rw",
        token=self.token) as client:
      old_info = client.Get(client.Schema.CLIENT_INFO)
      old_boot = client.Get(client.Schema.LAST_BOOT_TIME, 0)

      info = new_si.client_info

      # Only write to the datastore if we have new information.
      if info != old_info:
        client.Set(client.Schema.CLIENT_INFO(info))
        changes = True

      client.AddLabels(info.labels, owner="GRR")

      # Allow for some drift in the boot times (5 minutes).
      if not old_boot or abs(old_boot - new_si.boot_time) > drift:
        client.Set(client.Schema.LAST_BOOT_TIME(new_si.boot_time))
        changes = True

    if data_store.RelationalDBWriteEnabled() and changes:
      try:
        data_store.REL_DB.WriteClientStartupInfo(client_id.Basename(), new_si)
      except db.UnknownClientError:
        pass

  events.Events.PublishEventInline("ClientStartup", message, token=self.token)
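# The handler above writes startup info only when something meaningful
# changed, allowing 5 minutes of boot-time drift so clock jitter alone
# never triggers a write. The predicate, distilled and self-contained
# (ShouldWriteStartupInfo and its arguments are illustrative stand-ins,
# not GRR APIs; boot times here are plain seconds):
def ShouldWriteStartupInfo(current, new, drift_secs=300):
  if current is None:
    return True  # first record for this client
  if current.client_info != new.client_info:
    return True  # client binary/config changed
  if not current.boot_time:
    return True  # no boot time recorded yet
  return abs(current.boot_time - new.boot_time) > drift_secs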
def CreateAdminUser(self, username):
  """Creates a user and makes it an admin."""
  if data_store.RelationalDBReadEnabled():
    data_store.REL_DB.WriteGRRUser(
        username, user_type=rdf_objects.GRRUser.UserType.USER_TYPE_ADMIN)

  with self.CreateUser(username) as user:
    user.SetLabel("admin", owner="GRR")
def CreateUser(self, username):
  """Creates a user."""
  if data_store.RelationalDBReadEnabled():
    data_store.REL_DB.WriteGRRUser(username)

  user = aff4.FACTORY.Create(
      "aff4:/users/%s" % username, users.GRRUser, token=self.token.SetUID())
  user.Flush()
  return user
def _GetAccessChecker(self):
  cls = ApiCallRouterWithApprovalChecks

  if cls.access_checker is None:
    if data_store.RelationalDBReadEnabled(data_store.READ_CATEGORY_APPROVALS):
      cls.access_checker = RelDBChecker()
    else:
      cls.access_checker = LegacyChecker()

  return cls.access_checker
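# _GetAccessChecker caches the checker on the class, so all router
# instances share one object and the RelationalDBReadEnabled() decision is
# made only once per process. The same lazy class-level cache in generic
# form (all names here are hypothetical, not GRR APIs):
class _CheckerCache(object):
  _instance = None

  @classmethod
  def Get(cls, factory):
    if cls._instance is None:
      cls._instance = factory()  # built on first use, then reused
    return cls._instance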
def EnrolFleetspeakClient(self, client_id):
  """Enrols a Fleetspeak-enabled client for use with GRR."""
  client_urn = rdf_client.ClientURN(client_id)

  # If already enrolled, return.
  if data_store.RelationalDBReadEnabled():
    if data_store.REL_DB.ReadClientMetadata(client_id):
      return
  else:
    if aff4.FACTORY.ExistsWithType(
        client_urn, aff4_type=aff4_grr.VFSGRRClient, token=self.token):
      return

  logging.info("Enrolling a new Fleetspeak client: %r", client_id)

  if data_store.RelationalDBWriteEnabled():
    data_store.REL_DB.WriteClientMetadata(client_id, fleetspeak_enabled=True)

  # TODO(fleetspeak-team,grr-team): If aff4 isn't reliable enough, we can
  # catch exceptions from it and forward them to Fleetspeak by failing its
  # gRPC call. Fleetspeak will then retry with a random, perhaps healthier,
  # instance of the GRR frontend.
  with aff4.FACTORY.Create(
      client_urn, aff4_type=aff4_grr.VFSGRRClient, mode="rw",
      token=self.token) as client:
    client.Set(client.Schema.FLEETSPEAK_ENABLED, rdfvalue.RDFBool(True))

    index = client_index.CreateClientIndex(token=self.token)
    index.AddClient(client)
    if data_store.RelationalDBWriteEnabled():
      index = client_index.ClientIndex()
      index.AddClient(data_migration.ConvertVFSGRRClient(client))

  enrollment_session_id = rdfvalue.SessionID(
      queue=queues.ENROLLMENT, flow_name="Enrol")

  publish_msg = rdf_flows.GrrMessage(
      payload=client_urn,
      session_id=enrollment_session_id,
      # Fleetspeak ensures authentication.
      auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
      source=enrollment_session_id,
      priority=rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY)

  # Publish the client enrollment message.
  events.Events.PublishEvent("ClientEnrollment", publish_msg, token=self.token)
def Run(self):
  # Fix the time to avoid regressions.
  with test_lib.FakeTime(42):
    if data_store.RelationalDBReadEnabled():
      client_obj = self.SetupTestClientObject(0)
      client_id = client_obj.client_id
    else:
      client_urn = self.SetupClient(0, add_cert=False)
      client_id = client_urn.Basename()

    self.Check(
        "SearchClients",
        args=client_plugin.ApiSearchClientsArgs(query=client_id))
def Evaluate(self, client_obj):
  if data_store.RelationalDBReadEnabled():
    value = client_obj.last_snapshot.knowledge_base.os
  else:
    value = client_obj.Get(client_obj.Schema.SYSTEM)

  if not value:
    return False

  value = utils.SmartStr(value)
  return ((self.os_windows and value.startswith("Windows")) or
          (self.os_linux and value.startswith("Linux")) or
          (self.os_darwin and value.startswith("Darwin")))
def Handle(self, args, token=None):
  if not args.timestamp:
    age = rdfvalue.RDFDatetime.Now()
  else:
    age = rdfvalue.RDFDatetime(args.timestamp)

  if data_store.RelationalDBReadEnabled():
    info = data_store.REL_DB.ReadClientFullInfo(str(args.client_id))
    return ApiClient().InitFromClientInfo(info)
  else:
    client = aff4.FACTORY.Open(
        args.client_id.ToClientURN(),
        aff4_type=aff4_grr.VFSGRRClient,
        age=age,
        token=token)
    return ApiClient().InitFromAff4Object(client)
def ClientFixture(client_id, token=None, age=None):
  """Creates a client fixture with a predefined VFS tree."""
  if hasattr(client_id, "Basename"):
    client_id = client_id.Basename()

  LegacyClientFixture(client_id, age=age, token=token)

  if not data_store.RelationalDBReadEnabled():
    return

  data_migration.Migrate(thread_count=1)
  db_client_snapshot = data_store.REL_DB.ReadClientSnapshot(client_id)
  client_index.ClientIndex().AddClient(db_client_snapshot)
def IsFleetspeakEnabledClient(grr_id, token):
  if grr_id is None:
    return False

  if data_store.RelationalDBReadEnabled():
    md = data_store.REL_DB.ReadClientMetadata(grr_id)
    if not md:
      return False
    return md.fleetspeak_enabled
  else:
    with aff4.FACTORY.Create(
        rdf_client.ClientURN(grr_id),
        aff4.AFF4Object.classes["VFSGRRClient"],
        mode="r",
        token=token) as client:
      return bool(client.Get(client.Schema.FLEETSPEAK_ENABLED))
def Run(self):
  # Fix the time to avoid regressions.
  with test_lib.FakeTime(42):
    if data_store.RelationalDBReadEnabled():
      client_obj = self.SetupTestClientObject(
          0, memory_size=4294967296, add_cert=False)
      client_id = client_obj.client_id
    else:
      client_urn = self.SetupClient(0, memory_size=4294967296, add_cert=False)
      client_id = client_urn.Basename()

    self.Check(
        "GetClient", args=client_plugin.ApiGetClientArgs(client_id=client_id))
def ProcessMessage(self, message=None, event=None):
  """Processes this event."""
  _ = event

  client_id = message.source
  message = message.payload.string

  logging.info(self.logline, client_id, message)

  # Write crash data.
  if data_store.RelationalDBReadEnabled():
    client = data_store.REL_DB.ReadClientSnapshot(client_id)
    client_info = client.startup_info.client_info
  else:
    client = aff4.FACTORY.Open(client_id, token=self.token)
    client_info = client.Get(client.Schema.CLIENT_INFO)

  crash_details = rdf_client.ClientCrash(
      client_id=client_id,
      client_info=client_info,
      crash_message=message,
      timestamp=long(time.time() * 1e6),
      crash_type=self.well_known_session_id)

  self.WriteAllCrashDetails(client_id, crash_details)

  # Also send email.
  if config.CONFIG["Monitoring.alert_email"]:
    client = aff4.FACTORY.Open(client_id, token=self.token)
    hostname = client.Get(client.Schema.HOSTNAME)
    url = "/clients/%s" % client_id.Basename()

    body = self.__class__.mail_template.render(
        client_id=client_id,
        admin_ui=config.CONFIG["AdminUI.url"],
        hostname=utils.SmartUnicode(hostname),
        signature=config.CONFIG["Email.signature"],
        url=url,
        message=utils.SmartUnicode(message))
    email_alerts.EMAIL_ALERTER.SendEmail(
        config.CONFIG["Monitoring.alert_email"],
        "GRR server",
        self.subject % client_id,
        utils.SmartStr(body),
        is_html=True)
def Handle(self, args, token=None):
  if data_store.RelationalDBReadEnabled():
    # TODO(amoser): Again, this is rather inefficient; if we moved
    # this call to the datastore we could make it much faster.
    # However, there is a chance that this will not be needed anymore
    # once we use the relational db everywhere, let's decide later.
    history = data_store.REL_DB.ReadClientHistory(str(args.client_id))
    times = [h.timestamp for h in history]
  else:
    fd = aff4.FACTORY.Open(
        args.client_id.ToClientURN(),
        mode="r",
        age=aff4.ALL_TIMES,
        token=token)
    type_values = list(fd.GetValuesForAttribute(fd.Schema.TYPE))
    times = sorted([t.age for t in type_values], reverse=True)

  return ApiGetClientVersionTimesResult(times=times)
def CreateClientWithVolumes(self, available=50):
  volume = rdf_client.Volume(
      total_allocation_units=100, actual_available_allocation_units=available)

  client_id = self.SetupClient(0)

  if data_store.RelationalDBReadEnabled():
    snapshot = data_store.REL_DB.ReadClientSnapshot(client_id.Basename())
    snapshot.volumes = [volume]
    data_store.REL_DB.WriteClientSnapshot(snapshot)
  else:
    with aff4.FACTORY.Open(client_id, mode="rw", token=self.token) as client_obj:
      client_obj.Set(client_obj.Schema.VOLUMES([volume]))

  self.RequestAndGrantClientApproval(client_id)

  client_obj = aff4.FACTORY.Open(client_id, token=self.token)
  return client_id
def Evaluate(self, client_obj):
  if data_store.RelationalDBReadEnabled():
    value = self._ResolveField(self.field, client_obj)
  else:
    value = self._ResolveFieldAFF4(self.field, client_obj)

  if value is None:
    return False

  op = self.operator
  if op == ForemanIntegerClientRule.Operator.LESS_THAN:
    return value < self.value
  elif op == ForemanIntegerClientRule.Operator.GREATER_THAN:
    return value > self.value
  elif op == ForemanIntegerClientRule.Operator.EQUAL:
    return value == self.value
  else:
    # Unknown operator.
    raise ValueError("Unknown operator: %d" % op)
def Evaluate(self, client_obj):
  if self.match_mode == ForemanLabelClientRule.MatchMode.MATCH_ALL:
    quantifier = all
  elif self.match_mode == ForemanLabelClientRule.MatchMode.MATCH_ANY:
    quantifier = any
  elif self.match_mode == ForemanLabelClientRule.MatchMode.DOES_NOT_MATCH_ALL:
    quantifier = lambda iterable: not all(iterable)
  elif self.match_mode == ForemanLabelClientRule.MatchMode.DOES_NOT_MATCH_ANY:
    quantifier = lambda iterable: not any(iterable)
  else:
    raise ValueError("Unexpected match mode value: %s" % self.match_mode)

  if data_store.RelationalDBReadEnabled():
    client_label_names = [label.name for label in client_obj.labels]
  else:
    client_label_names = set(client_obj.GetLabelsNames())

  return quantifier((name in client_label_names) for name in self.label_names)
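# Quick illustration of the four label match modes selected above, using
# plain sets (hypothetical data, not GRR objects). A client labeled
# {"linux", "canary"} checked against rule labels ["canary", "gpu"]:
labels = {"linux", "canary"}
wanted = ["canary", "gpu"]
all(name in labels for name in wanted)      # MATCH_ALL          -> False
any(name in labels for name in wanted)      # MATCH_ANY          -> True
not all(name in labels for name in wanted)  # DOES_NOT_MATCH_ALL -> True
not any(name in labels for name in wanted)  # DOES_NOT_MATCH_ANY -> False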
def Run(self):
  if data_store.RelationalDBReadEnabled():
    client = self.SetupTestClientObject(0)
    client_id = client.client_id
    client_ids = [rdf_client.ClientURN(client_id)]
  else:
    client_ids = self.SetupClients(1)
    client_id = client_ids[0].Basename()

  client_mock = flow_test_lib.CrashClientMock(
      rdf_client.ClientURN(client_id), self.token)

  with test_lib.FakeTime(42):
    with self.CreateHunt(description="the hunt") as hunt_obj:
      hunt_obj.Run()

  with test_lib.FakeTime(45):
    self.AssignTasksToClients(client_ids)
    hunt_test_lib.TestHuntHelperWithMultipleMocks(
        {client_id: client_mock}, False, self.token)

  crashes = aff4_grr.VFSGRRClient.CrashCollectionForCID(
      rdf_client.ClientURN(client_id))
  crash = list(crashes)[0]
  session_id = crash.session_id.Basename()
  replace = {
      hunt_obj.urn.Basename(): "H:123456",
      session_id: "H:11223344"
  }

  self.Check(
      "ListClientCrashes",
      args=client_plugin.ApiListClientCrashesArgs(client_id=client_id),
      replace=replace)
  self.Check(
      "ListClientCrashes",
      args=client_plugin.ApiListClientCrashesArgs(client_id=client_id, count=1),
      replace=replace)
  self.Check(
      "ListClientCrashes",
      args=client_plugin.ApiListClientCrashesArgs(
          client_id=client_id, offset=1, count=1),
      replace=replace)
def __init__(self,
             certificate,
             private_key,
             max_queue_size=50,
             message_expiry_time=120,
             max_retransmission_time=10,
             threadpool_prefix="grr_threadpool"):
  # Identify ourselves as the server.
  self.token = access_control.ACLToken(username="******", reason="Implied.")
  self.token.supervisor = True

  if data_store.RelationalDBReadEnabled():
    self._communicator = RelationalServerCommunicator(
        certificate=certificate, private_key=private_key)
  else:
    self._communicator = ServerCommunicator(
        certificate=certificate, private_key=private_key, token=self.token)

  self.receive_thread_pool = {}
  self.message_expiry_time = message_expiry_time
  self.max_retransmission_time = max_retransmission_time
  self.max_queue_size = max_queue_size
  self.thread_pool = threadpool.ThreadPool.Factory(
      threadpool_prefix,
      min_threads=2,
      max_threads=config.CONFIG["Threadpool.size"])
  self.thread_pool.Start()

  # Well known flows are run on the front end.
  self.well_known_flows = (
      flow.WellKnownFlow.GetAllWellKnownFlows(token=self.token))
  well_known_flow_names = self.well_known_flows.keys()
  for well_known_flow in well_known_flow_names:
    if well_known_flow not in config.CONFIG["Frontend.well_known_flows"]:
      del self.well_known_flows[well_known_flow]

  self.well_known_flows_blacklist = set(
      config.CONFIG["Frontend.DEBUG_well_known_flows_blacklist"])
def Run(self):
  if data_store.RelationalDBReadEnabled():
    client_obj = self.SetupTestClientObject(0)
    client_id = client_obj.client_id
  else:
    client_id = self.SetupClient(0).Basename()

  client_mocks = {
      client_id: flow_test_lib.CrashClientMock(client_id, self.token)
  }

  with test_lib.FakeTime(42):
    with self.CreateHunt(description="the hunt") as hunt_obj:
      hunt_obj.Run()

  with test_lib.FakeTime(45):
    self.AssignTasksToClients([client_id])
    hunt_test_lib.TestHuntHelperWithMultipleMocks(
        client_mocks, False, self.token)

  crashes = implementation.GRRHunt.CrashCollectionForHID(hunt_obj.urn)
  crash = list(crashes)[0]
  session_id = crash.session_id.Basename()
  replace = {
      hunt_obj.urn.Basename(): "H:123456",
      session_id: "H:11223344"
  }

  self.Check(
      "ListHuntCrashes",
      args=hunt_plugin.ApiListHuntCrashesArgs(hunt_id=hunt_obj.urn.Basename()),
      replace=replace)
  self.Check(
      "ListHuntCrashes",
      args=hunt_plugin.ApiListHuntCrashesArgs(
          hunt_id=hunt_obj.urn.Basename(), count=1),
      replace=replace)
  self.Check(
      "ListHuntCrashes",
      args=hunt_plugin.ApiListHuntCrashesArgs(
          hunt_id=hunt_obj.urn.Basename(), offset=1, count=1),
      replace=replace)
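# Every snippet in this section follows the same migration-era shape:
# branch on data_store.RelationalDBReadEnabled() and serve reads from
# REL_DB when the relational datastore is enabled, otherwise fall back to
# the legacy AFF4 path, with data_store.RelationalDBWriteEnabled() guarding
# dual writes during the transition. A minimal sketch of that shape, with
# ReadModern/ReadLegacy as hypothetical stand-ins for the two code paths:
def ReadClientData(client_id, token=None):
  if data_store.RelationalDBReadEnabled():
    return ReadModern(client_id)  # relational (REL_DB) path
  return ReadLegacy(client_id, token=token)  # legacy AFF4 path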