def testCronJobRunExpiry(self):
  """Tests that DeleteOldCronJobRuns removes only runs older than the cutoff."""
  job_id = "job1"
  self.db.WriteCronJob(rdf_cronjobs.CronJob(cron_job_id=job_id))

  # Write three runs spaced one day apart, starting seven days in the past.
  # Uses the explicit Duration.From constructor for consistency with the
  # rest of this file (the implicit string form is the older API).
  fake_time = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(
      7, rdfvalue.DAYS)
  with test_lib.FakeTime(fake_time):
    run = rdf_cronjobs.CronJobRun(cron_job_id=job_id, run_id="00000000")
    self.db.WriteCronJobRun(run)

  with test_lib.FakeTime(fake_time + rdfvalue.Duration.From(1, rdfvalue.DAYS)):
    run = rdf_cronjobs.CronJobRun(cron_job_id=job_id, run_id="00000001")
    self.db.WriteCronJobRun(run)

  with test_lib.FakeTime(fake_time + rdfvalue.Duration.From(2, rdfvalue.DAYS)):
    run = rdf_cronjobs.CronJobRun(cron_job_id=job_id, run_id="00000002")
    self.db.WriteCronJobRun(run)

  self.assertLen(self.db.ReadCronJobRuns(job_id), 3)

  # A cutoff one hour past the oldest run should delete exactly that run.
  cutoff = fake_time + rdfvalue.Duration.From(1, rdfvalue.HOURS)
  self.db.DeleteOldCronJobRuns(cutoff)
  jobs = self.db.ReadCronJobRuns(job_id)
  self.assertLen(jobs, 2)
  for job in jobs:
    self.assertGreater(job.timestamp, cutoff)

  # Moving the cutoff past the second run (1d + 1h = 25h) should leave only
  # the newest run.
  cutoff = fake_time + rdfvalue.Duration.From(25, rdfvalue.HOURS)
  self.db.DeleteOldCronJobRuns(cutoff)
  jobs = self.db.ReadCronJobRuns(job_id)
  self.assertLen(jobs, 1)
  for job in jobs:
    self.assertGreater(job.timestamp, cutoff)
def testCronJobRegistryInstantiation(self):
  """Instantiates every registered cron job class to catch constructor bugs."""
  for registered_cls in cronjobs.CronJobRegistry.CRON_REGISTRY.values():
    cron_job = rdf_cronjobs.CronJob(cron_job_id="foobar")
    cron_run = rdf_cronjobs.CronJobRun(cron_job_id="foobar", status="RUNNING")
    # Construction alone must succeed for every registered class.
    registered_cls(cron_run, cron_job)
def testInitFromCronObject(self):
  """Checks that ApiCronJob.InitFromObject copies every CronJob field."""
  job_state = rdf_protodict.AttributedDict()
  job_state["quux"] = "norf"
  job_state["thud"] = "blargh"

  job = rdf_cronjobs.CronJob()
  job.cron_job_id = "foo"
  job.current_run_id = "bar"
  job.last_run_time = self._DATETIME("2001-01-01")
  job.last_run_status = "FINISHED"
  job.frequency = rdfvalue.Duration.From(1, rdfvalue.DAYS)
  job.lifetime = rdfvalue.Duration.From(30, rdfvalue.DAYS)
  job.enabled = False
  job.forced_run_requested = True
  job.state = job_state
  job.description = "testdescription"

  api_job = cron_plugin.ApiCronJob.InitFromObject(job)

  self.assertEqual(api_job.cron_job_id, "foo")
  self.assertEqual(api_job.current_run_id, "bar")
  self.assertEqual(api_job.description, "testdescription")
  self.assertEqual(api_job.last_run_time, self._DATETIME("2001-01-01"))
  self.assertEqual(api_job.last_run_status, "FINISHED")
  self.assertEqual(api_job.frequency, rdfvalue.Duration.From(1, rdfvalue.DAYS))
  self.assertEqual(api_job.lifetime, rdfvalue.Duration.From(30, rdfvalue.DAYS))
  self.assertFalse(api_job.enabled)
  self.assertTrue(api_job.forced_run_requested)

  # The state is exposed as a list of key/value entries; flatten it back
  # into a dict for comparison.
  state_items = {entry.key: entry.value for entry in api_job.state.items}
  self.assertEqual(state_items, {"quux": "norf", "thud": "blargh"})
def testOSBreakdown(self):
  """Check that all client stats cron jobs are run."""
  cron_run = rdf_cronjobs.CronJobRun()
  cron_job = rdf_cronjobs.CronJob()
  system.OSBreakDownCronJob(cron_run, cron_job).Run()
  self._CheckOSBreakdown()
def CreateJob(self, cron_args=None, job_id=None, disabled=False, token=None): """Creates a cron job that runs given flow with a given frequency. Args: cron_args: A protobuf of type CreateCronJobFlowArgs. job_id: Use this job_id instead of an autogenerated unique name (used for system cron jobs - we want them to have well-defined persistent name). disabled: If True, the job object will be created, but will be disabled. token: Security token used for data store access. Unused. Returns: URN of the cron job created. """ # TODO(amoser): Remove the token from this method once the aff4 # cronjobs are gone. del token if not job_id: uid = utils.PRNG.GetUInt16() job_id = "%s_%s" % (cron_args.flow_runner_args.flow_name, uid) job = rdf_cronjobs.CronJob(job_id=job_id, cron_args=cron_args, disabled=disabled) data_store.REL_DB.WriteCronJob(job) return job_id
def testLastAccessStats(self):
  """Check that all client stats cron jobs are run."""
  cron_run = rdf_cronjobs.CronJobRun()
  cron_job = rdf_cronjobs.CronJob()
  system.LastAccessStatsCronJob(cron_run, cron_job).Run()
  self._CheckLastAccessStats()
def testStatefulSystemCronJobMaintainsState(self):
  """Checks that state written by one run is visible to subsequent runs."""
  DummyStatefulSystemCronJobRel.VALUES = []

  # We need to have a cron job started to have a place to maintain
  # state.
  cron_manager = cronjobs.CronManager()
  args = rdf_cronjobs.CronJobAction(
      action_type=rdf_cronjobs.CronJobAction.ActionType.SYSTEM_CRON_ACTION,
      system_cron_action=rdf_cronjobs.SystemCronAction(
          job_class_name="DummyStatefulSystemCronJobRel"))

  # Explicit Duration.From constructors for consistency with the parallel
  # tests in this file (the string forms are the older API).
  job = rdf_cronjobs.CronJob(
      cron_job_id="test_cron",
      args=args,
      enabled=True,
      frequency=rdfvalue.Duration.From(2, rdfvalue.HOURS),
      lifetime=rdfvalue.Duration.From(1, rdfvalue.HOURS),
      allow_overruns=False)
  data_store.REL_DB.WriteCronJob(job)

  fake_time = rdfvalue.RDFDatetime.Now()
  for i in range(3):
    # Advance 3h per iteration (> 2h frequency) so the job is due each time.
    with test_lib.FakeTime(fake_time +
                           rdfvalue.Duration.From(3 * i, rdfvalue.HOURS)):
      cron_manager.RunOnce()
      cron_manager._GetThreadPool().Join()
      runs = cron_manager.ReadJobRuns("test_cron")
      self.assertLen(runs, i + 1)
      for run in runs:
        self.assertEqual(run.status, "FINISHED")

  # Each run appended its counter value, proving state persisted across runs.
  self.assertListEqual(DummyStatefulSystemCronJobRel.VALUES, [0, 1, 2])
def testGRRVersionBreakDown(self):
  """Check that all client stats cron jobs are run."""
  run = rdf_cronjobs.CronJobRun()
  job = rdf_cronjobs.CronJob()
  system.GRRVersionBreakDownCronJob(run, job).Run()
  self._CheckGRRVersionBreakDown()
def testPurgeServerStats(self):
  """Checks that PurgeServerStatsCronJob deletes stats older than the TTL."""
  if not data_store.RelationalDBReadEnabled():
    self.skipTest("Test is only for the relational DB. Skipping...")
  fake_stats_collector = default_stats_collector.DefaultStatsCollector([
      stats_utils.CreateCounterMetadata("fake_counter"),
  ])
  # timestamp0/timestamp1 fall more than the 1h TTL before timestamp3 and
  # must be purged; timestamp2 is within the TTL and must survive.
  timestamp0 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1)
  timestamp1 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(2)
  timestamp2 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3600)
  timestamp3 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4800)
  config_overrides = {
      "Database.useForReads.stats": True,
      "StatsStore.stats_ttl_hours": 1
  }
  # Batch size 1 forces the deletion loop to process one entry per batch,
  # exercising the batching logic (2 deletions -> 2 batches).
  with test_lib.ConfigOverrider(config_overrides), \
      stats_test_utils.FakeStatsContext(fake_stats_collector), \
      mock.patch.object(system, "_STATS_DELETION_BATCH_SIZE", 1):
    with test_lib.FakeTime(rdfvalue.RDFDatetime(timestamp0)):
      stats_store._WriteStats(process_id="fake_process_id")
    with test_lib.FakeTime(rdfvalue.RDFDatetime(timestamp1)):
      stats_collector_instance.Get().IncrementCounter("fake_counter")
      stats_store._WriteStats(process_id="fake_process_id")
      expected_results = {
          "fake_process_id": {
              "fake_counter": [(0, timestamp0), (1, timestamp1)]
          }
      }
      self.assertDictEqual(
          stats_store.ReadStats("f", "fake_counter"), expected_results)
    with test_lib.FakeTime(timestamp2):
      stats_store._WriteStats(process_id="fake_process_id")
      expected_results = {
          "fake_process_id": {
              "fake_counter": [(0, timestamp0), (1, timestamp1),
                               (1, timestamp2)]
          }
      }
      self.assertDictEqual(
          stats_store.ReadStats("f", "fake_counter"), expected_results)
    with test_lib.FakeTime(timestamp3):
      cron = system.PurgeServerStatsCronJob(rdf_cronjobs.CronJobRun(),
                                            rdf_cronjobs.CronJob())
      cron.Run()
      # timestamp0 and timestamp1 are older than 1h, so they should get
      # deleted.
      expected_results = {
          "fake_process_id": {
              "fake_counter": [(1, timestamp2)]
          }
      }
      self.assertDictEqual(
          stats_store.ReadStats("f", "fake_counter"), expected_results)
      self.assertIn("Deleted 2 stats entries.", cron.run_state.log_message)
def testGRRVersionBreakDown(self):
  """Check that all client stats cron jobs are run."""
  run = rdf_cronjobs.CronJobRun()
  job = rdf_cronjobs.CronJob()
  cron_job = system.GRRVersionBreakDownCronJob(run, job)
  cron_job.Run()
  self._CheckGRRVersionBreakDown()
  # The run log reflects how many clients the cron processed.
  self.assertEqual(cron_job.run_state.log_message, "Processed 22 clients.")
def testNonExistingSystemCronJobDoesNotPreventOtherCronJobsFromRunning(self):
  """A broken system cron job must not block the remaining jobs."""
  # Have a fake non-existing cron job. We assume that cron jobs are going
  # to be processed in alphabetical order, according to their cron job ids.
  broken_args = rdf_cronjobs.CronJobAction(
      action_type=rdf_cronjobs.CronJobAction.ActionType.SYSTEM_CRON_ACTION,
      system_cron_action=rdf_cronjobs.SystemCronAction(
          job_class_name="__AbstractFakeCronJob__"))
  broken_job = rdf_cronjobs.CronJob(
      cron_job_id="cron_1",
      args=broken_args,
      enabled=True,
      frequency=rdfvalue.Duration.From(2, rdfvalue.HOURS),
      lifetime=rdfvalue.Duration.From(1, rdfvalue.HOURS),
      allow_overruns=False)
  data_store.REL_DB.WriteCronJob(broken_job)

  # Have a proper cron job.
  cron_manager = cronjobs.CronManager()
  good_args = rdf_cronjobs.CronJobAction(
      action_type=rdf_cronjobs.CronJobAction.ActionType.SYSTEM_CRON_ACTION,
      system_cron_action=rdf_cronjobs.SystemCronAction(
          job_class_name="DummyStatefulSystemCronJobRel"))
  good_job = rdf_cronjobs.CronJob(
      cron_job_id="cron_2",
      args=good_args,
      enabled=True,
      frequency=rdfvalue.Duration.From(2, rdfvalue.HOURS),
      lifetime=rdfvalue.Duration.From(1, rdfvalue.HOURS),
      allow_overruns=False)
  data_store.REL_DB.WriteCronJob(good_job)

  with self.assertRaises(cronjobs.OneOrMoreCronJobsFailedError):
    cron_manager.RunOnce()

  cron_manager._GetThreadPool().Join()

  # The broken job produced no runs; the valid one still ran exactly once.
  self.assertEmpty(cron_manager.ReadJobRuns("cron_1"))
  self.assertLen(cron_manager.ReadJobRuns("cron_2"), 1)
def testCronJobDeletion(self):
  """Deleting a cron job also removes all of its runs."""
  job_id = "job0"
  self.db.WriteCronJob(rdf_cronjobs.CronJob(cron_job_id=job_id))
  for run_id in ("a", "b"):
    self.db.WriteCronJobRun(
        rdf_cronjobs.CronJobRun(cron_job_id=job_id, run_id=run_id))
  self.assertLen(self.db.ReadCronJobRuns(job_id), 2)

  self.db.DeleteCronJob(job_id)

  # Both the job itself and its runs must be gone.
  with self.assertRaises(db.UnknownCronJobError):
    self.db.ReadCronJob(job_id)
  self.assertEmpty(self.db.ReadCronJobRuns(job_id))
def testCronJobRegistryInstantiation(self):
  """Every cron job class used in production must be constructible."""
  # We import the `server_startup` module to ensure that all cron jobs classes
  # that are really used on the server are imported and populate the registry.
  # pylint: disable=unused-variable, g-import-not-at-top
  from grr_response_server import server_startup
  # pylint: enable=unused-variable, g-import-not-at-top

  for registered_cls in cronjobs.CronJobRegistry.CRON_REGISTRY.values():
    cron_job = rdf_cronjobs.CronJob(cron_job_id="foobar")
    cron_run = rdf_cronjobs.CronJobRun(cron_job_id="foobar", status="RUNNING")
    registered_cls(cron_run, cron_job)  # Should not fail.
def testPurgeServerStats(self):
  """Checks that PurgeServerStatsCronJob deletes stats older than the TTL."""
  if not data_store.RelationalDBReadEnabled():
    self.skipTest("Test is only for the relational DB. Skipping...")
  fake_stats_collector = default_stats_collector.DefaultStatsCollector([
      stats_utils.CreateCounterMetadata("fake_counter"),
  ])
  # timestamp1 falls more than the 1h TTL before timestamp3 and must be
  # purged; timestamp2 is within the TTL and must survive.
  timestamp1 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1)
  timestamp2 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3600)
  timestamp3 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4800)
  config_overrides = {
      "Database.useForReads.stats": True,
      "StatsStore.stats_ttl_hours": 1
  }
  with test_lib.ConfigOverrider(config_overrides):
    with stats_test_utils.FakeStatsContext(fake_stats_collector):
      with test_lib.FakeTime(rdfvalue.RDFDatetime(timestamp1)):
        stats_collector_instance.Get().IncrementCounter("fake_counter")
        stats_store._WriteStats(process_id="fake_process_id")
        expected_results = {
            "fake_process_id": {
                "fake_counter": [(1, timestamp1)]
            }
        }
        self.assertDictEqual(
            stats_store.ReadStats("f", "fake_counter"), expected_results)
      with test_lib.FakeTime(timestamp2):
        stats_store._WriteStats(process_id="fake_process_id")
        expected_results = {
            "fake_process_id": {
                "fake_counter": [(1, timestamp1), (1, timestamp2)]
            }
        }
        self.assertDictEqual(
            stats_store.ReadStats("f", "fake_counter"), expected_results)
      with test_lib.FakeTime(timestamp3):
        system.PurgeServerStatsCronJob(rdf_cronjobs.CronJobRun(),
                                       rdf_cronjobs.CronJob()).Run()
        # timestamp1 is older than 1h, so it should get deleted.
        expected_results = {
            "fake_process_id": {
                "fake_counter": [(1, timestamp2)]
            }
        }
        self.assertDictEqual(
            stats_store.ReadStats("f", "fake_counter"), expected_results)
def ProcessHuntOutputPlugins(self):
  """Processes hunt output via the relational cron job or the AFF4 flow.

  Returns:
    The flow URN in the AFF4 (non-relational) code path. In the relational
    code path the cron job runs synchronously and None is returned.
  """
  if data_store.RelationalDBFlowsEnabled():
    # Use the explicit Duration.From constructor for consistency with the
    # rest of this file (the string form is the older API).
    job = rdf_cronjobs.CronJob(
        cron_job_id="some/id",
        lifetime=rdfvalue.Duration.From(1, rdfvalue.HOURS))
    run_state = rdf_cronjobs.CronJobRun(
        cron_job_id="some/id",
        status="RUNNING",
        started_at=rdfvalue.RDFDatetime.Now())
    process_results.ProcessHuntResultCollectionsCronJob(run_state, job).Run()
  else:
    flow_urn = flow.StartAFF4Flow(
        flow_name=process_results.ProcessHuntResultCollectionsCronFlow.__name__,
        token=self.token)
    flow_test_lib.TestFlowHelper(flow_urn, token=self.token)
    return flow_urn
def CreateJob(self, cron_args=None, job_id=None, enabled=True, token=None):
  """Creates a cron job that runs given flow with a given frequency.

  Args:
    cron_args: A protobuf of type rdf_cronjobs.CreateCronJobArgs.
    job_id: Use this job_id instead of an autogenerated unique name (used for
      system cron jobs - we want them to have well-defined persistent name).
    enabled: If False, the job object will be created, but will be disabled.
    token: Security token used for data store access. Unused.

  Returns:
    The job id (a string) of the created cron job.

  Raises:
    ValueError: If cron_args does not specify a flow name.
  """
  # TODO(amoser): Remove the token from this method once the aff4
  # cronjobs are gone.
  del token

  # The docstring promised a ValueError for invalid args but nothing
  # enforced it; validate the flow name explicitly, matching the newer
  # CreateJob implementation in this file.
  if not cron_args.flow_name:
    raise ValueError("Unspecified flow name")

  if not job_id:
    uid = utils.PRNG.GetUInt16()
    job_id = "%s_%s" % (cron_args.flow_name, uid)

  args = rdf_cronjobs.CronJobAction(
      action_type=rdf_cronjobs.CronJobAction.ActionType.HUNT_CRON_ACTION,
      hunt_cron_action=rdf_cronjobs.HuntCronAction(
          flow_name=cron_args.flow_name,
          flow_args=cron_args.flow_args,
          hunt_runner_args=cron_args.hunt_runner_args))

  job = rdf_cronjobs.CronJob(
      cron_job_id=job_id,
      frequency=cron_args.frequency,
      lifetime=cron_args.lifetime,
      allow_overruns=cron_args.allow_overruns,
      args=args,
      enabled=enabled)
  data_store.REL_DB.WriteCronJob(job)
  return job_id
def testCronJobRunExpiry(self):
  """DeleteOldCronJobRuns removes exactly the runs started before the cutoff."""
  job_id = "job1"
  self.db.WriteCronJob(rdf_cronjobs.CronJob(cron_job_id=job_id))

  base_time = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(
      7, rdfvalue.DAYS)
  # Three runs, one per day, starting seven days in the past.
  run_specs = [
      ("00000000", base_time),
      ("00000001", base_time + rdfvalue.Duration.From(1, rdfvalue.DAYS)),
      ("00000002", base_time + rdfvalue.Duration.From(2, rdfvalue.DAYS)),
  ]
  for run_id, start_time in run_specs:
    with test_lib.FakeTime(start_time):
      self.db.WriteCronJobRun(
          rdf_cronjobs.CronJobRun(
              cron_job_id=job_id, run_id=run_id, started_at=start_time))

  self.assertLen(self.db.ReadCronJobRuns(job_id), 3)

  # Each cutoff should leave only the runs written strictly after it:
  # 1h past the base removes the first run, 25h removes the second too.
  for cutoff_hours, expected_left in [(1, 2), (25, 1)]:
    cutoff = base_time + rdfvalue.Duration.From(cutoff_hours, rdfvalue.HOURS)
    self.db.DeleteOldCronJobRuns(cutoff)
    remaining = self.db.ReadCronJobRuns(job_id)
    self.assertLen(remaining, expected_left)
    for remaining_run in remaining:
      self.assertGreater(remaining_run.timestamp, cutoff)
def CreateJob(self, cron_args=None, job_id=None, enabled=True):
  """Creates a cron job that runs given flow with a given frequency.

  Args:
    cron_args: A protobuf of type rdf_cronjobs.CreateCronJobArgs.
    job_id: Use this job_id instead of an autogenerated unique name (used for
      system cron jobs - we want them to have well-defined persistent name).
    enabled: If False, the job object will be created, but will be disabled.

  Returns:
    The job id (a string) of the created cron job. Note: older revisions of
    this docstring said "URN", but the code returns the plain id.

  Raises:
    ValueError: If cron_args does not specify a flow name.
  """
  if not cron_args.flow_name:
    raise ValueError("Unspecified flow name")

  if not job_id:
    # TODO: UInt16 is too small for randomly generated IDs.
    uid = random.UInt16()
    job_id = "%s_%s" % (cron_args.flow_name, uid)

  args = rdf_cronjobs.CronJobAction(
      action_type=rdf_cronjobs.CronJobAction.ActionType.HUNT_CRON_ACTION,
      hunt_cron_action=rdf_cronjobs.HuntCronAction(
          flow_name=cron_args.flow_name,
          flow_args=cron_args.flow_args,
          hunt_runner_args=cron_args.hunt_runner_args))

  job = rdf_cronjobs.CronJob(
      cron_job_id=job_id,
      description=cron_args.description,
      frequency=cron_args.frequency,
      lifetime=cron_args.lifetime,
      allow_overruns=cron_args.allow_overruns,
      args=args,
      enabled=enabled)
  data_store.REL_DB.WriteCronJob(job)

  return job_id
def testCronJobRunsOverwrite(self):
  """Rewriting an existing run updates its fields and bumps its timestamp."""
  self.db.WriteCronJob(rdf_cronjobs.CronJob(cron_job_id="job"))
  run = rdf_cronjobs.CronJobRun(cron_job_id="job", run_id="abcd1234")
  self.db.WriteCronJobRun(run)
  original_ts = self.db.ReadCronJobRun("job", "abcd1234").timestamp

  now = rdfvalue.RDFDatetime.Now()
  run.backtrace = "error"
  run.log_message = "log"
  # Explicit Duration.From constructor for consistency with the rest of
  # this file (the string form is the older API).
  run.started_at = now - rdfvalue.Duration.From(5, rdfvalue.SECONDS)
  run.finished_at = now
  self.db.WriteCronJobRun(run)

  read = self.db.ReadCronJobRun("job", "abcd1234")

  self.assertEqual(read.backtrace, run.backtrace)
  self.assertEqual(read.log_message, run.log_message)
  self.assertEqual(read.started_at, run.started_at)
  self.assertEqual(read.finished_at, run.finished_at)
  # The write timestamp is assigned by the DB and must change on overwrite.
  self.assertNotEqual(read.timestamp, original_ts)
def testHandler(self):
  """Checks that ApiGetCronJob returns all fields of a stored cron job."""
  now = rdfvalue.RDFDatetime.Now()
  with test_lib.FakeTime(now):
    # Explicit Duration.From constructors for consistency with the rest of
    # this file (the string forms are the older API).
    job = rdf_cronjobs.CronJob(
        cron_job_id="job_id",
        enabled=True,
        last_run_status="FINISHED",
        frequency=rdfvalue.Duration.From(7, rdfvalue.DAYS),
        lifetime=rdfvalue.Duration.From(1, rdfvalue.HOURS),
        allow_overruns=True)
    data_store.REL_DB.WriteCronJob(job)

    state = rdf_protodict.AttributedDict()
    state["item"] = "key"
    data_store.REL_DB.UpdateCronJob(
        job.cron_job_id,
        current_run_id="ABCD1234",
        state=state,
        forced_run_requested=True)

  args = cron_plugin.ApiGetCronJobArgs(cron_job_id=job.cron_job_id)
  result = self.handler.Handle(args)

  self.assertEqual(result.cron_job_id, job.cron_job_id)
  # TODO(amoser): The aff4 implementation does not store the create time so we
  # can't return it yet.
  # self.assertEqual(result.created_at, now)
  self.assertEqual(result.enabled, job.enabled)
  self.assertEqual(result.current_run_id, "ABCD1234")
  self.assertEqual(result.forced_run_requested, True)
  self.assertEqual(result.frequency, job.frequency)
  self.assertEqual(result.is_failing, False)
  self.assertEqual(result.last_run_status, job.last_run_status)
  self.assertEqual(result.lifetime, job.lifetime)

  state_entries = list(result.state.items)
  # assertLen for consistency with the rest of this file.
  self.assertLen(state_entries, 1)
  state_entry = state_entries[0]
  self.assertEqual(state_entry.key, "item")
  self.assertEqual(state_entry.value, "key")
def testCronJobRuns(self):
  """Covers writing, listing and reading individual cron job runs."""
  # Writing a run for an unknown job must fail.
  with self.assertRaises(db.UnknownCronJobError):
    self.db.WriteCronJobRun(
        rdf_cronjobs.CronJobRun(cron_job_id="job1", run_id="00000000"))

  now = rdfvalue.RDFDatetime.Now()
  with test_lib.FakeTime(now):
    # Two jobs with two runs each.
    for job_index in range(1, 3):
      self.db.WriteCronJob(
          rdf_cronjobs.CronJob(cron_job_id="job%d" % job_index))
      for run_index in range(1, 3):
        self.db.WriteCronJobRun(
            rdf_cronjobs.CronJobRun(
                cron_job_id="job%d" % job_index,
                run_id="abcd123%d" % run_index))

  for job_index in range(1, 3):
    job_id = "job%d" % job_index
    runs = self.db.ReadCronJobRuns(job_id)
    self.assertLen(runs, 2)
    for run in runs:
      self.assertEqual(run.cron_job_id, job_id)
      self.assertEqual(run.timestamp, now)

  run = self.db.ReadCronJobRun("job1", "abcd1231")
  self.assertEqual(run.cron_job_id, "job1")
  self.assertEqual(run.run_id, "abcd1231")
  self.assertEqual(run.timestamp, now)

  # Malformed run id.
  with self.assertRaises(ValueError):
    self.db.ReadCronJobRun("job2", "invalid_id")

  # Well-formed but nonexistent run id.
  with self.assertRaises(db.UnknownCronJobRunError):
    self.db.ReadCronJobRun("job2", "abcd1234")

  # Existing run id but wrong job.
  with self.assertRaises(db.UnknownCronJobRunError):
    self.db.ReadCronJobRun("doesntexist", "abcd1231")

  self.assertEqual(self.db.ReadCronJobRuns("doesntexist"), [])
def ScheduleSystemCronJobs(names=None):
  """Schedules all system cron jobs.

  Args:
    names: Optional iterable of system cron job class names to schedule. If
      None, every class in the system cron registry is scheduled.

  Raises:
    ValueError: If Cron.disabled_cron_jobs contains unknown job names. Note
      that the valid jobs are still scheduled before this is raised.
  """
  errors = []
  disabled_classes = config.CONFIG["Cron.disabled_cron_jobs"]
  for name in disabled_classes:
    # Validate that every disabled name refers to a real cron job class.
    # The resolved class itself was never used here, so it is not bound.
    try:
      registry.SystemCronJobRegistry.CronJobClassByName(name)
    except ValueError:
      errors.append("Cron job not found: %s." % name)
      continue

  if names is None:
    names = iterkeys(registry.SystemCronJobRegistry.SYSTEM_CRON_REGISTRY)

  for name in names:
    cls = registry.SystemCronJobRegistry.CronJobClassByName(name)

    enabled = cls.enabled and name not in disabled_classes
    system = rdf_cronjobs.CronJobAction.ActionType.SYSTEM_CRON_ACTION
    args = rdf_cronjobs.CronJobAction(
        action_type=system,
        system_cron_action=rdf_cronjobs.SystemCronAction(job_class_name=name))

    job = rdf_cronjobs.CronJob(
        cron_job_id=name,
        args=args,
        enabled=enabled,
        frequency=cls.frequency,
        lifetime=cls.lifetime,
        allow_overruns=cls.allow_overruns)
    data_store.REL_DB.WriteCronJob(job)

  if errors:
    raise ValueError(
        "Error(s) while parsing Cron.disabled_cron_jobs: %s" % errors)
def _RunPurgeClientStats(self):
  """Executes the PurgeClientStats cron job once."""
  system.PurgeClientStatsCronJob(rdf_cronjobs.CronJobRun(),
                                 rdf_cronjobs.CronJob()).Run()
def _CreateCronJob(self):
  """Builds an enabled cron job with a randomized id."""
  job_id = "job_%s" % utils.PRNG.GetUInt16()
  return rdf_cronjobs.CronJob(cron_job_id=job_id, enabled=True)
def testCronJob(self, fs_conn_mock):
  """Checks UpdateFSLastPingTimestamps copies Fleetspeak ping times to GRR.

  Args:
    fs_conn_mock: Mock Fleetspeak connection — presumably injected by a
      mock.patch decorator that is outside this view; TODO confirm.
  """
  if not data_store.RelationalDBReadEnabled():
    self.skipTest("Test is only for the relational DB. Skipping...")
  client_id1 = "C.0000000000000001"
  client_id2 = "C.0000000000000002"
  client_id3 = "C.0000000000000003"
  client_id4 = "C.0000000000000004"
  client_id5 = "C.0000000000000005"
  client_id6 = "C.0000000000000006"
  client_id7 = "C.0000000000000007"
  # client_id1 is not Fleetspeak-enabled; the rest are, some with existing
  # last_ping values already stored in the GRR DB.
  data_store.REL_DB.WriteClientMetadata(client_id1, fleetspeak_enabled=False)
  data_store.REL_DB.WriteClientMetadata(client_id2, fleetspeak_enabled=True)
  data_store.REL_DB.WriteClientMetadata(
      client_id3,
      last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
      fleetspeak_enabled=True)
  data_store.REL_DB.WriteClientMetadata(
      client_id4,
      last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(41),
      fleetspeak_enabled=True)
  data_store.REL_DB.WriteClientMetadata(
      client_id5,
      last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(5),
      fleetspeak_enabled=True)
  data_store.REL_DB.WriteClientMetadata(
      client_id6,
      last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(61),
      fleetspeak_enabled=True)
  data_store.REL_DB.WriteClientMetadata(
      client_id7,
      last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(68),
      fleetspeak_enabled=True)
  fs_enabled_ids = [
      client_id2, client_id3, client_id4, client_id5, client_id6, client_id7
  ]
  # Fake Fleetspeak-side records: the Nth client gets a last_contact_time
  # of N*10 seconds.
  fs_clients = {}
  for i, client_id in enumerate(fs_enabled_ids):
    client_number = i + 2
    fs_client_id = fleetspeak_utils.GRRIDToFleetspeakID(client_id)
    fs_client = admin_pb2.Client(client_id=fs_client_id)
    fs_client.last_contact_time.FromSeconds(client_number * 10)
    fs_clients[fs_client_id] = fs_client

  def FakeListClients(list_request):
    # Serves the fake records above in place of the Fleetspeak admin API.
    clients = []
    for fs_client_id in list_request.client_ids:
      clients.append(fs_clients[fs_client_id])
    return admin_pb2.ListClientsResponse(clients=clients)

  fs_conn_mock.outgoing.ListClients = FakeListClients

  cron_run = rdf_cronjobs.CronJobRun()
  job_data = rdf_cronjobs.CronJob()
  cron = system.UpdateFSLastPingTimestamps(cron_run, job_data)
  # Batch size 2 forces multiple ListClients calls; the 35s threshold at
  # fake time 100 controls which timestamps count as recent.
  with test_lib.FakeTime(rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100)):
    with test_lib.ConfigOverrider({
        "Server.fleetspeak_last_ping_threshold": "35s",
        "Server.fleetspeak_list_clients_batch_size": 2,
    }):
      cron.Run()

  actual_timestamps = data_store.REL_DB.ReadClientLastPings()
  expected_timestamps = {
      # Skipped because not a Fleetspeak client.
      client_id1: None,
      client_id2: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(20),
      client_id3: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(30),
      # Skipped because FS timestamp is old.
      client_id4: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(41),
      client_id5: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(50),
      # Skipped because FS timestamp is old.
      client_id6: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(61),
      # Skipped because existing GRR timestamp is too recent.
      client_id7: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(68),
  }
  self.assertEqual(actual_timestamps, expected_timestamps)
  self.assertMultiLineEqual(cron._log_messages.popleft(),
                            "Updated timestamps for 3 clients.")
def _CreateCronJob(self):
  """Builds an enabled cron job with a randomized id."""
  job_id = "job_%s" % random.UInt16()
  return rdf_cronjobs.CronJob(cron_job_id=job_id, enabled=True)
def _RunCleanup(self):
  """Runs the inactive-client cleanup job; keeps it on self for inspection."""
  cron_run = rdf_cronjobs.CronJobRun()
  cron_job = rdf_cronjobs.CronJob()
  self.cleaner_job = data_retention.CleanInactiveClientsCronJob(
      cron_run, cron_job)
  self.cleaner_job.Run()