def testWellKnownFlowResponsesAreProcessedOnlyOnce(self):
    """Responses to a well known flow must not be processed twice."""
    worker_obj = worker.GRRWorker(token=self.token)

    # Deliver one stats response addressed to the well known "Stats" flow.
    client_urn = rdf_client.ClientURN("C.1100110011001100")
    stats_session = rdfvalue.SessionID(queue=queues.STATS, flow_name="Stats")
    self.SendResponse(
        stats_session,
        data=rdf_client.ClientStats(RSS_size=1234),
        client_id=client_urn,
        well_known=True)

    # Drain the worker queue.
    worker_obj.RunOnce()
    worker_obj.thread_pool.Join()

    stats_urn = client_urn.Add("stats")
    client = aff4.FACTORY.Open(stats_urn, token=self.token)
    stats = client.Get(client.Schema.STATS)
    self.assertEqual(stats.RSS_size, 1234)

    # Wipe the result so any re-processing would be visible.
    aff4.FACTORY.Delete(stats_urn, token=self.token)

    # Process all messages once again - there should be no actual processing
    # done, as all the responses were processed last time.
    worker_obj.RunOnce()
    worker_obj.thread_pool.Join()

    # Check that stats haven't changed as no new responses were processed.
    client = aff4.FACTORY.Open(stats_urn, token=self.token)
    self.assertIsNone(client.Get(client.Schema.STATS))
def testPurgeClientStats(self):
    """Stat entries older than MAX_AGE are removed by PurgeClientStats."""
    max_age = system.PurgeClientStats.MAX_AGE
    urn = self.client_id.Add("stats")

    # Write three stat entries at 1x, 1.5x and 2x the maximum age.
    for fake_now in [1 * max_age, 1.5 * max_age, 2 * max_age]:
        with test_lib.FakeTime(fake_now):
            with aff4.FACTORY.Create(urn, aff4_stats.ClientStats,
                                     token=self.token, mode="rw") as stats_fd:
                entry = client_rdf.ClientStats(RSS_size=int(fake_now))
                stats_fd.AddAttribute(stats_fd.Schema.STATS(entry))

    stat_obj = aff4.FACTORY.Open(urn, age=aff4.ALL_TIMES, token=self.token)
    stat_entries = list(stat_obj.GetValuesForAttribute(stat_obj.Schema.STATS))
    self.assertEqual(len(stat_entries), 3)
    self.assertTrue(max_age in [entry.RSS_size for entry in stat_entries])

    # Run the purge flow well past the retention horizon; the oldest entry
    # (written at 1 * max_age) should be dropped.
    with test_lib.FakeTime(2.5 * max_age):
        for _ in test_lib.TestFlowHelper("PurgeClientStats", None,
                                         client_id=self.client_id,
                                         token=self.token):
            pass

    stat_obj = aff4.FACTORY.Open(urn, age=aff4.ALL_TIMES, token=self.token)
    stat_entries = list(stat_obj.GetValuesForAttribute(stat_obj.Schema.STATS))
    self.assertEqual(len(stat_entries), 1)
    self.assertTrue(max_age not in [entry.RSS_size for entry in stat_entries])
def Run(self, arg):
    """Returns the client stats.

    Builds a ClientStats response from the current process' memory usage,
    transfer counters and the worker's collected CPU/IO samples, filtered to
    the [arg.start_time, arg.end_time] window, and sends it back.

    Args:
      arg: A GetClientStatsRequest bounding the sample time range. When None,
        a default (unbounded) request is used.
    """
    if arg is None:
        arg = rdf_client.GetClientStatsRequest()

    proc = psutil.Process(os.getpid())
    meminfo = proc.memory_info()
    response = rdf_client.ClientStats(
        RSS_size=meminfo.rss,
        VMS_size=meminfo.vms,
        memory_percent=proc.memory_percent(),
        bytes_received=stats.STATS.GetMetricValue(
            "grr_client_received_bytes"),
        bytes_sent=stats.STATS.GetMetricValue("grr_client_sent_bytes"),
        # Times are converted from seconds to microseconds. int() is used
        # instead of the Python-2-only long(): on Python 2 it auto-promotes
        # to long for large values, and it also works on Python 3.
        create_time=int(proc.create_time() * 1e6),
        boot_time=int(psutil.boot_time() * 1e6))

    # Only report samples that fall inside the requested window.
    samples = self.grr_worker.stats_collector.cpu_samples
    for (timestamp, user, system, percent) in samples:
        if arg.start_time < timestamp < arg.end_time:
            sample = rdf_client.CpuSample(timestamp=timestamp,
                                          user_cpu_time=user,
                                          system_cpu_time=system,
                                          cpu_percent=percent)
            response.cpu_samples.Append(sample)

    samples = self.grr_worker.stats_collector.io_samples
    for (timestamp, read_bytes, write_bytes) in samples:
        if arg.start_time < timestamp < arg.end_time:
            sample = rdf_client.IOSample(timestamp=timestamp,
                                         read_bytes=read_bytes,
                                         write_bytes=write_bytes)
            response.io_samples.Append(sample)

    self.Send(response)
def GetClientStats(self, _):
    """Fake get client stats method."""
    response = rdf_client.ClientStats()
    # Generate 12 synthetic samples, 10 (simulated) seconds apart.
    for idx in range(12):
        ts = int(idx * 10 * 1e6)
        response.cpu_samples.Append(
            rdf_client.CpuSample(timestamp=ts,
                                 user_cpu_time=10 + idx,
                                 system_cpu_time=20 + idx,
                                 cpu_percent=10 + idx))
        response.io_samples.Append(
            rdf_client.IOSample(timestamp=ts,
                                read_bytes=10 + idx,
                                write_bytes=10 + idx))
    return [response]
def FillClientStats(
        client_id=rdf_client.ClientURN("C.0000000000000001"), token=None):
    """Feeds six one-minute ClientStats messages to the Stats flow.

    NOTE: the default client_id is evaluated once at definition time; this is
    safe because the URN is never mutated.
    """
    for minute in range(6):
        sample_set = rdf_client.ClientStats()
        for second in range(minute * 60, (minute + 1) * 60):
            ts = int(second * 10 * 1e6)
            sample_set.cpu_samples.Append(
                rdf_client.CpuSample(timestamp=ts,
                                     user_cpu_time=10 + second,
                                     system_cpu_time=20 + second,
                                     cpu_percent=10 + second))
            sample_set.io_samples.Append(
                rdf_client.IOSample(timestamp=ts,
                                    read_bytes=10 + second,
                                    write_bytes=10 + second * 2))
        message = rdf_flows.GrrMessage(source=client_id,
                                       args=sample_set.SerializeToString())
        flow.WellKnownFlow.GetAllWellKnownFlows(
            token=token)["Stats"].ProcessMessage(message)
def testProcessMessagesWellKnown(self):
    """A well known flow message is processed without user notifications."""
    worker_obj = worker.GRRWorker(token=self.token)

    # Deliver a stats message addressed to ClientStatsAuto's well known
    # session id.
    target_client = rdf_client.ClientURN("C.1100110011001100")
    self.SendResponse(
        administrative.GetClientStatsAuto.well_known_session_id,
        data=rdf_client.ClientStats(RSS_size=1234),
        client_id=target_client,
        well_known=True)

    # Drain the worker queue.
    worker_obj.RunOnce()
    worker_obj.thread_pool.Join()

    stats_fd = aff4.FACTORY.Open(target_client.Add("stats"), token=self.token)
    self.assertEqual(stats_fd.Get(stats_fd.Schema.STATS).RSS_size, 1234)

    # Make sure no notifications have been sent.
    user = aff4.FACTORY.Open("aff4:/users/%s" % self.token.username,
                             token=self.token)
    self.assertIsNone(user.Get(user.Schema.PENDING_NOTIFICATIONS))
def FillClientStats(self, client_id):
    """Writes six fake ClientStats entries at 10-second fake-time steps."""
    with aff4.FACTORY.Create(client_id.Add("stats"),
                             aff4_type=aff4_stats.ClientStats,
                             token=self.token,
                             mode="rw") as stats_fd:
        for step in range(6):
            with test_lib.FakeTime((step + 1) * 10):
                ts = int((step + 1) * 10 * 1e6)
                entry = rdf_client.ClientStats()
                entry.cpu_samples.Append(
                    rdf_client.CpuSample(timestamp=ts,
                                         user_cpu_time=10 + step,
                                         system_cpu_time=20 + step,
                                         cpu_percent=10 + step))
                entry.io_samples.Append(
                    rdf_client.IOSample(timestamp=ts,
                                        read_bytes=10 + step,
                                        write_bytes=10 + step * 2))
                stats_fd.AddAttribute(stats_fd.Schema.STATS(entry))
def Run(self, arg):
    """Returns the client stats."""
    if arg is None:
        arg = rdf_client.GetClientStatsRequest()

    proc = psutil.Process(os.getpid())
    mem = proc.memory_info()

    # Wall-clock instants are converted to RDFDatetime up front.
    create_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
        proc.create_time())
    boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(psutil.boot_time())

    response = rdf_client.ClientStats(
        RSS_size=mem.rss,
        VMS_size=mem.vms,
        memory_percent=proc.memory_percent(),
        bytes_received=stats.STATS.GetMetricValue("grr_client_received_bytes"),
        bytes_sent=stats.STATS.GetMetricValue("grr_client_sent_bytes"),
        create_time=create_time,
        boot_time=boot_time)

    # The collector pre-filters samples to the requested window.
    collector = self.grr_worker.stats_collector
    response.cpu_samples = collector.CpuSamplesBetween(
        start_time=arg.start_time, end_time=arg.end_time)
    response.io_samples = collector.IOSamplesBetween(
        start_time=arg.start_time, end_time=arg.end_time)

    self.Send(response)
def ProcessMessage(self, message):
    """Processes a stats response from the client."""
    self.ProcessResponse(message.source,
                         rdf_client.ClientStats(message.payload))
def Layout(self, request, response):
    """Renders the client load view.

    Collects the client's currently leased tasks and up to one hour of
    stats history, then hands flattened CPU/IO time series to the
    ClientLoadView JavaScript renderer.
    """
    self.client_id = rdf_client.ClientURN(request.REQ.get("client_id"))
    self.client_actions = []

    # Gather tasks whose lease (eta) has not yet expired.
    current_time = rdfvalue.RDFDatetime().Now()
    leased_tasks = []
    with queue_manager.QueueManager(token=request.token) as manager:
        tasks = manager.Query(self.client_id.Queue(), limit=1000)
        for task in tasks:
            if task.eta > current_time:
                leased_tasks.append(task)

    # Open the parent flows of the leased tasks in one batch.
    flows_map = {}
    for flow_obj in aff4.FACTORY.MultiOpen(
            set(task.session_id for task in leased_tasks),
            mode="r", token=request.token):
        flows_map[flow_obj.urn] = flow_obj

    for task in leased_tasks:
        flow_obj = flows_map.get(task.session_id, None)
        if flow_obj:
            self.client_actions.append(
                dict(name=task.name,
                     priority=str(task.priority),
                     lease_time_left=str(task.eta - current_time),
                     parent_flow=dict(name=flow_obj.Name(),
                                      urn=flow_obj.urn)))

    # Restrict stats history to the last hour.
    now = rdfvalue.RDFDatetime().Now()
    hour_before_now = now - rdfvalue.Duration("1h")

    stats_urn = self.client_id.Add("stats")
    stats_obj = aff4.FACTORY.Create(
        stats_urn,
        "ClientStats",
        mode="r",
        age=(hour_before_now.AsMicroSecondsFromEpoch(),
             now.AsMicroSecondsFromEpoch()),
        token=request.token)
    client_stats_list = list(
        stats_obj.GetValuesForAttribute(stats_obj.Schema.STATS))

    # Flatten samples from all stats entries, oldest first.
    cpu_samples = []
    io_samples = []
    for client_stats in client_stats_list:
        cpu_samples.extend(client_stats.cpu_samples)
        io_samples.extend(client_stats.io_samples)

    cpu_samples = sorted(cpu_samples, key=lambda x: x.timestamp)
    io_samples = sorted(io_samples, key=lambda x: x.timestamp)

    # Use the latest entry as a template (Copy avoids mutating stored data)
    # and replace its sample lists with the merged, sorted ones.
    if client_stats_list:
        client_stats = client_stats_list[-1].Copy()
    else:
        client_stats = rdf_client.ClientStats()
    client_stats.cpu_samples = cpu_samples
    client_stats.io_samples = io_samples

    # Remember the newest sample timestamp (None when there are no samples).
    if client_stats.cpu_samples:
        self.stats_timestamp = client_stats.cpu_samples[-1].timestamp
    elif client_stats.io_samples:
        self.stats_timestamp = client_stats.io_samples[-1].timestamp
    else:
        self.stats_timestamp = None

    # Build (epoch-milliseconds, value) point lists for the charts; samples
    # outside the last hour are skipped.
    user_cpu_data = []
    system_cpu_data = []
    for sample in client_stats.cpu_samples:
        if sample.timestamp > hour_before_now and sample.timestamp < now:
            user_cpu_data.append(
                (sample.timestamp.AsSecondsFromEpoch() * 1000,
                 sample.user_cpu_time))
            system_cpu_data.append(
                (sample.timestamp.AsSecondsFromEpoch() * 1000,
                 sample.system_cpu_time))

    read_bytes_data = []
    write_bytes_data = []
    read_count_data = []
    write_count_data = []
    for sample in client_stats.io_samples:
        if sample.timestamp > hour_before_now and sample.timestamp < now:
            read_bytes_data.append(
                (sample.timestamp.AsSecondsFromEpoch() * 1000,
                 sample.read_bytes))
            write_bytes_data.append(
                (sample.timestamp.AsSecondsFromEpoch() * 1000,
                 sample.write_bytes))
            read_count_data.append(
                (sample.timestamp.AsSecondsFromEpoch() * 1000,
                 sample.read_count))
            write_count_data.append(
                (sample.timestamp.AsSecondsFromEpoch() * 1000,
                 sample.write_count))

    response = super(ClientLoadView, self).Layout(request, response)
    return self.CallJavascript(response, "ClientLoadView.Layout",
                               user_cpu_data=user_cpu_data,
                               system_cpu_data=system_cpu_data,
                               read_bytes_data=read_bytes_data,
                               write_bytes_data=write_bytes_data,
                               read_count_data=read_count_data,
                               write_count_data=write_count_data)
def testDownsampled(self):
    """Downsampling to a 10-minute interval keeps the expected samples."""
    ts = rdfvalue.RDFDatetime.FromHumanReadable

    # (when, user_cpu_time, system_cpu_time, cpu_percent)
    cpu_rows = [
        ("2001-01-01 00:00", 2.5, 3.2, 0.5),
        ("2001-01-01 00:05", 2.6, 4.7, 0.6),
        ("2001-01-01 00:10", 10.0, 14.2, 0.9),
        ("2001-01-01 00:12", 12.3, 14.9, 0.1),
        ("2001-01-01 00:21", 16.1, 22.3, 0.4),
    ]
    # (when, read_count, write_count)
    io_rows = [
        ("2001-01-01 00:00", 0, 0),
        ("2001-01-01 00:02", 3, 5),
        ("2001-01-01 00:12", 6, 8),
    ]
    stats = rdf_client.ClientStats(
        cpu_samples=[
            rdf_client.CpuSample(timestamp=ts(when),
                                 user_cpu_time=user,
                                 system_cpu_time=system,
                                 cpu_percent=percent)
            for when, user, system, percent in cpu_rows
        ],
        io_samples=[
            rdf_client.IOSample(timestamp=ts(when),
                                read_count=reads,
                                write_count=writes)
            for when, reads, writes in io_rows
        ])

    expected_cpu_rows = [
        ("2001-01-01 00:05", 2.6, 4.7, 0.55),
        ("2001-01-01 00:12", 12.3, 14.9, 0.5),
        ("2001-01-01 00:21", 16.1, 22.3, 0.4),
    ]
    expected_io_rows = [
        ("2001-01-01 00:02", 3, 5),
        ("2001-01-01 00:12", 6, 8),
    ]
    expected = rdf_client.ClientStats(
        cpu_samples=[
            rdf_client.CpuSample(timestamp=ts(when),
                                 user_cpu_time=user,
                                 system_cpu_time=system,
                                 cpu_percent=percent)
            for when, user, system, percent in expected_cpu_rows
        ],
        io_samples=[
            rdf_client.IOSample(timestamp=ts(when),
                                read_count=reads,
                                write_count=writes)
            for when, reads, writes in expected_io_rows
        ])

    actual = rdf_client.ClientStats.Downsampled(
        stats, interval=rdfvalue.Duration("10m"))

    self.assertEqual(actual, expected)