def ProcessClients(self, unused_responses):
  """Deletes expired client stats entries.

  Removes the aff4:stats attribute from every known client's "stats"
  subject over the window [0, now - MAX_AGE). Writes are issued
  asynchronously and flushed once at the end.
  """
  # Data store timestamps are microseconds since the epoch.
  self.start = 0
  self.end = int(1e6 * (time.time() - self.MAX_AGE))
  for urn in export_utils.GetAllClients(token=self.token):
    data_store.DB.DeleteAttributes(
        urn.Add("stats"), [u"aff4:stats"],
        start=self.start,
        end=self.end,
        sync=False,
        token=self.token)
    # Keep the surrounding job alive while we walk all clients.
    self.HeartBeat()
  data_store.DB.Flush()
def ProcessClients(self, unused_responses):
  """Deletes expired client stats entries.

  Walks all known clients in batches of 10000 and removes aff4:stats
  values older than MAX_AGE. Each batch is committed through its own
  mutation pool when the `with` block exits.
  """
  # Data store timestamps are microseconds since the epoch; everything
  # before (now - MAX_AGE) is purged.
  self.start = 0
  self.end = int(1e6 * (time.time() - self.MAX_AGE))
  urns = export_utils.GetAllClients(token=self.token)
  for batch in utils.Grouper(urns, 10000):
    with data_store.DB.GetMutationPool(token=self.token) as pool:
      for urn in batch:
        pool.DeleteAttributes(
            urn.Add("stats"), [u"aff4:stats"],
            start=self.start,
            end=self.end)
    # One heartbeat per committed batch keeps the job alive.
    self.HeartBeat()
"""Cleanup script.""" from grr.lib import export_utils # After you do this the UI complains a little, but creating a new hunt fixes it. hunts = aff4.FACTORY.Open("aff4:/hunts/") for hunt in hunts.ListChildren(): aff4.FACTORY.Delete(hunt) # Delete clients that haven't polled in for 2hours for fd in aff4.FACTORY.MultiOpen(export_utils.GetAllClients()): cutoff = rdfvalue.RDFDatetime().Now() - rdfvalue.Duration("2h") if fd.Get(fd.Schema.PING) < cutoff: aff4.FACTORY.Delete(fd.urn) # Delete all flows for client in export_utils.GetAllClients(): aff4.FACTORY.Delete(client.Add("flows"))