def testAddTimestamp(self):
  """Checks that start_date/end_date keywords filter on index-add time."""
  index = client_index.CreateClientIndex(token=self.token)
  client_urns = self.SetupClients(5)

  # 1413807132 = Mon, 20 Oct 2014 12:12:12 GMT
  with test_lib.FakeTime(1413807132):
    for urn in client_urns:
      client = aff4.FACTORY.Create(
          urn, aff4_type=aff4_grr.VFSGRRClient, mode="r", token=self.token)
      index.AddClient(client)

  # Each case is (keywords, expected number of matching clients).
  cases = [
      ([".", "start_date:2014-10-20"], 5),
      ([".", "start_date:2014-10-21"], 0),
      ([".", "start_date:2013-10-20", "end_date:2014-10-19"], 0),
      ([".", "start_date:2013-10-20", "end_date:2014-10-20"], 5),
      # Ignore the keyword if the date is not readable.
      ([".", "start_date:2013-10-20", "end_date:XXXX"], 5),
  ]
  for keywords, expected in cases:
    self.assertEqual(len(index.LookupClients(keywords)), expected)
def testUnversionedKeywords(self):
  """'+'-prefixed keywords must only match the latest client version."""
  index = client_index.CreateClientIndex(token=self.token)
  client_urns = self.SetupClients(5)

  def WriteHostIps(subnet):
    # Writes one HOST_IPS value per client and refreshes its index entry.
    for i in range(5):
      client = aff4.FACTORY.Create(
          client_urns[i],
          aff4_type=aff4_grr.VFSGRRClient,
          mode="rw",
          token=self.token)
      client.Set(client.Schema.HOST_IPS("%s.%d" % (subnet, i)))
      client.Flush()
      index.AddClient(client)

  with test_lib.FakeTime(1000000):
    WriteHostIps("10.1.0")

  with test_lib.FakeTime(2000000):
    WriteHostIps("10.1.1")

  with test_lib.FakeTime(3000000):
    # A plain keyword matches historical attribute values...
    self.assertEqual(
        index.LookupClients(["10.1.0", "Host-2"]),
        [rdf_client.ClientURN("aff4:/C.1000000000000002")])
    # ...while an unversioned ('+') keyword matches only current values.
    self.assertEqual(index.LookupClients(["+10.1.0", "Host-2"]), [])
    self.assertEqual(
        index.LookupClients(["+10.1.1", "Host-2"]),
        [rdf_client.ClientURN("aff4:/C.1000000000000002")])
def End(self): """Finalize client registration.""" # Update summary and publish to the Discovery queue. if data_store.RelationalDBWriteEnabled(): try: data_store.REL_DB.WriteClientSnapshot(self.state.client) except db.UnknownClientError: pass client = self._OpenClient() if data_store.RelationalDBReadEnabled(): summary = self.state.client.GetSummary() summary.client_id = self.client_id summary.timestamp = rdfvalue.RDFDatetime.Now() else: summary = client.GetSummary() self.Publish("Discovery", summary) self.SendReply(summary) # Update the client index client_index.CreateClientIndex(token=self.token).AddClient(client) if data_store.RelationalDBWriteEnabled(): try: index = client_index.ClientIndex() index.AddClient(self.state.client) labels = self.state.client.startup_info.client_info.labels if labels: data_store.REL_DB.AddClientLabels( self.state.client.client_id, "GRR", labels) except db.UnknownClientError: # TODO(amoser): Remove after data migration. pass
def Handle(self, args, token=None):
  """Searches for clients matching the keyword query in args.query."""
  limit = args.count or db.MAX_COUNT
  keywords = compatibility.ShlexSplit(args.query)

  api_clients = []

  if data_store.RelationalDBEnabled():
    index = client_index.ClientIndex()
    # LookupClients returns a sorted list of client ids.
    clients = index.LookupClients(keywords)[args.offset:args.offset + limit]
    client_infos = data_store.REL_DB.MultiReadClientFullInfo(clients)
    for client_info in itervalues(client_infos):
      api_clients.append(ApiClient().InitFromClientInfo(client_info))
  else:
    index = client_index.CreateClientIndex(token=token)
    result_urns = sorted(
        index.LookupClients(keywords))[args.offset:args.offset + limit]
    result_set = aff4.FACTORY.MultiOpen(result_urns, token=token)
    for child in sorted(result_set):
      api_clients.append(ApiClient().InitFromAff4Object(child))

  UpdateClientsFromFleetspeak(api_clients)
  return ApiSearchClientsResult(items=api_clients)
def EnrolFleetspeakClient(self, client_id):
  """Enrols a Fleetspeak-enabled client for use with GRR.

  Args:
    client_id: GRR client-id for the client.

  Returns:
    True if the client is new, and actually got enrolled. This method
    is a no-op if the client already exists (in which case False is
    returned).
  """
  client_urn = rdf_client.ClientURN(client_id)

  # If already enrolled, return.
  if data_store.RelationalDBEnabled():
    try:
      data_store.REL_DB.ReadClientMetadata(client_id)
      return False
    except db.UnknownClientError:
      # Unknown client -- proceed with enrollment.
      pass
  else:
    if aff4.FACTORY.ExistsWithType(
        client_urn, aff4_type=aff4_grr.VFSGRRClient, token=self.token):
      return False

  logging.info("Enrolling a new Fleetspeak client: %r", client_id)

  if data_store.RelationalDBEnabled():
    now = rdfvalue.RDFDatetime.Now()
    data_store.REL_DB.WriteClientMetadata(
        client_id, first_seen=now, fleetspeak_enabled=True, last_ping=now)

  if data_store.AFF4Enabled():
    # TODO(fleetspeak-team,grr-team): If aff4 isn't reliable enough, we can
    # catch exceptions from it and forward them to Fleetspeak by failing its
    # gRPC call. Fleetspeak will then retry with a random, perhaps healthier,
    # instance of the GRR frontend.
    with aff4.FACTORY.Create(
        client_urn, aff4_type=aff4_grr.VFSGRRClient, mode="rw",
        token=self.token) as client:
      client.Set(client.Schema.FLEETSPEAK_ENABLED, rdfvalue.RDFBool(True))

      index = client_index.CreateClientIndex(token=self.token)
      index.AddClient(client)

  if data_store.RelationalDBEnabled():
    # Seed the relational keyword index with a minimal snapshot.
    client_obj = rdf_objects.ClientSnapshot(client_id=client_urn.Basename())
    index = client_index.ClientIndex()
    index.AddClient(client_obj)

  # Publish the client enrollment message.
  events.Events.PublishEvent("ClientEnrollment", client_urn, token=self.token)

  return True
def Start(self):
  """Sign the CSR from the client.

  Verifies the client's certificate signing request, checks that the CN
  matches both the public key and the reporting client id, then writes the
  signed certificate and updates the client indexes.

  Raises:
    ValueError: if the request is not a CSR or the CN does not match the key.
    flow.FlowError: if the CSR does not verify or names a different client.
  """
  if self.args.csr.type != rdf_crypto.Certificate.Type.CSR:
    raise ValueError("Must be called with CSR")

  csr = rdf_crypto.CertificateSigningRequest(self.args.csr.pem)
  # Verify the CSR. This is not strictly necessary but doesn't harm either.
  try:
    csr.Verify(csr.GetPublicKey())
  except rdf_crypto.VerificationError:
    raise flow.FlowError("CSR for client %s did not verify: %s" %
                         (self.client_id, csr.AsPEM()))

  # Verify that the CN is of the correct form. The common name should refer
  # to a client URN.
  self.cn = rdf_client.ClientURN.FromPublicKey(csr.GetPublicKey())
  if self.cn != csr.GetCN():
    raise ValueError("CSR CN %s does not match public key %s." %
                     (csr.GetCN(), self.cn))

  logging.info("Will sign CSR for: %s", self.cn)

  cert = rdf_crypto.RDFX509Cert.ClientCertFromCSR(csr)

  # This check is important to ensure that the client id reported in the
  # source of the enrollment request is the same as the one in the
  # certificate. We use the ClientURN to ensure this is also of the correct
  # form for a client name.
  if self.cn != self.client_id:
    raise flow.FlowError("Certificate name %s mismatch for client %s" %
                         (self.cn, self.client_id))

  now = rdfvalue.RDFDatetime.Now()
  if data_store.AFF4Enabled():
    with aff4.FACTORY.Create(
        self.client_id, aff4_grr.VFSGRRClient, mode="rw",
        token=self.token) as client:
      # Set and write the certificate to the client record.
      client.Set(client.Schema.CERT, cert)
      client.Set(client.Schema.FIRST_SEEN, now)

      index = client_index.CreateClientIndex(token=self.token)
      index.AddClient(client)

  if data_store.RelationalDBWriteEnabled():
    data_store.REL_DB.WriteClientMetadata(
        self.client_id, certificate=cert, fleetspeak_enabled=False)
    index = client_index.ClientIndex()
    # Seed the relational index with an (otherwise empty) snapshot.
    index.AddClient(rdf_objects.ClientSnapshot(client_id=self.client_id))

  # Publish the client enrollment message.
  events.Events.PublishEvent(
      "ClientEnrollment", self.client_urn, token=self.token)

  self.Log("Enrolled %s successfully", self.client_id)
def ProcessKnowledgeBase(self, responses):
  """Collect and store any extra non-kb artifacts.

  Persists the collected knowledge base to the AFF4 client and/or the
  relational client snapshot, then kicks off collection of the remaining
  non-kb interrogate artifacts.

  Raises:
    flow.FlowError: if knowledge base collection failed.
  """
  if not responses.success:
    raise flow.FlowError(
        "Error while collecting the knowledge base: %s" % responses.status)

  kb = responses.First()

  if data_store.AFF4Enabled():
    # AFF4 client.
    client = self._OpenClient(mode="rw")
    with client:
      client.Set(client.Schema.KNOWLEDGE_BASE, kb)

      # Copy usernames.
      usernames = [user.username for user in kb.users if user.username]
      client.AddAttribute(client.Schema.USERNAMES(" ".join(usernames)))

      self.CopyOSReleaseFromKnowledgeBase(kb, client)

  # rdf_objects.ClientSnapshot.

  # Information already present in the knowledge base takes precedence.
  if not kb.os:
    kb.os = self.state.os
  if not kb.fqdn:
    kb.fqdn = self.state.fqdn

  self.state.client.knowledge_base = kb

  if data_store.RelationalDBReadEnabled():
    existing_client = data_store.REL_DB.ReadClientSnapshot(self.client_id)
    if existing_client is None:
      # This is the first time we interrogate this client. In that case, we
      # need to store basic information about this client right away so
      # follow up flows work properly.
      data_store.REL_DB.WriteClientSnapshot(self.state.client)

  self.CallFlow(
      collectors.ArtifactCollectorFlow.__name__,
      artifact_list=config.CONFIG["Artifacts.non_kb_interrogate_artifacts"],
      knowledge_base=kb,
      next_state="ProcessArtifactResponses")

  if data_store.AFF4Enabled():
    # Update the client index for the AFF4 client.
    client_index.CreateClientIndex(token=self.token).AddClient(client)

  if data_store.RelationalDBWriteEnabled():
    try:
      # Update the client index for the rdf_objects.ClientSnapshot.
      client_index.ClientIndex().AddClient(self.state.client)
    except db.UnknownClientError:
      # Client metadata disappeared -- skip the index update.
      pass
def testBulkLabelClients(self):
  """BulkLabel must (re)label exactly the listed, existing hosts."""
  index = client_index.CreateClientIndex(token=self.token)

  client_urns = self.SetupClients(2)
  for urn in client_urns:
    client = aff4.FACTORY.Create(
        urn, aff4_type=aff4_grr.VFSGRRClient, mode="rw", token=self.token)
    client.AddLabel("test_client")
    client.Flush()
    index.AddClient(client)

  # Maps hostnames used in the test to client urns.
  m = {"host-0": client_urns[0], "host-1": client_urns[1]}

  # No hostname: labeling an unknown host is a no-op.
  client_index.BulkLabel(
      "label-0", ["host-3"], token=self.token, client_index=index)
  self._HostsHaveLabel([], "label-0", index)

  # Add label.
  hosts = ["host-0", "host-1"]
  client_index.BulkLabel(
      "label-0", hosts, token=self.token, client_index=index)
  # host-0: label-0
  # host-1: label-0
  self._HostsHaveLabel(hosts, "label-0", index)
  self.assertItemsEqual(
      index.LookupClients(["label-0"]), [m[host] for host in hosts])

  # Add another label only changes the new host.
  hosts = ["host-1"]
  client_index.BulkLabel(
      "label-1", hosts, token=self.token, client_index=index)
  # host-0: label-0
  # host-1: label-0, label-1
  self._HostsHaveLabel(hosts, "label-1", index)
  self.assertItemsEqual(
      index.LookupClients(["label-1"]), [m[host] for host in hosts])
  # and other labels remain unchanged.
  hosts = ["host-0", "host-1"]
  self._HostsHaveLabel(hosts, "label-0", index)
  self.assertItemsEqual(
      index.LookupClients(["label-0"]), [m[host] for host in hosts])

  # Relabeling updates the label on already labeled hosts.
  hosts = ["host-0"]
  client_index.BulkLabel(
      "label-0", hosts, token=self.token, client_index=index)
  # host-0: label-0
  # host-1: label-1
  self._HostsHaveLabel(hosts, "label-0", index)
  self.assertItemsEqual(
      index.LookupClients(["label-0"]), [m[host] for host in hosts])
  # and other labels remain unchanged.
  hosts = ["host-1"]
  self._HostsHaveLabel(hosts, "label-1", index)
  self.assertItemsEqual(
      index.LookupClients(["label-1"]), [m[host] for host in hosts])
def SetupClient(self, client_nr, arch="x86_64", last_boot_time=None, install_time=None, kernel="4.0.0", os_version="buster/sid", ping=None, system="Linux", memory_size=None, add_cert=True): """Prepares a test client mock to be used. Args: client_nr: int The GRR ID to be used. 0xABCD maps to C.100000000000abcd in canonical representation. arch: string last_boot_time: RDFDatetime install_time: RDFDatetime kernel: string os_version: string ping: RDFDatetime system: string memory_size: bytes add_cert: boolean Returns: rdf_client.ClientURN """ # Make it possible to use SetupClient for both REL_DB and legacy tests. self.SetupTestClientObject( client_nr, add_cert=add_cert, arch=arch, install_time=install_time, last_boot_time=last_boot_time, kernel=kernel, memory_size=memory_size, os_version=os_version, ping=ping or rdfvalue.RDFDatetime.Now(), system=system) with client_index.CreateClientIndex(token=self.token) as index: client_id_urn = self._SetupClientImpl( client_nr, index=index, arch=arch, install_time=install_time, last_boot_time=last_boot_time, kernel=kernel, os_version=os_version, ping=ping, system=system, memory_size=memory_size, add_cert=add_cert) return client_id_urn
def _Setup100Clients(self):
  """Creates 100 test clients, labels each "foo" and indexes them."""
  self.client_urns = self.SetupClients(100)
  self.client_ids = [urn.Basename() for urn in self.client_urns]

  index = client_index.CreateClientIndex(token=self.token)
  opened_clients = aff4.FACTORY.MultiOpen(
      self.client_urns, mode="rw", token=self.token)
  for client in opened_clients:
    with client:
      client.AddLabel("foo", owner="david")
      index.AddClient(client)
def testAddLookupClients(self):
  """Exercises keyword lookup for ids, IPs, MACs, hostnames and prefixes."""
  index = client_index.CreateClientIndex(token=self.token)
  client_urns = self.SetupClients(42)
  for urn in client_urns:
    client = aff4.FACTORY.Create(
        urn, aff4_type=aff4_grr.VFSGRRClient, mode="r", token=self.token)
    index.AddClient(client)

  urn_1 = rdf_client.ClientURN("aff4:/C.1000000000000001")
  urn_2 = rdf_client.ClientURN("aff4:/C.1000000000000002")

  # Check unique identifiers: each keyword matches exactly one client.
  unique_cases = [
      ("192.168.0.1", urn_1),
      ("2001:aBcd::1", urn_1),
      ("ip:192.168.0.1", urn_1),
      ("ip:2001:abcd::1", urn_1),
      ("host-2", urn_2),
      ("C.1000000000000002", urn_2),
      ("aabbccddee01", urn_1),
      ("mac:aabbccddee01", urn_1),
      ("aa:bb:cc:dd:ee:01", urn_1),
      ("mac:aa:bb:cc:dd:ee:01", urn_1),
  ]
  for keyword, expected_urn in unique_cases:
    self.assertEqual(index.LookupClients([keyword]), [expected_urn])

  # IP prefixes of octets should work:
  self.assertEqual(
      sorted(index.LookupClients(["192.168.0"])), sorted(client_urns))

  # Hostname prefixes of tokens should work.
  self.assertEqual(
      index.LookupClients(["host-5.example"]),
      [rdf_client.ClientURN("aff4:/C.1000000000000005")])

  # Intersections should work.
  self.assertEqual(index.LookupClients(["192.168.0", "Host-2"]), [urn_2])

  # Universal keyword should find everything.
  self.assertEqual(len(index.LookupClients(["."])), 42)
def _CheckClientKwIndex(self, keywords, expected_count):
  """Asserts both client indexes return expected_count hits for keywords."""
  # AFF4 index.
  aff4_index = client_index.CreateClientIndex(token=self.token)
  self.assertEqual(len(aff4_index.LookupClients(keywords)), expected_count)

  # Relational index.
  rel_index = client_index.ClientIndex()
  self.assertEqual(len(rel_index.LookupClients(keywords)), expected_count)
def _CheckLabelIndex(self):
  """Check that label indexes are updated."""
  # AFF4 index: lookups yield full client URNs.
  aff4_index = client_index.CreateClientIndex(token=self.token)
  self.assertItemsEqual(
      list(aff4_index.LookupClients(["label:Label2"])), [self.client_id])

  # Relational index: lookups yield bare client ids.
  rel_hits = client_index.ClientIndex().LookupClients(["label:Label2"])
  self.assertItemsEqual(rel_hits, [self.client_id.Basename()])
def CleanVacuousVersions(clients=None, dry_run=True):
  """A script to remove no-op client versions.

  This script removes versions of a client when it is identical to the
  previous, in the sense that no versioned attributes were changed since
  the previous client version.

  Args:
    clients: A list of ClientURN, if empty cleans all clients.
    dry_run: whether this is a dry run
  """

  if not clients:
    index = client_index.CreateClientIndex()
    clients = index.LookupClients(["."])
  clients.sort()
  with data_store.DB.GetMutationPool() as pool:

    logging.info("checking %d clients", len(clients))
    for batch in collection.Batch(clients, 10000):
      # TODO(amoser): This only works on datastores that use the Bigtable
      # scheme.
      # NOTE(review): the same prefix appears twice in this list; presumably
      # one entry was meant to be a different prefix -- confirm.
      client_infos = data_store.DB.MultiResolvePrefix(
          batch, ["aff4:", "aff4:"], data_store.DB.ALL_TIMESTAMPS)

      for client, type_list in client_infos:
        cleared = 0
        kept = 0
        # Collect all (timestamp, attribute) pairs, oldest first. Versions
        # with timestamp 0 are unversioned and ignored here.
        updates = []
        for a, _, ts in type_list:
          if ts != 0:
            updates.append((ts, a))
        updates = sorted(updates)
        # "dirty" means some versioned attribute changed since the last kept
        # "aff4:type" entry; only dirty versions are worth keeping.
        dirty = True
        for ts, a in updates:
          if a == "aff4:type":
            if dirty:
              kept += 1
              dirty = False
            else:
              cleared += 1
              if not dry_run:
                pool.DeleteAttributes(client, ["aff4:type"], start=ts, end=ts)
                if pool.Size() > 1000:
                  pool.Flush()
          else:
            dirty = True

        logging.info("%s: kept %d and cleared %d", client, kept, cleared)
def AddClientLabel(self, client_id, owner, name):
  """Attaches the label `name` (owned by `owner`) to the given client."""
  if data_store.RelationalDBReadEnabled():
    # Relational path works on bare client ids, so strip any URN wrapper.
    if hasattr(client_id, "Basename"):
      client_id = client_id.Basename()
    data_store.REL_DB.AddClientLabels(client_id, owner, [name])
    client_index.ClientIndex().AddClientLabels(client_id, [name])
  else:
    # Legacy path: label the AFF4 object and refresh the keyword index.
    opened = aff4.FACTORY.Open(client_id, mode="rw", token=self.token)
    with opened as client_obj:
      client_obj.AddLabel(name, owner=owner)
      with client_index.CreateClientIndex(token=self.token) as index:
        index.AddClient(client_obj)
def ExportClientsByKeywords(keywords, filename, token=None):
  r"""A script to export clients summaries selected by a keyword search.

  This script does a client search for machines matching all of keywords and
  writes a .csv summary of the results to filename. Multi-value fields are
  '\n' separated.

  Args:
    keywords: a list of keywords to search for
    filename: the name of the file to write to, will be replaced if already
      present
    token: datastore token.
  """
  index = client_index.CreateClientIndex(token=token)
  client_list = index.LookupClients(keywords)
  logging.info("found %d clients", len(client_list))
  if not client_list:
    # Nothing matched -- do not create/overwrite the output file.
    return

  writer = utils.CsvDictWriter([
      u"client_id",
      u"hostname",
      u"last_seen",
      u"os",
      u"os_release",
      u"os_version",
      u"users",
      u"ips",
      u"macs",
  ])
  writer.WriteHeader()

  for client in aff4.FACTORY.MultiOpen(client_list, token=token):
    s = client.Schema
    writer.WriteRow({
        u"client_id": client.urn.Basename(),
        u"hostname": client.Get(s.HOSTNAME),
        u"os": client.Get(s.SYSTEM),
        u"os_release": client.Get(s.OS_RELEASE),
        u"os_version": client.Get(s.OS_VERSION),
        u"ips": client.Get(s.HOST_IPS),
        u"macs": client.Get(s.MAC_ADDRESS),
        u"users": "\n".join(client.Get(s.USERNAMES, [])),
        u"last_seen": client.Get(s.PING),
    })

  # The whole CSV is accumulated in memory and written out in one go.
  with io.open(filename, "w") as csv_out:
    csv_out.write(writer.Content())
def SearchClients(query_str, token=None, limit=1000):
  """Search indexes for clients.

  Args:
    query_str: a single keyword to look up in the client index.
    token: datastore token.
    limit: maximum number of results to return.

  Returns:
    A list of (client, hostname, os_version, ping) tuples, at most
    `limit` entries long.
  """
  schema = aff4.AFF4Object.classes["VFSGRRClient"].SchemaCls
  index = client_index.CreateClientIndex(token=token)
  matching_urns = index.LookupClients([query_str])

  results = []
  for client in aff4.FACTORY.MultiOpen(matching_urns, token=token):
    results.append((client,
                    str(client.Get(schema.HOSTNAME)),
                    str(client.Get(schema.OS_VERSION)),
                    str(client.Get(schema.PING))))
    if len(results) >= limit:
      break
  return results
def CleanClientVersions(clients=None, dry_run=True, token=None):
  """A script to remove excessive client versions.

  Especially when a client is heavily cloned, we sometimes write an excessive
  number of versions of it. Since these version all go into the same database
  row and are displayed as a dropdown list in the adminui, it is sometimes
  necessary to clear them out.

  This deletes version from clients so that we have at most one version per
  hour.

  Args:
    clients: A list of ClientURN, if empty cleans all clients.
    dry_run: whether this is a dry run
    token: datastore token.
  """
  if not clients:
    index = client_index.CreateClientIndex(token=token)
    clients = index.LookupClients(["."])
  clients.sort()
  with data_store.DB.GetMutationPool() as pool:
    logging.info("checking %d clients", len(clients))

    # TODO(amoser): This only works on datastores that use the Bigtable scheme.
    client_infos = data_store.DB.MultiResolvePrefix(
        clients, "aff4:type", data_store.DB.ALL_TIMESTAMPS)

    for client, type_list in client_infos:
      logging.info("%s: has %d versions", client, len(type_list))
      if not type_list:
        # Nothing to clean for this client; also guards the type_list[0]
        # access below against an IndexError.
        continue
      cleared = 0
      kept = 1
      # Walk versions newest-to-oldest, keeping at most one per hour.
      last_kept = type_list[0][2]
      for _, _, ts in type_list[1:]:
        if last_kept - ts > 60 * 60 * 1000000:  # 1 hour
          last_kept = ts
          kept += 1
        else:
          if not dry_run:
            pool.DeleteAttributes(client, ["aff4:type"], start=ts, end=ts)
          cleared += 1
          if pool.Size() > 10000:
            pool.Flush()

      logging.info("%s: kept %d and cleared %d", client, kept, cleared)
def ProcessKnowledgeBase(self, responses):
  """Collect and store any extra non-kb artifacts.

  Persists the collected knowledge base to the AFF4 client and the in-flight
  relational snapshot, then launches collection of the remaining non-kb
  interrogate artifacts.

  Raises:
    flow.FlowError: if knowledge base collection failed.
  """
  if not responses.success:
    raise flow.FlowError(
        "Error while collecting the knowledge base: %s" % responses.status)

  kb = responses.First()
  if data_store.AFF4Enabled():
    # AFF4 client.
    client = self._OpenClient(mode="rw")
    client.Set(client.Schema.KNOWLEDGE_BASE, kb)

    # Copy usernames.
    usernames = [user.username for user in kb.users if user.username]
    client.AddAttribute(client.Schema.USERNAMES(" ".join(usernames)))

    self.CopyOSReleaseFromKnowledgeBase(kb, client)
    client.Flush()

  # rdf_objects.ClientSnapshot.
  # Information already present in the knowledge base takes precedence.
  if not kb.os:
    kb.os = self.state.os
  if not kb.fqdn:
    kb.fqdn = self.state.fqdn
  self.state.client.knowledge_base = kb

  # NOTE(review): unlike other interrogate variants, no knowledge_base=kb
  # argument is forwarded to the collector flow here -- confirm intended.
  self.CallFlow(
      collectors.ArtifactCollectorFlow.__name__,
      artifact_list=config.CONFIG["Artifacts.non_kb_interrogate_artifacts"],
      next_state="ProcessArtifactResponses")

  if data_store.AFF4Enabled():
    # Update the client index for the AFF4 client.
    client_index.CreateClientIndex(token=self.token).AddClient(client)

  if data_store.RelationalDBWriteEnabled():
    try:
      # Update the client index for the rdf_objects.ClientSnapshot.
      client_index.ClientIndex().AddClient(self.state.client)
    except db.UnknownClientError:
      # Client metadata disappeared -- skip the index update.
      pass
def Handle(self, args, token=None):
  """Removes the given labels from the given clients.

  Updates the AFF4 objects and/or relational label store, keeps both keyword
  indexes in sync and always publishes audit events for the attempt.
  """
  audit_description = ",".join([
      token.username + u"." + utils.SmartUnicode(name) for name in args.labels
  ])
  audit_events = []
  try:
    if data_store.AFF4Enabled():
      index = client_index.CreateClientIndex(token=token)
      client_objs = aff4.FACTORY.MultiOpen(
          [cid.ToClientURN() for cid in args.client_ids],
          aff4_type=aff4_grr.VFSGRRClient,
          mode="rw",
          token=token)
      for client_obj in client_objs:
        # Drop all label keywords first, then re-add the client so only the
        # remaining labels are indexed.
        index.RemoveClientLabels(client_obj)
        self.RemoveClientLabels(client_obj, args.labels)
        index.AddClient(client_obj)
        client_obj.Close()

    if data_store.RelationalDBWriteEnabled():
      for client_id in args.client_ids:
        cid = unicode(client_id)
        data_store.REL_DB.RemoveClientLabels(cid, token.username, args.labels)
        # Only remove index keywords for labels that no other owner still
        # has on this client.
        labels_to_remove = set(args.labels)
        existing_labels = data_store.REL_DB.ReadClientLabels(cid)
        for label in existing_labels:
          labels_to_remove.discard(label.name)
        if labels_to_remove:
          idx = client_index.ClientIndex()
          idx.RemoveClientLabels(cid, labels_to_remove)

    for client_id in args.client_ids:
      audit_events.append(
          rdf_events.AuditEvent(
              user=token.username,
              action="CLIENT_REMOVE_LABEL",
              flow_name="handler.ApiRemoveClientsLabelsHandler",
              client=client_id.ToClientURN(),
              description=audit_description))
  finally:
    # Audit events are published even if the removal above failed.
    events.Events.PublishMultipleEvents(
        {audit.AUDIT_EVENT: audit_events}, token=token)
def EnrolFleetspeakClient(self, client_id):
  """Enrols a Fleetspeak-enabled client for use with GRR.

  No-op if the client is already known in whichever datastore is enabled.
  """
  client_urn = rdf_client.ClientURN(client_id)

  # If already enrolled, return.
  if data_store.RelationalDBReadEnabled():
    try:
      data_store.REL_DB.ReadClientMetadata(client_id)
      return
    except db.UnknownClientError:
      # Unknown client -- proceed with enrollment.
      pass
  else:
    if aff4.FACTORY.ExistsWithType(
        client_urn, aff4_type=aff4_grr.VFSGRRClient, token=self.token):
      return

  logging.info("Enrolling a new Fleetspeak client: %r", client_id)

  if data_store.RelationalDBWriteEnabled():
    data_store.REL_DB.WriteClientMetadata(client_id, fleetspeak_enabled=True)

  # TODO(fleetspeak-team,grr-team): If aff4 isn't reliable enough, we can
  # catch exceptions from it and forward them to Fleetspeak by failing its
  # gRPC call. Fleetspeak will then retry with a random, perhaps healthier,
  # instance of the GRR frontend.
  with aff4.FACTORY.Create(
      client_urn, aff4_type=aff4_grr.VFSGRRClient, mode="rw",
      token=self.token) as client:
    client.Set(client.Schema.FLEETSPEAK_ENABLED, rdfvalue.RDFBool(True))

    index = client_index.CreateClientIndex(token=self.token)
    index.AddClient(client)
    if data_store.RelationalDBWriteEnabled():
      # Mirror the new client into the relational keyword index.
      index = client_index.ClientIndex()
      index.AddClient(data_migration.ConvertVFSGRRClient(client))

  # Publish the client enrollment message.
  events.Events.PublishEvent("ClientEnrollment", client_urn, token=self.token)
def Handle(self, args, token=None):
  """Adds the given labels to the given clients.

  Updates the AFF4 objects and/or the relational label store plus keyword
  index, and always publishes audit events for the attempt.
  """
  audit_description = ",".join([
      token.username + u"." + utils.SmartUnicode(name) for name in args.labels
  ])
  audit_events = []
  try:
    for api_client_id in args.client_ids:
      audit_events.append(
          rdf_events.AuditEvent(
              user=token.username,
              action="CLIENT_ADD_LABEL",
              flow_name="handler.ApiAddClientsLabelsHandler",
              client=api_client_id.ToClientURN(),
              description=audit_description))

    if data_store.AFF4Enabled():
      index = client_index.CreateClientIndex(token=token)
      client_objs = aff4.FACTORY.MultiOpen(
          [cid.ToClientURN() for cid in args.client_ids],
          aff4_type=aff4_grr.VFSGRRClient,
          mode="rw",
          token=token)
      for client_obj in client_objs:
        client_obj.AddLabels(args.labels)
        # Re-index so the new labels become searchable.
        index.AddClient(client_obj)
        client_obj.Close()

    if data_store.RelationalDBWriteEnabled():
      for api_client_id in args.client_ids:
        cid = unicode(api_client_id)
        try:
          data_store.REL_DB.AddClientLabels(cid, token.username, args.labels)
          idx = client_index.ClientIndex()
          idx.AddClientLabels(cid, args.labels)
        except db.UnknownClientError:
          # TODO(amoser): Remove after data migration.
          pass
  finally:
    # Audit events are published even if labeling above failed.
    events.Events.PublishMultipleEvents(
        {audit.AUDIT_EVENT: audit_events}, token=token)
def testAnalyzeClient(self): index = client_index.CreateClientIndex(token=self.token) client = aff4.FACTORY.Create("aff4:/" + CLIENT_ID, aff4_type=aff4_grr.VFSGRRClient, mode="rw", token=self.token) client.Set(client.Schema.SYSTEM("Windows")) client.Set( client.Schema.CLIENT_INFO(client_name="grr monitor", labels=["client-label-23"])) kb = rdf_client.KnowledgeBase() # Using cyrillic characters to make sure non-latin username and fullname # works fine. kb.users.Append( rdf_client.User( username="******", full_name="Лев (граф ) 'Николаевич' \"Толстой\" ЛНТ")) kb.users.Append( rdf_client.User(username="******", full_name="Steve O'Bryan")) client.Set(client.Schema.KNOWLEDGE_BASE(kb)) _, keywords = index.AnalyzeClient(client) # Should not contain an empty string. self.assertNotIn("", keywords) # OS of the client self.assertIn(b"windows", keywords) # Users of the client. self.assertIn(b"левтолстой", keywords) self.assertIn(b"граф", keywords) self.assertNotIn(")", keywords) self.assertIn(b"николаевич", keywords) self.assertIn(b"толстой", keywords) self.assertIn(b"ernie", keywords) self.assertIn(b"лев", keywords) self.assertIn(b"лнт", keywords) self.assertIn(b"steve o'bryan", keywords) self.assertIn(b"o'bryan", keywords) # Client information. self.assertIn(b"grr monitor", keywords) self.assertIn(b"client-label-23", keywords)
def _CreateClients(self):
  """Sets up 15 clients; decorates clients 0/1 with labels and users."""
  # To test all search keywords, we can rely on SetupClients
  # creating clients with attributes containing a numeric
  # value, e.g. hostname will be Host-0, Host-1, etc.
  self.client_ids = self.SetupClients(15)

  self.AddClientLabel(self.client_ids[0], self.token.username,
                      u"common_test_label")
  self.AddClientLabel(self.client_ids[0], self.token.username,
                      u"unique_test_label")
  self.AddClientLabel(self.client_ids[1], self.token.username,
                      u"common_test_label")

  if data_store.RelationalDBReadEnabled():
    # Relational path: extend the snapshot with users, write it back and
    # re-index the freshly-read snapshot.
    snapshot = data_store.REL_DB.ReadClientSnapshot(
        self.client_ids[0].Basename())
    snapshot.knowledge_base.users.Append(rdf_client.User(username="******"))
    snapshot.knowledge_base.users.Append(
        rdf_client.User(username=self.token.username))
    data_store.REL_DB.WriteClientSnapshot(snapshot)

    client_index.ClientIndex().AddClient(
        data_store.REL_DB.ReadClientSnapshot(self.client_ids[0].Basename()))
  else:
    # SetupClients adds no labels or user names.
    with aff4.FACTORY.Open(
        self.client_ids[0], mode="rw", token=self.token) as client_obj:
      client_obj.AddLabel(u"common_test_label", owner=self.token.username)
      client_obj.AddLabel(u"unique_test_label", owner=self.token.username)

      # Add user in knowledge base.
      kb = client_obj.Get(client_obj.Schema.KNOWLEDGE_BASE)
      kb.users.Append(rdf_client.User(username="******"))
      kb.users.Append(rdf_client.User(username=self.token.username))
      client_obj.Set(client_obj.Schema.KNOWLEDGE_BASE, kb)

      # Update index, since we added users and labels.
      with client_index.CreateClientIndex(token=self.token) as index:
        index.AddClient(client_obj)
def testAnalyzeClient(self):
  """AnalyzeClient must derive keywords from OS, users and client info."""
  index = client_index.CreateClientIndex(token=self.token)
  client = aff4.FACTORY.Create(
      "aff4:/" + CLIENT_ID,
      aff4_type=aff4_grr.VFSGRRClient,
      mode="rw",
      token=self.token)
  client.Set(client.Schema.SYSTEM("Windows"))
  client.Set(
      client.Schema.CLIENT_INFO(
          client_name="grr monitor", labels=["client-label-23"]))

  kb = rdf_client.KnowledgeBase()
  kb.users.Append(
      rdf_client.User(
          username="******",
          full_name="Eric (Bertrand ) 'Russell' \"Logician\" Jacobson"))
  kb.users.Append(rdf_client.User(username="******", full_name="Steve O'Bryan"))
  client.Set(client.Schema.KNOWLEDGE_BASE(kb))

  _, keywords = index.AnalyzeClient(client)

  # Should not contain an empty string.
  self.assertNotIn("", keywords)
  # Stray punctuation must not become a keyword either.
  self.assertNotIn(")", keywords)

  expected_keywords = [
      # OS of the client.
      "windows",
      # Users of the client.
      "bert",
      "bertrand",
      "russell",
      "logician",
      "ernie",
      "eric",
      "jacobson",
      "steve o'bryan",
      "o'bryan",
      # Client information.
      "grr monitor",
      "client-label-23",
  ]
  for expected in expected_keywords:
    self.assertIn(expected, keywords)
def testRemoveLabels(self):
  """Removing a label must drop only the matching index keyword."""
  client = aff4.FACTORY.Create(
      CLIENT_ID,
      aff4_type=aff4_grr.VFSGRRClient,
      mode="rw",
      token=self.token)
  client.AddLabel("testlabel_1")
  client.AddLabel("testlabel_2")
  client.Flush()
  index = client_index.CreateClientIndex(token=self.token)
  index.AddClient(client)

  expected = [rdf_client.ClientURN(CLIENT_ID)]
  self.assertEqual(index.LookupClients(["testlabel_1"]), expected)
  self.assertEqual(index.LookupClients(["testlabel_2"]), expected)

  # Now delete one label: clear the label keywords, update the client and
  # re-index it.
  index.RemoveClientLabels(client)
  client.RemoveLabel("testlabel_1")
  index.AddClient(client)

  self.assertEqual(index.LookupClients(["testlabel_1"]), [])
  self.assertEqual(index.LookupClients(["testlabel_2"]), expected)
def setUp(self):
  super(ApiLabelsRestrictedSearchClientsHandlerTestAFF4, self).setUp()

  self.client_ids = [urn.Basename() for urn in self.SetupClients(4)]

  index = client_index.CreateClientIndex(token=self.token)

  def LabelClient(client_nr, label, owner):
    # Attaches a label to the client and refreshes its index entry.
    with aff4.FACTORY.Open(
        self.client_ids[client_nr], mode="rw",
        token=self.token) as grr_client:
      grr_client.AddLabel(label, owner=owner)
      index.AddClient(grr_client)

  LabelClient(0, "foo", "david")
  LabelClient(1, "not-foo", "david")
  LabelClient(2, "bar", "peter_another")
  LabelClient(3, "bar", "peter")

  # Only "foo"/"bar" owned by "david"/"peter" are visible to the handler.
  self.handler = client_plugin.ApiLabelsRestrictedSearchClientsHandler(
      labels_whitelist=["foo", "bar"],
      labels_owners_whitelist=["david", "peter"])
def CleanAff4Clients(self):
  """Cleans up old client data from aff4.

  Deletes AFF4 clients whose last-contact time is older than the configured
  inactive-client TTL, skipping clients carrying the exception label.
  """
  inactive_client_ttl = config.CONFIG["DataRetention.inactive_client_ttl"]
  if not inactive_client_ttl:
    self.Log("TTL not set - nothing to do...")
    return

  exception_label = config.CONFIG[
      "DataRetention.inactive_client_ttl_exception_label"]

  index = client_index.CreateClientIndex(token=self.token)

  # "." is the universal keyword, so this enumerates all indexed clients.
  client_urns = index.LookupClients(["."])

  deadline = rdfvalue.RDFDatetime.Now() - inactive_client_ttl
  deletion_count = 0

  for client_group in utils.Grouper(client_urns, 1000):
    inactive_client_urns = []
    for client in aff4.FACTORY.MultiOpen(
        client_group, mode="r", aff4_type=aff4_grr.VFSGRRClient,
        token=self.token):
      # Clients with the exception label are retained regardless of age.
      if exception_label in client.GetLabelsNames():
        continue

      if client.Get(client.Schema.LAST) < deadline:
        inactive_client_urns.append(client.urn)

    aff4.FACTORY.MultiDelete(inactive_client_urns, token=self.token)
    deletion_count += len(inactive_client_urns)
    # Let the worker know this long-running job is still alive.
    self.HeartBeat()

  self.Log("Deleted %d inactive clients." % deletion_count)
def _CheckLabelIndexAFF4(self, client_id, token=None):
  """Check that label indexes are updated."""
  aff4_index = client_index.CreateClientIndex(token=token)
  hits = list(aff4_index.LookupClients(["label:Label2"]))
  self.assertCountEqual(hits, [client_id])
def _CheckClientKwIndexAFF4(self, keywords, expected_count):
  """Asserts the AFF4 client index has expected_count hits for keywords."""
  aff4_index = client_index.CreateClientIndex(token=self.token)
  matches = aff4_index.LookupClients(keywords)
  self.assertLen(matches, expected_count)