def SetupTestClientObject(self,
                          client_nr,
                          add_cert=True,
                          arch="x86_64",
                          last_boot_time=None,
                          fqdn=None,
                          kernel="4.0.0",
                          memory_size=None,
                          os_version="buster/sid",
                          ping=None,
                          system="Linux",
                          labels=None):
  """Prepares a test client object.

  Builds an objects.ClientSnapshot whose id and searchable fields are
  derived from client_nr, writes its metadata and snapshot to the
  relational datastore and registers it with the client index.

  Args:
    client_nr: Integer; determines the client id ("C.1%015x" % client_nr)
      and other per-client fields (fqdn, hardware info, interfaces).
    add_cert: If True, attach a certificate derived from the configured
      client private key to the client metadata.
    arch: Architecture string stored on the snapshot.
    last_boot_time: Optional boot time for the startup info.
    fqdn: Optional fqdn override; defaults to "Host-%x.example.com".
    kernel: Kernel version string.
    memory_size: Optional memory size to store on the snapshot.
    os_version: OS version string.
    ping: Optional last-ping timestamp; defaults to now.
    system: OS name stored in the knowledge base.
    labels: Optional list of labels to add (as owner "GRR").

  Returns:
    The written objects.ClientSnapshot.
  """
  client_id = "C.1%015x" % client_nr

  client = objects.ClientSnapshot(client_id=client_id)
  client.startup_info.client_info = self._TestClientInfo()
  if last_boot_time is not None:
    client.startup_info.boot_time = last_boot_time

  client.knowledge_base.fqdn = fqdn or "Host-%x.example.com" % client_nr
  client.knowledge_base.os = system
  client.knowledge_base.users = [
      rdf_client.User(username="******"),
      rdf_client.User(username="******"),
  ]

  client.os_version = os_version
  client.arch = arch
  client.kernel = kernel

  client.interfaces = self._TestInterfaces(client_nr)

  client.hardware_info = rdf_client.HardwareInfo(
      system_manufacturer="System-Manufacturer-%x" % client_nr,
      bios_version="Bios-Version-%x" % client_nr)

  if memory_size is not None:
    client.memory_size = memory_size

  ping = ping or rdfvalue.RDFDatetime.Now()
  if add_cert:
    cert = self.ClientCertFromPrivateKey(config.CONFIG["Client.private_key"])
  else:
    cert = None

  # Metadata must exist before the snapshot can be written.
  data_store.REL_DB.WriteClientMetadata(
      client_id, last_ping=ping, certificate=cert, fleetspeak_enabled=False)
  data_store.REL_DB.WriteClientSnapshot(client)
  client_index.ClientIndex().AddClient(client)
  if labels:
    data_store.REL_DB.AddClientLabels(client_id, "GRR", labels)
    # NOTE(review): ReadClientLabels presumably returns label objects rather
    # than plain label names here — confirm AddClientLabels accepts them.
    client_index.ClientIndex().AddClientLabels(
        client_id, data_store.REL_DB.ReadClientLabels(client_id))
  return client
def Handle(self, args, token=None):
  """Searches for clients matching the query keywords.

  Args:
    args: Search arguments carrying `query` (keyword string), `offset` and
      `count` (0 means "no limit").
    token: Access token used for the legacy AFF4 code path.

  Returns:
    An ApiSearchClientsResult with the matching clients, sorted by id/URN.
  """
  end = args.count or sys.maxsize

  keywords = shlex.split(args.query)

  api_clients = []

  if data_store.RelationalDBReadEnabled():
    index = client_index.ClientIndex()

    clients = sorted(
        index.LookupClients(keywords))[args.offset:args.offset + end]
    client_infos = data_store.REL_DB.MultiReadClientFullInfo(clients)
    # Fix: iterate in sorted client-id order. The previous
    # `client_infos.itervalues()` was Python-2-only and yielded results in
    # arbitrary dict order, discarding the sort applied above. Using
    # sorted(...items()) also matches the restricted-search handler.
    for _, client_info in sorted(client_infos.items()):
      api_clients.append(ApiClient().InitFromClientInfo(client_info))
  else:
    index = client_index.CreateClientIndex(token=token)
    result_urns = sorted(
        index.LookupClients(keywords))[args.offset:args.offset + end]
    result_set = aff4.FACTORY.MultiOpen(result_urns, token=token)

    for child in sorted(result_set):
      api_clients.append(ApiClient().InitFromAff4Object(child))

  UpdateClientsFromFleetspeak(api_clients)
  return ApiSearchClientsResult(items=api_clients)
def testRemovesUserLabelWhenSystemLabelWithSimilarNameAlsoExists(self):
  """Removing a user label must not touch a same-named system ("GRR") label.

  Sets up a client carrying "foo" both as a user label and as a GRR-owned
  system label (in AFF4, the relational store and the index), removes the
  user label through the API handler, then verifies the system label
  survives in all three places.
  """
  idx = client_index.ClientIndex()
  with aff4.FACTORY.Open(
      self.client_ids[0], mode="rw", token=self.token) as grr_client:
    # Same label name under two owners: the test user and the system ("GRR").
    grr_client.AddLabel("foo")
    grr_client.AddLabel("foo", owner="GRR")
    data_store.REL_DB.WriteClientMetadata(
        self.client_ids[0].Basename(), fleetspeak_enabled=False)
    data_store.REL_DB.AddClientLabels(self.client_ids[0].Basename(),
                                      self.token.username, ["foo"])
    data_store.REL_DB.AddClientLabels(self.client_ids[0].Basename(), "GRR",
                                      ["foo"])
    idx.AddClientLabels(self.client_ids[0].Basename(), ["foo"])

  # Remove the user-owned "foo" via the handler under test.
  self.handler.Handle(
      client_plugin.ApiRemoveClientsLabelsArgs(
          client_ids=[self.client_ids[0]], labels=["foo"]),
      token=self.token)

  # AFF4 labels.
  labels = aff4.FACTORY.Open(self.client_ids[0], token=self.token).GetLabels()
  self.assertEqual(len(labels), 1)
  self.assertEqual(labels[0].name, "foo")
  self.assertEqual(labels[0].owner, "GRR")

  # Relational labels.
  labels = data_store.REL_DB.ReadClientLabels(self.client_ids[0].Basename())
  self.assertEqual(len(labels), 1)
  self.assertEqual(labels[0].name, "foo")
  self.assertEqual(labels[0].owner, "GRR")

  # The label is still in the index.
  self.assertEqual(
      idx.LookupClients(["label:foo"]), [self.client_ids[0].Basename()])
def End(self): """Finalize client registration.""" # Update summary and publish to the Discovery queue. if data_store.RelationalDBWriteEnabled(): try: data_store.REL_DB.WriteClientSnapshot(self.state.client) except db.UnknownClientError: pass client = self._OpenClient() if data_store.RelationalDBReadEnabled(): summary = self.state.client.GetSummary() summary.client_id = self.client_id summary.timestamp = rdfvalue.RDFDatetime.Now() else: summary = client.GetSummary() self.Publish("Discovery", summary) self.SendReply(summary) # Update the client index client_index.CreateClientIndex(token=self.token).AddClient(client) if data_store.RelationalDBWriteEnabled(): try: index = client_index.ClientIndex() index.AddClient(self.state.client) labels = self.state.client.startup_info.client_info.labels if labels: data_store.REL_DB.AddClientLabels(self.state.client.client_id, "GRR", labels) except db.UnknownClientError: # TODO(amoser): Remove after data migration. pass
def Start(self):
  """Sign the CSR from the client.

  Verifies the client's certificate signing request (signature, common
  name derived from its public key, and match against the reporting
  client id), signs it, stores the certificate in both datastores, and
  publishes a ClientEnrollment event.

  Raises:
    ValueError: If the request is not a CSR or its CN does not match the
      public key.
    flow.FlowError: If the CSR signature does not verify or the CN does
      not match the client id that sent the request.
  """
  if self.args.csr.type != rdf_crypto.Certificate.Type.CSR:
    raise ValueError("Must be called with CSR")

  csr = rdf_crypto.CertificateSigningRequest(self.args.csr.pem)
  # Verify the CSR. This is not strictly necessary but doesn't harm either.
  try:
    csr.Verify(csr.GetPublicKey())
  except rdf_crypto.VerificationError:
    raise flow.FlowError("CSR for client %s did not verify: %s" %
                         (self.client_id, csr.AsPEM()))

  # Verify that the CN is of the correct form. The common name should refer
  # to a client URN.
  self.cn = rdf_client.ClientURN.FromPublicKey(csr.GetPublicKey())
  if self.cn != csr.GetCN():
    raise ValueError("CSR CN %s does not match public key %s." %
                     (csr.GetCN(), self.cn))

  logging.info("Will sign CSR for: %s", self.cn)

  cert = rdf_crypto.RDFX509Cert.ClientCertFromCSR(csr)

  # This check is important to ensure that the client id reported in the
  # source of the enrollment request is the same as the one in the
  # certificate. We use the ClientURN to ensure this is also of the correct
  # form for a client name.
  if self.cn != self.client_id:
    raise flow.FlowError("Certificate name %s mismatch for client %s" %
                         (self.cn, self.client_id))

  with aff4.FACTORY.Create(
      self.client_id, aff4_grr.VFSGRRClient, mode="rw",
      token=self.token) as client:
    # Set and write the certificate to the client record.
    now = rdfvalue.RDFDatetime.Now()
    client.Set(client.Schema.CERT, cert)
    client.Set(client.Schema.FIRST_SEEN, now)
    if data_store.RelationalDBWriteEnabled():
      data_store.REL_DB.WriteClientMetadata(
          self.client_id.Basename(),
          certificate=cert,
          first_seen=now,
          fleetspeak_enabled=False)

    index = client_index.CreateClientIndex(token=self.token)
    index.AddClient(client)
    if data_store.RelationalDBWriteEnabled():
      index = client_index.ClientIndex()
      # The relational index takes a snapshot converted from the AFF4 object.
      index.AddClient(data_migration.ConvertVFSGRRClient(client))

  # Publish the client enrollment message.
  self.Publish("ClientEnrollment", self.client_id)

  self.Log("Enrolled %s successfully", self.client_id)
def _CheckClientKwIndex(self, keywords, expected_count):
  """Asserts that both client indexes return expected_count hits.

  Looks up `keywords` in the AFF4-backed index and in the relational
  index and checks each result count against `expected_count`.
  """
  aff4_index = client_index.CreateClientIndex(token=self.token)
  rel_index = client_index.ClientIndex()
  for idx in (aff4_index, rel_index):
    self.assertEqual(len(idx.LookupClients(keywords)), expected_count)
def _CheckLabelIndex(self):
  """Check that label indexes are updated."""
  # The AFF4 index is keyed by client URN.
  aff4_index = client_index.CreateClientIndex(token=self.token)
  aff4_hits = list(aff4_index.LookupClients(["label:Label2"]))
  self.assertItemsEqual(aff4_hits, [self.client_id])

  # The relational index is keyed by plain client id.
  rel_hits = client_index.ClientIndex().LookupClients(["label:Label2"])
  self.assertItemsEqual(rel_hits, [self.client_id.Basename()])
def EnrolFleetspeakClient(self, client_id):
  """Enrols a Fleetspeak-enabled client for use with GRR.

  No-ops if the client is already known. Otherwise marks the client as
  Fleetspeak-enabled in both datastores, refreshes the client indexes and
  publishes a ClientEnrollment event wrapped in a GrrMessage.

  Args:
    client_id: The client id (string) reported by Fleetspeak.
  """
  client_urn = rdf_client.ClientURN(client_id)

  # If already enrolled, return.
  if data_store.RelationalDBReadEnabled():
    if data_store.REL_DB.ReadClientMetadata(client_id):
      return
  else:
    if aff4.FACTORY.ExistsWithType(
        client_urn, aff4_type=aff4_grr.VFSGRRClient, token=self.token):
      return

  logging.info("Enrolling a new Fleetspeak client: %r", client_id)

  if data_store.RelationalDBWriteEnabled():
    data_store.REL_DB.WriteClientMetadata(client_id, fleetspeak_enabled=True)

  # TODO(fleetspeak-team,grr-team): If aff4 isn't reliable enough, we can
  # catch exceptions from it and forward them to Fleetspeak by failing its
  # gRPC call. Fleetspeak will then retry with a random, perhaps healthier,
  # instance of the GRR frontend.
  with aff4.FACTORY.Create(
      client_urn,
      aff4_type=aff4_grr.VFSGRRClient,
      mode="rw",
      token=self.token) as client:
    client.Set(client.Schema.FLEETSPEAK_ENABLED, rdfvalue.RDFBool(True))

    index = client_index.CreateClientIndex(token=self.token)
    index.AddClient(client)
    if data_store.RelationalDBWriteEnabled():
      index = client_index.ClientIndex()
      index.AddClient(data_migration.ConvertVFSGRRClient(client))

  enrollment_session_id = rdfvalue.SessionID(
      queue=queues.ENROLLMENT, flow_name="Enrol")

  publish_msg = rdf_flows.GrrMessage(
      payload=client_urn,
      session_id=enrollment_session_id,
      # Fleetspeak ensures authentication.
      auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
      source=enrollment_session_id,
      priority=rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY)

  # Publish the client enrollment message.
  events.Events.PublishEvent("ClientEnrollment", publish_msg, token=self.token)
def AddClientLabel(self, client_id, owner, name):
  """Adds a single label to a client in whichever datastore is active."""
  if not data_store.RelationalDBReadEnabled():
    # Legacy path: mutate the AFF4 object and refresh the AFF4 index.
    with aff4.FACTORY.Open(client_id, mode="rw", token=self.token) as fd:
      fd.AddLabel(name, owner=owner)
      with client_index.CreateClientIndex(token=self.token) as idx:
        idx.AddClient(fd)
    return

  # Relational path: accept either a ClientURN or a plain client id string.
  if hasattr(client_id, "Basename"):
    client_id = client_id.Basename()
  data_store.REL_DB.AddClientLabels(client_id, owner, [name])
  client_index.ClientIndex().AddClientLabels(client_id, [name])
def ClientFixture(client_id, token=None, age=None):
  """Creates a client fixture with a predefined VFS tree."""
  # Accept either a ClientURN or a plain "C.xxx" string.
  if hasattr(client_id, "Basename"):
    client_id = client_id.Basename()

  LegacyClientFixture(client_id, age=age, token=token)

  if data_store.RelationalDBReadEnabled():
    # Mirror the legacy fixture into the relational store and index it.
    data_migration.Migrate(thread_count=1)
    snapshot = data_store.REL_DB.ReadClientSnapshot(client_id)
    client_index.ClientIndex().AddClient(snapshot)
def ProcessKnowledgeBase(self, responses):
  """Collect and store any extra non-kb artifacts.

  Stores the collected knowledge base on the AFF4 client and on the
  in-flight ClientSnapshot, kicks off collection of additional hardware
  artifacts, and refreshes both client indexes.

  Args:
    responses: Flow responses; the first one is the KnowledgeBase.

  Raises:
    flow.FlowError: If knowledge base collection failed.
  """
  if not responses.success:
    raise flow.FlowError(
        "Error while collecting the knowledge base: %s" % responses.status)

  kb = responses.First()
  # AFF4 client.
  client = self._OpenClient(mode="rw")
  client.Set(client.Schema.KNOWLEDGE_BASE, kb)

  # Copy usernames.
  usernames = [user.username for user in kb.users if user.username]
  client.AddAttribute(client.Schema.USERNAMES(" ".join(usernames)))

  self.CopyOSReleaseFromKnowledgeBase(kb, client)
  client.Flush()

  # objects.ClientSnapshot.

  # Information already present in the knowledge base takes precedence.
  if not kb.os:
    kb.os = self.state.system
  if not kb.fqdn:
    kb.fqdn = self.state.fqdn

  self.state.client.knowledge_base = kb

  artifact_list = [
      "WMILogicalDisks", "RootDiskVolumeUsage", "WMIComputerSystemProduct",
      "LinuxHardwareInfo", "OSXSPHardwareDataType"
  ]
  self.CallFlow(
      collectors.ArtifactCollectorFlow.__name__,
      artifact_list=artifact_list,
      next_state="ProcessArtifactResponses")

  # Update the client index for the AFF4 client.
  client_index.CreateClientIndex(token=self.token).AddClient(client)

  if data_store.RelationalDBWriteEnabled():
    try:
      # Update the client index for the objects.ClientSnapshot.
      client_index.ClientIndex().AddClient(self.state.client)
    except db.UnknownClientError:
      # Snapshot may not exist in the relational store yet.
      pass
def testAddLookupClients(self):
  """Indexed clients are findable by ip, mac, hostname, id and prefixes."""
  index = client_index.ClientIndex()

  clients = self._SetupClients(2)
  for client_id, client in clients.items():
    # Metadata must be written before the client can be indexed.
    data_store.REL_DB.WriteClientMetadata(client_id, fleetspeak_enabled=False)
    index.AddClient(client)

  # Check unique identifiers.
  self.assertEqual(
      index.LookupClients(["192.168.0.1"]), ["C.1000000000000001"])
  self.assertEqual(
      index.LookupClients(["2001:aBcd::1"]), ["C.1000000000000001"])
  self.assertEqual(
      index.LookupClients(["ip:192.168.0.1"]), ["C.1000000000000001"])
  self.assertEqual(
      index.LookupClients(["ip:2001:abcd::1"]), ["C.1000000000000001"])
  self.assertEqual(index.LookupClients(["host-2"]), ["C.1000000000000002"])
  self.assertEqual(
      index.LookupClients(["C.1000000000000002"]), ["C.1000000000000002"])
  self.assertEqual(
      index.LookupClients(["aabbccddee01"]), ["C.1000000000000001"])
  self.assertEqual(
      index.LookupClients(["mac:aabbccddee01"]), ["C.1000000000000001"])
  self.assertEqual(
      index.LookupClients(["aa:bb:cc:dd:ee:01"]), ["C.1000000000000001"])
  self.assertEqual(
      index.LookupClients(["mac:aa:bb:cc:dd:ee:01"]), ["C.1000000000000001"])

  # IP prefixes of octets should work:
  self.assertItemsEqual(index.LookupClients(["192.168.0"]), list(clients))

  # Hostname prefixes of tokens should work.
  self.assertEqual(
      index.LookupClients(["host-2.example"]), ["C.1000000000000002"])

  # Intersections should work.
  self.assertEqual(
      index.LookupClients(["192.168.0", "Host-2"]), ["C.1000000000000002"])

  # Universal keyword should find everything.
  self.assertItemsEqual(index.LookupClients(["."]), list(clients))
def testAddTimestamp(self):
  """LookupClients honors start_date restrictions based on add time."""
  index = client_index.ClientIndex()
  clients = self._SetupClients(5)

  # 1413807132 = Mon, 20 Oct 2014 12:12:12 GMT
  with test_lib.FakeTime(1413807132):
    for cid, snapshot in clients.items():
      data_store.REL_DB.WriteClientMetadata(cid, fleetspeak_enabled=False)
      index.AddClient(snapshot)

  def MatchCount(date):
    return len(index.LookupClients([".", "start_date:" + date]))

  self.assertEqual(MatchCount("2014-10-20"), 5)
  self.assertEqual(MatchCount("2014-10-21"), 0)
  # Ignore the keyword if the date is not readable.
  self.assertEqual(MatchCount("XXX"), 0)
def _CreateClients(self):
  """Creates test clients with labels and users for search-keyword tests."""
  # To test all search keywords, we can rely on SetupClients
  # creating clients with attributes containing a numberic
  # value, e.g. hostname will be Host-0, Host-1, etc.
  self.client_ids = self.SetupClients(15)

  # Client 0 carries one shared and one unique label; client 1 only the
  # shared one, so label searches can distinguish them.
  self.AddClientLabel(self.client_ids[0], self.token.username,
                      "common_test_label")
  self.AddClientLabel(self.client_ids[0], self.token.username,
                      "unique_test_label")
  self.AddClientLabel(self.client_ids[1], self.token.username,
                      "common_test_label")

  if data_store.RelationalDBReadEnabled():
    snapshot = data_store.REL_DB.ReadClientSnapshot(
        self.client_ids[0].Basename())
    snapshot.knowledge_base.users.Append(rdf_client.User(username="******"))
    snapshot.knowledge_base.users.Append(
        rdf_client.User(username=self.token.username))
    data_store.REL_DB.WriteClientSnapshot(snapshot)

    # Re-read the snapshot so the index sees the persisted state.
    client_index.ClientIndex().AddClient(
        data_store.REL_DB.ReadClientSnapshot(self.client_ids[0].Basename()))
  else:
    # SetupClients adds no labels or user names.
    with aff4.FACTORY.Open(
        self.client_ids[0], mode="rw", token=self.token) as client_obj:
      client_obj.AddLabel("common_test_label", owner=self.token.username)
      client_obj.AddLabel("unique_test_label", owner=self.token.username)
      # Add user in knowledge base.
      kb = client_obj.Get(client_obj.Schema.KNOWLEDGE_BASE)
      kb.users.Append(rdf_client.User(username="******"))
      kb.users.Append(rdf_client.User(username=self.token.username))
      client_obj.Set(client_obj.Schema.KNOWLEDGE_BASE, kb)
      # Update index, since we added users and labels.
      with client_index.CreateClientIndex(token=self.token) as index:
        index.AddClient(client_obj)
def EnrolFleetspeakClient(self, client_id):
  """Enrols a Fleetspeak-enabled client for use with GRR.

  No-ops if the client is already known. Otherwise marks the client as
  Fleetspeak-enabled in both datastores, refreshes the client indexes and
  publishes a ClientEnrollment event carrying the client URN.

  Args:
    client_id: The client id (string) reported by Fleetspeak.
  """
  client_urn = rdf_client.ClientURN(client_id)

  # If already enrolled, return.
  if data_store.RelationalDBReadEnabled():
    if data_store.REL_DB.ReadClientMetadata(client_id):
      return
  else:
    if aff4.FACTORY.ExistsWithType(
        client_urn, aff4_type=aff4_grr.VFSGRRClient, token=self.token):
      return

  logging.info("Enrolling a new Fleetspeak client: %r", client_id)

  if data_store.RelationalDBWriteEnabled():
    data_store.REL_DB.WriteClientMetadata(client_id, fleetspeak_enabled=True)

  # TODO(fleetspeak-team,grr-team): If aff4 isn't reliable enough, we can
  # catch exceptions from it and forward them to Fleetspeak by failing its
  # gRPC call. Fleetspeak will then retry with a random, perhaps healthier,
  # instance of the GRR frontend.
  with aff4.FACTORY.Create(
      client_urn,
      aff4_type=aff4_grr.VFSGRRClient,
      mode="rw",
      token=self.token) as client:
    client.Set(client.Schema.FLEETSPEAK_ENABLED, rdfvalue.RDFBool(True))

    index = client_index.CreateClientIndex(token=self.token)
    index.AddClient(client)
    if data_store.RelationalDBWriteEnabled():
      index = client_index.ClientIndex()
      index.AddClient(data_migration.ConvertVFSGRRClient(client))

  # Publish the client enrollment message.
  events.Events.PublishEvent("ClientEnrollment", client_urn, token=self.token)
def Handle(self, args, token=None):
  """Removes the given labels from the given clients.

  Removes the labels from the AFF4 objects and (when enabled) from the
  relational store, keeps both client indexes in sync, and publishes one
  CLIENT_REMOVE_LABEL audit event per client even if an error interrupts
  processing.
  """
  audit_description = ",".join([
      token.username + u"." + utils.SmartUnicode(name) for name in args.labels
  ])
  audit_events = []

  try:
    index = client_index.CreateClientIndex(token=token)
    client_objs = aff4.FACTORY.MultiOpen(
        [cid.ToClientURN() for cid in args.client_ids],
        aff4_type=aff4_grr.VFSGRRClient,
        mode="rw",
        token=token)
    for client_obj in client_objs:
      if data_store.RelationalDBWriteEnabled():
        cid = client_obj.urn.Basename()
        data_store.REL_DB.RemoveClientLabels(cid, token.username, args.labels)
        # Only drop index entries for labels no other owner still has on
        # this client (e.g. a same-named "GRR" system label must survive).
        labels_to_remove = set(args.labels)
        existing_labels = data_store.REL_DB.ReadClientLabels(cid)
        for label in existing_labels:
          labels_to_remove.discard(label.name)
        if labels_to_remove:
          idx = client_index.ClientIndex()
          idx.RemoveClientLabels(cid, labels_to_remove)

      # AFF4 index: drop all label keywords, then re-add from current state.
      index.RemoveClientLabels(client_obj)
      self.RemoveClientLabels(client_obj, args.labels)
      index.AddClient(client_obj)
      client_obj.Close()

      audit_events.append(
          rdf_events.AuditEvent(
              user=token.username,
              action="CLIENT_REMOVE_LABEL",
              flow_name="handler.ApiRemoveClientsLabelsHandler",
              client=client_obj.urn,
              description=audit_description))
  finally:
    events.Events.PublishMultipleEvents({audit.AUDIT_EVENT: audit_events},
                                        token=token)
def Handle(self, args, token=None):
  """Adds the given labels to the given clients.

  Adds the labels (owned by the calling user) to the AFF4 objects and,
  when enabled, to the relational store and index. Publishes one
  CLIENT_ADD_LABEL audit event per client even if an error interrupts
  processing.
  """
  audit_description = ",".join([
      token.username + u"." + utils.SmartUnicode(name) for name in args.labels
  ])
  audit_events = []

  try:
    index = client_index.CreateClientIndex(token=token)
    client_objs = aff4.FACTORY.MultiOpen(
        [cid.ToClientURN() for cid in args.client_ids],
        aff4_type=aff4_grr.VFSGRRClient,
        mode="rw",
        token=token)
    for client_obj in client_objs:
      if data_store.RelationalDBWriteEnabled():
        cid = client_obj.urn.Basename()
        try:
          data_store.REL_DB.AddClientLabels(cid, token.username, args.labels)
          idx = client_index.ClientIndex()
          idx.AddClientLabels(cid, args.labels)
        except db.UnknownClientError:
          # TODO(amoser): Remove after data migration.
          pass

      client_obj.AddLabels(args.labels)
      index.AddClient(client_obj)
      client_obj.Close()

      audit_events.append(
          rdf_events.AuditEvent(
              user=token.username,
              action="CLIENT_ADD_LABEL",
              flow_name="handler.ApiAddClientsLabelsHandler",
              client=client_obj.urn,
              description=audit_description))
  finally:
    events.Events.PublishMultipleEvents({audit.AUDIT_EVENT: audit_events},
                                        token=token)
def setUp(self):
  """Creates four labeled clients and the restricted-search handler."""
  super(ApiLabelsRestrictedSearchClientsHandlerTestRelational, self).setUp()

  self.client_ids = sorted(self.SetupTestClientObjects(4))

  # One (owner, label) pair per client, in client-id order.
  labeling = [
      ("david", "foo"),
      ("david", "not-foo"),
      ("peter_oth", "bar"),
      ("peter", "bar"),
  ]
  index = client_index.ClientIndex()
  for client_id, (owner, label) in zip(self.client_ids, labeling):
    data_store.REL_DB.AddClientLabels(client_id, owner, [label])
    index.AddClientLabels(client_id, [label])

  self.handler = client_plugin.ApiLabelsRestrictedSearchClientsHandler(
      labels_whitelist=["foo", "bar"],
      labels_owners_whitelist=["david", "peter"])
def testAnalyzeClient(self):
  """AnalyzeClient derives the expected search keywords from a snapshot."""
  index = client_index.ClientIndex()

  snapshot = rdf_objects.ClientSnapshot(client_id="C.0000000000000000")
  snapshot.knowledge_base.os = "Windows"
  snapshot.startup_info.client_info.client_name = "grr monitor"
  snapshot.startup_info.client_info.labels = ["client-label-23"]
  kb = snapshot.knowledge_base
  kb.users = [
      rdf_client.User(
          username="******",
          full_name="Eric (Bertrand ) 'Russell' \"Logician\" Jacobson"),
      rdf_client.User(username="******", full_name="Steve O'Bryan"),
  ]

  keywords = index.AnalyzeClient(snapshot)

  # Should not contain an empty string, nor stray punctuation tokens.
  self.assertNotIn("", keywords)
  self.assertNotIn(")", keywords)

  expected = [
      # OS of the client.
      "windows",
      # Users of the client (names are tokenized).
      "bert",
      "bertrand",
      "russell",
      "logician",
      "ernie",
      "eric",
      "jacobson",
      "steve o'bryan",
      "o'bryan",
      # Client information.
      "grr monitor",
      "client-label-23",
  ]
  for keyword in expected:
    self.assertIn(keyword, keywords)
def testRemoveLabels(self):
  """Labels can be removed individually and wholesale from the index."""
  # Fix: `self._SetupClients(1).keys()[0]` relies on Python 2's list-returning
  # dict.keys(); in Python 3 a dict view is not indexable. list(dict) works
  # identically on both and yields the single client id.
  client_id = list(self._SetupClients(1))[0]
  data_store.REL_DB.WriteClientMetadata(client_id, fleetspeak_enabled=False)
  data_store.REL_DB.AddClientLabels(client_id, "owner",
                                    ["testlabel_1", "testlabel_2"])

  index = client_index.ClientIndex()
  index.AddClientLabels(client_id, ["testlabel_1", "testlabel_2"])

  self.assertEqual(index.LookupClients(["testlabel_1"]), [client_id])
  self.assertEqual(index.LookupClients(["testlabel_2"]), [client_id])

  # Now delete one label.
  index.RemoveClientLabels(client_id, ["testlabel_1"])

  self.assertEqual(index.LookupClients(["testlabel_1"]), [])
  self.assertEqual(index.LookupClients(["testlabel_2"]), [client_id])

  # Remove them all.
  index.RemoveAllClientLabels(client_id)

  self.assertEqual(index.LookupClients(["testlabel_1"]), [])
  self.assertEqual(index.LookupClients(["testlabel_2"]), [])
def _Setup100Clients(self):
  """Creates 100 test clients, each labeled "foo" by owner "david"."""
  self.client_ids = sorted(self.SetupTestClientObjects(100))
  idx = client_index.ClientIndex()
  for cid in self.client_ids:
    data_store.REL_DB.AddClientLabels(cid, "david", ["foo"])
    idx.AddClientLabels(cid, ["foo"])
def Handle(self, args, token=None):
  """Searches clients, restricted to whitelisted labels and owners.

  Args:
    args: Search arguments carrying `query`, `offset` and `count`
      (0 means "no limit").
    token: Access token used for the legacy AFF4 code path.

  Returns:
    An ApiSearchClientsResult with the matching, label-verified clients.
  """
  if args.count:
    end = args.offset + args.count
    # Read <count> clients ahead in case some of them fail to open / verify.
    batch_size = end + args.count
  else:
    end = sys.maxsize
    batch_size = end

  keywords = shlex.split(args.query)
  api_clients = []

  if data_store.RelationalDBReadEnabled():
    index = client_index.ClientIndex()

    # TODO(amoser): We could move the label verification into the
    # database making this method more efficient. Label restrictions
    # should be on small subsets though so this might not be worth
    # it.
    all_client_ids = set()
    for label in self.labels_whitelist:
      label_filter = ["label:" + label] + keywords
      all_client_ids.update(index.LookupClients(label_filter))

    # Fix: the result counter used to be named `index`, shadowing the
    # ClientIndex object bound above.
    result_count = 0
    for cid_batch in utils.Grouper(sorted(all_client_ids), batch_size):
      client_infos = data_store.REL_DB.MultiReadClientFullInfo(cid_batch)
      for _, client_info in sorted(client_infos.items()):
        if not self._VerifyLabels(client_info.labels):
          continue
        if result_count >= args.offset and result_count < end:
          api_clients.append(ApiClient().InitFromClientInfo(client_info))
        result_count += 1
        if result_count >= end:
          UpdateClientsFromFleetspeak(api_clients)
          return ApiSearchClientsResult(items=api_clients)

    # Fix: previously this branch fell through and implicitly returned None
    # whenever fewer than `end` verified results existed; return the partial
    # result set like the AFF4 branch does.
    UpdateClientsFromFleetspeak(api_clients)
    return ApiSearchClientsResult(items=api_clients)
  else:
    index = client_index.CreateClientIndex(token=token)
    all_urns = set()
    for label in self.labels_whitelist:
      label_filter = ["label:" + label] + keywords
      all_urns.update(index.LookupClients(label_filter))

    all_objs = aff4.FACTORY.MultiOpen(
        all_urns, aff4_type=aff4_grr.VFSGRRClient, token=token)

    result_count = 0
    for client_obj in sorted(all_objs):
      if not self._CheckClientLabels(client_obj):
        continue
      if result_count >= args.offset and result_count < end:
        api_clients.append(ApiClient().InitFromAff4Object(client_obj))
      result_count += 1
      if result_count >= end:
        break

    UpdateClientsFromFleetspeak(api_clients)
    return ApiSearchClientsResult(items=api_clients)
def Platform(self, responses):
  """Stores information about the platform.

  On success, writes platform attributes to the AFF4 client and to the
  in-flight ClientSnapshot, refreshes both client indexes, requests cloud
  VM metadata on Linux/Windows and creates the registry VFS root on
  Windows. On failure, falls back to previously stored system info. In
  either case, starts knowledge base initialization if the system type is
  known.
  """
  if responses.success:
    response = responses.First()

    # AFF4 client.

    # These need to be in separate attributes because they get searched on in
    # the GUI
    with self._OpenClient(mode="rw") as client:
      # For backwards compatibility.
      client.Set(client.Schema.HOSTNAME(response.fqdn))
      client.Set(client.Schema.SYSTEM(response.system))
      client.Set(client.Schema.OS_RELEASE(response.release))
      client.Set(client.Schema.OS_VERSION(response.version))
      client.Set(client.Schema.KERNEL(response.kernel))
      client.Set(client.Schema.FQDN(response.fqdn))

      # response.machine is the machine value of platform.uname()
      # On Windows this is the value of:
      # HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session
      # Manager\Environment\PROCESSOR_ARCHITECTURE
      # "AMD64", "IA64" or "x86"
      client.Set(client.Schema.ARCH(response.machine))
      client.Set(
          client.Schema.UNAME("%s-%s-%s" % (response.system, response.release,
                                            response.version)))

    # Update the client index
    client_index.CreateClientIndex(token=self.token).AddClient(client)

    # rdf_objects.ClientSnapshot.
    client = self.state.client
    client.os_release = response.release
    client.os_version = response.version
    client.kernel = response.kernel
    client.arch = response.machine
    # Store these for later, there might be more accurate data
    # coming in from the artifact collector.
    self.state.fqdn = response.fqdn
    self.state.os = response.system

    if data_store.RelationalDBWriteEnabled():
      try:
        # Update the client index
        client_index.ClientIndex().AddClient(client)
      except db.UnknownClientError:
        pass

    if response.system == "Windows":
      with aff4.FACTORY.Create(
          self.client_id.Add("registry"), standard.VFSDirectory,
          token=self.token) as fd:
        fd.Set(
            fd.Schema.PATHSPEC,
            fd.Schema.PATHSPEC(
                path="/", pathtype=rdf_paths.PathSpec.PathType.REGISTRY))

    # No support for OS X cloud machines as yet.
    if response.system in ["Linux", "Windows"]:
      self.CallClient(
          server_stubs.GetCloudVMMetadata,
          cloud.BuildCloudMetadataRequests(),
          next_state="CloudMetadata")

    known_system_type = True
  else:
    # We failed to get the Platform info, maybe there is a stored
    # system we can use to get at least some data.
    if data_store.RelationalDBReadEnabled():
      client = data_store.REL_DB.ReadClientSnapshot(self.client_id.Basename())
      known_system_type = client and client.knowledge_base.os
    else:
      client = self._OpenClient()
      known_system_type = client.Get(client.Schema.SYSTEM)

    self.Log("Could not retrieve Platform info.")

  if known_system_type:
    # We will accept a partial KBInit rather than raise, so pass
    # require_complete=False.
    self.CallFlow(
        artifact.KnowledgeBaseInitializationFlow.__name__,
        require_complete=False,
        lightweight=self.args.lightweight,
        next_state="ProcessKnowledgeBase")
  else:
    self.Log("Unknown system type, skipping KnowledgeBaseInitializationFlow")