def testDoesNotRemoveSystemLabelFromSingleClient(self):
  """Removing a system (GRR-owned) label must leave the label in place."""
  if data_store.AFF4Enabled():
    with aff4.FACTORY.Open(
        self.client_ids[0], mode="rw", token=self.token) as grr_client:
      grr_client.AddLabel(u"foo", owner=u"GRR")
  if data_store.RelationalDBEnabled():
    data_store.REL_DB.WriteClientMetadata(
        self.client_ids[0].Basename(), fleetspeak_enabled=False)
    # The label is owned by "GRR", i.e. it is a system label.
    data_store.REL_DB.AddClientLabels(self.client_ids[0].Basename(), u"GRR",
                                      [u"foo"])
    idx = client_index.ClientIndex()
    idx.AddClientLabels(self.client_ids[0].Basename(), [u"foo"])

  # Attempt to remove the system label through the API handler.
  self.handler.Handle(
      client_plugin.ApiRemoveClientsLabelsArgs(
          client_ids=[self.client_ids[0]], labels=[u"foo"]),
      token=self.token)

  if data_store.AFF4Enabled():
    # AFF4 labels: the system label must still be present.
    labels = aff4.FACTORY.Open(
        self.client_ids[0], token=self.token).GetLabels()
    self.assertLen(labels, 1)

  if data_store.RelationalDBEnabled():
    # Relational labels: the system label must still be present.
    labels = data_store.REL_DB.ReadClientLabels(self.client_ids[0].Basename())
    self.assertLen(labels, 1)
    # The label is still in the index.
    self.assertEqual(
        idx.LookupClients(["label:foo"]), [self.client_ids[0].Basename()])
def Run(self):
  """Generates regression data for GetGrrUser, both as plain user and admin."""
  user_urn = aff4.ROOT_URN.Add("users").Add(self.token.username)
  if data_store.AFF4Enabled():
    with test_lib.FakeTime(42):
      with aff4.FACTORY.Create(
          user_urn,
          aff4_type=aff4_users.GRRUser,
          mode="w",
          token=self.token) as user_fd:
        user_fd.Set(
            user_fd.Schema.GUI_SETTINGS,
            aff4_users.GUISettings(mode="ADVANCED", canary_mode=True))

  # Setup relational DB.
  data_store.REL_DB.WriteGRRUser(
      username=self.token.username, ui_mode="ADVANCED", canary_mode=True)

  self.Check("GetGrrUser")

  # Make user an admin and do yet another request.
  if data_store.AFF4Enabled():
    with aff4.FACTORY.Open(user_urn, mode="rw", token=self.token) as user_fd:
      user_fd.SetLabel("admin", owner="GRR")
  data_store.REL_DB.WriteGRRUser(
      username=self.token.username,
      user_type=rdf_objects.GRRUser.UserType.USER_TYPE_ADMIN)

  self.Check("GetGrrUser")
def testRemovesUserLabelFromSingleClient(self):
  """Removing one user-owned label deletes only that label from the client."""
  if data_store.AFF4Enabled():
    with aff4.FACTORY.Open(
        self.client_ids[0], mode="rw", token=self.token) as grr_client:
      grr_client.AddLabels([u"foo", u"bar"])
  if data_store.RelationalDBEnabled():
    data_store.REL_DB.WriteClientMetadata(
        self.client_ids[0].Basename(), fleetspeak_enabled=False)
    # Both labels are owned by the current (non-system) user.
    data_store.REL_DB.AddClientLabels(self.client_ids[0].Basename(),
                                      self.token.username, [u"foo", u"bar"])

  # Remove just "foo"; "bar" should survive.
  self.handler.Handle(
      client_plugin.ApiRemoveClientsLabelsArgs(
          client_ids=[self.client_ids[0]], labels=[u"foo"]),
      token=self.token)

  if data_store.AFF4Enabled():
    # AFF4 labels.
    labels = aff4.FACTORY.Open(
        self.client_ids[0], token=self.token).GetLabels()
    self.assertLen(labels, 1)
    self.assertEqual(labels[0].name, u"bar")
    self.assertEqual(labels[0].owner, self.token.username)

  if data_store.RelationalDBEnabled():
    # Relational labels.
    labels = data_store.REL_DB.ReadClientLabels(self.client_ids[0].Basename())
    self.assertLen(labels, 1)
    self.assertEqual(labels[0].name, u"bar")
    self.assertEqual(labels[0].owner, self.token.username)
def ProcessKnowledgeBase(self, responses):
  """Collect and store any extra non-kb artifacts.

  Args:
    responses: Flow responses; the first one is the collected KnowledgeBase.

  Raises:
    flow.FlowError: If the knowledge base collection failed.
  """
  if not responses.success:
    raise flow.FlowError(
        "Error while collecting the knowledge base: %s" % responses.status)

  kb = responses.First()

  if data_store.AFF4Enabled():
    # AFF4 client.
    client = self._OpenClient(mode="rw")
    with client:
      client.Set(client.Schema.KNOWLEDGE_BASE, kb)

      # Copy usernames.
      usernames = [user.username for user in kb.users if user.username]
      client.AddAttribute(client.Schema.USERNAMES(" ".join(usernames)))

      self.CopyOSReleaseFromKnowledgeBase(kb, client)

  # rdf_objects.ClientSnapshot.
  # Information already present in the knowledge base takes precedence.
  if not kb.os:
    kb.os = self.state.os
  if not kb.fqdn:
    kb.fqdn = self.state.fqdn
  self.state.client.knowledge_base = kb

  if data_store.RelationalDBReadEnabled():
    existing_client = data_store.REL_DB.ReadClientSnapshot(self.client_id)
    if existing_client is None:
      # This is the first time we interrogate this client. In that case, we
      # need to store basic information about this client right away so follow
      # up flows work properly.
      data_store.REL_DB.WriteClientSnapshot(self.state.client)

  self.CallFlow(
      collectors.ArtifactCollectorFlow.__name__,
      artifact_list=config.CONFIG["Artifacts.non_kb_interrogate_artifacts"],
      knowledge_base=kb,
      next_state="ProcessArtifactResponses")

  if data_store.AFF4Enabled():
    # Update the client index for the AFF4 client.
    client_index.CreateClientIndex(token=self.token).AddClient(client)

  if data_store.RelationalDBWriteEnabled():
    try:
      # Update the client index for the rdf_objects.ClientSnapshot.
      client_index.ClientIndex().AddClient(self.state.client)
    except db.UnknownClientError:
      pass
def List(self, responses):
  """Collect the directory listing and store in the datastore.

  Args:
    responses: StatEntry responses, one per directory entry.

  Raises:
    flow.FlowError: If the client-side listing failed.
  """
  if not responses.success:
    raise flow.FlowError(str(responses.status))

  self.Log("Listed %s", self.state.urn)

  # All writes below share one mutation pool so they are flushed together.
  with data_store.DB.GetMutationPool() as pool:
    if data_store.AFF4Enabled():
      with aff4.FACTORY.Create(
          self.state.urn,
          standard.VFSDirectory,
          mode="w",
          mutation_pool=pool,
          token=self.token) as fd:
        fd.Set(fd.Schema.PATHSPEC(self.state.stat.pathspec))
        fd.Set(fd.Schema.STAT(self.state.stat))

    if data_store.RelationalDBWriteEnabled():
      path_info = rdf_objects.PathInfo.FromStatEntry(self.state.stat)
      data_store.REL_DB.WritePathInfos(self.client_id, [path_info])

    stat_entries = list(map(rdf_client_fs.StatEntry, responses))
    WriteStatEntries(
        stat_entries,
        client_id=self.client_id,
        mutation_pool=pool,
        token=self.token)

    for stat_entry in stat_entries:
      self.SendReply(stat_entry)  # Send Stats to parent flows.
def testClientFileFinderUploadBound(self):
  """DOWNLOAD_TRUNCATED uploads at most max_size bytes of each file."""
  paths = [os.path.join(self.base_path, "{**,.}/*.plist")]
  action = rdf_file_finder.FileFinderAction.Download(
      oversized_file_policy="DOWNLOAD_TRUNCATED", max_size=300)

  session_id = self._RunClientFileFinder(paths, action)
  results = flow_test_lib.GetFlowResults(self.client_id, session_id)
  self.assertLen(results, 5)
  relpaths = [
      os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
      for p in results
  ]
  self.assertCountEqual(relpaths, [
      "History.plist", "History.xml.plist", "test.plist",
      "parser_test/com.google.code.grr.plist",
      "parser_test/InstallHistory.plist"
  ])

  if data_store.AFF4Enabled():
    for r in results:
      aff4_obj = aff4.FACTORY.Open(
          r.stat_entry.pathspec.AFF4Path(self.client_id), token=self.token)
      data = aff4_obj.read()
      # Truncation policy: never more than max_size bytes stored.
      self.assertLessEqual(len(data), 300)
      # Compare the stored prefix with the on-disk file. Use a context
      # manager so the file handle is closed (the original leaked it).
      with open(r.stat_entry.pathspec.path, "rb") as filedesc:
        self.assertEqual(data, filedesc.read(len(data)))
def setUp(self):
  """Sets up clients with varying last-ping ages plus system/user labels."""
  super(SystemCronTestMixin, self).setUp()

  one_hour_ping = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("1h")
  eight_day_ping = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("8d")
  ancient_ping = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("61d")

  self.SetupClientsWithIndices(
      range(0, 10), system="Windows", ping=eight_day_ping)
  self.SetupClientsWithIndices(
      range(10, 20), system="Linux", ping=eight_day_ping)
  self.SetupClientsWithIndices(
      range(20, 22),
      system="Darwin",
      fleetspeak_enabled=True,
      ping=one_hour_ping)
  # These clients shouldn't be analyzed by any of the stats cronjobs.
  self.SetupClientsWithIndices(
      range(22, 24), system="Linux", ping=ancient_ping)

  # Attach labels to the first ten (Windows) clients only.
  for i in range(0, 10):
    client_id = u"C.1%015x" % i
    if data_store.AFF4Enabled():
      with aff4.FACTORY.Open(
          client_id, mode="rw", token=self.token) as client:
        client.AddLabels([u"Label1", u"Label2"], owner=u"GRR")
        client.AddLabel(u"UserLabel", owner=u"jim")
    if data_store.RelationalDBEnabled():
      data_store.REL_DB.AddClientLabels(client_id, u"GRR",
                                        [u"Label1", u"Label2"])
      data_store.REL_DB.AddClientLabels(client_id, u"jim", [u"UserLabel"])
def testGlobDirectory(self):
  """Test that glob expands directories."""
  users = [
      rdf_client.User(username="******", appdata="test_data/index.dat"),
      rdf_client.User(username="******", appdata="test_data/History"),
      rdf_client.User(username="******", appdata="%%PATH%%"),
  ]
  self.client_id = self.SetupClient(0, users=users)

  client_mock = action_mocks.GlobClientMock()

  # This glob selects all files which start with the username on this system.
  path = os.path.join(os.path.dirname(self.base_path), "%%users.appdata%%")

  # Run the flow.
  flow_test_lib.TestFlowHelper(
      compatibility.GetName(filesystem.Glob),
      client_mock,
      client_id=self.client_id,
      paths=[path],
      token=self.token)

  if data_store.AFF4Enabled():
    # Raises if the AFF4 object for index.dat was not created as a VFSFile.
    path = self.client_id.Add("fs/os").Add(self.base_path).Add("index.dat")
    aff4.FACTORY.Open(path, aff4_type=aff4_grr.VFSFile, token=self.token)
  else:
    children = self._ListTestChildPathInfos(
        [], path_type=rdf_objects.PathInfo.PathType.OS)
    self.assertLen(children, 1)
    self.assertEqual(children[0].components[-1], "index.dat")
def testUnicodeListDirectory(self):
  """Test that the ListDirectory flow works on unicode directories."""
  client_mock = action_mocks.ListDirectoryClientMock()

  pb = rdf_paths.PathSpec(
      path=os.path.join(self.base_path, "test_img.dd"),
      pathtype=rdf_paths.PathSpec.PathType.OS)
  # Non-ASCII directory name nested inside the TSK image.
  filename = "入乡随俗 海外春节别样过法"
  pb.Append(path=filename, pathtype=rdf_paths.PathSpec.PathType.TSK)

  flow_test_lib.TestFlowHelper(
      compatibility.GetName(filesystem.ListDirectory),
      client_mock,
      client_id=self.client_id,
      pathspec=pb,
      token=self.token)

  # Check the output file is created.
  output_path = self.client_id.Add("fs/tsk").Add(pb.CollapsePath())

  if data_store.AFF4Enabled():
    fd = aff4.FACTORY.Open(output_path, token=self.token)
    children = list(fd.OpenChildren())
    self.assertLen(children, 1)
    child = children[0]
    filename = child.urn.Basename()
  else:
    components = ["test_img.dd", filename]
    children = self._ListTestChildPathInfos(components)
    self.assertLen(children, 1)
    filename = children[0].components[-1]

  self.assertEqual(filename, "入乡随俗.txt")
def StoreMBR(self, responses):
  """This method stores the MBR.

  Accumulates response chunks until the requested number of bytes has been
  downloaded, then assembles and persists the MBR.

  Args:
    responses: Responses carrying one chunk of MBR data each.

  Raises:
    flow.FlowError: If the MBR chunk could not be retrieved.
  """
  if not responses.success:
    msg = "Could not retrieve MBR: %s" % responses.status
    self.Log(msg)
    raise flow.FlowError(msg)

  response = responses.First()

  self.state.buffers.append(response.data)
  self.state.bytes_downloaded += len(response.data)

  if self.state.bytes_downloaded >= self.args.length:
    mbr_data = b"".join(self.state.buffers)
    # Release the accumulated chunks once assembled.
    self.state.buffers = None

    if data_store.AFF4Enabled():
      with aff4.FACTORY.Create(
          self.client_urn.Add("mbr"),
          aff4_grr.VFSFile,
          mode="w",
          token=self.token) as mbr:
        mbr.write(mbr_data)

    self.Log("Successfully collected the MBR (%d bytes)." % len(mbr_data))
    self.SendReply(rdfvalue.RDFBytes(mbr_data))
def setUp(self):
  """Installs the approval-checking API router with a test approvers file."""
  super(ApprovalByLabelE2ETest, self).setUp()

  if data_store.AFF4Enabled():
    self.SetUpLegacy()
  if data_store.RelationalDBReadEnabled():
    self.SetUpRelationalDB()

  cls = (
      api_call_router_with_approval_checks.ApiCallRouterWithApprovalChecks)
  cls.ClearCache()

  approver = test_lib.ConfigOverrider({
      "API.DefaultRouter":
          cls.__name__,
      "ACL.approvers_config_file":
          os.path.join(self.base_path, "approvers.yaml")
  })
  approver.Start()
  self.addCleanup(approver.Stop)

  # Get a fresh approval manager object and reload with test approvers.
  approval_manager_stubber = utils.Stubber(
      client_approval_auth, "CLIENT_APPROVAL_AUTH_MGR",
      client_approval_auth.ClientApprovalAuthorizationManager())
  approval_manager_stubber.Start()
  self.addCleanup(approval_manager_stubber.Stop)

  # Force creation of new APIAuthorizationManager, so that configuration
  # changes are picked up.
  api_auth_manager.APIACLInit.InitApiAuthManager()
def WriteAllCrashDetails(client_id, crash_details, flow_session_id=None, hunt_session_id=None, token=None): """Updates the last crash attribute of the client.""" # AFF4. if data_store.AFF4Enabled(): with aff4.FACTORY.Create(client_id, aff4_grr.VFSGRRClient, token=token) as client_obj: client_obj.Set(client_obj.Schema.LAST_CRASH(crash_details)) # Duplicate the crash information in a number of places so we can find it # easily. client_urn = rdf_client.ClientURN(client_id) client_crashes = aff4_grr.VFSGRRClient.CrashCollectionURNForCID( client_urn) with data_store.DB.GetMutationPool() as pool: grr_collections.CrashCollection.StaticAdd(client_crashes, crash_details, mutation_pool=pool) # Relational db. if data_store.RelationalDBWriteEnabled(): try: data_store.REL_DB.WriteClientCrashInfo(client_id, crash_details) except db.UnknownClientError: pass if not flow_session_id: return if data_store.RelationalDBFlowsEnabled(): flow_id = flow_session_id.Basename() data_store.REL_DB.UpdateFlow(client_id, flow_id, client_crash_info=crash_details) flow_obj = data_store.REL_DB.ReadFlowObject(client_id, flow_id) if flow_obj.parent_hunt_id: db_compat.ProcessHuntClientCrash(flow_obj, client_crash_info=crash_details) # TODO(amoser): Registering crashes in hunts is currently not implemented for # the relational db. if not data_store.RelationalDBFlowsEnabled(): with aff4.FACTORY.Open(flow_session_id, flow.GRRFlow, mode="rw", age=aff4.NEWEST_TIME, token=token) as aff4_flow: aff4_flow.Set(aff4_flow.Schema.CLIENT_CRASH(crash_details)) hunt_session_id = ExtractHuntId(flow_session_id) if hunt_session_id and hunt_session_id != flow_session_id: hunt_obj = aff4.FACTORY.Open(hunt_session_id, aff4_type=implementation.GRRHunt, mode="rw", token=token) hunt_obj.RegisterCrash(crash_details)
def InstallDate(self, responses):
  """Stores the time when the OS was installed on the client."""
  if not responses.success:
    self.Log("Could not get InstallDate")
    return

  response = responses.First()

  # When using relational flows, the response is serialized as an any value
  # and we get an equivalent RDFInteger here so we need to check for both.
  if isinstance(response, (rdfvalue.RDFDatetime, rdfvalue.RDFInteger)):
    # New clients send the correct values already.
    install_date = response
  elif isinstance(response, rdf_protodict.DataBlob):
    # For backwards compatibility.
    install_date = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
        response.integer)
  else:
    self.Log("Unknown response type for InstallDate: %s" % type(response))
    return

  if data_store.AFF4Enabled():
    # AFF4 client.
    with self._CreateClient() as client:
      client.Set(client.Schema.INSTALL_DATE(install_date))

  # rdf_objects.ClientSnapshot.
  self.state.client.install_time = install_date
def Run(self):
  """Generates regression data for the GrantClientApproval API method."""
  with test_lib.FakeTime(42):
    self.CreateAdminUser(u"requestor")

    client_id = self.SetupClient(0)

    if data_store.AFF4Enabled():
      # Delete the certificate as it's being regenerated every time the
      # client is created.
      with aff4.FACTORY.Open(
          client_id, mode="rw", token=self.token) as grr_client:
        grr_client.DeleteAttribute(grr_client.Schema.CERT)

  with test_lib.FakeTime(44):
    approval_id = self.RequestClientApproval(
        client_id.Basename(),
        reason="foo",
        approver=self.token.username,
        requestor=u"requestor")

  with test_lib.FakeTime(126):
    self.Check(
        "GrantClientApproval",
        args=user_plugin.ApiGrantClientApprovalArgs(
            client_id=client_id.Basename(),
            approval_id=approval_id,
            username=u"requestor"),
        # Normalize the generated approval id so golden data is stable.
        replace={approval_id: "approval:111111"})
def Run(self):
  """Generates regression data for the CreateClientApproval API method."""
  with test_lib.FakeTime(42):
    self.CreateUser(u"approver")

    client_id = self.SetupClient(0)

    if data_store.AFF4Enabled():
      # Delete the certificate as it's being regenerated every time the
      # client is created.
      with aff4.FACTORY.Open(
          client_id, mode="rw", token=self.token) as grr_client:
        grr_client.DeleteAttribute(grr_client.Schema.CERT)

  def ReplaceApprovalId():
    # Normalize the generated approval id so golden data is stable.
    approvals = self.ListClientApprovals()
    return {approvals[0].id: "approval:112233"}

  with test_lib.FakeTime(126):
    self.Check(
        "CreateClientApproval",
        args=user_plugin.ApiCreateClientApprovalArgs(
            client_id=client_id.Basename(),
            approval=user_plugin.ApiClientApproval(
                reason="really important reason!",
                notified_users=[u"approver1", u"approver2"],
                email_cc_addresses=["*****@*****.**"])),
        replace=ReplaceApprovalId)
def ClientInfo(self, responses):
  """Obtain some information about the GRR client running."""
  if not responses.success:
    self.Log("Could not get ClientInfo.")
    return
  response = responses.First()

  if fleetspeak_utils.IsFleetspeakEnabledClient(
      self.client_id, token=self.token):
    label = fleetspeak_utils.GetLabelFromFleetspeak(self.client_id)
    # A FS enabled GRR shouldn't provide a label, but if it does prefer
    # it to an unrecognized FS label.
    #
    # TODO(user): Remove condition once we are confident in FS labeling.
    if label != fleetspeak_connector.unknown_label or not response.labels:
      response.labels = [label]

  if data_store.AFF4Enabled():
    # AFF4 client.
    with self._OpenClient(mode="rw") as client:
      client.Set(client.Schema.CLIENT_INFO(response))
      client.AddLabels(response.labels, owner="GRR")

  # rdf_objects.ClientSnapshot.
  self.state.client.startup_info.client_info = response
def Start(self):
  """Start off all the tests."""
  # Fresh snapshot; fqdn/os are filled in by later states.
  self.state.client = rdf_objects.ClientSnapshot(client_id=self.client_id)
  self.state.fqdn = None
  self.state.os = None

  if data_store.AFF4Enabled():
    # Make sure we always have a VFSDirectory with a pathspec at fs/os
    pathspec = rdf_paths.PathSpec(
        path="/", pathtype=rdf_paths.PathSpec.PathType.OS)
    urn = pathspec.AFF4Path(self.client_urn)
    with aff4.FACTORY.Create(
        urn, standard.VFSDirectory, mode="w", token=self.token) as fd:
      fd.Set(fd.Schema.PATHSPEC, pathspec)

  # Fan out one client action per piece of client state to collect.
  self.CallClient(server_stubs.GetPlatformInfo, next_state="Platform")
  self.CallClient(server_stubs.GetMemorySize, next_state="StoreMemorySize")
  self.CallClient(server_stubs.GetInstallDate, next_state="InstallDate")
  self.CallClient(server_stubs.GetClientInfo, next_state="ClientInfo")
  self.CallClient(
      server_stubs.GetConfiguration, next_state="ClientConfiguration")
  self.CallClient(
      server_stubs.GetLibraryVersions, next_state="ClientLibraries")
  self.CallClient(
      server_stubs.EnumerateInterfaces, next_state="EnumerateInterfaces")
  self.CallClient(
      server_stubs.EnumerateFilesystems, next_state="EnumerateFilesystems")
def _GetArtifact(self, artifact_name):
  """Collects `artifact_name` and verifies the stored file size.

  Args:
    artifact_name: Name of the artifact to collect.
  """
  client_mock = action_mocks.FileFinderClientMock()
  client_id = self.SetupClient(0, system="Linux")
  file_path = os.path.join(self.base_path, "hello.exe")

  artifact_list = [artifact_name]
  flow_test_lib.TestFlowHelper(
      aff4_flows.ArtifactCollectorFlow.__name__,
      client_mock,
      artifact_list=artifact_list,
      use_tsk=False,
      token=self.token,
      client_id=client_id)

  # The original opened the file and seeked to the end to get its size,
  # leaking the handle; os.path.getsize is equivalent and leak-free.
  expected_size = os.path.getsize(file_path)

  if data_store.AFF4Enabled():
    # Test the AFF4 file that was created.
    fd1 = aff4.FACTORY.Open(
        "%s/fs/os/%s" % (client_id, file_path), token=self.token)
    size = fd1.Get(fd1.Schema.SIZE)
    self.assertEqual(size, expected_size)
  else:
    components = file_path.strip("/").split("/")
    fd = file_store.OpenFile(
        db.ClientPath(
            client_id.Basename(),
            rdf_objects.PathInfo.PathType.OS,
            components=tuple(components)))
    fd.Seek(0, 2)
    size = fd.Tell()
    self.assertEqual(size, expected_size)
def testListingRegistryDirectoryDoesNotYieldMtimes(self):
  """Registry directory listings must not report st_mtime values."""
  with vfs_test_lib.RegistryVFSStubber():

    client_id = self.SetupClient(0)
    pb = rdf_paths.PathSpec(
        path="/HKEY_LOCAL_MACHINE/SOFTWARE/ListingTest",
        pathtype=rdf_paths.PathSpec.PathType.REGISTRY)

    client_mock = action_mocks.ListDirectoryClientMock()

    flow_test_lib.TestFlowHelper(
        compatibility.GetName(filesystem.ListDirectory),
        client_mock,
        client_id=client_id,
        pathspec=pb,
        token=self.token)

    if data_store.AFF4Enabled():
      output_path = client_id.Add("registry").Add(pb.first.path)
      results = list(
          aff4.FACTORY.Open(output_path, token=self.token).OpenChildren())
      self.assertLen(results, 2)
      for result in results:
        st = result.Get(result.Schema.STAT)
        self.assertIsNone(st.st_mtime)
    else:
      # NOTE(review): this branch uses self.client_id while the AFF4 branch
      # uses the local client_id from SetupClient(0) — confirm both refer to
      # the same client.
      children = data_store.REL_DB.ListChildPathInfos(
          self.client_id.Basename(), rdf_objects.PathInfo.PathType.REGISTRY,
          ["HKEY_LOCAL_MACHINE", "SOFTWARE", "ListingTest"])
      self.assertLen(children, 2)
      for child in children:
        self.assertIsNone(child.stat_entry.st_mtime)
def InitializeKnowledgeBase(self):
  """Get the existing KB or create a new one if none exists.

  Raises:
    flow.FlowError: In the AFF4 branch, if the client's OS is unknown.
  """
  if data_store.AFF4Enabled():
    self.client = aff4.FACTORY.Open(self.client_id, token=self.token)

    # Always create a new KB to override any old values.
    self.state.knowledge_base = rdf_client.KnowledgeBase()
    SetCoreGRRKnowledgeBaseValues(self.state.knowledge_base, self.client)
    if not self.state.knowledge_base.os:
      # If we don't know what OS this is, there is no way to proceed.
      raise flow.FlowError("Client OS not set for: %s, cannot initialize"
                           " KnowledgeBase" % self.client_id)
  else:
    # Always create a new KB to override any old values but keep os and
    # version so we know which artifacts we can run.
    self.state.knowledge_base = rdf_client.KnowledgeBase()
    snapshot = data_store.REL_DB.ReadClientSnapshot(self.client_id)
    if not snapshot or not snapshot.knowledge_base:
      return

    kb = snapshot.knowledge_base
    state_kb = self.state.knowledge_base
    state_kb.os = kb.os
    state_kb.os_major_version = kb.os_major_version
    state_kb.os_minor_version = kb.os_minor_version

    if not state_kb.os_major_version and snapshot.os_version:
      version = snapshot.os_version.split(".")
      try:
        state_kb.os_major_version = int(version[0])
        # Bug fix: the original tested `len(version) >= 1`, which is always
        # true after split(), so a single-component version (e.g. "10")
        # raised an uncaught IndexError on version[1]. Only parse the minor
        # version when a second component actually exists.
        if len(version) > 1:
          state_kb.os_minor_version = int(version[1])
      except ValueError:
        pass
def testGlobWildcardsAndTSK(self): client_mock = action_mocks.GlobClientMock() # This glob should find this file in test data: glob_test/a/b/foo. path = os.path.join(self.base_path, "test_IMG.dd", "glob_test", "a", "b", "FOO*") flow_test_lib.TestFlowHelper(compatibility.GetName(filesystem.Glob), client_mock, client_id=self.client_id, paths=[path], pathtype=rdf_paths.PathSpec.PathType.OS, token=self.token) if data_store.AFF4Enabled(): output_path = self.client_id.Add("fs/tsk").Add( os.path.join(self.base_path, "test_img.dd", "glob_test", "a", "b")) fd = aff4.FACTORY.Open(output_path, token=self.token) children = list(fd.ListChildren()) self.assertLen(children, 1) self.assertEqual(children[0].Basename(), "foo") else: children = self._ListTestChildPathInfos( ["test_img.dd", "glob_test", "a", "b"]) self.assertLen(children, 1) self.assertEqual(children[0].components[-1], "foo")
def Run(self):
  """Generates regression data for the GetFlow API method."""
  # Fix the time to avoid regressions.
  with test_lib.FakeTime(42):
    client_id = self.SetupClient(0).Basename()

    if data_store.AFF4Enabled():
      # Delete the certificates as it's being regenerated every time the
      # client is created.
      with aff4.FACTORY.Open(
          client_id, mode="rw", token=self.token) as client_obj:
        client_obj.DeleteAttribute(client_obj.Schema.CERT)

    flow_id = api_regression_test_lib.StartFlow(
        client_id, discovery.Interrogate, token=self.token)

    replace = api_regression_test_lib.GetFlowTestReplaceDict(
        client_id, flow_id, "F:ABCDEF12")

    self.Check(
        "GetFlow",
        args=flow_plugin.ApiGetFlowArgs(client_id=client_id, flow_id=flow_id),
        replace=replace)

    self._TerminateFlow(client_id, flow_id)

    replace = api_regression_test_lib.GetFlowTestReplaceDict(
        client_id, flow_id, "F:ABCDEF13")

    # Fetch the same flow which now should be marked as pending
    # termination.
    self.Check(
        "GetFlow",
        args=flow_plugin.ApiGetFlowArgs(client_id=client_id, flow_id=flow_id),
        replace=replace)
def testGlobGrouping(self):
  """Tests the glob grouping functionality."""
  # A {a,b,c} group expands to one glob alternative per element.
  pattern = "test_data/{ntfs_img.dd,*log,*.exe}"

  client_mock = action_mocks.GlobClientMock()
  path = os.path.join(os.path.dirname(self.base_path), pattern)

  # Run the flow.
  flow_test_lib.TestFlowHelper(
      compatibility.GetName(filesystem.Glob),
      client_mock,
      client_id=self.client_id,
      paths=[path],
      token=self.token)

  if data_store.AFF4Enabled():
    path = self.client_id.Add("fs/os").Add(self.base_path)
    files_found = [
        urn.Basename() for urn in aff4.FACTORY.ListChildren(path)
    ]
  else:
    children = self._ListTestChildPathInfos(
        [], path_type=rdf_objects.PathInfo.PathType.OS)
    files_found = [child.components[-1] for child in children]

  self.assertCountEqual(files_found, [
      "ntfs_img.dd",
      "apache_false_log",
      "apache_log",
      "syslog",
      "hello.exe",
  ])
def EnrolFleetspeakClient(self, client_id):
  """Enrols a Fleetspeak-enabled client for use with GRR.

  Args:
    client_id: GRR client-id for the client.

  Returns:
    True if the client is new, and actually got enrolled. This method
    is a no-op if the client already exists (in which case False is
    returned).
  """
  client_urn = rdf_client.ClientURN(client_id)

  # If already enrolled, return.
  if data_store.RelationalDBEnabled():
    try:
      data_store.REL_DB.ReadClientMetadata(client_id)
      return False
    except db.UnknownClientError:
      # Unknown client — proceed with enrollment below.
      pass
  else:
    if aff4.FACTORY.ExistsWithType(
        client_urn, aff4_type=aff4_grr.VFSGRRClient, token=self.token):
      return False

  logging.info("Enrolling a new Fleetspeak client: %r", client_id)

  if data_store.RelationalDBEnabled():
    now = rdfvalue.RDFDatetime.Now()
    data_store.REL_DB.WriteClientMetadata(
        client_id, first_seen=now, fleetspeak_enabled=True, last_ping=now)

  if data_store.AFF4Enabled():
    # TODO(fleetspeak-team,grr-team): If aff4 isn't reliable enough, we can
    # catch exceptions from it and forward them to Fleetspeak by failing its
    # gRPC call. Fleetspeak will then retry with a random, perhaps
    # healthier, instance of the GRR frontend.
    with aff4.FACTORY.Create(
        client_urn,
        aff4_type=aff4_grr.VFSGRRClient,
        mode="rw",
        token=self.token) as client:
      client.Set(client.Schema.FLEETSPEAK_ENABLED, rdfvalue.RDFBool(True))

      index = client_index.CreateClientIndex(token=self.token)
      index.AddClient(client)

  if data_store.RelationalDBEnabled():
    client_obj = rdf_objects.ClientSnapshot(client_id=client_urn.Basename())
    index = client_index.ClientIndex()
    index.AddClient(client_obj)

  # Publish the client enrollment message.
  events.Events.PublishEvent(
      "ClientEnrollment", client_urn, token=self.token)

  return True
def CreateDirectory(client_path, token=None):
  """Creates a directory entry for `client_path` in every enabled datastore.

  Args:
    client_path: A `ClientPath` instance specifying location of the file.
    token: A GRR token for accessing the data store.
  """
  precondition.AssertType(client_path, db.ClientPath)

  # A stat entry describing a directory; st_mode 16895 == 0o40777.
  dir_stat = rdf_client_fs.StatEntry(
      pathspec=rdf_paths.PathSpec(
          pathtype=client_path.path_type,
          path="/".join(client_path.components)),
      st_mode=16895)

  if data_store.RelationalDBEnabled():
    info = rdf_objects.PathInfo()
    info.path_type = client_path.path_type
    info.components = client_path.components
    info.stat_entry = dir_stat
    data_store.REL_DB.WritePathInfos(client_path.client_id, [info])

  if data_store.AFF4Enabled():
    urn = aff4.ROOT_URN.Add(client_path.client_id).Add(client_path.vfs_path)
    with aff4.FACTORY.Create(
        urn, aff4_standard.VFSDirectory, token=token) as dirdesc:
      dirdesc.Set(dirdesc.Schema.STAT, dir_stat)
      dirdesc.Set(dirdesc.Schema.PATHSPEC, dir_stat.pathspec)
def InstallDate(self, responses):
  """Stores the time when the OS was installed on the client."""
  if not responses.success:
    self.Log("Could not get InstallDate")
    return

  response = responses.First()

  # When using relational flows, the response is serialized as an any value
  # and is deserialized to an equivalent RDFInteger, so accept both types.
  # This matches the other InstallDate handler in this codebase; the
  # original of this copy only accepted RDFDatetime and silently dropped
  # RDFInteger responses via the "unknown type" branch below.
  if isinstance(response, (rdfvalue.RDFDatetime, rdfvalue.RDFInteger)):
    # New clients send the correct values already.
    install_date = response
  elif isinstance(response, rdf_protodict.DataBlob):
    # For backwards compatibility.
    install_date = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
        response.integer)
  else:
    self.Log("Unknown response type for InstallDate: %s" % type(response))
    return

  if data_store.AFF4Enabled():
    # AFF4 client.
    with self._CreateClient() as client:
      client.Set(client.Schema.INSTALL_DATE(install_date))

  # rdf_objects.ClientSnapshot.
  self.state.client.install_time = install_date
def testClientFileFinderUploadSkip(self):
  """SKIP policy transfers small files and skips oversized ones."""
  paths = [os.path.join(self.base_path, "{**,.}/*.plist")]
  action = rdf_file_finder.FileFinderAction.Download(
      oversized_file_policy="SKIP", max_size=300)

  session_id = self._RunClientFileFinder(paths, action)
  results = flow_test_lib.GetFlowResults(self.client_id, session_id)

  # Partition results: a transferred_file field means the file was uploaded.
  skipped = []
  uploaded = []
  for result in results:
    if result.HasField("transferred_file"):
      uploaded.append(result)
    else:
      skipped.append(result)

  self.assertLen(uploaded, 2)
  self.assertLen(skipped, 3)

  relpaths = [
      os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
      for p in uploaded
  ]
  self.assertCountEqual(relpaths, ["History.plist", "test.plist"])

  if data_store.AFF4Enabled():
    for r in uploaded:
      aff4_obj = aff4.FACTORY.Open(
          r.stat_entry.pathspec.AFF4Path(self.client_id), token=self.token)
      # Use a context manager for the local file so the handle is closed
      # deterministically (the original leaked it).
      with open(r.stat_entry.pathspec.path, "rb") as filedesc:
        self.assertEqual(aff4_obj.Read(100), filedesc.read(100))
def ProcessArtifactResponses(self, responses):
  """Stores volumes and hardware info returned by the artifact collector.

  Args:
    responses: Volume and HardwareInfo responses from the collector flow.

  Raises:
    ValueError: If a response is neither a Volume nor a HardwareInfo.
  """
  if not responses.success:
    self.Log("Error collecting artifacts: %s", responses.status)
  if not list(responses):
    return

  if data_store.AFF4Enabled():
    with self._OpenClient(mode="rw") as client:
      new_volumes = []
      for response in responses:
        if isinstance(response, rdf_client_fs.Volume):
          # AFF4 client.
          new_volumes.append(response)
        elif isinstance(response, rdf_client.HardwareInfo):
          # AFF4 client.
          client.Set(client.Schema.HARDWARE_INFO, response)
        else:
          raise ValueError("Unexpected response type: %s" % type(response))

      if new_volumes:
        volumes = client.Schema.VOLUMES()
        for v in new_volumes:
          volumes.Append(v)
        client.Set(client.Schema.VOLUMES, volumes)

  # rdf_objects.ClientSnapshot: mirror the same data into the snapshot.
  for response in responses:
    if isinstance(response, rdf_client_fs.Volume):
      self.state.client.volumes.append(response)
    elif isinstance(response, rdf_client.HardwareInfo):
      self.state.client.hardware_info = response
    else:
      raise ValueError("Unexpected response type: %s" % type(response))
def testGetClientStats(self): client_id = self.SetupClient(0) # TODO(amoser): Fix this. if not data_store.AFF4Enabled(): self.skipTest( "Client stats storage not yet implemented for the relational db." ) class ClientMock(action_mocks.ActionMock): def GetClientStats(self, _): """Fake get client stats method.""" response = rdf_client_stats.ClientStats() for i in range(12): sample = rdf_client_stats.CpuSample(timestamp=int(i * 10 * 1e6), user_cpu_time=10 + i, system_cpu_time=20 + i, cpu_percent=10 + i) response.cpu_samples.Append(sample) sample = rdf_client_stats.IOSample(timestamp=int(i * 10 * 1e6), read_bytes=10 + i, write_bytes=10 + i) response.io_samples.Append(sample) return [response] flow_test_lib.TestFlowHelper(administrative.GetClientStats.__name__, ClientMock(), token=self.token, client_id=client_id) urn = client_id.Add("stats") stats_fd = aff4.FACTORY.Create(urn, aff4_stats.ClientStats, token=self.token, mode="rw") sample = stats_fd.Get(stats_fd.Schema.STATS) # Samples are taken at the following timestamps and should be split into 2 # bins as follows (sample_interval is 60000000): # 00000000, 10000000, 20000000, 30000000, 40000000, 50000000 -> Bin 1 # 60000000, 70000000, 80000000, 90000000, 100000000, 110000000 -> Bin 2 self.assertLen(sample.cpu_samples, 2) self.assertLen(sample.io_samples, 2) self.assertAlmostEqual(sample.io_samples[0].read_bytes, 15.0) self.assertAlmostEqual(sample.io_samples[1].read_bytes, 21.0) self.assertAlmostEqual(sample.cpu_samples[0].cpu_percent, sum(range(10, 16)) / 6.0) self.assertAlmostEqual(sample.cpu_samples[1].cpu_percent, sum(range(16, 22)) / 6.0) self.assertAlmostEqual(sample.cpu_samples[0].user_cpu_time, 15.0) self.assertAlmostEqual(sample.cpu_samples[1].system_cpu_time, 31.0)
def EnumerateInterfaces(self, responses):
  """Enumerates the interfaces."""
  if not (responses.success and responses):
    self.Log("Could not enumerate interfaces: %s" % responses.status)
    return

  if data_store.AFF4Enabled():
    # AFF4 client.
    with self._CreateClient() as client:
      interface_list = client.Schema.INTERFACES()
      mac_addresses = []
      ip_addresses = []
      for response in responses:
        interface_list.Append(response)

        # Add a hex encoded string for searching
        # (skip empty or all-zero MAC addresses).
        if (response.mac_address and
            response.mac_address != "\x00" * len(response.mac_address)):
          mac_addresses.append(response.mac_address.human_readable_address)

        for address in response.addresses:
          if address.human_readable_address not in self.FILTERED_IPS:
            ip_addresses.append(address.human_readable_address)

      client.Set(client.Schema.MAC_ADDRESS("\n".join(mac_addresses)))
      client.Set(client.Schema.HOST_IPS("\n".join(ip_addresses)))
      client.Set(client.Schema.INTERFACES(interface_list))

  # rdf_objects.ClientSnapshot.
  self.state.client.interfaces = sorted(responses, key=lambda i: i.ifname)