def testStartupHandler(self):
  with test_lib.ConfigOverrider({
      "Database.useForReads": True,
      "Database.useForReads.message_handlers": True,
  }):
    client_id = self.SetupClient(0).Basename()

    self._RunSendStartupInfo(client_id)

    si = data_store.REL_DB.ReadClientStartupInfo(client_id)
    self.assertIsNotNone(si)
    self.assertEqual(si.client_info.client_name, config.CONFIG["Client.name"])
    self.assertEqual(si.client_info.client_description,
                     config.CONFIG["Client.description"])

    # Run it again - this should not update any record.
    self._RunSendStartupInfo(client_id)
    new_si = data_store.REL_DB.ReadClientStartupInfo(client_id)
    self.assertEqual(new_si, si)

    # Simulate a reboot.
    current_boot_time = psutil.boot_time()
    with utils.Stubber(psutil, "boot_time", lambda: current_boot_time + 600):

      # Run it again - this should now update the boot time.
      self._RunSendStartupInfo(client_id)
      new_si = data_store.REL_DB.ReadClientStartupInfo(client_id)
      self.assertIsNotNone(new_si)
      self.assertNotEqual(new_si.boot_time, si.boot_time)

      # Now set a new client build time.
      with test_lib.ConfigOverrider({"Client.build_time": time.ctime()}):

        # Run it again - this should now update the client info.
        self._RunSendStartupInfo(client_id)
        new_si = data_store.REL_DB.ReadClientStartupInfo(client_id)
        self.assertIsNotNone(new_si)
        self.assertNotEqual(new_si.client_info, si.client_info)
def testExecuteBinariesWithArgs(self):
  client_mock = action_mocks.ActionMock(standard.ExecuteBinaryCommand)

  code = b"I am a binary file"
  upload_path = config.CONFIG["Executables.aff4_path"].Add("test.exe")

  maintenance_utils.UploadSignedConfigBlob(
      code, aff4_path=upload_path, token=self.token)

  # This flow has an acl, the user needs to be admin.
  user = aff4.FACTORY.Create(
      "aff4:/users/%s" % self.token.username,
      mode="rw",
      aff4_type=users.GRRUser,
      token=self.token)
  user.SetLabel("admin", owner="GRRTest")
  user.Close()

  with utils.Stubber(subprocess, "Popen", client_test_lib.Popen):
    flow_test_lib.TestFlowHelper(
        administrative.LaunchBinary.__name__,
        client_mock,
        client_id=test_lib.TEST_CLIENT_ID,
        binary=upload_path,
        command_line="--value 356",
        token=self.token)

    # Check that the executable file contains the code string.
    self.assertEqual(client_test_lib.Popen.binary, code)

    # At this point, the actual binary should have been cleaned up by the
    # client action so it should not exist.
    self.assertRaises(IOError, open, client_test_lib.Popen.running_args[0])

    # Check the binary was run with the correct command line.
    self.assertEqual(client_test_lib.Popen.running_args[1], "--value")
    self.assertEqual(client_test_lib.Popen.running_args[2], "356")

    # Check the command was in the tmp file.
    self.assertTrue(client_test_lib.Popen.running_args[0].startswith(
        config.CONFIG["Client.tempdir_roots"][0]))
def testNannyMessageHandlerForUnknownClient(self):
  client_id = "C.1000000000000000"
  nanny_message = "Oh no!"
  email_dict = {}

  def SendEmail(address, sender, title, message, **_):
    email_dict.update(
        dict(address=address, sender=sender, title=title, message=message))

  with test_lib.ConfigOverrider({
      "Database.useForReads": True,
      "Database.useForReads.message_handlers": True
  }):
    with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmail):
      flow_test_lib.MockClient(client_id, None)._PushHandlerMessage(
          rdf_flows.GrrMessage(
              source=client_id,
              session_id=rdfvalue.SessionID(flow_name="NannyMessage"),
              payload=rdf_protodict.DataBlob(string=nanny_message),
              request_id=0,
              auth_state="AUTHENTICATED",
              response_id=123))

  # We expect the email to be sent.
  self.assertEqual(
      email_dict.get("address"), config.CONFIG["Monitoring.alert_email"])

  # Make sure the message is included in the email message.
  self.assertIn(nanny_message, email_dict["message"])

  if data_store.RelationalDBReadEnabled():
    self.assertIn(client_id, email_dict["title"])
  else:
    self.assertIn(client_id.Basename(), email_dict["title"])
def testIndexedReads(self):
  spacing = 10
  with utils.Stubber(sequential_collection.IndexedSequentialCollection,
                     "INDEX_SPACING", spacing):
    urn = "aff4:/sequential_collection/testIndexedReads"
    collection = self._TestCollection(urn)
    data_size = 4 * spacing
    # TODO(amoser): Without using a mutation pool, this test is really
    # slow on MySQL data store.
    with data_store.DB.GetMutationPool() as pool:
      for i in range(data_size):
        collection.StaticAdd(
            rdfvalue.RDFURN(urn), rdfvalue.RDFInteger(i), mutation_pool=pool)

    with test_lib.FakeTime(rdfvalue.RDFDatetime.Now() +
                           rdfvalue.Duration("10m")):
      for i in range(data_size - 1, data_size - 20, -1):
        self.assertEqual(collection[i], i)
      for i in [spacing - 1, spacing, spacing + 1]:
        self.assertEqual(collection[i], i)
      for i in range(data_size - spacing + 5, data_size - spacing - 5, -1):
        self.assertEqual(collection[i], i)
def testEnumerateFilesystemsLinux(self):
  """Enumerate filesystems."""

  def MockCheckMounts(unused_filename):
    del unused_filename  # Unused.

    device = "/dev/mapper/dhcp--100--104--9--24--vg-root"
    fs_type = "ext4"
    mnt_point = "/"

    yield device, fs_type, mnt_point

  with utils.Stubber(linux, "CheckMounts", MockCheckMounts):
    results = self.RunAction(linux.EnumerateFilesystems)

  expected = rdf_client_fs.Filesystem(
      mount_point="/",
      type="ext4",
      device="/dev/mapper/dhcp--100--104--9--24--vg-root")

  self.assertLen(results, 2)
  for result in results:
    self.assertEqual(result, expected)
def testCmdArtifact(self):
  """Check we can run command based artifacts and get anomalies."""
  client_id = test_lib.TEST_CLIENT_ID
  client_mock = self.MockClient(standard.ExecuteCommand, client_id=client_id)
  with utils.Stubber(subprocess, "Popen", client_test_lib.Popen):
    session_id = flow_test_lib.TestFlowHelper(
        collectors.ArtifactCollectorFlow.__name__,
        client_mock,
        client_id=client_id,
        use_tsk=False,
        artifact_list=["TestCmdArtifact"],
        token=self.token)

  results = flow_test_lib.GetFlowResults(client_id, session_id)
  self.assertEqual(len(results), 3)

  packages = [
      p for p in results if isinstance(p, rdf_client.SoftwarePackage)
  ]
  self.assertEqual(len(packages), 2)

  anomalies = [a for a in results if isinstance(a, rdf_anomaly.Anomaly)]
  self.assertEqual(len(anomalies), 1)
  self.assertIn("gremlin", anomalies[0].symptom)
def testRunGrrClientActionArtifact(self):
  """Test we can get a GRR client artifact."""
  client_id = self.SetupClient(0, system="Linux")
  with utils.Stubber(psutil, "process_iter", ProcessIter):
    client_mock = action_mocks.ActionMock(standard.ListProcesses)

    coll1 = rdf_artifacts.ArtifactSource(
        type=rdf_artifacts.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
        attributes={"client_action": standard.ListProcesses.__name__})
    self.fakeartifact.sources.append(coll1)
    artifact_list = ["FakeArtifact"]
    session_id = flow_test_lib.TestFlowHelper(
        aff4_flows.ArtifactCollectorFlow.__name__,
        client_mock,
        artifact_list=artifact_list,
        token=self.token,
        client_id=client_id)

    results = flow_test_lib.GetFlowResults(client_id, session_id)
    self.assertIsInstance(results[0], rdf_client.Process)
    self.assertLen(results, 1)
def testPerProcessTimeout(self):
  FakeRules.invocations = []
  procs = [
      p for p in self.procs if p.pid in [101, 102, 103, 104, 105, 106, 107]
  ]

  with utils.Stubber(rdf_memory.YaraSignature, "GetRules", TimeoutRules):
    matches, errors, misses = self._RunYaraProcessScan(
        procs,
        per_process_timeout=50,
        include_errors_in_results=True,
        include_misses_in_results=True)

  self.assertEmpty(matches)
  self.assertLen(errors, 6)
  self.assertEmpty(misses)
  for e in errors:
    if e.process.pid in [101, 106]:
      self.assertEqual("Access Denied.", e.error)
    else:
      self.assertIn("Scanning timed out", e.error)
def testGRRClientActionListProcesses(self):
  """Test the GRR Client Action ListProcesses."""

  def ProcessIter():
    return iter([client_test_lib.MockWindowsProcess()])

  source = rdf_artifact.ArtifactSource(
      type=rdf_artifact.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
      attributes={"client_action": "ListProcesses"})
  ext_src = rdf_artifact.ExpandedSource(base_source=source)
  ext_art = rdf_artifact.ExpandedArtifact(
      name="TestClientActionArtifact", sources=[ext_src])
  request = rdf_artifact.ClientArtifactCollectorArgs(
      artifacts=[ext_art], apply_parsers=False)

  with utils.Stubber(psutil, "process_iter", ProcessIter):
    result = self.RunAction(artifact_collector.ArtifactCollector, request)[0]
    collected_artifact = result.collected_artifacts[0]
    value = collected_artifact.action_results[0].value

    self.assertIsInstance(value, rdf_client.Process)
    self.assertEqual(value.pid, 10)
def testNoRequestChildFlowRace(self):
  manager = queue_manager.QueueManager(token=self.token)
  self.old_notify = manager._MultiNotifyQueue

  with utils.Stubber(queue_manager.QueueManager, "_MultiNotifyQueue",
                     self.CollectNotifications):
    session_id = flow.StartFlow(
        client_id=self.client_id,
        flow_name="NoRequestParentFlow",
        token=self.token)

  self.assertIn(session_id, self.notifications)

  f = aff4.FACTORY.Open(session_id, token=self.token)

  # Check that the first notification came in after the flow was created.
  self.assertLess(
      int(f.Get(f.Schema.TYPE).age),
      1e6 * min(self.notifications[session_id]),
      "There was a notification for a flow before "
      "the flow was created.")
def testServerErrorInApiShowsErrorButton(self):

  def MockRender(self, args, context):  # pylint: disable=unused-argument
    """Fake render method to force an exception."""
    raise RuntimeError("This is another forced exception")

  with self.DisableHttpErrorChecks():
    # By mocking out Handle, we can force an exception.
    with utils.Stubber(ApiSearchClientsHandler, "Handle", MockRender):
      self.Open("/")
      self.Click("client_query_submit")

      # Open server error dialog.
      self.Click("css=button#show_backtrace")

      # Check if message and traceback are shown.
      self.WaitUntilContains("This is another forced exception", self.GetText,
                             "css=div[name=ServerErrorDialog]")
      self.WaitUntilContains("Traceback (most recent call last):",
                             self.GetText, "css=div[name=ServerErrorDialog]")
def _RunTSKFileFinder(self, paths):
  image_path = os.path.join(self.base_path, "ntfs_img.dd")
  with utils.Stubber(
      vfs, "VFS_VIRTUALROOTS", {
          rdf_paths.PathSpec.PathType.TSK:
              rdf_paths.PathSpec(
                  path=image_path, pathtype="OS", offset=63 * 512)
      }):

    action = rdf_file_finder.FileFinderAction.Action.DOWNLOAD
    with test_lib.SuppressLogs():
      flow_test_lib.TestFlowHelper(
          file_finder.FileFinder.__name__,
          self.client_mock,
          client_id=self.client_id,
          paths=paths,
          pathtype=rdf_paths.PathSpec.PathType.TSK,
          action=rdf_file_finder.FileFinderAction(action_type=action),
          token=self.token)
def ValidateConfigs(self, configs):
  test_filter_map = copy.deepcopy(config_lib.ConfigFilter.classes_by_name)
  for filter_name in self.disabled_filters:
    test_filter_map[filter_name] = config_lib.ConfigFilter

  with utils.Stubber(config_lib.ConfigFilter, "classes_by_name",
                     test_filter_map):
    for config_file in configs:
      errors = self.ValidateConfig(config_file)

      for exception in self.exceptions:
        errors.pop(exception, None)

      if errors:
        logging.info("Validation of %s returned errors:", config_file)
        for config_entry, error in iteritems(errors):
          logging.info("%s:", config_entry)
          logging.info("%s", error)

        self.fail("Validation of %s returned errors: %s" %
                  (config_file, errors))
def testPerProcessTimeout(self):
  FakeRules.invocations = []
  procs = [
      p for p in self.procs if p.pid in [101, 102, 103, 104, 105, 106, 107]
  ]

  with utils.Stubber(yara, "compile", lambda source: TimeoutRules()):
    matches, errors, misses = self._RunYaraProcessScan(
        procs,
        per_process_timeout=50,
        include_errors_in_results="ALL_ERRORS",
        include_misses_in_results=True)

  self.assertEmpty(matches)
  self.assertLen(errors, 6)
  self.assertEmpty(misses)
  for e in errors:
    if e.process.pid in [101, 106]:
      self.assertEqual("Access Denied.", e.error)
    else:
      self.assertIn("Scanning timed out", e.error)
def testStartupHandler(self):
  client_id = self.SetupClient(0)

  self._RunSendStartupInfo(client_id)

  si = data_store.REL_DB.ReadClientStartupInfo(client_id)
  self.assertIsNotNone(si)
  self.assertEqual(si.client_info.client_name, config.CONFIG["Client.name"])
  self.assertEqual(si.client_info.client_description,
                   config.CONFIG["Client.description"])

  # Run it again - this should not update any record.
  self._RunSendStartupInfo(client_id)
  new_si = data_store.REL_DB.ReadClientStartupInfo(client_id)
  self.assertEqual(new_si, si)

  # Simulate a reboot.
  current_boot_time = psutil.boot_time()
  with utils.Stubber(psutil, "boot_time", lambda: current_boot_time + 600):

    # Run it again - this should now update the boot time.
    self._RunSendStartupInfo(client_id)
    new_si = data_store.REL_DB.ReadClientStartupInfo(client_id)
    self.assertIsNotNone(new_si)
    self.assertNotEqual(new_si.boot_time, si.boot_time)

    # Now set a new client build time.
    build_time = compatibility.FormatTime("%a %b %d %H:%M:%S %Y")
    with test_lib.ConfigOverrider({"Client.build_time": build_time}):

      # Run it again - this should now update the client info.
      self._RunSendStartupInfo(client_id)
      new_si = data_store.REL_DB.ReadClientStartupInfo(client_id)
      self.assertIsNotNone(new_si)
      self.assertNotEqual(new_si.client_info, si.client_info)
def testUpdateConfig(self):
  """Ensure we can retrieve and update the config."""

  # Write a client without a proper system so we don't need to
  # provide the os specific artifacts in the interrogate flow below.
  client_id = self.SetupClient(0, system="")

  # Only mock the pieces we care about.
  client_mock = action_mocks.ActionMock(admin.GetConfiguration,
                                        admin.UpdateConfiguration)

  loc = "http://www.example.com"
  new_config = rdf_protodict.Dict({
      "Client.server_urls": [loc],
      "Client.foreman_check_frequency": 3600,
      "Client.poll_min": 1
  })

  # Setting config options is disallowed in tests so we need to temporarily
  # revert this.
  with utils.Stubber(config.CONFIG, "Set", config.CONFIG.Set.old_target):
    # Write the config.
    flow_test_lib.TestFlowHelper(
        administrative.UpdateConfiguration.__name__,
        client_mock,
        client_id=client_id,
        token=self.token,
        config=new_config)

  # Now retrieve it again to see if it got written.
  flow_test_lib.TestFlowHelper(
      discovery.Interrogate.__name__,
      client_mock,
      token=self.token,
      client_id=client_id)

  fd = aff4.FACTORY.Open(client_id, token=self.token)
  config_dat = fd.Get(fd.Schema.GRR_CONFIGURATION)
  self.assertEqual(config_dat["Client.server_urls"], [loc])
  self.assertEqual(config_dat["Client.poll_min"], 1)
def testVFSFileContentLastNotUpdated(self):
  """Make sure CONTENT_LAST does not update when only STAT is written."""
  path = "/C.12345/contentlastchecker"

  timestamp = 1
  with utils.Stubber(time, "time", lambda: timestamp):
    fd = aff4.FACTORY.Create(
        path, aff4_grr.VFSFile, mode="w", token=self.token)

    timestamp += 1
    fd.SetChunksize(10)

    # Make lots of small writes - The length of this string and the chunk size
    # are relative primes for worst case.
    for i in range(100):
      fd.Write("%s%08X\n" % ("Test", i))

      # Flush after every write.
      fd.Flush()

      # And advance the time.
      timestamp += 1

    fd.Set(fd.Schema.STAT, rdf_client_fs.StatEntry())

    fd.Close()

  fd = aff4.FACTORY.Open(path, mode="rw", token=self.token)
  # Make sure the attribute was written when the write occurred.
  self.assertEqual(int(fd.GetContentAge()), 101000000)

  # Write the stat (to be the same as before, but this still counts
  # as a write).
  fd.Set(fd.Schema.STAT, fd.Get(fd.Schema.STAT))
  fd.Flush()

  fd = aff4.FACTORY.Open(path, token=self.token)

  # The age of the content should still be the same.
  self.assertEqual(int(fd.GetContentAge()), 101000000)
def testHuntIsStoppedIfAveragePerClientNetworkUsageTooHigh(self):
  with utils.Stubber(implementation.GRRHunt,
                     "MIN_CLIENTS_FOR_AVERAGE_THRESHOLDS", 4):
    hunt_urn = self.StartHunt(
        avg_network_bytes_per_client_limit=1, token=self.token)

    def RunOnClients(client_ids, network_bytes_sent):
      self.AssignTasksToClients(client_ids)
      self.RunHunt(
          client_ids=client_ids, network_bytes_sent=network_bytes_sent)

    def CheckState(expected_state, expected_network_bytes_sent):
      hunt_obj = aff4.FACTORY.Open(hunt_urn, token=self.token)
      self.assertEqual(hunt_obj.Get(hunt_obj.Schema.STATE), expected_state)
      self.assertEqual(hunt_obj.context.network_bytes_sent,
                       expected_network_bytes_sent)

    RunOnClients(self.client_ids[:2], 1)
    # Hunt should still be running: we need at least 4 clients to start
    # calculating the average.
    CheckState("STARTED", 2)

    RunOnClients([self.client_ids[2]], 2)
    # Hunt should still be running: even though the average is higher than
    # the limit, the number of clients is not enough.
    CheckState("STARTED", 4)

    RunOnClients([self.client_ids[3]], 0)
    # Hunt should still be running: we got 4 clients, which is enough to check
    # average per-client network bytes usage, but 4 bytes for 4 clients is
    # within the limit of 1 byte per client on average.
    CheckState("STARTED", 4)

    RunOnClients([self.client_ids[4]], 2)
    # Hunt should be terminated: the limit is exceeded.
    CheckState("STOPPED", 6)

    self._CheckHuntStoppedNotification(
        "reached the average network bytes per client")
def testRunGrrClientActionArtifactSplit(self):
  """Test that artifacts get split into separate collections."""
  client_id = self.SetupClient(0, system="Linux")
  with utils.Stubber(psutil, "process_iter", ProcessIter):
    client_mock = action_mocks.ActionMock(standard.ListProcesses)

    coll1 = rdf_artifacts.ArtifactSource(
        type=rdf_artifacts.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
        attributes={"client_action": standard.ListProcesses.__name__})
    self.fakeartifact.sources.append(coll1)
    self.fakeartifact2.sources.append(coll1)
    artifact_list = ["FakeArtifact", "FakeArtifact2"]
    session_id = flow_test_lib.TestFlowHelper(
        collectors.ArtifactCollectorFlow.__name__,
        client_mock,
        artifact_list=artifact_list,
        token=self.token,
        client_id=client_id,
        split_output_by_artifact=True)

    results_by_tag = flow_test_lib.GetFlowResultsByTag(client_id, session_id)
    self.assertCountEqual(results_by_tag.keys(),
                          ["artifact:FakeArtifact", "artifact:FakeArtifact2"])
def Run(self):
  stats_collector = stats.StatsCollector()

  stats_collector.RegisterCounterMetric(
      "sample_counter", docstring="Sample counter metric.")
  stats_collector.RegisterGaugeMetric(
      "sample_gauge_value", str, docstring="Sample gauge metric.")
  stats_collector.RegisterEventMetric(
      "sample_event", docstring="Sample event metric.")

  with utils.Stubber(stats, "STATS", stats_collector):
    with aff4.FACTORY.Create(
        None, aff4_stats_store.StatsStore, mode="w",
        token=self.token) as stats_store:
      stats_store.WriteStats(process_id="worker_1")

  self.Check(
      "ListStatsStoreMetricsMetadata",
      args=stats_plugin.ApiListStatsStoreMetricsMetadataArgs(
          component="WORKER"))
def testShowsNotificationIfArchiveStreamingFailsInProgress(self):
  hunt = self._CreateHuntWithDownloadedFile()
  self.RequestAndGrantHuntApproval(hunt.urn.Basename())

  def RaisingStub(*unused_args, **unused_kwargs):
    yield b"foo"
    yield b"bar"
    raise RuntimeError("something went wrong")

  with utils.Stubber(api_call_handler_utils.CollectionArchiveGenerator,
                     "Generate", RaisingStub):
    self.Open("/")
    self.Click("css=a[grrtarget=hunts]")
    self.Click("css=td:contains('GenericHunt')")
    self.Click("css=li[heading=Results]")
    self.Click("css=button.DownloadButton")

    self.WaitUntil(self.IsUserNotificationPresent,
                   "Archive generation failed for hunt")
    # There will be no failure message, as we can't get a status from an
    # iframe that triggers the download.
    self.WaitUntilNot(self.IsTextPresent,
                      "Can't generate archive: Unknown error")
def testClientRetransmission(self):
  """Test that client retransmits failed messages."""
  fail = True
  num_messages = 10

  def FlakyServer(url=None, **kwargs):
    if not fail or "server.pem" in url:
      return self.UrlMock(num_messages=num_messages, url=url, **kwargs)

    raise MakeHTTPException(500)

  with utils.Stubber(requests, "request", FlakyServer):
    self.SendToServer()
    status = self.client_communicator.RunOnce()
    self.assertEqual(status.code, 500)

    # Server should not receive anything.
    self.assertEmpty(self.messages)

    # Try to send these messages again.
    fail = False

    self.assertEqual(self.client_communicator.client_worker.InQueueSize(), 0)

    status = self.client_communicator.RunOnce()

    self.assertEqual(status.code, 200)

    # We have received 10 client messages.
    self.assertEqual(self.client_communicator.client_worker.InQueueSize(), 10)
    self.CheckClientQueue()

    # Server should have received 10 messages this time.
    self.assertLen(self.messages, 10)
def testLinuxNanny(self):
  """Tests the linux nanny."""

  def MockExit(unused_value):
    raise RuntimeError("Exit was called.")

  now = rdfvalue.RDFDatetime.Now()

  with utils.Stubber(os, "_exit", MockExit):
    nanny = client_utils_osx_linux.NannyThread(unresponsive_kill_period=5)

    with test_lib.FakeTime(now):
      nanny.Heartbeat()

    for i in range(10):
      with test_lib.FakeTime(now + i * rdfvalue.Duration("1s")):
        nanny._CheckHeartbeatDeadline(nanny.last_heart_beat_time +
                                      nanny.unresponsive_kill_period)
        nanny.Heartbeat()

    with test_lib.FakeTime(now + (10 + 5) * rdfvalue.Duration("1s")):
      with self.assertRaises(RuntimeError):
        nanny._CheckHeartbeatDeadline(nanny.last_heart_beat_time +
                                      nanny.unresponsive_kill_period)
def testSendsEmailWithLinkToNewUi(self):
  email_messages = []

  def SendEmailStub(to_addresses, from_address, subject, message, **kwargs):
    del to_addresses, from_address, subject, kwargs  # Unused.
    email_messages.append(message)

  with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmailStub):
    approval_id = self.RequestClientApproval(
        self.client_id,
        requestor=self.context.username,
        reason="blah",
        approver=u"approver",
        email_cc_address="*****@*****.**")

  self.assertLen(email_messages, 1)
  self.assertIn(
      (f"http://localhost:8000/v2/clients/{self.client_id}/users/"
       f"{self.context.username}/approvals/{approval_id}"),
      email_messages[0])
  # Check for correct link to legacy UI. Remove once new UI is stable.
  self.assertIn(
      (f"http://localhost:8000/#/users/{self.context.username}/approvals/"
       f"client/{self.client_id}/{approval_id}"),
      email_messages[0])
def testRunGrrClientActionArtifact(self):
  """Test we can get a GRR client artifact."""
  with utils.Stubber(psutil, "process_iter", ProcessIter):
    client_mock = action_mocks.ActionMock(standard.ListProcesses)
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Linux"))
    client.Flush()

    coll1 = rdf_artifacts.ArtifactSource(
        type=rdf_artifacts.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
        attributes={"client_action": standard.ListProcesses.__name__})
    self.fakeartifact.sources.append(coll1)
    artifact_list = ["FakeArtifact"]
    session_id = flow_test_lib.TestFlowHelper(
        aff4_flows.ArtifactCollectorFlow.__name__,
        client_mock,
        artifact_list=artifact_list,
        token=self.token,
        client_id=self.client_id)

    fd = flow.GRRFlow.ResultCollectionForFID(session_id)
    self.assertIsInstance(list(fd)[0], rdf_client.Process)
    self.assertLen(fd, 1)
def testIPInfo(self):
  args = []

  def MockGetNameInfo(ip, unused_flags):
    args.append(ip)
    return "test.com", ip[1]

  resolver = ip_resolver.IPResolver()
  with utils.Stubber(socket, "getnameinfo", MockGetNameInfo):
    for ip, result in [
        ("192.168.0.1", ip_resolver.IPInfo.INTERNAL),
        ("10.0.0.7", ip_resolver.IPInfo.INTERNAL),
        ("::1", ip_resolver.IPInfo.INTERNAL),
        ("69.50.225.155", ip_resolver.IPInfo.EXTERNAL),
        ("69.50.225.155", ip_resolver.IPInfo.EXTERNAL),
    ]:
      info, _ = resolver.RetrieveIPInfo(ipaddress.ip_address(ip))
      self.assertEqual(info, result)

  # There is one external address but it was resolved twice. There is a cache
  # so getnameinfo should have been called only once.
  self.assertLen(args, 1)
def testClientAlertHandler(self):
  client_id = self.SetupClient(0)
  client_message = "Oh no!"

  email_dict = {}

  def SendEmail(address, sender, title, message, **_):
    email_dict.update(
        dict(address=address, sender=sender, title=title, message=message))

  with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmail):
    flow_test_lib.MockClient(client_id, None)._PushHandlerMessage(
        rdf_flows.GrrMessage(
            source=client_id,
            session_id=rdfvalue.SessionID(flow_name="ClientAlert"),
            payload=rdf_protodict.DataBlob(string=client_message),
            request_id=0,
            auth_state="AUTHENTICATED",
            response_id=123))

  self._CheckAlertEmail(client_id, client_message, email_dict)
def testExecuteBinariesWithArgs(self):
  client_mock = action_mocks.ActionMock(standard.ExecuteBinaryCommand)

  code = b"I am a binary file"
  upload_path = signed_binary_utils.GetAFF4ExecutablesRoot().Add(
      config.CONFIG["Client.platform"]).Add("test.exe")

  maintenance_utils.UploadSignedConfigBlob(
      code, aff4_path=upload_path, token=self.token)

  # This flow has an acl, the user needs to be admin.
  acl_test_lib.CreateAdminUser(self.token.username)

  with utils.Stubber(subprocess, "Popen", client_test_lib.Popen):
    flow_test_lib.TestFlowHelper(
        administrative.LaunchBinary.__name__,
        client_mock,
        client_id=self.SetupClient(0),
        binary=upload_path,
        command_line="--value 356",
        token=self.token)

    # Check that the executable file contains the code string.
    self.assertEqual(client_test_lib.Popen.binary, code)

    # At this point, the actual binary should have been cleaned up by the
    # client action so it should not exist.
    self.assertRaises(IOError, open, client_test_lib.Popen.running_args[0])

    # Check the binary was run with the correct command line.
    self.assertEqual(client_test_lib.Popen.running_args[1], "--value")
    self.assertEqual(client_test_lib.Popen.running_args[2], "356")

    # Check the command was in the tmp file.
    self.assertTrue(client_test_lib.Popen.running_args[0].startswith(
        config.CONFIG["Client.tempdir_roots"][0]))
def testFailToCreateThread(self):
  """Test that we handle thread creation problems ok."""
  # The pool starts off with the minimum number of threads.
  self.assertLen(self.test_pool, self.NUMBER_OF_THREADS)

  done_event = threading.Event()

  def Block(done):
    done.wait()

  def RaisingStart(_):
    raise threading.ThreadError()

  # Now simulate failure of creating threads.
  with utils.Stubber(threadpool._WorkerThread, "start", RaisingStart):
    # Fill all the existing threads and wait for them to become busy.
    self.test_pool.AddTask(Block, (done_event,))
    self.WaitUntil(
        lambda: self.test_pool.busy_threads == self.NUMBER_OF_THREADS)

    # Now fill the queue completely.
    for _ in range(self.MAXIMUM_THREADS):
      self.test_pool.AddTask(Block, (done_event,))

    # Trying to push this task will overflow the queue, and would normally
    # cause a new thread to start. We use non blocking mode to receive the
    # exception.
    self.assertRaises(
        threadpool.Full,
        self.test_pool.AddTask,
        Block, (done_event,),
        blocking=False,
        inline=False)

    # Release the blocking tasks.
    done_event.set()
    self.test_pool.Join()
def Run(self):
  client_id = self.SetupClient(0)

  replace = {}
  with test_lib.FakeTime(42):
    flow_urn = flow.StartAFF4Flow(
        client_id=client_id,
        flow_name=processes.ListProcesses.__name__,
        token=self.token)
    replace[flow_urn.Basename()] = "F:123456"

    test_process = client_test_lib.MockWindowsProcess(name="test_process")
    with utils.Stubber(psutil, "Process", lambda: test_process):
      # Here we emulate a mock client with no actions (None) that
      # should produce an error.
      mock = flow_test_lib.MockClient(client_id, None, token=self.token)
      while mock.Next():
        pass

  manager = queue_manager.QueueManager(token=self.token)
  requests_responses = manager.FetchRequestsAndResponses(flow_urn)
  for request, responses in requests_responses:
    replace[str(request.request.task_id)] = "42"
    for response in responses:
      replace[str(response.task_id)] = "43"

  self.Check(
      "ListClientActionRequests",
      args=client_plugin.ApiListClientActionRequestsArgs(
          client_id=client_id.Basename()),
      replace=replace)
  self.Check(
      "ListClientActionRequests",
      args=client_plugin.ApiListClientActionRequestsArgs(
          client_id=client_id.Basename(), fetch_responses=True),
      replace=replace)