def setUp(self):
  super(RouterMatcherTest, self).setUp()
  self.config_overrider = test_lib.ConfigOverrider(
      {"API.DefaultRouter": TestHttpApiRouter.__name__})
  self.config_overrider.Start()
  self.router_matcher = http_api.RouterMatcher()

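# Sketch (assumed, not from the original suite): an overrider started in
# setUp is normally stopped in a matching tearDown so that later tests see
# the original configuration again. The same pattern applies to the other
# setUp methods below that call Start().
def tearDown(self):
  super(RouterMatcherTest, self).tearDown()
  self.config_overrider.Stop()
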
def _UploadFile(self, args):
  self.client_id = self.SetupClients(1)[0]
  with test_lib.ConfigOverrider({"Client.server_urls": [self.base_url]}):
    client = comms.GRRHTTPClient(
        ca_cert=config_lib.CONFIG["CA.certificate"],
        private_key=config_lib.CONFIG.Get("Client.private_key", default=None))
    client.server_certificate = config_lib.CONFIG["Frontend.certificate"]

    def MockSendReply(_, reply):
      self.reply = reply

    @classmethod
    def FromPrivateKey(*_):
      """Returns the correct client id.

      The test framework does not generate valid client ids (which should be
      derived from the client's private key), so we mock the method and
      override the result.

      Returns:
        The correct client_id.
      """
      return self.client_id

    with utils.MultiStubber(
        (standard.UploadFile, "SendReply", MockSendReply),
        (rdf_client.ClientURN, "FromPrivateKey", FromPrivateKey)):
      action = standard.UploadFile(client.client_worker)
      action.Run(args)

    return self.reply

def testWellKnownFlowsBlacklist(self):
  """Make sure that blacklisted well known flows are not processed."""
  with test_lib.ConfigOverrider({
      "Frontend.DEBUG_well_known_flows_blacklist": [
          utils.SmartStr(
              test_lib.WellKnownSessionTest.well_known_session_id.FlowName())
      ]
  }):
    self.InitTestServer()

    test_lib.WellKnownSessionTest.messages = []
    session_id = test_lib.WellKnownSessionTest.well_known_session_id
    messages = [
        rdf_flows.GrrMessage(
            request_id=0,
            response_id=0,
            session_id=session_id,
            payload=rdfvalue.RDFInteger(i)) for i in range(1, 10)
    ]

    self.server.ReceiveMessages(self.client_id, messages)

    # Wait for async actions to complete.
    self.server.thread_pool.Join()

    # Check that no processing took place.
    self.assertFalse(test_lib.WellKnownSessionTest.messages)

    # There should be nothing in the client_queue.
    self.assertEqual([],
                     data_store.DB.ResolvePrefix(
                         self.client_id, "task:", token=self.token))

def testKeepsClientsWithRetainLabel(self):
  exception_label_name = config_lib.CONFIG[
      "DataRetention.inactive_client_ttl_exception_label"]

  for client_urn in self.client_urns[:3]:
    with aff4.FACTORY.Open(client_urn, mode="rw", token=self.token) as fd:
      fd.AddLabels(exception_label_name)

  with test_lib.ConfigOverrider(
      {"DataRetention.inactive_client_ttl": rdfvalue.Duration("10s")}):
    with test_lib.FakeTime(40 + 60 * self.NUM_CLIENT):
      flow.GRRFlow.StartFlow(
          flow_name=data_retention.CleanInactiveClients.__name__,
          sync=True,
          token=self.token)

    aff4_root = aff4.FACTORY.Open("aff4:/", mode="r", token=self.token)
    aff4_urns = list(aff4_root.ListChildren())
    client_urns = [x for x in aff4_urns if re.match(self.client_regex, str(x))]
    self.assertEqual(len(client_urns), 3)

def testDeletesInactiveClientsWithAgeOlderThanGivenAge(self):
  with test_lib.ConfigOverrider(
      {"DataRetention.inactive_client_ttl": rdfvalue.Duration("300s")}):
    with test_lib.FakeTime(40 + 60 * self.NUM_CLIENT):
      flow.GRRFlow.StartFlow(
          flow_name=data_retention.CleanInactiveClients.__name__,
          sync=True,
          token=self.token)
      latest_timestamp = rdfvalue.RDFDatetime.Now()

    aff4_root = aff4.FACTORY.Open("aff4:/", mode="r", token=self.token)
    aff4_urns = list(aff4_root.ListChildren())
    client_urns = [x for x in aff4_urns if re.match(self.client_regex, str(x))]
    self.assertEqual(len(client_urns), 5)

    for client_urn in client_urns:
      client = aff4.FACTORY.Open(client_urn, mode="r", token=self.token)
      self.assertLess(client.Get(client.Schema.LAST), latest_timestamp)
      self.assertGreaterEqual(
          client.Get(client.Schema.LAST),
          latest_timestamp - rdfvalue.Duration("300s"))

def testNoTraceOfDeletedHuntIsLeftInTheDataStore(self):
  with test_lib.ConfigOverrider(
      {"DataRetention.hunts_ttl": rdfvalue.Duration("1s")}):
    with test_lib.FakeTime(40 + 60 * self.NUM_HUNTS):
      flow.GRRFlow.StartFlow(
          flow_name=data_retention.CleanHunts.__name__,
          sync=True,
          token=self.token)

    for hunt_urn in self.hunts_urns:
      hunt_id = hunt_urn.Basename()

      # NOTE: We assume that tests are running with FakeDataStore.
      for subject, subject_data in data_store.DB.subjects.items():
        # Foreman rules are versioned, so hunt ids will be mentioned
        # there. Ignoring audit events as well.
        if subject == "aff4:/foreman" or subject.startswith("aff4:/audit"):
          continue

        self.assertNotIn(hunt_id, subject)

        for column_name, values in subject_data.items():
          self.assertNotIn(hunt_id, column_name)

          for value, _ in values:
            self.assertNotIn(hunt_id, utils.SmartUnicode(value))

def testKnowledgeBaseRetrievalLinuxNoUsers(self):
  """Cause a users.username dependency failure."""
  self.ClearKB()
  with test_lib.ConfigOverrider({
      "Artifacts.knowledge_base": [
          "NetgroupConfiguration", "NssCacheLinuxPasswdHomedirs",
          "LinuxRelease"
      ],
      "Artifacts.netgroup_filter_regexes": ["^doesntexist$"]
  }):
    with test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
                               test_lib.FakeTestDataVFSHandler):
      for _ in test_lib.TestFlowHelper(
          "KnowledgeBaseInitializationFlow",
          self.client_mock,
          require_complete=False,
          client_id=self.client_id,
          token=self.token):
        pass

      client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
      kb = artifact.GetArtifactKnowledgeBase(client)
      self.assertEqual(kb.os_major_version, 14)
      self.assertEqual(kb.os_minor_version, 4)
      self.assertItemsEqual([x.username for x in kb.users], [])

def testDeleteMultipleRoots(self):
  tempdir1 = utils.JoinPath(self.temp_dir, "del_test1")
  tempdir2 = utils.JoinPath(self.temp_dir, "del_test2")
  tempdir3 = utils.JoinPath(self.temp_dir, "del_test3")

  os.makedirs(tempdir1)
  os.makedirs(tempdir2)
  # Omit tempdir3.

  file1 = utils.JoinPath(tempdir1, "file1")
  file2 = utils.JoinPath(tempdir2, "file2")

  with open(file1, "w") as fd:
    fd.write("something")
  with open(file2, "w") as fd:
    fd.write("something")

  self.assertTrue(os.path.exists(file1))
  self.assertTrue(os.path.exists(file2))

  with test_lib.ConfigOverrider(
      {"Client.tempdir_roots": [tempdir1, tempdir2, tempdir3]}):
    result = self.RunAction("DeleteGRRTempFiles", rdf_paths.PathSpec())

    self.assertEqual(len(result), 1)
    log = result[0].data
    self.assertIn(file1, log)
    self.assertIn(file2, log)
    self.assertNotIn(tempdir3, log)

def testSystemCronJobSetsStartTime(self):
  with test_lib.ConfigOverrider({
      "Cron.enabled_system_jobs": [
          "DummySystemCronJob", "DummySystemCronJobStartNow"
      ]
  }):
    with test_lib.FakeTime(100):
      now = rdfvalue.RDFDatetime().Now()
      cronjobs.ScheduleSystemCronFlows(token=self.token)

      random_time = "aff4:/cron/DummySystemCronJob"
      no_random_time = "aff4:/cron/DummySystemCronJobStartNow"

      random_time_job = aff4.FACTORY.Open(
          random_time, aff4_type="CronJob", token=self.token)
      no_random_time_job = aff4.FACTORY.Open(
          no_random_time, aff4_type="CronJob", token=self.token)

      start_time_now = no_random_time_job.Get(
          no_random_time_job.Schema.CRON_ARGS).start_time
      self.assertEqual(start_time_now, now)

      random_start_time = random_time_job.Get(
          random_time_job.Schema.CRON_ARGS).start_time
      self.assertTrue(
          now < random_start_time < now + DummySystemCronJob.frequency)

def testExtendsLeaseIfCompactionTakesTooLong(self):
  with aff4.FACTORY.Create(self.collection_urn,
                           collects.PackedVersionedCollection,
                           mode="w", token=self.token) as fd:
    elements = []
    for i in range(10):
      elements.append(rdf_flows.GrrMessage(request_id=i))
    fd.AddAll(elements)

  with test_lib.ConfigOverrider({"Worker.compaction_lease_time": 42}):
    with test_lib.FakeTime(20):
      # Lease time here is much less than compaction_lease_time, so the
      # collection will have to extend the lease immediately when
      # compaction starts.
      fd = aff4.FACTORY.OpenWithLock(self.collection_urn,
                                     collects.PackedVersionedCollection,
                                     lease_time=10, token=self.token)
      # CheckLease() returns the remaining lease time:
      # (time.time() + lease_time) - time.time() = 10.
      self.assertEqual(fd.CheckLease(), 10)

    with test_lib.FakeTime(29):
      fd.Compact()
      # Compaction should have extended the lease to compaction_lease_time.
      self.assertEqual(fd.CheckLease(), 42)

def testNoJournalEntriesAreAddedWhenJournalingIsDisabled(self):
  with test_lib.ConfigOverrider({
      "Worker.enable_packed_versioned_collection_journaling": False
  }):
    with aff4.FACTORY.Create(self.collection_urn,
                             collects.PackedVersionedCollection,
                             mode="w", token=self.token) as fd:
      fd.Add(rdf_flows.GrrMessage(request_id=42))
      fd.AddAll([rdf_flows.GrrMessage(request_id=43),
                 rdf_flows.GrrMessage(request_id=44)])

    collects.PackedVersionedCollection.AddToCollection(
        self.collection_urn,
        [rdf_flows.GrrMessage(request_id=1),
         rdf_flows.GrrMessage(request_id=2)],
        token=self.token)

    with aff4.FACTORY.OpenWithLock(self.collection_urn,
                                   token=self.token) as fd:
      fd.Compact()

    fd = aff4.FACTORY.Open(self.collection_urn,
                           age=aff4.ALL_TIMES,
                           token=self.token)
    self.assertFalse(fd.IsAttributeSet(fd.Schema.ADDITION_JOURNAL))
    self.assertFalse(fd.IsAttributeSet(fd.Schema.COMPACTION_JOURNAL))

def testDefaultRouterIsReturnedIfNoConfigFileDefined(self):
  """The default router is returned if no API.RouterACLConfigFile is defined."""
  with test_lib.ConfigOverrider({"API.RouterACLConfigFile": ""}):
    auth_mgr = api_auth_manager.APIAuthorizationManager()
    router = auth_mgr.GetRouterForUser("u1")
    self.assertEqual(router.__class__, DefaultDummyAuthManagerTestApiRouter)

def setUp(self):
  super(MultiShardedQueueManagerTest, self).setUp()

  self.config_overrider = test_lib.ConfigOverrider(
      {"Worker.queue_shards": 2})
  self.config_overrider.Start()

def setUp(self):
  super(HttpRequestHandlerTest, self).setUp()
  self.config_overrider = test_lib.ConfigOverrider(
      {"API.DefaultRouter": TestHttpApiRouter.__name__})
  self.config_overrider.Start()
  self.request_handler = http_api.HttpRequestHandler()

def InitDatastore(self):
  if self.disabled:
    raise unittest.SkipTest("Skipping since Mysql db is not reachable.")

  self.token = access_control.ACLToken(username="******",
                                       reason="Running tests")

  # Use separate tables for benchmarks / tests so they can be run in
  # parallel.
  with test_lib.ConfigOverrider({
      "Mysql.database_name": "grr_test_%s" % self.__class__.__name__,
      "Mysql.max_connect_wait": 2
  }):
    try:
      data_store.DB = mysql_advanced_data_store.MySQLAdvancedDataStore()
      data_store.DB.Initialize()
      data_store.DB.flusher_thread.Stop()
      data_store.DB.security_manager = test_lib.MockSecurityManager()
      data_store.DB.RecreateTables()
    except Exception as e:
      logging.debug("Error while connecting to MySQL db: %s.", e)
      MysqlAdvancedTestMixin.disabled = True
      raise unittest.SkipTest("Skipping since Mysql db is not reachable.")

def testKnowledgeBaseRetrievalLinuxPasswd(self):
  """Check we can retrieve a Linux kb."""
  self.ClearKB()
  with test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
                             test_lib.FakeTestDataVFSHandler):
    with test_lib.ConfigOverrider({
        "Artifacts.knowledge_base": [
            "LinuxWtmp", "LinuxPasswdHomedirs", "LinuxRelease"
        ],
        "Artifacts.knowledge_base_additions": [],
        "Artifacts.knowledge_base_skip": []
    }):
      for _ in test_lib.TestFlowHelper(
          "KnowledgeBaseInitializationFlow",
          self.client_mock,
          client_id=self.client_id,
          token=self.token):
        pass

      client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
      kb = artifact.GetArtifactKnowledgeBase(client)
      self.assertEqual(kb.os_major_version, 14)
      self.assertEqual(kb.os_minor_version, 4)
      # Users 1, 2 and 3 come from wtmp. Bert and Ernie are not present
      # (the Users fixture is overridden by the kb).
      self.assertItemsEqual([x.username for x in kb.users],
                            ["user1", "user2", "user3"])

      user = kb.GetUser(username="******")
      self.assertEqual(user.last_logon.AsSecondsFromEpoch(), 1296552099)
      self.assertEqual(user.homedir, "/home/user1")

      user = kb.GetUser(username="******")
      self.assertEqual(user.last_logon.AsSecondsFromEpoch(), 1296552102)
      self.assertEqual(user.homedir, "/home/user2")

      self.assertFalse(kb.GetUser(username="******"))

def setUp(self):
  super(TestAdministrativeFlows, self).setUp()

  test_tmp = os.environ.get("TEST_TMPDIR")
  if test_tmp:
    self.tempdir_overrider = test_lib.ConfigOverrider({})
    self.tempdir_overrider.Start()

def testKnowledgeBaseRetrievalDarwin(self):
  """Check we can retrieve a Darwin kb."""
  test_lib.ClientFixture(self.client_id, token=self.token)
  self.SetDarwinClient()
  with test_lib.ConfigOverrider({"Artifacts.knowledge_base": ["OSXUsers"]}):
    with test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
                               test_lib.ClientVFSHandlerFixture):
      client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile",
                                            "Find", "HashBuffer",
                                            "ListDirectory",
                                            "FingerprintFile")

      for _ in test_lib.TestFlowHelper(
          "KnowledgeBaseInitializationFlow",
          client_mock,
          client_id=self.client_id,
          token=self.token):
        pass

      client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
      kb = artifact.GetArtifactKnowledgeBase(client)
      self.assertEqual(kb.os_major_version, 10)
      self.assertEqual(kb.os_minor_version, 9)
      # scalzi comes from the /Users dir listing. Bert and Ernie are not
      # present (the Users fixture is overridden by the kb).
      self.assertItemsEqual([x.username for x in kb.users], ["scalzi"])

      user = kb.GetUser(username="******")
      self.assertEqual(user.homedir, "/Users/scalzi")

def testKnowledgeBaseRetrievalLinux(self):
  """Check we can retrieve a Linux kb."""
  self.ClearKB()
  with test_lib.ConfigOverrider({
      "Artifacts.knowledge_base": [
          "LinuxWtmp", "NetgroupConfiguration", "LinuxPasswdHomedirs",
          "LinuxRelease"
      ],
      "Artifacts.netgroup_filter_regexes": ["^login$"],
      "Artifacts.netgroup_user_blacklist": ["isaac"]
  }):
    with test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
                               test_lib.FakeTestDataVFSHandler):
      for _ in test_lib.TestFlowHelper(
          "KnowledgeBaseInitializationFlow",
          self.client_mock,
          client_id=self.client_id,
          token=self.token):
        pass

      client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
      kb = artifact.GetArtifactKnowledgeBase(client)
      self.assertEqual(kb.os_major_version, 14)
      self.assertEqual(kb.os_minor_version, 4)
      # Users 1, 2 and 3 come from wtmp; yagharek comes from netgroup.
      self.assertItemsEqual([x.username for x in kb.users],
                            ["user1", "user2", "user3", "yagharek"])

      user = kb.GetUser(username="******")
      self.assertEqual(user.last_logon.AsSecondsFromEpoch(), 1296552099)
      self.assertEqual(user.homedir, "/home/user1")

def testUploadFiles(self):
  """Test the upload file flows."""
  with test_lib.ConfigOverrider(
      {"FileUploadFileStore.root_dir": self.temp_dir}):
    test_data_path = os.path.join(self.base_path, "test_img.dd")
    pathspec = rdf_paths.PathSpec(
        pathtype=rdf_paths.PathSpec.PathType.OS, path=test_data_path)

    session_id = None
    for session_id in test_lib.TestFlowHelper(
        "MultiUploadFile",
        ClientMock(client_id=self.client_id),
        token=self.token,
        pathspecs=[pathspec],
        client_id=self.client_id):
      pass

    results = flow.GRRFlow.ResultCollectionForFID(session_id,
                                                  token=self.token)
    self.assertEqual(len(results), 1)
    for stat_entry in results:
      # Make sure the AFF4 file is the same as the original test file we
      # tried to upload.
      fd1 = aff4.FACTORY.Open(stat_entry.AFF4Path(self.client_id),
                              token=self.token)
      fd2 = open(test_data_path, "rb")
      fd2.seek(0, 2)
      self.assertEqual(fd2.tell(), int(fd1.Get(fd1.Schema.SIZE)))
      self.CompareFDs(fd1, fd2)

def testKnowledgeBaseRetrievalDarwin(self):
  """Check we can retrieve a Darwin kb."""
  self.ClearKB()
  with test_lib.ConfigOverrider({"Artifacts.knowledge_base": ["OSXUsers"]}):
    with test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
                               test_lib.ClientVFSHandlerFixture):
      for _ in test_lib.TestFlowHelper(
          "KnowledgeBaseInitializationFlow",
          self.client_mock,
          client_id=self.client_id,
          token=self.token):
        pass

      client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
      kb = artifact.GetArtifactKnowledgeBase(client)
      self.assertEqual(kb.os_major_version, 10)
      self.assertEqual(kb.os_minor_version, 9)
      # scalzi comes from the /Users dir listing.
      self.assertItemsEqual([x.username for x in kb.users], ["scalzi"])

      user = kb.GetUser(username="******")
      self.assertEqual(user.homedir, "/Users/scalzi")

def _RunClientFileFinder(self,
                         paths,
                         action,
                         network_bytes_limit=None,
                         client_id=None):
  client_id = client_id or self.SetupClients(1)[0]
  with test_lib.ConfigOverrider({"Client.server_urls": [self.base_url]}):
    client = comms.GRRHTTPClient(
        ca_cert=config_lib.CONFIG["CA.certificate"],
        private_key=config_lib.CONFIG.Get("Client.private_key", default=None))
    client.client_worker = worker_mocks.FakeThreadedWorker(client=client)
    client.server_certificate = config_lib.CONFIG["Frontend.certificate"]

    for s in test_lib.TestFlowHelper(
        "ClientFileFinder",
        action_mocks.ClientFileFinderClientMock(
            client_worker=client.client_worker),
        client_id=client_id,
        paths=paths,
        pathtype=rdf_paths.PathSpec.PathType.OS,
        action=action,
        process_non_regular_files=True,
        network_bytes_limit=network_bytes_limit,
        token=self.token):
      session_id = s

    return session_id

def testInitialEnrollment(self):
  """If the client has no certificate initially it should enroll."""
  # Clear the certificate so we can generate a new one.
  with test_lib.ConfigOverrider({
      "Client.private_key": "",
      "Client.retry_error_limit": 5
  }):
    self.CreateNewClientObject()

    # Client should get a new Common Name.
    self.assertNotEqual(self.client_cn,
                        self.client_communicator.communicator.common_name)
    self.client_cn = self.client_communicator.communicator.common_name

    # The client will sleep and re-attempt to connect multiple times.
    status = self.client_communicator.RunOnce()
    self.assertEqual(status.code, 406)

    # The client should now send an enrollment request.
    status = self.client_communicator.RunOnce()

    # Client should generate the enrollment message by itself.
    self.assertEqual(len(self.messages), 1)
    self.assertEqual(self.messages[0].session_id,
                     ca_enroller.Enroler.well_known_session_id)

def setUp(self):
  super(MasterTest, self).setUp()
  self.mock_service = MockDataStoreService()

  ip_addrs = socket.getaddrinfo("localhost", 0, socket.AF_UNSPEC, 0,
                                socket.IPPROTO_TCP)
  self.host = ip_addrs[0][4][0]

  # Ports 7000+ are typically used for GRR data servers, so they are tested
  # here for illustration and documentation purposes.
  # We're also testing port 3000 as it is unique in that it encodes
  # differently to binary: pack_int(3000) returns a byte sequence that is
  # invalid utf8 and can cause problems in certain code. This has caused
  # bugs in the past, so this constitutes a regression test.
  self.ports = [7000, 7001, 7002, 3000]

  if ipaddr.IPAddress(self.host).version == 6:
    urn_template = "http://[%s]:%i"
  else:
    urn_template = "http://%s:%i"

  server_list = []
  for port in self.ports:
    server_list.append(urn_template % (self.host, port))

  self.server_list_overrider = test_lib.ConfigOverrider(
      {"Dataserver.server_list": server_list})
  self.server_list_overrider.Start()

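# Illustrative aside (not part of the original test, pure stdlib): why 3000
# makes a good regression value. Assuming pack_int behaves like a
# little-endian 32-bit struct pack, 3000 encodes to b'\xb8\x0b\x00\x00';
# 0xb8 is a UTF-8 continuation byte, so the sequence can never decode as
# valid utf8.
import struct

packed = struct.pack("<i", 3000)  # b'\xb8\x0b\x00\x00'
try:
  packed.decode("utf-8")
except UnicodeDecodeError:
  print("pack_int(3000)-style bytes are not valid utf8")  # expected path
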
def setUp(self):
  super(AuthTest, self).setUp()
  self.config_overrider = test_lib.ConfigOverrider({
      "Dataserver.server_username": "******",
      "Dataserver.server_password": "******",
      "Dataserver.client_credentials": ["rootuser1:somelongpasswordaabb:rw"]
  })
  self.config_overrider.Start()

def setUp(self):
  super(RouterMatcherTest, self).setUp()
  self.config_overrider = test_lib.ConfigOverrider(
      {"API.DefaultRouter": TestHttpApiRouter.__name__})
  self.config_overrider.Start()
  # Make sure ApiAuthManager is initialized with this configuration setting.
  api_auth_manager.APIACLInit.InitApiAuthManager()

  self.router_matcher = http_api.RouterMatcher()

def testNoACLs(self):
  """All checking is skipped if no API.HandlerACLFile is defined."""
  with test_lib.ConfigOverrider({"API.HandlerACLFile": ""}):
    auth_mgr = api_auth_manager.SimpleAPIAuthorizationManager()
    auth_mgr.CheckAccess(self.mock_handler, "u1")

    bad_handler = mock.MagicMock()
    bad_handler.enabled_by_default = True
    bad_handler.__class__.__name__ = "BadHandler"
    auth_mgr.CheckAccess(bad_handler, "u2")

def setUp(self): """Set up communicator tests.""" super(HTTPClientTests, self).setUp() # These tests change the config so we preserve state. self.config_stubber = test_lib.PreserveConfig() self.config_stubber.Start() certificate = self.ClientCertFromPrivateKey( config_lib.CONFIG["Client.private_key"]) self.server_serial_number = 0 self.server_private_key = config_lib.CONFIG["PrivateKeys.server_key"] self.server_certificate = config_lib.CONFIG["Frontend.certificate"] self.client_cn = certificate.GetCN() # Make a new client self.CreateNewClientObject() # The housekeeper threads of the time based caches also call time.time and # interfere with some tests so we disable them here. utils.InterruptableThread.exit = True # The same also applies to the StatsCollector thread. stats.StatsCollector.exit = True # Make a client mock self.client = aff4.FACTORY.Create(self.client_cn, aff4_grr.VFSGRRClient, mode="rw", token=self.token) self.client.Set(self.client.Schema.CERT(certificate.AsPEM())) self.client.Flush() # Stop the client from actually processing anything self.out_queue_overrider = test_lib.ConfigOverrider( {"Client.max_out_queue": 0}) self.out_queue_overrider.Start() # And cache it in the server self.CreateNewServerCommunicator() self.requests_stubber = utils.Stubber(requests, "request", self.UrlMock) self.requests_stubber.Start() self.sleep_stubber = utils.Stubber(time, "sleep", lambda x: None) self.sleep_stubber.Start() self.messages = [] ca_enroller.enrolment_cache.Flush() # Response to send back to clients. self.server_response = dict(session_id="aff4:/W:session", name="Echo", response_id=2)
def setUp(self):
  super(CloudBigTableDataStoreIntegrationTest, self).setUp()
  # Only make a single retry attempt in tests.
  self.test_config = test_lib.ConfigOverrider({
      "CloudBigtable.retry_max_attempts": 1,
      "CloudBigtable.retry_interval": 1
  })
  self.test_config.Start()

def setUp(self):
  super(SimpleAPIAuthorizationManagerTest, self).setUp()
  self.mock_handler = mock.MagicMock()
  self.mock_handler.enabled_by_default = True
  self.mock_handler.__class__.__name__ = "ApiCallHandler"

  # API ACLs are off by default; we need to set this to something so the
  # tests exercise the functionality. Each test will supply its own ACL
  # data.
  self.aclfile_overrider = test_lib.ConfigOverrider(
      {"API.HandlerACLFile": "dummy"})
  self.aclfile_overrider.Start()