def Handle(self, args, token=None):
  end = args.count or db.MAX_COUNT
  keywords = compatibility.ShlexSplit(args.query)

  api_clients = []

  if data_store.RelationalDBEnabled():
    index = client_index.ClientIndex()

    # LookupClients returns a sorted list of client ids.
    clients = index.LookupClients(keywords)[args.offset:args.offset + end]

    client_infos = data_store.REL_DB.MultiReadClientFullInfo(clients)
    for client_info in itervalues(client_infos):
      api_clients.append(ApiClient().InitFromClientInfo(client_info))
  else:
    index = client_index.CreateClientIndex(token=token)

    result_urns = sorted(
        index.LookupClients(keywords))[args.offset:args.offset + end]

    result_set = aff4.FACTORY.MultiOpen(result_urns, token=token)

    for child in sorted(result_set):
      api_clients.append(ApiClient().InitFromAff4Object(child))

  UpdateClientsFromFleetspeak(api_clients)
  return ApiSearchClientsResult(items=api_clients)
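# A minimal, self-contained sketch (not GRR code) of the offset/count
# pagination used by the handler above: a count of 0 falls back to a maximum,
# and the sorted result list is sliced with [offset:offset + count].
def _paginate(items, offset=0, count=0, max_count=1000):
  """Returns the page of `items` selected by offset/count."""
  end = count or max_count
  return items[offset:offset + end]

assert _paginate(list(range(10)), offset=2, count=3) == [2, 3, 4]
assert _paginate(list(range(10)), offset=8) == [8, 9]  # count=0 -> max_count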
def Run(self):
  failing_descriptor = rdf_output_plugin.OutputPluginDescriptor(
      plugin_name=hunt_test_lib.FailingDummyHuntOutputPlugin.__name__)

  with test_lib.FakeTime(42, increment=1):
    if data_store.RelationalDBEnabled():
      hunt_id = self.CreateHunt(
          description="the hunt", output_plugins=[failing_descriptor])
      hunt.StartHunt(hunt_id)
    else:
      hunt_urn = self.StartHunt(
          description="the hunt", output_plugins=[failing_descriptor])
      hunt_id = hunt_urn.Basename()

    self.client_ids = self.SetupClients(2)
    for index, client_id in enumerate(self.client_ids):
      self.RunHunt(client_ids=[client_id], failrate=-1)
      with test_lib.FakeTime(100042 + index * 100):
        try:
          self.ProcessHuntOutputPlugins()
        except process_results.ResultsProcessingError:
          if flags.FLAGS.pdb_post_mortem:
            pdb.post_mortem()

  self.Check(
      "ListHuntOutputPluginErrors",
      args=hunt_plugin.ApiListHuntOutputPluginErrorsArgs(
          hunt_id=hunt_id, plugin_id="FailingDummyHuntOutputPlugin_0"),
      replace={hunt_id: "H:123456"})
def testArtifactSkipping(self):
  client_mock = action_mocks.ActionMock()
  # This does not match the Artifact so it will not be collected.
  client_id = self.SetupClient(0, system="Windows")

  artifact_list = ["FakeArtifact"]
  session_id = flow_test_lib.TestFlowHelper(
      aff4_flows.ArtifactCollectorFlow.__name__,
      client_mock,
      artifact_list=artifact_list,
      use_tsk=False,
      token=self.token,
      client_id=client_id)

  if data_store.RelationalDBEnabled():
    flow_obj = data_store.REL_DB.ReadFlowObject(client_id.Basename(),
                                                session_id)
    state = flow_obj.persistent_data
  else:
    flow_obj = aff4.FACTORY.Open(session_id, token=self.token)
    state = flow_obj.state

  self.assertLen(state.artifacts_skipped_due_to_condition, 1)
  self.assertEqual(state.artifacts_skipped_due_to_condition[0],
                   ["FakeArtifact", "os == 'Linux'"])
def testGlobRegistry(self):
  """Test that glob works on registry."""
  client_id = test_lib.TEST_CLIENT_ID
  paths = [
      "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT"
      "\\CurrentVersion\\ProfileList\\ProfilesDirectory",
      "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT"
      "\\CurrentVersion\\ProfileList\\AllUsersProfile"
  ]

  flow_test_lib.TestFlowHelper(
      filesystem.Glob.__name__,
      self.client_mock,
      paths=paths,
      pathtype=rdf_paths.PathSpec.PathType.REGISTRY,
      client_id=client_id,
      token=self.token)

  path = paths[0].replace("\\", "/")

  if data_store.RelationalDBEnabled():
    path_info = data_store.REL_DB.ReadPathInfo(
        client_id.Basename(),
        rdf_objects.PathInfo.PathType.REGISTRY,
        components=tuple(path.split("/")))
    self.assertEqual(path_info.stat_entry.registry_data.GetValue(),
                     "%SystemDrive%\\Users")
  else:
    fd = aff4.FACTORY.Open(
        client_id.Add("registry").Add(path), token=self.token)
    self.assertEqual(fd.__class__.__name__, "VFSFile")
    self.assertEqual(
        fd.Get(fd.Schema.STAT).registry_data.GetValue(),
        "%SystemDrive%\\Users")
def Run(self):
  if data_store.RelationalDBEnabled():
    replace = {}
    for i in range(0, 2):
      with test_lib.FakeTime((1 + i) * 1000):
        hunt_id = self.CreateHunt(description="hunt_%d" % i)
        if i % 2:
          hunt.StopHunt(hunt_id)

        replace[hunt_id] = "H:00000%d" % i
  else:
    replace = {}
    for i in range(0, 2):
      with test_lib.FakeTime((1 + i) * 1000):
        with self.CreateHunt(description="hunt_%d" % i) as hunt_obj:
          if i % 2:
            hunt_obj.Stop()

          replace[hunt_obj.urn.Basename()] = "H:00000%d" % i

  self.Check("ListHunts", args=hunt_plugin.ApiListHuntsArgs(), replace=replace)
  self.Check(
      "ListHunts", args=hunt_plugin.ApiListHuntsArgs(count=1), replace=replace)
  self.Check(
      "ListHunts",
      args=hunt_plugin.ApiListHuntsArgs(offset=1, count=1),
      replace=replace)
def SendMail(self, responses):
  """Sends a mail when the client has responded."""
  if responses.success:
    if data_store.RelationalDBEnabled():
      client = data_store.REL_DB.ReadClientSnapshot(self.client_id)
      hostname = client.knowledge_base.fqdn
    else:
      client = aff4.FACTORY.Open(self.client_id, token=self.token)
      hostname = client.Get(client.Schema.FQDN)

    subject = self.__class__.subject_template.render(hostname=hostname)
    body = self.__class__.template.render(
        client_id=self.client_id,
        admin_ui=config.CONFIG["AdminUI.url"],
        hostname=hostname,
        url="/clients/%s" % self.client_id,
        creator=self.token.username,
        signature=utils.SmartUnicode(config.CONFIG["Email.signature"]))

    email_alerts.EMAIL_ALERTER.SendEmail(
        self.args.email,
        "grr-noreply",
        utils.SmartStr(subject),
        utils.SmartStr(body),
        is_html=True)
  else:
    # Raise (not just construct) the error so the failure is recorded on the
    # flow rather than silently discarded.
    raise flow.FlowError("Error while pinging client.")
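# Illustrative only: the subject_template/template class attributes above
# expose a Jinja2-style .render(). Assuming Jinja2 templates, the rendering
# step looks roughly like this (template text here is hypothetical).
import jinja2

_subject_template = jinja2.Template("GRR Client on {{ hostname }} responded.")
assert (_subject_template.render(hostname="host1.example.com") ==
        "GRR Client on host1.example.com responded.")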
def _WriteFilesContentRel(self, responses):
  """Writes file contents of multiple files to the relational database."""
  client_path_blob_refs = dict()
  client_path_path_info = dict()

  for response in responses:
    path_info = rdf_objects.PathInfo.FromStatEntry(response.stat_entry)

    chunks = response.transferred_file.chunks
    chunks = sorted(chunks, key=lambda chunk: chunk.offset)

    client_path = db.ClientPath.FromPathInfo(self.client_id, path_info)
    blob_refs = []
    for c in chunks:
      blob_refs.append(
          rdf_objects.BlobReference(
              offset=c.offset,
              size=c.length,
              blob_id=rdf_objects.BlobID.FromBytes(c.digest)))

    client_path_path_info[client_path] = path_info
    client_path_blob_refs[client_path] = blob_refs

  if data_store.RelationalDBEnabled() and client_path_blob_refs:
    use_external_stores = self.args.action.download.use_external_stores
    client_path_hash_id = file_store.AddFilesWithUnknownHashes(
        client_path_blob_refs, use_external_stores=use_external_stores)
    for client_path, hash_id in iteritems(client_path_hash_id):
      path_info = client_path_path_info[client_path]
      path_info.hash_entry.sha256 = hash_id.AsBytes()

  path_infos = list(itervalues(client_path_path_info))
  data_store.REL_DB.WritePathInfos(self.client_id, path_infos)
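# Hypothetical stand-in types (not GRR's) showing the chunk handling above:
# transferred-file chunks may arrive unordered, so they are sorted by offset
# before blob references (offset/size/digest) are built.
import collections

_Chunk = collections.namedtuple("_Chunk", ["offset", "length", "digest"])

_chunks = [_Chunk(4096, 4096, b"\xbb" * 32), _Chunk(0, 4096, b"\xaa" * 32)]
_ordered = sorted(_chunks, key=lambda chunk: chunk.offset)
assert [c.offset for c in _ordered] == [0, 4096]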
def ProcessStat(self, responses):
  # Did it work?
  if not responses.success:
    # It's better to raise rather than merely logging since it will
    # make it to the flow's protobuf and users can
    # inspect the reason this flow failed.
    raise IOError("Could not stat file: %s" % responses.status)

  client_stat = responses.First()

  # Update the pathspec to the one we got from the client.
  self.state.pathspec = client_stat.pathspec

  # If the file was big enough, we'll store it as an AFF4SparseImage.
  if client_stat.st_size > self.args.size_threshold:
    urn = self.state.pathspec.AFF4Path(self.client_urn)

    # TODO(user): When we can check the last update time of the
    # contents of a file, raise if the contents have been updated before here.

    fd = aff4.FACTORY.Create(
        urn, aff4_type=standard.AFF4SparseImage, token=self.token, mode="rw")
    fd.Set(fd.Schema.PATHSPEC, self.state.pathspec)
    fd.Set(fd.Schema.STAT, client_stat)
    fd.Flush()

    if data_store.RelationalDBEnabled():
      path_info = rdf_objects.PathInfo.FromStatEntry(client_stat)
      data_store.REL_DB.WritePathInfos(self.client_id, [path_info])
  else:
    # Otherwise, just get the whole file.
    self.CallFlow(
        transfer.MultiGetFile.__name__,
        pathspecs=[self.state.pathspec],
        next_state="End")
def ParseRunKeys(self, responses):
  """Get filenames from the RunKeys and download the files."""
  filenames = []
  if data_store.RelationalDBEnabled():
    client = data_store.REL_DB.ReadClientSnapshot(self.client_id)
    kb = client.knowledge_base
  else:
    client = aff4.FACTORY.Open(self.client_id, mode="r", token=self.token)
    kb = artifact.GetArtifactKnowledgeBase(client)

  for response in responses:
    runkey = response.registry_data.string

    environ_vars = artifact_utils.GetWindowsEnvironmentVariablesMap(kb)
    path_guesses = path_detection_windows.DetectExecutablePaths([runkey],
                                                                environ_vars)

    if not path_guesses:
      self.Log("Couldn't guess path for %s", runkey)

    for path in path_guesses:
      filenames.append(
          rdf_paths.PathSpec(
              path=path, pathtype=rdf_paths.PathSpec.PathType.TSK))

  if filenames:
    self.CallFlow(
        transfer.MultiGetFile.__name__,
        pathspecs=filenames,
        next_state="Done")
def Handle(self, args, token=None):
  client_id = str(args.client_id)

  if data_store.RelationalDBEnabled():
    md = data_store.REL_DB.ReadClientMetadata(client_id)
    if md.fleetspeak_enabled:
      ip_str, ipaddr_obj = _GetAddrFromFleetspeak(client_id)
    else:
      try:
        ipaddr_obj = md.ip.AsIPAddr()
        ip_str = str(ipaddr_obj)
      except ValueError:
        ipaddr_obj = None
        ip_str = ""
  else:
    client = aff4.FACTORY.Open(
        args.client_id.ToClientURN(),
        aff4_type=aff4_grr.VFSGRRClient,
        token=token)
    if client.Get(client.Schema.FLEETSPEAK_ENABLED):
      ip_str, ipaddr_obj = _GetAddrFromFleetspeak(client_id)
    else:
      ip_str = client.Get(client.Schema.CLIENT_IP)
      if ip_str:
        ipaddr_obj = ipaddress.ip_address(ip_str)
      else:
        ipaddr_obj = None

  status, info = ip_resolver.IP_RESOLVER.RetrieveIPInfo(ipaddr_obj)

  return ApiGetLastClientIPAddressResult(ip=ip_str, info=info, status=status)
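# Stdlib-only sketch of the address handling above: ipaddress.ip_address()
# raises ValueError for anything that is not a valid v4/v6 address, which the
# handler maps to an empty result.
import ipaddress

def _parse_ip(ip_str):
  try:
    return ipaddress.ip_address(ip_str)
  except ValueError:
    return None

assert _parse_ip("192.0.2.1") == ipaddress.IPv4Address("192.0.2.1")
assert _parse_ip("") is None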
def List(self, responses):
  """Collect the directory listing and store in the datastore."""
  if not responses.success:
    raise flow.FlowError(str(responses.status))

  self.Log("Listed %s", self.state.urn)

  with data_store.DB.GetMutationPool() as pool:
    if data_store.AFF4Enabled():
      with aff4.FACTORY.Create(
          self.state.urn,
          standard.VFSDirectory,
          mode="w",
          mutation_pool=pool,
          token=self.token) as fd:
        fd.Set(fd.Schema.PATHSPEC(self.state.stat.pathspec))
        fd.Set(fd.Schema.STAT(self.state.stat))

    if data_store.RelationalDBEnabled():
      path_info = rdf_objects.PathInfo.FromStatEntry(self.state.stat)
      data_store.REL_DB.WritePathInfos(self.client_id, [path_info])

    stat_entries = list(map(rdf_client_fs.StatEntry, responses))
    WriteStatEntries(
        stat_entries,
        client_id=self.client_id,
        mutation_pool=pool,
        token=self.token)

    for stat_entry in stat_entries:
      self.SendReply(stat_entry)  # Send stats to parent flows.
def Handle(self, args, token=None):
  if data_store.RelationalDBEnabled():
    client_id = str(args.client_id)
    flow_id = str(args.operation_id)

    # TODO(user): test both exception scenarios below.
    try:
      flow_obj = data_store.REL_DB.ReadFlowObject(client_id, flow_id)
    except db.UnknownFlowError:
      raise InterrogateOperationNotFoundError(
          "Operation with id %s not found" % args.operation_id)

    expected_flow_name = compatibility.GetName(discovery.Interrogate)
    if flow_obj.flow_class_name != expected_flow_name:
      raise InterrogateOperationNotFoundError(
          "Operation with id %s not found" % args.operation_id)

    complete = flow_obj.flow_state != flow_obj.FlowState.RUNNING
  else:
    try:
      flow_obj = aff4.FACTORY.Open(
          args.operation_id, aff4_type=aff4_flows.Interrogate, token=token)
      complete = not flow_obj.GetRunner().IsRunning()
    except aff4.InstantiationError:
      raise InterrogateOperationNotFoundError(
          "Operation with id %s not found" % args.operation_id)

  result = ApiGetInterrogateOperationStateResult()
  if complete:
    result.state = ApiGetInterrogateOperationStateResult.State.FINISHED
  else:
    result.state = ApiGetInterrogateOperationStateResult.State.RUNNING

  return result
def Handle(self, args, token=None):
  end_time = args.end or rdfvalue.RDFDatetime.Now()
  start_time = args.start or end_time - rdfvalue.Duration("3m")
  diffs_only = args.mode == args.Mode.DIFF

  items = []

  if data_store.RelationalDBEnabled():
    client_id = str(args.client_id)
    history = data_store.REL_DB.ReadClientSnapshotHistory(
        client_id, timerange=(start_time, end_time))
    labels = data_store.REL_DB.ReadClientLabels(client_id)

    for client in history[::-1]:
      c = ApiClient().InitFromClientObject(client)
      c.labels = labels
      items.append(c)
  else:
    all_clients = aff4.FACTORY.OpenDiscreteVersions(
        args.client_id.ToClientURN(),
        mode="r",
        age=(start_time.AsMicrosecondsSinceEpoch(),
             end_time.AsMicrosecondsSinceEpoch()),
        diffs_only=diffs_only,
        token=token)

    for fd in all_clients:
      items.append(ApiClient().InitFromAff4Object(fd, include_metadata=False))

  return ApiGetClientVersionsResult(items=items)
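# Sketch of the default time window above, using stdlib datetimes: if no
# explicit start is given, the handler looks back a fixed duration from the
# end time (rdfvalue.Duration("3m") is assumed to mean three minutes here).
import datetime

_end_time = datetime.datetime(2019, 1, 1, 12, 0, 0)
_start_time = _end_time - datetime.timedelta(minutes=3)
assert _end_time - _start_time == datetime.timedelta(minutes=3)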
def Handle(self, args, token=None):
  if not args.timestamp:
    age = rdfvalue.RDFDatetime.Now()
  else:
    age = rdfvalue.RDFDatetime(args.timestamp)

  api_client = None
  if data_store.RelationalDBEnabled():
    client_id = str(args.client_id)
    info = data_store.REL_DB.ReadClientFullInfo(client_id)
    if info is None:
      raise api_call_handler_base.ResourceNotFoundError()

    if args.timestamp:
      # Assume that a snapshot for this particular timestamp exists.
      snapshots = data_store.REL_DB.ReadClientSnapshotHistory(
          client_id, timerange=(args.timestamp, args.timestamp))

      if snapshots:
        info.last_snapshot = snapshots[0]
        info.last_startup_info = snapshots[0].startup_info

    api_client = ApiClient().InitFromClientInfo(info)
  else:
    client = aff4.FACTORY.Open(
        args.client_id.ToClientURN(),
        aff4_type=aff4_grr.VFSGRRClient,
        age=age,
        token=token)
    api_client = ApiClient().InitFromAff4Object(client)

  UpdateClientsFromFleetspeak([api_client])

  return api_client
def AddLogToHunt(self, hunt_id, client_id, message):
  if isinstance(client_id, rdfvalue.RDFURN):
    client_id = client_id.Basename()
  if isinstance(hunt_id, rdfvalue.RDFURN):
    hunt_id = hunt_id.Basename()

  if data_store.RelationalDBEnabled():
    flow_id = self._EnsureClientHasHunt(client_id, hunt_id)

    data_store.REL_DB.WriteFlowLogEntries([
        rdf_flow_objects.FlowLogEntry(
            client_id=client_id,
            flow_id=flow_id,
            hunt_id=hunt_id,
            message=message)
    ])
  else:
    hunt_obj = aff4.FACTORY.Open(rdfvalue.RDFURN("hunts").Add(hunt_id))
    logs_collection_urn = hunt_obj.logs_collection_urn
    log_entry = rdf_flows.FlowLog(
        client_id=client_id,
        urn=rdf_client.ClientURN(client_id).Add(hunt_id),
        flow_name=hunt_obj.__class__.__name__,
        log_message=message)
    with data_store.DB.GetMutationPool() as pool:
      grr_collections.LogCollection.StaticAdd(
          logs_collection_urn, log_entry, mutation_pool=pool)
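# Illustrative stand-in (not GRR's RDFURN) for the id normalization above:
# helpers accept either a plain id string or a URN-like object and reduce
# both to the basename form before use.
class _FakeURN(object):

  def __init__(self, path):
    self._path = path

  def Basename(self):
    return self._path.rsplit("/", 1)[-1]

def _normalize(value):
  return value.Basename() if isinstance(value, _FakeURN) else value

assert _normalize(_FakeURN("aff4:/hunts/H:123456")) == "H:123456"
assert _normalize("H:123456") == "H:123456"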
def StartInterrogationHunt(self):
  """Starts an interrogation hunt on all available clients."""
  flow_name = compatibility.GetName(flows_discovery.Interrogate)
  flow_args = flows_discovery.InterrogateArgs(lightweight=False)
  description = "Interrogate run by cron to keep host info fresh."

  if data_store.RelationalDBEnabled():
    hunt_id = hunt.CreateAndStartHunt(
        flow_name,
        flow_args,
        self.token.username,
        client_limit=0,
        client_rate=50,
        crash_limit=config.CONFIG["Cron.interrogate_crash_limit"],
        description=description,
        duration=rdfvalue.Duration("1w"),
        output_plugins=self.GetOutputPlugins())
    self.Log("Started hunt %s.", hunt_id)
  else:
    with hunts_implementation.StartHunt(
        hunt_name=hunts_standard.GenericHunt.__name__,
        client_limit=0,
        flow_runner_args=rdf_flow_runner.FlowRunnerArgs(flow_name=flow_name),
        flow_args=flow_args,
        output_plugins=self.GetOutputPlugins(),
        crash_limit=config.CONFIG["Cron.interrogate_crash_limit"],
        client_rate=50,
        expiry_time=rdfvalue.Duration("1w"),
        description=description,
        token=self.token) as hunt_obj:
      hunt_obj.GetRunner().Start()
      self.Log("Started hunt %s.", hunt_obj.urn)
def setUp(self):
  super(SystemCronTestMixin, self).setUp()

  one_hour_ping = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("1h")
  eight_day_ping = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("8d")
  ancient_ping = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("61d")

  self.SetupClientsWithIndices(
      range(0, 10), system="Windows", ping=eight_day_ping)
  self.SetupClientsWithIndices(
      range(10, 20), system="Linux", ping=eight_day_ping)
  self.SetupClientsWithIndices(
      range(20, 22),
      system="Darwin",
      fleetspeak_enabled=True,
      ping=one_hour_ping)
  # These clients shouldn't be analyzed by any of the stats cronjobs.
  self.SetupClientsWithIndices(
      range(22, 24), system="Linux", ping=ancient_ping)

  for i in range(0, 10):
    client_id = u"C.1%015x" % i
    if data_store.AFF4Enabled():
      with aff4.FACTORY.Open(client_id, mode="rw", token=self.token) as client:
        client.AddLabels([u"Label1", u"Label2"], owner=u"GRR")
        client.AddLabel(u"UserLabel", owner=u"jim")

    if data_store.RelationalDBEnabled():
      data_store.REL_DB.AddClientLabels(client_id, u"GRR",
                                        [u"Label1", u"Label2"])
      data_store.REL_DB.AddClientLabels(client_id, u"jim", [u"UserLabel"])
def _StartFlow(self, client_id, flow_cls, **kw):
  if data_store.RelationalDBEnabled():
    flow_id = flow.StartFlow(flow_cls=flow_cls, client_id=client_id, **kw)

    # Lease the client message.
    data_store.REL_DB.LeaseClientActionRequests(
        client_id, lease_time=rdfvalue.Duration("10000s"))
    # Write some responses. In the relational db, the client queue will be
    # cleaned up as soon as all responses are available. Therefore we cheat
    # here and make it look like the request needs more responses so it's not
    # considered complete.

    # Write the status first. This will mark the request as waiting for 2
    # responses.
    status = rdf_flow_objects.FlowStatus(
        client_id=client_id, flow_id=flow_id, request_id=1, response_id=2)
    data_store.REL_DB.WriteFlowResponses([status])

    # Now we read the request, adjust the number, and write it back.
    reqs = data_store.REL_DB.ReadAllFlowRequestsAndResponses(
        client_id, flow_id)
    req = reqs[0][0]

    req.nr_responses_expected = 99
    data_store.REL_DB.WriteFlowRequests([req])

    # This response now won't trigger any deletion of client messages.
    response = rdf_flow_objects.FlowResponse(
        client_id=client_id,
        flow_id=flow_id,
        request_id=1,
        response_id=1,
        payload=rdf_client.Process(name="test_process"))
    data_store.REL_DB.WriteFlowResponses([response])

    # This is not strictly needed as we don't display this information in the
    # UI.
    req.nr_responses_expected = 2
    data_store.REL_DB.WriteFlowRequests([req])

    return flow_id
  else:
    flow_id = flow.StartAFF4Flow(
        flow_name=compatibility.GetName(flow_cls),
        client_id=client_id,
        token=self.token,
        **kw).Basename()

    # Have the client write some responses.
    test_process = rdf_client.Process(name="test_process")
    mock = flow_test_lib.MockClient(
        client_id,
        action_mocks.ListProcessesMock([test_process]),
        token=self.token)
    mock.Next()

    return flow_id
def StoreResults(self, responses):
  """Stores the results returned by the client to the db."""
  if not responses.success:
    raise flow.FlowError(responses.status)

  self.state.files_found = len(responses)
  files_to_publish = []

  with data_store.DB.GetMutationPool() as pool:
    transferred_file_responses = []
    stat_entries = []
    for response in responses:
      if response.HasField("transferred_file"):
        transferred_file_responses.append(response)
      elif response.HasField("stat_entry"):
        stat_entries.append(response.stat_entry)

    if data_store.AFF4Enabled():
      self._WriteFilesContentAff4(transferred_file_responses,
                                  mutation_pool=pool)

    if data_store.RelationalDBEnabled():
      self._WriteFilesContentRel(transferred_file_responses)

    self._WriteStatEntries(stat_entries, mutation_pool=pool)

    for response in responses:
      self.SendReply(response)

      if stat.S_ISREG(response.stat_entry.st_mode):
        files_to_publish.append(
            response.stat_entry.pathspec.AFF4Path(self.client_urn))
def FillClientStats(self, client_id):
  stats = []
  for i in range(6):
    timestamp = int((i + 1) * 10 * 1e6)
    st = rdf_client_stats.ClientStats()

    sample = rdf_client_stats.CpuSample(
        timestamp=timestamp,
        user_cpu_time=10 + i,
        system_cpu_time=20 + i,
        cpu_percent=10 + i)
    st.cpu_samples.Append(sample)

    sample = rdf_client_stats.IOSample(
        timestamp=timestamp, read_bytes=10 + i, write_bytes=10 + i * 2)
    st.io_samples.Append(sample)

    stats.append(st)

  if data_store.AFF4Enabled():
    for st in stats:
      with test_lib.FakeTime(st.cpu_samples[0].timestamp):
        with aff4.FACTORY.Create(
            client_id.Add("stats"),
            aff4_type=aff4_stats.ClientStats,
            token=self.token,
            mode="rw") as stats_fd:
          stats_fd.AddAttribute(stats_fd.Schema.STATS(st))

  if data_store.RelationalDBEnabled():
    for st in stats:
      with test_lib.FakeTime(st.cpu_samples[0].timestamp):
        data_store.REL_DB.WriteClientStats(
            client_id=client_id.Basename(), stats=st)
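# The samples above use integer microsecond timestamps:
# int((i + 1) * 10 * 1e6) yields 10 s, 20 s, ... expressed in microseconds.
assert [int((i + 1) * 10 * 1e6) for i in range(3)] == [
    10000000, 20000000, 30000000
]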
def testListVolumeShadowCopies(self):
  """Test the list Volume Shadow Copies flow."""
  client_id = self.SetupClient(0)
  flow_name = windows_vsc.ListVolumeShadowCopies.__name__

  # Run the flow in the simulated way.
  flow_test_lib.TestFlowHelper(
      flow_name, TestClient(), token=self.token, client_id=client_id)

  if data_store.RelationalDBEnabled():
    children = data_store.REL_DB.ListChildPathInfos(
        client_id.Basename(), rdf_objects.PathInfo.PathType.TSK,
        ["\\\\.\\HarddiskVolumeShadowCopy3"])

    self.assertLen(children, 10)
    self.assertItemsEqual([x.components[-1] for x in children],
                          ["file %s" % i for i in range(10)])
  else:
    fd = aff4.FACTORY.Open(
        client_id.Add("fs/tsk/\\\\.\\HarddiskVolumeShadowCopy3"),
        token=self.token)
    children = list(fd.ListChildren())

    self.assertLen(children, 10)
    self.assertEqual([x.Basename() for x in sorted(children)],
                     ["file %s" % i for i in range(10)])
def _CheckNannyEmail(self, client_id, nanny_message, email_dict):
  # We expect the email to be sent.
  self.assertEqual(
      email_dict.get("address"), config.CONFIG["Monitoring.alert_email"])

  # Make sure the message is included in the email message.
  self.assertIn(nanny_message, email_dict["message"])

  if data_store.RelationalDBEnabled():
    self.assertIn(client_id, email_dict["title"])
    crash = data_store.REL_DB.ReadClientCrashInfo(client_id)
  else:
    self.assertIn(client_id.Basename(), email_dict["title"])

    # Make sure crashes collections are created and written
    # into proper locations. First check the per-client crashes collection.
    client_crashes = list(
        aff4_grr.VFSGRRClient.CrashCollectionForCID(client_id))
    self.assertLen(client_crashes, 1)
    crash = client_crashes[0]

  self.assertEqual(crash.client_id, client_id)
  self.assertEqual(crash.client_info.client_name, "GRR Monitor")
  self.assertEqual(crash.crash_type, "Nanny Message")
  self.assertEqual(crash.crash_message, nanny_message)
def _LoadAuditEvents(handlers,
                     get_report_args,
                     actions=None,
                     token=None,
                     transformers=None):
  """Returns AuditEvents for given handlers, actions, and timerange."""
  if transformers is None:
    transformers = {}

  if data_store.RelationalDBEnabled():
    entries = data_store.REL_DB.ReadAPIAuditEntries(
        min_timestamp=get_report_args.start_time,
        max_timestamp=get_report_args.start_time + get_report_args.duration,
        router_method_names=list(handlers.keys()))
    rows = [_EntryToEvent(entry, handlers, transformers) for entry in entries]
  else:
    entries = report_utils.GetAuditLogEntries(
        offset=get_report_args.duration,
        now=get_report_args.start_time + get_report_args.duration,
        token=token)
    if actions is None:
      actions = set(handlers.values())
    rows = [entry for entry in entries if entry.action in actions]

  rows.sort(key=lambda row: row.timestamp, reverse=True)
  return rows
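# Plain-data sketch of the legacy branch above: keep entries whose action is
# in the handler-derived set, then sort newest-first.
_entries = [
    {"action": "HUNT_CREATED", "timestamp": 2},
    {"action": "CLIENT_READ", "timestamp": 1},
    {"action": "HUNT_CREATED", "timestamp": 3},
]
_actions = {"HUNT_CREATED"}
_rows = [e for e in _entries if e["action"] in _actions]
_rows.sort(key=lambda row: row["timestamp"], reverse=True)
assert [r["timestamp"] for r in _rows] == [3, 2]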
def testNotificationPointingToFlowIsShownOnFlowCompletion(self):
  self.Open("/")

  pathspec = rdf_paths.PathSpec(
      path=os.path.join(self.base_path, "test.plist"),
      pathtype=rdf_paths.PathSpec.PathType.OS)
  session_id = flow_test_lib.TestFlowHelper(
      flows_transfer.GetFile.__name__,
      client_mock=self.action_mock,
      client_id=self.client_id,
      pathspec=pathspec,
      token=self.token)
  if not data_store.RelationalDBEnabled():
    session_id = session_id.Basename()

  # Clicking on this should show the notifications table.
  self.Click("css=button[id=notification_button]")
  self.WaitUntil(self.IsTextPresent, "Notifications")

  # Click on the "flow completed" notification.
  self.Click("css=td:contains('Flow GetFile completed')")
  self.WaitUntilNot(self.IsTextPresent, "Notifications")

  # Check that clicking on a notification changes the location and shows
  # the flow page.
  self.WaitUntilEqual(
      "/#/clients/%s/flows/%s" % (self.client_id, session_id),
      self.GetCurrentUrlPath)
  self.WaitUntil(self.IsTextPresent, session_id)
def Run(self):
  output_plugins = [
      rdf_output_plugin.OutputPluginDescriptor(
          plugin_name=test_plugins.DummyHuntTestOutputPlugin.__name__,
          plugin_args=test_plugins.DummyHuntTestOutputPlugin.args_type(
              filename_regex="blah!", fetch_binaries=True))
  ]

  with test_lib.FakeTime(42, increment=1):
    if data_store.RelationalDBEnabled():
      hunt_id = self.CreateHunt(
          description="the hunt", output_plugins=output_plugins)
      hunt.StartHunt(hunt_id)
    else:
      hunt_urn = self.StartHunt(
          description="the hunt", output_plugins=output_plugins)
      hunt_id = hunt_urn.Basename()

    self.client_ids = self.SetupClients(2)
    for index, client_id in enumerate(self.client_ids):
      self.RunHunt(client_ids=[client_id], failrate=-1)
      with test_lib.FakeTime(100042 + index * 100):
        self.ProcessHuntOutputPlugins()

  self.Check(
      "ListHuntOutputPluginLogs",
      args=hunt_plugin.ApiListHuntOutputPluginLogsArgs(
          hunt_id=hunt_id, plugin_id="DummyHuntTestOutputPlugin_0"),
      replace={hunt_id: "H:123456"})
def testShowsNotificationIfArchiveStreamingFailsInProgress(self):
  pathspec = rdf_paths.PathSpec(
      path=os.path.join(self.base_path, "test.plist"),
      pathtype=rdf_paths.PathSpec.PathType.OS)
  session_id = flow_test_lib.TestFlowHelper(
      flows_transfer.GetFile.__name__,
      client_mock=self.action_mock,
      client_id=self.client_id,
      pathspec=pathspec,
      token=self.token)
  if not data_store.RelationalDBEnabled():
    session_id = session_id.Basename()

  def RaisingStub(*unused_args, **unused_kwargs):
    yield b"foo"
    yield b"bar"
    raise RuntimeError("something went wrong")

  with utils.Stubber(archive_generator.GetCompatClass(), "Generate",
                     RaisingStub):
    self.Open("/#/clients/%s" % self.client_id)

    self.Click("css=a[grrtarget='client.flows']")
    self.Click("css=td:contains('GetFile')")
    self.Click("link=Results")
    self.Click("css=button.DownloadButton")
    self.WaitUntil(self.IsUserNotificationPresent,
                   "Archive generation failed for flow %s" % session_id)
    # There will be no failure message, as we can't get a status from an
    # iframe that triggers the download.
    self.WaitUntilNot(self.IsTextPresent,
                      "Can't generate archive: Unknown error")
def testDownloadsSingleHuntFileIfAuthorizationIsPresent(self):
  hunt_urn = self._CreateHuntWithDownloadedFile()
  hunt_id = hunt_urn.Basename()
  results = self.GetHuntResults(hunt_urn)

  self.RequestAndGrantHuntApproval(hunt_id)

  self.Open("/")
  self.Click("css=a[grrtarget=hunts]")
  self.Click("css=td:contains('%s')" % hunt_id)
  self.Click("css=li[heading=Results]")

  if data_store.RelationalDBEnabled():
    fd = file_store.OpenFile(
        flow_export.CollectionItemToClientPath(results[0]))
  else:
    fd = aff4.FACTORY.Open(
        flow_export.CollectionItemToAff4Path(results[0]), token=self.token)

  with mock.patch.object(fd.__class__, "Read") as mock_obj:
    self.Click(
        "css=grr-results-collection button:has(span.glyphicon-download)")
    self.WaitUntil(lambda: mock_obj.called)
def AddResultsToHunt(self, hunt_id, client_id, values):
  if isinstance(client_id, rdfvalue.RDFURN):
    client_id = client_id.Basename()
  if isinstance(hunt_id, rdfvalue.RDFURN):
    hunt_id = hunt_id.Basename()

  if data_store.RelationalDBEnabled():
    flow_id = self._EnsureClientHasHunt(client_id, hunt_id)

    for value in values:
      data_store.REL_DB.WriteFlowResults([
          rdf_flow_objects.FlowResult(
              client_id=client_id,
              flow_id=flow_id,
              hunt_id=hunt_id,
              payload=value)
      ])
  else:
    collection = aff4.FACTORY.Open(
        rdfvalue.RDFURN("hunts").Add(hunt_id),
        token=self.token).ResultCollection()

    with data_store.DB.GetMutationPool() as pool:
      for value in values:
        collection.Add(
            rdf_flows.GrrMessage(payload=value, source=client_id),
            mutation_pool=pool)
def _GetPassword(self, username):
  if data_store.RelationalDBEnabled():
    user = data_store.REL_DB.ReadGRRUser(username)
    return user.password if user.HasField("password") else None
  else:
    user_obj = aff4.FACTORY.Open("aff4:/users/" + username, token=self.token)
    return user_obj.Get(user_obj.Schema.PASSWORD)
def setUp(self):
  super(ApprovalByLabelE2ETest, self).setUp()

  if data_store.AFF4Enabled():
    self.SetUpLegacy()
  if data_store.RelationalDBEnabled():
    self.SetUpRelationalDB()

  cls = api_call_router_with_approval_checks.ApiCallRouterWithApprovalChecks
  cls.ClearCache()

  approver = test_lib.ConfigOverrider({
      "API.DefaultRouter":
          compatibility.GetName(cls),
      "ACL.approvers_config_file":
          os.path.join(self.base_path, "approvers.yaml")
  })
  approver.Start()
  self.addCleanup(approver.Stop)

  # Get a fresh approval manager object and reload with test approvers.
  approval_manager_stubber = utils.Stubber(
      client_approval_auth, "CLIENT_APPROVAL_AUTH_MGR",
      client_approval_auth.ClientApprovalAuthorizationManager())
  approval_manager_stubber.Start()
  self.addCleanup(approval_manager_stubber.Stop)

  # Force creation of new APIAuthorizationManager, so that configuration
  # changes are picked up.
  api_auth_manager.InitializeApiAuthManager()