def ProcessScanResults(self, responses):
  """Processes the results of the scan."""
  if not responses.success:
    raise flow_base.FlowError(responses.status)

  if not responses:
    # Clients (versions 3306 and above) only send back responses when
    # the full signature has been received.
    return

  regions_to_dump = collections.defaultdict(set)

  for response in responses:
    for match in response.matches:
      self.SendReply(match)

      rules = {m.rule_name for m in match.match}
      rules_string = ",".join(sorted(rules))
      logging.debug("YaraScan match in pid %d (%s) for rules %s.",
                    match.process.pid, match.process.exe, rules_string)

      if self.args.dump_process_on_match:
        for process_match in match.match:
          for string_match in process_match.string_matches:
            regions_to_dump[match.process.pid].add(string_match.offset)

    if self.args.include_errors_in_results:
      for error in response.errors:
        self.SendReply(error)

    if self.args.include_misses_in_results:
      for miss in response.misses:
        self.SendReply(miss)

  for pid, offsets in regions_to_dump.items():
    self.CallFlow(
        DumpProcessMemory.__name__,
        pids=[pid],
        prioritize_offsets=sorted(offsets),
        size_limit=self.args.process_dump_size_limit,
        skip_special_regions=self.args.skip_special_regions,
        skip_mapped_files=self.args.skip_mapped_files,
        skip_shared_regions=self.args.skip_shared_regions,
        skip_executable_regions=self.args.skip_executable_regions,
        skip_readonly_regions=self.args.skip_readonly_regions,
        next_state=compatibility.GetName(self.CheckDumpProcessMemoryResults))

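# Illustrative sketch, not part of the flow: the per-PID offset aggregation
# above reduces to the standalone pattern below (toy match tuples stand in
# for the YARA match protos; all values are hypothetical).
import collections

toy_matches = [
    (1234, [0x1000, 0x2400]),  # (pid, string-match offsets)
    (1234, [0x1000]),          # duplicate offsets collapse in the set
    (5678, [0x800]),
]

toy_regions_to_dump = collections.defaultdict(set)
for pid, offsets in toy_matches:
  toy_regions_to_dump[pid].update(offsets)

for pid, offsets in sorted(toy_regions_to_dump.items()):
  print(pid, sorted(offsets))  # 1234 [4096, 9216], then 5678 [2048]
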
def Done(self, responses): """Retrieves the output for the hack.""" response = responses.First() if not responses.success: raise flow_base.FlowError("Execute Python hack failed: %s" % responses.status) if response: result = response.return_val # Send reply with full data, but only log the first 200 bytes. str_result = result[0:200] if len(result) >= 200: str_result += "...[truncated]" self.Log("Result: %s" % str_result) result = ExecutePythonHackResult() result.result_string = response.return_val self.SendReply(result)
def IterateProcesses(self, responses):
  """Stores the processes."""
  if not responses.success:
    raise flow_base.FlowError("Error during process listing %s" %
                              responses.status)

  if self.args.fetch_binaries:
    # Filter out process entries without an "exe" attribute and
    # deduplicate the list.
    paths_to_fetch = set()
    for p in responses:
      if p.exe and self.args.filename_regex.Match(
          p.exe) and self._ConnectionStateMatch(p):
        paths_to_fetch.add(p.exe)
    paths_to_fetch = sorted(paths_to_fetch)

    self.Log("Got %d processes, fetching binaries for %d...", len(responses),
             len(paths_to_fetch))

    self.CallFlow(
        file_finder.FileFinder.__name__,
        paths=paths_to_fetch,
        action=rdf_file_finder.FileFinderAction.Download(),
        next_state="HandleDownloadedFiles")
  else:
    # Only send the list of processes if we don't fetch the binaries.
    skipped = 0
    for p in responses:
      # It's normal to have lots of sleeping processes with no executable
      # path associated.
      if p.exe:
        if self._FilenameMatch(p) and self._ConnectionStateMatch(p):
          self.SendReply(p)
      else:
        if self.args.connection_states:
          if self._ConnectionStateMatch(p):
            self.SendReply(p)
        else:
          skipped += 1

    if skipped:
      self.Log("Skipped %d entries with no executable path.", skipped)

def Process(
    self,
    responses: flow_responses.Responses[rdf_timeline.TimelineResult],
) -> None:
  if not responses.success:
    raise flow_base.FlowError(responses.status)

  blob_ids = []
  for response in responses:
    for blob_id in response.entry_batch_blob_ids:
      blob_ids.append(rdf_objects.BlobID(blob_id))

  data_store.BLOBS.WaitForBlobs(blob_ids, timeout=_BLOB_STORE_TIMEOUT)

  for response in responses:
    self.SendReply(response)
    self.state.progress.total_entry_count += response.entry_count

def Start(self): """The start method.""" # Catch signature issues early. rules = self.args.yara_signature.GetRules() if not list(rules): raise flow_base.FlowError( "No rules found in the signature specification.") # Same for regex errors. if self.args.process_regex: re.compile(self.args.process_regex) self.CallClient( server_stubs.YaraProcessScan, request=self.args, next_state="ProcessScanResults")
def StoreSystemRoot(self, responses):
  if not responses.success or not responses.First():
    if self.state.drive_letters:
      # We have at least one path that already has a drive letter, so log
      # rather than raise.
      self.Log("Error collecting SystemRoot artifact: %s", responses.status)
    else:
      raise flow_base.FlowError(
          "Error collecting SystemRoot artifact: %s" % responses.status)
  else:
    # Keep only the drive designator, e.g. "C:\Windows" -> "C:".
    drive = str(responses.First())[0:2]
    if drive:
      self.state.drive_letters.add(drive)
    else:
      self.Log("Bad result for systemdrive: %s", responses.First())

  self.CallStateInline(
      next_state=compatibility.GetName(self.CollectVolumeInfo))

def SendLastBlob(self, responses):
  """Sends the last blob."""
  if not responses.success:
    raise flow_base.FlowError("Error while calling UpdateAgent: %s" %
                              responses.status)

  blobs = list(self._BlobIterator(self._binary_id))
  offset = 0
  for b in blobs[:-1]:
    offset += len(b.data)

  self.CallClient(
      server_stubs.UpdateAgent,
      executable=blobs[-1],
      more_data=False,
      offset=offset,
      write_path=self.state.write_path,
      next_state=compatibility.GetName(self.Interrogate),
      use_client_env=False)

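# Illustrative sketch, not part of the flow: a quick sanity check of the
# offset arithmetic above (the same pattern recurs in the other SendLastBlob
# variants below). The final blob is written at an offset equal to the
# combined size of all preceding blobs; sizes here are hypothetical.
toy_blob_sizes = [512 * 1024, 512 * 1024, 128 * 1024]

final_offset = sum(toy_blob_sizes[:-1])
assert final_offset == 1024 * 1024  # the last blob lands at the 1 MiB mark
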
def Start(self): """The start method.""" python_hack_urn = signed_binary_utils.GetAFF4PythonHackRoot().Add( self.args.hack_name) try: blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN( python_hack_urn) except signed_binary_utils.SignedBinaryNotFoundError: raise flow_base.FlowError("Python hack %s not found." % self.args.hack_name) # TODO(amoser): This will break if someone wants to execute lots of Python. for python_blob in blob_iterator: self.CallClient(server_stubs.ExecutePython, python_code=python_blob, py_args=self.args.py_args, next_state=compatibility.GetName(self.Done))
def SendMail(self, responses):
  """Sends a mail when the client has responded."""
  if not responses.success:
    raise flow_base.FlowError("Error while pinging client.")

  client = data_store.REL_DB.ReadClientSnapshot(self.client_id)
  hostname = client.knowledge_base.fqdn

  subject = self.__class__.subject_template.render(hostname=hostname)
  body = self.__class__.template.render(
      client_id=self.client_id,
      admin_ui=config.CONFIG["AdminUI.url"],
      hostname=hostname,
      url="/clients/%s" % self.client_id,
      creator=self.token.username,
      signature=utils.SmartUnicode(config.CONFIG["Email.signature"]))

  email_alerts.EMAIL_ALERTER.SendEmail(
      self.args.email, "grr-noreply", subject, body, is_html=True)

def SendLastBlob(self, responses):
  """Sends the last blob."""
  if not responses.success:
    raise flow_base.FlowError("Error while calling UpdateAgent: %s" %
                              responses.status)

  binary_urn = rdfvalue.RDFURN(self.args.blob_path)
  blobs = list(self._BlobIterator(binary_urn))
  offset = 0
  for b in blobs[:-1]:
    offset += len(b.data)

  self.CallClient(
      server_stubs.UpdateAgent,
      executable=blobs[-1],
      more_data=False,
      offset=offset,
      write_path=self.state.write_path,
      next_state="Interrogate",
      use_client_env=False)

def StoreMBR(self, responses):
  """This method stores the MBR."""
  if not responses.success:
    msg = "Could not retrieve MBR: %s" % responses.status
    self.Log(msg)
    raise flow_base.FlowError(msg)

  response = responses.First()

  self.state.buffers.append(response.data)
  self.state.bytes_downloaded += len(response.data)

  if self.state.bytes_downloaded >= self.args.length:
    mbr_data = b"".join(self.state.buffers)
    self.state.buffers = None

    self.Log("Successfully collected the MBR (%d bytes)." % len(mbr_data))
    self.SendReply(rdfvalue.RDFBytes(mbr_data))

def Process(
    self,
    responses: flow_responses.Responses[rdf_osquery.OsqueryResult],
) -> None:
  if not responses.success:
    self._UpdateProgressWithError(responses.status)
    raise flow_base.FlowError(responses.status)

  self._UpdateProgress(responses)

  for response in responses:
    # Older agent versions might still send empty tables, so we simply
    # ignore them.
    if not response.table.rows:
      continue
    self.SendReply(response)

  self._FileCollectionFromColumns(responses)

def Callback(self, responses: _Responses) -> None:
  if not responses.success:
    raise flow_base.FlowError(
        f"Failed to start upload: {responses.status}")

  # TODO: Once progress updates are expected we should be fine
  # with more responses. For now though, only a single response is expected
  # and it should set the session URI.
  if "session_uri" in self.state:
    raise ValueError("Session URI already received.")

  if len(responses) != 1:
    raise ValueError(
        f"Unexpected number of responses: {len(responses)}")

  response = responses.First()
  if not isinstance(response, rdf_large_file.CollectLargeFileResult):
    raise TypeError(f"Unexpected response type: {type(response)}")

  self.state.session_uri = response.session_uri

def StoreNetstat(self, responses):
  """Collects the connections.

  Args:
    responses: A list of rdf_client_network.NetworkConnection objects.

  Raises:
    flow_base.FlowError: On failure to retrieve the connections.
  """
  if not responses.success:
    raise flow_base.FlowError("Failed to get connections. Err: {0}".format(
        responses.status))

  for response in responses:
    if self.args.listening_only and response.state != "LISTEN":
      continue
    self.SendReply(response)

  self.state.conn_count = len(responses)

def ProcessScanResults(self, responses):
  """Processes the results of the scan."""
  if not responses.success:
    raise flow_base.FlowError(responses.status)

  if not responses:
    # Clients (versions 3306 and above) only send back responses when
    # the full signature has been received.
    return

  pids_to_dump = set()

  for response in responses:
    for match in response.matches:
      self.SendReply(match)

      rules = {m.rule_name for m in match.match}
      rules_string = ",".join(sorted(rules))
      logging.debug("YaraScan match in pid %d (%s) for rules %s.",
                    match.process.pid, match.process.exe, rules_string)

      if self.args.dump_process_on_match:
        pids_to_dump.add(match.process.pid)

    if self.args.include_errors_in_results:
      for error in response.errors:
        self.SendReply(error)

    if self.args.include_misses_in_results:
      for miss in response.misses:
        self.SendReply(miss)

  if pids_to_dump:
    self.CallFlow(
        DumpProcessMemory.__name__,  # pylint: disable=undefined-variable
        pids=list(pids_to_dump),
        skip_special_regions=self.args.skip_special_regions,
        skip_mapped_files=self.args.skip_mapped_files,
        skip_shared_regions=self.args.skip_shared_regions,
        skip_executable_regions=self.args.skip_executable_regions,
        skip_readonly_regions=self.args.skip_readonly_regions,
        next_state=compatibility.GetName(
            self.CheckDumpProcessMemoryResults))

def CollectImage(self, responses):
  """Collects the image and stores it in the database."""
  # If we have any logs, forward them.
  for response in responses:
    if hasattr(response, "logs"):
      for log in response.logs:
        self.Log(log)

  if not responses.success:
    raise flow_base.FlowError(
        "Failed to dump the flash image: {0}".format(responses.status))
  elif not responses.First().path:
    self.Log("No path returned. Skipping host.")
    return
  else:
    image_path = responses.First().path
    self.CallFlow(
        transfer.MultiGetFile.__name__,
        pathspecs=[image_path],
        request_data={"image_path": image_path},
        next_state="DeleteTemporaryImage")

def SendLastBlob(self, responses):
  """Sends the last blob."""
  if not responses.success:
    raise flow_base.FlowError("Error while calling UpdateAgent: %s" %
                              responses.status)

  binary_urn = rdfvalue.RDFURN(self.args.binary)
  blobs = list(self._BlobIterator(binary_urn))
  offset = 0
  for b in blobs[:-1]:
    offset += len(b.data)

  self.CallClient(
      server_stubs.ExecuteBinaryCommand,
      executable=blobs[-1],
      more_data=False,
      args=shlex.split(self.args.command_line),
      offset=offset,
      write_path=self.state.write_path,
      next_state=compatibility.GetName(self.End),
      use_client_env=False)

def StoreResults(self, responses):
  """Stores the results returned by the client to the db."""
  if not responses.success:
    raise flow_base.FlowError(responses.status)

  self.state.files_found = len(responses)

  transferred_file_responses = []
  stat_entries = []
  for response in responses:
    if response.HasField("transferred_file"):
      transferred_file_responses.append(response)
    elif response.HasField("stat_entry"):
      stat_entries.append(response.stat_entry)

  self._WriteFilesContent(transferred_file_responses)
  self._WriteStatEntries(stat_entries)

  for response in responses:
    self.SendReply(response)

def Start(self): """Schedules the read in the client (ReadLowLevel ClientAction).""" # TODO: Set `blob_size` according to `sector_block_size`. request = rdf_read_low_level.ReadLowLevelRequest( path=self.args.path, length=self.args.length, offset=self.args.offset) if self.args.HasField("sector_block_size"): request.sector_block_size = self.args.sector_block_size if not self.client_version or self.client_version >= 3459: self.CallClient(server_stubs.ReadLowLevel, request, next_state=compatibility.GetName( self.StoreBlobsAsTmpFile)) else: raise flow_base.FlowError( "ReadLowLevel Flow is only supported on " "client version 3459 or higher (target client " f"version is {self.client_version}).")
def Start(self):
  super(BrowserHistoryFlow, self).Start()

  if not (self.args.collect_chrome or self.args.collect_firefox or
          self.args.collect_internet_explorer or self.args.collect_opera or
          self.args.collect_safari):
    raise flow_base.FlowError("Need to collect at least one type of history.")

  # Start a sub-flow for every browser to split results and progress in
  # the user interface more cleanly.
  if self.args.collect_chrome:
    self.CallFlow(
        collectors.ArtifactCollectorFlow.__name__,
        artifact_list=["ChromeHistory"],
        apply_parsers=False,
        next_state=self.ProcessArtifactResponses.__name__)
  if self.args.collect_firefox:
    self.CallFlow(
        collectors.ArtifactCollectorFlow.__name__,
        artifact_list=["FirefoxHistory"],
        apply_parsers=False,
        next_state=self.ProcessArtifactResponses.__name__)
  if self.args.collect_internet_explorer:
    self.CallFlow(
        collectors.ArtifactCollectorFlow.__name__,
        artifact_list=["InternetExplorerHistory"],
        apply_parsers=False,
        next_state=self.ProcessArtifactResponses.__name__)
  if self.args.collect_opera:
    self.CallFlow(
        collectors.ArtifactCollectorFlow.__name__,
        artifact_list=["OperaHistory"],
        apply_parsers=False,
        next_state=self.ProcessArtifactResponses.__name__)
  if self.args.collect_safari:
    self.CallFlow(
        collectors.ArtifactCollectorFlow.__name__,
        artifact_list=["SafariHistory"],
        apply_parsers=False,
        next_state=self.ProcessArtifactResponses.__name__)

def Start(self): """Determine the Firefox history directory.""" self.state.hist_count = 0 self.state.history_paths = [] if self.args.history_path: self.state.history_paths.append(self.args.history_path) else: self.state.history_paths = self.GuessHistoryPaths(self.args.username) if not self.state.history_paths: raise flow_base.FlowError("Could not find valid History paths.") filename = "places.sqlite" for path in self.state.history_paths: self.CallFlow( file_finder.FileFinder.__name__, paths=[os.path.join(path, "**2", filename)], pathtype=self.args.pathtype, action=rdf_file_finder.FileFinderAction.Download(), next_state=compatibility.GetName(self.ParseFiles))
def ProcessMemoryRegions(self, responses):
  if not responses.success:
    raise flow_base.FlowError(responses.status)

  if "YaraProcessDumpResponse" in responses.request_data:
    # On case-sensitive filesystems, the requested PathSpecs (located in
    # YaraProcessDumpResponse.dumped_processes[*].memory_regions[*].file)
    # might differ from the real location of the received MemoryRegion
    # (located in StatEntry.pathspec). Since MemoryRegions are later
    # identified case-sensitively by their filename in file_store.OpenFile(),
    # we need to align both PathSpecs to make sure we can actually find
    # MemoryRegions in file_store again.
    dump_response = responses.request_data["YaraProcessDumpResponse"]
    _ReplaceDumpPathspecsWithMultiGetFilePathspec(dump_response, responses)
    self.SendReply(dump_response)

  for response in responses:
    self.SendReply(response)
    self.CallClient(
        server_stubs.DeleteGRRTempFiles,
        response.pathspec,
        next_state="LogDeleteFiles")

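# Illustrative sketch, not part of the flow: the alignment idea behind
# _ReplaceDumpPathspecsWithMultiGetFilePathspec, with plain strings standing
# in for GRR PathSpec objects and a hypothetical helper name. The real
# implementation operates on RDF protos and may differ.
import os

def _toy_replace_dump_paths(requested_paths, received_paths):
  # Index the authoritative paths reported by MultiGetFile by lowercased
  # basename, then rewrite each requested path to the real on-disk one.
  by_name = {os.path.basename(p).lower(): p for p in received_paths}
  return [by_name.get(os.path.basename(p).lower(), p) for p in requested_paths]

requested = ["C:/Temp/PID_1234_Region_0.tmp"]
received = ["C:/temp/pid_1234_region_0.tmp"]  # what the client really wrote
print(_toy_replace_dump_paths(requested, received))
# -> ['C:/temp/pid_1234_region_0.tmp']
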
def Confirmation(self, responses):
  """Confirms that the config was written successfully."""
  if not responses.success:
    raise flow_base.FlowError(
        "Failed to write config. Err: {0}".format(responses.status))

def Done(self, responses):
  if not responses.success:
    raise flow_base.FlowError(str(responses.status))

  for response in responses:
    self.Log(response.data)

def TemporaryImageRemoved(self, responses):
  """Verify that the temporary image has been removed successfully."""
  if not responses.success:
    raise flow_base.FlowError(
        "Unable to delete the temporary flash image: %s" % responses.status)

def CheckDumpProcessMemoryResults(self, responses):
  if not responses.success:
    raise flow_base.FlowError(responses.status)

  for response in responses:
    self.SendReply(response)

def End(self, responses):
  if not responses.success:
    raise flow_base.FlowError(responses.status)

def ProcessArtifactResponses(self, responses):
  # Forward partial results even if the artifact collection failed overall.
  for response in responses:
    self.SendReply(response)

  if not responses.success:
    raise flow_base.FlowError(responses.status)

def Done(self, responses):
  if not responses.success:
    self.Log(responses.status.error_message)
    raise flow_base.FlowError(responses.status.error_message)

def LogDeleteFiles(self, responses):
  # Check that the DeleteFiles flow worked.
  if not responses.success:
    raise flow_base.FlowError("Could not delete file: %s" % responses.status)