def ListDeviceDirectories(self, responses):
  if not responses.success:
    raise flow.FlowError("Unable to query Volume Shadow Copy information.")

  for response in responses:
    device_object = response.GetItem("DeviceObject", "")
    global_root = r"\\?\GLOBALROOT\Device"

    if device_object.startswith(global_root):
      # The VSC device path is returned as \\?\GLOBALROOT\Device\
      # HarddiskVolumeShadowCopy1 and needs to be passed as
      # \\.\HarddiskVolumeShadowCopy1 to the ListDirectory flow.
      device_object = r"\\." + device_object[len(global_root):]

      path_spec = rdf_paths.PathSpec(
          path=device_object, pathtype=rdf_paths.PathSpec.PathType.OS)
      path_spec.Append(path="/", pathtype=rdf_paths.PathSpec.PathType.TSK)

      self.Log("Listing Volume Shadow Copy device: %s.", device_object)
      self.CallClient(
          server_stubs.ListDirectory,
          pathspec=path_spec,
          next_state="ProcessListDirectory")

      aff4path = path_spec.AFF4Path(self.client_urn)
      self.state.raw_device = aff4path.Dirname()
      self.state.shadows.append(aff4path)
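# A minimal sketch of the path translation above, using a hypothetical
# standalone helper (_TranslateVscPath is not part of the original flow) so
# the GLOBALROOT-to-device-namespace rewrite can be tested in isolation:
def _TranslateVscPath(device_object):
  # Rewrites \\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyN to
  # \\.\HarddiskVolumeShadowCopyN; other paths pass through unchanged.
  global_root = r"\\?\GLOBALROOT\Device"
  if device_object.startswith(global_root):
    return r"\\." + device_object[len(global_root):]
  return device_object

assert (_TranslateVscPath(r"\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1")
        == r"\\.\HarddiskVolumeShadowCopy1")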
def List(self, responses):
  """Collect the directory listing and store in the datastore."""
  if not responses.success:
    raise flow.FlowError(str(responses.status))

  self.Status("Listed %s", self.state.urn)

  with data_store.DB.GetMutationPool() as pool:
    with aff4.FACTORY.Create(
        self.state.urn,
        standard.VFSDirectory,
        mode="w",
        mutation_pool=pool,
        token=self.token) as fd:
      fd.Set(fd.Schema.PATHSPEC(self.state.stat.pathspec))
      fd.Set(fd.Schema.STAT(self.state.stat))

    if data_store.RelationalDBWriteEnabled():
      path_info = rdf_objects.PathInfo.FromStatEntry(self.state.stat)
      data_store.REL_DB.WritePathInfos(self.client_id.Basename(), [path_info])

    stat_entries = list(map(rdf_client.StatEntry, responses))
    WriteStatEntries(
        stat_entries,
        client_id=self.client_id,
        mutation_pool=pool,
        token=self.token)

    for stat_entry in stat_entries:
      self.SendReply(stat_entry)  # Send stats to parent flows.
def Start(self):
  """The start method."""
  binary_urn = rdfvalue.RDFURN(self.args.binary)
  try:
    blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinary(
        binary_urn, token=self.token)
  except signed_binary_utils.SignedBinaryNotFoundError:
    raise flow.FlowError("Executable binary %s not found." % self.args.binary)

  try:
    current_blob = next(blob_iterator)
  except StopIteration:
    current_blob = None

  offset = 0
  write_path = "%d" % time.time()
  while current_blob is not None:
    try:
      next_blob = next(blob_iterator)
    except StopIteration:
      next_blob = None

    self.CallClient(
        server_stubs.ExecuteBinaryCommand,
        executable=current_blob,
        more_data=next_blob is not None,
        args=shlex.split(self.args.command_line),
        offset=offset,
        write_path=write_path,
        next_state="End")

    offset += len(current_blob.data)
    current_blob = next_blob
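# The loop above uses a one-element look-ahead so that every chunk can be
# sent with a more_data flag and a running offset. A self-contained sketch of
# that pattern, assuming nothing from GRR (Blob and send are illustrative
# stand-ins, not GRR APIs):
import collections

Blob = collections.namedtuple("Blob", ["data"])

def stream_blobs(blob_iterator, send):
  # Emits each blob with its byte offset and whether more data follows.
  current = next(blob_iterator, None)
  offset = 0
  while current is not None:
    next_blob = next(blob_iterator, None)
    send(current, offset=offset, more_data=next_blob is not None)
    offset += len(current.data)
    current = next_blob

# Three chunks yield offsets 0, 3, 6 and more_data True, True, False.
sent = []
stream_blobs(
    iter([Blob(b"abc"), Blob(b"def"), Blob(b"gh")]),
    lambda blob, offset, more_data: sent.append((offset, more_data)))
assert sent == [(0, True), (3, True), (6, False)]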
def StoreMBR(self, responses):
  """This method stores the MBR."""
  if not responses.success:
    msg = "Could not retrieve MBR: %s" % responses.status
    self.Log(msg)
    raise flow.FlowError(msg)

  response = responses.First()

  self.state.buffers.append(response.data)
  self.state.bytes_downloaded += len(response.data)

  if self.state.bytes_downloaded >= self.args.length:
    mbr_data = b"".join(self.state.buffers)
    self.state.buffers = None

    if data_store.AFF4Enabled():
      with aff4.FACTORY.Create(
          self.client_urn.Add("mbr"),
          aff4_grr.VFSFile,
          mode="w",
          token=self.token) as mbr:
        mbr.write(mbr_data)

    self.Log("Successfully collected the MBR (%d bytes)." % len(mbr_data))
    self.SendReply(rdfvalue.RDFBytes(mbr_data))
def SendMail(self, responses):
  """Sends a mail when the client has responded."""
  if responses.success:
    if data_store.RelationalDBReadEnabled():
      client = data_store.REL_DB.ReadClientSnapshot(self.client_id)
      hostname = client.knowledge_base.fqdn
    else:
      client = aff4.FACTORY.Open(self.client_id, token=self.token)
      hostname = client.Get(client.Schema.FQDN)

    subject = self.__class__.subject_template.render(hostname=hostname)
    body = self.__class__.template.render(
        client_id=self.client_id,
        admin_ui=config.CONFIG["AdminUI.url"],
        hostname=hostname,
        url="/clients/%s" % self.client_id,
        creator=self.token.username,
        signature=utils.SmartUnicode(config.CONFIG["Email.signature"]))

    email_alerts.EMAIL_ALERTER.SendEmail(
        self.args.email,
        "grr-noreply",
        utils.SmartStr(subject),
        utils.SmartStr(body),
        is_html=True)
  else:
    raise flow.FlowError("Error while pinging client.")
def Interrogate(self, responses):
  if not responses.success:
    raise flow.FlowError("Installer reported an error: %s" % responses.status)

  self.Log("Installer completed.")
  self.CallFlow(discovery.Interrogate.__name__, next_state="Done")
def ProcessScanResults(self, responses):
  if not responses.success:
    raise flow.FlowError(responses.status)

  pids_to_dump = set()

  for response in responses:
    for match in response.matches:
      self.SendReply(match)

      rules = set([m.rule_name for m in match.match])
      rules_string = ",".join(sorted(rules))
      logging.debug("YaraScan match in pid %d (%s) for rules %s.",
                    match.process.pid, match.process.exe, rules_string)

      if self.args.dump_process_on_match:
        pids_to_dump.add(match.process.pid)

    if self.args.include_errors_in_results:
      for error in response.errors:
        self.SendReply(error)

    if self.args.include_misses_in_results:
      for miss in response.misses:
        self.SendReply(miss)

  if pids_to_dump:
    self.CallFlow(
        YaraDumpProcessMemory.__name__,  # pylint: disable=undefined-variable
        pids=list(pids_to_dump),
        skip_special_regions=self.args.skip_special_regions,
        skip_mapped_files=self.args.skip_mapped_files,
        skip_shared_regions=self.args.skip_shared_regions,
        skip_executable_regions=self.args.skip_executable_regions,
        skip_readonly_regions=self.args.skip_readonly_regions,
        next_state="CheckDumpProcessMemoryResults")
def SendBlobs(self, responses):
  """Sends all blobs that are not the first or the last."""
  if not responses.success:
    raise flow.FlowError("Error while calling UpdateAgent: %s" %
                         responses.status)

  binary_urn = rdfvalue.RDFURN(self.args.blob_path)
  blobs = list(self._BlobIterator(binary_urn))
  to_send = blobs[1:-1]

  if not to_send:
    self.CallStateInline(next_state="SendLastBlob")
    return

  offset = len(blobs[0].data)
  for i, blob in enumerate(to_send):
    if i == len(to_send) - 1:
      next_state = "SendLastBlob"
    else:
      next_state = "CheckUpdateAgent"

    self.CallClient(
        server_stubs.UpdateAgent,
        executable=blob,
        more_data=True,
        offset=offset,
        write_path=self.state.write_path,
        next_state=next_state,
        use_client_env=False)

    offset += len(blob.data)
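# Offset bookkeeping across states: Start writes blob 0 at offset 0, so the
# middle blobs here begin at len(blobs[0].data) and advance by each blob's
# size, i.e. each offset is a running sum. A hedged sketch with invented
# chunk sizes:
blob_sizes = [512 * 1024, 512 * 1024, 512 * 1024, 128 * 1024]

# The write offset for each chunk is the sum of all earlier chunk sizes.
offsets = [sum(blob_sizes[:i]) for i in range(len(blob_sizes))]
assert offsets == [0, 524288, 1048576, 1572864]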
def ProcessResults(self, responses):
  if not responses.success:
    raise flow.FlowError(responses.status)

  response = responses.First()
  self.SendReply(response)

  for error in response.errors:
    p = error.process
    self.Log("Error dumping process %s (pid %d): %s" %
             (p.name, p.pid, error.error))

  dump_files_to_get = []
  for dumped_process in response.dumped_processes:
    p = dumped_process.process
    self.Log("Getting %d dump files for process %s (pid %d)." %
             (len(dumped_process.dump_files), p.name, p.pid))
    for pathspec in dumped_process.dump_files:
      dump_files_to_get.append(pathspec)

  if not dump_files_to_get:
    self.Log("No memory dumped, exiting.")
    return

  self.CallFlow(
      transfer.MultiGetFile.__name__,
      pathspecs=dump_files_to_get,
      file_size=1024 * 1024 * 1024,
      next_state="DeleteFiles")
def ProcessFingerprint(self, responses):
  """Store the fingerprint response."""
  if not responses.success:
    # It's better to raise than to merely log, since the error will make it
    # into the flow's protobuf and users can inspect why this flow failed.
    raise flow.FlowError("Could not fingerprint file: %s" % responses.status)

  response = responses.First()
  if response.pathspec.path:
    pathspec = response.pathspec
  else:
    pathspec = self.args.pathspec

  self.state.urn = pathspec.AFF4Path(self.client_id)

  with aff4.FACTORY.Create(
      self.state.urn, aff4_grr.VFSFile, mode="w", token=self.token) as fd:
    hash_obj = response.hash
    fd.Set(fd.Schema.HASH, hash_obj)

  if data_store.RelationalDBWriteEnabled():
    path_info = rdf_objects.PathInfo.FromPathSpec(pathspec)
    path_info.hash_entry = response.hash
    data_store.REL_DB.WritePathInfos(self.client_id.Basename(), [path_info])

  self.ReceiveFileFingerprint(
      self.state.urn, hash_obj, request_data=responses.request_data)
def End(self, responses):
  del responses

  if not self.state.shadows:
    raise flow.FlowError(
        "No Volume Shadow Copies were found.\n"
        "The volume may have no Volume Shadow Copies (as on Windows versions "
        "prior to Vista) or the Volume Shadow Copy Service may be disabled.")
def Start(self):
  """Determine the Chrome directory."""
  self.state.hist_count = 0
  # List of paths where history files are located.
  self.state.history_paths = []
  if self.args.history_path:
    self.state.history_paths.append(self.args.history_path)

  if not self.state.history_paths:
    self.state.history_paths = self.GuessHistoryPaths(self.args.username)

  if not self.state.history_paths:
    raise flow.FlowError("Could not find valid History paths.")

  filenames = ["History"]
  if self.args.get_archive:
    filenames.append("Archived History")

  for path in self.state.history_paths:
    for fname in filenames:
      self.CallFlow(
          file_finder.FileFinder.__name__,
          paths=[os.path.join(path, fname)],
          pathtype=self.args.pathtype,
          action=rdf_file_finder.FileFinderAction.Download(),
          next_state="ParseFiles")
def ProcessKnowledgeBase(self, responses):
  """Collect and store any extra non-kb artifacts."""
  if not responses.success:
    raise flow.FlowError(
        "Error while collecting the knowledge base: %s" % responses.status)

  kb = responses.First()

  if data_store.AFF4Enabled():
    # AFF4 client.
    client = self._OpenClient(mode="rw")
    with client:
      client.Set(client.Schema.KNOWLEDGE_BASE, kb)

      # Copy usernames.
      usernames = [user.username for user in kb.users if user.username]
      client.AddAttribute(client.Schema.USERNAMES(" ".join(usernames)))

      self.CopyOSReleaseFromKnowledgeBase(kb, client)

  # rdf_objects.ClientSnapshot.
  # Information already present in the knowledge base takes precedence.
  if not kb.os:
    kb.os = self.state.os
  if not kb.fqdn:
    kb.fqdn = self.state.fqdn
  self.state.client.knowledge_base = kb

  if data_store.RelationalDBReadEnabled():
    existing_client = data_store.REL_DB.ReadClientSnapshot(self.client_id)
    if existing_client is None:
      # This is the first time we interrogate this client. In that case, we
      # need to store basic information about this client right away so that
      # follow-up flows work properly.
      data_store.REL_DB.WriteClientSnapshot(self.state.client)

  self.CallFlow(
      collectors.ArtifactCollectorFlow.__name__,
      artifact_list=config.CONFIG["Artifacts.non_kb_interrogate_artifacts"],
      knowledge_base=kb,
      next_state="ProcessArtifactResponses")

  if data_store.AFF4Enabled():
    # Update the client index for the AFF4 client.
    client_index.CreateClientIndex(token=self.token).AddClient(client)

  if data_store.RelationalDBWriteEnabled():
    try:
      # Update the client index for the rdf_objects.ClientSnapshot.
      client_index.ClientIndex().AddClient(self.state.client)
    except db.UnknownClientError:
      pass
def _BlobIterator(self, binary_urn):
  try:
    blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinary(
        binary_urn, token=self.token)
  except signed_binary_utils.SignedBinaryNotFoundError:
    raise flow.FlowError("%s is not a valid signed binary." %
                         self.args.blob_path)
  return blob_iterator
def End(self, responses):
  # These artifacts are so core to everything that, if this fails, we want
  # to raise and kill any further collection.
  if not self.state.success:
    raise flow.FlowError("Couldn't guess the system root and drive location.")

  super(SystemRootSystemDriveFallbackFlow, self).End(responses)
def _BlobIterator(self, binary_urn):
  try:
    blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinary(
        binary_urn, token=self.token)
  except signed_binary_utils.SignedBinaryNotFoundError:
    raise flow.FlowError("Executable binary %s not found." % self.args.binary)
  return blob_iterator
def ProcessCollected(self, responses):
  if not responses.success:
    self.Log("Artifact data collection failed. Status: %s.", responses.status)
    raise flow.FlowError(responses.status)

  self.Log("Artifact data collection completed successfully.")
  for response in responses:
    self._ParseResponse(response)
def Sleep(self, responses):
  if not responses.success:
    self.Log(responses.status.error_message)
    raise flow.FlowError(responses.status.error_message)

  if rdfvalue.RDFDatetime.Now() < self.state.end_time - self.sleep_time:
    start_time = rdfvalue.RDFDatetime.Now() + self.sleep_time
    self.CallState(next_state="SendMessage", start_time=start_time)
def DeleteFiles(self, responses):
  if not responses.success:
    raise flow.FlowError(responses.status)

  for response in responses:
    self.SendReply(response)

    self.CallClient(
        server_stubs.DeleteGRRTempFiles,
        response.pathspec,
        next_state="LogDeleteFiles")
def _ReleaseProcessedFlow(self, flow_obj):
  rdf_flow = flow_obj.rdf_flow
  if rdf_flow.processing_deadline < rdfvalue.RDFDatetime.Now():
    raise flow.FlowError(
        "Lease expired for flow %s on %s (%s)." %
        (rdf_flow.flow_id, rdf_flow.client_id, rdf_flow.processing_deadline))

  flow_obj.FlushQueuedMessages()
  return data_store.REL_DB.ReleaseProcessedFlow(rdf_flow)
def ProcessBase(self, responses):
  """Process any retrieved artifacts."""
  artifact_name = responses.request_data["artifact_name"]
  self.state.in_flight_artifacts.remove(artifact_name)
  self.state.completed_artifacts.append(artifact_name)

  if not responses.success:
    self.Log("Failed to get artifact %s. Status: %s", artifact_name,
             responses.status)
  else:
    deps = self.SetKBValue(responses.request_data["artifact_name"], responses)
    if deps:
      # If we fulfilled a dependency, make sure we have collected all
      # artifacts that provide the dependency before marking it as fulfilled.
      for dep in deps:
        required_artifacts = artifact_registry.REGISTRY.GetArtifactNames(
            os_name=self.state.knowledge_base.os, provides=[dep])
        if required_artifacts.issubset(self.state.completed_artifacts):
          self.state.fulfilled_deps.append(dep)
        else:
          self.state.partial_fulfilled_deps.add(dep)
    else:
      self.Log("Failed to get artifact %s. Artifact failed to return value.",
               artifact_name)

  if self.state.awaiting_deps_artifacts:
    # Schedule any new artifacts for which we have now fulfilled dependencies.
    self._ScheduleCollection()

    # If we fail to fulfill deps for things we're supposed to collect, raise
    # an error.
    if (self.state.awaiting_deps_artifacts and
        not self.state.in_flight_artifacts):
      missing_deps = list(
          self.state.all_deps.difference(self.state.fulfilled_deps))

      if self.args.require_complete:
        raise flow.FlowError(
            "KnowledgeBase initialization failed as the following artifacts "
            "had dependencies that could not be fulfilled %s. Missing: %s" %
            ([utils.SmartStr(a) for a in self.state.awaiting_deps_artifacts],
             missing_deps))
      else:
        self.Log(
            "Storing incomplete KnowledgeBase. The following artifacts had "
            "dependencies that could not be fulfilled %s. Missing: %s. "
            "Completed: %s" % (self.state.awaiting_deps_artifacts,
                               missing_deps, self.state.completed_artifacts))
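# A miniature of the dependency bookkeeping above: a dependency only counts
# as fulfilled once every artifact that provides it has completed. The
# provider map and artifact names below are illustrative, not real GRR
# artifacts:
providers = {"users.homedir": {"WindowsRegistryProfiles", "UsersDirectory"}}
completed_artifacts = {"WindowsRegistryProfiles"}

def dep_fulfilled(dep):
  # Fulfilled only when all providing artifacts have completed.
  return providers[dep].issubset(completed_artifacts)

assert not dep_fulfilled("users.homedir")  # one provider still outstanding
completed_artifacts.add("UsersDirectory")
assert dep_fulfilled("users.homedir")      # both providers have completed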
def SaveResourceUsage(self, status):
  """Method to tally resources."""
  user_cpu = status.cpu_time_used.user_cpu_time
  system_cpu = status.cpu_time_used.system_cpu_time
  self.rdf_flow.cpu_time_used.user_cpu_time += user_cpu
  self.rdf_flow.cpu_time_used.system_cpu_time += system_cpu

  self.rdf_flow.network_bytes_sent += status.network_bytes_sent

  if self.rdf_flow.cpu_limit:
    user_cpu_total = self.rdf_flow.cpu_time_used.user_cpu_time
    system_cpu_total = self.rdf_flow.cpu_time_used.system_cpu_time
    if self.rdf_flow.cpu_limit < (user_cpu_total + system_cpu_total):
      # We have exceeded our limit, stop this flow.
      raise flow.FlowError("CPU limit exceeded.")

  if (self.rdf_flow.network_bytes_limit and
      self.rdf_flow.network_bytes_limit < self.rdf_flow.network_bytes_sent):
    # We have exceeded our byte limit, stop this flow.
    raise flow.FlowError("Network bytes limit exceeded.")
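# Note that both limits are compared against cumulative totals across all
# client responses, not against any single response. A toy illustration of
# that semantics with invented numbers:
cpu_limit = 60.0  # seconds (hypothetical flow limit)
cpu_used = 0.0
statuses = [(10.0, 5.0), (20.0, 10.0), (15.0, 5.0)]  # (user, system) per status
try:
  for user_cpu, system_cpu in statuses:
    cpu_used += user_cpu + system_cpu
    if cpu_limit < cpu_used:
      raise RuntimeError("CPU limit exceeded.")
except RuntimeError as e:
  print(e)  # raised on the third status, once the total reaches 65 seconds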
def InitializeKnowledgeBase(self):
  """Get the existing KB or create a new one if none exists."""
  self.client = aff4.FACTORY.Open(self.client_id, token=self.token)

  # Always create a new KB to override any old values.
  self.state.knowledge_base = rdf_client.KnowledgeBase()
  SetCoreGRRKnowledgeBaseValues(self.state.knowledge_base, self.client)
  if not self.state.knowledge_base.os:
    # If we don't know what OS this is, there is no way to proceed.
    raise flow.FlowError("Client OS not set for: %s, cannot initialize"
                         " KnowledgeBase" % self.client_id)
def Done(self, responses):
  response = responses.First()
  if not responses.success:
    raise flow.FlowError("Execute Python hack failed: %s" % responses.status)

  if response:
    result = utils.SmartStr(response.return_val)
    # Send reply with full data, but only log the first 200 bytes.
    str_result = result[0:200]
    if len(result) >= 200:
      str_result += "...[truncated]"
    self.Log("Result: %s" % str_result)
    self.SendReply(rdfvalue.RDFBytes(utils.SmartStr(response.return_val)))
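# A quick illustration of the truncation rule above: only the first 200
# bytes are logged, with a marker appended when output was cut (the input is
# invented):
result = "x" * 450  # hypothetical hack output
str_result = result[0:200]
if len(result) >= 200:
  str_result += "...[truncated]"
assert str_result == "x" * 200 + "...[truncated]"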
def CheckFreeSpace(self, responses):
  if responses.success and responses.First():
    disk_usage = responses.First()
    if disk_usage.free < self.state.memory_size:
      raise flow.FlowError(
          "Free space may be too low for local copy. Free space for path %s "
          "is %s bytes. Mem size is: %s bytes. Override with "
          "check_disk_free_space=False." %
          (disk_usage.path, disk_usage.free, self.state.memory_size))
  else:
    logging.error("Couldn't determine free disk space for temporary files.")

  self.RunRekallPlugin()
def Stat(self, responses):
  """Save stat information on the directory."""
  # Did it work?
  if not responses.success:
    raise flow.FlowError("Could not stat directory: %s" % responses.status)

  # Keep the stat response for later.
  stat_entry = rdf_client_fs.StatEntry(responses.First())
  self.state.stat = stat_entry

  # The full path of the object is the combination of the client_id and the
  # path.
  self.state.urn = stat_entry.pathspec.AFF4Path(self.client_urn)
def Start(self):
  """Start."""
  binary_path = self.args.blob_path
  if not binary_path:
    raise flow.FlowError("Please specify an installer binary.")

  binary_urn = rdfvalue.RDFURN(binary_path)
  try:
    blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinary(
        binary_urn, token=self.token)
  except signed_binary_utils.SignedBinaryNotFoundError:
    raise flow.FlowError("%s is not a valid signed binary." % binary_path)

  offset = 0
  write_path = "%d_%s" % (time.time(), binary_urn.Basename())

  try:
    current_blob = next(blob_iterator)
  except StopIteration:
    current_blob = None

  while current_blob is not None:
    try:
      next_blob = next(blob_iterator)
    except StopIteration:
      next_blob = None
    more_data = next_blob is not None

    self.CallClient(
        server_stubs.UpdateAgent,
        executable=current_blob,
        more_data=more_data,
        offset=offset,
        write_path=write_path,
        next_state=("CheckUpdateAgent" if more_data else "Interrogate"),
        use_client_env=False)

    offset += len(current_blob.data)
    current_blob = next_blob
def Start(self):
  """Start."""
  if not self.args.blob_path:
    raise flow.FlowError("Installer binary path is not specified.")

  binary_urn = rdfvalue.RDFURN(self.args.blob_path)
  self.state.write_path = "%d_%s" % (int(time.time()), binary_urn.Basename())

  blob_iterator = self._BlobIterator(binary_urn)
  try:
    first_blob = next(blob_iterator)
  except StopIteration:
    raise flow.FlowError("File %s is empty." % self.args.blob_path)

  try:
    next(blob_iterator)
  except StopIteration:
    # This is the simple case where the binary fits into a single blob. We
    # can do all the work in a single call to the client.
    self.CallClient(
        server_stubs.UpdateAgent,
        executable=first_blob,
        more_data=False,
        offset=0,
        write_path=self.state.write_path,
        next_state="Interrogate",
        use_client_env=False)
    return

  self.CallClient(
      server_stubs.UpdateAgent,
      executable=first_blob,
      more_data=True,
      offset=0,
      write_path=self.state.write_path,
      next_state="SendBlobs",
      use_client_env=False)
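# The two next() calls above implement a one-element probe: the first fetches
# the initial blob, the second only checks whether any more blobs exist. A
# small sketch of that probe; the probed element is discarded, matching the
# original, which re-fetches the blob list later in SendBlobs:
def is_single_chunk(iterator):
  # Returns (first_item, True) when the iterator holds exactly one item.
  first = next(iterator)         # raises StopIteration on an empty iterator
  probe = next(iterator, None)   # peek; None means no further chunks
  return first, probe is None

assert is_single_chunk(iter([b"only"])) == (b"only", True)
assert is_single_chunk(iter([b"a", b"b"])) == (b"a", False)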
def DeleteFiles(self, responses):
  # Check that the MultiGetFile flow worked.
  if not responses.success:
    raise flow.FlowError("Could not get files: %s" % responses.status)

  for output_file in self.state.output_files:
    self.CallClient(
        server_stubs.DeleteGRRTempFiles,
        output_file,
        next_state="LogDeleteFiles")

  # Let calling flows know where files ended up in AFF4 space.
  self.SendReply(
      rdf_rekall_types.RekallResponse(
          downloaded_files=[x.AFF4Path(self.client_urn) for x in responses]))
def Start(self):
  python_hack_root_urn = config.CONFIG.Get("Config.python_hack_root")
  fd = aff4.FACTORY.Open(
      python_hack_root_urn.Add(self.args.hack_name), token=self.token)

  if not isinstance(fd, collects.GRRSignedBlob):
    raise flow.FlowError("Python hack %s not found." % self.args.hack_name)

  # TODO(amoser): This will break if someone wants to execute lots of Python.
  for python_blob in fd:
    self.CallClient(
        server_stubs.ExecutePython,
        python_code=python_blob,
        py_args=self.args.py_args,
        next_state="Done")