def ProcessHuntFlowDone(flow_obj, status_msg=None):
  """Notifies hunt about a given hunt-induced flow completion.

  Args:
    flow_obj: Flow object of the completed hunt-induced flow.
    status_msg: Optional status message carrying resource usage of the
      completed flow; passed through to SaveResourceUsage.
  """
  # Relational (non-legacy) hunts only need limit/expiration checks; the
  # per-client accounting below is specific to legacy AFF4 hunts.
  if not hunt.IsLegacyHunt(flow_obj.parent_hunt_id):
    hunt_obj = hunt.StopHuntIfCPUOrNetworkLimitsExceeded(
        flow_obj.parent_hunt_id)
    hunt.CompleteHuntIfExpirationTimeReached(hunt_obj)
    return

  hunt_urn = rdfvalue.RDFURN("hunts").Add(flow_obj.parent_hunt_id)
  client_urn = rdf_client.ClientURN(flow_obj.client_id)

  # Update the counter metrics separately from collections to minimize
  # contention.
  with aff4.FACTORY.Open(hunt_urn, mode="rw") as fd:
    # Legacy AFF4 code expects token to be set.
    fd.token = access_control.ACLToken(username=fd.creator)

    if flow_obj.num_replies_sent:
      fd.context.clients_with_results_count += 1

    fd.context.completed_clients_count += 1
    fd.context.results_count += flow_obj.num_replies_sent

    fd.GetRunner().SaveResourceUsage(flow_obj.client_id, status_msg)

  # Second, separate transaction: register the client in the hunt's
  # completion collections and re-check average-based limits.
  with aff4.FACTORY.Open(hunt_urn, mode="rw") as fd:
    # Legacy AFF4 code expects token to be set.
    fd.token = access_control.ACLToken(username=fd.creator)

    fd.RegisterCompletedClient(client_urn)
    if flow_obj.num_replies_sent:
      fd.RegisterClientWithResults(client_urn)

    fd.StopHuntIfAverageLimitsExceeded()
def MarkDone(self, status=None):
  """Marks this flow as done."""
  FLOW_COMPLETIONS.Increment(fields=[compatibility.GetName(self.__class__)])

  rdf_flow = self.rdf_flow
  parent_flow_id = rdf_flow.parent_flow_id
  parent_hunt_id = rdf_flow.parent_hunt_id

  # If there is a parent flow or hunt, notify it of our completion.
  if parent_flow_id or parent_hunt_id:
    completion_status = rdf_flow_objects.FlowStatus(
        client_id=rdf_flow.client_id,
        request_id=rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        status=rdf_flow_objects.FlowStatus.Status.OK,
        cpu_time_used=rdf_flow.cpu_time_used,
        network_bytes_sent=rdf_flow.network_bytes_sent,
        runtime_us=rdf_flow.runtime_us,
        flow_id=parent_flow_id)
    if parent_flow_id:
      # Parent flow: deliver the status as a queued response.
      self.flow_responses.append(completion_status)
    elif parent_hunt_id:
      # Parent hunt: re-check resource and expiration limits.
      hunt_obj = hunt.StopHuntIfCPUOrNetworkLimitsExceeded(parent_hunt_id)
      hunt.CompleteHuntIfExpirationTimeReached(hunt_obj)

  rdf_flow.flow_state = rdf_flow.FlowState.FINISHED

  if self.ShouldSendNotifications():
    self.NotifyAboutEnd()
def ProcessHuntFlowError(flow_obj,
                         error_message=None,
                         backtrace=None,
                         status_msg=None):
  """Processes error and status message for a given hunt-induced flow."""
  parent_hunt_id = flow_obj.parent_hunt_id

  # Relational (non-legacy) hunts only need their limits re-checked.
  if not hunt.IsLegacyHunt(parent_hunt_id):
    hunt.StopHuntIfCPUOrNetworkLimitsExceeded(parent_hunt_id)
    return

  hunt_urn = rdfvalue.RDFURN("hunts").Add(parent_hunt_id)
  client_urn = rdf_client.ClientURN(flow_obj.client_id)

  hunt_error = rdf_hunts.HuntError(
      client_id=flow_obj.client_id, backtrace=backtrace)
  if error_message is not None:
    hunt_error.log_message = error_message

  # Record the error and mark the client as completed in one mutation pool.
  with data_store.DB.GetMutationPool() as pool:
    grr_collections.HuntErrorCollection.StaticAdd(
        hunt_urn.Add("ErrorClients"), hunt_error, mutation_pool=pool)
    grr_collections.ClientUrnCollection.StaticAdd(
        hunt_urn.Add("CompletedClients"), client_urn, mutation_pool=pool)

  if status_msg is None:
    return

  with aff4.FACTORY.Open(hunt_urn, mode="rw") as fd:
    # Legacy AFF4 code expects token to be set.
    fd.token = access_control.ACLToken(username=fd.creator)
    fd.GetRunner().SaveResourceUsage(flow_obj.client_id, status_msg)
def WriteHuntResults(client_id, hunt_id, responses):
  """Writes hunt results from a given client as part of a given hunt.

  Args:
    client_id: Client id of the client the responses came from.
    hunt_id: Id of the hunt the responses belong to.
    responses: Iterable of flow responses; FlowStatus entries are skipped
      when writing to the legacy result collections.
  """
  if not hunt.IsLegacyHunt(hunt_id):
    data_store.REL_DB.WriteFlowResults(responses)
    hunt.StopHuntIfCPUOrNetworkLimitsExceeded(hunt_id)
    return

  hunt_id_urn = rdfvalue.RDFURN("hunts").Add(hunt_id)

  # FlowStatus responses carry no result payload; only real results become
  # hunt result messages.
  msgs = [
      rdf_flows.GrrMessage(payload=response.payload, source=client_id)
      for response in responses
      if not isinstance(response, rdf_flow_objects.FlowStatus)
  ]

  with data_store.DB.GetMutationPool() as pool:
    for msg in msgs:
      hunts_results.HuntResultCollection.StaticAdd(
          hunt_id_urn.Add("Results"), msg, mutation_pool=pool)
    for msg in msgs:
      multi_type_collection.MultiTypeCollection.StaticAdd(
          hunt_id_urn.Add("ResultsPerType"), msg, mutation_pool=pool)

  # Count only the messages actually added to the result collections.
  # Previously the counter was incremented by len(responses), which also
  # included the skipped FlowStatus entries and over-counted.
  stats_collector_instance.Get().IncrementCounter(
      "hunt_results_added", delta=len(msgs))
def Error(self,
          error_message: Optional[str] = None,
          backtrace: Optional[str] = None,
          status: Optional[rdf_structs.EnumNamedValue] = None) -> None:
  """Terminates this flow with an error."""
  FLOW_ERRORS.Increment(fields=[compatibility.GetName(self.__class__)])

  rdf_flow = self.rdf_flow
  client_id = rdf_flow.client_id
  flow_id = rdf_flow.flow_id

  # A set backtrace means an unexpected failure caught in a wildcard except
  # branch -> log as error. A None backtrace means a fault the flow
  # anticipated -> log as warning only.
  if backtrace:
    logging.error("Error in flow %s on %s: %s, %s", flow_id, client_id,
                  error_message, backtrace)
  else:
    logging.warning("Error in flow %s on %s: %s:", flow_id, client_id,
                    error_message)

  if rdf_flow.parent_flow_id or rdf_flow.parent_hunt_id:
    status_msg = rdf_flow_objects.FlowStatus(
        client_id=client_id,
        request_id=rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        cpu_time_used=rdf_flow.cpu_time_used,
        network_bytes_sent=rdf_flow.network_bytes_sent,
        runtime_us=rdf_flow.runtime_us,
        error_message=error_message,
        flow_id=rdf_flow.parent_flow_id,
        backtrace=backtrace)
    status_msg.status = (
        status
        if status is not None else rdf_flow_objects.FlowStatus.Status.ERROR)

    if rdf_flow.parent_flow_id:
      # Parent flow gets the error status as a queued response.
      self.flow_responses.append(status_msg)
    elif rdf_flow.parent_hunt_id:
      hunt.StopHuntIfCPUOrNetworkLimitsExceeded(rdf_flow.parent_hunt_id)

  rdf_flow.flow_state = rdf_flow.FlowState.ERROR
  if backtrace is not None:
    rdf_flow.backtrace = backtrace
  if error_message is not None:
    rdf_flow.error_message = error_message

  self.NotifyCreatorOfError()
def Error(self,
          error_message=None,
          backtrace=None,
          status=None,
          exception_context=False):
  """Terminates this flow with an error."""
  FLOW_ERRORS.Increment(fields=[compatibility.GetName(self.__class__)])

  rdf_flow = self.rdf_flow
  client_id = rdf_flow.client_id
  flow_id = rdf_flow.flow_id

  if exception_context:
    # Have the log library fetch the backtrace from callers if we are
    # handling an exception.
    logging.exception("Error in flow %s on %s: %s:", flow_id, client_id,
                      error_message)
  else:
    logging.error("Error in flow %s on %s: %s, %s", flow_id, client_id,
                  error_message, backtrace)

  if rdf_flow.parent_flow_id or rdf_flow.parent_hunt_id:
    status_msg = rdf_flow_objects.FlowStatus(
        client_id=client_id,
        request_id=rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        cpu_time_used=rdf_flow.cpu_time_used,
        network_bytes_sent=rdf_flow.network_bytes_sent,
        error_message=error_message,
        flow_id=rdf_flow.parent_flow_id,
        backtrace=backtrace)
    status_msg.status = (
        status
        if status is not None else rdf_flow_objects.FlowStatus.Status.ERROR)

    if rdf_flow.parent_flow_id:
      # Parent flow gets the error status as a queued response.
      self.flow_responses.append(status_msg)
    elif rdf_flow.parent_hunt_id:
      hunt.StopHuntIfCPUOrNetworkLimitsExceeded(rdf_flow.parent_hunt_id)

  rdf_flow.flow_state = rdf_flow.FlowState.ERROR
  if backtrace is not None:
    rdf_flow.backtrace = backtrace
  if error_message is not None:
    rdf_flow.error_message = error_message

  self.NotifyCreatorOfError()
def FlushQueuedMessages(self) -> None:
  """Flushes queued messages.

  Writes all batched flow requests, flow responses, client action requests,
  completed-request deletions and result replies to the data store (or
  routes them through Fleetspeak where applicable), clearing each queue
  after it is flushed.
  """
  # TODO(amoser): This could be done in a single db call, might be worth
  # optimizing.
  if self.flow_requests:
    data_store.REL_DB.WriteFlowRequests(self.flow_requests)
    self.flow_requests = []

  if self.flow_responses:
    data_store.REL_DB.WriteFlowResponses(self.flow_responses)
    self.flow_responses = []

  if self.client_action_requests:
    client_id = self.rdf_flow.client_id
    if fleetspeak_utils.IsFleetspeakEnabledClient(client_id):
      # Fleetspeak clients receive action requests as GRR messages routed
      # through Fleetspeak rather than the client message queue.
      for request in self.client_action_requests:
        msg = rdf_flow_objects.GRRMessageFromClientActionRequest(request)
        fleetspeak_utils.SendGrrMessageThroughFleetspeak(client_id, msg)
    else:
      data_store.REL_DB.WriteClientActionRequests(self.client_action_requests)
    self.client_action_requests = []

  if self.completed_requests:
    data_store.REL_DB.DeleteFlowRequests(self.completed_requests)
    self.completed_requests = []

  if self.replies_to_write:
    # Results are written to REL_DB for nested and top-level flows alike;
    # both branches of the previous code issued the identical write, so it
    # is hoisted out of the conditional.
    data_store.REL_DB.WriteFlowResults(self.replies_to_write)
    # For top-level hunt-induced flows, additionally re-check the hunt's
    # CPU/network limits now that more results exist.
    if self.rdf_flow.parent_hunt_id:
      hunt.StopHuntIfCPUOrNetworkLimitsExceeded(self.rdf_flow.parent_hunt_id)
    self.replies_to_write = []
def Error(self, error_message=None, backtrace=None, status=None):
  """Terminates this flow with an error."""
  stats_collector_instance.Get().IncrementCounter(
      "flow_errors", fields=[compatibility.GetName(self.__class__)])

  rdf_flow = self.rdf_flow
  client_id = rdf_flow.client_id
  flow_id = rdf_flow.flow_id

  logging.error("Error in flow %s on %s: %s, %s", flow_id, client_id,
                error_message, backtrace)

  parent_flow_id = rdf_flow.parent_flow_id
  parent_hunt_id = rdf_flow.parent_hunt_id
  if parent_flow_id or parent_hunt_id:
    status_msg = rdf_flow_objects.FlowStatus(
        client_id=client_id,
        request_id=rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        cpu_time_used=rdf_flow.cpu_time_used,
        network_bytes_sent=rdf_flow.network_bytes_sent,
        error_message=error_message,
        flow_id=parent_flow_id,
        backtrace=backtrace)
    status_msg.status = (
        status
        if status is not None else rdf_flow_objects.FlowStatus.Status.ERROR)

    if parent_flow_id:
      # Parent flow gets the error status as a queued response.
      self.flow_responses.append(status_msg)
    elif parent_hunt_id:
      hunt.StopHuntIfCPUOrNetworkLimitsExceeded(parent_hunt_id)

  rdf_flow.flow_state = rdf_flow.FlowState.ERROR
  if backtrace is not None:
    rdf_flow.backtrace = backtrace
  if error_message is not None:
    rdf_flow.error_message = error_message

  self.NotifyCreatorOfError()