def Error(self, error_message=None, backtrace=None, status=None):
  """Terminates this flow with an error.

  Args:
    error_message: Human-readable description of the failure.
    backtrace: Optional traceback string captured at the failure site.
    status: Optional FlowStatus status value; defaults to ERROR when unset.
  """
  client_id = self.rdf_flow.client_id
  flow_id = self.rdf_flow.flow_id
  logging.error("Error in flow %s on %s: %s, %s", flow_id, client_id,
                error_message, backtrace)

  if self.rdf_flow.parent_flow_id:
    # Relay a terminal status response to the parent flow's pending request.
    status_msg = rdf_flow_objects.FlowStatus(
        client_id=client_id,
        flow_id=self.rdf_flow.parent_flow_id,
        request_id=self.rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        cpu_time_used=self.rdf_flow.cpu_time_used,
        network_bytes_sent=self.rdf_flow.network_bytes_sent,
        error_message=error_message,
        backtrace=backtrace)
    status_msg.status = (
        status
        if status is not None else rdf_flow_objects.FlowStatus.Status.ERROR)
    self.flow_responses.append(status_msg)

  self.rdf_flow.flow_state = self.rdf_flow.FlowState.ERROR
  if backtrace is not None:
    self.rdf_flow.backtrace = backtrace
  if error_message is not None:
    self.rdf_flow.error_message = error_message

  self.NotifyCreatorOfError()
def MarkDone(self, status=None):
  """Marks this flow as done."""
  FLOW_COMPLETIONS.Increment(fields=[compatibility.GetName(self.__class__)])

  # Tell our parent flow or hunt, if any, that this child has finished.
  if self.rdf_flow.parent_flow_id or self.rdf_flow.parent_hunt_id:
    # Use a name distinct from the (unused) `status` parameter for clarity.
    status_msg = rdf_flow_objects.FlowStatus(
        client_id=self.rdf_flow.client_id,
        request_id=self.rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        status=rdf_flow_objects.FlowStatus.Status.OK,
        cpu_time_used=self.rdf_flow.cpu_time_used,
        network_bytes_sent=self.rdf_flow.network_bytes_sent,
        runtime_us=self.rdf_flow.runtime_us,
        flow_id=self.rdf_flow.parent_flow_id)
    if self.rdf_flow.parent_flow_id:
      self.flow_responses.append(status_msg)
    elif self.rdf_flow.parent_hunt_id:
      hunt_obj = hunt.StopHuntIfCPUOrNetworkLimitsExceeded(
          self.rdf_flow.parent_hunt_id)
      hunt.CompleteHuntIfExpirationTimeReached(hunt_obj)

  self.rdf_flow.flow_state = self.rdf_flow.FlowState.FINISHED
  if self.ShouldSendNotifications():
    self.NotifyAboutEnd()
def MarkDone(self, status=None):
  """Marks this flow as done."""
  stats_collector_instance.Get().IncrementCounter(
      "flow_completions", fields=[compatibility.GetName(self.__class__)])

  # Notify our parent flow or hunt that we are done (if there's a parent flow
  # or hunt).
  if self.rdf_flow.parent_flow_id or self.rdf_flow.parent_hunt_id:
    # Use a name distinct from the (unused) `status` parameter for clarity.
    status_msg = rdf_flow_objects.FlowStatus(
        client_id=self.rdf_flow.client_id,
        request_id=self.rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        status=rdf_flow_objects.FlowStatus.Status.OK,
        cpu_time_used=self.rdf_flow.cpu_time_used,
        network_bytes_sent=self.rdf_flow.network_bytes_sent,
        flow_id=self.rdf_flow.parent_flow_id)
    if self.rdf_flow.parent_flow_id:
      self.flow_responses.append(status_msg)
    elif self.rdf_flow.parent_hunt_id:
      db_compat.ProcessHuntFlowDone(self.rdf_flow, status_msg=status_msg)

  self.rdf_flow.flow_state = self.rdf_flow.FlowState.FINISHED
  if self.ShouldSendNotifications():
    self.NotifyAboutEnd()
def _StartFlow(self, client_id, flow_cls, **kw):
  """Starts flow_cls on client_id and fakes a partially-answered request.

  Returns:
    The id of the started flow.
  """
  if data_store.RelationalDBEnabled():
    flow_id = flow.StartFlow(flow_cls=flow_cls, client_id=client_id, **kw)

    # Lease the client message.
    data_store.REL_DB.LeaseClientActionRequests(
        client_id, lease_time=rdfvalue.Duration("10000s"))

    # Write some responses. In the relational db, the client queue will be
    # cleaned up as soon as all responses are available. Therefore we cheat
    # here and make it look like the request needs more responses so it's not
    # considered complete.

    # Write the status first. This will mark the request as waiting for 2
    # responses.
    status = rdf_flow_objects.FlowStatus(
        client_id=client_id, flow_id=flow_id, request_id=1, response_id=2)
    data_store.REL_DB.WriteFlowResponses([status])

    # Now we read the request, adjust the number, and write it back.
    reqs = data_store.REL_DB.ReadAllFlowRequestsAndResponses(
        client_id, flow_id)
    req = reqs[0][0]
    req.nr_responses_expected = 99
    data_store.REL_DB.WriteFlowRequests([req])

    # This response now won't trigger any deletion of client messages.
    response = rdf_flow_objects.FlowResponse(
        client_id=client_id,
        flow_id=flow_id,
        request_id=1,
        response_id=1,
        payload=rdf_client.Process(name="test_process"))
    data_store.REL_DB.WriteFlowResponses([response])

    # This is not strictly needed as we don't display this information in the
    # UI.
    req.nr_responses_expected = 2
    data_store.REL_DB.WriteFlowRequests([req])

    return flow_id

  # Legacy AFF4 path.
  flow_id = flow.StartAFF4Flow(
      flow_name=compatibility.GetName(flow_cls),
      client_id=client_id,
      token=self.token,
      **kw).Basename()

  # Have the client write some responses.
  test_process = rdf_client.Process(name="test_process")
  mock = flow_test_lib.MockClient(
      client_id,
      action_mocks.ListProcessesMock([test_process]),
      token=self.token)
  mock.Next()

  return flow_id
def testInspect(self):
  """Test the inspect UI."""
  client_urn = self.SetupClient(0)
  client_id = client_urn.Basename()
  self.RequestAndGrantClientApproval(client_id)

  if data_store.RelationalDBEnabled():
    flow_id = flow.StartFlow(
        client_id=client_id, flow_cls=flow_discovery.Interrogate)
    status = rdf_flow_objects.FlowStatus(
        client_id=client_id, flow_id=flow_id, request_id=1, response_id=2)
    data_store.REL_DB.WriteFlowResponses([status])
  else:
    session_id = flow.StartAFF4Flow(
        client_id=client_urn,
        flow_name=flow_discovery.Interrogate.__name__,
        token=self.token)
    status = rdf_flows.GrrMessage(
        request_id=1,
        response_id=2,
        session_id=session_id,
        type=rdf_flows.GrrMessage.Type.STATUS,
        auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)
    with queue_manager.QueueManager(token=self.token) as manager:
      manager.QueueResponse(status)

  self.Open("/#/clients/%s/debug-requests" % client_id)

  # Check that we can see both requests and responses.
  self.WaitUntil(self.IsTextPresent, "GetPlatformInfo")
  self.WaitUntil(self.IsTextPresent, "GetConfig")
  self.WaitUntil(self.IsTextPresent, "EnumerateInterfaces")
  self.WaitUntil(self.IsTextPresent, "STATUS")
def Error(self,
          error_message: Optional[str] = None,
          backtrace: Optional[str] = None,
          status: Optional[rdf_structs.EnumNamedValue] = None) -> None:
  """Terminates this flow with an error.

  Args:
    error_message: Human-readable description of the failure.
    backtrace: Optional traceback string captured at the failure site.
    status: Optional FlowStatus status value; defaults to ERROR when unset.
  """
  FLOW_ERRORS.Increment(fields=[compatibility.GetName(self.__class__)])

  client_id = self.rdf_flow.client_id
  flow_id = self.rdf_flow.flow_id

  # backtrace is set for unexpected failures caught in a wildcard except
  # branch, thus these should be logged as error. backtrace is None for
  # faults that are anticipated in flows, thus should only be logged as
  # warning.
  if backtrace:
    logging.error("Error in flow %s on %s: %s, %s", flow_id, client_id,
                  error_message, backtrace)
  else:
    logging.warning("Error in flow %s on %s: %s:", flow_id, client_id,
                    error_message)

  if self.rdf_flow.parent_flow_id or self.rdf_flow.parent_hunt_id:
    status_msg = rdf_flow_objects.FlowStatus(
        client_id=client_id,
        request_id=self.rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        cpu_time_used=self.rdf_flow.cpu_time_used,
        network_bytes_sent=self.rdf_flow.network_bytes_sent,
        runtime_us=self.rdf_flow.runtime_us,
        error_message=error_message,
        flow_id=self.rdf_flow.parent_flow_id,
        backtrace=backtrace)
    status_msg.status = (
        status
        if status is not None else rdf_flow_objects.FlowStatus.Status.ERROR)
    if self.rdf_flow.parent_flow_id:
      self.flow_responses.append(status_msg)
    elif self.rdf_flow.parent_hunt_id:
      hunt.StopHuntIfCPUOrNetworkLimitsExceeded(self.rdf_flow.parent_hunt_id)

  self.rdf_flow.flow_state = self.rdf_flow.FlowState.ERROR
  if backtrace is not None:
    self.rdf_flow.backtrace = backtrace
  if error_message is not None:
    self.rdf_flow.error_message = error_message

  self.NotifyCreatorOfError()
def _ResponsesAndStatus(self, client_id, flow_id, request_id, num_responses):
  """Builds num_responses FlowResponses followed by the terminating status.

  Response ids run from 1 to num_responses; the FlowStatus takes the next id.
  """
  responses = [
      rdf_flow_objects.FlowResponse(
          client_id=client_id,
          flow_id=flow_id,
          request_id=request_id,
          response_id=response_id)
      for response_id in range(1, num_responses + 1)
  ]
  responses.append(
      rdf_flow_objects.FlowStatus(
          client_id=client_id,
          flow_id=flow_id,
          request_id=request_id,
          response_id=num_responses + 1))
  return responses
def Error(self,
          error_message=None,
          backtrace=None,
          status=None,
          exception_context=False):
  """Terminates this flow with an error.

  Args:
    error_message: Human-readable description of the failure.
    backtrace: Optional traceback string captured at the failure site.
    status: Optional FlowStatus status value; defaults to ERROR when unset.
    exception_context: True when called while handling an exception, in which
      case the traceback is taken from the active exception.
  """
  FLOW_ERRORS.Increment(fields=[compatibility.GetName(self.__class__)])

  client_id = self.rdf_flow.client_id
  flow_id = self.rdf_flow.flow_id

  if exception_context:
    # Have the log library fetch the backtrace from callers if we are
    # handling an exception.
    logging.exception("Error in flow %s on %s: %s:", flow_id, client_id,
                      error_message)
  else:
    logging.error("Error in flow %s on %s: %s, %s", flow_id, client_id,
                  error_message, backtrace)

  if self.rdf_flow.parent_flow_id or self.rdf_flow.parent_hunt_id:
    status_msg = rdf_flow_objects.FlowStatus(
        client_id=client_id,
        request_id=self.rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        cpu_time_used=self.rdf_flow.cpu_time_used,
        network_bytes_sent=self.rdf_flow.network_bytes_sent,
        error_message=error_message,
        flow_id=self.rdf_flow.parent_flow_id,
        backtrace=backtrace)
    status_msg.status = (
        status
        if status is not None else rdf_flow_objects.FlowStatus.Status.ERROR)
    if self.rdf_flow.parent_flow_id:
      self.flow_responses.append(status_msg)
    elif self.rdf_flow.parent_hunt_id:
      hunt.StopHuntIfCPUOrNetworkLimitsExceeded(self.rdf_flow.parent_hunt_id)

  self.rdf_flow.flow_state = self.rdf_flow.FlowState.ERROR
  if backtrace is not None:
    self.rdf_flow.backtrace = backtrace
  if error_message is not None:
    self.rdf_flow.error_message = error_message

  self.NotifyCreatorOfError()
def MarkDone(self, status=None):
  """Marks this flow as done."""
  # Notify our parent flow that we are done if we have one.
  if self.rdf_flow.parent_flow_id:
    done_status = rdf_flow_objects.FlowStatus(
        client_id=self.rdf_flow.client_id,
        flow_id=self.rdf_flow.parent_flow_id,
        request_id=self.rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        status=rdf_flow_objects.FlowStatus.Status.OK,
        cpu_time_used=self.rdf_flow.cpu_time_used,
        network_bytes_sent=self.rdf_flow.network_bytes_sent)
    self.flow_responses.append(done_status)

  self.rdf_flow.flow_state = self.rdf_flow.FlowState.FINISHED
  self.NotifyAboutEnd()
def Error(self, error_message=None, backtrace=None, status=None):
  """Terminates this flow with an error.

  Args:
    error_message: Human-readable description of the failure.
    backtrace: Optional traceback string captured at the failure site.
    status: Optional FlowStatus status value; defaults to ERROR when unset.
  """
  stats_collector_instance.Get().IncrementCounter(
      "flow_errors", fields=[compatibility.GetName(self.__class__)])

  client_id = self.rdf_flow.client_id
  flow_id = self.rdf_flow.flow_id
  logging.error("Error in flow %s on %s: %s, %s", flow_id, client_id,
                error_message, backtrace)

  if self.rdf_flow.parent_flow_id or self.rdf_flow.parent_hunt_id:
    status_msg = rdf_flow_objects.FlowStatus(
        client_id=client_id,
        request_id=self.rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        cpu_time_used=self.rdf_flow.cpu_time_used,
        network_bytes_sent=self.rdf_flow.network_bytes_sent,
        error_message=error_message,
        flow_id=self.rdf_flow.parent_flow_id,
        backtrace=backtrace)
    status_msg.status = (
        status
        if status is not None else rdf_flow_objects.FlowStatus.Status.ERROR)
    if self.rdf_flow.parent_flow_id:
      self.flow_responses.append(status_msg)
    elif self.rdf_flow.parent_hunt_id:
      db_compat.ProcessHuntFlowError(
          self.rdf_flow,
          error_message=error_message,
          backtrace=backtrace,
          status_msg=status_msg)

  self.rdf_flow.flow_state = self.rdf_flow.FlowState.ERROR
  if backtrace is not None:
    self.rdf_flow.backtrace = backtrace
  if error_message is not None:
    self.rdf_flow.error_message = error_message

  self.NotifyCreatorOfError()
def _StartFlow(self, client_id, flow_cls, **kw):
  """Starts flow_cls on client_id and fakes a partially-answered request.

  Returns:
    The id of the started flow.
  """
  flow_id = flow.StartFlow(flow_cls=flow_cls, client_id=client_id, **kw)

  # Lease the client message.
  data_store.REL_DB.LeaseClientActionRequests(
      client_id, lease_time=rdfvalue.Duration.From(10000, rdfvalue.SECONDS))

  # Write some responses. In the relational db, the client queue will be
  # cleaned up as soon as all responses are available. Therefore we cheat
  # here and make it look like the request needs more responses so it's not
  # considered complete.

  # Write the status first. This will mark the request as waiting for 2
  # responses.
  status = rdf_flow_objects.FlowStatus(
      client_id=client_id, flow_id=flow_id, request_id=1, response_id=2)
  data_store.REL_DB.WriteFlowResponses([status])

  # Now we read the request, adjust the number, and write it back.
  reqs = data_store.REL_DB.ReadAllFlowRequestsAndResponses(client_id, flow_id)
  req = reqs[0][0]
  req.nr_responses_expected = 99
  data_store.REL_DB.WriteFlowRequests([req])

  # This response now won't trigger any deletion of client messages.
  response = rdf_flow_objects.FlowResponse(
      client_id=client_id,
      flow_id=flow_id,
      request_id=1,
      response_id=1,
      payload=rdf_client.Process(name="test_process"))
  data_store.REL_DB.WriteFlowResponses([response])

  # This is not strictly needed as we don't display this information in the
  # UI.
  req.nr_responses_expected = 2
  data_store.REL_DB.WriteFlowRequests([req])

  return flow_id
def testInspect(self):
  """Test the inspect UI."""
  client_id = self.SetupClient(0)
  self.RequestAndGrantClientApproval(client_id)

  flow_id = flow.StartFlow(
      client_id=client_id, flow_cls=flow_discovery.Interrogate)
  status = rdf_flow_objects.FlowStatus(
      client_id=client_id, flow_id=flow_id, request_id=1, response_id=2)
  data_store.REL_DB.WriteFlowResponses([status])

  self.Open("/#/clients/%s/debug-requests" % client_id)

  # Check that we can see both requests and responses.
  for expected_text in ("GetPlatformInfo", "GetConfig",
                        "EnumerateInterfaces", "STATUS"):
    self.WaitUntil(self.IsTextPresent, expected_text)
def MarkDone(self, status=None):
  """Marks this flow as done."""
  # Notify our parent flow or hunt that we are done (if there's a parent flow
  # or hunt).
  if self.rdf_flow.parent_flow_id or self.rdf_flow.parent_hunt_id:
    # Use a name distinct from the (unused) `status` parameter for clarity.
    status_msg = rdf_flow_objects.FlowStatus(
        client_id=self.rdf_flow.client_id,
        request_id=self.rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        status=rdf_flow_objects.FlowStatus.Status.OK,
        cpu_time_used=self.rdf_flow.cpu_time_used,
        network_bytes_sent=self.rdf_flow.network_bytes_sent,
        flow_id=self.rdf_flow.parent_flow_id)
    if self.rdf_flow.parent_flow_id:
      self.flow_responses.append(status_msg)
    elif self.rdf_flow.parent_hunt_id:
      db_compat.ProcessHuntFlowDone(self.rdf_flow, status_msg=status_msg)

  self.rdf_flow.flow_state = self.rdf_flow.FlowState.FINISHED
  if self.ShouldSendNotifications():
    self.NotifyAboutEnd()
def testStatusMessagesCanBeWrittenAndRead(self):
  """Round-trips responses, an iterator and a status through the flow db."""
  client_id, flow_id = self._SetupClientAndFlow()

  request = rdf_flow_objects.FlowRequest(
      client_id=client_id,
      flow_id=flow_id,
      request_id=1,
      needs_processing=False)
  self.db.WriteFlowRequests([request])

  responses = [
      rdf_flow_objects.FlowResponse(
          client_id=client_id, flow_id=flow_id, request_id=1, response_id=i)
      for i in range(3)
  ]
  # Also store an Iterator, why not.
  responses.append(
      rdf_flow_objects.FlowIterator(
          client_id=client_id, flow_id=flow_id, request_id=1, response_id=3))
  responses.append(
      rdf_flow_objects.FlowStatus(
          client_id=client_id, flow_id=flow_id, request_id=1, response_id=4))
  self.db.WriteFlowResponses(responses)

  all_requests = self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id)
  self.assertEqual(len(all_requests), 1)

  _, read_responses = all_requests[0]
  # Responses come back keyed by response_id.
  self.assertEqual(list(read_responses), [0, 1, 2, 3, 4])
  for i in range(3):
    self.assertIsInstance(read_responses[i], rdf_flow_objects.FlowResponse)
  self.assertIsInstance(read_responses[3], rdf_flow_objects.FlowIterator)
  self.assertIsInstance(read_responses[4], rdf_flow_objects.FlowStatus)
def _StartFlow(self, client_id, flow_cls, **kw):
  """Starts flow_cls on client_id and writes fake client responses.

  Returns:
    The id of the started flow.
  """
  if data_store.RelationalDBFlowsEnabled():
    flow_id = flow.StartFlow(flow_cls=flow_cls, client_id=client_id, **kw)

    # Lease the client message.
    data_store.REL_DB.LeaseClientMessages(
        client_id, lease_time=rdfvalue.Duration("10000s"))

    # Write some responses.
    response = rdf_flow_objects.FlowResponse(
        client_id=client_id,
        flow_id=flow_id,
        request_id=1,
        response_id=1,
        payload=rdf_client.Process(name="test_process"))
    status = rdf_flow_objects.FlowStatus(
        client_id=client_id, flow_id=flow_id, request_id=1, response_id=2)
    data_store.REL_DB.WriteFlowResponses([response, status])

    return flow_id

  # Legacy AFF4 path.
  flow_id = flow.StartAFF4Flow(
      flow_name=compatibility.GetName(flow_cls),
      client_id=client_id,
      token=self.token,
      **kw).Basename()

  # Have the client write some responses.
  test_process = rdf_client.Process(name="test_process")
  mock = flow_test_lib.MockClient(
      client_id,
      action_mocks.ListProcessesMock([test_process]),
      token=self.token)
  mock.Next()

  return flow_id