def StatFile(self, list_dir_req):
  if list_dir_req.pathspec.path == "/proc/kcore":
    result = rdf_client.StatEntry(pathspec=list_dir_req.pathspec, st_mode=400)
    status = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
    return [result, status]

  raise IOError("Not found.")
def RekallAction(self, rekall_request):
  if rekall_request.device.path != "/proc/kcore":
    return [rdf_flows.GrrStatus(
        status=rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR,
        error_message="Should use kcore device when present.")]

  response = rdf_rekall_types.RekallResponse(json_messages="{}")
  return [response, rdf_client.Iterator(state="FINISHED")]
def ReceiveMessages(self, client_id, messages):
  """Receives and processes the messages from the source.

  For each message we update the request object, and place the response in
  that request's queue. If the request is complete, we send a message to the
  worker.

  Args:
    client_id: The client which sent the messages.
    messages: A list of GrrMessage RDFValues.
  """
  now = time.time()
  with queue_manager.QueueManager(
      token=self.token, store=self.data_store) as manager:
    sessions_handled = []
    for session_id, msgs in utils.GroupBy(
        messages, operator.attrgetter("session_id")).iteritems():

      # Remove and handle messages to WellKnownFlows
      unprocessed_msgs = self.HandleWellKnownFlows(msgs)

      if not unprocessed_msgs:
        continue

      # Keep track of all the flows we handled in this request.
      sessions_handled.append(session_id)

      for msg in unprocessed_msgs:
        manager.QueueResponse(session_id, msg)

      for msg in unprocessed_msgs:
        # Messages for well known flows should notify even though they don't
        # have a status.
        if msg.request_id == 0:
          manager.QueueNotification(
              session_id=msg.session_id, priority=msg.priority)
          # Those messages are all the same, one notification is enough.
          break

        elif msg.type == rdf_flows.GrrMessage.Type.STATUS:
          # If we receive a status message from the client it means the client
          # has finished processing this request. We therefore can de-queue it
          # from the client queue. msg.task_id will raise if the task id is
          # not set (message originated at the client, there was no request on
          # the server), so we check HasTaskID() first.
          if msg.HasTaskID():
            manager.DeQueueClientRequest(client_id, msg.task_id)

          manager.QueueNotification(
              session_id=msg.session_id,
              priority=msg.priority,
              last_status=msg.request_id)

          stat = rdf_flows.GrrStatus(msg.payload)
          if stat.status == rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED:
            # A client crashed while performing an action, fire an event.
            events.Events.PublishEvent(
                "ClientCrash", rdf_flows.GrrMessage(msg), token=self.token)

  logging.debug("Received %s messages in %s sec", len(messages),
                time.time() - now)
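For reference, the grouping step above batches the incoming messages per flow before any of them are queued. The helper below is a hypothetical, stand-alone sketch of that behaviour, not GRR source; it only assumes that utils.GroupBy groups items by a key function while preserving order within each group.

# Hypothetical sketch of the per-session grouping used in ReceiveMessages
# above. group_by_session_id is a made-up name for illustration only.
import operator


def group_by_session_id(messages, key=operator.attrgetter("session_id")):
  """Groups messages by session_id, preserving arrival order per group."""
  groups = {}
  for msg in messages:
    groups.setdefault(key(msg), []).append(msg)
  return groups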
def InstallerNotifyServer():
  """An emergency function invoked when the client installation failed."""
  # We make a temporary emergency config file to contain the new client id.
  # Note that the notification callback does not really mean anything to us,
  # since the client is not installed and we don't have basic interrogate
  # information.
  config_lib.CONFIG.SetWriteBack("temp.yaml")

  try:
    log_data = open(config_lib.CONFIG["Installer.logfile"], "rb").read()
  except (IOError, OSError):
    log_data = ""

  # Start the client and send the server a message, then terminate. The
  # private key may be empty if we did not install properly yet. In this case,
  # the client will automatically generate a random client ID and private key
  # (and the message will be unauthenticated since we never enrolled).
  comms.CommsInit().RunOnce()

  client = comms.GRRHTTPClient(
      ca_cert=config_lib.CONFIG["CA.certificate"],
      private_key=config_lib.CONFIG.Get("Client.private_key"))
  client.client_worker.SendReply(
      session_id=rdf_flows.FlowSessionID(flow_name="InstallationFailed"),
      message_type=rdf_flows.GrrMessage.Type.STATUS,
      request_id=0,
      response_id=0,
      rdf_value=rdf_flows.GrrStatus(
          status=rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR,
          error_message="Installation failed.",
          backtrace=log_data[-10000:]))

  client.RunOnce()
def testUnauthenticated(self):
  """What happens if an unauthenticated message is sent to the client?

  RuntimeError needs to be issued, and the client needs to send a GrrStatus
  message with the traceback in it.
  """
  # Push a request on it
  message = rdf_flows.GrrMessage(
      name="MockAction",
      session_id=self.session_id,
      auth_state=rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED,
      request_id=1,
      generate_task_id=True)

  self.context.HandleMessage(message)

  # We expect to receive a GrrStatus to indicate an exception was raised:
  # check the response - a single status message.
  message_list = self.context.Drain().job
  self.assertEqual(len(message_list), 1)
  self.assertEqual(message_list[0].session_id, self.session_id)
  self.assertEqual(message_list[0].response_id, 1)

  status = rdf_flows.GrrStatus(message_list[0].payload)
  self.assertIn("not Authenticated", status.error_message)
  self.assertIn("RuntimeError", status.error_message)
  self.assertNotEqual(status.status, rdf_flows.GrrStatus.ReturnedStatus.OK)
def SendOKStatus(self, response_id, session_id):
  """Send a message to the flow."""
  message = rdf_flows.GrrMessage(
      request_id=1,
      response_id=response_id,
      session_id=session_id,
      type=rdf_flows.GrrMessage.Type.STATUS,
      auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)

  status = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
  message.payload = status

  self.SendMessage(message)

  # Now also set the state on the RequestState
  request_state, _ = data_store.DB.Resolve(
      message.session_id.Add("state"),
      queue_manager.QueueManager.FLOW_REQUEST_TEMPLATE % message.request_id,
      token=self.token)

  request_state = rdf_flows.RequestState(request_state)
  request_state.status = status

  data_store.DB.Set(
      message.session_id.Add("state"),
      queue_manager.QueueManager.FLOW_REQUEST_TEMPLATE % message.request_id,
      request_state,
      token=self.token)

  return message
def StatFile(self, args):
  """StatFile action mock."""
  req = rdf_client.ListDirRequest(args)

  response = rdf_client.StatEntry(
      pathspec=req.pathspec,
      st_mode=33184,
      st_ino=1063090,
      st_dev=64512L,
      st_nlink=1,
      st_uid=139592,
      st_gid=5000,
      st_size=len(self.data),
      st_atime=1336469177,
      st_mtime=1336129892,
      st_ctime=1336129892)

  self.responses += 1
  self.count += 1

  # Create status message to report sample resource usage
  status = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
  status.cpu_time_used.user_cpu_time = self.responses
  status.cpu_time_used.system_cpu_time = self.responses * 2
  status.network_bytes_sent = self.responses * 3

  # Every "failrate" client does not have this file.
  if self.count == self.failrate:
    self.count = 0
    return [status]

  return [response, status]
def OnStartup(self):
  """A handler that is called on client startup."""
  # We read the transaction log and fail any requests that are in it. If
  # there is anything in the transaction log we assume it's there because we
  # crashed last time and let the server know.
  last_request = self.nanny_controller.GetTransactionLog()
  if last_request:
    status = rdf_flows.GrrStatus(
        status=rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED,
        error_message="Client killed during transaction")
    nanny_status = self.nanny_controller.GetNannyStatus()
    if nanny_status:
      status.nanny_status = nanny_status

    self.SendReply(
        status,
        request_id=last_request.request_id,
        response_id=1,
        session_id=last_request.session_id,
        message_type=rdf_flows.GrrMessage.Type.STATUS)

  self.nanny_controller.CleanTransactionLog()

  # Inform the server that we started.
  action_cls = actions.ActionPlugin.classes.get("SendStartupInfo",
                                                actions.ActionPlugin)
  action = action_cls(grr_worker=self)
  action.Run(None, ttl=1)
def SendResponse(self, session_id, data, client_id=None, well_known=False):
  if not isinstance(data, rdfvalue.RDFValue):
    data = rdf_protodict.DataBlob(string=data)
  if well_known:
    request_id, response_id = 0, 12345
  else:
    request_id, response_id = 1, 1

  with queue_manager.QueueManager(token=self.token) as flow_manager:
    flow_manager.QueueResponse(
        session_id,
        rdf_flows.GrrMessage(
            source=client_id,
            session_id=session_id,
            payload=data,
            request_id=request_id,
            response_id=response_id))
    if not well_known:
      # For normal flows we have to send a status as well.
      flow_manager.QueueResponse(
          session_id,
          rdf_flows.GrrMessage(
              source=client_id,
              session_id=session_id,
              payload=rdf_flows.GrrStatus(
                  status=rdf_flows.GrrStatus.ReturnedStatus.OK),
              request_id=request_id,
              response_id=response_id + 1,
              type=rdf_flows.GrrMessage.Type.STATUS))

    flow_manager.QueueNotification(session_id=session_id)
    timestamp = flow_manager.frozen_timestamp

  return timestamp
def testReceiveUnsolicitedClientMessage(self):
  client_id = test_lib.TEST_CLIENT_ID
  flow_obj = self.FlowSetup(
      flow_test_lib.FlowOrderTest.__name__, client_id=client_id)

  session_id = flow_obj.session_id
  status = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
  messages = [
      # This message has no task_id set...
      rdf_flows.GrrMessage(
          request_id=1,
          response_id=1,
          session_id=session_id,
          payload=rdfvalue.RDFInteger(1),
          task_id=15),
      rdf_flows.GrrMessage(
          request_id=1,
          response_id=2,
          session_id=session_id,
          payload=status,
          type=rdf_flows.GrrMessage.Type.STATUS)
  ]

  self.server.ReceiveMessages(client_id, messages)

  manager = queue_manager.QueueManager(token=self.token)
  completed = list(manager.FetchCompletedRequests(session_id))
  self.assertEqual(len(completed), 1)
def Error(self, backtrace, client_id=None, status=None):
  """Kills this flow with an error."""
  client_id = client_id or self.runner_args.client_id
  if self.IsRunning():
    # Set an error status
    reply = rdf_flows.GrrStatus()
    if status is None:
      reply.status = rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR
    else:
      reply.status = status

    if backtrace:
      reply.error_message = backtrace

    self.flow_obj.Terminate(status=reply)

    self.context.state = rdf_flows.FlowContext.State.ERROR

    if backtrace:
      logging.error("Error in flow %s (%s). Trace: %s", self.session_id,
                    client_id, backtrace)
      self.context.backtrace = backtrace
    else:
      logging.error("Error in flow %s (%s).", self.session_id, client_id)

    self.Notify("FlowStatus", client_id,
                "Flow (%s) terminated due to error" % self.session_id)
def _SendTerminationMessage(self, status=None):
  """This notifies the parent flow of our termination."""
  if not self.runner_args.request_state.session_id:
    # No parent flow, nothing to do here.
    return

  if status is None:
    status = rdf_flows.GrrStatus()

  client_resources = self.context.client_resources
  user_cpu = client_resources.cpu_usage.user_cpu_time
  sys_cpu = client_resources.cpu_usage.system_cpu_time
  status.cpu_time_used.user_cpu_time = user_cpu
  status.cpu_time_used.system_cpu_time = sys_cpu
  status.network_bytes_sent = self.context.network_bytes_sent
  status.child_session_id = self.session_id

  request_state = self.runner_args.request_state
  request_state.response_count += 1

  # Make a response message
  msg = rdf_flows.GrrMessage(
      session_id=request_state.session_id,
      request_id=request_state.id,
      response_id=request_state.response_count,
      auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
      type=rdf_flows.GrrMessage.Type.STATUS,
      payload=status)

  # Queue the response now
  self.queue_manager.QueueResponse(msg)
  self.QueueNotification(session_id=request_state.session_id)
def Error(self, backtrace, client_id=None, status_code=None):
  """Terminates this flow with an error."""
  try:
    self.queue_manager.DestroyFlowStates(self.session_id)
  except queue_manager.MoreDataException:
    pass

  if not self.IsRunning():
    return

  # Set an error status
  reply = rdf_flows.GrrStatus()
  if status_code is None:
    reply.status = rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR
  else:
    reply.status = status_code

  client_id = client_id or self.runner_args.client_id

  if backtrace:
    reply.error_message = backtrace
    logging.error("Error in flow %s (%s). Trace: %s", self.session_id,
                  client_id, backtrace)
    self.context.backtrace = backtrace
  else:
    logging.error("Error in flow %s (%s).", self.session_id, client_id)

  self._SendTerminationMessage(reply)

  self.context.state = rdf_flows.FlowContext.State.ERROR
  self.Notify("FlowStatus", client_id,
              "Flow (%s) terminated due to error" % self.session_id)

  self.flow_obj.Flush()
def Terminate(self, status=None):
  """Terminates this flow."""
  try:
    self.queue_manager.DestroyFlowStates(self.session_id)
  except queue_manager.MoreDataException:
    pass

  # This flow might already not be running.
  if self.context.state != rdf_flows.Flow.State.RUNNING:
    return

  try:
    # Close off the output collection.
    if self.output and len(self.output):
      self.output.Close()
      logging.info("%s flow results written to %s", len(self.output),
                   self.output.urn)
      self.output = None
  except access_control.UnauthorizedAccess:
    # This might fail if the output has a pickled token.
    pass

  if self.args.request_state.session_id:
    logging.debug("Terminating flow %s", self.session_id)

    # Make a response or use the existing one.
    response = status or rdf_flows.GrrStatus()

    client_resources = self.context.client_resources
    user_cpu = client_resources.cpu_usage.user_cpu_time
    sys_cpu = client_resources.cpu_usage.system_cpu_time
    response.cpu_time_used.user_cpu_time = user_cpu
    response.cpu_time_used.system_cpu_time = sys_cpu
    response.network_bytes_sent = self.context.network_bytes_sent
    response.child_session_id = self.session_id

    request_state = self.args.request_state
    request_state.response_count += 1

    # Make a response message
    msg = rdf_flows.GrrMessage(
        session_id=request_state.session_id,
        request_id=request_state.id,
        response_id=request_state.response_count,
        auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
        type=rdf_flows.GrrMessage.Type.STATUS,
        payload=response)

    try:
      # Queue the response now
      self.queue_manager.QueueResponse(request_state.session_id, msg)
    finally:
      self.QueueNotification(session_id=request_state.session_id)

  # Mark as terminated.
  self.context.state = rdf_flows.Flow.State.TERMINATED
  self.flow_obj.Flush()
def SendToServer(self):
  """Schedule some packets from client to server."""
  # Generate some client traffic
  for i in range(0, 10):
    self.client_communicator.client_worker.SendReply(
        rdf_flows.GrrStatus(),
        session_id=rdfvalue.SessionID("W:session"),
        response_id=i,
        request_id=1)
def __init__(self, grr_worker=None):
  """Initializes the action plugin.

  Args:
    grr_worker: The grr client worker object which may be used to
                e.g. send new actions on.
  """
  self.grr_worker = grr_worker
  self.response_id = INITIAL_RESPONSE_ID
  self.cpu_used = None
  self.nanny_controller = None
  self.status = flows.GrrStatus(status=flows.GrrStatus.ReturnedStatus.OK)
def RunAction(self, action_cls, arg=None, grr_worker=None):
  if arg is None:
    arg = rdf_flows.GrrMessage()

  self.results = []
  action = self._GetActionInstance(action_cls, grr_worker=grr_worker)

  action.status = rdf_flows.GrrStatus(
      status=rdf_flows.GrrStatus.ReturnedStatus.OK)
  action.Run(arg)

  return self.results
def SendOKStatus(self, response_id, session_id):
  """Send a message to the flow."""
  message = rdf_flows.GrrMessage(
      request_id=1,
      response_id=response_id,
      session_id=session_id,
      type=rdf_flows.GrrMessage.Type.STATUS,
      auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)

  status = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
  message.payload = status

  self.SendMessage(message)
def testEqualTimestampNotifications(self):
  frontend_server = front_end.FrontEndServer(
      certificate=config.CONFIG["Frontend.certificate"],
      private_key=config.CONFIG["PrivateKeys.server_key"],
      message_expiry_time=100,
      threadpool_prefix="notification-test")

  # This schedules 10 requests.
  session_id = flow.GRRFlow.StartFlow(
      client_id=self.client_id,
      flow_name="WorkerSendingTestFlow",
      token=self.token)

  # We pretend that the client processed all the 10 requests at once and
  # sends the replies in a single http poll.
  messages = [
      rdf_flows.GrrMessage(
          request_id=i,
          response_id=1,
          session_id=session_id,
          payload=rdf_protodict.DataBlob(string="test%s" % i),
          auth_state="AUTHENTICATED",
          generate_task_id=True) for i in range(1, 11)
  ]
  status = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
  statuses = [
      rdf_flows.GrrMessage(
          request_id=i,
          response_id=2,
          session_id=session_id,
          payload=status,
          type=rdf_flows.GrrMessage.Type.STATUS,
          auth_state="AUTHENTICATED",
          generate_task_id=True) for i in range(1, 11)
  ]

  frontend_server.ReceiveMessages(self.client_id, messages + statuses)

  with queue_manager.QueueManager(token=self.token) as q:
    all_notifications = q.GetNotificationsByPriorityForAllShards(
        rdfvalue.RDFURN("aff4:/F"))
    medium_priority = rdf_flows.GrrNotification.Priority.MEDIUM_PRIORITY
    medium_notifications = all_notifications[medium_priority]
    my_notifications = [
        n for n in medium_notifications if n.session_id == session_id
    ]
    # There must not be more than one notification.
    self.assertEqual(len(my_notifications), 1)
    notification = my_notifications[0]
    self.assertEqual(notification.first_queued, notification.timestamp)
    self.assertEqual(notification.last_status, 10)
def Run(self, unused_arg):
  """Run the kill."""
  # Send a message back to the service to say that we are about to shutdown.
  reply = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
  # Queue up the response message, jump the queue.
  self.SendReply(
      reply,
      message_type=rdf_flows.GrrMessage.Type.STATUS,
      priority=rdf_flows.GrrMessage.Priority.HIGH_PRIORITY + 1)

  # Give the http thread some time to send the reply.
  self.grr_worker.Sleep(10)

  # Die ourselves.
  logging.info("Dying on request.")
  os._exit(242)  # pylint: disable=protected-access
def __init__(self, grr_worker=None):
  """Initializes the action plugin.

  Args:
    grr_worker: The grr client worker object which may be used to
                e.g. send new actions on.
  """
  self.grr_worker = grr_worker
  self.response_id = INITIAL_RESPONSE_ID
  self.cpu_used = None
  self.nanny_controller = None
  self.status = rdf_flows.GrrStatus(
      status=rdf_flows.GrrStatus.ReturnedStatus.OK)
  self._last_gc_run = rdfvalue.RDFDatetime().Now()
  self._gc_frequency = config_lib.CONFIG["Client.gc_frequency"]
def Error(self, backtrace, client_id=None, status_code=None):
  """Terminates this flow with an error."""
  try:
    self.queue_manager.DestroyFlowStates(self.session_id)
  except queue_manager.MoreDataException:
    pass

  if not self.IsRunning():
    return

  # Set an error status
  reply = rdf_flows.GrrStatus()
  if status_code is None:
    reply.status = rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR
  else:
    reply.status = status_code

  client_id = client_id or self.runner_args.client_id

  if backtrace:
    reply.error_message = backtrace
    logging.error("Error in flow %s (%s). Trace: %s", self.session_id,
                  client_id, backtrace)
    self.context.backtrace = backtrace
  else:
    logging.error("Error in flow %s (%s).", self.session_id, client_id)

  self._SendTerminationMessage(reply)

  self.context.state = rdf_flow_runner.FlowContext.State.ERROR

  if self.ShouldSendNotifications():
    flow_ref = None
    if client_id:
      flow_ref = rdf_objects.FlowReference(
          client_id=client_id.Basename(), flow_id=self.session_id.Basename())
    notification_lib.Notify(
        self.token.username,
        rdf_objects.UserNotification.Type.TYPE_FLOW_RUN_FAILED,
        "Flow (%s) terminated due to error" % self.session_id,
        rdf_objects.ObjectReference(
            reference_type=rdf_objects.ObjectReference.Type.FLOW,
            flow=flow_ref))

  self.flow_obj.Flush()
def Terminate(self, status=None):
  """Terminates this flow."""
  try:
    self.queue_manager.DestroyFlowStates(self.session_id)
  except queue_manager.MoreDataException:
    pass

  # This flow might already not be running.
  if self.context.state != rdf_flows.FlowContext.State.RUNNING:
    return

  if self.runner_args.request_state.session_id:
    # Make a response or use the existing one.
    response = status or rdf_flows.GrrStatus()

    client_resources = self.context.client_resources
    user_cpu = client_resources.cpu_usage.user_cpu_time
    sys_cpu = client_resources.cpu_usage.system_cpu_time
    response.cpu_time_used.user_cpu_time = user_cpu
    response.cpu_time_used.system_cpu_time = sys_cpu
    response.network_bytes_sent = self.context.network_bytes_sent
    response.child_session_id = self.session_id

    request_state = self.runner_args.request_state
    request_state.response_count += 1

    # Make a response message
    msg = rdf_flows.GrrMessage(
        session_id=request_state.session_id,
        request_id=request_state.id,
        response_id=request_state.response_count,
        auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
        type=rdf_flows.GrrMessage.Type.STATUS,
        payload=response)

    try:
      # Queue the response now
      self.queue_manager.QueueResponse(msg)
    finally:
      self.QueueNotification(session_id=request_state.session_id)

  # Mark as terminated.
  self.context.state = rdf_flows.FlowContext.State.TERMINATED
  self.flow_obj.Flush()
def testHandleError(self):
  """Test handling of a request which raises."""
  # Push a request on it
  message = rdf_flows.GrrMessage(
      name="RaiseAction",
      session_id=self.session_id,
      auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
      request_id=1)

  self.context.HandleMessage(message)

  # Check the response - one data and one status
  message_list = self.context.Drain().job
  self.assertEqual(message_list[0].session_id, self.session_id)
  self.assertEqual(message_list[0].response_id, 1)

  status = rdf_flows.GrrStatus(message_list[0].payload)
  self.assertIn("RuntimeError", status.error_message)
  self.assertNotEqual(status.status, rdf_flows.GrrStatus.ReturnedStatus.OK)
def testReceiveMessagesWithStatus(self):
  """Receiving a sequence of messages with a status."""
  client_id = test_lib.TEST_CLIENT_ID
  flow_obj = self.FlowSetup(
      flow_test_lib.FlowOrderTest.__name__, client_id=client_id)

  session_id = flow_obj.session_id
  messages = [
      rdf_flows.GrrMessage(
          request_id=1,
          response_id=i,
          session_id=session_id,
          payload=rdfvalue.RDFInteger(i),
          task_id=15) for i in range(1, 10)
  ]

  # Now add the status message
  status = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
  messages.append(
      rdf_flows.GrrMessage(
          request_id=1,
          response_id=len(messages) + 1,
          task_id=15,
          session_id=messages[0].session_id,
          payload=status,
          type=rdf_flows.GrrMessage.Type.STATUS))

  self.server.ReceiveMessages(client_id, messages)

  # Make sure the task is still on the client queue
  manager = queue_manager.QueueManager(token=self.token)
  tasks_on_client_queue = manager.Query(client_id, 100)
  self.assertEqual(len(tasks_on_client_queue), 1)

  stored_messages = data_store.DB.ReadResponsesForRequestId(session_id, 1)
  self.assertEqual(len(stored_messages), len(messages))

  stored_messages.sort(key=lambda m: m.response_id)
  # Check that messages were stored correctly
  for stored_message, message in zip(stored_messages, messages):
    # We don't care about the last queueing time.
    stored_message.timestamp = None
    self.assertRDFValuesEqual(stored_message, message)
def QueueMessages(self, messages):
  """Queue a message from the server for processing.

  We maintain all the incoming messages in a queue. These messages are
  consumed until the outgoing queue fills to the allowable level. This
  mechanism allows us to throttle the server messages and limit the size of
  the outgoing queue on the client.

  Note that we can only limit processing of single request messages so if a
  single request message generates huge amounts of response messages we will
  still overflow the output queue. Therefore actions must be written in such
  a way that each request generates a limited and known maximum number and
  size of responses. (e.g. do not write a single client action to fetch the
  entire disk).

  Args:
    messages: List of parsed protobuf arriving from the server.
  """
  # Push all the messages to our input queue
  for message in messages:
    self._in_queue.append(message)
    stats.STATS.IncrementCounter("grr_client_received_messages")

  # As long as our output queue has some room we can process some
  # input messages:
  while self._in_queue and (
      self._out_queue_size < config_lib.CONFIG["Client.max_out_queue"]):
    message = self._in_queue.pop(0)

    try:
      self.HandleMessage(message)
      # Catch any errors and keep going here
    except Exception as e:  # pylint: disable=broad-except
      self.SendReply(
          rdf_flows.GrrStatus(
              status=rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR,
              error_message=utils.SmartUnicode(e)),
          request_id=message.request_id,
          response_id=message.response_id,
          session_id=message.session_id,
          task_id=message.task_id,
          message_type=rdf_flows.GrrMessage.Type.STATUS)
      if flags.FLAGS.debug:
        pdb.post_mortem()
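The docstring above describes a simple backpressure scheme: input messages are only consumed while the outgoing queue is below a size limit. The snippet below is a minimal, self-contained illustration of that idea only; the names (MAX_OUT_QUEUE, ToyWorker) are invented for the sketch and are not GRR APIs, and the real client measures the output queue in bytes via Client.max_out_queue rather than counting items.

# Hypothetical sketch of the throttling loop described in QueueMessages above.
MAX_OUT_QUEUE = 3  # stand-in for the configured output queue limit


class ToyWorker(object):

  def __init__(self):
    self.in_queue = []
    self.out_queue = []

  def QueueMessages(self, messages):
    # Push everything to the input queue first.
    self.in_queue.extend(messages)
    # Drain the input queue only while the output queue has headroom.
    while self.in_queue and len(self.out_queue) < MAX_OUT_QUEUE:
      self.out_queue.append(self.in_queue.pop(0))


toy_worker = ToyWorker()
toy_worker.QueueMessages(list(range(10)))
assert len(toy_worker.out_queue) == 3
assert len(toy_worker.in_queue) == 7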
def _ScheduleResponseAndStatus(self, client_id, flow_id):
  with queue_manager.QueueManager(token=self.token) as flow_manager:
    # Schedule a response.
    flow_manager.QueueResponse(
        rdf_flows.GrrMessage(
            source=client_id,
            session_id=flow_id,
            payload=rdf_protodict.DataBlob(string="Helllo"),
            request_id=1,
            response_id=1))
    # And a STATUS message.
    flow_manager.QueueResponse(
        rdf_flows.GrrMessage(
            source=client_id,
            session_id=flow_id,
            payload=rdf_flows.GrrStatus(
                status=rdf_flows.GrrStatus.ReturnedStatus.OK),
            request_id=1,
            response_id=2,
            type=rdf_flows.GrrMessage.Type.STATUS))
def StartClients(cls, hunt_id, client_ids, token=None):
  """This method is called by the foreman for each client it discovers.

  Note that this function is performance sensitive since it is called by the
  foreman for every client which needs to be scheduled.

  Args:
    hunt_id: The hunt to schedule.
    client_ids: List of clients that should be added to the hunt.
    token: An optional access token to use.
  """
  token = token or access_control.ACLToken(username="******", reason="hunting")

  with queue_manager.QueueManager(token=token) as flow_manager:
    for client_id in client_ids:
      # Now we construct a special response which will be sent to the hunt
      # flow. Randomize the request_id so we do not overwrite other messages
      # in the queue.
      state = rdf_flows.RequestState(
          id=utils.PRNG.GetULong(),
          session_id=hunt_id,
          client_id=client_id,
          next_state="AddClient")

      # Queue the new request.
      flow_manager.QueueRequest(hunt_id, state)

      # Send a response.
      msg = rdf_flows.GrrMessage(
          session_id=hunt_id,
          request_id=state.id,
          response_id=1,
          auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
          type=rdf_flows.GrrMessage.Type.STATUS,
          payload=rdf_flows.GrrStatus())

      flow_manager.QueueResponse(hunt_id, msg)

      # And notify the worker about it.
      flow_manager.QueueNotification(session_id=hunt_id)
def testReceiveMessagesWithStatus(self):
  """Receiving a sequence of messages with a status."""
  flow_obj = self.FlowSetup("FlowOrderTest")
  session_id = flow_obj.session_id
  messages = [
      rdf_flows.GrrMessage(
          request_id=1,
          response_id=i,
          session_id=session_id,
          payload=rdfvalue.RDFInteger(i),
          task_id=15) for i in range(1, 10)
  ]

  # Now add the status message
  status = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
  messages.append(
      rdf_flows.GrrMessage(
          request_id=1,
          response_id=len(messages) + 1,
          task_id=15,
          session_id=messages[0].session_id,
          payload=status,
          type=rdf_flows.GrrMessage.Type.STATUS))

  self.server.ReceiveMessages(self.client_id, messages)

  # Make sure the task is still on the client queue
  manager = queue_manager.QueueManager(token=self.token)
  tasks_on_client_queue = manager.Query(self.client_id, 100)
  self.assertEqual(len(tasks_on_client_queue), 1)

  # Check that messages were stored correctly
  for message in messages:
    stored_message, _ = data_store.DB.Resolve(
        session_id.Add("state/request:00000001"),
        manager.FLOW_RESPONSE_TEMPLATE % (1, message.response_id),
        token=self.token)

    stored_message = rdf_flows.GrrMessage.FromSerializedString(stored_message)
    self.assertRDFValuesEqual(stored_message, message)
def testNoValidStatusRaceIsResolved(self):

  # This tests for the regression of a long standing race condition we saw
  # where notifications would trigger the reading of another request that
  # arrives later but wasn't completely written to the database yet.
  # Timestamp based notification handling should eliminate this bug.

  # We need a random flow object for this test.
  session_id = flow.GRRFlow.StartFlow(
      client_id=self.client_id,
      flow_name="WorkerSendingTestFlow",
      token=self.token)
  worker_obj = worker.GRRWorker(token=self.token)
  manager = queue_manager.QueueManager(token=self.token)

  manager.DeleteNotification(session_id)
  manager.Flush()

  # We have a first request that is complete (request_id 1, response_id 1).
  self.SendResponse(session_id, "Response 1")

  # However, we also have request #2 already coming in. The race is that
  # the queue manager might write the status notification to
  # session_id/state as "status:00000002" but not the status response
  # itself yet under session_id/state/request:00000002

  request_id = 2
  response_id = 1
  flow_manager = queue_manager.QueueManager(token=self.token)
  flow_manager.FreezeTimestamp()

  flow_manager.QueueResponse(
      rdf_flows.GrrMessage(
          source=self.client_id,
          session_id=session_id,
          payload=rdf_protodict.DataBlob(string="Response 2"),
          request_id=request_id,
          response_id=response_id))

  status = rdf_flows.GrrMessage(
      source=self.client_id,
      session_id=session_id,
      payload=rdf_flows.GrrStatus(
          status=rdf_flows.GrrStatus.ReturnedStatus.OK),
      request_id=request_id,
      response_id=response_id + 1,
      type=rdf_flows.GrrMessage.Type.STATUS)

  # Now we write half the status information.
  data_store.DB.StoreRequestsAndResponses(new_responses=[(status, None)])

  # We make the race even a bit harder by saying the new notification gets
  # written right before the old one gets deleted. If we are not careful
  # here, we delete the new notification as well and the flow becomes stuck.

  def WriteNotification(self, arg_session_id, start=None, end=None):
    if arg_session_id == session_id:
      flow_manager.QueueNotification(session_id=arg_session_id)
      flow_manager.Flush()

    self.DeleteNotification.old_target(
        self, arg_session_id, start=start, end=end)

  with utils.Stubber(queue_manager.QueueManager, "DeleteNotification",
                     WriteNotification):
    # This should process request 1 but not touch request 2.
    worker_obj.RunOnce()
    worker_obj.thread_pool.Join()

  flow_obj = aff4.FACTORY.Open(session_id, token=self.token)
  self.assertFalse(flow_obj.context.backtrace)
  self.assertNotEqual(flow_obj.context.state,
                      rdf_flows.FlowContext.State.ERROR)

  request_data = data_store.DB.ReadResponsesForRequestId(session_id, 2)
  request_data.sort(key=lambda msg: msg.response_id)
  self.assertEqual(len(request_data), 2)

  # Make sure the status and the original request are still there.
  self.assertEqual(request_data[0].args_rdf_name, "DataBlob")
  self.assertEqual(request_data[1].args_rdf_name, "GrrStatus")

  # But there is nothing for request 1.
  request_data = data_store.DB.ReadResponsesForRequestId(session_id, 1)
  self.assertEqual(request_data, [])

  # The notification for request 2 should have survived.
  with queue_manager.QueueManager(token=self.token) as manager:
    notifications = manager.GetNotifications(queues.FLOWS)
    self.assertEqual(len(notifications), 1)
    notification = notifications[0]
    self.assertEqual(notification.session_id, session_id)
    self.assertEqual(notification.timestamp, flow_manager.frozen_timestamp)

  self.assertEqual(RESULTS, ["Response 1"])

  # The last missing piece of request 2 is the actual status message.
  flow_manager.QueueResponse(status)
  flow_manager.Flush()

  # Now make sure request 2 runs as expected.
  worker_obj.RunOnce()
  worker_obj.thread_pool.Join()

  self.assertEqual(RESULTS, ["Response 1", "Response 2"])