Example #1
    def testDeleteRequest(self):
        """Check that we can efficiently destroy a single flow request."""
        session_id = rdfvalue.SessionID(flow_name="test3")

        request = rdf_flow_runner.RequestState(
            id=1,
            client_id=test_lib.TEST_CLIENT_ID,
            next_state="TestState",
            session_id=session_id)

        with queue_manager.QueueManager(token=self.token) as manager:
            manager.QueueRequest(request)
            manager.QueueResponse(
                rdf_flows.GrrMessage(session_id=session_id,
                                     request_id=1,
                                     response_id=1))

        # Check the request and responses are there.
        all_requests = list(manager.FetchRequestsAndResponses(session_id))
        self.assertEqual(len(all_requests), 1)
        self.assertEqual(all_requests[0][0], request)

        with queue_manager.QueueManager(token=self.token) as manager:
            manager.DeleteRequest(request)

        all_requests = list(manager.FetchRequestsAndResponses(session_id))
        self.assertEqual(len(all_requests), 0)
Example #2
File: flow.py Project: megatronGA/grr
    def Handle(self, args, token=None):
        client_id = args.client_id.ToString()
        requests_and_responses = data_store.REL_DB.ReadAllFlowRequestsAndResponses(
            client_id, str(args.flow_id))

        result = ApiListFlowRequestsResult()
        stop = None
        if args.count:
            stop = args.offset + args.count

        for request, response_dict in itertools.islice(requests_and_responses,
                                                       args.offset, stop):
            request_state = rdf_flow_runner.RequestState(
                client_id=client_id,
                id=request.request_id,
                next_state=request.next_state,
                session_id="{}/flows/{}".format(client_id,
                                                str(request.flow_id)))
            api_request = ApiFlowRequest(request_id=str(request.request_id),
                                         request_state=request_state)

            if response_dict:
                responses = [
                    response_dict[i].AsLegacyGrrMessage()
                    for i in sorted(response_dict)
                ]
                for r in responses:
                    r.ClearPayload()

                api_request.responses = responses

            result.items.append(api_request)

        return result
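
The offset/count handling above is a small pagination idiom built on itertools.islice, where a stop of None means "read to the end". A minimal standalone sketch of the same idiom follows; the paginate helper is illustrative only and not part of the GRR API.

import itertools


def paginate(items, offset, count=None):
    # Mirrors the handler above: a falsy count means "no upper bound".
    stop = offset + count if count else None
    return itertools.islice(items, offset, stop)


# Take two items starting at index 1.
assert list(paginate(range(10), offset=1, count=2)) == [1, 2]
# With no count, everything from the offset onwards is returned.
assert list(paginate(range(5), offset=3)) == [3, 4]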
Example #3
    def testCountsActualNumberOfCompletedResponsesWhenApplyingTheLimit(self):
        session_id = rdfvalue.SessionID(flow_name="test")

        # Queue the requests and responses:
        with queue_manager.QueueManager(token=self.token) as manager:
            # Queue five requests (ids 0-4).
            for request_id in range(5):
                request = rdf_flow_runner.RequestState(
                    id=request_id,
                    client_id=test_lib.TEST_CLIENT_ID,
                    next_state="TestState",
                    session_id=session_id)

                manager.QueueRequest(request)

                # Don't queue any actual responses, just a status message with a
                # fake response_id.
                manager.QueueResponse(
                    rdf_flows.GrrMessage(
                        session_id=session_id,
                        request_id=request_id,
                        response_id=1000,
                        type=rdf_flows.GrrMessage.Type.STATUS))

        # Check that even though the status message for every request claims
        # 1000 responses, only the actual response count is used to apply the
        # limit when FetchCompletedResponses is called.
        completed_response = list(
            manager.FetchCompletedResponses(session_id, limit=5))
        self.assertEqual(len(completed_response), 5)
        for i, (request, responses) in enumerate(completed_response):
            self.assertEqual(request.id, i)
            # Responses contain just the status message.
            self.assertEqual(len(responses), 1)
Example #4
    def _HandleRelational(self, args):
        requests_and_responses = data_store.REL_DB.ReadAllFlowRequestsAndResponses(
            unicode(args.client_id), unicode(args.flow_id))

        result = ApiListFlowRequestsResult()
        stop = None
        if args.count:
            stop = args.offset + args.count

        for request, response_dict in itertools.islice(requests_and_responses,
                                                       args.offset, stop):
            client_urn = args.client_id.ToClientURN()
            request_state = rdf_flow_runner.RequestState(
                client_id=client_urn,
                id=request.request_id,
                next_state=request.next_state,
                session_id=client_urn.Add("flows").Add(unicode(
                    request.flow_id)))
            api_request = ApiFlowRequest(request_id=str(request.request_id),
                                         request_state=request_state)

            if response_dict:
                responses = [
                    response_dict[i].AsLegacyGrrMessage()
                    for i in sorted(response_dict)
                ]
                for r in responses:
                    r.ClearPayload()

                api_request.responses = responses

            result.items.append(api_request)

        return result
Example #5
    def testDestroyFlowStates(self):
        """Check that we can efficiently destroy the flow's request queues."""
        session_id = rdfvalue.SessionID(flow_name="test2")

        request = rdf_flow_runner.RequestState(
            id=1,
            client_id=test_lib.TEST_CLIENT_ID,
            next_state="TestState",
            session_id=session_id)

        with queue_manager.QueueManager(token=self.token) as manager:
            manager.QueueRequest(request)
            manager.QueueResponse(
                rdf_flows.GrrMessage(request_id=1,
                                     response_id=1,
                                     session_id=session_id))

        # Check the request and responses are there.
        all_requests = list(manager.FetchRequestsAndResponses(session_id))
        self.assertEqual(len(all_requests), 1)
        self.assertEqual(all_requests[0][0], request)

        # Read the response directly.
        responses = data_store.DB.ReadResponsesForRequestId(session_id, 1)
        self.assertEqual(len(responses), 1)
        response = responses[0]
        self.assertEqual(response.request_id, 1)
        self.assertEqual(response.response_id, 1)
        self.assertEqual(response.session_id, session_id)

        with queue_manager.QueueManager(token=self.token) as manager:
            manager.DestroyFlowStates(session_id)

        all_requests = list(manager.FetchRequestsAndResponses(session_id))
        self.assertEqual(len(all_requests), 0)

        # Check that the response is gone.
        responses = data_store.DB.ReadResponsesForRequestId(session_id, 1)
        self.assertEqual(len(responses), 0)

        # Ensure the rows are gone from the data store. Some data stores
        # don't store the queues in that way but there is no harm in
        # checking.
        self.assertEqual(
            data_store.DB.ResolveRow(session_id.Add("state/request:00000001")),
            [])

        self.assertEqual(data_store.DB.ResolveRow(session_id.Add("state")), [])
Example #6
    def CallState(self, next_state="", start_time=None):
        """This method is used to schedule a new state on a different worker.

    This is basically the same as CallFlow() except we are calling
    ourselves. The state will be invoked in a later time and receive all the
    messages we send.

    Args:
       next_state: The state in this flow to be invoked with the responses.

       start_time: Start the flow at this time. This Delays notification for
         flow processing into the future. Note that the flow may still be
         processed earlier if there are client responses waiting.

    Raises:
       FlowRunnerError: if the next state is not valid.
    """
        # Check if the state is valid
        if not getattr(self.flow_obj, next_state, None):
            raise FlowRunnerError("Next state %s is invalid." % next_state)

        # Prepare a request state addressed back to this same flow.
        request_state = rdf_flow_runner.RequestState(
            id=self.GetNextOutboundId(),
            session_id=self.context.session_id,
            client_id=self.runner_args.client_id,
            next_state=next_state)

        self.QueueRequest(request_state, timestamp=start_time)

        # Send a fake reply.
        msg = rdf_flows.GrrMessage(
            session_id=self.session_id,
            request_id=request_state.id,
            response_id=1,
            auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
            payload=rdf_flows.GrrStatus(),
            type=rdf_flows.GrrMessage.Type.STATUS)
        self.QueueResponse(msg, start_time)

        # Notify the worker about it.
        self.QueueNotification(session_id=self.session_id,
                               timestamp=start_time)
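
From inside a flow, CallState() is normally reached through the flow object, which delegates to this runner method. A hedged sketch of that usage, assuming an AFF4-style GRRFlow subclass; the class and state names are hypothetical, and imports (the flow module path varies across GRR versions) are omitted.

# Hypothetical flow illustrating CallState(); names invented for this sketch.
class TwoStepFlow(flow.GRRFlow):

    def Start(self):
        # Schedule our own "Finish" state; the fake STATUS reply queued by
        # CallState() is what makes a worker pick it up later.
        self.CallState(next_state="Finish")

    def Finish(self, responses):
        self.Log("Finish state reached, status ok: %s", responses.success)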
Example #7
  def CallFlow(self,
               flow_name=None,
               next_state=None,
               request_data=None,
               client_id=None,
               base_session_id=None,
               **kwargs):
    """Creates a new flow and send its responses to a state.

    This creates a new flow. The flow may send back many responses which will be
    queued by the framework until the flow terminates. The final status message
    will cause the entire transaction to be committed to the specified state.

    Args:
       flow_name: The name of the flow to invoke.
       next_state: The state in this flow that responses to this message should
         go to.
       request_data: Any dict provided here will be available in the
         RequestState protobuf. The Responses object maintains a reference to
         this protobuf for use in the execution of the state method (so you can
         access this data via responses.request). There is no format mandated
         on this data but it may be a serialized protobuf.
       client_id: If given, the flow is started for this client.
       base_session_id: A URN used as the base when constructing the child
         flow's session id.
       **kwargs: Arguments for the child flow.

    Raises:
       FlowRunnerError: If next_state is not one of the allowed next states.

    Returns:
       The URN of the child flow which was created.
    """
    client_id = client_id or self.runner_args.client_id

    # This looks very much like CallClient() above - we prepare a request
    # state and add it to our queue - any responses from the child flow will
    # return to the request state and the stated next_state. Note, however,
    # that there is no actual request message here because we directly invoke
    # the child flow rather than queue anything for it.
    state = rdf_flow_runner.RequestState(
        id=self.GetNextOutboundId(),
        session_id=utils.SmartUnicode(self.session_id),
        client_id=client_id,
        next_state=next_state,
        response_count=0)

    if request_data:
      state.data = rdf_protodict.Dict().FromDict(request_data)

    # If the urn is passed explicitly (e.g. from the hunt runner) use that,
    # otherwise use the urn from the flow_runner args. If both are None, create
    # a new collection and give the urn to the flow object.
    logs_urn = self._GetLogCollectionURN(
        kwargs.pop("logs_collection_urn", None) or
        self.runner_args.logs_collection_urn)

    # If we were called with write_intermediate_results, propagate down to
    # child flows.  This allows write_intermediate_results to be set to True
    # either at the top level parent, or somewhere in the middle of
    # the call chain.
    write_intermediate = (
        kwargs.pop("write_intermediate_results", False) or
        self.runner_args.write_intermediate_results)

    # Create the new child flow but do not notify the user about it.
    child_urn = self.flow_obj.StartAFF4Flow(
        client_id=client_id,
        flow_name=flow_name,
        base_session_id=base_session_id or self.session_id,
        request_state=state,
        token=self.token,
        notify_to_user=False,
        parent_flow=self.flow_obj,
        queue=self.runner_args.queue,
        write_intermediate_results=write_intermediate,
        logs_collection_urn=logs_urn,
        sync=True,
        **kwargs)

    self.QueueRequest(state)

    return child_urn
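
Caller-side usage of CallFlow() also goes through the flow object. A sketch under the assumption of an AFF4-style parent flow: "Interrogate" is a standard GRR flow name, while the parent class, state name and request_data keys are illustrative, and imports are omitted.

# Hypothetical parent flow illustrating CallFlow() and request_data.
class RefreshAndReport(flow.GRRFlow):

    def Start(self):
        # The dict passed as request_data is stored on the RequestState and
        # comes back later via responses.request.data.
        self.CallFlow("Interrogate",
                      next_state="ProcessChild",
                      request_data={"origin": "RefreshAndReport"})

    def ProcessChild(self, responses):
        self.Log("Child flow done (success=%s), origin=%s",
                 responses.success, responses.request.data["origin"])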
Example #8
  def CallClient(self,
                 action_cls,
                 request=None,
                 next_state=None,
                 request_data=None,
                 **kwargs):
    """Calls the client asynchronously.

    This sends a message to the client to invoke an Action. The run
    action may send back many responses. These will be queued by the
    framework until a status message is sent by the client. The status
    message will cause the entire transaction to be committed to the
    specified state.

    Args:
       action_cls: The client action class to call on the client.
       request: The request to send to the client. If not specified (or None)
         we create a new RDFValue using the kwargs.
       next_state: The state in this flow that responses to this message should
         go to.
       request_data: A dict which will be available in the RequestState
         protobuf. The Responses object maintains a reference to this protobuf
         for use in the execution of the state method (so you can access this
         data via responses.request). Valid values are strings, unicode and
         protobufs.
       **kwargs: These args will be used to construct the client action semantic
         protobuf.

    Raises:
       FlowRunnerError: If called on a flow that doesn't run on a single client.
       ValueError: The request passed to the client does not have the correct
                     type.
    """
    client_id = self.runner_args.client_id
    if client_id is None:
      raise FlowRunnerError("CallClient() is used on a flow which was not "
                            "started with a client.")

    if not isinstance(client_id, rdf_client.ClientURN):
      # Try turning it into a ClientURN
      client_id = rdf_client.ClientURN(client_id)

    if action_cls.in_rdfvalue is None:
      if request:
        raise ValueError(
            "Client action %s does not expect args." % action_cls.__name__)
    else:
      if request is None:
        # Create a new rdf request.
        request = action_cls.in_rdfvalue(**kwargs)
      else:
        # Verify that the request type matches the client action requirements.
        if not isinstance(request, action_cls.in_rdfvalue):
          raise ValueError("Client action expected %s but got %s" %
                           (action_cls.in_rdfvalue, type(request)))

    outbound_id = self.GetNextOutboundId()

    # Create a new request state
    state = rdf_flow_runner.RequestState(
        id=outbound_id,
        session_id=self.session_id,
        next_state=next_state,
        client_id=client_id)

    if request_data is not None:
      state.data = rdf_protodict.Dict(request_data)

    # Send the message with the request state
    msg = rdf_flows.GrrMessage(
        session_id=utils.SmartUnicode(self.session_id),
        name=action_cls.__name__,
        request_id=outbound_id,
        require_fastpoll=self.runner_args.require_fastpoll,
        queue=client_id.Queue(),
        payload=request,
        generate_task_id=True)

    cpu_usage = self.context.client_resources.cpu_usage
    if self.runner_args.cpu_limit:
      msg.cpu_limit = max(
          self.runner_args.cpu_limit - cpu_usage.user_cpu_time -
          cpu_usage.system_cpu_time, 0)

      if msg.cpu_limit == 0:
        raise FlowRunnerError("CPU limit exceeded.")

    if self.runner_args.network_bytes_limit:
      msg.network_bytes_limit = max(
          self.runner_args.network_bytes_limit -
          self.context.network_bytes_sent, 0)
      if msg.network_bytes_limit == 0:
        raise FlowRunnerError("Network limit exceeded.")

    state.request = msg
    self.QueueRequest(state)
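
Caller-side usage of CallClient(), again assuming an AFF4-style flow. The action stub and its field below are placeholders (server_stubs.SomeAction and some_field do not name real GRR symbols) standing in for whichever client action the flow needs; imports are omitted.

# Hypothetical flow illustrating CallClient(); the action stub and kwargs are
# placeholders, not real GRR client actions.
class FetchSomething(flow.GRRFlow):

    def Start(self):
        # No explicit `request` is passed, so CallClient() builds one via
        # action_cls.in_rdfvalue(**kwargs) as shown above.
        self.CallClient(server_stubs.SomeAction,
                        some_field="value",
                        next_state="Process")

    def Process(self, responses):
        if not responses.success:
            self.Log("Client action failed: %s", responses.status)
            return
        for response in responses:
            # Forward each client response onwards.
            self.SendReply(response)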
Example #9
  def testEmbeddedDict(self):
    state = rdf_flow_runner.RequestState(data=rdf_protodict.Dict({"a": 1}))
    serialized = state.SerializeToBytes()
    deserialized = rdf_flow_runner.RequestState.FromSerializedBytes(serialized)
    self.assertEqual(deserialized.data, state.data)
Example #10
File: flow_runner.py Project: slad99/grr
    def CallState(self,
                  messages=None,
                  next_state="",
                  request_data=None,
                  start_time=None):
        """This method is used to schedule a new state on a different worker.

    This is basically the same as CallFlow() except we are calling
    ourselves. The state will be invoked in a later time and receive all the
    messages we send.

    Args:
       messages: A list of rdfvalues to send. If the last one is not a
            GrrStatus, we append an OK Status.

       next_state: The state in this flow to be invoked with the responses.

       request_data: Any dict provided here will be available in the
             RequestState protobuf. The Responses object maintains a reference
             to this protobuf for use in the execution of the state method. (so
             you can access this data by responses.request).

       start_time: Start the flow at this time. This Delays notification for
         flow processing into the future. Note that the flow may still be
         processed earlier if there are client responses waiting.

    Raises:
       FlowRunnerError: if the next state is not valid.
    """
        if messages is None:
            messages = []

        # Check if the state is valid
        if not getattr(self.flow_obj, next_state, None):
            raise FlowRunnerError("Next state %s is invalid." % next_state)

        # Prepare a request state addressed back to this same flow.
        request_state = rdf_flow_runner.RequestState(
            id=self.GetNextOutboundId(),
            session_id=self.context.session_id,
            client_id=self.runner_args.client_id,
            next_state=next_state)
        if request_data:
            request_state.data = rdf_protodict.Dict().FromDict(request_data)

        self.QueueRequest(request_state, timestamp=start_time)

        # Add the status message if needed.
        if not messages or not isinstance(messages[-1], rdf_flows.GrrStatus):
            messages.append(rdf_flows.GrrStatus())

        # Send all the messages
        for i, payload in enumerate(messages):
            if isinstance(payload, rdfvalue.RDFValue):
                msg = rdf_flows.GrrMessage(
                    session_id=self.session_id,
                    request_id=request_state.id,
                    response_id=1 + i,
                    auth_state=rdf_flows.GrrMessage.AuthorizationState.
                    AUTHENTICATED,
                    payload=payload,
                    type=rdf_flows.GrrMessage.Type.MESSAGE)

                if isinstance(payload, rdf_flows.GrrStatus):
                    msg.type = rdf_flows.GrrMessage.Type.STATUS
            else:
                raise FlowRunnerError("Bad message %s of type %s." %
                                      (payload, type(payload)))

            self.QueueResponse(msg, start_time)

        # Notify the worker about it.
        self.QueueNotification(session_id=self.session_id,
                               timestamp=start_time)
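
This older CallState() variant also carries payload messages to the scheduled state. A sketch of forwarding precomputed RDFValues to a later state of the same flow; the flow and state names are hypothetical, the pathspec values arbitrary, and imports omitted.

# Hypothetical flow illustrating CallState() with a messages list; a GrrStatus
# is appended automatically because the list does not end with one.
class ForwardPaths(flow.GRRFlow):

    def Start(self):
        paths = [
            rdf_paths.PathSpec(path=p, pathtype=rdf_paths.PathSpec.PathType.OS)
            for p in ["/etc/passwd", "/etc/hosts"]
        ]
        self.CallState(messages=paths,
                       next_state="Process",
                       request_data={"origin": "Start"})

    def Process(self, responses):
        # Iterating the Responses object yields the payloads queued above.
        for pathspec in responses:
            self.Log("Got %s (origin=%s)", pathspec.path,
                     responses.request.data["origin"])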