Example #1
  def Next(self):
    """Emulates execution of a single ClientActionRequest.

    Returns:
       True iff a ClientActionRequest was found for the client.
    """
    if self._is_fleetspeak_client:
      next_task = fleetspeak_test_lib.PopMessage(self.client_id)
      if next_task is None:
        return False
    else:
      request = data_store.REL_DB.LeaseClientActionRequests(
          self.client_id,
          lease_time=rdfvalue.Duration.From(10000, rdfvalue.SECONDS),
          limit=1)
      try:
        next_task = rdf_flow_objects.GRRMessageFromClientActionRequest(
            request[0])
      except IndexError:
        return False

    try:
      responses = self.client_mock.HandleMessage(next_task)
    except Exception as e:  # pylint: disable=broad-except
      logging.exception("Error %s occurred in client", e)
      responses = [
          self.client_mock.GenerateStatusMessage(
              next_task, 1, status="GENERIC_ERROR")
      ]

    # Now insert those on the flow state queue
    for response in responses:
      self.PushToStateQueue(response)

    return True
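
In tests, a mock like this is typically driven in a loop until the client queue is empty. A minimal usage sketch, assuming an object that exposes the Next() method above (the mock_client name is a placeholder, not part of the GRR sources):

# Process every pending ClientActionRequest for the mocked client.
# mock_client is assumed to expose the Next() method from Example #1.
while mock_client.Next():
  pass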
Example #2
  def Next(self):
    """Grab tasks for us from the server's queue."""
    request_tasks = data_store.REL_DB.LeaseClientActionRequests(
        self.client_id, lease_time=rdfvalue.DurationSeconds("10000s"), limit=1)
    request_tasks = [
        rdf_flow_objects.GRRMessageFromClientActionRequest(r)
        for r in request_tasks
    ]

    request_tasks.extend(self._mock_task_queue)
    self._mock_task_queue[:] = []  # Clear the referenced list.

    for message in request_tasks:
      try:
        responses = self.client_mock.HandleMessage(message)
        logging.info("Called client action %s generating %s responses",
                     message.name,
                     len(responses) + 1)
      except Exception as e:  # pylint: disable=broad-except
        logging.exception("Error %s occurred in client", e)
        responses = [
            self.client_mock.GenerateStatusMessage(
                message, 1, status="GENERIC_ERROR")
        ]

      # Now insert those on the flow state queue
      for response in responses:
        self.PushToStateQueue(response)

    return len(request_tasks)
Example #3
    def DrainTaskSchedulerQueueForClient(self, client, max_count=None):
        """Drains the client's Task Scheduler queue.

    1) Get all messages in the client queue.
    2) Sort these into a set of session_ids.
    3) Use data_store.DB.ResolvePrefix() to query all requests.
    4) Delete all responses for retransmitted messages (if needed).

    Args:
       client: The ClientURN object specifying this client.
       max_count: The maximum number of messages we will issue for the client.
         If not given, uses self.max_queue_size.

    Returns:
       The tasks representing the messages returned. If we cannot send them,
       we can reschedule them for later.
    """
        if max_count is None:
            max_count = self.max_queue_size

        if max_count <= 0:
            return []

        client = rdf_client.ClientURN(client)
        start_time = time.time()
        # Drain the queue for this client
        action_requests = data_store.REL_DB.LeaseClientActionRequests(
            client.Basename(),
            lease_time=rdfvalue.DurationSeconds.FromSeconds(
                self.message_expiry_time),
            limit=max_count)
        result = [
            rdf_flow_objects.GRRMessageFromClientActionRequest(r)
            for r in action_requests
        ]

        stats_collector_instance.Get().IncrementCounter(
            "grr_messages_sent", len(result))
        if result:
            logging.debug("Drained %d messages for %s in %s seconds.",
                          len(result), client,
                          time.time() - start_time)

        return result
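
A call-site sketch for the drain path above, modeled on the test in Example #6 (the server variable and the client id literal are placeholders):

# Lease up to 10 pending requests for one client and get them back as
# GrrMessages ready to be shipped; server is assumed to expose the
# DrainTaskSchedulerQueueForClient() shown above.
messages = server.DrainTaskSchedulerQueueForClient(
    rdfvalue.RDFURN("C.1234567890123456"), max_count=10)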
Example #4
    def Next(self):
        """Grab tasks for us from the server's queue."""
        with queue_manager.QueueManager(token=self.token) as manager:
            if data_store.RelationalDBEnabled():
                request_tasks = data_store.REL_DB.LeaseClientActionRequests(
                    self.client_id.Basename(),
                    lease_time=rdfvalue.Duration("10000s"),
                    limit=1)
                request_tasks = [
                    rdf_flow_objects.GRRMessageFromClientActionRequest(r)
                    for r in request_tasks
                ]
            else:
                request_tasks = manager.QueryAndOwn(self.client_id.Queue(),
                                                    limit=1,
                                                    lease_seconds=10000)

            request_tasks.extend(self._mock_task_queue)
            self._mock_task_queue[:] = []  # Clear the referenced list.

            for message in request_tasks:
                try:
                    responses = self.client_mock.HandleMessage(message)
                    logging.info(
                        "Called client action %s generating %s responses",
                        message.name,
                        len(responses) + 1)
                except Exception as e:  # pylint: disable=broad-except
                    logging.exception("Error %s occurred in client", e)
                    responses = [
                        self.client_mock.GenerateStatusMessage(
                            message, 1, status="GENERIC_ERROR")
                    ]

                # Now insert those on the flow state queue
                for response in responses:
                    self.PushToStateQueue(manager, response)

                # Additionally schedule a task for the worker
                manager.QueueNotification(session_id=message.session_id)

            return len(request_tasks)
Example #5
    def FlushQueuedMessages(self) -> None:
        """Flushes queued messages."""
        # TODO(amoser): This could be done in a single db call, might be worth
        # optimizing.

        if self.flow_requests:
            data_store.REL_DB.WriteFlowRequests(self.flow_requests)
            self.flow_requests = []

        if self.flow_responses:
            data_store.REL_DB.WriteFlowResponses(self.flow_responses)
            self.flow_responses = []

        if self.client_action_requests:
            client_id = self.rdf_flow.client_id
            if fleetspeak_utils.IsFleetspeakEnabledClient(client_id):
                for request in self.client_action_requests:
                    msg = rdf_flow_objects.GRRMessageFromClientActionRequest(
                        request)
                    fleetspeak_utils.SendGrrMessageThroughFleetspeak(
                        client_id, msg)
            else:
                data_store.REL_DB.WriteClientActionRequests(
                    self.client_action_requests)

            self.client_action_requests = []

        if self.completed_requests:
            data_store.REL_DB.DeleteFlowRequests(self.completed_requests)
            self.completed_requests = []

        if self.replies_to_write:
            # For top-level hunt-induced flows, write results to the hunt collection.
            if self.rdf_flow.parent_hunt_id:
                data_store.REL_DB.WriteFlowResults(self.replies_to_write)
                hunt.StopHuntIfCPUOrNetworkLimitsExceeded(
                    self.rdf_flow.parent_hunt_id)
            else:
                # Write flow results to REL_DB, even if the flow is a nested flow.
                data_store.REL_DB.WriteFlowResults(self.replies_to_write)
            self.replies_to_write = []
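
For non-Fleetspeak clients, the write path above pairs with the lease path from Examples #3 and #8: requests stored via WriteClientActionRequests are later leased back and converted to GrrMessages. A round-trip sketch under the same REL_DB setup as Example #6 (client_id, action_requests and the 600-second lease time are placeholders):

# Persist the queued requests, then lease them back and convert them,
# mirroring what the frontend drain code does.
data_store.REL_DB.WriteClientActionRequests(action_requests)
leased = data_store.REL_DB.LeaseClientActionRequests(
    client_id,
    lease_time=rdfvalue.Duration.From(600, rdfvalue.SECONDS),
    limit=len(action_requests))
messages = [
    rdf_flow_objects.GRRMessageFromClientActionRequest(r) for r in leased
]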
Example #6
    def testDrainTaskSchedulerQueue(self):
        client_id = u"C.1234567890123456"
        flow_id = flow.RandomFlowId()
        data_store.REL_DB.WriteClientMetadata(client_id,
                                              fleetspeak_enabled=False)

        rdf_flow = rdf_flow_objects.Flow(
            client_id=client_id,
            flow_id=flow_id,
            create_time=rdfvalue.RDFDatetime.Now())
        data_store.REL_DB.WriteFlowObject(rdf_flow)

        action_requests = []
        for i in range(3):
            data_store.REL_DB.WriteFlowRequests([
                rdf_flow_objects.FlowRequest(client_id=client_id,
                                             flow_id=flow_id,
                                             request_id=i)
            ])

            action_requests.append(
                rdf_flows.ClientActionRequest(client_id=client_id,
                                              flow_id=flow_id,
                                              request_id=i,
                                              action_identifier="WmiQuery"))

        data_store.REL_DB.WriteClientActionRequests(action_requests)
        server = TestServer()

        res = server.DrainTaskSchedulerQueueForClient(
            rdfvalue.RDFURN(client_id))
        msgs = [
            rdf_flow_objects.GRRMessageFromClientActionRequest(r)
            for r in action_requests
        ]
        for r in res:
            r.task_id = 0
        for m in msgs:
            m.task_id = 0

        self.assertItemsEqual(res, msgs)
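
Stripped to its essentials, the test above is also the shortest recipe for building a request that GRRMessageFromClientActionRequest can convert; the field values below are reused from the test and should be treated as placeholders:

# Build a single ClientActionRequest and convert it to a GrrMessage.
request = rdf_flows.ClientActionRequest(
    client_id="C.1234567890123456",
    flow_id=flow.RandomFlowId(),
    request_id=0,
    action_identifier="WmiQuery")
msg = rdf_flow_objects.GRRMessageFromClientActionRequest(request)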
Example #7
    def FlushQueuedMessages(self):
        """Flushes queued messages."""
        # TODO(amoser): This could be done in a single db call, might be worth
        # optimizing.

        if self.flow_requests:
            data_store.REL_DB.WriteFlowRequests(self.flow_requests)
            self.flow_requests = []

        if self.flow_responses:
            data_store.REL_DB.WriteFlowResponses(self.flow_responses)
            self.flow_responses = []

        if self.client_action_requests:
            client_id = self.rdf_flow.client_id
            if fleetspeak_utils.IsFleetspeakEnabledClient(client_id):
                for request in self.client_action_requests:
                    msg = rdf_flow_objects.GRRMessageFromClientActionRequest(
                        request)
                    fleetspeak_utils.SendGrrMessageThroughFleetspeak(
                        client_id, msg)
            else:
                data_store.REL_DB.WriteClientActionRequests(
                    self.client_action_requests)

            self.client_action_requests = []

        if self.completed_requests:
            data_store.REL_DB.DeleteFlowRequests(self.completed_requests)
            self.completed_requests = []

        if self.replies_to_write:
            # For top-level hunt-induced flows, write results to the hunt collection.
            if self.rdf_flow.parent_hunt_id and not self.rdf_flow.parent_flow_id:
                db_compat.WriteHuntResults(self.rdf_flow.client_id,
                                           self.rdf_flow.parent_hunt_id,
                                           self.replies_to_write)
            else:
                data_store.REL_DB.WriteFlowResults(self.replies_to_write)
            self.replies_to_write = []
Example #8
  def DrainTaskSchedulerQueueForClient(self, client, max_count=None):
    """Drains the client's Task Scheduler queue.

    Args:
       client: The client id specifying this client.
       max_count: The maximum number of messages we will issue for the client.
         If not given, uses self.max_queue_size.

    Returns:
       The tasks representing the messages returned. If we cannot send them,
       we can reschedule them for later.
    """
    if max_count is None:
      max_count = self.max_queue_size

    if max_count <= 0:
      return []

    start_time = time.time()
    # Drain the queue for this client
    action_requests = data_store.REL_DB.LeaseClientActionRequests(
        client,
        lease_time=rdfvalue.DurationSeconds.FromSeconds(
            self.message_expiry_time),
        limit=max_count)
    result = [
        rdf_flow_objects.GRRMessageFromClientActionRequest(r)
        for r in action_requests
    ]

    stats_collector_instance.Get().IncrementCounter("grr_messages_sent",
                                                    len(result))
    if result:
      logging.debug("Drained %d messages for %s in %s seconds.", len(result),
                    client,
                    time.time() - start_time)

    return result
Example #9
    def DrainTaskSchedulerQueueForClient(self, client, max_count=None):
        """Drains the client's Task Scheduler queue.

    1) Get all messages in the client queue.
    2) Sort these into a set of session_ids.
    3) Use data_store.DB.ResolvePrefix() to query all requests.
    4) Delete all responses for retransmitted messages (if needed).

    Args:
       client: The ClientURN object specifying this client.
       max_count: The maximum number of messages we will issue for the client.
         If not given, uses self.max_queue_size.

    Returns:
       The tasks representing the messages returned. If we cannot send them,
       we can reschedule them for later.
    """
        if max_count is None:
            max_count = self.max_queue_size

        if max_count <= 0:
            return []

        client = rdf_client.ClientURN(client)
        start_time = time.time()
        # Drain the queue for this client
        if data_store.RelationalDBEnabled():
            action_requests = data_store.REL_DB.LeaseClientActionRequests(
                client.Basename(),
                lease_time=rdfvalue.Duration.FromSeconds(
                    self.message_expiry_time),
                limit=max_count)
            result = [
                rdf_flow_objects.GRRMessageFromClientActionRequest(r)
                for r in action_requests
            ]
        else:
            new_tasks = queue_manager.QueueManager(
                token=self.token).QueryAndOwn(
                    queue=client.Queue(),
                    limit=max_count,
                    lease_seconds=self.message_expiry_time)

            initial_ttl = rdf_flows.GrrMessage().task_ttl
            check_before_sending = []
            result = []
            for task in new_tasks:
                if task.task_ttl < initial_ttl - 1:
                    # This message has been leased before.
                    check_before_sending.append(task)
                else:
                    result.append(task)

            if check_before_sending:
                with queue_manager.QueueManager(token=self.token) as manager:
                    status_found = manager.MultiCheckStatus(
                        check_before_sending)

                    # All messages that don't have a status yet should be sent again.
                    for task in check_before_sending:
                        if task not in status_found:
                            result.append(task)
                        else:
                            manager.DeQueueClientRequest(task)

        stats_collector_instance.Get().IncrementCounter(
            "grr_messages_sent", len(result))
        if result:
            logging.debug("Drained %d messages for %s in %s seconds.",
                          len(result), client,
                          time.time() - start_time)

        return result