Example #1
    def DownloadFiles(self, responses):
        if not responses.success:
            self.Log("Failed to run ArtifactCollectorFlow: %s",
                     responses.status)
            return

        results_with_pathspecs = []
        results_without_pathspecs = []
        for response in responses:
            pathspecs = self.FindMatchingPathspecs(response)
            if pathspecs:
                for pathspec in pathspecs:
                    result = ArtifactFilesDownloaderResult(
                        original_result_type=response.__class__.__name__,
                        original_result=response,
                        found_pathspec=pathspec)
                    results_with_pathspecs.append(result)
            else:
                result = ArtifactFilesDownloaderResult(
                    original_result_type=response.__class__.__name__,
                    original_result=response)
                results_without_pathspecs.append(result)

        grouped_results = utils.GroupBy(results_with_pathspecs,
                                        lambda x: x.found_pathspec)
        for pathspec, group in grouped_results.items():
            self.StartFileFetch(pathspec, request_data=dict(results=group))

        for result in results_without_pathspecs:
            self.SendReply(result)
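
Every example on this page leans on the same contract: utils.GroupBy(iterable, key_fn) returns a mapping from each key to the list of items that produced that key, which callers then walk with .items() or .iteritems(). As a minimal stand-in sketch of that assumed behavior (not GRR's actual implementation), the grouping step can be reproduced with an ordered dict:

import collections


def group_by(items, key_fn):
    """Minimal stand-in for the utils.GroupBy contract assumed above.

    Returns a dict mapping key_fn(item) to the list of items that share
    that key, preserving insertion order inside each group.
    """
    grouped = collections.OrderedDict()
    for item in items:
        grouped.setdefault(key_fn(item), []).append(item)
    return grouped


# Usage: group results by pathspec, as DownloadFiles does above.
results = [("path_a", 1), ("path_a", 2), ("path_b", 3)]
for pathspec, group in group_by(results, lambda r: r[0]).items():
    print(pathspec, group)  # path_a first, then path_b, each with its items
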
Example #2
  def MultiNotifyQueue(self, notifications, mutation_pool=None):
    """This is the same as NotifyQueue but for several session_ids at once.

    Args:
      notifications: A list of notifications.
      mutation_pool: A MutationPool object to schedule Notifications on.

    Raises:
      RuntimeError: An invalid session_id was passed.
    """
    extract_queue = lambda notification: notification.session_id.Queue()
    for queue, notifications in utils.GroupBy(notifications,
                                              extract_queue).iteritems():
      self._MultiNotifyQueue(queue, notifications, mutation_pool=mutation_pool)
Example #3
  def Schedule(self, tasks, mutation_pool, timestamp=None):
    """Schedule a set of Task() instances."""
    non_fleetspeak_tasks = []
    for queue, queued_tasks in utils.GroupBy(tasks,
                                             lambda x: x.queue).iteritems():
      if not queue:
        continue

      client_id = _GetClientIdFromQueue(queue)
      if fleetspeak_utils.IsFleetspeakEnabledClient(
          client_id, token=self.token):
        for task in queued_tasks:
          fleetspeak_utils.SendGrrMessageThroughFleetspeak(client_id, task)
        continue
      non_fleetspeak_tasks.extend(queued_tasks)

    if data_store.RelationalDBReadEnabled(category="client_messages"):
      data_store.REL_DB.WriteClientMessages(non_fleetspeak_tasks)
    else:
      timestamp = timestamp or self.frozen_timestamp
      mutation_pool.QueueScheduleTasks(non_fleetspeak_tasks, timestamp)
Example #4
  def DeleteNotifications(self, session_ids, start=None, end=None):
    """This deletes the notification when all messages have been processed."""
    if not session_ids:
      return

    for session_id in session_ids:
      if not isinstance(session_id, rdfvalue.SessionID):
        raise RuntimeError(
            "Can only delete notifications for rdfvalue.SessionIDs.")

    if start is None:
      start = 0
    else:
      start = int(start)

    if end is None:
      end = self.frozen_timestamp or rdfvalue.RDFDatetime.Now()

    for queue, ids in utils.GroupBy(
        session_ids, lambda session_id: session_id.Queue()).iteritems():
      queue_shards = self.GetAllNotificationShards(queue)
      self.data_store.DeleteNotifications(queue_shards, ids, start, end)
Example #5
  def _ProcessMessageHandlerRequests(self, requests):
    """Processes message handler requests."""
    logging.debug("Leased message handler request ids: %s", ",".join(
        str(r.request_id) for r in requests))
    grouped_requests = utils.GroupBy(requests, lambda r: r.handler_name)
    for handler_name, requests_for_handler in grouped_requests.items():
      handler_cls = handler_registry.handler_name_map.get(handler_name)
      if not handler_cls:
        logging.error("Unknown message handler: %s", handler_name)
        continue

      try:
        logging.debug("Running %d messages for handler %s",
                      len(requests_for_handler), handler_name)
        handler_cls(token=self.token).ProcessMessages(requests_for_handler)
      except Exception:  # pylint: disable=broad-except
        logging.exception("Exception while processing message handler %s",
                          handler_name)

    logging.debug("Deleting message handler request ids: %s", ",".join(
        str(r.request_id) for r in requests))
    data_store.REL_DB.DeleteMessageHandlerRequests(requests)
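
Examples #5 and #6 share a dispatch pattern worth isolating: leased requests are grouped by handler name, the handler class is looked up in a registry, and each batch runs in its own try/except so one failing handler cannot abort the others. A self-contained sketch of that pattern, with a plain dict standing in for handler_registry.handler_name_map and a hypothetical EchoHandler, might look like this:

import collections
import logging


class EchoHandler(object):
    """Hypothetical handler, used only for this sketch."""

    def ProcessMessages(self, requests):
        logging.info("Processed %d requests", len(requests))


HANDLER_NAME_MAP = {"Echo": EchoHandler}  # stand-in for the real registry


def ProcessRequests(requests):
    grouped = collections.defaultdict(list)
    for request in requests:
        grouped[request["handler_name"]].append(request)

    for handler_name, batch in grouped.items():
        handler_cls = HANDLER_NAME_MAP.get(handler_name)
        if not handler_cls:
            logging.error("Unknown message handler: %s", handler_name)
            continue
        try:
            # Isolate failures per handler so the remaining batches still run.
            handler_cls().ProcessMessages(batch)
        except Exception:  # pylint: disable=broad-except
            logging.exception("Exception in message handler %s", handler_name)
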
Example #6
  def _ProcessMessageHandlerRequests(self):
    """Processes message handler requests."""

    if not data_store.RelationalDBReadEnabled(category="message_handlers"):
      return 0

    now = rdfvalue.RDFDatetime.Now()
    if now - self.last_mh_lease_attempt < self.MH_LEASE_INTERVAL:
      return 0

    self.last_mh_lease_attempt = now

    requests = data_store.REL_DB.LeaseMessageHandlerRequests(
        lease_time=self.well_known_flow_lease_time, limit=1000)
    if not requests:
      return 0

    logging.debug("Leased message handler request ids: %s", ",".join(
        str(r.request_id) for r in requests))
    grouped_requests = utils.GroupBy(requests, lambda r: r.handler_name)
    for handler_name, requests_for_handler in grouped_requests.items():
      handler_cls = handler_registry.handler_name_map.get(handler_name)
      if not handler_cls:
        logging.error("Unknown message handler: %s", handler_name)
        continue

      try:
        logging.debug("Running %d messages for handler %s",
                      len(requests_for_handler), handler_name)
        handler_cls(token=self.token).ProcessMessages(requests_for_handler)
      except Exception:  # pylint: disable=broad-except
        logging.exception("Exception while processing message handler %s",
                          handler_name)

    logging.debug("Deleting message handler request ids: %s", ",".join(
        str(r.request_id) for r in requests))
    data_store.REL_DB.DeleteMessageHandlerRequests(requests)
    return len(requests)
Example #7
    def ReceiveMessages(self, client_id, messages):
        """Receives and processes the messages from the source.

    For each message we update the request object, and place the
    response in that request's queue. If the request is complete, we
    send a message to the worker.

    Args:
      client_id: The client which sent the messages.
      messages: A list of GrrMessage RDFValues.
    """
        now = time.time()
        with queue_manager.QueueManager(token=self.token) as manager:
            for session_id, msgs in utils.GroupBy(
                    messages, operator.attrgetter("session_id")).iteritems():

                # Remove and handle messages to WellKnownFlows
                leftover_msgs = self.HandleWellKnownFlows(msgs)

                unprocessed_msgs = []
                for msg in leftover_msgs:
                    if (msg.auth_state == msg.AuthorizationState.AUTHENTICATED
                            or msg.session_id
                            == self.unauth_allowed_session_id):
                        unprocessed_msgs.append(msg)

                if len(unprocessed_msgs) < len(leftover_msgs):
                    logging.info("Dropped %d unauthenticated messages for %s",
                                 len(leftover_msgs) - len(unprocessed_msgs),
                                 client_id)

                if not unprocessed_msgs:
                    continue

                for msg in unprocessed_msgs:
                    manager.QueueResponse(msg)

                for msg in unprocessed_msgs:
                    # Messages for well known flows should notify even though they don't
                    # have a status.
                    if msg.request_id == 0:
                        manager.QueueNotification(session_id=msg.session_id,
                                                  priority=msg.priority)
                        # Those messages are all the same, one notification is enough.
                        break
                    elif msg.type == rdf_flows.GrrMessage.Type.STATUS:
                        # If we receive a status message from the client it means the client
                        # has finished processing this request. We therefore can de-queue it
                        # from the client queue. msg.task_id will raise if the task id is
                        # not set (message originated at the client, there was no request on
                        # the server), so we have to check .HasTaskID() first.
                        if msg.HasTaskID():
                            manager.DeQueueClientRequest(msg)

                        manager.QueueNotification(session_id=msg.session_id,
                                                  priority=msg.priority,
                                                  last_status=msg.request_id)

                        stat = rdf_flows.GrrStatus(msg.payload)
                        if stat.status == rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED:
                            # A client crashed while performing an action, fire an event.
                            crash_details = rdf_client.ClientCrash(
                                client_id=client_id,
                                session_id=session_id,
                                backtrace=stat.backtrace,
                                crash_message=stat.error_message,
                                nanny_status=stat.nanny_status,
                                timestamp=rdfvalue.RDFDatetime.Now())
                            events.Events.PublishEvent("ClientCrash",
                                                       crash_details,
                                                       token=self.token)

        logging.debug("Received %s messages from %s in %s sec", len(messages),
                      client_id,
                      time.time() - now)
Example #8
  def Flush(self):
    """Writes the changes in this object to the datastore."""

    if data_store.RelationalDBReadEnabled(category="message_handlers"):
      message_handler_requests = []
      leftover_responses = []

      for r, timestamp in self.response_queue:
        if r.request_id == 0 and r.session_id in session_id_map:
          message_handler_requests.append(
              rdf_objects.MessageHandlerRequest(
                  client_id=r.source and r.source.Basename(),
                  handler_name=session_id_map[r.session_id],
                  request_id=r.response_id,
                  request=r.payload))
        else:
          leftover_responses.append((r, timestamp))

      if message_handler_requests:
        data_store.REL_DB.WriteMessageHandlerRequests(message_handler_requests)
      self.response_queue = leftover_responses

    self.data_store.StoreRequestsAndResponses(
        new_requests=self.request_queue,
        new_responses=self.response_queue,
        requests_to_delete=self.requests_to_delete)

    # We need to make sure that notifications are written after the requests so
    # we flush after writing all requests and only notify afterwards.
    mutation_pool = self.data_store.GetMutationPool()
    with mutation_pool:

      if data_store.RelationalDBReadEnabled(category="client_messages"):
        if self.client_messages_to_delete:
          data_store.REL_DB.DeleteClientMessages(self.client_messages_to_delete)
      else:
        messages_by_queue = utils.GroupBy(self.client_messages_to_delete,
                                          lambda request: request.queue)
        for queue, messages in messages_by_queue.items():
          self.Delete(queue, messages, mutation_pool=mutation_pool)

      if self.new_client_messages:
        for timestamp, messages in utils.GroupBy(self.new_client_messages,
                                                 lambda x: x[1]).iteritems():

          self.Schedule(
              [x[0] for x in messages],
              timestamp=timestamp,
              mutation_pool=mutation_pool)

    if self.notifications:
      for notification in self.notifications.itervalues():
        self.NotifyQueue(notification, mutation_pool=mutation_pool)

      mutation_pool.Flush()

    self.request_queue = []
    self.response_queue = []
    self.requests_to_delete = []

    self.client_messages_to_delete = []
    self.notifications = {}
    self.new_client_messages = []
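
The Flush() in Example #8 also encodes an ordering guarantee spelled out in its comment: requests and responses are committed first, and notifications are written and flushed only afterwards, so a worker woken by a notification always finds the requests it refers to. A stripped-down sketch of that two-phase flush, with a hypothetical InMemoryPool in place of the real mutation pool, could read:

class InMemoryPool(object):
    """Hypothetical mutation pool, used only to illustrate the flush order."""

    def __init__(self):
        self.pending = []
        self.committed = []

    def Queue(self, op):
        self.pending.append(op)

    def Flush(self):
        self.committed.extend(self.pending)
        self.pending = []


def FlushRequestsThenNotifications(pool, requests, notifications):
    # Phase 1: schedule and commit all request/response writes.
    for request in requests:
        pool.Queue(("request", request))
    pool.Flush()

    # Phase 2: only now write notifications, so anything that reacts to a
    # notification is guaranteed to find the requests it refers to.
    for notification in notifications:
        pool.Queue(("notification", notification))
    pool.Flush()
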