Example #1
  def testNotificationsAreDeletedFromAllShards(self):
    manager = queue_manager.QueueManager(token=self.token)
    manager.QueueNotification(
        session_id=rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="42"))
    manager.Flush()
    manager.QueueNotification(
        session_id=rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="43"))
    manager.Flush()
    # There should be two notifications in two different shards.
    shards_with_data = 0
    for _ in range(manager.num_notification_shards):
      shard_sessions = manager.GetNotifications(queues.HUNTS)
      if shard_sessions:
        shards_with_data += 1
        self.assertEqual(len(shard_sessions), 1)
    self.assertEqual(shards_with_data, 2)

    # This should still work, as we delete notifications from all shards.
    manager.DeleteNotification(
        rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="43"))
    manager.DeleteNotification(
        rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="42"))
    for _ in range(manager.num_notification_shards):
      shard_sessions = manager.GetNotifications(queues.HUNTS)
      self.assertFalse(shard_sessions)
Example #2
  def testUsesFrozenTimestampWhenDeletingAndFetchingNotifications(self):
    # When used in a "with" statement, QueueManager uses the frozen timestamp
    # when fetching and deleting data. Test that if we have 2 managers
    # created at different times, they will behave correctly when dealing
    # with notifications for the same session ids. I.e. the older queue_manager
    # will only "see" its own notification and the younger queue_manager will
    # "see" both.
    with queue_manager.QueueManager(token=self.token) as manager1:
      manager1.QueueNotification(
          session_id=rdfvalue.SessionID(
              base="aff4:/hunts", queue=queues.HUNTS, flow_name="123456"))
      manager1.Flush()

      self._current_mock_time += 10
      with queue_manager.QueueManager(token=self.token) as manager2:
        manager2.QueueNotification(
            session_id=rdfvalue.SessionID(
                base="aff4:/hunts", queue=queues.HUNTS, flow_name="123456"))
        manager2.Flush()

        self.assertEqual(
            len(manager1.GetNotificationsForAllShards(queues.HUNTS)), 1)
        self.assertEqual(
            len(manager2.GetNotificationsForAllShards(queues.HUNTS)), 1)

        manager1.DeleteNotification(
            rdfvalue.SessionID(
                base="aff4:/hunts", queue=queues.HUNTS, flow_name="123456"))

        self.assertEqual(
            len(manager1.GetNotificationsForAllShards(queues.HUNTS)), 0)
        self.assertEqual(
            len(manager2.GetNotificationsForAllShards(queues.HUNTS)), 1)
Example #3
    def Execute(self, action_cls, args):
        responses = list()

        def SendReply(value,
                      session_id=None,
                      message_type=rdf_flows.GrrMessage.Type.MESSAGE):
            if message_type != rdf_flows.GrrMessage.Type.MESSAGE:
                return

            if str(session_id) in self.wkfs:
                message = rdf_flows.GrrMessage(name=action_cls.__name__,
                                               payload=value,
                                               auth_state="AUTHENTICATED",
                                               session_id=session_id)
                self.wkfs[str(session_id)].ProcessMessage(message)
            else:
                responses.append(value)

        message = rdf_flows.GrrMessage(name=action_cls.__name__,
                                       payload=args,
                                       auth_state="AUTHENTICATED",
                                       session_id=rdfvalue.SessionID())

        action = action_cls(grr_worker=worker_mocks.FakeClientWorker())
        action.SendReply = SendReply
        action.Execute(message)

        return responses
Example #4
class Foreman(flow.WellKnownFlow):
    """The foreman assigns new flows to clients based on their type.

    Clients periodically call the foreman flow to ask for new flows that might
    be scheduled for them based on their types. This allows the server to
    schedule flows for entire classes of machines based on certain criteria.
    """
    well_known_session_id = rdfvalue.SessionID(flow_name="Foreman")
    foreman_cache = None

    # How often we refresh the rule set from the data store.
    cache_refresh_time = 60

    lock = threading.Lock()

    def ProcessMessage(self, message):
        """Run the foreman on the client."""
        # Only accept authenticated messages
        if (message.auth_state !=
                rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED):
            return

        now = time.time()

        # Maintain a cache of the foreman
        with self.lock:
            if (self.foreman_cache is None
                    or now > self.foreman_cache.age + self.cache_refresh_time):
                self.foreman_cache = aff4.FACTORY.Open("aff4:/foreman",
                                                       mode="rw",
                                                       token=self.token)
                self.foreman_cache.age = now

        if message.source:
            self.foreman_cache.AssignTasksToClient(message.source.Basename())
Example #5
  def testCountsActualNumberOfCompletedResponsesWhenApplyingTheLimit(self):
    session_id = rdfvalue.SessionID(flow_name="test")

    # Now queue more requests and responses:
    with queue_manager.QueueManager(token=self.token) as manager:
      # Queue requests 0 through 4.
      for request_id in range(5):
        request = rdf_flow_runner.RequestState(
            id=request_id,
            client_id=test_lib.TEST_CLIENT_ID,
            next_state="TestState",
            session_id=session_id)

        manager.QueueRequest(request)

        # Don't queue any actual responses, just a status message with a
        # fake response_id.
        manager.QueueResponse(
            rdf_flows.GrrMessage(
                session_id=session_id,
                request_id=request_id,
                response_id=1000,
                type=rdf_flows.GrrMessage.Type.STATUS))

    # Check that even though status message for every request indicates 1000
    # responses, only the actual response count is used to apply the limit
    # when FetchCompletedResponses is called.
    completed_response = list(
        manager.FetchCompletedResponses(session_id, limit=5))
    self.assertEqual(len(completed_response), 5)
    for i, (request, responses) in enumerate(completed_response):
      self.assertEqual(request.id, i)
      # Responses contain just the status message.
      self.assertEqual(len(responses), 1)
Example #6
File: standard.py Project: rainser/grr
    def Run(self, args):
        """Reads a buffer on the client and sends it to the server."""
        # Make sure we limit the size of our output
        if args.length > constants.CLIENT_MAX_BUFFER_SIZE:
            raise RuntimeError("Can not read buffers this large.")

        data = vfs.ReadVFS(args.pathspec,
                           args.offset,
                           args.length,
                           progress_callback=self.Progress)
        result = rdf_protodict.DataBlob(
            data=zlib.compress(data),
            compression=rdf_protodict.DataBlob.CompressionType.ZCOMPRESSION)

        digest = hashlib.sha256(data).digest()

        # Ensure that the buffer is counted against this response. Check network
        # send limit.
        self.ChargeBytesToSession(len(data))

        # Now send the data to the server via the special TransferStore
        # well known flow.
        self.grr_worker.SendReply(
            result, session_id=rdfvalue.SessionID(flow_name="TransferStore"))

        # Now report the hash of this blob to our flow as well as the offset and
        # length.
        self.SendReply(
            rdf_client.BufferReference(offset=args.offset,
                                       length=len(data),
                                       data=digest))
Example #7
  def testDeleteRequest(self):
    """Check that we can efficiently destroy a single flow request."""
    session_id = rdfvalue.SessionID(flow_name="test3")

    request = rdf_flow_runner.RequestState(
        id=1,
        client_id=test_lib.TEST_CLIENT_ID,
        next_state="TestState",
        session_id=session_id)

    with queue_manager.QueueManager(token=self.token) as manager:
      manager.QueueRequest(request)
      manager.QueueResponse(
          rdf_flows.GrrMessage(
              session_id=session_id, request_id=1, response_id=1))

    # Check the request and responses are there.
    all_requests = list(manager.FetchRequestsAndResponses(session_id))
    self.assertEqual(len(all_requests), 1)
    self.assertEqual(all_requests[0][0], request)

    with queue_manager.QueueManager(token=self.token) as manager:
      manager.DeleteRequest(request)

    all_requests = list(manager.FetchRequestsAndResponses(session_id))
    self.assertEqual(len(all_requests), 0)
Example #8
class TransferStore(flow.WellKnownFlow):
  """Store a buffer into a determined location."""
  well_known_session_id = rdfvalue.SessionID(flow_name="TransferStore")

  def ProcessMessages(self, msg_list):
    blobs = []
    for message in msg_list:
      if (message.auth_state !=
          rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED):
        logging.error("TransferStore request from %s is not authenticated.",
                      message.source)
        continue

      read_buffer = message.payload
      data = read_buffer.data
      if not data:
        continue

      if (read_buffer.compression ==
          rdf_protodict.DataBlob.CompressionType.ZCOMPRESSION):
        data = zlib.decompress(data)
      elif (read_buffer.compression ==
            rdf_protodict.DataBlob.CompressionType.UNCOMPRESSED):
        pass
      else:
        raise ValueError("Unsupported compression")

      blobs.append(data)

    data_store.DB.StoreBlobs(blobs, token=self.token)

  def ProcessMessage(self, message):
    """Write the blob into the AFF4 blob storage area."""
    return self.ProcessMessages([message])
Example #9
File: worker_test.py Project: rainser/grr
class WorkerSendingWKTestFlow(flow.WellKnownFlow):

    well_known_session_id = rdfvalue.SessionID(
        flow_name="WorkerSendingWKTestFlow")

    def ProcessMessage(self, message):
        RESULTS.append(message)
Example #10
File: worker_test.py Project: rainser/grr
    def testWorkerDeletesNotificationsForBrokenObjects(self):
        # Test notifications for objects that don't exist.
        session_id = rdfvalue.SessionID(queue=queues.FLOWS, flow_name="123456")

        self.CheckNotificationsDisappear(session_id)

        # Now check objects that are actually broken.

        # Start a new flow.
        session_id = flow.StartFlow(flow_name="WorkerSendingTestFlow",
                                    client_id=self.client_id,
                                    token=self.token)
        # Overwrite the type of the object such that opening it will now fail.
        data_store.DB.Set(session_id, "aff4:type", "DeprecatedClass")

        # Starting a new flow schedules notifications for the worker already but
        # this test actually checks that there are none. Thus, we have to delete
        # them or the test fails.
        data_store.DB.DeleteSubject(queues.FLOWS)

        # Check that opening it really does fail.
        with self.assertRaises(aff4.InstantiationError):
            aff4.FACTORY.Open(session_id, token=self.token)

        self.CheckNotificationsDisappear(session_id)
Example #11
File: worker_test.py Project: rainser/grr
    def testWellKnownFlowResponsesAreProcessedOnlyOnce(self):
        worker_obj = worker_lib.GRRWorker(token=self.token)

        # Send a message to a WellKnownFlow - ClientStatsAuto.
        client_id = rdf_client.ClientURN("C.1100110011001100")
        self.SendResponse(rdfvalue.SessionID(queue=queues.STATS,
                                             flow_name="Stats"),
                          data=rdf_client.ClientStats(RSS_size=1234),
                          client_id=client_id,
                          well_known=True)

        # Process all messages
        worker_obj.RunOnce()
        worker_obj.thread_pool.Join()

        client = aff4.FACTORY.Open(client_id.Add("stats"), token=self.token)
        stats = client.Get(client.Schema.STATS)
        self.assertEqual(stats.RSS_size, 1234)

        aff4.FACTORY.Delete(client_id.Add("stats"), token=self.token)

        # Process all messages once again - there should be no actual processing
        # done, as all the responses were processed last time.
        worker_obj.RunOnce()
        worker_obj.thread_pool.Join()

        # Check that stats haven't changed as no new responses were processed.
        client = aff4.FACTORY.Open(client_id.Add("stats"), token=self.token)
        self.assertIsNone(client.Get(client.Schema.STATS))
Example #12
class ClientStartupHandler(flow.WellKnownFlow):
    """Handles client startup events."""

    well_known_session_id = rdfvalue.SessionID(flow_name="Startup")

    def ProcessMessage(self, message=None):
        """Handle a startup event."""

        client_id = message.source
        new_si = message.payload
        drift = rdfvalue.Duration("5m")

        if data_store.RelationalDBReadEnabled():
            current_si = data_store.REL_DB.ReadClientStartupInfo(
                client_id.Basename())

            # We write the updated record if the client_info has any changes
            # or the boot time is more than 5 minutes different.
            if (not current_si or current_si.client_info != new_si.client_info
                    or not current_si.boot_time
                    or abs(current_si.boot_time - new_si.boot_time) > drift):
                try:
                    data_store.REL_DB.WriteClientStartupInfo(
                        client_id.Basename(), new_si)
                except db.UnknownClientError:
                    # On first contact with a new client, this write will fail.
                    logging.info(
                        "Can't write StartupInfo for unknown client %s",
                        client_id)
        else:
            changes = False
            with aff4.FACTORY.Create(client_id,
                                     aff4_grr.VFSGRRClient,
                                     mode="rw",
                                     token=self.token) as client:
                old_info = client.Get(client.Schema.CLIENT_INFO)
                old_boot = client.Get(client.Schema.LAST_BOOT_TIME, 0)

                info = new_si.client_info

                # Only write to the datastore if we have new information.
                if info != old_info:
                    client.Set(client.Schema.CLIENT_INFO(info))
                    changes = True

                client.AddLabels(info.labels, owner="GRR")

                # Allow for some drift in the boot times (5 minutes).
                if not old_boot or abs(old_boot - new_si.boot_time) > drift:
                    client.Set(client.Schema.LAST_BOOT_TIME(new_si.boot_time))
                    changes = True

            if data_store.RelationalDBWriteEnabled() and changes:
                try:
                    data_store.REL_DB.WriteClientStartupInfo(
                        client_id.Basename(), new_si)
                except db.UnknownClientError:
                    pass
Example #13
  def Send(self, response):
    self.grr_worker.SendReply(
        rdf_client.ClientStats.Downsampled(response),
        session_id=rdfvalue.SessionID(queue=queues.STATS, flow_name="Stats"),
        response_id=0,
        request_id=0,
        priority=rdf_flows.GrrMessage.Priority.LOW_PRIORITY,
        message_type=rdf_flows.GrrMessage.Type.MESSAGE,
        require_fastpoll=False)
Example #14
class GetClientStatsAuto(flow.WellKnownFlow,
                         GetClientStatsProcessResponseMixin):
    """This action pushes client stats to the server automatically."""

    category = None

    well_known_session_id = rdfvalue.SessionID(flow_name="Stats",
                                               queue=queues.STATS)

    def ProcessMessage(self, message):
        """Processes a stats response from the client."""
        client_stats = rdf_client.ClientStats(message.payload)
        self.ProcessResponse(message.source, client_stats)
Example #15
  def testGetNotificationsForAllShards(self):
    manager = queue_manager.QueueManager(token=self.token)
    manager.QueueNotification(
        session_id=rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="42"))
    manager.Flush()

    manager.QueueNotification(
        session_id=rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="43"))
    manager.Flush()

    live_shard_count = 0
    for _ in range(manager.num_notification_shards):
      shard_sessions = manager.GetNotifications(queues.HUNTS)
      self.assertLess(len(shard_sessions), 2)
      if len(shard_sessions) == 1:
        live_shard_count += 1
    self.assertEqual(live_shard_count, 2)

    notifications = manager.GetNotificationsForAllShards(queues.HUNTS)
    self.assertEqual(len(notifications), 2)
Example #16
class WellKnownSessionTest(flow.WellKnownFlow):
    """Tests the well known flow implementation."""
    well_known_session_id = rdfvalue.SessionID(queue=rdfvalue.RDFURN("test"),
                                               flow_name="TestSessionId")

    messages = []

    def __init__(self, *args, **kwargs):
        flow.WellKnownFlow.__init__(self, *args, **kwargs)

    def ProcessMessage(self, message):
        """Record the message id for testing."""
        self.messages.append(int(message.payload))
Example #17
    def __init__(self,
                 certificate,
                 private_key,
                 max_queue_size=50,
                 message_expiry_time=120,
                 max_retransmission_time=10,
                 threadpool_prefix="grr_threadpool"):
        # Identify ourselves as the server.
        self.token = access_control.ACLToken(username="******",
                                             reason="Implied.")
        self.token.supervisor = True

        if data_store.RelationalDBReadEnabled():
            self._communicator = RelationalServerCommunicator(
                certificate=certificate, private_key=private_key)
        else:
            self._communicator = ServerCommunicator(certificate=certificate,
                                                    private_key=private_key,
                                                    token=self.token)

        self.message_expiry_time = message_expiry_time
        self.max_retransmission_time = max_retransmission_time
        self.max_queue_size = max_queue_size
        self.thread_pool = threadpool.ThreadPool.Factory(
            threadpool_prefix,
            min_threads=2,
            max_threads=config.CONFIG["Threadpool.size"])
        self.thread_pool.Start()

        # There is only a single session id that we accept unauthenticated
        # messages for, the one to enroll new clients.
        self.unauth_allowed_session_id = rdfvalue.SessionID(
            queue=queues.ENROLLMENT, flow_name="Enrol")

        # Some well known flows are run on the front end.
        available_wkfs = flow.WellKnownFlow.GetAllWellKnownFlows(
            token=self.token)
        whitelist = set(config.CONFIG["Frontend.well_known_flows"])

        available_wkf_set = set(available_wkfs)
        unknown_flows = whitelist - available_wkf_set
        if unknown_flows:
            raise ValueError("Unknown flows in Frontend.well_known_flows: %s" %
                             ",".join(unknown_flows))

        self.well_known_flows = {
            flow_name: available_wkfs[flow_name]
            for flow_name in whitelist & available_wkf_set
        }
Example #18
File: events_test.py Project: qsdj/grr
    def testEventNotification(self):
        """Test that events are sent to listeners."""
        TestListener.received_events = []

        event = rdf_flows.GrrMessage(
            session_id=rdfvalue.SessionID(flow_name="SomeFlow"),
            name="test message",
            payload=rdf_paths.PathSpec(path="foobar", pathtype="TSK"),
            source="aff4:/C.0000000000000001",
            auth_state="AUTHENTICATED")

        events.Events.PublishEvent("TestEvent", event, token=self.token)

        # Make sure the source is correctly propagated.
        self.assertEqual(TestListener.received_events[0], event)
Example #19
  def GetNewSessionID(self):
    """Returns a random session ID for this flow based on the runner args.

    Returns:
      A formatted session id URN.
    """
    # Calculate a new session id based on the flow args. Note that our caller
    # can specify the base path to the session id, but they can not influence
    # the exact session id we pick. This ensures that callers can not engineer a
    # session id clash forcing us to overwrite an existing flow.
    base = self.runner_args.base_session_id
    if base is None:
      base = self.runner_args.client_id or aff4.ROOT_URN
      base = base.Add("flows")

    return rdfvalue.SessionID(base=base, queue=self.runner_args.queue)
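
A minimal sketch of the URN shape GetNewSessionID produces, assuming the usual GRR layout of <base>/<queue name>:<flow name or random id>; the client id below is made up for illustration and not taken from any example on this page.

# Illustrative only: builds a SessionID the way GetNewSessionID does,
# using a made-up client id as the base.
base = rdfvalue.RDFURN("aff4:/C.1000000000000000").Add("flows")
session_id = rdfvalue.SessionID(base=base, queue=queues.FLOWS)
# str(session_id) has the form "<base>/<queue name>:<suffix>"; the suffix is
# a randomly generated hex id because no flow_name was supplied.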
Example #20
  def testNotificationRequeueing(self):
    with test_lib.ConfigOverrider({"Worker.queue_shards": 1}):
      session_id = rdfvalue.SessionID(
          base="aff4:/testflows", queue=queues.HUNTS, flow_name="123")
      with test_lib.FakeTime(1000):
        # Schedule a notification.
        with queue_manager.QueueManager(token=self.token) as manager:
          manager.QueueNotification(session_id=session_id)

      with test_lib.FakeTime(1100):
        with queue_manager.QueueManager(token=self.token) as manager:
          notifications = manager.GetNotifications(queues.HUNTS)
          self.assertEqual(len(notifications), 1)
          # This notification was first queued and last queued at time 1000.
          notification = notifications[0]
          self.assertEqual(notification.timestamp.AsSecondsSinceEpoch(), 1000)
          self.assertEqual(notification.first_queued.AsSecondsSinceEpoch(),
                           1000)
          # Now requeue the same notification.
          manager.DeleteNotification(session_id)
          manager.QueueNotification(notification)

      with test_lib.FakeTime(1200):
        with queue_manager.QueueManager(token=self.token) as manager:
          notifications = manager.GetNotifications(queues.HUNTS)
          self.assertEqual(len(notifications), 1)
          notification = notifications[0]
          # Now the last queue time is 1100, the first queue time is still 1000.
          self.assertEqual(notification.timestamp.AsSecondsSinceEpoch(), 1100)
          self.assertEqual(notification.first_queued.AsSecondsSinceEpoch(),
                           1000)
          # Again requeue the same notification.
          manager.DeleteNotification(session_id)
          manager.QueueNotification(notification)

      expired = 1000 + queue_manager.QueueManager.notification_expiry_time
      with test_lib.FakeTime(expired):
        with queue_manager.QueueManager(token=self.token) as manager:
          notifications = manager.GetNotifications(queues.HUNTS)
          self.assertEqual(len(notifications), 1)
          # Again requeue the notification, this time it should be dropped.
          manager.DeleteNotification(session_id)
          manager.QueueNotification(notifications[0])

        with queue_manager.QueueManager(token=self.token) as manager:
          notifications = manager.GetNotifications(queues.HUNTS)
          self.assertEqual(len(notifications), 0)
Example #21
  def testDestroyFlowStates(self):
    """Check that we can efficiently destroy the flow's request queues."""
    session_id = rdfvalue.SessionID(flow_name="test2")

    request = rdf_flow_runner.RequestState(
        id=1,
        client_id=test_lib.TEST_CLIENT_ID,
        next_state="TestState",
        session_id=session_id)

    with queue_manager.QueueManager(token=self.token) as manager:
      manager.QueueRequest(request)
      manager.QueueResponse(
          rdf_flows.GrrMessage(
              request_id=1, response_id=1, session_id=session_id))

    # Check the request and responses are there.
    all_requests = list(manager.FetchRequestsAndResponses(session_id))
    self.assertEqual(len(all_requests), 1)
    self.assertEqual(all_requests[0][0], request)

    # Read the response directly.
    responses = data_store.DB.ReadResponsesForRequestId(session_id, 1)
    self.assertEqual(len(responses), 1)
    response = responses[0]
    self.assertEqual(response.request_id, 1)
    self.assertEqual(response.response_id, 1)
    self.assertEqual(response.session_id, session_id)

    with queue_manager.QueueManager(token=self.token) as manager:
      manager.DestroyFlowStates(session_id)

    all_requests = list(manager.FetchRequestsAndResponses(session_id))
    self.assertEqual(len(all_requests), 0)

    # Check that the response is gone.
    responses = data_store.DB.ReadResponsesForRequestId(session_id, 1)
    self.assertEqual(len(responses), 0)

    # Ensure the rows are gone from the data store. Some data stores
    # don't store the queues in that way but there is no harm in
    # checking.
    self.assertEqual(
        data_store.DB.ResolveRow(session_id.Add("state/request:00000001")), [])

    self.assertEqual(data_store.DB.ResolveRow(session_id.Add("state")), [])
Example #22
    def testNannyMessage(self):
        client_id = self.SetupClient(0)
        nanny_message = "Oh no!"
        self.email_message = {}

        def SendEmail(address, sender, title, message, **_):
            self.email_message.update(
                dict(address=address,
                     sender=sender,
                     title=title,
                     message=message))

        with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmail):
            self.SendResponse(
                session_id=rdfvalue.SessionID(flow_name="NannyMessage"),
                data=nanny_message,
                client_id=client_id,
                well_known=True)

            # Now emulate a worker to process the event.
            worker = worker_test_lib.MockWorker(token=self.token)
            while worker.Next():
                pass
            worker.pool.Join()

            # We expect the email to be sent.
            self.assertEqual(self.email_message.get("address"),
                             config.CONFIG["Monitoring.alert_email"])
            self.assertTrue(str(client_id) in self.email_message["title"])

            # Make sure the message is included in the email message.
            self.assertTrue(nanny_message in self.email_message["message"])

            # Make sure crashes collections are created and written
            # into proper locations. First check the per-client crashes collection.
            client_crashes = list(
                aff4_grr.VFSGRRClient.CrashCollectionForCID(client_id))

            self.assertEqual(len(client_crashes), 1)
            crash = client_crashes[0]
            self.assertEqual(crash.client_id, client_id)
            self.assertEqual(crash.client_info.client_name, "GRR Monitor")
            self.assertEqual(crash.crash_type, "Nanny Message")
            self.assertEqual(crash.crash_message, nanny_message)
Example #23
def WakeStuckFlow(session_id):
    """Wake up stuck flows.

    A stuck flow is one which is waiting for the client to do something, but
    the client requests have been removed from the client queue. This can
    happen if the system is too loaded and the client messages have TTLed out.
    In this case we reschedule the client requests for this session.

    Args:
      session_id: The session for the flow to wake.

    Returns:
      The total number of client messages re-queued.
    """
    session_id = rdfvalue.SessionID(session_id)
    woken = 0
    checked_pending = False

    with queue_manager.QueueManager() as manager:
        for request, responses in manager.FetchRequestsAndResponses(
                session_id):
            # We need to check if there are client requests pending.
            if not checked_pending:
                task = manager.Query(request.client_id,
                                     task_id="task:%s" %
                                     request.request.task_id)

                if task:
                    # Client has tasks pending already.
                    return

                checked_pending = True

            if (not responses
                    or responses[-1].type != rdf_flows.GrrMessage.Type.STATUS):
                manager.QueueClientMessage(request.request)
                woken += 1

            if responses and responses[
                    -1].type == rdf_flows.GrrMessage.Type.STATUS:
                manager.QueueNotification(session_id)

    return woken
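
A minimal usage sketch for WakeStuckFlow, assuming the stuck flow's session id is already known; the client id and flow id below are illustrative.

# Hypothetical invocation; the client id and flow id are made up.
stuck_session = rdfvalue.SessionID(
    base="aff4:/C.1000000000000000/flows",
    queue=queues.FLOWS,
    flow_name="ABCDEF12")
requeued = WakeStuckFlow(stuck_session)
# WakeStuckFlow returns None (not a count) when the client still has tasks
# queued, so guard against that when reporting.
print("Re-queued %d client message(s)" % (requeued or 0))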
Example #24
File: ca_enroller.py Project: rainser/grr
class Enroler(flow.WellKnownFlow):
    """Manage enrolment requests."""

    well_known_session_id = rdfvalue.SessionID(queue=queues.ENROLLMENT,
                                               flow_name="Enrol")

    def ProcessMessage(self, message):
        """Begins an enrollment flow for this client.

        Args:
          message: The Certificate sent by the client. Note that this message
            is not authenticated.
        """
        cert = rdf_crypto.Certificate(message.payload)

        queue = self.well_known_session_id.Queue()

        client_id = message.source

        # It makes no sense to enrol the same client multiple times, so we
        # eliminate duplicates. Note that we can still enroll clients multiple
        # times due to cache expiration.
        try:
            enrolment_cache.Get(client_id)
            return
        except KeyError:
            enrolment_cache.Put(client_id, 1)

        # Create a new client object for this client.
        client = aff4.FACTORY.Create(client_id,
                                     aff4_grr.VFSGRRClient,
                                     mode="rw",
                                     token=self.token)

        # Only enroll this client if it has no certificate yet.
        if not client.Get(client.Schema.CERT):
            # Start the enrollment flow for this client.
            flow.StartFlow(client_id=client_id,
                           flow_name=CAEnroler.__name__,
                           csr=cert,
                           queue=queue,
                           token=self.token)
Example #25
class ClientAlertHandler(NannyMessageHandler):
    """A listener for client messages."""

    well_known_session_id = rdfvalue.SessionID(flow_name="ClientAlert")

    mail_template = jinja2.Template("""
<html><body><h1>GRR client message received.</h1>

The client {{ client_id }} ({{ hostname }}) just sent a message:<br>
<br>
{{ message }}
<br>
Click <a href='{{ admin_ui }}/#{{ url }}'> here </a> to access this machine.

<p>{{ signature }}</p>

</body></html>""",
                                    autoescape=True)

    subject = "GRR client message received from %s."

    logline = "Client message from %s: %s"
Example #26
class SendStartupInfo(actions.ActionPlugin):

  in_rdfvalue = None
  out_rdfvalues = [rdf_client.StartupInfo]

  well_known_session_id = rdfvalue.SessionID(flow_name="Startup")

  def Run(self, unused_arg, ttl=None):
    """Returns the startup information."""
    logging.debug("Sending startup information.")
    boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(psutil.boot_time())
    response = rdf_client.StartupInfo(
        boot_time=boot_time, client_info=GetClientInformation())

    self.grr_worker.SendReply(
        response,
        session_id=self.well_known_session_id,
        response_id=0,
        request_id=0,
        priority=rdf_flows.GrrMessage.Priority.LOW_PRIORITY,
        message_type=rdf_flows.GrrMessage.Type.MESSAGE,
        require_fastpoll=False,
        ttl=ttl)
Example #27
  def testMultipleNotificationsForTheSameSessionId(self):
    manager = queue_manager.QueueManager(token=self.token)
    manager.QueueNotification(
        session_id=rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="123456"),
        timestamp=(self._current_mock_time + 10) * 1e6)
    manager.QueueNotification(
        session_id=rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="123456"),
        timestamp=(self._current_mock_time + 20) * 1e6)
    manager.QueueNotification(
        session_id=rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="123456"),
        timestamp=(self._current_mock_time + 30) * 1e6)
    manager.Flush()

    self.assertEqual(len(manager.GetNotificationsForAllShards(queues.HUNTS)), 0)

    self._current_mock_time += 10
    self.assertEqual(len(manager.GetNotificationsForAllShards(queues.HUNTS)), 1)
    manager.DeleteNotification(
        rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="123456"))

    self._current_mock_time += 10
    self.assertEqual(len(manager.GetNotificationsForAllShards(queues.HUNTS)), 1)
    manager.DeleteNotification(
        rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="123456"))

    self._current_mock_time += 10
    self.assertEqual(len(manager.GetNotificationsForAllShards(queues.HUNTS)), 1)
    manager.DeleteNotification(
        rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="123456"))

    self._current_mock_time += 10
    self.assertEqual(len(manager.GetNotificationsForAllShards(queues.HUNTS)), 0)
Example #28
  def RegisterWellKnownFlow(self, wkf):
    session_id = rdfvalue.SessionID(flow_name=wkf.FLOW_NAME)
    self.wkfs[str(session_id)] = wkf
Example #29
class WellKnownSessionTest2(WellKnownSessionTest):
    """Another testing well known flow."""
    well_known_session_id = rdfvalue.SessionID(queue=rdfvalue.RDFURN("test"),
                                               flow_name="TestSessionId2")
Example #30
# queues and rdfvalue are referenced by session_id_map below.
from grr.core.grr_response_core.lib import queues
from grr.core.grr_response_core.lib import rdfvalue
from grr.core.grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr.server.grr_response_server import data_store
from grr.server.grr_response_server import fleetspeak_utils
from grr.server.grr_response_server.rdfvalues import objects as rdf_objects


class Error(Exception):
  """Base class for errors in this module."""


class MoreDataException(Error):
  """Raised when there is more data available."""


session_id_map = {
    rdfvalue.SessionID(queue=queues.ENROLLMENT, flow_name="Enrol"): "Enrol",
    rdfvalue.SessionID(queue=queues.STATS, flow_name="Stats"): "StatsHandler",
    rdfvalue.SessionID(flow_name="TransferStore"): "BlobHandler",
}



def _GetClientIdFromQueue(q):
  """Returns q's client id, if q is a client task queue, otherwise None.

  Args:
    q: rdfvalue.RDFURN

  Returns:
    string or None
  """