Example #1
    def testNotificationsAreDeletedFromAllShards(self):
        manager = queue_manager.QueueManager(token=self.token)
        manager.QueueNotification(session_id=rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="42"))
        manager.Flush()
        manager.QueueNotification(session_id=rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="43"))
        manager.Flush()
        # There should be two notifications in two different shards.
        shards_with_data = 0
        for _ in range(manager.num_notification_shards):
            shard_sessions = manager.GetNotifications(queues.HUNTS)
            if shard_sessions:
                shards_with_data += 1
                self.assertEqual(len(shard_sessions), 1)
        self.assertEqual(shards_with_data, 2)

        # This should still work, as we delete notifications from all shards.
        manager.DeleteNotification(
            rdfvalue.SessionID(base="aff4:/hunts",
                               queue=queues.HUNTS,
                               flow_name="43"))
        manager.DeleteNotification(
            rdfvalue.SessionID(base="aff4:/hunts",
                               queue=queues.HUNTS,
                               flow_name="42"))
        for _ in range(manager.num_notification_shards):
            shard_sessions = manager.GetNotifications(queues.HUNTS)
            self.assertFalse(shard_sessions)
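
For reference, a SessionID like those above is just an AFF4 URN composed from a base, a queue, and a flow name. A minimal sketch, assuming the import paths of a recent GRR release (older trees use grr.lib instead) and that the URN renders roughly as "<base>/<queue>:<flow_name>":

from grr_response_core.lib import rdfvalue
from grr_response_server import queues

session_id = rdfvalue.SessionID(
    base="aff4:/hunts", queue=queues.HUNTS, flow_name="42")
# Expected to render as something like "aff4:/hunts/H:42"; the exact queue
# prefix depends on the queues.HUNTS constant in the installed version.
print(str(session_id))
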
Example #2
  def ClientServerCommunicate(self, timestamp=None):
    """Tests the end to end encrypted communicators."""
    message_list = rdf_flows.MessageList()
    for i in range(1, 11):
      message_list.job.Append(
          session_id=rdfvalue.SessionID(
              base="aff4:/flows", queue=queues.FLOWS, flow_name=i),
          name="OMG it's a string")

    result = rdf_flows.ClientCommunication()
    timestamp = self.client_communicator.EncodeMessages(
        message_list, result, timestamp=timestamp)
    self.cipher_text = result.SerializeToBytes()

    (decoded_messages, source, client_timestamp) = (
        self.server_communicator.DecryptMessage(self.cipher_text))

    self.assertEqual(source, self.client_communicator.common_name)
    self.assertEqual(client_timestamp, timestamp)
    self.assertLen(decoded_messages, 10)
    for i in range(1, 11):
      self.assertEqual(
          decoded_messages[i - 1].session_id,
          rdfvalue.SessionID(
              base="aff4:/flows", queue=queues.FLOWS, flow_name=i))

    return decoded_messages
Example #3
class Foreman(flow.WellKnownFlow):
    """The foreman assigns new flows to clients based on their type.

    Clients periodically call the foreman flow to ask for new flows that might
    be scheduled for them based on their types. This allows the server to
    schedule flows for entire classes of machines based on certain criteria.
    """
    well_known_session_id = rdfvalue.SessionID(flow_name="Foreman")
    foreman_cache = None

    # How often we refresh the rule set from the data store.
    cache_refresh_time = 60

    lock = threading.Lock()

    def ProcessMessage(self, message):
        """Run the foreman on the client."""
        # Only accept authenticated messages
        if (message.auth_state !=
                rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED):
            return

        now = time.time()

        # Maintain a cache of the foreman
        with self.lock:
            if (self.foreman_cache is None
                    or now > self.foreman_cache.age + self.cache_refresh_time):
                self.foreman_cache = aff4.FACTORY.Open("aff4:/foreman",
                                                       mode="rw",
                                                       token=self.token)
                self.foreman_cache.age = now

        if message.source:
            self.foreman_cache.AssignTasksToClient(message.source.Basename())
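
The cache handling in ProcessMessage above is a generic pattern: a lock-guarded, time-based refresh. A self-contained sketch of just that pattern, with a hypothetical loader callable standing in for aff4.FACTORY.Open:

import threading
import time

class TimedCache:
    """Caches a loaded object, reloading it after refresh_seconds."""

    def __init__(self, loader, refresh_seconds=60):
        self._loader = loader  # hypothetical callable producing the object
        self._refresh = refresh_seconds
        self._value = None
        self._age = 0.0
        self._lock = threading.Lock()

    def Get(self):
        now = time.time()
        with self._lock:
            if self._value is None or now > self._age + self._refresh:
                self._value = self._loader()
                self._age = now
            return self._value
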
Example #4
class NannyMessageHandlerFlow(NannyMessageHandlerMixin, flow.WellKnownFlow):
    """A listener for nanny messages."""

    well_known_session_id = rdfvalue.SessionID(flow_name="NannyMessage")

    def ProcessMessage(self, message=None):
        self.SendEmail(message.source.Basename(), message.payload.string)
Example #5
  def testNannyMessageHandlerForUnknownClient(self):
    client_id = "C.1000000000000000"
    nanny_message = "Oh no!"
    email_dict = {}

    def SendEmail(address, sender, title, message, **_):
      email_dict.update(
          dict(address=address, sender=sender, title=title, message=message))

    with test_lib.ConfigOverrider({
        "Database.useForReads": True,
        "Database.useForReads.message_handlers": True
    }):
      with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmail):
        flow_test_lib.MockClient(client_id, None)._PushHandlerMessage(
            rdf_flows.GrrMessage(
                source=client_id,
                session_id=rdfvalue.SessionID(flow_name="NannyMessage"),
                payload=rdf_protodict.DataBlob(string=nanny_message),
                request_id=0,
                auth_state="AUTHENTICATED",
                response_id=123))

    # We expect the email to be sent.
    self.assertEqual(
        email_dict.get("address"), config.CONFIG["Monitoring.alert_email"])

    # Make sure the message is included in the email message.
    self.assertIn(nanny_message, email_dict["message"])

    # client_id is a plain string here, so it can be checked directly in the
    # title; there is no URN to take a Basename() of.
    self.assertIn(client_id, email_dict["title"])
Example #6
class Enroler(flow.WellKnownFlow):
    """Manage enrolment requests."""

    well_known_session_id = rdfvalue.SessionID(queue=queues.ENROLLMENT,
                                               flow_name="Enrol")

    def ProcessMessage(self, message):
        """Begins an enrollment flow for this client.

        Args:
          message: The Certificate sent by the client. Note that this message
            is not authenticated.
        """
        cert = rdf_crypto.Certificate(message.payload)

        queue = self.well_known_session_id.Queue()

        client_id = message.source

        # It makes no sense to enrol the same client multiple times, so we
        # eliminate duplicates. Note that we can still enrol clients multiple
        # times due to cache expiration.
        try:
            enrolment_cache.Get(client_id)
            return
        except KeyError:
            enrolment_cache.Put(client_id, 1)

        # Create a new client object for this client.
        if data_store.AFF4Enabled():
            client = aff4.FACTORY.Create(client_id,
                                         aff4_grr.VFSGRRClient,
                                         mode="rw",
                                         token=self.token)
            client_cert = client.Get(client.Schema.CERT)

        if data_store.RelationalDBReadEnabled():
            try:
                md = data_store.REL_DB.ReadClientMetadata(client_id.Basename())
                client_cert = md.certificate
            except db.UnknownClientError:
                client_cert = None

        if data_store.RelationalDBWriteEnabled():
            data_store.REL_DB.WriteClientMetadata(client_id.Basename(),
                                                  fleetspeak_enabled=False)

        # Only enroll this client if it has no certificate yet.
        if not client_cert:
            # Start the enrollment flow for this client.

            # Note that the actual CAEnroler class is autogenerated from the
            # CAEnrolerMixin by the DualDBFlow decorator, confusing the linter;
            # hence the disable directive.
            flow.StartAFF4Flow(
                client_id=client_id,
                flow_name=CAEnroler.__name__,  # pylint: disable=undefined-variable
                csr=cert,
                queue=queue,
                token=self.token)
Example #7
    def _StoreDataAndHash(self, data: AnyStr, offset: int) -> None:
        """Uploads data as blob and replies hash to flow.

    Args:
      data: Bytes to be stored as a blob.
      offset: Offset where the data was read from.
    """

        data_blob = rdf_protodict.DataBlob(
            data=zlib.compress(data),
            compression=rdf_protodict.DataBlob.CompressionType.ZCOMPRESSION)

        # Ensure that the buffer is counted against this response. Check network
        # send limit.
        self.ChargeBytesToSession(len(data))

        # Now send the data to the server via the special TransferStore
        # well-known flow.
        self.grr_worker.SendReply(
            data_blob,
            session_id=rdfvalue.SessionID(flow_name="TransferStore"))

        # Now report the hash of this blob to our flow as well as the offset and
        # length.
        digest = hashlib.sha256(data).digest()

        buffer_reference = rdf_client.BufferReference(offset=offset,
                                                      length=len(data),
                                                      data=digest)
        self._partial_file_hash.update(data)
        partial_file_hash = self._partial_file_hash.digest()

        self.SendReply(
            rdf_read_low_level.ReadLowLevelResult(
                blob=buffer_reference, accumulated_hash=partial_file_hash))
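
Note the relationship above: the blob payload is zlib-compressed, while both the per-buffer digest and the running partial hash are computed over the uncompressed bytes. A stdlib-only sketch:

import hashlib
import zlib

data = b"example file contents"
compressed = zlib.compress(data)

# The hash reported to the flow covers the original data, not the blob.
digest = hashlib.sha256(data).digest()

assert zlib.decompress(compressed) == data
print(len(data), len(compressed), digest.hex())
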
Example #8
class Timeline(actions.ActionPlugin):
    """A client action for timeline collection."""

    in_rdfvalue = rdf_timeline.TimelineArgs
    out_rdfvalues = [rdf_timeline.TimelineResult]

    _TRANSFER_STORE_ID = rdfvalue.SessionID(flow_name="TransferStore")

    def Run(self, args: rdf_timeline.TimelineArgs) -> None:
        """Executes the client action."""
        entries = iterator.Counted(Walk(args.root))
        for entry_batch in rdf_timeline.TimelineEntry.SerializeStream(entries):
            entry_batch_blob = rdf_protodict.DataBlob(data=entry_batch)
            self.SendReply(entry_batch_blob,
                           session_id=self._TRANSFER_STORE_ID)

            entry_batch_blob_id = hashlib.sha256(entry_batch).digest()

            result = rdf_timeline.TimelineResult()
            result.entry_batch_blob_ids.append(entry_batch_blob_id)
            result.entry_count = entries.count
            self.SendReply(result)

            # Each result should contain information only about the number of entries
            # in the current batch, so after the results are sent we simply reset the
            # counter.
            entries.Reset()
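
iterator.Counted wraps the entry stream so that entry_count covers only the entries consumed since the last Reset(). A minimal stand-in showing the contract (a sketch, not GRR's actual iterator.Counted):

class Counted:
    """Iterator wrapper counting consumed items, resettable per batch."""

    def __init__(self, delegate):
        self._delegate = iter(delegate)
        self.count = 0

    def __iter__(self):
        return self

    def __next__(self):
        item = next(self._delegate)
        self.count += 1
        return item

    def Reset(self):
        self.count = 0
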
Example #9
File: worker_test.py Project: x35029/grr
class WorkerSendingWKTestFlow(flow.WellKnownFlow):

    well_known_session_id = rdfvalue.SessionID(
        flow_name="WorkerSendingWKTestFlow")

    def ProcessMessage(self, message):
        RESULTS.append(message)
Example #10
    def Run(self, args):
        """Reads a buffer on the client and sends it to the server."""
        # Make sure we limit the size of our output
        if args.length > constants.CLIENT_MAX_BUFFER_SIZE:
            raise RuntimeError("Can not read buffers this large.")

        data = vfs.ReadVFS(args.pathspec,
                           args.offset,
                           args.length,
                           progress_callback=self.Progress)
        result = rdf_protodict.DataBlob(
            data=zlib.compress(data),
            compression=rdf_protodict.DataBlob.CompressionType.ZCOMPRESSION)

        digest = hashlib.sha256(data).digest()

        # Ensure that the buffer is counted against this response. Check network
        # send limit.
        self.ChargeBytesToSession(len(data))

        # Now send the data to the server via the special TransferStore
        # well-known flow.
        self.grr_worker.SendReply(
            result, session_id=rdfvalue.SessionID(flow_name="TransferStore"))

        # Now report the hash of this blob to our flow as well as the offset and
        # length.
        self.SendReply(
            rdf_client.BufferReference(offset=args.offset,
                                       length=len(data),
                                       data=digest))
Example #11
  def testBlobHandlerMessagesAreHandledOnTheFrontend(self):
    client_id = "C.1234567890123456"
    data_store.REL_DB.WriteClientMetadata(client_id, fleetspeak_enabled=False)

    # Check that the worker queue is empty.
    self.assertEmpty(data_store.REL_DB.ReadMessageHandlerRequests())

    data = b"foo"
    data_blob = rdf_protodict.DataBlob(
        data=zlib.compress(data),
        compression=rdf_protodict.DataBlob.CompressionType.ZCOMPRESSION)
    messages = [
        rdf_flows.GrrMessage(
            source=client_id,
            session_id=str(rdfvalue.SessionID(flow_name="TransferStore")),
            payload=data_blob,
            auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
        )
    ]
    ReceiveMessages(client_id, messages)

    # Check that the worker queue is still empty.
    self.assertEmpty(data_store.REL_DB.ReadMessageHandlerRequests())

    # Check that the blob was written to the blob store.
    self.assertTrue(
        data_store.BLOBS.CheckBlobExists(rdf_objects.BlobID.FromBlobData(data)))
Example #12
    def testDeleteRequest(self):
        """Check that we can efficiently destroy a single flow request."""
        session_id = rdfvalue.SessionID(flow_name="test3")

        request = rdf_flow_runner.RequestState(
            id=1,
            client_id=test_lib.TEST_CLIENT_ID,
            next_state="TestState",
            session_id=session_id)

        with queue_manager.QueueManager(token=self.token) as manager:
            manager.QueueRequest(request)
            manager.QueueResponse(
                rdf_flows.GrrMessage(session_id=session_id,
                                     request_id=1,
                                     response_id=1))

        # Check the request and responses are there.
        all_requests = list(manager.FetchRequestsAndResponses(session_id))
        self.assertEqual(len(all_requests), 1)
        self.assertEqual(all_requests[0][0], request)

        with queue_manager.QueueManager(token=self.token) as manager:
            manager.DeleteRequest(request)

        all_requests = list(manager.FetchRequestsAndResponses(session_id))
        self.assertEqual(len(all_requests), 0)
Example #13
    def testClientAlertFlow(self):
        client_id = self.SetupClient(0)
        email_dict = {}
        with test_lib.ConfigOverrider(
            {"Database.useForReads.message_handlers": False}):
            client_message = "Oh no!"
            self.SendResponse(
                session_id=rdfvalue.SessionID(flow_name="ClientAlert"),
                data=client_message,
                client_id=client_id,
                well_known=True)

        def SendEmail(address, sender, title, message, **_):
            email_dict.update(
                dict(address=address,
                     sender=sender,
                     title=title,
                     message=message))

        with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmail):
            # Now emulate a worker to process the event.
            worker = worker_test_lib.MockWorker(token=self.token)
            while worker.Next():
                pass
            worker.pool.Join()

        self._CheckAlertEmail(client_id, client_message, email_dict)
Example #14
    def Execute(self, action_cls, args):
        responses = list()

        def SendReply(value,
                      session_id=None,
                      message_type=rdf_flows.GrrMessage.Type.MESSAGE):
            if message_type != rdf_flows.GrrMessage.Type.MESSAGE:
                return

            if str(session_id) in self.wkfs:
                message = rdf_flows.GrrMessage(name=action_cls.__name__,
                                               payload=value,
                                               auth_state="AUTHENTICATED",
                                               session_id=session_id)
                self.wkfs[str(session_id)].ProcessMessage(message)
            else:
                responses.append(value)

        message = rdf_flows.GrrMessage(name=action_cls.__name__,
                                       payload=args,
                                       auth_state="AUTHENTICATED",
                                       session_id=rdfvalue.SessionID())

        action = action_cls(grr_worker=worker_mocks.FakeClientWorker())
        action.SendReply = SendReply
        action.Execute(message)

        return responses
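
The SendReply stub above routes by session id: replies addressed to a registered well-known flow go to that flow's ProcessMessage, everything else is collected as an ordinary response. A stripped-down sketch of the same dispatch:

handlers = {}  # str(session_id) -> handler callable
responses = []

def Route(value, session_id=None):
    handler = handlers.get(str(session_id))
    if handler is not None:
        handler(value)  # a well-known flow processes the message
    else:
        responses.append(value)  # ordinary reply, returned to the caller
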
Example #15
    def testCountsActualNumberOfCompletedResponsesWhenApplyingTheLimit(self):
        session_id = rdfvalue.SessionID(flow_name="test")

        # Now queue more requests and responses:
        with queue_manager.QueueManager(token=self.token) as manager:
            # Queue five requests (ids 0-4); none of them gets a real response.
            for request_id in range(5):
                request = rdf_flow_runner.RequestState(
                    id=request_id,
                    client_id=test_lib.TEST_CLIENT_ID,
                    next_state="TestState",
                    session_id=session_id)

                manager.QueueRequest(request)

                # Don't queue any actual responses, just a status message with a
                # fake response_id.
                manager.QueueResponse(
                    rdf_flows.GrrMessage(
                        session_id=session_id,
                        request_id=request_id,
                        response_id=1000,
                        type=rdf_flows.GrrMessage.Type.STATUS))

        # Check that even though the status message for every request indicates
        # 1000 responses, only the actual response count is used to apply the
        # limit when FetchCompletedResponses is called.
        completed_response = list(
            manager.FetchCompletedResponses(session_id, limit=5))
        self.assertEqual(len(completed_response), 5)
        for i, (request, responses) in enumerate(completed_response):
            self.assertEqual(request.id, i)
            # Responses contain just the status message.
            self.assertEqual(len(responses), 1)
Example #16
File: worker_test.py Project: x35029/grr
    def testWorkerDeletesNotificationsForBrokenObjects(self):
        # Test notifications for objects that don't exist.
        session_id = rdfvalue.SessionID(queue=queues.FLOWS, flow_name="123456")

        self.CheckNotificationsDisappear(session_id)

        # Now check objects that are actually broken.

        # Start a new flow.
        session_id = flow.StartAFF4Flow(flow_name="WorkerSendingTestFlow",
                                        client_id=self.client_id,
                                        token=self.token)
        # Overwrite the type of the object such that opening it will now fail.
        data_store.DB.Set(session_id, "aff4:type", "DeprecatedClass")

        # Starting a new flow schedules notifications for the worker already but
        # this test actually checks that there are none. Thus, we have to delete
        # them or the test fails.
        data_store.DB.DeleteSubject(queues.FLOWS)

        # Check that opening the object really fails now.
        with self.assertRaises(aff4.InstantiationError):
            aff4.FACTORY.Open(session_id, token=self.token)

        self.CheckNotificationsDisappear(session_id)
Example #17
File: worker_test.py Project: x35029/grr
    def testWellKnownFlowResponsesAreProcessedOnlyOnce(self):
        worker_obj = self._TestWorker()

        # Send a message to a WellKnownFlow - ClientStatsAuto.
        client_id = rdf_client.ClientURN("C.1100110011001100")
        self.SendResponse(rdfvalue.SessionID(queue=queues.STATS,
                                             flow_name="Stats"),
                          data=rdf_client_stats.ClientStats(RSS_size=1234),
                          client_id=client_id,
                          well_known=True)

        # Process all messages
        worker_obj.RunOnce()
        worker_obj.thread_pool.Join()

        client = aff4.FACTORY.Open(client_id.Add("stats"), token=self.token)
        stats = client.Get(client.Schema.STATS)
        self.assertEqual(stats.RSS_size, 1234)

        aff4.FACTORY.Delete(client_id.Add("stats"), token=self.token)

        # Process all messages once again - there should be no actual processing
        # done, as all the responses were processed last time.
        worker_obj.RunOnce()
        worker_obj.thread_pool.Join()

        # Check that stats haven't changed as no new responses were processed.
        client = aff4.FACTORY.Open(client_id.Add("stats"), token=self.token)
        self.assertIsNone(client.Get(client.Schema.STATS))
Example #18
    def testClientAlertHandler(self):
        client_id = self.SetupClient(0).Basename()
        client_message = "Oh no!"
        email_dict = {}

        def SendEmail(address, sender, title, message, **_):
            email_dict.update(
                dict(address=address,
                     sender=sender,
                     title=title,
                     message=message))

        with test_lib.ConfigOverrider({
                "Database.useForReads": True,
                "Database.useForReads.message_handlers": True
        }):
            with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail",
                               SendEmail):
                flow_test_lib.MockClient(client_id, None)._PushHandlerMessage(
                    rdf_flows.GrrMessage(
                        source=client_id,
                        session_id=rdfvalue.SessionID(flow_name="ClientAlert"),
                        payload=rdf_protodict.DataBlob(string=client_message),
                        request_id=0,
                        auth_state="AUTHENTICATED",
                        response_id=123))

        self._CheckAlertEmail(client_id, client_message, email_dict)
Example #19
class TransferStore(flow.WellKnownFlow):
    """Store a buffer into a determined location."""
    well_known_session_id = rdfvalue.SessionID(flow_name="TransferStore")

    def ProcessMessages(self, msg_list):
        blobs = []
        for message in msg_list:
            if (message.auth_state !=
                    rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED):
                logging.error(
                    "TransferStore request from %s is not authenticated.",
                    message.source)
                continue

            read_buffer = message.payload
            data = read_buffer.data
            if not data:
                continue

            if (read_buffer.compression ==
                    rdf_protodict.DataBlob.CompressionType.ZCOMPRESSION):
                data = zlib.decompress(data)
            elif (read_buffer.compression ==
                  rdf_protodict.DataBlob.CompressionType.UNCOMPRESSED):
                pass
            else:
                raise ValueError("Unsupported compression")

            blobs.append(data)

        data_store.DB.StoreBlobs(blobs, token=self.token)

    def ProcessMessage(self, message):
        """Write the blob into the AFF4 blob storage area."""
        return self.ProcessMessages([message])
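
The decompression dispatch in ProcessMessages accepts exactly two states. A stdlib-only sketch, with placeholder integer constants standing in for rdf_protodict.DataBlob.CompressionType:

import zlib

UNCOMPRESSED, ZCOMPRESSION = 0, 1  # placeholders, not the real enum values

def ExtractBlobData(data: bytes, compression: int) -> bytes:
    if compression == ZCOMPRESSION:
        return zlib.decompress(data)
    if compression == UNCOMPRESSED:
        return data
    raise ValueError("Unsupported compression")

assert ExtractBlobData(zlib.compress(b"blob"), ZCOMPRESSION) == b"blob"
assert ExtractBlobData(b"blob", UNCOMPRESSED) == b"blob"
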
Example #20
    def testNannyMessageHandlerForUnknownClient(self):
        client_id = self.SetupClient(0)
        nanny_message = "Oh no!"
        email_dict = {}

        def SendEmail(address, sender, title, message, **_):
            email_dict.update(
                dict(address=address,
                     sender=sender,
                     title=title,
                     message=message))

        with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmail):
            flow_test_lib.MockClient(client_id, None)._PushHandlerMessage(
                rdf_flows.GrrMessage(
                    source=client_id,
                    session_id=rdfvalue.SessionID(flow_name="NannyMessage"),
                    payload=rdf_protodict.DataBlob(string=nanny_message),
                    request_id=0,
                    auth_state="AUTHENTICATED",
                    response_id=123))

        # We expect the email to be sent.
        self.assertEqual(email_dict.get("address"),
                         config.CONFIG["Monitoring.alert_email"])

        # Make sure the message is included in the email message.
        self.assertIn(nanny_message, email_dict["message"])

        self.assertIn(client_id, email_dict["title"])
Example #21
  def Send(self, response):
    self.grr_worker.SendReply(
        rdf_client_stats.ClientStats.Downsampled(response),
        session_id=rdfvalue.SessionID(queue=queues.STATS, flow_name="Stats"),
        response_id=0,
        request_id=0,
        message_type=rdf_flows.GrrMessage.Type.MESSAGE,
        require_fastpoll=False)
Example #22
class ClientStartupHandler(flow.WellKnownFlow):
    """Handles client startup events."""

    well_known_session_id = rdfvalue.SessionID(flow_name="Startup")

    def ProcessMessage(self, message=None):
        """Handle a startup event."""

        client_id = message.source
        new_si = message.payload
        drift = rdfvalue.Duration("5m")

        if data_store.RelationalDBReadEnabled():
            current_si = data_store.REL_DB.ReadClientStartupInfo(
                client_id.Basename())

            # We write an updated record if the client_info has changed or the
            # boot time differs by more than 5 minutes.
            if (not current_si or current_si.client_info != new_si.client_info
                    or not current_si.boot_time
                    or abs(current_si.boot_time - new_si.boot_time) > drift):
                try:
                    data_store.REL_DB.WriteClientStartupInfo(
                        client_id.Basename(), new_si)
                except db.UnknownClientError:
                    # On first contact with a new client, this write will fail.
                    logging.info(
                        "Can't write StartupInfo for unknown client %s",
                        client_id)
        else:
            changes = False
            with aff4.FACTORY.Create(client_id,
                                     aff4_grr.VFSGRRClient,
                                     mode="rw",
                                     token=self.token) as client:
                old_info = client.Get(client.Schema.CLIENT_INFO)
                old_boot = client.Get(client.Schema.LAST_BOOT_TIME, 0)

                info = new_si.client_info

                # Only write to the datastore if we have new information.
                if info != old_info:
                    client.Set(client.Schema.CLIENT_INFO(info))
                    changes = True

                client.AddLabels(info.labels, owner="GRR")

                # Allow for some drift in the boot times (5 minutes).
                if not old_boot or abs(old_boot - new_si.boot_time) > drift:
                    client.Set(client.Schema.LAST_BOOT_TIME(new_si.boot_time))
                    changes = True

            if data_store.RelationalDBWriteEnabled() and changes:
                try:
                    data_store.REL_DB.WriteClientStartupInfo(
                        client_id.Basename(), new_si)
                except db.UnknownClientError:
                    pass
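
The boot-time comparison above tolerates up to five minutes of drift and treats a missing previous value as a change. A self-contained sketch using datetime in place of rdfvalue.RDFDatetime and rdfvalue.Duration:

from datetime import datetime, timedelta

DRIFT = timedelta(minutes=5)

def BootTimeChanged(old_boot, new_boot):
    if old_boot is None:  # mirrors the `not old_boot` check above
        return True
    return abs(new_boot - old_boot) > DRIFT

assert BootTimeChanged(None, datetime(2020, 1, 1))
assert not BootTimeChanged(datetime(2020, 1, 1, 12, 0),
                           datetime(2020, 1, 1, 12, 3))
assert BootTimeChanged(datetime(2020, 1, 1, 12, 0),
                       datetime(2020, 1, 1, 12, 10))
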
Example #23
  def SendToServer(self):
    """Schedule some packets from client to server."""
    # Generate some client traffic.
    for i in range(0, 10):
      self.client_communicator.client_worker.SendReply(
          rdf_flows.GrrStatus(),
          session_id=rdfvalue.SessionID("W:session"),
          response_id=i,
          request_id=1)
Example #24
class SendStartupInfo(actions.ActionPlugin):

  in_rdfvalue = None
  out_rdfvalues = [rdf_client.StartupInfo]

  well_known_session_id = rdfvalue.SessionID(flow_name="Startup")

  def _CheckInterrogateTrigger(self) -> bool:
    interrogate_trigger_path = config.CONFIG["Client.interrogate_trigger_path"]
    if not interrogate_trigger_path:
      logging.info(
          "Client.interrogate_trigger_path not set, skipping the check.")
      return False

    if not os.path.exists(interrogate_trigger_path):
      logging.info("Interrogate trigger file (%s) does not exist.",
                   interrogate_trigger_path)
      return False

    logging.info("Interrogate trigger file exists: %s",
                 interrogate_trigger_path)

    # First try to remove the file and return True only if the removal is
    # successful. This prevents a permission error plus a crash loop from
    # triggering an infinite number of interrogations.
    try:
      os.remove(interrogate_trigger_path)
    except (OSError, IOError) as e:
      logging.exception(
          "Not triggering interrogate - failed to remove the "
          "interrogate trigger file (%s): %s", interrogate_trigger_path, e)
      return False

    return True

  def Run(self, unused_arg, ttl=None):
    """Returns the startup information."""
    logging.debug("Sending startup information.")

    boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(psutil.boot_time())
    response = rdf_client.StartupInfo(
        boot_time=boot_time,
        client_info=GetClientInformation(),
        interrogate_requested=self._CheckInterrogateTrigger(),
    )

    self.grr_worker.SendReply(
        response,
        session_id=self.well_known_session_id,
        response_id=0,
        request_id=0,
        message_type=rdf_flows.GrrMessage.Type.MESSAGE,
        require_fastpoll=False,
        ttl=ttl)
Example #25
    def testGetNotificationsForAllShards(self):
        manager = queue_manager.QueueManager(token=self.token)
        manager.QueueNotification(session_id=rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="42"))
        manager.Flush()

        manager.QueueNotification(session_id=rdfvalue.SessionID(
            base="aff4:/hunts", queue=queues.HUNTS, flow_name="43"))
        manager.Flush()

        live_shard_count = 0
        for _ in range(manager.num_notification_shards):
            shard_sessions = manager.GetNotifications(queues.HUNTS)
            self.assertLess(len(shard_sessions), 2)
            if len(shard_sessions) == 1:
                live_shard_count += 1
        self.assertEqual(live_shard_count, 2)

        notifications = manager.GetNotificationsForAllShards(queues.HUNTS)
        self.assertEqual(len(notifications), 2)
Example #26
class GetClientStatsAuto(flow.WellKnownFlow,
                         GetClientStatsProcessResponseMixin):
    """This action pushes client stats to the server automatically."""

    category = None

    well_known_session_id = rdfvalue.SessionID(flow_name="Stats",
                                               queue=queues.STATS)

    def ProcessMessage(self, message):
        """Processes a stats response from the client."""
        self.ProcessResponse(message.source.Basename(), message.payload)
Example #27
class WellKnownSessionTest(flow.WellKnownFlow):
    """Tests the well known flow implementation."""
    well_known_session_id = rdfvalue.SessionID(queue=rdfvalue.RDFURN("test"),
                                               flow_name="TestSessionId")

    messages = []

    def __init__(self, *args, **kwargs):
        flow.WellKnownFlow.__init__(self, *args, **kwargs)

    def ProcessMessage(self, message):
        """Record the message id for testing."""
        self.messages.append(int(message.payload))
Example #28
    def testNotificationRequeueing(self):
        with test_lib.ConfigOverrider({"Worker.queue_shards": 1}):
            session_id = rdfvalue.SessionID(base="aff4:/testflows",
                                            queue=queues.HUNTS,
                                            flow_name="123")
            with test_lib.FakeTime(1000):
                # Schedule a notification.
                with queue_manager.QueueManager(token=self.token) as manager:
                    manager.QueueNotification(session_id=session_id)

            with test_lib.FakeTime(1100):
                with queue_manager.QueueManager(token=self.token) as manager:
                    notifications = manager.GetNotifications(queues.HUNTS)
                    self.assertEqual(len(notifications), 1)
                    # This notification was first queued and last queued at time 1000.
                    notification = notifications[0]
                    self.assertEqual(
                        notification.timestamp.AsSecondsSinceEpoch(), 1000)
                    self.assertEqual(
                        notification.first_queued.AsSecondsSinceEpoch(), 1000)
                    # Now requeue the same notification.
                    manager.DeleteNotification(session_id)
                    manager.QueueNotification(notification)

            with test_lib.FakeTime(1200):
                with queue_manager.QueueManager(token=self.token) as manager:
                    notifications = manager.GetNotifications(queues.HUNTS)
                    self.assertEqual(len(notifications), 1)
                    notification = notifications[0]
                    # Now the last queue time is 1100, the first queue time is still 1000.
                    self.assertEqual(
                        notification.timestamp.AsSecondsSinceEpoch(), 1100)
                    self.assertEqual(
                        notification.first_queued.AsSecondsSinceEpoch(), 1000)
                    # Again requeue the same notification.
                    manager.DeleteNotification(session_id)
                    manager.QueueNotification(notification)

            expired = 1000 + queue_manager.QueueManager.notification_expiry_time
            with test_lib.FakeTime(expired):
                with queue_manager.QueueManager(token=self.token) as manager:
                    notifications = manager.GetNotifications(queues.HUNTS)
                    self.assertEqual(len(notifications), 1)
                    # Again requeue the notification, this time it should be dropped.
                    manager.DeleteNotification(session_id)
                    manager.QueueNotification(notifications[0])

                with queue_manager.QueueManager(token=self.token) as manager:
                    notifications = manager.GetNotifications(queues.HUNTS)
                    self.assertEqual(len(notifications), 0)
Example #29
    def testUsesFrozenTimestampWhenDeletingAndFetchingNotifications(self):
        # When used in "with" statement QueueManager uses the frozen timestamp
        # when fetching and deleting data. Test that if we have 2 managers
        # created at different times,  they will behave correctly when dealing
        # with notifications for the same session ids. I.e. older queue_manager
        # will only "see" it's own notification and younger queue_manager will
        # "see" both.
        with queue_manager.QueueManager(token=self.token) as manager1:
            manager1.QueueNotification(session_id=rdfvalue.SessionID(
                base="aff4:/hunts", queue=queues.HUNTS, flow_name="123456"))
            manager1.Flush()

            self._current_mock_time += 10
            with queue_manager.QueueManager(token=self.token) as manager2:
                manager2.QueueNotification(session_id=rdfvalue.SessionID(
                    base="aff4:/hunts", queue=queues.HUNTS,
                    flow_name="123456"))
                manager2.Flush()

                self.assertEqual(
                    len(manager1.GetNotificationsForAllShards(queues.HUNTS)),
                    1)
                self.assertEqual(
                    len(manager2.GetNotificationsForAllShards(queues.HUNTS)),
                    1)

                manager1.DeleteNotification(
                    rdfvalue.SessionID(base="aff4:/hunts",
                                       queue=queues.HUNTS,
                                       flow_name="123456"))

                self.assertEqual(
                    len(manager1.GetNotificationsForAllShards(queues.HUNTS)),
                    0)
                self.assertEqual(
                    len(manager2.GetNotificationsForAllShards(queues.HUNTS)),
                    1)
Example #30
    def __init__(self,
                 certificate,
                 private_key,
                 max_queue_size=50,
                 message_expiry_time=120,
                 max_retransmission_time=10,
                 threadpool_prefix="grr_threadpool"):
        # Identify ourselves as the server.
        self.token = access_control.ACLToken(username="******",
                                             reason="Implied.")
        self.token.supervisor = True

        if data_store.RelationalDBReadEnabled():
            self._communicator = RelationalServerCommunicator(
                certificate=certificate, private_key=private_key)
        else:
            self._communicator = ServerCommunicator(certificate=certificate,
                                                    private_key=private_key,
                                                    token=self.token)

        self.message_expiry_time = message_expiry_time
        self.max_retransmission_time = max_retransmission_time
        self.max_queue_size = max_queue_size
        self.thread_pool = threadpool.ThreadPool.Factory(
            threadpool_prefix,
            min_threads=2,
            max_threads=config.CONFIG["Threadpool.size"])
        self.thread_pool.Start()

        # There is only a single session id that we accept unauthenticated
        # messages for, the one to enroll new clients.
        self.unauth_allowed_session_id = rdfvalue.SessionID(
            queue=queues.ENROLLMENT, flow_name="Enrol")

        # Some well known flows are run on the front end.
        available_wkfs = flow.WellKnownFlow.GetAllWellKnownFlows(
            token=self.token)
        whitelist = set(config.CONFIG["Frontend.well_known_flows"])

        available_wkf_set = set(available_wkfs)
        unknown_flows = whitelist - available_wkf_set
        if unknown_flows:
            raise ValueError("Unknown flows in Frontend.well_known_flows: %s" %
                             ",".join(unknown_flows))

        self.well_known_flows = {
            flow_name: available_wkfs[flow_name]
            for flow_name in whitelist & available_wkf_set
        }
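
The whitelist validation at the end is plain set arithmetic: fail on configured names that no registered well-known flow provides, then keep the intersection. A minimal sketch with made-up flow names:

available_wkfs = {"TransferStore": "handler_a", "Enrol": "handler_b"}
whitelist = {"TransferStore"}

unknown_flows = whitelist - set(available_wkfs)
if unknown_flows:
    raise ValueError("Unknown flows in Frontend.well_known_flows: %s" %
                     ",".join(unknown_flows))

well_known_flows = {
    name: available_wkfs[name] for name in whitelist & set(available_wkfs)
}
assert well_known_flows == {"TransferStore": "handler_a"}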