Example #1
def TestHuntHelperWithMultipleMocks(client_mocks,
                                    check_flow_errors=False,
                                    token=None,
                                    iteration_limit=None):
    """Runs a hunt with a given set of clients mocks.

  Args:
    client_mocks: Dictionary of (client_id->client_mock) pairs. Client mock
        objects are used to handle client actions. Methods names of a client
        mock object correspond to client actions names. For an example of a
        client mock object, see SampleHuntMock.
    check_flow_errors: If True, raises when one of hunt-initiated flows fails.
    token: An instance of access_control.ACLToken security token.
    iteration_limit: If None, hunt will run until it's finished. Otherwise,
        worker_mock.Next() will be called iteration_limit number of times.
        Every iteration processes worker's message queue. If new messages
        are sent to the queue during the iteration processing, they will
        be processed on next iteration,
  """

    total_flows = set()

    # Worker always runs with absolute privileges, therefore making the token
    # SetUID().
    token = token.SetUID()

    client_mocks = [
        flow_test_lib.MockClient(client_id, client_mock, token=token)
        for client_id, client_mock in client_mocks.iteritems()
    ]
    worker_mock = worker_test_lib.MockWorker(
        check_flow_errors=check_flow_errors, token=token)

    # Run the clients and worker until nothing changes any more.
    while iteration_limit is None or iteration_limit > 0:
        client_processed = 0

        for client_mock in client_mocks:
            client_processed += client_mock.Next()

        flows_run = []

        for flow_run in worker_mock.Next():
            total_flows.add(flow_run)
            flows_run.append(flow_run)

        if client_processed == 0 and not flows_run:
            break

        if iteration_limit:
            iteration_limit -= 1

    if check_flow_errors:
        flow_test_lib.CheckFlowErrors(total_flows, token=token)
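A hedged usage sketch of the helper above. The hunt is assumed to have been
started already; client_ids, SampleHuntMock and self.token are placeholders
assumed to be provided by the surrounding test case, as in the other examples
on this page.

# Sketch only: give every emulated client its own mock and drive the hunt
# until nothing changes any more.
client_mocks = dict(
    (client_id, SampleHuntMock()) for client_id in client_ids)
TestHuntHelperWithMultipleMocks(
    client_mocks, check_flow_errors=True, token=self.token)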
Example #2
    def testWorkerPrioritization(self):
        """Test that flow priorities work on the worker side."""

        result = []
        client_mock = PriorityClientMock(result)
        client_mock = flow_test_lib.MockClient(self.client_id,
                                               client_mock,
                                               token=self.token)
        worker_mock = worker_test_lib.MockWorker(check_flow_errors=True,
                                                 token=self.token)

        # Start some flows with different priorities.
        # pyformat: disable
        args = [
            (rdf_flows.GrrMessage.Priority.LOW_PRIORITY, "low priority"),
            (rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority"),
            (rdf_flows.GrrMessage.Priority.LOW_PRIORITY, "low priority2"),
            (rdf_flows.GrrMessage.Priority.HIGH_PRIORITY, "high priority"),
            (rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority2")
        ]
        # pyformat: enable

        server_result = []
        PriorityFlow.storage = server_result

        for (priority, msg) in args:
            flow.GRRFlow.StartFlow(client_id=self.client_id,
                                   flow_name="PriorityFlow",
                                   msg=msg,
                                   priority=priority,
                                   token=self.token)

        while True:
            # Run all the clients first so workers have messages to choose from.
            client_processed = 1
            while client_processed:
                client_processed = client_mock.Next()
            # Now process the results, this should happen in the correct order.
            flows_run = []
            for flow_run in worker_mock.Next():
                flows_run.append(flow_run)

            if not flows_run:
                break

        # The flows should be run in order of priority.
        self.assertEqual(server_result[0:1], [u"high priority"])
        self.assertEqual(sorted(server_result[1:3]),
                         [u"medium priority", u"medium priority2"])
        self.assertEqual(sorted(server_result[3:5]),
                         [u"low priority", u"low priority2"])
Example #3
    def testNannyMessage(self):
        nanny_message = "Oh no!"
        self.email_message = {}

        def SendEmail(address, sender, title, message, **_):
            self.email_message.update(
                dict(address=address,
                     sender=sender,
                     title=title,
                     message=message))

        with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmail):
            msg = rdf_flows.GrrMessage(
                session_id=rdfvalue.SessionID(flow_name="NannyMessage"),
                payload=rdf_protodict.DataBlob(string=nanny_message),
                source=self.client_id,
                auth_state=rdf_flows.GrrMessage.AuthorizationState.
                AUTHENTICATED)

            # This is normally done by the FrontEnd when a CLIENT_KILLED message is
            # received.
            events.Events.PublishEvent("NannyMessage", msg, token=self.token)

            # Now emulate a worker to process the event.
            worker = worker_test_lib.MockWorker(token=self.token)
            while worker.Next():
                pass
            worker.pool.Join()

            # We expect the email to be sent.
            self.assertEqual(self.email_message.get("address"),
                             config.CONFIG["Monitoring.alert_email"])
            self.assertTrue(str(self.client_id) in self.email_message["title"])

            # Make sure the message is included in the email message.
            self.assertTrue(nanny_message in self.email_message["message"])

            # Make sure crashes collections are created and written
            # into proper locations. First check the per-client crashes collection.
            client_crashes = list(
                aff4_grr.VFSGRRClient.CrashCollectionForCID(self.client_id,
                                                            token=self.token))

            self.assertEqual(len(client_crashes), 1)
            crash = client_crashes[0]
            self.assertEqual(crash.client_id, self.client_id)
            self.assertEqual(crash.client_info.client_name, "GRR Monitor")
            self.assertEqual(
                crash.crash_type,
                "aff4:/flows/" + queues.FLOWS.Basename() + ":NannyMessage")
            self.assertEqual(crash.crash_message, nanny_message)
Example #4
    def testAuditEntryIsCreatedForEveryClient(self):
        self.handler.Handle(client_plugin.ApiAddClientsLabelsArgs(
            client_ids=self.client_ids, labels=["drei", "ein", "zwei"]),
                            token=self.token)

        # We need to run .Simulate() so that the appropriate event is fired,
        # collected, and finally written to the logs that we inspect.
        mock_worker = worker_test_lib.MockWorker(token=self.token)
        mock_worker.Simulate()

        event = self._FindAuditEvent()
        self.assertIsNotNone(event)
        self.assertEqual(event.user, self.token.username)
        self.assertEqual(
            event.description, "%s.drei,%s.ein,%s.zwei" %
            (self.token.username, self.token.username, self.token.username))
Example #5
    def testNannyMessage(self):
        client_id = self.SetupClient(0)
        nanny_message = "Oh no!"
        self.email_message = {}

        def SendEmail(address, sender, title, message, **_):
            self.email_message.update(
                dict(address=address,
                     sender=sender,
                     title=title,
                     message=message))

        with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmail):
            self.SendResponse(
                session_id=rdfvalue.SessionID(flow_name="NannyMessage"),
                data=nanny_message,
                client_id=client_id,
                well_known=True)

            # Now emulate a worker to process the event.
            worker = worker_test_lib.MockWorker(token=self.token)
            while worker.Next():
                pass
            worker.pool.Join()

            # We expect the email to be sent.
            self.assertEqual(self.email_message.get("address"),
                             config.CONFIG["Monitoring.alert_email"])
            self.assertTrue(str(client_id) in self.email_message["title"])

            # Make sure the message is included in the email message.
            self.assertTrue(nanny_message in self.email_message["message"])

            # Make sure crashes collections are created and written
            # into proper locations. First check the per-client crashes collection.
            client_crashes = list(
                aff4_grr.VFSGRRClient.CrashCollectionForCID(client_id))

            self.assertEqual(len(client_crashes), 1)
            crash = client_crashes[0]
            self.assertEqual(crash.client_id, client_id)
            self.assertEqual(crash.client_info.client_name, "GRR Monitor")
            self.assertEqual(crash.crash_type, "Nanny Message")
            self.assertEqual(crash.crash_message, nanny_message)
Example #6
    def testClientPrioritization(self):
        """Test that flow priorities work on the client side."""

        result = []
        client_mock = PriorityClientMock(result)
        client_mock = flow_test_lib.MockClient(self.client_id,
                                               client_mock,
                                               token=self.token)
        worker_mock = worker_test_lib.MockWorker(check_flow_errors=True,
                                                 token=self.token)

        # Start some flows with different priorities.
        # pyformat: disable
        args = [
            (rdf_flows.GrrMessage.Priority.LOW_PRIORITY, "low priority"),
            (rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority"),
            (rdf_flows.GrrMessage.Priority.LOW_PRIORITY, "low priority2"),
            (rdf_flows.GrrMessage.Priority.HIGH_PRIORITY, "high priority"),
            (rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority2")
        ]
        # pyformat: enable

        for (priority, msg) in args:
            flow.StartFlow(client_id=self.client_id,
                           flow_name="PriorityFlow",
                           msg=msg,
                           priority=priority,
                           token=self.token)

        while True:
            client_processed = client_mock.Next()
            flows_run = []
            for flow_run in worker_mock.Next():
                flows_run.append(flow_run)

            if client_processed == 0 and not flows_run:
                break

        # The flows should be run in order of priority.
        self.assertEqual(result[0:1], [u"high priority"])
        self.assertEqual(sorted(result[1:3]),
                         [u"medium priority", u"medium priority2"])
        self.assertEqual(sorted(result[3:5]),
                         [u"low priority", u"low priority2"])
Example #7
  def testClientEventNotification(self):
    """Make sure that client events handled securely."""
    ClientListener.received_events = []
    NoClientListener.received_events = []

    event = rdf_flows.GrrMessage(
        source="C.1395c448a443c7d9",
        auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)

    event.payload = rdf_paths.PathSpec(path="foobar")

    events.Events.PublishEvent("TestEvent", event, token=self.token)
    worker_test_lib.MockWorker(token=self.token).Simulate()

    # The same event should be sent to both listeners, but only the listener
    # which accepts client messages should register it.
    self.assertRDFValuesEqual(ClientListener.received_events[0][0].payload,
                              event.payload)
    self.assertEqual(NoClientListener.received_events, [])
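The examples on this page drive MockWorker in two ways: a single Simulate()
call, as above, or an explicit drain loop followed by joining the worker's
thread pool, as in Examples #3, #5 and #12. A sketch of the second pattern,
assuming self.token is available as elsewhere:

# Sketch only: drain the worker queue explicitly, then wait for its pool.
worker = worker_test_lib.MockWorker(token=self.token)
while worker.Next():
    pass
worker.pool.Join()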
Example #8
    def _RunRateLimitedHunt(self, client_ids, start_time):
        client_rule_set = rdf_foreman.ForemanClientRuleSet(rules=[
            rdf_foreman.ForemanClientRule(
                rule_type=rdf_foreman.ForemanClientRule.Type.REGEX,
                regex=rdf_foreman.ForemanRegexClientRule(
                    field="CLIENT_NAME", attribute_regex="GRR"))
        ])

        with implementation.GRRHunt.StartHunt(hunt_name=DummyHunt.__name__,
                                              client_rule_set=client_rule_set,
                                              client_rate=1,
                                              token=self.token) as hunt:
            hunt.Run()

        # Pretend to be the foreman now and dish out hunting jobs to all the
        # clients.
        foreman = aff4.FACTORY.Open("aff4:/foreman",
                                    mode="rw",
                                    token=self.token)
        for client_id in client_ids:
            foreman.AssignTasksToClient(client_id.Basename())

        self.assertEqual(len(DummyHunt.client_ids), 0)

        # Run the hunt.
        worker_mock = worker_test_lib.MockWorker(check_flow_errors=True,
                                                 queues=queues.HUNTS,
                                                 token=self.token)

        # One client is scheduled in the first minute.
        with test_lib.FakeTime(start_time + 2):
            worker_mock.Simulate()
        self.assertEqual(len(DummyHunt.client_ids), 1)

        # No further clients will be scheduled until the end of the first minute.
        with test_lib.FakeTime(start_time + 59):
            worker_mock.Simulate()
        self.assertEqual(len(DummyHunt.client_ids), 1)

        return worker_mock, hunt.urn
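A hedged continuation of the rate-limited hunt above. With client_rate=1,
roughly one additional client should be scheduled per minute, so advancing the
fake clock past the first minute and simulating the worker again would be
expected to pick up a second client; the exact timing here is an assumption,
not taken from the source.

# Sketch only: reuse the returned worker_mock beyond the first minute.
worker_mock, hunt_urn = self._RunRateLimitedHunt(client_ids, start_time)
with test_lib.FakeTime(start_time + 62):
    worker_mock.Simulate()
self.assertEqual(len(DummyHunt.client_ids), 2)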
Example #9
    def testAttributesOfFileFoundInHashFileStoreAreSetCorrectly(self):
        client_ids = self.SetupClients(2)

        filename = os.path.join(self.base_path, "tcpip.sig")
        pathspec = rdf_paths.PathSpec(pathtype=rdf_paths.PathSpec.PathType.OS,
                                      path=filename)
        urn1 = pathspec.AFF4Path(client_ids[0])
        urn2 = pathspec.AFF4Path(client_ids[1])

        for client_id in client_ids:
            client_mock = action_mocks.FileFinderClientMock()
            for _ in flow_test_lib.TestFlowHelper(
                    file_finder.FileFinder.__name__,
                    client_mock,
                    token=self.token,
                    client_id=client_id,
                    paths=[filename],
                    action=rdf_file_finder.FileFinderAction(
                        action_type=rdf_file_finder.FileFinderAction.Action.
                        DOWNLOAD)):
                pass
            # Run the worker to make sure the FileStore.AddFileToStore event
            # is processed.
            worker = worker_test_lib.MockWorker(token=self.token)
            worker.Simulate()

        fd1 = aff4.FACTORY.Open(urn1, token=self.token)
        self.assertTrue(isinstance(fd1, aff4_grr.VFSBlobImage))

        fd2 = aff4.FACTORY.Open(urn2, token=self.token)
        self.assertTrue(isinstance(fd2, aff4_grr.VFSBlobImage))

        self.assertTrue(fd1.Get(fd1.Schema.STAT))
        self.assertTrue(fd2.Get(fd2.Schema.STAT))
        self.assertEqual(fd1.Get(fd1.Schema.SIZE), fd2.Get(fd2.Schema.SIZE))
        self.assertEqual(fd1.Get(fd1.Schema.CONTENT_LAST),
                         fd2.Get(fd2.Schema.CONTENT_LAST))
Example #10
    def testUserModificationAudit(self):
        worker = worker_test_lib.MockWorker(token=self.token)
        token = self.GenerateToken(username="******", reason="reason")

        grr_api = api.GrrApi(connector=api_shell_raw_access_lib.RawConnector(
            token=token, page_size=10))
        grr_user = grr_api.root.CreateGrrUser(
            "testuser",
            password="******",
            user_type=int(api_user.ApiGrrUser.UserType.USER_TYPE_ADMIN))
        worker.Simulate()

        grr_user.Modify(password="******",
                        user_type=int(
                            api_user.ApiGrrUser.UserType.USER_TYPE_STANDARD))
        worker.Simulate()

        grr_user.Delete()
        worker.Simulate()

        log_entries = []
        for log in audit._AllLegacyAuditLogs(token=self.token):
            log_entries.extend(log)

        self.assertLen(log_entries, 3)

        self.assertEqual(log_entries[0].action, "USER_ADD")
        self.assertEqual(log_entries[0].urn, "aff4:/users/testuser")
        self.assertEqual(log_entries[0].user, "usermodtest")

        self.assertEqual(log_entries[1].action, "USER_UPDATE")
        self.assertEqual(log_entries[1].urn, "aff4:/users/testuser")
        self.assertEqual(log_entries[1].user, "usermodtest")

        self.assertEqual(log_entries[2].action, "USER_DELETE")
        self.assertEqual(log_entries[2].urn, "aff4:/users/testuser")
        self.assertEqual(log_entries[2].user, "usermodtest")
Example #11
    def testUserModificationAudit(self):
        audit.AuditEventListener.created_logs.clear()
        worker = worker_test_lib.MockWorker(token=self.token)
        token = self.GenerateToken(username="******", reason="reason")

        maintenance_utils.AddUser("testuser",
                                  password="******",
                                  labels=["admin"],
                                  token=token)
        worker.Simulate()

        maintenance_utils.UpdateUser("testuser",
                                     "xxx",
                                     delete_labels=["admin"],
                                     token=token)
        worker.Simulate()

        maintenance_utils.DeleteUser("testuser", token=token)
        worker.Simulate()

        log_entries = []
        for log in audit.AllAuditLogs(token=self.token):
            log_entries.extend(log)

        self.assertEqual(len(log_entries), 3)

        self.assertEqual(log_entries[0].action, "USER_ADD")
        self.assertEqual(log_entries[0].urn, "aff4:/users/testuser")
        self.assertEqual(log_entries[0].user, "usermodtest")

        self.assertEqual(log_entries[1].action, "USER_UPDATE")
        self.assertEqual(log_entries[1].urn, "aff4:/users/testuser")
        self.assertEqual(log_entries[1].user, "usermodtest")

        self.assertEqual(log_entries[2].action, "USER_DELETE")
        self.assertEqual(log_entries[2].urn, "aff4:/users/testuser")
        self.assertEqual(log_entries[2].user, "usermodtest")
Example #12
  def testClientAlertFlow(self):
    client_id = self.SetupClient(0)
    email_dict = {}
    with test_lib.ConfigOverrider(
        {"Database.useForReads.message_handlers": False}):
      client_message = "Oh no!"
      self.SendResponse(
          session_id=rdfvalue.SessionID(flow_name="ClientAlert"),
          data=client_message,
          client_id=client_id,
          well_known=True)

    def SendEmail(address, sender, title, message, **_):
      email_dict.update(
          dict(address=address, sender=sender, title=title, message=message))

    with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmail):
      # Now emulate a worker to process the event.
      worker = worker_test_lib.MockWorker(token=self.token)
      while worker.Next():
        pass
      worker.pool.Join()

    self._CheckAlertEmail(client_id, client_message, email_dict)
Example #13
def TestFlowHelper(flow_urn_or_cls_name,
                   client_mock=None,
                   client_id=None,
                   check_flow_errors=True,
                   token=None,
                   sync=True,
                   **kwargs):
    """Build a full test harness: client - worker + start flow.

  Args:
    flow_urn_or_cls_name: RDFURN pointing to existing flow (in this case the
      given flow will be run) or flow class name (in this case flow of the given
      class will be created and run).
    client_mock: Client mock object.
    client_id: Client id of an emulated client.
    check_flow_errors: If True, TestFlowHelper will raise on errors during flow
      execution.
    token: Security token.
    sync: Whether StartAFF4Flow call should be synchronous or not.
    **kwargs: Arbitrary args that will be passed to flow.StartAFF4Flow().

  Returns:
    The session id of the flow that was run.
  """

    if data_store.RelationalDBEnabled():
        if isinstance(client_id, rdfvalue.RDFURN):
            client_id = client_id.Basename()

        flow_cls = registry.FlowRegistry.FlowClassByName(flow_urn_or_cls_name)
        return StartAndRunFlow(flow_cls,
                               creator=token.username,
                               client_mock=client_mock,
                               client_id=client_id,
                               check_flow_errors=check_flow_errors,
                               flow_args=kwargs.pop("args", None),
                               **kwargs)

    if client_id or client_mock:
        client_mock = MockClient(client_id, client_mock, token=token)

    worker_mock = worker_test_lib.MockWorker(
        check_flow_errors=check_flow_errors, token=token)

    if isinstance(flow_urn_or_cls_name, rdfvalue.RDFURN):
        session_id = flow_urn_or_cls_name
    else:
        # Instantiate the flow:
        session_id = flow.StartAFF4Flow(client_id=client_id,
                                        flow_name=flow_urn_or_cls_name,
                                        sync=sync,
                                        token=token,
                                        **kwargs)

    total_flows = set()
    total_flows.add(session_id)

    # Run the client and worker until nothing changes any more.
    while True:
        if client_mock:
            client_processed = client_mock.Next()
        else:
            client_processed = 0

        flows_run = []
        for flow_run in worker_mock.Next():
            total_flows.add(flow_run)
            flows_run.append(flow_run)

        if client_processed == 0 and not flows_run:
            break

    # We should check for flow errors:
    if check_flow_errors:
        CheckFlowErrors(total_flows, token=token)

    return session_id
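A hedged call sketch for this variant, which returns the session id directly.
The flow name, client mock and keyword arguments mirror Example #9; filename,
client_id and self.token are assumed to exist in the test.

# Sketch only: run a FileFinder flow end to end and keep its session id.
session_id = TestFlowHelper(
    file_finder.FileFinder.__name__,
    action_mocks.FileFinderClientMock(),
    client_id=client_id,
    token=self.token,
    paths=[filename],
    action=rdf_file_finder.FileFinderAction(
        action_type=rdf_file_finder.FileFinderAction.Action.DOWNLOAD))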
Example #14
def TestFlowHelper(flow_urn_or_cls_name,
                   client_mock=None,
                   client_id=None,
                   check_flow_errors=True,
                   token=None,
                   notification_event=None,
                   sync=True,
                   **kwargs):
    """Build a full test harness: client - worker + start flow.

  Args:
    flow_urn_or_cls_name: RDFURN pointing to existing flow (in this case the
                          given flow will be run) or flow class name (in this
                          case flow of the given class will be created and run).
    client_mock: Client mock object.
    client_id: Client id of an emulated client.
    check_flow_errors: If True, TestFlowHelper will raise on errors during flow
                       execution.
    token: Security token.
    notification_event: A well known flow session_id of an event listener. Event
                        will be published once the flow finishes.
    sync: Whether StartFlow call should be synchronous or not.
    **kwargs: Arbitrary args that will be passed to flow.GRRFlow.StartFlow().
  Yields:
    The caller should iterate over the generator to get all the flows
    and subflows executed.
  """
    if client_id or client_mock:
        client_mock = MockClient(client_id, client_mock, token=token)

    worker_mock = worker_test_lib.MockWorker(
        check_flow_errors=check_flow_errors, token=token)

    if isinstance(flow_urn_or_cls_name, rdfvalue.RDFURN):
        session_id = flow_urn_or_cls_name
    else:
        # Instantiate the flow:
        session_id = flow.GRRFlow.StartFlow(
            client_id=client_id,
            flow_name=flow_urn_or_cls_name,
            notification_event=notification_event,
            sync=sync,
            token=token,
            **kwargs)

    total_flows = set()
    total_flows.add(session_id)

    # Run the client and worker until nothing changes any more.
    while True:
        if client_mock:
            client_processed = client_mock.Next()
        else:
            client_processed = 0

        flows_run = []
        for flow_run in worker_mock.Next():
            total_flows.add(flow_run)
            flows_run.append(flow_run)

        if client_processed == 0 and not flows_run:
            break

        yield session_id

    # We should check for flow errors:
    if check_flow_errors:
        CheckFlowErrors(total_flows, token=token)
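Unlike the previous variant, this one is a generator, so the caller has to
iterate it for the client/worker loop to run at all. A minimal sketch, with
the flow name and mock borrowed from Example #9 and the other names assumed to
exist in the test:

# Sketch only: draining the generator is what actually drives the flow.
for _ in TestFlowHelper(
        file_finder.FileFinder.__name__,
        action_mocks.FileFinderClientMock(),
        client_id=client_id,
        token=self.token,
        paths=[filename]):
    pass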
Example #15
def TestHuntHelperWithMultipleMocks(client_mocks,
                                    check_flow_errors=False,
                                    token=None,
                                    iteration_limit=None,
                                    worker=None):
  """Runs a hunt with a given set of clients mocks.

  Args:
    client_mocks: Dictionary of (client_id->client_mock) pairs. Client mock
      objects are used to handle client actions. Methods names of a client mock
      object correspond to client actions names. For an example of a client mock
      object, see SampleHuntMock.
    check_flow_errors: If True, raises when one of hunt-initiated flows fails.
    token: An instance of access_control.ACLToken security token.
    iteration_limit: If None, hunt will run until it's finished. Otherwise,
      worker_mock.Next() will be called iteration_limit number of times. Every
      iteration processes worker's message queue. If new messages are sent to
      the queue during the iteration processing, they will be processed on next
      iteration.
    worker: flow_test_lib.TestWorker object to use.

  Returns:
    A number of iterations complete.
  """

  if token is None:
    token = access_control.ACLToken(username="******")

  total_flows = set()

  # Worker always runs with absolute privileges, therefore making the token
  # SetUID().
  token = token.SetUID()

  client_mocks = [
      flow_test_lib.MockClient(client_id, client_mock, token=token)
      for client_id, client_mock in iteritems(client_mocks)
  ]

  if worker is None:
    rel_db_worker = flow_test_lib.TestWorker(threadpool_size=0, token=True)
    data_store.REL_DB.RegisterFlowProcessingHandler(rel_db_worker.ProcessFlow)
  else:
    rel_db_worker = worker

  num_iterations = 0

  try:
    worker_mock = worker_test_lib.MockWorker(
        check_flow_errors=check_flow_errors, token=token)

    # Run the clients and worker until nothing changes any more.
    while iteration_limit is None or num_iterations < iteration_limit:
      worker_processed = []
      if data_store.RelationalDBEnabled():
        data_store.REL_DB.delegate.WaitUntilNoFlowsToProcess(timeout=10)
        worker_processed = rel_db_worker.ResetProcessedFlows()

      client_processed = 0

      for client_mock in client_mocks:
        client_processed += client_mock.Next()

      flows_run = []

      for flow_run in worker_mock.Next():
        total_flows.add(flow_run)
        flows_run.append(flow_run)

      flows_run.extend(worker_processed)

      num_iterations += 1

      if client_processed == 0 and not flows_run and not worker_processed:
        break

    if check_flow_errors:
      flow_test_lib.CheckFlowErrors(total_flows, token=token)
  finally:
    if worker is None:
      data_store.REL_DB.UnregisterFlowProcessingHandler(timeout=60)
      rel_db_worker.Shutdown()

  return num_iterations
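A hedged sketch of the iteration_limit parameter in this newer helper:
bounding the number of worker iterations and checking the count it returns.
client_mocks and self.token are assumed as in the sketch after Example #1.

# Sketch only: run at most five client/worker iterations.
num_iterations = TestHuntHelperWithMultipleMocks(
    client_mocks,
    check_flow_errors=True,
    token=self.token,
    iteration_limit=5)
assert num_iterations <= 5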
Example #16
  def testEventNotification(self):
    """Test that events are sent to listeners."""
    NoClientListener.received_events = []
    worker = worker_test_lib.MockWorker(token=self.token)

    event = rdf_flows.GrrMessage(
        session_id=rdfvalue.SessionID(flow_name="SomeFlow"),
        name="test message",
        payload=rdf_paths.PathSpec(path="foobar", pathtype="TSK"),
        source="aff4:/C.0000000000000001",
        auth_state="AUTHENTICATED")

    # Not allowed to publish a message from a client.
    events.Events.PublishEvent("TestEvent", event, token=self.token)
    worker.Simulate()

    self.assertEqual(NoClientListener.received_events, [])

    event.source = "Source"

    # First make the message unauthenticated.
    event.auth_state = rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED

    # Publish the event.
    events.Events.PublishEvent("TestEvent", event, token=self.token)
    worker.Simulate()

    # This should not work - the unauthenticated message is dropped.
    self.assertEqual(NoClientListener.received_events, [])

    # Now make the message authenticated.
    event.auth_state = rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED

    # Publish the event.
    events.Events.PublishEvent("TestEvent", event, token=self.token)
    worker.Simulate()

    # This should now work:
    self.assertEqual(len(NoClientListener.received_events), 1)

    # Make sure the source is correctly propagated.
    self.assertEqual(NoClientListener.received_events[0][0].source,
                     "aff4:/Source")
    self.assertEqual(NoClientListener.received_events[0][1].path, "foobar")

    NoClientListener.received_events = []
    # Now schedule ten events at the same time.
    for i in xrange(10):
      event.source = "Source%d" % i
      events.Events.PublishEvent("TestEvent", event, token=self.token)

    worker.Simulate()

    self.assertEqual(len(NoClientListener.received_events), 10)

    # Events do not have to be delivered in order so we sort them here for
    # comparison.
    NoClientListener.received_events.sort(key=lambda x: x[0].source)
    for i in range(10):
      self.assertEqual(NoClientListener.received_events[i][0].source,
                       "aff4:/Source%d" % i)
      self.assertEqual(NoClientListener.received_events[i][1].path, "foobar")