    def _StartFlow(self, client_id, flow_cls, **kw):
        if data_store.RelationalDBEnabled():
            flow_id = flow.StartFlow(flow_cls=flow_cls,
                                     client_id=client_id,
                                     **kw)
            # Lease the client message.
            data_store.REL_DB.LeaseClientActionRequests(
                client_id, lease_time=rdfvalue.Duration("10000s"))
            # Write some responses. In the relational db, the client queue will be
            # cleaned up as soon as all responses are available. Therefore we cheat
            # here and make it look like the request needs more responses so it's not
            # considered complete.

            # Write the status first. This will mark the request as waiting for 2
            # responses.
            status = rdf_flow_objects.FlowStatus(client_id=client_id,
                                                 flow_id=flow_id,
                                                 request_id=1,
                                                 response_id=2)
            data_store.REL_DB.WriteFlowResponses([status])

            # Now we read the request, adjust the number, and write it back.
            reqs = data_store.REL_DB.ReadAllFlowRequestsAndResponses(
                client_id, flow_id)
            req = reqs[0][0]

            req.nr_responses_expected = 99

            data_store.REL_DB.WriteFlowRequests([req])

            # This response now won't trigger any deletion of client messages.
            response = rdf_flow_objects.FlowResponse(
                client_id=client_id,
                flow_id=flow_id,
                request_id=1,
                response_id=1,
                payload=rdf_client.Process(name="test_process"))
            data_store.REL_DB.WriteFlowResponses([response])

            # This is not strictly needed as we don't display this information in the
            # UI.
            req.nr_responses_expected = 2
            data_store.REL_DB.WriteFlowRequests([req])

            return flow_id

        else:
            flow_id = flow.StartAFF4Flow(
                flow_name=compatibility.GetName(flow_cls),
                client_id=client_id,
                token=self.token,
                **kw).Basename()
            # Have the client write some responses.
            test_process = rdf_client.Process(name="test_process")
            mock = flow_test_lib.MockClient(client_id,
                                            action_mocks.ListProcessesMock(
                                                [test_process]),
                                            token=self.token)
            mock.Next()
            return flow_id

    def Run(self):
        client_id = self.SetupClient(0)
        with test_lib.FakeTime(42):
            flow_urn = flow.StartAFF4Flow(
                flow_name=processes.ListProcesses.__name__,
                client_id=client_id,
                token=self.token)

            test_process = client_test_lib.MockWindowsProcess(
                name="test_process")
            with utils.Stubber(psutil, "Process", lambda: test_process):
                mock = flow_test_lib.MockClient(client_id,
                                                None,
                                                token=self.token)
                while mock.Next():
                    pass

        replace = {flow_urn.Basename(): "W:ABCDEF"}

        manager = queue_manager.QueueManager(token=self.token)
        requests_responses = manager.FetchRequestsAndResponses(flow_urn)
        for request, responses in requests_responses:
            replace[str(request.request.task_id)] = "42"
            for response in responses:
                replace[str(response.task_id)] = "42"

        self.Check("ListFlowRequests",
                   args=flow_plugin.ApiListFlowRequestsArgs(
                       client_id=client_id.Basename(),
                       flow_id=flow_urn.Basename()),
                   replace=replace)
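A minimal usage sketch for the _StartFlow helper above. The Run method below is illustrative only; SetupClient, Check, and processes.ListProcesses are borrowed from other snippets on this page and are assumptions about the surrounding test class:

    def Run(self):
        # Hypothetical regression-test entry point built on _StartFlow.
        client_id = self.SetupClient(0).Basename()
        with test_lib.FakeTime(42):
            # _StartFlow fakes the request/response state so the API call below
            # has both a request and a response to render.
            flow_id = self._StartFlow(client_id, processes.ListProcesses)

        self.Check("ListFlowRequests",
                   args=flow_plugin.ApiListFlowRequestsArgs(
                       client_id=client_id, flow_id=flow_id))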
Example #3
    def testCPULimitForFlows(self):
        """This tests that the client actions are limited properly."""
        result = {}
        client_mock = action_mocks.CPULimitClientMock(result)
        client_mock = flow_test_lib.MockClient(self.client_id,
                                               client_mock,
                                               token=self.token)

        client_mock.EnableResourceUsage(user_cpu_usage=[10],
                                        system_cpu_usage=[10],
                                        network_usage=[1000])

        worker_obj = worker.GRRWorker(token=self.token)

        flow.GRRFlow.StartFlow(client_id=self.client_id,
                               flow_name=flow_test_lib.CPULimitFlow.__name__,
                               cpu_limit=1000,
                               network_bytes_limit=10000,
                               token=self.token)

        self._Process([client_mock], worker_obj)

        self.assertEqual(result["cpulimit"], [1000, 980, 960])
        self.assertEqual(result["networklimit"], [10000, 9000, 8000])

        return result
Example #4
    def testNannyMessageHandlerForUnknownClient(self):
        client_id = self.SetupClient(0)
        nanny_message = "Oh no!"
        email_dict = {}

        def SendEmail(address, sender, title, message, **_):
            email_dict.update(
                dict(address=address,
                     sender=sender,
                     title=title,
                     message=message))

        with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmail):
            flow_test_lib.MockClient(client_id, None)._PushHandlerMessage(
                rdf_flows.GrrMessage(
                    source=client_id,
                    session_id=rdfvalue.SessionID(flow_name="NannyMessage"),
                    payload=rdf_protodict.DataBlob(string=nanny_message),
                    request_id=0,
                    auth_state="AUTHENTICATED",
                    response_id=123))

        # We expect the email to be sent.
        self.assertEqual(email_dict.get("address"),
                         config.CONFIG["Monitoring.alert_email"])

        # Make sure the message is included in the email message.
        self.assertIn(nanny_message, email_dict["message"])

        self.assertIn(client_id, email_dict["title"])
Example #5
  def testNannyMessageHandlerForUnknownClient(self):
    client_id = "C.1000000000000000"
    nanny_message = "Oh no!"
    email_dict = {}

    def SendEmail(address, sender, title, message, **_):
      email_dict.update(
          dict(address=address, sender=sender, title=title, message=message))

    with test_lib.ConfigOverrider({
        "Database.useForReads": True,
        "Database.useForReads.message_handlers": True
    }):
      with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmail):
        flow_test_lib.MockClient(client_id, None)._PushHandlerMessage(
            rdf_flows.GrrMessage(
                source=client_id,
                session_id=rdfvalue.SessionID(flow_name="NannyMessage"),
                payload=rdf_protodict.DataBlob(string=nanny_message),
                request_id=0,
                auth_state="AUTHENTICATED",
                response_id=123))

    # We expect the email to be sent.
    self.assertEqual(
        email_dict.get("address"), config.CONFIG["Monitoring.alert_email"])

    # Make sure the message is included in the email message.
    self.assertIn(nanny_message, email_dict["message"])

    if data_store.RelationalDBReadEnabled():
      self.assertIn(client_id, email_dict["title"])
    else:
      self.assertIn(client_id.Basename(), email_dict["title"])
Example #6
    def testClientAlertHandler(self):
        client_id = self.SetupClient(0).Basename()
        client_message = "Oh no!"
        email_dict = {}

        def SendEmail(address, sender, title, message, **_):
            email_dict.update(
                dict(address=address,
                     sender=sender,
                     title=title,
                     message=message))

        with test_lib.ConfigOverrider({
                "Database.useForReads":
                True,
                "Database.useForReads.message_handlers":
                True
        }):
            with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail",
                               SendEmail):
                flow_test_lib.MockClient(client_id, None)._PushHandlerMessage(
                    rdf_flows.GrrMessage(
                        source=client_id,
                        session_id=rdfvalue.SessionID(flow_name="ClientAlert"),
                        payload=rdf_protodict.DataBlob(string=client_message),
                        request_id=0,
                        auth_state="AUTHENTICATED",
                        response_id=123))

        self._CheckAlertEmail(client_id, client_message, email_dict)
Example #7
    def testDelayedCallState(self):
        """Tests the ability to delay a CallState invocation."""
        with test_lib.FakeTime(10000):
            client_mock = ClientMock()
            client_mock = flow_test_lib.MockClient(self.client_id,
                                                   client_mock,
                                                   token=self.token)
            worker_mock = worker_test_lib.MockWorker(check_flow_errors=True,
                                                     token=self.token)

            flow.GRRFlow.StartFlow(client_id=self.client_id,
                                   flow_name="DelayedCallStateFlow",
                                   token=self.token)

            self.Work(client_mock, worker_mock)

            # We should have done the first CallState so far.
            self.assertEqual(DelayedCallStateFlow.flow_ran, 1)

        with test_lib.FakeTime(10050):
            # 50 seconds more is not enough.
            self.Work(client_mock, worker_mock)
            self.assertEqual(DelayedCallStateFlow.flow_ran, 1)

        with test_lib.FakeTime(10100):
            # But 100 is.
            self.Work(client_mock, worker_mock)
            self.assertEqual(DelayedCallStateFlow.flow_ran, 2)
Example #8
    def Run(self):
        with test_lib.FakeTime(42):
            flow_urn = flow.GRRFlow.StartFlow(
                flow_name=processes.ListProcesses.__name__,
                client_id=self.client_id,
                token=self.token)

        mock = flow_test_lib.MockClient(self.client_id, None, token=self.token)
        while mock.Next():
            pass

        replace = {flow_urn.Basename(): "W:ABCDEF"}

        manager = queue_manager.QueueManager(token=self.token)
        requests_responses = manager.FetchRequestsAndResponses(flow_urn)
        for request, responses in requests_responses:
            replace[str(request.request.task_id)] = "42"
            for response in responses:
                replace[str(response.task_id)] = "42"

        self.Check("ListFlowRequests",
                   args=flow_plugin.ApiListFlowRequestsArgs(
                       client_id=self.client_id.Basename(),
                       flow_id=flow_urn.Basename()),
                   replace=replace)
Example #9
    def Run(self):
        client_ids = self.SetupClients(1)
        client_id = client_ids[0]

        replace = {}
        with test_lib.FakeTime(42):
            flow_urn = flow.GRRFlow.StartFlow(
                client_id=client_id,
                flow_name=processes.ListProcesses.__name__,
                token=self.token)
            replace[flow_urn.Basename()] = "F:123456"

            # Here we emulate a mock client with no actions (None) that should produce
            # an error.
            mock = flow_test_lib.MockClient(client_id, None, token=self.token)
            while mock.Next():
                pass

        manager = queue_manager.QueueManager(token=self.token)
        requests_responses = manager.FetchRequestsAndResponses(flow_urn)
        for request, responses in requests_responses:
            replace[str(request.request.task_id)] = "42"
            for response in responses:
                replace[str(response.task_id)] = "43"

        self.Check("ListClientActionRequests",
                   args=client_plugin.ApiListClientActionRequestsArgs(
                       client_id=client_id.Basename()),
                   replace=replace)
        self.Check("ListClientActionRequests",
                   args=client_plugin.ApiListClientActionRequestsArgs(
                       client_id=client_id.Basename(), fetch_responses=True),
                   replace=replace)
Example #10
def TestHuntHelperWithMultipleMocks(client_mocks,
                                    check_flow_errors=False,
                                    token=None,
                                    iteration_limit=None):
    """Runs a hunt with a given set of clients mocks.

  Args:
    client_mocks: Dictionary of (client_id->client_mock) pairs. Client mock
      objects are used to handle client actions. Method names of a client mock
      object correspond to client action names. For an example of a client mock
      object, see SampleHuntMock.
    check_flow_errors: If True, raises when one of the hunt-initiated flows
      fails.
    token: An access_control.ACLToken security token.
    iteration_limit: If None, hunt will run until it's finished. Otherwise,
      worker_mock.Next() will be called iteration_limit number of times. Every
      iteration processes worker's message queue. If new messages are sent to
      the queue during the iteration processing, they will be processed on next
      iteration.
  """

    total_flows = set()

    # Worker always runs with absolute privileges, therefore making the token
    # SetUID().
    token = token.SetUID()

    client_mocks = [
        flow_test_lib.MockClient(client_id, client_mock, token=token)
        for client_id, client_mock in iteritems(client_mocks)
    ]

    with flow_test_lib.TestWorker(token=True) as rel_db_worker:
        worker_mock = worker_test_lib.MockWorker(
            check_flow_errors=check_flow_errors, token=token)

        # Run the clients and worker until nothing changes any more.
        while iteration_limit is None or iteration_limit > 0:
            client_processed = 0

            for client_mock in client_mocks:
                client_processed += client_mock.Next()

            flows_run = []

            for flow_run in worker_mock.Next():
                total_flows.add(flow_run)
                flows_run.append(flow_run)

            worker_processed = rel_db_worker.ResetProcessedFlows()
            flows_run.extend(worker_processed)

            if client_processed == 0 and not flows_run:
                break

            if iteration_limit:
                iteration_limit -= 1

        if check_flow_errors:
            flow_test_lib.CheckFlowErrors(total_flows, token=token)
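A hedged usage sketch for the helper above. The client ids, the SampleHuntMock class named in the docstring, and the token are illustrative assumptions, not part of the original snippet:

# Hypothetical call site: one mock per client, keyed by client id.
client_ids = ["C.%016X" % i for i in range(10, 15)]
client_mocks = {
    client_id: SampleHuntMock()  # mock class referenced in the docstring; import assumed
    for client_id in client_ids
}
token = access_control.ACLToken(username="test")
TestHuntHelperWithMultipleMocks(client_mocks, check_flow_errors=True, token=token)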
Example #11
def TestHuntHelperWithMultipleMocks(client_mocks,
                                    iteration_limit=None,
                                    worker=None):
    """Runs a hunt with a given set of clients mocks.

  Args:
    client_mocks: Dictionary of (client_id->client_mock) pairs. Client mock
      objects are used to handle client actions. Method names of a client mock
      object correspond to client action names. For an example of a client mock
      object, see SampleHuntMock.
    iteration_limit: If None, hunt will run until it's finished. Otherwise,
      worker_mock.Next() will be called iteration_limit number of times. Every
      iteration processes worker's message queue. If new messages are sent to
      the queue during the iteration processing, they will be processed on next
      iteration.
    worker: flow_test_lib.TestWorker object to use.

  Returns:
    The number of iterations completed.
  """

    client_mocks = [
        flow_test_lib.MockClient(client_id, client_mock)
        for client_id, client_mock in client_mocks.items()
    ]

    if worker is None:
        rel_db_worker = flow_test_lib.TestWorker()
        data_store.REL_DB.RegisterFlowProcessingHandler(
            rel_db_worker.ProcessFlow)
    else:
        rel_db_worker = worker

    num_iterations = 0

    try:
        # Run the clients and worker until nothing changes any more.
        while iteration_limit is None or num_iterations < iteration_limit:
            data_store.REL_DB.delegate.WaitUntilNoFlowsToProcess(timeout=10)
            worker_processed = rel_db_worker.ResetProcessedFlows()

            client_processed = 0

            for client_mock in client_mocks:
                client_processed += int(client_mock.Next())

            num_iterations += 1

            if client_processed == 0 and not worker_processed:
                break

    finally:
        if worker is None:
            data_store.REL_DB.UnregisterFlowProcessingHandler(timeout=60)
            rel_db_worker.Shutdown()

    return num_iterations
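A usage sketch for this relational-db variant, which returns the number of iterations it ran; the mock class and client ids are again assumptions:

# Hypothetical call site: cap the run at 100 iterations and inspect how many ran.
client_mocks = {
    "C.%016X" % i: SampleHuntMock()  # import assumed, see docstring
    for i in range(10, 15)
}
num_iterations = TestHuntHelperWithMultipleMocks(client_mocks, iteration_limit=100)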
Example #12
    def setUp(self):
        super(GRRFuseTest, self).setUp()

        self.client_id = self.SetupClient(0)

        self.client_name = str(self.client_id)[len("aff4:/"):]

        with aff4.FACTORY.Open(self.client_id, token=self.token,
                               mode="rw") as fd:
            fd.Set(fd.Schema.SYSTEM("Linux"))
            kb = fd.Schema.KNOWLEDGE_BASE()
            fd.Set(kb)

        with aff4.FACTORY.Create(self.client_id.Add("fs/os"),
                                 aff4_standard.VFSDirectory,
                                 mode="rw",
                                 token=self.token) as fd:
            fd.Set(fd.Schema.PATHSPEC(path="/", pathtype="OS"))

        # Ignore cache so our tests always get client side updates.
        self.grr_fuse = fuse_mount.GRRFuse(root="/",
                                           token=self.token,
                                           ignore_cache=True)

        self.action_mock = action_mocks.ActionMock(
            admin.GetClientInfo,
            admin.GetConfiguration,
            admin.GetPlatformInfo,
            file_fingerprint.FingerprintFile,
            linux.EnumerateFilesystems,
            linux.EnumerateInterfaces,
            linux.EnumerateUsers,
            linux.GetInstallDate,
            searching.Find,
            standard.HashBuffer,
            standard.ListDirectory,
            standard.GetFileStat,
            standard.TransferBuffer,
        )

        self.client_mock = flow_test_lib.MockClient(self.client_id,
                                                    self.action_mock,
                                                    token=self.token)

        self.update_stubber = utils.Stubber(self.grr_fuse,
                                            "_RunAndWaitForVFSFileUpdate",
                                            self._RunAndWaitForVFSFileUpdate)
        self.update_stubber.Start()

        self.start_flow_stubber = utils.Stubber(flow_utils, "StartFlowAndWait",
                                                self.StartFlowAndWait)
        self.start_flow_stubber.Start()
Example #13
    def testWorkerPrioritization(self):
        """Test that flow priorities work on the worker side."""

        result = []
        client_mock = PriorityClientMock(result)
        client_mock = flow_test_lib.MockClient(self.client_id,
                                               client_mock,
                                               token=self.token)
        worker_mock = worker_test_lib.MockWorker(check_flow_errors=True,
                                                 token=self.token)

        # Start some flows with different priorities.
        # pyformat: disable
        args = [
            (rdf_flows.GrrMessage.Priority.LOW_PRIORITY, "low priority"),
            (rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority"),
            (rdf_flows.GrrMessage.Priority.LOW_PRIORITY, "low priority2"),
            (rdf_flows.GrrMessage.Priority.HIGH_PRIORITY, "high priority"),
            (rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority2")
        ]
        # pyformat: enable

        server_result = []
        PriorityFlow.storage = server_result

        for (priority, msg) in args:
            flow.GRRFlow.StartFlow(client_id=self.client_id,
                                   flow_name="PriorityFlow",
                                   msg=msg,
                                   priority=priority,
                                   token=self.token)

        while True:
            # Run all the clients first so workers have messages to choose from.
            client_processed = 1
            while client_processed:
                client_processed = client_mock.Next()
            # Now process the results, this should happen in the correct order.
            flows_run = []
            for flow_run in worker_mock.Next():
                flows_run.append(flow_run)

            if not flows_run:
                break

        # The flows should be run in order of priority.
        self.assertEqual(server_result[0:1], [u"high priority"])
        self.assertEqual(sorted(server_result[1:3]),
                         [u"medium priority", u"medium priority2"])
        self.assertEqual(sorted(server_result[3:5]),
                         [u"low priority", u"low priority2"])
Example #14
  def Run(self):
    client_id = self.SetupClient(0)
    with test_lib.FakeTime(42):
      flow_id = flow_test_lib.StartFlow(
          processes.ListProcesses, client_id, creator=self.test_username)
      test_process = rdf_client.Process(name="test_process")
      mock = flow_test_lib.MockClient(
          client_id, action_mocks.ListProcessesMock([test_process]))
      mock.Next()

    replace = api_regression_test_lib.GetFlowTestReplaceDict(client_id, flow_id)

    self.Check(
        "ListFlowRequests",
        args=flow_plugin.ApiListFlowRequestsArgs(
            client_id=client_id, flow_id=flow_id),
        replace=replace)
Example #15
    def testClientPrioritization(self):
        """Test that flow priorities work on the client side."""

        result = []
        client_mock = PriorityClientMock(result)
        client_mock = flow_test_lib.MockClient(self.client_id,
                                               client_mock,
                                               token=self.token)
        worker_mock = worker_test_lib.MockWorker(check_flow_errors=True,
                                                 token=self.token)

        # Start some flows with different priorities.
        # pyformat: disable
        args = [
            (rdf_flows.GrrMessage.Priority.LOW_PRIORITY, "low priority"),
            (rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority"),
            (rdf_flows.GrrMessage.Priority.LOW_PRIORITY, "low priority2"),
            (rdf_flows.GrrMessage.Priority.HIGH_PRIORITY, "high priority"),
            (rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority2")
        ]
        # pyformat: enable

        for (priority, msg) in args:
            flow.StartFlow(client_id=self.client_id,
                           flow_name="PriorityFlow",
                           msg=msg,
                           priority=priority,
                           token=self.token)

        while True:
            client_processed = client_mock.Next()
            flows_run = []
            for flow_run in worker_mock.Next():
                flows_run.append(flow_run)

            if client_processed == 0 and not flows_run:
                break

        # The flows should be run in order of priority.
        self.assertEqual(result[0:1], [u"high priority"])
        self.assertEqual(sorted(result[1:3]),
                         [u"medium priority", u"medium priority2"])
        self.assertEqual(sorted(result[3:5]),
                         [u"low priority", u"low priority2"])
Example #16
    def testInspect(self):
        """Test the inspect UI."""
        client_id = self.SetupClient(0)

        self.RequestAndGrantClientApproval(client_id)

        flow.StartAFF4Flow(client_id=rdf_client.ClientURN(client_id),
                           flow_name=flow_discovery.Interrogate.__name__,
                           token=self.token)
        mock = flow_test_lib.MockClient(client_id, None, token=self.token)
        while mock.Next():
            pass

        self.Open("/#/clients/%s/debug-requests" % client_id.Basename())

        # Check that we can see both requests and responses.
        self.WaitUntil(self.IsTextPresent, "GetPlatformInfo")
        self.WaitUntil(self.IsTextPresent, "GetConfig")
        self.WaitUntil(self.IsTextPresent, "EnumerateInterfaces")
        self.WaitUntil(self.IsTextPresent, "GENERIC_ERROR")
        self.WaitUntil(self.IsTextPresent, "STATUS")
        self.WaitUntil(self.IsTextPresent, "Task id")
Example #17
    def RunFlow(self, flow_name, **kwargs):
        result = {}
        client_mock = action_mocks.CPULimitClientMock(result)
        client_mock = flow_test_lib.MockClient(self.client_id,
                                               client_mock,
                                               token=self.token)
        worker_mock = ResourcedWorker(check_flow_errors=True, token=self.token)

        flow.GRRFlow.StartFlow(client_id=self.client_id,
                               flow_name=flow_name,
                               token=self.token,
                               **kwargs)

        while True:
            client_processed = client_mock.Next()
            flows_run = []
            for flow_run in worker_mock.Next():
                flows_run.append(flow_run)

            if client_processed == 0 and not flows_run:
                break

        return result
Example #18
    def testNannyMessageHandler(self):
        client_id = self.SetupClient(0)
        nanny_message = "Oh no!"
        email_dict = {}

        def SendEmail(address, sender, title, message, **_):
            email_dict.update(
                dict(address=address,
                     sender=sender,
                     title=title,
                     message=message))

        with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmail):
            flow_test_lib.MockClient(client_id, None)._PushHandlerMessage(
                rdf_flows.GrrMessage(
                    source=client_id,
                    session_id=rdfvalue.SessionID(flow_name="NannyMessage"),
                    payload=rdf_protodict.DataBlob(string=nanny_message),
                    request_id=0,
                    auth_state="AUTHENTICATED",
                    response_id=123))

        self._CheckNannyEmail(client_id, nanny_message, email_dict)
Example #19
    def _StartFlow(self, client_id, flow_cls, **kw):
        if data_store.RelationalDBFlowsEnabled():
            flow_id = flow.StartFlow(flow_cls=flow_cls,
                                     client_id=client_id,
                                     **kw)
            # Lease the client message.
            data_store.REL_DB.LeaseClientMessages(
                client_id, lease_time=rdfvalue.Duration("10000s"))
            # Write some responses.
            response = rdf_flow_objects.FlowResponse(
                client_id=client_id,
                flow_id=flow_id,
                request_id=1,
                response_id=1,
                payload=rdf_client.Process(name="test_process"))
            status = rdf_flow_objects.FlowStatus(client_id=client_id,
                                                 flow_id=flow_id,
                                                 request_id=1,
                                                 response_id=2)
            data_store.REL_DB.WriteFlowResponses([response, status])
            return flow_id

        else:
            flow_id = flow.StartAFF4Flow(
                flow_name=compatibility.GetName(flow_cls),
                client_id=client_id,
                token=self.token,
                **kw).Basename()
            # Have the client write some responses.
            test_process = rdf_client.Process(name="test_process")
            mock = flow_test_lib.MockClient(client_id,
                                            action_mocks.ListProcessesMock(
                                                [test_process]),
                                            token=self.token)
            mock.Next()
            return flow_id
Example #20
    def testCPULimitForHunts(self):
        worker_obj = worker.GRRWorker(token=self.token)

        client_ids = ["C.%016X" % i for i in xrange(10, 20)]
        result = {}
        client_mocks = []
        for client_id in client_ids:
            client_mock = action_mocks.CPULimitClientMock(result)
            client_mock = flow_test_lib.MockClient(
                rdf_client.ClientURN(client_id), client_mock, token=self.token)

            client_mock.EnableResourceUsage(user_cpu_usage=[10],
                                            system_cpu_usage=[10],
                                            network_usage=[1000])
            client_mocks.append(client_mock)

        flow_runner_args = rdf_flows.FlowRunnerArgs(
            flow_name=flow_test_lib.CPULimitFlow.__name__)
        with implementation.GRRHunt.StartHunt(
                hunt_name=standard.GenericHunt.__name__,
                flow_runner_args=flow_runner_args,
                cpu_limit=5000,
                per_client_cpu_limit=10000,
                network_bytes_limit=1000000,
                client_rate=0,
                token=self.token) as hunt:
            hunt.GetRunner().Start()

        implementation.GRRHunt.StartClients(hunt.session_id, client_ids[:1])
        self._Process(client_mocks, worker_obj)
        implementation.GRRHunt.StartClients(hunt.session_id, client_ids[1:2])
        self._Process(client_mocks, worker_obj)
        implementation.GRRHunt.StartClients(hunt.session_id, client_ids[2:3])
        self._Process(client_mocks, worker_obj)

        # The limiting factor here is the overall hunt limit of 5000 cpu
        # seconds. Clients that finish should decrease the remaining quota
        # and the following clients should get the reduced quota.
        self.assertEqual(result["cpulimit"], [
            5000.0, 4980.0, 4960.0, 4940.0, 4920.0, 4900.0, 4880.0, 4860.0,
            4840.0
        ])
        self.assertEqual(result["networklimit"], [
            1000000L, 999000L, 998000L, 997000L, 996000L, 995000L, 994000L,
            993000L, 992000L
        ])

        result.clear()

        with implementation.GRRHunt.StartHunt(
                hunt_name=standard.GenericHunt.__name__,
                flow_runner_args=flow_runner_args,
                per_client_cpu_limit=3000,
                per_client_network_limit_bytes=3000000,
                client_rate=0,
                token=self.token) as hunt:
            hunt.GetRunner().Start()

        implementation.GRRHunt.StartClients(hunt.session_id, client_ids[:1])
        self._Process(client_mocks, worker_obj)
        implementation.GRRHunt.StartClients(hunt.session_id, client_ids[1:2])
        self._Process(client_mocks, worker_obj)
        implementation.GRRHunt.StartClients(hunt.session_id, client_ids[2:3])
        self._Process(client_mocks, worker_obj)

        # This time, the per client limit is 3000s / 3000000 bytes. Every
        # client should get the same limit.
        self.assertEqual(result["cpulimit"], [
            3000.0, 2980.0, 2960.0, 3000.0, 2980.0, 2960.0, 3000.0, 2980.0,
            2960.0
        ])
        self.assertEqual(result["networklimit"], [
            3000000, 2999000, 2998000, 3000000, 2999000, 2998000, 3000000,
            2999000, 2998000
        ])
        result.clear()

        for client_mock in client_mocks:
            client_mock.EnableResourceUsage(user_cpu_usage=[500],
                                            system_cpu_usage=[500],
                                            network_usage=[1000000])

        with implementation.GRRHunt.StartHunt(
                hunt_name=standard.GenericHunt.__name__,
                flow_runner_args=flow_runner_args,
                per_client_cpu_limit=3000,
                cpu_limit=5000,
                per_client_network_limit_bytes=3000000,
                network_bytes_limit=5000000,
                client_rate=0,
                token=self.token) as hunt:
            hunt.GetRunner().Start()

        implementation.GRRHunt.StartClients(hunt.session_id, client_ids[:1])
        self._Process(client_mocks, worker_obj)
        implementation.GRRHunt.StartClients(hunt.session_id, client_ids[1:2])
        self._Process(client_mocks, worker_obj)
        implementation.GRRHunt.StartClients(hunt.session_id, client_ids[2:3])
        self._Process(client_mocks, worker_obj)

        # The first client gets the full per client limit of 3000s, and
        # uses all of it. The hunt has a limit of just 5000 total so the
        # second client gets started with a limit of 2000. It can only run
        # two of three states, the last client will not be started at all
        # due to out of quota.
        self.assertEqual(result["cpulimit"],
                         [3000.0, 2000.0, 1000.0, 2000.0, 1000.0])
        self.assertEqual(result["networklimit"],
                         [3000000, 2000000, 1000000, 2000000, 1000000])

        errors = list(hunt.GetClientsErrors())
        self.assertEqual(len(errors), 2)
        # Client side out of cpu.
        self.assertIn("CPU limit exceeded", errors[0].log_message)
        # Server side out of cpu.
        self.assertIn("Out of CPU quota", errors[1].backtrace)
Example #21
def TestHuntHelperWithMultipleMocks(client_mocks,
                                    check_flow_errors=False,
                                    token=None,
                                    iteration_limit=None,
                                    worker=None):
  """Runs a hunt with a given set of clients mocks.

  Args:
    client_mocks: Dictionary of (client_id->client_mock) pairs. Client mock
      objects are used to handle client actions. Method names of a client mock
      object correspond to client action names. For an example of a client mock
      object, see SampleHuntMock.
    check_flow_errors: If True, raises when one of the hunt-initiated flows
      fails.
    token: An access_control.ACLToken security token.
    iteration_limit: If None, hunt will run until it's finished. Otherwise,
      worker_mock.Next() will be called iteration_limit number of times. Every
      iteration processes worker's message queue. If new messages are sent to
      the queue during the iteration processing, they will be processed on next
      iteration.
    worker: flow_test_lib.TestWorker object to use.

  Returns:
    The number of iterations completed.
  """

  if token is None:
    token = access_control.ACLToken(username="******")

  total_flows = set()

  # Worker always runs with absolute privileges, therefore making the token
  # SetUID().
  token = token.SetUID()

  client_mocks = [
      flow_test_lib.MockClient(client_id, client_mock, token=token)
      for client_id, client_mock in iteritems(client_mocks)
  ]

  if worker is None:
    rel_db_worker = flow_test_lib.TestWorker(threadpool_size=0, token=True)
    data_store.REL_DB.RegisterFlowProcessingHandler(rel_db_worker.ProcessFlow)
  else:
    rel_db_worker = worker

  num_iterations = 0

  try:
    worker_mock = worker_test_lib.MockWorker(
        check_flow_errors=check_flow_errors, token=token)

    # Run the clients and worker until nothing changes any more.
    while iteration_limit is None or num_iterations < iteration_limit:
      worker_processed = []
      if data_store.RelationalDBEnabled():
        data_store.REL_DB.delegate.WaitUntilNoFlowsToProcess(timeout=10)
        worker_processed = rel_db_worker.ResetProcessedFlows()

      client_processed = 0

      for client_mock in client_mocks:
        client_processed += client_mock.Next()

      flows_run = []

      for flow_run in worker_mock.Next():
        total_flows.add(flow_run)
        flows_run.append(flow_run)

      flows_run.extend(worker_processed)

      num_iterations += 1

      if client_processed == 0 and not flows_run and not worker_processed:
        break

    if check_flow_errors:
      flow_test_lib.CheckFlowErrors(total_flows, token=token)
  finally:
    if worker is None:
      data_store.REL_DB.UnregisterFlowProcessingHandler(timeout=60)
      rel_db_worker.Shutdown()

  return num_iterations
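A usage sketch for this merged variant, passing an externally managed worker so the helper skips its own handler registration; the TestWorker construction mirrors a snippet above, and the mock dictionary contents are assumptions:

# Hypothetical call site reusing an already-running test worker.
with flow_test_lib.TestWorker(token=True) as test_worker:
    client_mocks = {
        "C.%016X" % i: SampleHuntMock()  # import assumed, see docstring
        for i in range(10, 12)
    }
    num_iterations = TestHuntHelperWithMultipleMocks(
        client_mocks, check_flow_errors=True, worker=test_worker)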