コード例 #1
0
def run(settings):
    """
    Run the load test programmatically.

    ``settings`` keys used: "persona_arg" (attribute name of the User class
    on ``main``), "base_url", "users" and "duration" (parseable timespan).
    Blocks until the runner greenlet finishes.
    """
    env = Environment(
        user_classes=[getattr(main, settings["persona_arg"])],
        host=settings["base_url"],
    )
    env.create_local_runner()

    # BUG FIX: register the failure listener BEFORE starting the test;
    # the original added it after runner.start(), so failures occurring
    # during ramp-up were never reported to the handler
    env.events.request_failure.add_listener(main.failure_handler)

    # start a greenlet that saves current stats to history
    gevent.spawn(stats_history, env.runner)

    # start the load test (spawn all users at once: spawn_rate == users)
    env.runner.start(user_count=int(settings["users"]),
                     spawn_rate=int(settings["users"]))

    # stop the runner at the end of the configured duration
    duration_s = parse_timespan(settings["duration"])
    gevent.spawn_later(duration_s, lambda: stop(env))

    # wait for the greenlets
    env.runner.greenlet.join()
コード例 #2
0
    def test_stop_timeout(self):
        """stop_timeout should control how far into its current task a
        User gets before being killed on quit(): no timeout kills it
        mid-task, a partial timeout lets it advance one step, and a
        generous timeout lets the whole task iteration finish."""
        short_time = 0.05

        class MyTaskSet(TaskSet):
            @task
            def my_task(self):
                MyTaskSet.state = "first"
                gevent.sleep(short_time)
                MyTaskSet.state = "second"  # should only run when run time + stop_timeout is > short_time
                gevent.sleep(short_time)
                MyTaskSet.state = "third"  # should only run when run time + stop_timeout is > short_time * 2

        class MyTestUser(User):
            tasks = [MyTaskSet]
            wait_time = constant(0)

        # no stop_timeout: the user is killed mid-task
        environment = Environment(user_classes=[MyTestUser])
        runner = environment.create_local_runner()
        runner.start(1, 1, wait=False)
        gevent.sleep(short_time / 2)
        runner.quit()
        self.assertEqual("first", MyTaskSet.state)

        # exit with timeout
        environment = Environment(user_classes=[MyTestUser],
                                  stop_timeout=short_time / 2)
        runner = environment.create_local_runner()
        runner.start(1, 1, wait=False)
        gevent.sleep(short_time)
        runner.quit()
        self.assertEqual("second", MyTaskSet.state)

        # allow task iteration to complete, with some margin
        environment = Environment(user_classes=[MyTestUser],
                                  stop_timeout=short_time * 3)
        runner = environment.create_local_runner()
        runner.start(1, 1, wait=False)
        gevent.sleep(short_time)
        timeout = gevent.Timeout(short_time * 2)
        timeout.start()
        try:
            runner.quit()
            runner.greenlet.join()
        except gevent.Timeout:
            # BUG FIX: corrected typo "runnining" in the failure message
            self.fail(
                "Got Timeout exception. Some locusts must have kept running after iteration finish"
            )
        finally:
            timeout.cancel()
        self.assertEqual("third", MyTaskSet.state)
コード例 #3
0
    def test_stop_timeout_exit_during_wait(self):
        """A User that is sleeping in its wait_time must be stopped
        immediately by quit(), even when a stop_timeout is configured."""
        grace = 0.05

        class NoopTaskSet(TaskSet):
            @task
            def noop(self):
                pass

        class WaitingUser(User):
            tasks = [NoopTaskSet]
            wait_time = between(1, 1)

        environment = Environment(user_classes=[WaitingUser],
                                  stop_timeout=grace)
        runner = environment.create_local_runner()
        runner.start(1, 1)
        # give the user time to finish its (instant) task and enter the
        # long wait before we ask the runner to quit
        gevent.sleep(grace)
        guard = gevent.Timeout(grace)
        guard.start()
        try:
            runner.quit()
            runner.greenlet.join()
        except gevent.Timeout:
            self.fail(
                "Got Timeout exception. Waiting locusts should stop immediately, even when using stop_timeout."
            )
        finally:
            guard.cancel()
コード例 #4
0
 def test_user_count_in_csv_history_stats(self):
     """History CSV should contain one "/" row and one "Aggregated" row per
     flush, with "User Count" tracking the ramp-up.

     NOTE(review): timing-based test — assumes the stats writer flushes
     roughly 6 rows within the 0.6s window; confirm against the writer's
     interval if this flakes.
     """
     start_time = int(time.time())
     class TestUser(User):
         wait_time = constant(10)
         @task
         def t(self):
             self.environment.runner.stats.log_request("GET", "/", 10, 10)
     environment = Environment(user_classes=[TestUser])
     runner = environment.create_local_runner()
     runner.start(3, 5) # spawn a user every 0.2 second
     gevent.sleep(0.1)

     greenlet = gevent.spawn(stats_writer, environment, self.STATS_BASE_NAME, full_history=True)
     gevent.sleep(0.6)
     gevent.kill(greenlet)

     runner.stop()

     with open(self.STATS_HISTORY_FILENAME) as f:
         reader = csv.DictReader(f)
         rows = [r for r in reader]

     # 3 flushes x ("/" row + "Aggregated" row) = 6 rows
     self.assertEqual(6, len(rows))
     for i in range(3):
         row = rows.pop(0)
         self.assertEqual("%i" % (i + 1), row["User Count"])
         self.assertEqual("/", row["Name"])
         self.assertEqual("%i" % (i + 1), row["Total Request Count"])
         self.assertGreaterEqual(int(row["Timestamp"]), start_time)
         row = rows.pop(0)
         self.assertEqual("%i" % (i + 1), row["User Count"])
         self.assertEqual("Aggregated", row["Name"])
         self.assertEqual("%i" % (i + 1), row["Total Request Count"])
         self.assertGreaterEqual(int(row["Timestamp"]), start_time)
コード例 #5
0
class LocustRunner:
    """Thin convenience wrapper around a locust ``Environment`` with a
    local runner: start/stop the web UI, run a timed test, wait for it."""

    def __init__(self, base_url: str, user: User = RootUser):
        environment = Environment(user_classes=[user])
        environment.host = base_url
        environment.create_local_runner()
        self.env = environment

    def start_web_ui(self):
        """Serve the locust web UI on 127.0.0.1:8089."""
        self.env.create_web_ui("127.0.0.1", 8089)

    def stop_web_ui(self):
        """Shut the web UI down again."""
        self.env.web_ui.stop()

    def run(self, user_count: int, hatch_rate: int, total_run_time: int,
            csv_prefix: str):
        """Start the load test, stream stats to CSV, schedule shutdown."""
        self.env.runner.start(user_count, hatch_rate)
        gevent.spawn(stats_writer, self.env, csv_prefix, True)
        gevent.spawn_later(total_run_time, lambda: self.env.runner.quit())

    def wait_runner(self):
        """Block until the runner greenlet has finished."""
        self.env.runner.greenlet.join()
コード例 #6
0
def run_locust(test_file=None, directory=None, profile=None):
    """Run the DocStorageUser load test for 60 seconds and wait for it.

    ``test_file``/``directory``/``profile`` override the module defaults
    and are attached to the Environment for the user class to read.
    """
    from locust.env import Environment
    from locust.stats import stats_printer
    import gevent

    env = Environment(user_classes=[DocStorageUser])
    env.create_local_runner()
    setattr(env, "source_file", test_file or SOURCE_FILE)
    setattr(env, "working_directory", directory or WORKING_DIRECTORY)
    setattr(env, "aws_profile", profile)

    # start a greenlet that periodically outputs the current stats
    gevent.spawn(stats_printer(env.stats))

    env.runner.start(CONCURRENT_USERS, hatch_rate=1)

    # in 60 seconds stop the runner
    gevent.spawn_later(60, lambda: env.runner.quit())

    # wait for the greenlets.
    # BUG FIX: the original called greenlet.join() immediately after
    # runner.start(), before the quit was ever scheduled — so the join
    # never returned and the stats/quit lines below it were dead code.
    env.runner.greenlet.join()
コード例 #7
0
ファイル: bench.py プロジェクト: fsaez/pidotapi
def locust_run(c, r, t, base, path, webui):
    """Run a simple GET benchmark: ``c`` concurrent users hitting
    ``base``+``path`` for ``t`` seconds, optionally with the web UI."""

    class BenchUser(HttpUser):
        wait_time = constant(1)
        host = base

        @task
        def fetch(self):
            self.client.get(path)

    env = Environment(user_classes=[BenchUser])
    env.create_local_runner()
    if webui:
        # expose the live dashboard and open it in the default browser
        env.create_web_ui("127.0.0.1", 8089)
        webbrowser.open('http://127.0.0.1:8089')
    gevent.spawn(stats_printer(env.stats))
    gevent.spawn(stats_history, env.runner)
    env.runner.start(c, spawn_rate=c)
    gevent.spawn_later(t, lambda: env.runner.quit())
    env.runner.greenlet.join()
    if webui:
        env.web_ui.stop()
コード例 #8
0
    def test_user_count_in_csv_history_stats(self):
        """History CSV rows should track the ramping user count: for each
        flush interval one "/" row and one "Aggregated" row, both carrying
        the user count reached so far."""
        start_time = int(time.time())

        class TestUser(User):
            wait_time = constant(10)

            @task
            def t(self):
                self.environment.runner.stats.log_request("GET", "/", 10, 10)

        environment = Environment(user_classes=[TestUser])
        stats_writer = StatsCSVFileWriter(environment,
                                          PERCENTILES_TO_REPORT,
                                          self.STATS_BASE_NAME,
                                          full_history=True)
        runner = environment.create_local_runner()
        # spawn a user every _TEST_CSV_STATS_INTERVAL_SEC second
        user_count = 15
        spawn_rate = 5
        assert 1 / 5 == _TEST_CSV_STATS_INTERVAL_SEC
        runner_greenlet = gevent.spawn(runner.start, user_count, spawn_rate)
        gevent.sleep(0.1)

        # run the CSV writer for exactly the ramp-up duration, then stop it
        greenlet = gevent.spawn(stats_writer)
        gevent.sleep(user_count / spawn_rate)
        gevent.kill(greenlet)
        stats_writer.close_files()
        runner.stop()
        gevent.kill(runner_greenlet)

        with open(self.STATS_HISTORY_FILENAME) as f:
            reader = csv.DictReader(f)
            rows = [r for r in reader]

        # two rows ("/" + "Aggregated") are written per spawned user
        self.assertEqual(2 * user_count, len(rows))
        for i in range(int(user_count / spawn_rate)):
            for _ in range(spawn_rate):
                row = rows.pop(0)
                self.assertEqual("%i" % ((i + 1) * spawn_rate),
                                 row["User Count"])
                self.assertEqual("/", row["Name"])
                self.assertEqual("%i" % ((i + 1) * spawn_rate),
                                 row["Total Request Count"])
                self.assertGreaterEqual(int(row["Timestamp"]), start_time)
                row = rows.pop(0)
                self.assertEqual("%i" % ((i + 1) * spawn_rate),
                                 row["User Count"])
                self.assertEqual("Aggregated", row["Name"])
                self.assertEqual("%i" % ((i + 1) * spawn_rate),
                                 row["Total Request Count"])
                self.assertGreaterEqual(int(row["Timestamp"]), start_time)
コード例 #9
0
ファイル: locust_user.py プロジェクト: shengjun1985/milvus
def locust_executor(host,
                    port,
                    collection_name,
                    connection_type="single",
                    run_params=None):
    """Run a locust load test against a Milvus collection.

    ``run_params`` keys used: "op_info", "tasks" (op name -> {"weight",
    optional "params"}), "clients_num", "spawn_rate", "during_time".

    Returns a dict with rps, fail_ratio, max_response_time and
    avg_response_time from the aggregated stats.
    """
    m = MilvusClient(host=host, port=port, collection_name=collection_name)
    MyUser.tasks = {}
    MyUser.op_info = run_params["op_info"]
    MyUser.params = {}
    for op, value in run_params["tasks"].items():
        # NOTE: eval() is only acceptable because ``op`` names come from
        # trusted benchmark config, never from untrusted input
        MyUser.tasks[eval("Tasks." + op)] = value["weight"]
        MyUser.params[op] = value.get("params")
    logger.info(MyUser.tasks)
    # BUG FIX: a debug leftover here unconditionally replaced MyUser.tasks
    # with {Tasks.load: 1, Tasks.flush: 1}, discarding the task weights
    # just built from run_params above.

    MyUser.client = MilvusTask(host=host,
                               port=port,
                               collection_name=collection_name,
                               connection_type=connection_type,
                               m=m)
    env = Environment(events=events, user_classes=[MyUser])

    runner = env.create_local_runner()
    # periodically print current stats while the test runs
    gevent.spawn(stats_printer(env.stats))
    clients_num = run_params["clients_num"]
    spawn_rate = run_params["spawn_rate"]
    during_time = run_params["during_time"]
    runner.start(clients_num, spawn_rate=spawn_rate)
    # schedule shutdown after the configured duration, then wait for it
    gevent.spawn_later(during_time, lambda: runner.quit())
    runner.greenlet.join()
    print_stats(env.stats)
    result = {
        "rps": round(env.stats.total.current_rps, 1),
        "fail_ratio": env.stats.total.fail_ratio,
        "max_response_time": round(env.stats.total.max_response_time, 1),
        "avg_response_time": round(env.stats.total.avg_response_time, 1)
    }
    runner.stop()
    return result
コード例 #10
0
ファイル: testcases.py プロジェクト: uz2ee/locust
class LocustTestCase(unittest.TestCase):
    """
    Test case class that restores locust.events.EventHook listeners on tearDown, so that it is
    safe to register any custom event handlers within the test.
    """
    def setUp(self):
        # Prevent args passed to test runner from being passed to Locust
        del sys.argv[1:]

        # fresh event hooks + environment per test, so listeners registered
        # in one test cannot leak into the next
        locust.events = Events()
        self.environment = Environment(events=locust.events,
                                       catch_exceptions=False)
        self.runner = self.environment.create_local_runner()

        # When running the tests in Python 3 we get warnings about unclosed sockets.
        # This causes tests that depends on calls to sys.stderr to fail, so we'll
        # suppress those warnings. For more info see:
        # https://github.com/requests/requests/issues/1882
        try:
            warnings.filterwarnings(action="ignore",
                                    message="unclosed <socket object",
                                    category=ResourceWarning)
        except NameError:
            # ResourceWarning doesn't exist in Python 2, but since the warning only appears
            # on Python 3 we don't need to mock it. Instead we can happily ignore the exception
            pass

        # set up mocked logging handler: swap out the real root handlers
        # (restored in tearDown) so tests can assert on captured records
        self._logger_class = MockedLoggingHandler()
        self._logger_class.setLevel(logging.INFO)
        self._root_log_handlers = [h for h in logging.root.handlers]
        [logging.root.removeHandler(h) for h in logging.root.handlers]
        logging.root.addHandler(self._logger_class)
        logging.root.setLevel(logging.INFO)
        # expose the handler CLASS (captured records appear to be stored
        # class-level, given the classmethod-style reset() below)
        self.mocked_log = MockedLoggingHandler

        # set unhandled exception flag to False
        log.unhandled_greenlet_exception = False

    def tearDown(self):
        # restore logging class
        logging.root.removeHandler(self._logger_class)
        [logging.root.addHandler(h) for h in self._root_log_handlers]
        self.mocked_log.reset()

        clear_all_functools_lru_cache()
コード例 #11
0
ファイル: locustfile.py プロジェクト: abdullahqutb/teenyURL
                             "userID": 1,
                             "longURL": "www.google.com"
                         })

    @task
    def task_404(self):
        """Hit a path that does not exist (exercises the 404 handling)."""
        self.client.get("/non-existing-path")

    @task
    def my_task(self):
        """Fetch URL entry #1 from the API."""
        self.client.get("/api/Url/1")


# setup Environment and Runner
env = Environment(user_classes=[User])
env.create_local_runner()

# start a WebUI instance
env.create_web_ui("127.0.0.1", 8089)

# start a greenlet that periodically outputs the current stats
gevent.spawn(stats_printer(env.stats))

# start a greenlet that save current stats to history
gevent.spawn(stats_history, env.runner)

# start the test
env.runner.start(1, spawn_rate=100)

# in 60 seconds stop the runner
gevent.spawn_later(60, lambda: env.runner.quit())

# BUG FIX: wait for the runner greenlet — without this join the script
# exits immediately and the scheduled quit (and the whole test) never runs
env.runner.greenlet.join()
コード例 #12
0
class TestMasterRunner(LocustTestCase):
    def setUp(self):
        """Create a fresh Environment wired to the global locust events."""
        # idiom: Python 3 zero-argument super()
        super().setUp()
        self.environment = Environment(events=locust.events,
                                       catch_exceptions=False)

    def tearDown(self):
        # idiom: Python 3 zero-argument super()
        super().tearDown()

    def get_runner(self):
        # Master runner bound to all interfaces on port 5557.
        return self.environment.create_master_runner("*", 5557)

    def test_worker_connect(self):
        """Connecting and quitting workers must be reflected in
        master.clients."""
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()
            server.mocked_send(
                Message("client_ready", None, "zeh_fake_client1"))
            self.assertEqual(1, len(master.clients))
            self.assertTrue(
                "zeh_fake_client1" in master.clients,
                "Could not find fake client in master instance's clients dict")
            # connect three more workers
            for worker_id in ("zeh_fake_client2", "zeh_fake_client3",
                              "zeh_fake_client4"):
                server.mocked_send(Message("client_ready", None, worker_id))
            self.assertEqual(4, len(master.clients))

            # a quitting worker is dropped from the registry
            server.mocked_send(Message("quit", None, "zeh_fake_client3"))
            self.assertEqual(3, len(master.clients))

    def test_worker_stats_report_median(self):
        """Stats reported by a worker should be merged into the master's
        stats (median over 100/800/700 is 700)."""
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()
            server.mocked_send(Message("client_ready", None, "fake_client"))

            master.stats.get("/", "GET").log(100, 23455)
            master.stats.get("/", "GET").log(800, 23455)
            master.stats.get("/", "GET").log(700, 23455)

            # report_to_master listeners (registered elsewhere) are expected
            # to serialize the logged stats into ``data``...
            data = {"user_count": 1}
            self.environment.events.report_to_master.fire(
                client_id="fake_client", data=data)
            # ...then clear the local copy so only the report remains
            master.stats.clear_all()

            server.mocked_send(Message("stats", data, "fake_client"))
            s = master.stats.get("/", "GET")
            self.assertEqual(700, s.median_response_time)

    def test_worker_stats_report_with_none_response_times(self):
        """Median/avg must ignore None response times; an entry with only
        None times reports 0 for both."""
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()
            server.mocked_send(Message("client_ready", None, "fake_client"))

            # three real times plus four None times for the mixed entry
            for response_time in (0, 800, 700, None, None, None, None):
                master.stats.get("/mixed", "GET").log(response_time, 23455)
            master.stats.get("/onlyNone", "GET").log(None, 23455)

            data = {"user_count": 1}
            self.environment.events.report_to_master.fire(
                client_id="fake_client", data=data)
            master.stats.clear_all()

            server.mocked_send(Message("stats", data, "fake_client"))
            mixed = master.stats.get("/mixed", "GET")
            self.assertEqual(700, mixed.median_response_time)
            self.assertEqual(500, mixed.avg_response_time)
            only_none = master.stats.get("/onlyNone", "GET")
            self.assertEqual(0, only_none.median_response_time)
            self.assertEqual(0, only_none.avg_response_time)

    def test_master_marks_downed_workers_as_missing(self):
        """A worker that stops sending heartbeats should end up in
        STATE_MISSING."""
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()
            server.mocked_send(Message("client_ready", None, "fake_client"))
            # wait long enough for the heartbeat checker to give up on it
            sleep(6)
            # use a unittest assertion (consistent with the rest of the
            # class, and not stripped under ``python -O`` like bare assert)
            self.assertEqual(STATE_MISSING,
                             master.clients['fake_client'].state)

    def test_last_worker_quitting_stops_test(self):
        """The run should be stopped only once *all* workers have quit."""
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()
            server.mocked_send(Message("client_ready", None, "fake_client1"))
            server.mocked_send(Message("client_ready", None, "fake_client2"))

            master.start(1, 2)
            server.mocked_send(Message("hatching", None, "fake_client1"))
            server.mocked_send(Message("hatching", None, "fake_client2"))

            # first worker quits: the test must keep running
            server.mocked_send(Message("quit", None, "fake_client1"))
            sleep(0)  # yield so the master greenlet processes the message
            self.assertEqual(1, len(master.clients.all))
            self.assertNotEqual(
                STATE_STOPPED, master.state,
                "Not all workers quit but test stopped anyway.")

            # last worker quits: now the test should stop
            server.mocked_send(Message("quit", None, "fake_client2"))
            sleep(0)
            self.assertEqual(0, len(master.clients.all))
            self.assertEqual(STATE_STOPPED, master.state,
                             "All workers quit but test didn't stop.")

    @mock.patch("locust.runners.HEARTBEAT_INTERVAL", new=0.1)
    def test_last_worker_missing_stops_test(self):
        """The run should stop once every worker has gone missing
        (HEARTBEAT_INTERVAL is patched to 0.1s to keep the test fast)."""
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()
            server.mocked_send(Message("client_ready", None, "fake_client1"))
            server.mocked_send(Message("client_ready", None, "fake_client2"))

            master.start(1, 2)
            server.mocked_send(Message("hatching", None, "fake_client1"))
            server.mocked_send(Message("hatching", None, "fake_client2"))

            # only fake_client1 keeps heartbeating; fake_client2 goes silent
            sleep(0.3)
            server.mocked_send(
                Message("heartbeat", {
                    'state': STATE_RUNNING,
                    'current_cpu_usage': 50
                }, "fake_client1"))

            sleep(0.3)
            self.assertEqual(1, len(master.clients.missing))
            self.assertNotEqual(
                STATE_STOPPED, master.state,
                "Not all workers went missing but test stopped anyway.")

            # now fake_client1 also misses its heartbeats
            sleep(0.3)
            self.assertEqual(2, len(master.clients.missing))
            self.assertEqual(STATE_STOPPED, master.state,
                             "All workers went missing but test didn't stop.")

    def test_master_total_stats(self):
        """The master's total stats should aggregate entries from all
        worker stats reports."""
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()
            server.mocked_send(Message("client_ready", None, "fake_client"))
            stats = RequestStats()
            stats.log_request("GET", "/1", 100, 3546)
            stats.log_request("GET", "/1", 800, 56743)
            stats2 = RequestStats()
            stats2.log_request("GET", "/2", 700, 2201)
            server.mocked_send(
                Message(
                    "stats", {
                        "stats": stats.serialize_stats(),
                        "stats_total": stats.total.serialize(),
                        "errors": stats.serialize_errors(),
                        "user_count": 1,
                    }, "fake_client"))
            server.mocked_send(
                Message(
                    "stats", {
                        "stats": stats2.serialize_stats(),
                        "stats_total": stats2.total.serialize(),
                        "errors": stats2.serialize_errors(),
                        "user_count": 2,
                    }, "fake_client"))
            # median over the merged 100/800/700 is 700
            self.assertEqual(700, master.stats.total.median_response_time)

    def test_master_total_stats_with_none_response_times(self):
        """Aggregated total median must skip None response times coming
        from worker reports."""
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()
            server.mocked_send(Message("client_ready", None, "fake_client"))
            stats = RequestStats()
            stats.log_request("GET", "/1", 100, 3546)
            stats.log_request("GET", "/1", 800, 56743)
            stats.log_request("GET", "/1", None, 56743)
            stats2 = RequestStats()
            stats2.log_request("GET", "/2", 700, 2201)
            stats2.log_request("GET", "/2", None, 2201)
            stats3 = RequestStats()
            stats3.log_request("GET", "/3", None, 2201)
            # deliver each worker report as its own "stats" message
            for report, user_count in ((stats, 1), (stats2, 2), (stats3, 2)):
                server.mocked_send(
                    Message(
                        "stats", {
                            "stats": report.serialize_stats(),
                            "stats_total": report.total.serialize(),
                            "errors": report.serialize_errors(),
                            "user_count": user_count,
                        }, "fake_client"))
            self.assertEqual(700, master.stats.total.median_response_time)

    def test_master_current_response_times(self):
        """Current response time percentiles should only reflect a recent
        sliding window of requests, driven here by a mocked clock."""
        start_time = 1
        with mock.patch("time.time") as mocked_time:
            mocked_time.return_value = start_time
            with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
                master = self.get_runner()
                self.environment.stats.reset_all()
                mocked_time.return_value += 1.0234
                server.mocked_send(Message("client_ready", None,
                                           "fake_client"))
                stats = RequestStats()
                stats.log_request("GET", "/1", 100, 3546)
                stats.log_request("GET", "/1", 800, 56743)
                server.mocked_send(
                    Message(
                        "stats", {
                            "stats": stats.serialize_stats(),
                            "stats_total": stats.total.get_stripped_report(),
                            "errors": stats.serialize_errors(),
                            "user_count": 1,
                        }, "fake_client"))
                mocked_time.return_value += 1
                stats2 = RequestStats()
                stats2.log_request("GET", "/2", 400, 2201)
                server.mocked_send(
                    Message(
                        "stats", {
                            "stats": stats2.serialize_stats(),
                            "stats_total": stats2.total.get_stripped_report(),
                            "errors": stats2.serialize_errors(),
                            "user_count": 2,
                        }, "fake_client"))
                mocked_time.return_value += 4
                # at this point the window covers 100, 800 and 400
                self.assertEqual(
                    400,
                    master.stats.total.get_current_response_time_percentile(
                        0.5))
                self.assertEqual(
                    800,
                    master.stats.total.get_current_response_time_percentile(
                        0.95))

                # let 10 second pass, do some more requests, send it to the master and make
                # sure the current response time percentiles only accounts for these new requests
                mocked_time.return_value += 10.10023
                stats.log_request("GET", "/1", 20, 1)
                stats.log_request("GET", "/1", 30, 1)
                stats.log_request("GET", "/1", 3000, 1)
                server.mocked_send(
                    Message(
                        "stats", {
                            "stats": stats.serialize_stats(),
                            "stats_total": stats.total.get_stripped_report(),
                            "errors": stats.serialize_errors(),
                            "user_count": 2,
                        }, "fake_client"))
                self.assertEqual(
                    30,
                    master.stats.total.get_current_response_time_percentile(
                        0.5))
                self.assertEqual(
                    3000,
                    master.stats.total.get_current_response_time_percentile(
                        0.95))

    def test_rebalance_locust_users_on_worker_connect(self):
        """When a worker connects mid-test, the users should be
        redistributed evenly across all connected workers."""
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()
            server.mocked_send(
                Message("client_ready", None, "zeh_fake_client1"))
            self.assertEqual(1, len(master.clients))
            self.assertTrue(
                "zeh_fake_client1" in master.clients,
                "Could not find fake client in master instance's clients dict")

            # one worker gets the full load
            master.start(100, 20)
            self.assertEqual(1, len(server.outbox))
            client_id, msg = server.outbox.pop()
            self.assertEqual(100, msg.data["num_users"])
            self.assertEqual(20, msg.data["hatch_rate"])

            # let another worker connect
            server.mocked_send(
                Message("client_ready", None, "zeh_fake_client2"))
            self.assertEqual(2, len(master.clients))
            self.assertEqual(2, len(server.outbox))
            # each of the two workers now gets half the users/hatch rate
            client_id, msg = server.outbox.pop()
            self.assertEqual(50, msg.data["num_users"])
            self.assertEqual(10, msg.data["hatch_rate"])
            client_id, msg = server.outbox.pop()
            self.assertEqual(50, msg.data["num_users"])
            self.assertEqual(10, msg.data["hatch_rate"])

    def test_sends_hatch_data_to_ready_running_hatching_workers(self):
        '''Sends hatch job to running, ready, or hatching workers'''
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()
            # register one worker in each of the three eligible states
            for node_id, state in ((1, STATE_INIT), (2, STATE_HATCHING),
                                   (3, STATE_RUNNING)):
                master.clients[node_id] = WorkerNode(node_id)
                master.clients[node_id].state = state
            master.start(user_count=5, hatch_rate=5)

            # every registered worker received a hatch message
            self.assertEqual(3, len(server.outbox))

    def test_start_event(self):
        """
        Tests that test_start event is fired
        """
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()

            # listener counts how many times test_start fires
            run_count = [0]

            @self.environment.events.test_start.add_listener
            def on_test_start(*a, **kw):
                run_count[0] += 1

            for i in range(5):
                server.mocked_send(
                    Message("client_ready", None, "fake_client%i" % i))

            master.start(7, 7)
            self.assertEqual(5, len(server.outbox))
            self.assertEqual(1, run_count[0])

            # change number of users and check that test_start isn't fired again
            master.start(7, 7)
            self.assertEqual(1, run_count[0])

            # stop and start to make sure test_start is fired again
            master.stop()
            master.start(3, 3)
            self.assertEqual(2, run_count[0])

            master.quit()

    def test_stop_event(self):
        """
        Tests that test_stop event is fired
        """
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()

            # listener counts how many times test_stop fires
            run_count = [0]

            @self.environment.events.test_stop.add_listener
            def on_test_stop(*a, **kw):
                run_count[0] += 1

            for i in range(5):
                server.mocked_send(
                    Message("client_ready", None, "fake_client%i" % i))

            master.start(7, 7)
            self.assertEqual(5, len(server.outbox))
            master.stop()
            self.assertEqual(1, run_count[0])

            # quit() after an explicit stop() must not fire test_stop again
            run_count[0] = 0
            for i in range(5):
                server.mocked_send(
                    Message("client_ready", None, "fake_client%i" % i))
            master.start(7, 7)
            master.stop()
            master.quit()
            self.assertEqual(1, run_count[0])

    def test_stop_event_quit(self):
        """
        Tests that test_stop event is fired when quit() is called directly
        """
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()

            # listener counts how many times test_stop fires
            run_count = [0]

            @self.environment.events.test_stop.add_listener
            def on_test_stop(*a, **kw):
                run_count[0] += 1

            for i in range(5):
                server.mocked_send(
                    Message("client_ready", None, "fake_client%i" % i))

            master.start(7, 7)
            self.assertEqual(5, len(server.outbox))
            # quitting without an explicit stop() must still fire test_stop
            master.quit()
            self.assertEqual(1, run_count[0])

    def test_spawn_zero_locusts(self):
        """Starting a run with user_count=0 must spawn nothing and must
        not hang the hatching greenlet."""
        class NoopTaskSet(TaskSet):
            @task
            def noop(self):
                pass

        class NoopUser(User):
            tasks = [NoopTaskSet]
            wait_time = constant(0.1)

        environment = Environment(user_classes=[NoopUser])
        runner = LocalRunner(environment)

        guard = gevent.Timeout(2.0)
        guard.start()

        try:
            runner.start(0, 1, wait=True)
            runner.hatching_greenlet.join()
        except gevent.Timeout:
            self.fail(
                "Got Timeout exception. A locust seems to have been spawned, even though 0 was specified."
            )
        finally:
            guard.cancel()

    def test_spawn_uneven_locusts(self):
        """
        The master must distribute an exact total user count even when the
        total does not divide evenly among the connected workers.
        """
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()
            for worker_id in range(5):
                server.mocked_send(
                    Message("client_ready", None, "fake_client%i" % worker_id))

            master.start(7, 7)
            self.assertEqual(5, len(server.outbox))

            # Sum the per-worker allocations across every spawn message.
            total_users = sum(msg.data["num_users"]
                              for _, msg in server.outbox)

            self.assertEqual(
                7, total_users,
                "Total number of locusts that would have been spawned is not 7"
            )

    def test_spawn_fewer_locusts_than_workers(self):
        """A total user count smaller than the worker count must still sum to
        the requested total across all spawn messages."""
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()
            for worker_id in range(5):
                server.mocked_send(
                    Message("client_ready", None, "fake_client%i" % worker_id))

            master.start(2, 2)
            self.assertEqual(5, len(server.outbox))

            # Some workers necessarily receive zero users here.
            total_users = sum(msg.data["num_users"]
                              for _, msg in server.outbox)

            self.assertEqual(
                2, total_users,
                "Total number of locusts that would have been spawned is not 2"
            )

    def test_spawn_locusts_in_stepload_mode(self):
        """Step Load mode must dispatch 5 users in the first step and 10 in
        the second (total 10, hatch rate 2, step of 5 users, 2s per step)."""
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()
            for worker_id in range(5):
                server.mocked_send(
                    Message("client_ready", None, "fake_client%i" % worker_id))

            # Start swarming in Step Load mode: total locust count of 10,
            # hatch rate of 2, step locust count of 5, step duration of 2s.
            master.start_stepload(10, 2, 5, 2)

            # Give the first step time to start.
            sleep(0.5)
            self.assertEqual(5, len(server.outbox))

            end_of_first_step = len(server.outbox)
            first_step_users = sum(msg.data["num_users"]
                                   for _, msg in server.outbox)

            self.assertEqual(
                5, first_step_users,
                "Total number of locusts that would have been spawned for first step is not 5"
            )

            # Wait until the first step completes and the second is dispatched.
            sleep(2)
            second_step_users = sum(
                msg.data["num_users"]
                for _, msg in server.outbox[end_of_first_step:])
            self.assertEqual(
                10, second_step_users,
                "Total number of locusts that would have been spawned for second step is not 10"
            )

    def test_exception_in_task(self):
        """An exception raised by a task must be recorded once by the runner,
        with the count reflecting repeated occurrences."""

        class MyUser(User):
            @task
            def will_error(self):
                raise HeyAnException(":(")

        self.environment.user_classes = [MyUser]
        runner = self.environment.create_local_runner()

        user = MyUser(self.environment)

        # Two runs, both raising: same traceback hash -> one aggregated entry.
        self.assertRaises(HeyAnException, user.run)
        self.assertRaises(HeyAnException, user.run)
        self.assertEqual(1, len(runner.exceptions))

        _, exception = runner.exceptions.popitem()
        self.assertTrue("traceback" in exception)
        self.assertTrue("HeyAnException" in exception["traceback"])
        self.assertEqual(2, exception["count"])

    def test_exception_is_catched(self):
        """ Test that exceptions are stored, and execution continues """
        class MyTaskSet(TaskSet):
            def __init__(self, *a, **kw):
                super(MyTaskSet, self).__init__(*a, **kw)
                # Pre-seed the private task queue so execution order is
                # deterministic: the failing task runs first, then a task
                # that raises StopUser to end the run cleanly.
                # NOTE(review): this depends on TaskSet's internal
                # _task_queue entry format — verify against the installed
                # locust version.
                self._task_queue = [
                    {
                        "callable": self.will_error,
                        "args": [],
                        "kwargs": {}
                    },
                    {
                        "callable": self.will_stop,
                        "args": [],
                        "kwargs": {}
                    },
                ]

            @task(1)
            def will_error(self):
                raise HeyAnException(":(")

            @task(1)
            def will_stop(self):
                # StopUser terminates the user's run without counting as an error.
                raise StopUser()

        class MyUser(User):
            wait_time = constant(0.01)
            tasks = [MyTaskSet]

        # set config to catch exceptions in locust users
        self.environment.catch_exceptions = True
        self.environment.user_classes = [MyUser]
        runner = LocalRunner(self.environment)
        l = MyUser(self.environment)

        # make sure HeyAnException isn't raised (catch_exceptions swallows it)
        l.run()
        l.run()
        # make sure we got two entries in the error log (one per run)
        self.assertEqual(2, len(self.mocked_log.error))

        # make sure exception was stored; both runs share one traceback hash,
        # so there is a single aggregated entry with count == 2
        self.assertEqual(1, len(runner.exceptions))
        hash_key, exception = runner.exceptions.popitem()
        self.assertTrue("traceback" in exception)
        self.assertTrue("HeyAnException" in exception["traceback"])
        self.assertEqual(2, exception["count"])

    def test_master_reset_connection(self):
        """The master must flag a broken connection and recover once a
        healthy client_ready message arrives."""
        with mock.patch("locust.runners.FALLBACK_INTERVAL", new=0.1), \
                mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = self.get_runner()
            self.assertEqual(0, len(master.clients))
            # A message carrying the broken-network marker flips the flag.
            server.mocked_send(
                Message("client_ready", NETWORK_BROKEN, "fake_client"))
            self.assertTrue(master.connection_broken)
            # A healthy ready message lets the master recover.
            server.mocked_send(Message("client_ready", None,
                                       "fake_client"))
            sleep(0.2)
            self.assertFalse(master.connection_broken)
            self.assertEqual(1, len(master.clients))
            master.quit()
Code example #13
0
 def test_runner_reference_on_environment(self):
     """Environment and its local runner must reference each other."""
     environment = Environment()
     local_runner = environment.create_local_runner()
     self.assertEqual(environment, local_runner.environment)
     self.assertEqual(local_runner, environment.runner)
Code example #14
0
def locust_executor(host,
                    port,
                    collection_name,
                    connection_type="single",
                    run_params=None):
    """Run a Locust load test against a Milvus collection and return stats.

    Args:
        host: Milvus server host.
        port: Milvus server port.
        collection_name: Name of the target collection.
        connection_type: Connection mode forwarded to MilvusTask
            (default "single").
        run_params: dict describing the run — task names/weights under
            "tasks", plus "op_info", "spawn_rate", "during_time" and
            optional "clients_num", "load_shape", "step_time", "step_load".

    Returns:
        dict with the finished run's rps, fail_ratio, max_response_time
        and avg_response_time.
    """
    m = MilvusClient(host=host, port=port, collection_name=collection_name)
    MyUser.tasks = {}
    MyUser.op_info = run_params["op_info"]
    MyUser.params = {}
    for op, value in run_params["tasks"].items():
        # getattr avoids eval() on externally supplied task names, and we
        # don't shadow the imported `task` decorator with a local variable.
        MyUser.tasks[getattr(Tasks, op)] = value["weight"]
        MyUser.params[op] = value.get("params")
    logger.info(MyUser.tasks)
    MyUser.values = {
        "ids": [random.randint(1000000, 10000000) for _ in range(nb)],
        "get_ids": [random.randint(1, 10000000) for _ in range(nb)],
        "X": utils.generate_vectors(nq, MyUser.op_info["dimension"])
    }

    MyUser.client = MilvusTask(host=host,
                               port=port,
                               collection_name=collection_name,
                               connection_type=connection_type,
                               m=m)
    if run_params.get("load_shape"):
        # Step-load shape: ramp users by step_load every step_time seconds.
        shape = StepLoadShape()
        shape.init(run_params["step_time"], run_params["step_load"],
                   run_params["spawn_rate"], run_params["during_time"])
        env = Environment(events=events,
                          user_classes=[MyUser],
                          shape_class=shape)
        runner = env.create_local_runner()
        env.runner.start_shape()
    else:
        env = Environment(events=events, user_classes=[MyUser])
        runner = env.create_local_runner()
    # Periodically print aggregated stats while the test runs.
    gevent.spawn(stats_printer(env.stats))
    clients_num = run_params.get("clients_num", 0)
    spawn_rate = run_params["spawn_rate"]
    during_time = run_params["during_time"]
    runner.start(clients_num, spawn_rate=spawn_rate)
    # Quit the runner once the configured duration elapses, then wait.
    gevent.spawn_later(during_time, runner.quit)
    runner.greenlet.join()
    print_stats(env.stats)
    result = {
        "rps": round(env.stats.total.current_rps,
                     1),  # Number of interface requests per second
        "fail_ratio":
        env.stats.total.fail_ratio,  # Interface request failure rate
        "max_response_time": round(env.stats.total.max_response_time,
                                   1),  # Maximum interface response time
        "avg_response_time": round(env.stats.total.avg_response_time,
                                   1)  # ratio of average response time
    }
    runner.stop()
    return result
Code example #15
0
File: kubedepend.py Project: martonorova/kubedepend
def main(nosave, fault_profile, measurement_count, load_duration,
         locust_user_count, locust_spawn_rate, cluster_type, comment):
    """Run a sequence of dependability measurements against the cluster.

    For each of ``measurement_count`` iterations: wait for a stable system
    state, apply chaos objects via Helm, generate load with Locust for
    ``load_duration`` seconds, collect dependability metrics, then remove
    the chaos objects. Results are saved to CSV unless ``nosave`` is set.
    """
    check_working_dir()

    # Save the start time of the measurement sequence (used to tag the
    # archived stack and the results)
    start_time = datetime.now().strftime("%m-%d-%Y_%H-%M-%S.%f")

    # Lint the Helm chart up front; abort the whole run if it fails
    try:
        logging.info('Linting Helm chart...')
        subprocess.check_output(['helm', 'lint', '../charts/kubedepend-chaos'])
        logging.info('Linting Helm chart finished OK')
    except subprocess.CalledProcessError as error:
        logging.error('Helm lint failed, exiting...')
        exit()

    # Assemble helm --set value options according to the fault profile
    helm_value_sets = assemble_helm_set_options(fault_profile)

    # filter out empty strings
    helm_command = [x for x in HELM_COMMAND_FIX_PART + helm_value_sets if x]

    # Save current stack into archive
    if not nosave:
        save_helm_chart(helm_command=helm_command)
        archive_stack(start_time)

    sequence_result = MeasurementSequenceResult(
        start_time=start_time,
        fault_profile=fault_profile,
        cluster_type=cluster_type,
        load_duration=load_duration,
        locust_user_count=locust_user_count,
        locust_spawn_rate=locust_spawn_rate,
        comment=comment)

    for i in range(measurement_count):

        logging.info(f'Start measurement #{i + 1}')

        logging.info('Waiting for stable system state...')
        wait_for_stable_state()

        # Initialize measurement result (records its own start time)
        measurement_result = MeasurementResult()

        # Setup Locust objects

        # setup Environment and Runner
        env = Environment(user_classes=[User])
        env.create_local_runner()

        # start a greenlet that periodically outputs the current stats
        gevent.spawn(stats_printer(env.stats))

        # start a greenlet that saves current stats to history
        gevent.spawn(stats_history, env.runner)

        logging.info('Creating chaos objects...')

        subprocess.run(helm_command)

        logging.info('Chaos objects applied.')

        logging.info('Generating load...')

        # start the test
        env.runner.start(user_count=locust_user_count,
                         spawn_rate=locust_spawn_rate)

        # stop the runner after 'load_duration' seconds
        gevent.spawn_later(load_duration, lambda: env.runner.quit())

        # wait for the greenlets
        env.runner.greenlet.join()

        logging.info('Load generation finished')

        # get dependability metrics for the load window just finished

        metrics = get_dependability_metrics(load_duration)

        # add metrics to measurement result
        measurement_result.backend_metrics = metrics
        # end measurement (fill end_time attribute)
        measurement_result.end()
        # add measurement result to sequence result
        sequence_result.add_measurement_result(measurement_result)

        # tear down the fault injection before the next iteration

        logging.info('Deleting chaos objects...')

        subprocess.run(
            ['helm', 'delete', 'kubedepend-chaos', '-n', 'chaos-testing'])

        logging.info('Chaos objects deleted.')

    if not nosave:
        sequence_result.save_results('results/results.csv')

    # TODO stop running greenlets (stats)

    logging.info('Test finished')
Code example #16
0
File: loadtest.py Project: ktrueda/invokust
class LocustLoadTest(object):
    """
    Runs a Locust load test and returns statistics
    """
    def __init__(self, settings):
        """Keep the supplied settings and make SIGTERM shut Locust down."""
        gevent.signal_handler(signal.SIGTERM, sig_term_handler)
        self.settings = settings
        self.start_time = self.end_time = None

    def stats(self):
        """
        Returns the statistics from the load test in JSON
        """
        statistics = {
            "requests": {},
            "failures": {},
            "num_requests": self.env.runner.stats.num_requests,
            "num_requests_fail": self.env.runner.stats.num_failures,
            "start_time": self.start_time,
            "end_time": self.end_time,
        }

        for name, value in self.env.runner.stats.entries.items():
            locust_task_name = "{0}_{1}".format(name[1], name[0])
            statistics["requests"][locust_task_name] = {
                "request_type": name[1],
                "num_requests": value.num_requests,
                "min_response_time": value.min_response_time,
                "median_response_time": value.median_response_time,
                "avg_response_time": value.avg_response_time,
                "max_response_time": value.max_response_time,
                "response_times": value.response_times,
                "response_time_percentiles": {
                    55: value.get_response_time_percentile(0.55),
                    65: value.get_response_time_percentile(0.65),
                    75: value.get_response_time_percentile(0.75),
                    85: value.get_response_time_percentile(0.85),
                    95: value.get_response_time_percentile(0.95),
                },
                "total_rps": value.total_rps,
                "total_rpm": value.total_rps * 60,
            }

        for id, error in self.env.runner.errors.items():
            error_dict = error.to_dict()
            locust_task_name = "{0}_{1}".format(error_dict["method"],
                                                error_dict["name"])
            statistics["failures"][locust_task_name] = error_dict

        return statistics

    def set_run_time_in_sec(self, run_time_str):
        """Parse a Locust-style timespan string (e.g. "20s", "3m", "1h20m")
        and store the result in ``self.run_time_in_sec``.

        Exits the process with status 1 if the value is malformed or not a
        string.
        """
        try:
            self.run_time_in_sec = parse_timespan(run_time_str)
        except ValueError:
            logger.error(
                "Invalid format for `run_time` parameter: '%s', "
                "Valid formats are: 20s, 3m, 2h, 1h20m, 3h30m10s, etc." %
                run_time_str)
            sys.exit(1)
        except TypeError:
            # Bug fix: the original format string ended in "% ", which is an
            # invalid conversion and only had one placeholder for a
            # two-element tuple — logging itself would have raised.
            logger.error(
                "`run_time` must be a string, not %s. Received value: %s" %
                (type(run_time_str), run_time_str))
            sys.exit(1)

    def run(self):
        """
        Run the load test.

        Builds the Locust Environment from ``self.settings``, starts a local
        runner, and blocks until the runner's greenlet finishes. If
        ``settings.run_time`` is set, a timer greenlet quits the runner and
        logs final stats when the limit is reached.
        """

        if self.settings.run_time:
            self.set_run_time_in_sec(run_time_str=self.settings.run_time)

            logger.info("Run time limit set to %s seconds" %
                        self.run_time_in_sec)

            def timelimit_stop():
                # Quit the runner, record the end time, and dump final stats.
                # NOTE(review): this closure reads self.env, which is only
                # assigned below in the try block — it relies on spawn_later
                # firing after env creation; confirm for very short run_time.
                logger.info(
                    "Run time limit reached: %s seconds. Stopping Locust Runner."
                    % self.run_time_in_sec)
                self.env.runner.quit()
                self.end_time = time.time()
                logger.info("Locust completed %s requests with %s errors" %
                            (self.env.runner.stats.num_requests,
                             len(self.env.runner.errors)))
                logger.info(json.dumps(self.stats()))

            gevent.spawn_later(self.run_time_in_sec, timelimit_stop)

        try:
            logger.info("Starting Locust with settings %s " %
                        vars(self.settings))

            self.env = Environment(
                user_classes=self.settings.classes,
                host=self.settings.host,
                tags=self.settings.tags,
                exclude_tags=self.settings.exclude_tags,
                reset_stats=self.settings.reset_stats,
                stop_timeout=self.settings.stop_timeout,
            )

            self.env.create_local_runner()
            # Periodically print aggregated stats while the test runs.
            gevent.spawn(stats_printer(self.env.stats))

            self.env.runner.start(user_count=self.settings.num_users,
                                  spawn_rate=self.settings.spawn_rate)

            self.start_time = time.time()
            # Block until the runner finishes (quit() or time limit).
            self.env.runner.greenlet.join()

        except Exception as e:
            logger.error("Locust exception {0}".format(repr(e)))

        finally:
            # Always fire quitting so listeners can clean up.
            self.env.events.quitting.fire()