Example #1
    def setUp(self):
        # Set our test scenario differently from the default; must be BEFORE
        # the super call.
        self.scenario_dict = dict(
            name='Master Test Scenario - ablkei',
            sizes=[
                dict(name='tiny', size_min=99, size_max=100),
                dict(name='small', size_min=1990, size_max=1990,
                     crud_profile=[71, 9, 12, 8]),
                dict(name='medium', size_min=2990, size_max=3000),
                dict(name='unused', size_min=9876543, size_max=9876543),
                dict(name='large', size_min=399000, size_max=400000,
                     crud_profile=[16, 61, 7, 16]),
                dict(name='huge', size_min=49900000, size_max=71499999)],
            initial_files=dict(
                tiny=300, small=400, medium=500, large=200, huge=70,
            ),
            operation_count=5000,
            #             C  R  U  D
            crud_profile=[5, 3, 1, 1],
            user_count=2,
        )
        super(TestMaster, self).setUp()

        self.zmq_host = 'slick.queue.com'
        self.zmq_work_port = 7482
        self.zmq_results_port = 18398
        self.work_endpoint = 'tcp://%s:%d' % (self.zmq_host,
                                              self.zmq_work_port)
        self.results_endpoint = 'tcp://%s:%d' % (self.zmq_host,
                                                 self.zmq_results_port)

        self.mock_context = flexmock()
        flexmock(zmq.Context).new_instances(self.mock_context).once

        self.mock_work_push = flexmock(send=self._send)
        self.mock_context.should_receive('socket').with_args(
            zmq.PUSH,
        ).and_return(self.mock_work_push).once
        self.mock_work_push.should_receive('bind').with_args(
            self.work_endpoint,
        ).once

        self.mock_results_pull = flexmock(recv=self._recv)
        self.mock_context.should_receive('socket').with_args(
            zmq.PULL,
        ).and_return(self.mock_results_pull).once
        self.mock_results_pull.should_receive('bind').with_args(
            self.results_endpoint,
        ).once
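        # With the expectations above, constructing the Master (below) should
        # create exactly one zmq.Context, bind a PUSH socket to
        # work_endpoint, and bind a PULL socket to results_endpoint.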

        self.master = Master(self.zmq_host, self.zmq_work_port,
                             self.zmq_results_port,
                             connect_timeout=3.14159,
                             network_timeout=2.71828)

        self._send_calls = []
        self._recv_returns = []
Example #2
    def setUp(self):
        # Set our test scenario differently from the default; must be BEFORE
        # the super call.
        self.scenario_dict = dict(
            name='Master Test Scenario - ablkei',
            sizes=[
                dict(name='tiny', size_min=99, size_max=100),
                dict(name='small', size_min=199, size_max=200),
                dict(name='medium', size_min=299, size_max=300),
                dict(name='large', size_min=399, size_max=400),
                dict(name='huge', size_min=499, size_max=500)],
            initial_files=dict(
                tiny=300, small=300, medium=300, large=100, huge=70,
            ),
            operation_count=5000,
            #             C  R  U  D
            crud_profile=[5, 3, 1, 1],
            user_count=2,
        )
        super(TestMaster, self).setUp()

        self.stub_queue = flexmock()
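        # This variant hands the Master a queue client directly; it is
        # expected to watch ssbench.STATS_TUBE and ignore
        # ssbench.DEFAULT_TUBE exactly once each.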
        self.stub_queue.should_receive('watch').with_args(ssbench.STATS_TUBE).once
        self.stub_queue.should_receive('ignore').with_args(ssbench.DEFAULT_TUBE).once
        self.master = Master(self.stub_queue)

        self.result_index = 1  # for self.gen_result()

        self.stub_results = [
            self.gen_result(1, ssbench.CREATE_OBJECT, 'small', 100.0, 101.0, 103.0),
            self.gen_result(1, ssbench.READ_OBJECT, 'tiny', 103.0, 103.1, 103.8),
            self.gen_result(1, ssbench.CREATE_OBJECT, 'huge', 103.8, 105.0, 106.0),
            self.gen_result(1, ssbench.UPDATE_OBJECT, 'large', 106.1, 106.3, 106.4),
            #
            # exceptions should be ignored
            dict(worker_id=2, type=ssbench.UPDATE_OBJECT, completed_at=39293.2, exception='wacky!'),
            self.gen_result(2, ssbench.UPDATE_OBJECT, 'medium', 100.1, 100.9, 102.9),
            self.gen_result(2, ssbench.DELETE_OBJECT, 'large', 102.9, 103.0, 103.3),
            self.gen_result(2, ssbench.CREATE_OBJECT, 'tiny', 103.3, 103.4, 103.5),
            self.gen_result(2, ssbench.READ_OBJECT, 'small', 103.5, 103.7, 104.0),
            #
            self.gen_result(3, ssbench.READ_OBJECT, 'tiny', 100.1, 101.1, 101.9),
            # worker 3 took a while (observe lower concurrency in second 102)
            self.gen_result(3, ssbench.DELETE_OBJECT, 'small', 103.1, 103.6, 103.9),
            self.gen_result(3, ssbench.READ_OBJECT, 'medium', 103.9, 104.2, 104.3),
            self.gen_result(3, ssbench.UPDATE_OBJECT, 'tiny', 104.3, 104.9, 104.999),
        ]
Example #3
    def setUp(self):
        # Set our test scenario differently from the default; must be BEFORE
        # the super call.
        self.scenario_dict = dict(
            name='Master Test Scenario - ablkei',
            sizes=[
                dict(name='tiny', size_min=99, size_max=100),
                dict(name='small', size_min=1990, size_max=1990,
                     crud_profile=[71, 9, 12, 8]),
                dict(name='medium', size_min=2990, size_max=3000),
                dict(name='unused', size_min=9876543, size_max=9876543),
                dict(name='large', size_min=399000, size_max=400000,
                     crud_profile=[16, 61, 7, 16]),
                dict(name='huge', size_min=49900000, size_max=71499999)],
            initial_files=dict(
                tiny=300, small=400, medium=500, large=200, huge=70,
            ),
            operation_count=5000,
            #             C  R  U  D
            crud_profile=[5, 3, 1, 1],
            user_count=2,
        )
        super(TestMaster, self).setUp()

        self.zmq_host = 'slick.queue.com'
        self.zmq_work_port = 7482
        self.zmq_results_port = 18398
        self.work_endpoint = 'tcp://%s:%d' % (self.zmq_host,
                                              self.zmq_work_port)
        self.results_endpoint = 'tcp://%s:%d' % (self.zmq_host,
                                                 self.zmq_results_port)

        self.mock_context = flexmock()
        flexmock(zmq.Context).new_instances(self.mock_context).once

        self.mock_work_push = flexmock()
        self.mock_context.should_receive('socket').with_args(
            zmq.PUSH,
        ).and_return(self.mock_work_push).once
        self.mock_work_push.should_receive('bind').with_args(
            self.work_endpoint,
        ).once

        self.mock_results_pull = flexmock()
        self.mock_context.should_receive('socket').with_args(
            zmq.PULL,
        ).and_return(self.mock_results_pull).once
        self.mock_results_pull.should_receive('bind').with_args(
            self.results_endpoint,
        ).once

        self.master = Master(self.zmq_host, self.zmq_work_port,
                             self.zmq_results_port,
                             connect_timeout=3.14159,
                             network_timeout=2.71828)

        self.result_index = 1  # for self.gen_result()

        self.stub_results = [
            self.gen_result(
                1, ssbench.CREATE_OBJECT, 'small', 100.0, 101.0, 103.0),
            self.gen_result(
                1, ssbench.READ_OBJECT, 'tiny', 103.0, 103.1, 103.8),
            self.gen_result(
                1, ssbench.CREATE_OBJECT, 'huge', 103.8, 105.0, 106.0),
            self.gen_result(
                1, ssbench.UPDATE_OBJECT, 'large', 106.1, 106.3, 106.4),
            #
            # exceptions should be ignored
            dict(worker_id=2, type=ssbench.UPDATE_OBJECT,
                 completed_at=39293.2, exception='wacky!', traceback='ugh'),
            self.gen_result(
                2, ssbench.UPDATE_OBJECT, 'medium', 100.1, 100.9, 102.9),
            self.gen_result(
                2, ssbench.DELETE_OBJECT, 'large', 102.9, 103.0, 103.3),
            self.gen_result(
                2, ssbench.CREATE_OBJECT, 'tiny', 103.3, 103.4, 103.5),
            self.gen_result(
                2, ssbench.READ_OBJECT, 'small', 103.5, 103.7, 104.0),
            #
            self.gen_result(
                3, ssbench.READ_OBJECT, 'tiny', 100.1, 101.1, 101.9),
            # worker 3 took a while (observe lower concurrency in second 102)
            self.gen_result(
                3, ssbench.DELETE_OBJECT, 'small', 103.1, 103.6, 103.9),
            self.gen_result(
                3, ssbench.READ_OBJECT, 'medium', 103.9, 104.2, 104.3),
            self.gen_result(
                3, ssbench.UPDATE_OBJECT, 'tiny', 104.3, 104.9, 104.999),
        ]
Example #4
class TestMaster(ScenarioFixture, TestCase):
    maxDiff = None

    def setUp(self):
        # Set our test scenario differently from the default; must be BEFORE
        # the super call.
        self.scenario_dict = dict(
            name='Master Test Scenario - ablkei',
            sizes=[
                dict(name='tiny', size_min=99, size_max=100),
                dict(name='small', size_min=1990, size_max=1990,
                     crud_profile=[71, 9, 12, 8]),
                dict(name='medium', size_min=2990, size_max=3000),
                dict(name='unused', size_min=9876543, size_max=9876543),
                dict(name='large', size_min=399000, size_max=400000,
                     crud_profile=[16, 61, 7, 16]),
                dict(name='huge', size_min=49900000, size_max=71499999)],
            initial_files=dict(
                tiny=300, small=400, medium=500, large=200, huge=70,
            ),
            operation_count=5000,
            #             C  R  U  D
            crud_profile=[5, 3, 1, 1],
            user_count=2,
        )
        super(TestMaster, self).setUp()

        self.zmq_host = 'slick.queue.com'
        self.zmq_work_port = 7482
        self.zmq_results_port = 18398
        self.work_endpoint = 'tcp://%s:%d' % (self.zmq_host,
                                              self.zmq_work_port)
        self.results_endpoint = 'tcp://%s:%d' % (self.zmq_host,
                                                 self.zmq_results_port)

        self.mock_context = flexmock()
        flexmock(zmq.Context).new_instances(self.mock_context).once

        self.mock_work_push = flexmock()
        self.mock_context.should_receive('socket').with_args(
            zmq.PUSH,
        ).and_return(self.mock_work_push).once
        self.mock_work_push.should_receive('bind').with_args(
            self.work_endpoint,
        ).once

        self.mock_results_pull = flexmock()
        self.mock_context.should_receive('socket').with_args(
            zmq.PULL,
        ).and_return(self.mock_results_pull).once
        self.mock_results_pull.should_receive('bind').with_args(
            self.results_endpoint,
        ).once

        self.master = Master(self.zmq_host, self.zmq_work_port,
                             self.zmq_results_port,
                             connect_timeout=3.14159,
                             network_timeout=2.71828)

        self.result_index = 1  # for self.gen_result()

        self.stub_results = [
            self.gen_result(
                1, ssbench.CREATE_OBJECT, 'small', 100.0, 101.0, 103.0),
            self.gen_result(
                1, ssbench.READ_OBJECT, 'tiny', 103.0, 103.1, 103.8),
            self.gen_result(
                1, ssbench.CREATE_OBJECT, 'huge', 103.8, 105.0, 106.0),
            self.gen_result(
                1, ssbench.UPDATE_OBJECT, 'large', 106.1, 106.3, 106.4),
            #
            # exceptions should be ignored
            dict(worker_id=2, type=ssbench.UPDATE_OBJECT,
                 completed_at=39293.2, exception='wacky!', traceback='ugh'),
            self.gen_result(
                2, ssbench.UPDATE_OBJECT, 'medium', 100.1, 100.9, 102.9),
            self.gen_result(
                2, ssbench.DELETE_OBJECT, 'large', 102.9, 103.0, 103.3),
            self.gen_result(
                2, ssbench.CREATE_OBJECT, 'tiny', 103.3, 103.4, 103.5),
            self.gen_result(
                2, ssbench.READ_OBJECT, 'small', 103.5, 103.7, 104.0),
            #
            self.gen_result(
                3, ssbench.READ_OBJECT, 'tiny', 100.1, 101.1, 101.9),
            # worker 3 took a while (observe lower concurrency in second 102)
            self.gen_result(
                3, ssbench.DELETE_OBJECT, 'small', 103.1, 103.6, 103.9),
            self.gen_result(
                3, ssbench.READ_OBJECT, 'medium', 103.9, 104.2, 104.3),
            self.gen_result(
                3, ssbench.UPDATE_OBJECT, 'tiny', 104.3, 104.9, 104.999),
        ]

    def tearDown(self):
        super(TestMaster, self).tearDown()

    def gen_result(self, worker_id, op_type, size_str, start, first_byte,
                   last_byte):
        self.result_index += 1

        return {
            # There are other keys in a "result", but these are the only ones
            # used for reporting.
            'worker_id': worker_id,
            'type': op_type,
            'size_str': size_str,
            'size': 989,
            'first_byte_latency': first_byte - start,
            'last_byte_latency': last_byte - start,
            'trans_id': 'txID%03d' % self.result_index,
            'completed_at': last_byte,
        }
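
    # For example, the first stub result above,
    #     gen_result(1, ssbench.CREATE_OBJECT, 'small', 100.0, 101.0, 103.0)
    # produces first_byte_latency=1.0, last_byte_latency=3.0 and
    # trans_id='txID002' (result_index starts at 1 and is incremented before
    # use), which is why the aggregate worst_last_byte_latency below is
    # (3.0, 'txID002').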

    def test_format_bytes(self):
        input_expected = [
            (0, '  0  B'),
            (7, '  7  B'),
            (17, ' 17  B'),
            (117, '117  B'),
            (999, '999  B'),
            (1000, '  1 kB'),
            (1001, '  1 kB'),
            (1100, '  1 kB'),
            (1500, '  2 kB'),
            (1999, '  2 kB'),
            (2000, '  2 kB'),
            (7500, '  8 kB'),
            (7900, '  8 kB'),
            (9999, ' 10 kB'),
            (10000, ' 10 kB'),
            (19999, ' 20 kB'),
            (20000, ' 20 kB'),
            (84999, ' 85 kB'),
            (85000, ' 85 kB'),
            (99999, '100 kB'),
            (100000, '100 kB'),
            (100001, '100 kB'),
            (700500, '701 kB'),
            (999999, '  1 MB'),
            (1000000, '  1 MB'),
            (1000001, '  1 MB'),
            (4123456, '  4 MB'),
            (4543210, '  5 MB'),
            (99999999, '100 MB'),
            (100000000, '100 MB'),
            (9999999999, ' 10 GB'),
            (10000000000, ' 10 GB'),
            (99999999999, '100 GB'),
            (100000000000, '100 GB'),
            (234567890123, '235 GB'),
        ]
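        # The expectations above imply: a right-aligned three-character
        # rounded count plus a two-character unit (' B', 'kB', 'MB', 'GB'),
        # escalating to the next unit once the rounded value would need more
        # than three digits (e.g. 999999 bytes is '  1 MB', not '1000 kB').
        # A sketch consistent with these expectations (an assumption; the
        # real Master._format_bytes is not reproduced here, and this assumes
        # `import math`):
        #
        #     def _format_bytes(self, byte_count):
        #         units = [' B', 'kB', 'MB', 'GB']
        #         depth = 0
        #         while (depth < len(units) - 1 and
        #                math.floor(byte_count / 1000.0 ** depth + 0.5) > 999):
        #             depth += 1
        #         return '%3d %s' % (
        #             math.floor(byte_count / 1000.0 ** depth + 0.5),
        #             units[depth])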
        for input_bytes, formatting in input_expected:
            self.assertEqual(
                formatting, self.master._format_bytes(input_bytes),
                '_format_bytes(%d) = %s instead of %s' % (
                    input_bytes, self.master._format_bytes(input_bytes),
                    formatting))

    def test_calculate_scenario_stats_aggregate(self):
        first_byte_latency_all = [1, 0.1, 1.2, 0.2, 0.8, 0.1, 0.1,
                                  0.2, 1, 0.5, 0.3, 0.6]
        last_byte_latency_all = [3, 0.8, 2.2, 0.3, 2.8, 0.4, 0.2,
                                 0.5, 1.8, 0.8, 0.4, 0.699]
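        # (these are first_byte - start and last_byte - start for the twelve
        # non-exception stub results, in the order they appear above)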
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            worker_count=3, start=100.0, stop=106.4, req_count=12,
            avg_req_per_sec=round(12 / (106.4 - 100), 6),
            first_byte_latency=dict(
                min='%6.3f' % 0.1,
                max='%7.3f' % 1.2,
                avg='%7.3f' % stats.lmean(first_byte_latency_all),
                pctile='%7.3f' % 1.2,
                std_dev='%7.3f' % stats.lsamplestdev(first_byte_latency_all),
                median='%7.3f' % stats.lmedianscore(first_byte_latency_all),
            ),
            last_byte_latency=dict(
                min='%6.3f' % 0.2,
                max='%7.3f' % 3.0,
                avg='%7.3f' % stats.lmean(last_byte_latency_all),
                pctile='%7.3f' % 3.0,
                std_dev='%7.3f' % stats.lsamplestdev(last_byte_latency_all),
                median='  0.749',  # XXX why??
                # median='%7.3f' % stats.lmedianscore(last_byte_latency_all),
            ),
            worst_first_byte_latency=(1.2, 'txID004'),
            worst_last_byte_latency=(3.0, 'txID002'),
        ), scen_stats['agg_stats'])

    def test_calculate_scenario_stats_aggregate_low_pctile(self):
        first_byte_latency_all = [1, 0.1, 1.2, 0.2, 0.8, 0.1, 0.1,
                                  0.2, 1, 0.5, 0.3, 0.6]
        last_byte_latency_all = [3, 0.8, 2.2, 0.3, 2.8, 0.4, 0.2,
                                 0.5, 1.8, 0.8, 0.4, 0.699]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results,
                                                          nth_pctile=20)
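        # With twelve samples and nth_pctile=20, the expected percentile
        # below is the third-smallest value, i.e. sorted(...)[2].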
        self.assertDictEqual(dict(
            worker_count=3, start=100.0, stop=106.4, req_count=12,
            avg_req_per_sec=round(12 / (106.4 - 100), 6),
            first_byte_latency=dict(
                min='%6.3f' % 0.1,
                max='%7.3f' % 1.2,
                avg='%7.3f' % stats.lmean(first_byte_latency_all),
                pctile='%7.3f' % sorted(first_byte_latency_all)[2],
                std_dev='%7.3f' % stats.lsamplestdev(first_byte_latency_all),
                median='%7.3f' % stats.lmedianscore(first_byte_latency_all),
            ),
            last_byte_latency=dict(
                min='%6.3f' % 0.2,
                max='%7.3f' % 3.0,
                avg='%7.3f' % stats.lmean(last_byte_latency_all),
                pctile='%7.3f' % sorted(last_byte_latency_all)[2],
                std_dev='%7.3f' % stats.lsamplestdev(last_byte_latency_all),
                median='  0.749',  # XXX why??
                # median='%7.3f' % stats.lmedianscore(last_byte_latency_all),
            ),
            worst_first_byte_latency=(1.2, 'txID004'),
            worst_last_byte_latency=(3.0, 'txID002'),
        ), scen_stats['agg_stats'])

    def test_calculate_scenario_stats_worker1(self):
        w1_first_byte_latency = [1.0, 0.1, 1.2, 0.2]
        w1_last_byte_latency = [3.0, 0.8, 2.2, 0.3]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            start=100.0, stop=106.4, req_count=4,
            avg_req_per_sec=round(4 / (106.4 - 100), 6),
            first_byte_latency=dict(
                min='%6.3f' % min(w1_first_byte_latency),
                max='%7.3f' % max(w1_first_byte_latency),
                avg='%7.3f' % stats.lmean(w1_first_byte_latency),
                pctile='%7.3f' % 1.2,
                std_dev='%7.3f' % stats.lsamplestdev(w1_first_byte_latency),
                median='%7.3f' % stats.lmedianscore(w1_first_byte_latency),
            ),
            last_byte_latency=dict(
                min='%6.3f' % min(w1_last_byte_latency),
                max='%7.3f' % max(w1_last_byte_latency),
                avg='%7.3f' % stats.lmean(w1_last_byte_latency),
                pctile='%7.3f' % 3.0,
                std_dev='%7.3f' % stats.lsamplestdev(w1_last_byte_latency),
                median='%7.3f' % stats.lmedianscore(w1_last_byte_latency),
            ),
            worst_first_byte_latency=(float(max(w1_first_byte_latency)),
                                      'txID004'),
            worst_last_byte_latency=(
                float(max(w1_last_byte_latency)), 'txID002'),
        ), scen_stats['worker_stats'][1])

    def test_calculate_scenario_stats_worker2(self):
        w2_first_byte_latency = [0.8, 0.1, 0.1, 0.2]
        w2_last_byte_latency = [2.8, 0.4, 0.2, 0.5]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            start=100.1, stop=104.0, req_count=4,
            avg_req_per_sec=round(4 / (104.0 - 100.1), 6),
            first_byte_latency=dict(
                min='%6.3f' % min(w2_first_byte_latency),
                max='%7.3f' % max(w2_first_byte_latency),
                avg='%7.3f' % stats.lmean(w2_first_byte_latency),
                pctile='%7.3f' % 0.8,
                std_dev='%7.3f' % stats.lsamplestdev(w2_first_byte_latency),
                median='%7.3f' % stats.lmedianscore(w2_first_byte_latency),
            ),
            last_byte_latency=dict(
                min='%6.3f' % min(w2_last_byte_latency),
                max='%7.3f' % max(w2_last_byte_latency),
                avg='%7.3f' % stats.lmean(w2_last_byte_latency),
                pctile='%7.3f' % 2.8,
                std_dev='%7.3f' % stats.lsamplestdev(w2_last_byte_latency),
                median='%7.3f' % stats.lmedianscore(w2_last_byte_latency),
            ),
            worst_first_byte_latency=(float(max(w2_first_byte_latency)),
                                      'txID006'),
            worst_last_byte_latency=(
                float(max(w2_last_byte_latency)), 'txID006'),
        ), scen_stats['worker_stats'][2])

    def test_calculate_scenario_stats_worker3(self):
        w3_first_byte_latency = [1, 0.5, 0.3, 0.6]
        w3_last_byte_latency = [1.8, 0.8, 0.4, 0.699]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            start=100.1, stop=104.999, req_count=4,
            avg_req_per_sec=round(4 / (104.999 - 100.1), 6),
            first_byte_latency=dict(
                min='%6.3f' % min(w3_first_byte_latency),
                max='%7.3f' % max(w3_first_byte_latency),
                avg='%7.3f' % stats.lmean(w3_first_byte_latency),
                pctile='%7.3f' % 1.0,
                std_dev='%7.3f' % stats.lsamplestdev(w3_first_byte_latency),
                median='%7.3f' % stats.lmedianscore(w3_first_byte_latency),
            ),
            last_byte_latency=dict(
                min='%6.3f' % min(w3_last_byte_latency),
                max='%7.3f' % max(w3_last_byte_latency),
                avg='%7.3f' % stats.lmean(w3_last_byte_latency),
                pctile='%7.3f' % 1.8,
                std_dev='%7.3f' % stats.lsamplestdev(w3_last_byte_latency),
                median='%7.3f' % stats.lmedianscore(w3_last_byte_latency),
            ),
            worst_first_byte_latency=(
                float(max(w3_first_byte_latency)), 'txID010'),
            worst_last_byte_latency=(
                float(max(w3_last_byte_latency)), 'txID010'),
        ), scen_stats['worker_stats'][3])

    def test_calculate_scenario_stats_create(self):
        # Stats for Create
        c_first_byte_latency = [1, 1.2, 0.1]
        c_last_byte_latency = [3.0, 2.2, 0.2]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            start=100.0, stop=106.0, req_count=3,
            avg_req_per_sec=round(3 / (106 - 100.0), 6),
            first_byte_latency=dict(
                min='%6.3f' % min(c_first_byte_latency),
                max='%7.3f' % max(c_first_byte_latency),
                pctile='%7.3f' % max(c_first_byte_latency),
                avg='%7.3f' % stats.lmean(c_first_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(c_first_byte_latency),
                median='%7.3f' % stats.lmedianscore(c_first_byte_latency),
            ),
            last_byte_latency=dict(
                min='%6.3f' % min(c_last_byte_latency),
                max='%7.3f' % max(c_last_byte_latency),
                pctile='%7.3f' % max(c_last_byte_latency),
                avg='%7.3f' % stats.lmean(c_last_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(c_last_byte_latency),
                median='%7.3f' % stats.lmedianscore(c_last_byte_latency),
            ),
            worst_first_byte_latency=(max(c_first_byte_latency), 'txID004'),
            worst_last_byte_latency=(max(c_last_byte_latency), 'txID002'),
            size_stats=OrderedDict([
                ('tiny', {'avg_req_per_sec': 5.0,
                          'first_byte_latency': {'avg': '%7.3f' % 0.1,
                                                 'max': '%7.3f' % 0.1,
                                                 'pctile': '%7.3f' % 0.1,
                                                 'median': '%7.3f' % 0.1,
                                                 'min': '%6.3f' % 0.1,
                                                 'std_dev': '%7.3f' % 0.0},
                          'last_byte_latency': {'avg': '%7.3f' % 0.2,
                                                'max': '%7.3f' % 0.2,
                                                'pctile': '%7.3f' % 0.2,
                                                'median': '%7.3f' % 0.2,
                                                'min': '%6.3f' % 0.2,
                                                'std_dev': '%7.3f' % 0.0},
                          'worst_first_byte_latency': (0.1, 'txID008'),
                          'worst_last_byte_latency': (0.2, 'txID008'),
                          'req_count': 1,
                          'start': 103.3,
                          'stop': 103.5}),
                ('small', {'avg_req_per_sec': 0.333333,
                           'first_byte_latency': {'avg': '%7.3f' % 1.0,
                                                  'max': '%7.3f' % 1.0,
                                                  'pctile': '%7.3f' % 1.0,
                                                  'median': '%7.3f' % 1.0,
                                                  'min': '%6.3f' % 1.0,
                                                  'std_dev': '%7.3f' % 0.0},
                           'last_byte_latency': {'avg': '%7.3f' % 3.0,
                                                 'max': '%7.3f' % 3.0,
                                                 'pctile': '%7.3f' % 3.0,
                                                 'median': '%7.3f' % 3.0,
                                                 'min': '%6.3f' % 3.0,
                                                 'std_dev': '%7.3f' % 0.0},
                           'worst_first_byte_latency': (1.0, 'txID002'),
                           'worst_last_byte_latency': (3.0, 'txID002'),
                           'req_count': 1,
                           'start': 100.0,
                           'stop': 103.0}),
                ('huge', {'avg_req_per_sec': 0.454545,
                          'first_byte_latency': {'avg': '%7.3f' % 1.2,
                                                 'max': '%7.3f' % 1.2,
                                                 'pctile': '%7.3f' % 1.2,
                                                 'median': '%7.3f' % 1.2,
                                                 'min': '%6.3f' % 1.2,
                                                 'std_dev': '%7.3f' % 0.0},
                          'last_byte_latency': {'avg': '%7.3f' % 2.2,
                                                'max': '%7.3f' % 2.2,
                                                'pctile': '%7.3f' % 2.2,
                                                'median': '%7.3f' % 2.2,
                                                'min': '%6.3f' % 2.2,
                                                'std_dev': '%7.3f' % 0.0},
                          'worst_first_byte_latency': (1.2, 'txID004'),
                          'worst_last_byte_latency': (2.2, 'txID004'),
                          'req_count': 1,
                          'start': 103.8,
                          'stop': 106.0})]),
        ), scen_stats['op_stats'][ssbench.CREATE_OBJECT])

    def test_calculate_scenario_stats_read(self):
        # Stats for Read
        r_first_byte_latency = [0.1, 0.2, 1.0, 0.3]
        r_last_byte_latency = [0.8, 0.5, 1.8, 0.4]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            start=100.1, stop=104.3, req_count=4,
            avg_req_per_sec=round(4 / (104.3 - 100.1), 6),
            first_byte_latency=dict(
                min='%6.3f' % min(r_first_byte_latency),
                max='%7.3f' % max(r_first_byte_latency),
                pctile='%7.3f' % max(r_first_byte_latency),
                avg='%7.3f' % stats.lmean(r_first_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(r_first_byte_latency),
                median='%7.3f' % stats.lmedianscore(r_first_byte_latency),
            ),
            last_byte_latency=dict(
                min='%6.3f' % min(r_last_byte_latency),
                max='%7.3f' % max(r_last_byte_latency),
                pctile='%7.3f' % max(r_last_byte_latency),
                avg='%7.3f' % stats.lmean(r_last_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(r_last_byte_latency),
                median='%7.3f' % stats.lmedianscore(r_last_byte_latency),
            ),
            worst_first_byte_latency=(max(r_first_byte_latency), 'txID010'),
            worst_last_byte_latency=(max(r_last_byte_latency), 'txID010'),
            size_stats=OrderedDict([
                ('tiny', {'avg_req_per_sec': 0.540541,
                          'first_byte_latency': {'avg': '%7.3f' % 0.55,
                                                 'max': '%7.3f' % 1.0,
                                                 'pctile': '%7.3f' % 1.0,
                                                 'median': '%7.3f' % 0.55,
                                                 'min': '%6.3f' % 0.1,
                                                 'std_dev': '%7.3f' % 0.45},
                          'last_byte_latency': {'avg': '%7.3f' % 1.3,
                                                'max': '%7.3f' % 1.8,
                                                'pctile': '%7.3f' % 1.8,
                                                'median': '%7.3f' % 1.3,
                                                'min': '%6.3f' % 0.8,
                                                'std_dev': '%7.3f' % 0.5},
                          'worst_first_byte_latency': (1.0, 'txID010'),
                          'worst_last_byte_latency': (1.8, 'txID010'),
                          'req_count': 2,
                          'start': 100.1,
                          'stop': 103.8}),
                ('small', {'avg_req_per_sec': 2.0,
                           'first_byte_latency': {'avg': '%7.3f' % 0.2,
                                                  'max': '%7.3f' % 0.2,
                                                  'pctile': '%7.3f' % 0.2,
                                                  'median': '%7.3f' % 0.2,
                                                  'min': '%6.3f' % 0.2,
                                                  'std_dev': '%7.3f' % 0.0},
                           'last_byte_latency': {'avg': '%7.3f' % 0.5,
                                                 'max': '%7.3f' % 0.5,
                                                 'pctile': '%7.3f' % 0.5,
                                                 'median': '%7.3f' % 0.5,
                                                 'min': '%6.3f' % 0.5,
                                                 'std_dev': '%7.3f' % 0.0},
                           'worst_first_byte_latency': (0.2, 'txID009'),
                           'worst_last_byte_latency': (0.5, 'txID009'),
                           'req_count': 1,
                           'start': 103.5,
                           'stop': 104.0}),
                ('medium', {'avg_req_per_sec': 2.5,
                            'first_byte_latency': {'avg': '%7.3f' % 0.3,
                                                   'max': '%7.3f' % 0.3,
                                                   'pctile': '%7.3f' % 0.3,
                                                   'median': '%7.3f' % 0.3,
                                                   'min': '%6.3f' % 0.3,
                                                   'std_dev': '%7.3f' % 0.0},
                            'last_byte_latency': {'avg': '%7.3f' % 0.4,
                                                  'max': '%7.3f' % 0.4,
                                                  'pctile': '%7.3f' % 0.4,
                                                  'median': '%7.3f' % 0.4,
                                                  'min': '%6.3f' % 0.4,
                                                  'std_dev': '%7.3f' % 0.0},
                            'worst_first_byte_latency': (0.3, 'txID012'),
                            'worst_last_byte_latency': (0.4, 'txID012'),
                            'req_count': 1,
                            'start': 103.9,
                            'stop': 104.3})]),
        ), scen_stats['op_stats'][ssbench.READ_OBJECT])

    def test_calculate_scenario_stats_update(self):
        u_first_byte_latency = [0.2, 0.8, 0.6]
        u_last_byte_latency = [0.3, 2.8, 0.699]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            start=100.1, stop=106.4, req_count=3,
            avg_req_per_sec=round(3 / (106.4 - 100.1), 6),
            first_byte_latency=dict(
                min='%6.3f' % min(u_first_byte_latency),
                max='%7.3f' % max(u_first_byte_latency),
                avg='%7.3f' % stats.lmean(u_first_byte_latency),
                pctile='%7.3f' % 0.8,
                std_dev='%7.3f' % stats.lsamplestdev(u_first_byte_latency),
                median='%7.3f' % stats.lmedianscore(u_first_byte_latency),
            ),
            worst_first_byte_latency=(max(u_first_byte_latency), 'txID006'),
            last_byte_latency=dict(
                min='%6.3f' % min(u_last_byte_latency),
                max='%7.3f' % max(u_last_byte_latency),
                avg='%7.3f' % stats.lmean(u_last_byte_latency),
                pctile='%7.3f' % 2.8,
                std_dev='%7.3f' % stats.lsamplestdev(u_last_byte_latency),
                median='%7.3f' % stats.lmedianscore(u_last_byte_latency),
            ),
            worst_last_byte_latency=(max(u_last_byte_latency), 'txID006'),
            size_stats=OrderedDict([
                ('tiny', {'avg_req_per_sec': 1.430615,
                          'first_byte_latency': {'avg': '%7.3f' % 0.6,
                                                 'pctile': '%7.3f' % 0.6,
                                                 'max': '%7.3f' % 0.6,
                                                 'median': '%7.3f' % 0.6,
                                                 'min': '%6.3f' % 0.6,
                                                 'std_dev': '%7.3f' % 0.0},
                          'worst_first_byte_latency': (0.6, 'txID013'),
                          'last_byte_latency': {'avg': '%7.3f' % 0.699,
                                                'pctile': '%7.3f' % 0.699,
                                                'max': '%7.3f' % 0.699,
                                                'median': '%7.3f' % 0.699,
                                                'min': '%6.3f' % 0.699,
                                                'std_dev': '%7.3f' % 0.0},
                          'worst_last_byte_latency': (0.699, 'txID013'),
                          'req_count': 1,
                          'start': 104.3,
                          'stop': 104.999}),
                ('medium', {'avg_req_per_sec': 0.357143,
                            'first_byte_latency': {'avg': '%7.3f' % 0.8,
                                                   'pctile': '%7.3f' % 0.8,
                                                   'max': '%7.3f' % 0.8,
                                                   'median': '%7.3f' % 0.8,
                                                   'min': '%6.3f' % 0.8,
                                                   'std_dev': '%7.3f' % 0.0},
                            'worst_first_byte_latency': (0.8, 'txID006'),
                            'last_byte_latency': {'avg': '%7.3f' % 2.8,
                                                  'pctile': '%7.3f' % 2.8,
                                                  'max': '%7.3f' % 2.8,
                                                  'median': '%7.3f' % 2.8,
                                                  'min': '%6.3f' % 2.8,
                                                  'std_dev': '%7.3f' % 0.0},
                            'worst_last_byte_latency': (2.8, 'txID006'),
                            'req_count': 1,
                            'start': 100.1,
                            'stop': 102.9}),
                ('large', {'avg_req_per_sec': 3.333333,
                           'first_byte_latency': {'avg': '%7.3f' % 0.2,
                                                  'pctile': '%7.3f' % 0.2,
                                                  'max': '%7.3f' % 0.2,
                                                  'median': '%7.3f' % 0.2,
                                                  'min': '%6.3f' % 0.2,
                                                  'std_dev': '%7.3f' % 0.0},
                           'worst_first_byte_latency': (0.2, 'txID005'),
                           'last_byte_latency': {'avg': '%7.3f' % 0.3,
                                                 'pctile': '%7.3f' % 0.3,
                                                 'max': '%7.3f' % 0.3,
                                                 'median': '%7.3f' % 0.3,
                                                 'min': '%6.3f' % 0.3,
                                                 'std_dev': '%7.3f' % 0.0},
                           'worst_last_byte_latency': (0.3, 'txID005'),
                           'req_count': 1,
                           'start': 106.1,
                           'stop': 106.4})]),
        ), scen_stats['op_stats'][ssbench.UPDATE_OBJECT])

    def test_calculate_scenario_stats_delete(self):
        d_first_byte_latency = [0.1, 0.5]
        d_last_byte_latency = [0.4, 0.8]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            start=102.9, stop=103.9, req_count=2,
            avg_req_per_sec=round(2 / (103.9 - 102.9), 6),
            first_byte_latency=dict(
                min='%6.3f' % min(d_first_byte_latency),
                max='%7.3f' % max(d_first_byte_latency),
                pctile='%7.3f' % max(d_first_byte_latency),
                avg='%7.3f' % stats.lmean(d_first_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(d_first_byte_latency),
                median='%7.3f' % stats.lmedianscore(d_first_byte_latency),
            ),
            last_byte_latency=dict(
                min='%6.3f' % min(d_last_byte_latency),
                max='%7.3f' % max(d_last_byte_latency),
                pctile='%7.3f' % max(d_last_byte_latency),
                avg='%7.3f' % stats.lmean(d_last_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(d_last_byte_latency),
                median='%7.3f' % stats.lmedianscore(d_last_byte_latency),
            ),
            worst_first_byte_latency=(max(d_first_byte_latency), 'txID011'),
            worst_last_byte_latency=(max(d_last_byte_latency), 'txID011'),
            size_stats=OrderedDict([
                ('small', {'avg_req_per_sec': 1.25,
                           'first_byte_latency': {'avg': '%7.3f' % 0.5,
                                                  'max': '%7.3f' % 0.5,
                                                  'pctile': '%7.3f' % 0.5,
                                                  'median': '%7.3f' % 0.5,
                                                  'min': '%6.3f' % 0.5,
                                                  'std_dev': '%7.3f' % 0.0},
                           'last_byte_latency': {'avg': '%7.3f' % 0.8,
                                                 'max': '%7.3f' % 0.8,
                                                 'pctile': '%7.3f' % 0.8,
                                                 'median': '%7.3f' % 0.8,
                                                 'min': '%6.3f' % 0.8,
                                                 'std_dev': '%7.3f' % 0.0},
                           'worst_first_byte_latency': (0.5, 'txID011'),
                           'worst_last_byte_latency': (0.8, 'txID011'),
                           'req_count': 1,
                           'start': 103.1,
                           'stop': 103.9}),
                ('large', {'avg_req_per_sec': 2.5,
                           'first_byte_latency': {'avg': '%7.3f' % 0.1,
                                                  'max': '%7.3f' % 0.1,
                                                  'pctile': '%7.3f' % 0.1,
                                                  'median': '%7.3f' % 0.1,
                                                  'min': '%6.3f' % 0.1,
                                                  'std_dev': '%7.3f' % 0.0},
                           'last_byte_latency': {'avg': '%7.3f' % 0.4,
                                                 'max': '%7.3f' % 0.4,
                                                 'pctile': '%7.3f' % 0.4,
                                                 'median': '%7.3f' % 0.4,
                                                 'min': '%6.3f' % 0.4,
                                                 'std_dev': '%7.3f' % 0.0},
                           'worst_first_byte_latency': (0.1, 'txID007'),
                           'worst_last_byte_latency': (0.4, 'txID007'),
                           'req_count': 1,
                           'start': 102.9,
                           'stop': 103.3})]),
        ), scen_stats['op_stats'][ssbench.DELETE_OBJECT])

    def test_calculate_scenario_size_stats(self):
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(OrderedDict([
            ('tiny', {'avg_req_per_sec': 0.816493,
                      'first_byte_latency': {'avg': '%7.3f' % 0.45,
                                             'max': '%7.3f' % 1.0,
                                             'pctile': '%7.3f' % 1.0,
                                             'median': '%7.3f' % 0.35,
                                             'min': '%6.3f' % 0.1,
                                             'std_dev': '%7.3f' % 0.377492},
                      'last_byte_latency': {'avg': '%7.3f' % 0.87475,
                                            'max': '%7.3f' % 1.8,
                                            'pctile': '%7.3f' % 1.8,
                                            'median': '%7.3f' % 0.7494,
                                            'min': '%6.3f' % 0.2,
                                            'std_dev': '%7.3f' % 0.580485},
                      'worst_first_byte_latency': (1.0, 'txID010'),
                      'worst_last_byte_latency': (1.8, 'txID010'),
                      'req_count': 4,
                      'start': 100.1,
                      'stop': 104.999}),
            ('small', {'avg_req_per_sec': 0.75,
                       'first_byte_latency': {'avg': '%7.3f' % 0.566667,
                                              'max': '%7.3f' % 1.0,
                                              'pctile': '%7.3f' % 1.0,
                                              'median': '%7.3f' % 0.5,
                                              'min': '%6.3f' % 0.2,
                                              'std_dev': '%7.3f' % 0.329983},
                       'last_byte_latency': {'avg': '%7.3f' % 1.433333,
                                             'max': '%7.3f' % 3.0,
                                             'pctile': '%7.3f' % 3.0,
                                             'median': '%7.3f' % 0.8,
                                             'min': '%6.3f' % 0.5,
                                             'std_dev': '%7.3f' % 1.11455},
                       'worst_first_byte_latency': (1.0, 'txID002'),
                       'worst_last_byte_latency': (3.0, 'txID002'),
                       'req_count': 3,
                       'start': 100.0,
                       'stop': 104.0}),
            ('medium', {'avg_req_per_sec': 0.47619,
                        'first_byte_latency': {'avg': '%7.3f' % 0.55,
                                               'max': '%7.3f' % 0.8,
                                               'pctile': '%7.3f' % 0.8,
                                               'median': '%7.3f' % 0.55,
                                               'min': '%6.3f' % 0.3,
                                               'std_dev': '%7.3f' % 0.25},
                        'last_byte_latency': {'avg': '%7.3f' % 1.6,
                                              'max': '%7.3f' % 2.8,
                                              'pctile': '%7.3f' % 2.8,
                                              'median': '%7.3f' % 1.6,
                                              'min': '%6.3f' % 0.4,
                                              'std_dev': '%7.3f' % 1.2},
                        'worst_first_byte_latency': (0.8, 'txID006'),
                        'worst_last_byte_latency': (2.8, 'txID006'),
                        'req_count': 2,
                        'start': 100.1,
                        'stop': 104.3}),
            ('large', {'avg_req_per_sec': 0.571429,
                       'first_byte_latency': {'avg': '%7.3f' % 0.15,
                                              'max': '%7.3f' % 0.2,
                                              'pctile': '%7.3f' % 0.2,
                                              'median': '%7.3f' % 0.15,
                                              'min': '%6.3f' % 0.1,
                                              'std_dev': '%7.3f' % 0.05},
                       'last_byte_latency': {'avg': '%7.3f' % 0.35,
                                             'max': '%7.3f' % 0.4,
                                             'pctile': '%7.3f' % 0.4,
                                             'median': '%7.3f' % 0.35,
                                             'min': '%6.3f' % 0.3,
                                             'std_dev': '%7.3f' % 0.05},
                       'worst_first_byte_latency': (0.2, 'txID005'),
                       'worst_last_byte_latency': (0.4, 'txID007'),
                       'req_count': 2,
                       'start': 102.9,
                       'stop': 106.4}),
            ('huge', {'avg_req_per_sec': 0.454545,
                      'first_byte_latency': {'avg': '%7.3f' % 1.2,
                                             'max': '%7.3f' % 1.2,
                                             'pctile': '%7.3f' % 1.2,
                                             'median': '%7.3f' % 1.2,
                                             'min': '%6.3f' % 1.2,
                                             'std_dev': '%7.3f' % 0.0},
                      'last_byte_latency': {'avg': '%7.3f' % 2.2,
                                            'max': '%7.3f' % 2.2,
                                            'pctile': '%7.3f' % 2.2,
                                            'median': '%7.3f' % 2.2,
                                            'min': '%6.3f' % 2.2,
                                            'std_dev': '%7.3f' % 0.0},
                      'worst_first_byte_latency': (1.2, 'txID004'),
                      'worst_last_byte_latency': (2.2, 'txID004'),
                      'req_count': 1,
                      'start': 103.8,
                      'stop': 106.0})]),
            scen_stats['size_stats'])

    def test_calculate_scenario_stats_time_series(self):
        # Time series (requests completed in each second)
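        # The completed_at values of the 12 non-exception stub results bucket
        # by whole second as: 101 -> 1 (101.9), 102 -> 1 (102.9),
        # 103 -> 5 (103.0, 103.3, 103.5, 103.8, 103.9),
        # 104 -> 3 (104.0, 104.3, 104.999), 105 -> 0, 106 -> 2 (106.0, 106.4),
        # which gives the expected data=[1, 1, 5, 3, 0, 2] starting at
        # second 101.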
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            start=101,
            start_time=99.19999999999999,
            stop=106,
            data=[1, 1, 5, 3, 0, 2],
        ), scen_stats['time_series'])

    def test_write_rps_histogram(self):
        # Write out time series data (requests-per-second histogram) to an
        # already open CSV file
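        # The rows should mirror scen_stats['time_series']['data'], keyed by
        # seconds elapsed since the start of the run.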
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)

        test_csv_file = StringIO()
        self.master.write_rps_histogram(scen_stats, test_csv_file)
        test_csv_file.seek(0)
        reader = csv.reader(test_csv_file)
        self.assertListEqual([
            ["Seconds Since Start", "Requests Completed"],
            ['1', '1'],
            ['2', '1'],
            ['3', '5'],
            ['4', '3'],
            ['5', '0'],
            ['6', '2'],
        ], list(reader))

    def test_generate_scenario_report(self):
        # Compute stats with nth_pctile=50, then check the full text report.
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results,
                                                          nth_pctile=50)
        self.assertListEqual(u"""
Master Test Scenario - ablkei
Worker count:   3   Concurrency:   2  Ran 1970-01-01 00:01:39 UTC to 1970-01-01 00:01:46 UTC (7s)

% Ops    C   R   U   D       Size Range       Size Name
 20%   % 50  30  10  10       99  B - 100  B  tiny
 27%   % 71   9  12   8        2 kB           small
 34%   % 50  30  10  10        3 kB -   3 kB  medium
  0%   % 50  30  10  10       10 MB           unused
 14%   % 16  61   7  16      399 kB - 400 kB  large
  5%   % 50  30  10  10       50 MB -  71 MB  huge
---------------------------------------------------------------------
         51  29  10  10      CRUD weighted average

TOTAL
       Count:    12  Average requests per second:   1.9
                            min       max      avg      std_dev  50%-ile                   Worst latency TX ID
       First-byte latency:  0.100 -   1.200    0.508  (  0.386)    0.400  (all obj sizes)  txID004
       Last-byte  latency:  0.200 -   3.000    1.158  (  0.970)    0.749  (all obj sizes)  txID002
       First-byte latency:  0.100 -   1.000    0.450  (  0.377)    0.350  (    tiny objs)  txID010
       Last-byte  latency:  0.200 -   1.800    0.875  (  0.580)    0.749  (    tiny objs)  txID010
       First-byte latency:  0.200 -   1.000    0.567  (  0.330)    0.500  (   small objs)  txID002
       Last-byte  latency:  0.500 -   3.000    1.433  (  1.115)    0.800  (   small objs)  txID002
       First-byte latency:  0.300 -   0.800    0.550  (  0.250)    0.550  (  medium objs)  txID006
       Last-byte  latency:  0.400 -   2.800    1.600  (  1.200)    1.600  (  medium objs)  txID006
       First-byte latency:  0.100 -   0.200    0.150  (  0.050)    0.150  (   large objs)  txID005
       Last-byte  latency:  0.300 -   0.400    0.350  (  0.050)    0.350  (   large objs)  txID007
       First-byte latency:  1.200 -   1.200    1.200  (  0.000)    1.200  (    huge objs)  txID004
       Last-byte  latency:  2.200 -   2.200    2.200  (  0.000)    2.200  (    huge objs)  txID004

CREATE
       Count:     3  Average requests per second:   0.5
                            min       max      avg      std_dev  50%-ile                   Worst latency TX ID
       First-byte latency:  0.100 -   1.200    0.767  (  0.478)    1.000  (all obj sizes)  txID004
       Last-byte  latency:  0.200 -   3.000    1.800  (  1.178)    2.200  (all obj sizes)  txID002
       First-byte latency:  0.100 -   0.100    0.100  (  0.000)    0.100  (    tiny objs)  txID008
       Last-byte  latency:  0.200 -   0.200    0.200  (  0.000)    0.200  (    tiny objs)  txID008
       First-byte latency:  1.000 -   1.000    1.000  (  0.000)    1.000  (   small objs)  txID002
       Last-byte  latency:  3.000 -   3.000    3.000  (  0.000)    3.000  (   small objs)  txID002
       First-byte latency:  1.200 -   1.200    1.200  (  0.000)    1.200  (    huge objs)  txID004
       Last-byte  latency:  2.200 -   2.200    2.200  (  0.000)    2.200  (    huge objs)  txID004

READ
       Count:     4  Average requests per second:   1.0
                            min       max      avg      std_dev  50%-ile                   Worst latency TX ID
       First-byte latency:  0.100 -   1.000    0.400  (  0.354)    0.250  (all obj sizes)  txID010
       Last-byte  latency:  0.400 -   1.800    0.875  (  0.554)    0.650  (all obj sizes)  txID010
       First-byte latency:  0.100 -   1.000    0.550  (  0.450)    0.550  (    tiny objs)  txID010
       Last-byte  latency:  0.800 -   1.800    1.300  (  0.500)    1.300  (    tiny objs)  txID010
       First-byte latency:  0.200 -   0.200    0.200  (  0.000)    0.200  (   small objs)  txID009
       Last-byte  latency:  0.500 -   0.500    0.500  (  0.000)    0.500  (   small objs)  txID009
       First-byte latency:  0.300 -   0.300    0.300  (  0.000)    0.300  (  medium objs)  txID012
       Last-byte  latency:  0.400 -   0.400    0.400  (  0.000)    0.400  (  medium objs)  txID012

UPDATE
       Count:     3  Average requests per second:   0.5
                            min       max      avg      std_dev  50%-ile                   Worst latency TX ID
       First-byte latency:  0.200 -   0.800    0.533  (  0.249)    0.600  (all obj sizes)  txID006
       Last-byte  latency:  0.300 -   2.800    1.266  (  1.097)    0.699  (all obj sizes)  txID006
       First-byte latency:  0.600 -   0.600    0.600  (  0.000)    0.600  (    tiny objs)  txID013
       Last-byte  latency:  0.699 -   0.699    0.699  (  0.000)    0.699  (    tiny objs)  txID013
       First-byte latency:  0.800 -   0.800    0.800  (  0.000)    0.800  (  medium objs)  txID006
       Last-byte  latency:  2.800 -   2.800    2.800  (  0.000)    2.800  (  medium objs)  txID006
       First-byte latency:  0.200 -   0.200    0.200  (  0.000)    0.200  (   large objs)  txID005
       Last-byte  latency:  0.300 -   0.300    0.300  (  0.000)    0.300  (   large objs)  txID005

DELETE
       Count:     2  Average requests per second:   2.0
                            min       max      avg      std_dev  50%-ile                   Worst latency TX ID
       First-byte latency:  0.100 -   0.500    0.300  (  0.200)    0.300  (all obj sizes)  txID011
       Last-byte  latency:  0.400 -   0.800    0.600  (  0.200)    0.600  (all obj sizes)  txID011
       First-byte latency:  0.500 -   0.500    0.500  (  0.000)    0.500  (   small objs)  txID011
       Last-byte  latency:  0.800 -   0.800    0.800  (  0.000)    0.800  (   small objs)  txID011
       First-byte latency:  0.100 -   0.100    0.100  (  0.000)    0.100  (   large objs)  txID007
       Last-byte  latency:  0.400 -   0.400    0.400  (  0.000)    0.400  (   large objs)  txID007

""".split('\n'), self.master.generate_scenario_report(self.scenario, scen_stats).split('\n'))
Example #5
class TestMaster(ScenarioFixture, TestCase):
    maxDiff = None

    def setUp(self):
        # Set our test scenario differently from the default; must be BEFORE
        # the super call.
        self.scenario_dict = dict(
            name='Master Test Scenario - ablkei',
            sizes=[
                dict(name='tiny', size_min=99, size_max=100),
                dict(name='small',
                     size_min=1990,
                     size_max=1990,
                     crud_profile=[71, 9, 12, 8]),
                dict(name='medium', size_min=2990, size_max=3000),
                dict(name='unused', size_min=9876543, size_max=9876543),
                dict(name='large',
                     size_min=399000,
                     size_max=400000,
                     crud_profile=[16, 61, 7, 16]),
                dict(name='huge', size_min=49900000, size_max=71499999)
            ],
            initial_files=dict(
                tiny=300,
                small=400,
                medium=500,
                large=200,
                huge=70,
            ),
            operation_count=5000,
            #             C  R  U  D
            crud_profile=[5, 3, 1, 1],
            user_count=2,
        )
        super(TestMaster, self).setUp()

        self.zmq_host = 'slick.queue.com'
        self.zmq_work_port = 7482
        self.zmq_results_port = 18398
        self.work_endpoint = 'tcp://%s:%d' % (self.zmq_host,
                                              self.zmq_work_port)
        self.results_endpoint = 'tcp://%s:%d' % (self.zmq_host,
                                                 self.zmq_results_port)

        self.mock_context = flexmock()
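        # Any zmq.Context() the Master constructs should resolve to this mock,
        # so the socket() expectations below are what actually get exercised.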
        flexmock(zmq.Context).new_instances(self.mock_context).once

        self.mock_work_push = flexmock(send=self._send)
        self.mock_context.should_receive('socket').with_args(
            zmq.PUSH, ).and_return(self.mock_work_push).once
        self.mock_work_push.should_receive('bind').with_args(
            self.work_endpoint, ).once

        self.mock_results_pull = flexmock(recv=self._recv)
        self.mock_context.should_receive('socket').with_args(
            zmq.PULL, ).and_return(self.mock_results_pull).once
        self.mock_results_pull.should_receive('bind').with_args(
            self.results_endpoint, ).once

        with mock.patch.object(ssbench.master, 'is_ipv6') as mock_is_ipv6:
            mock_is_ipv6.return_value = False
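            # Presumably the Master consults is_ipv6() when formatting its
            # endpoints; force the IPv4 path here and assert it is checked
            # exactly once for the configured host (see the assertion below).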
            self.master = Master(self.zmq_host,
                                 self.zmq_work_port,
                                 self.zmq_results_port,
                                 connect_timeout=3.14159,
                                 network_timeout=2.71828)
            mock_is_ipv6.assert_called_once_with(self.zmq_host)

        self._send_calls = []
        self._recv_returns = []

    def tearDown(self):
        super(TestMaster, self).tearDown()

    def _send(self, data):
        self._send_calls.append(data)

    def _recv(self):
        value = self._recv_returns.pop(0)
        return value

    def assert_bench_output(self, output, expected):
        expected_stderr = '''\
        Benchmark Run:
          X    work job raised an exception
          .  <  1s first-byte-latency
          o  <  3s first-byte-latency
          O  < 10s first-byte-latency
          * >= 10s first-byte-latency
          _  <  1s last-byte-latency  (CREATE or UPDATE)
          |  <  3s last-byte-latency  (CREATE or UPDATE)
          ^  < 10s last-byte-latency  (CREATE or UPDATE)
          @ >= 10s last-byte-latency  (CREATE or UPDATE)
        '''
        expected_stderr = textwrap.dedent(expected_stderr)
        expected_stderr += expected + '\n'
        self.assertEqual(output, expected_stderr)

    def test_run_scenario_with_noop(self):
        bench_jobs = list(self.scenario.bench_jobs())

        job_result = dict(
            type='type',
            container='container',
            name='john.smith',
            first_byte_latency=0,
        )
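        # Feed one msgpack-encoded, single-result batch back through _recv()
        # for every bench job the scenario generates.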
        recvs = [[job_result] for _ in range(len(bench_jobs))]
        self._recv_returns = map(msgpack.dumps, recvs)

        process_raw_results_calls = []

        def mock_process_raw_results(raw_results):
            process_raw_results_calls.append(raw_results)

        # create a mock run result object
        temp_file = tempfile.NamedTemporaryFile()
        mock_run_results = flexmock(RunResults(temp_file.name))
        mock_run_results \
            .should_receive('process_raw_results') \
            .replace_with(mock_process_raw_results) \
            .times(len(bench_jobs))
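        # run_scenario should forward each raw result batch to
        # RunResults.process_raw_results exactly once per bench job.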

        ori_stderr = sys.stderr
        stderr = StringIO.StringIO()
        sys.stderr = stderr
        try:
            self.master.run_scenario(self.scenario,
                                     auth_kwargs={},
                                     noop=True,
                                     run_results=mock_run_results)
            sys.stderr.flush()
        finally:
            sys.stderr = ori_stderr

        # make sure we get expected result in the RunResults
        parsed_calls = map(lambda d: msgpack.loads(d)[0],
                           process_raw_results_calls)
        expected_results = [job_result] * len(bench_jobs)
        self.assertEqual(parsed_calls, expected_results)

    def test_run_scenario_only_doable_job_should_pass(self):
        def not_doable_jobs():
            yield dict(
                type=ssbench.CREATE_OBJECT,
                size_str='small',
                test_id=0,
            )
            yield dict(
                type=ssbench.READ_OBJECT,
                size_str='small',
                test_id=1,
            )

        # make the scenario return a sequence of not-doable jobs
        self.scenario = flexmock(self.scenario)
        self.scenario \
            .should_receive('bench_jobs') \
            .replace_with(not_doable_jobs)
        # make the scenario return no init jobs
        self.scenario \
            .should_receive('initial_jobs') \
            .replace_with(lambda: [])

        # make the client not send a real request
        mock_client = flexmock(client)
        mock_client \
            .should_receive('head_container')

        bench_jobs = list(self.scenario.bench_jobs())

        def mock_result(**kwargs):
            job_result = dict(
                type=ssbench.READ_OBJECT,
                size_str='small',
                container='container',
                name='john.smith',
                first_byte_latency=0,
            )
            job_result.update(kwargs)
            return job_result

        # the first create-object result will let RunState put it in the queue
        recvs = [[mock_result(type=ssbench.CREATE_OBJECT)]] + \
            [[mock_result()] for i in range(len(bench_jobs))]
        self._recv_returns = map(msgpack.dumps, recvs)

        # run the scenario
        auth_kwargs = dict(
            token='MOCK_TOKEN',
            storage_urls=['http://127.0.0.1:8080/auth/v1.0'],
        )

        orig_stderr = sys.stderr
        sys.stderr = open('/dev/null', 'wb')

        try:
            self.master.run_scenario(self.scenario,
                                     auth_kwargs=auth_kwargs,
                                     run_results=None,
                                     batch_size=2)
        finally:
            sys.stderr = orig_stderr

        sent_jobs = map(msgpack.loads, self._send_calls)
        sent_jobs = sum(sent_jobs, [])  # flatten the list

        # The sequence in batch 1 is:
        #     Create -> doable
        #     Read   -> not doable
        # so only the doable job should be passed to the worker.

        # There is a bug which allows a non-doable job to be passed into the
        # send-job queue as None, so make sure None never appears in
        # sent_jobs.
        self.assertNotIn(None, sent_jobs)

    def test_run_scenario_output(self):
        # Don't actually run a lot of jobs...
        self.scenario.operation_count = 100
        bench_jobs = list(self.scenario.bench_jobs())

        def run_with_args(**kwargs):
            job_result = dict(
                type='type',
                container='container',
                name='john.smith',
            )
            job_result.update(kwargs)
            recvs = [[job_result] for _ in range(len(bench_jobs))]
            self._recv_returns = map(msgpack.dumps, recvs)

            ori_stderr = sys.stderr
            stderr = StringIO.StringIO()
            sys.stderr = stderr
            try:
                self.master.run_scenario(self.scenario,
                                         auth_kwargs={},
                                         noop=True,
                                         run_results=None)
                sys.stderr.flush()
            finally:
                sys.stderr = ori_stderr
            stderr_output = stderr.getvalue()
            return stderr_output
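        # Every job receives the identical stubbed result, so each run should
        # emit one uniform progress symbol per bench job.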

        # Test first byte latency output
        first_byte_0s = run_with_args(first_byte_latency=0)
        self.assert_bench_output(first_byte_0s, '.' * len(bench_jobs))

        first_byte_lt3s = run_with_args(first_byte_latency=2)
        self.assert_bench_output(first_byte_lt3s, 'o' * len(bench_jobs))

        first_byte_lt10s = run_with_args(first_byte_latency=8)
        self.assert_bench_output(first_byte_lt10s, 'O' * len(bench_jobs))

        first_byte_ge10s = run_with_args(first_byte_latency=12)
        self.assert_bench_output(first_byte_ge10s, '*' * len(bench_jobs))

        # Test last byte latency output
        last_byte_0s = run_with_args(last_byte_latency=0)
        self.assert_bench_output(last_byte_0s, '_' * len(bench_jobs))

        last_byte_lt3s = run_with_args(last_byte_latency=2)
        self.assert_bench_output(last_byte_lt3s, '|' * len(bench_jobs))

        last_byte_lt10s = run_with_args(last_byte_latency=8)
        self.assert_bench_output(last_byte_lt10s, '^' * len(bench_jobs))

        last_byte_ge10s = run_with_args(last_byte_latency=12)
        self.assert_bench_output(last_byte_ge10s, '@' * len(bench_jobs))

        # Test exception output
        exception_output = run_with_args(exception=1)
        self.assert_bench_output(exception_output, 'X' * len(bench_jobs))

    def test_cleanup_containers(self):
        container_test_sets = [
            # default policy
            {
                'delete_containers': ['ssbench_001_default_policy'],
                'other_containers': ['foo', 'bar', 'ssbench_001_ec_policy',
                                     'ssbench_002_stuff'],
                'base': 'ssbench',
                'policy': 'default_policy',
            },

            # ec_policy
            {
                'delete_containers': ['ssbench_001_ec_policy'],
                'other_containers': ['foo', 'bar', 'ssbench_001_default_policy',
                                     'ssbench_002_stuff'],
                'base': 'ssbench',
                'policy': 'ec_policy',
            },

            # policy named "stuff"
            {
                'delete_containers': ['ssbench_002_stuff'],
                'other_containers': ['foo', 'bar', 'ssbench_001_ec_policy',
                                     'ssbench_001_ec_policy'],
                'base': 'ssbench',
                'policy': 'stuff',
            },
        ]
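        # Containers follow a "<base>_<NNN>_<policy>" naming scheme; only the
        # ones matching the requested base name and policy should be deleted,
        # while 'other_containers' must be left alone.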

        for test_input in container_test_sets:
            client = mock.Mock()
            policy = test_input['policy']
            base_name = test_input['base']
            delete_containers = test_input['delete_containers']
            other_containers = test_input['other_containers']
            all_containers = delete_containers + other_containers
            container_info = [{
                'name': container,
                'count': 0
            } for container in all_containers]
            client.get_account.return_value = (None, container_info)
            client.get_container.return_value = (None, [])
            client.http_connection.return_value = None

            auth_args = {
                'token': 'auth_token',
                'storage_urls': ['http://storageUrl']
            }

            with mock.patch('ssbench.master.client', new=client):
                self.master.cleanup_containers(auth_args, base_name, 1, policy)

            expected_calls = []
            for container in delete_containers:
                expected_calls.append(
                    mock.call(auth_args['storage_urls'][0],
                              auth_args['token'],
                              container,
                              http_conn=None))
            client.delete_container.assert_has_calls(expected_calls,
                                                     any_order=True)
Example #6
0
class TestMaster(ScenarioFixture, TestCase):
    maxDiff = None

    def setUp(self):
        # Set our test scenario differently from the default; must be BEFORE
        # the super call.
        self.scenario_dict = dict(
            name='Master Test Scenario - ablkei',
            sizes=[
                dict(name='tiny', size_min=99, size_max=100),
                dict(name='small', size_min=1990, size_max=1990,
                     crud_profile=[71, 9, 12, 8]),
                dict(name='medium', size_min=2990, size_max=3000),
                dict(name='unused', size_min=9876543, size_max=9876543),
                dict(name='large', size_min=399000, size_max=400000,
                     crud_profile=[16, 61, 7, 16]),
                dict(name='huge', size_min=49900000, size_max=71499999)],
            initial_files=dict(
                tiny=300, small=400, medium=500, large=200, huge=70,
            ),
            operation_count=5000,
            #             C  R  U  D
            crud_profile=[5, 3, 1, 1],
            user_count=2,
        )
        super(TestMaster, self).setUp()

        self.zmq_host = 'slick.queue.com'
        self.zmq_work_port = 7482
        self.zmq_results_port = 18398
        self.work_endpoint = 'tcp://%s:%d' % (self.zmq_host,
                                              self.zmq_work_port)
        self.results_endpoint = 'tcp://%s:%d' % (self.zmq_host,
                                                 self.zmq_results_port)

        self.mock_context = flexmock()
        flexmock(zmq.Context).new_instances(self.mock_context).once

        self.mock_work_push = flexmock(send=self._send)
        self.mock_context.should_receive('socket').with_args(
            zmq.PUSH,
        ).and_return(self.mock_work_push).once
        self.mock_work_push.should_receive('bind').with_args(
            self.work_endpoint,
        ).once

        self.mock_results_pull = flexmock(recv=self._recv)
        self.mock_context.should_receive('socket').with_args(
            zmq.PULL,
        ).and_return(self.mock_results_pull).once
        self.mock_results_pull.should_receive('bind').with_args(
            self.results_endpoint,
        ).once

        self.master = Master(self.zmq_host, self.zmq_work_port,
                             self.zmq_results_port,
                             connect_timeout=3.14159,
                             network_timeout=2.71828)

        self._send_calls = []
        self._recv_returns = []

    def tearDown(self):
        super(TestMaster, self).tearDown()

    def _send(self, data):
        self._send_calls.append(data)

    def _recv(self):
        value = self._recv_returns.pop(0)
        return value

    def assert_bench_output(self, output, expected):
        expected_stderr = '''\
        Benchmark Run:
          X    work job raised an exception
          .  <  1s first-byte-latency
          o  <  3s first-byte-latency
          O  < 10s first-byte-latency
          * >= 10s first-byte-latency
          _  <  1s last-byte-latency  (CREATE or UPDATE)
          |  <  3s last-byte-latency  (CREATE or UPDATE)
          ^  < 10s last-byte-latency  (CREATE or UPDATE)
          @ >= 10s last-byte-latency  (CREATE or UPDATE)
        '''
        expected_stderr = textwrap.dedent(expected_stderr)
        expected_stderr += expected + '\n'
        self.assertEqual(output, expected_stderr)

    def test_run_scenario_with_noop(self):
        bench_jobs = list(self.scenario.bench_jobs())

        job_result = dict(
            type='type',
            container='container',
            name='john.smith',
            first_byte_latency=0,
        )
        recvs = [[job_result] for _ in range(len(bench_jobs))]
        self._recv_returns = map(msgpack.dumps, recvs)

        process_raw_results_calls = []

        def mock_process_raw_results(raw_results):
            process_raw_results_calls.append(raw_results)

        # create a mock run result object
        temp_file = tempfile.NamedTemporaryFile()
        mock_run_results = flexmock(RunResults(temp_file.name))
        mock_run_results \
            .should_receive('process_raw_results') \
            .replace_with(mock_process_raw_results) \
            .times(len(bench_jobs))

        ori_stderr = sys.stderr
        stderr = StringIO.StringIO()
        sys.stderr = stderr
        try:
            self.master.run_scenario(self.scenario, auth_kwargs={},
                                     noop=True, run_results=mock_run_results)
            sys.stderr.flush()
        finally:
            sys.stderr = ori_stderr

        # make sure we get expected result in the RunResults
        parsed_calls = map(lambda d: msgpack.loads(d)[0], process_raw_results_calls)
        expected_results = [job_result] * len(bench_jobs)
        self.assertEqual(parsed_calls, expected_results)

    def test_run_scenario_only_doable_job_should_pass(self):

        def not_doable_jobs():
            yield dict(
                type=ssbench.CREATE_OBJECT,
                size_str='small',
                test_id=0,
            )
            yield dict(
                type=ssbench.READ_OBJECT,
                size_str='small',
                test_id=1,
            )

        # make the scenario return a sequence of not-doable jobs
        self.scenario = flexmock(self.scenario)
        self.scenario \
            .should_receive('bench_jobs') \
            .replace_with(not_doable_jobs)
        # make the scenario return no init jobs
        self.scenario \
            .should_receive('initial_jobs') \
            .replace_with(lambda: [])

        # make the client not send a real request
        mock_client = flexmock(client)
        mock_client \
            .should_receive('head_container')

        bench_jobs = list(self.scenario.bench_jobs())

        def mock_result(**kwargs):
            job_result = dict(
                type=ssbench.READ_OBJECT,
                size_str='small',
                container='container',
                name='john.smith',
                first_byte_latency=0,
            )
            job_result.update(kwargs)
            return job_result

        # the first create-object result will let RunState put it in the queue
        recvs = [[mock_result(type=ssbench.CREATE_OBJECT)]] + \
            [[mock_result()] for i in range(len(bench_jobs))]
        self._recv_returns = map(msgpack.dumps, recvs)

        # run the scenario
        auth_kwargs = dict(
            token='MOCK_TOKEN',
            storage_urls=['http://127.0.0.1:8080/auth/v1.0'],
        )

        orig_stderr = sys.stderr
        sys.stderr = open('/dev/null', 'wb')

        try:
            self.master.run_scenario(self.scenario, auth_kwargs=auth_kwargs,
                                     run_results=None, batch_size=2)
        finally:
            sys.stderr = orig_stderr

        sent_jobs = map(msgpack.loads, self._send_calls)
        sent_jobs = sum(sent_jobs, [])  # flatten the list

        # The sequence in batch 1 is:
        #     Create -> doable
        #     Read   -> not doable
        # so only the doable job should be passed to the worker.

        # There is a bug which allows a non-doable job to be passed into the
        # send-job queue as None, so make sure None never appears in
        # sent_jobs.
        self.assertNotIn(None, sent_jobs)

    def test_run_scenario_output(self):
        # Don't actually run a lot of jobs...
        self.scenario.operation_count = 100
        bench_jobs = list(self.scenario.bench_jobs())

        def run_with_args(**kwargs):
            job_result = dict(
                type='type',
                container='container',
                name='john.smith',
            )
            job_result.update(kwargs)
            recvs = [[job_result] for _ in range(len(bench_jobs))]
            self._recv_returns = map(msgpack.dumps, recvs)

            ori_stderr = sys.stderr
            stderr = StringIO.StringIO()
            sys.stderr = stderr
            try:
                self.master.run_scenario(self.scenario, auth_kwargs={},
                                         noop=True, run_results=None)
                sys.stderr.flush()
            finally:
                sys.stderr = ori_stderr
            stderr_output = stderr.getvalue()
            return stderr_output

        # Test first byte latency output
        first_byte_0s = run_with_args(first_byte_latency=0)
        self.assert_bench_output(first_byte_0s, '.' * len(bench_jobs))

        first_byte_lt3s = run_with_args(first_byte_latency=2)
        self.assert_bench_output(first_byte_lt3s, 'o' * len(bench_jobs))

        first_byte_lt10s = run_with_args(first_byte_latency=8)
        self.assert_bench_output(first_byte_lt10s, 'O' * len(bench_jobs))

        first_byte_ge10s = run_with_args(first_byte_latency=12)
        self.assert_bench_output(first_byte_ge10s, '*' * len(bench_jobs))

        # Test last byte latency output
        last_byte_0s = run_with_args(last_byte_latency=0)
        self.assert_bench_output(last_byte_0s, '_' * len(bench_jobs))

        last_byte_lt3s = run_with_args(last_byte_latency=2)
        self.assert_bench_output(last_byte_lt3s, '|' * len(bench_jobs))

        last_byte_lt10s = run_with_args(last_byte_latency=8)
        self.assert_bench_output(last_byte_lt10s, '^' * len(bench_jobs))

        last_byte_ge10s = run_with_args(last_byte_latency=12)
        self.assert_bench_output(last_byte_ge10s, '@' * len(bench_jobs))

        # Test exception output
        exception_output = run_with_args(exception=1)
        self.assert_bench_output(exception_output, 'X' * len(bench_jobs))
Example #7
0
class TestMaster(ScenarioFixture, TestCase):
    maxDiff = None

    def setUp(self):
        # Set our test scenario differently from the default; must be BEFORE
        # the super call.
        self.scenario_dict = dict(
            name='Master Test Scenario - ablkei',
            sizes=[
                dict(name='tiny', size_min=99, size_max=100),
                dict(name='small', size_min=199, size_max=200),
                dict(name='medium', size_min=299, size_max=300),
                dict(name='large', size_min=399, size_max=400),
                dict(name='huge', size_min=499, size_max=500)],
            initial_files=dict(
                tiny=300, small=300, medium=300, large=100, huge=70,
            ),
            operation_count=5000,
            #             C  R  U  D
            crud_profile=[5, 3, 1, 1],
            user_count=2,
        )
        super(TestMaster, self).setUp()

        self.stub_queue = flexmock()
        self.stub_queue.should_receive('watch').with_args(ssbench.STATS_TUBE).once
        self.stub_queue.should_receive('ignore').with_args(ssbench.DEFAULT_TUBE).once
        self.master = Master(self.stub_queue)

        self.result_index = 1  # for self.gen_result()

        self.stub_results = [
            self.gen_result(1, ssbench.CREATE_OBJECT, 'small', 100.0, 101.0, 103.0),
            self.gen_result(1, ssbench.READ_OBJECT, 'tiny', 103.0, 103.1, 103.8),
            self.gen_result(1, ssbench.CREATE_OBJECT, 'huge', 103.8, 105.0, 106.0),
            self.gen_result(1, ssbench.UPDATE_OBJECT, 'large', 106.1, 106.3, 106.4),
            #
            # exceptions should be ignored
            dict(worker_id=2, type=ssbench.UPDATE_OBJECT, completed_at=39293.2, exception='wacky!'),
            self.gen_result(2, ssbench.UPDATE_OBJECT, 'medium', 100.1, 100.9, 102.9),
            self.gen_result(2, ssbench.DELETE_OBJECT, 'large', 102.9, 103.0, 103.3),
            self.gen_result(2, ssbench.CREATE_OBJECT, 'tiny', 103.3, 103.4, 103.5),
            self.gen_result(2, ssbench.READ_OBJECT, 'small', 103.5, 103.7, 104.0),
            #
            self.gen_result(3, ssbench.READ_OBJECT, 'tiny', 100.1, 101.1, 101.9),
            # worker 3 took a while (observe the lower concurrency in second 102)
            self.gen_result(3, ssbench.DELETE_OBJECT, 'small', 103.1, 103.6, 103.9),
            self.gen_result(3, ssbench.READ_OBJECT, 'medium', 103.9, 104.2, 104.3),
            self.gen_result(3, ssbench.UPDATE_OBJECT, 'tiny', 104.3, 104.9, 104.999),
        ]

    def tearDown(self):
        super(TestMaster, self).tearDown()

    def gen_result(self, worker_id, op_type, size_str, start, first_byte,
                   last_byte):
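        # Build a minimal stub result from absolute timestamps; e.g.
        # gen_result(1, ssbench.CREATE_OBJECT, 'small', 100.0, 101.0, 103.0)
        # yields first_byte_latency=1.0, last_byte_latency=3.0 and
        # completed_at=103.0.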
        self.result_index += 1

        return {
            # There are other keys in a "result", but these are the only ones
            # used for the reporting.
            'worker_id': worker_id,
            'type': op_type,
            'size_str': size_str,
            'size': 989,
            'first_byte_latency': first_byte - start,
            'last_byte_latency': last_byte - start,
            'completed_at': last_byte,
        }

    def test_calculate_scenario_stats_aggregate(self):
        first_byte_latency_all = [1, 0.1, 1.2, 0.2, 0.8, 0.1, 0.1, 0.2, 1, 0.5, 0.3, 0.6]
        last_byte_latency_all =  [3, 0.8, 2.2, 0.3, 2.8, 0.4, 0.2, 0.5, 1.8, 0.8, 0.4, 0.699]
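        # These are the per-request latencies (first_byte - start and
        # last_byte - start) for all twelve non-exception stub results above.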
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            worker_count=3, start=100.0, stop=106.4, req_count=12,
            avg_req_per_sec=round(12 / (106.4 - 100), 6),
            first_byte_latency=dict(
                min='%6.3f' % 0.1,
                max='%7.3f' % 1.2,
                avg='%7.3f' % stats.lmean(first_byte_latency_all),
                std_dev='%7.3f' % stats.lsamplestdev(first_byte_latency_all),
                median='%7.3f' % stats.lmedianscore(first_byte_latency_all),
            ),
            last_byte_latency=dict(
                min='%6.3f' % 0.2,
                max='%7.3f' % 3.0,
                avg='%7.3f' % stats.lmean(last_byte_latency_all),
                std_dev='%7.3f' % stats.lsamplestdev(last_byte_latency_all),
                median='  0.749',  # XXX why??
                #median='%7.3f' % stats.lmedianscore(last_byte_latency_all),
            ),
        ), scen_stats['agg_stats'])

    def test_calculate_scenario_stats_worker1(self):
        w1_first_byte_latency = [1.0, 0.1, 1.2, 0.2]
        w1_last_byte_latency = [3.0, 0.8, 2.2, 0.3]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            start=100.0, stop=106.4, req_count=4,
            avg_req_per_sec=round(4 / (106.4 - 100), 6),
            first_byte_latency=dict(
                min='%6.3f' % min(w1_first_byte_latency),
                max='%7.3f' % max(w1_first_byte_latency),
                avg='%7.3f' % stats.lmean(w1_first_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(w1_first_byte_latency),
                median='%7.3f' % stats.lmedianscore(w1_first_byte_latency),
            ),
            last_byte_latency=dict(
                min='%6.3f' % min(w1_last_byte_latency),
                max='%7.3f' % max(w1_last_byte_latency),
                avg='%7.3f' % stats.lmean(w1_last_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(w1_last_byte_latency),
                median='%7.3f' % stats.lmedianscore(w1_last_byte_latency),
            ),
        ), scen_stats['worker_stats'][1])

    def test_calculate_scenario_stats_worker2(self):
        w2_first_byte_latency = [0.8, 0.1, 0.1, 0.2]
        w2_last_byte_latency = [2.8, 0.4, 0.2, 0.5]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            start=100.1, stop=104.0, req_count=4,
            avg_req_per_sec=round(4 / (104.0 - 100.1), 6),
            first_byte_latency=dict(
                min='%6.3f' % min(w2_first_byte_latency),
                max='%7.3f' % max(w2_first_byte_latency),
                avg='%7.3f' % stats.lmean(w2_first_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(w2_first_byte_latency),
                median='%7.3f' % stats.lmedianscore(w2_first_byte_latency),
            ),
            last_byte_latency=dict(
                min='%6.3f' % min(w2_last_byte_latency),
                max='%7.3f' % max(w2_last_byte_latency),
                avg='%7.3f' % stats.lmean(w2_last_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(w2_last_byte_latency),
                median='%7.3f' % stats.lmedianscore(w2_last_byte_latency),
            ),
        ), scen_stats['worker_stats'][2])

    def test_calculate_scenario_stats_worker3(self):
        w3_first_byte_latency = [1, 0.5, 0.3, 0.6]
        w3_last_byte_latency = [1.8, 0.8, 0.4, 0.699]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            start=100.1, stop=104.999, req_count=4,
            avg_req_per_sec=round(4 / (104.999 - 100.1), 6),
            first_byte_latency=dict(
                min='%6.3f' % min(w3_first_byte_latency),
                max='%7.3f' % max(w3_first_byte_latency),
                avg='%7.3f' % stats.lmean(w3_first_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(w3_first_byte_latency),
                median='%7.3f' % stats.lmedianscore(w3_first_byte_latency),
            ),
            last_byte_latency=dict(
                min='%6.3f' % min(w3_last_byte_latency),
                max='%7.3f' % max(w3_last_byte_latency),
                avg='%7.3f' % stats.lmean(w3_last_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(w3_last_byte_latency),
                median='%7.3f' % stats.lmedianscore(w3_last_byte_latency),
            ),
        ), scen_stats['worker_stats'][3])

    def test_calculate_scenario_stats_create(self):
        # Stats for Create
        c_first_byte_latency = [1, 1.2, 0.1]
        c_last_byte_latency = [3.0, 2.2, 0.2]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            start=100.0, stop=106.0, req_count=3,
            avg_req_per_sec=round(3 / (106 - 100.0), 6),
            first_byte_latency=dict(
                min='%6.3f' % min(c_first_byte_latency),
                max='%7.3f' % max(c_first_byte_latency),
                avg='%7.3f' % stats.lmean(c_first_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(c_first_byte_latency),
                median='%7.3f' % stats.lmedianscore(c_first_byte_latency),
            ),
            last_byte_latency=dict(
                min='%6.3f' % min(c_last_byte_latency),
                max='%7.3f' % max(c_last_byte_latency),
                avg='%7.3f' % stats.lmean(c_last_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(c_last_byte_latency),
                median='%7.3f' % stats.lmedianscore(c_last_byte_latency),
            ),
            size_stats=OrderedDict([
                ('tiny', {'avg_req_per_sec': 5.0,
                        'first_byte_latency': {'avg': '%7.3f' % 0.1,
                                               'max': '%7.3f' % 0.1,
                                               'median': '%7.3f' % 0.1,
                                               'min': '%6.3f' % 0.1,
                                               'std_dev': '%7.3f' % 0.0},
                        'last_byte_latency': {'avg': '%7.3f' % 0.2,
                                              'max': '%7.3f' % 0.2,
                                              'median': '%7.3f' % 0.2,
                                              'min': '%6.3f' % 0.2,
                                              'std_dev': '%7.3f' % 0.0},
                        'req_count': 1,
                        'start': 103.3,
                        'stop': 103.5}),
                ('small', {'avg_req_per_sec': 0.333333,
                          'first_byte_latency': {'avg': '%7.3f' % 1.0,
                                                 'max': '%7.3f' % 1.0,
                                                 'median': '%7.3f' % 1.0,
                                                 'min': '%6.3f' % 1.0,
                                                 'std_dev': '%7.3f' % 0.0},
                          'last_byte_latency': {'avg': '%7.3f' % 3.0,
                                                'max': '%7.3f' % 3.0,
                                                'median': '%7.3f' % 3.0,
                                                'min': '%6.3f' % 3.0,
                                                'std_dev': '%7.3f' % 0.0},
                          'req_count': 1,
                          'start': 100.0,
                          'stop': 103.0}),
                ('huge', {'avg_req_per_sec': 0.454545,
                             'first_byte_latency': {'avg': '%7.3f' % 1.2,
                                                    'max': '%7.3f' % 1.2,
                                                    'median': '%7.3f' % 1.2,
                                                    'min': '%6.3f' % 1.2,
                                                    'std_dev': '%7.3f' % 0.0},
                             'last_byte_latency': {'avg': '%7.3f' % 2.2,
                                                   'max': '%7.3f' % 2.2,
                                                   'median': '%7.3f' % 2.2,
                                                   'min': '%6.3f' % 2.2,
                                                   'std_dev': '%7.3f' % 0.0},
                             'req_count': 1,
                             'start': 103.8,
                             'stop': 106.0})]),
        ), scen_stats['op_stats'][ssbench.CREATE_OBJECT])

    def test_calculate_scenario_stats_read(self):
        # Stats for Read
        r_first_byte_latency = [0.1, 0.2, 1.0, 0.3]
        r_last_byte_latency = [0.8, 0.5, 1.8, 0.4]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            start=100.1, stop=104.3, req_count=4,
            avg_req_per_sec=round(4 / (104.3 - 100.1), 6),
            first_byte_latency=dict(
                min='%6.3f' % min(r_first_byte_latency),
                max='%7.3f' % max(r_first_byte_latency),
                avg='%7.3f' % stats.lmean(r_first_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(r_first_byte_latency),
                median='%7.3f' % stats.lmedianscore(r_first_byte_latency),
            ),
            last_byte_latency=dict(
                min='%6.3f' % min(r_last_byte_latency),
                max='%7.3f' % max(r_last_byte_latency),
                avg='%7.3f' % stats.lmean(r_last_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(r_last_byte_latency),
                median='%7.3f' % stats.lmedianscore(r_last_byte_latency),
            ),
            size_stats=OrderedDict([
                ('tiny', {'avg_req_per_sec': 0.540541,
                        'first_byte_latency': {'avg': '%7.3f' % 0.55,
                                               'max': '%7.3f' % 1.0,
                                               'median': '%7.3f' % 0.55,
                                               'min': '%6.3f' % 0.1,
                                               'std_dev': '%7.3f' % 0.45},
                        'last_byte_latency': {'avg': '%7.3f' % 1.3,
                                              'max': '%7.3f' % 1.8,
                                              'median': '%7.3f' % 1.3,
                                              'min': '%6.3f' % 0.8,
                                              'std_dev': '%7.3f' % 0.5},
                        'req_count': 2,
                        'start': 100.1,
                        'stop': 103.8}),
                ('small', {'avg_req_per_sec': 2.0,
                          'first_byte_latency': {'avg': '%7.3f' % 0.2,
                                                 'max': '%7.3f' % 0.2,
                                                 'median': '%7.3f' % 0.2,
                                                 'min': '%6.3f' % 0.2,
                                                 'std_dev': '%7.3f' % 0.0},
                          'last_byte_latency': {'avg': '%7.3f' % 0.5,
                                                'max': '%7.3f' % 0.5,
                                                'median': '%7.3f' % 0.5,
                                                'min': '%6.3f' % 0.5,
                                                'std_dev': '%7.3f' % 0.0},
                          'req_count': 1,
                          'start': 103.5,
                          'stop': 104.0}),
                ('medium', {'avg_req_per_sec': 2.5,
                          'first_byte_latency': {'avg': '%7.3f' % 0.3,
                                                 'max': '%7.3f' % 0.3,
                                                 'median': '%7.3f' % 0.3,
                                                 'min': '%6.3f' % 0.3,
                                                 'std_dev': '%7.3f' % 0.0},
                          'last_byte_latency': {'avg': '%7.3f' % 0.4,
                                                'max': '%7.3f' % 0.4,
                                                'median': '%7.3f' % 0.4,
                                                'min': '%6.3f' % 0.4,
                                                'std_dev': '%7.3f' % 0.0},
                          'req_count': 1,
                          'start': 103.9,
                          'stop': 104.3})]),
        ), scen_stats['op_stats'][ssbench.READ_OBJECT])

    def test_calculate_scenario_stats_update(self):
        u_first_byte_latency = [0.2, 0.8, 0.6]
        u_last_byte_latency = [0.3, 2.8, 0.699]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            start=100.1, stop=106.4, req_count=3,
            avg_req_per_sec=round(3 / (106.4 - 100.1), 6),
            first_byte_latency=dict(
                min='%6.3f' % min(u_first_byte_latency),
                max='%7.3f' % max(u_first_byte_latency),
                avg='%7.3f' % stats.lmean(u_first_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(u_first_byte_latency),
                median='%7.3f' % stats.lmedianscore(u_first_byte_latency),
            ),
            last_byte_latency=dict(
                min='%6.3f' % min(u_last_byte_latency),
                max='%7.3f' % max(u_last_byte_latency),
                avg='%7.3f' % stats.lmean(u_last_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(u_last_byte_latency),
                median='%7.3f' % stats.lmedianscore(u_last_byte_latency),
            ),
            size_stats=OrderedDict([
                ('tiny', {'avg_req_per_sec': 1.430615,
                        'first_byte_latency': {'avg': '%7.3f' % 0.6,
                                               'max': '%7.3f' % 0.6,
                                               'median': '%7.3f' % 0.6,
                                               'min': '%6.3f' % 0.6,
                                               'std_dev': '%7.3f' % 0.0},
                        'last_byte_latency': {'avg': '%7.3f' % 0.699,
                                              'max': '%7.3f' % 0.699,
                                              'median': '%7.3f' % 0.699,
                                              'min': '%6.3f' % 0.699,
                                              'std_dev': '%7.3f' % 0.0},
                        'req_count': 1,
                        'start': 104.3,
                        'stop': 104.999}),
                ('medium', {'avg_req_per_sec': 0.357143,
                          'first_byte_latency': {'avg': '%7.3f' % 0.8,
                                                 'max': '%7.3f' % 0.8,
                                                 'median': '%7.3f' % 0.8,
                                                 'min': '%6.3f' % 0.8,
                                                 'std_dev': '%7.3f' % 0.0},
                          'last_byte_latency': {'avg': '%7.3f' % 2.8,
                                                'max': '%7.3f' % 2.8,
                                                'median': '%7.3f' % 2.8,
                                                'min': '%6.3f' % 2.8,
                                                'std_dev': '%7.3f' % 0.0},
                          'req_count': 1,
                          'start': 100.1,
                          'stop': 102.9}),
                ('large', {'avg_req_per_sec': 3.333333,
                            'first_byte_latency': {'avg': '%7.3f' % 0.2,
                                                   'max': '%7.3f' % 0.2,
                                                   'median': '%7.3f' % 0.2,
                                                   'min': '%6.3f' % 0.2,
                                                   'std_dev': '%7.3f' % 0.0},
                            'last_byte_latency': {'avg': '%7.3f' % 0.3,
                                                  'max': '%7.3f' % 0.3,
                                                  'median': '%7.3f' % 0.3,
                                                  'min': '%6.3f' % 0.3,
                                                  'std_dev': '%7.3f' % 0.0},
                            'req_count': 1,
                            'start': 106.1,
                            'stop': 106.4})]),
        ), scen_stats['op_stats'][ssbench.UPDATE_OBJECT])

    def test_calculate_scenario_stats_delete(self):
        d_first_byte_latency = [0.1, 0.5]
        d_last_byte_latency = [0.4, 0.8]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(dict(
            start=102.9, stop=103.9, req_count=2,
            avg_req_per_sec=round(2 / (103.9 - 102.9), 6),
            first_byte_latency=dict(
                min='%6.3f' % min(d_first_byte_latency),
                max='%7.3f' % max(d_first_byte_latency),
                avg='%7.3f' % stats.lmean(d_first_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(d_first_byte_latency),
                median='%7.3f' % stats.lmedianscore(d_first_byte_latency),
            ),
            last_byte_latency=dict(
                min='%6.3f' % min(d_last_byte_latency),
                max='%7.3f' % max(d_last_byte_latency),
                avg='%7.3f' % stats.lmean(d_last_byte_latency),
                std_dev='%7.3f' % stats.lsamplestdev(d_last_byte_latency),
                median='%7.3f' % stats.lmedianscore(d_last_byte_latency),
            ),
            size_stats=OrderedDict([
                ('small', {'avg_req_per_sec': 1.25,
                          'first_byte_latency': {'avg': '%7.3f' % 0.5,
                                                 'max': '%7.3f' % 0.5,
                                                 'median': '%7.3f' % 0.5,
                                                 'min': '%6.3f' % 0.5,
                                                 'std_dev': '%7.3f' % 0.0},
                          'last_byte_latency': {'avg': '%7.3f' % 0.8,
                                                'max': '%7.3f' % 0.8,
                                                'median': '%7.3f' % 0.8,
                                                'min': '%6.3f' % 0.8,
                                                'std_dev': '%7.3f' % 0.0},
                          'req_count': 1,
                          'start': 103.1,
                          'stop': 103.9}),
                ('large', {'avg_req_per_sec': 2.5,
                            'first_byte_latency': {'avg': '%7.3f' % 0.1,
                                                   'max': '%7.3f' % 0.1,
                                                   'median': '%7.3f' % 0.1,
                                                   'min': '%6.3f' % 0.1,
                                                   'std_dev': '%7.3f' % 0.0},
                            'last_byte_latency': {'avg': '%7.3f' % 0.4,
                                                  'max': '%7.3f' % 0.4,
                                                  'median': '%7.3f' % 0.4,
                                                  'min': '%6.3f' % 0.4,
                                                  'std_dev': '%7.3f' % 0.0},
                            'req_count': 1,
                            'start': 102.9,
                            'stop': 103.3})]),
        ), scen_stats['op_stats'][ssbench.DELETE_OBJECT])

    def test_calculate_scenario_size_stats(self):
        d_first_byte_latency = [0.1, 0.5]
        d_last_byte_latency = [0.4, 0.8]
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertDictEqual(OrderedDict([
            ('tiny', {'avg_req_per_sec': 0.816493,
                    'first_byte_latency': {'avg': '%7.3f' % 0.45,
                                           'max': '%7.3f' % 1.0,
                                           'median': '%7.3f' % 0.35,
                                           'min': '%6.3f' % 0.1,
                                           'std_dev': '%7.3f' % 0.377492},
                    'last_byte_latency': {'avg': '%7.3f' % 0.87475,
                                          'max': '%7.3f' % 1.8,
                                          'median': '%7.3f' % 0.7494,
                                          'min': '%6.3f' % 0.2,
                                          'std_dev': '%7.3f' % 0.580485},
                    'req_count': 4,
                    'start': 100.1,
                    'stop': 104.999}),
            ('small', {'avg_req_per_sec': 0.75,
                      'first_byte_latency': {'avg': '%7.3f' % 0.566667,
                                             'max': '%7.3f' % 1.0,
                                             'median': '%7.3f' % 0.5,
                                             'min': '%6.3f' % 0.2,
                                             'std_dev': '%7.3f' % 0.329983},
                      'last_byte_latency': {'avg': '%7.3f' % 1.433333,
                                            'max': '%7.3f' % 3.0,
                                            'median': '%7.3f' % 0.8,
                                            'min': '%6.3f' % 0.5,
                                            'std_dev': '%7.3f' % 1.11455},
                      'req_count': 3,
                      'start': 100.0,
                      'stop': 104.0}),
            ('medium', {'avg_req_per_sec': 0.47619,
                      'first_byte_latency': {'avg': '%7.3f' % 0.55,
                                             'max': '%7.3f' % 0.8,
                                             'median': '%7.3f' % 0.55,
                                             'min': '%6.3f' % 0.3,
                                             'std_dev': '%7.3f' % 0.25},
                      'last_byte_latency': {'avg': '%7.3f' % 1.6,
                                            'max': '%7.3f' % 2.8,
                                            'median': '%7.3f' % 1.6,
                                            'min': '%6.3f' % 0.4,
                                            'std_dev': '%7.3f' % 1.2},
                      'req_count': 2,
                      'start': 100.1,
                      'stop': 104.3}),
            ('large', {'avg_req_per_sec': 0.571429,
                        'first_byte_latency': {'avg': '%7.3f' % 0.15,
                                               'max': '%7.3f' % 0.2,
                                               'median': '%7.3f' % 0.15,
                                               'min': '%6.3f' % 0.1,
                                               'std_dev': '%7.3f' % 0.05},
                        'last_byte_latency': {'avg': '%7.3f' % 0.35,
                                              'max': '%7.3f' % 0.4,
                                              'median': '%7.3f' % 0.35,
                                              'min': '%6.3f' % 0.3,
                                              'std_dev': '%7.3f' % 0.05},
                        'req_count': 2,
                        'start': 102.9,
                        'stop': 106.4}),
            ('huge', {'avg_req_per_sec': 0.454545,
                         'first_byte_latency': {'avg': '%7.3f' % 1.2,
                                                'max': '%7.3f' % 1.2,
                                                'median': '%7.3f' % 1.2,
                                                'min': '%6.3f' % 1.2,
                                                'std_dev': '%7.3f' % 0.0},
                         'last_byte_latency': {'avg': '%7.3f' % 2.2,
                                               'max': '%7.3f' % 2.2,
                                               'median': '%7.3f' % 2.2,
                                               'min': '%6.3f' % 2.2,
                                               'std_dev': '%7.3f' % 0.0},
                         'req_count': 1,
                         'start': 103.8,
                         'stop': 106.0})]),
            scen_stats['size_stats'])

    def test_calculate_scenario_stats_time_series(self):
        # Time series (reqs completed each second)
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
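        # start=101 presumably comes from the earliest completion (101.9), and
        # each bucket counts completions in one whole second:
        # [101,102)=1, [102,103)=1, [103,104)=5, [104,105)=3, [105,106)=0,
        # [106,107)=2.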
        self.assertDictEqual(dict(
            start=101,
            data=[1, 1, 5, 3, 0, 2],
        ), scen_stats['time_series'])

    def test_write_rps_histogram(self):
        # Write out time series data (requests-per-second histogram) to an
        # already open CSV file
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)

        test_csv_file = StringIO()
        self.master.write_rps_histogram(scen_stats, test_csv_file)
        test_csv_file.seek(0)
        reader = csv.reader(test_csv_file)
        self.assertListEqual([
            ["Seconds Since Start", "Requests Completed"],
            ['1', '1'],
            ['2', '1'],
            ['3', '5'],
            ['4', '3'],
            ['5', '0'],
            ['6', '2'],
        ], list(reader))


    def test_generate_scenario_report(self):
        # Render the full scenario report from the calculated stats
        scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                          self.stub_results)
        self.assertListEqual(u"""
Master Test Scenario - ablkei
  C   R   U   D     Worker count:   3   Concurrency:   2
% 50  30  10  10

TOTAL
       Count:    12  Average requests per second:   1.9
                            min       max      avg      std_dev    median
       First-byte latency:  0.100 -   1.200    0.508  (  0.386)    0.400  (all obj sizes)
       Last-byte  latency:  0.200 -   3.000    1.158  (  0.970)    0.749  (all obj sizes)
       First-byte latency:  0.100 -   1.000    0.450  (  0.377)    0.350  (tiny objs)
       Last-byte  latency:  0.200 -   1.800    0.875  (  0.580)    0.749  (tiny objs)
       First-byte latency:  0.200 -   1.000    0.567  (  0.330)    0.500  (small objs)
       Last-byte  latency:  0.500 -   3.000    1.433  (  1.115)    0.800  (small objs)
       First-byte latency:  0.300 -   0.800    0.550  (  0.250)    0.550  (medium objs)
       Last-byte  latency:  0.400 -   2.800    1.600  (  1.200)    1.600  (medium objs)
       First-byte latency:  0.100 -   0.200    0.150  (  0.050)    0.150  (large objs)
       Last-byte  latency:  0.300 -   0.400    0.350  (  0.050)    0.350  (large objs)
       First-byte latency:  1.200 -   1.200    1.200  (  0.000)    1.200  (huge objs)
       Last-byte  latency:  2.200 -   2.200    2.200  (  0.000)    2.200  (huge objs)

CREATE
       Count:     3  Average requests per second:   0.5
                            min       max      avg      std_dev    median
       First-byte latency:  0.100 -   1.200    0.767  (  0.478)    1.000  (all obj sizes)
       Last-byte  latency:  0.200 -   3.000    1.800  (  1.178)    2.200  (all obj sizes)
       First-byte latency:  0.100 -   0.100    0.100  (  0.000)    0.100  (tiny objs)
       Last-byte  latency:  0.200 -   0.200    0.200  (  0.000)    0.200  (tiny objs)
       First-byte latency:  1.000 -   1.000    1.000  (  0.000)    1.000  (small objs)
       Last-byte  latency:  3.000 -   3.000    3.000  (  0.000)    3.000  (small objs)
       First-byte latency:  1.200 -   1.200    1.200  (  0.000)    1.200  (huge objs)
       Last-byte  latency:  2.200 -   2.200    2.200  (  0.000)    2.200  (huge objs)

READ
       Count:     4  Average requests per second:   1.0
                            min       max      avg      std_dev    median
       First-byte latency:  0.100 -   1.000    0.400  (  0.354)    0.250  (all obj sizes)
       Last-byte  latency:  0.400 -   1.800    0.875  (  0.554)    0.650  (all obj sizes)
       First-byte latency:  0.100 -   1.000    0.550  (  0.450)    0.550  (tiny objs)
       Last-byte  latency:  0.800 -   1.800    1.300  (  0.500)    1.300  (tiny objs)
       First-byte latency:  0.200 -   0.200    0.200  (  0.000)    0.200  (small objs)
       Last-byte  latency:  0.500 -   0.500    0.500  (  0.000)    0.500  (small objs)
       First-byte latency:  0.300 -   0.300    0.300  (  0.000)    0.300  (medium objs)
       Last-byte  latency:  0.400 -   0.400    0.400  (  0.000)    0.400  (medium objs)

UPDATE
       Count:     3  Average requests per second:   0.5
                            min       max      avg      std_dev    median
       First-byte latency:  0.200 -   0.800    0.533  (  0.249)    0.600  (all obj sizes)
       Last-byte  latency:  0.300 -   2.800    1.266  (  1.097)    0.699  (all obj sizes)
       First-byte latency:  0.600 -   0.600    0.600  (  0.000)    0.600  (tiny objs)
       Last-byte  latency:  0.699 -   0.699    0.699  (  0.000)    0.699  (tiny objs)
       First-byte latency:  0.800 -   0.800    0.800  (  0.000)    0.800  (medium objs)
       Last-byte  latency:  2.800 -   2.800    2.800  (  0.000)    2.800  (medium objs)
       First-byte latency:  0.200 -   0.200    0.200  (  0.000)    0.200  (large objs)
       Last-byte  latency:  0.300 -   0.300    0.300  (  0.000)    0.300  (large objs)

DELETE
       Count:     2  Average requests per second:   2.0
                            min       max      avg      std_dev    median
       First-byte latency:  0.100 -   0.500    0.300  (  0.200)    0.300  (all obj sizes)
       Last-byte  latency:  0.400 -   0.800    0.600  (  0.200)    0.600  (all obj sizes)
       First-byte latency:  0.500 -   0.500    0.500  (  0.000)    0.500  (small objs)
       Last-byte  latency:  0.800 -   0.800    0.800  (  0.000)    0.800  (small objs)
       First-byte latency:  0.100 -   0.100    0.100  (  0.000)    0.100  (large objs)
       Last-byte  latency:  0.400 -   0.400    0.400  (  0.000)    0.400  (large objs)


""".split('\n'), self.master.generate_scenario_report(self.scenario, scen_stats).split('\n'))