Пример #1
0
 def test_calculate_scenario_stats_worker2(self):
     """Check the per-worker stats reported for worker 2's four requests."""
     fb = [0.8, 0.1, 0.1, 0.2]  # worker 2 first-byte latencies
     lb = [2.8, 0.4, 0.2, 0.5]  # worker 2 last-byte latencies
     scen_stats = self.master.calculate_scenario_stats(
         self.scenario, self.stub_results)
     expected = {
         'start': 100.1,
         'stop': 104.0,
         'req_count': 4,
         'avg_req_per_sec': round(4 / (104.0 - 100.1), 6),
         # Latency summaries are pre-formatted strings, matching the
         # formatting the code under test applies.
         'first_byte_latency': {
             'min': '%6.3f' % min(fb),
             'max': '%7.3f' % max(fb),
             'avg': '%7.3f' % stats.lmean(fb),
             'pctile': '%7.3f' % 0.8,
             'std_dev': '%7.3f' % stats.lsamplestdev(fb),
             'median': '%7.3f' % stats.lmedianscore(fb),
         },
         'last_byte_latency': {
             'min': '%6.3f' % min(lb),
             'max': '%7.3f' % max(lb),
             'avg': '%7.3f' % stats.lmean(lb),
             'pctile': '%7.3f' % 2.8,
             'std_dev': '%7.3f' % stats.lsamplestdev(lb),
             'median': '%7.3f' % stats.lmedianscore(lb),
         },
         # Worst latencies are (latency, transaction id) pairs.
         'worst_first_byte_latency': (float(max(fb)), 'txID006'),
         'worst_last_byte_latency': (float(max(lb)), 'txID006'),
     }
     self.assertDictEqual(expected, scen_stats['worker_stats'][2])
Пример #2
0
 def test_calculate_scenario_stats_worker3(self):
     """Check the per-worker stats reported for worker 3's four requests."""
     fb = [1, 0.5, 0.3, 0.6]      # worker 3 first-byte latencies
     lb = [1.8, 0.8, 0.4, 0.699]  # worker 3 last-byte latencies
     scen_stats = self.master.calculate_scenario_stats(
         self.scenario, self.stub_results)
     expected = {
         'start': 100.1,
         'stop': 104.999,
         'req_count': 4,
         'avg_req_per_sec': round(4 / (104.999 - 100.1), 6),
         # Latency summaries are pre-formatted strings, matching the
         # formatting the code under test applies.
         'first_byte_latency': {
             'min': '%6.3f' % min(fb),
             'max': '%7.3f' % max(fb),
             'avg': '%7.3f' % stats.lmean(fb),
             'pctile': '%7.3f' % 1.0,
             'std_dev': '%7.3f' % stats.lsamplestdev(fb),
             'median': '%7.3f' % stats.lmedianscore(fb),
         },
         'last_byte_latency': {
             'min': '%6.3f' % min(lb),
             'max': '%7.3f' % max(lb),
             'avg': '%7.3f' % stats.lmean(lb),
             'pctile': '%7.3f' % 1.8,
             'std_dev': '%7.3f' % stats.lsamplestdev(lb),
             'median': '%7.3f' % stats.lmedianscore(lb),
         },
         # Worst latencies are (latency, transaction id) pairs.
         'worst_first_byte_latency': (float(max(fb)), 'txID010'),
         'worst_last_byte_latency': (float(max(lb)), 'txID010'),
     }
     self.assertDictEqual(expected, scen_stats['worker_stats'][3])
Пример #3
0
 def test_calculate_scenario_stats_worker1(self):
     """Check the per-worker stats reported for worker 1's four requests."""
     fb = [1.0, 0.1, 1.2, 0.2]  # worker 1 first-byte latencies
     lb = [3.0, 0.8, 2.2, 0.3]  # worker 1 last-byte latencies
     scen_stats = self.master.calculate_scenario_stats(
         self.scenario, self.stub_results)
     expected = {
         'start': 100.0,
         'stop': 106.4,
         'req_count': 4,
         'avg_req_per_sec': round(4 / (106.4 - 100), 6),
         # Latency summaries are pre-formatted strings, matching the
         # formatting the code under test applies.
         'first_byte_latency': {
             'min': '%6.3f' % min(fb),
             'max': '%7.3f' % max(fb),
             'avg': '%7.3f' % stats.lmean(fb),
             'pctile': '%7.3f' % 1.2,
             'std_dev': '%7.3f' % stats.lsamplestdev(fb),
             'median': '%7.3f' % stats.lmedianscore(fb),
         },
         'last_byte_latency': {
             'min': '%6.3f' % min(lb),
             'max': '%7.3f' % max(lb),
             'avg': '%7.3f' % stats.lmean(lb),
             'pctile': '%7.3f' % 3.0,
             'std_dev': '%7.3f' % stats.lsamplestdev(lb),
             'median': '%7.3f' % stats.lmedianscore(lb),
         },
         # Worst first/last byte come from different transactions here.
         'worst_first_byte_latency': (float(max(fb)), 'txID004'),
         'worst_last_byte_latency': (float(max(lb)), 'txID002'),
     }
     self.assertDictEqual(expected, scen_stats['worker_stats'][1])
Пример #4
0
 def test_calculate_scenario_stats_delete(self):
     """Verify per-operation stats for DELETE requests, including the
     per-size-category ('small'/'large') breakdown in 'size_stats'.
     """
     # First/last-byte latencies of the two stubbed DELETE results.
     d_first_byte_latency = [0.1, 0.5]
     d_last_byte_latency = [0.4, 0.8]
     scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                       self.stub_results)
     # NOTE(review): unlike the sibling op-stats tests, no 'pctile' or
     # 'worst_*_latency' keys are expected here — confirm that matches
     # calculate_scenario_stats() output for DELETE.
     self.assertDictEqual(dict(
         start=102.9, stop=103.9, req_count=2,
         avg_req_per_sec=round(2 / (103.9 - 102.9), 6),
         first_byte_latency=dict(
             min='%6.3f' % min(d_first_byte_latency),
             max='%7.3f' % max(d_first_byte_latency),
             avg='%7.3f' % stats.lmean(d_first_byte_latency),
             std_dev='%7.3f' % stats.lsamplestdev(d_first_byte_latency),
             median='%7.3f' % stats.lmedianscore(d_first_byte_latency),
         ),
         last_byte_latency=dict(
             min='%6.3f' % min(d_last_byte_latency),
             max='%7.3f' % max(d_last_byte_latency),
             avg='%7.3f' % stats.lmean(d_last_byte_latency),
             std_dev='%7.3f' % stats.lsamplestdev(d_last_byte_latency),
             median='%7.3f' % stats.lmedianscore(d_last_byte_latency),
         ),
         # Per-size-category stats; OrderedDict fixes the category order.
         size_stats=OrderedDict([
             ('small', {'avg_req_per_sec': 1.25,
                        'first_byte_latency': {'avg': '%7.3f' % 0.5,
                                               'max': '%7.3f' % 0.5,
                                               'median': '%7.3f' % 0.5,
                                               'min': '%6.3f' % 0.5,
                                               'std_dev': '%7.3f' % 0.0},
                        'last_byte_latency': {'avg': '%7.3f' % 0.8,
                                              'max': '%7.3f' % 0.8,
                                              'median': '%7.3f' % 0.8,
                                              'min': '%6.3f' % 0.8,
                                              'std_dev': '%7.3f' % 0.0},
                        'req_count': 1,
                        'start': 103.1,
                        'stop': 103.9}),
             ('large', {'avg_req_per_sec': 2.5,
                        'first_byte_latency': {'avg': '%7.3f' % 0.1,
                                               'max': '%7.3f' % 0.1,
                                               'median': '%7.3f' % 0.1,
                                               'min': '%6.3f' % 0.1,
                                               'std_dev': '%7.3f' % 0.0},
                        'last_byte_latency': {'avg': '%7.3f' % 0.4,
                                              'max': '%7.3f' % 0.4,
                                              'median': '%7.3f' % 0.4,
                                              'min': '%6.3f' % 0.4,
                                              'std_dev': '%7.3f' % 0.0},
                        'req_count': 1,
                        'start': 102.9,
                        'stop': 103.3})]),
     ), scen_stats['op_stats'][ssbench.DELETE_OBJECT])
Пример #5
0
 def test_calculate_scenario_stats_aggregate_low_pctile(self):
     """Aggregate (all-worker) stats with a low percentile setting.

     With nth_pctile=20 the reported 'pctile' value is the 20th
     percentile of the 12 pooled latency samples, i.e. sorted(...)[2].
     """
     # Latencies pooled from all three workers' stubbed results.
     first_byte_latency_all = [1, 0.1, 1.2, 0.2, 0.8, 0.1, 0.1,
                               0.2, 1, 0.5, 0.3, 0.6]
     last_byte_latency_all = [3, 0.8, 2.2, 0.3, 2.8, 0.4, 0.2,
                              0.5, 1.8, 0.8, 0.4, 0.699]
     scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                       self.stub_results,
                                                       nth_pctile=20)
     self.assertDictEqual(dict(
         worker_count=3, start=100.0, stop=106.4, req_count=12,
         avg_req_per_sec=round(12 / (106.4 - 100), 6),
         first_byte_latency=dict(
             min='%6.3f' % 0.1,
             max='%7.3f' % 1.2,
             avg='%7.3f' % stats.lmean(first_byte_latency_all),
             pctile='%7.3f' % sorted(first_byte_latency_all)[2],
             std_dev='%7.3f' % stats.lsamplestdev(first_byte_latency_all),
             median='%7.3f' % stats.lmedianscore(first_byte_latency_all),
         ),
         last_byte_latency=dict(
             min='%6.3f' % 0.2,
             max='%7.3f' % 3.0,
             avg='%7.3f' % stats.lmean(last_byte_latency_all),
             pctile='%7.3f' % sorted(last_byte_latency_all)[2],
             std_dev='%7.3f' % stats.lsamplestdev(last_byte_latency_all),
             # NOTE(review): hardcoded where lmedianscore(...) was
             # expected (commented out below).  The true median is
             # (0.699 + 0.8) / 2 = 0.7495, exactly on a '%7.3f'
             # rounding boundary — presumably the value computed by the
             # code under test (from stop-start deltas) rounds down to
             # 0.749 while the literal-list formula rounds the other
             # way.  Confirm before "fixing" this expectation.
             median='  0.749',  # XXX why??
             # median='%7.3f' % stats.lmedianscore(last_byte_latency_all),
         ),
         worst_first_byte_latency=(1.2, 'txID004'),
         worst_last_byte_latency=(3.0, 'txID002'),
     ), scen_stats['agg_stats'])
Пример #6
0
def statdict(times, start, interval):
    """Build a summary-statistics record for one timing interval.

    All time values in *times* (seconds) are scaled to milliseconds in
    the returned record.  ``dibj`` is presumably a dict-like record
    constructor defined elsewhere in this module — not visible here.
    """
    # Sample standard deviation needs at least two samples; otherwise
    # report zero rather than letting lstdev() fail.
    standard_deviation = stats.lstdev(times) if len(times) > 1 else 0
    return dibj(interval=interval,
                start=start,
                max=max(times) * 1000,
                min=min(times) * 1000,
                mean=stats.lmean(times) * 1000,
                median=stats.lmedianscore(times) * 1000,
                p90th=abs(stats.lscoreatpercentile(times, 0.9)) * 1000,
                p98th=abs(stats.lscoreatpercentile(times, 0.98)) * 1000,
                p99th=abs(stats.lscoreatpercentile(times, 0.99)) * 1000,
                standard_deviation=standard_deviation * 1000,
                howmany=len(times))
Пример #7
0
def statdict(times, start, interval):
    """Assemble per-interval summary statistics, scaled to milliseconds.

    ``dibj`` looks like a dict-like record constructor defined elsewhere
    in this module — confirm; it is not visible from here.
    """
    if len(times) > 1:
        standard_deviation = stats.lstdev(times)
    else:
        # Standard deviation is undefined for a single sample.
        standard_deviation = 0
    ms = 1000  # seconds -> milliseconds scale factor
    return dibj(interval=interval,
                start=start,
                max=max(times) * ms,
                min=min(times) * ms,
                mean=stats.lmean(times) * ms,
                median=stats.lmedianscore(times) * ms,
                p90th=abs(stats.lscoreatpercentile(times, 0.9)) * ms,
                p98th=abs(stats.lscoreatpercentile(times, 0.98)) * ms,
                p99th=abs(stats.lscoreatpercentile(times, 0.99)) * ms,
                standard_deviation=standard_deviation * ms,
                howmany=len(times))
Пример #8
0
 def test_calculate_scenario_stats_update(self):
     """Verify per-operation stats for UPDATE requests, including the
     per-size-category ('tiny'/'medium'/'large') breakdown.
     """
     # Latencies of the three stubbed UPDATE results.
     u_first_byte_latency = [0.2, 0.8, 0.6]
     u_last_byte_latency = [0.3, 2.8, 0.699]
     scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                       self.stub_results)
     self.assertDictEqual(dict(
         start=100.1, stop=106.4, req_count=3,
         avg_req_per_sec=round(3 / (106.4 - 100.1), 6),
         first_byte_latency=dict(
             min='%6.3f' % min(u_first_byte_latency),
             max='%7.3f' % max(u_first_byte_latency),
             avg='%7.3f' % stats.lmean(u_first_byte_latency),
             pctile='%7.3f' % 0.8,
             std_dev='%7.3f' % stats.lsamplestdev(u_first_byte_latency),
             median='%7.3f' % stats.lmedianscore(u_first_byte_latency),
         ),
         # Worst latencies are (latency, transaction id) pairs.
         worst_first_byte_latency=(max(u_first_byte_latency), 'txID006'),
         last_byte_latency=dict(
             min='%6.3f' % min(u_last_byte_latency),
             max='%7.3f' % max(u_last_byte_latency),
             avg='%7.3f' % stats.lmean(u_last_byte_latency),
             pctile='%7.3f' % 2.8,
             std_dev='%7.3f' % stats.lsamplestdev(u_last_byte_latency),
             median='%7.3f' % stats.lmedianscore(u_last_byte_latency),
         ),
         worst_last_byte_latency=(max(u_last_byte_latency), 'txID006'),
         # Per-size-category stats; OrderedDict fixes the category order.
         size_stats=OrderedDict([
             ('tiny', {'avg_req_per_sec': 1.430615,
                       'first_byte_latency': {'avg': '%7.3f' % 0.6,
                                              'pctile': '%7.3f' % 0.6,
                                              'max': '%7.3f' % 0.6,
                                              'median': '%7.3f' % 0.6,
                                              'min': '%6.3f' % 0.6,
                                              'std_dev': '%7.3f' % 0.0},
                       'worst_first_byte_latency': (0.6, 'txID013'),
                       'last_byte_latency': {'avg': '%7.3f' % 0.699,
                                             'pctile': '%7.3f' % 0.699,
                                             'max': '%7.3f' % 0.699,
                                             'median': '%7.3f' % 0.699,
                                             'min': '%6.3f' % 0.699,
                                             'std_dev': '%7.3f' % 0.0},
                       'worst_last_byte_latency': (0.699, 'txID013'),
                       'req_count': 1,
                       'start': 104.3,
                       'stop': 104.999}),
             ('medium', {'avg_req_per_sec': 0.357143,
                         'first_byte_latency': {'avg': '%7.3f' % 0.8,
                                                'pctile': '%7.3f' % 0.8,
                                                'max': '%7.3f' % 0.8,
                                                'median': '%7.3f' % 0.8,
                                                'min': '%6.3f' % 0.8,
                                                'std_dev': '%7.3f' % 0.0},
                         'worst_first_byte_latency': (0.8, 'txID006'),
                         'last_byte_latency': {'avg': '%7.3f' % 2.8,
                                               'pctile': '%7.3f' % 2.8,
                                               'max': '%7.3f' % 2.8,
                                               'median': '%7.3f' % 2.8,
                                               'min': '%6.3f' % 2.8,
                                               'std_dev': '%7.3f' % 0.0},
                         'worst_last_byte_latency': (2.8, 'txID006'),
                         'req_count': 1,
                         'start': 100.1,
                         'stop': 102.9}),
             ('large', {'avg_req_per_sec': 3.333333,
                        'first_byte_latency': {'avg': '%7.3f' % 0.2,
                                               'pctile': '%7.3f' % 0.2,
                                               'max': '%7.3f' % 0.2,
                                               'median': '%7.3f' % 0.2,
                                               'min': '%6.3f' % 0.2,
                                               'std_dev': '%7.3f' % 0.0},
                        'worst_first_byte_latency': (0.2, 'txID005'),
                        'last_byte_latency': {'avg': '%7.3f' % 0.3,
                                              'pctile': '%7.3f' % 0.3,
                                              'max': '%7.3f' % 0.3,
                                              'median': '%7.3f' % 0.3,
                                              'min': '%6.3f' % 0.3,
                                              'std_dev': '%7.3f' % 0.0},
                        'worst_last_byte_latency': (0.3, 'txID005'),
                        'req_count': 1,
                        'start': 106.1,
                        'stop': 106.4})]),
     ), scen_stats['op_stats'][ssbench.UPDATE_OBJECT])
Пример #9
0
 def test_calculate_scenario_stats_read(self):
     """Verify per-operation stats for READ requests, including the
     per-size-category ('tiny'/'small'/'medium') breakdown.
     """
     # Latencies of the four stubbed READ results.
     r_first_byte_latency = [0.1, 0.2, 1.0, 0.3]
     r_last_byte_latency = [0.8, 0.5, 1.8, 0.4]
     scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                       self.stub_results)
     self.assertDictEqual(dict(
         start=100.1, stop=104.3, req_count=4,
         avg_req_per_sec=round(4 / (104.3 - 100.1), 6),
         # With the default percentile, 'pctile' equals the max sample.
         first_byte_latency=dict(
             min='%6.3f' % min(r_first_byte_latency),
             max='%7.3f' % max(r_first_byte_latency),
             pctile='%7.3f' % max(r_first_byte_latency),
             avg='%7.3f' % stats.lmean(r_first_byte_latency),
             std_dev='%7.3f' % stats.lsamplestdev(r_first_byte_latency),
             median='%7.3f' % stats.lmedianscore(r_first_byte_latency),
         ),
         last_byte_latency=dict(
             min='%6.3f' % min(r_last_byte_latency),
             max='%7.3f' % max(r_last_byte_latency),
             pctile='%7.3f' % max(r_last_byte_latency),
             avg='%7.3f' % stats.lmean(r_last_byte_latency),
             std_dev='%7.3f' % stats.lsamplestdev(r_last_byte_latency),
             median='%7.3f' % stats.lmedianscore(r_last_byte_latency),
         ),
         # Worst latencies are (latency, transaction id) pairs.
         worst_first_byte_latency=(max(r_first_byte_latency), 'txID010'),
         worst_last_byte_latency=(max(r_last_byte_latency), 'txID010'),
         # Per-size-category stats; OrderedDict fixes the category order.
         size_stats=OrderedDict([
             ('tiny', {'avg_req_per_sec': 0.540541,
                       'first_byte_latency': {'avg': '%7.3f' % 0.55,
                                              'max': '%7.3f' % 1.0,
                                              'pctile': '%7.3f' % 1.0,
                                              'median': '%7.3f' % 0.55,
                                              'min': '%6.3f' % 0.1,
                                              'std_dev': '%7.3f' % 0.45},
                       'last_byte_latency': {'avg': '%7.3f' % 1.3,
                                             'max': '%7.3f' % 1.8,
                                             'pctile': '%7.3f' % 1.8,
                                             'median': '%7.3f' % 1.3,
                                             'min': '%6.3f' % 0.8,
                                             'std_dev': '%7.3f' % 0.5},
                       'worst_first_byte_latency': (1.0, 'txID010'),
                       'worst_last_byte_latency': (1.8, 'txID010'),
                       'req_count': 2,
                       'start': 100.1,
                       'stop': 103.8}),
             ('small', {'avg_req_per_sec': 2.0,
                        'first_byte_latency': {'avg': '%7.3f' % 0.2,
                                               'max': '%7.3f' % 0.2,
                                               'pctile': '%7.3f' % 0.2,
                                               'median': '%7.3f' % 0.2,
                                               'min': '%6.3f' % 0.2,
                                               'std_dev': '%7.3f' % 0.0},
                        'last_byte_latency': {'avg': '%7.3f' % 0.5,
                                              'max': '%7.3f' % 0.5,
                                              'pctile': '%7.3f' % 0.5,
                                              'median': '%7.3f' % 0.5,
                                              'min': '%6.3f' % 0.5,
                                              'std_dev': '%7.3f' % 0.0},
                        'worst_first_byte_latency': (0.2, 'txID009'),
                        'worst_last_byte_latency': (0.5, 'txID009'),
                        'req_count': 1,
                        'start': 103.5,
                        'stop': 104.0}),
             ('medium', {'avg_req_per_sec': 2.5,
                         'first_byte_latency': {'avg': '%7.3f' % 0.3,
                                                'max': '%7.3f' % 0.3,
                                                'pctile': '%7.3f' % 0.3,
                                                'median': '%7.3f' % 0.3,
                                                'min': '%6.3f' % 0.3,
                                                'std_dev': '%7.3f' % 0.0},
                         'last_byte_latency': {'avg': '%7.3f' % 0.4,
                                               'max': '%7.3f' % 0.4,
                                               'pctile': '%7.3f' % 0.4,
                                               'median': '%7.3f' % 0.4,
                                               'min': '%6.3f' % 0.4,
                                               'std_dev': '%7.3f' % 0.0},
                         'worst_first_byte_latency': (0.3, 'txID012'),
                         'worst_last_byte_latency': (0.4, 'txID012'),
                         'req_count': 1,
                         'start': 103.9,
                         'stop': 104.3})]),
     ), scen_stats['op_stats'][ssbench.READ_OBJECT])
Пример #10
0
 def test_calculate_scenario_stats_create(self):
     """Verify per-operation stats for CREATE requests, including the
     per-size-category ('tiny'/'small'/'huge') breakdown.
     """
     # Latencies of the three stubbed CREATE results.
     c_first_byte_latency = [1, 1.2, 0.1]
     c_last_byte_latency = [3.0, 2.2, 0.2]
     scen_stats = self.master.calculate_scenario_stats(self.scenario,
                                                       self.stub_results)
     self.assertDictEqual(dict(
         start=100.0, stop=106.0, req_count=3,
         avg_req_per_sec=round(3 / (106 - 100.0), 6),
         # With the default percentile, 'pctile' equals the max sample.
         first_byte_latency=dict(
             min='%6.3f' % min(c_first_byte_latency),
             max='%7.3f' % max(c_first_byte_latency),
             pctile='%7.3f' % max(c_first_byte_latency),
             avg='%7.3f' % stats.lmean(c_first_byte_latency),
             std_dev='%7.3f' % stats.lsamplestdev(c_first_byte_latency),
             median='%7.3f' % stats.lmedianscore(c_first_byte_latency),
         ),
         last_byte_latency=dict(
             min='%6.3f' % min(c_last_byte_latency),
             max='%7.3f' % max(c_last_byte_latency),
             pctile='%7.3f' % max(c_last_byte_latency),
             avg='%7.3f' % stats.lmean(c_last_byte_latency),
             std_dev='%7.3f' % stats.lsamplestdev(c_last_byte_latency),
             median='%7.3f' % stats.lmedianscore(c_last_byte_latency),
         ),
         # Worst first/last byte come from different transactions here.
         worst_first_byte_latency=(max(c_first_byte_latency), 'txID004'),
         worst_last_byte_latency=(max(c_last_byte_latency), 'txID002'),
         # Per-size-category stats; OrderedDict fixes the category order.
         size_stats=OrderedDict([
             ('tiny', {'avg_req_per_sec': 5.0,
                       'first_byte_latency': {'avg': '%7.3f' % 0.1,
                                              'max': '%7.3f' % 0.1,
                                              'pctile': '%7.3f' % 0.1,
                                              'median': '%7.3f' % 0.1,
                                              'min': '%6.3f' % 0.1,
                                              'std_dev': '%7.3f' % 0.0},
                       'last_byte_latency': {'avg': '%7.3f' % 0.2,
                                             'max': '%7.3f' % 0.2,
                                             'pctile': '%7.3f' % 0.2,
                                             'median': '%7.3f' % 0.2,
                                             'min': '%6.3f' % 0.2,
                                             'std_dev': '%7.3f' % 0.0},
                       'worst_first_byte_latency': (0.1, 'txID008'),
                       'worst_last_byte_latency': (0.2, 'txID008'),
                       'req_count': 1,
                       'start': 103.3,
                       'stop': 103.5}),
             ('small', {'avg_req_per_sec': 0.333333,
                        'first_byte_latency': {'avg': '%7.3f' % 1.0,
                                               'max': '%7.3f' % 1.0,
                                               'pctile': '%7.3f' % 1.0,
                                               'median': '%7.3f' % 1.0,
                                               'min': '%6.3f' % 1.0,
                                               'std_dev': '%7.3f' % 0.0},
                        'last_byte_latency': {'avg': '%7.3f' % 3.0,
                                              'max': '%7.3f' % 3.0,
                                              'pctile': '%7.3f' % 3.0,
                                              'median': '%7.3f' % 3.0,
                                              'min': '%6.3f' % 3.0,
                                              'std_dev': '%7.3f' % 0.0},
                        'worst_first_byte_latency': (1.0, 'txID002'),
                        'worst_last_byte_latency': (3.0, 'txID002'),
                        'req_count': 1,
                        'start': 100.0,
                        'stop': 103.0}),
             ('huge', {'avg_req_per_sec': 0.454545,
                       'first_byte_latency': {'avg': '%7.3f' % 1.2,
                                              'max': '%7.3f' % 1.2,
                                              'pctile': '%7.3f' % 1.2,
                                              'median': '%7.3f' % 1.2,
                                              'min': '%6.3f' % 1.2,
                                              'std_dev': '%7.3f' % 0.0},
                       'last_byte_latency': {'avg': '%7.3f' % 2.2,
                                             'max': '%7.3f' % 2.2,
                                             'pctile': '%7.3f' % 2.2,
                                             'median': '%7.3f' % 2.2,
                                             'min': '%6.3f' % 2.2,
                                             'std_dev': '%7.3f' % 0.0},
                       'worst_first_byte_latency': (1.2, 'txID004'),
                       'worst_last_byte_latency': (2.2, 'txID004'),
                       'req_count': 1,
                       'start': 103.8,
                       'stop': 106.0})]),
     ), scen_stats['op_stats'][ssbench.CREATE_OBJECT])