Example #1
0
 def __init__(self, name_format):
     """Initialize a mock benchmark session preloaded with saved results.

     Loads only the most recent saved benchmark JSON file from STORAGE
     and wraps each benchmark entry in a ``Namespace`` that mimics the
     real benchmark objects (including an ``as_dict`` callable).

     :param name_format: key into ``NAME_FORMATTERS`` selecting how
         benchmark names are rendered in reports.
     """
     self.histogram = True
     self.verbose = False
     self.benchmarks = []
     self.performance_regressions = []
     self.sort = u"min"
     self.compare = '0001'
     self.logger = logging.getLogger(__name__)
     self.machine_id = "FoobarOS"
     self.machine_info = {'foo': 'bar'}
     self.save = self.autosave = self.json = False
     self.name_format = NAME_FORMATTERS[name_format]
     self.options = {
         'min_rounds': 123,
         'min_time': 234,
         'max_time': 345,
         'use_cprofile': False,
     }
     self.cprofile_sort_by = 'cumtime'
     self.compare_fail = []
     # Stub out the plugin hooks a real pytest config would provide.
     self.config = Namespace(hook=Namespace(
         pytest_benchmark_group_stats=pytest_benchmark_group_stats,
         pytest_benchmark_generate_machine_info=lambda **kwargs:
         {'foo': 'bar'},
         pytest_benchmark_update_machine_info=lambda **kwargs: None,
         pytest_benchmark_compare_machine_info=
         pytest_benchmark_compare_machine_info,
         pytest_benchmark_generate_json=pytest_benchmark_generate_json,
         pytest_benchmark_update_json=lambda **kwargs: None,
         pytest_benchmark_generate_commit_info=lambda **kwargs:
         {'foo': 'bar'},
         pytest_benchmark_update_commit_info=lambda **kwargs: None,
     ))
     self.storage = FileStorage(str(STORAGE),
                                default_machine_id=get_machine_id(),
                                logger=self.logger)
     self.group_by = 'group'
     self.columns = [
         'min', 'max', 'mean', 'stddev', 'median', 'iqr', 'outliers',
         'rounds', 'iterations'
     ]
     # Iterate newest-first and stop after the first file (``break``):
     # only the most recent saved run is loaded.
     for bench_file in reversed(
             self.storage.query("[0-9][0-9][0-9][0-9]_*")):
         # BUG FIX: the 'U' (universal newlines) open-mode flag was
         # deprecated in Python 3.4 and removed in 3.11; plain 'r' is
         # equivalent on Python 3.
         with bench_file.open('r') as fh:
             data = json.load(fh)
         self.benchmarks.extend(
             # ``_bench=bench`` binds each benchmark at lambda-definition
             # time, avoiding the late-binding closure pitfall.
             Namespace(as_dict=lambda include_data=False, stats=True, flat=
                       False, _bench=bench, cprofile='cumtime': dict(
                           _bench, **_bench["stats"])
                       if flat else dict(_bench),
                       name=bench['name'],
                       fullname=bench['fullname'],
                       group=bench['group'],
                       options=bench['options'],
                       has_error=False,
                       params=None,
                       **bench['stats']) for bench in data['benchmarks'])
         break
class MockSession(BenchmarkSession):
    """A ``BenchmarkSession`` stand-in for tests.

    Provides fixed machine info, stubbed plugin hooks, and preloads the
    most recent saved benchmark run from STORAGE as ``Namespace`` objects
    that mimic real benchmark results.
    """

    def __init__(self, name_format):
        """Build the mock session.

        :param name_format: key into ``NAME_FORMATTERS`` selecting how
            benchmark names are rendered in reports.
        """
        self.histogram = True
        self.verbose = False
        self.benchmarks = []
        self.performance_regressions = []
        self.sort = u"min"
        self.compare = '0001'
        logger = logging.getLogger(__name__)
        # Wrap the logger so that extra keyword arguments passed by the
        # session (e.g. hints) are silently swallowed by ``**_kwargs``.
        self.logger = Namespace(
            debug=lambda *args, **_kwargs: logger.debug(*args),
            info=lambda *args, **_kwargs: logger.info(*args),
            # BUG FIX: ``Logger.warn`` is a deprecated alias (since
            # Python 3.3) that emits a DeprecationWarning — delegate to
            # ``Logger.warning`` instead.  The ``warn=`` key is kept so
            # callers using ``self.logger.warn(...)`` still work.
            warn=lambda *args, **_kwargs: logger.warning(*args),
            error=lambda *args, **_kwargs: logger.error(*args),
        )
        self.machine_id = "FoobarOS"
        self.machine_info = {'foo': 'bar'}
        self.save = self.autosave = self.json = False
        self.name_format = NAME_FORMATTERS[name_format]
        self.options = {
            'min_rounds': 123,
            'min_time': 234,
            'max_time': 345,
            'cprofile': False,
        }
        self.cprofile_sort_by = 'cumtime'
        self.compare_fail = []
        # Stub out the plugin hooks a real pytest config would provide.
        self.config = Namespace(hook=Namespace(
            pytest_benchmark_scale_unit=pytest_benchmark_scale_unit,
            pytest_benchmark_group_stats=pytest_benchmark_group_stats,
            pytest_benchmark_generate_machine_info=lambda **kwargs: {'foo': 'bar'},
            pytest_benchmark_update_machine_info=lambda **kwargs: None,
            pytest_benchmark_compare_machine_info=pytest_benchmark_compare_machine_info,
            pytest_benchmark_generate_json=pytest_benchmark_generate_json,
            pytest_benchmark_update_json=lambda **kwargs: None,
            pytest_benchmark_generate_commit_info=lambda **kwargs: {'foo': 'bar'},
            pytest_benchmark_update_commit_info=lambda **kwargs: None,
        ))
        self.storage = FileStorage(str(STORAGE), default_machine_id=get_machine_id(), logger=self.logger)
        self.group_by = 'group'
        self.columns = ['min', 'max', 'mean', 'stddev', 'median', 'iqr',
                        'outliers', 'rounds', 'iterations', 'ops']
        # Iterate newest-first and stop after the first file (``break``):
        # only the most recent saved run is loaded.
        for bench_file, data in reversed(list(self.storage.load("[0-9][0-9][0-9][0-9]_*"))):
            self.benchmarks.extend(
                Namespace(
                    # ``_bench=bench`` binds each benchmark at
                    # lambda-definition time, avoiding the late-binding
                    # closure pitfall.
                    as_dict=lambda include_data=False, stats=True, flat=False, _bench=bench, cprofile='cumtime':
                        dict(_bench, **_bench["stats"]) if flat else dict(_bench),
                    name=bench['name'],
                    fullname=bench['fullname'],
                    group=bench['group'],
                    options=bench['options'],
                    has_error=False,
                    params=None,
                    **bench['stats']
                )
                for bench in data['benchmarks']
            )
            break