Example #1
 def __init__(self, name_format):
     self.histogram = True
     self.verbose = False
     self.benchmarks = []
     self.performance_regressions = []
     self.sort = u"min"
     self.compare = '0001'
     self.logger = logging.getLogger(__name__)
     self.machine_id = "FoobarOS"
     self.machine_info = {'foo': 'bar'}
     self.save = self.autosave = self.json = False
     self.name_format = NAME_FORMATTERS[name_format]
     self.options = {
         'min_rounds': 123,
         'min_time': 234,
         'max_time': 345,
         'use_cprofile': False,
     }
     self.cprofile_sort_by = 'cumtime'
     self.compare_fail = []
     self.config = Namespace(hook=Namespace(
         pytest_benchmark_group_stats=pytest_benchmark_group_stats,
         pytest_benchmark_generate_machine_info=lambda **kwargs: {'foo': 'bar'},
         pytest_benchmark_update_machine_info=lambda **kwargs: None,
         pytest_benchmark_compare_machine_info=pytest_benchmark_compare_machine_info,
         pytest_benchmark_generate_json=pytest_benchmark_generate_json,
         pytest_benchmark_update_json=lambda **kwargs: None,
         pytest_benchmark_generate_commit_info=lambda **kwargs: {'foo': 'bar'},
         pytest_benchmark_update_commit_info=lambda **kwargs: None,
     ))
     self.storage = FileStorage(str(STORAGE),
                                default_machine_id=get_machine_id(),
                                logger=self.logger)
     self.group_by = 'group'
     self.columns = [
         'min', 'max', 'mean', 'stddev', 'median', 'iqr', 'outliers',
         'rounds', 'iterations'
     ]
     for bench_file in reversed(self.storage.query("[0-9][0-9][0-9][0-9]_*")):
         # 'rU' (universal newlines) was removed in Python 3.11; plain 'r' behaves the same here
         with bench_file.open('r') as fh:
             data = json.load(fh)
         self.benchmarks.extend(
             Namespace(
                 as_dict=lambda include_data=False, stats=True, flat=False, _bench=bench, cprofile='cumtime':
                     dict(_bench, **_bench["stats"]) if flat else dict(_bench),
                 name=bench['name'],
                 fullname=bench['fullname'],
                 group=bench['group'],
                 options=bench['options'],
                 has_error=False,
                 params=None,
                 **bench['stats']
             )
             for bench in data['benchmarks']
         )
         break
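
The Namespace used throughout these fixtures is just a keyword-to-attribute bag. If the project's local helper isn't at hand, argparse.Namespace behaves the same way for what these snippets need (a minimal sketch; the field values are illustrative):

from argparse import Namespace

# Namespace(**kwargs) exposes each keyword as an attribute, which is all the
# fake benchmark records above rely on.
bench = Namespace(name='test_fast', group=None, has_error=False, params=None)
print(bench.name)   # -> 'test_fast'
print(vars(bench))  # -> {'name': 'test_fast', 'group': None, 'has_error': False, 'params': None}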
Example #2
 def __init__(self, name_format):
     self.histogram = True
     self.verbose = False
     self.benchmarks = []
     self.performance_regressions = []
     self.sort = u"min"
     self.compare = '0001'
     logger = logging.getLogger(__name__)
     self.logger = Namespace(
         debug=lambda *args, **_kwargs: logger.debug(*args),
         info=lambda *args, **_kwargs: logger.info(*args),
         warn=lambda *args, **_kwargs: logger.warning(*args),  # logging.Logger.warn is deprecated
         error=lambda *args, **_kwargs: logger.error(*args),
     )
     self.machine_id = "FoobarOS"
     self.machine_info = {'foo': 'bar'}
     self.save = self.autosave = self.json = False
     self.name_format = NAME_FORMATTERS[name_format]
     self.options = {
         'min_rounds': 123,
         'min_time': 234,
         'max_time': 345,
         'cprofile': False,
     }
     self.cprofile_sort_by = 'cumtime'
     self.compare_fail = []
     self.config = Namespace(hook=Namespace(
         pytest_benchmark_scale_unit=pytest_benchmark_scale_unit,
         pytest_benchmark_group_stats=pytest_benchmark_group_stats,
         pytest_benchmark_generate_machine_info=lambda **kwargs: {'foo': 'bar'},
         pytest_benchmark_update_machine_info=lambda **kwargs: None,
         pytest_benchmark_compare_machine_info=pytest_benchmark_compare_machine_info,
         pytest_benchmark_generate_json=pytest_benchmark_generate_json,
         pytest_benchmark_update_json=lambda **kwargs: None,
         pytest_benchmark_generate_commit_info=lambda **kwargs: {'foo': 'bar'},
         pytest_benchmark_update_commit_info=lambda **kwargs: None,
     ))
     self.storage = FileStorage(str(STORAGE), default_machine_id=get_machine_id(), logger=self.logger)
     self.group_by = 'group'
     self.columns = ['min', 'max', 'mean', 'stddev', 'median', 'iqr',
                     'outliers', 'rounds', 'iterations', 'ops']
     for bench_file, data in reversed(list(self.storage.load("[0-9][0-9][0-9][0-9]_*"))):
         self.benchmarks.extend(
             Namespace(
                 as_dict=lambda include_data=False, stats=True, flat=False, _bench=bench, cprofile='cumtime':
                     dict(_bench, **_bench["stats"]) if flat else dict(_bench),
                 name=bench['name'],
                 fullname=bench['fullname'],
                 group=bench['group'],
                 options=bench['options'],
                 has_error=False,
                 params=None,
                 **bench['stats']
             )
             for bench in data['benchmarks']
         )
         break
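
The run files these loops walk are plain JSON on disk, so they can also be inspected without the storage layer. A sketch, assuming pytest-benchmark's usual layout of numbered per-machine files with a top-level 'benchmarks' list (the path below is hypothetical):

import json
from pathlib import Path

run_file = Path('.benchmarks') / 'FoobarOS' / '0001_run.json'  # hypothetical saved run
with run_file.open('r') as fh:
    data = json.load(fh)

for bench in data['benchmarks']:
    # each record carries the same keys the fixtures above unpack into Namespace
    print(bench['fullname'], bench['stats']['min'], bench['stats']['mean'])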
Example #3
 def __init__(self, name_format):
     self.histogram = True
     self.storage = Storage(str(STORAGE), default_machine_id=get_machine_id(), logger=None)
     self.benchmarks = []
     self.performance_regressions = []
     self.sort = u"min"
     self.compare = '0001'
     self.logger = logging.getLogger(__name__)
     self.machine_id = "FoobarOS"
     self.machine_info = {'foo': 'bar'}
     self.name_format = NAME_FORMATTERS[name_format]
     self.save = self.autosave = self.json = False
     self.options = {
         'min_rounds': 123,
         'min_time': 234,
         'max_time': 345,
     }
     self.compare_fail = []
     self.config = Namespace(hook=Namespace(
         pytest_benchmark_group_stats=pytest_benchmark_group_stats,
         pytest_benchmark_generate_machine_info=lambda **kwargs: {'foo': 'bar'},
         pytest_benchmark_update_machine_info=lambda **kwargs: None,
         pytest_benchmark_compare_machine_info=pytest_benchmark_compare_machine_info,
         pytest_benchmark_generate_json=pytest_benchmark_generate_json,
         pytest_benchmark_update_json=lambda **kwargs: None,
         pytest_benchmark_generate_commit_info=lambda **kwargs: {'foo': 'bar'},
         pytest_benchmark_update_commit_info=lambda **kwargs: None,
     ))
     self.group_by = 'group'
     self.columns = ['min', 'max', 'mean', 'stddev', 'median', 'iqr',
                     'outliers', 'rounds', 'iterations']
     for bench_file in reversed(self.storage.query("[0-9][0-9][0-9][0-9]_*")):
         # 'rU' was removed in Python 3.11; plain 'r' behaves the same here
         with bench_file.open('r') as fh:
             data = json.load(fh)
         self.benchmarks.extend(
             Namespace(
                 as_dict=lambda include_data=False, stats=True, flat=False, _bench=bench:
                     dict(_bench, **_bench["stats"]) if flat else dict(_bench),
                 name=bench['name'],
                 fullname=bench['fullname'],
                 group=bench['group'],
                 options=bench['options'],
                 has_error=False,
                 params=None,
                 **bench['stats']
             )
             for bench in data['benchmarks']
         )
         break
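
All three fixtures resolve the display style through NAME_FORMATTERS[name_format]. Conceptually that table maps a style name to a callable that renders a benchmark's name; a sketch with illustrative entries (the keys mirror pytest-benchmark's --benchmark-name choices, but the bodies here are assumptions, not the library's code):

# Hypothetical stand-in for the NAME_FORMATTERS table the fixtures index into.
NAME_FORMATTERS = {
    'short': lambda bench: bench['name'],
    'normal': lambda bench: bench['fullname'],
}

fmt = NAME_FORMATTERS['short']
print(fmt({'name': 'test_fast', 'fullname': 'tests/test_x.py::test_fast'}))  # -> test_fast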
Example #4
File: elastic.py · Project: leapcode/soledad
def get_doc(seconds_blocked, request):
    fullname = request.node._nodeid
    name = request.node.name
    group = None
    # request.node.get_marker() was removed in pytest 4; get_closest_marker() is the replacement
    marker = request.node.get_closest_marker("responsivness")
    if marker:
        group = marker.kwargs.get("group")

    doc = {
        "datetime": datetime.datetime.utcnow().isoformat(),
        "commit_info": get_commit_info(),
        "fullname": fullname,
        "name": name,
        "group": group,
        "machine_info": pytest_benchmark_generate_machine_info(),
        "seconds_blocked": seconds_blocked,
    }

    # generate a doc id like the one used by pytest-benchmark
    machine_id = get_machine_id()
    tag = get_tag()
    doc_id = machine_id + "_" + tag + "_" + fullname

    return doc, doc_id
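
A sketch of how a helper like this might be driven from a test, assuming a fixture that times a blocking call before building the doc (record_responsiveness and the timing logic are hypothetical; only get_doc comes from the example above):

import time

import pytest


@pytest.fixture
def record_responsiveness(request):
    # Hypothetical glue: time a blocking callable, then build the doc and its id.
    def _record(blocking_fn):
        start = time.monotonic()
        blocking_fn()
        seconds_blocked = time.monotonic() - start
        return get_doc(seconds_blocked, request)  # -> (doc, doc_id), ready to index
    return _record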
Example #5
# -*- coding: utf-8 -*-

from pathlib import Path

from pytest_benchmark.utils import get_machine_id


DATA_PATH = Path.home() / '.csaps_benchmark'

PKG_PATH = Path(__file__).parent

PYTEST_CONFIG_PATH = PKG_PATH / 'pytest.ini'
PYTEST_CACHE_PATH = DATA_PATH / '.cache'

BENCHMARKS_PATH = PKG_PATH / 'benchmarks'

BENCHMARK_STORAGE_PATH = DATA_PATH / 'benchmarks'
REPORT_STORAGE_PATH = DATA_PATH / 'reports'

BENCHMARK_MACHINE_ID_PATH = BENCHMARK_STORAGE_PATH / get_machine_id()
REPORT_MACHINE_ID_PATH = REPORT_STORAGE_PATH / get_machine_id()
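
A natural consumer of these constants is a programmatic pytest run; a sketch, assuming pytest-benchmark is installed (--benchmark-storage and --benchmark-autosave are its real options, while the overall invocation is illustrative):

import pytest

# Run the benchmark suite, saving results under the local storage directory.
exit_code = pytest.main([
    '-c', str(PYTEST_CONFIG_PATH),
    '-o', f'cache_dir={PYTEST_CACHE_PATH}',
    str(BENCHMARKS_PATH),
    f'--benchmark-storage=file://{BENCHMARK_STORAGE_PATH}',
    '--benchmark-autosave',
])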