def run(self, result_uri, raw_result_uri, test_run_uri, task_uri, results_uri, **params):
    logger.info('Starting processing results...')
    self.position = params.get('task_position', self.MIDDLE)
    self.on_start(task_uri)

    # Map each raw-result generator name to the metric generators that can process it.
    generators = {
        'phantomas': [PhantomasMetricGenerator()],
        'mw_profiler': [ProfilerMetricGenerator()],
        'python.requests': [RequestsMetricGenerator()],
        'selenium': [SeleniumMetricGenerator()],
    }

    metrics = Collection()

    result = ApiClient.get(result_uri)
    for raw_result_uri in result['raw_results']:
        raw_result = ApiClient.get(raw_result_uri)
        # Dispatch the whole raw result once to every generator registered
        # for its generator type; each generator appends to `metrics`.
        for generator in generators[raw_result['generator']]:
            generator(metrics, raw_result)

    ApiClient.put(result_uri, {
        'test_run': test_run_uri,
        'task': task_uri,
        'results': metrics.serialize(),
    })
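# A minimal sketch, under assumption, of the raw-result payload the dispatch above
# consumes (not taken from the source): the 'generator' key selects the generator
# list and 'data' holds one entry per test run, e.g.
#     {'generator': 'phantomas', 'data': [{'metrics': {...}, 'offenders': {...}}, ...]}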
def extract(self, context, data):
    context['origin'] = 'mw_profiler'

    # Metric names reported by the MediaWiki profiler and their value types.
    metric_defs = {
        'response_time': MetricType.TIME,
        'database.queries.list': MetricType.QUERY_LIST,
        'database.queries.time': MetricType.TIME,
        'database.queries.master_count': MetricType.COUNT,
        'database.queries.slave_count': MetricType.COUNT,
        'memcached.time': MetricType.TIME,
        'memcached.miss_count': MetricType.COUNT,
        'memcached.hit_count': MetricType.COUNT,
        'memcached.dupe_count': MetricType.COUNT,
    }
    name_template = 'server.app.{}'

    metrics = {
        name: Metric(name_template.format(name), context, metric_type)
        for name, metric_type in metric_defs.items()
    }

    for single_run in data:
        # Keep the parsed payload in its own name instead of rebinding `data`.
        parsed = self.parse_data(single_run['content'])
        for name, raw_value in parsed.items():
            metrics[name].add_value(raw_value, None)

    return Collection(metrics.values())
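# Hypothetical example (not from the source) of the dict parse_data() is expected
# to return for one run; the keys must match the metric_defs above:
#     {'response_time': 0.312, 'database.queries.master_count': 4,
#      'memcached.hit_count': 27, ...}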
def handle(self, *args, **options):
    with open(options['datafile'], 'r') as f:
        data = ujson.load(f)

    all_metrics = Collection.unserialize(data)

    # Select a single metric by id and print the matching result set.
    query = Query().where_eq('id', 'browser.dom.event.interactive')
    result_set = query.execute(all_metrics)

    dump_result_set(result_set)
def extract(self, context, data):
    context['origin'] = 'selenium'

    noexternals_context = merge_context(context, {'mode': 'noexternals'})
    noads_context = merge_context(context, {'mode': 'noads'})

    anon_search_context = merge_context(context, {
        'scenario': 'anon_visit',
    })
    anon_search_context.pop('url', None)

    user_search_context = merge_context(context, {
        'scenario': 'user_visit',
    })
    user_search_context.pop('url', None)

    context['mode'] = 'default'

    metrics = Collection()

    # (test context, test name) pairs to extract per-URL metrics from.
    types = [
        # (noexternals_context, 'oasis_perftest_medium_article_no_externals'),
        # (noads_context, 'oasis_perftest_medium_article_no_ads'),
        (context, 'load_provided_url'),
    ]
    # (output metric id, value type, field name in the selenium results)
    metric_defs = [
        ('browser.transaction.time', MetricType.TIME, 'total_load_time'),
        ('browser.dom.event.interactive', MetricType.TIME, 'interactive_time'),
        ('browser.dom.event.content_loaded', MetricType.TIME, 'dom_content_loaded_time'),
        ('browser.dom.event.complete', MetricType.TIME, 'dom_complete_time'),
    ]

    for test_context, test_name in types:
        for out_name, data_type, in_name in metric_defs:
            self.fan_out_by_url_and_push_metrics(
                metrics, out_name, test_context, data_type,
                values=self.metric_values_single_step_with_urls(
                    data, test_name, in_name))

    # total time
    # metrics.add(Metric(
    #     'browser.transaction.time', anon_search_context, MetricType.TIME,
    #     values=self.total_load_time_all_steps(data, 'perftest_oasis_anon_search_pageviews')))
    # metrics.add(Metric(
    #     'browser.transaction.time', user_search_context, MetricType.TIME,
    #     values=self.total_load_time_all_steps(data, 'perftest_oasis_user_search_pageviews')))

    return metrics
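# A minimal sketch of the merge_context() helper assumed above; the real
# implementation may differ, but it has to return a new dict, since the
# .pop() calls on the merged copies must not mutate the base context.
def merge_context(base, extra):
    merged = dict(base)
    merged.update(extra)
    return merged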
def extract(self, context, data):
    context['origin'] = 'requests'

    metrics = Collection()
    metrics.add(
        Metric('server.app.response_time', context, MetricType.TIME,
               values=[(float(single_run['time']), None) for single_run in data]))
    metrics.add(
        Metric('server.app.response_size', context, MetricType.BYTES,
               values=[(single_run['content_length'], None) for single_run in data]))

    return metrics
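# Hypothetical shape (not from the source) of one `single_run` entry consumed
# above: a response time (numeric or stringified) and the response body size:
#     {'time': '0.421', 'content_length': 48213}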
def handle(self, *args, **options):
    if options['datafile']:
        with open(options['datafile'], 'r') as f:
            data = ujson.load(f)
    elif options['resultid']:
        url = build_absolute_uri('/api/v1/results/{}/'.format(options['resultid']))
        response = requests.get(url)
        data = response.json()['results']
    else:
        raise RuntimeError('You need to specify either --resultid or --datafile')

    all_metrics = Collection.unserialize(data)
    basic_metric_set = BasicMetricSet(all_metrics)
    basic_metrics = basic_metric_set.items

    for k, v in basic_metrics.items():
        print('METRIC: {}'.format(k))
        dump_stats(v)
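# Example invocations (the management command name is illustrative, not taken
# from the source; only the --datafile / --resultid options are):
#     python manage.py <command> --datafile results.json
#     python manage.py <command> --resultid 123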
def extract(self, context, data):
    context['origin'] = 'phantomas'

    metrics = Collection()
    if len(data) == 0:
        return metrics

    # Collect every metric name that appears in any of the runs.
    metric_names = set()
    for run in data:
        metric_names.update(run['metrics'].keys())

    for metric_name in metric_names:
        raw_id = 'raw.phantomas.' + metric_name
        metric_id = raw_id
        metric_type = MetricType.UNKNOWN

        # Known metrics map to a friendly id, optionally prefixed with a type
        # ('type:id'); unknown metrics keep the raw id and UNKNOWN type.
        if metric_name in self.KNOWN_METRICS:
            metric_def = self.KNOWN_METRICS[metric_name]
            if ':' in metric_def:
                metric_type, metric_id = metric_def.split(':')
            else:
                metric_id = metric_def

        raw_values_and_infos = [
            (self.normalize_phantomas_value(run['metrics'][metric_name], metric_type),
             run['offenders'].get(metric_name))
            for run in data
        ]

        metrics.add(Metric(metric_id, context, metric_type, None, raw_values_and_infos))
        # Also keep the metric under its raw phantomas id when it was remapped.
        if metric_id != raw_id:
            metrics.add(
                Metric(raw_id, context, metric_type, None, raw_values_and_infos))

    return metrics
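# Hypothetical KNOWN_METRICS entries (not from the source) showing the two
# formats handled above: a bare target id, or a 'type:id' pair split on ':'
# (this assumes MetricType values serialize to plain strings such as 'time'):
#     KNOWN_METRICS = {
#         'domInteractive': 'time:browser.dom.event.interactive',
#         'cssCount': 'browser.assets.css_count',
#     }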
def get_metrics(self):
    # Fetch at most one stored result and deserialize its metrics collection.
    results = self.results.all()[0:1]
    if len(results) == 0:
        return None
    return Collection.unserialize(results[0].results)