Example #1
0
def get_two_metrics(output_directory, resource_path, rules):
    """Build two Metric fixtures ('MetricOne', 'MetricTwo') on the same host.

    Each metric uses its name both as the section and the label; all
    time-range and sub-metric arguments are left as None.
    """
    specs = (('MetricOne', 'TestOne.csv'), ('MetricTwo', 'TestTwo.csv'))
    return [
        Metric(name, csv_file, 'HostnameOne', output_directory,
               resource_path, name, None, None, rules, None, None)
        for name, csv_file in specs
    ]
Example #2
0
def initialize_metric(section, infile_list, hostname, output_directory, resource_path, label, ts_start, ts_end, rule_strings, important_sub_metrics, other_options):
  """
  Initialize appropriate metric based on type of metric.
  :param: section: config section name or auto discovered metric type
  :param: infile_list: list of input log files for the metric
  :param: hostname: hostname associated with the logs origin
  :param: output_directory: report location
  :param: resource_path: resource path for report
  :param: label: label for config section or auto discovered metric type
  :param: ts_start: start time for analysis
  :param: ts_end: end time for analysis
  :param: rule_strings: list of slas
  :param: important_sub_metrics: list of important sub metrics
  :param: other_options: kwargs
  :return: metric object
  """
  # 'bin' directory three levels above this file; attached to the metric below.
  package_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
  bin_path = os.path.abspath(os.path.join(package_root, 'bin'))
  # The metric type is the section name up to the first '-'.
  metric_type = section.split('-')[0]
  if metric_type not in metric_classes:
    # Unknown types fall back to the generic Metric class.
    metric_cls = Metric
  elif 'SAR' in metric_type:
    # All SAR-* variants share a single registered class.
    metric_cls = metric_classes['SAR']
  else:
    metric_cls = metric_classes[metric_type]
  metric = metric_cls(section, infile_list, hostname, output_directory, resource_path, label, ts_start, ts_end, rule_strings, important_sub_metrics, **other_options)
  metric.bin_path = bin_path
  return metric
Example #3
0
def test_metrics_with_summary_with_partial_error():
    """
    Verify that per-metric reports are still generated for healthy metrics when
    one metric is in error, and that the main and summary reports are created.
    """
    global output_directory
    global input_log_directory
    global resource_path
    rules = {}
    shared_files = [
        os.path.join(input_log_directory, name) for name in ('a.csv', 'b.csv')
    ]
    metrics = get_two_metrics(output_directory, resource_path, rules)
    for metric in metrics:
        # Point every per-metric file attribute at the same valid CSV list.
        metric.csv_files = shared_files
        metric.stats_files = shared_files
        metric.timeseries_csv_list = shared_files
        metric.important_stats_files = shared_files
        metric.percentiles_files = shared_files

    # Third metric is left without any file attributes set; its report is
    # expected to be absent (asserted below).
    broken_metric = Metric('MetricThree', 'TestThree.csv', 'HostnameOne',
                           output_directory, resource_path, 'MetricThree',
                           None, None, rules, None, None)
    metrics.append(broken_metric)

    report = Report(None,
                    output_directory,
                    resource_directory,
                    resource_path,
                    list(metrics),
                    correlated_plots=[])
    report.generate()

    assert naarad.utils.is_valid_file(
        os.path.join(output_directory, 'MetricOne_report.html'))
    assert naarad.utils.is_valid_file(
        os.path.join(output_directory, 'MetricTwo_report.html'))
    assert not naarad.utils.is_valid_file(
        os.path.join(output_directory, 'MetricThree_report.html'))
    assert naarad.utils.is_valid_file(
        os.path.join(output_directory, 'report.html'))
    assert naarad.utils.is_valid_file(
        os.path.join(output_directory, 'summary_report.html'))
    # Clean up generated artifacts between test runs.
    os.system('rm -rf tmp_report_test/*.*')
    os.system('rm -rf tmp_report_test/resources/*.*')