Example #1
    def __init__(self, rc='test-results.rc'):
        '''
        Load the test-results.rc file into self.
        '''
        center("TestResultsRepository.__init__")

        try:
            # Find it ...
            #
            fid = rc
            if not path.exists(fid):  # Current directory
                fid = path.join(path.expanduser('~'), rc)
                if not path.exists(fid):  # User's home directory
                    fid = path.join(path.dirname(argv[0]), rc)
                    if not path.exists(fid):
                        fid = path.join(path.dirname(argv[0]), 'lib', rc)
                        if not path.exists(fid):
                            raise FileDoesntExist(rc)

            self.cfg = json_load(fid)

        except FileDoesntExist as e:
            raise TestResultsRepositoryError(
                'The file (%s) does not exist.\n' % e.file_name)

        cleave("TestResultsRepository.__init__")
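
Note: Example #1 resolves the rc file by falling back through a fixed list of locations: the current directory, the user's home directory, the directory of the running script, and that directory's lib/ subdirectory. The sketch below flattens the same lookup into a loop; the FileDoesntExist stub and the helper name find_rc are illustrative assumptions, not part of the project shown above.

from os import path
from sys import argv


class FileDoesntExist(Exception):
    # Stand-in for the project's exception; the real class records file_name.
    def __init__(self, file_name):
        super(FileDoesntExist, self).__init__(file_name)
        self.file_name = file_name


def find_rc(rc='test-results.rc'):
    # Try each candidate location in order and return the first path that exists.
    candidates = [
        rc,                                            # current directory
        path.join(path.expanduser('~'), rc),           # user's home directory
        path.join(path.dirname(argv[0]), rc),          # script's directory
        path.join(path.dirname(argv[0]), 'lib', rc),   # script's lib/ directory
    ]
    for fid in candidates:
        if path.exists(fid):
            return fid
    raise FileDoesntExist(rc)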
Example #2
    def __init__(self, rc='test-results.rc'):
        '''
        Load the test-results.rc file into self.
        '''
        Dbg.enter("TestResultsRepository.__init__")

        try:
            # Find it ...
            #
            fid = rc
            if not path.exists(fid):  # Current directory
                fid = path.join(path.expanduser('~'), rc)
                if not path.exists(fid):  # User's home directory
                    fid = path.join(path.dirname(argv[0]), rc)
                    if not path.exists(fid):
                        raise FileDoesntExist(rc)

            self.cfg = json_load(fid)

            self.text_file_template = Template(
                filename=locate('text-file.mako'),
                default_filters=['decode.utf8'],
                input_encoding='utf-8',
                output_encoding='utf8')

        except FileDoesntExist as e:
            raise TestResultsRepositoryError(
                'The file (%s) does not exist.\n' % e.file_name)

        Dbg.leave("TestResultsRepository.__init__")
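
Note: Example #2 differs from Example #1 only in its tracing helpers (Dbg.enter/Dbg.leave instead of center/cleave) and in pre-loading a Mako template for text reports. Rendering such a template later would look roughly like the sketch below; the template file name and the results keyword argument are assumptions, since the real template is not shown here.

from mako.template import Template

# Minimal sketch: load a Mako template from disk and render it with some data.
# Because output_encoding is set, render() returns bytes rather than str.
template = Template(
    filename='text-file.mako',      # assumed to sit next to the script
    input_encoding='utf-8',
    output_encoding='utf-8')
report = template.render(results={'passed': 10, 'failed': 2})  # hypothetical data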
Example #3
    def results(self, test_run):
        center("TestResultsRepository.results")
        try:
            retval = json_load(
                path.join(self.cfg['repository_root'], test_run,
                          'results.json'))
        except FileDoesntExist as e:
            raise TestResultsRepositoryError(
                'The file (%s) does not exist.\n' % e.file_name)
        cleave("TestResultsRepository.results")
        return retval
Example #4
    def attributes(self):
        center("JenkinsTestResultsTree.attributes")

        retval = None
        try:
            retval = json_load(path.join(self.arkive, 'test-attributes.json'))

        except FileDoesntExist as e:
            raise JenkinsTestResultsTreeError(
                'The Jenkins test results tree (%s) specified on the command line does\n'
                '           not appear to be a valid results tree, the file (%s)\n'
                '           does not exist.\n' % (self.root, e.file_name))

        cleave("JenkinsTestResultsTree.attributes")
        return retval
Example #5
def load_dependencies():
    return utils.json_load(get_dependencies_filename())
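
Note: every example on this page goes through the same json_load helper, whose implementation is not shown. A minimal, hypothetical version consistent with how the callers use it (parse a JSON file, raise the project's FileDoesntExist if the file is missing) might look like the sketch below, reusing the FileDoesntExist stand-in from the sketch after Example #1.

import json
from os import path

def json_load(file_name):
    # Hypothetical stand-in for the project's helper: parse the named JSON
    # file and return the resulting object.
    if not path.exists(file_name):
        raise FileDoesntExist(file_name)  # project-specific exception, stubbed earlier
    with open(file_name, 'r') as f:
        return json.load(f)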
Example #6
    def filtered_data(self, chartname, path_to_data, normalize):
        """
        Read all data files in the given path and return a dictionary containing
        the data that passes the chart's filter. Only metrics listed in the chart
        definition are included. If the data is to be normalized, scale it using
        the baseline file named in the chart definition.
        """
        chartdef = self.charts[chartname]
        filterdata = self.filters[chartdef['filter']]
        included_metrics = chartdef['included_metrics'].split(',')

        if normalize:
            # If we're normalizing the data, we take all the included metrics from the
            # baseline file, find the highest-valued one, and scale everything to that.
            # This is done to obscure real values in public reports.
            baseline_fn = path.join(path_to_data, 'baselines',
                                    chartdef['baseline_file'])
            if not path.isfile(baseline_fn):
                raise ValueError("Baseline File <%s> does not exist" %
                                 baseline_fn)
            baseline = json_load(baseline_fn)
            # Copy the keys so we can safely delete entries while iterating.
            basekeys = list(baseline['metrics'].keys())
            if chartdef['included_metrics'] != 'all':
                # remove any metrics that we don't use
                for metric in basekeys:
                    if 'all' not in included_metrics:
                        if metric not in included_metrics:
                            del baseline['metrics'][metric]

            # Now, find the highest value among all the request metrics so we can scale to that
            norm_val = None
            # get these again, we may have deleted some
            basekeys = baseline['metrics'].keys()
            for metric in basekeys:
                #print('%s : %s' % (metric, baseline['metrics'][metric]))
                if norm_val is None:
                    norm_val = float(baseline['metrics'][metric])
                elif (float(baseline['metrics'][metric]) > norm_val):
                    norm_val = float(baseline['metrics'][metric])

        # Now we open each file, check filters, and process it
        dfl = listdir(path_to_data)
        tests = {}
        for fname in dfl:
            if not fname.endswith('.json'):
                continue

            data = json_load(path.join(path_to_data, fname))
            # print('testing filters for <%s>' % fname)
            if not self.ismatch(chartname, data['meta']):
                # print('file <%s> did not match filters' % fname)
                continue

            print('file <%s> passes filters, processing' % fname)
            if len(data['metrics']) == 0:
                # this can apparently happen when tests are aborted
                print('Data file contains no metrics, skipping')
                continue

            # filter metrics as requested (copy the keys so we can delete while iterating)
            mkeys = list(data['metrics'].keys())
            for metric in mkeys:
                if 'all' not in included_metrics:
                    if metric not in included_metrics:
                        del data['metrics'][metric]

            if len(data['metrics']) == 0:
                raise ValueError(
                    'No metrics left in data file after filtering')

            # if we're scaling to a baseline, do it
            if normalize:
                # get these again, we may have deleted some
                mkeys = data['metrics'].keys()
                for metric in mkeys:
                    mval = float(data['metrics'][metric]) / norm_val
                    data['metrics'][metric] = '%s' % mval

            data['meta']['chart-title'] = chartdef['chart_title']
            if normalize:
                data['meta']['y-label'] = chartdef['y_label'] + ' (normalized)'
            else:
                data['meta']['y-label'] = chartdef['y_label']

            # Save this test keyed by its BUILD_ID string, which can be sorted chronologically
            # print(data['meta'])
            buildid = data['meta']['environ']['BUILD_ID']
            tests[buildid] = data

        if len(tests) == 0:
            raise ValueError(
                "There are no data records to write to the output file, perhaps none passed the filters"
            )

        return tests
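
Note: the normalization branch in Example #6 divides every metric by the single largest value found in the baseline file, so published charts show ratios rather than raw measurements. A small stand-alone sketch of that scaling step, with made-up metric names and values:

# Baseline metrics (made up); the largest value becomes the scale factor.
baseline_metrics = {'requests_per_sec': 1200.0, 'avg_latency_ms': 45.0}
norm_val = max(float(v) for v in baseline_metrics.values())   # 1200.0

# One test run's metrics (also made up), scaled and stored back as strings,
# mirroring the "'%s' % mval" formatting used in the example.
metrics = {'requests_per_sec': 900.0, 'avg_latency_ms': 60.0}
normalized = {k: '%s' % (float(v) / norm_val) for k, v in metrics.items()}
# normalized == {'requests_per_sec': '0.75', 'avg_latency_ms': '0.05'}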