Beispiel #1
0
    def __init__(self, resultfiles):
        """
        Create an initial object with all of the result information rolled up
        in an easy to process form.

        The constructor of the summary class has an attribute for each HTML
        summary page, which are fed into the index.mako file to produce HTML
        files. resultfiles is a list of paths to JSON results generated by
        piglit-run.
        """

        def buildDictionary(summary):
            # Build two dictionaries from one run and return them as a pair:
            #   counts: test/group name -> (pass count, total count), i.e.
            #           counts['spec/glsl/foo'] == (456, 800)
            #   status: group name -> "worst" status seen within that group
            counts = {}

            if not summary.tests:
                # BUGFIX: the caller unpacks two values
                # (fraction, status = buildDictionary(each)), so the empty
                # case must return a pair, not a single dict.
                return {}, {}

            lastGroup = ''

            # Build a dictionary of group stati, mapping groupname = status.
            # This is the "worst" status of the group in descending order:
            # crash, skip, fail, warn, pass
            status = {}

            # currentStatus is a stack of status objects, one per
            # currently-open group; a status that compares greater is "worse"
            # (see the comparison below when adding each test)
            currentStatus = []

            # Stack contains tuples like: (pass count, total count)
            stack = []

            def openGroup(name):
                stack.append((0, 0))

                # Since NotRun is the "lowest" status for HTML generation, if
                # there is another status it will replace skip
                currentStatus.append(so.NotRun())

            def closeGroup(group_name):
                # We're done with this group, record the number of pass/total
                # in the dictionary.
                (nr_pass, nr_total) = stack.pop()
                counts[group_name] = (nr_pass, nr_total)

                # Also add our pass/total count to the parent group's counts
                # (which are still being computed)
                (parent_pass, parent_total) = stack[-1]
                stack[-1] = (parent_pass + nr_pass, parent_total + nr_total)

                # Propagate this group's status up to its parent if it is
                # worse, then record it for the group being closed
                if currentStatus[-2] < currentStatus[-1]:
                    currentStatus[-2] = currentStatus[-1]
                status[group_name] = currentStatus.pop()

            # 'fake' is a sentinel parent so that closeGroup("all") can still
            # access stack[-1] / currentStatus[-2]
            openGroup('fake')
            openGroup('all')

            # fulltest is a full test name,
            # i.e. tests/shaders/glsl-algebraic-add-zero
            for fulltest in sorted(summary.tests):
                # same as fulltest.rpartition('/')
                group, test = path.split(fulltest)

                if group != lastGroup:
                    # We're in a different group now.  Close the old ones
                    # and open the new ones.
                    for x in path.relpath(group, lastGroup).split('/'):
                        if x != '..':
                            openGroup(x)
                        else:
                            closeGroup(lastGroup)
                            lastGroup = path.normpath(path.join(lastGroup,
                                                                ".."))

                    lastGroup = group

                # Add the current test; skipped tests do not count toward
                # the total
                (pass_so_far, total_so_far) = stack[-1]
                if summary.tests[fulltest]['result'] == so.Pass():
                    pass_so_far += 1
                if summary.tests[fulltest]['result'] != so.Skip():
                    total_so_far += 1
                stack[-1] = (pass_so_far, total_so_far)

                # Raise the open group's status if this test's result is worse
                if summary.tests[fulltest]['result'] > currentStatus[-1]:
                    currentStatus[-1] = summary.tests[fulltest]['result']

            # Work back up the stack closing groups as we go until we reach the
            # top, where we need to manually close this as "all"
            while len(stack) > 2:
                closeGroup(lastGroup)
                lastGroup = path.dirname(lastGroup)
            closeGroup("all")

            # Only the 'fake' sentinel should remain open
            assert(len(stack) == 1)
            assert(len(currentStatus) == 1)

            return counts, status

        # Create a Result object for each piglit result and append it to the
        # results list
        self.results = [core.load_results(i) for i in resultfiles]

        self.status = {}
        self.fractions = {}
        self.totals = {}
        self.tests = {'all': set(), 'changes': set(), 'problems': set(),
                      'skipped': set(), 'regressions': set(), 'fixes': set()}

        for each in self.results:
            # Build per-run dictionaries of pass fractions and statuses,
            # keyed by the name of the test run
            fraction, status = buildDictionary(each)
            self.fractions.update({each.name: fraction})
            self.status.update({each.name: status})

            # Create a list with all the test names in it
            self.tests['all'] = set(self.tests['all']) | set(each.tests)

        # Create the lists of statuses like problems, regressions, fixes,
        # changes and skips
        for test in self.tests['all']:
            # One status per run, in run order; NotRun for runs that did not
            # include this test
            status = []
            for each in self.results:
                try:
                    status.append(each.tests[test]['result'])
                except KeyError:
                    status.append(so.NotRun())

            # Problems include: warn, dmesg-warn, fail, dmesg-fail, and crash.
            # Skip does not go on this page, it has the 'skipped' page
            if so.Skip() > max(status) > so.Pass():
                self.tests['problems'].add(test)

            # Find all tests with a status of skip
            if so.Skip() in status:
                self.tests['skipped'].add(test)

            # find fixes, regressions, and changes between consecutive runs
            for i in xrange(len(status) - 1):
                first = status[i]
                last = status[i + 1]
                if first < last and so.NotRun() not in (first, last):
                    self.tests['regressions'].add(test)
                if first > last and so.NotRun() not in (first, last):
                    self.tests['fixes'].add(test)
                # Changes cannot be added in the fixes and regressions passes
                # because NotRun is a change, but not a regression or fix
                if first != last:
                    self.tests['changes'].add(test)
Beispiel #2
0
@author: jhkwakkel
'''
from matplotlib.mlab import rec2csv
import numpy as np
from core import TIME

from core import load_results


def write_results_to_csv(results, directory):

    experiments, outcomes = results
    #     deceased_pop = outcomes['relative market price']
    #     time = outcomes[TIME]

    rec2csv(experiments, directory + '/x.csv', withheader=True)

    for key, value in outcomes.iteritems():
        np.savetxt(directory + '/{}.csv'.format(key), value, delimiter=',')


#     np.savetxt('./data/scarcity/relative_market_price.csv', deceased_pop, delimiter=',')
#     np.savetxt('./data/scarcity/time.csv', time, delimiter=',')
#
    for entry in x.dtype.descr:
        print entry

# Script entry: load the pickled experiment results and dump them to CSV
# files under ./data/eng_trans (directory is assumed to exist -- TODO confirm)
fn = r'./data/eng_trans_100.cPickle'
results = load_results(fn, zipped=False)
write_results_to_csv(results, './data/eng_trans')
'''
Created on Mar 22, 2014

@author: jhkwakkel
'''
from matplotlib.mlab import rec2csv
import numpy as np
from core import TIME

from core import load_results

def write_results_to_csv(results, directory):

    experiments, outcomes = results
#     deceased_pop = outcomes['relative market price']
#     time = outcomes[TIME]
    
    rec2csv(experiments, directory+'/x.csv', withheader=True)
    
    for key, value in outcomes.iteritems():
        np.savetxt(directory+'/{}.csv'.format(key), value, delimiter=',')
#     np.savetxt('./data/scarcity/relative_market_price.csv', deceased_pop, delimiter=',')
#     np.savetxt('./data/scarcity/time.csv', time, delimiter=',')
#     
    for entry in x.dtype.descr:
        print entry

# Script entry: load the pickled experiment results and dump them to CSV
# files under ./data/eng_trans (directory is assumed to exist -- TODO confirm)
fn = r'./data/eng_trans_100.cPickle'  # PEP 8: space around '=' (was 'fn =r...')
results = load_results(fn, zipped=False)
write_results_to_csv(results, './data/eng_trans')
                accuracies_table_models.append(model)
                accuracies_table_data[i, 0] = np.mean(
                    data[3][model][difficulty][:, -1])
                accuracies_table_data[i, 1] = 1.98 * np.std(
                    data[3][model][difficulty][:, -1], )
                i += 1

    accuracies_table_df = pd.DataFrame(accuracies_table_data,
                                       columns=accuracies_table_header,
                                       index=accuracies_table_models)
    accuracies_table_df.round(4).to_csv(f'tables/accuracies_table.csv')


if __name__ == '__main__':
    path = '/home/jeff/Documents/RL_datasets/all_results'
    train_results, test_results = load_results(path)

    train_epoch_metrics = [
        'val_nb_nodes', 'val_solve_time', 'val_lp_iters', 'train_loss',
        'train_dqn_loss', 'train_cql_loss'
    ]

    mean_std_metrics = [
        'mean_nb_nodes', 'mean_solve_time', 'mean_lp_iters', 'std_nb_nodes',
        'std_solve_time', 'std_lp_iters'
    ]

    test_metrics = [
        'mean_nb_nodes', 'mean_solve_time', 'mean_lp_iters',
        'mean_nb_nodes_cpu', 'mean_solve_time_cpu', 'mean_lp_iters_cpu',
        'mean_sb_accuracy'
Beispiel #5
0
    def __init__(self, resultfiles):
        """
        Create an initial object with all of the result information rolled up
        in an easy to process form.

        The constructor of the summary class has an attribute for each HTML
        summary page, which are fed into the index.mako file to produce HTML
        files. resultfiles is a list of paths to JSON results generated by
        piglit-run.
        """

        # Create a Result object for each piglit result and append it to the
        # results list
        self.results = [core.load_results(i) for i in resultfiles]

        # Per-run dictionaries keyed by run name: worst status and
        # (pass, total) fraction for every test/group path
        self.status = {}
        self.fractions = {}
        self.totals = {}
        # Buckets of test names used to build the individual summary pages
        self.tests = {'all': set(), 'changes': set(), 'problems': set(),
                      'skipped': set(), 'regressions': set(), 'fixes': set()}

        def fgh(test, result):
            """ Helper for updating the fractions and status lists.

            Adds result.fraction element-wise into fraction[test], and raises
            status[test] to result when result compares greater (i.e. is
            "worse") and is not skip. Operates on the closed-over 'fraction'
            and 'status' dicts, which are rebound per run below.
            """
            fraction[test] = tuple(
                [sum(i) for i in zip(fraction[test], result.fraction)])
            if result != so.Skip() and status[test] < result:
                status[test] = result

        for results in self.results:
            # Create a set of all of the test names across all of the runs
            self.tests['all'] = set(self.tests['all'] | set(results.tests))

            # Create two dictionaries that have a default factory: they return
            # a default value instead of a key error.
            # This default key must be callable
            self.fractions[results.name] = collections.defaultdict(lambda: (0, 0))
            self.status[results.name] = collections.defaultdict(so.NotRun)

            # short names; these are the dicts fgh() closes over and updates
            fraction = self.fractions[results.name]
            status = self.status[results.name]

            for key, value in results.tests.iteritems():
                #FIXME: Add subtest support

                # Walk the test name as if it was a path, at each level update
                # the tests passed over the total number of tests (fractions),
                # and update the status of the current level if the status of
                # the previous level was worse, but is not skip
                while key != '':
                    fgh(key, value['result'])
                    key = path.dirname(key)

                # when we hit the root update the 'all' group and stop
                fgh('all', value['result'])

        # Create the lists of statuses like problems, regressions, fixes,
        # changes and skips
        for test in self.tests['all']:
            # One status per run, in run order; NotRun for runs that did not
            # include this test
            status = []
            for each in self.results:
                try:
                    status.append(each.tests[test]['result'])
                except KeyError:
                    status.append(so.NotRun())

            # Problems include: warn, dmesg-warn, fail, dmesg-fail, and crash.
            # Skip does not go on this page, it has the 'skipped' page
            if max(status) > so.Pass():
                self.tests['problems'].add(test)

            # Find all tests with a status of skip
            if so.Skip() in status:
                self.tests['skipped'].add(test)

            # find fixes, regressions, and changes between consecutive runs
            for i in xrange(len(status) - 1):
                first = status[i]
                last = status[i + 1]
                # regression: the later status is worse than both the earlier
                # status and pass
                if max(first, so.Pass()) < last:
                    self.tests['regressions'].add(test)
                # fix: the earlier status is worse than both the later status
                # and pass
                if first > max(last, so.Pass()):
                    self.tests['fixes'].add(test)
                # Changes cannot be added in the fixes and regressions passes
                # because NotRun is a change, but not a regression or fix
                if first != last:
                    self.tests['changes'].add(test)