def main():
    args = parse_custom_args()

    if not args.revision:
        rev = 'WORK'
        name = 'current'
    elif args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = checkouts.get_global_rev(REPO, args.revision)
        name = rev

    combo = [(Translator(REPO, rev=rev), Preprocessor(REPO, rev=rev), Planner(REPO, rev=rev))]

    exp = DownwardExperiment(path=get_exp_dir(name, args.test), repo=REPO, combinations=combo)
    exp.add_suite(SUITES[args.test])
    for nick, config in CONFIGS[args.test]:
        exp.add_config(nick, config)
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES), name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:
        exp.steps.insert(0, Step('rm-eval-dir', shutil.rmtree, exp.eval_dir, ignore_errors=True))
        exp.add_step(Step('fetch-baseline-results', Fetcher(),
                          get_exp_dir('baseline', args.test) + '-eval',
                          exp.eval_dir))
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES), name='comparison')
        exp.add_step(Step('rm-preprocess-dir', shutil.rmtree, exp.preprocess_exp_path))
        exp.add_step(Step('rm-exp-dir', shutil.rmtree, exp.path))
        exp.add_step(Step('rm-preprocessed-tasks', shutil.rmtree, exp.preprocessed_tasks_dir))
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
                       name='regression-check')

    exp()
Example #2
File: hstar_2_h.py Project: galdreiman/PAC
    def _get_table(self, attribute, domain=None):
        # Get the original table.
        table = AbsoluteReport._get_table(self, attribute, domain)

        # For the coverage table, also fetch the initial h value and h*
        # and add a column with their ratio.
        if attribute == 'coverage':
            h_attr = Attribute('initial_h_value', min_wins=False, absolute=True)
            h_table = AbsoluteReport._get_table(self, h_attr, domain)
            hstar_attr = Attribute('cost', min_wins=False, absolute=True)
            hstar_table = AbsoluteReport._get_table(self, hstar_attr, domain)
            ratio_attr = Attribute('hstar_to_h', min_wins=False, absolute=True)
            ratio_table = AbsoluteReport._get_table(self, ratio_attr, domain)
            ratio_col = {}
            h_col = {}
            hstar_col = {}

            # Collect the per-row values for the new columns.
            for row in table.row_names:
                hstar = hstar_table.get_row(row)
                h = h_table.get_row(row)
                ratio = ratio_table.get(row)
                print(ratio)
                ratio_col[row] = ratio['WORK-lmcut']
                h_col[row] = h[0]
                hstar_col[row] = hstar[0]

            table.set_column_order(table.col_names + ['h*'])
            table.add_col('h*/h(s)', ratio_col)
            table.add_col('h(s)', h_col)
            table.add_col('h*', hstar_col)
            table.min_wins = None
            table.colored = False

        return table
Example #3
    def __init__(self, algorithm_pairs, **kwargs):
        """
        See :py:class:`AbsoluteReport <downward.reports.absolute.AbsoluteReport>`
        for inherited parameters.

        *algorithm_pairs* is the list of algorithm pairs you want to
        compare.

        All columns in the report will be arranged such that the
        compared algorithms appear next to each other. After the two
        columns containing absolute values for the compared algorithms,
        a third column ("Diff") is added showing the difference between
        the two values.

        Algorithms may appear in multiple comparisons. Algorithms not
        mentioned in *algorithm_pairs* are not included in the report.

        If you want to compare algorithms A and B, instead of a pair
        ``('A', 'B')`` you may pass a triple ``('A', 'B', 'A vs.
        B')``. The third entry of the triple will be used as the name
        of the corresponding "Diff" column.

        For example, if the properties file contains algorithms A, B, C
        and D and *algorithm_pairs* is ``[('A', 'B', 'Diff BA'), ('A',
        'C')]`` the resulting columns will be A, B, Diff BA (contains B
        - A), A, C, Diff (contains C - A).

        Example:

        >>> from downward.experiment import FastDownwardExperiment
        >>> exp = FastDownwardExperiment()
        >>> algorithm_pairs = [
        ...     ('default-lmcut', 'issue123-lmcut', 'Diff lmcut')]
        >>> exp.add_report(ComparativeReport(
        ...     algorithm_pairs, attributes=['coverage']))

        Example output:

            +----------+---------------+----------------+------------+
            | coverage | default-lmcut | issue123-lmcut | Diff lmcut |
            +==========+===============+================+============+
            | depot    |            15 |             17 |          2 |
            +----------+---------------+----------------+------------+
            | gripper  |             7 |              6 |         -1 |
            +----------+---------------+----------------+------------+

        """
        if 'filter_algorithm' in kwargs:
            logging.critical(
                'ComparativeReport doesn\'t support "filter_algorithm". '
                'Use "algorithm_pairs" to select and order algorithms.')
        if algorithm_pairs:
            algos = set()
            for tup in algorithm_pairs:
                for algo in tup[:2]:
                    algos.add(algo)
            kwargs['filter_algorithm'] = algos
        AbsoluteReport.__init__(self, **kwargs)
        self._algorithm_pairs = algorithm_pairs
Example #4
    def write(self):
        AbsoluteReport.write(self)
        markup = self.get_markup()
        if markup:
            print('There has been a regression:')
            print()
            print(markup)
            logging.critical('Regression found.')
Example #6
    def __init__(self, baseline, checks, **kwargs):
        """
        *baseline* must be a global revision identifier.

        *checks* must be an iterable of Check instances.
        """
        AbsoluteReport.__init__(self, **kwargs)
        self.baseline = baseline
        self.checks = checks
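
A usage sketch, mirroring the experiment scripts in this listing (BASELINE is a global revision id and RELATIVE_CHECKS an iterable of Check instances that those scripts define):

# BASELINE and RELATIVE_CHECKS come from the surrounding experiment
# script; see the main() examples in this listing.
exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
               name='regression-check')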
Example #7
    def write(self):
        AbsoluteReport.write(self)
        markup = self.get_markup()
        if markup:
            print('There has been a regression:')
            print()
            print(markup)
        success = not markup
        self.result_handler(success)
Example #8
    def __init__(self, nick, output_file, **kwargs):
        AbsoluteReport.__init__(self, **kwargs)
        # Add the new derived attributes to the list.
        self.derived_properties.append(hstar_to_h)
        self.derived_properties.append(statistics)
        self.derived_properties.append(commualtive_hstar_to_h)
        self.nick = 'WORK-' + nick
        self.outFile = output_file
Example #11
    def _get_statistics_table(self, attribute, domain=None):
        # Get the original table.
        table = AbsoluteReport._get_table(self, attribute, domain)

        # For the statistics table, also fetch the initial h values and h*
        # and add a column with their ratio.
        if attribute == 'statistics':
            h_attr = Attribute('initial_h_value', min_wins=False, absolute=True)
            h_table = AbsoluteReport._get_table(self, h_attr, domain)
            hstar_attr = Attribute('cost', min_wins=False, absolute=True)
            hstar_table = AbsoluteReport._get_table(self, hstar_attr, domain)
            ratio_attr = Attribute('hstar_to_h', min_wins=False, absolute=True)
            ratio_table = AbsoluteReport._get_table(self, ratio_attr, domain)
            ff_h_value_attr = Attribute('initial_ff_h_value',
                                        min_wins=False, absolute=True)
            ff_h_value_table = AbsoluteReport._get_table(
                self, ff_h_value_attr, domain)
            ratio_col = {}
            h_col = {}
            hstar_col = {}
            ff_h_value_col = {}
            h_ff_to_h = []

            for row in table.row_names:
                hstar = hstar_table.get_row(row)
                h = h_table.get_row(row)
                ff_h_val = ff_h_value_table.get_row(row)
                ratio = ratio_table.get(row)

                ratio_col[row] = ratio[self.nick]
                h_col[row] = h[0]
                ff_h_value_col[row] = ff_h_val[0]
                hstar_col[row] = hstar[0]

                # Build the h_ff / h* ratio:
                if ff_h_val[0] is not None and hstar[0] is not None:
                    if hstar[0] != 0:
                        h_ff_to_h.append(ff_h_val[0] / hstar[0])

            table.set_column_order(table.col_names + ['h*'])
            table.add_col('h*/h(s)', ratio_col)
            table.add_col('h(s)', h_col)
            table.add_col('ff_h(s)', ff_h_value_col)
            table.add_col('h*', hstar_col)
            table.min_wins = None
            table.colored = False

            # domain_dir = self.outFile + '/' + domain
            # tools.makedirs(domain_dir)
            # domain_file = domain_dir + '/' + 'PAC_Statistics.csv'
            # with open(domain_file, 'w') as csvfile:
            #     writer = csv.writer(csvfile)
            #     [writer.writerow(r) for r in table]

            self.save_stat_table_to_file(domain, table)
            self.create_commulative_h_ff_to_h_table(domain, h_ff_to_h)

        return table
Example #12
def main():
    args = parse_custom_args()

    if not args.revision:
        rev = 'WORK'
        name = 'current'
    elif args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = checkouts.get_global_rev(REPO, args.revision)
        name = rev

    combo = [(Translator(REPO, rev=rev),
              Preprocessor(REPO, rev=rev),
              Planner(REPO, rev=rev))]

    exp = DownwardExperiment(path=get_exp_dir(name, args.test),
                             repo=REPO,
                             combinations=combo)
    exp.add_suite(SUITES[args.test])
    for nick, config in CONFIGS[args.test]:
        exp.add_config(nick, config)
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                   name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:
        dirty_paths = [
            path for path in [exp.preprocess_exp_path, exp.path, exp.eval_dir]
            if os.path.exists(path)
        ]
        if dirty_paths:
            logging.critical(
                'The last run found a regression. Please inspect what '
                'went wrong and then delete the following directories '
                'manually: %s' % dirty_paths)
        exp.add_step(
            Step('fetch-baseline-results', Fetcher(),
                 get_exp_dir('baseline', args.test) + '-eval', exp.eval_dir))
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                       name='comparison')
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
                       name='regression-check')
        # We abort if there is a regression and keep the directories.
        exp.add_step(
            Step('rm-preprocess-dir', shutil.rmtree, exp.preprocess_exp_path))
        exp.add_step(Step('rm-exp-dir', shutil.rmtree, exp.path))
        exp.add_step(
            Step('rm-preprocessed-tasks', shutil.rmtree,
                 exp.preprocessed_tasks_dir))
        exp.add_step(Step('rm-eval-dir', shutil.rmtree, exp.eval_dir))

    exp()
Example #13
    def _get_statistics_table(self, attribute, domain=None):
        #get original table
        table = AbsoluteReport._get_table(self, attribute, domain)

        #if attribute is statistics, take also initial-h-value and add a column of the ratio
        if attribute == 'statistics':
            h_attr = Attribute('initial_h_value',
                               min_wins=False,
                               absolute=True)
            h_table = AbsoluteReport._get_table(self, h_attr, domain)
            hstar_attr = Attribute('cost', min_wins=False, absolute=True)
            hstar_table = AbsoluteReport._get_table(self, hstar_attr, domain)
            ratio_attr = Attribute('hstar_to_h', min_wins=False, absolute=True)
            ratio_table = AbsoluteReport._get_table(self, ratio_attr, domain)
            ff_h_value_attr = Attribute('initial_ff_h_value',
                                        min_wins=False,
                                        absolute=True)
            ff_h_value_table = AbsoluteReport._get_table(
                self, ff_h_value_attr, domain)
            ratio_col = {}
            h_col = {}
            hstar_col = {}
            ff_h_value_col = {}
            h_ff_to_h = []

            for row in table.row_names:
                hstar = hstar_table.get_row(row)
                h = h_table.get_row(row)
                ratio = ratio_table.get(row)
                ratio_col[row] = ratio[self.nick]
                h_col[row] = h[0]
                hstar_col[row] = hstar[0]

                ff_h_val = ff_h_value_table.get_row(row)
                ff_h_value_col[row] = ff_h_val[0]

                # Build the h* / h_ff ratio:
                if ff_h_val[0] is not None and hstar[0] is not None:
                    if hstar[0] != 0 and ff_h_val[0] != 0:
                        h_ff_to_h.append(hstar[0] / ff_h_val[0])

            table.set_column_order(table.col_names + ['h*'])
            table.add_col('h*/h(s)', ratio_col)
            table.add_col('h(s)', h_col)
            table.add_col('h*', hstar_col)
            table.add_col('ff_h(s)', ff_h_value_col)
            table.min_wins = None
            table.colored = False

            self.save_stat_table_to_file(domain, table)
            self.create_commulative_h_ff_to_h_table(domain, h_ff_to_h)

        return table
Example #14
    def get_points(self, domain):
        """
        By default plot the configs on the x-axis and the attribute values on
        the y-axis. All values are in the same category.
        """

        h_attr = Attribute('initial_h_value', min_wins=False, absolute=True)
        h_table = AbsoluteReport._get_table(self, h_attr, domain)
        hstar_attr = Attribute('cost', min_wins=False, absolute=True)
        hstar_table = AbsoluteReport._get_table(self, hstar_attr, domain)
        coverage_attr = Attribute('coverage', min_wins=False, absolute=True)
        coverage_table = AbsoluteReport._get_table(self, coverage_attr, domain)

        hstar_to_h = {}
        total_solved = 0
        counter = 0
        for row in h_table.row_names:
            hstar = hstar_table.get_row(row)
            h = h_table.get_row(row)
            hstar_val = hstar[0]
            h_val = h[0]
            coverage = coverage_table.get_row(row)
            coverage_val = coverage[0]
            if hstar_val is not None and h_val is not None and h_val > 0:
                hstar_to_h[counter] = hstar_val / h_val
            else:
                hstar_to_h[counter] = 0
            counter = counter + 1
            if coverage_val > 0:
                total_solved = total_solved + 1

        ratios = {
            0.75, 0.8, 0.85, 0.9, 0.95, 1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3,
            1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9,
            1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5,
            2.55, 2.6, 2.65, 2.7, 2.75, 2.8, 2.85, 2.9, 2.95, 3.0, 3.05, 3.1,
            3.15, 3.2, 3.25, 3.3, 3.35, 3.4, 3.45, 3.5, 3.55, 3.6, 3.65, 3.7
        }
        percentages = set()

        for ratio in ratios:
            _sum = 0
            for key in hstar_to_h:
                if hstar_to_h[key] <= ratio and hstar_to_h[key] > 0:
                    _sum = _sum + 1
            if total_solved == 0:
                _sum_percent = 0
            else:
                _sum_percent = _sum * 100 / total_solved

            percentages.add((ratio, _sum_percent))

        return list(percentages)
Example #16
def main():
    args = parse_custom_args()

    if args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = args.revision
        name = rev

    exp = FastDownwardExperiment(path=get_exp_dir(name, args.test),
                                 revision_cache=REVISION_CACHE)
    exp.add_suite(BENCHMARKS_DIR, SUITES[args.test])
    for config_nick, config in CONFIGS[args.test]:
        exp.add_algorithm(rev + "-" + config_nick, REPO, rev, config)

    exp.add_parser(exp.EXITCODE_PARSER)
    exp.add_parser(exp.TRANSLATOR_PARSER)
    exp.add_parser(exp.SINGLE_SEARCH_PARSER)
    exp.add_parser(exp.PLANNER_PARSER)

    exp.add_step('build', exp.build)
    exp.add_step('start', exp.start_runs)
    exp.add_fetcher(name='fetch')
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                   name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:
        dirty_paths = [
            path for path in [exp.path, exp.eval_dir] if os.path.exists(path)
        ]
        if dirty_paths:
            logging.critical(
                'The last run found a regression. Please inspect what '
                'went wrong and then delete the following directories '
                'manually: %s' % dirty_paths)
        exp.add_fetcher(src=get_exp_dir('baseline', args.test) + '-eval',
                        dest=exp.eval_dir,
                        merge=True,
                        name='fetch-baseline-results')
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                       name='comparison')
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
                       name='regression-check')
        # We abort if there is a regression and keep the directories.
        exp.add_step('rm-exp-dir', shutil.rmtree, exp.path)
        exp.add_step('rm-eval-dir', shutil.rmtree, exp.eval_dir)

    exp.run_steps()
Example #17
    def add_absolute_report_step(self, name='', **kwargs):
        """Add step that makes an absolute report.

        Absolute reports are useful for experiments that don't
        compare revisions.

        The report is written to the experiment evaluation directory.

        *name* is a custom name for the report.

        All *kwargs* will be passed to the AbsoluteReport class. If
        the keyword argument *attributes* is not specified, a
        default list of attributes is used. ::

            exp.add_absolute_report_step(attributes=["coverage"])

        """
        kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
        if name == '':
            name = get_experiment_name()
        report = AbsoluteReport(**kwargs)
        outfile = os.path.join(self.eval_dir,
                               name + "." + report.output_format)
        self.add_report(report, outfile=outfile)
        self.add_step(
            Step('publish-absolute-report', subprocess.call,
                 ['publish', outfile]))
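
For instance, the following call (a sketch; exp is an instance of the surrounding experiment class) writes <eval_dir>/nightly.html and publishes it:

# Hypothetical call: the custom name replaces get_experiment_name().
exp.add_absolute_report_step(name='nightly',
                             attributes=['coverage', 'total_time'])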
Example #18
    def add_absolute_report_step(self, **kwargs):
        """Add step that makes an absolute report.

        Absolute reports are useful for experiments that don't
        compare revisions.

        The report is written to the experiment evaluation directory.

        All *kwargs* will be passed to the AbsoluteReport class. If
        the keyword argument *attributes* is not specified, a
        default list of attributes is used. ::

            exp.add_absolute_report_step(attributes=["coverage"])

        """
        kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
        report = AbsoluteReport(**kwargs)
        # outfile is of the form <rev1>-<rev2>-...-<revn>.<format>
        outfile = "-".join(self.revisions) + "." + report.output_format
        outfile = os.path.join(self.eval_dir, outfile)
        self.add_report(report, outfile=outfile)
        self.add_step(
            Step('publish-absolute-report', subprocess.call,
                 ['publish', outfile]))
Example #19
def main():
    args = parse_custom_args()

    if args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = cached_revision.get_global_rev(REPO,
                                             vcs=cached_revision.MERCURIAL,
                                             rev=args.revision)
        name = rev

    exp = FastDownwardExperiment(path=get_exp_dir(name, args.test),
                                 revision_cache=REVISION_CACHE)
    exp.add_suite(BENCHMARKS_DIR, SUITES[args.test])
    for config_nick, config in CONFIGS[args.test]:
        exp.add_algorithm(rev + "-" + config_nick, REPO, rev, config)

    exp.add_parser(exp.EXITCODE_PARSER)
    exp.add_parser(exp.TRANSLATOR_PARSER)
    exp.add_parser(exp.SINGLE_SEARCH_PARSER)
    exp.add_parser(exp.PLANNER_PARSER)

    exp.add_step('build', exp.build)
    exp.add_step('start', exp.start_runs)
    exp.add_fetcher(name='fetch')
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                   name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:

        def result_handler(success):
            regression_test_handler(args.test, rev, success)

        exp.add_fetcher(src=get_exp_dir('baseline', args.test) + '-eval',
                        dest=exp.eval_dir,
                        merge=True,
                        name='fetch-baseline-results')
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                       name='comparison')
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS,
                                             result_handler),
                       name='regression-check')

    exp.run_steps()
Example #20
File: compare.py Project: galdreiman/PAC
    def _get_empty_table(self, attribute=None, title=None, columns=None):
        table = AbsoluteReport._get_empty_table(self, attribute=attribute,
                                                title=title, columns=columns)
        summary_functions = [sum, reports.avg]
        if title == 'summary':
            summary_functions = []
        diff_module = DiffColumnsModule(self._get_compared_configs(),
                                        summary_functions)
        table.dynamic_data_modules.append(diff_module)
        return table
Example #21
File: relative.py Project: galdreiman/PAC
    def __init__(self, resolution, rel_change=0.0, abs_change=0, **kwargs):
        """
        Compare exactly two configurations. For each problem and attribute
        add a table row with the two absolute values and their quotient.

        *resolution* must be one of "domain" or "problem".

        Only include pairs of attribute values x and y if
        abs(y/x - 1) >= *rel_change*.

        Only add pairs of values to the result if their absolute difference is
        bigger than *abs_change*.

        If neither *rel_change* nor *abs_change* are given, no problem rows are
        filtered out.
        """
        AbsoluteReport.__init__(self, resolution, **kwargs)
        self.rel_change = rel_change
        self.abs_change = abs_change
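
For example, with rel_change=0.05 and abs_change=2, the values x=10 and y=12 give abs(12/10 - 1) = 0.2 >= 0.05 and an absolute difference of 2 >= 2, so the row is kept. A minimal usage sketch (assuming the subclass is exported as RelativeReport and exp is an existing experiment):

# Hypothetical invocation: per-problem rows, keeping only rows whose two
# values differ by at least 5% relatively and by at least 2 absolutely.
exp.add_report(RelativeReport('problem', rel_change=0.05, abs_change=2,
                              attributes=['expansions']),
               name='relative-report')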
Example #24
    def _get_empty_table(self, attribute=None, title=None, columns=None):
        table = AbsoluteReport._get_empty_table(self,
                                                attribute=attribute,
                                                title=title,
                                                columns=columns)
        summary_functions = [sum, reports.arithmetic_mean]
        if title == 'Summary':
            summary_functions = []
        diff_module = DiffColumnsModule(self._algorithm_pairs,
                                        summary_functions)
        table.dynamic_data_modules.append(diff_module)
        return table
Example #25
    def __init__(self, compared_configs, **kwargs):
        """
        See :py:class:`AbsoluteReport <downward.reports.absolute.AbsoluteReport>`
        for inherited parameters.

        *compared_configs* is a list of tuples of 2 or 3 elements. The first two entries
        in each tuple are configs that should be compared. If a third entry is present it
        is used as the name of the column showing the difference between the two configs.
        Otherwise the column will be named 'Diff'.
        All columns in the report will be arranged such that the configurations that are
        compared are next to each other. After those two columns a diff column is added
        that shows the difference between the two values. If a config occurs in more than
        one comparison it is repeated every time. Configs that are in the original data
        but are not mentioned in compared_configs are not printed.
        For example if the data contains configs A, B, C and D and *compared_configs* is
        ``[('A', 'B', 'Diff BA'), ('A', 'C')]`` the resulting columns will be
        A, B, Diff BA (contains B - A), A, C, Diff (contains C - A).

        Example::

            compared_configs = [
                ('c406c4f77e13-astar_lmcut', '6e09db9b3003-astar_lmcut', 'Diff (lmcut)'),
                ('c406c4f77e13-astar_ff', '6e09db9b3003-astar_ff', 'Diff (ff)')]
            exp.add_report(CompareConfigsReport(compared_configs))

        """
        if 'filter_config' in kwargs or 'filter_config_nick' in kwargs:
            logging.critical(
                'Filtering config(nicks) is not supported in '
                'CompareConfigsReport. Use the parameter '
                '"compared_configs" to define which configs are shown '
                'and in what order they should appear.')
        if compared_configs:
            configs = set()
            for t in compared_configs:
                for config in t[0:2]:
                    configs.add(config)
            kwargs['filter_config'] = configs
        AbsoluteReport.__init__(self, **kwargs)
        self._compared_configs = compared_configs
Example #28
    def _get_table(self, attribute, domain=None):
        table = AbsoluteReport._get_table(self, attribute, domain)
        quotient_col = {}
        percent_col = {}

        # Filter those rows which have no significant changes
        for row in table.row_names:
            val1, val2 = table.get_row(row)

            if not val1 and not val2:
                # Delete row if both values are missing (None) or 0.
                del table[row]
                continue
            elif val1 is None or val2 is None:
                # Don't add quotient if exactly one value is None.
                quotient_col[row] = NOT_AVAILABLE
                continue

            abs_change = abs(val1 - val2)

            if val1 == 0 or val2 == 0:
                # If one value is 0, only add row if the change is big enough.
                if abs_change >= self.abs_change:
                    quotient_col[row] = NOT_AVAILABLE
                else:
                    del table[row]
                continue

            quotient = val2 / val1
            percent_change = abs(quotient - 1.0)

            if (percent_change >= self.rel_change
                    and abs_change >= self.abs_change):
                quotient_col[row] = quotient
                percent_col[row] = percent_change
            else:
                del table[row]

        # Rows whose quotient is NOT_AVAILABLE still count as changes,
        # so the table is added even if some cells were missing.
        if len(quotient_col) == 0:
            return 'No changes.'

        table.set_column_order(table.col_names + ['Factor'])
        table.add_col('Factor', quotient_col)
        table.add_col('%-Change', percent_col)
        table.min_wins = None
        table.colored = False
        return table
Example #30
def add_absolute_report(exp, *, name=None, outfile=None, **kwargs):
    report = AbsoluteReport(**kwargs)
    if name and not outfile:
        outfile = f"{name}.{report.output_format}"
    elif outfile and not name:
        name = Path(outfile).name
    elif not name and not outfile:
        name = f"{exp.name}-abs"
        outfile = f"{name}.{report.output_format}"

    if not Path(outfile).is_absolute():
        outfile = Path(exp.eval_dir) / outfile

    exp.add_report(report, name=name, outfile=outfile)
    if not REMOTE:
        exp.add_step(f"open-{name}", subprocess.call, ["xdg-open", outfile])
    exp.add_step(f"publish-{name}", subprocess.call, ["publish", outfile])
Example #31
    def add_absolute_report_step(self, **kwargs):
        """Add step that makes an absolute report.

        Absolute reports are useful for experiments that don't
        compare revisions.

        The report is written to the experiment evaluation directory.

        All *kwargs* will be passed to the AbsoluteReport class. If
        the keyword argument *attributes* is not specified, a
        default list of attributes is used. ::

            exp.add_absolute_report_step(attributes=["coverage"])

        """
        kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
        report = AbsoluteReport(**kwargs)
        outfile = get_experiment_name() + "." + report.output_format
        self.add_report(report, outfile=outfile)
Example #33
    def create_commulative_h_star_table(self, domain):
        #get relevant value from original table
        cost_attr = Attribute('cost', min_wins=False, absolute=True)
        cost_table = AbsoluteReport._get_table(self, cost_attr, domain)

        #define list of costs:
        cost_list = []

        #calculate number of solved problems
        total_solved = 0
        for row in cost_table.row_names:
            curr_val = cost_table.get(row)
            val = curr_val[self.nick]
            if val > 0:
                total_solved = total_solved + 1
                cost_list.append(val)

        cost_set = list(set(cost_list))  # remove duplicate element
        cost_dict = {}
        for value in sorted(cost_set):
            smaller_than_value_counter = 0
            for compared_value in cost_list:
                if compared_value <= value:
                    smaller_than_value_counter += 1
            cost_dict[value] = smaller_than_value_counter * 100 / total_solved

        # Write the results into a .csv file:
        domain_dir = self.outFile + '/' + domain
        tools.makedirs(domain_dir)
        domain_file = domain_dir + '/' + 'PAC_Commulative_hstar.csv'
        with open(domain_file, 'w') as f:
            for hstar in sorted(cost_dict.keys()):
                f.write(str(hstar) + ',' + str(cost_dict[hstar]) + '\n')
Example #34
PROPERTIES = {
    "ff-gripper-prob01.pddl": {
        "domain": "gripper",
        "problem": "prob01.pddl",
        "algorithm": "ff",
        "coverage": 1,
        "expansions": 1234,
    },
    "blind-gripper-prob01.pddl": {
        "domain": "gripper",
        "problem": "prob01.pddl",
        "algorithm": "blind",
        "coverage": 1,
        "expansions": 6543,
    },
}


def write_properties(eval_dir):
    tools.makedirs(eval_dir)
    with open(os.path.join(eval_dir, 'properties'), 'w') as f:
        json.dump(PROPERTIES, f)


# Create new experiment. The file <EXP_DIR>-eval/properties must exist.
exp = Experiment(EXP_DIR)
exp.add_report(AbsoluteReport(attributes=['coverage', 'expansions']))

write_properties(exp.eval_dir)
exp.run_steps()
Example #35
    def _get_commulative_table(self, domain):
        #init new table
        title = 'Commulative'
        columns = ['Percentage', 'h*/h(s)']
        min_wins = False
        colored = True
        table = reports.Table(title=title, min_wins=min_wins, colored=colored)
        table.set_column_order(columns)
        link = '#%s' % title
        formatter = reports.CellFormatter(link=link)
        table.cell_formatters[table.header_row][
            table.header_column] = formatter
        domain_dir = self.outFile + '/' + domain
        tools.makedirs(domain_dir)
        domain_file = domain_dir + '/' + 'PAC_Commulative_ratio.csv'
        file = open(domain_file, "w")

        #get relevant value from original table
        ratio_attr = Attribute('hstar_to_h', min_wins=False, absolute=True)
        ratio_table = AbsoluteReport._get_table(self, ratio_attr, domain)
        #define arrays to work
        ratios = [
            0.75, 0.8, 0.85, 0.9, 0.95, 1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3,
            1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9,
            1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5,
            2.55, 2.6, 2.65, 2.7, 2.75, 2.8, 2.85, 2.9, 2.95, 3.0, 3.05, 3.1,
            3.15, 3.2, 3.25, 3.3, 3.35, 3.4, 3.45, 3.5, 3.55, 3.6, 3.65, 3.7,
            3.75, 3.80, 3.85, 3.9, 3.95, 4.0, 4.05, 4.1, 4.15, 4.2, 4.25, 4.3,
            4.35, 4.4, 4.45, 4.5
        ]
        names = [
            'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
            'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
            'za', 'zb', 'zc', 'zd', 'ze', 'zf', 'zg', 'zh', 'zi', 'zj', 'zk',
            'zl', 'zm', 'zn', 'zo', 'aa', 'ab', 'ac', 'ad', 'ae', 'af', 'ag',
            'ah', 'ai', 'aj', 'ak', 'al', 'am', 'an', 'ao', 'ap', 'aq', 'ar',
            'as', 'at', 'au', 'av', 'aw', 'ax', 'ay', 'az', 'ba', 'bb', 'bc',
            'bd', 'be', 'bf', 'bg', 'bh', 'bi'
        ]
        counter = 0

        #calculate number of solved problems
        total_solved = 0
        for row in ratio_table.row_names:
            curr_val = ratio_table.get(row)
            val = curr_val[self.nick]
            if val > 0:
                total_solved = total_solved + 1

        #for each ratio (1,1.05...), find the number of problems with this ratio, calc percentage and add row
        for ratio in ratios:
            _sum = 0
            for row in ratio_table.row_names:
                curr_val = ratio_table.get(row)
                val = curr_val[self.nick]
                if val <= ratio and val > 0:
                    _sum = _sum + 1

            if total_solved == 0:
                _sum_percent = 0
            else:
                _sum_percent = _sum * 100 / total_solved

            #add new row
            row_to_add = {}
            row_to_add['Percentage'] = _sum_percent
            row_to_add['h*/h(s)'] = ratio
            table.add_row(names[counter], row_to_add)
            counter = counter + 1
            #TODO - save only one ratio per percentage
            toWrite = str(ratio) + ',' + str(_sum_percent) + '\n'
            file.write(toWrite)

        file.close()

        self.create_commulative_h_star_table(domain)

        return table
Example #37
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_algorithm("blind", REPO, REV, ["--search", "astar(blind())"])
exp.add_algorithm("lmcut", REPO, REV, ["--search", "astar(lmcut())"])

# Add step that writes experiment files to disk.
exp.add_step("build", exp.build)

# Add step that executes all runs.
exp.add_step("start", exp.start_runs)

# Add step that collects properties from run directories and
# writes them to *-eval/properties.
exp.add_fetcher(name="fetch")

# Add report step (AbsoluteReport is the standard report).
exp.add_report(AbsoluteReport(attributes=ATTRIBUTES), outfile="report.html")

# Add scatter plot report step.
exp.add_report(
    ScatterPlotReport(attributes=["expansions"], filter_algorithm=["blind", "lmcut"]),
    outfile="scatterplot.png",
)

# Parse the commandline and show or run experiment steps.
exp.run_steps()
Example #38
#      (Translator(REPO, rev='WORK'), Preprocessor(REPO, rev=3097), Planner(MYOTHER_REPO)),
# ]

exp = DownwardExperiment(
    EXPPATH,
    REPO,  # combinations=combos,
    limits={'search_time': 60})

exp.add_suite(['gripper:prob01.pddl'])
exp.add_suite('zenotravel:pfile2')
exp.add_config('ff', ['--search', 'lazy(single(ff()))'])
exp.add_config('add', ['--search', 'lazy(single(add()))'])
exp.add_portfolio(
    os.path.join(REPO, 'src', 'search', 'downward-seq-sat-fdss-1.py'))

exp.add_report(AbsoluteReport('problem'),
               name='make-report',
               outfile='report-abs-p.html')


def solved(run):
    return run['coverage'] == 1


exp.add_step(
    Step('suite', SuiteReport(filter=solved), exp.eval_dir,
         os.path.join(exp.eval_dir, 'suite.py')))

exp.add_step(
    Step(
        'scatter',
Example #39
File: relative.py Project: galdreiman/PAC
    def write(self):
        if len(self.configs) != 2:
            logging.error('Relative reports are only possible for 2 configs. '
                          'Selected configs: "%s"' % self.configs)
            sys.exit(1)
        AbsoluteReport.write(self)
Example #41
File: hstar_2_h.py Project: galdreiman/PAC
    def __init__(self, **kwargs):
        AbsoluteReport.__init__(self, **kwargs)
        self.derived_properties.append(hstar_to_h)
Example #42
#! /usr/bin/env python
"""
Example downward experiment that runs FF on a single problem.

Please adapt EXPPATH and REPO to be the path where the experiment shall be put
and the location of your Fast Downward repository.

The file planner-ext.py contains an "advanced" version of this basic experiment.
"""

from downward.experiment import DownwardExperiment
from downward.reports.absolute import AbsoluteReport

EXPPATH = 'exp-planner'
REPO = '/home/jendrik/projects/Downward/downward'

exp = DownwardExperiment(EXPPATH, REPO)

exp.add_suite('gripper:prob01.pddl')
exp.add_config('ff', ['--search', 'lazy(single(ff()))'])

exp.add_report(AbsoluteReport())

exp()
Example #43
# Showcase some fetcher options.


def eval_dir(num):
    return os.path.join(exp.eval_dir, "test%d" % num)


exp.add_fetcher(dest=eval_dir(1),
                name="fetcher-test1",
                filter=only_two_algorithms)
exp.add_fetcher(dest=eval_dir(2),
                name="fetcher-test2",
                filter_algorithm="lama11")

# Add report steps.
exp.add_report(AbsoluteReport(attributes=["coverage", "cost"]),
               name="report-abs-d")
quality_filters = QualityFilters()
exp.add_report(
    AbsoluteReport(
        attributes=[
            "coverage",
            "cost",
            Attribute("quality", function=reports.arithmetic_mean),
        ],
        filter=[quality_filters.store_costs, quality_filters.add_quality],
    ),
    name="report-abs-builtin-filters",
)
exp.add_report(
    AbsoluteReport(attributes=["coverage"], filter=only_two_algorithms))

        run.set_property('problem', task.problem)
        run.set_property('algorithm', "features")
        run.set_property('domain_file', task.domain_file)
        run.set_property('problem_file', task.problem_file)
        # Every run has to have a unique id in the form of a list.
        # The algorithm name is only really needed when there are
        # multiple algorithms.
        run.set_property('id', [config_name, task.domain, task.problem])


add_exp()


# Add step that writes experiment files to disk.
exp.add_step('build', exp.build)

# Add step that executes all runs.
exp.add_step('start', exp.start_runs)

# Add step that collects properties from run directories and
# writes them to *-eval/properties.
exp.add_fetcher(name='fetch')

# Make a report.
exp.add_report(
    AbsoluteReport(attributes=ATTRIBUTES),
    outfile='%s.html' % report_name)

# Parse the commandline and run the specified steps.
exp.run_steps()