Code example #1
File: common_setup.py  Project: yanxi0830/downward
def make_comparison_tables():
    for rev1, rev2, outfile in get_revision_pairs_and_files():
        compared_configs = []
        for config in self._configs:
            config_nick = config.nick
            compared_configs.append(("%s-%s" % (rev1, config_nick),
                                     "%s-%s" % (rev2, config_nick),
                                     "Diff (%s)" % config_nick))
        report = ComparativeReport(compared_configs, **kwargs)
        report(self.eval_dir, outfile)
Code example #2
File: common_setup.py  Project: Eldeeqq/bi-zum
def make_comparison_tables():
    for rev1, rev2 in itertools.combinations(self._revisions, 2):
        compared_configs = []
        for config in self._configs:
            config_nick = config.nick
            compared_configs.append(("%s-%s" % (rev1, config_nick),
                                     "%s-%s" % (rev2, config_nick),
                                     "Diff (%s)" % config_nick))
        report = ComparativeReport(compared_configs, **kwargs)
        outfile = os.path.join(
            self.eval_dir, "%s-%s-%s-compare%s.%s" %
            (self.name, rev1, rev2, suffix, report.output_format))
        report(self.eval_dir, outfile)
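
Note: both excerpts above are nested closures, so self, kwargs, suffix and get_revision_pairs_and_files() come from an enclosing method (comments in later examples refer to it as exp.add_comparison_table_step()). A minimal sketch of how such a closure might be wrapped and registered as a step, assuming the add_step(name, function) API used elsewhere on this page; the enclosing method shown here is a hypothetical reconstruction, not part of the excerpts:

def add_comparison_table_step(self, **kwargs):
    # Hypothetical enclosing method; only the wiring is sketched here.
    kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)

    def make_comparison_tables():
        ...  # body as in code example #1 or #2 above

    # Register the closure so the tables are built as a named experiment step.
    self.add_step("make-comparison-tables", make_comparison_tables)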
Code example #3
}

for format in ["png", "tex"]:
    exp.add_report(
        ScatterPlotReport(
            attributes=["cost"],
            format=format,
            filter=only_two_algorithms,
            get_category=get_domain,
            scale="linear",
            matplotlib_options=matplotlib_options,
        ),
        outfile=os.path.join("plots", "scatter-domain." + format),
    )
exp.add_report(
    ComparativeReport([("lama11", "iter-hadd")], attributes=["coverage"]),
    name="report-compare",
    outfile="compare.html",
)

exp.add_report(
    TaskwiseReport(attributes=["cost", "coverage"], filter_algorithm=["ipdb"]),
    name="report-taskwise",
    outfile="taskwise.html",
)

exp.add_report(AbsoluteReport(), name="report-abs-p")

exp.add_step("finished", call, ["echo", "Experiment", "finished."])

if __name__ == "__main__":
    exp.run_steps()
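
The two callbacks used by the ScatterPlotReport above, only_two_algorithms and get_domain, are not part of the excerpt. A minimal hypothetical sketch of what they could look like, assuming lab's usual callback interfaces (filters receive one run dictionary; get_category receives the two runs behind a scatter point) and the algorithm names from the ComparativeReport call:

def only_two_algorithms(run):
    # Keep only the two algorithms that are compared in the plot.
    return run["algorithm"] in ["lama11", "iter-hadd"]

def get_domain(run1, run2):
    # Group scatter points by the task's domain.
    return run1["domain"]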
Code example #4
    "coverage", "error", "expansions_until_last_jump", "memory",
    "score_memory", "total_time", "score_total_time",
    "hash_set_load_factor", "hash_set_resizings"]

# Compare revisions.
# lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32
# lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64
for build in BUILDS:
    for rev1, rev2 in itertools.combinations(REVISIONS, 2):
        algorithm_pairs = [
            ("{rev1}-{config_nick}-{build}".format(**locals()),
             "{rev2}-{config_nick}-{build}".format(**locals()),
             "Diff ({config_nick}-{build})".format(**locals()))
            for config_nick, search in SEARCHES]
        exp.add_report(
            ComparativeReport(algorithm_pairs, attributes=attributes),
            name="issue213-{rev1}-vs-{rev2}-{build}".format(**locals()))

# Compare builds.
# lmcut-base-32 vs. lmcut-base-64
# lmcut-v1-32 vs. lmcut-v1-64
# lmcut-v3-32 vs. lmcut-v3-64
for build1, build2 in itertools.combinations(BUILDS, 2):
    for rev in REVISIONS:
        algorithm_pairs = [
            ("{rev}-{config_nick}-{build1}".format(**locals()),
             "{rev}-{config_nick}-{build2}".format(**locals()),
             "Diff ({config_nick}-{rev})".format(**locals()))
            for config_nick, search in SEARCHES]
        exp.add_report(
            ComparativeReport(algorithm_pairs, attributes=attributes),
Code example #5
File: v1-lama.py  Project: yanxi0830/downward
exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
#exp.add_comparison_table_step()

attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES

for build in BUILDS:
    algorithm_pairs = [
        ("{rev}-{nick1}".format(**locals()),
         "{rev}-{nick2}".format(**locals()), "Diff ({rev})".format(**locals()))
        for (nick1, _), (nick2, _) in itertools.combinations(CONFIG_NICKS, 2)
    ]
    exp.add_report(ComparativeReport(algorithm_pairs, attributes=attributes),
                   name="issue839-{nick1}-vs-{nick2}".format(**locals()))

exp.run_steps()
Code example #6
exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)

exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py')
exp.add_command('sg-parser', ['{sg_parser}'])

attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [
    Attribute("sg_construction_time", functions=[finite_sum], min_wins=True),
    Attribute("sg_peak_mem_diff", functions=[finite_sum], min_wins=True),
]

# Instead of comparing all revision pairs in separate reports, create a
# single report comparing neighboring revisions.
# exp.add_comparison_table_step(attributes=attributes)
compared_configs = []
for rev1, rev2 in zip(REVISIONS[:-1], REVISIONS[1:]):
    for config in CONFIGS:
        config_nick = config.nick
        compared_configs.append(("{rev1}-{config_nick}".format(**locals()),
                                 "{rev2}-{config_nick}".format(**locals()),
                                 "Diff ({config_nick})".format(**locals())))
exp.add_report(ComparativeReport(compared_configs, attributes=attributes),
               name="compare-all-tags")

exp.run_steps()
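
The zip(REVISIONS[:-1], REVISIONS[1:]) idiom above pairs every revision with its direct successor, so the single report only compares neighboring tags rather than all pairs. A small illustration with hypothetical revision names:

REVISIONS = ["issue000-base", "issue000-v1", "issue000-v2"]  # hypothetical tags
for rev1, rev2 in zip(REVISIONS[:-1], REVISIONS[1:]):
    print(rev1, "vs.", rev2)
# Prints: issue000-base vs. issue000-v1
#         issue000-v1 vs. issue000-v2
# itertools.combinations(REVISIONS, 2) would additionally yield
# issue000-base vs. issue000-v2.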
Code example #7
def main(revisions=None):
    benchmarks_dir = os.environ["DOWNWARD_BENCHMARKS_IPC2018"]
    # optimal union satisficing
    suite = []
    #suite.extend(['briefcaseworld', 'cavediving-14-adl', 'citycar-sat14-adl', 'fsc-blocks', 'fsc-grid-a1', 'fsc-grid-a2', 'fsc-grid-r', 'fsc-hall', 'fsc-visualmarker', 'gedp-ds2ndp', 'miconic-simpleadl', 't0-adder', 't0-coins', 't0-comm', 't0-grid-dispose', 't0-grid-push', 't0-grid-trash', 't0-sortnet', 't0-sortnet-alt', 't0-uts'])
    suite.extend([
        "agricola-sat18", "caldera-sat18", "caldera-split-sat18",
        "data-network-sat18", "flashfill-sat18", "nurikabe-sat18",
        "organic-synthesis-sat18", "organic-synthesis-split-sat18",
        "settlers-sat18", "snake-sat18", "spider-sat18", "termes-sat18"
    ])

    environment = OracleGridEngineEnvironment(queue='all.q')

    BUILD_OPTIONS = ["release64"]
    DRIVER_OPTIONS = [
        "--build", "release64", "--overall-time-limit", "30m",
        "--overall-memory-limit", "4096M"
    ]

    configs = {
        IssueConfig('rb-ce-cerberus', [
            '--heuristic', 'hrb=RB(dag=from_coloring, extract_plan=true)',
            '--search', 'lazy_greedy([hrb],reopen_closed=false)'
        ],
                    build_options=BUILD_OPTIONS,
                    driver_options=DRIVER_OPTIONS),
    }

    exp = IssueExperiment(
        revisions=revisions,
        configs=configs,
        environment=environment,
    )
    exp.add_suite(benchmarks_dir, suite)

    exp.add_parser(exp.LAB_STATIC_PROPERTIES_PARSER)
    exp.add_parser(exp.LAB_DRIVER_PARSER)
    exp.add_parser(exp.EXITCODE_PARSER)
    exp.add_parser(exp.TRANSLATOR_PARSER)
    exp.add_parser(exp.SINGLE_SEARCH_PARSER)
    #exp.add_parser(exp.PLANNER_PARSER)

    attributes = exp.DEFAULT_TABLE_ATTRIBUTES

    exp.add_step('build', exp.build)
    exp.add_step('start', exp.start_runs)
    exp.add_fetcher(name='fetch')

    exp.add_absolute_report_step(attributes=attributes)
    report_name = os.path.basename(exp.path.rstrip('/'))
    exp.add_step('copy_report', subprocess.call, [
        'cp',
        os.path.join(exp.eval_dir, '%s.html' % report_name),
        '/storage/US1J6721/EXTERNAL'
    ])

    exp.add_step('print_dest', subprocess.call, [
        'echo',
        os.path.join("https://syss063.pok.stglabs.ibm.com/users/mkatz/storage",
                     '%s.html' % report_name)
    ])

    algorithm_nicks = ['translate-symm-stabgoal-stabinit']

    OTHER_REV = ''
    exp.add_fetcher('data/2018-06-22-lifted-stabinit-stabgoal-order-mk-eval',
                    filter_algorithm=[
                        '{}-{}'.format(OTHER_REV, x) for x in algorithm_nicks
                    ])

    exp.add_report(
        ComparativeReport(
            algorithm_pairs=[
                ('{}-{}'.format(OTHER_REV, x), '{}-{}'.format(REVISION, x))
                for x in algorithm_nicks],
            attributes=attributes,
        ),
        outfile=os.path.join(exp.eval_dir, 'a' + exp.name + '-compare.html'),
    )

    exp.run_steps()
Code example #8
def generate_pairwise_comparison_rp(exp, args, header_config):
    if len(args.SOLVERS) > 0:
        algo_pairs = []
        for i, algo1 in enumerate(args.SOLVERS):
            for j, algo2 in enumerate(args.SOLVERS):
                if i < j:
                    algo_pairs.append((basename(algo1), basename(algo2)))
        exp.add_report(ComparativeReport(algo_pairs, attributes=['coverage']),
                       outfile='coverage_report.html')
        # Compare performance of algorithms using Scatter Plot
        matplotlib_options = {
            'font.family': 'serif',
            'font.weight': 'normal',
            # Used if more specific sizes not set.
            #'font.size': 20,
            #'axes.labelsize': 20,
            #'axes.titlesize': 30,
            #'legend.fontsize': 22,
            #'xtick.labelsize': 10,
            #'ytick.labelsize': 10,
            #'lines.markersize': 10,
            #'lines.markeredgewidth': 0.25,
            #'lines.linewidth': 1,
            # Width and height in inches.
            'figure.figsize': [8, 8],
            'savefig.dpi': 100,
            'scatter.edgecolors': 'black',
            'axes.grid': True,
            'image.aspect': 'equal',
            'axes.formatter.useoffset': False,
        }
        matplotlib_cactus_options = {
            'figure.figsize': [16, 9],
            'savefig.dpi': 100,
            'axes.grid': True,
            'axes.formatter.useoffset': False,
            'legend.fontsize' : 'small',
            'lines.markersize'  : 2,
            'legend.markerscale' : 4,
            'markers.fillstyle' : 'none',
        }   
        index = 0
        """
        for algo1, algo2 in algo_pairs :
            if(re.search(r'_ste',  algo1) and re.search(r'_ste', algo2)):
                continue
            if(re.search(r'_ste', algo2)):
                temp = algo1
                algo1 = algo2
                algo2 = temp
            exp.add_report(ScatterPlotReport(
                title = str(args.EXP_NAME+
                    " - number of node expanded").title(),
                attributes=["expanded"],
                filter_algorithm=[algo1, algo2],
                filter=remove_cov, get_category=domain_as_category,
                xscale='log', yscale='log',
                show_missing=False,
                matplotlib_options=matplotlib_options, 
                format="png"  # Use "tex" for pgfplots output.
                ), name="scatterplot-expanded_"+str(index))
            exp.add_report(ScatterPlotReport(
                title = str(args.EXP_NAME+
                    " - number of nodes generated").title(),
                attributes=["generated"],
                filter_algorithm=[algo1, algo2],
                filter=remove_cov, get_category=domain_as_category,
                xscale='log', yscale='log',
                show_missing=False,
                matplotlib_options=matplotlib_options, 
                format="png"  # Use "tex" for pgfplots output.
                ), name="scatterplot-generated_"+str(index))
            exp.add_report(ScatterPlotReport(
                title = str(args.EXP_NAME+
                    " - number of nodes pruned").title(),
                attributes=["pruned"],
                filter_algorithm=[algo1, algo2],
                filter=remove_cov, get_category=domain_as_category,
                xscale='linear', yscale='linear',
                show_missing=False,
                matplotlib_options=matplotlib_options, 
                format="png"  # Use "tex" for pgfplots output.
                ), name="scatterplot-pruned_"+str(index))
            exp.add_report(ScatterPlotReport(
                title = str(args.EXP_NAME+
                    " - runtime").title(),
                attributes=["runtime"],
                filter_algorithm=[algo1, algo2],
                filter=remove_cov, get_category=domain_as_category,
                show_missing=False,
                xscale='log', yscale='log',
                matplotlib_options=matplotlib_options, 
                format="png"  # Use "tex" for pgfplots output.
                ), name="scatterplot-runtime_"+str(index))
            exp.add_report(ScatterPlotReport(
                title = str(args.EXP_NAME+
                    " - max resident memory").title(),
                attributes=["memory"],
                filter_algorithm=[algo1, algo2],
                filter=remove_cov, get_category=domain_as_category,
                show_missing=False,
                xscale='log', yscale='log',
                matplotlib_options=matplotlib_options, 
                format="png"  # Use "tex" for pgfplots output.
                ), name="scatterplot-memory_"+str(index))
            exp.add_report(ScatterPlotReport(
                title = str(args.EXP_NAME+
                    " - max width reached").title(),
                attributes=["width"],
                filter_algorithm=[algo1, algo2],
                filter=remove_cov, get_category=domain_as_category,
                xscale='linear', yscale='linear',
                show_missing=False,
                matplotlib_options=matplotlib_options, 
                format="png"  # Use "tex" for pgfplots output.
                ), name="scatterplot-width_"+str(index))
            exp.add_report(ScatterPlotReport(
                title = str(args.EXP_NAME+
                    " - cost of plan").title(),
                attributes=["cost"],
                filter_algorithm=[algo1, algo2],
                filter=remove_cov, get_category=domain_as_category,
                xscale='log', yscale='log',
                show_missing=False,
                matplotlib_options=matplotlib_options, 
                format="png"  # Use "tex" for pgfplots output.
                ), name="scatterplot-quality_"+str(index))
            index += 1
            # End loop
        """

        # Plot instances solved over various execution properties.
        plot_runtime = True
        plot_generated = True
        plot_expanded = True
        plot_memory = True
        for algo in args.SOLVERS:
            ATTRIBUTES = get_attributes(header_config, algo)
            if 'runtime_solver' not in ATTRIBUTES:
                plot_runtime = False
            if 'generated' not in ATTRIBUTES:
                plot_generated = False
            if 'expanded' not in ATTRIBUTES:
                plot_expanded = False
            if 'memory' not in ATTRIBUTES:
                plot_memory = False

        if plot_runtime :
            exp.add_report(CactusPlotReport(
                title = str(args.EXP_NAME+
                    " - Instances solved over time").title(),
                time_limit = args.TIME_LIMIT,
                attributes=["runtime_solver"],
                xscale='linear', yscale='linear',
                ylabel='Number of Instances Solved',
                matplotlib_options=matplotlib_cactus_options, 
                format="png"  # Use "tex" for pgfplots output.
                ), name="Instances solved over time"+str(index))
        if plot_generated :
            exp.add_report(CactusPlotReport(
                title = str(args.EXP_NAME+
                    " - Instances solved over nodes generated").title(),
                time_limit = args.TIME_LIMIT,
                attributes=["generated"],
                xscale='log', yscale='linear',
                ylabel='Number of Instances Solved', xlabel='Number of generated nodes in thousands',
                matplotlib_options=matplotlib_cactus_options,
                format="png"  # Use "tex" for pgfplots output.
                ), name="Instances solved over nodes generated"+str(index))
        if plot_expanded :
            exp.add_report(CactusPlotReport(
                title = str(args.EXP_NAME+
                    " - Instances solved over nodes expanded").title(),
                time_limit = args.TIME_LIMIT,
                attributes=["expanded"],
                xscale='log', yscale='linear',
                ylabel='Number of Instances Solved', xlabel='Number of expanded nodes in thousands',
                matplotlib_options=matplotlib_cactus_options,
                format="png"  # Use "tex" for pgfplots output.
                ), name="Instances solved over nodes expanded"+str(index))
        if plot_memory:
            exp.add_report(CactusPlotReport(
                title = str(args.EXP_NAME+
                    " - Instances solved over memory consumed").title(),
                time_limit = args.TIME_LIMIT,
                attributes=["memory"],
                xscale='linear', yscale='linear',
                ylabel='Number of Instances Solved', xlabel="Memory consumed (MB)",
                matplotlib_options=matplotlib_cactus_options,
                format="png"  # Use "tex" for pgfplots output.
                ), name="Instances solved over memory consumed"+str(index))


        search_params = [] #[ ['k-BFWS'], ['BFWS-f5']]
        for search_param in search_params:
            exp.add_report(CactusPlotReport(
                title = str(args.EXP_NAME+
                    " - Instances solved over time").title(),
                search_param = search_param,
                time_limit = args.TIME_LIMIT,
                attributes=["runtime"],
                xscale='log', yscale='linear',
                ylabel='Number of Instances Solved',
                matplotlib_options=matplotlib_cactus_options, 
                format="png"  # Use "tex" for pgfplots output.
                ), name="_".join(search_param)+"_Instances solved over time"+str(index))
            exp.add_report(CactusPlotReport(
                title = str(args.EXP_NAME+
                    " - Instances solved over nodes generated").title(),
                search_param = search_param,
                time_limit = args.TIME_LIMIT,
                attributes=["generated"],
                xscale='log', yscale='linear',
                ylabel='Number of Instances Solved', xlabel='Number of generated nodes in thousands',
                matplotlib_options=matplotlib_cactus_options, 
                format="png"  # Use "tex" for pgfplots output.
                ), name="_".join(search_param)+"_Instances solved over nodes generated"+str(index))
            exp.add_report(CactusPlotReport(
                title = str(args.EXP_NAME+
                    " - Instances solved over nodes expanded").title(),
                search_param = search_param,
                time_limit = args.TIME_LIMIT,
                attributes=["expanded"],
                xscale='log', yscale='linear',
                ylabel='Number of Instances Solved', xlabel='Number of expanded nodes in thousands',
                matplotlib_options=matplotlib_cactus_options,
                format="png"  # Use "tex" for pgfplots output.
                ), name="_".join(search_param)+"_Instances solved over nodes expanded"+str(index))
            exp.add_report(CactusPlotReport(
                title = str(args.EXP_NAME+
                    " - Instances solved over memory consumed").title(),
                search_param = search_param,
                time_limit = args.TIME_LIMIT,
                attributes=["memory"],
                xscale='linear', yscale='linear',
                ylabel='Number of Instances Solved', xlabel="Memory consumed (MB)",
                matplotlib_options=matplotlib_cactus_options,
                format="png"  # Use "tex" for pgfplots output.
                ), name="_".join(search_param)+"_Instances solved over memory consumed"+str(index))
Code example #9
File: v3-blind.py  Project: Eldeeqq/bi-zum
    priority=0, email="*****@*****.**")

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()

algorithm_pairs = []
revision1, revision2 = REVISIONS
for build in BUILDS:
    for config_nick, search in SEARCHES:
        algorithm_pairs.append(
            ("{revision1}-{config_nick}-{build}".format(**locals()),
             "{revision2}-{config_nick}-{build}".format(**locals()),
             "Diff ({config_nick}-{build})".format(**locals())))
exp.add_report(
    ComparativeReport(
        algorithm_pairs,
        attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES),
    name="issue213-v2-vs-v3-blind")

exp.run_steps()
Code example #10
    'expansions',
    'initial_h_value',
    'generated',
    'memory',
    'planner_memory',
    'planner_time',
    'run_dir',
    'search_time',
    'total_time',
])


def get_keep_redundant_pairs():
    pairs = []
    for h in HEURISTICS:
        for ls in LAYER_STRATEGY:
            for overapprox in OVERAPPROXIMATE:
                for lit in NECESSARY_LITERALS:
                    pairs.append(
                        ("tip-%s-%s-%s-%s" % (h, ls, overapprox, lit),
                         "tip-%s-%s-%s-%s-kr" % (h, ls, overapprox, lit)))
    return pairs
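
get_keep_redundant_pairs() walks the cross product of four option lists with nested loops; itertools.product yields the same pairs in one comprehension. A sketch under the assumption that HEURISTICS, LAYER_STRATEGY, OVERAPPROXIMATE and NECESSARY_LITERALS are the same module-level lists used above:

import itertools

def get_keep_redundant_pairs_alt():
    # Equivalent to the nested loops above: one pair per option combination.
    return [("tip-%s-%s-%s-%s" % combo, "tip-%s-%s-%s-%s-kr" % combo)
            for combo in itertools.product(
                HEURISTICS, LAYER_STRATEGY, OVERAPPROXIMATE, NECESSARY_LITERALS)]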


exp.add_absolute_report_step(attributes=attributes)
exp.add_report(ComparativeReport(get_keep_redundant_pairs(),
                                 attributes=attributes),
               outfile="issue453-v2-compare_keep_redundant.html")

exp.run_steps()
Code example #11
    'lines.markeredgewidth': 0.25,
    'lines.linewidth': 1,
    'figure.figsize': [8, 8],  # Width and height in inches.
    'savefig.dpi': 100,
}

for format in ["png", "tex"]:
    exp.add_report(ScatterPlotReport(attributes=['cost'],
                                     format=format,
                                     filter=only_two_algorithms,
                                     get_category=get_domain,
                                     xscale='linear',
                                     yscale='linear',
                                     matplotlib_options=matplotlib_options),
                   outfile=os.path.join('plots', 'scatter-domain.' + format))
exp.add_report(ComparativeReport([('lama11', 'iter-hadd')],
                                 attributes=['coverage']),
               name='report-compare',
               outfile='compare.html')

exp.add_report(TaskwiseReport(attributes=['cost', 'coverage'],
                              filter_algorithm=['ipdb']),
               name='report-taskwise',
               outfile='taskwise.html')

exp.add_report(AbsoluteReport(), name='report-abs-p')

exp.add_step('finished', call, ['echo', 'Experiment', 'finished.'])

if __name__ == '__main__':
    exp.run_steps()
Code example #12
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)

exp.add_fetcher('data/issue707-v1-eval')
exp.add_fetcher('data/issue707-v2-pruning-variants-eval')

outfile = os.path.join(exp.eval_dir, "issue707-v1-v2-dfp-compare.html")
exp.add_report(
    ComparativeReport(
        algorithm_pairs=[
            ('%s-dfp-b50k' % 'issue707-v1',
             '%s-dfp-b50k-nopruneunreachable' % 'issue707-v2'),
            ('%s-dfp-b50k' % 'issue707-v1',
             '%s-dfp-b50k-nopruneirrelevant' % 'issue707-v2'),
            ('%s-dfp-b50k' % 'issue707-v1',
             '%s-dfp-b50k-noprune' % 'issue707-v2'),
            #('%s-dfp-f50k' % 'issue707-v1', '%s-dfp-f50k-nopruneunreachable' % 'issue707-v2'),
            #('%s-dfp-f50k' % 'issue707-v1', '%s-dfp-f50k-nopruneirrelevant' % 'issue707-v2'),
            #('%s-dfp-f50k' % 'issue707-v1', '%s-dfp-f50k-noprune' % 'issue707-v2'),
            #('%s-dfp-ginf' % 'issue707-v1', '%s-dfp-ginf-nopruneunreachable' % 'issue707-v2'),
            #('%s-dfp-ginf' % 'issue707-v1', '%s-dfp-ginf-nopruneirrelevant' % 'issue707-v2'),
            #('%s-dfp-ginf' % 'issue707-v1', '%s-dfp-ginf-noprune' % 'issue707-v2'),
        ],
        attributes=attributes),
    outfile=outfile)
exp.add_step('publish-issue707-v1-v2-dfp-compare.html', subprocess.call,
             ['publish', outfile])

exp.run_steps()
Code example #13
File: v5-compare.py  Project: yanxi0830/downward
attributes.extend(extra_attributes)

exp.add_fetcher('data/issue668-v5-hack-eval')
exp.add_fetcher('data/issue668-v5-clean-eval')

exp.add_report(ComparativeReport(
    attributes=attributes,
    algorithm_pairs=[
        ('%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v5-hack',
         '%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v5-clean'),
        ('%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-hack',
         '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-clean'),
        ('%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v5-hack',
         '%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v5-clean'),
        ('%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v5-hack',
         '%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v5-clean'),
        ('%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-hack',
         '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-clean'),
        ('%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v5-hack',
         '%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v5-clean'),
        ('%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v5-hack',
         '%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v5-clean'),
        ('%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-hack',
         '%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-clean'),
        ('%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v5-hack',
         '%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v5-clean'),
    ]),
               outfile='issue668-v5-hack-vs-clean-abp.html')
exp.add_report(ComparativeReport(
    attributes=attributes,
    algorithm_pairs=[
        ('%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v5-hack',
Code example #14
File: fetch.py  Project: Eldeeqq/bi-zum
        return True

    def filter_tasks_with_equal_values(self, run):
        values = self._tasks_to_values[self._get_task(run)]
        return len(set(values)) != 1


exp.add_fetcher(src='data/issue939-base-eval')
exp.add_fetcher(src='data/issue939-v1-eval', merge=True)

ATTRIBUTES = ["error", "run_dir", "translator_*", "translator_output_sas_hash"]
#exp.add_comparison_table_step(attributes=ATTRIBUTES)

same_value_filters = SameValueFilters("translator_output_sas_hash")
# exp.add_comparison_table_step(
#     name="filtered",
#     attributes=ATTRIBUTES,
#     filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values])

exp.add_report(TranslatorDiffReport(
    attributes=["domain", "problem", "algorithm", "run_dir"]),
               outfile="different_output_sas.csv")

exp.add_report(AbsoluteReport(attributes=ATTRIBUTES))
exp.add_report(
    ComparativeReport(
        [('issue939-base-translate-only', 'issue939-v1-translate-only')],
        attributes=ATTRIBUTES))

exp.run_steps()
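
The commented-out add_comparison_table_step call above shows how the two SameValueFilters passes are meant to be chained: the first pass records the translator_output_sas_hash per task, the second drops tasks where all algorithms produced the same hash. The same filter list can be attached to other reports as well; a minimal sketch (the report name here is hypothetical):

exp.add_report(
    AbsoluteReport(
        attributes=ATTRIBUTES,
        filter=[same_value_filters.store_values,
                same_value_filters.filter_tasks_with_equal_values]),
    name="filtered-absolute-report")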