Example #1
import nose.tools as nt

import svcomp15  # project-local module for reading SV-COMP results
import utils     # project-local helpers under test


def derive_total_benchmark_order_test():
    category_results = svcomp15.read_category('static/results-xml-raw', 'mixed-examples')
    r = utils.derive_total_benchmark_order(
        category_results,
        'static/sv-benchmarks/c/mixed-examples/data_structures_set_multi_proc_false-unreach-call_ground.i',
        svcomp15.compare_results)
    nt.assert_equal(r, ['cpachecker', 'smack', 'cbmc'])
Example #2
import nose.tools as nt

import svcomp15  # project-local module for reading SV-COMP results
import utils     # project-local helpers under test


def derive_total_benchmark_order_test():
    category_results = svcomp15.read_category('static/results-xml-raw',
                                              'mixed-examples')
    r = utils.derive_total_benchmark_order(
        category_results,
        'static/sv-benchmarks/c/mixed-examples/data_structures_set_multi_proc_false-unreach-call_ground.i',
        svcomp15.compare_results)
    nt.assert_equal(r, ['cpachecker', 'smack', 'cbmc'])
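The test above pins the expected best-first ranking for one benchmark. As a point of reference, here is a minimal sketch of how such a total order could be derived, assuming each entry in category_results is a per-tool DataFrame indexed by sourcefile and that compare_results follows cmp conventions (negative when its first argument is the better result); the project's actual utils.derive_total_benchmark_order may differ:

import functools


def derive_total_benchmark_order_sketch(category_results, sourcefile,
                                        compare_results):
    # Hypothetical data layout: category_results maps tool name -> DataFrame.
    # Rank tool names best-first by pairwise comparison of each tool's
    # row for the given sourcefile.
    def cmp(a, b):
        return compare_results(category_results[a].loc[sourcefile],
                               category_results[b].loc[sourcefile])
    return sorted(category_results, key=functools.cmp_to_key(cmp))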
Example #3
import pandas as pd

import utils  # project-local helpers


def create_benchmark_best_tool_df(results, compare_results):
    """
    Map each sourcefile to the tool that handled it best.

    :param results: per-tool result DataFrames, keyed by tool name
    :param compare_results: comparison function used to rank tool results
    :return: DataFrame indexed by 'sourcefile' with a 'best_tool' column
    """
    df = pd.concat(results, axis=1)
    # Rows with NA values give us no information, so drop them.
    df.dropna(inplace=True)
    ret_df = pd.DataFrame(columns=['best_tool'])
    ret_df.index.name = 'sourcefile'
    for sourcefile in df.index:
        # The derived order is best-first, so the first entry wins.
        ret_df.loc[sourcefile, 'best_tool'] = utils.derive_total_benchmark_order(results,
                                                                                 sourcefile,
                                                                                 compare_results)[0]
    return ret_df
Example #4
import pandas as pd

import utils  # project-local helpers


def create_benchmark_best_tool_df(results, compare_results):
    """
    Map each sourcefile to the tool that handled it best.

    :param results: per-tool result DataFrames, keyed by tool name
    :param compare_results: comparison function used to rank tool results
    :return: DataFrame indexed by 'sourcefile' with a 'best_tool' column
    """
    df = pd.concat(results, axis=1)
    # Rows with NA values give us no information, so drop them.
    df.dropna(inplace=True)
    ret_df = pd.DataFrame(columns=['best_tool'])
    ret_df.index.name = 'sourcefile'
    for sourcefile in df.index:
        # The derived order is best-first, so the first entry wins.
        ret_df.loc[sourcefile, 'best_tool'] = utils.derive_total_benchmark_order(
            results, sourcefile, compare_results)[0]
    return ret_df
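A possible invocation, reusing the category loading shown in the first example (module names and paths as in the tests above):

category_results = svcomp15.read_category('static/results-xml-raw', 'mixed-examples')
best_tools = create_benchmark_best_tool_df(category_results, svcomp15.compare_results)
print(best_tools.head())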