Beispiel #1
0
def main():
    """Parse CLI arguments, build the accelerator environment, and analyze a sample layer.

    If --arch-spec points to a JSON file, the "arch_details" section configures
    the Environment; otherwise defaults are used.
    """
    parser = argparse.ArgumentParser(
        description=
        "Analyzing execution method for mapping loops on coarse-grained programmable dataflow accelerators."
    )

    # Set architecture specifications
    parser.add_argument(
        "--arch-spec",
        help="Path of the file containing architecture specification.")

    args = parser.parse_args()
    if args.arch_spec:
        with open(args.arch_spec) as jsonFile:
            # Parameters used by analytical model of dataflow execution
            json_data = json.load(jsonFile)
            env_params = json_data["arch_details"]
            env = expr_parameters.Environment(**env_params)
    else:
        env = expr_parameters.Environment()
        # Fix: removed dead local `params = expr_parameters.ExprParameters(env)`;
        # it was assigned only on this branch and never used afterwards.

    layer = get_sample_layer(env)
    get_EDP(layer)
    get_energy(layer)
    get_performance(layer)
Beispiel #2
0
def get_sample_layer():
    """Build a sample ResNet-18-style conv layer with fixed tiling and loop ordering."""
    environment = expr_parameters.Environment()
    environment.pe_pipeline_stages = 1
    layer_spec = {
        "name": "test_conv2d12_from_resnet18_v1",
        "channels": 64,
        "kernel_size": "[6, 6]",
        "padding": "[2, 2]",
        "strides": "[2, 2]",
        "output_shape": [1, 64, 5, 5],
        "input_shape": [1, 64, 10, 10],
        "batch_size": 8,
    }
    sample = ConvLayer(environment, **layer_spec)

    # Tiling factors per index variable: (DRAM, SPM, RF, SPATIAL).
    tilings = (
        ("N", [2, 2, 2, 1]),
        ("M", [2, 4, 2, 4]),
        ("C", [4, 2, 2, 4]),
        ("Ox", [1, 5, 1, 1]),
        ("Oy", [1, 1, 5, 1]),
        ("Fx", [1, 2, 3, 1]),
        ("Fy", [2, 1, 3, 1]),
    )
    for iv, factor in tilings:
        sample.set_tiling(iv, factor)

    # Same loop ordering at every memory level; pass a fresh list per call.
    ordering = ["N", "C", "Fx", "Fy", "M", "Ox", "Oy"]
    for level in sample._loop_IVs:
        sample.set_ordering(level, list(ordering))

    return sample
Beispiel #3
0
def run_dataflow(layer_index, env_config=None):
    """Generate dataflow combinations for the ResNet layer at `layer_index`."""
    environment = expr_parameters.Environment()
    environment.pe_pipeline_stages = 1
    conv = ConvLayer(environment, **layer_info.resnet_parameters[layer_index])

    # Base trip counts in the canonical index-variable order.
    trip_counts = [conv.base_TCs[iv] for iv in IV_ORDER]
    return generate_combinations(layer_index, trip_counts, env_config)
Beispiel #4
0
def find_config(args):
    """Sweep (n_PE, SPM size, RF size) design points and pickle the optimal
    parameters found for each configuration.

    Results are written to "<args.config_file_name>.p" as a dict keyed by
    (n_PE, spm_size, rf_size).
    """
    config_file_name = args.config_file_name

    n_PEs = dse_parameters.n_PEs
    energy_rf = dse_parameters.energy_rf
    energy_spm = dse_parameters.energy_spm

    env = expr_parameters.Environment()
    env.pe_pipeline_stages = 1
    layer_args = layer_info.resnet_parameters[args.layer_index]
    layer = ConvLayer(env, **layer_args)

    opt_parameters = {}

    # Build the work list, skipping configurations rejected by pass_config.
    params = []
    for n_PE in n_PEs:
        for spm_size, spm_energy in energy_spm.items():
            for rf_size, rf_energy in energy_rf.items():
                if pass_config(n_PE, rf_size, spm_size):
                    continue
                params.append(
                    (layer, n_PE, spm_size, rf_size, spm_energy, rf_energy))

    pbar = tqdm.tqdm(total=len(params))

    # Renamed from `multiprocessing` to avoid shadowing the stdlib module name.
    use_multiprocessing = True
    if use_multiprocessing:
        with concurrent.futures.ProcessPoolExecutor() as executor:
            for ret_tuple in executor.map(find_opt_parameters, params):
                key, global_config, env_config, num_evaluations = ret_tuple
                pbar.update()
                opt_parameters[key] = (global_config, env_config, num_evaluations)
    else:
        # Bug fix: this branch previously discarded the results and never
        # updated opt_parameters or the progress bar.
        for param in params:
            key, global_config, env_config, num_evaluations = find_opt_parameters(param)
            pbar.update()
            opt_parameters[key] = (global_config, env_config, num_evaluations)
    pbar.close()

    print(opt_parameters)
    # Use a context manager so the file handle is closed deterministically.
    with open(config_file_name + ".p", "wb") as out_file:
        pickle.dump(opt_parameters, out_file)
Beispiel #5
0
def get_sample_layer():
    """Construct a 1000x1000x1000 GEMM example layer with a fixed tiling."""
    env = expr_parameters.Environment()
    # C(M,N) = A(M, K) . B(K, N)
    gemm_args = {
        "name": "gemm_example",
        "M": 1000,
        "K": 1000,
        "N": 1000,
    }
    # Tiling factors (DRAM, SPM, RF, SPATIAL):  ((250, 2, 20), (2, 1, 1), (1, 4, 50), (2, 125, 1))
    # Ordering (DRAM, SPM): (['Y', 'K', 'X'], ['Y', 'X', 'K'])

    gemm = GemmLayer(env, **gemm_args)
    for iv, factor in (("X", [250, 2, 1, 2]),
                       ("Y", [2, 1, 4, 125]),
                       ("K", [20, 1, 50, 1])):
        gemm.set_tiling(iv, factor)
    return gemm
Beispiel #6
0
def run_dse(args):
    """Evaluate every pre-computed design point for one ResNet layer and export
    the results as a pickled DataFrame and an Excel sheet.

    Reads "<args.config_file_name>.p" (a dict keyed by (n_PE, spm_size, rf_size))
    produced by the configuration-search pass.
    """
    config_file_name = args.config_file_name + ".p"
    # Use context managers for file handles (fix: previously left open).
    with open(config_file_name, "rb") as config_file:
        global_configs = pickle.load(config_file)

    env = expr_parameters.Environment()
    env.pe_pipeline_stages = 1
    layer_args = layer_info.resnet_parameters[args.layer_index]
    layer = ConvLayer(env, **layer_args)

    columns = ("spm_size", "rf_size", "n_PE", "edp", "total_evaluated",
        "start_time", "end_time")
    records = []

    print("evaluating {} design points".format(len(global_configs)))
    for i, (key, (global_config, env_config, num_evaluations)) in \
            enumerate(global_configs.items(), start=1):
        n_PE, spm_size, rf_size = key
        if global_config is None:  # fix: identity check instead of == None
            # No valid configuration was found for this design point.
            min_edp = -1
            start_time, end_time = None, None
        else:
            start_time = datetime.datetime.now()
            result = optimizer.optimize(layer, global_config, env_config)
            min_edp = result["min_edp"]
            end_time = datetime.datetime.now()

        records.append((spm_size, rf_size, n_PE, min_edp, num_evaluations, start_time, end_time))

        print("{}/{}, evaluated: {}\n".format(i, len(global_configs), num_evaluations))

    final_df = pd.DataFrame.from_records(records, columns=columns)
    final_df = final_df.astype({
        "spm_size": int,
        "rf_size": int,
        "n_PE": int,
        "total_evaluated": int,
    })
    # NOTE(review): config_file_name already ends in ".p", so this output name
    # contains ".p.p"; preserved for compatibility with downstream consumers.
    with open("dse_output" + config_file_name + ".p", "wb") as out_file:
        pickle.dump(final_df, out_file)

    file_name = "dse_output_layer_{}.xlsx".format(config_file_name)
    final_df.to_excel(file_name)
Beispiel #7
0
def get_sample_layer():
    """Return a 256-channel 3x3 conv layer (resnet18-style) with a fixed tiling."""
    environment = expr_parameters.Environment()
    conv_spec = {
        "name": "test_conv2d12_from_resnet18_v1",
        "channels": 256,
        "kernel_size": "[3, 3]",
        "padding": "[1, 1]",
        "strides": "[1, 1]",
        "output_shape": [1, 256, 14, 14],
        "input_shape": [1, 256, 14, 14],
        "batch_size": 4,
    }
    sample = ConvLayer(environment, batch=4, **conv_spec)

    # Tiling factors per index variable: (DRAM, SPM, RF, SPATIAL).
    tiling_by_iv = (
        ("N", [4, 1, 1, 1]),
        ("M", [16, 1, 8, 2]),
        ("C", [8, 16, 1, 2]),
        ("Ox", [1, 1, 2, 7]),
        ("Oy", [1, 1, 2, 7]),
        ("Fx", [1, 1, 3, 1]),
        ("Fy", [1, 1, 3, 1]),
    )
    for iv, factor in tiling_by_iv:
        sample.set_tiling(iv, factor)
    return sample
Beispiel #8
0
def main():
    """Optimize the dataflow execution of one ResNet layer and print the
    optimized EDP, energy, and cycle results along with exploration stats."""
    args = parse_arguments()
    layer_arguments = layer_info.resnet_parameters[int(args.layer)]
    env = expr_parameters.Environment()
    env.pe_pipeline_stages = 1
    layer = ConvLayer(env=env, **layer_arguments)
    print("compiling layer", args.layer)

    params = expr_parameters.ExprParameters(env)

    if args.opt_utilization:
        # Prune execution methods below these resource-utilization thresholds.
        params.PE_UTILIZATION = 0.8
        params.RF_UTILIZATION = 0.8
        params.SPM_UTILIZATION = 0.5

    # Idiom fix: bool(x) instead of "True if x else False".
    params.PRUNE_NO_FEATURE_DRAM = bool(args.opt_no_feature_dram)
    params.PRUNE_NO_REDUCTION = bool(args.opt_no_spatial_reduction)

    start_time = datetime.datetime.now()
    result = optimizer.optimize(layer, params)
    end_time = datetime.datetime.now()

    delta = end_time - start_time
    num_evaluations = optimizer.get_num_evaluations(layer, params)
    print("Optimized energy-delay product (EDP): %.4E" %  result["min_edp"])
    print("Execution method for optimized EDP: \nTiling factors (DRAM, SPM, RF, SPATIAL): ", result["min_edp_seq"])
    print("Ordering (DRAM, SPM):", result["min_edp_ordering"])
    print("Minimized energy: %.4E" % result["min_energy"])
    print("Execution method for minimized energy: \nTiling factors (DRAM, SPM, RF, SPATIAL): ", result["min_energy_seq"])
    print("Ordering (DRAM, SPM):", result["min_energy_ordering"])
    print("Minimized execution cycles: %.4E" % result["min_cycle"])
    print("Execution method for minimized execution cycles: \nTiling factors (DRAM, SPM, RF, SPATIAL): ", result["min_cycle_seq"])
    print("Ordering (DRAM, SPM):", result["min_cycle_ordering"])
    print("Execution methods evaluated:", num_evaluations)
    print("Time spent in exploration: {} seconds".format(delta.total_seconds()))

    return
Beispiel #9
0
def run_thread(params):
    """Exhaustively search RF/SPM/DRAM tiling factors for one spatial factor.

    params is a tuple (layer_index, tc_list, spatial_factor, env_config):
      layer_index    -- index into layer_info.resnet_parameters
      tc_list        -- total trip counts per index variable (IV_ORDER order)
      spatial_factor -- fixed spatial tiling factors to explore under
      env_config     -- optional dict with "rf_energy"/"spm_energy" overrides

    Returns (min_edp, min_edp_sequence, min_energy, min_energy_sequence,
    evaluated), where each sequence is (dram, spm, rf, spatial) factors.
    """
    layer_index, tc_list, spatial_factor, env_config = params

    # Idiom fix: identity comparison with None.
    if env_config is not None:
        env = expr_parameters.Environment(
            rf_energy=env_config["rf_energy"],
            spm_energy=env_config["spm_energy"],
        )
    else:
        env = expr_parameters.Environment()

    env.pe_pipeline_stages = 1
    layer_args = layer_info.resnet_parameters[layer_index]
    layer = ConvLayer(env, **layer_args)
    stride = layer.strides

    # Trip counts remaining after the spatial tiling level is applied.
    tc_list_after_spatial = [
        int(x / y) for x, y in zip(tc_list, spatial_factor)
    ]
    tc_list_factors_spatial = [factors(tc) for tc in tc_list_after_spatial]

    min_energy = float("inf")
    min_energy_sequence = None
    min_edp = float("inf")
    min_edp_sequence = None

    evaluated = 0

    # Enumerate RF-level factors, then SPM-level factors; whatever trip count
    # remains becomes the DRAM-level factor.
    for rf_factor in of_bucket(tc_list_factors_spatial):
        if not valid_rf(stride, rf_factor):
            continue

        tc_list_after_rf = [
            int(x / y) for x, y in zip(tc_list_after_spatial, rf_factor)
        ]
        tc_list_factors_rf = [factors(tc) for tc in tc_list_after_rf]

        for spm_factor in of_bucket(tc_list_factors_rf):
            spatial_rf_factor = (x * y
                                 for x, y in zip(spatial_factor, rf_factor))
            if not valid_spm(stride, spatial_rf_factor, spm_factor):
                continue

            tc_list_after_spm = tuple(
                [int(x / y) for x, y in zip(tc_list_after_rf, spm_factor)])
            dram_factor = tc_list_after_spm
            if not valid_dram(dram_factor):
                continue

            evaluated += 1

            # Program the four-level tiling (DRAM, SPM, RF, SPATIAL) per IV.
            for idx in range(len(IV_ORDER)):
                tiling_factor = [
                    dram_factor[idx], spm_factor[idx], rf_factor[idx],
                    spatial_factor[idx]
                ]
                layer.set_tiling(IV_ORDER[idx], tiling_factor)

            edp, energy, cycle = layer.get_min_edp_energy_cycle()
            if edp < min_edp:
                min_edp = edp
                min_edp_sequence = (dram_factor, spm_factor, rf_factor,
                                    spatial_factor)

            if energy < min_energy:
                min_energy = energy
                min_energy_sequence = (dram_factor, spm_factor, rf_factor,
                                       spatial_factor)

    return min_edp, min_edp_sequence, min_energy, min_energy_sequence, evaluated
Beispiel #10
0
def get_joined_df(csv_dir, ref_files, our_test_dir):
    """Join reference CSV results with dMazeRunner's analytical-model numbers.

    Reads each CSV in ref_files from csv_dir, re-evaluates every tiling it
    describes with the analytical model, appends energy/cycle/EDP columns and
    per-component energy breakdowns, then concatenates all frames into one
    DataFrame with renamed comparison columns.

    NOTE(review): depends on a module-level `layers` list (not visible here) to
    map each ref file to a ResNet layer index — confirm it is kept in sync with
    ref_files. `our_test_dir` is accepted but never used in this body.
    """

    ref_dfs = []
    for ref_file_name in ref_files:
        df = pd.read_csv(csv_dir + "/" + ref_file_name)
        ref_dfs.append(df)

    for i, ref_df in enumerate(ref_dfs):
        print("processing", ref_files[i])
        layer_arguments = layer_info.resnet_parameters[layers[i]]
        env = expr_parameters.Environment()
        env.pe_pipeline_stages = 1
        layer = ConvLayer(env, **layer_arguments)

        loop_IVs = ["N", "M", "C", "Ox", "Oy", "Fx", "Fy"]

        # Pre-create the model-result columns so row-wise .at writes succeed.
        ref_df["energy_from_dMazeRunner"] = np.nan
        ref_df["cycle_from_dMazeRunner"] = np.nan
        ref_df["edp_from_dMazeRunner"] = np.nan
        ref_df["energy_diff_percent"] = np.nan
        ref_df["layer"] = layers[i]

        # Drop rows whose tiling failed verification in the reference data.
        ref_df.drop(ref_df[ref_df["Verify Tiling"] != True].index,
                    inplace=True)

        for index, row in ref_df.iterrows():
            # Redundant after the drop above, kept as a defensive guard.
            if not row["Verify Tiling"]:
                continue

            #set tiling
            dram_tiling = ast.literal_eval(row["DRAM_tiling"])
            spm_tiling = ast.literal_eval(row["SPM_tiling"])
            rf_tiling = ast.literal_eval(row["RF_tiling"])
            spatial_tiling = ast.literal_eval(row["Spatial_tiling"])
            for idx in range(len(loop_IVs)):
                tiling_factor = [
                    dram_tiling[idx], spm_tiling[idx], rf_tiling[idx],
                    spatial_tiling[idx]
                ]
                layer.set_tiling(loop_IVs[idx], tiling_factor)

            #find key from ordering
            spm_ordering = ast.literal_eval(row["SPM_schedule"])
            dram_ordering = ast.literal_eval(row["DRAM_schedule"])

            # Normalize IV capitalization (e.g. "ox" -> "Ox") to match model keys.
            spm_ordering = tuple([iv.title() for iv in spm_ordering])
            dram_ordering = tuple([iv.title() for iv in dram_ordering])

            spm_reuse_factor = layer.determine_data_reuse(
                "SPM", user_ordering=spm_ordering)[0]
            dram_reuse_factor = layer.determine_data_reuse(
                "DRAM", user_ordering=dram_ordering)[0]

            spm_ordering = layer.get_ordering_from_reuse_factor(
                spm_reuse_factor, "SPM")
            dram_ordering = layer.get_ordering_from_reuse_factor(
                dram_reuse_factor, "DRAM")
            # NOTE(review): this key is immediately overwritten below by the
            # min-energy ordering — the reuse-factor derivation above is dead
            # for the final result; confirm which behavior is intended.
            key = (dram_ordering, spm_ordering)

            cycles_of_all_orderings = layer.get_Cycles_One_Layer()
            energy_of_all_orderings = layer.get_Energy_One_Layer()

            ##### use the set of ordering that minimizes energy
            energy, dram_ordering, spm_ordering = layer.get_min_energy()
            key = (dram_ordering, spm_ordering)
            #####

            cycle, energy = cycles_of_all_orderings[
                key], energy_of_all_orderings[key]
            ref_df.at[index, "energy_from_dMazeRunner"] = energy
            ref_df.at[index, "cycle_from_dMazeRunner"] = cycle
            ref_df.at[index, "edp_from_dMazeRunner"] = energy * cycle

            # energy distribution
            energy_distributions_of_all_orderings = layer.get_Energy_Distribution(
            )
            energy_MAC, energy_RF, energy_NOC, energy_SPM, energy_DRAM = energy_distributions_of_all_orderings[
                key]
            ref_df.at[index, "energy_MAC_dMazeRunner"] = energy_MAC
            ref_df.at[index, "energy_RF_dMazeRunner"] = energy_RF
            ref_df.at[index, "energy_NOC_dMazeRunner"] = energy_NOC
            ref_df.at[index, "energy_SPM_dMazeRunner"] = energy_SPM
            ref_df.at[index, "energy_DRAM_dMazeRunner"] = energy_DRAM

            # Relative error of the reference energy vs. the model's energy.
            energy_from_ref = row["Energy"]
            ref_df.at[index, "energy_diff_percent"] = 100 * \
                abs(energy-energy_from_ref)/energy_from_ref

            stride = layer.strides
            n_banks, size_in_bytes = get_spm_bank_and_size(
                stride, spatial_tiling, rf_tiling, spm_tiling)
            ref_df.at[index, "spm_banks_yang_et_al"] = n_banks
            ref_df.at[index, "spm_size_in_bytes_yang_et_al"] = size_in_bytes

    final_df = pd.DataFrame()

    for layer_index, ref_df in enumerate(ref_dfs):

        # Map reference dataflow names (IC/OC/ON) onto this model's IV names
        # (C/M/N) and format as "Inner | Outer"; None if the name is malformed.
        def change_dataflow_name(old_name):
            name = old_name.replace("IC", "C")
            name = name.replace("OC", "M")
            name = name.replace("ON", "N")
            tokens = name.strip().split("_")
            if len(tokens) not in [2, 4]:
                return None
            if len(tokens) == 4:
                tokens = tokens[:2]
            x, y = tokens
            return (y.title() + " | " + x.title())

        # Extract the trailing "<a>_<b>" config suffix, if present.
        def parse_config(old_name):
            tokens = old_name.strip().split("_")
            if len(tokens) == 4:
                return tokens[2] + "_" + tokens[3]
            else:
                return None

        # Move the "layer" column to the front.
        columns = list(ref_df.columns.values)
        columns.remove("layer")
        new_column_order = ["layer"] + columns
        ref_df = ref_df[new_column_order]

        ref_df["dataflow_str"] = ref_df["Data_flow_mechanism"].apply(
            change_dataflow_name)
        ref_df["dataflow_config"] = ref_df["Data_flow_mechanism"].apply(
            parse_config)

        final_df = pd.concat([final_df, ref_df])

    # Rename columns for the side-by-side "theirs vs. dMazeRunner" comparison.
    final_df = final_df.rename(index=str,
                               columns={
                                   "dataflow_str": "dataflow",
                                   "Energy": "energy_theirs",
                                   "energy_from_dMazeRunner":
                                   "energy_dMazeRunner",
                                   "energy_best": "energy_optimal",
                                   "cycle_from_dMazeRunner":
                                   "cycle_dMazeRunner",
                                   "edp_from_dMazeRunner": "edp_dMazeRunner",
                                   "energy_diff_percent": "energy_diff_%",
                               })

    # Per-component percentage differences relative to the model's numbers.
    final_df["energy_RF_diff_%"] = (final_df["Energy_RF"] -
                                    final_df["energy_RF_dMazeRunner"]
                                    ) / final_df["energy_RF_dMazeRunner"] * 100
    final_df["energy_NOC_diff_%"] = (
        final_df["Energy_NoC"] - final_df["energy_NOC_dMazeRunner"]
    ) / final_df["energy_NOC_dMazeRunner"] * 100
    final_df["energy_SPM_diff_%"] = (
        final_df["Energy_SPM"] - final_df["energy_SPM_dMazeRunner"]
    ) / final_df["energy_SPM_dMazeRunner"] * 100
    final_df["energy_DRAM_diff_%"] = (
        final_df["Energy_DRAM"] - final_df["energy_DRAM_dMazeRunner"]
    ) / final_df["energy_DRAM_dMazeRunner"] * 100

    return final_df
Beispiel #11
0
def main():
    """Download a model from the selected frontend, then optimize the dataflow
    execution of one layer (or the whole model) and print per-layer and summary
    EDP/energy/cycle results.

    NOTE(review): this function's layer-selection try/except is fragile — the
    inner exit() at the unsupported-layer branch raises SystemExit, which the
    outer bare `except:` swallows and re-handles. Kept byte-identical here;
    flagged inline below.
    """
    args = parse_arguments()

    if args.list_models:
        print_summary.print_supported_models(args)
        exit()

    try:
        # SECURITY(review): eval() on a CLI-provided frontend name builds the
        # downloader dynamically; a dispatch dict would avoid code injection.
        download_func = eval("download_block_from_" + args.frontend)
        model_layers = download_func(args)

    except Exception as ex:
        print("Failed to download model", args.model)
        print("Error:", ex)
        exit()

    print("\n"+"="*40+"\n")

    if args.list_layers:
        for i, layer in enumerate(model_layers):
            # Layers may be plain dicts (special functions) or layer objects.
            if type(layer) == dict:
                layer_name = layer["name"]
                layer_type = "Special_Function"
            else:
                layer_name = layer.name
                # Derive a short class name from the repr, e.g. "ConvLayer".
                layer_type = str(type(layer)).split(".")[-1][:-2]
            print("{}: {} ({})".format(i, layer_name, layer_type))
        exit()

    if args.arch_spec:
        with open(args.arch_spec) as jsonFile:
            # Parameters used by analytical model of dataflow execution
            json_data = json.load(jsonFile)
            env_params = json_data["arch_details"]
            env = expr_parameters.Environment(**env_params)
            # Params used by map-space generator and optimizer
            expr_params = json_data["arch_basic"]
            params = expr_parameters.ExprParameters(**expr_params)
    else:
        env = expr_parameters.Environment()
        params = expr_parameters.ExprParameters(env)

    params.PRUNE_NO_FEATURE_DRAM = True if args.opt_no_feature_dram else False
    params.PRUNE_NO_REDUCTION = True if args.opt_no_spatial_reduction else False
    # Clamp the minimum number of execution methods to at least 1.
    params.MIN_EXEC_METHODS = int(args.min_exec_methods) if int(args.min_exec_methods) > 1 else 1

    if args.auto_optimize:
        # Aggressive default pruning for the auto-optimizer mode.
        params.PE_UTILIZATION = 0.8
        params.RF_UTILIZATION = 0.8
        params.SPM_UTILIZATION = 0.5
        params.PRUNE_NO_FEATURE_DRAM = True
        params.PRUNE_NO_REDUCTION = True
        params.MIN_EXEC_METHODS = 100

    if args.min_resource_utilization:
        # User-specified thresholds: (PE, RF, SPM) utilization fractions.
        threshold_resource_util = args.min_resource_utilization
        params.PE_UTILIZATION = threshold_resource_util[0]
        params.RF_UTILIZATION = threshold_resource_util[1]
        params.SPM_UTILIZATION = threshold_resource_util[2]

    # Select the requested layer, or fall back to the whole model.
    try:
        layers = [model_layers[int(args.layer)]]
        layer = layers[0]
        if type(layer) not in [ConvLayer, GemmLayer]:
            # NOTE(review): bare except below also catches the exit() above
            # (SystemExit), so control can fall through to the outer handler.
            try:
                layer_type = layer.name
            except:
                layer_type = layer["name"]
                # Typo in message ("requeted") — fix separately; string kept as-is.
                print("The requeted layer ({}) is not yet supported for analyzing execution on PE-array of dataflow accelerator.".format(layer_type))
                print("To list all the layers of a model, please use option --list-layers.")
                exit()
    except:
        if (args.layer) and int(args.layer) >= len(model_layers):
            print("Specified model does not feature any layer numbered as specified index.")
            print("To list all the layers of a model, please use option --list-layers.")
            exit()
        elif not (args.layer):
            print("Evaluating entire model for optimized dataflow acceleration.")
            layers = model_layers
        else:
            exit()

    # Running totals accumulated across all optimized layers.
    total_energy = 0
    total_cycles = 0
    total_edp = 0
    total_evaluations = 0
    total_time = datetime.timedelta(0)

    for i, layer in enumerate(layers):
        # Skip layer kinds the optimizer does not model.
        if type(layer) not in [ConvLayer, GemmLayer]:
            continue
        layer.env = env
        layer_type = str(type(layer)).split(".")[-1][:-2]
        print("\n")
        print("compiling layer {}: {} ({})".format(i, layer.name, layer_type))
        print(layer)

        result = None
        skip_layer = False
        start_time = datetime.datetime.now()

        result = optimizer.optimize(layer, params)
        num_evaluations = optimizer.get_num_evaluations(layer, params)

        # Adaptive search for finding efficient execution methods
        while (result == None) or (num_evaluations < params.MIN_EXEC_METHODS):
            if args.auto_optimize:
                # Relax pruning and retry until enough methods are evaluated.
                adjust_opts_success = adjust_optimization_strategies(params)
                if adjust_opts_success == False:
                    print("Optimizer did not find any execution method to evaluate. Please try with another application model and/or target architecture instead.")
                    skip_layer = True
                    break
                result = optimizer.optimize(layer, params)
                num_evaluations = optimizer.get_num_evaluations(layer, params)
            else:
                print("Optimizer did not find any execution method to evaluate. Please try some different optimization strategy, e.g., apply smaller pruning factors, or try auto-optimizer.")
                skip_layer = True
                break

        if skip_layer == True:
            continue

        end_time = datetime.datetime.now()

        delta = end_time - start_time
        num_evaluations = optimizer.get_num_evaluations(layer, params)
        print("Optimized energy-delay product (EDP): %.4E" %  result["min_edp"])
        print("Execution method for optimized EDP: \nTiling factors (DRAM, SPM, RF, SPATIAL): ", result["min_edp_seq"])
        print("Ordering (DRAM, SPM):", result["min_edp_ordering"])
        print("Minimized energy: %.4E" % result["min_energy"])
        print("Execution method for minimized energy: \nTiling factors (DRAM, SPM, RF, SPATIAL): ", result["min_energy_seq"])
        print("Ordering (DRAM, SPM):", result["min_energy_ordering"])
        print("Minimized execution cycles: %.4E" % result["min_cycle"])
        print("Execution method for minimized execution cycles: \nTiling factors (DRAM, SPM, RF, SPATIAL): ", result["min_cycle_seq"])
        print("Ordering (DRAM, SPM):", result["min_cycle_ordering"])
        print("Execution methods evaluated:", num_evaluations)
        print("Time spent in exploration: {} seconds".format(delta.total_seconds()))

        total_edp += result["min_edp"]
        total_energy += result["min_energy"]
        total_cycles += result["min_cycle"]
        total_evaluations += num_evaluations
        total_time += delta

    if len(layers) > 1:
        print("\n")
        print("="*20, "Summary", "="*20)
        print("Optimized Total EDP: %.4E" %  total_edp)
        print("Optimized Total Energy: %.4E" %  total_energy)
        # Typo in message ("Exeuction") — fix separately; string kept as-is.
        print("Optimized Total Exeuction Cycles: %.4E" %  total_cycles)
        print("Total execution methods evaluated: {}".format(total_evaluations))
        print("Search space exploration done in %.2f" % (total_time.total_seconds())  + " seconds.")

    return