Esempio n. 1
0
def experiment_run(algo, dataset_from, store_to, tenants_used, tenants_to_use,
                   s_evaluator, s_tenant_strategy, s_dc_strategy):
    """Run one placement experiment on a dataset and persist the results.

    Loads DCs and tenants from ``examples/<dataset_from>``, keeps the
    already-marked tenants among the first ``tenants_used`` plus the next
    ``tenants_to_use`` fresh ones, runs ``main_algo`` with the strategies
    selected by name, then dumps the benchmark and resulting state under
    ``experiments/``.
    """
    evaluators = {
        "naive": Evaluator_naive,
        "sum": Evaluator_sum,
        "detailed": Evaluator_detailed,
    }
    tenant_strategies = {
        "tenant_random": get_tenants_random,
        "tenant_heaviest": get_tenants_heaviest,
        "tenant_lightest": get_tenants_lightest,
    }
    dc_strategies = {
        "dc_random": get_dcs_random,
        "dc_utilized": get_dcs_utilized,
        "dc_emptiest": get_dcs_emptiest,
    }

    evaluator = evaluators[s_evaluator]()

    dcs, tenants = from_directory(os.path.join("examples", dataset_from))
    head, tail = tenants[:tenants_used], tenants[tenants_used:]

    # Tenants that were already placed in the head, plus a fresh batch.
    tenants_for_run = [t for t in head if t.mark is not None]
    tenants_for_run += tail[:tenants_to_use]

    benchmarks, _ = main_algo(3, 1, dcs, tenants_for_run, evaluator,
                              tenant_strategies[s_tenant_strategy],
                              dc_strategies[s_dc_strategy])

    path = "experiments/{}/{}/{}/{}/{}".format(algo, store_to, s_evaluator,
                                               s_tenant_strategy,
                                               s_dc_strategy)
    dump_experiment(benchmarks, os.path.join(path, "benchmark.json"))
    into_directory(dcs, tenants, os.path.join(path, "data"))
Esempio n. 2
0
def empty_experiment_run(algo, s_evaluator, s_tenant_strategy, s_dc_strategy):
    """Run the placement algorithm on the empty, pre-sorted example dataset.

    Places the first 800 tenants of ``examples/dcs_empty_sorted/`` using
    the strategies selected by name, then stores the benchmark and the
    resulting state under ``experiments/<algo>/empty800/...``.
    """
    evaluators = {
        "naive": Evaluator_naive,
        "sum": Evaluator_sum,
        "detailed": Evaluator_detailed,
    }
    tenant_strategies = {
        "tenant_random": get_tenants_random,
        "tenant_heaviest": get_tenants_heaviest,
        "tenant_lightest": get_tenants_lightest,
    }
    dc_strategies = {
        "dc_random": get_dcs_random,
        "dc_utilized": get_dcs_utilized,
        "dc_emptiest": get_dcs_emptiest,
    }

    evaluator = evaluators[s_evaluator]()

    dcs, tenants = from_directory("examples/dcs_empty_sorted/")
    # Only the first 800 tenants take part in this run.
    tenants1 = tenants[:800]
    tenants2 = tenants[800:]

    benchmarks, _ = main_algo(3, 1, dcs, tenants1, evaluator,
                              tenant_strategies[s_tenant_strategy],
                              dc_strategies[s_dc_strategy])

    path = "experiments/{}/empty800/{}/{}/{}".format(algo, s_evaluator,
                                                     s_tenant_strategy,
                                                     s_dc_strategy)
    dump_experiment(benchmarks, os.path.join(path, "benchmark.json"))
    into_directory(dcs, tenants, os.path.join(path, "data"))
Esempio n. 3
0
def iterations_view_placed(directory):
    """Plot per-DC placed-request increments across experiment iterations.

    Scans ``directory`` for numerically named iteration subdirectories,
    computes the cumulative placement counts per DC for each iteration,
    and plots the per-iteration deltas as a stacked bar chart (one bar
    group per DC).

    Returns:
        tuple: ``(df, ax)`` — the cumulative ``DataFrame`` (columns are
        DC labels ``"1"``..``"8"``) and the matplotlib ``Axes``.
    """
    iterations = sorted(int(x) for x in os.listdir(directory) if x.isdigit())

    # DC labels; presumably fixed at 8 DCs for this experiment — TODO confirm.
    columns = [str(i + 1) for i in range(8)]

    # Collect one row per iteration and build the frame in a single call:
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
    rows = []
    for iteration in iterations:
        dirname = os.path.join(directory, str(iteration))
        dcs, tenants = from_directory(dirname)
        benchmark = placement_tenants(tenants)
        rows.append(dict(benchmark["vals"]))

    df = pd.DataFrame(rows, columns=columns)

    # Cumulative counts -> per-iteration deltas; the first row has no
    # predecessor, so keep its absolute values.
    df_diff = df.diff()
    df_diff.iloc[0] = df.iloc[0]

    ax = df_diff.T.plot(kind='bar', stacked=True)
    ax.set_xlabel("Номер ЦОД")
    ax.set_ylabel("Размещено запросов")

    return df, ax
Esempio n. 4
0
def iterations_view_utilization(directory):
    """Plot per-DC resource-utilization change across experiment iterations.

    Reads every numerically named iteration subdirectory of ``directory``,
    collects the utilization vector of the DCs at each iteration, and
    plots the per-iteration deltas as a stacked bar chart.

    Returns:
        tuple: ``(df, ax)`` — the cumulative ``DataFrame`` and the
        matplotlib ``Axes``.
    """
    iteration_ids = sorted(
        int(name) for name in os.listdir(directory) if name.isdigit())

    rows = []
    for iteration_id in iteration_ids:
        dcs, _tenants = from_directory(
            os.path.join(directory, str(iteration_id)))
        rows.append(utilization_dcs(dcs)["vals"])

    df = pd.DataFrame(data=rows, columns=[str(n + 1) for n in range(8)])

    # Cumulative utilization -> per-iteration deltas; the first row keeps
    # its absolute values since there is no predecessor to diff against.
    df_diff = df.diff()
    df_diff.iloc[0] = df.iloc[0]

    ax = df_diff.T.plot(kind='bar', stacked=True)
    ax.set_xlabel("Номер ЦОД")
    ax.set_ylabel("Загрузка ресурсов")

    return df, ax
Esempio n. 5
0
def by_one_iterations_algo(directory):
    """Advance an experiment by placing pending tenants one at a time.

    Loads the strategy configuration from ``config.json`` and the state of
    the latest numbered iteration inside ``directory``, then runs
    ``algo_step`` once per new tenant: each step sees all previously
    placed tenants plus exactly one not-yet-placed tenant. Aggregated
    timings, return counts and per-DC sending lists are appended to the
    experiment's CSV logs, and the post-run state is written into the
    next numbered subdirectory.

    NOTE(review): the batch size 67 and the DC count 8 are hard-coded —
    presumably tied to this experiment's dataset; confirm before reuse.
    """
    # Strategy-name -> implementation lookup tables.
    map_evaluator = {
        "naive": Evaluator_naive,
        "sum": Evaluator_sum,
        "detailed": Evaluator_detailed,
    }
    map_tenant = {
        "random": get_tenants_random,
        "heaviest": get_tenants_heaviest,
        "lightest": get_tenants_lightest,
    }
    map_dc = {
        "random": get_dcs_random,
        "utilized": get_dcs_utilized,
        "emptiest": get_dcs_emptiest,
    }

    # Experiment parameters live next to the iteration data.
    filename_config = os.path.join(directory, "config.json")
    with open(filename_config, "r") as f:
        config = json.load(f)

    dcs_per_tenant = config["dcs_per_tenant"]
    e = map_evaluator[config["evaluator"]]()
    strategy_choose_tenant = map_tenant[config["tenant"]]
    strategy_choose_dc = map_dc[config["dc"]]

    # The largest numeric directory name is the latest completed iteration.
    filenames = os.listdir(directory)
    iterations = [int(x) for x in filenames if x.isdigit()]
    last_iteration = max(iterations)
    last_iteration_path = os.path.join(directory, str(last_iteration))

    dcs, tenants_all = from_directory(last_iteration_path)

    # Already-placed tenants (mark set) plus the next batch of unplaced ones.
    tenants_placed = [x for x in tenants_all if x.mark is not None]
    tenants_to_place = [x for x in tenants_all if x.mark is None][:67]
    tenants_in_process = tenants_placed + tenants_to_place

    # Pre-compute each tenant's evaluation with the configured evaluator.
    for tenant in tenants_in_process:
        tenant.evaluation = e.get_tenant_evaluation(tenant)

    dict_dcs = {dc.name: dc for dc in dcs}
    dict_tenants = {tenant.name: tenant for tenant in tenants_in_process}

    # Per-step statistics accumulated across the one-tenant-at-a-time runs.
    all_timings = []
    all_sendings = []
    all_returns = []
    is_placed = []

    start_time = time.time()
    # Place one new tenant per algo_step; already-placed tenants are always
    # part of the step's input.
    for i in range(len(tenants_to_place)):
        all_placed, timings, sendings, returns_count = algo_step(
            dcs_per_tenant, dcs, tenants_placed + [tenants_to_place[i]], e,
            strategy_choose_tenant, strategy_choose_dc, dict_dcs, dict_tenants)
        all_timings.append(timings)
        all_sendings.append(sendings)
        all_returns.append(returns_count)
        is_placed.append(all_placed)

    elapsed_time = time.time() - start_time

    # Sum the yes/no return counters across all steps.
    returns_count = reduce(
        lambda x, y: {
            "yes": x["yes"] + y["yes"],
            "no": x["no"] + y["no"]
        }, all_returns, {
            'no': 0,
            'yes': 0
        })
    # Element-wise sum of per-step timings (one value per DC).
    timings = np.array(all_timings).sum(axis=0).tolist()
    # sendings = np.array(all_sendings).T.tolist()[0]
    # Flatten the per-step sending lists into one combined list per DC.
    sendings = [[] for _ in range(8)]
    for iteration in all_sendings:
        for i, val in enumerate(iteration):
            sendings[i].extend(val)

    # Persist the post-run state under the next iteration number.
    new_iteration_path = os.path.join(directory, str(last_iteration + 1))
    os.mkdir(new_iteration_path)
    into_directory(dcs, tenants_all, new_iteration_path)

    # Append total timings; DCs without a timing value get an empty field.
    filename_timings = os.path.join(directory, "timings.csv")
    local_time = ",".join([str(x) if x is not None else "" for x in timings])
    line = "{},{},{}\n".format(last_iteration + 1, elapsed_time, local_time)
    with open(filename_timings, "a") as f:
        f.write(line)

    filename_returns = os.path.join(directory, "returns.csv")
    line = "{},{},{}\n".format(last_iteration + 1, returns_count["yes"],
                               returns_count["no"])
    with open(filename_returns, "a") as f:
        f.write(line)

    # Sendings not rejected by a DC are treated as accepted by it.
    filename_sendings = os.path.join(directory, "sendings.csv")
    acceptings = []
    for sending, dc in zip(sendings, dcs):
        accepting = set(sending) - dc.names_rejected
        acceptings.append(list(accepting))

    line = [last_iteration + 1, sendings, acceptings]

    line = json.dumps(line) + "\n"

    with open(filename_sendings, "a") as f:
        f.write(line)
Esempio n. 6
0
def iterations_algo(directory):
    """Advance an experiment in ``directory`` by one algorithm iteration.

    Reads strategy configuration from ``config.json``, loads the state of
    the latest numbered iteration, performs a single ``algo_step``, writes
    the new state into the next numbered subdirectory, and appends timing,
    return-count and sending statistics to the experiment's CSV logs.
    """
    evaluator_by_name = {
        "naive": Evaluator_naive,
        "sum": Evaluator_sum,
        "detailed": Evaluator_detailed,
    }
    tenant_strategy_by_name = {
        "random": get_tenants_random,
        "heaviest": get_tenants_heaviest,
        "lightest": get_tenants_lightest,
    }
    dc_strategy_by_name = {
        "random": get_dcs_random,
        "utilized": get_dcs_utilized,
        "emptiest": get_dcs_emptiest,
    }

    with open(os.path.join(directory, "config.json"), "r") as f:
        config = json.load(f)

    dcs_per_tenant = config["dcs_per_tenant"]
    evaluator = evaluator_by_name[config["evaluator"]]()
    choose_tenant = tenant_strategy_by_name[config["tenant"]]
    choose_dc = dc_strategy_by_name[config["dc"]]

    # The largest numeric directory name is the latest completed iteration.
    numbered = [int(name) for name in os.listdir(directory) if name.isdigit()]
    last_iteration = max(numbered)
    next_iteration = last_iteration + 1

    dcs, tenants = from_directory(os.path.join(directory, str(last_iteration)))

    # Pre-compute each tenant's evaluation with the configured evaluator.
    for tenant in tenants:
        tenant.evaluation = evaluator.get_tenant_evaluation(tenant)

    dict_dcs = {dc.name: dc for dc in dcs}
    dict_tenants = {tenant.name: tenant for tenant in tenants}

    start_time = time.time()
    all_placed, timings, sendings, returns_count = algo_step(
        dcs_per_tenant, dcs, tenants, evaluator, choose_tenant,
        choose_dc, dict_dcs, dict_tenants)
    elapsed_time = time.time() - start_time

    # Persist the post-step state under the next iteration number.
    new_iteration_path = os.path.join(directory, str(next_iteration))
    os.mkdir(new_iteration_path)
    into_directory(dcs, tenants, new_iteration_path)

    # Append per-step timings; DCs without a timing value get an empty field.
    local_time = ",".join(str(t) if t is not None else "" for t in timings)
    with open(os.path.join(directory, "timings.csv"), "a") as f:
        f.write("{},{},{}\n".format(next_iteration, elapsed_time, local_time))

    with open(os.path.join(directory, "returns.csv"), "a") as f:
        f.write("{},{},{}\n".format(next_iteration, returns_count["yes"],
                                    returns_count["no"]))

    # Sendings not rejected by a DC are treated as accepted by it.
    acceptings = [list(set(sending) - dc.names_rejected)
                  for sending, dc in zip(sendings, dcs)]
    record = json.dumps([next_iteration, sendings, acceptings]) + "\n"
    with open(os.path.join(directory, "sendings.csv"), "a") as f:
        f.write(record)
Esempio n. 7
0
from data_loader import from_directory, into_directory
from evaluators import Evaluator_sum, Evaluator_naive
from algo import main_algo

from strategies import get_tenants_random, get_dcs_random

from strategies import get_dcs_utilized, get_dcs_emptiest
from strategies import get_tenants_heaviest, get_tenants_lightest

from checking_results import utilization_dcs, placement_tenants

import time

if __name__ == '__main__':
    # Demo run: place all tenants of the empty pre-sorted dataset using the
    # sum evaluator with heaviest-tenant / most-utilized-DC strategies.
    dcs, tenants = from_directory("examples/dcs_empty_sorted")

    evaluator = Evaluator_sum()

    started = time.time()
    main_algo(3, 1, dcs, tenants, evaluator, get_tenants_heaviest,
              get_dcs_utilized)
    print("time elapsed", int(time.time() - started), "s")

    placement = placement_tenants(tenants)
    print("tenants: {}/{}".format(placement["placed"], placement["total"]))
    utilization = utilization_dcs(dcs)
    print("utilization: \navg {}\nmax {}".format(utilization["avg"],
                                                 utilization["max"]))

    into_directory(dcs, tenants, "examples/result_dcs_empty_sorted")