# NOTE: These examples are excerpted from a larger codebase. Standard-library
# imports such as `import csv`, `import numpy as np`, `import pandas as pd`,
# `from math import inf, nan`, `from timeit import default_timer as timer`,
# `from typing import List`, and `from tqdm import tqdm` are assumed, as are
# the codebase's own classes (Optimize, OptimizeNew, SingleServerPerform, ...).


def single_server_df(arr1: ArrivalDistribution, ser1: ConstantRate,
                     opt_method: OptMethod,
                     perform_param_list: PerformParamList) -> pd.DataFrame:
    """Compute the performance bound for every value in perform_param_list
    and write the results into a dataframe.

    Args:
        arr1: arrival distribution
        ser1: constant-rate service
        opt_method: optimization method (grid search or pattern search)
        perform_param_list: list of performance parameter values

    Returns:
        dataframe with the standard and the new bound

    """

    bound = [0.0] * len(perform_param_list.values_list)
    new_bound = [0.0] * len(perform_param_list.values_list)

    for _i in range(len(perform_param_list.values_list)):
        setting = SingleServerPerform(
            arr=arr1,
            const_rate=ser1,
            perform_param=perform_param_list.get_parameter_at_i(_i))

        if opt_method == OptMethod.GRID_SEARCH:
            bound[_i] = Optimize(setting=setting).grid_search(
                bound_list=[(0.1, 4.0)], delta=0.1)
            new_bound[_i] = OptimizeNew(
                setting_new=setting, new=True).grid_search(
                    bound_list=[(0.1, 4.0), (0.9, 8.0)], delta=0.05)

        elif opt_method == OptMethod.PATTERN_SEARCH:
            bound[_i] = Optimize(setting=setting).pattern_search(
                start_list=[0.5], delta=3.0, delta_min=0.01)
            new_bound[_i] = OptimizeNew(setting_new=setting,
                                        new=True).pattern_search(
                                            start_list=[0.5, 2.0],
                                            delta=3.0,
                                            delta_min=0.01)
        else:
            raise ValueError(
                "Optimization method {0} is infeasible".format(opt_method))

    delay_bounds_df = pd.DataFrame(
        {
            "bound": bound,
            "new_bound": new_bound
        },
        index=perform_param_list.values_list)
    delay_bounds_df = delay_bounds_df[["bound", "new_bound"]]

    return delay_bounds_df
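
# Usage sketch (not part of the original example): the constructor signature
# of PerformParamList is an assumption inferred from the values_list and
# get_parameter_at_i attributes used above; DM1, ConstantRate, and OptMethod
# appear with these signatures in the other examples of this file.
if __name__ == '__main__':
    DELAY_PROB_LIST = PerformParamList(perform_metric=PerformEnum.DELAY_PROB,
                                       values_list=[4, 6, 8, 10])
    print(
        single_server_df(arr1=DM1(lamb=1.0),
                         ser1=ConstantRate(rate=10.0),
                         opt_method=OptMethod.GRID_SEARCH,
                         perform_param_list=DELAY_PROB_LIST))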
Example #2
def compare_optimization(setting: SettingNew,
                         opt_methods: List[OptMethod],
                         number_l: int = 1) -> List[float]:
    """Measure the computation time of the different optimization methods."""
    new = True
    print_x = False

    list_of_bounds: List[float] = []
    list_of_times: List[float] = []
    list_of_approaches: List[str] = []

    for opt in opt_methods:
        start = timer()
        if opt == OptMethod.GRID_SEARCH:
            theta_bounds = [(0.1, 4.0)]

            bound_list = theta_bounds[:]
            for _i in range(1, number_l + 1):
                bound_list.append((0.9, 4.0))

            bound = OptimizeNew(setting_new=setting, new=new,
                                print_x=print_x).grid_search(
                                    bound_list=bound_list, delta=0.1)

        elif opt == OptMethod.PATTERN_SEARCH:
            theta_start = 0.5

            start_list = [theta_start] + [1.0] * number_l

            bound = OptimizeNew(setting_new=setting, new=new,
                                print_x=print_x).pattern_search(
                                    start_list=start_list,
                                    delta=3.0,
                                    delta_min=0.01)

        elif opt == OptMethod.NELDER_MEAD:
            theta_start = 0.5

            start_list = [theta_start] + [1.0] * number_l
            start_simplex = InitialSimplex(
                parameters_to_optimize=number_l + 1).gao_han(
                    start_list=start_list)

            bound = OptimizeNew(setting_new=setting, new=new,
                                print_x=print_x).nelder_mead(
                                    simplex=start_simplex, sd_min=10**(-2))

        elif opt == OptMethod.BASIN_HOPPING:
            theta_start = 0.5

            start_list = [theta_start] + [1.0] * number_l

            bound = OptimizeNew(
                setting_new=setting, new=new,
                print_x=print_x).basin_hopping(start_list=start_list)

        elif opt == OptMethod.SIMULATED_ANNEALING:
            simul_anneal_param = SimAnnealParams()
            theta_start = 0.5

            start_list = [theta_start] + [1.0] * number_l

            bound = OptimizeNew(setting_new=setting, new=new,
                                print_x=print_x).sim_annealing(
                                    start_list=start_list,
                                    sim_anneal_params=simul_anneal_param)

        elif opt == OptMethod.DIFFERENTIAL_EVOLUTION:
            theta_bounds = [(0.1, 4.0)]

            bound_list = theta_bounds[:]
            for _i in range(1, number_l + 1):
                bound_list.append((0.9, 4.0))

            bound = OptimizeNew(
                setting_new=setting, new=new,
                print_x=print_x).diff_evolution(bound_list=bound_list)

        elif opt == OptMethod.BFGS:
            theta_start = 0.5

            start_list = [theta_start] + [1.0] * number_l

            bound = OptimizeNew(setting_new=setting, new=new,
                                print_x=print_x).bfgs(start_list=start_list)

        elif opt == OptMethod.GS_OLD:
            theta_bounds = [(0.1, 4.0)]

            bound_list = theta_bounds[:]
            for _i in range(1, number_l + 1):
                bound_list.append((0.9, 4.0))

            bound = OptimizeNew(setting_new=setting, new=new,
                                print_x=print_x).grid_search_old(
                                    bound_list=bound_list, delta=0.1)

        elif opt == OptMethod.NM_OLD:
            nelder_mead_param = NelderMeadParameters()
            theta_start = 0.5

            start_list = [theta_start] + [1.0] * number_l
            start_simplex = InitialSimplex(
                parameters_to_optimize=number_l + 1).gao_han(
                    start_list=start_list)

            bound = OptimizeNew(setting_new=setting, new=new,
                                print_x=print_x).nelder_mead_old(
                                    simplex=start_simplex,
                                    nelder_mead_param=nelder_mead_param,
                                    sd_min=10**(-2))

        else:
            raise NameError("Optimization parameter {0} is infeasible".format(
                opt.name))

        stop = timer()
        list_of_bounds.append(bound)
        list_of_times.append(stop - start)
        list_of_approaches.append(opt.name)

    print("list_of_approaches: ", list_of_approaches)
    print("list_of_times: ", list_of_times)
    print("list_of_bounds: ")
    return list_of_bounds
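
# Usage sketch (not part of the original example): the SingleServerPerform
# and PerformParameter constructions mirror example #7 below.
if __name__ == '__main__':
    SETTING = SingleServerPerform(
        arr=DM1(lamb=1.0),
        const_rate=ConstantRate(rate=10.0),
        perform_param=PerformParameter(perform_metric=PerformEnum.DELAY_PROB,
                                       value=8))
    compare_optimization(setting=SETTING,
                         opt_methods=[
                             OptMethod.GRID_SEARCH, OptMethod.PATTERN_SEARCH,
                             OptMethod.NELDER_MEAD
                         ],
                         number_l=1)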
Example #3
def csv_single_param_exp_lower(start_time: int,
                               perform_param: PerformParameter,
                               mc_dist: MonteCarloDist,
                               sample: bool = False) -> dict:
    total_iterations = 10**2
    valid_iterations = total_iterations
    metric = "relative"
    sample_size = 10**3

    delta = 0.05

    size_array = [total_iterations, 2]
    # [rows, columns]

    if mc_dist.mc_enum == MCEnum.UNIFORM:
        param_array = np.random.uniform(
            low=0, high=mc_dist.param_list[0], size=size_array)
    elif mc_dist.mc_enum == MCEnum.EXPONENTIAL:
        param_array = np.random.exponential(
            scale=mc_dist.param_list[0], size=size_array)
    else:
        raise NameError("Distribution parameter {0} is infeasible".format(
            mc_dist.mc_enum))

    res_array = np.empty([total_iterations, 3])
    if sample:
        res_array_sample = np.empty([total_iterations, 3])

    for i in tqdm(range(total_iterations)):
        setting = SingleServerPerform(
            arr=DM1(lamb=param_array[i, 0]),
            const_rate=ConstantRate(rate=param_array[i, 1]),
            perform_param=perform_param)

        theta_bounds = [(0.1, 4.0)]
        bound_array = theta_bounds[:]

        res_array[i, 0] = Optimize(setting=setting).grid_search(
            bound_list=bound_array, delta=delta)

        bound_array_power = theta_bounds[:]
        bound_array_power.append((0.9, 4.0))

        res_array[i, 1] = OptimizeNew(setting_new=setting).grid_search(
            bound_list=bound_array_power, delta=delta)

        if perform_param.perform_metric == PerformEnum.DELAY_PROB:
            res_array[i, 2] = delay_prob_lower_exp_dm1_opt(
                t=start_time,
                delay=perform_param.value,
                lamb=param_array[i, 0],
                rate=param_array[i, 1])

            if sample:
                res_array_sample[i, 0] = res_array[i, 0]
                res_array_sample[i, 1] = res_array[i, 1]
                res_array_sample[i, 2] = delay_prob_sample_exp_dm1_opt(
                    t=start_time,
                    delay=perform_param.value,
                    lamb=param_array[i, 0],
                    rate=param_array[i, 1],
                    sample_size=sample_size)

            if res_array[i, 0] > 1.0:
                res_array[i, ] = nan
                if sample:
                    res_array_sample[i, ] = nan

        elif perform_param.perform_metric == PerformEnum.OUTPUT:
            res_array[i, 2] = output_lower_exp_dm1_opt(
                s=start_time,
                delta_time=perform_param.value,
                lamb=param_array[i, 0],
                rate=param_array[i, 1])

        else:
            raise NameError("{0} is an infeasible performance metric".format(
                perform_param.perform_metric))

        # Comparing against nan with == is always False; use np.isnan instead.
        # res_array_sample only exists when sample is True.
        if (res_array[i, 1] == inf or res_array[i, 2] == inf
                or np.isnan(res_array[i, :]).any()):
            res_array[i, ] = nan
            if sample:
                res_array_sample[i, ] = nan
            valid_iterations -= 1

    # print("exponential results", res_array[:, 2])

    res_dict = three_col_array_to_results(
        arrival_enum=ArrivalEnum.DM1,
        res_array=res_array,
        valid_iterations=valid_iterations,
        metric=metric)

    res_dict.update({
        "iterations": total_iterations,
        "delta_time": perform_param.value,
        "optimization": "grid_search",
        "metric": "relative",
        "MCDistribution": mc_dist.to_name(),
        "MCParam": mc_dist.param_to_string()
    })

    if sample:
        res_dict_sample = three_col_array_to_results(
            arrival_enum=ArrivalEnum.DM1,
            res_array=res_array_sample,
            valid_iterations=valid_iterations,
            metric=metric)

        res_dict_sample.update({
            "iterations": total_iterations,
            "delta_time": perform_param.value,
            "optimization": "grid_search",
            "metric": "relative",
            "MCDistribution": mc_dist.to_name(),
            "MCParam": mc_dist.param_to_string()
        })

    with open(
            "lower_single_{0}_DM1_results_MC{1}_power_exp.csv".format(
                perform_param.to_name(), mc_dist.to_name()), 'w') as csv_file:
        writer = csv.writer(csv_file)
        for key, value in res_dict.items():
            writer.writerow([key, value])
    if sample:
        with open(
                "sample_single_{0}_DM1_results_MC{1}_power_exp.csv".format(
                    perform_param.to_name(), mc_dist.to_name()),
                'w') as csv_file:
            writer = csv.writer(csv_file)
            for key, value in res_dict_sample.items():
                writer.writerow([key, value])

    return res_dict
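
# Usage sketch (not part of the original example): the MonteCarloDist
# constructor signature is an assumption inferred from the mc_enum and
# param_list attributes used above.
if __name__ == '__main__':
    OUTPUT4 = PerformParameter(perform_metric=PerformEnum.OUTPUT, value=4)
    MC_UNIF = MonteCarloDist(mc_enum=MCEnum.UNIFORM, param_list=[10.0])
    print(
        csv_single_param_exp_lower(start_time=0,
                                   perform_param=OUTPUT4,
                                   mc_dist=MC_UNIF,
                                   sample=False))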
Example #4
def fat_cross_df(arr_list: List[ArrivalDistribution],
                 ser_list: List[ConstantRate], opt_method: OptMethod,
                 perform_param_list: PerformParamList) -> pd.DataFrame:
    """Compute delay bound for T in T_list and write into dataframe.

    Args:
        arr_list: Arrival object list
        ser_list: Service object list
        opt_method: method to_name as string, PS or GS
        perform_param_list: list of performance parameter values

    Returns:
        dataframe

    """
    bound = [0.0] * len(perform_param_list.values_list)
    new_bound = [0.0] * len(perform_param_list.values_list)

    for _i in range(len(perform_param_list.values_list)):
        perform_param = perform_param_list.get_parameter_at_i(_i)
        setting = FatCrossPerform(arr_list=arr_list,
                                  ser_list=ser_list,
                                  perform_param=perform_param)

        if opt_method == OptMethod.GRID_SEARCH:
            bound[_i] = Optimize(setting=setting).grid_search(
                bound_list=[(0.1, 5.0)], delta=0.1)
            new_bound[_i] = OptimizeNew(
                setting_new=setting, new=True).grid_search(
                    bound_list=[(0.1, 5.0), (0.9, 6.0)], delta=0.05)

        elif opt_method == OptMethod.PATTERN_SEARCH:
            bound[_i] = Optimize(setting=setting).pattern_search(
                start_list=[0.5], delta=3.0, delta_min=0.01)

            new_bound[_i] = OptimizeNew(setting_new=setting,
                                        new=True).pattern_search(
                                            start_list=[0.5, 2.0],
                                            delta=3.0,
                                            delta_min=0.01)

        elif opt_method == OptMethod.GS_OLD:
            bound[_i] = Optimize(setting=setting).grid_search_old(
                bound_list=[(0.1, 5.0)], delta=0.1)
            new_bound[_i] = OptimizeNew(
                setting_new=setting, new=True).grid_search_old(
                    bound_list=[(0.1, 5.0), (0.9, 6.0)], delta=0.1)

        else:
            raise ValueError(
                "Optimization method {0} is infeasible".format(opt_method))

    results_df = pd.DataFrame(
        {
            "bound": bound,
            "new_bound": new_bound
        },
        index=perform_param_list.values_list)
    results_df = results_df[["bound", "new_bound"]]

    return results_df
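
# Usage sketch (not part of the original example): a two-flow fat-cross
# scenario is assumed; DM1, ConstantRate, and PerformParamList follow the
# signatures used in the other examples of this file.
if __name__ == '__main__':
    DELAY_PROB_LIST = PerformParamList(perform_metric=PerformEnum.DELAY_PROB,
                                       values_list=[4, 6, 8, 10])
    print(
        fat_cross_df(arr_list=[DM1(lamb=1.0), DM1(lamb=2.0)],
                     ser_list=[ConstantRate(rate=3.0),
                               ConstantRate(rate=3.0)],
                     opt_method=OptMethod.GRID_SEARCH,
                     perform_param_list=DELAY_PROB_LIST))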
Example #5
def compute_overhead(setting: SettingNew,
                     opt_method: OptMethod,
                     number_l: int = 1) -> tuple:
    """Compare the computation times of the standard and the new bound."""

    if opt_method == OptMethod.GRID_SEARCH:
        bound_array = [(0.1, 4.0)]

        start = timer()
        Optimize(setting=setting).grid_search(bound_list=bound_array,
                                              delta=0.1)
        stop = timer()
        time_standard = stop - start

        for _ in range(1, number_l + 1):
            bound_array.append((0.9, 4.0))

        start = timer()
        OptimizeNew(setting_new=setting).grid_search(bound_list=bound_array,
                                                     delta=0.1)
        stop = timer()
        time_lyapunov = stop - start

    elif opt_method == OptMethod.PATTERN_SEARCH:
        start_list = [0.5]

        start = timer()
        Optimize(setting=setting).pattern_search(start_list=start_list,
                                                 delta=3.0,
                                                 delta_min=0.01)
        stop = timer()
        time_standard = stop - start

        start_list = [0.5] + [1.0] * number_l

        start = timer()
        OptimizeNew(setting_new=setting).pattern_search(start_list=start_list,
                                                        delta=3.0,
                                                        delta_min=0.01)
        stop = timer()
        time_lyapunov = stop - start

    elif opt_method == OptMethod.NELDER_MEAD:
        start_simplex = InitialSimplex(parameters_to_optimize=1).uniform_dist(
            max_theta=1.0)

        start = timer()
        Optimize(setting=setting).nelder_mead(simplex=start_simplex,
                                              sd_min=10**(-2))
        stop = timer()
        time_standard = stop - start

        start_simplex_new = InitialSimplex(
            parameters_to_optimize=number_l + 1).uniform_dist(
                max_theta=1.0, max_l=2.0)

        start = timer()
        OptimizeNew(setting_new=setting).nelder_mead(simplex=start_simplex_new,
                                                     sd_min=10**(-2))
        stop = timer()
        time_lyapunov = stop - start

    else:
        raise ValueError(
            f"Optimization method {opt_method.name} is infeasible")

    return time_standard, time_lyapunov
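
# Usage sketch (not part of the original example): the setting construction
# mirrors example #7 below.
if __name__ == '__main__':
    SETTING = SingleServerPerform(
        arr=DM1(lamb=1.0),
        const_rate=ConstantRate(rate=10.0),
        perform_param=PerformParameter(perform_metric=PerformEnum.DELAY_PROB,
                                       value=8))
    time_standard, time_lyapunov = compute_overhead(
        setting=SETTING, opt_method=OptMethod.GRID_SEARCH, number_l=1)
    print("standard: {0:.3f}s, Lyapunov: {1:.3f}s".format(
        time_standard, time_lyapunov))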
Example #6
def compute_improvement(setting: SettingNew,
                        opt_method: OptMethod,
                        number_l: int = 1,
                        print_x: bool = False,
                        show_warn: bool = False) -> tuple:
    """Compare the standard bound with the new Lyapunov bound."""

    if opt_method == OptMethod.GRID_SEARCH:
        theta_bounds = [(0.1, 4.0)]

        standard_bound = Optimize(setting=setting,
                                  print_x=print_x,
                                  show_warn=show_warn).grid_search(
                                      bound_list=theta_bounds, delta=0.1)

        bound_array = theta_bounds[:]
        for _i in range(1, number_l + 1):
            bound_array.append((0.9, 4.0))

        new_bound = OptimizeNew(setting_new=setting,
                                print_x=print_x,
                                show_warn=show_warn).grid_search(
                                    bound_list=bound_array, delta=0.1)

    elif opt_method == OptMethod.PATTERN_SEARCH:
        theta_start = 0.5

        start_list = [theta_start]

        standard_bound = Optimize(setting=setting,
                                  print_x=print_x,
                                  show_warn=show_warn).pattern_search(
                                      start_list=start_list,
                                      delta=3.0,
                                      delta_min=0.01)

        start_list_new = [theta_start] + [1.0] * number_l

        new_bound = OptimizeNew(setting_new=setting,
                                print_x=print_x,
                                show_warn=show_warn).pattern_search(
                                    start_list=start_list_new,
                                    delta=3.0,
                                    delta_min=0.01)

        # The pattern search may return a worse (larger) new bound;
        # fall back to the standard bound in that case.
        if new_bound > standard_bound:
            new_bound = standard_bound

    elif opt_method == OptMethod.NELDER_MEAD:
        theta_start = 0.5

        start_list = [theta_start]
        start_simplex = InitialSimplex(parameters_to_optimize=1).gao_han(
            start_list=start_list)

        standard_bound = Optimize(setting=setting,
                                  print_x=print_x,
                                  show_warn=show_warn).nelder_mead(
                                      simplex=start_simplex, sd_min=10**(-2))

        start_list_new = [theta_start] + [1.0] * number_l
        start_simplex_new = InitialSimplex(
            parameters_to_optimize=number_l + 1).gao_han(
                start_list=start_list_new)

        new_bound = OptimizeNew(setting_new=setting,
                                print_x=print_x,
                                show_warn=show_warn).nelder_mead(
                                    simplex=start_simplex_new, sd_min=10**(-2))

        # The Nelder-Mead search may return a worse (larger) new bound;
        # fall back to the standard bound in that case.
        if new_bound > standard_bound:
            new_bound = standard_bound

    elif opt_method == OptMethod.BASIN_HOPPING:
        theta_start = 0.5

        start_list = [theta_start]

        standard_bound = Optimize(
            setting=setting, print_x=print_x,
            show_warn=show_warn).basin_hopping(start_list=start_list)

        start_list_new = [theta_start] + [1.0] * number_l

        new_bound = OptimizeNew(
            setting_new=setting, print_x=print_x,
            show_warn=show_warn).basin_hopping(start_list=start_list_new)

        # Basin hopping may return a worse (larger) new bound;
        # fall back to the standard bound in that case.
        if new_bound > standard_bound:
            new_bound = standard_bound

    elif opt_method == OptMethod.SIMULATED_ANNEALING:
        simul_anneal_param = SimAnnealParams()
        theta_start = 0.5

        start_list = [theta_start]

        standard_bound = Optimize(setting=setting,
                                  print_x=print_x,
                                  show_warn=show_warn).sim_annealing(
                                      start_list=start_list,
                                      sim_anneal_params=simul_anneal_param)

        start_list_new = [theta_start] + [1.0] * number_l

        new_bound = OptimizeNew(setting_new=setting,
                                print_x=print_x,
                                show_warn=show_warn).sim_annealing(
                                    start_list=start_list_new,
                                    sim_anneal_params=simul_anneal_param)

        # Simulated annealing may return a worse (larger) new bound;
        # fall back to the standard bound in that case.
        if new_bound > standard_bound:
            new_bound = standard_bound

    elif opt_method == OptMethod.DIFFERENTIAL_EVOLUTION:
        theta_bounds = [(0.1, 8.0)]

        standard_bound = Optimize(
            setting=setting,
            print_x=print_x).diff_evolution(bound_list=theta_bounds)

        bound_array = theta_bounds[:]
        for _i in range(1, number_l + 1):
            bound_array.append((0.9, 8.0))

        new_bound = OptimizeNew(
            setting_new=setting,
            print_x=print_x).diff_evolution(bound_list=bound_array)

    else:
        raise ValueError(
            f"Optimization method {opt_method.name} is infeasible")

    # Safety net for all methods: never report a new bound that is worse
    # than the standard bound.
    if new_bound > standard_bound:
        new_bound = standard_bound

    if standard_bound == 0 or new_bound == 0:
        standard_bound = nan
        new_bound = nan

    return standard_bound, new_bound
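
# Usage sketch (not part of the original example): the setting construction
# mirrors example #7 below.
if __name__ == '__main__':
    SETTING = SingleServerPerform(
        arr=DM1(lamb=1.0),
        const_rate=ConstantRate(rate=10.0),
        perform_param=PerformParameter(perform_metric=PerformEnum.DELAY_PROB,
                                       value=8))
    standard_bound, new_bound = compute_improvement(
        setting=SETTING, opt_method=OptMethod.GRID_SEARCH, number_l=1)
    print("standard bound: {0}, new bound: {1}".format(
        standard_bound, new_bound))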
Example #7
if __name__ == '__main__':
    # OUTPUT_TIME6 is not defined in this fragment; the definition below is an
    # assumption that mirrors the DELAY_PROB8 pattern further down.
    OUTPUT_TIME6 = PerformParameter(perform_metric=PerformEnum.OUTPUT,
                                    value=6)

    SINGLE_SERVER = SingleServerPerform(arr=DM1(lamb=1.0),
                                        const_rate=ConstantRate(rate=10.0),
                                        perform_param=OUTPUT_TIME6)

    print(SINGLE_SERVER.bound(param_list=[0.1]))

    print(SINGLE_SERVER.new_bound(param_l_list=[0.1, 2.7]))

    print(
        Optimize(SINGLE_SERVER, print_x=True,
                 show_warn=True).grid_search(bound_list=[(0.1, 5.0)],
                                             delta=0.1))
    print(
        OptimizeNew(SINGLE_SERVER, print_x=True,
                    show_warn=True).grid_search(bound_list=[(0.1, 5.0),
                                                            (0.9, 8.0)],
                                                delta=0.1))
    print(
        OptimizeNew(SINGLE_SERVER, print_x=True,
                    show_warn=True).pattern_search(start_list=[0.5, 1.0],
                                                   delta=3.0,
                                                   delta_min=0.01))

    DELAY_PROB8 = PerformParameter(perform_metric=PerformEnum.DELAY_PROB,
                                   value=8)

    SINGLE_SERVER2 = SingleServerPerform(arr=MMOOFluid(mu=0.7,
                                                       lamb=0.4,
                                                       burst=1.2),
                                         const_rate=ConstantRate(rate=1.0),
                                         perform_param=DELAY_PROB8)
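
    # A plausible continuation (assumption, since the fragment is truncated
    # here): optimize the second setting the same way as the first.
    print(
        Optimize(SINGLE_SERVER2, print_x=True,
                 show_warn=True).grid_search(bound_list=[(0.1, 5.0)],
                                             delta=0.1))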