Example 1
def single_hop_comparison(
        aggregation: int, sigma_single: float, rho_single: float,
        service_rate: float, perform_param: PerformParameter,
        opt_method: OptMethod) -> Tuple[float, float, float, float, float]:

    print_x = False
    constant_rate_server = ConstantRate(service_rate)

    tb_const = TokenBucketConstant(sigma_single=sigma_single,
                                   rho_single=rho_single,
                                   n=aggregation)

    dnc_fifo_single: float = fifo_delay(token_bucket_constant=tb_const,
                                        constant_rate=constant_rate_server)

    const_single = SingleServerPerform(arr=tb_const,
                                       const_rate=constant_rate_server,
                                       perform_param=perform_param)

    leaky_mass_1 = SingleServerPerform(arr=LeakyBucketMassOne(
        sigma_single=sigma_single, rho_single=rho_single, n=aggregation),
                                       const_rate=constant_rate_server,
                                       perform_param=perform_param)

    leaky_mass_2 = SingleServerPerform(arr=LeakyBucketMassTwo(
        sigma_single=sigma_single, rho_single=rho_single, n=aggregation),
                                       const_rate=constant_rate_server,
                                       perform_param=perform_param)

    exact_mass_2 = SingleServerPerform(arr=LeakyBucketMassTwoExact(
        sigma_single=sigma_single, rho_single=rho_single, n=aggregation),
                                       const_rate=constant_rate_server,
                                       perform_param=perform_param)

    bound_list = [(0.05, 15.0)]
    delta = 0.05

    if opt_method == OptMethod.GRID_SEARCH:
        const_opt = Optimize(setting=const_single,
                             print_x=print_x).grid_search(
                                 bound_list=bound_list, delta=delta)

        leaky_mass_1_opt = Optimize(setting=leaky_mass_1,
                                    print_x=print_x).grid_search(
                                        bound_list=bound_list, delta=delta)

        leaky_mass_2_opt = Optimize(setting=leaky_mass_2,
                                    print_x=print_x).grid_search(
                                        bound_list=bound_list, delta=delta)

        exact_mass_2_opt = Optimize(setting=exact_mass_2,
                                    print_x=print_x).grid_search(
                                        bound_list=bound_list, delta=delta)

    else:
        raise NameError("Optimization parameter {0} is infeasible".format(
            opt_method.name))

    return (dnc_fifo_single, const_opt, leaky_mass_1_opt, leaky_mass_2_opt,
            exact_mass_2_opt)
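
A minimal, hypothetical driver for single_hop_comparison; every concrete value below is an illustrative assumption, not taken from the project:

# Hypothetical driver; parameter values are illustrative only.
DELAY_PROB4 = PerformParameter(perform_metric=PerformEnum.DELAY_PROB, value=4)

results = single_hop_comparison(aggregation=4,
                                sigma_single=1.0,
                                rho_single=0.1,
                                service_rate=0.65,
                                perform_param=DELAY_PROB4,
                                opt_method=OptMethod.GRID_SEARCH)
# -> (dnc_fifo, const_opt, leaky_mass_1_opt, leaky_mass_2_opt,
#     exact_mass_2_opt)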
Example 2

def tandem_compare(arr_list: List[ArrivalDistribution],
                   arr_list2: List[ArrivalDistribution],
                   ser_list: List[ConstantRate], opt_method: OptMethod,
                   perform_param: PerformParameter,
                   nc_analysis: NCAnalysis) -> tuple:

    if nc_analysis == NCAnalysis.SFA or nc_analysis == NCAnalysis.PMOO:
        setting = TandemSFA(
            arr_list=arr_list, ser_list=ser_list, perform_param=perform_param)
        setting2 = TandemSFA(
            arr_list=arr_list2, ser_list=ser_list, perform_param=perform_param)
    elif (nc_analysis == NCAnalysis.TFA
          and perform_param.perform_metric == PerformEnum.DELAY):
        setting = TandemTFADelay(
            arr_list=arr_list, ser_list=ser_list, prob_d=perform_param.value)
        setting2 = TandemTFADelay(
            arr_list=arr_list2, ser_list=ser_list, prob_d=perform_param.value)
    else:
        raise NameError(
            "{0} is an infeasible analysis type".format(nc_analysis))

    if opt_method == OptMethod.GRID_SEARCH:
        bound = Optimize(setting=setting).grid_search(
            bound_list=[(0.05, 15.0)], delta=0.05)
        bound2 = Optimize(setting=setting2).grid_search(
            bound_list=[(0.05, 15.0)], delta=0.05)
    else:
        raise NameError(
            "Optimization parameter {0} is infeasible".format(opt_method))

    return bound, bound2
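
A hedged usage sketch for tandem_compare; DM1 and ConstantRate are the constructors used in Example 15, while the concrete numbers and the NCAnalysis choice are illustrative assumptions:

# Hypothetical call; values are illustrative only.
DELAY_PROB4 = PerformParameter(perform_metric=PerformEnum.DELAY_PROB, value=4)

bound_sfa, bound_sfa2 = tandem_compare(
    arr_list=[DM1(lamb=1.0), DM1(lamb=1.0)],
    arr_list2=[DM1(lamb=0.8), DM1(lamb=0.8)],
    ser_list=[ConstantRate(rate=2.0), ConstantRate(rate=2.0)],
    opt_method=OptMethod.GRID_SEARCH,
    perform_param=DELAY_PROB4,
    nc_analysis=NCAnalysis.SFA)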
Example 3

def single_server_df(arr_list: List[ArrivalDistribution],
                     ser_list: List[ConstantRateServer], opt_method: OptMethod,
                     perform_param_list: PerformParamList) -> pd.DataFrame:
    """
    Compute the standard and h-mitigator bounds for each T in T_list and
    write them into a dataframe.
    Args:
        arr_list: Arrival object list
        ser_list: Service object list
        opt_method: optimization method (GRID_SEARCH or PATTERN_SEARCH)
        perform_param_list: list of performance parameter values

    Returns:
        dataframe
    """

    standard_bound = [0.0] * len(perform_param_list)
    new_bound = [0.0] * len(perform_param_list)

    for _i in range(len(perform_param_list)):
        setting = SingleServerMitPerform(
            arr_list=arr_list,
            server=ser_list[0],
            perform_param=perform_param_list.get_parameter_at_i(_i))

        if opt_method == OptMethod.GRID_SEARCH:
            standard_bound[_i] = Optimize(setting=setting,
                                          number_param=1).grid_search(
                                              grid_bounds=[(0.1, 4.0)],
                                              delta=0.1)
            new_bound[_i] = OptimizeMitigator(setting_h_mit=setting,
                                              number_param=2).grid_search(
                                                  grid_bounds=[(0.1, 4.0),
                                                               (0.9, 8.0)],
                                                  delta=0.05)

        elif opt_method == OptMethod.PATTERN_SEARCH:
            standard_bound[_i] = Optimize(setting=setting,
                                          number_param=1).pattern_search(
                                              start_list=[0.5],
                                              delta=3.0,
                                              delta_min=0.01)
            new_bound[_i] = OptimizeMitigator(setting_h_mit=setting,
                                              number_param=2).pattern_search(
                                                  start_list=[0.5, 2.0],
                                                  delta=3.0,
                                                  delta_min=0.01)
        else:
            raise NotImplementedError(
                f"Optimization parameter {opt_method} is infeasible")

    delay_bounds_df = pd.DataFrame(
        {
            "standard_bound": standard_bound,
            "h_mit_bound": new_bound
        },
        index=perform_param_list.values_list)

    return delay_bounds_df
Example 4

def single_server_df(arr1: ArrivalDistribution, ser1: ConstantRate,
                     opt_method: OptMethod,
                     perform_param_list: PerformParamList) -> pd.DataFrame:
    """Compute output bound for T in T_list and write into dataframe
    Args:
        arr1: Arrival object
        ser1: Service object
        opt_method: optimization method (GRID_SEARCH or PATTERN_SEARCH)
        perform_param_list: list of performance parameter values

    Returns:
        dataframe
    """

    bound = [0.0] * len(perform_param_list.values_list)
    new_bound = [0.0] * len(perform_param_list.values_list)

    for _i in range(len(perform_param_list.values_list)):
        setting = SingleServerPerform(
            arr=arr1,
            const_rate=ser1,
            perform_param=perform_param_list.get_parameter_at_i(_i))

        if opt_method == OptMethod.GRID_SEARCH:
            bound[_i] = Optimize(setting=setting).grid_search(bound_list=[
                (0.1, 4.0)
            ],
                                                              delta=0.1)
            new_bound[_i] = OptimizeNew(setting_new=setting,
                                        new=True).grid_search(bound_list=[
                                            (0.1, 4.0), (0.9, 8.0)
                                        ],
                                                              delta=0.05)

        elif opt_method == OptMethod.PATTERN_SEARCH:
            bound[_i] = Optimize(setting=setting).pattern_search(
                start_list=[0.5], delta=3.0, delta_min=0.01)
            new_bound[_i] = OptimizeNew(setting_new=setting,
                                        new=True).pattern_search(
                                            start_list=[0.5, 2.0],
                                            delta=3.0,
                                            delta_min=0.01)
        else:
            raise NameError(
                "Optimization parameter {0} is infeasible".format(opt_method))

    delay_bounds_df = pd.DataFrame({
        "bound": bound,
        "new_bound": new_bound
    },
                                   index=perform_param_list.values_list)
    delay_bounds_df = delay_bounds_df[["bound", "new_bound"]]

    return delay_bounds_df
Example 5
def compare_avoid_dep_212(setting: SettingMSOBFP,
                          print_x=False) -> Tuple[float, float, float]:
    """Compare standard_bound with the new Lyapunov standard_bound."""

    delta_val = 0.05

    one_param_bounds = [(0.1, 10.0)]
    two_param_bounds = [(0.1, 10.0), (1.1, 10.0)]

    standard_bound = Optimize(setting=setting, number_param=2,
                              print_x=print_x).grid_search(
                                  grid_bounds=two_param_bounds,
                                  delta=delta_val)

    server_bound = OptimizeServerBound(setting_msob_fp=setting,
                                       number_param=1,
                                       print_x=print_x).grid_search(
                                           grid_bounds=one_param_bounds,
                                           delta=delta_val)

    fp_bound = OptimizeFPBound(setting_msob_fp=setting,
                               number_param=2,
                               print_x=print_x).grid_search(
                                   grid_bounds=two_param_bounds,
                                   delta=delta_val)

    return standard_bound, server_bound, fp_bound
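
All of these comparisons reduce to Optimize(...).grid_search(grid_bounds, delta), which evaluates the bound on a regular parameter grid and keeps the minimum. A minimal self-contained sketch of that idea (an illustration of the semantics, not the library's implementation):

import itertools

import numpy as np


def naive_grid_search(objective, grid_bounds, delta):
    """Evaluate objective on a regular grid and return the smallest value.

    grid_bounds: one (low, high) pair per parameter; delta: the grid step.
    """
    axes = [np.arange(low, high + delta, delta) for low, high in grid_bounds]
    best = float("inf")
    for point in itertools.product(*axes):
        best = min(best, objective(list(point)))
    return best


# Toy objective, mirroring the two-parameter call pattern used above:
print(naive_grid_search(lambda x: (x[0] - 1.0)**2 + x[1],
                        grid_bounds=[(0.1, 10.0), (1.1, 10.0)],
                        delta=0.5))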
Example 6
def compare_time_211(setting: SettingMSOBFP) -> Tuple[float, float, float]:
    """Compare standard_bound with the new Lyapunov standard_bound."""

    delta_val = 0.05

    one_param_bounds = [(0.1, 10.0)]
    two_param_bounds = [(0.1, 10.0), (1.1, 10.0)]

    start = timer()
    Optimize(setting=setting,
             number_param=2).grid_search(grid_bounds=two_param_bounds,
                                         delta=delta_val)
    stop = timer()
    time_standard = stop - start

    start = timer()
    OptimizeServerBound(setting_msob_fp=setting, number_param=1).grid_search(
        grid_bounds=one_param_bounds, delta=delta_val)
    stop = timer()
    time_server_bound = stop - start

    start = timer()
    OptimizeFPBound(setting_msob_fp=setting,
                    number_param=1).grid_search(grid_bounds=one_param_bounds,
                                                delta=delta_val)
    stop = timer()
    time_fp_bound = stop - start

    return time_standard, time_server_bound, time_fp_bound
Example 7

def overlapping_tandem_adjust_arr_df(
        list_arr_list: List[List[ArrivalDistribution]],
        ser_list: List[ConstantRateServer], server_index: int,
        perform_param: PerformParameter) -> pd.DataFrame:
    """Compute delay standard_bound for T in T_list and write into dataframe.

    Args:
        list_arr_list: different Arrival object lists
        ser_list: Service object list
        server_index: index of the server to be analyzed
        perform_param: performance parameter

    Returns:
        dataframe

    """
    delta_val = 0.05

    one_param_bounds = [(0.1, 10.0)]
    two_param_bounds = [(0.1, 10.0), (1.1, 10.0)]

    standard_bound = [0.0] * len(list_arr_list)
    server_bound = [0.0] * len(list_arr_list)
    fp_bound = [0.0] * len(list_arr_list)
    utilizations = [0.0] * len(list_arr_list)

    for i in range(len(list_arr_list)):
        overlapping_tandem_setting = OverlappingTandemPerform(
            arr_list=list_arr_list[i],
            ser_list=ser_list,
            perform_param=perform_param)

        standard_bound[i] = Optimize(setting=overlapping_tandem_setting,
                                     number_param=2).grid_search(
                                         grid_bounds=two_param_bounds,
                                         delta=delta_val)
        server_bound[i] = OptimizeServerBound(
            setting_msob_fp=overlapping_tandem_setting,
            number_param=1).grid_search(grid_bounds=one_param_bounds,
                                        delta=delta_val)
        fp_bound[i] = OptimizeFPBound(
            setting_msob_fp=overlapping_tandem_setting,
            number_param=1).grid_search(grid_bounds=one_param_bounds,
                                        delta=delta_val)

        utilizations[i] = overlapping_tandem_setting.server_util(
            server_index=server_index)

    results_df = pd.DataFrame(
        {
            "standard_bound": standard_bound,
            "server_bound": server_bound,
            "fp_bound": fp_bound
        },
        index=utilizations)
    results_df = results_df[["standard_bound", "server_bound", "fp_bound"]]

    return results_df
Example 8
def overlapping_tandem_df(
        arr_list: List[ArrivalDistribution],
        ser_list: List[ConstantRateServer],
        perform_param_list: PerformParamList) -> pd.DataFrame:
    """Compute delay standard_bound for T in T_list and write into dataframe.

    Args:
        arr_list: Arrival object list
        ser_list: Service object list
        perform_param_list: list of performance parameter values

    Returns:
        dataframe

    """
    delta_val = 0.05

    one_param_bounds = [(0.1, 10.0)]
    two_param_bounds = [(0.1, 10.0), (1.1, 10.0)]

    standard_bound = [0.0] * len(perform_param_list)
    server_bound = [0.0] * len(perform_param_list)
    fp_bound = [0.0] * len(perform_param_list)

    for i in range(len(perform_param_list)):
        overlapping_tandem_setting = OverlappingTandemPerform(
            arr_list=arr_list,
            ser_list=ser_list,
            perform_param=perform_param_list.get_parameter_at_i(i))

        standard_bound[i] = Optimize(setting=overlapping_tandem_setting,
                                     number_param=2).grid_search(
                                         grid_bounds=two_param_bounds,
                                         delta=delta_val)
        server_bound[i] = OptimizeServerBound(
            setting_msob_fp=overlapping_tandem_setting,
            number_param=1).grid_search(grid_bounds=one_param_bounds,
                                        delta=delta_val)
        fp_bound[i] = OptimizeFPBound(
            setting_msob_fp=overlapping_tandem_setting,
            number_param=1).grid_search(grid_bounds=one_param_bounds,
                                        delta=delta_val)

    results_df = pd.DataFrame(
        {
            "standard_bound": standard_bound,
            "server_bound": server_bound,
            "fp_bound": fp_bound,
        },
        index=perform_param_list.values_list)
    results_df = results_df[["standard_bound", "server_bound", "fp_bound"]]

    print(
        f"utilization: {overlapping_tandem_setting.approximate_utilization()}")

    return results_df
Example 9
    def helper_function(rate: float):
        if opt_method == OptMethod.GRID_SEARCH:
            single_server = SingleServerBandwidth(
                arr_list=arr_list,
                s_e2e=ConstantRateServer(rate=rate),
                perform_param=PerformParameter(
                    perform_metric=PerformEnum.DELAY_PROB,
                    value=target_delay),
                indep=indep,
                geom_series=geom_series)

            if indep:
                current_delay_prob = Optimize(setting=single_server,
                                              number_param=1).grid_search(
                                                  grid_bounds=[(0.1, 5.0)],
                                                  delta=0.1)
            else:
                current_delay_prob = Optimize(setting=single_server,
                                              number_param=2).grid_search(
                                                  grid_bounds=[(0.1, 5.0),
                                                               (1.1, 5.0)],
                                                  delta=0.1)

        else:
            raise NotImplementedError("This optimization method is not "
                                      "implemented")

        return current_delay_prob - target_delay_prob
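
Since helper_function returns current_delay_prob - target_delay_prob, the enclosing code can hand it to a scalar root finder to solve for the minimum sufficient rate. A hedged sketch of that outer step, modeled on the brentq-style call visible in Example 18 (a=lower_interval, b=upper_interval, full_output=True); the interval endpoints here are assumptions:

from scipy.optimize import brentq

# Hypothetical bracket: helper_function must change sign on [a, b].
res = brentq(helper_function, a=0.5, b=200.0, full_output=True)
required_bandwidth = res[0]  # the root, i.e., the rate meeting the target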
Example 10
# The module paths for MMOODisc and MMOOFluid below are assumptions; the
# remaining imports are taken from the other examples in this collection.
from nc_arrivals.markov_modulated import MMOODisc, MMOOFluid  # assumed path
from nc_operations.perform_enum import PerformEnum
from nc_service.constant_rate_server import ConstantRate
from optimization.optimize import Optimize
from single_server.single_server_perform import SingleServerPerform
from utils.perform_parameter import PerformParameter

if __name__ == '__main__':
    DELAY_PROB8 = PerformParameter(perform_metric=PerformEnum.DELAY_PROB,
                                   value=8)

    SINGLE_SERVER = SingleServerPerform(arr=MMOOFluid(mu=0.5,
                                                      lamb=0.5,
                                                      burst=2.0),
                                        const_rate=ConstantRate(rate=1.5),
                                        perform_param=DELAY_PROB8)

    print(
        Optimize(SINGLE_SERVER, print_x=True,
                 show_warn=True).grid_search_old(bound_list=[(0.1, 5.0)],
                                                 delta=0.01))

    print(
        Optimize(SINGLE_SERVER, print_x=True,
                 show_warn=True).grid_search(bound_list=[(0.1, 5.0)],
                                             delta=0.01))

    SINGLE_SERVER2 = SingleServerPerform(arr=MMOODisc(stay_on=0.5,
                                                      stay_off=0.5,
                                                      burst=2.0),
                                         const_rate=ConstantRate(rate=1.5),
                                         perform_param=DELAY_PROB8)

    print(
        Optimize(SINGLE_SERVER2, print_x=True,
                 show_warn=True).grid_search(bound_list=[(0.1, 5.0)],
                                             delta=0.01))
Example 11
def csv_single_param_exp_lower(start_time: int,
                               perform_param: PerformParameter,
                               mc_dist: MonteCarloDist,
                               sample=False) -> dict:
    total_iterations = 10**2
    valid_iterations = total_iterations
    metric = "relative"
    sample_size = 10**3

    delta = 0.05

    size_array = [total_iterations, 2]
    # [rows, columns]

    if mc_dist.mc_enum == MCEnum.UNIFORM:
        param_array = np.random.uniform(
            low=0, high=mc_dist.param_list[0], size=size_array)
    elif mc_dist.mc_enum == MCEnum.EXPONENTIAL:
        param_array = np.random.exponential(
            scale=mc_dist.param_list[0], size=size_array)
    else:
        raise NameError("Distribution parameter {0} is infeasible".format(
            mc_dist.mc_enum))

    res_array = np.empty([total_iterations, 3])
    if sample:
        res_array_sample = np.empty([total_iterations, 3])

    for i in tqdm(range(total_iterations)):
        setting = SingleServerPerform(
            arr=DM1(lamb=param_array[i, 0]),
            const_rate=ConstantRate(rate=param_array[i, 1]),
            perform_param=perform_param)

        theta_bounds = [(0.1, 4.0)]
        bound_array = theta_bounds[:]

        res_array[i, 0] = Optimize(setting=setting).grid_search(
            bound_list=bound_array, delta=delta)

        bound_array_power = theta_bounds[:]
        bound_array_power.append((0.9, 4.0))

        res_array[i, 1] = OptimizeNew(setting_new=setting).grid_search(
            bound_list=bound_array_power, delta=delta)

        if perform_param.perform_metric == PerformEnum.DELAY_PROB:
            res_array[i, 2] = delay_prob_lower_exp_dm1_opt(
                t=start_time,
                delay=perform_param.value,
                lamb=param_array[i, 0],
                rate=param_array[i, 1])

            if sample:
                res_array_sample[i, 0] = res_array[i, 0]
                res_array_sample[i, 1] = res_array[i, 1]
                res_array_sample[i, 2] = delay_prob_sample_exp_dm1_opt(
                    t=start_time,
                    delay=perform_param.value,
                    lamb=param_array[i, 0],
                    rate=param_array[i, 1],
                    sample_size=sample_size)

            if res_array[i, 0] > 1.0:
                res_array[i, ] = nan
                if sample:
                    res_array_sample[i, ] = nan

        elif perform_param.perform_metric == PerformEnum.OUTPUT:
            res_array[i, 2] = output_lower_exp_dm1_opt(
                s=start_time,
                delta_time=perform_param.value,
                lamb=param_array[i, 0],
                rate=param_array[i, 1])

        else:
            raise NameError("{0} is an infeasible performance metric".format(
                perform_param.perform_metric))

        if (res_array[i, 1] == inf or res_array[i, 2] == inf
                or isnan(res_array[i, 0]) or isnan(res_array[i, 1])
                or isnan(res_array[i, 2])):
            res_array[i, ] = nan
            if sample:
                res_array_sample[i, ] = nan
            valid_iterations -= 1

    # print("exponential results", res_array[:, 2])

    res_dict = three_col_array_to_results(
        arrival_enum=ArrivalEnum.DM1,
        res_array=res_array,
        valid_iterations=valid_iterations,
        metric=metric)

    res_dict.update({
        "iterations": total_iterations,
        "delta_time": perform_param.value,
        "optimization": "grid_search",
        "metric": "relative",
        "MCDistribution": mc_dist.to_name(),
        "MCParam": mc_dist.param_to_string()
    })

    if sample:
        res_dict_sample = three_col_array_to_results(
            arrival_enum=ArrivalEnum.DM1,
            res_array=res_array_sample,
            valid_iterations=valid_iterations,
            metric=metric)

        res_dict_sample.update({
            "iterations": total_iterations,
            "delta_time": perform_param.value,
            "optimization": "grid_search",
            "metric": "relative",
            "MCDistribution": mc_dist.to_name(),
            "MCParam": mc_dist.param_to_string()
        })

    with open(
            "lower_single_{0}_DM1_results_MC{1}_power_exp.csv".format(
                perform_param.to_name(), mc_dist.to_name()), 'w') as csv_file:
        writer = csv.writer(csv_file)
        for key, value in res_dict.items():
            writer.writerow([key, value])
    if sample:
        with open(
                "sample_single_{0}_DM1_results_MC{1}_power_exp.csv".format(
                    perform_param.to_name(), mc_dist.to_name()),
                'w') as csv_file:
            writer = csv.writer(csv_file)
            for key, value in res_dict_sample.items():
                writer.writerow([key, value])

    return res_dict
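
The function above relies on imports that the extract does not show. A plausible header, with module paths copied from the other examples where available; the remaining paths are assumptions:

import csv
from math import inf, isnan, nan

import numpy as np
from tqdm import tqdm

from optimization.optimize import Optimize  # path as in Example 15
from utils.perform_parameter import PerformParameter
# ArrivalEnum, MCEnum, MonteCarloDist, OptimizeNew, DM1, ConstantRate, and
# the dm1 helper functions come from project modules not shown here.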
Example 12
def fat_cross_df(arr_list: List[ArrivalDistribution],
                 ser_list: List[ConstantRate], opt_method: OptMethod,
                 perform_param_list: PerformParamList) -> pd.DataFrame:
    """Compute delay bound for T in T_list and write into dataframe.

    Args:
        arr_list: Arrival object list
        ser_list: Service object list
        opt_method: optimization method (GRID_SEARCH, PATTERN_SEARCH, or
            GS_OLD)
        perform_param_list: list of performance parameter values

    Returns:
        dataframe

    """
    bound = [0.0] * len(perform_param_list.values_list)
    new_bound = [0.0] * len(perform_param_list.values_list)

    for _i in range(len(perform_param_list.values_list)):
        perform_param = perform_param_list.get_parameter_at_i(_i)
        setting = FatCrossPerform(arr_list=arr_list,
                                  ser_list=ser_list,
                                  perform_param=perform_param)

        if opt_method == OptMethod.GRID_SEARCH:
            bound[_i] = Optimize(setting=setting).grid_search(bound_list=[
                (0.1, 5.0)
            ],
                                                              delta=0.1)
            new_bound[_i] = OptimizeNew(setting_new=setting,
                                        new=True).grid_search(bound_list=[
                                            (0.1, 5.0), (0.9, 6.0)
                                        ],
                                                              delta=0.05)

        elif opt_method == OptMethod.PATTERN_SEARCH:
            bound[_i] = Optimize(setting=setting).pattern_search(
                start_list=[0.5], delta=3.0, delta_min=0.01)

            new_bound[_i] = OptimizeNew(setting_new=setting,
                                        new=True).pattern_search(
                                            start_list=[0.5, 2.0],
                                            delta=3.0,
                                            delta_min=0.01)

        elif opt_method == OptMethod.GS_OLD:
            bound[_i] = Optimize(setting=setting).grid_search_old(bound_list=[
                (0.1, 5.0)
            ],
                                                                  delta=0.1)
            new_bound[_i] = OptimizeNew(setting_new=setting,
                                        new=True).grid_search_old(bound_list=[
                                            (0.1, 5.0), (0.9, 6.0)
                                        ],
                                                                  delta=0.1)

        else:
            raise ValueError(
                "Optimization parameter {0} is infeasible".format(opt_method))

    results_df = pd.DataFrame({
        "bound": bound,
        "new_bound": new_bound
    },
                              index=perform_param_list.values_list)
    results_df = results_df[["bound", "new_bound"]]

    return results_df
Example 13
def compute_improvement(setting: SettingNew,
                        opt_method: OptMethod,
                        number_l=1,
                        print_x=False,
                        show_warn=False) -> tuple:
    """Compare standard_bound with the new Lyapunov bound."""

    if opt_method == OptMethod.GRID_SEARCH:
        theta_bounds = [(0.1, 4.0)]

        standard_bound = Optimize(setting=setting,
                                  print_x=print_x,
                                  show_warn=show_warn).grid_search(
                                      bound_list=theta_bounds, delta=0.1)

        bound_array = theta_bounds[:]
        for _i in range(1, number_l + 1):
            bound_array.append((0.9, 4.0))

        new_bound = OptimizeNew(setting_new=setting,
                                print_x=print_x,
                                show_warn=show_warn).grid_search(
                                    bound_list=bound_array, delta=0.1)

    elif opt_method == OptMethod.PATTERN_SEARCH:
        theta_start = 0.5

        start_list = [theta_start]

        standard_bound = Optimize(setting=setting,
                                  print_x=print_x,
                                  show_warn=show_warn).pattern_search(
                                      start_list=start_list,
                                      delta=3.0,
                                      delta_min=0.01)

        start_list_new = [theta_start] + [1.0] * number_l

        new_bound = OptimizeNew(setting_new=setting,
                                print_x=print_x,
                                show_warn=show_warn).pattern_search(
                                    start_list=start_list_new,
                                    delta=3.0,
                                    delta_min=0.01)

        # Safeguard: never return a heuristic bound worse than the standard one
        if new_bound > standard_bound:
            new_bound = standard_bound

    elif opt_method == OptMethod.NELDER_MEAD:
        theta_start = 0.5

        start_list = [theta_start]
        start_simplex = InitialSimplex(parameters_to_optimize=1).gao_han(
            start_list=start_list)

        standard_bound = Optimize(setting=setting,
                                  print_x=print_x,
                                  show_warn=show_warn).nelder_mead(
                                      simplex=start_simplex, sd_min=10**(-2))

        start_list_new = [theta_start] + [1.0] * number_l
        start_simplex_new = InitialSimplex(parameters_to_optimize=number_l +
                                           1).gao_han(
                                               start_list=start_list_new)

        new_bound = OptimizeNew(setting_new=setting,
                                print_x=print_x,
                                show_warn=show_warn).nelder_mead(
                                    simplex=start_simplex_new, sd_min=10**(-2))

        # Safeguard: never return a heuristic bound worse than the standard one
        if new_bound > standard_bound:
            new_bound = standard_bound

    elif opt_method == OptMethod.BASIN_HOPPING:
        theta_start = 0.5

        start_list = [theta_start]

        standard_bound = Optimize(
            setting=setting, print_x=print_x,
            show_warn=show_warn).basin_hopping(start_list=start_list)

        start_list_new = [theta_start] + [1.0] * number_l

        new_bound = OptimizeNew(
            setting_new=setting, print_x=print_x,
            show_warn=show_warn).basin_hopping(start_list=start_list_new)

        # Safeguard: never return a heuristic bound worse than the standard one
        if new_bound > standard_bound:
            new_bound = standard_bound

    elif opt_method == OptMethod.SIMULATED_ANNEALING:
        simul_anneal_param = SimAnnealParams()
        theta_start = 0.5

        start_list = [theta_start]

        standard_bound = Optimize(setting=setting,
                                  print_x=print_x,
                                  show_warn=show_warn).sim_annealing(
                                      start_list=start_list,
                                      sim_anneal_params=simul_anneal_param)

        start_list_new = [theta_start] + [1.0] * number_l

        new_bound = OptimizeNew(setting_new=setting,
                                print_x=print_x,
                                show_warn=show_warn).sim_annealing(
                                    start_list=start_list_new,
                                    sim_anneal_params=simul_anneal_param)

        # Safeguard: never return a heuristic bound worse than the standard one
        if new_bound > standard_bound:
            new_bound = standard_bound

    elif opt_method == OptMethod.DIFFERENTIAL_EVOLUTION:
        theta_bounds = [(0.1, 8.0)]

        standard_bound = Optimize(
            setting=setting,
            print_x=print_x).diff_evolution(bound_list=theta_bounds)

        bound_array = theta_bounds[:]
        for _i in range(1, number_l + 1):
            bound_array.append((0.9, 8.0))

        new_bound = OptimizeNew(
            setting_new=setting,
            print_x=print_x).diff_evolution(bound_list=bound_array)

    else:
        raise NameError(
            f"Optimization parameter {opt_method.name} is infeasible")

    # Safeguard: never return a heuristic bound worse than the standard one
    if new_bound > standard_bound:
        new_bound = standard_bound

    if standard_bound == 0 or new_bound == 0:
        standard_bound = nan
        new_bound = nan

    return standard_bound, new_bound
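
pattern_search(start_list, delta, delta_min) above is a derivative-free compass search: probe each coordinate at +/-delta, accept improvements, and halve delta once no probe improves, stopping at delta_min. A minimal self-contained sketch of that idea (an illustration, not the library's implementation):

def naive_pattern_search(objective, start_list, delta, delta_min):
    """Compass search: probe +/-delta per coordinate, halve delta on failure."""
    point = list(start_list)
    best = objective(point)
    while delta >= delta_min:
        improved = False
        for i in range(len(point)):
            for step in (delta, -delta):
                candidate = point[:]
                candidate[i] += step
                value = objective(candidate)
                if value < best:
                    point, best, improved = candidate, value, True
        if not improved:
            delta /= 2.0
    return best


# Toy objective, mirroring the call pattern used above:
print(naive_pattern_search(lambda x: (x[0] - 0.7)**2,
                           start_list=[0.5], delta=3.0, delta_min=0.01))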
Example 14

    RANGES_2 = [slice(0.1, 10.0, 0.1), slice(1.1, 10.0, 0.1)]

    PRINT_X = True

    print("Utilization:")
    print(
        SquarePerform(arr_list=ARR_LIST,
                      ser_list=SER_LIST,
                      perform_param=DELAY_PROB_TIME).approximate_utilization())

    print("Standard Approach:")
    print(
        Optimize(setting=SquarePerform(arr_list=ARR_LIST,
                                       ser_list=SER_LIST,
                                       perform_param=DELAY_PROB_TIME),
                 number_param=2,
                 print_x=PRINT_X).grid_search(grid_bounds=[(0.1, 10.0),
                                                           (1.1, 10.0)],
                                              delta=0.1))

    print("Server Bound:")
    print(
        OptimizeServerBound(
            setting_msob_fp=SquarePerform(arr_list=ARR_LIST,
                                          ser_list=SER_LIST,
                                          perform_param=DELAY_PROB_TIME),
            number_param=1,
            print_x=PRINT_X).grid_search(grid_bounds=[(0.1, 10.0)], delta=0.1))

    print("Flow Prolongation:")
    print(
Example 15
"""Computed some examples from the project"""

from fat_tree.fat_cross_perform import FatCrossPerform
from nc_arrivals.qt import DM1
from nc_operations.perform_enum import PerformEnum
from nc_service.constant_rate_server import ConstantRate
from optimization.optimize import Optimize
from utils.perform_parameter import PerformParameter

if __name__ == '__main__':
    PROB_VALUES = [0.5, 0.4, 0.3, 0.2, 0.1, 0.05, 0.01]

    for p in PROB_VALUES:
        DELAY_TIME = PerformParameter(perform_metric=PerformEnum.DELAY,
                                      value=p)

        EXAMPLE = FatCrossPerform(arr_list=[DM1(lamb=1.0)],
                                  ser_list=[ConstantRate(rate=2.0)],
                                  perform_param=DELAY_TIME)

        print(
            Optimize(setting=EXAMPLE, print_x=False,
                     show_warn=True).grid_search(bound_list=[(0.01, 1.1)],
                                                 delta=0.01))
Example 16
    TB_CONST = TokenBucketConstant(sigma_single=SIGMA_SINGLE,
                                   rho_single=RHO_SINGLE,
                                   n=NUMBER_AGGREGATIONS)

    CONST_SINGLE = SingleServerPerform(arr=TB_CONST,
                                       const_rate=CR_SERVER,
                                       perform_param=DELAY5)

    LEAKY_MASS_1 = SingleServerPerform(arr=LeakyBucketMassOne(
        sigma_single=SIGMA_SINGLE,
        rho_single=RHO_SINGLE,
        n=NUMBER_AGGREGATIONS),
                                       const_rate=CR_SERVER,
                                       perform_param=DELAY5)

    CONST_OPT = Optimize(setting=CONST_SINGLE,
                         print_x=PRINT_X).grid_search(bound_list=BOUND_LIST,
                                                      delta=DELTA)
    print("const_opt", CONST_OPT)

    LEAKY_MASS_1_OPT = Optimize(setting=LEAKY_MASS_1,
                                print_x=PRINT_X).grid_search(
                                    bound_list=BOUND_LIST, delta=DELTA)
    print("leaky_mass_1_opt", LEAKY_MASS_1_OPT)

    print("leaky_bucket_alter_opt")
    for _i in range(10):
        leaky_bucket_alter_opt = del_prob_alter_opt(delay_value=DELAY_VAL,
                                                    sigma_single=SIGMA_SINGLE,
                                                    rho_single=RHO_SINGLE,
                                                    ser=CR_SERVER,
                                                    t=_i,
Example 17

def fat_cross_perform_df(arr_list: List[ArrivalDistribution],
                         ser_list: List[ConstantRateServer],
                         opt_method: OptMethod,
                         perform_param_list: PerformParamList) -> pd.DataFrame:
    """Compute delay standard_bound for T in T_list and write into dataframe.

    Args:
        arr_list: Arrival object list
        ser_list: Service object list
        opt_method: PS or GS
        perform_param_list: list of performance parameter values

    Returns:
        dataframe

    """
    delta_val = 0.05

    one_param_bounds = [(delta_val, 10.0)]

    standard_bound = [0.0] * len(perform_param_list)
    h_mit_bound = [0.0] * len(perform_param_list)

    print_x = False

    fat_cross_setting = FatCrossPerform(
        arr_list=arr_list,
        ser_list=ser_list,
        perform_param=PerformParameter(perform_metric=PerformEnum.DELAY_PROB,
                                       value=0))

    print(f"utilization: {fat_cross_setting.approximate_utilization()}")
    print()

    for i in range(len(perform_param_list)):
        fat_cross_setting = FatCrossPerform(
            arr_list=arr_list,
            ser_list=ser_list,
            perform_param=perform_param_list.get_parameter_at_i(i))

        if opt_method == OptMethod.GRID_SEARCH:
            standard_bound[i] = Optimize(setting=fat_cross_setting,
                                         number_param=1,
                                         print_x=print_x).grid_search(
                                             grid_bounds=one_param_bounds,
                                             delta=delta_val)
            h_mit_bound[i] = OptimizeMitigator(
                setting_h_mit=fat_cross_setting,
                number_param=2,
                print_x=print_x).grid_search(grid_bounds=[(delta_val, 10.0),
                                                          (1 + delta_val, 8.0)
                                                          ],
                                             delta=delta_val)

        elif opt_method == OptMethod.PATTERN_SEARCH:
            standard_bound[i] = Optimize(setting=fat_cross_setting,
                                         number_param=1,
                                         print_x=print_x).pattern_search(
                                             start_list=[0.5],
                                             delta=3.0,
                                             delta_min=0.01)

            h_mit_bound[i] = OptimizeMitigator(setting_h_mit=fat_cross_setting,
                                               number_param=2,
                                               print_x=print_x).pattern_search(
                                                   start_list=[0.5, 2.0],
                                                   delta=3.0,
                                                   delta_min=0.01)

        else:
            raise NotImplementedError(f"Optimization parameter {opt_method} "
                                      f"is infeasible")

    results_df = pd.DataFrame(
        {
            "standard_bound": standard_bound,
            "h_mit_bound": h_mit_bound
        },
        index=perform_param_list.values_list)

    return results_df
Example 18
                                a=lower_interval,
                                b=upper_interval,
                                full_output=True)
    return res[0]


if __name__ == '__main__':
    print("Single Server Performance Bounds:\n")

    DELAY6 = PerformParameter(perform_metric=PerformEnum.DELAY_PROB, value=6)

    ARR_LIST = [MMOOCont(mu=0.2, lamb=0.5, peak_rate=2.6)]

    SINGLE_SERVER = SingleServerBandwidth(arr_list=ARR_LIST,
                                          s_e2e=ConstantRateServer(rate=2.0),
                                          perform_param=DELAY6)
    RESULTING_DELAY_PROB = Optimize(SINGLE_SERVER,
                                    number_param=1,
                                    print_x=True).grid_search(grid_bounds=[
                                        (0.1, 5.0)
                                    ],
                                                              delta=0.1)
    print(f"delay probability = {RESULTING_DELAY_PROB}")

    REQUIRED_BANDWIDTH = get_bandwidth_from_delay(arr_list=ARR_LIST,
                                                  target_delay=6,
                                                  target_delay_prob=0.034,
                                                  lower_interval=0.0,
                                                  upper_interval=200.0)
    print(f"required bandwidth = {REQUIRED_BANDWIDTH}")
Example 19

def single_hop_contour(sigma_single: float,
                       rho_single: float,
                       utilization: float,
                       perform_param: PerformParameter,
                       pure_snc=False) -> int:
    print_x = False
    show_warn = False

    bound_list = [(0.05, 15.0)]
    delta = 0.05

    aggregation = 1

    # util = n * rho / service => service = n * rho / util

    constant_rate_server = ConstantRate(aggregation * rho_single / utilization)

    tb_const = TokenBucketConstant(sigma_single=sigma_single,
                                   rho_single=rho_single,
                                   n=aggregation)

    if pure_snc:
        competitor = Optimize(
            setting=SingleServerPerform(arr=tb_const,
                                        const_rate=constant_rate_server,
                                        perform_param=perform_param),
            print_x=print_x,
            show_warn=show_warn).grid_search(bound_list=bound_list,
                                             delta=delta)

    else:
        competitor = fifo_delay(token_bucket_constant=tb_const,
                                constant_rate=constant_rate_server)

    leaky_mass_1_opt = Optimize(setting=SingleServerPerform(
        arr=LeakyBucketMassOne(sigma_single=sigma_single,
                               rho_single=rho_single,
                               n=aggregation),
        const_rate=constant_rate_server,
        perform_param=perform_param),
                                print_x=print_x,
                                show_warn=show_warn).grid_search(
                                    bound_list=bound_list, delta=delta)

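    # Increase the aggregation level until the competitor bound (DNC FIFO
    # delay or pure-SNC bound) is at least the LeakyBucketMassOne bound;
    # the server rate is rescaled each round so the utilization stays fixed.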
    while competitor < leaky_mass_1_opt:
        aggregation += 1

        constant_rate_server = ConstantRate(aggregation * rho_single /
                                            utilization)

        tb_const = TokenBucketConstant(sigma_single=sigma_single,
                                       rho_single=rho_single,
                                       n=aggregation)

        if pure_snc:
            competitor = Optimize(
                setting=SingleServerPerform(arr=tb_const,
                                            const_rate=constant_rate_server,
                                            perform_param=perform_param),
                print_x=print_x,
                show_warn=show_warn).grid_search(bound_list=bound_list,
                                                 delta=delta)

        else:
            competitor = fifo_delay(token_bucket_constant=tb_const,
                                    constant_rate=constant_rate_server)

        leaky_mass_1_opt = Optimize(setting=SingleServerPerform(
            arr=LeakyBucketMassOne(sigma_single=sigma_single,
                                   rho_single=rho_single,
                                   n=aggregation),
            const_rate=constant_rate_server,
            perform_param=perform_param),
                                    print_x=print_x,
                                    show_warn=show_warn).grid_search(
                                        bound_list=bound_list, delta=delta)

    # print("(dnc_fifo_single, const_opt, leaky_mass_1_opt)")
    # print(dnc_fifo_single, const_opt, leaky_mass_1_opt)

    return aggregation
Example 20
def compare_mitigator(setting: SettingMitigator,
                      opt_method: OptMethod,
                      number_l=1,
                      print_x=False) -> Tuple[float, float]:
    """Compare standard_bound with the new Lyapunov standard_bound."""

    if opt_method == OptMethod.GRID_SEARCH:
        delta_val = 0.1
        theta_bounds = [(delta_val, 4.0)]

        standard_bound = Optimize(setting=setting,
                                  number_param=1,
                                  print_x=print_x).grid_search(
                                      grid_bounds=theta_bounds,
                                      delta=delta_val)

        bound_array = theta_bounds[:]
        for _i in range(1, number_l + 1):
            bound_array.append((1.0 + delta_val, 4.0))

        h_mit_bound = OptimizeMitigator(setting_h_mit=setting,
                                        number_param=number_l + 1,
                                        print_x=print_x).grid_search(
                                            grid_bounds=bound_array,
                                            delta=delta_val)

    elif opt_method == OptMethod.PATTERN_SEARCH:
        theta_start = 0.5

        start_list = [theta_start]

        standard_bound = Optimize(setting=setting,
                                  number_param=1,
                                  print_x=print_x).pattern_search(
                                      start_list=start_list,
                                      delta=3.0,
                                      delta_min=0.01)

        start_list_new = [theta_start] + [1.0] * number_l

        h_mit_bound = OptimizeMitigator(setting_h_mit=setting,
                                        number_param=number_l + 1,
                                        print_x=print_x).pattern_search(
                                            start_list=start_list_new,
                                            delta=3.0,
                                            delta_min=0.01)

        # Safeguard: never return a heuristic bound worse than the standard one
        if h_mit_bound > standard_bound:
            h_mit_bound = standard_bound

    elif opt_method == OptMethod.NELDER_MEAD:
        theta_start = 0.5

        start_list = [theta_start]
        start_simplex = InitialSimplex(parameters_to_optimize=1).gao_han(
            start_list=start_list)

        standard_bound = Optimize(setting=setting,
                                  number_param=1,
                                  print_x=print_x).nelder_mead(
                                      simplex=start_simplex, sd_min=10**(-2))

        start_list_new = [theta_start] + [1.0] * number_l
        start_simplex_new = InitialSimplex(parameters_to_optimize=number_l +
                                           1).gao_han(
                                               start_list=start_list_new)

        h_mit_bound = OptimizeMitigator(setting_h_mit=setting,
                                        number_param=number_l + 1,
                                        print_x=print_x).nelder_mead(
                                            simplex=start_simplex_new,
                                            sd_min=10**(-2))

        # Safeguard: never return a heuristic bound worse than the standard one
        if h_mit_bound > standard_bound:
            h_mit_bound = standard_bound

    elif opt_method == OptMethod.BASIN_HOPPING:
        theta_start = 0.5

        start_list = [theta_start]

        standard_bound = Optimize(
            setting=setting, number_param=1,
            print_x=print_x).basin_hopping(start_list=start_list)

        start_list_new = [theta_start] + [1.0] * number_l

        h_mit_bound = OptimizeMitigator(
            setting_h_mit=setting, number_param=number_l + 1,
            print_x=print_x).basin_hopping(start_list=start_list_new)

        # Safeguard: never return a heuristic bound worse than the standard one
        if h_mit_bound > standard_bound:
            h_mit_bound = standard_bound

    elif opt_method == OptMethod.DUAL_ANNEALING:
        theta_bounds = [(0.1, 4.0)]

        standard_bound = Optimize(
            setting=setting, number_param=1,
            print_x=print_x).dual_annealing(bound_list=theta_bounds)

        bound_array = theta_bounds[:]
        for _i in range(1, number_l + 1):
            bound_array.append((0.9, 4.0))

        h_mit_bound = OptimizeMitigator(
            setting_h_mit=setting, number_param=number_l + 1,
            print_x=print_x).dual_annealing(bound_list=bound_array)

        # Safeguard: never return a heuristic bound worse than the standard one
        if h_mit_bound > standard_bound:
            h_mit_bound = standard_bound

    elif opt_method == OptMethod.DIFFERENTIAL_EVOLUTION:
        theta_bounds = [(0.1, 8.0)]

        standard_bound = Optimize(
            setting=setting, number_param=1,
            print_x=print_x).diff_evolution(bound_list=theta_bounds)

        bound_array = theta_bounds[:]
        for _i in range(1, number_l + 1):
            bound_array.append((0.9, 8.0))

        h_mit_bound = OptimizeMitigator(
            setting_h_mit=setting, number_param=number_l + 1,
            print_x=print_x).diff_evolution(bound_list=bound_array)

    else:
        raise NameError(
            f"Optimization parameter {opt_method.name} is infeasible")

    # Safeguard: never return a heuristic bound worse than the standard one
    if h_mit_bound > standard_bound:
        h_mit_bound = standard_bound

    if standard_bound == 0 or h_mit_bound == 0:
        standard_bound = nan
        h_mit_bound = nan

    return standard_bound, h_mit_bound
Example 21

    CONST_RATE_2 = ConstantRate(rate=0.3)

    SIMPLEX_START = np.array([[0.1], [0.3]])
    # SIMPLEX_START = np.array([[100], [200]])
    SIMPLEX_START_NEW = np.array([[0.1, 2.0], [0.3, 1.2], [0.4, 1.1]])
    SIMPLEX_RAND = InitialSimplex(parameters_to_optimize=1).uniform_dist(
        max_theta=0.6, max_l=2.0)

    NM_PARAM_SET = NelderMeadParameters()

    SETTING = FatCrossPerform(
        arr_list=[MMOO_1, MMOO_2],
        ser_list=[CONST_RATE_1, CONST_RATE_2],
        perform_param=DELAY_4)

    OPTI_OLD = Optimize(setting=SETTING, print_x=True)
    print(OPTI_OLD.grid_search(bound_list=[(0.1, 4.0)], delta=0.1))
    print(OPTI_OLD.pattern_search(start_list=[0.5], delta=3.0, delta_min=0.01))
    print(OPTI_OLD.nelder_mead(simplex=SIMPLEX_RAND))
    print(
        OPTI_OLD.nelder_mead_old(simplex=SIMPLEX_RAND,
                                 nelder_mead_param=NM_PARAM_SET))
    print(OPTI_OLD.basin_hopping(start_list=[2.0]))
    print(OPTI_OLD.diff_evolution(bound_list=[(0.1, 4.0)]))
    print(OPTI_OLD.bfgs(start_list=[0.4]))

    OPTI_NEW = OptimizeNew(setting_new=SETTING, new=True, print_x=True)
    print(
        OPTI_NEW.grid_search_old(
Example 22
def compare_time(setting: SettingMitigator,
                 opt_method: OptMethod,
                 number_l=1) -> tuple:
    """Compare computation times."""

    if opt_method == OptMethod.GRID_SEARCH:
        bound_array = [(0.1, 4.0)]

        start = timer()
        Optimize(setting=setting,
                 number_param=1).grid_search(grid_bounds=bound_array,
                                             delta=0.1)
        stop = timer()
        time_standard = stop - start

        for _ in range(1, number_l + 1):
            bound_array.append((0.9, 4.0))

        start = timer()
        OptimizeMitigator(setting_h_mit=setting, number_param=number_l +
                          1).grid_search(grid_bounds=bound_array, delta=0.1)
        stop = timer()
        time_lyapunov = stop - start

    elif opt_method == OptMethod.PATTERN_SEARCH:
        start_list = [0.5]

        start = timer()
        Optimize(setting=setting,
                 number_param=1).pattern_search(start_list=start_list,
                                                delta=3.0,
                                                delta_min=0.01)
        stop = timer()
        time_standard = stop - start

        start_list = [0.5] + [1.0] * number_l

        start = timer()
        OptimizeMitigator(setting_h_mit=setting, number_param=number_l +
                          1).pattern_search(start_list=start_list,
                                            delta=3.0,
                                            delta_min=0.01)
        stop = timer()
        time_lyapunov = stop - start

    elif opt_method == OptMethod.NELDER_MEAD:
        start_simplex = InitialSimplex(parameters_to_optimize=1).uniform_dist(
            max_theta=1.0)

        start = timer()
        Optimize(setting=setting,
                 number_param=1).nelder_mead(simplex=start_simplex,
                                             sd_min=10**(-2))
        stop = timer()
        time_standard = stop - start

        start_simplex_new = InitialSimplex(parameters_to_optimize=number_l +
                                           1).uniform_dist(max_theta=1.0,
                                                           max_l=2.0)

        start = timer()
        OptimizeMitigator(setting_h_mit=setting, number_param=number_l +
                          1).nelder_mead(simplex=start_simplex_new,
                                         sd_min=10**(-2))
        stop = timer()
        time_lyapunov = stop - start

    elif opt_method == OptMethod.DUAL_ANNEALING:
        bound_array = [(0.1, 4.0)]

        start = timer()
        Optimize(setting=setting,
                 number_param=1).dual_annealing(bound_list=bound_array)
        stop = timer()
        time_standard = stop - start

        for _ in range(1, number_l + 1):
            bound_array.append((0.9, 4.0))

        start = timer()
        OptimizeMitigator(setting_h_mit=setting, number_param=number_l +
                          1).dual_annealing(bound_list=bound_array)
        stop = timer()
        time_lyapunov = stop - start

    else:
        raise NameError(
            f"Optimization parameter {opt_method.name} is infeasible")

    return time_standard, time_lyapunov
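
Both timing branches above assume timer() is a monotonic wall-clock; a plausible import and usage pattern (the import itself is an assumption, as the extract does not show it):

from timeit import default_timer as timer

start = timer()
# ... call to be measured ...
elapsed = timer() - start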
Example 23
def csv_single_param_exp(start_time: int,
                         delay: int,
                         mc_dist: MonteCarloDist,
                         target_util: float,
                         total_iterations: int,
                         sample=False) -> dict:
    valid_iterations = total_iterations
    metric = ChangeEnum.RATIO_REF_NEW
    sample_size = 10**2

    delta = 0.05

    size_array = [total_iterations, 2]
    # [rows, columns]

    if mc_dist.mc_enum == MCEnum.UNIFORM:
        param_array = np.random.uniform(low=0,
                                        high=mc_dist.param_list[0],
                                        size=size_array)
    elif mc_dist.mc_enum == MCEnum.EXPONENTIAL:
        param_array = np.random.exponential(scale=mc_dist.param_list[0],
                                            size=size_array)
    else:
        raise NameError(
            f"Distribution parameter {mc_dist.mc_enum} is infeasible")

    res_array = np.empty([total_iterations, 2])
    res_array_sample = np.empty([total_iterations, 2])

    for i in tqdm(range(total_iterations)):
        single_setting = SingleServerMitPerform(
            arr_list=[DM1(lamb=param_array[i, 0])],
            server=ConstantRateServer(rate=param_array[i, 1]),
            perform_param=PerformParameter(
                perform_metric=PerformEnum.DELAY_PROB, value=delay))

        computation_necessary = True

        # print(res_array[i, ])
        if target_util > 0.0:
            util = single_setting.approximate_utilization()
            if util < target_util or util > 1:
                res_array[i, ] = np.nan
                computation_necessary = False

        if computation_necessary:

            theta_bounds = [(0.1, 4.0)]

            res_array[i, 0] = Optimize(setting=single_setting,
                                       number_param=1).grid_search(
                                           grid_bounds=theta_bounds,
                                           delta=delta)

            res_array[i, 1] = delay_prob_lower_exp_dm1_opt(
                t=start_time,
                delay=delay,
                lamb=param_array[i, 0],
                rate=param_array[i, 1])

            if sample:
                res_array_sample[i, 1] = delay_prob_sample_exp_dm1_opt(
                    t=start_time,
                    delay=delay,
                    lamb=param_array[i, 0],
                    rate=param_array[i, 1],
                    sample_size=sample_size)

            if res_array[i, 0] > 1.0:
                res_array[i, ] = np.nan
                if sample:
                    res_array_sample[i, ] = np.nan

        if np.isnan(res_array[i, 0]) or np.isnan(res_array[i, 1]):
            res_array[i, ] = np.nan
            res_array_sample[i, ] = np.nan
            valid_iterations -= 1

    # print("exponential results", res_array[:, 2])

    res_dict = two_col_array_to_results(arrival_enum=ArrivalEnum.DM1,
                                        param_array=param_array,
                                        res_array=res_array,
                                        number_servers=1,
                                        valid_iterations=valid_iterations,
                                        compare_metric=metric)

    res_dict.update({
        "iterations": total_iterations,
        "delta_time": delay,
        "optimization": "grid_search",
        "metric": metric.name,
        "MCDistribution": mc_dist.to_name(),
        "MCParam": mc_dist.param_to_string()
    })

    if sample:
        res_array_sample[:, 0] = res_array[:, 0]

        res_dict_sample = two_col_array_to_results(
            arrival_enum=ArrivalEnum.DM1,
            param_array=param_array,
            res_array=res_array_sample,
            number_servers=1,
            valid_iterations=valid_iterations,
            compare_metric=metric)

        res_dict_sample.update({
            "iterations": total_iterations,
            "delta_time": delay,
            "optimization": "grid_search",
            "metric": metric.name,
            "MCDistribution": mc_dist.to_name(),
            "MCParam": mc_dist.param_to_string()
        })

    suffix = f"single_DELAY_PROB_DM1_results" \
        f"_MC{mc_dist.to_name()}_power_exp.csv"

    with open("lower_" + suffix, 'w') as csv_file:
        writer = csv.writer(csv_file)
        for key, value in res_dict.items():
            writer.writerow([key, value])
    if sample:
        with open("sample_" + suffix, 'w') as csv_file:
            writer = csv.writer(csv_file)
            for key, value in res_dict_sample.items():
                writer.writerow([key, value])

    return res_dict
Example 24
# The module paths for MMOOCont and ConstantRateServer below are assumptions;
# the remaining imports are taken from the other examples in this collection.
from nc_arrivals.markov_modulated import MMOOCont  # assumed path
from nc_arrivals.qt import DM1
from nc_operations.perform_enum import PerformEnum
from nc_service.constant_rate_server import ConstantRateServer  # assumed path
from optimization.optimize import Optimize
from optimization.sim_anneal_param import SimAnnealParams
from single_server.single_server_perform import SingleServerPerform
from utils.perform_parameter import PerformParameter

if __name__ == '__main__':
    print("Single Server Performance Bounds:\n")

    DELAY_PROB8 = PerformParameter(perform_metric=PerformEnum.DELAY_PROB,
                                   value=8)

    SINGLE_SERVER = SingleServerPerform(foi=DM1(lamb=1.0),
                                        server=ConstantRateServer(rate=10.0),
                                        perform_param=DELAY_PROB8)

    print(
        Optimize(SINGLE_SERVER, number_param=1,
                 print_x=True).grid_search(grid_bounds=[(0.1, 5.0)],
                                           delta=0.1))

    SINGLE_SERVER2 = SingleServerPerform(foi=MMOOCont(mu=0.7,
                                                      lamb=0.4,
                                                      peak_rate=1.2),
                                         server=ConstantRateServer(rate=1.0),
                                         perform_param=DELAY_PROB8)

    print(
        Optimize(SINGLE_SERVER2, number_param=1,
                 print_x=True).grid_search(grid_bounds=[(0.1, 5.0)],
                                           delta=0.1))

    DELAY_PROB_REV = PerformParameter(perform_metric=PerformEnum.DELAY,
                                      value=0.0183)