Example #1
        def helper_function(delay: float) -> float:
            perform_delay = PerformParameter(
                perform_metric=PerformEnum.DELAY_PROB, value=delay)
            current_delay_prob = pmoo_explicit(foi=foi,
                                               cross_flows=cross_flows,
                                               ser_list=ser_list,
                                               theta=theta,
                                               perform_param=perform_delay,
                                               indep=indep)

            return target_delay_prob - current_delay_prob
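The helper above changes sign at the delay whose pmoo_explicit bound equals target_delay_prob, so a scalar root finder can invert the bound. The following self-contained sketch illustrates the idea with scipy.optimize.brentq and a toy, monotonically decreasing stand-in for the bound; the toy function and the bracket are illustrative assumptions, not project code.

from scipy.optimize import brentq

target_delay_prob = 1e-3


def toy_delay_prob_bound(delay: float) -> float:
    # toy stand-in for the PMOO delay-violation bound (not part of the project)
    return 2.0**(-delay)


def helper_function(delay: float) -> float:
    # positive once the toy bound drops below the target probability
    return target_delay_prob - toy_delay_prob_bound(delay)


# the bracket [0, 40] encloses exactly one sign change of helper_function
required_delay = brentq(helper_function, a=0.0, b=40.0)
print(required_delay)  # approx. log2(1 / target_delay_prob) = 9.97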
Example #2
        def helper_function(delay: float) -> float:
            perform_delay = PerformParameter(
                perform_metric=PerformEnum.DELAY_PROB, value=delay)
            current_delay_prob = sfa_explicit(
                foi=foi,
                leftover_service_list=leftover_service_list,
                theta=theta,
                perform_param=perform_delay,
                indep=indep)

            return target_delay_prob - current_delay_prob
Example #3
def new_delay(theta: float, delay: int, a_1: ArrivalDistribution,
              a_2: ArrivalDistribution, s_1: ConstantRate,
              s_2: TokenBucketConstant, s_3: ConstantRate) -> float:
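    # End-to-end service for the flow of interest a_1: convolve the service
    # left over at s_1 after cross flow a_2 with the service left over at s_3
    # after the token-bucket-constrained traffic s_2.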
    s_net: Service = Convolve(ser1=Leftover(arr=a_2, ser=s_1),
                              ser2=Leftover(arr=s_2, ser=s_3),
                              indep=True)

    return evaluate_single_hop(foi=a_1,
                               s_net=s_net,
                               theta=theta,
                               perform_param=PerformParameter(
                                   perform_metric=PerformEnum.DELAY_PROB,
                                   value=delay))
Example #4
def grid_param_simple_exp(delay: int, opt_method: OptMethod, metric: str,
                          lamb1_range, lamb2_range, rate1_range,
                          rate2_range) -> dict:
    """Choose parameters along a grid."""
    total_iterations = len(lamb1_range) * len(lamb2_range) * len(
        rate1_range) * len(rate2_range)

    param_array = np.empty([total_iterations, 4])
    res_array = np.empty([total_iterations, 2])

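    # Row i of param_array / res_array corresponds to one
    # (lamb1, lamb2, rate1, rate2) combination of the Cartesian product below.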
    i = 0
    for lamb1 in lamb1_range:
        for lamb2 in lamb2_range:
            for rate1 in rate1_range:
                for rate2 in rate2_range:
                    delay_prob = PerformParameter(
                        perform_metric=PerformEnum.DELAY_PROB, value=delay)

                    setting = FatCrossPerform(
                        arr_list=[DM1(lamb=lamb1),
                                  DM1(lamb=lamb2)],
                        ser_list=[
                            ConstantRate(rate=rate1),
                            ConstantRate(rate=rate2)
                        ],
                        perform_param=delay_prob)
                    param_array[i, 0] = lamb1
                    param_array[i, 1] = lamb2
                    param_array[i, 2] = rate1
                    param_array[i, 3] = rate2

                    # bound, new_bound
                    res_array[i, 0], res_array[i, 1] = compute_improvement(
                        setting=setting, opt_method=opt_method, number_l=1)

                    # Discard results where the new bound is trivial (>= 1) or
                    # either bound is NaN; note that `x == nan` is always
                    # False, so np.isnan must be used.
                    if (res_array[i, 1] >= 1 or np.isnan(res_array[i, 0])
                            or np.isnan(res_array[i, 1])):
                        res_array[i, :] = np.nan

                    if i % floor(total_iterations / 10) == 0:
                        print("iteration {0} of {1}".format(
                            i, total_iterations))

                    i += 1

    return two_col_array_to_results(arrival_enum=ArrivalEnum.DM1,
                                    metric=metric,
                                    param_array=param_array,
                                    res_array=res_array,
                                    number_servers=2)
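For reference, the four nested loops above enumerate the Cartesian product of the parameter ranges; the short, self-contained sketch below shows the same enumeration with itertools.product. The ranges are illustrative assumptions, not values from the project.

from itertools import product

import numpy as np

lamb1_range = lamb2_range = np.arange(0.5, 2.5, 0.5)
rate1_range = rate2_range = np.arange(1.0, 3.0, 0.5)

# one row per (lamb1, lamb2, rate1, rate2) combination, as in param_array above
param_array = np.array(
    list(product(lamb1_range, lamb2_range, rate1_range, rate2_range)))
print(param_array.shape)  # (256, 4) for the ranges chosen here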
Example #5
            def helper_function(delay: float) -> float:
                perform_delay = PerformParameter(
                    perform_metric=PerformEnum.DELAY_PROB, value=delay)
                current_delay_prob = sfa_tandem_bound(
                    foi=foi,
                    leftover_service_list=leftover_service_list,
                    theta=theta,
                    perform_param=perform_delay,
                    p_list=p_list,
                    e2e_enum=e2e_enum,
                    indep=indep,
                    geom_series=True)

                return target_delay_prob - current_delay_prob
Example #6
            def helper_function(delay: float) -> float:
                perform_delay = PerformParameter(
                    perform_metric=PerformEnum.DELAY_PROB, value=delay)
                current_delay_prob = pmoo_tandem_bound(
                    foi=foi,
                    cross_flows_on_foi_path=cross_flows_on_foi_path,
                    ser_on_foi_path=ser_on_foi_path,
                    theta=theta,
                    perform_param=perform_delay,
                    e2e_enum=e2e_enum,
                    indep=indep,
                    geom_series=True)

                return target_delay_prob - current_delay_prob
Example #7
    def helper_function(rate: float) -> float:
        if opt_method == OptMethod.GRID_SEARCH:
            if indep:
                single_server = SingleServerBandwidth(
                    arr_list=arr_list,
                    s_e2e=ConstantRateServer(rate=rate),
                    perform_param=PerformParameter(
                        perform_metric=PerformEnum.DELAY_PROB,
                        value=target_delay),
                    indep=True,
                    geom_series=geom_series)

                current_delay_prob = Optimize(setting=single_server,
                                              number_param=1).grid_search(
                                                  grid_bounds=[(0.1, 5.0)],
                                                  delta=0.1)
            else:
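                # Dependent arrivals: a second optimization parameter is
                # required, hence number_param=2 and the additional grid bound.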
                single_server = SingleServerBandwidth(
                    arr_list=arr_list,
                    s_e2e=ConstantRateServer(rate=rate),
                    perform_param=PerformParameter(
                        perform_metric=PerformEnum.DELAY_PROB,
                        value=target_delay),
                    indep=False,
                    geom_series=geom_series)

                current_delay_prob = Optimize(setting=single_server,
                                              number_param=2).grid_search(
                                                  grid_bounds=[(0.1, 5.0),
                                                               (1.1, 5.0)],
                                                  delta=0.1)

        else:
            raise NotImplementedError("This optimization method is not "
                                      "implemented")

        return current_delay_prob - target_delay_prob
Example #8
def standard_delay(theta: float, p: float, delay: int,
                   a_1: ArrivalDistribution, a_2: ArrivalDistribution,
                   a_3: ArrivalDistribution, s_1: ConstantRate,
                   s_2: ConstantRate, s_3: ConstantRate) -> float:
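    # f_3_output: output bound of flow a_3 after being served by the service
    # left over at s_2 once a_2's output from s_1 has been subtracted.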
    f_3_output: Arrival = Deconvolve(arr=a_3,
                                     ser=Leftover(arr=Deconvolve(arr=a_2,
                                                                 ser=s_1),
                                                  ser=s_2))
    s_net: Service = Convolve(ser1=Leftover(arr=a_2, ser=s_1),
                              ser2=Leftover(arr=f_3_output, ser=s_3),
                              indep=False,
                              p=p)

    return evaluate_single_hop(foi=a_1,
                               s_net=s_net,
                               theta=theta,
                               perform_param=PerformParameter(
                                   perform_metric=PerformEnum.DELAY_PROB,
                                   value=delay))
Example #9
def csv_single_param_exp(start_time: int,
                         delay: int,
                         mc_dist: MonteCarloDist,
                         target_util: float,
                         total_iterations: int,
                         sample=False) -> dict:
    valid_iterations = total_iterations
    metric = ChangeEnum.RATIO_REF_NEW
    sample_size = 10**2

    delta = 0.05

    size_array = [total_iterations, 2]
    # [rows, columns]

    if mc_dist.mc_enum == MCEnum.UNIFORM:
        param_array = np.random.uniform(low=0,
                                        high=mc_dist.param_list[0],
                                        size=size_array)
    elif mc_dist.mc_enum == MCEnum.EXPONENTIAL:
        param_array = np.random.exponential(scale=mc_dist.param_list[0],
                                            size=size_array)
    else:
        raise NameError(
            f"Distribution parameter {mc_dist.mc_enum} is infeasible")

    res_array = np.empty([total_iterations, 2])
    res_array_sample = np.empty([total_iterations, 2])
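    # res_array column 0: delay-violation bound from the theta grid search;
    # column 1: explicit bound computed by delay_prob_lower_exp_dm1_opt.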

    for i in tqdm(range(total_iterations)):
        single_setting = SingleServerMitPerform(
            arr_list=[DM1(lamb=param_array[i, 0])],
            server=ConstantRateServer(rate=param_array[i, 1]),
            perform_param=PerformParameter(
                perform_metric=PerformEnum.DELAY_PROB, value=delay))

        computation_necessary = True

        # print(res_array[i, ])
        if target_util > 0.0:
            util = single_setting.approximate_utilization()
            if util < target_util or util > 1:
                res_array[i, ] = np.nan
                computation_necessary = False

        if computation_necessary:

            theta_bounds = [(0.1, 4.0)]

            res_array[i, 0] = Optimize(setting=single_setting,
                                       number_param=1).grid_search(
                                           grid_bounds=theta_bounds,
                                           delta=delta)

            res_array[i, 1] = delay_prob_lower_exp_dm1_opt(
                t=start_time,
                delay=delay,
                lamb=param_array[i, 0],
                rate=param_array[i, 1])

            if sample:
                res_array_sample[i, 1] = delay_prob_sample_exp_dm1_opt(
                    t=start_time,
                    delay=delay,
                    lamb=param_array[i, 0],
                    rate=param_array[i, 1],
                    sample_size=sample_size)

            if res_array[i, 0] > 1.0:
                res_array[i, ] = np.nan
                if sample:
                    res_array_sample[i, ] = np.nan

        if np.isnan(res_array[i, 0]) or np.isnan(res_array[i, 1]):
            res_array[i, ] = np.nan
            res_array_sample[i, ] = np.nan
            valid_iterations -= 1

    # print("exponential results", res_array[:, 2])

    res_dict = two_col_array_to_results(arrival_enum=ArrivalEnum.DM1,
                                        param_array=param_array,
                                        res_array=res_array,
                                        number_servers=1,
                                        valid_iterations=valid_iterations,
                                        compare_metric=metric)

    res_dict.update({
        "iterations": total_iterations,
        "delta_time": delay,
        "optimization": "grid_search",
        "metric": metric.name,
        "MCDistribution": mc_dist.to_name(),
        "MCParam": mc_dist.param_to_string()
    })

    if sample:
        res_array_sample[:, 0] = res_array[:, 0]

        res_dict_sample = two_col_array_to_results(
            arrival_enum=ArrivalEnum.DM1,
            param_array=param_array,
            res_array=res_array_sample,
            number_servers=1,
            valid_iterations=valid_iterations,
            compare_metric=metric)

        res_dict_sample.update({
            "iterations": total_iterations,
            "delta_time": delay,
            "optimization": "grid_search",
            "metric": metric.name,
            "MCDistribution": mc_dist.to_name(),
            "MCParam": mc_dist.param_to_string()
        })

    suffix = f"single_DELAY_PROB_DM1_results" \
        f"_MC{mc_dist.to_name()}_power_exp.csv"

    with open("lower_" + suffix, 'w') as csv_file:
        writer = csv.writer(csv_file)
        for key, value in res_dict.items():
            writer.writerow([key, value])
    if sample:
        with open("sample_" + suffix, 'w') as csv_file:
            writer = csv.writer(csv_file)
            for key, value in res_dict_sample.items():
                writer.writerow([key, value])

    return res_dict
Example #10
    filename = name
    filename += f"_time_{perform_param.to_name()}_{arrival_enum.name}" \
        f"_MC{mc_dist.to_name()}_{opt_method.name}_util_{target_util}"

    with open(filename + ".csv", 'w') as csv_file:
        writer = csv.writer(csv_file)
        for key, value in time_dict.items():
            writer.writerow([key, value])

    return time_dict


if __name__ == '__main__':
    # DELAY_PROB10 = PerformParameter(perform_metric=PerformEnum.DELAY_PROB,
    #                                 value=10)
    DELAY_6 = PerformParameter(perform_metric=PerformEnum.DELAY,
                               value=10**(-6))

    COMMON_PERFORM_PARAM = DELAY_6
    COMMON_OPTIMIZATION = OptMethod.GRID_SEARCH
    COMMON_METRIC = ChangeEnum.RATIO_REF_NEW
    TARGET_UTIL = 0.75

    # MC_UNIF20 = MonteCarloDist(mc_enum=MCEnum.UNIFORM, param_list=[20.0])
    MC_UNIF10 = MonteCarloDist(mc_enum=MCEnum.UNIFORM, param_list=[10.0])

    PROCESS = ArrivalEnum.MD1

    print(
        csv_msob_fp_time(name="overlapping_tandem",
                         number_flows=3,
                         number_servers=3,
Example #11
                                  number_servers=number_servers,
                                  time_ratio=time_ratio))

    filename = (f"time_{perform_param.to_name()}_{arrival_enum.name}"
                f"_{opt_method.name}.csv")

    with open(filename, 'w') as csv_file:
        writer = csv.writer(csv_file)
        for key, value in time_ratio.items():
            writer.writerow([key, value])

    return time_ratio


if __name__ == '__main__':
    DELAY_PROB = PerformParameter(perform_metric=PerformEnum.DELAY_PROB,
                                  value=4)

    COMMON_OPTIMIZATION = OptMethod.PATTERN_SEARCH

    MC_UNIF20 = MonteCarloDist(mc_enum=MCEnum.UNIFORM, param_list=[20.0])

    list_number_servers1 = [2, 4, 6, 8, 10, 12]

    print(
        csv_fat_cross_time(arrival_enum=ArrivalEnum.DM1,
                           list_number_servers=list_number_servers1,
                           perform_param=DELAY_PROB,
                           opt_method=COMMON_OPTIMIZATION,
                           mc_dist=MC_UNIF20,
                           target_util=0.5))
Example #12
if __name__ == '__main__':
    from nc_arrivals.iid import DM1
    from nc_operations.perform_enum import PerformEnum
    from nc_server.constant_rate_server import ConstantRateServer
    from optimization.optimize import Optimize

    from msob_and_fp.optimize_fp_bound import OptimizeFPBound
    from msob_and_fp.optimize_server_bound import OptimizeServerBound

    # from optimization.function_optimizer import optimizer_perform
    # DELAY_PROB_TIME = PerformParameter(
    #     perform_metric=PerformEnum.DELAY_PROB, value=6)
    # DELAY_PROB_TIME = PerformParameter(perform_metric=PerformEnum.DELAY_PROB,
    #                                    value=8)
    DELAY_PROB_TIME = PerformParameter(perform_metric=PerformEnum.DELAY,
                                       value=1E-3)

    ARR_LIST = [DM1(lamb=2.3), DM1(lamb=4.5), DM1(lamb=1.7), DM1(lamb=4.5)]

    SER_LIST = [
        ConstantRateServer(rate=1.2),
        ConstantRateServer(rate=6.2),
        ConstantRateServer(rate=7.3),
        ConstantRateServer(rate=6.2)
    ]

    if not ARR_LIST[0].is_discrete():
        raise ValueError("Distribution must be discrete")

    RANGES = [slice(0.1, 10.0, 0.1)]
    RANGES_2 = [slice(0.1, 10.0, 0.1), slice(1.1, 10.0, 0.1)]
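    # Parameter search grids in slice notation: RANGES covers a single
    # parameter (presumably theta), RANGES_2 adds a second one.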
Example #13
                return inf
        else:
            try:
                return self.setting_bound.bound(param_list=param_list)
            except (ParameterOutOfBounds, OverflowError):
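                # Out-of-bounds or overflowing parameter choices are penalized
                # with inf so that a minimizer steers away from them.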
                return inf


if __name__ == '__main__':
    from fat_tree.fat_cross_perform import FatCrossPerform
    from utils.perform_parameter import PerformParameter
    from nc_operations.perform_enum import PerformEnum
    from nc_service.constant_rate_server import ConstantRate
    from nc_arrivals.markov_modulated import MMOOFluid

    DELAY_4 = PerformParameter(perform_metric=PerformEnum.DELAY, value=0.0001)

    MMOO_1 = MMOOFluid(mu=1.0, lamb=2.2, burst=3.4)
    MMOO_2 = MMOOFluid(mu=3.6, lamb=1.6, burst=0.4)
    CONST_RATE_1 = ConstantRate(rate=2.0)
    CONST_RATE_2 = ConstantRate(rate=0.3)

    SIMPLEX_START = np.array([[0.1], [0.3]])
    # SIMPLEX_START = np.array([[100], [200]])
    SIMPLEX_START_NEW = np.array([[0.1, 2.0], [0.3, 1.2], [0.4, 1.1]])
    SIMPLEX_RAND = InitialSimplex(parameters_to_optimize=1).uniform_dist(
        max_theta=0.6, max_l=2.0)

    NM_PARAM_SET = NelderMeadParameters()

    SETTING = FatCrossPerform(
Example #14
#         for key, value in res_dict.items():
#             writer.writerow([key, value])
#
#     return res_dict

if __name__ == '__main__':
    # TOTAL_ITERATIONS = 10**3
    TOTAL_ITERATIONS = 200
    METRIC = "relative"

    DELTA = 0.05

    START = 30

    DELTA_TIME = 10
    DELAY10 = PerformParameter(
        perform_metric=PerformEnum.DELAY_PROB, value=DELTA_TIME)

    # DELTA_TIME = 4
    # OUTPUT4 = PerformParameter(
    #     perform_metric=PerformEnum.OUTPUT, value=DELTA_TIME)

    # LAMB = 1.0
    # SERVICE_RATE = 1.2
    #
    # BOUND_LIST = [(0.05, 10.0)]
    # BOUND_LIST_NEW = [(0.05, 10.0), (1.05, 20.0)]
    # DELTA = 0.05
    # PRINT_X = False
    #
    # CR_SERVER = ConstantRate(SERVICE_RATE)
    #
Example #15
from fat_tree.fat_cross_perform import FatCrossPerform
from nc_arrivals.arrival_distribution import ArrivalDistribution
from nc_arrivals.markov_modulated import MMOOFluid
from nc_arrivals.qt import DM1
from nc_operations.perform_enum import PerformEnum
from nc_service.constant_rate_server import ConstantRate
from optimization.optimize import Optimize
from optimization.optimize_new import OptimizeNew
from single_server.single_server_perform import SingleServerPerform
from utils.perform_parameter import PerformParameter

if __name__ == '__main__':
    # Single server output calculation
    print("Single Server Performance Bounds:\n")

    OUTPUT_TIME6 = PerformParameter(perform_metric=PerformEnum.OUTPUT, value=6)

    SINGLE_SERVER = SingleServerPerform(arr=DM1(lamb=1.0),
                                        const_rate=ConstantRate(rate=10.0),
                                        perform_param=OUTPUT_TIME6)

    print(SINGLE_SERVER.bound(param_list=[0.1]))

    print(SINGLE_SERVER.new_bound(param_l_list=[0.1, 2.7]))

    print(
        Optimize(SINGLE_SERVER, print_x=True,
                 show_warn=True).grid_search(bound_list=[(0.1, 5.0)],
                                             delta=0.1))
    print(
        OptimizeNew(SINGLE_SERVER, print_x=True,
Example #16
def fat_cross_perform_df(arr_list: List[ArrivalDistribution],
                         ser_list: List[ConstantRateServer],
                         opt_method: OptMethod,
                         perform_param_list: PerformParamList) -> pd.DataFrame:
    """Compute delay standard_bound for T in T_list and write into dataframe.

    Args:
        arr_list: Arrival object list
        ser_list: Service object list
        opt_method: pattern search (PS) or grid search (GS)
        perform_param_list: list of performance parameter values

    Returns:
        dataframe

    """
    delta_val = 0.05

    one_param_bounds = [(delta_val, 10.0)]

    standard_bound = [0.0] * len(perform_param_list)
    h_mit_bound = [0.0] * len(perform_param_list)

    print_x = False

    fat_cross_setting = FatCrossPerform(
        arr_list=arr_list,
        ser_list=ser_list,
        perform_param=PerformParameter(perform_metric=PerformEnum.DELAY_PROB,
                                       value=0))

    print(f"utilization: {fat_cross_setting.approximate_utilization()}")
    print()

    for i in range(len(perform_param_list)):
        fat_cross_setting = FatCrossPerform(
            arr_list=arr_list,
            ser_list=ser_list,
            perform_param=perform_param_list.get_parameter_at_i(i))

        if opt_method == OptMethod.GRID_SEARCH:
            standard_bound[i] = Optimize(setting=fat_cross_setting,
                                         number_param=1,
                                         print_x=print_x).grid_search(
                                             grid_bounds=one_param_bounds,
                                             delta=delta_val)
            h_mit_bound[i] = OptimizeMitigator(
                setting_h_mit=fat_cross_setting,
                number_param=2,
                print_x=print_x).grid_search(
                    grid_bounds=[(delta_val, 10.0), (1 + delta_val, 8.0)],
                    delta=delta_val)

        elif opt_method == OptMethod.PATTERN_SEARCH:
            standard_bound[i] = Optimize(setting=fat_cross_setting,
                                         number_param=1,
                                         print_x=print_x).pattern_search(
                                             start_list=[0.5],
                                             delta=3.0,
                                             delta_min=0.01)

            h_mit_bound[i] = OptimizeMitigator(setting_h_mit=fat_cross_setting,
                                               number_param=2,
                                               print_x=print_x).pattern_search(
                                                   start_list=[0.5, 2.0],
                                                   delta=3.0,
                                                   delta_min=0.01)

        else:
            raise NotImplementedError(f"Optimization parameter {opt_method} "
                                      f"is infeasible")

    results_df = pd.DataFrame(
        {
            "standard_bound": standard_bound,
            "h_mit_bound": h_mit_bound
        },
        index=perform_param_list.values_list)

    return results_df
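As a usage note, the returned dataframe can be persisted with standard pandas calls, for instance as sketched below; the values and the filename are illustrative assumptions rather than output of the project.

import pandas as pd

# illustrative values only; the real dataframe comes from fat_cross_perform_df
results_df = pd.DataFrame({"standard_bound": [0.31, 0.12],
                           "h_mit_bound": [0.27, 0.10]},
                          index=[4, 8])
results_df.to_csv("fat_cross_bounds.csv", index_label="perform_param")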
Example #17
"""Computed some examples from the project"""

from fat_tree.fat_cross_perform import FatCrossPerform
from nc_arrivals.qt import DM1
from nc_operations.perform_enum import PerformEnum
from nc_service.constant_rate_server import ConstantRate
from optimization.optimize import Optimize
from utils.perform_parameter import PerformParameter

if __name__ == '__main__':
    PROB_VALUES = [0.5, 0.4, 0.3, 0.2, 0.1, 0.05, 0.01]

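    # Each p is the target violation probability; the grid search over theta
    # returns the corresponding optimized delay bound.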
    for p in PROB_VALUES:
        DELAY_TIME = PerformParameter(perform_metric=PerformEnum.DELAY,
                                      value=p)

        EXAMPLE = FatCrossPerform(arr_list=[DM1(lamb=1.0)],
                                  ser_list=[ConstantRate(rate=2.0)],
                                  perform_param=DELAY_TIME)

        print(
            Optimize(setting=EXAMPLE, print_x=False,
                     show_warn=True).grid_search(bound_list=[(0.01, 1.1)],
                                                 delta=0.01))
Example #18
    def get_parameter_at_i(self, i: int) -> PerformParameter:
        return PerformParameter(perform_metric=self.perform_metric,
                                value=self.values_list[i])
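This accessor presumably belongs to a list-like container of performance parameters. The self-contained sketch below is a hypothetical illustration of such a container; it is not the project's actual PerformParamList implementation, and PerformEnum / PerformParameter are reduced to minimal stand-ins.

from dataclasses import dataclass
from enum import Enum
from typing import List


class PerformEnum(Enum):  # stand-in for nc_operations.perform_enum.PerformEnum
    DELAY_PROB = "delay_prob"


@dataclass
class PerformParameter:  # stand-in for utils.perform_parameter.PerformParameter
    perform_metric: PerformEnum
    value: float


@dataclass
class PerformParamList:
    perform_metric: PerformEnum
    values_list: List[float]

    def get_parameter_at_i(self, i: int) -> PerformParameter:
        return PerformParameter(perform_metric=self.perform_metric,
                                value=self.values_list[i])


PARAMS = PerformParamList(perform_metric=PerformEnum.DELAY_PROB,
                          values_list=[2, 4, 6, 8])
print(PARAMS.get_parameter_at_i(2))  # PerformParameter(..., value=6)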
Example #19
    filename = name
    filename += f"_results_{perform_param.to_name()}_{arrival_enum.name}_" \
                f"MC{mc_dist.to_name()}_{opt_method.name}_" \
                f"{compare_metric.name}_util_{target_util}"

    with open(filename + ".csv", 'w') as csv_file:
        writer = csv.writer(csv_file)
        for key, value in res_dict.items():
            writer.writerow([key, value])

    return res_dict


if __name__ == '__main__':
    COMMON_PERFORM_PARAM = PerformParameter(
        perform_metric=PerformEnum.DELAY_PROB, value=10)

    # COMMON_PERFORM_PARAM = PerformParameter(perform_metric=PerformEnum.DELAY,
    #                                         value=1e-6)

    COMMON_OPTIMIZATION = OptMethod.GRID_SEARCH
    COMMON_METRIC = ChangeEnum.RATIO_REF_NEW
    TARGET_UTIL = 0.7

    # MC_UNIF20 = MonteCarloDist(mc_enum=MCEnum.UNIFORM, param_list=[20.0])
    MC_UNIF10 = MonteCarloDist(mc_enum=MCEnum.UNIFORM, param_list=[10.0])
    MC_EXP1 = MonteCarloDist(mc_enum=MCEnum.EXPONENTIAL, param_list=[1.0])

    # ARRIVAL_PROCESSES = [
    #     ArrivalEnum.DM1, ArrivalEnum.MMOOFluid, ArrivalEnum.MD1
    # ]
Example #20
        time_array[i, 0], time_array[i, 1] = compute_overhead(
            setting=setting, opt_method=opt_method, number_l=1)

        if i % floor(total_iterations / 10) == 0:
            print(f"iteration {i} of {total_iterations}")

    return time_array_to_results(arrival_enum=arrival_enum,
                                 time_array=time_array,
                                 number_servers=1,
                                 time_ratio=time_ratio)


if __name__ == '__main__':
    OUTPUT_TIME = PerformParameter(perform_metric=PerformEnum.OUTPUT, value=4)

    COMMON_OPTIMIZATION = OptMethod.GRID_SEARCH

    MC_UNIF20 = MonteCarloDist(mc_enum=MCEnum.UNIFORM, param_list=[20.0])

    print(
        mc_time_single(arrival_enum=ArrivalEnum.DM1,
                       perform_param=OUTPUT_TIME,
                       opt_method=COMMON_OPTIMIZATION,
                       mc_dist=MC_UNIF20))

    print(
        mc_time_single(arrival_enum=ArrivalEnum.MMOO,
                       perform_param=OUTPUT_TIME,
                       opt_method=COMMON_OPTIMIZATION,
Example #21
    try:
        grid_res = scipy.optimize.brute(func=helper_fun,
                                        ranges=(slice(0.05, 20.0, 0.05), ),
                                        full_output=True)
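        # With full_output=True, brute returns (x_min, f_min, grid, f_grid);
        # grid_res[0] is the minimizing parameter, grid_res[1] the minimum.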

    except (FloatingPointError, OverflowError):
        return inf

    if print_x:
        print("grid search optimal x: ", grid_res[0].tolist())

    return grid_res[1]


if __name__ == '__main__':
    DELAY_VAL = 5
    DELAY5 = PerformParameter(perform_metric=PerformEnum.DELAY_PROB,
                              value=DELAY_VAL)

    DELAY_PROB_VAL = 10**(-5)
    DELAY_PROB6 = PerformParameter(perform_metric=PerformEnum.DELAY,
                                   value=DELAY_PROB_VAL)

    NUMBER_AGGREGATIONS = 4

    RHO_SINGLE = 1.0
    SIGMA_SINGLE = 8.0
    SERVICE_RATE = 6.5

    BOUND_LIST = [(0.05, 20.0)]
    DELTA = 0.05
    PRINT_X = True
Example #22
from h_mitigator.fat_cross_perform import FatCrossPerform
from nc_arrivals.arrival_distribution import ArrivalDistribution
from nc_arrivals.markov_modulated import MMOOCont
from nc_arrivals.iid import DM1
from nc_operations.single_server_perform import SingleServerPerform
from nc_operations.perform_enum import PerformEnum
from nc_server.constant_rate_server import ConstantRateServer
from optimization.optimize import Optimize
from optimization.sim_anneal_param import SimAnnealParams
from utils.perform_parameter import PerformParameter

if __name__ == '__main__':
    print("Single Server Performance Bounds:\n")

    DELAY_PROB8 = PerformParameter(perform_metric=PerformEnum.DELAY_PROB,
                                   value=8)

    SINGLE_SERVER = SingleServerPerform(foi=DM1(lamb=1.0),
                                        server=ConstantRateServer(rate=10.0),
                                        perform_param=DELAY_PROB8)

    print(
        Optimize(SINGLE_SERVER, number_param=1,
                 print_x=True).grid_search(grid_bounds=[(0.1, 5.0)],
                                           delta=0.1))

    SINGLE_SERVER2 = SingleServerPerform(foi=MMOOCont(mu=0.7,
                                                      lamb=0.4,
                                                      peak_rate=1.2),
                                         server=ConstantRateServer(rate=1.0),
                                         perform_param=DELAY_PROB8)
                                theta=param_l_list[0],
                                delta_time=self.perform_param.value,
                                l_power=param_l_list[1])

        else:
            raise NameError(f"{self.perform_param.perform_metric} is an "
                            f"infeasible performance metric")

    def to_string(self) -> str:
        return self.to_name() + "_" + self.arr.to_value(
        ) + "_" + self.ser.to_value() + self.perform_param.to_name_value()


if __name__ == '__main__':
    EXP_ARRIVAL1 = DM1(lamb=1.0)
    CONST_RATE16 = ConstantRate(rate=1.6)
    OUTPUT_4 = PerformParameter(perform_metric=PerformEnum.OUTPUT, value=4)
    EX_OUTPUT = SingleServerPerform(arr=EXP_ARRIVAL1,
                                    const_rate=CONST_RATE16,
                                    perform_param=OUTPUT_4)
    print(EX_OUTPUT.bound(param_list=[0.5]))
    print(EX_OUTPUT.new_bound(param_l_list=[0.5, 1.2]))

    DELAY_PROB_4 = PerformParameter(perform_metric=PerformEnum.DELAY_PROB,
                                    value=4)
    EX_DELAY_PROB = SingleServerPerform(arr=EXP_ARRIVAL1,
                                        const_rate=CONST_RATE16,
                                        perform_param=DELAY_PROB_4)
    print(EX_DELAY_PROB.bound(param_list=[0.5]))
    print(EX_DELAY_PROB.new_bound(param_l_list=[0.5, 1.2]))
            server_index=server_index)

    results_df = pd.DataFrame(
        {
            "standard_bound": standard_bound,
            "server_bound": server_bound,
            "fp_bound": fp_bound
        },
        index=utilizations)
    results_df = results_df[["standard_bound", "server_bound", "fp_bound"]]

    return results_df


if __name__ == '__main__':
    DELAY3 = PerformParameter(perform_metric=PerformEnum.DELAY, value=10**(-3))

    # print(
    #     arrival_list_to_csv(
    #         prefix="overlapping_tandem_",
    #         data_frame_creator=overlapping_tandem_adjust_arr_df,
    #         list_arr_list=[[DM1(lamb=2.3),
    #                         DM1(lamb=4.5),
    #                         DM1(lamb=0.8)],
    #                        [DM1(lamb=2.3),
    #                         DM1(lamb=4.5),
    #                         DM1(lamb=0.7)],
    #                        [DM1(lamb=2.3),
    #                         DM1(lamb=4.5),
    #                         DM1(lamb=0.6)],
    #                        [DM1(lamb=2.3),