Example #1
def task_3_2_2():
    """
    Here, we execute task 3.2.2 and print the results to the console.
    The first result list holds the results for 100s of simulation time, the second one for 1000s.
    """
    # TODO Task 3.2.2: Your code goes here
    rho = [.01, .5, .8, .9]
    system_utilization_result = []
    sim_param = SimParam()
    random.seed(sim_param.SEED)
    sim = Simulation(sim_param)
    sim.sim_param.SIM_TIME = 100000
    sim.sim_param.S = 5
    for r in rho:
        sim.sim_param.RHO = r
        sim.reset()
        system_utilization_result.append(sim.do_simulation().system_utilization)
    print "The system utilization results for a simulation time of 100s :"
    print system_utilization_result
    rho = [.01, .5, .8, .9]
    system_utilization_result = []
    sim_param = SimParam()
    random.seed(sim_param.SEED)
    sim = Simulation(sim_param)
    sim.sim_param.SIM_TIME = 1000000
    sim.sim_param.S = 5
    for r in rho:
        sim.sim_param.RHO = r
        sim.reset()
        system_utilization_result.append(sim.do_simulation().system_utilization)
    print "The system utilization results for a simulation time of 1000s :"
    print system_utilization_result
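
For a quick plausibility check of the printed utilizations, the values can be compared with the analytical server utilization of an M/M/1 queue with finite capacity. The helper below is only a sketch under the assumption that S denotes the number of waiting spaces, so the total capacity is K = S + 1; it is not part of the simulation framework.

def mm1k_utilization(rho, k):
    # Server utilization (probability the server is busy) of an M/M/1/K queue, rho != 1:
    # 1 - P_0 = rho * (1 - rho**k) / (1 - rho**(k + 1))
    return rho * (1.0 - rho ** k) / (1.0 - rho ** (k + 1))

# assumed capacity K = S + 1 = 6 for the settings above
for r in [.01, .5, .8, .9]:
    print("rho = %.2f -> expected utilization ~ %.4f" % (r, mm1k_utilization(r, 6)))
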
def task_1_7_3(queue_size):
    """
    Execute bonus task 1.7.3.
    """
    # TODO Bonus Task 1.7.3: Your code goes here (if necessary)
    sim_param = SimParam()
    random.seed(sim_param.SEED)
    sim = Simulation(sim_param)
    sim.sim_param.S=queue_size
    result_set=[]
    for i in range(sim.sim_param.NO_OF_RUNS):
        sim.reset()
        sim_result=sim.do_simulation()
        result_set.append(sim_result.blocking_probability)
    result_length=len(result_set)
    n, bins, patches = pylab.hist(result_set)
    nc=np.cumsum(n/result_length)
    cdf=[0.0]
    for i in nc:
        cdf.append(i)

    sim_param = SimParam()
    random.seed(sim_param.SEED)
    sim_param.SIM_TIME = 1000000
    sim_param.MAX_DROPPED = 100
    sim_param.NO_OF_RUNS = 100
    sim = Simulation(sim_param)
    sim.sim_param.S=queue_size
    result_set=[]
    for i in range(sim.sim_param.NO_OF_RUNS):
        sim.reset()
        sim_result=sim.do_simulation()
        result_set.append(sim_result.blocking_probability)
    result_length=len(result_set)
    n1, bins1, patches1 = pylab.hist(result_set)
    nc1 = np.cumsum(n1 / result_length)
    cdf1 = [0.0]
    for i in nc1:
        cdf1.append(i)

    pylab.figure(1)
    pylab.xlabel('blocking_probability')
    pylab.ylabel('density')
    pylab.title('Histogram of the probability density function')
    # re-draw the two pre-computed histograms via the weights argument
    pylab.hist(bins[:-1], bins, weights=n)
    pylab.hist(bins1[:-1], bins1, weights=n1)
    pylab.figure(2)
    pylab.xlim(0.0,1.0)
    pylab.ylim(0.0,1.0)
    pylab.xlabel('blocking_probability')
    pylab.ylabel('CDF')
    pylab.title('CDF function')
    line1, = pylab.plot(bins,cdf, marker='o', label='100000 ms, 10 MAX_DROPPED, 1000 runs')
    line2, = pylab.plot(bins1,cdf1, marker='o', label='1000000 ms, 100 MAX_DROPPED, 100 runs')
    pylab.legend(handler_map={line1: HandlerLine2D(numpoints=4)})
    pylab.show()
def task_4_3_1():
    """
    Run the correlation tests for given rho for all correlation counters in counter collection.
    After each simulation, print report results.
    SIM_TIME is set higher in order to avoid a large influence of startup effects
    """
    # TODO Task 4.3.1: Your code goes here
    rho = [0.01, 0.5, 0.8, 0.95]
    sim_param = SimParam()
    random.seed(sim_param.SEED)
    sim = Simulation(sim_param)
    sim.sim_param.SIM_TIME = 1000000
    sim.sim_param.S = 10000
    for r in rho:
        print "---------- rho = ,", r, " ---------"
        sim.sim_param.RHO = r
        sim.reset()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            sim.do_simulation()
        print "Correlation    between    IAT    and    waiting     time   of  a packet           : ", sim.counter_collection.cnt_iat_wt.get_cor(
        )
        print "Correlation    between    IAT    and    serving     time   of  a packet           : ", sim.counter_collection.cnt_iat_st.get_cor(
        )
        print "Correlation between IAT and system time (waiting time + serving time) of a packet : ", sim.counter_collection.cnt_iat_syst.get_cor(
        )
        print "Correlation    between    service time and system   time   of a packet            : ", sim.counter_collection.cnt_st_syst.get_cor(
        )
        print "Auto-correlation  of  waiting  time  with  lags  ranging  from  1  to  20         : ", sim.counter_collection.acnt_wt.get_auto_cor(
            2)
Example #4
    def __init__(self, sim_param=SimParam(), no_seed=False):
        """
        Initialize the Simulation object.
        :param sim_param: is an optional SimParam object for parameter pre-configuration
        :param no_seed: is an optional parameter. If it is set to True, the RNG should be initialized without
        a specific seed.
        """
        self.sim_param = sim_param
        self.sim_state = SimState()
        self.system_state = SystemState(self)
        self.event_chain = EventChain()
        self.sim_result = SimResult(self)
        # TODO Task 2.4.3: Uncomment the line below
        self.counter_collection = CounterCollection(self)
        # TODO Task 3.1.2: Uncomment the line below and replace the "None"

        if no_seed:
            #if the mean = 1.0, then 1/lambda_ = 1.0 -> lambda_ = 1
            self.rng = RNG(ExponentialRNS(1.0),
                           ExponentialRNS(1. / float(self.sim_param.RHO)))
        else:
            self.rng = RNG(
                ExponentialRNS(1.0, self.sim_param.SEED_IAT),
                ExponentialRNS(1. / float(self.sim_param.RHO),
                               self.sim_param.SEED_ST))
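
ExponentialRNS and RNG come from the surrounding framework and are not shown here. As an illustration of the interface used above, a minimal exponential stream could be sketched as follows (class name and the .next() method are assumptions based on how it is called):

import random

class ExponentialRNSSketch(object):
    """Hypothetical minimal exponential random number stream with mean 1/lambda_."""

    def __init__(self, lambda_, the_seed=None):
        self.lambda_ = float(lambda_)
        self.rand = random.Random(the_seed)  # dedicated generator, optionally seeded

    def next(self):
        # draw one exponentially distributed sample
        return self.rand.expovariate(self.lambda_)
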
def task_4_3_1():
    """
    Run the correlation tests for given rho for all correlation counters in counter collection.
    After each simulation, print report results.
    SIM_TIME is set higher in order to avoid a large influence of startup effects
    """
    # TODO Task 4.3.1: Your code goes here
    sim_param = SimParam()
    sim = Simulation(sim_param)
    sim.sim_param.S = 10000
    sim.sim_param.SIM_TIME = 10000000
    for rho in [0.01, 0.5, 0.8, 0.95]:
        sim.sim_param.RHO = rho
        sim.reset()
        sim.counter_collection.reset()
        sim = sim.do_simulation().sim
        print("RHO = " + str(rho))
        print("Correlation between IAT and waiting time of a packet = " +
              str(sim.counter_collection.cnt_iat_wt.get_cor()))
        print("Correlation between IAT and serving time of a packet = " +
              str(sim.counter_collection.cnt_iat_st.get_cor()))
        print(
            "Correlation between IAT and system time (waiting time + serving time) of a packet = "
            + str(sim.counter_collection.cnt_iat_syst.get_cor()))
        print(
            "Correlation between serving time and system time of a packet = " +
            str(sim.counter_collection.cnt_st_syst.get_cor()))
        for lag in range(1, 21):
            print("Auto-correlation of waiting time at lag " + str(lag) + " = " +
                  str(sim.counter_collection.acnt_wt.get_auto_cor(lag)))

        print(" ")
Example #6
    def __init__(self, user_id, dist=10, slice_list=[], sim_param=SimParam()):
        """

        """
        self.user_id = user_id
        self.slice_list = slice_list
        self.sim_param = sim_param
        self.distance = dist  # np.random.uniform(self.sim_param.dist_range)
        if self.sim_param.cts_service:
            self.channel = ChannelModalCts(self)
        else:
            self.channel = ChannelModal(self)

        self.traffic_generator = TrafficGenerator(self)
        self.traffic_list = []
        self.traffic_list_dict = {}
        for i in self.slice_list:
            if 0:  #i.slice_param.SLICE_ID==0:
                self.traffic_list.append(
                    self.traffic_generator.periodic_arrivals(i))  # for RR
            else:
                self.traffic_list.append(
                    self.traffic_generator.poisson_arrivals(i))
            self.traffic_list_dict.update(
                {i.slice_param.SLICE_ID: self.traffic_list[-1]})
    def __init__(self, t_final: int = 1000):
        """
        Main ran_simulation
        """
        sim_param = SimParam(t_final)

        self.initialize_spaces(sim_param)

        # other attributes of ran_environment
        self.state = None
        self.sim_param = sim_param
        #self.C_algo = 'RL'
        self.slice_scores = None  # slice scores for reward method 3
        self.user_scores = None

        # generate seed values
        new_seed = seeding.create_seed()
        self.sim_param.update_seeds(new_seed)

        # initialize SD_RAN_Controller
        self.SD_RAN_Controller = Controller(self.sim_param)

        # data
        self.user_score_arr = None
        self.slice_score_arr = None
        self.reward_hist = None
        self.cost_tp_hist = None
        self.cost_bp_hist = None
        self.cost_delay_hist = None
        self.reset_counter = 0

        columns = 'reward_hist slice_score_0 slice_score_1 slice_score_2'
        self.env_df = pd.DataFrame(columns=columns.split())
 def __init__(self, setting):
     # Load simulation parameters
     self.sim_param = SimParam(setting)
     # Load simtime
     if setting.dynamictest:
         self.SIMTIME = setting.dynamictest.simtime
     self.freeaccess = False
     if setting.secondwindow.test_values[3]:
         self.freeaccess = True
     # Load the simulation state parameters
     self.sim_state = SimState()
     # Load the result parameters
     self.sim_result = SimResult()
     # Load the class which performs all the methods governing a simple slot
     self.slot = TreeSlot()
     # Load the branch node which keeps track of a tree
     self.branch_node = BranchNode()
     # Create an array which will contain all active nodes.
     self.active_array = []
     # For gated access, the arrived packets are put into a queue
     self.queue_array = []
     # The number of packets generated in a single slot
     self.packets_gen = 0
     # The result of a slot
     self.result = 0
     # The current slot no
     self.slot_no = 0
     # Load the parameters for single tree resolution
     self.tree_state = TreeState(self)
Example #9
def task_5_2_4(rho, alpha, sim_time, num):
    """
    Plot confidence interval as described in the task description for task 5.2.4.
    We use the function plot_confidence() for the actual plotting and run our simulation several times to get the
    samples. Due to the different configurations, we receive eight plots in two figures.
    """
    # TODO Task 5.2.4: Your code goes here

    #rho = 0.5 / alpha = 0.1 / Sim time = 100s
    TIC_SU = TimeIndependentCounter("System Utilization")
    TIC_CI = []
    sim_param = SimParam()
    random.seed(sim_param.SEED)
    sim = Simulation(sim_param)
    sim.sim_param.SIM_TIME = sim_time
    sim.sim_param.S = 100000
    sim.sim_param.RHO = rho
    random.seed(sim.sim_param.SEED_IAT)
    random.seed(sim.sim_param.SEED_ST)
    for i in range(100):
        for j in range(30):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", category=RuntimeWarning)
                TIC_SU.count(sim.do_simulation().system_utilization)
                sim.reset()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            TIC_CI.append(
                (TIC_SU.get_mean() - TIC_SU.report_confidence_interval(alpha),
                 TIC_SU.get_mean() + TIC_SU.report_confidence_interval(alpha)))
        TIC_SU.reset()
    plot_confidence(sim, 100, TIC_CI, rho, "alpha=" + str(alpha), num, alpha)
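
The intervals collected in TIC_CI allow a simple coverage check: for correctly sized intervals, roughly a fraction 1 - alpha of them should contain the long-run mean utilization. A small helper for such a check might look like this (the reference value has to be supplied separately, e.g. an analytical utilization):

def interval_coverage(intervals, reference):
    """Fraction of (low, high) intervals that contain the reference value."""
    hits = sum(1 for low, high in intervals if low <= reference <= high)
    return hits / float(len(intervals))

# e.g. interval_coverage(TIC_CI, 0.5) should come out close to 1 - alpha
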
Example #10
def task_2_7_1():
    """
    Here, we execute tasks 2.7.1 and 2.7.2 in the same function. This makes sense, since we use only one figure with
    four subfigures to display the plots, which makes comparability easier.
    """
    sim_param = SimParam()
    random.seed(sim_param.SEED)
    sim = Simulation(sim_param)
    return do_simulation_study(sim)
def task_5_2_2():
    """
    Run simulation in batches. Start the simulation by running until a customer count of n=100 (or n=1000) and
    continue to increase the number of customers by dn=n.
    Count the blocking probability for each batch and calculate the confidence interval width of all values that
    have been counted so far.
    Do this until the desired confidence level is reached and print out the simulation time as well as the number of
    batches.
    """
    results = [None, None, None, None]
    # TODO Task 5.2.2: Your code goes here
    bp = []
    hw = []
    sim_param = SimParam()
    sim = Simulation(sim_param)
    sim.sim_param.S = 4
    sim.sim_param.RHO = .9
    err = .0015
    half_width = 1.0
    count_bp = TimeIndependentCounter()
    i = 0
    for batch in [100, 1000]:
        for alpha in [.1, .05]:
            first_batch = False
            count_bp.reset()
            sim.reset()
            while 1:
                blocking_pro = sim.do_simulation_n_limit(
                    batch, first_batch).blocking_probability
                first_batch = True  #after first batch
                count_bp.count(blocking_pro)
                half_width = count_bp.report_confidence_interval(alpha)
                sim.sim_state.stop = False  #set the parameter back to original value
                sim.counter_collection.reset()
                sim.sim_state.num_blocked_packets = 0
                sim.sim_state.num_packets = 0
                if half_width < err:
                    break
            results[i] = sim.sim_state.now
            bp.append(count_bp.get_mean())
            hw.append(half_width)
            i += 1

    # print and return results
    print("BATCH SIZE:  100; ALPHA: 10%; TOTAL SIMULATION TIME (SECONDS): " +
          str(results[0] / 1000) + "; Blocking Probability Mean: " +
          str(bp[0]) + "; Half width: " + str(hw[0]))
    print("BATCH SIZE:  100; ALPHA:  5%; TOTAL SIMULATION TIME (SECONDS): " +
          str(results[1] / 1000) + "; Blocking Probability Mean: " +
          str(bp[1]) + "; Half width: " + str(hw[1]))
    print("BATCH SIZE: 1000; ALPHA: 10%; TOTAL SIMULATION TIME (SECONDS): " +
          str(results[2] / 1000) + "; Blocking Probability Mean: " +
          str(bp[2]) + "; Half width: " + str(hw[2]))
    print("BATCH SIZE: 1000; ALPHA:  5%; TOTAL SIMULATION TIME (SECONDS): " +
          str(results[3] / 1000) + "; Blocking Probability Mean: " +
          str(bp[3]) + "; Half width: " + str(hw[3]))
    return results
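
report_confidence_interval() is assumed to return the half-width of a Student-t confidence interval over the batch means counted so far. A stand-alone sketch of that computation (function name and behaviour are assumptions, not the project's actual implementation):

import numpy as np
from scipy import stats

def ci_half_width(samples, alpha):
    # half-width of the (1 - alpha) confidence interval for the mean,
    # using the Student-t quantile with n - 1 degrees of freedom
    x = np.asarray(samples, dtype=float)
    n = len(x)
    s = np.std(x, ddof=1)  # sample standard deviation
    return stats.t.ppf(1.0 - alpha / 2.0, n - 1) * s / np.sqrt(n)
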
def task_2_7_1():
    """
    Here, you should execute task 2.7.1 (and 2.7.2, if you want).
    """
    # TODO Task 2.7.1: Your code goes here
    sim_param = SimParam()
    random.seed(sim_param.SEED)
    sim = Simulation(sim_param)
    do_simulation_study(sim)
Example #13
def task_1_7_1():
    """
    Execute task 1.7.1 and perform a simulation study according to the task assignment.
    :return: Minimum number of buffer spaces to meet requirements.
    """
    sim_param = SimParam()
    random.seed(sim_param.SEED)
    sim = Simulation(sim_param)
    return do_simulation_study(sim)
Example #14
def task_2_7_2():
    """
    Here, you can execute task 2.7.2 if you want to run it in a separate function.
    """
    # TODO Task 2.7.2: Your code goes here or in the function above
    sim_param = SimParam()
    random.seed(sim_param.SEED)
    sim_param.SIM_TIME = 1000000
    sim = Simulation(sim_param)
    return do_simulation_study(sim)
Example #15
def task_1_7_2():
    """
    Execute task 1.7.2 and perform a simulation study according to the task assignment.
    :return: Minimum number of buffer spaces to meet requirements.
    """
    sim_param = SimParam()
    random.seed(sim_param.SEED)
    sim_param.SIM_TIME = 1000000
    sim_param.MAX_DROPPED = 100
    sim_param.NO_OF_RUNS = 100
    sim = Simulation(sim_param)
    return do_simulation_study(sim)
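
Tasks 1.7.1 and 1.7.2 look for the minimum number of buffer spaces that keeps the blocking probability below a requirement. If the system is modelled as an M/M/1 queue with finite capacity, the classic blocking formula gives a quick analytical estimate to compare the simulation against; the sketch below assumes S waiting spaces plus one server (capacity K = S + 1) and a blocking target supplied by the caller.

def mm1k_blocking(rho, k):
    # blocking probability of an M/M/1/K queue (rho != 1):
    # P_K = (1 - rho) * rho**K / (1 - rho**(K + 1))
    return (1.0 - rho) * rho ** k / (1.0 - rho ** (k + 1))

def min_buffer_spaces(rho, target, max_s=100):
    # smallest number of waiting spaces S whose blocking probability stays below the target
    for s in range(max_s + 1):
        if mm1k_blocking(rho, s + 1) < target:
            return s
    return None

# e.g. min_buffer_spaces(0.9, 0.01) gives an analytical reference for the simulation study
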
Example #16
def study_waiting_time():
    sim_param = SimParam()
    sim = Simulation(sim_param)
    sim.sim_param.S = 5
    sim.sim_param.SIM_TIME = 100000  # 100 seconds

    num_run = 100
    dataset = []
    for run in range(num_run):
        sim.reset()
        random.seed()
        sim.do_simulation()

        # Take the values (waiting time) of the first 150 packets
        dataset.append(sim.counter_collection.cnt_wt.values[0:150])

    # Manipulate the data set
    pkt_mean = []
    for pkt in range(150):
        s = 0
        for run in range(num_run):
            s += dataset[run][pkt]
        pkt_mean.append(s/float(num_run))

    plt.subplot(121)
    plt.plot(pkt_mean)
    plt.title("100s Run")
    plt.xlabel("Packet Number")
    plt.ylabel("Waiting Time (ms)")

    sim.sim_param.SIM_TIME = 1000000
    dataset = []
    for run in range(num_run):
        sim.reset()
        random.seed()
        sim.do_simulation()

        # Take the values (waiting time) of the first 1900 packets
        dataset.append(sim.counter_collection.cnt_wt.values[0:1800])

    # Manipulate the dataset
    pkt_mean = []
    for pkt in range(1800):
        s = 0
        for run in range(num_run):
            s += dataset[run][pkt]
        pkt_mean.append(s / float(num_run))

    plt.subplot(122)
    plt.plot(pkt_mean)
    plt.title("1000s Run")
    plt.xlabel("Packet Number")
    plt.ylabel("Waiting Time (ms)")
def task_5_2_1():
    """
    Run task 5.2.1. Make multiple runs until the confidence interval of the blocking probability becomes narrow
    enough for the given alpha. Simulation is performed for 100s and 1000s and for confidence levels of 90% and 95%.
    """
    results = [None, None, None, None]
    # TODO Task 5.2.1: Your code goes here
    bp = []
    hw = []
    sim_param = SimParam()
    sim = Simulation(sim_param)
    sim.sim_param.S = 4
    sim.sim_param.RHO = .9
    count_bp = TimeIndependentCounter()
    err = .0015
    i = 0
    for sim_time in [100000, 1000000]:
        sim.sim_param.SIM_TIME = sim_time
        for alpha in [.1, .05]:
            count_bp.reset()
            while 1:
                sim.reset()
                blocking_pro = sim.do_simulation().blocking_probability
                count_bp.count(blocking_pro)
                half_width = count_bp.report_confidence_interval(alpha=alpha)
                if half_width < err:
                    break

            results[i] = len(count_bp.values)
            bp.append(count_bp.get_mean())
            hw.append(half_width)
            i += 1


    # print and return results
    print("SIM TIME:  100s; ALPHA: 10%; NUMBER OF RUNS: " + str(results[0]) +
          "; TOTAL SIMULATION TIME (SECONDS): " + str(results[0] * 100) +
          "; Blocking Probability Mean: " + str(bp[0]) + "; Half width: " +
          str(hw[0]))
    print("SIM TIME:  100s; ALPHA:  5%; NUMBER OF RUNS: " + str(results[1]) +
          "; TOTAL SIMULATION TIME (SECONDS): " + str(results[1] * 100) +
          "; Blocking Probability Mean: " + str(bp[1]) + "; Half width: " +
          str(hw[1]))
    print("SIM TIME: 1000s; ALPHA: 10%; NUMBER OF RUNS:  " + str(results[2]) +
          "; TOTAL SIMULATION TIME (SECONDS): " + str(results[2] * 1000) +
          "; Blocking Probability Mean: " + str(bp[2]) + "; Half width: " +
          str(hw[2]))
    print("SIM TIME: 1000s; ALPHA:  5%; NUMBER OF RUNS:  " + str(results[3]) +
          "; TOTAL SIMULATION TIME (SECONDS): " + str(results[3] * 1000) +
          "; Blocking Probability Mean: " + str(bp[3]) + "; Half width: " +
          str(hw[3]))
    return results
def do_theoretical_iter(sim, setting):
    """
    Plots the theoretical throughput over the number of users, which can be used to compare the formulas from
    different papers.
    :param sim: Simulation object, used for the figure name
    :param setting: settings object; setting.theorsweep.n_stop is the largest number of users to sweep to
    :return:
    """

    param = SimParam(setting)
    users = range(param.K + 1, setting.theorsweep.n_stop + 1)
    theoretical = []
    theoretical1 = []
    theoretical2 = []
    theoretical3 = []
    theoretical4 = []
    theoretical5 = []
    for n in users:
        if setting.theorsweep.test_values[0]:
            theoretical.append(TheoreticalPlots().qarysic(n, param))
        if setting.theorsweep.test_values[1]:
            theoretical1.append(TheoreticalPlots().sicta(n, param))
        if setting.theorsweep.test_values[2]:
            theoretical2.append(TheoreticalPlots().simpletree(n))
        if setting.theorsweep.test_values[3]:
            theoretical3.append(TheoreticalPlots().recsicta(n))
        if setting.theorsweep.test_values[4]:
            theoretical4.append(TheoreticalPlots().recquary(n, param))
        if setting.theorsweep.test_values[5]:
            theoretical5.append(TheoreticalPlots().qsicta(n, param))
    if setting.theorsweep.test_values[0]:
        pyplot.plot(users, theoretical, 'b-', label='Quary Sic')
    if setting.theorsweep.test_values[1]:
        pyplot.plot(users, theoretical1, 'g-', label='SICTA')
    if setting.theorsweep.test_values[2]:
        pyplot.plot(users, theoretical2, 'r-', label='Simple Tree')
    if setting.theorsweep.test_values[3]:
        pyplot.plot(users, theoretical3, 'c-', label='Recursive SICTA')
    if setting.theorsweep.test_values[4]:
        pyplot.plot(users, theoretical4, 'm-', label='Recursive Quary')
    if setting.theorsweep.test_values[5]:
        pyplot.plot(users, theoretical5, 'y-', label='QSICTA Giannakkis')

    pyplot.xlabel('Users')
    pyplot.ylabel('Throughput')
    pyplot.legend()
    pyplot.xscale('log')
    figname = F"K{sim.sim_param.K}Q{sim.sim_param.SPLIT}TheoreticalCalc"
    pyplot.savefig(figname + '.png', dpi=300)
    tikzplotlib.save(figname + '.tex')
    pyplot.show()
Example #19
def study_waiting_time_dist():

    sim_param = SimParam()
    random.seed(0)
    sim = Simulation(sim_param)

    sim.sim_param.S = 5
    sim.sim_param.SIM_TIME = 100000  # 100 seconds

    sim.reset()
    sim.do_simulation()
    # sim.counter_collection.hist_wt.report()
    print(sim.counter_collection.cnt_wt.get_mean())
    plt.plot(sim.counter_collection.cnt_wt.values)
    plt.show()
Example #20
 def __init__(self, sim_param=SimParam(), no_seed=False):
     """
     Initialize the Simulation object.
     :param sim_param: is an optional SimParam object for parameter pre-configuration
     :param no_seed: is an optional parameter. If it is set to True, the RNG should be initialized without
     a specific seed.
     """
     self.sim_param = sim_param
     self.sim_state = SimState()
     self.system_state = SystemState(self)
     self.event_chain = EventChain()
     self.sim_result = SimResult(self)
     # TODO Task 2.4.3: Uncomment the line below
     self.counter_collection = CounterCollection(self)
     # TODO Task 3.1.2: Uncomment the line below and replace the "None"
     """
def task_5_2_4():
    """
    Plot confidence interval as described in the task description for task 5.2.4.
    We use the function plot_confidence() for the actual plotting and run our simulation several times to get the
    samples. Due to the different configurations, we receive eight plots in two figures.
    """
    # TODO Task 5.2.4: Your code goes here

    sim_param = SimParam()
    sim = Simulation(sim_param)
    sim.sim_param.S = 40000000  #infinite M/M/1/inf
    err = .0015
    plt_no = 1
    for rho in [0.5, 0.9]:
        sim.sim_param.RHO = rho
        for alpha in [0.1, 0.05]:
            for sim_time in [100000, 1000000]:
                sim.sim_param.SIM_TIME = sim_time
                print(" Sim time " + str(sim.sim_param.SIM_TIME / 1000) +
                      "s " + " Alpha " + str(alpha) + " RHO " + str(rho))
                count_util = TimeIndependentCounter()
                mean_count = TimeIndependentCounter()
                y_low = []
                y_high = []
                x = []
                for repeat in range(100):
                    count_util.reset()
                    for sim_run in range(30):
                        sim.reset()
                        count_util.count(
                            sim.do_simulation().system_utilization)

                    mean = count_util.get_mean()
                    half_width = count_util.report_confidence_interval(
                        alpha=alpha)
                    mean_count.count(mean)
                    y_low.append(mean - half_width)
                    y_high.append(mean + half_width)
                    x.append(repeat + 1)

                pyplot.subplot(2, 2, plt_no)
                plt_no += 1
                plot_confidence(sim, x, y_low, y_high, mean_count.get_mean(),
                                sim.sim_param.RHO, "Utilization", alpha)

        pyplot.show()
        plt_no = 1
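
plot_confidence() is a plotting helper from the surrounding project whose implementation is not shown here. A minimal sketch matching the call above (all parameter names are assumptions) could shade the confidence band around the per-repetition means:

from matplotlib import pyplot

def plot_confidence_sketch(sim, x, y_low, y_high, mean, rho, label, alpha):
    # shade the confidence band and mark the overall mean utilization
    pyplot.fill_between(x, y_low, y_high, alpha=0.3, label=label)
    pyplot.axhline(mean, linestyle='--', label='overall mean')
    pyplot.title('rho = %.2f, alpha = %.2f' % (rho, alpha))
    pyplot.xlabel('repetition')
    pyplot.ylabel('system utilization')
    pyplot.legend()
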
 def reset(self, setting):
     self.sim_param = SimParam(setting)
     if setting.dynamictest:
         self.SIMTIME = setting.dynamictest.simtime
     self.freeaccess = False
     if setting.secondwindow.test_values[3]:
         self.freeaccess = True
     self.sim_state = SimState()
     self.sim_result = SimResult()
     self.slot = TreeSlot()
     self.active_array = []
     self.queue_array = []
     self.packets_gen = 0
     self.result = 0
     self.slot_no = 0
     self.tree_state = TreeState(self)
     self.branch_node.reset()
def task_4_3_2():
    """
    Exercise to plot auto correlation depending on lags. Run simulation until 10000 (or 100) packets are served.
    For the different rho values, simulation is run and the blocking probability is auto correlated.
    Results are plotted for each N value in a different diagram.
    Note that for some seeds with small rho and N=100, the variance of the auto-covariance is 0, which results in an error.
    """
    # TODO Task 4.3.2: Your code goes here
    data_n_100 = []
    data_n_10000 = []
    lags = list(range(1, 21))
    sim_param = SimParam()
    random.seed(sim_param.SEED)
    sim = Simulation(sim_param)
    sim.sim_param.SIM_TIME = 1000000
    sim.sim_param.S = 10000
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        sim.do_simulation_n_limit(100)
    for i in lags:
        data_n_100.append(sim.counter_collection.acnt_wt.get_auto_cor(i))
    sim.reset()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        sim.do_simulation_n_limit(10000)
    for i in lags:
        data_n_10000.append(sim.counter_collection.acnt_wt.get_auto_cor(i))
    pylab.figure(1)
    pylab.xlabel('lag')
    pylab.ylabel('Auto Correlation')
    pylab.title(
        'Auto Correlation of Waiting Times (n=100) for lags ranging from 1 to 20'
    )
    pylab.plot(lags, data_n_100)
    pylab.figure(2)
    pylab.xlabel('lag')
    pylab.ylabel('Auto Correlation')
    pylab.title(
        'Auto Correlation of Waiting Times (n=10000) for lags ranging from 1 to 20'
    )
    pylab.plot(lags, data_n_10000)
    pylab.show()
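
get_auto_cor(lag) is assumed to return the sample autocorrelation of the waiting-time sequence at the given lag. For reference, the same estimate can be written directly with NumPy (lag must be at least 1):

import numpy as np

def auto_correlation(samples, lag):
    # sample autocorrelation at the given lag (>= 1): correlation of the series
    # with a copy of itself shifted by 'lag' positions
    x = np.asarray(samples, dtype=float)
    return np.corrcoef(x[:-lag], x[lag:])[0, 1]
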
 def __init__(self, sim_param=SimParam(), no_seed=False):
     """
     Initialize the Simulation object.
     :param sim_param: is an optional SimParam object for parameter pre-configuration
     :param no_seed: is an optional parameter. If it is set to True, the RNG should be initialized without
     a specific seed.
     """
     self.sim_param = sim_param
     self.sim_state = SimState()
     self.system_state = SystemState(self)
     self.event_chain = EventChain()
     self.sim_result = SimResult(self)
     self.counter_collection = CounterCollection(self)
     if no_seed:
         self.rng = RNG(ExponentialRNS(1),
                        ExponentialRNS(1. / float(self.sim_param.RHO)))
     else:
         self.rng = RNG(
             ExponentialRNS(1, self.sim_param.SEED_IAT),
             ExponentialRNS(1. / float(self.sim_param.RHO),
                            self.sim_param.SEED_ST))
def task_4_3_2():
    """
    Exercise to plot the scatter plot of (a) IAT and serving time, (b) serving time and system time
    The scatter plots help to better understand the meaning of a big/small covariance/correlation.
    For every rho, two scatter plots are needed.
    The simulation parameters are the same as in task_4_3_1()
    """
    # TODO Task 4.3.2: Your code goes here

    sim_param = SimParam()
    sim = Simulation(sim_param)
    sim.sim_param.S = 10000
    sim.sim_param.SIM_TIME = 10000000

    for rho in [0.01, 0.5, 0.8, 0.95]:
        sim.sim_param.RHO = rho
        sim.reset()
        sim.counter_collection.reset()
        sim = sim.do_simulation().sim
        cnt_iat = sim.counter_collection.cnt_iat_st.values_x
        cnt_st = sim.counter_collection.cnt_iat_st.values_y
        cnt_syst = sim.counter_collection.cnt_st_syst.values_y

        corr_iat_st = float(sim.counter_collection.cnt_iat_st.get_cor())
        corr_st_syst = float(sim.counter_collection.cnt_st_syst.get_cor())
        fig = plt.figure()
        ax1 = fig.add_subplot(1, 1, 1)
        plt.subplot(1, 2, 1)
        plt.title('Rho %.2f Correlation %.3f' % (rho, corr_iat_st))
        plt.xlabel("Correlation IAT - ST")
        plt.plot(cnt_iat, cnt_st, 'o')

        plt.subplot(1, 2, 2)
        plt.title('Rho %.2f Correlation %.3f' % (rho, corr_st_syst))
        plt.xlabel("Correlation ST - SYST")
        plt.plot(cnt_st, cnt_syst, 'o')

    plt.show()
Example #26
def task_3_2_1():
    """
    This function plots two histograms for verification of the random distributions.
    One histogram is plotted for a uniform distribution, the other one for an exponential distribution.
    """
    # TODO Task 3.2.1: Your code goes here
    sim_param = SimParam()
    random.seed(sim_param.SEED)
    sim_param.RHO = 0.01
    sim = Simulation(sim_param)
    rns_iat = ExponentialRNS(1.0)
    rns_st = ExponentialRNS(1.0/sim.sim_param.RHO)
    rns_uniform = UniformRNS((2,4))
    hist1 = TimeIndependentHistogram(sim, "Line")
    hist2 = TimeIndependentHistogram(sim, "Line")
    hist3 = TimeIndependentHistogram(sim, "bp")
    for i in range(1000000):
        hist1.count(rns_iat.next())
        hist2.count(rns_st.next())
        hist3.count(rns_uniform.next())
    hist1.report()
    hist2.report()
    hist3.report()
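
Besides the histograms, the generators can be verified numerically: the exponential streams should average to 1/lambda (1.0 for the inter-arrival times, RHO for the service times) and the uniform stream to the midpoint of its interval. A small helper along those lines, assuming the same .next() interface as above:

def empirical_mean(rns, n=100000):
    # average of n draws from a random number stream exposing .next()
    return sum(rns.next() for _ in range(n)) / float(n)

# inside task_3_2_1 one would expect, approximately:
#   empirical_mean(rns_iat)     ~ 1.0
#   empirical_mean(rns_st)      ~ sim_param.RHO
#   empirical_mean(rns_uniform) ~ 3.0  (midpoint of the (2, 4) interval)
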
Example #27
def task_5_2_2():
    """
    Run simulation in batches. Start the simulation by running until a customer count of n=100 (or n=1000) and
    continue to increase the number of customers by dn=n.
    Count the blocking probability for each batch and calculate the confidence interval width of all values that
    have been counted so far.
    Do this until the desired confidence level is reached and print out the simulation time as well as the number of
    batches.
    """
    num_batches1 = 0
    num_batches2 = 0
    num_batches3 = 0
    num_batches4 = 0
    TIC = TimeIndependentCounter("bp")

    # TODO Task 5.2.2: Your code goes here
    sim_param = SimParam()
    random.seed(sim_param.SEED)
    sim = Simulation(sim_param)
    sim.sim_param.S = 4
    sim.sim_param.RHO = 0.9
    ## n = 100
    # ALPHA: 5%
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        TIC.count(sim.do_simulation_n_limit(100).blocking_probability)

    for i in range(10000):
        sim.sim_result = SimResult(sim)
        sim.sim_state.stop = False
        sim.sim_state.num_packets = 0
        sim.sim_state.num_blocked_packets = 0
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            TIC.count(
                sim.do_simulation_n_limit(100, True).blocking_probability)
            print(TIC.report_confidence_interval(0.05))
            if TIC.report_confidence_interval(0.05) < 0.0015:
                num_batches1 = i + 1
                break
    t1 = sim.sim_state.now

    # ALPHA: 10%
    sim.reset()
    TIC.reset()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        TIC.count(sim.do_simulation_n_limit(100).blocking_probability)
    for i in range(10000):
        sim.sim_result = SimResult(sim)
        sim.sim_state.stop = False
        sim.sim_state.num_packets = 0
        sim.sim_state.num_blocked_packets = 0
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            TIC.count(
                sim.do_simulation_n_limit(100, True).blocking_probability)
            print(TIC.report_confidence_interval(0.1))
            if TIC.report_confidence_interval(0.1) < 0.0015:
                num_batches2 = i + 1
                break
    t2 = sim.sim_state.now

    sim.reset()
    TIC.reset()
    ## n = 1000
    # ALPHA: 5%
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        TIC.count(sim.do_simulation_n_limit(1000).blocking_probability)

    for i in range(10000):
        sim.sim_result = SimResult(sim)
        sim.sim_state.stop = False
        sim.sim_state.num_packets = 0
        sim.sim_state.num_blocked_packets = 0
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            TIC.count(
                sim.do_simulation_n_limit(1000, True).blocking_probability)
            print(TIC.report_confidence_interval(0.05))
            if TIC.report_confidence_interval(0.05) < 0.0015:
                num_batches3 = i + 1
                break
    t3 = sim.sim_state.now

    # ALPHA: 10%
    sim.reset()
    TIC.reset()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        TIC.count(sim.do_simulation_n_limit(1000).blocking_probability)
    for i in range(10000):
        sim.sim_result = SimResult(sim)
        sim.sim_state.stop = False
        sim.sim_state.num_packets = 0
        sim.sim_state.num_blocked_packets = 0
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            TIC.count(
                sim.do_simulation_n_limit(1000, True).blocking_probability)
            print(TIC.report_confidence_interval(0.1))
            if TIC.report_confidence_interval(0.1) < 0.0015:
                num_batches4 = i + 1
                break
    t4 = sim.sim_state.now

    # print and return both results
    print "N:  100; ALPHA:  5%; NUMBER OF BATCHES: " + str(
        num_batches1) + " and SIM TIME: " + str(t1)
    print "N:  100; ALPHA: 10%; NUMBER OF BATCHES: " + str(
        num_batches2) + " and SIM TIME: " + str(t2)
    print "N: 1000; ALPHA:  5%; NUMBER OF BATCHES: " + str(
        num_batches3) + " and SIM TIME: " + str(t3)
    print "N: 1000; ALPHA: 10%; NUMBER OF BATCHES: " + str(
        num_batches4) + " and SIM TIME: " + str(t4)

    return [t1, t2, t3, t4]
Example #28
def task_5_2_1():
    """
    Run task 5.2.1. Make multiple runs until the confidence interval of the blocking probability becomes narrow
    enough for the given alpha. Simulation is performed for 100s and 1000s and for confidence levels of 90% and 95%.
    """
    results = [None, None, None, None]
    TIC = TimeIndependentCounter("bp")

    # TODO Task 5.2.1: Your code goes here
    #SIM TIME:  100s; ALPHA: 10%
    sim_param = SimParam()
    random.seed(sim_param.SEED)
    sim = Simulation(sim_param)
    sim.sim_param.SIM_TIME = 100000
    sim.sim_param.S = 4
    sim.sim_param.RHO = 0.9
    for i in range(10000):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            TIC.count(sim.do_simulation().blocking_probability)
            if TIC.report_confidence_interval(0.1) < 0.0015:
                results[0] = i
                break
            sim.reset()
    #SIM TIME:  100s; ALPHA:  5%
    TIC.reset()
    for i in range(10000):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            TIC.count(sim.do_simulation().blocking_probability)
            if TIC.report_confidence_interval(0.05) < 0.0015:
                results[1] = i
                break
            sim.reset()
    #SIM TIME: 1000s; ALPHA: 10%
    sim.sim_param.SIM_TIME = 1000000
    TIC.reset()
    for i in range(10000):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            TIC.count(sim.do_simulation().blocking_probability)
            if TIC.report_confidence_interval(0.1) < 0.0015:
                results[2] = i
                break
            sim.reset()
    #SIM TIME: 1000s; ALPHA:  5%
    sim.sim_param.SIM_TIME = 1000000
    TIC.reset()
    for i in range(10000):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            TIC.count(sim.do_simulation().blocking_probability)
            if TIC.report_confidence_interval(0.05) < 0.0015:
                results[3] = i
                break
            sim.reset()

    # print and return results
    print "SIM TIME:  100s; ALPHA: 10%; NUMBER OF RUNS: " + str(results[0])
    print "SIM TIME:  100s; ALPHA:  5%; NUMBER OF RUNS: " + str(results[1])
    print "SIM TIME: 1000s; ALPHA: 10%; NUMBER OF RUNS:  " + str(results[2])
    print "SIM TIME: 1000s; ALPHA:  5%; NUMBER OF RUNS:  " + str(results[3])
    return results
def ran_simulation():
    """
    Main ran_simulation
    """


    # define sim_param and, inside it, the RB pool: the list of all available resources
    sim_param = SimParam()

    no_of_slices = sim_param.no_of_slices
    no_of_users_per_slice = sim_param.no_of_users_per_slice

    # create result directories
    create_dir(sim_param)

    # create logfile and write SimParameters
    results_dir = "results/" + sim_param.timestamp
    log_file = open(results_dir + "/logfile.txt", "wt")
    log_file.write('no_of_slices: %d\nno_of_users_per_slice: %d\n\n' % (no_of_slices, no_of_users_per_slice))
    attrs = vars(sim_param)
    log_file.write('SimParam\n'+''.join("%s: %s\n" % item for item in attrs.items()))
    # log_file.close()

    # initialize SD_RAN_Controller
    SD_RAN_Controller = Controller(sim_param)

    # Each slice has different users
    slices = []
    slice_results = []

    # initialize all slices
    for i in range(no_of_slices):
        slice_param_tmp = SliceParam(sim_param)
        slice_param_tmp.SLICE_ID = i
        slices.append(SliceSimulation(slice_param_tmp))
        slice_results.append([])

        # initialize all users with traffics and distances
        tmp_users = []
        seed_dist = 0  # users in all slices have identical distance distributions
        #rng_dist = RNG(ExponentialRNS(lambda_x=1. / float(sim_param.MEAN_Dist)), s_type='dist') # , the_seed=seed_dist
        rng_dist = RNG(UniformRNS(sim_param.DIST_MIN,sim_param.DIST_MAX, the_seed=seed_dist), s_type='dist')  #
        dist_arr = [10, 100 ]#[30, 30, 100, 100, 100, 100, 100, 100, 100, 100]  # 10*(1+user_id%no_of_users_per_slice)**2
        for j in range(no_of_users_per_slice):
            user_id = i*no_of_users_per_slice + j
            #tmp_users.append(User(user_id, rng_dist.get_dist(), slice_list=[slices[i]], sim_param=sim_param))
            tmp_users.append(User(user_id, dist_arr[j], slice_list=[slices[i]], sim_param=sim_param))

        # insert user to slices
        slices[i].insert_users(tmp_users)


    # Choose Slice Manager Algorithm, 'PF': prop fair, 'MCQI': Max Channel Quality Index, 'RR': round-robin
    slices[0].slice_param.SM_ALGO = 'RR'
    slices[1].slice_param.SM_ALGO = 'MCQI'
    slices[2].slice_param.SM_ALGO = 'PF'

    # log Slice Parameters
    for i in range(no_of_slices):
        attrs = vars(slices[i].slice_param)
        log_file.write('\nSliceParam\n' + ''.join("%s: %s\n" % item for item in attrs.items()))
    #log_file.close()

    # loop rounds for each slice
    for i in range(int(sim_param.T_FINAL/sim_param.T_C)):
        RB_mapping = SD_RAN_Controller.RB_allocate_to_slices(slices[0].sim_state.now, slices)

        for j in range(len(slices)):
            slices[j].prep_next_round(RB_mapping[j,:,:])
            slice_results[j].append(slices[j].simulate_one_round())

    # Store Simulation Results
    # user results
    parent_dir = "results/" + sim_param.timestamp + "/user_results"
    path = parent_dir + "/tp"
    for i in range(len(slice_results)):
        user_count = len(slice_results[i][-1].server_results)   # choose latest result for data
        for k in range(user_count):
            common_name = "/slice%d_user%d_" % (i, slice_results[i][-1].server_results[k].server.user.user_id)
            cc_temp = slice_results[i][-1].server_results[k].server.counter_collection
            # tp
            filename = parent_dir + "/tp" + common_name + "sum_power_two.csv"
            savetxt(filename, cc_temp.cnt_tp.sum_power_two, delimiter=',')
            filename = parent_dir + "/tp" + common_name + "values.csv"
            savetxt(filename, cc_temp.cnt_tp.values, delimiter=',')
            filename = parent_dir + "/tp" + common_name + "timestamps.csv"
            savetxt(filename, cc_temp.cnt_tp.timestamps, delimiter=',')

            filename = parent_dir + "/tp" + common_name + "all_data.csv"
            #savetxt(filename, np.transpose(np.array([cc_temp.cnt_tp.values,cc_temp.cnt_tp.timestamps])), delimiter=',')
            df = DataFrame(np.transpose(np.array([cc_temp.cnt_tp.values,cc_temp.cnt_tp.timestamps])), columns=['Values', 'Timestamps'])
            export_csv = df.to_csv(filename, index=None, header=True)

            # tp2
            filename = parent_dir + "/tp2" + common_name + "sum_power_two.csv"
            savetxt(filename, cc_temp.cnt_tp2.sum_power_two, delimiter=',')
            filename = parent_dir + "/tp2" + common_name + "values.csv"
            savetxt(filename, cc_temp.cnt_tp2.values, delimiter=',')
            filename = parent_dir + "/tp2" + common_name + "timestamps.csv"
            savetxt(filename, cc_temp.cnt_tp2.timestamps, delimiter=',')
            # ql
            filename = parent_dir + "/ql" + common_name + "sum_power_two.csv"
            savetxt(filename, cc_temp.cnt_ql.sum_power_two, delimiter=',')
            filename = parent_dir + "/ql" + common_name + "values.csv"
            savetxt(filename, cc_temp.cnt_ql.values, delimiter=',')
            filename = parent_dir + "/ql" + common_name + "timestamps.csv"
            savetxt(filename, cc_temp.cnt_ql.timestamps, delimiter=',')
            # system time (delay)
            filename = parent_dir + "/delay" + common_name + "values.csv"
            savetxt(filename, cc_temp.cnt_syst.values, delimiter=',')
            filename = parent_dir + "/delay" + common_name + "timestamps.csv"
            savetxt(filename, cc_temp.cnt_syst.timestamps, delimiter=',')
            # Find how to insert histograms


    # plot results
    parent_dir = "results/" + sim_param.timestamp
    plot_results(parent_dir, no_of_slices, no_of_users_per_slice, sim_param, slices)

    # rb dist printing
    filename = "results/" + sim_param.timestamp + "/summary"
    rb_total = 0
    rb_dist = []
    for s in slices:
        rb_dist_slice = []
        for u in s.server_list:
            rb_dist_slice.append(u.RB_counter)
        slicesum = np.nansum(rb_dist_slice)

        print("Slice %d dist: " % s.slice_param.SLICE_ID, *np.round(np.divide(rb_dist_slice,slicesum/100), 1))
        # write these to file savetxt(filename, cc_temp.cnt_ql.sum_power_two, delimiter=',')
        rb_dist.append(slicesum)
    totalsum = np.nansum(rb_dist)
    print("rb dist (RR MCQI PF): ", *np.round(np.divide(rb_dist, totalsum / 100), 1))
Example #30
    def init_input_obj(self):
        """Section 4 - Create UWG objects from input parameters

            self.simTime            # simulation time parameter obj
            self.weather            # weather obj for simulation time period
            self.forcIP             # Forcing obj
            self.forc               # Empty forcing obj
            self.geoParam           # geographic parameters obj
            self.RSM                # Rural site & vertical diffusion model obj
            self.USM                # Urban site & vertical diffusion model obj
            self.UCM                # Urban canopy model obj
            self.UBL                # Urban boundary layer model

            self.road               # urban road element
            self.rural              # rural road element

            self.soilindex1         # soil index for urban road depth
            self.soilindex2         # soil index for rural road depth

            self.BEM                # list of BEMDef objects
            self.Sch                # list of Schedule objects
        """

        climate_file_path = os.path.join(self.epwDir, self.epwFileName)

        self.simTime = SimParam(self.dtSim, self.dtWeather, self.Month,
                                self.Day,
                                self.nDay)  # simulation time parameters
        self.weather = Weather(
            climate_file_path, self.simTime.timeInitial, self.simTime.timeFinal
        )  # weather file data for simulation time period
        self.forcIP = Forcing(self.weather.staTemp,
                              self.weather)  # initialized Forcing class
        self.forc = Forcing()  # empty forcing class

        # Initialize geographic Param and Urban Boundary Layer Objects
        nightStart = 18.  # arbitrary values for begin/end hour for night setpoint
        nightEnd = 8.
        maxdx = 250.  # max dx (m)

        self.geoParam = Param(self.h_ubl1,self.h_ubl2,self.h_ref,self.h_temp,self.h_wind,self.c_circ,\
            self.maxDay,self.maxNight,self.latTree,self.latGrss,self.albVeg,self.vegStart,self.vegEnd,\
            nightStart,nightEnd,self.windMin,self.WGMAX,self.c_exch,maxdx,self.G,self.CP,self.VK,self.R,\
            self.RV,self.LV,math.pi,self.SIGMA,self.WATERDENS,self.LVTT,self.TT,self.ESTT,self.CL,\
            self.CPV,self.B, self.CM, self.COLBURN)

        self.UBL = UBLDef('C', self.charLength, self.weather.staTemp[0], maxdx,
                          self.geoParam.dayBLHeight,
                          self.geoParam.nightBLHeight)

        # Defining road
        emis = 0.93
        asphalt = Material(self.kRoad, self.cRoad, 'asphalt')
        road_T_init = 293.
        road_horizontal = 1
        road_veg_coverage = min(self.vegCover / (1 - self.bldDensity),
                                1.)  # fraction of surface vegetation coverage

        # define road layers
        road_layer_num = int(math.ceil(self.d_road / 0.05))
        thickness_vector = map(lambda r: 0.05, range(
            road_layer_num))  # 0.5/0.05 ~ 10 x 1 matrix of 0.05 thickness
        material_vector = map(lambda n: asphalt, range(road_layer_num))

        self.road = Element(self.alb_road,emis,thickness_vector,material_vector,road_veg_coverage,\
            road_T_init,road_horizontal,name="urban_road")

        self.rural = copy.deepcopy(self.road)
        self.rural.vegCoverage = self.rurVegCover
        self.rural._name = "rural_road"

        # Define BEM for each DOE type (read the fraction)
        if not os.path.exists(self.readDOE_file_path):
            raise Exception("readDOE.pkl file: '{}' does not exist.".format(
                self.readDOE_file_path))

        readDOE_file = open(self.readDOE_file_path,
                            'rb')  # open pickle file in binary form
        refDOE = cPickle.load(readDOE_file)
        refBEM = cPickle.load(readDOE_file)
        refSchedule = cPickle.load(readDOE_file)
        readDOE_file.close()

        # Define building energy models
        k = 0
        r_glaze = 0  # Glazing ratio for total building stock
        SHGC = 0  # SHGC addition for total building stock
        alb_wall = 0  # albedo wall addition for total building stock
        h_floor = self.flr_h or 3.05  # average floor height

        total_urban_bld_area = math.pow(
            self.charLength, 2
        ) * self.bldDensity * self.bldHeight / h_floor  # total building floor area
        area_matrix = utilities.zeros(16, 3)

        self.BEM = []  # list of BEMDef objects
        self.Sch = []  # list of Schedule objects

        for i in xrange(16):  # 16 building types
            for j in xrange(3):  # 3 built eras
                if self.bld[i][j] > 0.:
                    # Add to BEM list
                    self.BEM.append(refBEM[i][j][self.zone])
                    self.BEM[k].frac = self.bld[i][j]
                    self.BEM[k].fl_area = self.bld[i][j] * total_urban_bld_area

                    # Overwrite with optional parameters if provided
                    if self.glzR:
                        self.BEM[k].building.glazingRatio = self.glzR
                    if self.albRoof:
                        self.BEM[k].roof.albedo = self.albRoof
                    if self.vegRoof:
                        self.BEM[k].roof.vegCoverage = self.vegRoof
                    if self.SHGC:
                        self.BEM[k].building.shgc = self.SHGC
                    if self.albWall:
                        self.BEM[k].wall.albedo = self.albWall

                    # Keep track of total urban r_glaze, SHGC, and alb_wall for UCM model
                    r_glaze = r_glaze + self.BEM[k].frac * self.BEM[
                        k].building.glazingRatio  ##
                    SHGC = SHGC + self.BEM[k].frac * self.BEM[k].building.shgc
                    alb_wall = alb_wall + self.BEM[k].frac * self.BEM[
                        k].wall.albedo
                    # Add to schedule list
                    self.Sch.append(refSchedule[i][j][self.zone])
                    k += 1

        # Reference site class (also include VDM)
        self.RSM = RSMDef(self.lat, self.lon, self.GMT, self.h_obs,
                          self.weather.staTemp[0], self.weather.staPres[0],
                          self.geoParam, self.z_meso_dir_path)
        self.USM = RSMDef(self.lat, self.lon, self.GMT, self.bldHeight / 10.,
                          self.weather.staTemp[0], self.weather.staPres[0],
                          self.geoParam, self.z_meso_dir_path)

        T_init = self.weather.staTemp[0]
        H_init = self.weather.staHum[0]

        self.UCM = UCMDef(self.bldHeight,self.bldDensity,self.verToHor,self.treeCoverage,self.sensAnth,self.latAnth,T_init,H_init,\
        self.weather.staUmod[0],self.geoParam,r_glaze,SHGC,alb_wall,self.road)
        self.UCM.h_mix = self.h_mix

        # Define Road Element & buffer to match ground temperature depth
        roadMat, newthickness = procMat(self.road, self.MAXTHICKNESS,
                                        self.MINTHICKNESS)

        for i in xrange(self.nSoil):
            # if the soil depth is greater than the thickness of the road
            # we add new slices of soil at max thickness until the road is greater or equal

            is_soildepth_equal = self.is_near_zero(
                self.depth_soil[i][0] - sum(newthickness), 1e-15)

            if is_soildepth_equal or (self.depth_soil[i][0] >
                                      sum(newthickness)):
                while self.depth_soil[i][0] > sum(newthickness):
                    newthickness.append(self.MAXTHICKNESS)
                    roadMat.append(self.SOIL)
                self.soilindex1 = i
                break

        self.road = Element(self.road.albedo, self.road.emissivity, newthickness, roadMat,\
            self.road.vegCoverage, self.road.layerTemp[0], self.road.horizontal, self.road._name)

        # Define Rural Element
        ruralMat, newthickness = procMat(self.rural, self.MAXTHICKNESS,
                                         self.MINTHICKNESS)

        for i in xrange(self.nSoil):
            # if the soil depth is greater than the thickness of the road
            # we add new slices of soil at max thickness until the road is greater or equal

            is_soildepth_equal = self.is_near_zero(
                self.depth_soil[i][0] - sum(newthickness), 1e-15)

            if is_soildepth_equal or (self.depth_soil[i][0] >
                                      sum(newthickness)):
                while self.depth_soil[i][0] > sum(newthickness):
                    newthickness.append(self.MAXTHICKNESS)
                    ruralMat.append(self.SOIL)

                self.soilindex2 = i
                break

        self.rural = Element(self.rural.albedo, self.rural.emissivity, newthickness,\
            ruralMat,self.rural.vegCoverage,self.rural.layerTemp[0],self.rural.horizontal, self.rural._name)