Example #1
import numpy as np

import lte_utils            # project-local helper: MCS <-> throughput/time conversions
from astat import astat     # project-local CDF helper (assumed import path)


def time_analysis(arrp, rtt):
    """Compute the deadline-miss rate, mean throughput, and a throughput CDF
    over a list of per-record dicts with 'mcs', 'iter' and 'duration' keys."""

    miss_rate = 0
    thru = 0
    nitem = 0
    thru_all = []

    for item in arrp:
        if item['mcs'] >= 0:
            # credit throughput only when 0 <= iter < 5 and the record
            # finished within the (2000 - rtt) budget
            on_time = (-1 < item['iter'] < 5) and (item['duration'] < (2000 - rtt))
            curr_thru = lte_utils.mcs_to_throughput(item['mcs']) if on_time else 0
            thru += curr_thru
            thru_all.append(curr_thru)
            # count a deadline miss when the record ran past the budget or iter == -1
            miss_rate += 1 if (item['duration'] > (2000 - rtt)) or (item['iter'] == -1) else 0
            nitem += 1

    print(nitem)  # number of records with a valid MCS
    thru = thru * 1.0 / nitem
    miss_rate = miss_rate * 1.0 / nitem

    # CDF of per-record throughput, sampled from 0 to 35 in steps of 0.2
    obj = astat(thru_all)
    thru_cdf = [obj.get_cdf(x) for x in np.arange(0, 35, 0.2)]

    return [miss_rate, thru, thru_cdf]
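
A minimal usage sketch for time_analysis, with hypothetical record values (the keys match those read inside the function; lte_utils and astat must be importable as above):

# hypothetical per-record inputs; the real ones come from the measurement pipeline
records = [
    {'mcs': 16, 'iter': 3, 'duration': 900},     # within the (2000 - rtt) budget -> contributes throughput
    {'mcs': 16, 'iter': -1, 'duration': 2400},   # ran past the budget and iter == -1 -> counted as a miss
    {'mcs': -1, 'iter': 0, 'duration': 0},       # negative MCS -> skipped entirely
]
miss_rate, thru, thru_cdf = time_analysis(records, rtt=700)
print(miss_rate, thru, len(thru_cdf))            # 0.5, half the MCS-16 rate, 175 CDF samples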
Example #2
import math

import lte_utils   # project-local helper: MCS <-> throughput/time conversions

# mcs_range, exp_range, num_bss_range, num_ants_range, lmax_range and rtt_range
# are defined elsewhere in the script
# rtt_range = [700]
var_range = [1]
snr_range = [30]

run = 0

# processing time for each MCS
t_t_range = [lte_utils.mcs_to_time(mcs, 1) for mcs in mcs_range]

# throughput for each MCS
th_range = [lte_utils.mcs_to_throughput(mcs) for mcs in mcs_range]
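
A small sketch of how the two lookups line up with mcs_range (the dict name here is hypothetical):

# pair each MCS index with its precomputed (processing time, throughput)
mcs_table = dict(zip(mcs_range, zip(t_t_range, th_range)))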


for mcs in mcs_range:
    for exp in exp_range:
        for num_bss in num_bss_range:
            for num_ants in num_ants_range:
                for lmax in lmax_range:
                    for rtt in rtt_range:
                        for var in var_range:
                            for snr in snr_range:

                                nprocs = num_bss * int(math.ceil(1.0 * lmax / 1000))
                                num_cores_bs = int(math.ceil(1.0 * lmax / 1000))
                                # global is not dependent on input parameters
                                max_cores = int(math.ceil(1.0 * lmax / 1000)) * num_bss
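                                # sanity check (hypothetical values): with lmax = 2500 and num_bss = 4,
                                # ceil(2500 / 1000) = 3 cores per BS, so nprocs = 12 and max_cores = 12;
                                # nprocs and max_cores are the same product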