def aggr(device, duplex='FDD'):
    """Aggregated LBPS scheme: wake every child (plus the device itself)
    together in the first TTI of the sleep cycle.

    Args:
        device: an eNB or RN instance; any other type returns None.
        duplex: 'FDD' or 'TDD' — only affects which capacity attribute is
            used (capacity vs. virtualCapacity), not the mapping itself.

    Returns:
        list[list]: length ``sleep_cycle_length``; index 0 holds the
        children plus ``device`` sorted by name, all other TTIs are empty.
        Returns None on invalid input or internal failure.
    """
    prefix = "lbps::aggr::%s \t" % device.name
    try:
        # duplex will only affect the capacity, not related to mapping.
        # FIX: original used `duplex is not 'FDD'` — identity comparison on
        # string literals is implementation-dependent; use equality.
        if duplex != 'FDD' and duplex != 'TDD':
            return
        if not isinstance(device, eNB) and not isinstance(device, RN):
            return

        # eNB schedules its backhaul link; an RN schedules its access link.
        interface = 'backhaul' if isinstance(device, eNB) else 'access'
        capacity = device.capacity if duplex == 'FDD' else device.virtualCapacity
        # capacity may be a per-interface dict or a plain scalar.
        capacity = capacity[interface] if type(capacity) is dict else capacity
        pkt_size = getAvgPktSize(device)
        DATA_TH = int(getDataTH(capacity, pkt_size))
        msg_execute("load= %g\t" % getLoad(device, duplex), pre=prefix)

        # aggr process: one wake-up TTI per sleep cycle for everybody.
        sleep_cycle_length = LengthAwkSlpCyl(device.lambd[interface], DATA_TH)
        msg_execute("sleepCycle = %d" % sleep_cycle_length, pre=prefix)

        result = [[] for _ in range(sleep_cycle_length)]
        result[0] = [child for child in device.childs]
        result[0].append(device)
        result[0] = sorted(result[0], key=lambda d: d.name)
        return result

    except Exception as e:
        # Best-effort: log the failure and return None (project convention).
        msg_fail(str(e), pre=prefix)
        return
def check_mincycle(device, rn_status, b_min_cycle):
    """Reconcile each RN's access-side schedule with the backhaul minimum
    cycle ``b_min_cycle``, updating ``rn_status`` in place.

    For every child RN whose access sleep cycle is longer than the backhaul
    minimum cycle, tries to shorten it (via accumulation or a TDD aggr
    fallback); otherwise just records the subframe counts as-is.
    Mutates ``rn_status[rn.name]`` entries; returns None.
    """
    for rn in device.childs:
        if len(rn_status[rn.name]['result']) > b_min_cycle:
            # RN sleeps longer than the backhaul cycle — try to adapt it.
            pkt_size = getAvgPktSize(rn)
            DATA_TH = int(getDataTH(rn.virtualCapacity, pkt_size))
            # Re-derive the cycle with a tighter probability threshold.
            accumulate_K = LengthAwkSlpCyl(rn.lambd['access'], DATA_TH, PROB_TH=0.2)
            if accumulate_K < b_min_cycle:
                # Amount of traffic accumulated over one backhaul cycle.
                accumulate_pkt = DataAcc(rn.lambd['access'], b_min_cycle)
                if not accumulate_pkt:
                    # No accumulated traffic: fall back to an aggr schedule
                    # (TDD) and count its awake subframes directly.
                    rn_status[rn.name].update({'result':aggr(rn, 'TDD')})
                    rn_status[rn.name].update({
                        'a-availability':True,
                        'a-subframe-count':sum([1 for i in rn_status[rn.name]['result'] if i])
                    })
                    # Backhaul subframes scaled by the capacity ratio.
                    rn_status[rn.name].update({
                        'b-subframe-count':ceil(rn.virtualCapacity*rn_status[rn.name]['a-subframe-count']/device.virtualCapacity)
                    })
                else:
                    # Size both subframe counts from the accumulated bytes.
                    rn_status[rn.name].update({
                        'a-availability':True,
                        'a-subframe-count':ceil(accumulate_pkt*pkt_size/rn.virtualCapacity),
                        'b-subframe-count':ceil(accumulate_pkt*pkt_size/device.virtualCapacity)
                    })
            else:
                # Cycle already compatible with b_min_cycle — leave as-is.
                continue
            # If access + backhaul demand overflows the minimum cycle,
            # the adaptation is infeasible.
            if rn_status[rn.name]['a-subframe-count']\
                +rn_status[rn.name]['b-subframe-count']\
                >b_min_cycle:
                rn_status[rn.name]['a-availability'] = False
        else:
            # RN cycle fits inside the backhaul cycle: record counts only.
            rn_status[rn.name].update({
                'a-availability':True,
                'a-subframe-count':sum([1 for i in rn_status[rn.name]['result'] if i])
            })
            rn_status[rn.name].update({
                'b-subframe-count':ceil(rn.virtualCapacity*rn_status[rn.name]['a-subframe-count']/device.virtualCapacity)
            })
def merge_merge(device, simulation_time):
    """Bottom-up two-hop scheduling: merge-based LBPS on the access side of
    each RN, then merge-based grouping of RNs on the backhaul side.

    Args:
        device: the parent node (presumably an eNB — TODO confirm) whose
            ``childs`` are RNs.
        simulation_time: length of the realtime timeline to build.

    Returns:
        The timeline dict produced by ``two_hop_realtimeline`` (with
        'backhaul' and 'access' entries), or None on failure.
    """
    prefix = "BottomUp::merge-merge\t"
    duplex = 'TDD'
    try:
        # Per-RN state: each RN first gets its own two-hop merge schedule.
        rn_status = {
            rn.name:{
                'device':rn,
                'result':merge(rn, duplex, two_hop=True),
                'a-subframe-count':None,
                'b-subframe-count': None,
            } for rn in device.childs
        }
        # Access-side aggregate timeline, sized to the longest RN cycle.
        a_lbps_result = [[] for i in range(max([len(K['result']['access']) for K in rn_status.values()]))]
        for (rn_name, info) in rn_status.items():
            pkt_size = getAvgPktSize(info['device'])
            DATA_TH = int(getDataTH(info['device'].virtualCapacity, pkt_size))
            info['a-subframe-count'] = sum([1 for i in info['result']['access'] if i])
            # Backhaul demand derived from access awake time and capacity.
            info['b-subframe-count'] = DATA_TH*info['a-subframe-count']*pkt_size/device.virtualCapacity
            # Overlay this RN's access schedule onto the shared timeline.
            for i in range(len(info['result']['access'])):
                a_lbps_result[i] += info['result']['access'][i]
        failed_RN=[]
        # Backhaul grouping: one group per RN, K = its access cycle length.
        groups = [
            {
                'device': [i],
                'K': len(rn_status[i.name]['result']['access'])
            } for i in device.childs
        ]
        # Merge equal-K groups (non-degraded) until schedulable or stuck.
        while not schedulability([i['K'] for i in groups]):
            groups.sort(key=lambda x:x['K'], reverse=True)
            non_degraded_success = False
            for source_G in groups:
                for target_G in groups:
                    if target_G is not source_G and target_G['K'] == source_G['K']:
                        s_required = sum([rn_status[d.name]['b-subframe-count'] for d in source_G['device']])
                        t_required = sum([rn_status[d.name]['b-subframe-count'] for d in target_G['device']])
                        # Merge only if combined demand still fits in K.
                        if s_required+t_required<source_G['K']:
                            non_degraded_success = True
                            source_G['device'] += target_G['device']
                            groups.remove(target_G)
                            break
                else:
                    # No mergeable partner for source_G — try the next one.
                    continue
                break
            if not non_degraded_success:
                # No merge possible anywhere: give up on grouping.
                failed_RN = groups
                break
        if failed_RN:
            # Fallback: everyone awake on backhaul every TTI of the cycle.
            b_lbps_result = [[rn for rn in device.childs]+[device]]*\
                max([len(K['result']['access']) for K in rn_status.values()])
        else:
            # calc the times of waking up for group
            max_K = max([G['K'] for G in groups])
            groups.sort(key=lambda x: x['K'])
            msg_execute("sleep cycle length = %d with %d groups" % \
                (max_K, len(groups)), pre=prefix)
            result = {i:[] for i in range(max_K)}
            for G in groups:
                # Start each group at the first still-empty TTI, then
                # repeat every K TTIs.
                base = 0
                for i in list(result.keys()):
                    if not result[i]:
                        base = i
                        break
                for TTI in range(base, len(result), G['K']):
                    result[TTI] = G['device'] + [device]
            b_lbps_result = list(result.values())
        # Map both schedules onto the real TDD frame structure.
        mapping_pattern = m_2hop(device.tdd_config)
        timeline = two_hop_realtimeline(
            mapping_pattern, simulation_time, b_lbps_result, a_lbps_result)
        # Awake total = backhaul + access minus double-counted overlaps.
        msg_warning("total awake: %d times" %\
            (sum([1 for i in timeline['backhaul'] if i])+\
            sum([1 for i in timeline['access'] if i])-\
            sum([1 for i in range(len(timeline['access'])) \
            if timeline['backhaul'][i] and timeline['access'][i]] )), pre=prefix)
        return timeline
    except Exception as e:
        msg_fail(str(e), pre=prefix)
def merge(device, duplex='FDD', two_hop=False):
    """Merge-based LBPS scheme: start with one group per child, then merge
    groups (non-degraded first, degraded as fallback) until the set of
    sleep cycles is schedulable.

    Args:
        device: an eNB or RN instance; any other type returns None.
        duplex: 'FDD' or 'TDD' — selects capacity vs. virtualCapacity only.
        two_hop: when True, also reserve backhaul subframes and return a
            dict with both 'backhaul' and 'access' schedules.

    Returns:
        list[list] access schedule, or {'backhaul': ..., 'access': ...}
        when ``two_hop`` is True; None on invalid input or failure.
    """
    prefix = "lbps::merge::%s\t" % device.name
    try:
        # duplex will only affect the capacity, not related to mapping.
        # FIX: original used `duplex is not 'FDD'` — identity comparison on
        # string literals is implementation-dependent; use equality.
        if duplex != 'FDD' and duplex != 'TDD':
            return
        if not isinstance(device, eNB) and not isinstance(device, RN):
            return

        interface = 'backhaul' if isinstance(device, eNB) else 'access'
        capacity = device.capacity if duplex == 'FDD' else device.virtualCapacity
        capacity = capacity[interface] if type(capacity) is dict else capacity
        pkt_size = getAvgPktSize(device)
        DATA_TH = int(getDataTH(capacity, pkt_size))
        msg_execute("load= %g\t" % getLoad(device, duplex), pre=prefix)
        required_backhaul_subframe = None

        # init merge groups: one per child, K rounded down to a power of 2
        # so cycles nest cleanly.
        groups = [
            {
                'device': [i],
                'lambda': i.lambd[interface],
                'K': 2**floor(log(LengthAwkSlpCyl(i.lambd[interface], DATA_TH), 2))
            } for i in device.childs
        ]

        # merge process
        while True:
            check_list = [i['K'] for i in groups]
            if two_hop:
                # Two-hop feasibility: access duty cycles plus the backhaul
                # share must fit within one cycle.
                Ki = max([G['K'] for G in groups])
                required_access_subframe = int(sum([Ki/G['K'] for G in groups]))
                required_backhaul_subframe = ceil(
                    DATA_TH*required_access_subframe*pkt_size/device.virtualCapacity)
                if sum([1/KG for KG in check_list])+(required_backhaul_subframe/Ki) <= 1:
                    break
            elif schedulability(check_list):
                break

            # non-degraded merge: combine groups without shrinking any K.
            groups.sort(key=lambda x: x['K'], reverse=True)
            g_non_degraded = non_degraded(groups, interface, DATA_TH)
            # A merge happened iff the group count went down.
            non_degraded_success = len(g_non_degraded) != len(groups)
            groups = g_non_degraded

            # degraded merge: forced merge of the two largest-K groups,
            # recomputing K from the combined load.
            if not non_degraded_success and len(groups) > 1:
                groups[1]['device'] += groups[0]['device']
                groups[1]['lambda'] += groups[0]['lambda']
                groups[1]['K'] = LengthAwkSlpCyl(groups[1]['lambda'], DATA_TH)
                groups.pop(0)

        # calc the times of waking up for group: each group starts at the
        # first empty TTI and repeats every K TTIs.
        max_K = max([G['K'] for G in groups])
        groups.sort(key=lambda x: x['K'])
        msg_execute("sleep cycle length = %d with %d groups" % \
            (max_K, len(groups)), pre=prefix)
        result = {i:[] for i in range(max_K)}
        for G in groups:
            base = 0
            for i in list(result.keys()):
                if not result[i]:
                    base = i
                    break
            for TTI in range(base, len(result), G['K']):
                result[TTI] = G['device'] + [device]
        a_result = list(result.values())

        if two_hop:
            # Backhaul schedule: the reserved subframes up front, idle after.
            b_result = [[device, device.parent]]*required_backhaul_subframe
            b_result.extend([[]]*(max_K-required_backhaul_subframe))
            return {'backhaul':b_result, 'access':a_result}
        return a_result

    except Exception as e:
        msg_fail(str(e), pre=prefix)
        return
def split(device, duplex='FDD', boundary_group=None):
    """Split-based LBPS scheme: iteratively split children into up to
    ``boundary_group`` load-balanced groups, growing the group count while
    the achievable sleep cycle still exceeds the number of groups.

    Args:
        device: an eNB or RN instance; any other type returns None.
        duplex: 'FDD' or 'TDD' — selects capacity vs. virtualCapacity only.
        boundary_group: maximum number of groups; falsy or oversized values
            are clamped to ``len(device.childs)``.

    Returns:
        list[list]: one entry per TTI of the final sleep cycle, each
        holding that group's devices (plus ``device`` for non-empty
        groups); None on invalid input or failure.
    """
    prefix = "lbps::split::%s\t" % device.name
    try:
        # duplex will only affect the capacity, not related to mapping.
        # FIX: original used `duplex is not 'FDD'` — identity comparison on
        # string literals is implementation-dependent; use equality.
        if duplex != 'FDD' and duplex != 'TDD':
            return
        if not isinstance(device, eNB) and not isinstance(device, RN):
            return

        interface = 'backhaul' if isinstance(device, eNB) else 'access'
        capacity = device.capacity if duplex == 'FDD' else device.virtualCapacity
        capacity = capacity[interface] if type(capacity) is dict else capacity
        pkt_size = getAvgPktSize(device)
        DATA_TH = int(getDataTH(capacity, pkt_size))
        msg_execute("load= %g\t" % getLoad(device, duplex), pre=prefix)

        sleep_cycle_length = LengthAwkSlpCyl(device.lambd[interface], DATA_TH)
        # Start with a single group holding every child.
        groups = {
            0: {
                'device': [ch for ch in device.childs],
                'lambda': device.lambd[interface],
                'K': sleep_cycle_length
            }
        }
        if not boundary_group or boundary_group > len(device.childs):
            boundary_group = len(device.childs)

        # Split process: rebuild the grouping with more groups while the
        # sleep cycle can still accommodate them.
        while len(groups) < sleep_cycle_length:
            groups = {
                i:{
                    'device': [],
                    'lambda':0,
                    'K': 0
                } for i in range(min(sleep_cycle_length, boundary_group))
            }
            for i in device.childs:
                # find the minimum-lambda group (ties broken at random)
                min_lambd = min([groups[G]['lambda'] for G in groups])
                min_lambd_G = [G for G in groups if groups[G]['lambda'] == min_lambd]
                min_lambd_G = random.choice(min_lambd_G)
                # append the device to the minimum lambda group
                # FIXME: performance issue
                groups[min_lambd_G]['device'].append(i)
                groups[min_lambd_G]['lambda'] += i.lambd[interface]
                groups[min_lambd_G]['K'] = LengthAwkSlpCyl(groups[min_lambd_G]['lambda'], DATA_TH)
            # The schedulable cycle is limited by the worst (smallest) K.
            K = min([groups[G]['K'] for G in groups])
            if K == sleep_cycle_length:
                break
            elif len(groups) == boundary_group:
                sleep_cycle_length = K if K > 0 else sleep_cycle_length
                break
            else:
                sleep_cycle_length = K if K > 0 else sleep_cycle_length

        msg_execute("sleep cycle length = %d with %d groups" % \
            (sleep_cycle_length, len(groups)), pre=prefix)

        result = [[] for _ in range(sleep_cycle_length)]
        for i in groups:
            # FIX (style): original used `x and x.append(...)` as a bare
            # expression; an explicit `if` says the same thing clearly —
            # only non-empty groups wake the device itself.
            if groups[i]['device']:
                groups[i]['device'].append(device)
            result[i] += groups[i]['device']
        return result

    except Exception as e:
        msg_fail(str(e), pre=prefix)
        return