def __init__(self, sim_param=None, no_seed=False):
    """
    Initialize the Simulation object.

    :param sim_param: optional SimParam object for parameter pre-configuration;
        a fresh SimParam() is created when omitted.
    :param no_seed: optional. If set to True, the RNG is initialized without
        a specific seed.
    """
    # Avoid the mutable-default pitfall: `sim_param=SimParam()` in the
    # signature is evaluated once at definition time, so every Simulation
    # constructed without an explicit parameter object would share the SAME
    # SimParam instance (and any mutation would leak between simulations).
    self.sim_param = sim_param if sim_param is not None else SimParam()
    self.sim_state = SimState()
    self.system_state = SystemState(self)
    self.event_chain = EventChain()
    self.sim_result = SimResult(self)
    self.counter_collection = CounterCollection(self)
    # inter-arrival times use unit mean: mean = 1.0 -> 1/lambda = 1.0 -> lambda = 1
    if no_seed:
        self.rng = RNG(ExponentialRNS(1.0),
                       ExponentialRNS(1. / float(self.sim_param.RHO)))
    else:
        self.rng = RNG(
            ExponentialRNS(1.0, self.sim_param.SEED_IAT),
            ExponentialRNS(1. / float(self.sim_param.RHO),
                           self.sim_param.SEED_ST))
def initialize_slices(sim_param, log_file):
    """
    Create and configure one SliceSimulation per slice, populate each with
    its users, and (optionally) log every slice's parameters.

    :param sim_param: SimParam carrying per-slice configuration lists
        (delay/rate requirements, packet sizes, IATs, user counts, SM algos).
    :param log_file: open writable file handle, or None to skip logging.
        NOTE: when provided, the handle is closed here after logging.
    :return: list of configured SliceSimulation objects.
    """
    slices = []

    # SLA requirements list [RR, MCQI, PF]
    delay_thresholds = sim_param.delay_requirements  # ms
    rate_thresholds = sim_param.rate_requirements    # kbps
    packet_sizes = sim_param.packet_sizes            # in bits
    mean_iats = sim_param.mean_iats
    # NOTE: removed a stray no-op expression that referenced an undefined
    # name `slc` here (float(slc.slice_param.P_SIZE / slc.slice_param.MEAN_IAT))
    # — it raised NameError as soon as the function ran.

    seed_dist = sim_param.SEED_OFFSET  # users in all slices have identical distance distributions
    # rng_dist = RNG(ExponentialRNS(lambda_x=1. / float(sim_param.MEAN_Dist)), s_type='dist')  # , the_seed=seed_dist
    rng_dist = RNG(UniformRNS(sim_param.DIST_MIN, sim_param.DIST_MAX,
                              the_seed=seed_dist), s_type='dist')

    tmp_user_id = 0
    for i in range(sim_param.no_of_slices):
        slice_param_tmp = SliceParam(sim_param)
        slice_param_tmp.SLICE_ID = i
        slice_param_tmp.P_SIZE = packet_sizes[i]
        slice_param_tmp.MEAN_IAT = mean_iats[i]
        # SLA requirements
        slice_param_tmp.DELAY_REQ = delay_thresholds[i]
        slice_param_tmp.RATE_REQ = rate_thresholds[i]
        slices.append(SliceSimulation(slice_param_tmp))

        # initialize all users with traffics and distances
        tmp_users = []
        for j in range(sim_param.no_of_users_list[i]):
            tmp_users.append(User(tmp_user_id, rng_dist.get_dist(),
                                  slice_list=[slices[i]], sim_param=sim_param))
            tmp_user_id += 1

        # insert users to slice
        slices[i].insert_users(tmp_users)

        # Choose Slice Manager Algorithm:
        # 'PF': prop fair, 'MCQI': Max Channel Quality Index, 'RR': round-robin
        slices[i].slice_param.SM_ALGO = sim_param.SM_ALGO_list[i]

    if log_file is not None:
        # log Slice Parameters
        for i in range(sim_param.no_of_slices):
            attrs = vars(slices[i].slice_param)
            log_file.write('\nSliceParam\n' + ''.join(
                "%s: %s\n" % item for item in attrs.items()))
        log_file.close()

    return slices
def gen_sample_array(self):
    """Return stratified sample indices: train-half indices followed by
    test-half indices, preserving the class balance of self.class_vector."""
    generator = RNG()
    splitter = StratifiedShuffleSplit(n_splits=self.n_splits, test_size=0.5)
    labels = self.class_vector
    # dummy 2-D features: the split only depends on the labels
    features = generator.randn(labels.shape[0], 2)
    splitter.get_n_splits(features, labels)
    train_idx, test_idx = next(splitter.split(features, labels))
    return np.hstack([train_idx, test_idx])
def __init__(self, sim_param):
    """Set up controller state and the RNG driving random RB allocation."""
    self.sim_param = sim_param
    self.slices_cycle = []
    self.avg_rate_pf = []
    self.df = pd.DataFrame()
    # uniform integer draws over slice indices back the random allocation
    slice_picker = UniformIntRNS(0, sim_param.no_of_slices - 1,
                                 the_seed=sim_param.SEED_OFFSET)
    self.rng_rb_allocate = RNG(slice_picker, s_type='rb_allocation')
def get_shadowing(self):
    """Return a (num_RBs x T_FINAL*10) matrix of shadowing samples,
    one row per resource block.

    The seed advances per RB only when the channel is frequency selective;
    otherwise every row is drawn from the same seed.
    """
    params = self.user.sim_param
    horizon = params.T_FINAL * 10
    result = np.empty((0, horizon))
    seed = self.user.user_id * len(self.RB_pool)
    for _ in self.RB_pool:
        self.rng_shadowing = RNG(
            NormalRNS(0, params.SIGMA_shadowing, seed), s_type='shadowing')
        row = self.rng_shadowing.get_shadowing(horizon)
        result = np.append(result, [row], axis=0)
        if params.freq_selective:
            seed += 1
    return result
def get_shadowing(self):
    """Return a (num_RBs x width) matrix of shadowing samples, one row per
    resource block, where width = T_FINAL + T_COH * (buffer - 1).

    The buffer rows of extra coherence periods cover packets still in
    flight at T_FINAL. The seed advances per RB only for frequency-selective
    channels.
    """
    params = self.user.sim_param
    t_final = params.T_FINAL
    t_c = params.T_C
    t_coh = params.T_COH  # coherence time, in ms
    n_buffer = 3  # head-room for the packets in the simulation final
    result = np.empty((0, int(t_final) + t_coh * (n_buffer - 1)))
    seed = (self.user.user_id * len(self.RB_pool)) + params.SEED_OFFSET
    for _ in self.RB_pool:
        self.rng_shadowing = RNG(
            NormalRNS(0, params.SIGMA_shadowing, seed), s_type='shadowing')
        row = self.rng_shadowing.get_shadowing2(t_final, t_c, t_coh, n_buffer)
        result = np.append(result, [row], axis=0)
        if params.freq_selective:
            seed += 1
    return result
def get_rng(self):
    """Derive a deterministic RNG from self.keys.

    Keys are folded into a rolling MD5 digest in sorted key order; after
    each update the digest's integer value is appended to the seed vector,
    so every seed depends on all previously-hashed keys.
    """
    digest = hashlib.md5()
    seeds = []
    for name in sorted(self.keys):
        digest.update(self.keys[name].encode("utf-8"))
        seeds.append(int(digest.hexdigest(), 16))
    return RNG(*seeds)
def reset(self, no_seed=False):
    """
    Reset the Simulation object.

    :param no_seed: optional. If set to True, the RNG is reset without a
        specific seed.
    """
    self.sim_state = SimState()
    self.system_state = SystemState(self)
    self.event_chain = EventChain()
    self.sim_result = SimResult(self)
    self.counter_collection = CounterCollection(self)
    # service-time rate derived from the configured utilisation RHO
    service_lambda = 1. / float(self.sim_param.RHO)
    if no_seed:
        self.rng = RNG(ExponentialRNS(1.0), ExponentialRNS(service_lambda))
    else:
        self.rng = RNG(
            ExponentialRNS(1.0, self.sim_param.SEED_IAT),
            ExponentialRNS(service_lambda, self.sim_param.SEED_ST))
def reset(self, no_seed=False):
    """
    Reset the Simulation object.

    :param no_seed: optional. If set to True, the RNG is reset without a
        specific seed.
    """
    self.sim_state = SimState()
    self.system_state = SystemState(self)
    self.event_chain = EventChain()
    self.sim_result = SimResult(self)
    self.counter_collection = CounterCollection(self)
    # service-time rate derived from the configured utilisation RHO
    mu = 1. / float(self.sim_param.RHO)
    if no_seed:
        self.rng = RNG(ExponentialRNS(1), ExponentialRNS(mu))
    else:
        self.rng = RNG(ExponentialRNS(1, self.sim_param.SEED_IAT),
                       ExponentialRNS(mu, self.sim_param.SEED_ST))
def __init__(self, sim_param=None, no_seed=False):
    """
    Initialize the Simulation object.

    :param sim_param: optional SimParam object for parameter pre-configuration;
        a fresh SimParam() is created when omitted.
    :param no_seed: optional. If set to True, the RNG is initialized without
        a specific seed.
    """
    # Avoid the mutable-default pitfall: `sim_param=SimParam()` in the
    # signature is evaluated once at definition time, so every Simulation
    # constructed without an explicit parameter object would share the SAME
    # SimParam instance.
    self.sim_param = sim_param if sim_param is not None else SimParam()
    self.sim_state = SimState()
    self.system_state = SystemState(self)
    self.event_chain = EventChain()
    self.sim_result = SimResult(self)
    self.counter_collection = CounterCollection(self)
    if no_seed:
        self.rng = RNG(ExponentialRNS(1),
                       ExponentialRNS(1. / float(self.sim_param.RHO)))
    else:
        self.rng = RNG(
            ExponentialRNS(1, self.sim_param.SEED_IAT),
            ExponentialRNS(1. / float(self.sim_param.RHO),
                           self.sim_param.SEED_ST))
def poisson_arrivals(self, slicesim):
    """Generate Poisson packet arrivals for `slicesim` up to T_FINAL.

    Exponential inter-arrival times are drawn from a freshly-seeded RNG
    (rate = 1/MEAN_IAT); one PacketArrival event is built per draw.
    :return: list of PacketArrival events ordered by arrival time.
    """
    mean_iat = slicesim.slice_param.MEAN_IAT
    self.rng = RNG(ExponentialRNS(1. / float(mean_iat), self.seed_iat),
                   s_type='iat')
    now = slicesim.sim_state.now
    horizon = self.sim_param.T_FINAL

    arrival_times = []
    gaps = []  # consecutive inter-arrival gaps (kept for optional inspection)
    t = now + self.rng.get_iat()
    while t < horizon:
        arrival_times.append(t)
        t += self.rng.get_iat()
        if len(arrival_times) > 2:
            gaps.append(arrival_times[-1] - arrival_times[-2])

    return [PacketArrival(slicesim, at) for at in arrival_times]
def __init__(self, game, seed):
    """Create the dungeon: derive one sub-seed per level from `seed` and
    set up the libtcod map plus refresh bookkeeping."""
    logger.info('Making dungeon')
    self.game = game
    self.rng = RNG(seed=seed)
    # one independent sub-seed per level, drawn in a fixed order
    self.levels = [self.rng.get_int(0, 1000000) for _ in range(NUM_LEVELS)]
    self.rng.delete()
    self.tcod_map = libtcod.map_new(LEVEL_W, LEVEL_H)
    # does the fov need to be refreshed (i.e. new level, player moved...)
    self.refresh_fov = True
    # does the tcod map need to be refreshed (i.e. terrain change)
    self.refresh_tcod = True
    # positions of entities that block movement or sight
    self.creature_positions = []
def main():
    """Exploit driver: recover the server's PHP mt_rand seed from two leaked
    outputs, resynchronise a local RNG clone, then predict and submit the
    next "Lucky Number".

    NOTE(review): pwntools remote() script written in Python 2 (print
    statement syntax).
    """
    r = remote('eof.ais3.org', 39091)
    # Leak PRNG output #0 by playing a throwaway round.
    r.sendlineafter('> ', '0')
    r.sendlineafter('> ', '0')
    r.recvuntil('Lucky Number: ')
    _R000 = int(r.recvline().strip())
    # Skip 226 outputs so the next leak is output #227 of the same stream.
    r.sendlineafter('> ', '226')
    r.sendlineafter('> ', '0')
    r.recvuntil('Lucky Number: ')
    _R227 = int(r.recvline().strip())
    # Invert mt_rand's output tempering to recover the raw state words.
    S000 = undo_php_mt_rand(_R000)
    S227 = undo_php_mt_rand(_R227)
    # Two state words from the same reload constrain the original seed.
    seed = undo_php_mt_reload(S000, S227, 0, 1)
    if seed:
        print seed
    else:
        exit(-1)
    # Re-seed a local clone and verify it reproduces both leaked values.
    rng = RNG()
    rng.srand(seed)
    if rng.rand() != _R000:
        exit(-1)
    for i in range(226):
        rng.rand()
    if rng.rand() != _R227:
        exit(-1)
    # Predict the next lucky number and submit it.
    r.sendlineafter('> ', '0')
    r.sendlineafter('> ', str(rng.rand()))
    print r.recvall()
    r.close()
class ShamirTest(unittest.TestCase):
    """Randomised round-trip tests for Shamir secret sharing."""

    # deterministic project RNG shared by every test method; draw order
    # therefore matters for reproducibility
    rng = RNG()
    # verbosity threshold: raise to print per-case diagnostics
    debug = 0

    def _testShamirRecover(self, minimum, shares, prime, secret, keys):
        """Recover `secret` from a random subset of `minimum` shares and
        check that every reconstructed share matches `keys`."""
        if self.debug >= 100:
            print(
                "testShamirRecover(minimum=%d,shares=%d,prime=%d,secret=%s,keys=%s)"
                % (minimum, shares, prime, secret, keys))
        shamir = Shamir(minimum, prime)
        # pick `minimum` distinct shares at random (share ids are 1-based)
        choices = list(range(shares))
        for k in range(minimum):
            i = self.rng.next(len(choices))
            shamir.setKey(choices[i] + 1, keys[choices[i]])
            del choices[i]
        assert shamir.getSecret() == secret
        # reconstruction must also reproduce every original share
        for i in range(shares):
            assert shamir.getKey(i + 1) == keys[i]

    def _testShamir(self, minimum, shares, prime, secret):
        """Split `secret` into `shares` keys, then run 100 random recoveries."""
        if self.debug >= 10:
            print("testShamir(minimum=%d,shares=%d,prime=%d,secret=%d)" %
                  (minimum, shares, prime, secret))
        shamir = Shamir(minimum, prime)
        shamir.setSecret(secret)
        shamir.randomizeKeys(shares, self.rng)
        keys = [shamir.getKey(i) for i in range(1, shares + 1)]
        for t in range(100):
            self._testShamirRecover(minimum, shares, prime, secret, keys)

    def testShamir(self):
        """Sweep primes below powers of two, share counts, thresholds, and
        boundary secrets (0, 1, 2, random, prime-2, prime-1)."""
        if self.debug >= 1:
            print("testShamir()")
        millerrabin = MillerRabin()
        for bits in range(32, 256, 4):
            prime = millerrabin.prevPrime(2**bits)
            for shares in range(1, 9):
                for minimum in range(1, shares + 1):
                    for secret in [
                            0, 1, 2, self.rng.next(prime), prime - 2, prime - 1
                    ]:
                        self._testShamir(minimum, shares, prime, secret)
def plot_cifar10(X, y, samples_per_class=7, title='CIFAR-10 dataset',
                 title_params=None, imshow_params=None):
    """Plot a grid of CIFAR-10 samples: one column per class, `samples_per_class`
    rows per class, on the current matplotlib figure.

    :param X: image array, indexed as X[idx]; cast to uint8 before display
        (assumes HxWxC image data — TODO confirm against caller)
    :param y: label vector aligned with X (values 0..9)
    :param samples_per_class: images shown per class column
    :param title: figure suptitle
    :param title_params: optional kwargs for plt.suptitle (defaults filled in)
    :param imshow_params: optional kwargs for plt.imshow (defaults filled in)
    """
    # check params — fill defaults without mutating caller-owned dicts' absence
    title_params = title_params or {}
    title_params.setdefault('fontsize', 20)
    title_params.setdefault('y', 0.95)
    imshow_params = imshow_params or {}
    imshow_params.setdefault('interpolation', 'none')

    num_classes = 10
    classes = range(num_classes)
    for c in classes:
        idxs = np.flatnonzero(y == c)
        # fixed seed => identical sample selection on every call
        idxs = RNG(seed=1337).choice(idxs, samples_per_class, replace=False)
        for i, idx in enumerate(idxs):
            # grid filled so that class c occupies column c, sample i row i
            plt_idx = i * num_classes + c + 1
            ax = plt.subplot(samples_per_class, num_classes, plt_idx)
            ax.spines['bottom'].set_linewidth(2.)
            ax.spines['top'].set_linewidth(2.)
            ax.spines['left'].set_linewidth(2.)
            ax.spines['right'].set_linewidth(2.)
            # hide all ticks and tick labels
            plt.tick_params(axis='both',
                            which='both',
                            bottom='off',
                            top='off',
                            left='off',
                            right='off',
                            labelbottom='off',
                            labelleft='off',
                            labelright='off')
            plt.imshow(X[idx].astype('uint8'), **imshow_params)
            if i == 0:
                plt.title(get_cifar10_label(c))
    plt.suptitle(title, **title_params)
    plt.subplots_adjust(wspace=0, hspace=0)
def roll():
    """Flask view: draw new random numbers from session-stored parameters.

    Redirects to 'index' when the session is incomplete or the range is
    invalid, to 'preRoll' on a negative count, and to 'display' on success.
    """
    try:
        # int(None) raises TypeError when a session value is missing
        lower = int(session.get('min', None))
        upper = int(session.get('max', None))
        count = int(session.get('nNum', None))
        seen = session.get('seen', None)
    except TypeError:
        return redirect(url_for('index'))

    try:
        drawn = RNG(lower, upper, count, seen)
    except NegativeNNum:
        flash('Only positive numbers are allowed')
        return redirect(url_for('preRoll'))
    except InvalidRange:
        flash('Please input a valid min-max range')
        return redirect(url_for('index'))

    # persist the draw and the running history back into the session
    session['newNumbers'] = drawn
    seen.append(drawn)
    session['seen'] = seen
    return redirect(url_for('display'))
class ChannelModal(object):
    """Per-user radio channel model: pathloss + shadowing + noise -> data rate.

    NOTE(review): the name likely intends "ChannelModel".
    """

    def __init__(self, user):
        """Pre-compute per-RB noise power and the shadowing matrix for `user`."""
        self.user = user
        self.RB_pool = user.sim_param.RB_pool
        # earlier channel-gain experiments, kept for reference:
        #self.rng = RNG(ExponentialRNS(1. / float(user.sim_param.MEAN_CG), user.sim_param.SEED_CG), s_type='cg')
        #self.rng = RNG(UniformRNS(2*float(user.sim_param.MEAN_CG), 0., user.sim_param.SEED_CG), s_type='cg')
        #self.rng = RNG(ExponentialRNS(1. / float(user.sim_param.MEAN_CG), user.user_id), s_type='cg')
        #self.rng = RNG(UniformRNS(2. , 0., 21), s_type='cg')
        # self.channel_gains = (user.user_id%2+0.1)*self.rng.get_cg(len(self.RB_pool), user.sim_param.T_FINAL + user.sim_param.T_C)
        #self.channel_gains = np.random.rand(len(self.RB_pool), user.sim_param.T_FINAL+user.sim_param.T_C)
        #self.channel_gains = np.ones((len(self.RB_pool), user.sim_param.T_FINAL + user.sim_param.T_C))
        # Rayleigh Fading:
        # self.rayleigh_envelope = GenerateRayleighEnvelope(self.RB_pool, user.sim_param.T_FINAL, user.sim_param.T_S, fm=10)
        # New channel model: rate = BW * log2(1+SINR), SNR(dB) = Pt - PL - P_noise
        #self.pathloss_dB = self.get_pathloss()
        self.noise_per_rb_dBm = self.get_noise_per_rb()
        #self.SINR_dB = self.user.sim_param.P_TX_dBm - self.pathloss_dB - self.noise_per_rb_dBm
        #seed_shadowing = self.user.user_id  # % self.user.sim_param.no_of_users_per_slice
        #self.rng_shadowing = RNG(NormalRNS(0, self.user.sim_param.SIGMA_shadowing, seed_shadowing), s_type='shadowing')
        #self.shadowing = self.rng_shadowing.get_shadowing(user.sim_param.T_FINAL)
        self.shadowing = self.get_shadowing()

    def get_shadowing(self):
        """Return a (num_RBs x T_FINAL*10) matrix of shadowing samples in dB.

        One row per resource block; the seed advances per RB only when the
        channel is frequency selective, otherwise all rows share one seed.
        """
        t_final = self.user.sim_param.T_FINAL * 10
        shadowing = np.empty((0, t_final))
        seed_shadowing = self.user.user_id * len(self.RB_pool)
        for i in range(len(self.RB_pool)):
            self.rng_shadowing = RNG(NormalRNS(
                0, self.user.sim_param.SIGMA_shadowing, seed_shadowing),
                s_type='shadowing')
            temp_arr = self.rng_shadowing.get_shadowing(t_final)
            shadowing = np.append(shadowing, [temp_arr], axis=0)
            if self.user.sim_param.freq_selective:
                seed_shadowing += 1
        return shadowing

    def get_noise_per_rb(self):
        """
        returns noise in dB
        P_noise_dBm = 10*log10(k_boltzmann * temperature * bandwidth / 1mW)
        """
        try:
            k_boltzmann = constants.Boltzmann
            temperature = self.user.sim_param.TEMPERATURE
            rb_bw = self.user.sim_param.RB_BANDWIDTH
            # thermal noise power over one RB bandwidth, in dBm (ref. 1 mW)
            P_noise_dBm = 10 * np.log10(
                k_boltzmann * temperature * rb_bw / 1e-3)
        except:
            raise RuntimeError("Noise calculation error")
            return  # NOTE(review): unreachable after raise
        return P_noise_dBm

    def get_pathloss(self, rb, time):
        """
        returns pathloss in dB
        PL = FSPL(f,1m) + 10n log_10(d/1m) + shadowing
        FSPL(f,1m) = 20 log_10(4*pi*f/c)
        """
        try:
            f = self.user.sim_param.FREQ
            n = self.user.sim_param.PL_Exponent
            d = self.user.distance  # in meter
            c = constants.c
            pi = constants.pi
            # shadowing matrix indexed by (resource block, integer time step)
            shadowing_dB = self.shadowing[rb, time]
            fs_pl_dB = 20 * np.log10(4 * pi * f / c)
            pl_dB = fs_pl_dB + 10 * n * np.log10(d) + shadowing_dB
        except:
            raise RuntimeError("Path loss calculation error")
            return  # NOTE(review): unreachable after raise
        return pl_dB

    def get_data_rate(self, rb_arr, time):
        """
        add effect of time in deterministic manner!!!!
        rate = BW * log2(1 + SINR) in bits per sec
        :return: summation of data rate of given user for given resource blocks
        """
        try:
            rate = 0
            rb_bw = self.user.sim_param.RB_BANDWIDTH
            # `time` must be (numerically) an integer sample index
            if time - int(time) > 0.0001:
                raise RuntimeError("Time variable is not integer!!")
            if rb_arr == [-1]:
                # sentinel: no resource blocks assigned
                return 0
            for rb in rb_arr:
                pathloss_dB = self.get_pathloss(rb, time)
                SINR_dB = self.user.sim_param.P_TX_dBm - pathloss_dB - self.noise_per_rb_dBm
                SINR = np.power(10, SINR_dB / 10)
                rate += rb_bw * np.log2(1 + SINR)
                #rate += 1000  # for testing each ms 1 bit is served
        except:
            raise RuntimeError("Data Rate Calculation error")
        return rate

    def get_load_change(self, rb_arr, time_arr):
        """
        :return: data rate * duration of given user for given resource blocks
        at given time array. Data rate is calculated separately for each ms
        and summed; load_change in bits.
        """
        try:
            if len(time_arr) == 0:
                return 0
            load_change = 0
            for t in time_arr:
                # rejects float timestamps; np.arange over ints yields int64,
                # which passes — TODO confirm callers never pass float arrays
                if isinstance(t, float):
                    raise RuntimeError("time must be integer")
                load_change += np.nansum(self.get_data_rate(rb_arr, t) * 1e-3)  # rate * 1ms
        except:
            raise RuntimeError("Error during transmitted load calculation")
        return load_change
        # np.sum(self.channel_gains[rb_arr][time])  # user_id-1 for indexing

    def get_serving_duration(self, packet):
        """
        Change the status of the packet once the serving process starts.
        """
        t_s = self.user.sim_param.T_S
        #t_temp = packet.slicesim.sim_state.now  # - packet.slicesim.sim_state.t_round_start)
        t_temp = packet.t_last_start
        t_arr = np.arange(t_temp, t_temp + t_s)
        RB_arr = packet.server.RB_list
        r_temp = self.get_load_change(RB_arr, t_arr)
        try:
            # advance slot by slot until enough load would be transmitted
            while r_temp < packet.remaining_load:
                t_temp += t_s
                t_arr = np.arange(t_temp, t_temp + t_s)
                r_temp += self.get_load_change(RB_arr, t_arr)
        except:
            raise RuntimeError("Error during get_serving_duration")
        r_last = self.get_load_change(RB_arr, t_arr)
        # linear interpolation inside the last slot for the exact finish time
        t_last = (packet.remaining_load - (r_temp - r_last)) / (r_last / t_s)
        t_est = t_temp + t_last
        #packet.estimated_duration = t_est - packet.slicesim.sim_state.now
        #packet.occupation_duration = t_temp+t_s-packet.slicesim.sim_state.now
        packet.t_finish_real = t_est  #+packet.slicesim.sim_state.t_round_start
        packet.t_finish = t_temp + t_s  #+packet.slicesim.sim_state.t_round_start

    def update_remaining_load(self, packet):
        """
        substract load change since the latest serving from remaining load
        return tp = load_change / duration
        """
        t_start = int(
            round(packet.t_arrival + (packet.d_wait + packet.d_served)))  # starting time of latest serve
        if packet.t_arrival + (packet.d_wait + packet.d_served) - t_start > 0.0001:
            raise RuntimeError("time calculation error in packet")
        t_arr = np.arange(t_start, packet.slicesim.sim_state.now)
        RB_arr = packet.server.RB_list  # function is called before RB_list is changed
        r_temp = self.get_load_change(RB_arr, t_arr)
        packet.remaining_load -= r_temp
        if packet.remaining_load < 0:
            raise RuntimeError("Remaining load can't be negative!")
        if packet.slicesim.sim_state.now - t_start != 0:
            tp = float(r_temp) / float(packet.slicesim.sim_state.now - t_start)
            return tp  # throughput in kilobits per second
        else:
            return float(0)

    def get_remaining_load(self, packet):
        """
        returns the remaining load without changing anything
        initially same as update_remaining_load
        """
        t_start = int(
            round(packet.t_arrival + (packet.d_wait + packet.d_served)))  # starting time of latest serve
        if packet.t_arrival + (packet.d_wait + packet.d_served) - t_start > 0.0001:
            raise RuntimeError("time calculation error in packet")
        t_arr = np.arange(t_start, packet.slicesim.sim_state.now)
        RB_arr = packet.server.RB_list
        r_temp = self.get_load_change(RB_arr, t_arr)
        return packet.remaining_load - r_temp

    def get_throughput_rt(self, packet):
        """
        NOT USED
        returns throughput for round termination event without changing anything
        """
        t_start = int(
            round(packet.t_arrival + (packet.d_wait + packet.d_served)))  # starting time of latest serve
        if packet.t_arrival + (packet.d_wait + packet.d_served) - t_start > 0.0001:
            raise RuntimeError("time calculation error in packet")
        t_arr = np.arange(t_start, packet.slicesim.sim_state.now)
        RB_arr = packet.server.RB_list
        r_temp = self.get_load_change(RB_arr, t_arr)
        if packet.slicesim.sim_state.now - t_start != 0:
            tp = float(r_temp) / float(packet.slicesim.sim_state.now - t_start)  # total served size over time
            return tp  #throughput
        else:
            return float(0)

    def get_throughput_sc(self, packet):
        """
        Only when service is completed, calculates the latest serve
        """
        t_start = int(
            round(packet.t_arrival + (packet.d_wait + packet.d_served)))  # starting time of latest serve
        if packet.t_arrival + (packet.d_wait + packet.d_served) - t_start > 0.0001:
            raise RuntimeError("time calculation error in packet")
        t_arr = np.arange(t_start, packet.slicesim.sim_state.now)
        #RB_arr = packet.server.RB_list  # function is called after RB_list is changed
        #r_temp = self.get_output_rate(RB_arr, t_arr)
        if packet.slicesim.sim_state.now - t_start != 0:
            tp = float(packet.remaining_load) / float(
                packet.slicesim.sim_state.now - t_start)  # total served size over time
            return tp  #throughput
        else:
            return float(0)

    def get_throughput_sc2(self, packet):
        """
        Only when service is completed, counts just service completions
        """
        t_start = packet.t_last_start  # int(round(packet.t_arrival + (packet.d_wait + packet.d_served)))  # starting time of latest serve
        t_arr = np.arange(t_start, packet.slicesim.sim_state.now)
        if packet.slicesim.sim_state.now - t_start != 0:
            tp = float(
                packet.size) / float(packet.slicesim.sim_state.now - t_start)  # total served size over time
            return tp  #throughput
        else:
            return float(0)

    def get_CQI_list(self, server, RB_mapping_sm):
        """
        returns data_rate values for each rb at each timeslot in T_sm
        only get the rate at the beginning of new Ts period.
        """
        CQI_list = []
        t_s = self.user.sim_param.T_S
        t_temp = int(server.slicesim.sim_state.now)  # - packet.slicesim.sim_state.t_round_start)
        if server.slicesim.sim_state.now - t_temp > 0.0001:
            raise RuntimeError("time calculation error in get_CQI_list")
        #t_arr = np.arange(t_temp, t_temp + t_s)
        # fill RB_matching_sm
        for i in range(RB_mapping_sm.shape[1]):  # loop over time
            RB_arr = []
            tmp_arr = []
            for j in range(RB_mapping_sm.shape[0]):  # loop over RBs
                if RB_mapping_sm[j][i]:
                    #RB_arr.append(j)
                    #tmp_arr.append(self.get_load_change([j], t_arr))
                    tmp_arr.append(self.get_data_rate([j], t_temp))
                else:
                    # -1 marks an RB not mapped to this slot
                    tmp_arr.append(-1.)
            #CQI_list.append(self.get_output_rate(RB_arr, t_arr))
            CQI_list.append(tmp_arr)
            t_temp += t_s  # increase time by t_s for the next slots rate
            #t_arr = np.arange(t_temp, t_temp + t_s)
        return CQI_list  # indexing: [time], [RB]
MAXBOSSHP = 50
DEBUG = 'DEBUG' in os.environ
if 'DVORAK' in os.environ:
    commands = ",oae.u'"  # Mapping from char to action ID
else:
    commands = "wsadefq"  # Mapping from char to action ID
# Offset of each direction
# Dir: ^ v < >  (16 cells per maze row, so +-16 moves vertically)
moveOff = [-16, 16, -1, 1]
# Variable initialization
p = 198543992929796982831856294122484542605661595500963697094465382211596515703899
rng = RNG(p, pow(0xdeadbeef, 31337, p) // 2)
# parse the imported ASCII-art maze into a mutable 2-D character grid
maze = [list(line) for line in maze.split('\n')]
# starting positions drawn from the custom RNG — presumably cell indices
# in the 16-wide grid; TODO confirm against RNG.extract semantics
player, monster = rng.extract(8), rng.extract(8)
items = {i: 0 for i in range(256)}
HP, BossHP, score = MAXHP, MAXBOSSHP, 0
history = ""  # For debug env


def draw(pos, c):
    """Draw a character on the dungeons map"""
    # cell index -> (x, y) text coordinates: 4 columns and 2 rows per cell,
    # offset into the cell interior
    x, y = pos % 16 * 4 + 2, pos // 16 * 2 + 1
    maze[y][x] = c


def renderNormal():
    """Render normal stage"""
class PhrasesTest(unittest.TestCase):
    """Round-trip and cross-language ambiguity tests for the Phrases lists."""

    # deterministic project RNG shared by all tests; draw order matters
    rng = RNG()

    def mkPhrase(self, language, length):
        """Build a random phrase of `length` words in `language`."""
        phrases = Phrases.forLanguage(language)
        words = phrases.words
        phrase = [words[self.rng.next(len(words))] for i in range(length)]
        return phrases.space().join(phrase)

    def testAmbiguity(self):
        """Fail if any word maps to different indices in different languages."""
        languages = Phrases.getLanguages()
        # word -> index -> set of languages using that (word, index) pair
        allWords = dict()
        for language in languages:
            phrases = Phrases.forLanguage(language)
            for word in phrases.words:
                if not word in allWords:
                    allWords[word] = dict()
                index = phrases.invWords[word]
                if not index in allWords[word]:
                    allWords[word][index] = set()
                allWords[word][index].add(language)
        ambiguous = False
        for word in allWords:
            indexs = allWords[word]
            if len(indexs) <= 1:
                continue
            # same word under multiple indices: report every offender
            ambiguous = True
            for index in indexs:
                for lang in indexs[index]:
                    print("word %s in %s index %d" % (word, index, lang))
        assert not ambiguous

    def _testPhrase(self, language, phrase):
        """Check phrase<->number round trips in `language` and in every
        language that detects the phrase."""
        assert Phrases.forLanguage(language).isPhrase(
            phrase), "phrase='%s' language=%s" % (phrase, language)
        number = Phrases.forLanguage(language).toNumber(phrase)
        detects = Phrases.detectLanguages(phrase)
        for lang2 in detects:
            number2 = Phrases.forLanguage(lang2).toNumber(phrase)
            phrase2 = Phrases.forLanguage(lang2).toPhrase(number)
            assert number == number2
            assert phrase == phrase2

    def _testPhrases(self, language):
        """Exercise sequential numbers and random phrases for one language."""
        for number in range(100000):
            phrase = Phrases.forLanguage(language).toPhrase(number)
            self._testPhrase(language, phrase)
        for length in range(1, 20):
            phrase = self.mkPhrase(language, length)
            self._testPhrase(language, phrase)

    def testPhrase(self):
        """Ensure the expected language set exists, then test every language."""
        languages = Phrases.getLanguages()
        for language in [
                "chinese_simplified", "chinese_traditional", "english",
                "french", "italian", "japanese", "korean", "spanish"
        ]:
            assert language in languages
        for language in languages:
            self._testPhrases(language)
class TrafficGenerator(object):
    """Builds packet-arrival event lists (Poisson or periodic) for one user."""

    def __init__(self, user):
        """Store simulation parameters and derive a per-user IAT seed."""
        self.sim_param = user.sim_param
        # per-user seed keeps arrival streams independent and reproducible
        self.seed_iat = user.user_id + user.sim_param.SEED_OFFSET

    def poisson_arrivals(self, slicesim):
        """Generate Poisson arrivals for `slicesim` up to T_FINAL.

        Exponential inter-arrival times (rate = 1/MEAN_IAT) are drawn from a
        freshly-seeded RNG; one PacketArrival event is built per draw.
        :return: list of PacketArrival events ordered by arrival time.
        """
        mean_iat = slicesim.slice_param.MEAN_IAT
        self.rng = RNG(ExponentialRNS(1. / float(mean_iat), self.seed_iat),
                       s_type='iat')
        now = slicesim.sim_state.now
        horizon = self.sim_param.T_FINAL

        arrival_times = []
        gaps = []  # consecutive inter-arrival gaps (kept for inspection)
        t = now + self.rng.get_iat()
        while t < horizon:
            arrival_times.append(t)
            t += self.rng.get_iat()
            if len(arrival_times) > 2:
                gaps.append(arrival_times[-1] - arrival_times[-2])

        return [PacketArrival(slicesim, at) for at in arrival_times]

    def periodic_arrivals(self, slicesim):
        """Generate deterministic arrivals every MEAN_IAT up to T_FINAL."""
        step = slicesim.slice_param.MEAN_IAT
        start = slicesim.sim_state.now
        stop = self.sim_param.T_FINAL
        return [PacketArrival(slicesim, t)
                for t in np.arange(start, stop, step)]
def ran_simulation():
    """
    Main ran_simulation: build slices and users, run simulation rounds under
    the SD-RAN controller, then dump per-user counters to CSV and plot.
    """
    # define sim_param and inside RB pool: all available Resources list
    sim_param = SimParam()
    no_of_slices = sim_param.no_of_slices
    no_of_users_per_slice = sim_param.no_of_users_per_slice

    # create result directories
    create_dir(sim_param)

    # create logfile and write SimParameters
    results_dir = "results/" + sim_param.timestamp
    log_file = open(results_dir + "/logfile.txt", "wt")
    log_file.write('no_of_slices: %d\nno_of_users_per_slice: %d\n\n' %
                   (no_of_slices, no_of_users_per_slice))
    attrs = vars(sim_param)
    log_file.write('SimParam\n' + ''.join("%s: %s\n" % item
                                          for item in attrs.items()))
    # log_file.close()

    # initialize SD_RAN_Controller
    SD_RAN_Controller = Controller(sim_param)

    # Each slice has different users
    slices = []
    slice_results = []

    # initialize all slices
    for i in range(no_of_slices):
        slice_param_tmp = SliceParam(sim_param)
        slice_param_tmp.SLICE_ID = i
        slices.append(SliceSimulation(slice_param_tmp))
        slice_results.append([])

        # initialize all users with traffics and distances
        tmp_users = []
        seed_dist = 0  # users in all slices have identical distance distributions
        #rng_dist = RNG(ExponentialRNS(lambda_x=1. / float(sim_param.MEAN_Dist)), s_type='dist')  # , the_seed=seed_dist
        rng_dist = RNG(UniformRNS(sim_param.DIST_MIN, sim_param.DIST_MAX,
                                  the_seed=seed_dist), s_type='dist')
        # dist_arr = [10, 100 ]#[30, 30, 100, 100, 100, 100, 100, 100, 100, 100]  # 10*(1+user_id%no_of_users_per_slice)**2
        for j in range(no_of_users_per_slice):
            user_id = i * no_of_users_per_slice + j
            #tmp_users.append(User(user_id, rng_dist.get_dist(), slice_list=[slices[i]], sim_param=sim_param))
            # NOTE(review): dist_arr is only defined in the commented line
            # above, so the next statement raises NameError as written —
            # either restore dist_arr or switch back to rng_dist.get_dist().
            tmp_users.append(User(user_id, dist_arr[j],
                                  slice_list=[slices[i]], sim_param=sim_param))

        # insert user to slices
        slices[i].insert_users(tmp_users)

    # Choose Slice Manager Algorithm, 'PF': prop fair,
    # 'MCQI': Max Channel Quality Index, 'RR': round-robin
    slices[0].slice_param.SM_ALGO = 'RR'
    slices[1].slice_param.SM_ALGO = 'MCQI'
    slices[2].slice_param.SM_ALGO = 'PF'

    # log Slice Parameters
    for i in range(no_of_slices):
        attrs = vars(slices[i].slice_param)
        log_file.write('\nSliceParam\n' + ''.join("%s: %s\n" % item
                                                  for item in attrs.items()))
    #log_file.close()

    # loop rounds for each slice: one controller allocation per round of T_C
    for i in range(int(sim_param.T_FINAL / sim_param.T_C)):
        RB_mapping = SD_RAN_Controller.RB_allocate_to_slices(
            slices[0].sim_state.now, slices)
        for j in range(len(slices)):
            slices[j].prep_next_round(RB_mapping[j, :, :])
            slice_results[j].append(slices[j].simulate_one_round())

    # Store Simulation Results
    # user results
    parent_dir = "results/" + sim_param.timestamp + "/user_results"
    path = parent_dir + "/tp"
    for i in range(len(slice_results)):
        user_count = len(slice_results[i][-1].server_results)  # choose latest result for data
        for k in range(user_count):
            common_name = "/slice%d_user%d_" % (
                i, slice_results[i][-1].server_results[k].server.user.user_id)
            cc_temp = slice_results[i][-1].server_results[k].server.counter_collection
            # tp
            filename = parent_dir + "/tp" + common_name + "sum_power_two.csv"
            savetxt(filename, cc_temp.cnt_tp.sum_power_two, delimiter=',')
            filename = parent_dir + "/tp" + common_name + "values.csv"
            savetxt(filename, cc_temp.cnt_tp.values, delimiter=',')
            filename = parent_dir + "/tp" + common_name + "timestamps.csv"
            savetxt(filename, cc_temp.cnt_tp.timestamps, delimiter=',')
            filename = parent_dir + "/tp" + common_name + "all_data.csv"
            #savetxt(filename, np.transpose(np.array([cc_temp.cnt_tp.values,cc_temp.cnt_tp.timestamps])), delimiter=',')
            df = DataFrame(np.transpose(np.array(
                [cc_temp.cnt_tp.values, cc_temp.cnt_tp.timestamps])),
                columns=['Values', 'Timestamps'])
            export_csv = df.to_csv(filename, index=None, header=True)
            # tp2
            filename = parent_dir + "/tp2" + common_name + "sum_power_two.csv"
            savetxt(filename, cc_temp.cnt_tp2.sum_power_two, delimiter=',')
            filename = parent_dir + "/tp2" + common_name + "values.csv"
            savetxt(filename, cc_temp.cnt_tp2.values, delimiter=',')
            filename = parent_dir + "/tp2" + common_name + "timestamps.csv"
            savetxt(filename, cc_temp.cnt_tp2.timestamps, delimiter=',')
            # ql
            filename = parent_dir + "/ql" + common_name + "sum_power_two.csv"
            savetxt(filename, cc_temp.cnt_ql.sum_power_two, delimiter=',')
            filename = parent_dir + "/ql" + common_name + "values.csv"
            savetxt(filename, cc_temp.cnt_ql.values, delimiter=',')
            filename = parent_dir + "/ql" + common_name + "timestamps.csv"
            savetxt(filename, cc_temp.cnt_ql.timestamps, delimiter=',')
            # system time (delay)
            filename = parent_dir + "/delay" + common_name + "values.csv"
            savetxt(filename, cc_temp.cnt_syst.values, delimiter=',')
            filename = parent_dir + "/delay" + common_name + "timestamps.csv"
            savetxt(filename, cc_temp.cnt_syst.timestamps, delimiter=',')
            # Find how to insert histograms

    # plot results
    parent_dir = "results/" + sim_param.timestamp
    plot_results(parent_dir, no_of_slices, no_of_users_per_slice, sim_param,
                 slices)

    # rb dist printing
    filename = "results/" + sim_param.timestamp + "/summary"
    rb_total = 0
    rb_dist = []
    for s in slices:
        rb_dist_slice = []
        for u in s.server_list:
            rb_dist_slice.append(u.RB_counter)
        slicesum = np.nansum(rb_dist_slice)
        # per-slice RB share in percent, rounded to one decimal
        print("Slice %d dist: " % s.slice_param.SLICE_ID,
              *np.round(np.divide(rb_dist_slice, slicesum / 100), 1))
        # write these to file
        # NOTE(review): this re-writes the last user's cnt_ql data to the
        # same "summary" path on every iteration — TODO confirm intent
        savetxt(filename, cc_temp.cnt_ql.sum_power_two, delimiter=',')
        rb_dist.append(slicesum)
    totalsum = np.nansum(rb_dist)
    print("rb dist (RR MCQI PF): ",
          *np.round(np.divide(rb_dist, totalsum / 100), 1))
import os from rng import RNG from pwn import * from arts import maze, mazeDir, bossBG, gameOver, boom, win import time # # Variable initialization p = 198543992929796982831856294122484542605661595500963697094465382211596515703899 rng = RNG(p, pow(0xdeadbeef, 31337, p) // 2) print(bin(rng.M)[2:]) print(bin(rng.max)[2:]) print(bin(rng.entropy_pool)[2:]) print(len(bin(rng.entropy_pool)[2:])) M_list = [int(i) for i in bin(rng.M)[2:]] # now_status = [ int(i) for i in bin(rng.entropy_pool)[2:]]+ \ # [ 0 for i in range(256)] # for i in range(256): # if now_status[i] == 1: # for j,m in zip(range(i,i+256),M_list): # now_status[j+1] = (now_status[j+1] + m) % 2 # print(now_status[256:]) x = 0b1000000000011011011100101001000000011111101101101010001001111110100101011011001110100000110101100011110111100101000111001010000100110111010111101100101100010101100101101100111000010100111110100111111000111010000100110110111010110101010001101010000111111101 M3 = rng.M ^ (rng.M * 2) ^ (rng.M * 4) print(bin(M3)) print(bin(x)) # exit() r = remote('edu-ctf.zoolab.org', 8400) with open('path31337', 'r') as fin: for row in fin:
class Controller(object):
    """Central resource-block (RB) controller for the slicing simulator.

    Allocates the RBs in ``sim_param.RB_pool`` to network slices, either
    from an RL agent action (``RB_allocate_from_action``) or with a
    built-in algorithm (``RB_allocate_to_slices``). Every allocation is
    also logged column-by-column into ``self.df`` for later analysis.
    """

    def __init__(self, sim_param):
        """Store simulation parameters and set up allocation state.

        :param sim_param: simulation parameter object; must provide
            T_S, T_C, RB_pool, C_ALGO, no_of_slices, SEED_OFFSET, etc.
        """
        self.sim_param = sim_param
        # itertools.cycle over slice indices; built lazily on first 'RR' call
        self.slices_cycle = []
        # per-(slice, user) exponential moving average of rates for 'PF';
        # initialized lazily on the first PF allocation
        self.avg_rate_pf = []
        # one column per scheduling instant; rows are RB -> slice index
        self.df = pd.DataFrame()

        # random allocation: uniform slice index per RB
        self.rng_rb_allocate = RNG(UniformIntRNS(0, sim_param.no_of_slices - 1, the_seed=sim_param.SEED_OFFSET),
                                   s_type='rb_allocation')

    def RB_allocate_from_action(self, t_start, slice_list, action):
        # NOTE(review): methods 0, 1 and 3 below are disabled (kept as
        # string literals / comments); only "method 2" is active.
        '''"""
        method 0: 1slice-1agent: discrete action (0, no_of_rb), mapped to no_of_rb by bidding
        """
        # mapping actions to rbs
        no_of_rb = len(self.sim_param.RB_pool)
        if np.sum(action,axis=0)==0:
            bidding = np.sum(action,axis=1)/1.
        else:
            bidding = np.sum(action,axis=1)/np.sum(action,axis=0)
        bidding *= no_of_rb
        rb_allocation = np.floor(bidding).astype(int)
        rb_remaining = no_of_rb - np.sum(rb_allocation)
        while rb_remaining>0:
            bidding-=rb_allocation
            rb_allocation[np.argmax(bidding)]+=1  # only first occurance of max argument is returned
            rb_remaining-=1

        t_s = self.sim_param.T_S
        t_c = self.sim_param.T_C
        t_arr = np.arange(t_start, t_start+t_c, t_s)
        RB_mapping = np.zeros([len(slice_list), len(self.sim_param.RB_pool), len(t_arr)], dtype=bool)
        rb_idx = 0
        for i in range(len(slice_list)):
            count = rb_allocation[i]
            while count>0:
                RB_mapping[i, rb_idx, 0]= True
                rb_idx+=1
                count -=1'''
        ## -------------------------------------------------------
        '''"""
        method 1: 1agent: discrete action (0, no_of_slices ** no_of_rb), mapped to no_of_rb by convertion
        """
        no_of_rb = len(self.sim_param.RB_pool)

        def dec_to_base( num, base, size):  # Maximum base - 36
            base_num = ""
            while num > 0:
                dig = int(num % base)
                if dig < 10:
                    base_num += str(dig)
                else:
                    base_num += chr(ord('A') + dig - 10)  # Using uppercase letters
                num //= base
            while len(base_num)<size:
                base_num += str(0)
            base_num = base_num[::-1]  # To reverse the string
            return base_num

        rb_dist = dec_to_base(action,len(slice_list), no_of_rb)
        t_s = self.sim_param.T_S
        t_c = self.sim_param.T_C
        t_arr = np.arange(t_start, t_start + t_c, t_s)
        RB_mapping = np.zeros([len(slice_list), len(self.sim_param.RB_pool), len(t_arr)], dtype=bool)
        for i in range(no_of_rb):
            slice_idx = int(rb_dist[i])
            RB_mapping[slice_idx, i, 0] = True'''
        ## -------------------------------------------------------
        """
        method 2: 1agent: Multi discrete action ( no_of_rb * [(0,no_of_slices)] ) maps no_of_rb to slices
        """
        # action[i] is the slice index that owns RB i, for the whole round
        no_of_rb = action.size
        rb_dist = action
        t_s = self.sim_param.T_S
        t_c = self.sim_param.T_C
        t_arr = np.arange(t_start, t_start + t_c, t_s)
        # boolean tensor indexed [slice, RB, time-slot]
        RB_mapping = np.zeros([len(slice_list), len(self.sim_param.RB_pool), len(t_arr)], dtype=bool)
        for i in range(no_of_rb):
            slice_idx = int(rb_dist[i])
            # NOTE: assigning RB_mapping[slice_idx, i] (no time index) marks
            # the RB for ALL time slots of the round.
            RB_mapping[slice_idx, i] = True
        # ## -------------------------------------------------------
        # """
        # method 3: PF with trimming alpha
        # """
        # t_s = self.sim_param.T_S
        # t_c = self.sim_param.T_C
        # t_arr = np.arange(t_start, t_start + t_c, t_s)
        # RB_mapping = np.zeros([len(slice_list), len(self.sim_param.RB_pool), len(t_arr)], dtype=bool)
        #
        # # Prop Fair per RB, each user has avg_ratio( only works if each slice has same amount of users)
        # if len(self.avg_rate_pf) == 0:  # avg_rate initialization
        #     no_of_users = len(slice_list[0].user_list)
        #     self.avg_rate_pf = np.ones([RB_mapping.shape[0], no_of_users])  # indexing: slice,user
        #
        # avg_rate = self.avg_rate_pf
        # #alpha = (action + 1)/4  # -1,1 mapped to 0,1
        # if action==0: alpha = 0
        # elif action==1: alpha = 0.1
        # elif action==2: alpha = 0.5
        # else: RuntimeError("ERROR: wrong alpha.")
        # for j in range(RB_mapping.shape[1]):  # loop over RBs
        #     token_slice = -1
        #     token_user = -1
        #     max_ratio = -1
        #     for k in range(RB_mapping.shape[0]):  # loop over slices
        #         for u in range(len(slice_list[k].user_list)):  # loop over users
        #             tmp_user = slice_list[k].user_list[u]
        #             # tmp_CQI = tmp_user.channel.channel_gains[j, t_arr[i]]  # indexing: RB,time
        #             tmp_CQI = tmp_user.channel.get_data_rate([j], t_start)  # insert index of rb as list and time
        #             tmp_ratio = tmp_CQI / avg_rate[k, u]  # current rate average rate ratio, indexing: slice,user
        #             if max_ratio < tmp_ratio:
        #                 max_ratio = tmp_ratio
        #                 token_slice = k
        #                 token_user = u
        #     RB_mapping[token_slice, j] = True  # indexing: slice,RB
        #
        #     # updating average rates for next rb-time slot (alpha = 0.1)
        #     for k2 in range(RB_mapping.shape[0]):  # loop over slices
        #         for u2 in range(len(slice_list[k2].user_list)):  # loop over users
        #             if k2 == token_slice and u2 == token_user:
        #                 tmp_user = slice_list[k2].user_list[u2]
        #                 # tmp_CQI = tmp_user.channel.channel_gains[j, t_arr[i]]  # indexing: RB,time
        #                 tmp_CQI = tmp_user.channel.get_data_rate([j], t_start)  # insert index of rb as list and time
        #                 avg_rate[k2, u2] = (1 - alpha) * avg_rate[k2, u2] + alpha * tmp_CQI
        #             else:
        #                 avg_rate[k2, u2] = (1 - alpha) * avg_rate[k2, u2]
        #
        # ## -------------------------------------------------------

        # Storing data with dataframe: one column per time slot, value is the
        # owning slice index for each RB.
        RB_allocation = np.full(RB_mapping.shape[1:], np.nan, dtype=int)
        for i in range(len(slice_list)):
            RB_allocation[RB_mapping[i] == True] = i
        t_arr = np.arange(t_start, t_start + t_c, t_s)
        idx = 0
        for t in t_arr:
            self.df[t] = RB_allocation[:, idx]
            idx += 1
        return RB_mapping

    def get_CQI_data(self, slice_list):
        '''
        return CQI values for each slice* user* rb* time combination within next round(T_C)
        :param slice_list:
        :return: nested lists indexed [slice][user][time][RB] (wrapped once
            more in CQI_data; see NOTE below)
        '''
        t_s = self.sim_param.T_S
        t_c = self.sim_param.T_C
        t_start = slice_list[0].sim_state.now
        t_arr = np.arange(t_start, t_start + t_c, t_s)
        # CQI_matrix = np.zeros([len(slice_list),len(slice_list[0].server_list), len(self.sim_param.RB_pool), len(t_arr)])
        CQI_data = []
        tmp_CQI_slice = []
        for s in slice_list:  # loop over slices
            tmp_CQI_user = []
            for u in s.user_list:  # loop over users
                tmp_CQI_time = []
                for t in t_arr:  # loop over time
                    tmp_CQI_rb = []
                    for r in self.sim_param.RB_pool:  # loop over RBs
                        data_rate = u.channel.get_data_rate([r], t)  # insert index of rb as list, time
                        tmp_CQI = data_rate  # / s.slice_param.P_SIZE
                        tmp_CQI_rb.append(tmp_CQI)
                    tmp_CQI_time.append(tmp_CQI_rb)
                tmp_CQI_user.append(tmp_CQI_time)
            tmp_CQI_slice.append(tmp_CQI_user)
        # NOTE(review): the per-slice lists are accumulated in tmp_CQI_slice
        # and appended to CQI_data once, so CQI_data has a single extra
        # nesting level — confirm callers expect CQI_data[0][slice][...].
        CQI_data.append(tmp_CQI_slice)
        return CQI_data

    def RB_allocate_to_slices(self, t_start, slice_list):
        """Allocate RBs to slices with the algorithm named by C_ALGO.

        Supported: 'H' (hard/equal split), 'Random', 'RR' (weighted round
        robin), 'MCQI' (max channel quality), 'PF' (proportional fair).
        Returns a boolean RB_mapping indexed [slice, RB, time-slot].
        """
        t_s = self.sim_param.T_S
        t_c = self.sim_param.T_C
        t_arr = np.arange(t_start, t_start + t_c, t_s)
        RB_mapping = np.zeros([len(slice_list), len(self.sim_param.RB_pool), len(t_arr)], dtype=bool)

        # Hard slicing: each slice gets an equal contiguous share of RBs
        if self.sim_param.C_ALGO == 'H':
            no_of_RB_per_slice = int(RB_mapping.shape[1] / RB_mapping.shape[0])
            for i in range(RB_mapping.shape[0]):
                for j in range(no_of_RB_per_slice):
                    rb_idx = i * no_of_RB_per_slice + j
                    RB_mapping[i][rb_idx] = True

        # random allocation
        elif self.sim_param.C_ALGO == 'Random':
            for j in range(RB_mapping.shape[1]):  # loop over RBs
                tmp_slice_idx = self.rng_rb_allocate.get_rb()
                RB_mapping[tmp_slice_idx][j] = True

        # round robin
        elif self.sim_param.C_ALGO == 'RR':
            if not isinstance(self.slices_cycle, cycle):
                # region : weighted cycle
                #self.slices_cycle = cycle(np.arange(0, RB_mapping.shape[0]))
                cycle_list = []
                # weight : user no
                # weight_list = list(self.sim_param.no_of_users_list)
                # weight : user no * tp_req
                # a0 = np.array ([s.slice_param.RATE_REQ for s in slice_list])
                a0 = np.array(self.sim_param.rate_requirements)
                a1 = np.divide(a0, np.gcd.reduce(a0))
                b0 = np.array(self.sim_param.no_of_users_list)
                b1 = np.divide(b0, np.gcd.reduce(b0))
                weight_list = list(b1 * a1)
                # interleave slice indices proportionally to their weights
                while max(weight_list) > 0:
                    for i in range(len(weight_list)):
                        if weight_list[i] > 0:
                            cycle_list.append(i)
                            weight_list[i] -= 1
                self.slices_cycle = cycle(cycle_list)
                # endregion : weighted cycle
            for j in range(RB_mapping.shape[1]):  # loop over RBs
                tmp_slice_idx = next(self.slices_cycle)
                RB_mapping[tmp_slice_idx][j] = True

        # Max CQI
        elif self.sim_param.C_ALGO == 'MCQI':
            # CQI is calculated considering user with max CQI in a slice
            CQI_arr = np.zeros([len(slice_list), len(self.sim_param.RB_pool)])
            for i in range(len(slice_list)):  # loop over slices
                for k in range(RB_mapping.shape[1]):  # loop over RBs
                    max_CQI = -1
                    for u in slice_list[i].user_list:  # loop over users
                        # tmp_CQI2 = u.channel.channel_gains[k, t_arr[j]]  # indexing: RB,time
                        tmp_CQI = u.channel.get_data_rate([k], t_start)  # insert index of rb as list, time
                        if max_CQI < tmp_CQI:
                            max_CQI = tmp_CQI
                    CQI_arr[i, k] = max_CQI  # indexing: slice,RB

            # For each rb and time, map rb to max valued CQI_arr values owner slice
            for j in range(RB_mapping.shape[1]):  # loop over RBs
                max_CQI = -1
                for k in range(RB_mapping.shape[0]):  # loop over slices
                    tmp_CQI = CQI_arr[k, j]  # indexing: slice,RB
                    if max_CQI < tmp_CQI:
                        max_CQI = tmp_CQI
                        token = k
                RB_mapping[token, j] = True  # indexing: slice,RB

        # Prop Fair per RB, each user has avg_ratio( only works if each slice has same amount of users)
        elif self.sim_param.C_ALGO == 'PF':
            if len(self.avg_rate_pf) == 0:  # avg_rate initialization
                no_of_users = len(slice_list[0].user_list)
                self.avg_rate_pf = np.ones([RB_mapping.shape[0], no_of_users])  # indexing: slice,user

            avg_rate = self.avg_rate_pf
            alpha = self.sim_param.ALPHA_C
            for j in range(RB_mapping.shape[1]):  # loop over RBs
                token_slice = -1
                token_user = -1
                max_ratio = -1
                for k in range(RB_mapping.shape[0]):  # loop over slices
                    for u in range(len(slice_list[k].user_list)):  # loop over users
                        tmp_user = slice_list[k].user_list[u]
                        # tmp_CQI = tmp_user.channel.channel_gains[j, t_arr[i]]  # indexing: RB,time
                        tmp_CQI = tmp_user.channel.get_data_rate([j], t_start)  # insert index of rb as list and time
                        tmp_ratio = tmp_CQI / avg_rate[k, u]  # current rate average rate ratio, indexing: slice,user
                        if max_ratio < tmp_ratio:
                            max_ratio = tmp_ratio
                            token_slice = k
                            token_user = u
                RB_mapping[token_slice, j] = True  # indexing: slice,RB

                # updating average rates for next rb-time slot (alpha = 0.1)
                for k2 in range(RB_mapping.shape[0]):  # loop over slices
                    for u2 in range(len(slice_list[k2].user_list)):  # loop over users
                        if k2 == token_slice and u2 == token_user:
                            tmp_user = slice_list[k2].user_list[u2]
                            # tmp_CQI = tmp_user.channel.channel_gains[j, t_arr[i]]  # indexing: RB,time
                            tmp_CQI = tmp_user.channel.get_data_rate([j], t_start)  # insert index of rb as list and time
                            avg_rate[k2, u2] = (1 - alpha) * avg_rate[k2, u2] + alpha * tmp_CQI
                        else:
                            avg_rate[k2, u2] = (1 - alpha) * avg_rate[k2, u2]

        # Storing data with dataframe: one column per time slot
        RB_allocation = np.full(RB_mapping.shape[1:], np.nan, dtype=int)
        for i in range(len(slice_list)):
            RB_allocation[RB_mapping[i] == True] = i
        t_arr = np.arange(t_start, t_start + t_c, t_s)
        idx = 0
        for t in t_arr:
            self.df[t] = RB_allocation[:, idx]
            idx += 1
        return RB_mapping
Available bit lengths: {', '.join(str(k) for k in BITS)} ''' ).strip()) exit(1) try: executions = int(argv[3]) except IndexError: executions = 10 try: seed = 88172645463325252 for i in range(bits): seed += 2 ** bits avg = 0 rng = iter(RNG( ALGORITHMS[algorithm], seed=seed, limit = 2 ** bits )) for i in range(executions): start = now() n = next(rng) avg += into_milliseconds(now() - start) / executions print(f'Average of {executions} executions of {algorithm} ({bits} bits): {avg}') except KeyError: print(f'Valid bitlengths: {[*BITS.keys()]}')
def __generate_angles(self, theta, phi):
    """Return a (theta, phi) angle pair, drawing any missing value from RNG.

    :param theta: polar angle, or None to sample via RNG.theta()
    :param phi: azimuthal angle, or None to sample via RNG.phi()
    :return: tuple (theta, phi) with both values populated
    """
    # PEP 8: compare to None with `is`, not `==` (a custom __eq__ on the
    # angle type could otherwise break this check).
    if theta is None:
        theta = RNG.theta()
    if phi is None:
        phi = RNG.phi()
    return theta, phi
if SHOW_ANIMATION: bMap = str_map(landscape, showSugar=SHOW_SUGAR) print(f"t = {t:4.32}, alive = {len(agentList.agentList)}\n{bMap}") if PAUSE: # Print nice statistics and await input # Empty input == continue to next event print(nice_statistics(agentList, t)) repeatInput = True while repeatInput: uInput = input(f"Input t={t}> ") repeatInput = interpret(uInput, agentList, calendar, landscape, calendar.now()) rng = RNG(SEED) eventList = EventCalendar() landscape = Landscape(ROWS, COLUMNS, rng=rng) agentList = AgentList(AGENTS, landscape, eventList, rng=rng) eventList.set_preevent(preoperation, landscape, eventList) eventList.set_postevent(postoperation, landscape, eventList, agentList) ## Pre simulation ## print(f"Seed: {SEED}") print(nice_statistics(agentList, 0)) init_map = str_map(landscape, showSugar=SHOW_SUGAR) ## Simulation ## eventList.run(MAX_T)
def __init__(self, trials=64, rng=None):
    """Initialize with a trial count and a random number generator.

    :param trials: number of trials to run (default 64)
    :param rng: RNG instance to use; if None a fresh RNG is created.

    The old signature ``rng=RNG()`` evaluated the default ONCE at class
    definition time, so every instance constructed without an explicit
    rng silently shared the same generator (classic mutable-default bug).
    Using a None sentinel gives each instance its own RNG instead.
    """
    self.trials = trials
    self.rng = RNG() if rng is None else rng
import sys import argparse parser = argparse.ArgumentParser(description="Small python based path tracer") parser.add_argument("--samples", type=int, default=4) parser.add_argument( "--passtype", type=str, choices=['albedo', 'normal', 'indirect', 'direct', 'depth'], default="direct") args = parser.parse_args() if __name__ == "__main__": rng = RNG() nb_samples = args.samples w = 1024 h = 768 #setup orthographic camera eye = Vector3(50, 52, 295.6) gaze = Vector3(0, -0.042612, -1).normalize() fov = 0.5135 cam_x = Vector3(w * fov / h, 0.0, 0.0) cam_y = cam_x.cross(gaze).normalize() * fov Ls = [None] * w * h for i in range(w * h): Ls[i] = Vector3()
class ChannelModalCts(object):
    """Continuous-time channel model for a single user.

    Computes per-RB noise, pathloss (free-space + log-distance + shadowing),
    Shannon data rates, and the transmitted-load / serving-duration
    bookkeeping used by the packet scheduler. Time is measured in ms;
    rates are in bits/s, so one ms of service carries rate * 1e-3 bits.
    """

    def __init__(self, user):
        """Precompute noise and the per-RB shadowing traces for *user*."""
        self.user = user
        self.RB_pool = user.sim_param.RB_pool
        '''self.rng = RNG(ExponentialRNS(1. / float(user.sim_param.MEAN_CG), user.sim_param.SEED_CG), s_type='cg')
        # self.rng = RNG(UniformRNS(2*float(user.sim_param.MEAN_CG), 0., user.sim_param.SEED_CG), s_type='cg')
        # self.rng = RNG(ExponentialRNS(1. / float(user.sim_param.MEAN_CG), user.user_id), s_type='cg')
        # self.rng = RNG(UniformRNS(2. , 0., 21), s_type='cg')
        # self.channel_gains = (user.user_id%2+0.1)*self.rng.get_cg(len(self.RB_pool), user.sim_param.T_FINAL + user.sim_param.T_C)
        #self.channel_gains = np.random.rand(len(self.RB_pool), user.sim_param.T_FINAL+user.sim_param.T_C)
        #self.channel_gains = np.ones((len(self.RB_pool), user.sim_param.T_FINAL + user.sim_param.T_C))'''
        # constant thermal noise per RB, in dBm
        self.noise_per_rb_dBm = self.get_noise_per_rb()
        # seed_shadowing = self.user.user_id  # % self.user.sim_param.no_of_users_per_slice
        # self.rng_shadowing = RNG(NormalRNS(0, self.user.sim_param.SIGMA_shadowing, seed_shadowing), s_type='shadowing')
        # self.shadowing = self.rng_shadowing.get_shadowing(user.sim_param.T_FINAL)
        # 2-D shadowing trace indexed [RB, time(ms)]
        self.shadowing = self.get_shadowing()

    def get_shadowing(self):
        """Build one shadowing trace per RB; rows indexed [RB, time].

        Seeds are derived from user_id so traces are reproducible; when
        freq_selective is set, each RB gets its own seed (independent
        fading per RB), otherwise all RBs share one trace.
        """
        t_final = self.user.sim_param.T_FINAL
        t_c = self.user.sim_param.T_C
        t_coh = self.user.sim_param.T_COH  # in ms
        buffer = 3  # for the packets in simulation final
        shadowing = np.empty((0, int(t_final) + t_coh * (buffer - 1)))
        seed_shadowing = (self.user.user_id * len(self.RB_pool)) + self.user.sim_param.SEED_OFFSET
        for i in range(len(self.RB_pool)):
            self.rng_shadowing = RNG(NormalRNS(0, self.user.sim_param.SIGMA_shadowing, seed_shadowing), s_type='shadowing')
            temp_shadowing = self.rng_shadowing.get_shadowing2(t_final, t_c, t_coh, buffer)
            shadowing = np.append(shadowing, [temp_shadowing], axis=0)
            if self.user.sim_param.freq_selective:
                seed_shadowing += 1
        return shadowing

    def get_noise_per_rb(self):
        """
        returns noise in dB
        P_noise_dBm = 10*log10(k_boltzmann * temperature * bandwidth / 1mW)
        """
        try:
            k_boltzmann = constants.Boltzmann
            temperature = self.user.sim_param.TEMPERATURE
            rb_bw = self.user.sim_param.RB_BANDWIDTH
            P_noise_dBm = 10 * np.log10(k_boltzmann * temperature * rb_bw / 1e-3)
        except:
            # NOTE(review): bare except hides the original error; consider
            # `raise RuntimeError(...) from e`. The `return` below the raise
            # is unreachable dead code.
            raise RuntimeError("Noise calculation error")
            return
        return P_noise_dBm

    def get_pathloss(self, rb, time):
        """
        returns pathloss in dB
        PL= FSPL(f,1m)+10n log_10(d/1m)+ shadowing
        FSPL(f,1m)=20 log_10(4πf/c)
        shadowing = per-RB, per-ms sample from self.shadowing
        """
        try:
            f = self.user.sim_param.FREQ
            n = self.user.sim_param.PL_Exponent
            d = self.user.distance  # in meter
            c = constants.c
            pi = constants.pi
            shadowing_dB = self.shadowing[rb, time]
            fs_pl_dB = 20 * np.log10(4 * pi * f / c)
            pl_dB = fs_pl_dB + 10 * n * np.log10(d) + shadowing_dB
        except:
            raise RuntimeError("Path loss calculation error")
        return pl_dB

    def get_data_rate(self, rb_arr, time):
        """
        add effect of time in deterministic manner!!!!
        rate = BW * log2(1 + SINR) in bits per sec
        :return: summation of data rate of given user for given resource blocks
        """
        try:
            rate = 0
            rb_bw = self.user.sim_param.RB_BANDWIDTH
            # time must be (numerically) a whole ms; shadowing is per-ms
            if time - int(time) > 0.0001:
                raise RuntimeError("Time variable is not integer!!")
            if rb_arr == [-1]:  # sentinel: no RB assigned
                return 0
            for rb in rb_arr:
                pathloss_dB = self.get_pathloss(rb, time)
                SINR_dB = self.user.sim_param.P_TX_dBm - pathloss_dB - self.noise_per_rb_dBm  # SNR(dB) = Pt-PL-P_noise
                SINR = np.power(10, SINR_dB / 10)
                rate += rb_bw * np.log2(1 + SINR)
                #rate += 1000  # for testing each ms 1 bit is served
        except:
            raise RuntimeError("Data Rate Calculation error")
        return rate

    def get_load_change2(self, rb_arr, t_start, t_end):
        """
        :return: data rate * duration of given user for given resource blocks at given time interval(float)
            data rate is calculated separately for each ms and summed.
            load_change in bits
        """
        try:
            if t_start == t_end:
                return 0
            t0_f = int(np.floor(t_start))
            t0_c = int(np.ceil(t_start))
            t1_f = int(np.floor(t_end))
            if t0_c > t1_f:  # interval within the same ms
                assert (t0_f == t1_f)
                data_rate = self.get_data_rate(rb_arr, t0_f)
                duration = (t_end - t_start) * 1e-3
                load_change = duration * data_rate
            else:  # interval on different ms's
                # partial first ms
                data_rate_0 = self.get_data_rate(rb_arr, t0_f)
                duration_0 = (t0_c - t_start) * 1e-3
                load_change_0 = duration_0 * data_rate_0
                # partial last ms
                data_rate_1 = self.get_data_rate(rb_arr, t1_f)
                duration_1 = (t_end - t1_f) * 1e-3
                load_change_1 = duration_1 * data_rate_1
                # whole ms's in between
                load_change_btw = 0
                t_arr = np.arange(t0_c, t1_f, dtype=int)
                for t in t_arr:
                    load_change_btw += self.get_data_rate(rb_arr, t) * 1e-3
                load_change = load_change_0 + load_change_1 + load_change_btw
        except:
            raise RuntimeError("Error during transmitted load calculation")
        return load_change

    def get_load_change(self, rb_arr, time_arr):
        """
        :return: data rate * duration of given user for given resource blocks at given time array
            data rate is calculated separately for each ms and summed.
            load_change in bits
        """
        try:
            if len(time_arr) == 0:
                return 0
            load_change = 0
            for t in time_arr:
                if isinstance(t, float):
                    raise RuntimeError("time must be integer")
                load_change += np.nansum(self.get_data_rate(rb_arr, t) * 1e-3)  # rate * 1ms
        except:
            raise RuntimeError("Error during transmitted load calculation")
        return load_change
        # np.sum(self.channel_gains[rb_arr][time])  # user_id-1 for indexing

    def get_serving_duration2(self, packet):
        """
        Change the status of the packet once the serving process starts.

        Walks forward one ms at a time until the accumulated load covers
        packet.remaining_load, then interpolates within the last ms to get
        the exact finish time (sets packet.t_finish / t_finish_real).
        """
        RB_arr = packet.server.RB_list
        try:
            t_end_temp = np.ceil(packet.t_last_start)
            load_change = self.get_load_change2(RB_arr, packet.t_last_start, t_end_temp)
            while load_change < packet.remaining_load:
                t_end_temp += 1
                load_change = self.get_load_change2(RB_arr, packet.t_last_start, t_end_temp)
            # linear interpolation inside the final ms
            r_last = self.get_load_change2(RB_arr, t_end_temp - 1, t_end_temp)
            t_delta = (load_change - packet.remaining_load) / r_last
            t_end = t_end_temp - t_delta
            assert (np.isclose(packet.remaining_load, self.get_load_change2(RB_arr, packet.t_last_start, t_end)))
            packet.t_finish_real = t_end
            packet.t_finish = packet.t_finish_real
        except:
            raise RuntimeError("Error during get_serving_duration")

    def get_serving_duration(self, packet):
        """
        Change the status of the packet once the serving process starts.

        Older ms-granularity variant of get_serving_duration2: accumulates
        whole-ms loads and interpolates the final partial ms.
        """
        RB_arr = packet.server.RB_list
        #t_s = self.user.sim_param.T_S
        t_temp = packet.t_last_start
        if (packet.t_last_start % 1.) == 0:
            t_temp = packet.t_last_start
            t_arr = np.arange(t_temp, t_temp + 1)
            r_temp = self.get_load_change(RB_arr, t_arr)
        else:
            # started mid-ms: scale the first ms's load by the remaining fraction
            t_1 = packet.t_last_start % 1.
            t_temp = int(packet.t_last_start - t_1)
            t_arr = np.arange(t_temp, t_temp + 1)
            r_temp0 = self.get_load_change(RB_arr, t_arr)
            r_temp = r_temp0 * (1. - t_1) / 1.
        try:
            while r_temp < packet.remaining_load:
                t_temp += 1
                t_arr = np.arange(t_temp, t_temp + 1)
                r_temp += self.get_load_change(RB_arr, t_arr)
        except:
            raise RuntimeError("Error during get_serving_duration")
        r_last = self.get_load_change(RB_arr, t_arr)
        t_last = (packet.remaining_load - (r_temp - r_last)) / (r_last / 1.)
        t_est = t_temp + t_last
        #packet.estimated_duration = t_est - packet.slicesim.sim_state.now
        #packet.occupation_duration = t_temp+t_s-packet.slicesim.sim_state.now
        packet.t_finish_real = t_est  #+packet.slicesim.sim_state.t_round_start
        packet.t_finish = packet.t_finish_real

    def update_remaining_load2(self, packet):
        """
        substract load change since the latest serving from remaining load
        return tp = load_change / duration
        """
        RB_arr = packet.server.RB_list  # function is called before RB_list is changed
        t_start = (packet.t_arrival + (packet.d_wait + packet.d_served))  # starting time of latest serve
        load_change = self.get_load_change2(RB_arr, t_start, packet.slicesim.sim_state.now)
        packet.remaining_load -= load_change
        if packet.remaining_load < 0:
            raise RuntimeError("Remaining load can't be negative!")
        if packet.slicesim.sim_state.now - t_start != 0:
            tp = float(load_change) / float(packet.slicesim.sim_state.now - t_start)
            return tp  # throughput in kilobits per second: bits / ms
        else:
            return float(0)

    def update_remaining_load(self, packet):
        """
        substract load change since the latest serving from remaining load
        return tp = load_change / duration
        """
        #t_s = self.user.sim_param.T_S
        RB_arr = packet.server.RB_list  # function is called before RB_list is changed
        t_start = (packet.t_arrival + (packet.d_wait + packet.d_served))  # starting time of latest serve
        if (t_start % 1) > 0.0000001:
            # started mid-ms: r_temp0 is the already-elapsed (not received) part
            t_0 = (t_start % 1)
            t_1 = int(t_start - t_0)
            t_arr = np.arange(t_1, t_1 + 1, dtype=int)
            r_temp0 = self.get_load_change(RB_arr, t_arr) * t_0 / 1.
        else:
            t_1 = int(t_start)
            r_temp0 = 0
        t_arr = np.arange(t_1, packet.slicesim.sim_state.now, dtype=int)
        r_temp = self.get_load_change(RB_arr, t_arr)
        packet.remaining_load -= (r_temp - r_temp0)  # rtemp0 is not received part
        if packet.remaining_load < 0:
            raise RuntimeError("Remaining load can't be negative!")
        if packet.slicesim.sim_state.now - t_start != 0:
            tp = float(r_temp - r_temp0) / float(packet.slicesim.sim_state.now - t_start)
            return tp  # throughput in kilobits per second
        else:
            return float(0)

    def get_remaining_load(self, packet):
        """
        returns the remaining load without changing anything
        initially same as update_remaining_load
        """
        RB_arr = packet.server.RB_list  # function is called before RB_list is changed
        t_start = (packet.t_arrival + (packet.d_wait + packet.d_served))  # starting time of latest serve
        if (t_start % 1) > 0.0001:
            t_0 = (t_start % 1)
            t_1 = int(t_start - t_0)
            t_arr = np.arange(t_1, t_1 + 1)
            r_temp0 = self.get_load_change(RB_arr, t_arr) * t_0 / 1.
        else:
            t_1 = int(t_start)
            r_temp0 = 0
        t_arr = np.arange(t_1, packet.slicesim.sim_state.now)
        r_temp = self.get_load_change(RB_arr, t_arr)
        return packet.remaining_load - (r_temp - r_temp0)  # rtemp0 is not received part

    def get_throughput_rt(self, packet):
        """
        NOT USED
        returns throughput for round termination event without changing anything
        """
        RB_arr = packet.server.RB_list  # function is called before RB_list is changed
        t_start = (packet.t_arrival + (packet.d_wait + packet.d_served))  # starting time of latest serve
        if (t_start % 1) > 0.0001:
            t_0 = (t_start % 1)
            t_1 = int(t_start - t_0)
            t_arr = np.arange(t_1, t_1 + 1)
            r_temp0 = self.get_load_change(RB_arr, t_arr) * t_0 / 1.
        else:
            t_1 = int(t_start)
            r_temp0 = 0
        t_arr = np.arange(t_1, packet.slicesim.sim_state.now)
        r_temp = self.get_load_change(RB_arr, t_arr)
        if packet.slicesim.sim_state.now - t_start != 0:
            tp = float(r_temp - r_temp0) / float(packet.slicesim.sim_state.now - t_start)  # total served size over time
            return tp  #throughput
        else:
            return float(0)

    def get_throughput_sc(self, packet):
        """
        Only when service is completed, tp in Kbps
        """
        t_start = (packet.t_arrival + (packet.d_wait + packet.d_served))  # starting time of latest serve
        if packet.slicesim.sim_state.now - t_start != 0:
            tp = float(packet.remaining_load) / float(packet.slicesim.sim_state.now - t_start)  # total served size over time
            return tp  #throughput
        else:
            return float(0)

    def get_throughput_sc_pauseless(self, packet):
        """
        Only when service is completed, counts the datarate when packet is served. tp in Kbps
        """
        #t_start = packet.t_last_start  # int(round(packet.t_arrival + (packet.d_wait + packet.d_served)))  # starting time of latest serve
        t_start = packet.t_start
        if packet.slicesim.sim_state.now - t_start != 0:
            tp = float(packet.size) / float(packet.slicesim.sim_state.now - t_start)  # total served size over time
            return tp  #throughput
        else:
            return float(0)

    def get_CQI_list(self, server, RB_mapping_sm):
        """
        returns data_rate values for each rb at each timeslot in T_sm
        only get the rate at the beginning of new Ts period.
        Unmapped RBs are reported as -1.
        """
        CQI_list = []
        t_s = self.user.sim_param.T_S
        t_temp = int(server.slicesim.sim_state.now)  # - packet.slicesim.sim_state.t_round_start)
        if server.slicesim.sim_state.now - t_temp > 0.0001:
            raise RuntimeError("time calculation error in get_CQI_list")
        # fill RB_matching_sm
        for i in range(RB_mapping_sm.shape[1]):  # loop over time
            RB_arr = []
            tmp_arr = []
            for j in range(RB_mapping_sm.shape[0]):  # loop over RBs
                if RB_mapping_sm[j][i]:
                    #RB_arr.append(j)
                    #tmp_arr.append(self.get_load_change([j], t_arr))
                    tmp_arr.append(self.get_data_rate([j], t_temp))
                else:
                    tmp_arr.append(-1.)
            #CQI_list.append(self.get_output_rate(RB_arr, t_arr))
            CQI_list.append(tmp_arr)
            t_temp += t_s  # increase time by t_s for the next slots rate
            #t_arr = np.arange(t_temp, t_temp + t_s)
        return CQI_list  # indexing: [time], [RB]
class Dungeon(Observer):
    """Dungeon now is mostly a container for levels, and handles rendering.

    Python 2 / libtcod code. Keeps one RNG-derived seed per level, owns the
    tcod FOV/pathing map, and reacts to game events (move/die/harvest/create)
    to keep tile and tcod-map state in sync.
    """

    def __init__(self, game, seed):
        logger.info('Making dungeon')
        self.game = game
        # master RNG used only to derive one integer seed per level
        self.rng = RNG(seed=seed)
        self.levels = []
        for i in range(NUM_LEVELS):
            self.levels.append(self.rng.get_int(0, 1000000))
        self.rng.delete()
        self.tcod_map = libtcod.map_new(LEVEL_W, LEVEL_H)
        #does the fov need to be refreshed (ie new level, player moved...)
        self.refresh_fov = True
        #does the tcod map need to be refreshed (ie terrain change)
        self.refresh_tcod = True
        #positions of entities that block movement or sight
        self.creature_positions = []

    def generate_level(self, depth):
        """Replace the stored seed at *depth* with a generated Level and
        return the player start position for it."""
        seed = self.levels[depth]
        approved = False
        while not approved:
            start = t = time.time()
            self.levels[depth] = Level(self.game, seed, LEVEL_W, LEVEL_H)
            print "Created level: \t%s"%str(time.time()-t)
            '''t = time.time()
            self.levels[depth].rect_automata_cave_gen()
            print "Generated caves: \t%s"%str(time.time()-t)
            t = time.time()
            self.levels[depth].smooth_caves()
            print "Smoothed caves: \t%s"%str(time.time()-t)
            t = time.time()
            self.levels[depth].find_caves()
            print "Found caves: \t%s"%str(time.time()-t)
            t = time.time()
            self.levels[depth].remove_caves_by_size()
            print "Removed caves (1):\t%s"%str(time.time()-t)
            t = time.time()
            self.levels[depth].connect_caves()
            print "Connected caves: \t%s"%str(time.time()-t)
            t = time.time()
            self.levels[depth].remove_isolated_caves()
            print "Removed caves (2):\t%s"%str(time.time()-t)
            print "Total time: \t%s"%str(time.time()-start)
            '''
            self.levels[depth].experimental_cave_gen()
            # NOTE(review): `or True` forces approval — evaluate() is
            # effectively disabled (debug leftover?).
            if self.levels[depth].evaluate() or True:
                approved = True
                print "Approved"
            else:
                seed += 1
                print "Rejected"
            print ""
        self.compute_tcod_map()
        self.levels[depth].tag_rooms()
        self.levels[depth].populate_rooms()
        self.compute_tcod_map()
        return self.levels[depth].get_start_pos()

    def compute_tcod_map(self):
        """Rebuild the tcod map from the current level's tiles, then mark
        non-player entities as sight-passable but move-blocking."""
        for x in range(LEVEL_W):
            for y in range(LEVEL_H):
                tile = self.game.cur_level.get_tile(x, y)
                libtcod.map_set_properties(self.tcod_map, x, y, tile.see_through, tile.move_through)
        for entity in self.game.entities:
            if entity != self.game.player:
                x, y = entity.pos
                libtcod.map_set_properties(self.tcod_map, x, y, True, False)

    def refresh_creature_positions(self):
        """Clear the previously-marked creature cells back to their tile
        properties, then re-mark every living entity's current position."""
        for pos in self.creature_positions:
            tile = self.game.cur_level.get_tile(*pos)
            libtcod.map_set_properties(self.tcod_map, pos[0], pos[1], tile.see_through, tile.move_through)
        self.creature_positions = []
        for entity in self.game.living_entities:
            self.creature_positions.append(entity.pos)
            if entity != self.game.player:
                libtcod.map_set_properties(self.tcod_map, entity.pos[0], entity.pos[1], entity.see_through, entity.move_through)

    def on_notify(self, event):
        """Observer callback: keep tile/creature/item occupancy and the
        tcod map consistent with MOVE/DIE/HARVEST/CREATE events."""
        #WARNING: assumes items don't affect tcod map
        if event.event_type == EVENT_MOVE:
            entity = event.actor
            from_tile = self.levels[entity.depth].get_tile(*event.from_pos)
            to_tile = self.levels[entity.depth].get_tile(*event.to_pos)
            if entity.creature and entity.creature.alive:
                from_tile.creature = None
                to_tile.creature = entity
                if entity != self.game.player:
                    libtcod.map_set_properties(self.tcod_map, event.from_pos[0], event.from_pos[1], from_tile.see_through, from_tile.move_through)
                    libtcod.map_set_properties(self.tcod_map, event.to_pos[0], event.to_pos[1], entity.see_through, entity.move_through)
            elif entity.item:
                from_tile.item = None
                to_tile.item = entity
            if event.actor == self.game.player:
                self.refresh_fov = True
        elif event.event_type == EVENT_DIE:
            entity = event.actor
            tile = self.levels[entity.depth].get_tile(entity.x, entity.y)
            tile.creature = None
            libtcod.map_set_properties(self.tcod_map, entity.x, entity.y, tile.see_through, tile.move_through)
            if not tile.item:
                tile.item = entity
            else:
                # tile already holds an item: drop the corpse on the nearest
                # free walkable tile, searching outward ring by ring
                placed_corpse = False
                x, y = entity.pos
                r = 1
                while not placed_corpse:
                    positions = []
                    for try_x in range(entity.x - r, entity.x + r + 1):
                        positions.append((try_x, entity.y + r))
                        positions.append((try_x, entity.y - r))
                    for try_y in range(entity.y - r, entity.y + r + 1):
                        positions.append((entity.x + r, try_y))
                        positions.append((entity.x - r, try_y))
                    i = 0
                    while i < len(positions) and not placed_corpse:
                        try_tile = self.levels[entity.depth].get_tile(*positions[i])
                        if try_tile.move_through and not try_tile.item:
                            try_tile.item = entity
                            placed_corpse = True
                            entity.x = positions[i][0]
                            entity.y = positions[i][1]
                        i += 1
                    r += 1
        elif event.event_type == EVENT_HARVEST:
            entity = event.corpse
            tile = self.levels[entity.depth].get_tile(entity.x, entity.y)
            tile.item = None
        elif event.event_type == EVENT_CREATE:
            entity = event.actor
            tile = self.levels[entity.depth].get_tile(entity.x, entity.y)
            if entity.creature and entity.creature.alive:
                tile.creature = entity
            elif entity.item:
                tile.item = entity
            libtcod.map_set_properties(self.tcod_map, entity.x, entity.y, entity.see_through, entity.move_through)

    def update(self):
        """Lazily rebuild the tcod map and/or the player's FOV when the
        corresponding dirty flags are set."""
        if self.refresh_tcod:
            self.compute_tcod_map()
            self.refresh_tcod = False
        if self.refresh_fov:
            #libtcod.map_compute_fov(self.tcod_map,
            #                        self.game.player.x,
            #                        self.game.player.y)
            self.game.player.fov.refresh()
            self.game.cur_level.explore()
            self.refresh_fov = False

    def render(self, focus_x, focus_y, con, overlay=None):
        """Draw the visible window of the current level centered on
        (focus_x, focus_y); optional OVERLAY_FOV tints cells seen by
        awake, living non-player entities."""
        level = self.game.cur_level
        x_offset = 0 + focus_x - MAP_W // 2
        y_offset = 0 + focus_y - MAP_H // 2
        for x in range(MAP_W):
            for y in range(MAP_H):
                map_x = x + x_offset
                map_y = y + y_offset
                char = ' '
                color = libtcod.black
                background = libtcod.black
                if map_x in range(level.w) and map_y in range(level.h):
                    tile = level.get_tile(map_x, map_y)
                    explored = tile.explored
                    if explored:
                        char = tile.char
                        visible = self.game.player.fov(map_x, map_y)
                        if visible:
                            color = tile.color
                            if overlay == OVERLAY_FOV:
                                fov_num = 0
                                for entity in self.game.active_entities:
                                    if self.game.player.fov(entity) and (not entity is self.game.player) and entity.creature.ai.state != AI_SLEEPING and entity.creature.alive:
                                        fov_num = max(fov_num, entity.fov(map_x, map_y))
                                if fov_num == 1:
                                    background = libtcod.darkest_blue
                                elif fov_num == 2:
                                    background = libtcod.darker_blue
                                elif fov_num == 3:
                                    background = libtcod.dark_blue
                        else:
                            color = tile.color_not_visible
                con.put_char_ex(x, y, char, color, background)
u = (Vector3(0.0, 1.0, 0.0) if abs(w[0]) > 0.1 else Vector3( 1.0, 0.0, 0.0)).cross(w).normalize() v = w.cross(u) sample_d = cosine_weighted_sample_on_hemisphere( rng.uniform_float(), rng.uniform_float()) d = (sample_d[0] * u + sample_d[1] * v + sample_d[2] * w).normalize() r = Ray(p, d, tmin=Sphere.EPSILON_SPHERE, depth=r.depth + 1) continue import sys if __name__ == "__main__": rng = RNG() nb_samples = int(sys.argv[1]) // 4 if len(sys.argv) > 1 else 1 w = 1024 h = 768 eye = Vector3(50, 52, 295.6) gaze = Vector3(0, -0.042612, -1).normalize() fov = 0.5135 cx = Vector3(w * fov / h, 0.0, 0.0) cy = cx.cross(gaze).normalize() * fov Ls = [None] * w * h for i in range(w * h): Ls[i] = Vector3()