def forward(self, input, fwd_noise):
    """Encode `input`, pass the codeword through the configured channel,
    then decode the (possibly quantized) received codeword.

    Returns:
        (x_dec, codes): decoder output and the clean encoder output.
    """
    # Refresh the encoder/decoder interleavers when interleaving is enabled.
    if self.args.is_interleave != 0:
        # Fixed seed (0) pins one permutation for the whole run; otherwise a
        # fresh random permutation is drawn every call.
        seed = 0 if self.args.is_same_interleaver != 0 else np.random.randint(0, 1000)
        permutation = RandInterlv.RandInterlv(self.args.block_len, seed).p_array
        self.enc.set_interleaver(permutation)
        self.dec.set_interleaver(permutation)

    codes = self.enc(input)
    channel = self.args.channel

    # Channel models: additive noise, erasure, bit flip, or Rayleigh fading.
    if channel in ['awgn', 't-dist', 'radar', 'ge_awgn']:
        received_codes = codes + fwd_noise
    elif channel == 'bec':
        # Erasure channel: fwd_noise acts as a 0/1 erasure mask.
        received_codes = codes * fwd_noise
    elif channel in ['bsc', 'ge']:
        # Flip channel: map the 0/1 noise mask to -1/+1 sign flips.
        received_codes = (codes * (2.0 * fwd_noise - 1.0)).type(torch.FloatTensor)
    elif channel == 'fading':
        # Non-coherent Rayleigh fading gain, normalized by sqrt(pi/2).
        shape = codes.shape
        gain = torch.sqrt(torch.randn(shape) ** 2 + torch.randn(shape) ** 2)
        gain = gain / torch.sqrt(torch.tensor(3.14 / 2.0))
        gain = gain.type(torch.FloatTensor).to(self.this_device)
        received_codes = gain * codes + fwd_noise
    else:
        print('default AWGN channel')
        received_codes = codes + fwd_noise

    if self.args.rec_quantize:
        # NOTE(review): both quantizer arguments are rec_quantize_level —
        # confirm the second was not meant to be a different setting.
        received_codes = MyQuantize.apply(received_codes,
                                          self.args.rec_quantize_level,
                                          self.args.rec_quantize_level)

    x_dec = self.dec(received_codes)
    return x_dec, codes
def turbo_enc(X_train_raw, args, p_array):
    """Turbo-encode every block of `X_train_raw` with the permutation `p_array`.

    Args:
        X_train_raw: tensor of shape (num_block, block_len, >=1); bits are
            taken from channel 0 of each block.
        args: config carrying `encoder` and `block_len`.
        p_array: interleaver permutation to use for all blocks.

    Returns:
        FloatTensor of shape (num_block, block_len, 3): systematic bits and
        the two parity streams, stacked on the last axis.
    """
    # Pick the constituent convolutional code: LTE-style (13, 11) or (7, 5).
    if args.encoder == 'Turbo_rate3_lte':
        memory, gens, fb = np.array([3]), np.array([[13, 11]]), 13
    else:  # Turbo-757
        memory, gens, fb = np.array([2]), np.array([[7, 5]]), 7

    trellis_a = cc.Trellis(memory, gens, feedback=fb)
    trellis_b = cc.Trellis(memory, gens, feedback=fb)

    interleaver = RandInterlv.RandInterlv(args.block_len, 0)
    interleaver.p_array = p_array  # override with the caller-supplied permutation

    encoded_blocks = []
    for blk in range(X_train_raw.shape[0]):
        bits = np.array(X_train_raw[blk, :, 0].type(torch.IntTensor).detach())
        sys_bits, parity_a, parity_b = turbo.turbo_encode(bits, trellis_a,
                                                          trellis_b, interleaver)
        encoded_blocks.append(np.array([sys_bits, parity_a, parity_b]).T)

    return torch.from_numpy(np.array(encoded_blocks)).type(torch.FloatTensor)
def forward(self, input, fwd_noise):
    """Encode `input`, modulate, apply channel noise, demodulate, decode.

    Returns:
        (x_dec, symbols): decoder output and the clean modulated symbols.

    Raises:
        NotImplementedError: if args.channel == 'fading' (not supported here).
    """
    # Setup Interleavers.
    if self.args.is_interleave == 0:
        pass
    elif self.args.is_same_interleaver == 0:
        # Fresh random permutation every call.
        interleaver = RandInterlv.RandInterlv(self.args.block_len,
                                              np.random.randint(0, 1000))
        p_array = interleaver.p_array
        self.enc.set_interleaver(p_array)
        self.dec.set_interleaver(p_array)
    else:  # self.args.is_same_interleaver == 1
        interleaver = RandInterlv.RandInterlv(self.args.block_len, 0)  # not random anymore!
        p_array = interleaver.p_array
        self.enc.set_interleaver(p_array)
        self.dec.set_interleaver(p_array)

    codes = self.enc(input)
    symbols = self.mod(codes)

    # Setup channel mode:
    if self.args.channel in ['awgn', 't-dist', 'radar', 'ge_awgn']:
        received_symbols = symbols + fwd_noise
    elif self.args.channel == 'fading':
        # BUG FIX: the original only printed 'Fading not implemented' and fell
        # through, leaving `received_symbols` unbound — the subsequent
        # self.demod(received_symbols) raised a confusing NameError.
        # Fail loudly and clearly instead.
        raise NotImplementedError('Fading channel is not implemented for modulated codes')
    else:
        print('default AWGN channel')
        received_symbols = symbols + fwd_noise

    if self.args.rec_quantize:
        myquantize = MyQuantize.apply
        # NOTE(review): both quantizer arguments are rec_quantize_level —
        # confirm the second was not meant to be a different setting.
        received_symbols = myquantize(received_symbols,
                                      self.args.rec_quantize_level,
                                      self.args.rec_quantize_level)

    x_rec = self.demod(received_symbols)
    x_dec = self.dec(x_rec)
    return x_dec, symbols
def __init__(self, args):
    """Build the two constituent trellises and a fixed interleaver from `args`."""
    memory = np.array([args.M])
    gens = np.array([[args.enc1, args.enc2]])
    fb = args.feedback
    # Identical trellis data structures for both constituent encoders.
    self.trellis1 = cc.Trellis(memory, gens, feedback=fb)
    self.trellis2 = cc.Trellis(memory, gens, feedback=fb)
    # Seed 0 => the same permutation on every run (deterministic interleaver).
    self.interleaver = RandInterlv.RandInterlv(args.block_len, 0)
    self.p_array = self.interleaver.p_array.astype(np.int32)
    self.args = args
    print('[Convolutional Code Codec] Encoder', 'M ', memory,
          ' Generator Matrix ', gens, ' Feedback ', fb)
print '[BCJR Setting Parameters] Training num_epoch is ', args.num_epoch print '[BCJR Setting Parameters] Turbo Decoding Iteration ', args.num_dec_iteration print '[BCJR Setting Parameters] RNN Direction is ', args.rnn_direction print '[BCJR Setting Parameters] RNN Model Type is ', args.rnn_setup print '[BCJR Setting Parameters] Number of RNN layer is ', args.num_Dec_layer print '[BCJR Setting Parameters] Number of RNN unit is ', args.num_Dec_unit M = np.array([args.M]) generator_matrix = np.array([[args.enc1, args.enc2]]) feedback = args.feedback trellis1 = cc.Trellis(M, generator_matrix, feedback=feedback) # Create trellis data structure trellis2 = cc.Trellis(M, generator_matrix, feedback=feedback) # Create trellis data structure interleaver = RandInterlv.RandInterlv(args.block_len, 0) p_array = interleaver.p_array print '[BCJR Code Codec] Encoder', 'M ', M, ' Generator Matrix ', generator_matrix, ' Feedback ', feedback codec = [trellis1, trellis2, interleaver] print '[BCJR Setting Parameters] Training Data SNR is ', args.train_snr, ' dB' print '[BCJR Setting Parameters] Code Block Length is ', args.block_len print '[BCJR Setting Parameters] Number of Train Block is ', args.num_block_train, ' Test Block ', args.num_block_test model = build_decoder(args) bcjr_inputs_train, bcjr_outputs_train = generate_bcjr_example( args.num_block_train, args.block_len, codec, is_save=False,
# ============================================================================= # Example showing the encoding and decoding of convolutional codes # ============================================================================= # G(D) corresponding to the convolutional encoder # Create trellis data structure #trellis1 = cc.Trellis(np.array([3]), np.array([[11,13]]),feedback=11) #trellis2 = cc.Trellis(np.array([3]), np.array([[11,13]]),feedback=11) #print('trellis: cc.Trellis(np.array([3]), np.array([[11,13]]),feedback=11) ') trellis1 = cc.Trellis(np.array([2]), np.array([[7, 5]]), feedback=7) trellis2 = cc.Trellis(np.array([2]), np.array([[7, 5]]), feedback=7) print('trellis: cc.Trellis(np.array([2]), np.array([[7,5]]),feedback=7) ') interleaver = RandInterlv.RandInterlv(k, 0) nb_errors = np.zeros(test_sigmas.shape) map_nb_errors = np.zeros(test_sigmas.shape) nb_block_no_errors = np.zeros(test_sigmas.shape) tic = time.clock() for iterations in range(iterations_number): print(iterations) message_bits = np.random.randint(0, 2, k) [sys, par1, par2] = turbo.turbo_encode(message_bits, trellis1, trellis2, interleaver) for idx in range(len(test_sigmas)):
def generate_viterbi_batch(batch_size=100, block_len=200, code_rate=2,
                           batch_criteria={}, seed=0):
    """Generate one batch of noisy turbo-encoded codewords for Viterbi training.

    Args:
        batch_size: number of noisy examples in the batch.
        block_len: message length in bits.
        code_rate: streams kept per bit in the output (sys + par1 => 2).
        batch_criteria: dict with at least 'noise_type' and 'snr'; also
            forwarded to corrupt_signal as its `metrics`.
        seed: seed for the local RandomState (messages and per-example noise
            seeds are drawn from it, so the batch is reproducible).

    Returns:
        (noisy_codewords, true_messages) of shapes
        (batch_size, block_len, code_rate) and (batch_size, block_len).

    NOTE(review): the mutable default `batch_criteria={}` is a Python
    anti-pattern — confirm callers always pass a dict (doc-only pass here).
    """
    noise_type = batch_criteria["noise_type"]
    SNR = batch_criteria["snr"]
    rng = np.random.RandomState(seed)
    # print("[generate_viterbi_batch] block_len, code_rate", block_len, code_rate)

    # Rate-1/3 turbo code from two identical (7, 5) convolutional trellises.
    trellis1 = cc.Trellis(np.array([2]), np.array([[7, 5]]))
    trellis2 = cc.Trellis(np.array([2]), np.array([[7, 5]]))
    #print('trellis: cc.Trellis(np.array([2]), np.array([[7,5]]))')
    # G(D) corresponding to the convolutional encoder

    tic = time.time()

    ### TEST EXAMPLES
    # Initialize Test Examples/
    noisy_codewords = np.zeros([1, batch_size, block_len, 2])
    true_messages = np.zeros([1, batch_size, block_len, 1])
    iterations_number = batch_size

    #for idx in range(SNR_points):
    nb_errors = np.zeros([iterations_number, 1])  # NOTE(review): never updated below

    tic = time.time()

    # SNR (dB) -> noise standard deviation.
    noise_sigmas = 10**(-SNR * 1.0 / 20)

    mb_test_collect = np.zeros([iterations_number, block_len])

    # Fixed-seed interleaver: the same permutation for every call.
    interleaver = RandInterlv.RandInterlv(block_len, 0)

    # NOTE(review): the message and its turbo encoding are hoisted OUTSIDE the
    # loop below, so every example in the batch reuses the same codeword and
    # only the noise realization differs — confirm this is intended (the
    # similar generate_examples() draws a fresh message per block).
    message_bits = rng.randint(0, 2, block_len)
    # mb_test_collect[iterations,:] = message_bits
    [sys, par1, par2] = turbo.turbo_encode(message_bits, trellis1, trellis2,
                                           interleaver)
    # print("[debug] noise type ", noise_type, " noise_sigmas ", noise_sigmas,
    #       "vv", vv, "radar_power", radar_pow, "radar_prob", radar_prob)

    for iterations in range(iterations_number):
        # Independent noise seeds per stream, all derived from `rng`.
        noise_seed1 = rng.randint(1, 999999)
        noise_seed2 = rng.randint(1, 999999)
        noise_seed3 = rng.randint(1, 999999)
        # print("seeds ", noise_seed1, noise_seed2)

        sys_r = corrupt_signal(input_signal = sys, noise_type = noise_type,
                               sigma = noise_sigmas,
                               metrics = batch_criteria, seed = noise_seed1)
        par1_r = corrupt_signal(input_signal = par1, noise_type = noise_type,
                                sigma = noise_sigmas,
                                metrics = batch_criteria, seed = noise_seed2)
        # NOTE(review): par2_r is computed but never packed into the rate-2
        # output below — presumably kept to mirror the full rate-1/3 code.
        par2_r = corrupt_signal(input_signal = par2, noise_type = noise_type,
                                sigma = noise_sigmas,
                                metrics = batch_criteria, seed = noise_seed3)
        # print("sys_r ", sys_r, flush=True)
        # print("par1_r", par1_r, flush=True)

        # ADD Training Examples
        noisy_codewords[0, iterations, :, :] = np.concatenate(
            [sys_r.reshape(block_len, 1), par1_r.reshape(block_len, 1)], axis=1)

        # Message sequence
        true_messages[0, iterations, :, :] = message_bits.reshape(block_len, 1)

    noisy_codewords = noisy_codewords.reshape(batch_size, block_len, code_rate)
    true_messages = true_messages.reshape(batch_size, block_len)
    # target_true_messages = mb_test_collect.reshape([mb_test_collect.shape[0],mb_test_collect.shape[1],1])

    toc = time.time()
    #print('time to generate test examples:', toc-tic)

    return (noisy_codewords, true_messages)
def generate_examples(k_test=1000, step_of_history=200, SNR=0, code_rate=2):
    """Generate turbo-encoded, BPSK-modulated, AWGN-corrupted example blocks.

    Args:
        k_test: total number of message bits; split into
            k_test // step_of_history blocks.
        step_of_history: block length in bits.
        SNR: signal-to-noise ratio in dB (sigma = 10**(-SNR/20)).
        code_rate: streams kept per bit in the output (sys + par1 => 2).

    Returns:
        (noisy_codewords, true_messages, target_true_messages) with shapes
        (B, L, code_rate), (B, L, 1) and (B, L, 1), where
        B = k_test // step_of_history and L = step_of_history.

    Cleanup vs. original: removed dead locals (`nb_errors`, a duplicate
    `tic`, `sys_symbols`, `non_sys_symbols_1/2`, unused `toc`); the
    np.random call sequence is unchanged, so outputs under a fixed global
    seed are identical.
    """
    # Rate-1/3 turbo code from two identical (7, 5) convolutional trellises.
    trellis1 = cc.Trellis(np.array([2]), np.array([[7, 5]]))
    trellis2 = cc.Trellis(np.array([2]), np.array([[7, 5]]))

    num_blocks = int(k_test / step_of_history)
    noisy_codewords = np.zeros([1, num_blocks, step_of_history, 2])
    true_messages = np.zeros([1, num_blocks, step_of_history, 1])

    # SNR (dB) -> noise standard deviation.
    noise_sigmas = 10**(-SNR * 1.0 / 20)

    mb_test_collect = np.zeros([num_blocks, step_of_history])

    # Fixed-seed interleaver: the same permutation for every call.
    interleaver = RandInterlv.RandInterlv(step_of_history, 0)

    for blk in range(num_blocks):
        # Fresh random message per block.
        message_bits = np.random.randint(0, 2, step_of_history)
        mb_test_collect[blk, :] = message_bits
        [sys, par1, par2] = turbo.turbo_encode(message_bits, trellis1,
                                               trellis2, interleaver)

        # BPSK-modulate (0/1 -> -1/+1) and add white Gaussian noise per stream.
        noise = noise_sigmas * np.random.standard_normal(sys.shape)
        sys_r = (2 * sys - 1) + noise
        noise = noise_sigmas * np.random.standard_normal(par1.shape)
        par1_r = (2 * par1 - 1) + noise
        # par2_r is still drawn (keeps the RNG stream identical to the
        # original) but, as before, only sys and par1 enter the rate-2 output.
        noise = noise_sigmas * np.random.standard_normal(par2.shape)
        par2_r = (2 * par2 - 1) + noise

        noisy_codewords[0, blk, :, :] = np.concatenate([
            sys_r.reshape(step_of_history, 1),
            par1_r.reshape(step_of_history, 1)
        ], axis=1)

        # Message sequence.
        true_messages[0, blk, :, :] = message_bits.reshape(step_of_history, 1)

    noisy_codewords = noisy_codewords.reshape(num_blocks, step_of_history,
                                              code_rate)
    true_messages = true_messages.reshape(num_blocks, step_of_history, 1)
    target_true_messages = mb_test_collect.reshape(
        [mb_test_collect.shape[0], mb_test_collect.shape[1], 1])

    return (noisy_codewords, true_messages, target_true_messages)
def system(args, optimizer, enc, dec, use_cuda=False, verbose=True):
    """Run one training pass of the encoder/decoder pair on random data.

    Args:
        args: config namespace (num_block, batch_size, block_len*, channel, ...).
        optimizer: torch optimizer over the trainable parameters.
        enc: encoder exposing encode() and set_interleaver().
        dec: decoder callable exposing set_interleaver().
        use_cuda: move data to CUDA when True.
        verbose: print the average loss when True.

    Returns:
        Average training loss over the pass.

    Bug fixes vs. original: `self.args` -> `args` (plain function, no self);
    noise shape uses the per-batch `block_len`; the loss is computed from
    `x_dec`/`codes` (previously undefined `output`/`code`); the summary print
    no longer references undefined `epoch`/`start_time`/`end_time`.
    """
    device = torch.device("cuda" if use_cuda else "cpu")
    train_loss = 0.0

    for batch_idx in range(int(args.num_block / args.batch_size)):
        if args.is_variable_block_len:
            block_len = np.random.randint(args.block_len_low, args.block_len_high)
        else:
            block_len = args.block_len

        optimizer.zero_grad()

        # Generate random message bits and channel noise.
        X_train = torch.randint(0, 2,
                                (args.batch_size, block_len, args.code_rate_k),
                                dtype=torch.float)
        # BUG FIX: use the (possibly variable) block_len, not args.block_len,
        # so the noise matches the codeword shape.
        noise_shape = (args.batch_size, block_len, args.code_rate_n)
        fwd_noise = generate_noise(noise_shape, args,
                                   snr_low=args.train_dec_channel_low,
                                   snr_high=args.train_dec_channel_high,
                                   mode='decoder')

        X_train, fwd_noise = X_train.to(device), fwd_noise.to(device)

        # Refresh the interleavers when interleaving is enabled.
        if args.is_interleave == 0:
            pass
        elif args.is_same_interleaver == 0:
            interleaver = RandInterlv.RandInterlv(args.block_len,
                                                  np.random.randint(0, 1000))
            p_array = interleaver.p_array
            enc.set_interleaver(p_array)
            dec.set_interleaver(p_array)
        else:  # args.is_same_interleaver == 1
            interleaver = RandInterlv.RandInterlv(args.block_len, 0)  # not random anymore!
            p_array = interleaver.p_array
            enc.set_interleaver(p_array)
            dec.set_interleaver(p_array)

        codes = enc.encode(X_train)

        # BUG FIX: channel config lives on `args`; the original read
        # `self.args.channel` inside a plain function (NameError).
        if args.channel in ['awgn', 't-dist', 'radar', 'ge_awgn', 'bikappa']:
            received_codes = codes + fwd_noise
        elif args.channel == 'bec':
            received_codes = codes * fwd_noise
        elif args.channel in ['bsc', 'ge']:
            received_codes = codes * (2.0 * fwd_noise - 1.0)
            received_codes = received_codes.type(torch.FloatTensor)
        else:
            print('default AWGN channel')
            received_codes = codes + fwd_noise

        if args.rec_quantize:
            myquantize = MyQuantize.apply
            received_codes = myquantize(received_codes,
                                        args.rec_quantize_level,
                                        args.rec_quantize_level)

        x_dec = dec(received_codes)
        # BUG FIX: the original passed undefined `output` and `code`.
        loss = customized_loss(x_dec, X_train, args, noise=fwd_noise, code=codes)

        loss.backward()
        train_loss += loss.item()
        optimizer.step()

    train_loss = train_loss / (args.num_block / args.batch_size)
    if verbose:
        print('====> Average loss: {:.8f}'.format(train_loss))
    return train_loss
target_train_select = bcjr_outputs_train[:,:,0] + bcjr_inputs_train[:,:,2] target_train_select[:,:] = math.e**target_train_select[:,:]*1.0/(1+math.e**target_train_select[:,:]) X_input = bcjr_inputs_train.reshape(-1,block_len,input_feature_num) X_target = target_train_select.reshape(-1,block_len,1) return X_input, X_target if __name__ == '__main__': import commpy.channelcoding.interleavers as RandInterlv import commpy.channelcoding.convcode as cc M = np.array([2]) # Number of delay elements in the convolutional encoder generator_matrix = np.array([[7, 5]]) feedback = 7 trellis1 = cc.Trellis(M, generator_matrix,feedback=feedback)# Create trellis data structure trellis2 = cc.Trellis(M, generator_matrix,feedback=feedback)# Create trellis data structure interleaver = RandInterlv.RandInterlv(100, 0) p_array = interleaver.p_array print('[Turbo Codec] Encoder', 'M ', M, ' Generator Matrix ', generator_matrix, ' Feedback ', feedback) ########################################## # Setting Up RNN Model ########################################## codec = [trellis1, trellis2, interleaver] generate_bcjr_example(num_block=10000, block_len=100, codec=codec, num_iteration=6)