def turbo_enc(X_train_raw, args, p_array):
    num_block = X_train_raw.shape[0]
    x_code = []

    if args.encoder == 'Turbo_rate3_lte':        # Turbo-LTE
        M = np.array([3])                        # Number of delay elements in the convolutional encoder
        generator_matrix = np.array([[13, 11]])  # Generator polynomials of the convolutional encoder
        feedback = 13                            # Feedback polynomial of the convolutional encoder
    else:                                        # Turbo-757
        M = np.array([2])                        # Number of delay elements in the convolutional encoder
        generator_matrix = np.array([[7, 5]])    # Generator polynomials of the convolutional encoder
        feedback = 7                             # Feedback polynomial of the convolutional encoder

    trellis1 = cc.Trellis(M, generator_matrix, feedback=feedback)
    trellis2 = cc.Trellis(M, generator_matrix, feedback=feedback)
    interleaver = RandInterlv.RandInterlv(args.block_len, 0)
    interleaver.p_array = p_array

    for idx in range(num_block):
        # print(X_train_raw[idx, :, 0])
        np_inputs = np.array(X_train_raw[idx, :, 0].type(torch.IntTensor).detach())
        [sys, par1, par2] = turbo.turbo_encode(np_inputs, trellis1, trellis2, interleaver)
        xx = np.array([sys, par1, par2]).T
        x_code.append(xx)

    return torch.from_numpy(np.array(x_code)).type(torch.FloatTensor)
def conv_encoder(message_bits, fb):
    generator_matrix = np.array([[0o7, 0o5]])
    M = np.array([2])
    if fb:
        trellis = cc.Trellis(M, generator_matrix, feedback=7)
    else:
        trellis = cc.Trellis(M, generator_matrix)
    return 2 * cc.conv_encode(np.asarray(message_bits), trellis) - 1
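# Hedged round-trip sketch for the conv_encoder above (fb=False). It assumes `cc` is
# commpy.channelcoding.convcode as in the snippet, and that viterbi_decode with
# decoding_type='unquantized' expects +/-1 channel values; noiseless channel.
import numpy as np
import commpy.channelcoding.convcode as cc

msg = np.random.randint(0, 2, 100)
tx = conv_encoder(msg, fb=False)                     # BPSK-mapped code bits in {-1, +1}
trellis = cc.Trellis(np.array([2]), np.array([[0o7, 0o5]]))
rx = cc.viterbi_decode(tx.astype(float), trellis, tb_depth=15,
                       decoding_type='unquantized')
print("exact recovery:", np.array_equal(rx[:len(msg)], msg))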
def __init__(self, args):
    M = np.array([args.M])
    generator_matrix = np.array([[args.enc1, args.enc2]])
    feedback = args.feedback

    self.trellis1 = cc.Trellis(M, generator_matrix, feedback=feedback)  # trellis data structure
    self.trellis2 = cc.Trellis(M, generator_matrix, feedback=feedback)  # trellis data structure
    self.interleaver = RandInterlv.RandInterlv(args.block_len, 0)
    self.p_array = self.interleaver.p_array.astype(np.int32)
    self.args = args

    print('[Convolutional Code Codec] Encoder', 'M ', M,
          ' Generator Matrix ', generator_matrix, ' Feedback ', feedback)
def conv_enc(X_train_raw, args):
    import commpy.channelcoding.convcode as cc

    num_block = X_train_raw.shape[0]
    block_len = X_train_raw.shape[1]
    x_code = []

    M = np.array([2])  # Number of delay elements in the convolutional encoder
    generator_matrix = np.array([[args.enc1, args.enc2]])
    feedback = args.feedback
    trellis = cc.Trellis(M, generator_matrix, feedback=feedback)  # Create trellis data structure

    for idx in range(num_block):
        xx = cc.conv_encode(X_train_raw[idx, :, 0], trellis, 'rsc')
        xx1 = xx[::2]
        xx2 = xx[1::2]
        xx1 = xx1[:-int(M)]
        xx2 = xx2[:-int(M)]
        xx = np.array([xx1, xx2]).T
        # xx = xx[:-2*int(M)]
        # xx = xx.reshape((block_len, 2))
        x_code.append(xx)

    return np.array(x_code)
def conv_decoder(coded_bits):
    generator_matrix = np.array([[0o5, 0o7]])
    M = np.array([2])
    trellis = cc.Trellis(M, generator_matrix)
    tb_depth = 5 * (M.sum() + 1)
    return 2 * cc.viterbi_decode(((coded_bits + 1) / 2).astype(float), trellis, tb_depth) - 1
def wavarunner(message_numbers, upperbound, snr, index):
    memory = array([8])
    g_matrix = array([[0o515, 0o677]])
    trellis = cc.Trellis(memory, g_matrix)

    total_errors = 0
    for i in range(message_numbers):
        total_errors += wava(upperbound, snr, trellis)
        print("pass: {}, {}".format(i, index))

    return total_errors / (128 * message_numbers)
def fec_decoder(self, data_in):
    memory = np.array([6])
    g_matrix = np.array([[91, 121]])  # G(D) = [1+D^2+D^3+D^5+D^6, 1+D+D^2+D^3+D^6]
    trellis = cc.Trellis(memory, g_matrix)
    data_out = cc.viterbi_decode(data_in, trellis, tb_depth=int(len(data_in) / 2))
    return data_out[:-6]  # drop the tail bits corresponding to the 6 memory elements
def conv_decoder(coded_bits):
    generator_matrix = np.array([[0o5, 0o7]])
    M = np.array([2])
    trellis = cc.Trellis(M, generator_matrix)
    tb_depth = 5 * (M.sum() + 1)
    return cc.viterbi_decode(coded_bits.astype(float), trellis, tb_depth,
                             decoding_type='unquantized')
def __init__(self, d1, d2, m):
    self.d1 = d1
    self.d2 = d2
    self.m = m  # Number of delay elements in the convolutional encoder
    self.generator_matrixNSC = np.array([[self.d1, self.d2]])  # G(D) corresponding to the convolutional encoder
    self.trellisNSC = cc.Trellis(np.array([self.m]), self.generator_matrixNSC)  # Create trellis data structure
    self.tb_depth = 5 * (self.m + 1)  # Traceback depth of the decoder
    self.code_rate = self.trellisNSC.k / self.trellisNSC.n  # the code rate

    ## get impulse response
    self.impulse_response = self.commpy_encode_sequence(
        np.concatenate([np.array([1], dtype=np.int8), np.zeros([self.m], dtype=np.int8)], axis=0)
    ).astype(np.int8)
def convolutional_encode(message_bits, generator_matrix, memory):
    """
    Given a sequence of input bits, a generator matrix, and a memory
    specification, builds the corresponding Trellis and returns the
    convolutionally encoded bits.
    """
    trellis = cc.Trellis(memory, generator_matrix)
    coded_bits = cc.conv_encode(message_bits, trellis)
    return coded_bits
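# Minimal usage sketch for convolutional_encode above, assuming `cc` is
# commpy.channelcoding.convcode as in the snippet. The values are illustrative:
# the rate-1/2 code G(D) = [1+D^2, 1+D+D^2], i.e. octal (5, 7), with memory 2.
import numpy as np

msg = np.array([1, 0, 1, 1, 0, 0, 1, 0])
coded = convolutional_encode(msg, np.array([[0o5, 0o7]]), np.array([2]))
print(len(coded))   # typically 2 * (len(msg) + memory) with commpy's terminated encoding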
def conv_decode_bench(args):
    num_block = 100

    ##########################################
    # Setting Up Codec
    ##########################################
    M = np.array([2])  # Number of delay elements in the convolutional encoder
    generator_matrix = np.array([[args.enc1, args.enc2]])
    feedback = args.feedback
    trellis1 = cc.Trellis(M, generator_matrix, feedback=feedback)  # Create trellis data structure

    SNRS, test_sigmas = get_test_sigmas(args.snr_test_start, args.snr_test_end, args.snr_points)
    tb_depth = 15

    commpy_res_ber = []
    commpy_res_bler = []

    nb_errors = np.zeros(test_sigmas.shape)
    map_nb_errors = np.zeros(test_sigmas.shape)
    nb_block_no_errors = np.zeros(test_sigmas.shape)

    for idx in range(len(test_sigmas)):
        results = []
        print(num_block)
        #pool = mp.Pool(processes=args.num_cpu)
        #results = pool.starmap(turbo_compute, [(idx, x) for x in range(num_block)])
        for x in range(num_block):
            results.append(turbo_compute(args, idx, x, trellis1, test_sigmas, M))

        for result in results:
            if result == 0:
                nb_block_no_errors[idx] = nb_block_no_errors[idx] + 1

        nb_errors[idx] += sum(results)
        #print('[testing]SNR: ', SNRS[idx])
        print('[testing]BER: ', sum(results) / float(args.block_len * num_block))
        #print('[testing]BLER: ', 1.0 - nb_block_no_errors[idx]/args.num_block)
        commpy_res_ber.append(sum(results) / float(args.block_len * num_block))
        commpy_res_bler.append(1.0 - nb_block_no_errors[idx] / num_block)

    print('[Result]SNR: ', SNRS)
    print('[Result]BER', commpy_res_ber)
    print('[Result]BLER', commpy_res_bler)

    return commpy_res_ber, commpy_res_bler
def conv_enc(X_train_raw, args):
    num_block = X_train_raw.shape[0]
    block_len = X_train_raw.shape[1]
    x_code = []

    generator_matrix = np.array([[args.enc1, args.enc2]])
    M = np.array([args.M])  # Number of delay elements in the convolutional encoder
    trellis = cc.Trellis(M, generator_matrix, feedback=args.feedback)  # Create trellis data structure

    for idx in range(num_block):
        xx = cc.conv_encode(X_train_raw[idx, :, 0], trellis)
        xx = xx[2 * int(M):]  # drop 2*M coded bits so the remaining 2*block_len bits reshape cleanly
        xx = xx.reshape((block_len, 2))
        x_code.append(xx)

    return np.array(x_code)
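# Hedged usage sketch for the conv_enc above. The args fields mirror what the function
# reads (enc1, enc2, M, feedback); the (7, 5) RSC values here are just one plausible choice.
from types import SimpleNamespace
import numpy as np

args = SimpleNamespace(enc1=7, enc2=5, M=2, feedback=7)
X_train_raw = np.random.randint(0, 2, size=(4, 100, 1))   # (num_block, block_len, 1)
X_code = conv_enc(X_train_raw, args)
print(X_code.shape)   # expected (4, 100, 2): two coded streams per message bit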
def _make_trellis() -> cc.Trellis:
    """
    Convolutional Code:
        G(D) = [[1, 0, 0], [0, 1, 1+D]]
        F(D) = [[D, D], [1+D, 1]]

    :return: trellis object implementing this convolutional encoding scheme
    """
    # Number of delay elements in the convolutional encoder
    memory = np.array((1, 1))

    # Generator matrix & feedback matrix
    g_matrix = np.array(((1, 0, 0), (0, 1, 3)))
    feedback = np.array(((2, 2), (3, 1)))

    # Create trellis data structure
    return cc.Trellis(memory, g_matrix, feedback, 'rsc')
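# Hedged sketch: encoding with the rate-2/3 trellis returned by _make_trellis above,
# assuming `cc` is commpy.channelcoding.convcode and that cc.conv_encode accepts this
# k=2 RSC trellis. The message length is kept a multiple of trellis.k.
import numpy as np

trellis = _make_trellis()
msg = np.random.randint(0, 2, 20)          # 10 encoder steps of k=2 input bits
coded = cc.conv_encode(msg, trellis)
print(trellis.k, trellis.n, len(coded))    # rate k/n code; length grows with termination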
def viterbi_decode_sequences(encoded_seqs, L, rate=1/2):
    """
    Given a list of convolutionally encoded sequences, applies hard-decision
    Viterbi decoding to each element. The Trellis is generated from the
    specified constraint length L and rate. Returns the list of decoded
    sequences.
    """
    decoded_seqs = [None for _ in range(len(encoded_seqs))]
    generator_matrix = gen_gmatrix(L, rate)
    memory = np.array([L - 1])
    trellis = cc.Trellis(memory, generator_matrix)

    for i, encoded_seq in enumerate(encoded_seqs):
        decoded_seq = cc.viterbi_decode(encoded_seq.astype(float), trellis)
        decoded_seqs[i] = decoded_seq

    return decoded_seqs
def stack_runner(IterationTimes, snr, index, metrics):
    memory = array([8])
    g_matrix = array([[0o515, 0o677]])
    trellis = cc.Trellis(memory, g_matrix)
    BPSKMod = cm.PSKModem(2)

    CodeError = 0
    total_result = 0.0
    for i in range(IterationTimes):
        # encode a 128-bit message
        message_bits = np.random.randint(0, 2, 128)
        encoded_code = cc.conv_encode(message_bits, trellis)
        BPSK_modedbits = BPSKMod.modulate(encoded_code)
        AWGNreceived_bits = cch.awgn(BPSK_modedbits, snr + 3, 0.5)

        # decode and count bit errors
        result = stack_soft_decoder(AWGNreceived_bits, metrics, trellis)
        print("pass: {}, {}".format(i, index))
        if len(result) != 136:
            continue
        else:
            BitErrors = str(array(message_bits) ^ array(result[0:len(message_bits)])).count('1')
            BER = BitErrors / 128
            if BitErrors > 0:
                CodeError += 1
            total_result += BER

    CER = CodeError / IterationTimes
    AverageBER = total_result / IterationTimes
    return CER
def newmethodrunner(iterationtimes, eb_n0, index, metrics):
    memory = array([8])
    g_matrix = array([[0o515, 0o677]])
    trellis = cc.Trellis(memory, g_matrix)
    BPSKMod = cm.PSKModem(2)

    total_error = 0
    cer = 0
    for d in range(iterationtimes):
        # draw a 128-bit message
        message_bits = np.random.randint(0, 2, 128)
        result = 0
        encoded_code = tb_encoder.conv_encode_tb(message_bits, trellis)
        BPSK_modedbits = BPSKMod.modulate(encoded_code)
        r_code = cch.awgn(BPSK_modedbits, eb_n0 + 3, 0.5)
        # r_code = BPSKMod.demodulate(AWGNreceived_bits, demod_type='hard')
        # part_code = BPSKMod.demodulate(r_code[240:], demod_type='hard')

        part_decode = shortviterbi1.short_viterbi(np.append(r_code[240:], r_code[0:64]),
                                                  trellis, 'unquantized')[0]
        # part_decode = message_bits[120:]
        initial_code = part_decode[0:8][::-1]
        # initial_code = message_bits[120:][::-1]
        initial_state = bitarray2dec(initial_code)
        initial_metric = 0

        result = stack_decoder_fixed_tb(r_code, metrics, trellis, initial_state, initial_metric)[0]
        print("pass: {}, {}".format(d, index))

        bit_error = str(message_bits ^ array(result)).count('1')
        if bit_error > 0:
            cer += 1
        total_error += bit_error

    averageber = total_error / (iterationtimes * 128)
    a_cer = cer / iterationtimes
    return a_cer, averageber
from numpy import array
import numpy as np
import commpy.channelcoding.convcode as cc

# memory = array([4])
# g_matrix = array([[109, 79]])
memory = array([3])
g_matrix = array([[109, 79]])
trellis = cc.Trellis(memory, g_matrix)

# a = '111101011'
# message_bits = np.array(list(a))
# coded_bits = cc.conv_encode(message_bits, trellis, code_type='default', puncture_matrix=None)
# print(coded_bits)

bit_num = 8414
print(bit_num)
num_bin = '{:016b}'.format(bit_num)
num_bits = []
for i in num_bin:
    if i == '0':
        num_bits.append(0)
    elif i == '1':
        num_bits.append(1)

num_encoded = cc.conv_encode(num_bits, trellis, code_type='default', puncture_matrix=None)
print(num_encoded)

# coded_bits = np.array([0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0])
# print(coded_bits)

ans = cc.viterbi_decode(num_encoded, trellis, tb_depth=None, decoding_type='hard')
print(ans)
# print(str(message_bits))
# ans = np.array2string(np.random.randint(0, 2, 10000))
import commpy.modulation as mod
import commpy.utilities as util

# =============================================================================
# Convolutional Code 1: G(D) = [1+D^2, 1+D+D^2]
# Standard code with rate 1/2
# =============================================================================

# Number of delay elements in the convolutional encoder
memory1 = np.array(2, ndmin=1)

# Generator matrix
g_matrix1 = np.array((0o5, 0o7), ndmin=2)

# Create trellis data structure
trellis1 = cc.Trellis(memory1, g_matrix1)

# =============================================================================
# Convolutional Code 2: G(D) = [[1, 0, 0], [0, 1, 1+D]]; F(D) = [[D, D], [1+D, 1]]
# RSC with rate 2/3
# =============================================================================

# Number of delay elements in the convolutional encoder
memory2 = np.array((1, 1))

# Generator matrix & feedback matrix
g_matrix2 = np.array(((1, 0, 0), (0, 1, 3)))
feedback = np.array(((2, 2), (3, 1)))

# Create trellis data structure
trellis2 = cc.Trellis(memory2, g_matrix2, feedback, 'rsc')
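# Hedged round-trip sketch with trellis1 above (noiseless, hard-decision Viterbi),
# assuming `np` and `cc` are imported as in the surrounding file: encode, decode,
# and compare against the original message bits.
message_bits = np.random.randint(0, 2, 100)
coded_bits = cc.conv_encode(message_bits, trellis1)
decoded_bits = cc.viterbi_decode(coded_bits, trellis1)
print("exact recovery:", np.array_equal(decoded_bits[:len(message_bits)], message_bits))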
    print(args)
    print('[ID]', args.id)
    return args


if __name__ == '__main__':
    args = get_args()

    M = np.array([args.M])                                  # Number of delay elements in the convolutional encoder
    generator_matrix = np.array([[args.enc1, args.enc2]])   # Generator polynomials of the convolutional encoder
    feedback = args.feedback                                # Feedback polynomial of the convolutional encoder
    print('[testing] Turbo Code Encoder: G: ', generator_matrix, 'Feedback: ', feedback, 'M: ', M)

    trellis1 = cc.Trellis(M, generator_matrix, feedback=feedback)
    trellis2 = cc.Trellis(M, generator_matrix, feedback=feedback)
    interleaver = RandInterlv.RandInterlv(args.block_len, 0)
    p_array = interleaver.p_array
    codec = [trellis1, trellis2, interleaver]

    snrs, test_sigmas = get_test_sigmas(args.snr_test_start, args.snr_test_end, args.snr_points)

    turbo_res_ber, turbo_res_bler = [], []

    tic = time.time()

    def turbo_compute(idx, x):
        '''
        Compute Turbo decoding (one iteration) for one SNR point.
        '''
def _get_trellis():
    return cc.Trellis(Wifi80211.memory, Wifi80211.generator_matrix)
def generate_examples(k_test=1000, step_of_history=200, SNR=0, code_rate=2):
    trellis1 = cc.Trellis(np.array([2]), np.array([[7, 5]]))
    trellis2 = cc.Trellis(np.array([2]), np.array([[7, 5]]))
    # print('trellis: cc.Trellis(np.array([2]), np.array([[7,5]]))')
    # G(D) corresponding to the convolutional encoder

    tic = time.time()

    ### TEST EXAMPLES
    # Initialize test examples
    noisy_codewords = np.zeros([1, int(k_test / step_of_history), step_of_history, 2])
    true_messages = np.zeros([1, int(k_test / step_of_history), step_of_history, 1])

    iterations_number = int(k_test / step_of_history)

    #for idx in range(SNR_points):
    nb_errors = np.zeros([iterations_number, 1])

    tic = time.time()
    noise_sigmas = 10 ** (-SNR * 1.0 / 20)
    mb_test_collect = np.zeros([iterations_number, step_of_history])

    interleaver = RandInterlv.RandInterlv(step_of_history, 0)

    for iterations in range(iterations_number):
        # print(iterations)
        message_bits = np.random.randint(0, 2, step_of_history)
        mb_test_collect[iterations, :] = message_bits
        [sys, par1, par2] = turbo.turbo_encode(message_bits, trellis1, trellis2, interleaver)

        noise = noise_sigmas * np.random.standard_normal(sys.shape)    # Generate noise
        sys_r = (2 * sys - 1) + noise                                  # Modulation plus noise
        noise = noise_sigmas * np.random.standard_normal(par1.shape)   # Generate noise
        par1_r = (2 * par1 - 1) + noise                                # Modulation plus noise
        noise = noise_sigmas * np.random.standard_normal(par2.shape)   # Generate noise
        par2_r = (2 * par2 - 1) + noise                                # Modulation plus noise

        sys_symbols = sys_r
        non_sys_symbols_1 = par1_r
        non_sys_symbols_2 = par2_r

        # Add training examples
        noisy_codewords[0, iterations, :, :] = np.concatenate(
            [sys_r.reshape(step_of_history, 1), par1_r.reshape(step_of_history, 1)], axis=1)

        # Message sequence
        true_messages[0, iterations, :, :] = message_bits.reshape(step_of_history, 1)

    noisy_codewords = noisy_codewords.reshape(int(k_test / step_of_history), step_of_history, code_rate)
    true_messages = true_messages.reshape(int(k_test / step_of_history), step_of_history, 1)
    target_true_messages = mb_test_collect.reshape([mb_test_collect.shape[0], mb_test_collect.shape[1], 1])

    toc = time.time()
    # print('time to generate test examples:', toc - tic)

    return (noisy_codewords, true_messages, target_true_messages)
print('[BCJR Setting Parameters] Network starting path is ', args.init_nw_model)
print('[BCJR Setting Parameters] Initial learning_rate is ', args.learning_rate)
print('[BCJR Setting Parameters] Training batch_size is ', args.batch_size)
print('[BCJR Setting Parameters] Training num_epoch is ', args.num_epoch)
print('[BCJR Setting Parameters] Turbo Decoding Iteration ', args.num_dec_iteration)
print('[BCJR Setting Parameters] RNN Direction is ', args.rnn_direction)
print('[BCJR Setting Parameters] RNN Model Type is ', args.rnn_setup)
print('[BCJR Setting Parameters] Number of RNN layer is ', args.num_Dec_layer)
print('[BCJR Setting Parameters] Number of RNN unit is ', args.num_Dec_unit)

M = np.array([args.M])
generator_matrix = np.array([[args.enc1, args.enc2]])
feedback = args.feedback

trellis1 = cc.Trellis(M, generator_matrix, feedback=feedback)  # Create trellis data structure
trellis2 = cc.Trellis(M, generator_matrix, feedback=feedback)  # Create trellis data structure
interleaver = RandInterlv.RandInterlv(args.block_len, 0)
p_array = interleaver.p_array

print('[BCJR Code Codec] Encoder', 'M ', M, ' Generator Matrix ', generator_matrix, ' Feedback ', feedback)

codec = [trellis1, trellis2, interleaver]

print('[BCJR Setting Parameters] Training Data SNR is ', args.train_snr, ' dB')
print('[BCJR Setting Parameters] Code Block Length is ', args.block_len)
print('[BCJR Setting Parameters] Number of Train Block is ', args.num_block_train, ' Test Block ', args.num_block_test)

model = build_decoder(args)

bcjr_inputs_train, bcjr_outputs_train = generate_bcjr_example(
    args.num_block_train,
test_sigmas = np.array([0.6310, 0.5623, 0.4994, 0.3985])
SNR_points = 4

SNRS = -10 * np.log10(test_sigmas**2)

# =============================================================================
# Example showing the encoding and decoding of convolutional codes
# =============================================================================

# G(D) corresponding to the convolutional encoder
#generator_matrix = np.array([[0o3, 0o0, 0o2], [0o7, 0o4, 0o6]])

# Number of delay elements in the convolutional encoder
M = np.array([2])
generator_matrix = np.array([[0o5, 0o7]])

# Create trellis data structure
trellis = cc.Trellis(M, generator_matrix)

## RSC
M = np.array([3])  # Number of delay elements in the convolutional encoder
trellis = cc.Trellis(np.array([3]), np.array([[11, 13]]))
print('trellis: cc.Trellis(np.array([3]), np.array([[11,13]]))')

M = np.array([1])  # Number of delay elements in the convolutional encoder
trellis = cc.Trellis(np.array([1]), np.array([[3, 1]]), feedback=3)
print('trellis: cc.Trellis(np.array([1]), np.array([[3,1]]),feedback=3)')

#M = np.array([2])
#trellis = cc.Trellis(np.array([2]), np.array([[7,5]]), feedback=7)
#print('trellis: cc.Trellis(np.array([2]), np.array([[7,5]]),feedback=7)')

nb_errors = np.zeros(test_sigmas.shape)
def generate_viterbi_batch(batch_size=100, block_len=200, code_rate=2, batch_criteria={}, seed=0):
    noise_type = batch_criteria["noise_type"]
    SNR = batch_criteria["snr"]
    rng = np.random.RandomState(seed)
    # print("[generate_viterbi_batch] block_len, code_rate", block_len, code_rate)

    trellis1 = cc.Trellis(np.array([2]), np.array([[7, 5]]))
    trellis2 = cc.Trellis(np.array([2]), np.array([[7, 5]]))
    # print('trellis: cc.Trellis(np.array([2]), np.array([[7,5]]))')
    # G(D) corresponding to the convolutional encoder

    tic = time.time()

    ### TEST EXAMPLES
    # Initialize test examples
    noisy_codewords = np.zeros([1, batch_size, block_len, 2])
    true_messages = np.zeros([1, batch_size, block_len, 1])

    iterations_number = batch_size

    nb_errors = np.zeros([iterations_number, 1])

    tic = time.time()
    noise_sigmas = 10 ** (-SNR * 1.0 / 20)
    mb_test_collect = np.zeros([iterations_number, block_len])

    interleaver = RandInterlv.RandInterlv(block_len, 0)

    message_bits = rng.randint(0, 2, block_len)
    # mb_test_collect[iterations, :] = message_bits
    [sys, par1, par2] = turbo.turbo_encode(message_bits, trellis1, trellis2, interleaver)

    # print("[debug] noise type ", noise_type, " noise_sigmas ", noise_sigmas,
    #       "vv", vv, "radar_power", radar_pow, "radar_prob", radar_prob)

    for iterations in range(iterations_number):
        noise_seed1 = rng.randint(1, 999999)
        noise_seed2 = rng.randint(1, 999999)
        noise_seed3 = rng.randint(1, 999999)
        # print("seeds ", noise_seed1, noise_seed2)

        sys_r = corrupt_signal(input_signal=sys, noise_type=noise_type, sigma=noise_sigmas,
                               metrics=batch_criteria, seed=noise_seed1)
        par1_r = corrupt_signal(input_signal=par1, noise_type=noise_type, sigma=noise_sigmas,
                                metrics=batch_criteria, seed=noise_seed2)
        par2_r = corrupt_signal(input_signal=par2, noise_type=noise_type, sigma=noise_sigmas,
                                metrics=batch_criteria, seed=noise_seed3)
        # print("sys_r ", sys_r, flush=True)
        # print("par1_r", par1_r, flush=True)

        # Add training examples
        noisy_codewords[0, iterations, :, :] = np.concatenate(
            [sys_r.reshape(block_len, 1), par1_r.reshape(block_len, 1)], axis=1)

        # Message sequence
        true_messages[0, iterations, :, :] = message_bits.reshape(block_len, 1)

    noisy_codewords = noisy_codewords.reshape(batch_size, block_len, code_rate)
    true_messages = true_messages.reshape(batch_size, block_len)
    # target_true_messages = mb_test_collect.reshape([mb_test_collect.shape[0], mb_test_collect.shape[1], 1])

    toc = time.time()
    # print('time to generate test examples:', toc - tic)

    return (noisy_codewords, true_messages)
def __init__(self):
    memory = array([6])
    g_matrix = array([[0b1111001, 0b1011011]])  # (121, 91) in decimal
    self.trellis = cc.Trellis(memory, g_matrix)
SNRS = -10 * np.log10(test_sigmas**2)

# =============================================================================
# Example showing the encoding and decoding of convolutional codes
# =============================================================================

# G(D) corresponding to the convolutional encoder
generator_matrix = np.array([[0o5, 0o7]])
#generator_matrix = np.array([[0o3, 0o0, 0o2], [0o7, 0o4, 0o6]])

# Number of delay elements in the convolutional encoder
M = np.array([2])

# Create trellis data structure
trellis = cc.Trellis(M, generator_matrix)

nb_errors = np.zeros(test_sigmas.shape)
map_nb_errors = np.zeros(test_sigmas.shape)

# Traceback depth of the decoder
tb_depth = 10  # 5*(M.sum() + 1)
print('traceback depth: ' + str(tb_depth))

for idx in range(SNR_points):
    print(idx)
    for iterations in range(iterations_number):
        message_bits = np.random.randint(0, 2, k)
#SNRS = -10*np.log10(test_sigmas**2)
test_sigmas = 10 ** (-SNRS * 1.0 / 20)
print(SNRS)

# =============================================================================
# Example showing the encoding and decoding of convolutional codes
# =============================================================================

# G(D) corresponding to the convolutional encoder
# Create trellis data structure
#trellis1 = cc.Trellis(np.array([3]), np.array([[11,13]]), feedback=11)
#trellis2 = cc.Trellis(np.array([3]), np.array([[11,13]]), feedback=11)
#print('trellis: cc.Trellis(np.array([3]), np.array([[11,13]]),feedback=11)')

trellis1 = cc.Trellis(np.array([2]), np.array([[7, 5]]), feedback=7)
trellis2 = cc.Trellis(np.array([2]), np.array([[7, 5]]), feedback=7)
print('trellis: cc.Trellis(np.array([2]), np.array([[7,5]]),feedback=7)')

interleaver = RandInterlv.RandInterlv(k, 0)

nb_errors = np.zeros(test_sigmas.shape)
map_nb_errors = np.zeros(test_sigmas.shape)
nb_block_no_errors = np.zeros(test_sigmas.shape)

tic = time.time()

for iterations in range(iterations_number):
    print(iterations)
    message_bits = np.random.randint(0, 2, k)

    [sys, par1, par2] = turbo.turbo_encode(message_bits, trellis1, trellis2,
def conv_encoder(message_bits):
    generator_matrix = np.array([[0o5, 0o7]])
    M = np.array([2])
    trellis = cc.Trellis(M, generator_matrix)
    return 2 * cc.conv_encode(np.asarray(message_bits), trellis) - 1
import commpy.modulation as mod
import commpy.utilities as util

# =============================================================================
# Convolutional Code 1: G(D) = [1+D^2, 1+D+D^2]
# Standard code with rate 1/2
# =============================================================================

# Number of delay elements in the convolutional encoder
memory = np.array(2, ndmin=1)

# Generator matrix
g_matrix = np.array((0o5, 0o7), ndmin=2)

# Create trellis data structure
trellis1 = cc.Trellis(memory, g_matrix)

# =============================================================================
# Convolutional Code 2: G(D) = [1+D^2, 1+D^2+D^3]
# Standard code with rate 1/2
# =============================================================================

# Number of delay elements in the convolutional encoder
memory = np.array(3, ndmin=1)

# Generator matrix (1+D^2+D^3 <-> 13 or 0o15)
g_matrix = np.array((0o5, 0o15), ndmin=2)

# Create trellis data structure
trellis2 = cc.Trellis(memory, g_matrix)
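# Hedged sketch: encode the same message with both trellises above to see how the
# terminated codeword length tracks the encoder memory (assumes `np` and `cc` are
# imported as in the surrounding file). With commpy's default termination this should
# give 2*(L+2) and 2*(L+3) coded bits respectively.
message_bits = np.random.randint(0, 2, 50)
coded1 = cc.conv_encode(message_bits, trellis1)
coded2 = cc.conv_encode(message_bits, trellis2)
print(len(coded1), len(coded2))   # expected 104 and 106 for the 50-bit message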