Example #1
    def turbo_compute(idx_and_x):
        '''
        Run turbo decoding for one random block at a single SNR point.
        '''
        idx, x = idx_and_x  # (SNR index, block index) pair, e.g. from pool.map
        np.random.seed()
        message_bits = np.random.randint(0, 2, args.block_len)
        [sys, par1, par2] = turbo.turbo_encode(message_bits, trellis1,
                                               trellis2, interleaver)

        sys_r = corrupt_signal(sys,
                               noise_type=args.noise_type,
                               sigma=test_sigmas[idx])
        par1_r = corrupt_signal(par1,
                                noise_type=args.noise_type,
                                sigma=test_sigmas[idx])
        par2_r = corrupt_signal(par2,
                                noise_type=args.noise_type,
                                sigma=test_sigmas[idx])

        decoded_bits = turbo.hazzys_turbo_decode(sys_r,
                                                 par1_r,
                                                 par2_r,
                                                 trellis1,
                                                 test_sigmas[idx]**2,
                                                 args.num_dec_iteration,
                                                 interleaver,
                                                 L_int=None)

        num_bit_errors = hamming_dist(message_bits, decoded_bits)

        return num_bit_errors
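A minimal driver sketch, assuming turbo_compute is defined at module scope (so a worker pool can pickle it) and that args, test_sigmas, trellis1, trellis2 and interleaver exist in the surrounding script as above; it spreads (SNR index, block index) pairs over a multiprocessing pool and sums the returned bit errors per SNR point:

from multiprocessing import Pool

import numpy as np

def run_ber_sweep(num_blocks, num_cpu=4):
    # One job per (SNR index, block index) pair; each worker decodes one block.
    jobs = [(idx, b) for idx in range(len(test_sigmas)) for b in range(num_blocks)]
    with Pool(processes=num_cpu) as pool:
        bit_errors = pool.map(turbo_compute, jobs)

    # Sum the bit errors of all blocks that share the same SNR point.
    nb_errors = np.zeros(len(test_sigmas))
    for (idx, _), err in zip(jobs, bit_errors):
        nb_errors[idx] += err

    # Bit error rate per SNR point.
    return nb_errors / (num_blocks * args.block_len)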
Example #2
def turbo_enc(X_train_raw, args, p_array):
    num_block = X_train_raw.shape[0]
    x_code = []

    if args.encoder == 'Turbo_rate3_lte':  # Turbo-LTE
        M = np.array([3])                        # number of delay elements in the convolutional encoder
        generator_matrix = np.array([[13, 11]])  # generator matrix of the convolutional encoder
        feedback = 13                            # feedback polynomial of the convolutional encoder
    else:  # Turbo-757
        M = np.array([2])                        # number of delay elements in the convolutional encoder
        generator_matrix = np.array([[7, 5]])    # generator matrix of the convolutional encoder
        feedback = 7                             # feedback polynomial of the convolutional encoder

    trellis1 = cc.Trellis(M, generator_matrix, feedback=feedback)
    trellis2 = cc.Trellis(M, generator_matrix, feedback=feedback)
    interleaver = RandInterlv.RandInterlv(args.block_len, 0)
    interleaver.p_array = p_array

    for idx in range(num_block):
        #print(X_train_raw[idx, :, 0])
        np_inputs = np.array(X_train_raw[idx, :, 0].type(torch.IntTensor).detach())
        [sys, par1, par2] = turbo.turbo_encode(np_inputs, trellis1, trellis2, interleaver)
        xx = np.array([sys, par1, par2]).T
        x_code.append(xx)

    return torch.from_numpy(np.array(x_code)).type(torch.FloatTensor)
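A hypothetical call, assuming the same imports as elsewhere on this page and that args carries block_len and encoder; the permutation p_array is drawn once so every block is interleaved the same way:

import torch

# Hypothetical batch: 10 random binary messages shaped (block, time, feature),
# the layout turbo_enc expects for its PyTorch input.
X_train_raw = torch.randint(0, 2, (10, args.block_len, 1)).float()

# Draw one random permutation and reuse it for every block.
p_array = RandInterlv.RandInterlv(args.block_len, 0).p_array

x_code = turbo_enc(X_train_raw, args, p_array)
# x_code: FloatTensor with one row per block and three streams per time step
# (systematic, parity 1, parity 2).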
Example #3
def build_rnn_data_feed(num_block, block_len, noiser, codec, is_all_zero=False,
                        is_same_code=False, **kwargs):
    '''
    Generate noisy turbo codewords as RNN training/test data.

    :param num_block: number of codewords to generate
    :param block_len: number of message bits per codeword
    :param noiser: list, 0:noise_type, 1:sigma, 2:v for t-dist, 3:radar_power,
                   4:radar_prob, 5:denoise_thd, 6:snr_mix
    :param codec:  list, 0:trellis1, 1:trellis2, 2:interleaver
    :param kwargs:
    :return: X_feed, X_message
    '''

    # Unpack Noiser
    noise_type  = noiser[0]
    noise_sigma = noiser[1]
    vv          = 5.0
    radar_power = 20.0
    radar_prob  = 5e-2
    denoise_thd = 10.0
    snr_mix     = [0, 0, 0]

    if noise_type == 't-dist':
        vv = noiser[2]
    elif noise_type == 'awgn+radar' or noise_type == 'hyeji_bursty':
        radar_power = noiser[3]
        radar_prob  = noiser[4]

    elif noise_type == 'awgn+radar+denoise' or noise_type == 'hyeji_bursty+denoise':
        radar_power = noiser[3]
        radar_prob  = noiser[4]
        denoise_thd = noiser[5]

    elif noise_type == 'mix_snr_turbo' or noise_type == 'random_snr_turbo':
        snr_mix = noiser[6]

    elif noise_type == 'customize':
        # TBD: the noise model should be opened up so that users can plug in
        # their own channel and train their own decoder.
        print('[Debug] Customize noise model not supported yet')
    else:  # awgn
        pass

    #print '[Build RNN Data] noise type is ', noise_type, ' noiser', noiser

    # Unpack Codec
    trellis1    = codec[0]
    trellis2    = codec[1]
    interleaver = codec[2]


    p_array     = interleaver.p_array

    X_feed = []
    X_message = []

    same_code = np.random.randint(0, 2, block_len)

    for nbb in range(num_block):
        if is_same_code:
            message_bits = same_code           # every block reuses the same codeword
        else:
            if not is_all_zero:
                message_bits = np.random.randint(0, 2, block_len)
            else:
                message_bits = np.zeros(block_len, dtype=int)   # all-zero codeword

        X_message.append(message_bits)
        [sys, par1, par2] = turbo.turbo_encode(message_bits, trellis1, trellis2, interleaver)

        sys_r  = corrupt_signal(sys, noise_type=noise_type, sigma=noise_sigma,
                                vv=vv, radar_power=radar_power, radar_prob=radar_prob,
                                denoise_thd=denoise_thd, snr_mixture=snr_mix)
        par1_r = corrupt_signal(par1, noise_type=noise_type, sigma=noise_sigma,
                                vv=vv, radar_power=radar_power, radar_prob=radar_prob,
                                denoise_thd=denoise_thd, snr_mixture=snr_mix)
        par2_r = corrupt_signal(par2, noise_type=noise_type, sigma=noise_sigma,
                                vv=vv, radar_power=radar_power, radar_prob=radar_prob,
                                denoise_thd=denoise_thd, snr_mixture=snr_mix)

        # Five input channels per time step: sys, par1, zero padding,
        # interleaved sys, par2.
        rnn_feed_raw = np.stack([sys_r, par1_r, np.zeros(sys_r.shape),
                                 intleave(sys_r, p_array), par2_r], axis=0).T
        rnn_feed = rnn_feed_raw

        X_feed.append(rnn_feed)

    X_feed = np.stack(X_feed, axis=0)

    X_message = np.array(X_message)
    X_message = X_message.reshape((-1,block_len, 1))

    return X_feed, X_message
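A hypothetical call, reusing the rate-1/3 757 turbo codec from the other examples on this page and an AWGN channel with sigma 1.0; the later noiser entries are only read for the other noise types, so a two-element list is enough here:

trellis1 = cc.Trellis(np.array([2]), np.array([[7, 5]]), feedback=7)
trellis2 = cc.Trellis(np.array([2]), np.array([[7, 5]]), feedback=7)
interleaver = RandInterlv.RandInterlv(100, 0)

noiser = ['awgn', 1.0]                      # [noise_type, sigma]
codec  = [trellis1, trellis2, interleaver]

X_feed, X_message = build_rnn_data_feed(1000, 100, noiser, codec)
# X_feed:    one row per block, five channels per time step
#            (sys, par1, zeros, interleaved sys, par2)
# X_message: shape (1000, 100, 1), the transmitted message bits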
Example #4
trellis1 = cc.Trellis(np.array([2]), np.array([[7, 5]]), feedback=7)
trellis2 = cc.Trellis(np.array([2]), np.array([[7, 5]]), feedback=7)
print('trellis: cc.Trellis(np.array([2]), np.array([[7,5]]),feedback=7) ')

interleaver = RandInterlv.RandInterlv(k, 0)

nb_errors = np.zeros(test_sigmas.shape)
map_nb_errors = np.zeros(test_sigmas.shape)
nb_block_no_errors = np.zeros(test_sigmas.shape)

tic = time.time()  # time.clock() was removed in Python 3.8

for iterations in range(iterations_number):
    print(iterations)
    message_bits = np.random.randint(0, 2, k)
    [sys, par1, par2] = turbo.turbo_encode(message_bits, trellis1, trellis2,
                                           interleaver)

    for idx in range(len(test_sigmas)):

        if NType == 'iid':
            noise = test_sigmas[idx] * np.random.standard_normal(sys.shape)    # define noise
            sys_r = (2 * sys - 1) + noise                                      # modulation plus noise
            noise = test_sigmas[idx] * np.random.standard_normal(par1.shape)   # define noise
            par1_r = (2 * par1 - 1) + noise                                    # modulation plus noise
            noise = test_sigmas[idx] * np.random.standard_normal(par2.shape)   # define noise
            par2_r = (2 * par2 - 1) + noise                                    # modulation plus noise

        decoded_bits = hazzys_turbo_decode(sys_r,
                                           par1_r,
                                           par2_r,
                                           trellis1,
                                           test_sigmas[idx]**2,
                                           number_dec_iteration,  # decoder iteration count, assumed defined earlier in the script
                                           interleaver,
                                           L_int=None)

        nb_errors[idx] += hamming_dist(message_bits, decoded_bits)
Example #5
def generate_viterbi_batch(batch_size=100,
                           block_len=200,
                           code_rate=2,
                           batch_criteria={},
                           seed=0):

    noise_type = batch_criteria["noise_type"]
    SNR = batch_criteria["snr"]
    rng = np.random.RandomState(seed)

    # print("[generate_viterbi_batch] block_len, code_rate", block_len, code_rate)
    trellis1 = cc.Trellis(np.array([2]), np.array([[7, 5]]))
    trellis2 = cc.Trellis(np.array([2]), np.array([[7, 5]]))
    #print('trellis: cc.Trellis(np.array([2]), np.array([[7,5]]))') # G(D) corresponding to the convolutional encoder

    tic = time.time()

    ### TEST EXAMPLES

    # Initialize Test Examples/
    noisy_codewords = np.zeros([1, batch_size, block_len, 2])
    true_messages = np.zeros([1, batch_size, block_len, 1])

    iterations_number = batch_size

    #for idx in range(SNR_points):
    nb_errors = np.zeros([iterations_number, 1])

    tic = time.time()

    noise_sigmas = 10**(-SNR * 1.0 / 20)

    mb_test_collect = np.zeros([iterations_number, block_len])

    interleaver = RandInterlv.RandInterlv(block_len, 0)
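    # Note: a single random message is encoded once, before the loop below; every
    # example in the batch reuses this codeword and only the noise realization differs.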

    message_bits = rng.randint(0, 2, block_len)
    #            mb_test_collect[iterations,:] = message_bits
    [sys, par1, par2] = turbo.turbo_encode(message_bits, trellis1, trellis2,
                                           interleaver)

    # print("[debug] noise type ", noise_type, " noise_sigmas ", noise_sigmas,
    # "vv", vv, "radar_power", radar_pow, "radar_prob", radar_prob)
    for iterations in range(iterations_number):
        noise_seed1 = rng.randint(1, 999999)
        noise_seed2 = rng.randint(1, 999999)
        noise_seed3 = rng.randint(1, 999999)
        # print("seeds ",  noise_seed1, noise_seed2)
        sys_r = corrupt_signal(input_signal=sys, noise_type=noise_type,
                               sigma=noise_sigmas, metrics=batch_criteria, seed=noise_seed1)
        par1_r = corrupt_signal(input_signal=par1, noise_type=noise_type,
                                sigma=noise_sigmas, metrics=batch_criteria, seed=noise_seed2)
        par2_r = corrupt_signal(input_signal=par2, noise_type=noise_type,
                                sigma=noise_sigmas, metrics=batch_criteria, seed=noise_seed3)
        # print("sys_r ", sys_r, flush=True)
        # print("par1_r", par1_r, flush=True)
        # ADD Training Examples
        noisy_codewords[0, iterations, :, :] = np.concatenate(
            [sys_r.reshape(block_len, 1), par1_r.reshape(block_len, 1)], axis=1)

        # Message sequence
        true_messages[0, iterations, :, :] = message_bits.reshape(block_len, 1)

    noisy_codewords = noisy_codewords.reshape(batch_size, block_len, code_rate)
    true_messages = true_messages.reshape(batch_size, block_len)
    #   target_true_messages  = mb_test_collect.reshape([mb_test_collect.shape[0],mb_test_collect.shape[1],1])

    toc = time.time()

    #print('time to generate test examples:', toc-tic)

    return (noisy_codewords, true_messages)
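A hypothetical call, assuming corrupt_signal accepts 'awgn' as a noise type (as in the other examples here); batch_criteria carries exactly the keys generate_viterbi_batch reads:

noisy, messages = generate_viterbi_batch(batch_size=100,
                                         block_len=200,
                                         code_rate=2,
                                         batch_criteria={"noise_type": "awgn", "snr": 1.0},
                                         seed=2020)
# noisy:    shape (100, 200, 2), received systematic and first parity streams
# messages: shape (100, 200), the (shared) transmitted message bits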
Example #6
    def encoder(self, x):
        [sys, par1, par2] = dturbo.turbo_encode(x, self.trellis1, self.trellis2,
                                                self.interleaver)
        # code_rate = 3
        return (sys, par1, par2)
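A hypothetical use, assuming enc is an instance of the surrounding class with trellis1, trellis2 and an interleaver whose length matches the message:

x = np.random.randint(0, 2, 100)
sys, par1, par2 = enc.encoder(x)
codeword = np.array([sys, par1, par2]).T   # one column per stream, rate 1/3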
Example #7
def generate_examples(k_test=1000, step_of_history=200, SNR=0, code_rate=2):

    trellis1 = cc.Trellis(np.array([2]), np.array([[7, 5]]))
    trellis2 = cc.Trellis(np.array([2]), np.array([[7, 5]]))
    #print('trellis: cc.Trellis(np.array([2]), np.array([[7,5]]))') # G(D) corresponding to the convolutional encoder

    tic = time.time()

    ### TEST EXAMPLES

    # Initialize Test Examples/
    noisy_codewords = np.zeros(
        [1, int(k_test / step_of_history), step_of_history, 2])
    true_messages = np.zeros(
        [1, int(k_test / step_of_history), step_of_history, 1])

    iterations_number = int(k_test / step_of_history)

    #for idx in range(SNR_points):
    nb_errors = np.zeros([iterations_number, 1])

    tic = time.time()

    noise_sigmas = 10**(-SNR * 1.0 / 20)

    mb_test_collect = np.zeros([iterations_number, step_of_history])

    interleaver = RandInterlv.RandInterlv(step_of_history, 0)

    for iterations in range(iterations_number):

        #    print(iterations)
        message_bits = np.random.randint(0, 2, step_of_history)
        mb_test_collect[iterations, :] = message_bits
        [sys, par1, par2] = turbo.turbo_encode(message_bits, trellis1,
                                               trellis2, interleaver)

        noise = noise_sigmas * np.random.standard_normal(sys.shape)    # generate noise
        sys_r = (2 * sys - 1) + noise                                   # modulation plus noise
        noise = noise_sigmas * np.random.standard_normal(par1.shape)   # generate noise
        par1_r = (2 * par1 - 1) + noise                                 # modulation plus noise
        noise = noise_sigmas * np.random.standard_normal(par2.shape)   # generate noise
        par2_r = (2 * par2 - 1) + noise                                 # modulation plus noise

        sys_symbols = sys_r
        non_sys_symbols_1 = par1_r
        non_sys_symbols_2 = par2_r

        # ADD Training Examples
        noisy_codewords[0, iterations, :, :] = np.concatenate(
            [sys_r.reshape(step_of_history, 1), par1_r.reshape(step_of_history, 1)],
            axis=1)

        # Message sequence
        true_messages[0, iterations, :, :] = message_bits.reshape(
            step_of_history, 1)

    noisy_codewords = noisy_codewords.reshape(int(k_test / step_of_history),
                                              step_of_history, code_rate)
    true_messages = true_messages.reshape(int(k_test / step_of_history),
                                          step_of_history, 1)
    target_true_messages = mb_test_collect.reshape(
        [mb_test_collect.shape[0], mb_test_collect.shape[1], 1])

    toc = time.time()

    #print('time to generate test examples:', toc-tic)

    return (noisy_codewords, true_messages, target_true_messages)
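A hypothetical call: 1000 test bits are split into 1000/200 = 5 windows of 200 bits, each encoded separately and corrupted with AWGN at 0 dB (sigma = 10**(-SNR/20) = 1):

noisy, true_msgs, target_msgs = generate_examples(k_test=1000,
                                                  step_of_history=200,
                                                  SNR=0,
                                                  code_rate=2)
# noisy:       shape (5, 200, 2)
# true_msgs:   shape (5, 200, 1)
# target_msgs: shape (5, 200, 1), the same message bits collected per window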
Example #8
def generate_bcjr_example(num_block, block_len, codec, num_iteration, is_save=True,
                          train_snr_db=0.0, save_path='./tmp/', **kwargs):
    '''
    Generate BCJR feature and target for training BCJR-like RNN codec from scratch
    '''

    start_time = time.time()
    # print
    print('[BCJR] Block Length is ', block_len)
    print('[BCJR] Number of Block is ', num_block)

    input_feature_num = 3
    noise_type  = 'awgn'
    noise_sigma = snr_db2sigma(train_snr_db)

    identity = str(np.random.random())    # random id for saving

    # Unpack Codec
    trellis1    = codec[0]
    trellis2    = codec[1]
    interleaver = codec[2]

    # Initialize the BCJR input/output pairs for training: one entry per
    # constituent-decoder pass (two passes per turbo iteration) per block.
    bcjr_inputs  = np.zeros([2*num_iteration, num_block, block_len, input_feature_num])
    bcjr_outputs = np.zeros([2*num_iteration, num_block, block_len, 1])

    for block_idx in range(num_block):
        # Generate Noisy Input For Turbo Decoding
        message_bits = np.random.randint(0, 2, block_len)
        [sys, par1, par2] = turbo.turbo_encode(message_bits, trellis1, trellis2, interleaver)

        sys_r  = corrupt_signal(sys, noise_type=noise_type, sigma=noise_sigma)
        par1_r = corrupt_signal(par1, noise_type=noise_type, sigma=noise_sigma)
        par2_r = corrupt_signal(par2, noise_type=noise_type, sigma=noise_sigma)

        # Use the Commpy BCJR decoding algorithm
        sys_symbols = sys_r
        non_sys_symbols_1 = par1_r
        non_sys_symbols_2 = par2_r
        noise_variance = noise_sigma**2
        #print("+++++++++++++++++++")
        #print("SYS_SYMBOLS: ", sys_symbols)
        #print("+++++++++++++++++++")
        sys_symbols_i = interleaver.interlv(sys_symbols)
        trellis = trellis1

        # Start with zero a-priori LLRs.
        L_int = np.zeros(len(sys_symbols))

        L_int_1 = L_int
        L_ext_2 = L_int_1

        # Channel LLR of the systematic stream; subtracted from each decoder's
        # output below to keep only the extrinsic information.
        weighted_sys = 2*sys_symbols*1.0/noise_variance
        weighted_sys_int = interleaver.interlv(weighted_sys)

        for turbo_iteration_idx in range(num_iteration-1):
            L_int_1 = interleaver.deinterlv(L_ext_2)
            # MAP 1
            [L_ext_1, decoded_bits] = turbo.map_decode(sys_symbols, non_sys_symbols_1,
                                                 trellis, noise_variance, L_int_1, 'compute')
            L_ext_1 -= L_int_1
            L_ext_1 -= weighted_sys

             # ADD Training Examples
            bcjr_inputs[2*turbo_iteration_idx,block_idx,:,:] = np.concatenate([sys_symbols.reshape(block_len,1),
                                                                               non_sys_symbols_1.reshape(block_len,1),
                                                                               L_int_1.reshape(block_len,1)],
                                                                              axis=1)
            bcjr_outputs[2*turbo_iteration_idx,block_idx,:,:]= L_ext_1.reshape(block_len,1)

            # MAP 2
            L_int_2 = interleaver.interlv(L_ext_1)

            #print("+++++++++++++++++++++++++++++++++++")
            #print(sys_symbols_i)
            #print(sys_symbols_i.shape)
            #print("+++++++++++++++++++++++++++++++++++")

            [L_ext_2, decoded_bits] = turbo.map_decode(sys_symbols_i, non_sys_symbols_2,
                                             trellis, noise_variance, L_int_2, 'compute')
            L_ext_2 -=  L_int_2
            L_ext_2 -=  weighted_sys_int
            # ADD Training Examples
            bcjr_inputs[2*turbo_iteration_idx+1,block_idx,:,:] = np.concatenate([sys_symbols_i.reshape(block_len,1),
                                                                                 non_sys_symbols_2.reshape(block_len,1),
                                                                                 L_int_2.reshape(block_len,1)],
                                                                                axis=1)
            bcjr_outputs[2*turbo_iteration_idx+1,block_idx,:,:] = L_ext_2.reshape(block_len,1)

        # MAP 1
        L_int_1 = interleaver.deinterlv(L_ext_2)
        [L_ext_1, decoded_bits] = turbo.map_decode(sys_symbols, non_sys_symbols_1,
                                             trellis, noise_variance, L_int_1, 'compute')
        L_ext_1 -= L_int_1
        L_ext_1 -= weighted_sys
        # ADD Training Examples
        bcjr_inputs[2*num_iteration-2,block_idx,:,:] = np.concatenate([sys_symbols.reshape(block_len,1),
                                                                     non_sys_symbols_1.reshape(block_len,1),
                                                                     L_int_1.reshape(block_len,1)],
                                                                    axis=1)
        bcjr_outputs[2*num_iteration-2,block_idx,:,:] = L_ext_1.reshape(block_len,1)

        # MAP 2
        L_int_2 = interleaver.interlv(L_ext_1)
        [L_2, decoded_bits] = turbo.map_decode(sys_symbols_i, non_sys_symbols_2,
                                         trellis, noise_variance, L_int_2, 'decode')
        L_ext_2 = L_2 - L_int_2
        L_ext_2 -=  weighted_sys_int
        # ADD Training Examples
        bcjr_inputs[2*num_iteration-1,block_idx,:,:] = np.concatenate([sys_symbols_i.reshape(block_len,1),
                                                                       non_sys_symbols_2.reshape(block_len,1),
                                                                       L_int_2.reshape(block_len,1)],
                                                                      axis=1)
        bcjr_outputs[2*num_iteration-1,block_idx,:,:] = L_ext_2.reshape(block_len,1)

    end_time = time.time()
    print('[BCJR] The input feature has shape', bcjr_inputs.shape,'the output has shape', bcjr_outputs.shape)
    print('[BCJR] Generating Training Example takes ', end_time - start_time , 'secs')
    print('[BCJR] file id is', identity)

    bcjr_inputs_train   = bcjr_inputs.reshape((-1, block_len, input_feature_num))
    bcjr_outputs_train  = bcjr_outputs.reshape((-1, block_len, 1))

    # Training target: logistic sigmoid of (extrinsic output + a-priori input),
    # i.e. the LLR mapped into (0, 1).
    target_train_select = bcjr_outputs_train[:, :, 0] + bcjr_inputs_train[:, :, 2]
    target_train_select = np.exp(target_train_select) / (1 + np.exp(target_train_select))

    X_input  = bcjr_inputs_train.reshape(-1, block_len, input_feature_num)
    X_target = target_train_select.reshape(-1, block_len, 1)

    return X_input, X_target
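A hypothetical call, again with the 757 turbo codec; with 6 turbo iterations each block contributes 2 * 6 constituent-decoder passes, so the reshaped arrays stack num_block * 2 * num_iteration training examples:

trellis1 = cc.Trellis(np.array([2]), np.array([[7, 5]]), feedback=7)
trellis2 = cc.Trellis(np.array([2]), np.array([[7, 5]]), feedback=7)
interleaver = RandInterlv.RandInterlv(100, 0)

X_input, X_target = generate_bcjr_example(500, 100,
                                          [trellis1, trellis2, interleaver],
                                          num_iteration=6, train_snr_db=0.0)
# X_input:  shape (500 * 12, 100, 3) -> received sys, received parity, a-priori LLR
# X_target: shape (500 * 12, 100, 1) -> sigmoid of (extrinsic + a-priori) LLR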