    def turbo_compute(idx_x):
        '''
        Viterbi-decode one convolutionally coded block for one SNR point.
        '''
        idx, x = idx_x  # tuple parameters ((idx, x)) are Python 2 only; unpack explicitly
        np.random.seed()  # re-seed so each worker process draws independent bits
        message_bits = np.random.randint(0, 2, args.block_len)

        coded_bits = cc.conv_encode(message_bits, trellis1)
        received = corrupt_signal(coded_bits,
                                  noise_type=args.noise_type,
                                  sigma=test_sigmas[idx],
                                  vv=args.v,
                                  radar_power=args.radar_power,
                                  radar_prob=args.radar_prob,
                                  denoise_thd=args.radar_denoise_thd)

        # For a fair comparison between the (100, 204) convolutional code and the
        # (100, 200) RNN decoder, zero out the received values for the extra termination bits.
        received[-2 * int(M):] = 0.0

        decoded_bits = cc.viterbi_decode(received.astype(float),
                                         trellis1,
                                         tb_depth,
                                         decoding_type='unquantized')
        decoded_bits = decoded_bits[:-int(M)]
        num_bit_errors = hamming_dist(message_bits, decoded_bits)
        return num_bit_errors
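A function with this (idx, x) signature looks designed to be dispatched over many random blocks per SNR point, for example with a multiprocessing pool. The driver below is a hypothetical sketch, not taken from the source: the pool size, num_block, and the BER bookkeeping are illustrative assumptions, and turbo_compute is assumed to see args, trellis1, test_sigmas, M and tb_depth as module-level globals.

from multiprocessing import Pool

num_block = 100                                    # assumed number of random blocks per SNR point
with Pool(processes=4) as pool:
    for idx in range(len(test_sigmas)):
        # Each worker returns the bit-error count for one independently drawn block.
        errors = pool.map(turbo_compute, [(idx, x) for x in range(num_block)])
        ber = sum(errors) / float(args.block_len * num_block)
        print('SNR index', idx, 'BER', ber)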
Example No. 2
    def turbo_compute(idx_x):
        '''
        Turbo-decode one block for one SNR point (args.num_dec_iteration decoder iterations).
        '''
        idx, x = idx_x  # tuple parameters ((idx, x)) are Python 2 only; unpack explicitly
        np.random.seed()
        message_bits = np.random.randint(0, 2, args.block_len)
        [sys, par1, par2] = turbo.turbo_encode(message_bits, trellis1,
                                               trellis2, interleaver)

        sys_r = corrupt_signal(sys,
                               noise_type=args.noise_type,
                               sigma=test_sigmas[idx])
        par1_r = corrupt_signal(par1,
                                noise_type=args.noise_type,
                                sigma=test_sigmas[idx])
        par2_r = corrupt_signal(par2,
                                noise_type=args.noise_type,
                                sigma=test_sigmas[idx])

        decoded_bits = turbo.hazzys_turbo_decode(sys_r,
                                                 par1_r,
                                                 par2_r,
                                                 trellis1,
                                                 test_sigmas[idx]**2,
                                                 args.num_dec_iteration,
                                                 interleaver,
                                                 L_int=None)

        num_bit_errors = hamming_dist(message_bits, decoded_bits)

        return num_bit_errors
Example No. 3
def _compute_branch_metrics(decoding_type, _r_codeword: tuple,
                            _i_codeword_array: tuple):
    r_codeword = np.array(_r_codeword)
    i_codeword_array = np.array(_i_codeword_array)
    if decoding_type == 'hard':
        return hamming_dist(r_codeword.astype(int),
                            i_codeword_array.astype(int))
    elif decoding_type == 'soft':
        neg_LL_0 = np.log(np.exp(r_codeword) +
                          1)  # negative log-likelihood to have received a 0
        neg_LL_1 = neg_LL_0 - r_codeword  # negative log-likelihood to have received a 1
        return np.where(i_codeword_array, neg_LL_1, neg_LL_0).sum()
    elif decoding_type == 'unquantized':
        i_codeword_array = 2 * i_codeword_array - 1
        return euclid_dist(r_codeword, i_codeword_array)
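The helper above can be exercised directly on a toy codeword. A minimal sketch, assuming hamming_dist and euclid_dist come from commpy.utilities; the received values and the expectations in the comments are illustrative, not test vectors from the source:

import numpy as np
from commpy.utilities import hamming_dist, euclid_dist

ideal = (1, 0)                                                      # candidate codeword bits from a trellis output table
print(_compute_branch_metrics('hard', (1, 1), ideal))               # Hamming distance: 1
print(_compute_branch_metrics('soft', (-2.3, 0.4), ideal))          # sum of per-bit negative log-likelihoods
print(_compute_branch_metrics('unquantized', (0.9, -1.1), ideal))   # squared Euclidean distance to the +/-1 mapped codeword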
Example No. 4
    def test_ldpc_bp_decode(self):
        ldpc_design_file = os.path.join(
            self.dir, '../designs/ldpc/gallager/96.33.964.txt')
        ldpc_code_params = get_ldpc_code_params(ldpc_design_file)

        for n_blocks in (1, 2):
            N = 96 * n_blocks
            rate = 0.5
            Es = 1.0
            snr_list = array([2.0, 2.5])
            niters = 10000000
            tx_codeword = zeros(N, int)
            ldpcbp_iters = 100

            for decoder_algorithm in ('MSA', 'SPA'):
                fer_array_ref = array((.2, .1))
                fer_array_test = zeros(len(snr_list))

                for idx, ebno in enumerate(snr_list):

                    noise_std = 1 / sqrt((10**(ebno / 10.0)) * rate * 2 / Es)
                    fer_cnt_bp = 0

                    for iter_cnt in range(niters):

                        awgn_array = noise_std * randn(N)
                        rx_word = 1 - (2 * tx_codeword) + awgn_array
                        rx_llrs = 2.0 * rx_word / (noise_std**2)

                        [dec_word,
                         _] = ldpc_bp_decode(rx_llrs, ldpc_code_params,
                                             decoder_algorithm, ldpcbp_iters)

                        if hamming_dist(tx_codeword, dec_word.reshape(-1)):
                            fer_cnt_bp += 1

                        if fer_cnt_bp >= 50:
                            fer_array_test[idx] = float(fer_cnt_bp) / (
                                iter_cnt + 1) / n_blocks
                            break

                assert_allclose(fer_array_test,
                                fer_array_ref,
                                rtol=.6,
                                atol=0,
                                err_msg=decoder_algorithm +
                                ' algorithm does not perform as expected.')
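The rx_llrs line in this test is the standard channel LLR for BPSK over AWGN: with x = 1 - 2c transmitted and y = x + n, n ~ N(0, sigma^2), L(y) = ln[p(y|c=0) / p(y|c=1)] = 2y / sigma^2, and noise_std is sigma = sqrt(Es / (2 * rate * 10**(EbN0_dB / 10))). A small standalone check with illustrative values (not part of the test suite):

import numpy as np

ebno_db, rate, Es = 2.0, 0.5, 1.0
sigma = np.sqrt(Es / (2 * rate * 10 ** (ebno_db / 10.0)))   # same value as noise_std above

y = 0.7                                                     # an arbitrary received sample
llr_formula = 2.0 * y / sigma ** 2                          # expression used in the tests
# Direct evaluation of ln p(y|c=0) - ln p(y|c=1), with +1 transmitted for c=0 and -1 for c=1
llr_direct = ((y + 1) ** 2 - (y - 1) ** 2) / (2 * sigma ** 2)
assert np.isclose(llr_formula, llr_direct)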
Example No. 5
    def test_ldpc_bp_decode(self):
        ldpc_design_file = os.path.join(
            self.dir, '../designs/ldpc/gallager/96.33.964.txt')
        ldpc_code_params = get_ldpc_code_params(ldpc_design_file)

        for n_blocks in (1, 2):
            N = 96 * n_blocks
            rate = 0.5
            Es = 1.0
            snr_list = array([2.0, 2.5])
            niters = 10000000
            tx_codeword = zeros(N, int)
            ldpcbp_iters = 100

            fer_array_ref = array([200.0 / 1000, 200.0 / 2000])
            fer_array_test = zeros(len(snr_list))

            for idx, ebno in enumerate(snr_list):

                noise_std = 1 / sqrt((10**(ebno / 10.0)) * rate * 2 / Es)
                fer_cnt_bp = 0

                for iter_cnt in range(niters):

                    awgn_array = noise_std * randn(N)
                    rx_word = 1 - (2 * tx_codeword) + awgn_array
                    rx_llrs = 2.0 * rx_word / (noise_std**2)

                    [dec_word,
                     out_llrs] = ldpc_bp_decode(rx_llrs, ldpc_code_params,
                                                'SPA', ldpcbp_iters)

                    num_bit_errors = hamming_dist(tx_codeword,
                                                  dec_word.reshape(-1))
                    if num_bit_errors > 0:
                        fer_cnt_bp += 1

                    if fer_cnt_bp >= 200:
                        fer_array_test[idx] = float(fer_cnt_bp) / (
                            iter_cnt + 1) / n_blocks
                        break

            assert_allclose(fer_array_test, fer_array_ref, rtol=.5, atol=0)
Example No. 6
    def test_ldpc_bp_decode(self):
        N = 96
        k = 48
        rate = 0.5
        Es = 1.0
        snr_list = array([2.0, 2.5])
        niters = 10000000
        tx_codeword = zeros(N, int)
        ldpcbp_iters = 100

        fer_array_ref = array([200.0 / 1000, 200.0 / 2000])
        fer_array_test = zeros(len(snr_list))

        for idx, ebno in enumerate(snr_list):

            noise_std = 1 / sqrt((10**(ebno / 10.0)) * rate * 2 / Es)
            fer_cnt_bp = 0

            for iter_cnt in range(niters):

                awgn_array = noise_std * randn(N)
                rx_word = 1 - (2 * tx_codeword) + awgn_array
                rx_llrs = 2.0 * rx_word / (noise_std**2)

                [dec_word,
                 out_llrs] = ldpc_bp_decode(rx_llrs, self.ldpc_code_params,
                                            'SPA', ldpcbp_iters)

                num_bit_errors = hamming_dist(tx_codeword, dec_word)
                if num_bit_errors > 0:
                    fer_cnt_bp += 1

                if fer_cnt_bp >= 200:
                    fer_array_test[idx] = float(fer_cnt_bp) / (iter_cnt + 1)
                    break

        assert_allclose(fer_array_test, fer_array_ref, rtol=.5, atol=0)
Example No. 7
    def test_ldpc_bp_decode(self):
        N = 96
        k = 48
        rate = 0.5
        Es = 1.0
        snr_list = array([2.0, 2.5])
        niters = 10000000
        tx_codeword = zeros(N, int)
        ldpcbp_iters = 100

        fer_array_ref = array([200.0/1000, 200.0/2000])
        fer_array_test = zeros(len(snr_list))

        for idx, ebno in enumerate(snr_list):

            noise_std = 1/sqrt((10**(ebno/10.0))*rate*2/Es)
            fer_cnt_bp = 0

            for iter_cnt in range(niters):

                awgn_array = noise_std * randn(N)
                rx_word = 1-(2*tx_codeword) + awgn_array
                rx_llrs = 2.0*rx_word/(noise_std**2)

                [dec_word, out_llrs] = ldpc_bp_decode(rx_llrs, self.ldpc_code_params, 'SPA',
                                                      ldpcbp_iters)

                num_bit_errors = hamming_dist(tx_codeword, dec_word)
                if num_bit_errors > 0:
                    fer_cnt_bp += 1

                if fer_cnt_bp >= 200:
                    fer_array_test[idx] = float(fer_cnt_bp)/(iter_cnt+1)
                    break

        assert_allclose(fer_array_test, fer_array_ref, rtol=2e-1, atol=0)
Example No. 8
def _acs_traceback(r_codeword, trellis, decoding_type,
                   path_metrics, paths, decoded_symbols,
                   decoded_bits, tb_count, t, count,
                   tb_depth, current_number_states):

    #cdef int state_num, i, j, number_previous_states, previous_state, \
    #        previous_input, i_codeword, number_found, min_idx, \
    #        current_state, dec_symbol

    k = trellis.k
    n = trellis.n
    number_states = trellis.number_states
    number_inputs = trellis.number_inputs

    branch_metric = 0.0

    next_state_table = trellis.next_state_table
    output_table = trellis.output_table
    pmetrics = np.empty(number_inputs)
    i_codeword_array = np.empty(n, 'int')
    index_array = np.empty([number_states, 2], 'int')
    decoded_bitarray = np.empty(k, 'int')

    # Loop over all the current states (Time instant: t)
    for state_num in range(current_number_states):

        # Using the next state table find the previous states and inputs
        # leading into the current state (Trellis)
        number_found = _where_c(next_state_table, number_states, number_inputs, state_num, index_array)

        # Loop over all the previous states (Time instant: t-1)
        for i in range(number_found):

            previous_state = index_array[i, 0]
            previous_input = index_array[i, 1]

            # Using the output table, find the ideal codeword
            i_codeword = output_table[previous_state, previous_input]
            #dec2bitarray_c(i_codeword, n, i_codeword_array)
            i_codeword_array = dec2bitarray(i_codeword, n)

            # Compute Branch Metrics
            if decoding_type == 'hard':
                #branch_metric = hamming_dist_c(r_codeword.astype(int), i_codeword_array.astype(int), n)
                branch_metric = hamming_dist(r_codeword.astype(int), i_codeword_array.astype(int))
            elif decoding_type == 'soft':
                pass
            elif decoding_type == 'unquantized':
                i_codeword_array = 2*i_codeword_array - 1
                branch_metric = euclid_dist(r_codeword, i_codeword_array)
            else:
                pass

            # ADD operation: Add the branch metric to the
            # accumulated path metric and store it in the temporary array
            pmetrics[i] = path_metrics[previous_state, 0] + branch_metric

        # COMPARE and SELECT operations
        # Compare and Select the minimum accumulated path metric
        path_metrics[state_num, 1] = pmetrics.min()

        # Store the previous state corresponding to the minimum
        # accumulated path metric
        min_idx = pmetrics.argmin()
        paths[state_num, tb_count] = index_array[min_idx, 0]

        # Store the previous input corresponding to the minimum
        # accumulated path metric
        decoded_symbols[state_num, tb_count] = index_array[min_idx, 1]

    if t >= tb_depth - 1:
        current_state = path_metrics[:,1].argmin()

        # Traceback Loop
        for j in reversed(range(1, tb_depth)):

            dec_symbol = decoded_symbols[current_state, j]
            previous_state = paths[current_state, j]
            decoded_bitarray = dec2bitarray(dec_symbol, k)
            decoded_bits[(t-tb_depth-1)+(j+1)*k+count:(t-tb_depth-1)+(j+2)*k+count] =  \
                    decoded_bitarray
            current_state = previous_state

        paths[:,0:tb_depth-1] = paths[:,1:]
        decoded_symbols[:,0:tb_depth-1] = decoded_symbols[:,1:]
Example No. 9
def _acs_traceback(r_codeword, trellis, decoding_type,
                   path_metrics, paths, decoded_symbols,
                   decoded_bits, tb_count, t, count,
                   tb_depth, current_number_states):

    k = trellis.k
    n = trellis.n
    number_states = trellis.number_states
    number_inputs = trellis.number_inputs

    branch_metric = 0.0

    next_state_table = trellis.next_state_table
    output_table = trellis.output_table
    pmetrics = np.empty(number_inputs)
    index_array = np.empty([number_states, 2], 'int')

    # Loop over all the current states (Time instant: t)
    for state_num in range(current_number_states):

        # Using the next state table find the previous states and inputs
        # leading into the current state (Trellis)
        number_found = _where_c(next_state_table, number_states, number_inputs, state_num, index_array)

        # Loop over all the previous states (Time instant: t-1)
        for i in range(number_found):

            previous_state = index_array[i, 0]
            previous_input = index_array[i, 1]

            # Using the output table, find the ideal codeword
            i_codeword = output_table[previous_state, previous_input]
            i_codeword_array = dec2bitarray(i_codeword, n)

            # Compute Branch Metrics
            if decoding_type == 'hard':
                branch_metric = hamming_dist(r_codeword.astype(int), i_codeword_array.astype(int))
            elif decoding_type == 'soft':
                neg_LL_0 = np.log(np.exp(r_codeword) + 1)  # negative log-likelihood to have received a 0
                neg_LL_1 = neg_LL_0 - r_codeword  # negative log-likelihood to have received a 1
                branch_metric = np.where(i_codeword_array, neg_LL_1, neg_LL_0).sum()
            elif decoding_type == 'unquantized':
                i_codeword_array = 2*i_codeword_array - 1
                branch_metric = euclid_dist(r_codeword, i_codeword_array)

            # ADD operation: Add the branch metric to the
            # accumulated path metric and store it in the temporary array
            pmetrics[i] = path_metrics[previous_state, 0] + branch_metric

        # COMPARE and SELECT operations
        # Compare and Select the minimum accumulated path metric
        path_metrics[state_num, 1] = pmetrics.min()

        # Store the previous state corresponding to the minimum
        # accumulated path metric
        min_idx = pmetrics.argmin()
        paths[state_num, tb_count] = index_array[min_idx, 0]

        # Store the previous input corresponding to the minimum
        # accumulated path metric
        decoded_symbols[state_num, tb_count] = index_array[min_idx, 1]

    if t >= tb_depth - 1:
        current_state = path_metrics[:,1].argmin()

        # Traceback Loop
        for j in reversed(range(1, tb_depth)):

            dec_symbol = decoded_symbols[current_state, j]
            previous_state = paths[current_state, j]
            decoded_bitarray = dec2bitarray(dec_symbol, k)
            decoded_bits[t - tb_depth + 1 + (j - 1) * k + count:t - tb_depth + 1 + j * k + count] = decoded_bitarray
            current_state = previous_state

        paths[:,0:tb_depth-1] = paths[:,1:]
        decoded_symbols[:,0:tb_depth-1] = decoded_symbols[:,1:]
Example No. 10
def _acs_traceback(r_codeword, trellis, decoding_type, path_metrics, paths,
                   decoded_symbols, decoded_bits, tb_count, t, count, tb_depth,
                   current_number_states):

    #cdef int state_num, i, j, number_previous_states, previous_state, \
    #        previous_input, i_codeword, number_found, min_idx, \
    #        current_state, dec_symbol

    k = trellis.k
    n = trellis.n
    number_states = trellis.number_states
    number_inputs = trellis.number_inputs

    branch_metric = 0.0

    next_state_table = trellis.next_state_table
    output_table = trellis.output_table
    pmetrics = np.empty(number_inputs)
    i_codeword_array = np.empty(n, 'int')
    index_array = np.empty([number_states, 2], 'int')
    decoded_bitarray = np.empty(k, 'int')

    # Loop over all the current states (Time instant: t)
    for state_num in range(current_number_states):

        # Using the next state table find the previous states and inputs
        # leading into the current state (Trellis)
        number_found = _where_c(next_state_table, number_states, number_inputs,
                                state_num, index_array)

        # Loop over all the previous states (Time instant: t-1)
        for i in range(number_found):

            previous_state = index_array[i, 0]
            previous_input = index_array[i, 1]

            # Using the output table, find the ideal codeword
            i_codeword = output_table[previous_state, previous_input]
            #dec2bitarray_c(i_codeword, n, i_codeword_array)
            i_codeword_array = dec2bitarray(i_codeword, n)

            # Compute Branch Metrics
            if decoding_type == 'hard':
                #branch_metric = hamming_dist_c(r_codeword.astype(int), i_codeword_array.astype(int), n)
                branch_metric = hamming_dist(r_codeword.astype(int),
                                             i_codeword_array.astype(int))
            elif decoding_type == 'soft':
                pass
            elif decoding_type == 'unquantized':
                i_codeword_array = 2 * i_codeword_array - 1
                branch_metric = euclid_dist(r_codeword, i_codeword_array)
            else:
                pass

            # print("branch_metric: ")
            # print(branch_metric)
            # ADD operation: Add the branch metric to the
            # accumulated path metric and store it in the temporary array
            pmetrics[i] = path_metrics[previous_state, 0] + branch_metric

        # COMPARE and SELECT operations
        # Compare and Select the minimum accumulated path metric
        path_metrics[state_num, 1] = pmetrics.min()

        # Store the previous state corresponding to the minimum
        # accumulated path metric
        min_idx = pmetrics.argmin()
        paths[state_num, tb_count] = index_array[min_idx, 0]

        # Store the previous input corresponding to the minimum
        # accumulated path metric
        decoded_symbols[state_num, tb_count] = index_array[min_idx, 1]

    if t >= tb_depth - 1:
        current_state = path_metrics[:, 1].argmin()

        # Traceback Loop
        for j in reversed(range(1, tb_depth)):

            dec_symbol = decoded_symbols[current_state, j]
            previous_state = paths[current_state, j]
            decoded_bitarray = dec2bitarray(dec_symbol, k)
            decoded_bits[(t-tb_depth-1)+(j+1)*k+count:(t-tb_depth-1)+(j+2)*k+count] =  \
                    decoded_bitarray
            current_state = previous_state

        paths[:, 0:tb_depth - 1] = paths[:, 1:]
        decoded_symbols[:, 0:tb_depth - 1] = decoded_symbols[:, 1:]
Example No. 11
    fer_array_ref = array([200.0 / 1000, 200.0 / 2000])
    fer_array_test = zeros(len(snr_list))

    for idx, ebno in enumerate(snr_list):

        noise_std = 1 / sqrt((10**(ebno / 10.0)) * rate * 2 / Es)
        fer_cnt_bp = 0

        for iter_cnt in range(niters):

            awgn_array = noise_std * randn(N)
            rx_word = 1 - (2 * tx_codeword) + awgn_array
            rx_llrs = 2.0 * rx_word / (noise_std**2)

            print(tx_codeword)  # Python 3 print function; the original used a Python 2 print statement

            [dec_word, out_llrs] = ldpc_bp_decode(rx_llrs, ldpc_code_params,
                                                  'SPA', ldpcbp_iters)

            num_bit_errors = hamming_dist(tx_codeword, dec_word)

            if num_bit_errors > 0:
                fer_cnt_bp += 1

            if fer_cnt_bp >= 200:
                fer_array_test[idx] = float(fer_cnt_bp) / (iter_cnt + 1)
                break

    assert_allclose(fer_array_test, fer_array_ref, rtol=2e-1, atol=0)
Example No. 12
        message_bits = np.random.randint(0, 2, 1000)

        # Encode message bits
        coded_bits = cc.conv_encode(message_bits, trellis)

        # Introduce bit errors (channel)
        coded_bits[np.random.randint(0, 1000)] = 0
        coded_bits[np.random.randint(0, 1000)] = 0
        coded_bits[np.random.randint(0, 1000)] = 1
        coded_bits[np.random.randint(0, 1000)] = 1

        # Decode the received bits
        decoded_bits = cc.viterbi_decode(coded_bits.astype(float), trellis,
                                         tb_depth)

        num_bit_errors = util.hamming_dist(message_bits,
                                           decoded_bits[:len(message_bits)])

        if num_bit_errors != 0:
            print(num_bit_errors, "Bit Errors found!")
        elif i == 9:
            print("No Bit Errors :)")

# ==================================================================================================
# Complete example using Commpy features and compare hard and soft demodulation. Example with code 1
# ==================================================================================================

# Modem : QPSK
modem = mod.QAMModem(4)

# AWGN channel
channels = chan.SISOFlatChannel(None, (1 + 0j, 0j))
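The section header above promises a comparison of hard and soft demodulation; a minimal sketch of that idea using the objects just created is shown below. It assumes commpy's QAMModem.modulate/demodulate and SISOFlatChannel.set_SNR_dB/propagate/noise_std, plus the mod/chan/util aliases used earlier; the SNR and block length are illustrative choices, not the author's script:

import numpy as np

msg_bits = np.random.randint(0, 2, 1000)               # illustrative block length
tx_symbols = modem.modulate(msg_bits)                  # QPSK symbols (2 bits per symbol)

channels.set_SNR_dB(5.0, 1.0, modem.Es)                # illustrative SNR; code rate 1 since no code is applied here
rx_symbols = channels.propagate(tx_symbols)            # flat channel plus complex AWGN

hard_bits = modem.demodulate(rx_symbols, 'hard')
soft_llrs = modem.demodulate(rx_symbols, 'soft', channels.noise_std ** 2)

# Hard decisions can be scored directly; the soft LLRs would normally feed a soft-input channel decoder.
print(util.hamming_dist(msg_bits, hard_bits), "hard-demodulation bit errors")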