Example #1
def test_SPMatrix_todense_with_specs():
    ### largest LDPC parity-check matrix (n = 2304)
    H = specs.get_expanded_H_matrix(2304, '1/2')

    H_prime = LDPC.SPMatrix(H).todense()
    if not np.all(H_prime == H):
        raise Exception('LDPC SPMatrix conversion is not reversible')
Example #2
def test_sign():
    # check against dummy cases
    correct_ones = {
        (1, 2, 3): +1,
        (1, -1, 3): -1,
        (1,): +1,
        (-2, -2): +1,
    }

    for vector, correct_sign in correct_ones.items():
        assert LDPC.global_sign(np.array(vector)) == correct_sign
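# A minimal sketch of the behaviour these cases imply, assuming LDPC.global_sign
# simply returns the product of the signs of the entries (the name comes from the
# test above; the body is an assumption, not the library's actual code):
import numpy as np

def global_sign_sketch(v):
    # an even number of strictly negative entries gives +1, an odd number gives -1
    return -1 if np.count_nonzero(np.atleast_1d(v) < 0) % 2 else +1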
Example #3
def test_encoder():
    for n in get_code_lengths():
        for rate in get_code_rates():
            H = sp.csc_matrix(get_expanded_H_matrix(n, rate))

            k = H.shape[1] - H.shape[0]
            u = np.random.choice(a=[0, 1], size=k, p=[1 / 2, 1 / 2])

            enc = LDPC.encoder(H)
            if np.count_nonzero(H.dot(enc(u)) % 2) != 0:
                raise Exception(
                    'Invalid encoding function for n={}, rate={}'.format(
                        n, rate))
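# A minimal sketch of an encoding function that would pass this check, assuming it
# is built on top of LDPC.get_generating_matrix (exercised in the next example);
# the composition is an assumption, not the library's actual implementation:
import numpy as np
import LDPC

def encoder_sketch(H):
    # encode by multiplying the message with a generator matrix G whose columns
    # span the null space of H over GF(2), so that H.dot(G.dot(u)) is zero mod 2
    G = LDPC.get_generating_matrix(H)
    return lambda u: np.asarray(G.dot(u) % 2).ravel().astype(int)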
Example #4
def test_generated_matrix():
    ''' Generated matrix columns must be in the null space of H '''
    for n in get_code_lengths():
        for rate in get_code_rates():
            H = sp.csc_matrix(get_expanded_H_matrix(n, rate))
            G = LDPC.get_generating_matrix(H)
            F = H.dot(G)

            # accessing data directly is the only way to apply modulo 2
            # since it is not natively supported by sp.csc_matrix
            F.data = F.data % 2
            if F.count_nonzero() != 0:
                raise Exception(
                    'Invalid generating matrix for n={}, rate={}'.format(
                        n, rate))
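# As a tiny concrete illustration of the same property, using a hand-built (7, 4)
# Hamming code in systematic form (these matrices are illustrative only, they do
# not come from the library):
import numpy as np

H_toy = np.array([[1, 1, 0, 1, 1, 0, 0],
                  [1, 0, 1, 1, 0, 1, 0],
                  [0, 1, 1, 1, 0, 0, 1]])
G_toy = np.vstack([np.eye(4, dtype=int),   # message bits pass through unchanged
                   H_toy[:, :4]])          # parity rows copy the left block of H_toy
assert not (H_toy.dot(G_toy) % 2).any()    # every column of G_toy lies in the null space of H_toy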
Example #5
def test_SPMatrix_todense():
    ### all-zero matrix
    H = np.zeros((3, 3))

    H_prime = LDPC.SPMatrix(H).todense()
    if not np.all(H_prime == H):
        raise Exception(
            'All-zero SPMatrix conversion is not reversible')

    ### sparse matrix
    H = np.zeros((3, 3))
    H[1, 1] = 1

    H_prime = LDPC.SPMatrix(H).todense()
    if not np.all(H_prime == H):
        raise Exception('Sparse SPMatrix conversion is not reversible')

    ### dense matrix
    H = np.ones((3, 3))
    H[1, 1] = 0

    H_prime = LDPC.SPMatrix(H).todense()
    if not np.all(H_prime == H):
        raise Exception('Dense SPMatrix conversion is not reversible')
Example #6
def test_SPMatrix_get_element():
    H = np.zeros((3, 4))
    H[1, 1] = 1
    M = LDPC.SPMatrix(H)

    ### nonzero element
    if not M[1, 1] == H[1, 1]:
        raise Exception('Get non-zero element failed')

    ### zero element
    if not M[0, 1] == 0:
        raise Exception('Get zero element failed')

    ### zero element before nonzero one
    # this causes an early escape from the column index loop
    if not M[1, 0] == 0:
        raise Exception('Get zero element before the stored non-zero one failed')
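# A minimal sketch of the interface the SPMatrix tests above exercise, assuming a
# coordinate-dictionary representation (the real LDPC.SPMatrix may differ):
import numpy as np

class SPMatrixSketch:
    def __init__(self, dense):
        self.shape = dense.shape
        # store only the non-zero entries, keyed by (row, column)
        self.entries = {(i, j): dense[i, j] for i, j in zip(*np.nonzero(dense))}

    def __getitem__(self, index):
        # any coordinate that was never stored is an implicit zero
        return self.entries.get(index, 0)

    def todense(self):
        out = np.zeros(self.shape)
        for (i, j), value in self.entries.items():
            out[i, j] = value
        return out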
Example #7
import numpy as np
from LDPC import *

H = LDPC('gallager_regular')
res = H.simBEC(0.4, 50, 10000000, 100, 1000, 1000)

print('''Iterations %d BERs: %d FERs: %d BER%% : %f FER%% : %f''' % res)
Example #8
def test_phi():
    for value in np.logspace(-7, 2):
        assert phi_definition(value) == LDPC.phi_tilde(value)
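# For context, a sketch of the reference this test presumably compares against,
# assuming phi_tilde is the usual sum-product check-node function, which is its
# own inverse: phi(x) = -ln(tanh(x / 2)) = ln((e^x + 1) / (e^x - 1))
import numpy as np

def phi_reference(x):
    return -np.log(np.tanh(x / 2))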
Example #9
def step(n, rate, SNRs):
    # extract the numeric rate from the label (dropping the trailing letter, if any)
    R = eval(rate[:3])

    H = specs.get_expanded_H_matrix(n, rate)
    k = n - H.shape[0]

    # setup encoder functions
    enc = LDPC.encoder(H)

    results = []
    for SNR in SNRs:
        # print('n = {}, rate = {}, SNR = {}'.format(n, rate, SNR))

        # compute noise standard deviation, given a binary PAM (M=2)
        # of rate R and SNR = Eb/N0
        # $ \sigma_w = \sqrt{\frac{E_s}{2 R \log_2(M) \Gamma}} $
        # where $ E_s = 1, M=2, \Gamma = \frac{E_b}{N_0} $
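        # e.g. for R = 1/2 and a linear SNR of 4 (about 6 dB),
        # sigma_w = sqrt(1 / (2 * 0.5 * 4)) = 0.5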
        sigma_w = sqrt(1 / (2 * R * SNR))

        # setup decoder functions
        dec = LDPC.decoder(H, sigma_w, max_iterations=MAX_ITERATIONS)

        # count number of tested words and number
        # of iterations needed for each word
        n_words = 0
        n_iterations = []
        errors = []

        # always generate the same uniform messages,
        # in order to obtain smoother SNR-Pe curves
        np.random.seed(0)

        # start the timer; the per-word time is computed in the report
        start = time()

        # proceed until the wanted number of words has been simulated
        while n_words < N_WORDS:
            # print('n_errors = {}, n_failures = {}, n_words = {}'\
            # .format(n_errors, n_failures, n_words), end='\r')

            u = np.random.choice(a=[0, 1], size=k)

            c = enc(u)  ## ENCODE
            d = LDPC.modulate(c)  ## MODULATE
            r = LDPC.channel(d, sigma_w)  ## add CHANNEL noise

            u_prime, current_n_iter = dec(r)  ## DECODE

            ## update PERFORMANCE measures

            n_iterations.append(current_n_iter)
            n_words += 1

            if np.all(u_prime == u):
                errors.append(0)
            else:
                errors.append(1)

        ## REPORT

        current_result = pd.DataFrame({
            'n': n,
            'rate': rate,
            'SNR': SNR,
            'time per word': (time() - start) / n_words,
            # the iterations and errors lists make pandas replicate
            # the scalar fields on every row, as intended
            'iterations': n_iterations,
            'errors': errors,
        })
        results.append(current_result)

    # collect results for the current (n, rate) pair
    summary = pd.concat(results)
    summary.to_csv('results/other/SNRvsPe_n-{}_rate-{}.csv'\
                   .format(n, rate.replace('/', '')), index=None)
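# A possible driver for step(), reusing get_code_lengths and get_code_rates from the
# tests above; MAX_ITERATIONS, N_WORDS and the SNR grid are assumed values, not
# taken from the project:
import numpy as np

MAX_ITERATIONS = 50
N_WORDS = 10000

if __name__ == '__main__':
    SNRs = np.logspace(0, 1, num=10)   # linear Eb/N0 values (1 to 10), not dB
    for n in get_code_lengths():
        for rate in get_code_rates():
            step(n, rate, SNRs)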