Example #1
def polar_codes_mapping(metric, N=8, k=4, nb_pkts = 100, graph = 'BER',channel='BSC'):
  print('-------------------Polar Codes + Mapping-----------------------------')
  cont = 2
  if channel == 'AWGN':
    design_parameter = np.linspace(0.0, 10, cont)
  else:
    design_parameter = np.linspace(0.0001, 0.1, cont)

  for key in [0.5]:
    e_design = 0.1
    # print('===============Design================',key)
    G,infoBits = polar.polar_generator_matrix(64, k, channel, e_design)

    k = len(G)
    Nt = len(G[1])
    t = int(Nt /N)
    U_k = utils.symbols_generator(k)  # all possible messages
    X_m = utils.symbols_generator(t)  # all possible symbol sequences
    C = utils.matrix_codes(U_k, k, G, Nt)

    nx = 2**t*key
    # print('nx', nx, 't', t)
    x = utils.mapping2(C, X_m, t, nx)
    N = len(x[1])
    if graph == 'BLER':
      metric[f"P({e_design})+M({key})"] = utils.block_error_probability(N,k,x,e0,e1)
    else:
      metric[f"P({e_design})+M({key})"] = utils.bit_error_rate(k,x,nb_pkts,e0,e1)
def integrated_scheme(metric,
                      N=8,
                      k=4,
                      nb_pkts=100,
                      graph='BER',
                      channel='BSC'):
    print(
        '-------------------Integrated Scheme Code-----------------------------'
    )
    for key in [0.5]:
        G, infoBits = polar.polar_generator_matrix(64, k, channel, 0.1)
        k = len(G)
        Nt = len(G[1])
        t = int(Nt / N)

        U_k = utils.symbols_generator(k)  # all possible messages
        C = utils.integrated_function(infoBits, U_k, k, Nt, -1)

        X_m = utils.symbols_generator(t)  # all possible symbol sequences
        nx = 2**t * key
        # print('nx', nx, 't', t)
        x = utils.mapping(C, X_m, t, nx)
        N = len(x[1])
        if graph == 'BLER':
            metric[f"Int_P({key})"] = utils.block_error_probability(
                N, k, C, e0, e1)
        else:
            metric[f"Int_P({key})"] = utils.bit_error_rate(
                k, x, nb_pkts, e0, e1)
def bit_error_rate_NN(N, k, C, Nb_sequences, e0, e1, channel='BSC'):
    print(
        '*******************NN-Decoder********************************************'
    )
    # model_decoder = keras.models.load_model("autoencoder/model_decoder_bsc_16_8_array.h5")
    # model_decoder = keras.models.load_model("./model/model_decoder_16_4_std.h5")
    print("Decoder Loaded from disk, ready to be used")
    U_k = utils.symbols_generator(k)  # all possible messages
    ber = {}
    bler = {}
    count = 0
    Nb_iter_max = 10
    Nb_words = int(Nb_sequences / Nb_iter_max)

    for ep0 in e0:
        ber_row = []
        bler_row = []

        for ep1 in (ep1 for ep1 in e1 if ep1 + ep0 <= 1 and ep1 <= ep0):
            if ep1 == ep0 or ep1 == e0[0]:
                N_errors = 0
                N_errors_bler = 0
                N_iter = 0
                while N_iter < Nb_iter_max:  # and N_errors < N_errors_mini:
                    N_iter += 1

                    idx = np.random.randint(0, len(U_k),
                                            size=(1, Nb_words)).tolist()[0]  # high bound is exclusive
                    u = [U_k[a] for a in idx]
                    x = [C[a] for a in idx]  # coded bits
                    y_bac = [utils.BAC_channel(xi, ep0, ep1)
                             for xi in x]  # received symbols

                    yh = np.reshape(y_bac, [Nb_words, N]).astype(np.float64)
                    u_nn = [
                        U_k[idy] for idy in np.argmax(model_decoder.predict(yh), 1)
                    ]  #  NN Detector

                    for i in range(len(u)):
                        N_errors += np.sum(
                            np.abs(np.array(u[i]) - np.array(u_nn[i]))
                        )  # bit error rate compute with NN
                        N_errors_bler += np.sum(1.0 * (u[i] != u_nn[i]))
                ber_row.append(
                    N_errors /
                    (k * 1.0 * Nb_sequences))  # bit error rate compute with NN
                bler_row.append(
                    N_errors_bler /
                    (1.0 * Nb_sequences))  # block error rate compute with NN

        ber[ep0] = ber_row
        bler[ep0] = bler_row
        print("{:.2f}".format(ep0), '|', ["{:.4f}".format(a) for a in ber_row])
        print("{:.2f}".format(ep0), '|',
              ["{:.4f}".format(a) for a in bler_row])
        count += 1
        print("{:.3f}".format(count / len(e0) * 100), '% completed ')
    return ber, bler
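The error-rate loops above rely on utils.BAC_channel to model the binary asymmetric channel, whose implementation is not shown here. The sketch below is a minimal stand-in under the usual convention (a transmitted 0 flips with probability ep0, a transmitted 1 with probability ep1); the repo's function may differ in detail.

import numpy as np

def bac_channel_sketch(x, ep0, ep1, rng=None):
    # Assumed convention: 0 -> 1 with probability ep0, 1 -> 0 with probability ep1.
    # ep0 == ep1 reduces to the binary symmetric channel (BSC).
    rng = np.random.default_rng() if rng is None else rng
    x = np.asarray(x)
    flip = np.where(x == 0, rng.random(x.shape) < ep0, rng.random(x.shape) < ep1)
    return np.where(flip, 1 - x, x)
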
Example #4
def uncoded(metric, k=4, nb_pkts = 100, graph = 'BER'):
  print('-------------------Uncoded-----------------------------')
  key = f"Uncode"
  N = k
  U_k = utils.symbols_generator(k)  # all possible messages
  if graph == 'BLER':
    metric[key] = utils.block_error_probability(N,k,U_k,e0,e1)
  else:
    metric[key] = utils.bit_error_rate(k,U_k,nb_pkts,e0,e1,False)
Example #5
def linear_codes_mapping(metric, N=8, k=4, nb_pkts = 100, graph = 'BER'):
  print('-------------------Linear Code + Mapping-----------------------------')
  G = mat_gen.matrix_codes(64,k,'linear')
  if G!= []:
    for key in [0.55]:
      k = len(G)
      Nt = len(G[1])
      t = int(Nt/N)
      U_k= utils.symbols_generator(k)  # all possible messages
      X_m = utils.symbols_generator(t)  # all possible symbol sequences
      C = utils.matrix_codes(U_k, k, G, Nt)
      nx = 2**t*key
      # print('nx', nx, 't', t)
      x = utils.mapping(C, X_m, t, nx) #codebook after mapping
      N = len(x[1])

      if graph == 'BLER':
        metric[f"L+M({key})"] = utils.block_error_probability(N,k,x,e0,e1)
      else:
        metric[f"L+M({key})"] = utils.bit_error_rate(k,x,nb_pkts,e0,e1)
Example #6
def polar_codes(metric, N=8, k=4, nb_pkts = 100, graph = 'BER',channel='BSC'):
  print('-------------------Polar Code-----------------------------')
  for key in [0.1]:
    G, infoBits = polar.polar_generator_matrix(N,k, channel, key)
    k = len(G)
    N = len(G[1])
    U_k = utils.symbols_generator(k)  # all possible messages
    C = utils.matrix_codes(U_k, k, G, N)
    # print('Polar codebook', np.array(C))
    if graph == 'BLER':
      metric[f"Polar({key})"] = utils.block_error_probability(N,k,C,e0,e1)
    else:
      metric[f"Polar({key})"] = utils.bit_error_rate(k,C,nb_pkts,e0,e1)
    print(metric[f"Polar({key})"])
Example #7
def polar_codes_NN(metric, N=8, k=4, nb_pkts = 100, graph = 'BER',channel='BSC'):
  print('-------------------Polar Code + NN decoder -----------------------------')
  key = 'NN_dec'
  G,infoBits = polar.polar_generator_matrix(N, k, channel, 0.1)
  # print('G = ', np.array(G))
  k = len(G)
  N = len(G[1])
  U_k = utils.symbols_generator(k)  # all possible messages
  C = utils.matrix_codes(U_k, k, G, N)
  print('k ', k, 'N ', N)
  if graph == 'BLER':
    metric[key] = utils.block_error_probability(N, k, C, e0, e1)
  else:
    metric[key] = utils.bit_error_rate_NN(N, k, C, nb_pkts, e0, e1, channel)
Example #8
def bch_codes(metric, N=8, k=4, nb_pkts = 100, graph = 'BER'):
  print('-------------------BCH Code-----------------------------')
  G = mat_gen.matrix_codes(N, k, 'bch')
  if G != []:
    for key in [0]:
      # print('G = ', np.array(G))
      k = len(G)
      N = len(G[1])
      U_k = utils.symbols_generator(k)  # all possible messages
      C = utils.matrix_codes(U_k, k, G, N)
      print('k ',k,'N ',N)
      if graph == 'BLER':
        metric[f"BCH({key})"] = utils.block_error_probability(N,k,C,e0,e1)
      else:
        metric[f"BCH({key})"] = utils.bit_error_rate(k,C,nb_pkts,e0,e1)
def linear_codes(metric, N=8, k=4, nb_pkts=100, graph='BER'):
    print('-------------------Linear Code-----------------------------')
    for key in ['BKLC']:
        print(key)
        G = mat_gen.matrix_codes(N, k, key)
        if G != []:
            # print('G = ', np.array(G))
            k = len(G)
            N = len(G[1])
            U_k = utils.symbols_generator(k)  # all possible messages
            C = utils.matrix_codes(U_k, k, G, N)
            print(np.array(C))
            print('k ', k, 'N ', N)
            if graph == 'BLER':
                metric[key] = utils.block_error_probability(N, k, C, e0, e1)
            else:
                metric[key] = utils.bit_error_rate(k, C, nb_pkts, e0, e1)
Example #10
def bit_error_rate_NN(N, k, C, N_iter_max, e0, e1, channel='BSC'):
    print(
        '******************* NN-Decoder ********************************************',
        channel)
    N_errors_mini = 100
    U_k = utils.symbols_generator(k)  # all possible messages
    ber = {}
    count = 0
    for ep0 in e0:
        ber_row = []
        interval = np.zeros(4)
        # interval[int(ep1*4)] = 1.0
        interval[int(3 * np.log10(ep0) + 4) if ep0 >= 0.1 else 0] = 1.0
        for ep1 in (ep1 for ep1 in e1 if ep1 + ep0 <= 1 and ep1 <= ep0):
            if ep1 == ep0 or ep1 == e0[0]:
                N_errors = 0
                N_iter = 0
                while N_iter < N_iter_max:  # and N_errors < N_errors_mini:
                    N_iter += 1

                    idx = np.random.randint(0, len(U_k))  # high bound is exclusive
                    u = U_k[idx]  # Bits to be sent
                    x = C[idx]  # coded bits
                    y_bac = utils.BAC_channel(x, ep0, ep1)  # received symbols
                    yh = np.reshape(
                        np.concatenate((y_bac, interval), axis=0), [1, N + 4]
                    )  #if channel == 'BAC'  else np.reshape(y_bac, [1, N]).astype(np.float64)
                    u_nn = U_k[np.argmax(model_decoder(yh))]  #  NN Detector

                    N_errors += utils.NbOfErrors(
                        u, u_nn)  # bit error rate compute with NN
                ber_tmp = N_errors / (k * 1.0 * N_iter
                                      )  # bit error rate compute with NN
                ber_row.append(ber_tmp)

        ber[ep0] = ber_row
        print("{:.2f}".format(ep0), '|', ["{:.4f}".format(a) for a in ber_row])
        count += 1
        print("{:.3f}".format(count / len(e0) * 100), '% completed ')
    return ber
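The one-hot interval vector encodes roughly which decade ep0 falls into before it is appended to the decoder input. A small worked example of the index arithmetic, with illustration values chosen here rather than taken from the repo:

import numpy as np

for ep0 in [0.05, 0.1, 0.3, 0.5, 0.999]:
    idx = int(3 * np.log10(ep0) + 4) if ep0 >= 0.1 else 0
    print(f"ep0 = {ep0:<5} -> interval index {idx}")
# ep0 < 0.1 maps to bucket 0; ep0 in [0.1, 1) maps to buckets 1..3, which is
# presumably why the last grid point is pushed just below 1 (e0[-1] -= 0.001).
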
else:
  rep = 128
  epoch_pretrain = 100
  e0 = np.concatenate((np.array([0.001]), np.linspace(0.01, 0.1, 10, endpoint=False), np.linspace(0.1, 1, 15)), axis=0)
  verbose = 2

e0[-1] -= 0.001  # keep the largest epsilon_0 strictly below 1
e1 = [t for t in e0 if t <= 0.5]

#Parameters
MAP_test = False
pretrain_epsilon = 0.05
train_epsilon_1 = 0.0001       # unused for the BSC; epsilon_1 for the BAC

#Training Data set
u_k = utils.symbols_generator(k)
U_k = np.tile(u_k,(rep,1))
In = utils.symbols_generator(N)[0:2**k] # List of outputs of NN
In = np.tile(In,(rep,1)).astype(float)

#Hyper parameters
batch_size = 256
initializer = tf.keras.initializers.Orthogonal()
loss = 'mse' #'categorical_crossentropy'  #'kl_divergence'
activation = 'Mish' # Activation function for hidden layers

lr = 0.01
decay = 0.999
# exponential learning-rate decay: lr is multiplied by `decay` each epoch
cbks = [LearningRateScheduler(lambda epoch: lr * decay ** epoch)]
optimizer = keras.optimizers.Nadam(lr=lr)
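The hyper-parameters above are declared before the decoder model itself, which is not part of this snippet. A minimal sketch of how they could be wired into a Keras model follows, assuming a small dense decoder: the layer sizes, the input/output shapes, the substitution of 'relu' for the custom 'Mish' activation, and the x_train/y_train placeholders are all assumptions, not the repo's architecture (N and k also come from earlier in the full script).

from tensorflow import keras

model_decoder = keras.Sequential([
    keras.layers.Dense(128, activation='relu', input_shape=(N,),
                       kernel_initializer=initializer),
    keras.layers.Dense(2**k, activation='softmax'),
])
model_decoder.compile(optimizer=optimizer, loss=loss)
# Training would then reuse batch_size, cbks and verbose defined above, e.g.
# model_decoder.fit(x_train, y_train, batch_size=batch_size, callbacks=cbks, verbose=verbose)
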
Example #12
       [0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
       [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0],
       [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1],
       [0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0],
       [0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1],
       [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1]]

k = len(G)      # number of bits to send (message length)
N = len(G[1])   # codeword length

rep = 500
train_epsilon = 0.07
S = 3

################### Coding
U_k = utils.symbols_generator(k)  # all possible messages
cn = utils.matrix_codes(U_k, k, G, N)
# print('codebook',np.array(cn))
print('size C: ',len(cn), 'size Cn: ', len(cn[0]))
c = np.array(cn)
print(type(c[0]))

In = np.eye(2**k) # List of outputs of NN
c = np.tile(c,(rep,1))
In = np.tile(In,(rep,1))
print('size C: ',len(c), 'size Cn: ', len(c[0]))
batch_size = len(In)

########### Neural Network Generator ###################
optimizer = 'adam'
loss = 'categorical_crossentropy'                # or 'mse'
#Parameters
channel = sys.argv[1]
N = int(sys.argv[2])
k = int(sys.argv[3])
epoch = int(sys.argv[4])

rep = 80
train_epsilon = 0.2
S = 5
rounding = True
MAP_test = False

In = np.eye(2**k)  # List of outputs of NN
In = np.tile(In, (rep, 1))
batch_size = 256
u_k = utils.symbols_generator(k)
# print(u_k)
U_k = np.tile(u_k, (rep, 1))

# Interval = np.reshape(np.tile(np.eye(4),int(len(In)/4)), (len(In), 4))
Interval = []
idx = [0.80, 0.10, 0.08, 0.02]
# idx =[0.25,0.25,0.25,0.25] # for proofs of BSC Noise layer
for i in range(4):
    for j in range(round(len(In) * idx[i])):
        Interval.append(np.eye(4)[i].tolist())
Interval = np.reshape(Interval, (len(In), 4))
# print('Interval \n',Interval)
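The loop above fills Interval so that roughly 80/10/8/2 % of the training rows carry the one-hot vectors for buckets 0..3. An equivalent construction with np.repeat is sketched below for a stand-in row count; this is an assumed rewrite, not code from the repo.

import numpy as np

n_rows = 1280                                         # stand-in for len(In)
counts = [round(n_rows * p) for p in [0.80, 0.10, 0.08, 0.02]]
Interval_alt = np.repeat(np.eye(4), counts, axis=0)   # bucket i repeated counts[i] times
assert Interval_alt.shape == (sum(counts), 4)
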
####################################################################################################
########### Neural Network Generator ###################
# LearningRate = 0.001
    e0 = np.concatenate(
        (np.array([0.001]), np.linspace(
            0.01, 0.1, 10, endpoint=False), np.linspace(0.1, 1, 15)),
        axis=0)
    verbose = 2

e0[-1] -= 0.001  # keep the largest epsilon_0 strictly below 1
e1 = [t for t in e0 if t <= 0.5]

#Parameters
MAP_test = False
pretrain = False
pretrain_epsilon = 0.05
train_epsilon_1 = 0.0001  # unused for the BSC; epsilon_1 for the BAC

#Training Data set
u_k = np.array(utils.symbols_generator(k))
U_k = np.tile(u_k, (rep, 1))
One_hot = np.eye(2**k)  # List of outputs of NN
One_hot = np.tile(One_hot, (rep, 1)).astype(float)

G, infoBits = utils.polar_generator_matrix(N, k, 'BAC', 0.1)
cn = utils.matrix_codes(u_k, k, G, N)
c = np.array(cn)
c = np.tile(c, (rep, 1))

#Hyper parameters
batch_size = 256
initializer = tf.keras.initializers.Orthogonal()
loss = 'mse'  #'categorical_crossentropy'  #'kl_divergence'
activation = 'Mish'  # Activation function for hidden layers
Example #15
def bit_error_rate_NN_decoder_irregular(N, k, C, nb_packets, e0, e1,
                                        model_decoder, output):
    """ computes the bit an block error rate using the NN-model decoder when external irregular interval
      @param C: codebook
      @param nb_packets: number of packets used in the computation
      @param e0 and e1: linspaces containing all the values of epsilon_0 and epsilon_1 to be evaluated
      @param model_decoder: the NN-model of the decoder, previously trained
      @param output: type of output 'array' or 'one' (One-hot coding) it must agree with the type of the decoder model
      @return: two dictionaries 'ber' and 'bler' containing metrics, as keys use the ep0
      """
    print(
        f'*******************NN-Decoder******************************************** {nb_packets} packets'
    )
    U_k = utils.symbols_generator(k)  # all possible messages
    ber = {}
    bler = {}
    count = 0
    Nb_iter_max = 10
    Nb_words = int(nb_packets / Nb_iter_max)

    for ep0 in e0:
        ber_row = []
        bler_row = []
        # interval = np.eye(4)[int(ep0*4/e_t-0.5) if ep0 < e_t else 3]
        interval = np.eye(4)[int(9.30 *
                                 ep0**0.5) if int(9.30 * ep0**0.5) < 4 else 3]

        inter_list = np.array(np.tile(interval, (Nb_words, 1)))

        for ep1 in (ep1 for ep1 in e1 if ep1 + ep0 <= 1 and ep1 <= ep0):
            # if ep1 == ep0 or ep1 == e0[0]: #change to this if wants to compute for all epsilon
            if ep1 == e0[0]:  #just for the most asymmetric case
                N_errors = 0
                N_errors_bler = 0
                N_iter = 0
                while N_iter < Nb_iter_max:  # and N_errors < N_errors_mini:
                    N_iter += 1

                    idx = np.random.randint(0, len(U_k),
                                            size=(1, Nb_words)).tolist()[0]  # high bound is exclusive
                    u = [U_k[a] for a in idx]
                    x = [C[a] for a in idx]  # coded bits
                    # print('uk\n',u,'\nc\n',x)
                    y_bac = [utils.BAC_channel(xi, ep0, ep1)
                             for xi in x]  # received symbols

                    yh = np.reshape(y_bac, [Nb_words, N]).astype(np.float64)
                    yh = np.concatenate((yh, inter_list), 1)

                    if output == 'one':
                        u_nn = [
                            U_k[idy]
                            for idy in np.argmax(model_decoder.predict(yh), 1)
                        ]  #  NN Detector
                    elif output == 'array':
                        u_nn = [
                            idy for idy in np.round(
                                model_decoder.predict(yh)).astype('int').tolist()
                        ]  # NN Detector (the interval is already concatenated into yh above)

                    for i in range(len(u)):
                        N_errors += np.sum(
                            np.abs(np.array(u[i]) - np.array(u_nn[i]))
                        )  # bit error rate compute with NN
                        N_errors_bler += np.sum(1.0 * (u[i] != u_nn[i]))
                ber_row.append(
                    N_errors /
                    (k * 1.0 * nb_packets))  # bit error rate compute with NN
                bler_row.append(
                    N_errors_bler /
                    (1.0 * nb_packets))  # block error rate compute with NN

        ber[ep0] = ber_row
        bler[ep0] = bler_row
        # print("{:.2f}".format(ep0), '|', ["{:.4f}".format(a) for a in ber_row])
        # print("{:.2f}".format(ep0), '|', ["{:.4f}".format(a) for a in bler_row])
        count += 1
        print("{:.3f}".format(count / len(e0) * 100), '% completed ')
    return ber, bler
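A hedged usage sketch of the function above: the model path echoes the commented-out load earlier on this page but is still hypothetical, and C, e0 and e1 are assumed to have been built as in the other examples; none of this is taken verbatim from the repo.

from tensorflow import keras

model_decoder = keras.models.load_model("./model/model_decoder_16_4_std.h5")  # hypothetical path
ber, bler = bit_error_rate_NN_decoder_irregular(
    N=16, k=4, C=C, nb_packets=1000, e0=e0, e1=e1,
    model_decoder=model_decoder, output='one')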