import pickle

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import load_model
from scipy.signal import find_peaks
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

# Project-specific modules and helpers assumed to be importable:
# cbn_dg, cbn_recv_dg, rbn_dg, rbn_cov_dg, dg, dg_, ResnetBlock.


def rbn_received(model, data_, labels_, snr):
    # Evaluate the regression network on raw received snapshots.
    # Relies on module-level globals: L, K, samples, mse, cbn_dg.
    labels = np.copy(labels_)
    labels = np.repeat(labels, L, axis=0).reshape(samples * L, K)
    data = np.copy(data_)

    data = cbn_dg.apply_wgn(data, L, snr)
    data = np.concatenate((data.real, data.imag), axis=1)
    data = data / np.max(np.abs(data), axis=1).reshape((samples * L, 1))

    pred = model.predict(data)

    # Map angles from [-pi/2, pi/2) to [0, 1) to match the network output.
    labels = (labels + np.pi / 2) / np.pi

    # Relative MSE: normalized by the MSE of an all-zero predictor.
    m = mean_squared_error(labels, pred) / mean_squared_error(
        labels, np.zeros(labels.shape))

    mse['rbn single-vec'].append(m)
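
# The helpers here all call cbn_dg.apply_wgn, which is not shown in this
# listing. A minimal sketch of what such a function might look like,
# assuming `data` is a complex (samples * L, N) snapshot matrix and `snr`
# is a [low, high] dB range (the name and shapes are assumptions, not the
# original implementation; L is kept only to mirror the call signature):
def apply_wgn_sketch(data, L, snr):
    # Draw a per-row SNR uniformly from the given dB range.
    snr_db = np.random.uniform(snr[0], snr[1], size=(data.shape[0], 1))
    signal_power = np.mean(np.abs(data)**2, axis=1, keepdims=True)
    noise_power = signal_power / (10**(snr_db / 10))
    # Complex white Gaussian noise with the chosen per-row power.
    noise = np.sqrt(noise_power / 2) * (np.random.randn(*data.shape) +
                                        1j * np.random.randn(*data.shape))
    return data + noise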


def cbn_received(model, data_, labels_, snr):
    # Evaluate the classification network on raw received snapshots.
    # Relies on module-level globals: L, N, K, samples, cutoff,
    # threshold_vec, mse, precision, recall, cbn_dg.
    labels = np.copy(labels_)
    data = np.copy(data_)
    data = cbn_dg.apply_wgn(data, L, snr)

    data = np.concatenate((data.real, data.imag), axis=1)
    data = data.reshape((samples, 2 * N * L))
    data = data / np.max(np.abs(data), axis=1).reshape((samples, 1))

    pred = model.predict(data)

    pred_conv = np.zeros((len(labels), K))
    labels_conv = np.zeros((len(labels), K))

    for i in range(len(pred)):
        # Take the n strongest grid bins as predicted DOAs and scale the
        # 180-point grid indices into [0, 1).
        n = int(np.sum(labels[i]))
        pred_theta = (-pred[i]).argsort()[:n].copy()
        pred_theta.sort()
        pred_conv[i][:n] = pred_theta / 180

        # Binarize the spectrum for the precision/recall metrics.
        pred[i][pred[i] < cutoff] = 0
        pred[i][pred[i] >= cutoff] = 1

        temp = (-labels[i]).argsort()[:n].copy()
        temp.sort()
        labels_conv[i][:n] = temp / 180

    p = tf.keras.metrics.Precision(thresholds=threshold_vec)
    p.update_state(labels, pred)
    prec = p.result().numpy()

    r = tf.keras.metrics.Recall(thresholds=threshold_vec)
    r.update_state(labels, pred)
    rec = r.result().numpy()

    # Relative MSE on the recovered angles.
    m = mean_squared_error(labels_conv, pred_conv) / mean_squared_error(
        labels_conv, np.zeros(labels_conv.shape))

    mse['cbn multi-vec'].append(m)
    precision['cbn multi-vec'].append(prec)
    recall['cbn multi-vec'].append(rec)
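
# The index-to-angle bookkeeping in the loop above is easy to misread; a
# self-contained toy run of the same conversion, assuming a 180-bin grid
# (the values are illustrative only):
toy_pred = np.zeros(180)
toy_pred[[30, 120]] = [0.9, 0.8]         # two strong bins
n_sources = 2
idx = (-toy_pred).argsort()[:n_sources]  # strongest bins first
idx.sort()
print(idx / 180)                         # [0.1667, 0.6667], as in pred_conv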


def rbn_cov(model, data_, labels_, snr):
    # Evaluate the regression network on covariance features.
    # Relies on module-level globals: L, N, samples, mse, cbn_dg.
    labels = labels_.copy()
    data = data_.copy()

    data = cbn_dg.apply_wgn(data, L, snr).reshape((samples, L, N))
    data = cbn_dg.compute_cov(data) / L
    data = data / np.max(np.abs(data), axis=1).reshape((samples, 1))

    pred = model.predict(data)

    # Map angles from [-pi/2, pi/2) to [0, 1) to match the network output.
    labels = (labels + np.pi / 2) / np.pi

    m = mean_squared_error(labels, pred) / mean_squared_error(
        labels, np.zeros(labels.shape))

    mse['rbn cov'].append(m)
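
# cbn_dg.compute_cov is also not shown. A hypothetical sketch, assuming it
# forms the unnormalized sample covariance of the L snapshots per sample
# (hence the division by L at the call sites) and flattens it into a real
# feature vector:
def compute_cov_sketch(data):
    # data: complex array of shape (samples, L, N);
    # cov[s] = sum_l x_l x_l^H, giving shape (samples, N, N).
    cov = np.einsum('sln,slm->snm', data, data.conj())
    flat = cov.reshape(cov.shape[0], -1)
    # Stack real and imaginary parts into one real-valued vector.
    return np.concatenate((flat.real, flat.imag), axis=1)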


def train_model(N, K, L,
                freq=2.4e9,
                snr=[5, 30],
                resolution=180,
                training_size=500000,
                validation_size=0.1,
                learning_rate=0.001):
    training_labels, training_data = dg.data_initialization(
        training_size, N, K, L, freq, resolution, snr, cache=True)

    # Noise the snapshots, form the averaged sample covariance and
    # normalize it before training.
    training_data = dg.apply_wgn(training_data, L, snr).reshape(
        (training_size, L, N))
    training_data = dg.compute_cov(training_data) / L
    training_data = dg.normalize(training_data, snr)

    training_data, validation_data, training_labels, validation_labels = train_test_split(
        training_data,
        training_labels,
        test_size=validation_size,
        shuffle=True)

    # define model
    model = keras.Sequential([
        keras.layers.Dense(resolution),
        ResnetBlock(resolution, 3),
        keras.layers.Dense(resolution, activation='sigmoid')
    ])

    # Halve the learning rate every 10 epochs.
    adaptive_learning_rate = lambda epoch: learning_rate / (2**np.floor(epoch / 10))

    adam = keras.optimizers.Adam(learning_rate=learning_rate,
                                 beta_1=0.9,
                                 beta_2=0.999)

    stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                patience=4,
                                                min_delta=1e-4)
    lrate = tf.keras.callbacks.LearningRateScheduler(adaptive_learning_rate)

    model.compile(optimizer=adam,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    m = model.fit(training_data,
                  training_labels,
                  batch_size=32,
                  epochs=300,
                  validation_data=(validation_data, validation_labels),
                  callbacks=[stopping, lrate])

    with open(f"history/CBN_resnet_N={N}_K={K}_L={L}", 'wb') as f:
        pickle.dump(m.history, f)

    model.save(f"models/CBN_resnet_N={N}_K={K}_L={L}")

    return model
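
# ResnetBlock is a project-specific layer that is not shown in this
# listing. A minimal sketch of a dense residual block consistent with the
# call ResnetBlock(resolution, 3) above (an assumption, not the original
# implementation):
class ResnetBlockSketch(tf.keras.layers.Layer):
    def __init__(self, units, depth, **kwargs):
        super().__init__(**kwargs)
        self.hidden = [
            tf.keras.layers.Dense(units, activation='relu')
            for _ in range(depth)
        ]

    def call(self, inputs):
        x = inputs
        for layer in self.hidden:
            x = layer(x)
        # Residual connection around the stack of dense layers.
        return x + inputs
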
def train_model(C=32,
                N=16,
                K=8,
                L=16,
                freq=2.4e9,
                training_size=200000,
                validation_size=0.1,
                learning_rate=0.001,
                snr=[5, 30]):

    training_labels, training_data = dg.data_initialization(training_size,
                                                            N,
                                                            K,
                                                            L,
                                                            freq,
                                                            180,
                                                            snr,
                                                            cache=True)

    # data_initialization returns L snapshots per sample, so recover the
    # effective number of samples.
    training_size = int(len(training_data) / L)

    training_data = dg.apply_wgn(training_data, L, snr)
    training_data = training_data.reshape((training_size, N * L))
    training_data = np.concatenate((training_data.real, training_data.imag),
                                   axis=1)
    # Shift each row to a non-negative range, then scale it to [0, 1].
    training_data = training_data - np.min(training_data, axis=1).reshape(
        (training_size, 1))
    training_data = training_data / np.max(np.abs(training_data),
                                           axis=1).reshape((training_size, 1))

    print(training_data.shape)

    training_data, validation_data, training_labels, validation_labels = train_test_split(
        training_data,
        training_labels,
        test_size=validation_size,
        shuffle=False)

    # The autoencoder trains unsupervised; drop the labels to free memory.
    training_labels = None
    validation_labels = None

    # define model
    encoder = keras.Sequential([
        keras.layers.Dense(2 * N * L, 'relu'),
        keras.layers.Dense(2 * N * L, 'relu'),
        keras.layers.Dense(2 * N, 'relu'),
        keras.layers.Dense(C)
    ])

    decoder = keras.Sequential([
        keras.layers.Dense(2 * N, 'relu'),
        keras.layers.Dense(2 * N * L, 'relu'),
        keras.layers.Dense(2 * N * L, 'sigmoid'),
    ])

    auto_input = tf.keras.Input(shape=(2 * N * L,))
    encoded = encoder(auto_input)
    decoded = decoder(encoded)
    auto_encoder = tf.keras.Model(auto_input, decoded)

    # Halve the learning rate every 10 epochs.
    adaptive_learning_rate = lambda epoch: learning_rate / (2**np.floor(epoch / 10))

    adam = keras.optimizers.Adam(learning_rate=learning_rate,
                                 beta_1=0.9,
                                 beta_2=0.999)

    lrate = tf.keras.callbacks.LearningRateScheduler(adaptive_learning_rate)
    stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                patience=4,
                                                min_delta=1e-4)

    auto_encoder.compile(optimizer=adam, loss='mse')

    m = auto_encoder.fit(training_data,
                         training_data,
                         batch_size=128,
                         epochs=100,
                         validation_data=(validation_data, validation_data),
                         callbacks=[stopping, lrate])

    with open(f"history/AE_C={C}_N={N}_K={K}_L={L}", 'wb') as f:
        pickle.dump(m.history, f)

    auto_encoder.save(f"models/AE_C={C}_N={N}_K={K}_L={L}")

    return auto_encoder
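
# Hypothetical usage of the autoencoder above. The functional model is
# Input -> encoder -> decoder, so the encoder Sequential sits at index 1
# of model.layers (an assumption about this particular model layout):
def extract_encoder(auto_encoder):
    # Returns the encoder half, which maps 2*N*L features to C codes.
    return auto_encoder.layers[1]

# e.g. codes = extract_encoder(train_model(C=32, N=16, K=8, L=16)).predict(x)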
Example #6
    cbn_row_labels_, cbn_row_data_ = cbn_dg.generate_bulk_data(
        samples, N, k, L)
    cbn_resnet_labels_, cbn_resnet_data_ = cbn_dg.generate_bulk_data(
        samples, N, k, L)
    cbn_recv_labels_, cbn_recv_data_ = cbn_recv_dg.generate_bulk_data(
        samples, N, k, L)
    rbn_labels_, rbn_data_ = rbn_dg.generate_bulk_data(samples, N, k, L)
    rbn_cov_labels_, rbn_cov_data_ = rbn_cov_dg.generate_bulk_data(
        samples, N, k, L)

    # Sweep over fixed SNR points, re-noising copies of the same data at
    # each level.
    for s in snrs:
        snr = [s, s]

        cbn_labels = np.copy(cbn_labels_)
        cbn_data = np.copy(cbn_data_)
        cbn_data = cbn_dg.apply_wgn(cbn_data, L, snr).reshape((samples, L, N))
        cbn_data = cbn_dg.compute_cov(cbn_data) / L
        cbn_data = cbn_dg.normalize(cbn_data, snr)

        cbn_pred = cbn_model.predict(cbn_data)

        cbn_pred = cbn_pred / np.max(cbn_pred, axis=1).reshape(samples, 1)

        # Binarize the spectrum: keep only peaks above the height threshold.
        for i in range(len(cbn_pred)):
            idx = find_peaks(cbn_pred[i], 0.05)[0]
            cbn_pred[i][:] = 0
            cbn_pred[i][idx] = 1

        cbn_row_labels = np.copy(cbn_row_labels_)
        cbn_data = np.copy(cbn_row_data_)
        cbn_data = cbn_dg.apply_wgn(cbn_data, L, snr).reshape((samples, L, N))
snr = [min_snr, max_snr]

learning_rate = 0.001

resolution = 180

labels, data = dg.data_initialization(1,
                                      N,
                                      K,
                                      L,
                                      freq,
                                      resolution,
                                      snr,
                                      cache=False)

data = dg.apply_wgn(data, L, snr).reshape((1, L, N))

data = dg.compute_cov(data) / L

data = dg.normalize(data, snr)

labels_, data_ = dg_.generate_bulk_data(1, N, K, L)
data_ = dg_.normalize_add_wgn(data_, L, [1000, 1000])

# Keep the first N columns and the last N columns (the latter in reverse
# order) of each feature vector.
data_ = data_[:, list(range(0, N)) + list(range(-1, -(N + 1), -1))]

model = load_model(f"models/CBN_N={N}_K={K}_L={L}")
model_ = load_model(f"models/CBN_row_N={N}_K={K}_L={L}")

prediction = model_.predict(data_)
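
# Hypothetical post-processing of `prediction`: pick peaks on the 180-bin
# spectrum and map bin indices to degrees, assuming the grid spans
# [-90, 90) as implied by the (labels + pi/2) / pi scaling used above:
spectrum = prediction[0] / np.max(prediction[0])
peak_idx = find_peaks(spectrum, 0.05)[0]
doa_degrees = peak_idx - 90
print(doa_degrees)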