Example #1
def cbn_row_cov(model, data_, labels_, snr):
    # Evaluate the row-covariance CBN model at a fixed SNR: appends
    # normalized MSE, precision and recall to the module-level result dicts.
    labels = np.copy(labels_)
    data = np.copy(data_)
    data = cbn_dg.apply_wgn(data, L, snr).reshape((samples, L, N))
    data = cbn_dg.compute_cov(data) / L
    # keep the first N and the last N columns of each covariance row
    data = data[:, list(range(0, N)) + list(range(-1, -(N + 1), -1))]
    data = data / np.max(np.abs(data), axis=1).reshape((samples, 1))

    pred = model.predict(data)

    pred_conv = np.zeros((len(labels), K))
    labels_conv = np.zeros((len(labels), K))

    for i in range(len(pred)):
        n = int(np.sum(labels[i]))
        pred_theta = (-pred[i]).argsort()[:n].copy()
        pred_theta.sort()
        pred_conv[i][:n] = pred_theta / 180  #* np.pi - np.pi/2

        pred[i][pred[i] < cutoff] = 0
        pred[i][pred[i] >= cutoff] = 1

        temp = (-labels[i]).argsort()[:n].copy()
        temp.sort()
        labels_conv[i][:n] = temp / 180  #* np.pi - np.pi/2

    p = tf.keras.metrics.Precision(thresholds=threshold_vec)
    p.update_state(labels, pred)
    prec = p.result().numpy()

    r = tf.keras.metrics.Recall(thresholds=threshold_vec)
    r.update_state(labels, pred)
    rec = r.result().numpy()

    m = mean_squared_error(labels_conv, pred_conv) / mean_squared_error(
        labels_conv, np.zeros(labels_conv.shape))

    mse['cbn row cov'].append(m)
    precision['cbn row cov'].append(prec)
    recall['cbn row cov'].append(rec)
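A minimal calling sketch for cbn_row_cov, assuming the module-level names the snippet reads (samples, N, K, L, cutoff, threshold_vec, the cbn_dg data module, and the mse/precision/recall result dicts); all values below are illustrative placeholders, not taken from the source.

import numpy as np
import tensorflow as tf
from collections import defaultdict
from sklearn.metrics import mean_squared_error

# Placeholder globals mirroring what cbn_row_cov expects (assumed values);
# cbn_dg is the project's data-generation module, assumed already imported.
samples, N, K, L = 1000, 16, 2, 100
cutoff = 0.5
threshold_vec = [0.5]
mse, precision, recall = defaultdict(list), defaultdict(list), defaultdict(list)

model = tf.keras.models.load_model(f"models/CBN_row_N={N}_K={K}_L={L}")
labels_, data_ = cbn_dg.generate_bulk_data(samples, N, K, L)

# sweep a few SNR points, mirroring the evaluation loop in Example #4
for s in [5, 10, 20, 30]:
    cbn_row_cov(model, data_, labels_, [s, s])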
Example #2
def rbn_cov(model, data_, labels_, snr):
    labels = labels_.copy()
    data = data_.copy()

    data = cbn_dg.apply_wgn(data, L, snr).reshape((samples, L, N))
    data = cbn_dg.compute_cov(data) / L
    data = data / np.max(np.abs(data), axis=1).reshape((samples, 1))

    pred = model.predict(data)  #- np.pi/2

    # map labels from [-pi/2, pi/2] radians into [0, 1]
    labels = (labels + np.pi / 2) / np.pi

    m = mean_squared_error(labels, pred) / mean_squared_error(
        labels, np.zeros(labels.shape))

    mse['rbn cov'].append(m)
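The quantity m above is a normalized MSE: the model's error divided by the error of an all-zero predictor, which reduces to ||labels - pred||^2 / ||labels||^2. A self-contained check on toy arrays (values are illustrative):

import numpy as np
from sklearn.metrics import mean_squared_error

labels = np.array([[0.25, 0.75], [0.40, 0.60]])
pred = np.array([[0.20, 0.70], [0.45, 0.65]])

# both MSEs average over the same number of entries, so the ratio
# equals the squared-norm ratio
m = mean_squared_error(labels, pred) / mean_squared_error(labels, np.zeros_like(labels))
assert np.isclose(m, np.linalg.norm(labels - pred)**2 / np.linalg.norm(labels)**2)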
Example #3
def train_model(N, K, L, freq=2.4e9, snr=[5, 30], resolution=180, training_size=500000, validation_size=0.1, learning_rate=0.001):
    training_labels, training_data = dg.data_initialization(training_size, N, K, L, freq, resolution, snr, cache=True)
    
    training_data = dg.apply_wgn(training_data, L, snr).reshape((training_size, L, N))
    
    training_data = dg.compute_cov(training_data)/L
    
    training_data = dg.normalize(training_data, snr)
    
    training_data, validation_data, training_labels, validation_labels = train_test_split(training_data, training_labels, test_size=validation_size, shuffle=True)
    
    # define model
    model = keras.Sequential([
            keras.layers.Dense(resolution),
            ResnetBlock(resolution, 3),
            keras.layers.Dense(resolution, activation='sigmoid')
            ])
    
    # halve the learning rate every 10 epochs
    adaptive_learning_rate = lambda epoch: learning_rate / (2**np.floor(epoch / 10))
    
    adam = keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.9, beta_2=0.999)
    
    stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=4, min_delta=1e-4)
    lrate = tf.keras.callbacks.LearningRateScheduler(adaptive_learning_rate)

    model.compile(optimizer=adam,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    m = model.fit(training_data, training_labels, batch_size=32, epochs=300, validation_data=(validation_data, validation_labels), callbacks=[stopping, lrate])
    
    with open(f"history/CBN_resnet_N={N}_K={K}_L={L}", 'wb') as f:
        pickle.dump(m.history, f)

    model.save(f"models/CBN_resnet_N={N}_K={K}_L={L}")

    return model
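A hedged usage sketch for train_model; the parameter values are illustrative, and numpy, keras, pickle, train_test_split, ResnetBlock and the dg data module are assumed to be imported or defined at module level, as in the original script.

# Illustrative array dimensions (assumed, not from the source):
# N antennas, K sources, L snapshots per sample.
N, K, L = 16, 2, 100

model = train_model(N, K, L, snr=[5, 30], training_size=500000)

# The trained network maps a normalized covariance input to a
# resolution-length vector of per-angle detection scores in [0, 1].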
Example #4
    cbn_labels_, cbn_data_ = cbn_dg.generate_bulk_data(
        samples, N, k, L)
    cbn_resnet_labels_, cbn_resnet_data_ = cbn_dg.generate_bulk_data(
        samples, N, k, L)
    cbn_recv_labels_, cbn_recv_data_ = cbn_recv_dg.generate_bulk_data(
        samples, N, k, L)
    rbn_labels_, rbn_data_ = rbn_dg.generate_bulk_data(samples, N, k, L)
    rbn_cov_labels_, rbn_cov_data_ = rbn_cov_dg.generate_bulk_data(
        samples, N, k, L)

    for s in snrs:
        snr = [s, s]

        cbn_labels = np.copy(cbn_labels_)
        cbn_data = np.copy(cbn_data_)
        cbn_data = cbn_dg.apply_wgn(cbn_data, L, snr).reshape((samples, L, N))
        cbn_data = cbn_dg.compute_cov(cbn_data) / L
        cbn_data = cbn_dg.normalize(cbn_data, snr)

        cbn_pred = cbn_model.predict(cbn_data)

        cbn_pred = cbn_pred / np.max(cbn_pred, axis=1).reshape(samples, 1)

        for i in range(len(cbn_pred)):
            # binarize the spectrum: samples at peaks above 0.05 become 1
            idx = find_peaks(cbn_pred[i], 0.05)[0]
            cbn_pred[i][:] = 0
            cbn_pred[i][idx] = 1

        cbn_row_labels = np.copy(cbn_row_labels_)
        cbn_data = np.copy(cbn_row_data_)
        cbn_data = cbn_dg.apply_wgn(cbn_data, L, snr).reshape((samples, L, N))
        cbn_data = cbn_dg.compute_cov(cbn_data) / L
Example #5
learning_rate = 0.001

resolution = 180

labels, data = dg.data_initialization(1,
                                      N,
                                      K,
                                      L,
                                      freq,
                                      resolution,
                                      snr,
                                      cache=False)

data = dg.apply_wgn(data, L, snr).reshape((1, L, N))

data = dg.compute_cov(data) / L

data = dg.normalize(data, snr)

labels_, data_ = dg_.generate_bulk_data(1, N, K, L)
data_ = dg_.normalize_add_wgn(data_, L, [1000, 1000])

data_ = data_[:, list(range(0, N)) + list(range(-1, -(N + 1), -1))]

model = load_model(f"models/CBN_N={N}_K={K}_L={L}")
model_ = load_model(f"models/CBN_row_N={N}_K={K}_L={L}")

prediction = model_.predict(data_)

res = prediction[0] / np.max(prediction[0])
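The normalized spectrum res can be reduced to angle estimates with the same peak picking used in Example #4; the 0.05 height threshold, and the assumption that index i on the 180-point grid corresponds to (i - 90) degrees (matching the pred_theta / 180 scaling in Example #1), are carried over from the earlier snippets.

from scipy.signal import find_peaks

# indices of spectrum peaks above the height threshold from Example #4
peak_idx = find_peaks(res, height=0.05)[0]

# on a one-degree, 180-point grid, index i corresponds to (i - 90) degrees
doa_estimates_deg = peak_idx - 90
print(doa_estimates_deg)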