Example #1
    def evaluate(A_list, X_list, y_list, mask_list, ops, batch_size):
        batches_ = batch_iterator([A_list, X_list, y_list, mask_list],
                                  batch_size=batch_size)
        output_ = []
        y_ = []

        for b_ in batches_:
            batch_ = Batch(b_[0], b_[1])
            X__, A__, _ = batch_.get('XAI')
            y__ = np.vstack(b_[2])
            mask__ = np.concatenate(b_[3], axis=0)
            feed_dict_ = {
                X_in: X__,
                A_in: sp_matrix_to_sp_tensor_value(A__),
                mask_in: mask__,
                target: y__,
                SW_KEY: np.ones((1, ))
            }

            outs_ = sess.run(ops, feed_dict=feed_dict_)

            output_.append(outs_[1][mask__.astype(bool)])
            y_.append(y__[mask__.astype(bool)])

        output_ = np.concatenate(output_, axis=0)
        y_ = np.concatenate(y_, axis=0)

        mse = (output_[:, 0] - y_[:, 0])**2

        return mse.mean(), np.std(mse) / np.sqrt(mse.shape[0])
Example #2
def evaluate(A_list, X_list, y_list, ops, batch_size):
    batches = batch_iterator([A_list, X_list, y_list], batch_size=batch_size)
    output = []
    for b in batches:
        X, A, I = Batch(b[0], b[1]).get('XAI')
        A = sp_matrix_to_sp_tensor(A)
        y = b[2]
        pred = model([X, A, I], training=False)
        outs = [o(pred, y) for o in ops]
        output.append(outs)
    return np.mean(output, 0)
Example #3
def evaluate(A_list, X_list, y_list, ops):
    batches_ = batch_iterator([A_list, X_list, y_list],
                              batch_size=P['batch_size'])
    output_ = []
    for A__, X__, y__ in batches_:
        A__, X__, I__ = create_batch(A__, X__)
        feed_dict_ = {
            X_in: X__,
            A_in: sp_matrix_to_sp_tensor_value(A__),
            I_in: I__,
            target: y__,
            SW_KEY: np.ones((1, ))
        }
        outs_ = sess.run(ops, feed_dict=feed_dict_)
        output_.append(outs_)
    return np.mean(output_, 0)
Example #4
def evaluate(A_list, X_list, y_list, ops, batch_size):
    batches_ = batch_iterator([A_list, X_list, y_list], batch_size=batch_size)
    output_ = []
    for b_ in batches_:
        batch_ = Batch(b_[0], b_[1])
        X__, A__, I__ = batch_.get('XAI')
        y__ = b_[2]
        feed_dict_ = {X_in: X__,
                      A_in: sp_matrix_to_sp_tensor_value(A__),
                      I_in: I__,
                      target: y__,
                      SW_KEY: np.ones((1,))}

        outs_ = sess.run(ops, feed_dict=feed_dict_)
        output_.append(outs_)
    return np.mean(output_, 0)
Example #5
def evaluate(A_list, X_list, y_list, ops, batch_size):
    batches = batch_iterator([A_list, X_list, y_list], batch_size=batch_size)
    output = []
    for b in batches:
        X, A, I = Batch(b[0], b[1]).get('XAI')
        y = b[2]
        feed_dict = {
            X_in: X,
            A_in: sp_matrix_to_sp_tensor_value(A),
            I_in: I,
            target: y,
            SW_KEY: np.ones((1, ))
        }

        outs = sess.run(ops, feed_dict=feed_dict)
        output.append(outs)
    return np.mean(output, 0)
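All of the snippets above draw minibatches with Spektral's batch_iterator utility. As a rough sketch of the interface they rely on (not the library's actual implementation): it walks aligned lists in batches for a given number of epochs, optionally shuffling, along these lines:

import numpy as np

def batch_iterator(data, batch_size=32, epochs=1, shuffle=True):
    # Sketch only: yields aligned slices of the sequences in `data`
    # (e.g. [A_list, X_list, y_list]) for `epochs` passes over the data.
    n = len(data[0])
    for _ in range(epochs):
        idx = np.random.permutation(n) if shuffle else np.arange(n)
        for start in range(0, n, batch_size):
            b = idx[start:start + batch_size]
            yield [[seq[i] for i in b] for seq in data]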
Example #6
def evaluate(A_list, X_list, D_list, y_list, ops):
    batches_ = batch_iterator([A_list, X_list, D_list, y_list],
                              batch_size=P['batch_size'])
    output_ = []
    for A__, X__, D__, y__ in batches_:
        A__, X__, D__, I__ = create_batch(A__, X__, D__)
        feed_dict_ = {
            X_in: X__,
            I_in: I__,
            target: y__,
            D_in[0]: sp_matrix_to_sp_tensor_value(D__[0]),
            D_in[1]: sp_matrix_to_sp_tensor_value(D__[1]),
            A_in[0]: sp_matrix_to_sp_tensor_value(A__[0]),
            A_in[1]: sp_matrix_to_sp_tensor_value(A__[1]),
            A_in[2]: sp_matrix_to_sp_tensor_value(A__[2]),
            'dense_1_sample_weights:0': np.ones((1, ))
        }
        outs_ = sess.run(ops, feed_dict=feed_dict_)
        output_.append(outs_)
    return np.mean(output_, 0)
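The TF1-style snippets convert SciPy sparse matrices with sp_matrix_to_sp_tensor_value before feeding them to a tf.sparse_placeholder. A minimal sketch of that conversion, assuming TF 1.x (the real helper ships with Spektral):

import numpy as np
import tensorflow as tf  # TF 1.x

def sp_matrix_to_sp_tensor_value(sp_matrix):
    # Sketch only: pack COO indices, values and the dense shape into the
    # value type accepted when feeding a tf.sparse_placeholder.
    coo = sp_matrix.tocoo()
    indices = np.stack([coo.row, coo.col], axis=-1)
    return tf.SparseTensorValue(indices, coo.data, coo.shape)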
Example #7
gc1 = GraphConv(64, activation='relu')([X_in, A_in])
gc2 = GraphConv(64, activation='relu')([gc1, A_in])
pool = GlobalAvgPool()([gc2, I_in])
dense1 = Dense(64, activation='relu')(pool)
output = Dense(n_out)(dense1)

# Build model
model = Model(inputs=[X_in, A_in, I_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss='mse', target_tensors=target)
model.summary()

# Training setup
sess = K.get_session()
batches_train = batch_iterator([A_train, X_train, y_train], batch_size=batch_size, epochs=epochs)
loss = 0
batch_index = 0
batches_in_epoch = np.ceil(len(A_train) / batch_size)

# Training loop
for b in batches_train:
    batch = Batch(b[0], b[1])
    y_ = b[2]
    sess.run(target_iter.initializer, feed_dict={target_ph: y_})
    loss += model.train_on_batch(list(batch.get('XAI')), None)

    batch_index += 1
    if batch_index % batches_in_epoch == 0:
        print('Loss: {}'.format(loss / batches_in_epoch))
        loss = 0
Example #8
################################################################################
# FIT MODEL
################################################################################
# Run training loop
current_batch = 0
model_loss = 0
model_acc = 0
best_val_loss = np.inf
best_weights = None
patience = es_patience
batches_in_epoch = np.ceil(y_train.shape[0] / batch_size)

print('Fitting model')
batches = batch_iterator([A_train, X_train, y_train],
                         batch_size=batch_size,
                         epochs=epochs)
for b in batches:
    X_, A_, I_ = Batch(b[0], b[1]).get('XAI')
    y_ = b[2]
    tr_feed_dict = {
        X_in: X_,
        A_in: sp_matrix_to_sp_tensor_value(A_),
        I_in: I_,
        target: y_,
        SW_KEY: np.ones((1, ))
    }
    outs = sess.run([train_step, loss, acc], feed_dict=tr_feed_dict)

    model_loss += outs[1]
    model_acc += outs[2]
Example #9
log(model_to_str(model), print_string=False)
log(model_to_str(discriminator), print_string=False)

# Train model
tic('Fitting AAE')
t = time.time()
current_batch = 0
model_loss = 0  # Loss of the autoencoder
adv_fool = 0  # Mean prediction of the discriminator on positive samples
best_val_loss = np.inf
patience = es_patience
batches_in_epoch = 1 + adj_train.shape[0] // batch_size
total_batches = batches_in_epoch * epochs

for batch in batch_iterator([adj_train, fltr_train, nf_train],
                            batch_size=batch_size,
                            epochs=epochs):
    a_, f_, nf_ = batch
    model_loss += model.train_on_batch([f_, nf_], [a_, nf_])[0]

    # Regularization
    true_batch_size = batch[0].shape[0]
    adv_res_fool = enc_discriminator.train_on_batch([f_, nf_],
                                                    np.ones(true_batch_size))

    # Update stats
    adv_fool += adv_res_fool[1]
    current_batch += 1
    if current_batch % batches_in_epoch == 0:
        model_loss /= batches_in_epoch
        adv_fool /= batches_in_epoch
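The enc_discriminator that Example #9 trains to "fool" is the standard adversarial-autoencoder stack: the encoder chained into a discriminator whose weights are frozen inside the combined model, so train_on_batch with all-ones targets only updates the encoder. A generic sketch of that wiring (layer sizes and names here are illustrative assumptions, not the example's code):

from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model

latent_dim, n_features = 8, 32  # illustrative sizes
x_in = Input(shape=(n_features,))
encoder = Model(x_in, Dense(latent_dim)(x_in))

z_in = Input(shape=(latent_dim,))
discriminator = Model(z_in, Dense(1, activation='sigmoid')(z_in))
discriminator.compile('adam', 'binary_crossentropy', metrics=['accuracy'])

# Freeze the discriminator inside the stacked model: training this model
# with targets of 1 pushes the encoder towards codes the discriminator
# labels as "real", without moving the discriminator itself.
discriminator.trainable = False
enc_discriminator = Model(x_in, discriminator(encoder(x_in)))
enc_discriminator.compile('adam', 'binary_crossentropy', metrics=['accuracy'])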
Example #10
def run_kcn_sage():
    def evaluate(A_list, X_list, y_list, mask_list, ops, batch_size):
        batches_ = batch_iterator([A_list, X_list, y_list, mask_list],
                                  batch_size=batch_size)
        output_ = []
        y_ = []

        for b_ in batches_:
            batch_ = Batch(b_[0], b_[1])
            X__, A__, _ = batch_.get('XAI')
            y__ = np.vstack(b_[2])
            mask__ = np.concatenate(b_[3], axis=0)
            feed_dict_ = {
                X_in: X__,
                A_in: sp_matrix_to_sp_tensor_value(A__),
                mask_in: mask__,
                target: y__,
                SW_KEY: np.ones((1, ))
            }

            outs_ = sess.run(ops, feed_dict=feed_dict_)

            output_.append(outs_[1][mask__.astype(bool)])
            y_.append(y__[mask__.astype(bool)])

        output_ = np.concatenate(output_, axis=0)
        y_ = np.concatenate(y_, axis=0)

        mse = (output_[:, 0] - y_[:, 0])**2

        return mse.mean(), np.std(mse) / np.sqrt(mse.shape[0])

    ################################################################################
    # LOAD DATA
    ################################################################################

    coords, features, y, y_train_val, nbs, Ntrain, train_mask, val_mask, test_mask = load_kriging_data(
        FLAGS.dataset, FLAGS.n_neighbors)

    y_f_train = y * train_mask[:, np.newaxis].astype(float)
    X_train, A_train, mask_train, y_train = get_sub_graph(
        coords, features, y, y_f_train, nbs, train_mask)
    X_val, A_val, mask_val, y_val = get_sub_graph(coords, features, y,
                                                  y_f_train, nbs, val_mask)
    X_test, A_test, mask_test, y_test = get_sub_graph(coords, features, y,
                                                      y_train_val, nbs,
                                                      test_mask)

    # Parameters
    F = X_train[0].shape[-1]  # Dimension of node features
    n_out = y_train[0].shape[-1]  # Dimension of the target

    ################################################################################
    # BUILD MODEL
    ################################################################################
    X_in = Input(
        tensor=tf.placeholder(tf.float32, shape=(None, F), name='X_in'))
    A_in = Input(tensor=tf.sparse_placeholder(tf.float32, shape=(None, None)),
                 sparse=True,
                 name='A_in')
    mask_in = Input(tensor=tf.placeholder(tf.float32),
                    shape=(None, ),
                    name='mask_in')
    target = Input(
        tensor=tf.placeholder(tf.float32, shape=(None, n_out), name='target'))

    # Block 1
    gc1 = GraphSageConv(FLAGS.hidden1,
                        aggregate_method='max',
                        activation=keras.activations.relu,
                        use_bias=True)([X_in, A_in])
    gc1 = Dropout(FLAGS.dropout)(gc1)

    if FLAGS.hidden2 != -1:
        # Block 2
        gc2 = GraphSageConv(FLAGS.hidden2,
                            aggregate_method='max',
                            activation=keras.activations.relu,
                            use_bias=True)([gc1, A_in])
        gc2 = Dropout(FLAGS.dropout)(gc2)
    else:
        gc2 = gc1

    # Output block
    output = Dense(n_out, activation=FLAGS.last_activation, use_bias=True)(gc2)

    # Build model
    model = Model([X_in, A_in], output)
    model.compile(optimizer='adam',
                  loss='mse',
                  loss_weights=[mask_in],
                  target_tensors=[target])

    # Training setup
    sess = K.get_session()
    loss = model.total_loss
    opt = tf.train.AdamOptimizer(learning_rate=FLAGS.lr)
    train_step = opt.minimize(loss)

    # Initialize all variables
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    ################################################################################
    # FIT MODEL
    ################################################################################
    # Run training loop
    current_batch = 0
    model_loss = 0
    model_acc = 0
    best_val_loss = np.inf
    patience = FLAGS.es_patience
    batches_in_epoch = np.ceil(len(y_train) / FLAGS.batch_size)

    print('Fitting model')
    batches = batch_iterator([A_train, X_train, y_train, mask_train],
                             batch_size=FLAGS.batch_size,
                             epochs=FLAGS.epochs)
    for b in batches:
        batch = Batch(b[0], b[1])
        X_, A_, _ = batch.get('XAI')
        y_ = np.vstack(b[2])
        mask_ = np.concatenate(b[3], axis=0)

        tr_feed_dict = {
            X_in: X_,
            A_in: sp_matrix_to_sp_tensor_value(A_),
            mask_in: mask_,
            target: y_,
            SW_KEY: np.ones((1, ))
        }
        outs = sess.run([train_step, loss], feed_dict=tr_feed_dict)

        model_loss += np.sum(outs[1] * mask_)

        current_batch += 1
        if current_batch % batches_in_epoch == 0:
            model_loss /= np.sum(train_mask)

            # Compute validation loss and accuracy
            val_loss, val_loss_std = evaluate(A_val,
                                              X_val,
                                              y_val,
                                              mask_val, [loss, output],
                                              batch_size=FLAGS.batch_size)

            ep = int(current_batch / batches_in_epoch)

            print('Ep: {:d} - Train loss: {:.5f} - Val mse: {:.5f}'.format(
                ep, model_loss, val_loss))

            # Check if loss improved for early stopping
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                patience = FLAGS.es_patience
            else:
                patience -= 1
                if patience == 0:
                    print('Early stopping (best val_loss: {})'.format(
                        best_val_loss))
                    break
            model_loss = 0

    ################################################################################
    # EVALUATE MODEL
    ################################################################################
    # Test model
    test_loss, test_loss_std = evaluate(A_test,
                                        X_test,
                                        y_test,
                                        mask_test, [loss, output],
                                        batch_size=FLAGS.batch_size)
    print('Test mse: {:.5f}'.format(test_loss))
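Batch(b[0], b[1]).get('XAI') and create_batch(...) both build Spektral's disjoint union of a batch of graphs: node features stacked, adjacency matrices placed on a block diagonal, plus an index vector I assigning each node to its graph (this is what the global pooling layers consume). A sketch of that construction, assuming SciPy:

import numpy as np
from scipy.sparse import block_diag

def to_disjoint(A_batch, X_batch):
    # Sketch only: one big graph whose connected components are the input
    # graphs; I[k] is the index of the graph that node k came from.
    X = np.vstack(X_batch)
    A = block_diag(A_batch).tocsr()
    sizes = [x.shape[0] for x in X_batch]
    I = np.repeat(np.arange(len(sizes)), sizes)
    return X, A, I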
Example #11
t = time.time()
current_batch = 0
model_loss = 0
adv_loss_neg = 0
adv_loss_pos = 0
adv_acc_neg = 0
adv_acc_pos = 0
adv_fool = 0
best_val_loss = np.inf
patience = es_patience
batches_in_epoch = 1 + x_train.shape[0] // batch_size
total_batches = batches_in_epoch * epochs

zeros = np.zeros(x_train.shape[0])
ones = np.ones(x_train.shape[0])
for batch, z, o in batch_iterator([x_train, zeros, ones], batch_size=batch_size, epochs=epochs):
    model_loss += model.train_on_batch(batch, batch)

    # Regularization
    batch_x = encoder.predict(batch)
    adv_res_neg = discriminator.train_on_batch(batch_x, z)

    if radius > 0:
        batch_x = ccm_uniform(batch.shape[0], dim=latent_dim, r=radius)
    else:
        batch_x = ccm_normal(batch.shape[0], dim=latent_dim, r=radius, scale=scale)
    adv_res_pos = discriminator.train_on_batch(batch_x, o)

    # Fit encoder to fool discriminator
    adv_res_fool = enc_discriminator.train_on_batch(batch, o)
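The "positive" latent samples in Example #11 come from a constant-curvature prior: a spherical shell of radius r when radius > 0, a scaled Gaussian otherwise. A conceptual sketch of the spherical case (an assumption about the sampler's behaviour, not Spektral's implementation of ccm_uniform):

import numpy as np

def sample_sphere(n, dim, r=1.0):
    # Uniform on the radius-r hypersphere: normalize Gaussian draws onto
    # the shell (rotation invariance of the Gaussian makes this uniform).
    g = np.random.normal(size=(n, dim))
    return r * g / np.linalg.norm(g, axis=-1, keepdims=True)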
Example #12
@tf.function
def train_step(X_, A_, E_, I_, y_):
    # `model`, `loss_fn` and `opt` are assumed to be defined earlier
    # in the script.
    with tf.GradientTape() as tape:
        predictions = model([X_, A_, E_, I_], training=True)
        loss = loss_fn(y_, predictions)
        loss += sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    opt.apply_gradients(zip(gradients, model.trainable_variables))
    return loss


################################################################################
# FIT MODEL
################################################################################
current_batch = 0
model_loss = 0
batches_in_epoch = np.ceil(len(A_tr) / batch_size)

print('Fitting model')
batches_train = batch_iterator([X_tr, A_tr, E_tr, y_tr],
                               batch_size=batch_size,
                               epochs=epochs)
for b in batches_train:
    X_, A_, E_, I_ = numpy_to_disjoint(*b[:-1])
    A_ = ops.sp_matrix_to_sp_tensor(A_)
    y_ = b[-1]
    outs = train_step(X_, A_, E_, I_, y_)

    model_loss += outs.numpy()
    current_batch += 1
    if current_batch == batches_in_epoch:
        print('Loss: {}'.format(model_loss / batches_in_epoch))
        model_loss = 0
        current_batch = 0

################################################################################
Example #13
        # Run training loop
        current_batch = 0
        model_loss = 0
        model_acc = 0
        best_val_loss = np.inf
        patience = P['es_patience']
        batches_in_epoch = 1 + y_train.shape[0] // P['batch_size']
        total_batches = batches_in_epoch * P['epochs']

        ########################################################################
        # FIT MODEL
        ########################################################################
        log('Fitting model')
        start_time = time.time()
        batches = batch_iterator([A_train, X_train, y_train],
                                 batch_size=P['batch_size'],
                                 epochs=P['epochs'])
        epoch_time = [0]
        for A_, X_, y_ in batches:
            A_, X_, I_ = create_batch(A_, X_)
            tr_feed_dict = {
                X_in: X_,
                A_in: sp_matrix_to_sp_tensor_value(A_),
                I_in: I_,
                target: y_,
                SW_KEY: np.ones((1, ))
            }
            epoch_time[-1] -= time.time()
            outs = sess.run([train_step, loss, acc], feed_dict=tr_feed_dict)
            epoch_time[-1] += time.time()
Example #14
sess.run(init_op)

################################################################################
# Training loop
################################################################################
log('Fitting model')
model_loss = 0  # Keep track of the current loss
model_acc = 0  # Keep track of the current accuracy
best_val_loss = np.inf  # Keep track of best validation loss for ES
patience = P['es_patience']  # Keep track of current patience for ES
epoch_time = [0]  # Keep track of epochs duration

batches_in_epoch = np.ceil(y_train.shape[0] / P['batch_size'])
total_batches = batches_in_epoch * P['epochs']
batches = batch_iterator([A_train, X_train, D_train, y_train],
                         batch_size=P['batch_size'],
                         epochs=P['epochs'],
                         shuffle=True)
for current_batch, (A_, X_, D_, y_) in enumerate(batches, start=1):
    A_, X_, D_, I_ = create_batch(A_, X_, D_)
    tr_feed_dict = {
        X_in: X_,
        I_in: I_,
        target: y_,
        D_in[0]: sp_matrix_to_sp_tensor_value(D_[0]),
        D_in[1]: sp_matrix_to_sp_tensor_value(D_[1]),
        A_in[0]: sp_matrix_to_sp_tensor_value(A_[0]),
        A_in[1]: sp_matrix_to_sp_tensor_value(A_[1]),
        A_in[2]: sp_matrix_to_sp_tensor_value(A_[2]),
        'dense_1_sample_weights:0': np.ones((1, ))
    }
    epoch_time[-1] -= time.time()
Example #15
def build_gcn_model(trainset, testset, nb_node_features, nb_classes=1, batch_size=32, nb_epochs=100, lr=0.001,
                     save_path=None):
    
    # Create model architecture
    X_in = Input(batch_shape=(None, nb_node_features))
    A_in = Input(batch_shape=(None, None), sparse=True)
    I_in = Input(batch_shape=(None, ), dtype='int64')
    target = Input(tensor=tf.placeholder(tf.float32, shape=(None, nb_classes), name='target'))
    
    gc1 = GraphConv(64, activation='relu')([X_in, A_in])
    gc2 = GraphConv(128, activation='relu')([gc1, A_in])
    pool = GlobalAvgPool()([gc2, I_in])
    dense1 = Dense(128, activation='relu')(pool)
    output = Dense(nb_classes, activation='sigmoid')(dense1)
    
    model = Model(inputs=[X_in, A_in, I_in], outputs=output)
    
    # Compile model
    #optimizer = Adam(lr=lr)    
    opt = tf.train.AdamOptimizer(learning_rate=lr)
    model.compile(optimizer=opt, loss='binary_crossentropy', target_tensors=target, metrics=['accuracy'])
    model.summary()
    loss = model.total_loss
    train_step = opt.minimize(loss)
    
    # Initialize all variables
    sess = K.get_session()
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    
    # Get train and test data
    [A_train, X_train, y_train] = trainset
    [A_test, X_test, y_test] = testset
    
    SW_KEY = 'dense_2_sample_weights:0' # Keras automatically creates a placeholder for sample weights, which must be fed
    best_accuracy = 0
    for i in range(nb_epochs):
        # Train
        # TODO: compute class weight and use it in loss function
        batches_train = batch_iterator([A_train, X_train, y_train], batch_size=batch_size)
        model_loss = 0
        prediction = []
        for b in batches_train:
            batch = Batch(b[0], b[1])
            X_, A_, I_ = batch.get('XAI')
            y_ = b[2]
            tr_feed_dict = {X_in: X_,
                            A_in: sp_matrix_to_sp_tensor_value(A_),
                            I_in: I_,
                            target: y_,
                            SW_KEY: np.ones((1,))}
            outs = sess.run([train_step, loss, output], feed_dict=tr_feed_dict)
            model_loss += outs[1]
            prediction.append(list(outs[2].flatten()))    
        y_train_predict = (np.concatenate(prediction)[:len(y_train)] > 0.5).astype('uint8')
        train_accuracy = accuracy_score(y_train, y_train_predict)
        train_loss = model_loss / (np.ceil(len(y_train) / batch_size))
        
        # Validation
        batches_val = batch_iterator([A_test, X_test, y_test], batch_size=batch_size)
        model_loss = 0
        prediction = []
        
        for b in batches_val:
            batch = Batch(b[0], b[1])
            X_, A_, I_ = batch.get('XAI')
            y_ = b[2]
            tr_feed_dict = {X_in: X_,
                            A_in: sp_matrix_to_sp_tensor_value(A_),
                            I_in: I_,
                            target: y_,
                            SW_KEY: np.ones((1,))}
            loss_, output_ = sess.run([loss, output], feed_dict=tr_feed_dict)
            model_loss += loss_
            prediction.append(list(output_.flatten()))
        
        y_val_predict = (np.concatenate(prediction)[:len(y_test)] > 0.5).astype('uint8')
        val_accuracy = accuracy_score(y_test, y_val_predict)
        val_loss = model_loss / (np.ceil(len(y_test) / batch_size))
        print('---------------------------------------------')
        print('Epoch {}: train_loss: {}, train_acc: {}, val_loss: {}, val_acc: {}'.format(i+1, train_loss, train_accuracy,
              val_loss, val_accuracy))
        
        if val_accuracy > best_accuracy:
            best_accuracy = val_accuracy
            model.save(save_path)
        
    # Evaluate the model
    model = load_model(save_path)
    batches_val = batch_iterator([A_test, X_test, y_test], batch_size=batch_size)
    prediction = []
    for b in batches_val:
        batch = Batch(b[0], b[1])
        X_, A_, I_ = batch.get('XAI')
        y_ = b[2]
        tr_feed_dict = {X_in: X_,
                        A_in: sp_matrix_to_sp_tensor_value(A_),
                        I_in: I_,
                        target: y_,
                        SW_KEY: np.ones((1,))}
        output_ = sess.run(output, feed_dict=tr_feed_dict)
        prediction.append(list(output_.flatten()))
    
    y_val_predict = (np.concatenate(prediction)[:len(y_test)] > 0.5).astype('uint8')
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')  # disable the warning on f1-score with not all labels
        scores = get_prediction_score(y_test, y_val_predict)
        
    return model, scores
Example #16
@tf.function
def evaluate(x, y):
    predictions = model([x, fltr], training=False)
    loss = loss_fn(y, predictions)
    loss += sum(model.losses)
    acc = acc_fn(y, predictions)

    return loss, acc


# Setup training
best_val_loss = 99999
current_patience = patience
current_batch = 0
batches_in_epoch = int(np.ceil(x_tr.shape[0] / batch_size))
batches_tr = batch_iterator([x_tr, y_tr], batch_size=batch_size, epochs=epochs)

# Training loop
results_tr = []
results_te = np.zeros(2)
for batch in batches_tr:
    current_batch += 1

    # Training step
    l, a = train(*batch)
    results_tr.append((l, a))

    if current_batch == batches_in_epoch:
        batches_va = batch_iterator([x_va, y_va], batch_size=batch_size)
        results_va = [evaluate(*batch) for batch in batches_va]
        results_va = np.array(results_va)