def on_epoch_end(self, epoch, logs=None):
    """Monitor PR (auPRC) on the validation set."""
    # validation data
    x_val, y_val = self.validation_data[0], self.validation_data[1]
    predictions = self.model.predict(x_val)
    aupr = auprc(y_val, predictions)
    self.val_auprc.append(aupr)
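None of these snippets define the auprc helper they call; a minimal sketch, assuming it simply wraps scikit-learn's precision-recall utilities, could look like this:

from sklearn.metrics import precision_recall_curve, auc

def auprc(y_true, y_scores):
    # Area under the precision-recall curve from predicted scores/probabilities.
    precision, recall, _ = precision_recall_curve(y_true, y_scores)
    return auc(recall, precision)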
Example 2
def on_epoch_end(self, epoch, logs=None):
    """Monitor PR (auPRC) on the validation set."""
    # validation data
    x_val, c_val, y_val = self.validation_data[0], self.validation_data[1], self.validation_data[2]
    print(x_val.shape)
    print(c_val.shape)
    print(y_val.shape)
    predictions = self.model.predict([x_val, c_val])
    aupr = auprc(y_val, predictions)
    self.val_auprc.append(aupr)
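The on_epoch_end snippets above assume a surrounding Keras Callback subclass that initializes self.val_auprc and has access to the validation data; a minimal sketch of that context (the class name AuprcHistory and the explicit validation_data argument are assumptions, since recent Keras versions no longer populate self.validation_data for callbacks) might be:

from tensorflow.keras.callbacks import Callback

class AuprcHistory(Callback):
    def __init__(self, validation_data):
        super().__init__()
        self.validation_data = validation_data  # (x_val, y_val) tuple
        self.val_auprc = []

    def on_epoch_end(self, epoch, logs=None):
        x_val, y_val = self.validation_data
        predictions = self.model.predict(x_val)
        self.val_auprc.append(auprc(y_val, predictions))

It would then be attached with something like model.fit(..., callbacks=[AuprcHistory((x_val, y_val))]).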
Example 3
    def evaluate(self):
        """Evaluate the model on the test set and record auROC and auPRC."""
        test_generator = self.test_or_val_generator(self.test_data_path)
        probabilities = self.model.predict_generator(test_generator, 5000)
        test_labels = np.loadtxt(self.test_data_path + '.labels')

        # Calculate auROC
        roc_auc = roc_auc_score(test_labels, probabilities)
        # Calculate auPRC
        prc_auc = auprc(test_labels, probabilities)

        # Write auROC and auPRC to the records file.
        self.records_path.write("AUC ROC:{0}\n".format(roc_auc))
        self.records_path.write("AUC PRC:{0}\n".format(prc_auc))
Example 4
def reportStats(weight, current_iteration, X_train, y_train, X_test, y_test):

    # Map negative labels to 0 so the binary metrics below see {0, 1} targets.
    y_train[y_train < 0] = 0
    y_test[y_test < 0] = 0

    # In-sample (IS) and out-of-sample (OOS) predictions.
    ypred_is = predict_all(X_train, weight)
    ypred_oos = predict_all(X_test, weight)

    # Temporarily ignore invalid floating-point operations (e.g., 0/0 in MCC).
    np_err_handling = np.seterr(invalid='ignore')

    is_acc = acc(y_train, ypred_is)
    is_mcc = mcc(y_train, ypred_is)
    is_f1 = f1(y_train, ypred_is)
    is_mse = mse(y_train, ypred_is)

    oos_acc = acc(y_test, ypred_oos)
    oos_mcc = mcc(y_test, ypred_oos)
    oos_f1 = f1(y_test, ypred_oos)
    oos_mse = mse(y_test, ypred_oos)

    is_tn, is_fp, is_fn, is_tp = confusion_matrix(y_train, ypred_is).ravel()
    oos_tn, oos_fp, oos_fn, oos_tp = confusion_matrix(y_test,
                                                      ypred_oos).ravel()
    is_auprc = auprc(y_train, ypred_is)
    oos_auprc = auprc(y_test, ypred_oos)

    np.seterr(**np_err_handling)  # restore previous floating-point error handling

    print(
        f"Consensus {current_iteration}: IS acc {is_acc:0.5f}.  IS MCC {is_mcc:0.5f}.  IS F1 {is_f1:0.5f}.  IS MSE {is_mse:0.5f}.  OOS acc {oos_acc:0.5f}.  OOS MCC {oos_mcc:0.5f}.  OOS F1 {oos_f1:0.5f}.  OOS MSE {oos_mse:0.5f}."
    )
    print(
        f"Confusion {current_iteration}: IS TP: {is_tp}, IS FP: {is_fp}, IS TN: {is_tn}, IS FN: {is_fn}, IS AUPRC: {is_auprc:0.5f}.  OOS TP: {oos_tp}, OOS FP: {oos_fp}, OOS TN: {oos_tn}, OOS FN: {oos_fn}, OOS AUPRC: {oos_auprc:0.5f}."
    )

    return is_acc, is_mcc, is_f1, is_mse, is_auprc, oos_acc, oos_mcc, oos_f1, oos_mse, oos_auprc
Example 5
def on_epoch_end(self, epoch, logs=None):
    (x_val, c_val), y_val = self.validation_data
    predictions = self.model.predict([x_val, c_val])
    aupr = auprc(y_val, predictions)
    self.val_auprc.append(aupr)
Example 6
i = 0
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)
for train, test in kfold.split(np.zeros(len(y)), y):
    print("Running Fold:", i+1)
    model = interp_net()  # re-initializing every time
    model.compile(
        optimizer='adam',
        loss={'main_output': 'binary_crossentropy', 'aux_output': customloss},
        loss_weights={'main_output': 1., 'aux_output': 1.},
        metrics={'main_output': 'accuracy'})
    model.fit(
        {'input': x[train]}, {'main_output': y[train], 'aux_output': x[train]},
        batch_size=batch,
        callbacks=callbacks_list,
        epochs=epoch,
        validation_split=0.20,
        verbose=2)
    y_pred = model.predict(x[test], batch_size=batch)
    y_pred = y_pred[0]  # keep only the main-output predictions
    total_loss, score, reconst_loss, acc = model.evaluate(
        {'input': x[test]},
        {'main_output': y[test], 'aux_output': x[test]},
        batch_size=batch,
        verbose=0)
    results['loss'].append(score)
    results['acc'].append(acc)
    results['auc'].append(auc_score(y[test], y_pred))
    results['auprc'].append(auprc(y[test], y_pred))
    print(results)
    i += 1
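A natural follow-up, not shown in the original snippet, is to summarize the per-fold scores once the loop finishes; assuming results maps metric names to lists of per-fold values:

# Report the mean and standard deviation of each metric across the five folds.
for metric, values in results.items():
    print("{}: {:.4f} +/- {:.4f}".format(metric, np.mean(values), np.std(values)))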