def on_epoch_end(self, epoch, logs=None):
    """Compute quadratic-weighted kappa on the validation set at epoch end.

    Draws ``self.validation_steps`` batches from ``self.validation_generator``,
    rounds the model's (regression-style) predictions to integer grades clipped
    to the label range seen in each batch, then stores the metric in ``logs``
    under ``"val_kappa"`` so EarlyStopping / CSVLogger callbacks can use it.

    Args:
        epoch: Index of the epoch that just finished (unused here).
        logs: Metric dict supplied by Keras; may be None.
    """
    # Keras may invoke callbacks with logs=None; guard before item assignment
    # (the original code crashed with TypeError in that case).
    if logs is None:
        logs = {}

    y_pred_all = []
    y_test_all = []
    for _ in range(self.validation_steps):
        features, y_true = next(self.validation_generator)
        y_pred = np.asarray(self.model.predict(features))
        # Round to the nearest integer grade, clipped to the label range
        # observed in this batch (continuous output -> ordinal class).
        y_pred = np.clip(np.round(y_pred), np.min(y_true),
                         np.max(y_true)).astype(int)
        y_test_all = np.append(y_test_all, np.round(y_true).astype(int))
        y_pred_all = np.append(y_pred_all, np.round(y_pred).astype(int))

    val_kappa = kappa(y_test_all, y_pred_all, weights='quadratic')

    # Add custom metrics to the logs, so that we can use them with
    # EarlyStop and csvLogger callbacks
    logs["val_kappa"] = val_kappa
    print("val_kappa: {} ".format(val_kappa))
# --- Example 2 (scraped example boundary; original marker text: "Exemplo n.º 2" / "0") ---
    # NOTE(review): fragment — the enclosing def/loop is outside this view, so
    # y_test and predicted come from scope we cannot see. Presumably y_test is a
    # small array of continuous model outputs for one sample — TODO confirm.
    # Threshold cut-points mapping a continuous prediction onto ordinal
    # grades 0..4 (values in [coef[k-1], coef[k]) map to grade k).
    coef = [0.5, 1.5, 2.5, 3.5]
    for i, pred in enumerate(y_test):
        if pred < coef[0]:
            y_test[i] = 0
        elif pred >= coef[0] and pred < coef[1]:
            y_test[i] = 1
        elif pred >= coef[1] and pred < coef[2]:
            y_test[i] = 2
        elif pred >= coef[2] and pred < coef[3]:
            y_test[i] = 3
        else:
            y_test[i] = 4
    # NOTE(review): int(y_test) only succeeds if y_test is a single-element
    # array/sequence convertible to int — verify against the unseen caller.
    predicted.append(int(y_test))

# Evaluate the discretized predictions against the adjudicated ground-truth
# diabetic-retinopathy grades (0-4).
y_true = test_df['adjudicated_dr_grade']


print('Confusion Matrix')
cm = confusion_matrix(y_true, predicted)
print(cm)

# Visual version of the same matrix (project-local helper).
plot_cm(cm, y_true, predicted)

print('Classification Report')
print(classification_report(y_true, predicted, target_names=['0', '1', '2', '3', '4']))


# Quadratic weighting penalizes disagreements by squared grade distance,
# the standard metric for ordinal DR grading.
# Fixed typo in the printed label ("wieghted" -> "weighted").
print(' Test Quadratic_weighted_kappa')
print(kappa(y_true, predicted, weights='quadratic'))