Example #1
    # Full pass over the training set (inside the per-epoch training loop)
    train_err = 0
    train_batches = 0
    confusion_train = ConfusionMatrix(n_class)

    # Generate minibatches and train on each one of them
    for batch in iterate_minibatches(X_tr, y_tr, mask_tr, batch_size, shuffle=True):
        inputs, targets, in_masks = batch
        tr_err, predict = train_fn(inputs, targets, in_masks)
        train_err += tr_err
        train_batches += 1
        preds = np.argmax(predict, axis=-1)
        confusion_train.batch_add(targets, preds)

    train_loss = train_err / train_batches
    train_accuracy = confusion_train.accuracy()
    cf_train = confusion_train.ret_mat()

    # Full pass over the validation set
    val_err = 0
    val_batches = 0
    confusion_valid = ConfusionMatrix(n_class)

    # Generate minibatches and evaluate on each one of them
    for batch in iterate_minibatches(X_val, y_val, mask_val, batch_size):
        inputs, targets, in_masks = batch
        err, predict_val, alpha, context = val_fn(inputs, targets, in_masks)
        val_err += err
        val_batches += 1
        preds = np.argmax(predict_val, axis=-1)
        confusion_valid.batch_add(targets, preds)
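
Both passes above rely on two project-specific helpers that are not shown in the snippet: ConfusionMatrix and iterate_minibatches. The sketch below is a minimal, assumed version of their interface, inferred only from the calls made in these examples; the real class in the original codebase also exposes the additional metric methods used in the later examples.

import numpy as np

class ConfusionMatrix:
    # Minimal sketch: accumulates an n_class x n_class count matrix.
    def __init__(self, n_class):
        self.n_class = n_class
        self.mat = np.zeros((n_class, n_class), dtype=np.int64)

    def batch_add(self, targets, preds):
        # Rows index the true class, columns the predicted class (assumed convention).
        for t, p in zip(np.asarray(targets).ravel(), np.asarray(preds).ravel()):
            self.mat[int(t), int(p)] += 1

    def accuracy(self):
        return np.trace(self.mat) / max(self.mat.sum(), 1)

    def ret_mat(self):
        return self.mat

def iterate_minibatches(X, y, mask, batch_size, shuffle=False):
    # Assumed helper: yields (inputs, targets, masks) slices of batch_size samples.
    idx = np.arange(len(X))
    if shuffle:
        np.random.shuffle(idx)
    for start in range(0, len(X) - batch_size + 1, batch_size):
        sel = idx[start:start + batch_size]
        yield X[sel], y[sel], mask[sel]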
Example #2
            accuracy, predictions = dev_step(x_test, y_test)
        #dev_step(x_dev, y_dev, writer=dev_summary_writer)
outputs = np.argmax(predictions, axis=-1)
print(outputs.shape)
confusion_test = ConfusionMatrix(num_classes)
y_test = np.argmax(y_test, axis=-1)
print(y_test.shape)
confusion_test.batch_add(y_test, outputs)
test_accuracy = confusion_test.accuracy()

# Each metric method appears to return a pair; the second element (a scalar) is what is reported below.
a, positive_predictive_value = confusion_test.positive_predictive_value()
b, negative_predictive_value = confusion_test.negative_predictive_value()
e, F1 = confusion_test.F1()
f, MCC = confusion_test.matthews_correlation()

cf_test = confusion_test.ret_mat()

print("FINAL TEST RESULTS")
print(confusion_test)
print(cf_test)
print("  test accuracy:\t\t{:.2f} %".format(test_accuracy * 100))
print(a)
print(b)
print(e)
print(f)
print("  test positive_predictive_value:\t\t{:.2f} %".format(
    positive_predictive_value * 100))
print("  test negative_predictive_value:\t\t{:.2f} %".format(
    negative_predictive_value * 100))
print("  test F1:\t\t{:.2f} %".format(F1 * 100))
print("  test MCC:\t\t{:.2f} %".format(MCC * 100))
Example #3
                    print("\nEvaluation:")
                    accuracy, predictions = dev_step(x_test,
                                                     y_test,
                                                     writer=dev_summary_writer)
                    if accuracy > max_acc:
                        max_acc = accuracy
                        path = saver.save(sess,
                                          checkpoint_prefix,
                                          global_step=current_step)
                        print("Saved model checkpoint to {}\n".format(path))
        else:
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            saver.restore(sess, ckpt.model_checkpoint_path)
            accuracy, predictions = dev_step(
                x_test,
                y_test)  # dev_step(x_dev, y_dev, writer=dev_summary_writer)
outputs = np.argmax(predictions, axis=-1)
print(outputs.shape)
confusion_test = ConfusionMatrix(num_classes)
y_test = np.argmax(y_test, axis=-1)
print(y_test.shape)
confusion_test.batch_add(y_test, outputs)
test_accuracy = confusion_test.accuracy()
cf_test = confusion_test.ret_mat()

print("FINAL TEST RESULTS")
print(confusion_test)
print("  test accuracy:\t\t{:.2f} %".format(test_accuracy * 100))
print("  test Gorodkin:\t\t{:.2f}".format(gorodkin(cf_test)))
print("  test IC:\t\t{:.2f}".format(IC(cf_test)))