Example #1
                        path = saver.save(sess,
                                          checkpoint_prefix,
                                          global_step=current_step)
                        print("Saved model checkpoint to {}\n".format(path))
        else:
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            saver.restore(sess, ckpt.model_checkpoint_path)
            accuracy, predictions = dev_step(x_test, y_test)
        #dev_step(x_dev, y_dev, writer=dev_summary_writer)
outputs = np.argmax(predictions, axis=-1)
print(outputs.shape)
confusion_test = ConfusionMatrix(num_classes)
y_test = np.argmax(y_test, axis=-1)
print(y_test.shape)
confusion_test.batch_add(y_test, outputs)
test_accuracy = confusion_test.accuracy()

a, positive_predictive_value = confusion_test.positive_predictive_value()
b, negative_predictive_value = confusion_test.negative_predictive_value()
e, F1 = confusion_test.F1()
f, MCC = confusion_test.matthews_correlation()

cf_val = confusion_test.ret_mat()

print("FINAL TEST RESULTS")
print(confusion_test)
print(cf_val)
print("  test accuracy:\t\t{:.2f} %".format(test_accuracy * 100))
print(a)
print(b)
print(e)
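# A minimal companion sketch (not part of the example above), assuming the
# ConfusionMatrix helper accumulates a square count matrix `cm` with rows = true
# classes and columns = predicted classes. For the binary case, the metrics
# printed above reduce to the standard definitions:
import numpy as np

def binary_metrics(cm):
    tn, fp, fn, tp = cm[0, 0], cm[0, 1], cm[1, 0], cm[1, 1]
    accuracy = (tp + tn) / cm.sum()
    ppv = tp / (tp + fp)                  # positive predictive value (precision)
    npv = tn / (tn + fn)                  # negative predictive value
    f1 = 2 * tp / (2 * tp + fp + fn)
    mcc = (tp * tn - fp * fn) / np.sqrt(
        (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return accuracy, ppv, npv, f1, mcc

# e.g. binary_metrics(np.array([[50, 10], [5, 35]], dtype=float))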
Example #2
    all_y, all_preds = [], []
    for i in range(n_batches_test):
        x_batch = x_test[i*batch_size:(i+1)*batch_size]
        y_batch = y_test[i*batch_size:(i+1)*batch_size]
        probs_test, A_test = f_eval(x_batch)
        preds_test_flat = probs_test.reshape((-1, num_classes)).argmax(-1)
        conf_test.batch_add(
            y_batch.flatten(),
            preds_test_flat
        )

        all_y += [y_batch]
        all_preds += [probs_test.argmax(-1)]

    print("Epoch {} Acc Valid {}, Acc Train = {}, Acc Test = {}".format(
        epoch, conf_valid.accuracy(), conf_train.accuracy(), conf_test.accuracy()))

    np.savez("res_test_3", probs=probs_test, preds=probs_test.argmax(-1),
             x=x_batch, y=y_batch, A=A_test,
             all_y=np.vstack(all_y),
             all_preds=np.vstack(all_preds))

    if conf_valid.accuracy() > best_valid:
        best_valid = conf_valid.accuracy()
        look_count = LOOK_AHEAD
    else:
        look_count -= 1

    if look_count <= 0:
        break
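# A self-contained sketch (not from the original repository) of the LOOK_AHEAD
# early-stopping pattern used above: keep training while validation accuracy
# improves, and stop once it has not improved for LOOK_AHEAD consecutive epochs.
# The accuracy values below are made up purely to exercise the logic.
LOOK_AHEAD = 3                      # patience, in epochs (assumed value)
best_valid = 0.0
look_count = LOOK_AHEAD
simulated_valid_acc = [0.61, 0.67, 0.70, 0.69, 0.70, 0.68, 0.66]

for epoch, valid_acc in enumerate(simulated_valid_acc):
    if valid_acc > best_valid:
        best_valid = valid_acc
        look_count = LOOK_AHEAD     # reset the patience counter on improvement
    else:
        look_count -= 1             # otherwise spend one unit of patience
    if look_count <= 0:
        print("Early stop at epoch", epoch, "best valid acc", best_valid)
        break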
Example #3
        XY, XZ, YZ, Y_test  = DP.Patch_triplanar_para(img, PS)
        num_samples_valid = Y_test.shape[0]
        num_batches_valid = num_samples_valid // batch_size
        for i in range(num_batches_valid):
            idx = range(i*batch_size, (i+1)*batch_size)
            xy_batch = XY[idx]
            xz_batch = XZ[idx]
            yz_batch = YZ[idx]
            targets_batch = Y_test[idx]
            net_out = f_eval(xy_batch, xz_batch, yz_batch)
            preds = np.argmax(net_out, axis=-1)
            AB = f_vali(xy_batch, xz_batch, yz_batch, targets_batch)
            confusion_valid.batch_add(targets_batch, preds)
            val_loss += AB[0]/batch_size
    valid_loss += [val_loss/len(Test)]
    train_acc_cur = confusion_train.accuracy()
    valid_acc_cur = confusion_valid.accuracy()
    train_acc += [train_acc_cur]
    valid_acc += [valid_acc_cur]

  #  if (epoch) % 10 == 0:

   #     np.savez('/home/xvt131/Functions/Adhish_copy/TP_param/epoch_%d_paramsBTF.npz' %(epoch), *lasagne.layers.get_all_param_values(output))    


    print(confusion_train)
    print("Epoch %i : Train Loss %e , Train acc %f, Valid Loss %f, Valid acc %f" % (
        epoch + 1, loss[-1], train_acc_cur, valid_loss[-1], valid_acc_cur))

#import Evaluation as E

np.savez('/home/xvt131/Functions/Adhish_copy/TP_param/triplanar_Params_reg.npz', *lasagne.layers.get_all_param_values(output))
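# A short companion sketch (assumptions noted) for the parameter dump above:
# np.savez(path, *values) stores the arrays as arr_0 .. arr_N, so the weights can
# later be restored into the same Lasagne network with set_all_param_values.
# `output` is assumed to be the same output layer the values were taken from.
import numpy as np
import lasagne

with np.load('/home/xvt131/Functions/Adhish_copy/TP_param/triplanar_Params_reg.npz') as f:
    param_values = [f['arr_%d' % i] for i in range(len(f.files))]
lasagne.layers.set_all_param_values(output, param_values)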
Example #4
        x_batch = x_test[i*num_batch:(i+1)*num_batch]
        y_batch = y_test[i*num_batch:(i+1)*num_batch]
        probs_test, A_test = f_eval(x_batch)
        preds_test_flat = probs_test.reshape((-1, num_classes)).argmax(-1)
        conf_test.batch_add(
            y_batch.flatten(),
            preds_test_flat
        )

        all_y += [y_batch]
        all_preds += [probs_test.argmax(-1)]

    logger.info(
        "Epoch {} Acc Valid {}, Acc Train = {}, Acc Test = {}".format(
            epoch,
            conf_valid.accuracy(),
            conf_train.accuracy(),
            conf_test.accuracy())
    )

    np.savez(os.path.join(output_folder, "res_test"),
             probs=probs_test, preds=probs_test.argmax(-1),
             x=x_batch, y=y_batch, A=A_test,
             all_y=np.vstack(all_y),
             all_preds=np.vstack(all_preds))

    if conf_valid.accuracy() > best_valid:
        best_valid = conf_valid.accuracy()
        look_count = LOOK_AHEAD
    else:
        look_count -= 1
Example #5
    #    [X_test, X_Bigs], X_post, Y_test  = DP.voxel_samples(im, [9, 5])
    #    num_samples_valid = Y_test.shape[0]
    #    num_batches_valid = num_samples_valid // batch_size

    #    for i in range(num_batches_valid):
    #        idx = range(i*batch_size, (i+1)*batch_size)
    #        x_batch = X_test[idx]
    #        post_batch = X_post[idx]
    #        big_batch = X_Bigs[idx]
    #        targets_batch = Y_test[idx]
    #        net_out = f_eval(x_batch, post_batch, big_batch)
    #        preds = np.argmax(net_out, axis=-1)

    #        confusion_valid.batch_add(targets_batch, preds)

    train_acc_cur = confusion_train.accuracy()
    # valid_acc_cur = confusion_valid.accuracy()

    print(confusion_train)
    print("Epoch %i : Train Loss %e , Train acc %f" % (
        epoch + 1, loss[-1], train_acc_cur))

X, Y, Z = E.Evaluate1("/home/xvt131/Running/validating", DP.image_load, 9, 5,
                      f_eval)

print("Mean Gold Dice Score:", np.mean(X))
print("Mean Silver Dice Score:", np.mean(Y))

#with np.load('model.npz') as f:
#    param_values = [f['arr_%d' % i] for i in range(len(f.files))]
#    lasagne.layers.set_all_param_values(network, param_values)
Example #6
    # Full pass over the training set
    train_err = 0
    train_batches = 0
    confusion_train = ConfusionMatrix(n_class)

    # Generate minibatches and train on each one of them
    for batch in iterate_minibatches(X_tr, y_tr, mask_tr, batch_size, shuffle=True):
        inputs, targets, in_masks = batch
        tr_err, predict = train_fn(inputs, targets, in_masks)
        train_err += tr_err
        train_batches += 1
        preds = np.argmax(predict, axis=-1)
        confusion_train.batch_add(targets, preds)

    train_loss = train_err / train_batches
    train_accuracy = confusion_train.accuracy()
    cf_train = confusion_train.ret_mat()

    # Full pass over the validation set
    val_err = 0
    val_batches = 0
    confusion_valid = ConfusionMatrix(n_class)

    # Generate minibatches and evaluate each one of them
    for batch in iterate_minibatches(X_val, y_val, mask_val, batch_size):
        inputs, targets, in_masks = batch
        err, predict_val, alpha, context = val_fn(inputs, targets, in_masks)
        val_err += err
        val_batches += 1
        preds = np.argmax(predict_val, axis=-1)
Example #7
print("Network")
print(ffn)
print("Loss", losslayer)
print("")


acc = []
for epoch in range(num_epochs):
    confusion = ConfusionMatrix(num_classes)
    for i in range(num_batches):
        idx = range(i*batch_size, (i+1)*batch_size)
        x_batch = x_train[idx]
        target_batch = targets_train[idx]

        y_probs = ffn.forward(x_batch)
        loss = losslayer.fprop(y_probs, target_batch)
        delta = losslayer.bprop(y_probs, target_batch)
        ffn.backward(delta)
        ffn.update(learning_rate)
        confusion.batch_add(target_batch.argmax(-1), y_probs.argmax(-1))
    curr_acc = confusion.accuracy()
    print("Epoch %i : Loss %f Train acc %f" % (epoch, loss, curr_acc))
    acc += [curr_acc]
Example #8
        x_batch = x_test[i*num_batch:(i+1)*num_batch]
        y_batch = y_test[i*num_batch:(i+1)*num_batch]
        probs_test, A_test = f_eval(x_batch)
        preds_test_flat = probs_test.reshape((-1, num_classes)).argmax(-1)
        conf_test.batch_add(
            y_batch.flatten(),
            preds_test_flat
        )

        all_y += [y_batch]
        all_preds += [probs_test.argmax(-1)]

    logger.info(
        "Epoch {} Acc Valid {}, Acc Train = {}, Acc Test = {}".format(
            epoch,
            conf_valid.accuracy(),
            conf_train.accuracy(),
            conf_test.accuracy())
    )

    np.savez(os.path.join(output_folder, "res_test"),
             probs=probs_test, preds=probs_test.argmax(-1),
             x=x_batch, y=y_batch, A=A_test,
             all_y=np.vstack(all_y),
             all_preds=np.vstack(all_preds))

    if conf_valid.accuracy() > best_valid:
        best_valid = conf_valid.accuracy()
        look_count = LOOK_AHEAD
    else:
        look_count -= 1