Example #1
            xz_batch = XZ[idx]
            yz_batch = YZ[idx]
            target_batch = Y_train[idx]
            batch_loss = f_train(xy_batch, xz_batch, yz_batch, target_batch)  # this performs the backprop pass

            cur_loss += batch_loss[0] / batch_size

        for i in range(num_batches_train):
            idx = range(i*batch_size, (i+1)*batch_size)
            xy_batch = XY[idx]
            xz_batch = XZ[idx]
            yz_batch = YZ[idx]
            targets_batch = Y_train[idx]
            net_out = f_eval(xy_batch, xz_batch, yz_batch)
            preds = np.argmax(net_out, axis=-1)
            confusion_train.batch_add(targets_batch, preds)
    loss += [cur_loss/len(Train)]


    for img in Test:
        XY, XZ, YZ, Y_test  = DP.Patch_triplanar_para(img, PS)
        num_samples_valid = Y_test.shape[0]
        num_batches_valid = num_samples_valid // batch_size
        for i in range(num_batches_valid):
            idx = range(i*batch_size, (i+1)*batch_size)
            xy_batch = XY[idx]
            xz_batch = XZ[idx]
            yz_batch = YZ[idx]
            targets_batch = Y_test[idx]
            net_out = f_eval(xy_batch, xz_batch, yz_batch)
            preds = np.argmax(net_out, axis=-1)
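
A note on the prediction step used throughout these examples: np.argmax(net_out, axis=-1) collapses the per-class probabilities returned by f_eval into hard class labels. A minimal, self-contained illustration (the probabilities below are made up):

import numpy as np

net_out = np.array([[0.1, 0.7, 0.2],
                    [0.8, 0.1, 0.1]])  # two samples, three classes
print(np.argmax(net_out, axis=-1))  # [1 0] -- index of the largest value per row
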
Example #2
                        max_acc = accuracy
                        path = saver.save(sess,
                                          checkpoint_prefix,
                                          global_step=current_step)
                        print("Saved model checkpoint to {}\n".format(path))
        else:
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            saver.restore(sess, ckpt.model_checkpoint_path)
            accuracy, predictions = dev_step(x_test, y_test)
        #dev_step(x_dev, y_dev, writer=dev_summary_writer)
outputs = np.argmax(predictions, axis=-1)
print(outputs.shape)
confusion_test = ConfusionMatrix(num_classes)
y_test = np.argmax(y_test, axis=-1)
print(y_test.shape)
confusion_test.batch_add(y_test, outputs)
test_accuracy = confusion_test.accuracy()

a, positive_predictive_value = confusion_test.positive_predictive_value()
b, negative_predictive_value = confusion_test.negative_predictive_value()
e, F1 = confusion_test.F1()
f, MCC = confusion_test.matthews_correlation()

cf_val = confusion_test.ret_mat()

print("FINAL TEST RESULTS")
print(confusion_test)
print(cf_val)
print("  test accuracy:\t\t{:.2f} %".format(test_accuracy * 100))
print(a)
print(b)
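
The snippets above lean on a ConfusionMatrix helper (batch_add, accuracy, ret_mat, plus per-class metrics such as F1 and Matthews correlation) whose implementation is not shown. A minimal sketch of the core accumulation logic, assuming the usual rows-are-targets / columns-are-predictions convention (the class name and details here are hypothetical, not the actual helper):

import numpy as np

class ConfusionMatrixSketch:
    def __init__(self, num_classes):
        # rows index the true class, columns the predicted class
        self.mat = np.zeros((num_classes, num_classes), dtype=np.int64)

    def batch_add(self, targets, preds):
        # accumulate one (target, prediction) count per sample
        for t, p in zip(np.ravel(targets), np.ravel(preds)):
            self.mat[int(t), int(p)] += 1

    def accuracy(self):
        # correct predictions lie on the diagonal
        return np.trace(self.mat) / max(self.mat.sum(), 1)

    def ret_mat(self):
        return self.mat
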
Example #3
                    pad = (40-len(pm.name))*" "
                    print("%s \t %.5e \t %.5e \t %.5e" % (
                        pm.name + pad,
                        np.linalg.norm(gm),
                        np.linalg.norm(um),
                        np.linalg.norm(pm.get_value())
                    ))

        cost_train_lst += [cost_train]

    conf_train = ConfusionMatrix(num_classes)
    for i in range(x_train.shape[0] // 1000):
        probs_train, _ = f_eval(x_train[i*1000:(i+1)*1000])
        preds_train_flat = probs_train.reshape((-1, num_classes)).argmax(-1)
        conf_train.batch_add(
            y_train[i*1000:(i+1)*1000].flatten(),
            preds_train_flat
        )

    if last_decay > args.decayinterval and epoch > args.nodecay:
        last_decay = 0
        old_lr = sh_lr.get_value()
        new_lr = old_lr / args.decayfac
        sh_lr.set_value(lasagne.utils.floatX(new_lr))
        print("Decay lr from %f to %f" % (float(old_lr), float(new_lr)))
    else:
        last_decay += 1

    # valid
    conf_valid = ConfusionMatrix(num_classes)
    for i in range(n_batches_valid):
        x_batch = x_valid[i*batch_size:(i+1)*batch_size]
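
The learning-rate schedule above is a plain step decay on a Theano shared variable: after an initial grace period of args.nodecay epochs, the rate is divided by args.decayfac at most once every args.decayinterval epochs. The same bookkeeping in plain Python, with hypothetical hyperparameter values:

lr = 0.01
decayinterval, decayfac, nodecay = 5, 2.0, 10  # hypothetical values
last_decay = 0
for epoch in range(30):
    if last_decay > decayinterval and epoch > nodecay:
        last_decay = 0
        lr /= decayfac
        print("Decayed lr at epoch %d to %f" % (epoch, lr))
    else:
        last_decay += 1
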
Example #4
            xz_batch = XZ[idx]
            yz_batch = YZ[idx]
            target_batch = np.float32(Y_train[idx].reshape(batch_size, 1))
            batch_loss = f_train(xy_batch, xz_batch, yz_batch,
                                 target_batch)  # this performs the backprop pass
            cur_loss += batch_loss[0] / batch_size

        for i in range(num_batches_train):
            idx = range(i * batch_size, (i + 1) * batch_size)
            xy_batch = XY[idx]
            xz_batch = XZ[idx]
            yz_batch = YZ[idx]
            targets_batch = np.float32(Y_train[idx].reshape(batch_size, 1))
            net_out = f_eval(xy_batch, xz_batch, yz_batch)
            preds = np.where(net_out > threshold, upper, lower)
            confusion_train.batch_add(targets_batch.reshape(batch_size, 1),
                                      preds)
    loss += [cur_loss / len(Train)]

    for img in Test:
        XY, XZ, YZ, Y_test = DP.Patch_triplanar_para(img, PS)
        num_samples_valid = Y_test.shape[0]
        num_batches_valid = num_samples_valid // batch_size
        for i in range(num_batches_valid):
            idx = range(i * batch_size, (i + 1) * batch_size)
            xy_batch = XY[idx]
            xz_batch = XZ[idx]
            yz_batch = YZ[idx]
            targets_batch = np.float32(Y_test[idx].reshape(batch_size, 1))
            net_out = f_eval(xy_batch, xz_batch, yz_batch)
            preds = np.where(net_out > threshold, upper, lower)
            AB = f_vali(xy_batch, xz_batch, yz_batch, targets_batch)
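
Unlike the multi-class examples, this binary variant thresholds the network's scalar output instead of taking an argmax. A quick illustration of the np.where pattern (threshold, upper, and lower are assumed here to be 0.5, 1.0, and 0.0):

import numpy as np

net_out = np.array([[0.2], [0.7], [0.5]], dtype=np.float32)
threshold, upper, lower = 0.5, np.float32(1), np.float32(0)
preds = np.where(net_out > threshold, upper, lower)
print(preds.ravel())  # [0. 1. 0.] -- 0.5 is not strictly greater than the threshold
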
Example #5
                                 x_batch_c3d_dim[2:])
        # print 'the shape of x_batch_c3d after reshape:', x_batch_c3d.shape

        train_out = f_train(x_batch_c3d, y_batch,
                            mask_batch_c3d)  # y_batch_repeat
        cost_train, _ = train_out[:2]
        # print 'batch_train:', i, 'cost_train:', cost_train

        cost_train_lst += [cost_train]

        # pdb.set_trace()
        cost_eval_train, probs_train = f_eval(x_batch_c3d, y_batch,
                                              mask_batch_c3d)
        # print 'batches_train_eval:', i, 'cost_eval_train:', cost_eval_train
        preds_train_flat = probs_train.reshape((-1, num_classes)).argmax(-1)
        conf_train.batch_add(y_batch.flatten(), preds_train_flat)
        cost_eval_train_lst += [cost_eval_train]

    #########################################################
    # pdb.set_trace()
    cost_eval_train_lst = np.array(cost_eval_train_lst)
    cost_eval_train_mean = cost_eval_train_lst.mean()

    cost_train_lst = np.array(cost_train_lst)
    cost_train_mean = cost_train_lst.mean()

    # change learning_rate
    if last_decay > args.decayinterval and epoch > args.nodecay:
        last_decay = 0
        old_lr = sh_lr.get_value()
        new_lr = old_lr / args.decayfac
Example #6
                    pad = (40-len(pm.name))*" "
                    print("%s \t %.5e \t %.5e \t %.5e" % (
                        pm.name + pad,
                        np.linalg.norm(gm),
                        np.linalg.norm(um),
                        np.linalg.norm(pm.get_value())
                    ))

        cost_train_lst += [cost_train]

    conf_train = ConfusionMatrix(num_classes)
    for i in range(x_train.shape[0] // 1000):
        probs_train, _ = f_eval(x_train[i*1000:(i+1)*1000])
        preds_train_flat = probs_train.reshape((-1, num_classes)).argmax(-1)
        conf_train.batch_add(
            y_train[i*1000:(i+1)*1000].flatten(),
            preds_train_flat
        )

    if last_decay > args.decayinterval and epoch > args.nodecay:
        last_decay = 0
        old_lr = sh_lr.get_value()
        new_lr = old_lr / args.decayfac
        sh_lr.set_value(lasagne.utils.floatX(new_lr))
        print "Decay lr from %f to %f" % (float(old_lr), float(new_lr))
    else:
        last_decay += 1

    # valid
    conf_valid = ConfusionMatrix(num_classes)
    for i in range(batches_valid):
        x_batch = x_valid[i*num_batch:(i+1)*num_batch]
Example #7
    # Start the epoch timer
    start_time = time.time()

    # Full pass over the training set
    train_err = 0
    train_batches = 0
    confusion_train = ConfusionMatrix(n_class)

    # Generate minibatches and train on each one of them
    for batch in iterate_minibatches(X_tr, y_tr, mask_tr, batch_size, shuffle=True):
        inputs, targets, in_masks = batch
        tr_err, predict = train_fn(inputs, targets, in_masks)
        train_err += tr_err
        train_batches += 1
        preds = np.argmax(predict, axis=-1)
        confusion_train.batch_add(targets, preds)

    train_loss = train_err / train_batches
    train_accuracy = confusion_train.accuracy()
    cf_train = confusion_train.ret_mat()

    # Full pass over the validation set
    val_err = 0
    val_batches = 0
    confusion_valid = ConfusionMatrix(n_class)

    # Generate minibatches and evaluate on each one of them
    for batch in iterate_minibatches(X_val, y_val, mask_val, batch_size):
        inputs, targets, in_masks = batch
        err, predict_val, alpha, context = val_fn(inputs, targets, in_masks)
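
iterate_minibatches is not defined in this excerpt; judging from the call sites, it yields (inputs, targets, masks) tuples of batch_size samples each, optionally shuffled. A plausible reimplementation in the spirit of the standard Lasagne tutorial helper (the original may differ in detail):

import numpy as np

def iterate_minibatches(inputs, targets, masks, batch_size, shuffle=False):
    assert len(inputs) == len(targets) == len(masks)
    indices = np.arange(len(inputs))
    if shuffle:
        np.random.shuffle(indices)
    # drop the final partial batch, matching the full-batch loops above
    for start in range(0, len(inputs) - batch_size + 1, batch_size):
        excerpt = indices[start:start + batch_size]
        yield inputs[excerpt], targets[excerpt], masks[excerpt]
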
Example #8
print "Network"
print ffn
print "Loss", losslayer
print ""


acc = []
for epoch in range(num_epochs):
    confusion = ConfusionMatrix(num_classes)
    for i in range(num_batches):
        idx = range(i*batch_size, (i+1)*batch_size)
        x_batch = x_train[idx]
        target_batch = targets_train[idx]

        y_probs = ffn.forward(x_batch)
        loss = losslayer.fprop(y_probs, target_batch)
        delta = losslayer.bprop(y_probs, target_batch)
        ffn.backward(delta)
        ffn.update(learning_rate)
        confusion.batch_add(target_batch.argmax(-1), y_probs.argmax(-1))
    curr_acc = confusion.accuracy()
    print "Epoch %i : Loss %f Train acc %f" % (epoch, loss, curr_acc)
    acc += [curr_acc]
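
This example drives backpropagation by hand: losslayer.fprop computes the scalar loss and losslayer.bprop returns the gradient of that loss with respect to the network output, which ffn.backward then propagates through the layers. A self-contained sketch of such a loss layer for one-hot targets, assuming the network's forward pass ends in probabilities (a hypothetical stand-in, not the actual losslayer):

import numpy as np

class CrossEntropyLossSketch:
    def fprop(self, probs, targets):
        # mean cross-entropy between predicted probabilities and one-hot targets
        eps = 1e-12
        return -np.mean(np.sum(targets * np.log(probs + eps), axis=-1))

    def bprop(self, probs, targets):
        # gradient of the mean cross-entropy w.r.t. the probabilities
        eps = 1e-12
        return -(targets / (probs + eps)) / probs.shape[0]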

Example #9
                    train_err = 0
                    train_batches = 0
                    confusion_train = ConfusionMatrix(num_classes)

                    # Generate minibatches and train on each one of them
                    for batch in iterate_minibatches(X_tr,
                                                     y_tr,
                                                     batch_size,
                                                     shuffle=True):
                        inputs, targets = batch
                        tr_err, predict = train_step(inputs, targets)
                        train_err += tr_err
                        train_batches += 1
                        preds = np.argmax(predict, axis=-1)
                        targets = np.argmax(targets, axis=-1)
                        confusion_train.batch_add(targets, preds)

                    train_loss = train_err / train_batches
                    train_accuracy = confusion_train.accuracy()
                    cf_train = confusion_train.ret_mat()

                    # Full pass validation set
                    val_err = 0
                    val_batches = 0
                    confusion_valid = ConfusionMatrix(num_classes)

                    # Generate minibatches and evaluate on each one of them
                    for batch in iterate_minibatches(X_val, y_val, batch_size):
                        inputs, targets = batch
                        err, predict_val = dev_step(inputs,
                                                    targets,