def resultsForScan(scanID):
    data, lTrain, lTest = files.loadAllInputsUpdated(scanID,
                                                     classifier.PAD,
                                                     allFeatures=True,
                                                     moreFeatures=False)
    assert data.shape[-1] == len(FEATURES)

    labels = np.vstack((lTrain, lTest))
    nLabels = labels.shape[0]
    xyz, labels = labels[:, :3], labels[:, 3]
    xyz = xyz - 1  # IMPORTANT! Annotation coordinates are 1-indexed (MATLAB-style); shift to 0-indexed for numpy.

    predictions = np.zeros(nLabels)

    accs, senss, specs, dices, aucs = (
        np.zeros(len(FEATURES)) for _ in range(5))

    for i in range(len(FEATURES)):
        feat = data[:, :, :, i]
        T_otsu = threshold_otsu(feat.ravel())
        # Score the raw feature value at each labelled voxel against its label.
        for j in range(nLabels):
            predictions[j] = feat[xyz[j, 0], xyz[j, 1], xyz[j, 2]]
        accs[i], senss[i], specs[i], dices[i], aucs[i] = util.genScores(
            labels, predictions, 0.5, T_otsu)
    return accs, senss, specs, dices, aucs
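
# util.genScores is not defined on this page. The sketch below is a guess at
# what it plausibly computes, given the call above: accuracy, sensitivity,
# specificity, Dice and AUC for binary labels against raw feature values
# binarized at the Otsu threshold. The name genScoresSketch and the exact
# score definitions are assumptions; probThreshold is unused here because
# these predictions are raw intensities rather than probabilities.
import numpy as np
from sklearn.metrics import roc_auc_score

def genScoresSketch(labels, predictions, probThreshold, rawThreshold):
    binary = (predictions >= rawThreshold).astype(int)
    tp = np.sum((binary == 1) & (labels == 1))
    tn = np.sum((binary == 0) & (labels == 0))
    fp = np.sum((binary == 1) & (labels == 0))
    fn = np.sum((binary == 0) & (labels == 1))
    acc = (tp + tn) / labels.size
    sens = tp / max(tp + fn, 1)               # true positive rate on vessels
    spec = tn / max(tn + fp, 1)               # true negative rate
    dice = 2 * tp / max(2 * tp + fp + fn, 1)  # overlap score
    auc = roc_auc_score(labels, predictions)  # threshold-free ranking score
    return acc, sens, spec, dice, auc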
Example #2
def singleBrain(scanID,
                runOneFunc,
                calcScore=True,
                writeVolume=False,
                savePath=None):
    data, labelsTrain, labelsTest = files.loadAllInputsUpdated(
        scanID, PAD, ALL_FEAT, MORE_FEAT, oneFeat=ONE_FEAT_NAME)

    toReturn = None
    if calcScore:
        trainX, trainY = files.convertToInputs(scanID, data, labelsTrain, PAD,
                                               FLIP_X, FLIP_Y, FLIP_Z, FLIP_XY)
        testX, testY = files.convertToInputs(scanID, data, labelsTest, PAD,
                                             False, False, False, FLIP_XY)
        print("%d train samples, %d test" % (len(trainX), len(testX)))
        _, _, scores, _ = runOneFunc(trainX, trainY, testX, testY, scanID,
                                     savePath)
        print("  Results\n  -------\n" + util.formatScores(scores))
        toReturn = scores

    if writeVolume:
        labels = np.vstack((labelsTrain, labelsTest))
        trainX, trainY = files.convertToInputs(scanID, data, labels, PAD,
                                               FLIP_X, FLIP_Y, FLIP_Z)
        _, _, volume, _ = runOneFunc(trainX, trainY, data, None, scanID,
                                     savePath)
        toReturn = volume
    return toReturn
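
# Hypothetical usage of singleBrain (the scan ID is made up; cnn.runOne is
# the runner used by generateErrorForSingleBrain below):
scores = singleBrain('023', cnn.runOne)
volume = singleBrain('023', cnn.runOne, calcScore=False, writeVolume=True)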
def scanAnnotation(scanID):
    _, lTrain, lTest = files.loadAllInputsUpdated(scanID,
                                                  classifier.PAD,
                                                  allFeatures=True,
                                                  moreFeatures=False)
    # Each label row is (x, y, z, isVessel), so summing the last column
    # counts the vessel-positive points.
    trainV = lTrain.sum(axis=0)[-1]
    trainNV = lTrain.shape[0] - trainV
    testV = lTest.sum(axis=0)[-1]
    testNV = lTest.shape[0] - testV
    return trainV, trainNV, testV, testNV
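
# Hypothetical usage of scanAnnotation, printing the class balance of the
# annotated points for one (made-up) scan ID:
trainV, trainNV, testV, testNV = scanAnnotation('023')
print("train: %d vessel / %d non-vessel, test: %d vessel / %d non-vessel" % (
    trainV, trainNV, testV, testNV))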
def generateErrorForSingleBrain(scanID):
    data, labelsTrain, labelsTest = files.loadAllInputsUpdated(
        scanID,
        classifier.ALL_FEAT,
        classifier.MORE_FEAT,
        oneFeat=classifier.ONE_FEAT_NAME)

    trainX, trainY = files.convertToInputs(data, labelsTrain, classifier.PAD,
                                           classifier.FLIP_X,
                                           classifier.FLIP_Y,
                                           classifier.FLIP_Z)
    testX, testY = files.convertToInputs(data, labelsTest, classifier.PAD,
                                         False, False, False)

    print("%d train samples, %d test" % (len(trainX), len(testX)))
    _, _, scores, predictions = cnn.runOne(trainX, trainY, testX, testY,
                                           scanID, None)
    print("  Results Train -> Test\n  -------\n" + util.formatScores(scores))

    delta = labelsTest[:, 3] - np.array(predictions)
    errorTest = labelsTest.astype(np.float16)
    errorTest[:, 3] = delta
    print("%f -> %f" % (np.min(errorTest[:, 3]), np.max(errorTest[:, 3])))

    print("Switched: %d train samples, %d test" % (len(testX), len(trainX)))
    _, _, scores, predictions = cnn.runOne(testX, testY, trainX, trainY,
                                           scanID, None)
    print("  Results Test -> Train\n  -------\n" + util.formatScores(scores))

    delta = labelsTrain[:, 3] - np.array(predictions)
    errorTrain = labelsTrain.astype(np.float16)
    errorTrain[:, 3] = delta
    print("%f -> %f" % (np.min(errorTrain[:, 3]), np.max(errorTrain[:, 3])))

    errorPath = "%s/%s/Normal%s-MRA_annotationAll_errors.mat" % (
        files.BASE_PATH, scanID, scanID)
    print(errorTest.shape)
    print(errorTrain.shape)
    allErrors = np.vstack((errorTrain, errorTest))
    print(allErrors.shape)
    files.writePrediction(errorPath, 'coordTable', allErrors)
    plt.hist(allErrors[:, 3].ravel())
    plt.show()
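
# The error table above is written with a .mat extension under a 'coordTable'
# key, so it can presumably be read back with scipy.io; this helper is an
# assumption, not part of the original source.
from scipy.io import loadmat

def loadErrorTable(errorPath):
    return loadmat(errorPath)['coordTable']  # rows of (x, y, z, labelError)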
Example #5
def trainAndSave(scanID, savePath=None):
    epochs = N_EPOCHS
    batchSize = BATCH_SIZE

    inFeat, lTrain, lTest = files.loadAllInputsUpdated(scanID, classifier.ALL_FEAT, classifier.MORE_FEAT)
    labelled = np.concatenate([lTrain, lTest])
    lX, lY = files.convertToInputs(inFeat, labelled, classifier.PAD, False, False, False)
    # simpleTSNE(lX, lY)

    bm = files.loadBM(scanID)
    brainIndices = np.array(np.where(bm == 1)).T
    brainIndices = brainIndices[::SUB_SAMPLE, :]
    nSamples = brainIndices.shape[0]
    print ("Training on %d sub-volumes" % brainIndices.shape[0])

    xInput, xOutput, innerFeat, isTraining, trainOp, cost = buildCAENetwork9()
    # Build the dense head too, so its variables exist before the Saver is
    # created and are written to the checkpoint (presumably for
    # runDenseNetwork to restore later).
    denseNet = buildDenseNetwork()

    saver = None if savePath is None else tf.train.Saver()

    costs = []
    with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
        start_time = datetime.datetime.now()
        print ("Initializing session...")
        sess.run(tf.global_variables_initializer())

        iterations = int(math.ceil(nSamples / batchSize))
        # run epochs
        for epoch in range(epochs):
            start_time_epoch = datetime.datetime.now()
            print('Scan %s, Epoch %d started' % (scanID, epoch))
            np.random.shuffle(brainIndices)

            # mini-batch over the training set:
            totalCost = 0.0
            for itr in tqdm(range(iterations)):
                batchMids = brainIndices[itr*batchSize: (itr+1)*batchSize]
                batchX = util.xyzRowsToVolumes(inFeat, batchMids, classifier.PAD)
                _trainOp, _cost = sess.run([trainOp, cost], feed_dict={
                    xInput: batchX,
                    isTraining: True
                })
                totalCost += _cost

            avCost = totalCost / iterations
            finalCost = nSamples * avCost
            print (">> Epoch %d had TRAIN loss: %.3f\tav: %.6f" % (
                epoch, finalCost, avCost
            ))
            costs.append(finalCost)

        end_time = datetime.datetime.now()
        print('Time elapsed:', str(end_time - start_time))

        _cost, _feat = sess.run([cost, innerFeat], feed_dict={
            xInput: lX,
            isTraining: False
        })
        print('Train/Test data has cost %.3f\tav = %.6f' % (_cost * lX.shape[0], _cost))
        print (_feat.shape)

        # Save the network:
        if savePath is not None:
            savePath = saver.save(sess, savePath)
            print ("Model saved to %s" % (savePath))

    return costs
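
# util.xyzRowsToVolumes is not shown on this page. A minimal sketch of what
# it plausibly does, given its use above: cut a cube of side 2*pad + 1 out of
# the feature volume around each (x, y, z) row. Boundary handling is an
# assumption; elsewhere on this page the volumes are loaded with a pad,
# presumably so every brain voxel has a full neighbourhood.
import numpy as np

def xyzRowsToVolumesSketch(volume, xyzRows, pad):
    cubes = []
    for x, y, z in xyzRows:
        cubes.append(volume[x - pad:x + pad + 1,
                            y - pad:y + pad + 1,
                            z - pad:z + pad + 1])
    return np.stack(cubes)  # (nRows, side, side, side, nFeatures)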
Example #6
def runDenseNetwork(trainID, testID, loadPath, batchSize=BATCH_SIZE):
    epochs = N_EPOCHS * 5
    tf.reset_default_graph()

    volTrain, lTrainA, lTrainB = files.loadAllInputsUpdated(trainID, classifier.ALL_FEAT, classifier.MORE_FEAT)
    volTest, lTestA, lTestB = files.loadAllInputsUpdated(testID, classifier.ALL_FEAT, classifier.MORE_FEAT)
    lTrain = np.concatenate([lTrainA, lTrainB], axis=0)
    lTest = np.concatenate([lTestA, lTestB], axis=0)

    trainX, trainY = files.convertToInputs(volTrain, lTrain, classifier.PAD, classifier.FLIP_X, classifier.FLIP_Y, classifier.FLIP_Z)
    testX, testY = files.convertToInputs(volTest, lTest, classifier.PAD, False, False, False)

    print ("Dense network: %d train samples, %d test" % (len(trainX), len(testX)))

    print ("Building networks...")
    caeX, _, innerFeat, caeIsTraining, _, _ = buildCAENetwork9()
    net = buildDenseNetwork()
    xInput, yInput, trainOp, cost, numCorrect, predictedProbs = net['x'], net['y'], net['t'], net['c'], net['nc'], net['p']

    runTest = True

    iterations = int(math.ceil(len(trainY) / batchSize))
    saver = tf.train.Saver()

    trainCosts, corrs = [], []
    testCosts = []
    with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
        saver.restore(sess, loadPath)

        # run epochs
        for epoch in range(epochs):
            start_time_epoch = datetime.datetime.now()
            print('Training Scan %s, Epoch %d started' % (trainID, epoch))
            trainX, trainY = util.randomShuffle(trainX, trainY)

            # mini-batch over the training set:
            totalCost, totalCorr = 0.0, 0
            for itr in range(iterations):
                batchX = trainX[itr*batchSize: (itr+1)*batchSize]
                batchY = trainY[itr*batchSize: (itr+1)*batchSize]

                _feat = sess.run(innerFeat, feed_dict={
                    caeX: batchX,
                    caeIsTraining: False
                })

                _trainOp, _cost, _corr = sess.run([trainOp, cost, numCorrect], feed_dict={
                    caeX: batchX, # HMM
                    caeIsTraining: False, # HMM
                    xInput: _feat,
                    yInput: util.oneshotY(batchY)
                })
                totalCost += _cost
                totalCorr += _corr

            print (">> Epoch %d had TRAIN loss: %.2f\t#Correct = %5d/%5d = %f" % (
                epoch, totalCost, totalCorr, len(trainY), totalCorr / len(trainY)
            ))
            trainCosts.append(totalCost)
            corrs.append(totalCorr/len(testY))

            # Run against test set:
            if runTest:
                testX, testY = util.randomShuffle(testX, testY)
                totalCost, totalCorr = 0, 0
                itrs = int(math.ceil(len(testY)/batchSize))
                for itr in range(itrs):
                    batchX = testX[itr*batchSize: (itr+1)*batchSize]
                    batchY = testY[itr*batchSize: (itr+1)*batchSize]

                    predictions, _cost = densePredict(
                        sess, caeX, innerFeat, caeIsTraining, xInput, yInput,
                        predictedProbs, cost, batchX, batchY)
                    totalCost += _cost
                    totalCorr += np.sum((np.array(predictions) > 0.5) == (np.array(batchY) > 0.5))
                end_time_epoch = datetime.datetime.now()
                print('>> Epoch %d had  TEST loss: %.2f\t#Correct = %5d/%5d = %f\tTime elapsed: %s' % (
                    epoch, totalCost, totalCorr, len(testY), totalCorr / len(testY), str(end_time_epoch - start_time_epoch)
                ))
                testCosts.append(totalCost)

        # Final pass over the test set, collecting probabilities for scoring:
        if runTest:
            print('Testing Scan %s' % (testID))
            testProbs = []
            itrs = int(math.ceil(len(testY)/batchSize))
            for itr in range(itrs):
                batchX = testX[itr*batchSize: (itr+1)*batchSize]
                batchY = testY[itr*batchSize: (itr+1)*batchSize]

                _feat = sess.run(innerFeat, feed_dict={
                    caeX: batchX,
                    caeIsTraining: False
                })

                _probs = sess.run(predictedProbs, feed_dict={
                    caeX: batchX, # HMM
                    caeIsTraining: False, # HMM
                    xInput: _feat,
                    yInput: util.oneshotY(batchY)
                })
                testProbs.extend(np.array(_probs)[:, 1].tolist())
    return trainCosts, testCosts, corrs, util.genScores(testY, testProbs)
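
# densePredict is called above but not defined in this excerpt. A sketch of
# what it presumably does, mirroring the two-step sess.run pattern of the
# training loop: encode the batch with the frozen CAE, then run the dense
# head for class-1 probabilities and the batch cost. Everything here is
# inferred from the call site.
import numpy as np

def densePredict(sess, caeX, innerFeat, caeIsTraining, xInput, yInput,
                 predictedProbs, costOp, batchX, batchY):
    feat = sess.run(innerFeat, feed_dict={caeX: batchX, caeIsTraining: False})
    probs, batchCost = sess.run([predictedProbs, costOp], feed_dict={
        caeX: batchX,            # fed again, matching the 'HMM' pattern above
        caeIsTraining: False,
        xInput: feat,
        yInput: util.oneshotY(batchY),
    })
    return np.array(probs)[:, 1], batchCost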
Example #7
def brainsToBrain(fromIDs,
                  toID,
                  runOneFunc,
                  calcScore=True,
                  writeVolume=False,
                  savePath=None,
                  perBrainExamples=None):
    trainX, trainY = None, None
    print("Loading points from scans: %s" % (str(fromIDs)))
    for fromID in fromIDs:
        print("  ... loading %s" % (fromID))
        fromX, fromY = files.convertScanToXY(fromID,
                                             ALL_FEAT,
                                             MORE_FEAT,
                                             PAD,
                                             FLIP_X,
                                             FLIP_Y,
                                             FLIP_Z,
                                             FLIP_XY,
                                             merge=True,
                                             oneFeat=ONE_FEAT_NAME,
                                             oneTransID=ONE_TRANS_ID)

        if perBrainExamples is not None:
            idxSubset = np.random.choice(len(fromX),
                                         perBrainExamples,
                                         replace=False)
            fromX = [fromX[idx] for idx in idxSubset]
            fromY = fromY[idxSubset]

        if trainX is None:
            trainX, trainY = fromX, fromY
        else:
            trainX.extend(fromX)
            trainY = np.append(trainY, fromY)
        gc.collect()

    print("Train X / Y shapes = ", len(trainX), trainY.shape)
    toReturn = None
    if calcScore:
        toX, toY = files.convertScanToXY(toID,
                                         ALL_FEAT,
                                         MORE_FEAT,
                                         PAD,
                                         False,
                                         False,
                                         False,
                                         False,
                                         merge=True,
                                         oneFeat=ONE_FEAT_NAME,
                                         oneTransID=ONE_TRANS_ID)
        print("Test X / Y shapes = ", len(toX), toY.shape)
        _, _, scores, _ = runOneFunc(trainX, trainY, toX, toY, toID, savePath)
        print("  Results\n  -------\n" + util.formatScores(scores))
        print("\n\n\n")
        toReturn = scores

    if writeVolume:
        data, _, _ = files.loadAllInputsUpdated(toID,
                                                PAD,
                                                ALL_FEAT,
                                                MORE_FEAT,
                                                oneFeat=ONE_FEAT_NAME)
        _, _, volume, _ = runOneFunc(trainX, trainY, data, None, toID,
                                     savePath)
        toReturn = volume
    return toReturn
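
# Hypothetical usage of brainsToBrain: train on a pool of scans and score a
# held-out one, capping the points taken from each training brain. The scan
# IDs and the 20000 cap are made up; cnn.runOne is the runner used elsewhere
# on this page.
scores = brainsToBrain(['002', '019', '022'], '023', cnn.runOne,
                       perBrainExamples=20000)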
Example #8
def volumeFromSavedNet(netPath,
                       scanID,
                       resultPath,
                       xFr=None,
                       xTo=None,
                       useMask=True):
    pad = classifier.PAD
    tf.reset_default_graph()

    print("Using network %s to generate volume %s" % (netPath, scanID))
    volume = files.loadAllInputsUpdated(scanID,
                                        pad,
                                        classifier.ALL_FEAT,
                                        classifier.MORE_FEAT,
                                        oneFeat=classifier.ONE_FEAT_NAME,
                                        noTrain=True)
    prediction = np.zeros(volume.shape[0:3])

    brainMask = np.ones(volume.shape[0:3])
    if useMask:
        brainMask = files.loadBM(scanID, maskPad=classifier.SIZE)

    xInput, yInput, isTraining, trainOp, cost, numCorrect, scores = (
        _getNetworkFunc())()
    saver = tf.train.Saver()

    # Override xFr/xTo to generate predictions for only a sub-range of x slices.
    if xFr is None:
        xFr = 0
    if xTo is None:
        xTo = volume.shape[0]

    with tf.Session() as sess:
        print("Loading net from file...")
        start_time = datetime.datetime.now()
        saver.restore(sess, netPath)

        # Generate entire volume, one column of x/y at a time:
        print("\nGenerating all predictions for volume %s" % (scanID))

        # First pass: count the brain voxels so the progress bar has a total.
        nBrain = 0
        for x in tqdm(range(xFr, xTo)):
            for y in range(volume.shape[1]):
                zFr, zTo = util.maskBounds(brainMask[x, y, :])
                nBrain += zTo - zFr

        with tqdm(total=nBrain, ascii=True) as progress:
            for x in range(xFr, xTo):
                for y in range(volume.shape[1]):
                    zFr, zTo = util.maskBounds(brainMask[x, y, :])
                    if zFr == -1:
                        continue  # No brain in this column
                    dataAsInput = files.convertVolumeStack(
                        scanID, pad, x, y, zFr, zTo)
                    preds = predict(sess, scores, xInput, isTraining,
                                    dataAsInput)
                    prediction[x, y, zFr:zTo] = preds
                    progress.update(zTo - zFr)

        print("Writing to %s" % (resultPath))
        files.writePrediction(resultPath, "cnn", prediction)

    end_time = datetime.datetime.now()
    print('Time elapsed:', str(end_time - start_time))
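
# util.maskBounds is not defined in this excerpt. Inferred from its use
# above: return the [first, last + 1) extent of brain voxels along one
# z-column, or (-1, -1) for an empty column (so zTo - zFr adds 0 to the
# nBrain count). The real helper's exact behaviour is an assumption.
import numpy as np

def maskBoundsSketch(maskColumn):
    idx = np.flatnonzero(maskColumn == 1)
    if idx.size == 0:
        return -1, -1
    return idx[0], idx[-1] + 1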