Code example #1
0
File: cnnInfarc.py  Project: Smith42/heart-mrcnn
import h5py
import datetime

# Import and preprocess data

if __name__ == "__main__":
    # Open the pre-built training set and the held-out test set (HDF5).
    # h5py datasets are lazy: nothing is loaded into memory until indexed.
    h5f = h5py.File("./data/infarction-healthy.h5", "r")
    h5f_test = h5py.File("./data/infarction-healthy-test.h5", "r")
    inData = h5f["inData"]                  # input volumes
    inLabelsOH = h5f["inLabels"]            # presumably one-hot labels (the "OH" suffix) -- confirm against the file writer
    inData_test = h5f_test["inData"]
    inLabelsOH_test = h5f_test["inLabels"]

    # Neural net (two-channel)
    # NOTE(review): `tf` and `getCNN` are not imported in this excerpt --
    # presumably provided by imports outside the visible lines.
    sess = tf.InteractiveSession()
    model = getCNN(2)  # 2 classes: healthy, infarcted

    # Train the model, leaving out the kfold not being used
    model.fit(inData, inLabelsOH, batch_size=100, n_epoch=20, show_metric=True)
    # Minute-resolution ISO timestamp used to give the saved model a unique name.
    dt = str(datetime.datetime.now().replace(second=0,
                                             microsecond=0).isoformat("_"))
    model.save("./models/" + dt + "_3d-2channel-fakedata_infarction.tflearn")

    # Get sensitivity and specificity: split the test set into ill and
    # healthy examples using column 1 of the one-hot labels (1 = ill, 0 = healthy).
    illTest = []
    healthTest = []
    inLabels_test = inLabelsOH_test[:, 1]
    # NOTE(review): excerpt is truncated below -- the `item == 0` branch body
    # is not visible here.
    for index, item in enumerate(inLabels_test):
        if item == 1:
            illTest.append(inData_test[index])
        if item == 0:
Code example #2
0
    # One process per fold: the command-line argument selects which of the
    # k folds is held out for this run.
    # NOTE(review): `sys`, `np`, `tf`, `getCNN` are imported outside this excerpt.
    i = int(sys.argv[1])  # i is current kfold
    k = 5  # k folds

    inData = np.load("./data/shufData.npy")
    inLabels = np.load("./data/shufLab.npy")
    inLabelsOH = np.eye(2)[inLabels.astype(int)]  # One hot encode

    # k fold the data: split data and both label forms the same way so the
    # folds stay aligned.
    kfoldData = np.array_split(inData, k)
    kfoldLabels = np.array_split(inLabels, k)
    kfoldLabelsOH = np.array_split(inLabelsOH, k)

    # Neural net (two-channel)
    sess = tf.InteractiveSession()
    model = getCNN(2, finetune=True)

    # Train the model, leaving out the kfold not being used:
    # concatenate every fold except fold i, reshaped to the CNN's input
    # shape of 34x34x34 volumes with 2 channels.
    dummyData = np.reshape(
        np.concatenate(kfoldData[:i] + kfoldData[i + 1:], axis=0),
        [-1, 34, 34, 34, 2])
    dummyLabels = np.reshape(
        np.concatenate(kfoldLabelsOH[:i] + kfoldLabelsOH[i + 1:], axis=0),
        [-1, 2])
    # Fine-tune from a previously saved model ("placeholder" path -- presumably
    # substituted with a real checkpoint; confirm before running).
    model.load("./models/placeholder")
    model.fit(dummyData,
              dummyLabels,
              batch_size=5,
              n_epoch=150,
              show_metric=True)  # In practice learning stops ~150 epochs.
    # NOTE(review): excerpt truncated mid-statement below.
    dt = str(datetime.datetime.now().replace(second=0,
Code example #3
0
import h5py
import datetime

# Import and preprocess data

if __name__ == "__main__":
    # Open the pre-built training set and the held-out test set (HDF5).
    # NOTE(review): the train file is "mixed-healthy.h5" but the test file is
    # "mixed-health-test.h5" ("health", not "healthy") -- confirm this
    # filename is intentional and not a typo.
    h5f = h5py.File("./data/mixed-healthy.h5", "r")
    h5f_test = h5py.File("./data/mixed-health-test.h5", "r")
    inData = h5f["inData"]
    inLabelsOH = h5f["inLabels"]            # presumably one-hot labels -- confirm against the file writer
    inData_test = h5f_test["inData"]
    inLabelsOH_test = h5f_test["inLabels"]

    # Neural net (two-channel)
    # NOTE(review): `tf` and `getCNN` are imported outside this excerpt.
    sess = tf.InteractiveSession()
    model = getCNN(5) # NOTE(review): original comment said "2 classes: healthy, mixed" but 5 is passed; sibling examples use getCNN(2) for two classes -- confirm which is intended
    # Train the model, leaving out the kfold not being used
    model.fit(inData, inLabelsOH, batch_size=100, n_epoch=20, show_metric=True)
    # Minute-resolution ISO timestamp used to give the saved model a unique name.
    dt = str(datetime.datetime.now().replace(second=0, microsecond=0).isoformat("_"))
    model.save("./models/"+dt+"_3d-2channel-fakedata_mixed.tflearn")

    # Get sensitivity and specificity: split the test set into ill and
    # healthy examples using column 1 of the one-hot labels (1 = ill, 0 = healthy).
    # NOTE(review): the excerpt ends here; the actual sensitivity/specificity
    # computation is not visible.
    illTest = []
    healthTest = []
    inLabels_test = inLabelsOH_test[:,1]
    for index, item in enumerate(inLabels_test):
        if item == 1:
            illTest.append(inData_test[index])
        if item == 0:
            healthTest.append(inData_test[index])
Code example #4
0
File: cnnAug.py  Project: lightwisemax/heart-cnn
    # Initialise Horovod for optional distributed training.
    # NOTE(review): `args`, `hvd`, `tf`, `K`, `keras`, `getCNN` come from
    # outside this excerpt.
    if args.dist:
        print("Initialising Horovod")
        hvd.init()
        # Pin each worker to a single GPU (by local rank) and let TF grow
        # GPU memory on demand instead of grabbing the whole card up front.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = str(hvd.local_rank())
        K.set_session(tf.Session(config=config))

        print("Hvd current rank:", str(hvd.local_rank()))
    print("Seed:", str(args.SEED))
    print("Current kfold:", str(args.i), "of", str(args.k - 1))

    # Neural net (two-channel)
    model = getCNN(2)  # 2 classes: healthy, ischaemia
    if args.dist:
        # Scale the learning rate by the worker count -- presumably to
        # compensate for the larger effective batch size under Horovod.
        opt = keras.optimizers.Adam(lr=0.001 * hvd.size())
        opt = hvd.DistributedOptimizer(opt)
    else:
        opt = keras.optimizers.Adam(lr=0.001)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # callbacks (excerpt truncated below, mid-comment)
    cb = []
    if args.dist:
        cb.append(hvd.callbacks.BroadcastGlobalVariablesCallback(0))
        # Horovod: average metrics among workers at the end of every epoch.
        # Note: This callback must be in the list before the ReduceLROnPlateau,
Code example #5
0
File: getLossCube.py  Project: Smith42/heart-mrcnn
    inDataAbs = np.fabs(inData)
    inDataMax = np.amax(inData)
    normalisedData = inDataAbs / inDataMax
    return normalisedData


if __name__ == "__main__":
    # inData are heartcubes with same shape as those used in the CNN
    # So far this has only been implemented for healthy/ill pairs. It should be easy enough to generalise it to n classes though.
    # Generalisation to n classes would require grouping of healthy and ill diagnoses as in cnnAll.py, or a rethink of the current loss function.
    # NOTE(review): `h5py`, `np`, `getCNN`, `getLoss` are imported outside
    # this excerpt.
    ppt = 20  # index of the single example pulled from the dataset
    h5f = h5py.File("./data/twoThousand.h5", "r")
    inData = h5f["inData"][ppt]
    inData = inData[np.newaxis, ...]  # add a leading batch axis of size 1

    model = getCNN(2)
    model.load(
        "./data/placeholderModel")  # The model that we want to test goes here

    # Probability of the "ill" class (column 1) for the single example.
    inLabel = model.predict(inData)[:, 1]

    maskWidth = 8  # Might be more representative to have this as even.
    lossCube = np.zeros(inData.shape[1:4])

    # Slide a maskWidth^3 occlusion mask over the volume and record the loss
    # at the mask centre for every position.
    # NOTE(review): `maskWidth / 2` is float division under Python 3, and
    # floats are not valid numpy indices -- this indexing only works under
    # Python 2 (or needs `//`). Confirm the target Python version.
    for j in np.arange(inData.shape[1] - maskWidth + 1):
        for k in np.arange(inData.shape[2] - maskWidth + 1):
            for l in np.arange(inData.shape[3] - maskWidth + 1):
                loss = getLoss(inData, inLabel, maskWidth, j, k, l)
                lossCube[j + maskWidth / 2, k + maskWidth / 2,
                         l + maskWidth / 2] = loss
                print(j + maskWidth / 2, k + maskWidth / 2, l + maskWidth / 2,
Code example #6
0
import h5py
import datetime

# Import and preprocess data

if __name__ == "__main__":
    # Open the pre-built training set and the held-out test set (HDF5).
    # h5py datasets are lazy: nothing is loaded into memory until indexed.
    h5f = h5py.File("./data/artefact-healthy.h5", "r")
    h5f_test = h5py.File("./data/artefact-healthy-test.h5", "r")
    inData = h5f["inData"]                  # input volumes
    inLabelsOH = h5f["inLabels"]            # presumably one-hot labels -- confirm against the file writer
    inData_test = h5f_test["inData"]
    inLabelsOH_test = h5f_test["inLabels"]

    # Neural net (two-channel)
    # NOTE(review): `tf` and `getCNN` are imported outside this excerpt.
    sess = tf.InteractiveSession()
    model = getCNN(2)  # 2 classes: healthy, artefact

    # Train the model, leaving out the kfold not being used
    model.fit(inData, inLabelsOH, batch_size=100, n_epoch=20, show_metric=True)
    # Minute-resolution ISO timestamp used to give the saved model a unique name.
    dt = str(datetime.datetime.now().replace(second=0,
                                             microsecond=0).isoformat("_"))
    model.save("./models/" + dt + "_3d-2channel-fakedata_artefact.tflearn")

    # Get sensitivity and specificity: split the test set into ill and
    # healthy examples using column 1 of the one-hot labels (1 = ill, 0 = healthy).
    illTest = []
    healthTest = []
    inLabels_test = inLabelsOH_test[:, 1]
    # NOTE(review): excerpt is truncated below -- the `item == 0` branch body
    # is not visible here.
    for index, item in enumerate(inLabels_test):
        if item == 1:
            illTest.append(inData_test[index])
        if item == 0:
Code example #7
0
File: cnnAll.py  Project: Smith42/heart-mrcnn
    return e_x / e_x.sum()


# Import and preprocess data

if __name__ == "__main__":
    # Open the combined 5-class training set and its held-out test set (HDF5).
    # NOTE(review): `h5py`, `tf`, `np`, `datetime`, `getCNN` are imported
    # outside this excerpt.
    h5f = h5py.File("./data/all.h5", "r")
    h5f_test = h5py.File("./data/allTest.h5", "r")
    inData = h5f["inData"]
    inLabelsOH = h5f["inLabels"]            # presumably one-hot labels -- confirm against the file writer
    inData_test = h5f_test["inData"]
    inLabelsOH_test = h5f_test["inLabels"]

    # Neural net (two-channel)
    sess = tf.InteractiveSession()
    model = getCNN(
        5)  # 5 classes: healthy, ischaemic, infarcted, mixed, artefacted

    # Train the model
    model.fit(
        inData,
        inLabelsOH,
        batch_size=100,
        n_epoch=30,
        show_metric=True,
        validation_set=0.1)  # Need validation so I can see when learning stops
    # Minute-resolution ISO timestamp used to give the saved model a unique name.
    dt = str(datetime.datetime.now().replace(second=0,
                                             microsecond=0).isoformat("_"))
    model.save("./models/" + dt + "_3d-2channel-fakedata-all.tflearn")

    # Retrieve indices of the different cubes and get the accuracies of each type
    # Recover integer class indices from the one-hot test labels
    # (column index of the 1 in each row). Excerpt truncated below.
    inLabels_test = np.argwhere(np.array(inLabelsOH_test) == 1)[:, 1]