def exp_eval_ots_lidc_jsrt(args):
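    """Train a linear SVM on off-the-shelf VGG16 features extracted from LIDC
    candidates and evaluate it on the JSRT positive cases (jsrt140p)."""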
    # load LIDC & JSRT-positives data
    imgs_tr, blobs_tr = lidc.load()
    pred_blobs_tr = detect.read_blobs('data/{}-lidc-pred-blobs.pkl'.format(
        args.detector))
    masks_tr = np.load('data/aam-lidc-pred-masks.npy')
    imgs_te, blobs_te = jsrt.load(set_name='jsrt140p')
    pred_blobs_te = detect.read_blobs('data/{}-jsrt140p-pred-blobs.pkl'.format(
        args.detector))
    masks_te = np.load('data/aam-jsrt140p-pred-masks.npy')
    rois_tr = lnd.create_rois(imgs_tr, masks_tr, pred_blobs_tr, args)
    rois_te = lnd.create_rois(imgs_te, masks_te, pred_blobs_te, args)

    # Create rois dataset
    rois_tr, Y_tr, _, _ = neural.create_train_test_sets(
        blobs_tr, pred_blobs_tr, rois_tr, None, None, None)
    generator = augment.get_default_generator((args.roi_size, args.roi_size))
    rois_tr, Y_tr = augment.balance_and_perturb(rois_tr, Y_tr, generator)
    range_tr = (np.min(rois_tr), np.max(rois_tr))
    print "range {}".format(range_tr)

    # Extract features
    network = VGG16(mode='ots-feat', pool_layer=args.pool_layer)
    feats_tr = extract_convfeats(network, rois_tr, range_tr)
    feats_te = extract_convfeats(network, rois_te, range_tr)
    np.save('data/{}-lidc-feats.npy'.format(args.detector), feats_tr)
    np.save('data/{}-jsrt140p-feats.npy'.format(args.detector), feats_te)

    # Eval classifier
    clf = LinearSVC(C=args.svm_c)
    froc = evaluate_classifier(clf, feats_tr, Y_tr, blobs_te, pred_blobs_te,
                               feats_te)
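
# Hedged sketch: extract_convfeats is used above but not shown in this listing.
# Judging from its call sites (truncated VGG16 network, a batch of ROIs and the
# training intensity range), it presumably rescales the ROIs with the training
# range and runs a forward pass up to the chosen pooling layer.  The helper
# below is an illustrative assumption, not the project's actual implementation.
def extract_convfeats_sketch(network, rois, intensity_range):
    lo, hi = min(intensity_range), max(intensity_range)
    rois = (rois.astype('float32') - lo) / (hi - lo + 1e-7)  # normalise to [0, 1]
    feats = network.predict(rois)  # assumes a Keras-style .predict on the wrapper
    return feats.reshape(len(feats), -1)  # one flat feature vector per ROI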
def visual_results_jsrt_only(model_name, args):
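    """Load the per-fold trained models, predict on the JSRT positive test
    folds and save the test ROIs with their predicted probabilities."""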
    print "Visual results for model {} JSRT only".format(model_name)
    imgs, blobs = jsrt.load(set_name='jsrt140p')
    pred_blobs = detect.read_blobs('data/{}-jsrt140p-pred-blobs.pkl'.format(
        args.detector))
    masks = np.load('data/aam-jsrt140p-pred-masks.npy')
    rois = create_rois(imgs, masks, pred_blobs, args)
    folds = KFold(n_splits=5, shuffle=True,
                  random_state=util.FOLDS_SEED).split(imgs)
    fold_idx = 0
    for tr, te in folds:
        model = neural.create_network(model_name, args,
                                      (1, args.roi_size, args.roi_size))
        model.load('data/' + model.name + '.fold-{}'.format(fold_idx + 1))
        X_tr, Y_tr, X_te, Y_te = neural.create_train_test_sets(
            blobs[tr], pred_blobs[tr], rois[tr], blobs[te],
            pred_blobs[te], rois[te])

        print 'load weights {}'.format(model.name)
        model.network.load_weights('data/{}_weights.h5'.format(model.name))
        # FIX: remove this and add zmuv_mean and zmuv_std to the Preprocessor in augment.py
        if not hasattr(model.preprocessor, 'zmuv_mean'):
            model.preprocessor.fit(X_tr, Y_tr)

        model.save('data/' + model.name)
        pred_blobs_te, probs_te, rois_te = neural.predict_proba(
            model, pred_blobs[te], rois[te])
        util.save_rois_with_probs(rois_te, probs_te)
        fold_idx += 1
def model_output(model_name, args):
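    """Run the per-fold LIDC-trained models on the JSRT positive test folds
    and save their outputs (blobs, probabilities and ROIs) per image."""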
    print "Model Outputs"
    imgs, blobs = jsrt.load(set_name='jsrt140p')
    pred_blobs = detect.read_blobs('data/{}-jsrt140p-pred-blobs.pkl'.format(
        args.detector))
    masks = np.load('data/aam-jsrt140p-pred-masks.npy')
    rois = create_rois(imgs, masks, pred_blobs, args)
    folds = KFold(n_splits=5, shuffle=True,
                  random_state=util.FOLDS_SEED).split(imgs)

    fold_idx = 0
    frocs = []
    legends = ['Fold {}'.format(i + 1) for i in range(5)]

    index = np.array(range(len(imgs)))
    for tr, te in folds:
        X_tr, Y_tr, _, _ = neural.create_train_test_sets(
            blobs[tr], pred_blobs[tr], rois[tr], blobs[te], pred_blobs[te],
            rois[te])
        model = neural.create_network(model_name, args,
                                      (1, args.roi_size, args.roi_size))
        model.name = model.name + '-{}-lidc.fold-{}'.format(
            args.detector, fold_idx + 1)
        model.network.load_weights('data/{}_weights.h5'.format(model.name))
        if not hasattr(model.preprocessor, 'zmuv_mean'):
            model.preprocessor.fit(X_tr, Y_tr)

        print "Predict ..."
        pred_blobs_te, probs_te, rois_te = neural.predict_proba(
            model, pred_blobs[te], rois[te])

        print "Save ..."
        eval.save_outputs(imgs[te], blobs[te], pred_blobs_te, probs_te,
                          rois_te, index[te])
def evaluate_model(model,
                   real_blobs_tr,
                   pred_blobs_tr,
                   rois_tr,
                   real_blobs_te,
                   pred_blobs_te,
                   rois_te,
                   load_model=False):
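    """Fit the model (or load pretrained weights), then return its FROC curve
    on the test candidate blobs."""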
    X_tr, Y_tr, X_te, Y_te = neural.create_train_test_sets(
        real_blobs_tr, pred_blobs_tr, rois_tr, real_blobs_te, pred_blobs_te,
        rois_te)

    if load_model:
        print 'load weights {}'.format(model.name)
        model.network.load_weights('data/{}_weights.h5'.format(model.name))
        # FIX: remove this and add zmuv_mean and zmuv_std to the Preprocessor in augment.py
        if not hasattr(model.preprocessor, 'zmuv_mean'):
            model.preprocessor.fit(X_tr, Y_tr)
    else:
        _ = model.fit(X_tr, Y_tr, X_te, Y_te)

    model.save('data/' + model.name)

    pred_blobs_te, probs_te, _ = neural.predict_proba(model, pred_blobs_te,
                                                      rois_te)
    return eval.froc(real_blobs_te, pred_blobs_te, probs_te)
def save_rois(args):
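    """Save matched positive ROIs and randomly sampled negative ROIs from LIDC
    (train) and the JSRT positives (test) as .jpg and .npy files."""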
    imgs_tr, blobs_tr = lidc.load(pts=False)
    pred_blobs_tr = detect.read_blobs('data/sbf-aam-lidc-pred-blobs.pkl')
    masks_tr = np.load('data/aam-lidc-pred-masks.npy')

    imgs_te, blobs_te = jsrt.load(set_name='jsrt140p')
    pred_blobs_te = detect.read_blobs('data/sbf-aam-jsrt140p-pred-blobs.pkl')
    masks_te = np.load('data/aam-jsrt140p-pred-masks.npy')

    rois_tr = create_rois(imgs_tr,
                          masks_tr,
                          pred_blobs_tr,
                          args,
                          real_blobs=blobs_tr)
    rois_te = create_rois(imgs_te,
                          masks_te,
                          pred_blobs_te,
                          args,
                          real_blobs=blobs_te)
    X_tr, Y_tr, X_te, Y_te = neural.create_train_test_sets(
        blobs_tr, pred_blobs_tr, rois_tr, blobs_te, pred_blobs_te, rois_te)
    X_tr, Y_tr = util.split_data_pos_neg(X_tr, Y_tr)
    X_te, Y_te = util.split_data_pos_neg(X_te, Y_te)

    X_pos = X_tr[0]
    idx = np.random.randint(0, len(X_tr[1]), len(X_pos))
    X_neg = X_tr[1][idx]

    print len(X_pos), len(X_neg)
    for i in range(len(X_pos)):
        util.imwrite('data/lidc/roi{}p.jpg'.format(i), X_pos[i][0])
        np.save('data/lidc/roi{}p.npy'.format(i), X_pos[i])
        util.imwrite('data/lidc/roi{}n.jpg'.format(i), X_neg[i][0])
        np.save('data/lidc/roi{}n.npy'.format(i), X_neg[i])

    X_pos = X_te[0]
    idx = np.random.randint(0, len(X_te[1]), len(X_pos))
    X_neg = X_te[1][idx]

    print len(X_pos), len(X_neg)
    for i in range(len(X_pos)):
        util.imwrite('data/jsrt140/roi{}p.jpg'.format(i), X_pos[i][0])
        np.save('data/jsrt140/roi{}p.npy'.format(i), X_pos[i])
        util.imwrite('data/jsrt140/roi{}n.jpg'.format(i), X_neg[i][0])
        np.save('data/jsrt140/roi{}n.npy'.format(i), X_neg[i])
    def train_with_feature_set_keras(self, feats_tr, pred_blobs_tr, real_blobs_tr,
                                        feats_test=None, pred_blobs_test=None, real_blobs_test=None,
                                        model='shallow_1', model_suffix=None, network_init=None):
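        """Build the requested network, optionally initialise its weights, fit
        it on the given feature set and return the training history."""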
        print("{} {} {} {} {} {}".format(len(feats_tr), len(pred_blobs_tr), len(real_blobs_tr), len(feats_test), len(pred_blobs_test), len(real_blobs_test)))
        X_train, Y_train, X_test, Y_test = neural.create_train_test_sets(feats_tr, pred_blobs_tr, real_blobs_tr, 
                                                feats_test, pred_blobs_test, real_blobs_test, streams=self.streams )

        print "X_train shape {}".format(X_train.shape)
        self.network = neural.create_network(
            model, (X_train.shape[1], self.roi_size, self.roi_size),
            self.streams)
        if network_init is not None:
            if self.args.transfer:
                self.load_cnn_weights('data/{}_{}'.format(network_init, model_suffix))
            else:
                self.load_cnn_weights(network_init)

        name = 'data/{}_{}'.format(model, model_suffix)
        history = self.network.fit(X_train, Y_train, X_test, Y_test,
                                   streams=(self.streams != 'none'),
                                   cropped_shape=(self.roi_size, self.roi_size),
                                   checkpoint_prefix=name,
                                   checkpoint_interval=2)
        return history
def exp_eval_ots_jsrt_only(args):
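    """5-fold evaluation of off-the-shelf VGG16 features with a linear SVM on
    the JSRT positive cases only, saving per-fold and averaged FROC curves."""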
    # load JSRT-positives data
    network = VGG16(mode='ots-feat', pool_layer=args.pool_layer)
    print "Model Evaluation Protocol 2"
    imgs, blobs = jsrt.load(set_name='jsrt140p')
    pred_blobs = detect.read_blobs('data/{}-jsrt140p-pred-blobs.pkl'.format(
        args.detector))
    masks = np.load('data/aam-jsrt140p-pred-masks.npy')
    rois = lnd.create_rois(imgs, masks, pred_blobs, args)
    folds = KFold(n_splits=5, shuffle=True,
                  random_state=util.FOLDS_SEED).split(imgs)

    fold_idx = 0
    frocs = []
    legends = ['Fold {}'.format(i + 1) for i in range(5)]
    for tr, te in folds:
        # Eval classifier
        rois_tr, Y_tr, _, _ = neural.create_train_test_sets(
            blobs[tr], pred_blobs[tr], rois[tr], None, None, None)
        generator = augment.get_default_generator(
            (args.roi_size, args.roi_size))
        rois_tr, Y_tr = augment.balance_and_perturb(rois_tr, Y_tr, generator)

        # Extract off-the-shelf VGG16 features for this fold
        range_tr = (np.min(rois_tr), np.max(rois_tr))
        feats_tr = extract_convfeats(network, rois_tr, range_tr)
        feats_te = extract_convfeats(network, rois[te], range_tr)

        clf = LinearSVC(C=args.svm_c)
        froc = evaluate_classifier(clf, feats_tr, Y_tr, blobs[te],
                                   pred_blobs[te], feats_te)
        frocs.append(froc)

        current_frocs = [eval.average_froc([froc_i]) for froc_i in frocs]
        util.save_froc(current_frocs,
                       'data/lsvm-{}-jsrtonly-folds'.format(args.detector),
                       legends[:len(frocs)],
                       with_std=False)
        fold_idx += 1

    froc = eval.average_froc(frocs)
    legends = ['Test FROC (JSRT positives)']
    util.save_froc([froc],
                   'data/lsvm-{}-jsrtonly'.format(args.detector),
                   legends,
                   with_std=True)
def extract_features_from_convnet(args):  # Load img, blobs and masks
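    """Extract off-the-shelf VGG16 features for LIDC candidate ROIs on the
    model-selection folds and save the train/test feature arrays per fold."""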
    imgs, blobs, paths = lidc.load(pts=True, set_name=args.ds_tr)
    pred_blobs = detect.read_blobs('data/{}-lidc-pred-blobs.pkl'.format(
        args.detector))
    masks = np.load('data/aam-lidc-pred-masks.npy')

    assert len(imgs) == len(masks) and len(pred_blobs) == len(masks)

    # Load folds
    folds = util.model_selection_folds(imgs)

    # Create rois
    rois = lnd.create_rois(imgs, masks, pred_blobs, args, real_blobs=blobs)

    # Load model
    network = VGG16(mode='ots-feat', pool_layer=args.pool_layer)
    network.summary()

    #  Set up CV
    frocs = []
    legends = ['Fold {}'.format(i + 1) for i in range(util.NUM_VAL_FOLDS)]
    fold_idx = 0

    for tr, te in folds:
        # TODO: apply extract convfeats funcs for tr and te sets
        print "Fold {}".format(fold_idx + 1)
        X_tr, Y_tr, _, _ = neural.create_train_test_sets(
            blobs[tr], pred_blobs[tr], rois[tr], blobs[te], pred_blobs[te],
            rois[te])
        gc.collect()

        generator = augment.get_default_generator(
            (args.roi_size, args.roi_size))
        X_tr, Y_tr = augment.balance_and_perturb(X_tr, Y_tr, generator)
        gc.collect()
        '''
        counta = 0
        countb = 0
        count = 0

        while counta < 10 and countb < 10: 
            if Y_tr[count][1] > 0 and counta < 10:
                util.imshow("positives", X_tr[count][0], display_shape=(256, 256))
                counta += 1
            elif Y_tr[count][1] == 0.0 and countb < 10:
                util.imshow("negatives", X_tr[count][0], display_shape=(256, 256))
                countb += 1
            count += 1
        '''

        range_tr = (X_tr.min(), X_tr.max())
        print "Range {}".format(range_tr)
        print "Extract feats on balanced tr set"
        feats_tr = extract_convfeats(network, X_tr, range_tr)
        save_features(
            "data/vgg16-{}-{}-f{}-lidc-feats".format(args.pool_layer,
                                                     args.detector, fold_idx),
            feats_tr, Y_tr)
        gc.collect()

        print "Extract feats on te set"
        feats_te = extract_convfeats_from_rois(network, rois[te], range_tr)
        print "Test feats to save shape {}".format(feats_te.shape)
        np.save(
            "data/vgg16-{}-{}-f{}-te-lidc-feats.npy".format(
                args.pool_layer, args.detector, fold_idx), feats_te)
        gc.collect()
        fold_idx += 1
def froc_by_epochs(data,
                   blobs,
                   augmented_blobs,
                   rois,
                   folds,
                   network_model,
                   nb_epochs=30,
                   epoch_interval=2):
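    """Evaluate saved per-epoch checkpoints of a detection network with FROC
    on every fold and track partial AUCs as training progresses."""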
    network_init = None
    roi_size = 32
    streams = 'none'

    imgs = []
    masks = []
    for i in range(len(data)):
        img, lung_mask = data.get(i, downsample=True)
        sampled, lce, norm = preprocess.preprocess_hardie(img,
                                                          lung_mask,
                                                          downsample=True)
        imgs.append([lce])
        masks.append(lung_mask)
    imgs = np.array(imgs)
    masks = np.array(masks)

    # Hardcoding blob set shapes
    blobs2 = blobs
    blobs = blobs.reshape((len(blobs), 3))

    nb_checkpoints = int(nb_epochs / epoch_interval)
    epochs = np.linspace(epoch_interval, nb_checkpoints * epoch_interval,
                         nb_checkpoints).astype(int)

    av_frocs = []
    names = []
    aucs1 = []
    aucs2 = []
    for epoch in epochs:
        frocs = []
        fold = 1
        for tr_idx, te_idx in folds:
            print "Fold {} ...".format(fold)
            X_train, Y_train, X_test, Y_test = neural.create_train_test_sets(
                rois[tr_idx],
                augmented_blobs[tr_idx],
                blobs[tr_idx],
                rois[te_idx],
                augmented_blobs[te_idx],
                blobs[te_idx],
                streams=streams,
                detector=True)

            # load network
            network = neural.create_network(network_model,
                                            X_train.shape,
                                            fold,
                                            streams,
                                            detector=False)
            name = 'data/{}_fold_{}.epoch_{}'.format(network_model, fold,
                                                     epoch)
            network.network.load_weights('{}_weights.h5'.format(name))

            # open network on detector mode
            detector_network = neural.create_network(network_model,
                                                     X_train.shape,
                                                     fold,
                                                     streams,
                                                     detector=True)
            copy_weights(network, detector_network)

            # evaluate network on test
            blobs_te_pred, probs_te_pred = detect_with_network(
                detector_network, imgs[te_idx], masks[te_idx], fold=fold)

            froc = eval.froc(blobs2[te_idx], blobs_te_pred, probs_te_pred)
            frocs.append(froc)
            fold += 1

        names.append('{}, epoch {}'.format(network_model, epoch))
        ops = eval.average_froc(frocs, fppi_range)
        av_frocs.append(ops)
        aucs1.append(util.auc(ops, range(0, 60)))
        aucs2.append(util.auc(ops, range(0, 40)))
        util.save_auc(
            np.array(range(1, len(aucs1) + 1)) * epoch_interval, aucs1,
            'data/{}-auc-0-60'.format(network_model))
        util.save_auc(
            np.array(range(1, len(aucs2) + 1)) * epoch_interval, aucs2,
            'data/{}-auc-0-40'.format(network_model))

    return av_frocs, names
def eval_cnn_detector(data, blobs, augmented_blobs, rois, folds, model):
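    """Train a CNN per fold, reopen it in detector mode on the test images and
    return the fold-averaged FROC curve."""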
    fold = 1
    network_init = None
    roi_size = 32
    streams = 'none'

    imgs = []
    masks = []
    for i in range(len(data)):
        img, lung_mask = data.get(i, downsample=True)
        sampled, lce, norm = preprocess.preprocess_hardie(img,
                                                          lung_mask,
                                                          downsample=True)
        imgs.append([lce])
        masks.append(lung_mask)
    imgs = np.array(imgs)
    masks = np.array(masks)

    # Hardcoding blob set shapes
    blobs2 = blobs
    blobs = blobs.reshape((len(blobs), 3))

    frocs = []
    for tr_idx, te_idx in folds:
        print "Fold {} ...".format(fold)
        X_train, Y_train, X_test, Y_test = neural.create_train_test_sets(
            rois[tr_idx],
            augmented_blobs[tr_idx],
            blobs[tr_idx],
            rois[te_idx],
            augmented_blobs[te_idx],
            blobs[te_idx],
            streams=streams,
            detector=True)

        network = neural.create_network(model,
                                        X_train.shape,
                                        fold,
                                        streams,
                                        detector=False)
        if network_init is not None:
            network.network.load_weights('data/{}_fold_{}_weights.h5'.format(
                network_init, fold))

        # save network
        name = 'data/{}_fold_{}'.format(model, fold)
        history = network.fit(X_train,
                              Y_train,
                              X_test,
                              Y_test,
                              streams=(streams != 'none'),
                              cropped_shape=(roi_size, roi_size),
                              checkpoint_prefix=name,
                              checkpoint_interval=2,
                              loss='mse')
        network.save(name)

        # open network on detector mode
        network.network.summary()
        detector_network = neural.create_network(model,
                                                 X_train.shape,
                                                 fold,
                                                 streams,
                                                 detector=True)
        detector_network.network.summary()
        copy_weights(network, detector_network)
        #network.network.load_weights('{}_weights.h5'.format(name))
        #network.load(name)

        # evaluate network on test
        blobs_te_pred, probs_te_pred = detect_with_network(detector_network,
                                                           imgs[te_idx],
                                                           masks[te_idx],
                                                           fold=fold)

        froc = eval.froc(blobs2[te_idx], blobs_te_pred, probs_te_pred)
        frocs.append(froc)
        fold += 1

    av_froc = eval.average_froc(frocs, fppi_range)
    return av_froc