Example No. 1
    def run_liver(istrain=False,
                  model_name='unet',
                  modelcheckpoint='cache/liver/model/unet.hdf5',
                  model_pretrain='cache/liver/model/weight_unet_gen_tf.h5',
                  batch_size=1,
                  nb_epoch=500,
                  is_datagen=False,
                  channel=5):

        reader = DataSetVolumn()
        seg = SegmentationBatch(model_name, modelcheckpoint)

        if istrain:
            current_dir = os.path.dirname(os.path.realpath(__file__))
            weights_path = os.path.expanduser(
                os.path.join(current_dir,
                             model_pretrain)) if model_pretrain else None
            seg.train(reader,
                      weights_path=weights_path,
                      batch_size=batch_size,
                      nb_epoch=nb_epoch,
                      is_datagen=is_datagen,
                      channel=channel)

        testdata = reader.load_testdata_channel(channel)

        metrics_testdata = []
        for imagefile, data in testdata.items():
            X_test, y_test = data
            predicts = seg.predict(X_test, batch_size=batch_size)
            # Score each test case once and keep the per-case metrics.
            metrics = Metrics.all(y_test, predicts)
            pprint(metrics)
            metrics_testdata.append((imagefile, metrics))

        result = {
            key: sum(metrics[key] for _, metrics in metrics_testdata) /
            len(metrics_testdata)
            for key in ('acc', 'dice', 'jacc', 'sensitivity', 'specificity')
        }
        print('Average metrics across all test cases:')
        pprint(result)

        return (X_test, y_test, predicts)
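
A minimal call sketch, assuming run_liver is available as a plain function alongside DataSetVolumn, SegmentationBatch and Metrics from the surrounding module; the argument values below are the defaults shown in the signature above, not new settings.

    # Sketch only: train a 5-channel liver model with the default cache paths,
    # then inspect the arrays returned for the last test case.
    X_test, y_test, predicts = run_liver(
        istrain=True,
        model_name='unet',
        modelcheckpoint='cache/liver/model/unet.hdf5',
        batch_size=1,
        nb_epoch=500,
        channel=5)
    print(X_test.shape, y_test.shape, predicts.shape)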
Example No. 2
    def run_skin(istrain,
                 model_name,
                 modelcheckpoint,
                 model_pretrain=None,
                 batch_size=32,
                 nb_epoch=200,
                 is_datagen=False):
        print('-' * 30)
        print('Loading data...')
        print('-' * 30)
        X_train, X_test, y_train, y_test = SkinData.load_from_npy(
            images_npy='cache/skin/datasets/images_224_224_tf.npy',
            masks_npy='cache/skin/datasets/masks_224_224_tf.npy')

        seg = Segmentation(model_name, modelcheckpoint)

        if istrain:
            seg.train(X_train,
                      y_train, (X_test, y_test),
                      weights_path=Segmentation.absolute_path(model_pretrain),
                      batch_size=batch_size,
                      nb_epoch=nb_epoch,
                      is_datagen=is_datagen)

        predicts = seg.predict(X_test, batch_size=batch_size)
        pprint(Metrics.all(y_test, predicts))

        return (X_test, y_test, predicts)
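
A hypothetical call for this example, assuming run_skin is importable from the surrounding module; the model name and checkpoint path below are illustrative and merely follow the cache layout of Example No. 1, they are not taken from this listing.

    # Sketch only: evaluate an already trained skin model (istrain=False).
    X_test, y_test, predicts = run_skin(
        istrain=False,
        model_name='unet',                             # assumed model name
        modelcheckpoint='cache/skin/model/unet.hdf5')  # hypothetical path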
Example No. 3
    def run_breast(
            istrain,
            model_name,
            modelcheckpoint,
            model_pretrain=None,
            batch_size=32,
            nb_epoch=500,
            is_datagen=False,
            images_npy='cache/breast/datasets/images_crop_224_224_tf.npy',
            masks_npy='cache/breast/datasets/masks_crop_224_224_tf.npy'):
        print('-' * 30)
        print('Loading data...')
        print('-' * 30)
        X_train, X_test, y_train, y_test = BreastData.load_from_npy(
            images_npy, masks_npy, test_size=.1)
        print('Done loading: train shape {0}, test shape {1}'.format(
            X_train.shape, X_test.shape))

        # shape = y_train.shape
        # y_flat = np.reshape(y_train, shape[0]*shape[1]*shape[2]*shape[3]).astype('uint8')
        # maxvalue, minvalue = np.max(y_flat), np.min(y_flat)
        # cw = class_weight.compute_class_weight('balanced', np.unique(y_flat), y_flat)

        seg = Segmentation(model_name, modelcheckpoint)

        if istrain:
            seg.train(X_train,
                      y_train, (X_test, y_test),
                      weights_path=Segmentation.absolute_path(model_pretrain),
                      batch_size=batch_size,
                      nb_epoch=nb_epoch,
                      is_datagen=is_datagen)

        predicts = seg.predict(X_test, batch_size=batch_size)
        pprint(Metrics.all(y_test, predicts))

        return (X_test, y_test, predicts)
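
A hypothetical call for this example, assuming run_breast is importable from the surrounding module; the model name and checkpoint path are illustrative, while the dataset paths, batch size and epoch count are the defaults from the signature above.

    # Sketch only: train on the default cropped breast .npy datasets and
    # score the held-out 10% split.
    X_test, y_test, predicts = run_breast(
        istrain=True,
        model_name='unet',                               # assumed model name
        modelcheckpoint='cache/breast/model/unet.hdf5',  # hypothetical path
        batch_size=32,
        nb_epoch=500)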
Example No. 4
    def run_liver(istrain,
                  model_name,
                  modelcheckpoint,
                  model_pretrain=None,
                  batch_size=1,
                  nb_epoch=500,
                  is_datagen=False,
                  isliver=True,
                  image_size=(512, 512)):

        if isliver:
            reader = DataSetVolumn(image_size=image_size)
        else:
            reader = TumorVolumn(image_size=image_size)
        seg = Segmentation(model_name, modelcheckpoint)

        if istrain:
            X_train, y_train = reader.load_traindata()
            seg.train(X_train,
                      y_train,
                      reader.validation_data(),
                      weights_path=Segmentation.absolute_path(model_pretrain),
                      batch_size=batch_size,
                      nb_epoch=nb_epoch,
                      is_datagen=is_datagen)

        testdata = reader.load_testdata()

        metrics_testdata = []
        for imagefile, data in testdata.items():
            X_test, y_test = data
            predicts = seg.predict(X_test, batch_size=batch_size)

            def save(dataset):
                # Dump the volume, ground-truth mask and prediction for this
                # case under cache/<dataset>/result as .npy files.
                case_id = imagefile.split('-')[1]
                result_dir = 'cache/{0}/result'.format(dataset)
                np.save(os.path.join(result_dir, 'volumn-' + case_id + '.npy'),
                        X_test)
                np.save(
                    os.path.join(result_dir,
                                 'segmentation-' + case_id + '.npy'), y_test)
                np.save(os.path.join(result_dir, 'predict-' + case_id + '.npy'),
                        predicts)

            # save('liver') if isliver else save('tumor')

            def tumor2noscale(imagefile, y_test, predicts):
                # In tumor mode, rebuild the binary tumor masks from the raw
                # labels and map the predictions back to the original 512x512
                # frame before scoring; in liver mode, pass the arrays through.
                if isliver:
                    return y_test, predicts
                boxs, images, masks = reader.get_data(imagefile)
                masks[masks < 2] = 0
                masks //= 2
                imagesize = (512, 512)
                predicts_new = np.zeros(
                    (predicts.shape[0], imagesize[0], imagesize[1], 1),
                    dtype=predicts.dtype)
                for i, predict in enumerate(predicts):
                    predicts_new[i, :, :, 0] = cv.resize(predict[:, :, 0],
                                                         imagesize)
                predicts_new = to_category(predicts_new)
                predicts_new = back2noscale(boxs, predicts_new)
                # viewSequence(masks)
                # viewSequence(predicts_new)
                return masks, predicts_new

            y_test, predicts = tumor2noscale(imagefile, y_test, predicts)
            metrics = Metrics.all(y_test, predicts)
            pprint(metrics)
            metrics_testdata.append((imagefile, metrics))

        result = {
            key: sum(metrics[key] for _, metrics in metrics_testdata) /
            len(metrics_testdata)
            for key in ('acc', 'dice', 'jacc', 'sensitivity', 'specificity')
        }
        print('Average metrics across all test cases:')
        pprint(result)

        return (X_test, y_test, predicts)
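
A hypothetical call for this example, assuming run_liver is importable from the surrounding module; the model name and checkpoint path are illustrative. With isliver=False the TumorVolumn reader is used and predictions are mapped back to the 512x512 frame before scoring.

    # Sketch only: evaluate tumor segmentation (isliver=False) with an
    # already trained model; other arguments keep their defaults.
    X_test, y_test, predicts = run_liver(
        istrain=False,
        model_name='unet',                              # assumed model name
        modelcheckpoint='cache/tumor/model/unet.hdf5',  # hypothetical path
        isliver=False)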