Example #1
def predict_all(model,
                data_dir,
                out_dir='results',
                backbone='resnet34',
                input_size=None,
                preprocessing_fcn=None):
    if preprocessing_fcn is None:
        preprocessing_fcn = get_preprocessing(backbone)

    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)

    images = []
    for directory, _, files in os.walk(data_dir):
        for fn in files:
            if is_image(fn):
                images.append(os.path.join(directory, fn))

    for fn_full in images:
        name = os.path.splitext(os.path.basename(fn_full))[0]
        predict(model=model,
                image=fn_full,
                out_path=os.path.join(out_dir, name + '.png'),
                preprocessing_fcn=preprocessing_fcn,
                input_size=input_size)
Example #2
def mark_all(model, data_dir, out_dir='marked_predictions', backbone='resnet34',
             input_size=None, preprocessing_fcn=None):

    if preprocessing_fcn is None:
        preprocessing_fcn = get_preprocessing(backbone)

    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)

    images = []
    for directory, _, files in os.walk(data_dir):
        for fn in files:
            if is_image(fn):
                images.append(os.path.join(directory, fn))

    # CSV header: image filename followed by ten predicted (x, y) keypoint columns
    s = 'filename,x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,x6,y6,x7,y7,x8,y8,x9,y9,x10,y10\n'
    for fn_full in images:
        name = os.path.splitext(os.path.basename(fn_full))[0]
        mask = predict(model=model,
                       image=fn_full,
                       out_path=None,
                       preprocessing_fcn=preprocessing_fcn,
                       input_size=input_size)
        pts = pmask2points(np.squeeze(mask))
        s += (os.path.basename(fn_full) + ',' +
              ','.join('%s,%s' % (x, y) for x, y in pts) + '\n')
        image = resize(cv.imread(fn_full), dsize=input_size)
        cv.imwrite(os.path.join(out_dir, name + '.jpg'),
                   mark_image(image, pts))
        # Also save the raw prediction: per-keypoint channels summed into one grayscale mask
        cv.imwrite(os.path.join(out_dir, name + '.png'),
                   (np.squeeze(mask).sum(axis=2) * 255).astype('uint8'))
    with open(os.path.join(out_dir, 'points.csv'), 'w+') as csv_out:
        csv_out.write(s)
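Example #3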
    def __init__(self, x, y, valid):
        assert(x.shape[0] == y.shape[0])
        self.x = x
        self.y = y
        self.valid = valid
        self.preprocessing_fn = get_preprocessing('resnet50')

        SZ = image_size

        self.augs = albu.Compose([
            # albu.OneOf([albu.RandomSizedCrop(min_max_height=(SZ//2, SZ), height=SZ, width=SZ, p=0.5),
            #       albu.PadIfNeeded(min_height=SZ, min_width=SZ, p=0.5)], p=1),
            # albu.VerticalFlip(p=0.5),
            # albu.HorizontalFlip(p=0.5),
            # albu.RandomRotate90(p=0.5),
            albu.Rotate(p=0.5, limit=10),
            albu.OneOf([
                albu.ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
                albu.GridDistortion(p=0.5),
                albu.OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
                ], p=0.8),
            # albu.CLAHE(p=0.8),
            # albu.RandomContrast(p=0.8),
            albu.RandomBrightness(p=0.8),
            albu.RandomGamma(p=0.8)])

        print("created Datagen: x", x.shape, "y", y.shape)
Example #4
def test(model, out_dir='results', backbone='resnet34'):
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)

    import skimage.io as io
    import skimage.transform as trans  # needed for trans.resize below

    def loadimg(image_path,
                target_size=(256, 256),
                flag_multi_class=False,
                as_gray=False):
        img = io.imread(image_path, as_gray=as_gray)
        img = img / 255
        img = trans.resize(img, target_size)
        # if not flag_multi_class:
        #     img = np.expand_dims(img, -1)
        # img = np.expand_dims(img, 0)
        return img

    imgs = [
        os.path.join(data_dir, 'val', 'images', fn)
        for fn in os.listdir(os.path.join(data_dir, 'val', 'images'))
    ]

    preprocessing_fn = get_preprocessing(backbone)
    for fn in imgs:
        x = np.expand_dims(loadimg(fn), 0)
        x = preprocessing_fn(x)
        y = model.predict(x)
        s = y - y.min()
        s = s / s.max() * 255

        name = os.path.splitext(os.path.basename(fn))[0]
        io.imsave(os.path.join(out_dir, name + '.png'),
                  s.squeeze().astype('uint8'))
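Example #5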
def generate_mask(input_img, model=model):
    '''
    input: PIL JPEG image of size [224, 224, 3]
    output: np.array of size [224, 224, 3]
    '''
    # process x
    x = np.array(input_img).reshape((1, 224, 224, 3))
    preprocessing_fn = get_preprocessing(BACKBONE)
    x = preprocessing_fn(x)
    # predict y
    ypred = model.predict(x).reshape((224, 224, 1))
    ypred = np.dstack((ypred, ypred, ypred))
    return ypred
Example #6
def preprocess(image, input_size=None, backbone='resnet34', preprocessing_fcn=None):

    if isinstance(image, str):
        image = imread(image)

    if preprocessing_fcn is None:
        preprocessing_fcn = get_preprocessing(backbone)

    if input_size is not None:
        image = resize(image, input_size)
    image = image / 255
    image = preprocessing_fcn(image)
    return np.expand_dims(image, 0)
Example #7
File: iotools.py  Project: mathandy/andnn
def get_data_generators(data_dir,
                        backbone='resnet34',
                        batch_size=2,
                        input_size=(256, 256),
                        keras_augmentations=None,
                        preprocessing_function_x=None,
                        preprocessing_function_y=None,
                        preload=False,
                        cached_preloading=False,
                        presplit=True,
                        random_crops=False):
    if keras_augmentations is None:
        keras_augmentations = dict()
    if preprocessing_function_x is None:
        preprocessing_function_x = get_preprocessing(backbone)

    training_generator = \
        data_generator(data_dir=os.path.join(data_dir, 'train'),
                       batch_size=batch_size,
                       input_size=input_size,
                       keras_augmentations=keras_augmentations,
                       preprocessing_function_x=preprocessing_function_x,
                       preprocessing_function_y=preprocessing_function_y,
                       preload=preload, cached_preloading=cached_preloading,
                       mode=None if presplit else 'training',
                       random_crops=random_crops)

    validation_generator = \
        data_generator(data_dir=os.path.join(data_dir, 'val'),
                       batch_size=batch_size,
                       input_size=input_size,
                       keras_augmentations=None,
                       preprocessing_function_x=preprocessing_function_x,
                       preprocessing_function_y=preprocessing_function_y,
                       preload=preload, cached_preloading=cached_preloading,
                       mode=None if presplit else 'validation',
                       random_crops=False)

    def count_images(directory):
        images = [
            x for x in os.listdir(os.path.join(directory)) if is_jpeg_or_png(x)
        ]
        return len(images)

    training_steps_per_epoch = count_images(
        os.path.join(data_dir, 'train', 'images')) / batch_size
    validation_steps_per_epoch = count_images(
        os.path.join(data_dir, 'val', 'images')) / batch_size
    return (training_generator, validation_generator, training_steps_per_epoch,
            validation_steps_per_epoch)
Example #8
    def preprocess_data(do_prediction, inputnpyfname, targetnpyfname,
                        expandChannel, backbone):
        # Preprocess the data (beyond what I already did before)

        print('-' * 30)
        print('Loading and preprocessing data...')
        print('-' * 30)

        # Load, normalize, and cast the data
        imgs_input = (np.load(inputnpyfname).astype('float32') / (2**16 - 1) *
                      (2**8 - 1)).astype('uint8')
        print('Input images information:')
        print(imgs_input.shape)
        print(imgs_input.dtype)
        hist, bins = np.histogram(imgs_input)
        print(hist)
        print(bins)
        if not do_prediction:
            imgs_mask_train = np.load(targetnpyfname).astype('uint8')
            print('Input masks information:')
            print(imgs_mask_train.shape)
            print(imgs_mask_train.dtype)
            hist, bins = np.histogram(imgs_mask_train)
            print(hist)
            print(bins)

        # Make the grayscale images RGB since that's what the model expects apparently
        if expandChannel:
            imgs_input = np.stack((imgs_input, ) * 3, -1)
        else:
            imgs_input = np.expand_dims(imgs_input, 3)
        print('New shape of input images:')
        print(imgs_input.shape)
        if not do_prediction:
            imgs_mask_train = np.expand_dims(imgs_mask_train, 3)
            print('New shape of masks:')
            print(imgs_mask_train.shape)

        # Preprocess as per https://github.com/qubvel/segmentation_models
        preprocessing_fn = get_preprocessing(backbone)
        imgs_input = preprocessing_fn(imgs_input)

        # Return appropriate variables
        if not do_prediction:
            return (imgs_input, imgs_mask_train)
        else:
            return (imgs_input)
Example #9
def train(x_train: NpArray, x_valid: NpArray, y_train: NpArray, y_valid: NpArray,
          fold: int = -1) -> None:
    preprocessing_fn = get_preprocessing('resnet34')
    x_train = preprocessing_fn(x_train)
    x_valid = preprocessing_fn(x_valid)

    model = Unet(backbone_name='resnet34', encoder_weights='imagenet')
    model.compile('Adam', 'binary_crossentropy', metrics=[my_iou_metric])
    model.summary()

    model_name = make_output_path("models/fold%d.hdf5" % fold)
    model_checkpoint = ModelCheckpoint(model_name, monitor='val_my_iou_metric',
                                       mode='max', save_best_only=True, verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor='val_my_iou_metric', mode='max',
                                  factor=0.5, patience=5, min_lr=3e-6, verbose=1)

    model.fit(x_train, y_train, validation_data=[x_valid, y_valid], epochs=EPOCHS,
              batch_size=BATCH_SIZE, callbacks=[model_checkpoint, reduce_lr],
              verbose=VERBOSE)
Example #10
def get_datagen(backbone='resnet34'):

    preprocessing_fn = get_preprocessing(backbone)
    # x = preprocessing_fn(x)

    myGene = trainGenerator(2,
                            data_dir + '/train',
                            'images',
                            'segmentations',
                            data_gen_args,
                            save_to_dir=None,
                            preprocessing_function_x=preprocessing_fn)

    val_gen = trainGenerator(2,
                             data_dir + '/val',
                             'images',
                             'segmentations',
                             dict(),
                             save_to_dir=None,
                             preprocessing_function_x=preprocessing_fn)
    return myGene, val_gen
Example #11
def predict_on_other_datasets(model_path,
                              backbone,
                              decoder_type,
                              batch_norm_type,
                              thr=0.5):
    model = get_model(backbone, decoder_type, batch_norm_type)
    model.load_weights(model_path)

    for dataset in ['chexpert', 'china_set', 'jsrt', 'montgomery_set']:
        test_images = []
        files = glob.glob(OUTPUT_PATH +
                          'dataset_parts/{}/*.png'.format(dataset))
        ITERS_TO_PRED = 1000

        for f in files:
            img = cv2.imread(f, 0)
            img = cv2.resize(img, (SHAPE_SIZE, SHAPE_SIZE),
                             interpolation=cv2.INTER_LINEAR)
            test_images.append(np.stack((img, img, img), axis=2))

        cache_path = CACHE_PATH + 'preds_cache_{}_all_{}.pkl'.format(
            dataset, ITERS_TO_PRED)
        if not os.path.isfile(cache_path) or 1:
            test_images1 = np.array(test_images, dtype=np.float32)
            preprocess_input = get_preprocessing(backbone)
            test_images1 = preprocess_input(test_images1)
            test_preds_all = []
            for i in range(ITERS_TO_PRED):
                print('Predict: {}'.format(i))
                test_preds = model.predict(test_images1)
                test_preds_all.append(test_preds.copy())
            test_preds_all = np.array(test_preds_all, dtype=np.float32)
            # save_in_file_fast(test_preds, cache_path)
            np.save(cache_path[:-4] + '.npy', test_preds_all)
            save_in_file_fast((files, test_images), cache_path)
        else:
            files, test_images = load_from_file_fast(cache_path)
            test_preds_all = np.load(cache_path[:-4] + '.npy')  # match the path used by np.save above
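Example #12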
    def __getitem__(self, idx):
        '''Iterate over the data: return the batch at index idx'''
        images, masks = [], []

        for (image_path, mask_path) in zip(
                self.image_path_list[idx * self.batch_size:(idx + 1) *
                                     self.batch_size],
                self.mask_path_list[idx * self.batch_size:(idx + 1) *
                                    self.batch_size]):
            image = cv2.imread(image_path, 1)
            mask = cv2.imread(mask_path, 0)

            # Pad the image and mask to a consistent height and width
            image = self._padding(image)
            mask = self._padding(mask)

            # Apply the same augmentations to the corresponding image and mask
            augmentation = self.transformer(image=image, mask=mask)
            image = augmentation['image']
            mask = self._get_result_map(augmentation['mask'])

            images.append(image)
            masks.append(mask)

        images = np.array(images)
        masks = np.array(masks)

        # Choose the preprocessing that matches the selected backbone
        if self.backbone is not None:
            preprocess_input = get_preprocessing(self.backbone)
            images = preprocess_input(images)

        # For the author's own architecture, apply mean subtraction to the data instead
        else:
            images = pinput(images)

        return images, masks
Example #13
def network_setup():
    global preprocess, model, idx, train_batches, valid_batches
    # LOAD UNET WITH PRETRAINING FROM IMAGENET
    preprocess = get_preprocessing('resnet34')  # for resnet, img = (img-110.0)/1.0
    model = Unet('resnet34',
                 input_shape=(img_resize_shape[0], img_resize_shape[1],
                              in_channels),
                 classes=out_channels,
                 activation='sigmoid')
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=[dice_coef])
    # TRAIN AND VALIDATE MODEL
    idx = int(0.8 * len(train2))
    print()
    train_batches = DataGenerator(train2.iloc[:idx],
                                  shuffle=True,
                                  preprocess=preprocess)
    valid_batches = DataGenerator(train2.iloc[idx:], preprocess=preprocess)
    history = model.fit_generator(train_batches,
                                  validation_data=valid_batches,
                                  epochs=epochs,
                                  verbose=1)
Example #14
import csv
import numpy as np
import os
import pydicom
from segmentation_models.backbones import get_preprocessing
import tensorflow as tf

from pneumothorax_segmentation.constants import image_size, folder_path
from pneumothorax_segmentation.data_augment import apply_random_data_augment
from pneumothorax_segmentation.params import tf_image_size

# Documentation for reading dicom files at https://pydicom.github.io/pydicom/stable/viewing_images.html#using-pydicom-with-matplotlib

preprocess_input = get_preprocessing("resnet34")


def get_all_images_list(folder):
    "Load all images filenames in folder. Returns a list of (filepath, filename)"
    all_images_in_folder = []

    for dirName, _, fileList in os.walk(folder_path +
                                        "/data/dicom-images-%s" % folder):
        for filename in fileList:
            if ".dcm" in filename.lower():
                all_images_in_folder.append(
                    (os.path.join(dirName,
                                  filename), filename.replace(".dcm", "")))

    return all_images_in_folder

Example #15
def preprocess_input(input, backbone_name):
    preprocessing_fn = get_preprocessing(backbone_name)
    return preprocessing_fn(input)
Example #16
File: UNet.py  Project: UpCoder/C
    def __init__(self, backbone='resnet50', num_classes=1, encoder_weights='imagenet',
                 train_dir='./ck/', log_dir='./logs',
                 dataset_dir='/mnt/cephfs_hl/vc/liangdong.tony/datasets/chestCT/tfrecords/V1'):
        self.preprocess_input = get_preprocessing(backbone)
        self.input_image, _, _, self.input_mask = load_dataset(dataset_dir)
        self.input_image = self.preprocess_input(self.input_image)
        print('input_image is ', self.input_image, self.input_mask)
        with tf.device('/cpu:0'):
            self.base_model = Unet(backbone, num_classes=num_classes, encoder_weights=encoder_weights,
                                   activation='sigmoid', input_shape=dataset_config.input_shape)
        print('output are ', self.base_model.outputs)
        inputs_all = []

        # One input layer per GPU, each fed a slice of the full input batch
        for i in range(global_config.num_gpus):
            inputs_all.append(keras.layers.Input(tensor=get_slice(self.input_image, i, global_config.num_gpus)))

        num_outputs = 1
        num_gpus = global_config.num_gpus
        outputs_all = []
        for i in range(num_outputs):
            outputs_all.append([])
        for i in range(num_gpus):
            with tf.device('/gpu:%d' % i):
                print('gpu: {} used'.format(i))
                print('input layer is ', inputs_all[i])
                cur_pred = self.base_model(inputs_all[i])
                print('cur_pred is ', cur_pred)
                outputs_all[0].append(cur_pred)
        with tf.device('/cpu:0'):
            merged = []
            for outputs in outputs_all:
                merged.append(keras.layers.Concatenate(axis=0)(outputs))
            pred_tensor = merged[0]
            gt_tensor = keras.backend.cast(self.input_mask, "float32")
            tf.summary.scalar('distribution/seg_output/max',
                              tf.reduce_mean(tf.reduce_max(pred_tensor, axis=[1, 2, 3]), axis=0))
            tf.summary.scalar('distribution/seg_output/min',
                              tf.reduce_mean(tf.reduce_min(pred_tensor, axis=[1, 2, 3]), axis=0))
            tf.summary.scalar('distribution/seg_output/std',
                              tf.reduce_mean(tf.reduce_sum(
                                  (pred_tensor - tf.reduce_mean(pred_tensor, axis=[1, 2, 3], keepdims=True)) *
                                  (pred_tensor - tf.reduce_mean(pred_tensor, axis=[1, 2, 3], keepdims=True)),
                                  axis=[1, 2, 3]
                              )))

            loss_bce_dice_loss = keras.layers.Lambda(lambda xs: bce_dice_loss(xs[0], xs[1]))([gt_tensor, pred_tensor])
            tf.summary.scalar('loss/bce_dice', loss_bce_dice_loss)
            tf.summary.scalar('metrics/dice', dice_score(gt_tensor, pred_tensor))
            tf.summary.image('output/pred', tf.cast(merged[0] * 255., tf.uint8), max_outputs=3)
            summary_op = tf.summary.merge_all()
            self.parallel_model = keras.models.Model(inputs=inputs_all, outputs=merged)
            self.parallel_model.add_loss(loss_bce_dice_loss)
            cb_tensorboard = Tensorboard(summary_op, batch_interval=10, log_dir=log_dir)
            cb_checkpointer = CustomCheckpointer(train_dir, self.base_model, monitor='loss', mode='min',
                                                 save_best_only=False, verbose=False)
            # tf.keras.callbacks.TensorBoard
            self.cbs = [cb_tensorboard, cb_checkpointer]
            # self.parallel_model = keras.utils.multi_gpu_model(self.model, gpus=global_config.num_gpus)
            self.optimizer = keras.optimizers.Adam(lr=0.0001)
            self.parallel_model.compile(self.optimizer,
                                        # loss=bce_dice_loss,
                                        # metrics=[iou_score, dice_score]
                                        )
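Example #17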

model_type = 'my_res_unet'
# model_type = 'unet'
# model_type = 'fpn'
# model_type = 'linknet'
# model_type = 'pspnet'


backbone = 'resnet34'


if model_type == 'my_res_unet':
    preprocessing = adjust_input
else:
    preprocessing = smb.get_preprocessing(backbone)


def create_model(double_size=True, slide_augmentation=True, trainable_encoder=True, n=32, dropout=0.2):
    if model_type == 'my_res_unet':
        model = my_res_unet(n=n, batch_norm=True, dropout=dropout, slide_augmentation=slide_augmentation)
    else:
        image_size = 256 if double_size else 128
        if model_type == 'unet':
            model = sm.Unet(backbone_name=backbone,
                            input_shape=(image_size, image_size, 3),
                            classes=1,
                            activation='sigmoid',
                            encoder_weights='imagenet',
                            encoder_freeze=not trainable_encoder,
                            encoder_features='default',
Example #18
def train_single_model(num_fold, train_files, valid_files, backbone,
                       decoder_type, batch_norm_type):
    from keras.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger, ReduceLROnPlateau
    from keras.optimizers import Adam, SGD
    from keras.models import load_model, Model

    restore = 0
    patience = 100
    epochs = 1000
    optim_type = 'Adam'
    learning_rate = 0.0001
    dropout = 0.1
    cnn_type = '{}_{}_{}_{}_drop_{}_baesyan'.format(backbone, decoder_type,
                                                    batch_norm_type,
                                                    optim_type, dropout)
    print('Creating and compiling {}...'.format(cnn_type))

    train_images, train_masks = read_image_files(train_files)
    valid_images, valid_masks = read_image_files(valid_files)

    final_model_path = MODELS_PATH + '{}_fold_{}.h5'.format(cnn_type, num_fold)
    if os.path.isfile(final_model_path) and restore == 1:
        print('Model already exists for fold {}.'.format(final_model_path))
        return 0.0

    cache_model_path = MODELS_PATH + '{}_temp_fold_{}.h5'.format(
        cnn_type, num_fold)
    best_model_path = MODELS_PATH + '{}_fold_{}_'.format(
        cnn_type, num_fold) + '{epoch:02d}-{val_loss:.4f}-iou-{score:.4f}.h5'
    model = get_model(backbone, decoder_type, batch_norm_type, dropout=dropout)
    print(model.summary())
    if optim_type == 'SGD':
        optim = SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=True)
    else:
        optim = Adam(lr=learning_rate)

    loss_to_use = bce_jaccard_loss
    # loss_to_use = jacard_focal_loss
    model.compile(optimizer=optim,
                  loss=loss_to_use,
                  metrics=[iou_score, dice_coef])

    preprocess_input = get_preprocessing(backbone)
    valid_images_1, valid_masks_1 = preprocess_validation(
        valid_images.copy(), valid_masks.copy(), preprocess_input)

    print('Fitting model...')
    batch_size = 8
    batch_size_valid = 1
    print('Batch size: {}'.format(batch_size))
    steps_per_epoch = len(train_files) // (batch_size)
    validation_steps = len(valid_files) // (batch_size_valid)

    print('Steps train: {}, Steps valid: {}'.format(steps_per_epoch,
                                                    validation_steps))

    callbacks = [
        # EarlyStopping(monitor='val_loss', patience=patience, verbose=0),
        ModelCheckpoint_IOU(best_model_path,
                            cache_model_path,
                            save_best_only=True,
                            verbose=1,
                            validation_data=(valid_images_1, valid_masks_1,
                                             preprocess_input),
                            patience=patience),
        # ModelCheckpoint(cache_model_path, monitor='val_loss', verbose=0),
        # ModelCheckpoint(best_model_path, monitor='val_loss', save_best_only=True, verbose=0),
        ReduceLROnPlateau(monitor='val_loss',
                          factor=0.95,
                          patience=5,
                          min_lr=1e-9,
                          min_delta=1e-8,
                          verbose=1,
                          mode='min'),
        CSVLogger(HISTORY_FOLDER_PATH +
                  'history_fold_{}_{}_lr_{}_optim_{}.csv'.format(
                      num_fold, cnn_type, learning_rate, optim_type),
                  append=True),
    ]

    gen_train = batch_generator_train(train_images,
                                      train_masks,
                                      batch_size_valid,
                                      preprocess_input,
                                      augment=True)
    gen_valid = batch_generator_train(valid_images,
                                      valid_masks,
                                      1,
                                      preprocess_input,
                                      augment=False)
    history = model.fit_generator(generator=gen_train,
                                  epochs=epochs,
                                  steps_per_epoch=steps_per_epoch,
                                  validation_data=gen_valid,
                                  validation_steps=validation_steps,
                                  verbose=2,
                                  max_queue_size=10,
                                  callbacks=callbacks)

    max_iou = max(history.history['score_iou'])
    best_epoch = np.array(history.history['score_iou']).argmax()

    print('Max IOU: {:.4f} Best epoch: {}'.format(max_iou, best_epoch))

    model.load_weights(cache_model_path)
    model.save(final_model_path)
    now = datetime.datetime.now()
    filename = HISTORY_FOLDER_PATH + 'history_{}_{}_{:.4f}_lr_{}_{}.csv'.format(
        cnn_type, num_fold, max_iou, learning_rate,
        now.strftime("%Y-%m-%d-%H-%M"))
    pd.DataFrame(history.history).to_csv(filename, index=False)
    # save_history_figure(history, filename[:-4] + '.png', columns=('jacard_coef', 'val_jacard_coef'))
    return max_iou, cache_model_path
Example #19
    #     evaluate(data_dir=args.input,
    #              model=m,
    #              backbone=args.backbone,
    #              batch_size=args.batch_size,
    #              input_size=args.input_size,
    #              n_gpus=args.n_gpus)

    elif args.command == 'predict':
        m = load_model(args.checkpoint_path)
        if os.path.isdir(args.input):
            predict_all(model=m,
                        data_dir=args.input,
                        out_dir=args.output,
                        input_size=args.input_size,
                        backbone=args.backbone,
                        preprocessing_fcn=get_preprocessing(args.backbone))
        else:
            predict(model=m,
                    image=args.input,
                    out_path=args.output,
                    backbone=args.backbone,
                    preprocessing_fcn=get_preprocessing(args.backbone),
                    input_size=args.input_size)
    elif args.command == 'mark':
        metrics = get_keypoint_metrics((args.batch_size,) + args.input_size[::-1] + (10,))
        custom_objects = dict((m.__name__, m) for m in metrics)
        m = load_model(args.checkpoint_path, custom_objects=custom_objects)
        if os.path.isdir(args.input):
            mark_all(model=m,
                     data_dir=args.input,
                     out_dir=args.output,
Example #20
from segmentation_models import Unet
from segmentation_models.backbones import get_preprocessing
from segmentation_models.losses import bce_jaccard_loss
from segmentation_models.metrics import iou_score
import os

from iotools import get_data_generators

DATA_DIR = os.path.expanduser(
    '~/Dropbox/hand-segmented-fish-new-old-split-hiRes')
BACKBONE = 'resnet34'
preprocess_input = get_preprocessing(BACKBONE)

# load your data
# x_train, y_train, x_val, y_val = load_data(...)
(training_generator, validation_generator,
            training_steps_per_epoch, validation_steps_per_epoch) = \
    get_data_generators(DATA_DIR,
                        backbone=BACKBONE,
                        batch_size=2,
                        # input_size=(512, 768),
                        input_size=(1536, 768),
                        # input_size=(3300, 1452),
                        keras_augmentations=None,
                        preprocessing_function_x=None,
                        preprocessing_function_y=None,
                        preload=False,
                        cached_preloading=False,
                        presplit=True,
                        random_crops=False)
Example #21
def get_score_on_test_data(model_path,
                           backbone,
                           decoder_type,
                           batch_norm_type,
                           thr=0.5):
    from keras.utils import plot_model
    test_images = []
    test_masks = []
    files = glob.glob(INPUT_PATH + 'masks_test/*.png')
    ITERS_TO_PRED = 1000

    for f in files:
        mask1 = cv2.imread(f)
        mask = np.stack((mask1[:, :, 0], mask1[:, :, 1:].max(axis=2)), axis=2)
        img = cv2.imread(
            INPUT_PATH + 'Chest X-ray-14/img/' + os.path.basename(f), 0)
        img = cv2.resize(img, (SHAPE_SIZE, SHAPE_SIZE),
                         interpolation=cv2.INTER_LINEAR)
        test_images.append(np.stack((img, img, img), axis=2))
        test_masks.append(mask / 255)

    cache_path = CACHE_PATH + 'preds_cache_v4_all.pkl'
    if not os.path.isfile(cache_path) or 0:
        model = get_model(backbone, decoder_type, batch_norm_type)
        model.load_weights(model_path)
        # plot_model(model, to_file='model.png')
        # exit()

        test_images1 = np.array(test_images, dtype=np.float32)
        preprocess_input = get_preprocessing(backbone)
        test_images1 = preprocess_input(test_images1)
        test_preds_all = []
        for i in range(ITERS_TO_PRED):
            print('Predict: {}'.format(i))
            test_preds = model.predict(test_images1)
            test_preds_all.append(test_preds.copy())
        test_preds_all = np.array(test_preds_all, dtype=np.float32)
        # save_in_file_fast(test_preds, cache_path)
        np.save(cache_path + '.npy', test_preds_all)
        save_in_file_fast((files, test_images, test_masks), cache_path)
    else:
        files, test_images, test_masks = load_from_file_fast(cache_path)
        test_preds_all = np.load(cache_path + '.npy')

    test_preds = test_preds_all.mean(axis=0)
    print(test_preds.shape)

    avg_iou = []
    avg_dice = []

    avg_iou_heart = []
    avg_dice_heart = []

    avg_iou_lungs = []
    avg_dice_lungs = []

    for i in range(test_preds.shape[0]):
        p = test_preds[i]
        print(p.shape)
        p[p > thr] = 255
        p[p <= thr] = 0
        img_mask = cv2.resize(p.astype(np.uint8),
                              (test_masks[i].shape[1], test_masks[i].shape[0]),
                              interpolation=cv2.INTER_LINEAR)
        # img_mask = remove_small_noise_from_mask(img_mask, 10)
        img_mask[img_mask <= 127] = 0
        img_mask[img_mask > 127] = 1

        # show_image(test_masks[i].astype(np.uint8))

        iou = get_simple_iou_score(img_mask.astype(np.uint8),
                                   test_masks[i].astype(np.uint8))
        dice = get_simple_dice_score(img_mask.astype(np.uint8),
                                     test_masks[i].astype(np.uint8))

        img_mask_exp = np.zeros((img_mask.shape[0], img_mask.shape[1], 3),
                                dtype=np.uint8)
        test_mask_exp = np.zeros((img_mask.shape[0], img_mask.shape[1], 3),
                                 dtype=np.uint8)
        img_mask_exp[:, :, :2] = 255 * img_mask.astype(np.uint8)
        test_mask_exp[:, :, :2] = 255 * test_masks[i].astype(np.uint8)

        cv2.imwrite(PREDICTION_CACHE + os.path.basename(files[i]),
                    img_mask_exp)
        cv2.imwrite(
            PREDICTION_CACHE + os.path.basename(files[i])[:-4] + '_real.png',
            test_mask_exp)

        # print('Img: {} IOU: {:.4f} Dice: {:.4f}'.format(os.path.basename(files[i]), iou, dice))

        iou_heart = get_simple_iou_score(
            img_mask[:, :, :1].astype(np.uint8),
            test_masks[i][:, :, :1].astype(np.uint8))
        dice_heart = get_simple_dice_score(
            img_mask[:, :, :1].astype(np.uint8),
            test_masks[i][:, :, :1].astype(np.uint8))

        iou_lungs = get_simple_iou_score(
            img_mask[:, :, 1:].astype(np.uint8),
            test_masks[i][:, :, 1:].astype(np.uint8))
        dice_lungs = get_simple_dice_score(
            img_mask[:, :, 1:].astype(np.uint8),
            test_masks[i][:, :, 1:].astype(np.uint8))

        avg_iou.append(iou)
        avg_dice.append(dice)

        avg_iou_heart.append(iou_heart)
        avg_dice_heart.append(dice_heart)

        avg_iou_lungs.append(iou_lungs)
        avg_dice_lungs.append(dice_lungs)

    score_iou = np.array(avg_iou).mean()
    score_dice = np.array(avg_dice).mean()

    score_iou_heart = np.array(avg_iou_heart).mean()
    score_dice_heart = np.array(avg_dice_heart).mean()

    score_iou_lungs = np.array(avg_iou_lungs).mean()
    score_dice_lungs = np.array(avg_dice_lungs).mean()

    print("Average IOU score: {:.4f} Average dice score: {:.4f}".format(
        score_iou, score_dice))
    print("Average IOU heart: {:.4f} Average dice heart: {:.4f}".format(
        score_iou_heart, score_dice_heart))
    print("Average IOU lungs: {:.4f} Average dice lungs: {:.4f}".format(
        score_iou_lungs, score_dice_lungs))
    return score_iou_lungs, score_dice_lungs, score_iou_heart, score_dice_heart
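Example #22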
    # if combinations == "TriVV":
    #     x_train,  x_val, x_test = x_train_2[:,:,:,:3],  x_val_2[:,:,:,:3], x_test_2[:,:,:,:3]
    #     y_train,  y_val, y_test = y_train_2, y_val_2, y_test_2
    # elif combinations == "TriVH":
    #     x_train, x_val, x_test =  x_train_2[:,:,:,3:],  x_val_2[:,:,:,3:], x_test_2[:,:,:,3:]
    #     y_train,  y_val, y_test = y_train_2, y_val_2, y_test_2
    # elif combinations == "TriGamma": #"TriVVaVH":
    #     x_train,  x_val, x_test =   x_train_2[:,:,:,:],  x_val_2[:,:,:,:], x_test_2[:,:,:,:]
    #     y_train,  y_val, y_test = y_train_2, y_val_2, y_test_2
    # N = x_train.shape[-1]
    # print(x_train.shape)
    # print(x_val.shape)
    backs = ['mobilenetv2']  # ['resnet34','mobilenetv2']
    for k_back in range(len(backs)):
        BACKBONE = backs[k_back]  #
        preprocess_input = get_preprocessing(BACKBONE)
        # define model and choose between the following models:

        networks = [
            "shallow"
        ]  # ["Unet","Linknet","FPN", "shallow"]# ["Unet"]#,"PSPNet","Linknet","FPN"]#["PSPNet","Linknet","FPN"]#
        for k_mod in networks:  # range(len(networks)):
            #    size = size_t[k_mod]
            name_model = k_mod + "6"  # networks[k_mod]+"5"#+ "_despeck3_"# +"3"#
            name_model = k_mod + "_HRL"  #+ "_despeck3_"# +"3"#
            name_model = name_model + "_" + combinations + "_{}".format(
                int(time.time()))
            tensorboard = TensorBoard(log_dir='logs/{}'.format(name_model))

            if k_mod == "Unet":
                if N == 3:
                    model = Unet(BACKBONE,
Example #23
import pandas as pd
from generator import DataGenerator
from keras.callbacks import ModelCheckpoint, CSVLogger
from segmentation_models import Unet
from segmentation_models.backbones import get_preprocessing

path = '../output/model/'
train = pd.read_csv('../input/train2.csv')
train.fillna('', inplace=True)
train.reset_index(drop=True, inplace=True)

checkpoint = ModelCheckpoint(filepath=path + 'unet.h5',
                             monitor='val_dice_coef',
                             save_best_only=True)
csv_logger = CSVLogger('../output/training.log')

if __name__ == '__main__':
    preprocess = get_preprocessing('resnet34')
    model = Unet('resnet34',
                 input_shape=(128, 800, 3),
                 classes=4,
                 activation='sigmoid')
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=[dice_coef])

    idx = int(0.8 * len(train))
    train_batches = DataGenerator(train.iloc[:idx],
                                  shuffle=True,
                                  preprocess=preprocess)
    valid_batches = DataGenerator(train.iloc[idx:], preprocess=preprocess)

    history = model.fit_generator(train_batches,
Example #24

from keras import backend as K  # dice_coef below uses the Keras backend

# COMPETITION METRIC
def dice_coef(y_true, y_pred, smooth=1):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) +
                                           smooth)


from segmentation_models import Unet
from segmentation_models.backbones import get_preprocessing

# LOAD UNET WITH PRETRAINING FROM IMAGENET
preprocess = get_preprocessing('resnet34')  # for resnet, img = (img-110.0)/1.0
model = Unet('resnet34',
             input_shape=(128, 800, 3),
             classes=4,
             activation='sigmoid')
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[dice_coef])

# TRAIN AND VALIDATE MODEL
idx = int(0.8 * len(train2))
print()
train_batches = DataGenerator(train2.iloc[:idx],
                              shuffle=True,
                              preprocess=preprocess)
valid_batches = DataGenerator(train2.iloc[idx:], preprocess=preprocess)
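Example #25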
        x_batch = preprocessing_fn(x_batch)
        y_batch = to_categorical(y_batch, num_classes=num_classes)
        y_batch = y_batch.astype('int64')

        yield (x_batch, y_batch)


# In[ ]:
from segmentation_models.backbones import get_preprocessing

batch_size = 1

backbone = 'resnet18'

preprocessing_fn = get_preprocessing(backbone)

train_gen = custom_generator(images_path=images,
                             labels_path=labels,
                             preprocessing_fn=preprocessing_fn,
                             aug_mode=aug_mode,
                             batch_size=batch_size)

# In[ ]:
# # Define model
from segmentation_models import Linknet

model = Linknet(backbone_name=backbone,
                input_shape=input_shape,
                classes=num_classes,
                activation='softmax')
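Example #26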
#unet_filters = (512, 256, 128, 64, 32)
if "fpn_dropout" in hyperparams:
    fpn_dropout = float(hyperparams["fpn_dropout"])
    fpn_filters = int(hyperparams["fpn_filters"])
augmentation_list = None
val_split = 0.15
test_split = 0.15
loss = "unbalanced_weighted_bce_plus_dice_coef_loss"
early_stopping_metric = "val_loss"
early_stopping_mode = "min"
checkpoint_metric = "val_loss"
checkpoint_mode = "min"
input_size = (512, 512)
input_width = input_size[0]
input_height = input_size[1]
preprocess_input = get_preprocessing(model_backbone)

date_string = datetime.now().strftime("%m-%d-%y_%H:%M:%S")
epoch = datetime.utcfromtimestamp(0)
now = datetime.now()
filename = str(uid)
print("Filename: {}".format(filename))


def load_image(path):
    """Load grayscale image from path"""
    return cv2.resize(cv2.imread(path, 1), input_size)


def load_binary_image(path):
    """Load grayscale image from path"""