Example #1
def init_and_train_model(train_df, train_path, val_df, val_path, model_type, BACKBONE, AUGMENTATIONS, batch_size, epoch_num):
  preprocess_input = sm.backbones.get_preprocessing(BACKBONE)  # (unused here; preprocessing is presumably applied inside the generators)

  if model_type == "Linknet":
    model = sm.Linknet(BACKBONE, input_shape=(256, 256, 3), encoder_weights='imagenet', encoder_freeze=True)
  elif model_type == "Unet":
    model = sm.Unet(BACKBONE, input_shape=(256, 256, 3), encoder_weights='imagenet', encoder_freeze=True)
  elif model_type == "FPN":
    model = sm.FPN(BACKBONE, input_shape=(256, 256, 3), encoder_weights='imagenet', encoder_freeze=True)
  elif model_type == "PSPNet":
    # PSPNet needs spatial dims divisible by 48 (6 * the default downsample_factor of 8), hence 240 instead of 256
    model = sm.PSPNet(BACKBONE, input_shape=(240, 240, 3), encoder_weights='imagenet', encoder_freeze=True)
  else:
    raise ValueError('Unknown model_type: {}'.format(model_type))

  # only the PSPNet generators differ: they must yield 240x240 batches
  if model_type == "PSPNet":
    train_gen = keras_generator_with_augs(train_df, batch_size, train_path, AUGMENTATIONS, PSPNet=True)
    val_gen = keras_generator(val_df, batch_size, val_path, PSPNet=True)
  else:
    train_gen = keras_generator_with_augs(train_df, batch_size, train_path, AUGMENTATIONS)
    val_gen = keras_generator(val_df, batch_size, val_path)
    
  model.compile(
    'Adam',
    loss=sm.losses.dice_loss,
    metrics=[sm.metrics.dice_score],
  )
  
  best_w = keras.callbacks.ModelCheckpoint(model_type + '_' + BACKBONE + '_best.h5',
                                monitor='val_loss',
                                verbose=0,
                                save_best_only=True,
                                save_weights_only=True,
                                mode='auto',
                                period=1)

  last_w = keras.callbacks.ModelCheckpoint(model_type + '_' + BACKBONE + '_last.h5',
                                monitor='val_loss',
                                verbose=0,
                                save_best_only=False,
                                save_weights_only=True,
                                mode='auto',
                                period=1)


  callbacks = [best_w, last_w]
  
  history = model.fit_generator(train_gen,
              steps_per_epoch=50,
              epochs=epoch_num,
              verbose=1,
              callbacks=callbacks,
              validation_data=val_gen,
              validation_steps=50,
              class_weight=None,
              max_queue_size=1,
              workers=1,
              use_multiprocessing=False,
              shuffle=True,
              initial_epoch=0)
  return model, history
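
A minimal usage sketch for the function above; the paths and hyperparameter values are hypothetical, and train_df/val_df are assumed to be the DataFrames the keras_generator* helpers expect:

model, history = init_and_train_model(train_df, 'data/train', val_df, 'data/val',
                                      'Unet', 'resnet34', AUGMENTATIONS,
                                      batch_size=8, epoch_num=30)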
  
Example #2
    def __init__(
            self,
            backbone="efficientnetb3",
            model_choice="FPN",
            num_classes=29,
            input_shape=(320, 320),
    ):

        self.input_shape = input_shape
        self.classes = [str(i) for i in range(num_classes)] + ["background"]
        self.backbone = backbone

        n_classes = len(self.classes)
        activation = "softmax"

        if model_choice == "FPN":
            self.model = sm.FPN(
                self.backbone,
                classes=n_classes,
                activation=activation,
                input_shape=(input_shape[0], input_shape[1], 3),
                encoder_weights="imagenet",
            )
        else:
            self.model = None
            print("{} is not used yet".format(model_choice))

        self.preprocessing = _build_keypoint_preprocessing(
            input_shape, backbone)

Example #3
def inference(model_name, image_folder_path):
    # wrap our image inside the Dataset wrapper used for training,
    # TODO: remove this and add custom pipeline for preprocessing.
    trial_dataset = Dataset(
        image_folder_path,
        image_folder_path,
        classes=CLASSES,
        augmentation=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocess_input),
    )


    print(model_name)
    if model_name=="unet":
        print("Loading Models. This might take some time...")
        modelUnet = sm.Unet(BACKBONE, classes=n_classes, activation=activation)
        model_c = config.STYLES["unet"]
        model_path = os.path.join(f"{config.MODEL_PATH}",f"{model_c}.h5")
        modelUnet.load_weights(model_path)
        print("Loaded Unet.")
        model = modelUnet
    elif model_name == "featurepyramidnetwork":
        modelFPN = sm.FPN(BACKBONE, classes=n_classes, activation=activation)
        model_c = config.STYLES["featurepyramidnetwork"]
        model_path = os.path.join(f"{config.MODEL_PATH}", f"{model_c}.h5")
        modelFPN.load_weights(model_path)
        print("Loaded FPN.")
        model = modelFPN
    elif model_name == "linknet":
        modelLinknet = sm.Linknet(BACKBONE, classes=n_classes, activation=activation)
        model_c = config.STYLES["linknet"]
        model_path = os.path.join(f"{config.MODEL_PATH}", f"{model_c}.h5")
        modelLinknet.load_weights(model_path)
        print("Loaded Linknet.")
        model = modelLinknet
    else:
        raise ValueError('Unknown model_name: {}'.format(model_name))
    # model.load_weights(model_path) 

    # the trial folder must contain exactly one image, hence the [0]
    image, gt_mask = trial_dataset[0]
    image = np.expand_dims(image, axis=0)
    pr_mask = model.predict(image).round()
    #print(pr_mask.shape)
    #print(pr_mask[0].shape)
    # make image back to normal
    image=denormalize(image.squeeze())
    gt_mask=gt_mask[..., 0].squeeze()
    pr_mask=pr_mask[..., 0].squeeze()
  
    # DEBUG: 
    # visualize(
    #     image=image,
    #     gt_mask=gt_mask,
    #     pr_mask=pr_mask,
    # )
    del model
    gc.collect()
    return pr_mask, gt_mask
Example #4
def create_model():
    """
    Function used to create model based on parameters specified in settings.py
    """
    if model_type == 'myresunet':
        model = myresunet.create_model()
    elif model_type == 'unet':
        model = sm.Unet(backbone_name=backbone,
                        input_shape=(image_height, image_width, 3),
                        classes=num_classes,
                        activation='softmax',
                        encoder_weights='imagenet',
                        encoder_freeze=False,
                        encoder_features='default',
                        decoder_block_type='upsampling',
                        decoder_filters=(decoder_scaler * 256, decoder_scaler * 128, decoder_scaler * 64,
                                         decoder_scaler * 32, decoder_scaler * 16),
                        decoder_use_batchnorm=True)
    elif model_type == 'fpn':
        model = sm.FPN(backbone_name=backbone,
                       input_shape=(image_height, image_width, 3),
                       classes=num_classes,
                       activation='softmax',
                       encoder_weights='imagenet',
                       encoder_freeze=False,
                       encoder_features='default',
                       pyramid_block_filters=decoder_scaler * 256,
                       pyramid_use_batchnorm=True,
                       pyramid_aggregation='concat',
                       pyramid_dropout=None)
    elif model_type == 'linknet':
        model = sm.Linknet(backbone_name=backbone,
                           input_shape=(image_height, image_width, 3),
                           classes=num_classes,
                           activation='softmax',
                           encoder_weights='imagenet',
                           encoder_freeze=False,
                           encoder_features='default',
                           decoder_block_type='upsampling',
                           decoder_filters=(None, None, None, None, decoder_scaler * 16),
                           decoder_use_batchnorm=True)
    elif model_type == 'pspnet':
        model = sm.PSPNet(backbone_name=backbone,
                          input_shape=(image_height, image_width, 3),
                          classes=num_classes,
                          activation='softmax',
                          encoder_weights='imagenet',
                          encoder_freeze=False,
                          downsample_factor=8,
                          psp_conv_filters=decoder_scaler * 512,
                          psp_pooling_type='avg',
                          psp_use_batchnorm=True,
                          psp_dropout=None)
    else:
        raise ValueError('Invalid segmentation model type: {}'.format(model_type))
    return model
Example #5
def fpn(backbone, pretrained_weights=None):
    model = sm.FPN(backbone, 
                   input_shape=(256, 256, 3), 
                   classes=1, 
                   activation='sigmoid', 
                   encoder_weights=pretrained_weights)
    
    model.compile(optimizer='adam', 
                  loss=sm.losses.bce_jaccard_loss, 
                  metrics=[sm.metrics.iou_score, sm.metrics.f1_score])
    return model
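
For example, the helper above can be built with an ImageNet-pretrained encoder ('resnet34' is an arbitrary backbone choice):

model = fpn('resnet34', pretrained_weights='imagenet')
model.summary()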
Example #6
def network(CLASSES, BACKBONE, arch):
    # define network parameters
    n_classes = 1 if len(CLASSES) == 1 else (
        len(CLASSES) + 1)  # case for binary and multiclass segmentation
    activation = 'sigmoid' if n_classes == 1 else 'softmax'

    # create model
    if arch == 'FPN':
        model = sm.FPN(BACKBONE, classes=n_classes, activation=activation)
    elif arch == 'Unet':
        model = sm.Unet(BACKBONE, classes=n_classes, activation=activation)
    else:
        raise ValueError('Unknown architecture: {}'.format(arch))
    return model, n_classes
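
To illustrate the class/activation rule in network() above:

# network(['car'], 'resnet34', 'Unet')        -> n_classes=1, activation='sigmoid'
# network(['car', 'road'], 'resnet34', 'FPN') -> n_classes=3, activation='softmax' (2 classes + background)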

Example #7
def create_model(border=False, trainable_encoder=False):
    if model_type == 'unet':
        model = sm.Unet(backbone_name=backbone,
                        input_shape=(image_size, image_size, 3),
                        classes=2 if border else 1,
                        activation='sigmoid',
                        encoder_weights='imagenet',
                        encoder_freeze=not trainable_encoder,
                        encoder_features='default',
                        decoder_block_type='upsampling',
                        decoder_filters=(256, 128, 64, 32, 16),
                        decoder_use_batchnorm=True)
    elif model_type == 'fpn':
        model = sm.FPN(backbone_name=backbone,
                       input_shape=(image_size, image_size, 3),
                       classes=2 if border else 1,
                       activation='sigmoid',
                       encoder_weights='imagenet',
                       encoder_freeze=not trainable_encoder,
                       encoder_features='default',
                       pyramid_block_filters=256,
                       pyramid_use_batchnorm=True,
                       pyramid_aggregation='concat',
                       pyramid_dropout=None)
    elif model_type == 'linknet':
        model = sm.Linknet(backbone_name=backbone,
                           input_shape=(image_size, image_size, 3),
                           classes=2 if border else 1,
                           activation='sigmoid',
                           encoder_weights='imagenet',
                           encoder_freeze=not trainable_encoder,
                           encoder_features='default',
                           decoder_block_type='upsampling',
                           decoder_filters=(None, None, None, None, 16),
                           decoder_use_batchnorm=True)
    elif model_type == 'pspnet':
        model = sm.PSPNet(backbone_name=backbone,
                          input_shape=(image_size, image_size, 3),
                          classes=2 if border else 1,
                          activation='sigmoid',
                          encoder_weights='imagenet',
                          encoder_freeze=not trainable_encoder,
                          downsample_factor=8,
                          psp_conv_filters=512,
                          psp_pooling_type='avg',
                          psp_use_batchnorm=True,
                          psp_dropout=None)
    else:
        raise ValueError('Invalid segmentation model type: {}'.format(model_type))
    return model
Example #8
def build_model():
    images = tf.keras.Input(shape=[None, None, 3], name="image", dtype=tf.float32)
    model = sm.FPN(
        backbone_name="mobilenetv2",
        input_shape=(None, None, 3),
        classes=7,
        activation="sigmoid",
        weights=None,
        encoder_weights="imagenet",
        encoder_features="default",
        pyramid_block_filters=256,
        pyramid_use_batchnorm=True,
        pyramid_aggregation="concat",
        pyramid_dropout=None,
    )(images)

    return tf.keras.Model(inputs={"image": images}, outputs=model)

Example #9
def get_backboned_model(model, backbone, freeze=True):

    if model == 'Unet':
        base_model = sm.Unet(backbone_name=backbone,
                             encoder_weights='imagenet',
                             classes=1,
                             activation='sigmoid',
                             freeze_encoder=freeze)
    elif model == 'FPN':
        base_model = sm.FPN(backbone_name=backbone,
                            encoder_weights='imagenet',
                            classes=1,
                            activation='sigmoid',
                            freeze_encoder=freeze)
    elif model == 'Linknet':
        base_model = sm.Linknet(backbone_name=backbone,
                                encoder_weights='imagenet',
                                classes=1,
                                activation='sigmoid',
                                freeze_encoder=freeze)
    elif model == 'PSPNet':
        base_model = sm.PSPNet(backbone_name=backbone,
                               encoder_weights='imagenet',
                               classes=1,
                               activation='sigmoid',
                               freeze_encoder=freeze)
    else:
        print('Model not identified! Unet is selected')
        base_model = sm.Unet(backbone_name=backbone,
                             encoder_weights='imagenet',
                             classes=1,
                             activation='sigmoid',
                             freeze_encoder=freeze)

    inp = Input(shape=(96, 96, 1))
    l1 = Conv2D(3, (1, 1))(inp)  # map N channels data to 3 channels
    out = base_model(l1)

    model = Model(inp, out, name=base_model.name)

    # print(model.summary())
    return model
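
A hypothetical call for the wrapper above, building a single-channel Unet head over the 96x96 single-channel input:

seg_model = get_backboned_model('Unet', 'resnet34', freeze=True)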
Example #10
    def _build(self):
        if self.type == 'Unet':
            model = sm.Unet(backbone_name=self.backbone, 
                            classes=self.n_class,
                            activation=self.activate,
                            encoder_weights=self.encoder_weights)

        elif self.type == 'Linknet':
            model = sm.Linknet(backbone_name=self.backbone, 
                               classes=self.n_class, 
                               activation=self.activate, 
                               encoder_weights=self.encoder_weights)

        elif self.type == 'FPN':
            model = sm.FPN(backbone_name=self.backbone, 
                           classes=self.n_class, 
                           activation=self.activate, 
                           encoder_weights=self.encoder_weights)
        
        else:
            raise ValueError('Model type {} is not supported yet.'.format(self.type))

        return model
Example #11
 def __init__(self, **kwargs):
     import segmentation_models as sm
     sm.set_framework('tf.keras')
     super().__init__(name=kwargs['name'])
     del kwargs['name']
     self.unet = sm.FPN(**kwargs)
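     # note: despite the attribute name, self.unet holds an sm.FPN model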
Example #12
import gc

sub_df = pd.read_csv('../sample_submission.csv')
encoded_pixels = []
best_threshold = 0.665
best_size = 14000
sub_df['ImageId'] = sub_df['Image_Label'].apply(lambda x: x.split('_')[0])
test_imgs = pd.DataFrame(sub_df['ImageId'].unique(), columns=['ImageId'])
models = {'efficientnetb4_Unt': './efficientnetb4_Unet.h5',
          'efficientnetb50FPN': 'efficientnetb5_FPN_0_Fold.h5',
          'efficientnetb51FPN': 'efficientnetb5_FPN_1_Fold.h5',
          'efficientnetb52FPN': 'efficientnetb5_FPN_2_Fold.h5',
          'efficientnetb53FPN': 'efficientnetb5_FPN_3_Fold.h5',
          'efficientnetb54FPN': 'efficientnetb5_FPN_4_Fold.h5',
          'efficientnetb5_Unt': './efficientnetb5_Unet.h5',
          'densenet169_Unt': './densenet169_Unet.h5'}
predict_total = np.zeros((test_imgs.shape[0], 320, 480, 4), dtype=np.float16)
for k, model in enumerate(models):
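    # keys encode backbone + architecture; model[:-4] strips the 4-character
    # suffix (e.g. '_Unt', '0FPN') to recover the backbone name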
    if 'FPN' in model:
        model_ = sm.FPN(model[:-4], classes=4, input_shape=(320, 480, 3), activation='sigmoid')
    else:
        model_ = sm.Unet(model[:-4], classes=4, input_shape=(320, 480, 3), activation='sigmoid')
    model_.load_weights(models[model])
    # model_ = tta_segmentation(model_, h_flip=True, h_shift=(-10, 10), merge='mean')
    for i in range(0, test_imgs.shape[0], TEST_BATCH_SIZE):
        batch_idx = list(
            range(i, min(test_imgs.shape[0], i + TEST_BATCH_SIZE))
        )
        test_generator = DataGenerator(
            batch_idx,
            df=test_imgs,
            shuffle=False,
            mode='predict',
            dim=(350, 525),
        )

Example #13
def create_model(double_size=True, slide_augmentation=True, trainable_encoder=True, n=32, dropout=0.2):
    if model_type == 'my_res_unet':
        model = my_res_unet(n=n, batch_norm=True, dropout=dropout, slide_augmentation=slide_augmentation)
    else:
        image_size = 256 if double_size else 128
        if model_type == 'unet':
            model = sm.Unet(backbone_name=backbone,
                            input_shape=(image_size, image_size, 3),
                            classes=1,
                            activation='sigmoid',
                            encoder_weights='imagenet',
                            encoder_freeze=not trainable_encoder,
                            encoder_features='default',
                            decoder_block_type='upsampling',
                            decoder_filters=(16*n, 8*n, 4*n, 2*n, n),
                            decoder_use_batchnorm=True)
        elif model_type == 'fpn':
            model = sm.FPN(backbone_name=backbone,
                           input_shape=(image_size, image_size, 3),
                           classes=1,
                           activation='sigmoid',
                           encoder_weights='imagenet',
                           encoder_freeze=not trainable_encoder,
                           encoder_features='default',
                           pyramid_block_filters=256,
                           pyramid_use_batchnorm=True,
                           pyramid_dropout=None,
                           final_interpolation='bilinear')
        elif model_type == 'linknet':
            model = sm.Linknet(backbone_name=backbone,
                               input_shape=(image_size, image_size, 3),
                               classes=1,
                               activation='sigmoid',
                               encoder_weights='imagenet',
                               encoder_freeze=not trainable_encoder,
                               encoder_features='default',
                               decoder_block_type='upsampling',
                               decoder_filters=(None, None, None, None, 16),
                               decoder_use_batchnorm=True)
        elif model_type == 'pspnet':
            image_size = 240 if double_size else 120
            model = sm.PSPNet(backbone_name=backbone,
                              input_shape=(image_size, image_size, 3),
                              classes=1,
                              activation='sigmoid',
                              encoder_weights='imagenet',
                              encoder_freeze=not trainable_encoder,
                              downsample_factor=8,
                              psp_conv_filters=512,
                              psp_pooling_type='avg',
                              psp_use_batchnorm=True,
                              psp_dropout=None,
                              final_interpolation='bilinear')
        else:
            raise ValueError('Invalid segmentation model type: {}'.format(model_type))

        if not slide_augmentation:
            x = keras.layers.Input(shape=(101, 101, 1), name='input')
            if model_type == 'pspnet':
                y = keras.layers.ZeroPadding2D(((9, 10), (9, 10)), name='zero_pad_input')(x)
            else:
                y = keras.layers.ZeroPadding2D(((13, 14), (13, 14)), name='zero_pad_input')(x)
            y = keras.layers.Cropping2D()(y)
        else:
            if model_type == 'pspnet':
                x = keras.layers.Input(shape=(120, 120, 1), name='input')
            else:
                x = keras.layers.Input(shape=(128, 128, 1), name='input')
            y = x
        if double_size:
            y = keras.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(y)
        y = keras.layers.concatenate([y, y, y], name='channel_x3')
        y = model(y)
        if double_size:
            y = keras.layers.AvgPool2D(pool_size=(2, 2))(y)
        model = keras.models.Model(x, y)

    return model
Example #14
def train(weights_paths, model_name="unet", batch_size=16, loss_name="bce"):
    BATCH_SIZE = batch_size

    # for reference about the BUFFER_SIZE in shuffle:
    # https://stackoverflow.com/questions/46444018/meaning-of-buffer-size-in-dataset-map-dataset-prefetch-and-dataset-shuffle
    BUFFER_SIZE = 1000

    dataset = {"train": train_dataset, "val": val_dataset}

    # -- Train Dataset --#
    dataset['train'] = dataset['train'].map(
        load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset['train'] = dataset['train'].shuffle(buffer_size=BUFFER_SIZE,
                                                seed=SEED)
    dataset['train'] = dataset['train'].repeat()
    dataset['train'] = dataset['train'].batch(BATCH_SIZE)
    dataset['train'] = dataset['train'].prefetch(buffer_size=AUTOTUNE)

    #-- Validation Dataset --#
    dataset['val'] = dataset['val'].map(load_image_test)
    dataset['val'] = dataset['val'].repeat()
    dataset['val'] = dataset['val'].batch(BATCH_SIZE)
    dataset['val'] = dataset['val'].prefetch(buffer_size=AUTOTUNE)

    print(dataset['train'])
    print(dataset['val'])

    if model_name == "unet":
        model = sm.Unet('efficientnetb4',
                        input_shape=(None, None, 3),
                        classes=N_CLASSES,
                        activation='sigmoid',
                        encoder_weights=None,
                        weights=weights_paths)
    if model_name == "fpn":
        model = sm.FPN('efficientnetb4',
                       input_shape=(None, None, 3),
                       classes=N_CLASSES,
                       activation='sigmoid',
                       encoder_weights=None)
    if model_name == "psp":
        model = sm.PSPNet('efficientnetb4',
                          input_shape=(IMG_SIZE, IMG_SIZE, 3),
                          classes=N_CLASSES,
                          activation='sigmoid',
                          encoder_weights=None)

    optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)  # 0.001

    if loss_name == "bce":
        loss = tf.keras.losses.BinaryCrossentropy()
    elif loss_name == "bce_jaccard":
        loss = sm.losses.bce_jaccard_loss
    elif loss_name == "bce_jaccard_focal":
        loss = sm.losses.binary_focal_jaccard_loss
    elif loss_name == "binary_focal_dice":
        loss = sm.losses.binary_focal_dice_loss

    model.compile(optimizer=optimizer,
                  loss=loss,
                  metrics=['accuracy', sm.metrics.iou_score, dice_coe])

    EPOCHS = 50

    STEPS_PER_EPOCH = TRAINSET_SIZE // BATCH_SIZE
    VALIDATION_STEPS = VALSET_SIZE // BATCH_SIZE

    callbacks = [
        tf.keras.callbacks.ModelCheckpoint(
            'results/weights/' + str(model_name) + '_' + str(loss_name) +
            '.h5',
            monitor='val_dice_coe',
            mode='max',
            verbose=1,
            save_best_only=True,
            save_weights_only=False),
        tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                             factor=0.1,
                                             patience=8,
                                             min_lr=0.00001)
    ]

    results = model.fit(dataset['train'],
                        epochs=EPOCHS,
                        steps_per_epoch=STEPS_PER_EPOCH,
                        validation_steps=VALIDATION_STEPS,
                        callbacks=callbacks,
                        validation_data=dataset['val'])

    plt.figure(figsize=(8, 8))
    plt.title("Learning curve")
    plt.plot(results.history["loss"], label="loss")
    plt.plot(results.history["val_loss"], label="val_loss")
    plt.plot(np.argmin(results.history["val_loss"]),
             np.min(results.history["val_loss"]),
             marker="x",
             color="r",
             label="best model")
    plt.xlabel("Epochs")
    plt.ylabel("log_loss")
    plt.legend()
    plt.savefig('./results/plots/train_loss_' + str(model_name) + '_' +
                str(loss_name) + '.png')

    plt.figure(figsize=(8, 8))
    plt.title("Learning curve")
    plt.plot(results.history["dice_coe"], label="dice_coe")
    plt.plot(results.history["val_dice_coe"], label="val_dice_coe")
    plt.plot(np.argmax(results.history["val_dice_coe"]),
             np.max(results.history["val_dice_coe"]),
             marker="x",
             color="r",
             label="best model")
    plt.xlabel("Epochs")
    plt.ylabel("Dice Coeff")
    plt.legend()
    plt.savefig('./results/plots/train_dice_' + str(model_name) + '_' +
                str(loss_name) + '.png')

    plt.figure(figsize=(8, 8))
    plt.title("Learning curve")
    plt.plot(results.history["iou_score"], label="iou_score")
    plt.plot(results.history["val_iou_score"], label="val_iou_score")
    plt.plot(np.argmax(results.history["val_iou_score"]),
             np.max(results.history["val_iou_score"]),
             marker="x",
             color="r",
             label="best model")
    plt.xlabel("Epochs")
    plt.ylabel("IOU")
    plt.legend()
    plt.savefig('./results/plots/train_IOU_' + str(model_name) + '_' +
                str(loss_name) + '.png')

    plt.figure(figsize=(8, 8))
    plt.title("Learning curve")
    plt.plot(results.history["accuracy"], label="accuracy")
    plt.plot(results.history["val_accuracy"], label="val_accuracy")
    plt.plot(np.argmax(results.history["val_accuracy"]),
             np.max(results.history["val_accuracy"]),
             marker="x",
             color="r",
             label="best model")
    plt.xlabel("Epochs")
    plt.ylabel("accuracy")
    plt.legend()
    plt.savefig('./results/plots/train_accuracy_' + str(model_name) + '_' +
                str(loss_name) + '.png')
Example #15
target_index = np.asarray(target_index)
X_tst = X_tst[target_index]
Y_tst = Y_tst[target_index]
print("there are {} images with target mask in testset".format(len(Y_tst)))

# model construction (keras + sm)
if model_id == 0:
    model = sm.Unet(classes=1,
                    activation='sigmoid',
                    encoder_weights='imagenet')
elif model_id == 1:
    model = sm.Linknet(classes=1,
                       activation='sigmoid',
                       encoder_weights='imagenet')
elif model_id == 2:
    model = sm.FPN(classes=1, activation='sigmoid', encoder_weights='imagenet')
elif model_id == 3:
    model = sm.PSPNet(classes=1,
                      activation='sigmoid',
                      encoder_weights='imagenet')  # input size must be 384x384
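# (sm.PSPNet defaults to input_shape=(384, 384, 3); spatial dims must be
# divisible by 6 * downsample_factor, i.e. 48 with the default factor of 8)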

data_gen_args = dict(rotation_range=360,
                     width_shift_range=0.15,
                     height_shift_range=0.15,
                     zoom_range=0.15,
                     brightness_range=[0.7, 1.3],
                     horizontal_flip=True,
                     vertical_flip=False,
                     fill_mode='nearest')

image_generator = ImageDataGenerator(**data_gen_args)

Example #16
 def train(self,
           model_name='unet',
           backbone='resnet50',
           fine_tune=False,
           model_path=None,
           opt='adam',
           lr=0.001,
           shape=(256, 256)):
     os.makedirs('saved_models/segmentation', exist_ok=True)
     if fine_tune: lr = lr / 10
     opt_dict = {'adam': Adam(lr), 'sgd': SGD(lr), 'adadelta': Adadelta(lr)}
     if fine_tune and model_path:
         new_name = os.path.splitext(model_path)[0] + '_fine-tune_{}.h5'.format(opt)  # str.strip would also eat '5'/'h' characters from the name
         model = keras.models.load_model(model_path, compile=False)
         model.compile(
             optimizer=opt_dict[opt.lower()],
             loss=sm.losses.bce_jaccard_loss,
             metrics=['acc', sm.metrics.iou_score, sm.metrics.f1_score])
         model.summary()
         model.fit_generator(
             generator=self.data_loader.generator(is_train=True,
                                                  shape=shape,
                                                  shrink=self.shrink),
             steps_per_epoch=self.data_loader.train_steps,
             validation_data=self.data_loader.generator(is_train=False,
                                                        shape=shape,
                                                        shrink=self.shrink),
             validation_steps=self.data_loader.val_steps,
             verbose=1,
             initial_epoch=0,
             epochs=300,
             callbacks=[
                 keras.callbacks.TensorBoard('logs'),
                 keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                   patience=7,
                                                   verbose=1),
                 keras.callbacks.EarlyStopping(monitor='val_loss',
                                               patience=38,
                                               verbose=1),
                 keras.callbacks.ModelCheckpoint(
                     monitor='val_loss',
                     verbose=1,
                     save_weights_only=False,
                     save_best_only=True,
                     filepath='saved_models/segmentation/' + new_name)
             ])
     else:
         if model_name.lower() == 'unet':
             model = sm.Unet(backbone,
                             encoder_weights='imagenet',
                             activation='sigmoid',
                             classes=1,
                             input_shape=(shape[0], shape[1], 3),
                             decoder_use_batchnorm=True)
         elif model_name.lower() == 'pspnet':
             model = sm.PSPNet(backbone,
                               encoder_weights='imagenet',
                               activation='sigmoid',
                               classes=1,
                               input_shape=(shape[0], shape[1], 3))
         elif model_name.lower() == 'fpn':
             model = sm.FPN(backbone,
                            encoder_weights='imagenet',
                            activation='sigmoid',
                            classes=1,
                            input_shape=(shape[0], shape[1], 3))
         elif model_name.lower() == 'linknet':
             model = sm.Linknet(backbone,
                                encoder_weights='imagenet',
                                activation='sigmoid',
                                classes=1,
                                input_shape=(shape[0], shape[1], 3))
         else:
             raise NotImplementedError
         model.compile(
             optimizer=opt_dict[opt.lower()],
             loss=sm.losses.bce_jaccard_loss,
             metrics=['acc', sm.metrics.iou_score, sm.metrics.f1_score])
         model.summary()
         name_list = [
             model_name, backbone, opt, 'init-training',
             'none' if shape is None else str(shape[0]) + 'x' +
             str(shape[1])
         ]
         if self.shrink is None:
             name_list.append('without-shrink')
         new_name = '_'.join(name_list) + '.h5'
         model.fit_generator(
             generator=self.data_loader.generator(is_train=True,
                                                  shape=shape,
                                                  shrink=self.shrink),
             steps_per_epoch=self.data_loader.train_steps,
             validation_data=self.data_loader.generator(is_train=False,
                                                        shape=shape,
                                                        shrink=self.shrink),
             validation_steps=self.data_loader.val_steps,
             verbose=1,
             initial_epoch=0,
             epochs=300,
             callbacks=[
                 keras.callbacks.TensorBoard('logs'),
                 keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                   patience=7,
                                                   verbose=1),
                 keras.callbacks.EarlyStopping(monitor='val_loss',
                                               patience=38,
                                               verbose=1),
                 keras.callbacks.ModelCheckpoint(
                     monitor='val_loss',
                     verbose=1,
                     save_weights_only=False,
                     save_best_only=True,
                     filepath='saved_models/segmentation/' + new_name)
             ])
Example #17
    dataset = Dataset(x_train_path, y_train_path, classes=['non-polyp', 'polyp'], augmentation=get_training_augmentation())
    
    BATCH_SIZE = 8
    CLASSES = ['non-polyp', 'polyp']
    LR = 0.0001
    EPOCHS = 25
    IMAGE_ORDERING = 'channels_last'
    n_classes = 2
    
    # SOTA
    BACKBONE = 'resnet34'
    # define model
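    # NOTE: each assignment below overwrites `model`; only the final
    # fcn_8 model further down is actually used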
    model = sm.Unet(BACKBONE, encoder_weights='imagenet')
    model = sm.Linknet(BACKBONE, encoder_weights='imagenet')
    model = sm.FPN(BACKBONE, encoder_weights='imagenet')
    model = sm.PSPNet(BACKBONE, encoder_weights='imagenet')
    
    model = fcn_8.fcn_8(2)

    optim = tf.keras.optimizers.Adam(LR)

    # segmentation_models losses can be combined with '+' and scaled by an integer or float factor
    # set class weights for dice_loss (non-polyp: 0.5; polyp: 1.)
    dice_loss = sm.losses.DiceLoss(class_weights=np.array([0.5, 1])) 
    focal_loss = sm.losses.BinaryFocalLoss() 
    # if n_classes == 1 else sm.losses.CategoricalFocalLoss()
    total_loss = dice_loss + (1 * focal_loss)

    # actually total_loss can be imported directly from the library; the example above just shows how to combine losses
    # total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss 

Example #18
    def define_Network(self):
        
        sys.path.append(self.args["main"])

        if self.args["model"] == 'Unet':
            
            from networks import Unet

            model = Unet(self.IMAGE_DIMS, n_filters=self.N_FILTERS, dropout=self.args['dropout'], 
                             batchnorm=self.args['batchnorm'], regularization=self.args['regularization'], 
                             kernel_initializer=self.args['init'])   

        elif 'sm' in self.args["model"]:
    
            # Refer to the following GitHub repository for the implementation of the Segmentation Models
            # with pretrained backbones
            # https://github.com/qubvel/segmentation_models  
            import segmentation_models as sm    
            
            _, model_to_use, BACKBONE = self.args["model"].split('_') # ['sm', 'FPN', 'mobilenet']

            # Define network parameters
            n_classes = 1
            activation = 'sigmoid'
            encoder_weights= self.args['encoder_weights'] # None or 'imagenet'
    
            # Define model    
            if model_to_use == 'FPN':
                pyramid_block_filters=512
                model = sm.FPN(BACKBONE, input_shape=self.IMAGE_DIMS, classes=n_classes, activation=activation, encoder_weights=encoder_weights,
                                pyramid_block_filters=pyramid_block_filters, pyramid_dropout = self.args['dropout'])
                   
            elif model_to_use == 'Unet':
                model = sm.Unet(BACKBONE, input_shape=self.IMAGE_DIMS, classes=n_classes, activation=activation, encoder_weights=encoder_weights,
                                decoder_filters=(1024, 512, 256, 128, 64), dropout = self.args['dropout'])
                
            # If requested, add regularization and then return the model
            model = self.add_regularization_function(self.args, model)
            
        elif self.args["model"] == 'Deeplabv3':
            
            # Refer to the following GitHub repository for the implementation of DeepLab 
            # https://github.com/tensorflow/models/tree/master/research/deeplab
            
            sys.path.append(self.args["main"] + 'networks/')
            from model import Deeplabv3        
            
            weights='pascal_voc'
            input_shape=self.IMAGE_DIMS
            classes = 1
            BACKBONE = 'xception' # 'xception','mobilenetv2'
            activation = 'sigmoid'# One of 'softmax', 'sigmoid' or None
            OS=16 # {8,16}
        
            model = Deeplabv3(weights=weights, input_shape=input_shape, classes=classes, backbone=BACKBONE,
                      OS=OS, activation=activation)
            
            import tensorflow as tf
            self.opt = tf.keras.optimizers.Adam(self.INIT_LR)

        elif self.args["model"] == 'DeepCrack':      

            # Refer to the following GitHub repository for the implementation of DeepCrack 
            # https://github.com/hanshenChen/crack-detection

            sys.path.append(self.args["main"] + 'networks/')
            from edeepcrack_cls import Deepcrack

            model = Deepcrack(input_shape=(self.BS, self.IMAGE_DIMS[0], self.IMAGE_DIMS[1], self.IMAGE_DIMS[2]))

            import tensorflow as tf
            self.opt = tf.keras.optimizers.Adam(self.INIT_LR)
   
        model.compile(optimizer=self.opt, loss=self.loss, metrics=[self.metrics])
        
        return model
Example #19
    def Setup(self):
        '''
        User function: Setup all the parameters

        Args:
            None

        Returns:
            None
        '''
        preprocess_input = sm.get_preprocessing(
            self.system_dict["params"]["backbone"])
        # define network parameters
        self.system_dict["local"]["n_classes"] = 1 if len(
            self.system_dict["dataset"]["train"]
            ["classes_to_train"]) == 1 else (
                len(self.system_dict["dataset"]["train"]["classes_to_train"]) +
                1)  # case for binary and multiclass segmentation
        activation = 'sigmoid' if self.system_dict["local"][
            "n_classes"] == 1 else 'softmax'

        #create model
        if (self.system_dict["params"]["model"] == "Unet"):
            self.system_dict["local"]["model"] = sm.Unet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "FPN"):
            self.system_dict["local"]["model"] = sm.FPN(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "Linknet"):
            self.system_dict["local"]["model"] = sm.Linknet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "PSPNet"):
            self.system_dict["local"]["model"] = sm.PSPNet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)

        # define optimizer
        optim = keras.optimizers.Adam(self.system_dict["params"]["lr"])

        # segmentation_models losses can be combined with '+' and scaled by an integer or float factor
        dice_loss = sm.losses.DiceLoss()
        focal_loss = sm.losses.BinaryFocalLoss() if self.system_dict["local"][
            "n_classes"] == 1 else sm.losses.CategoricalFocalLoss()
        total_loss = dice_loss + (1 * focal_loss)

        # actually total_loss can be imported directly from the library; the example above just shows how to combine losses
        # total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss

        metrics = [
            sm.metrics.IOUScore(threshold=0.5),
            sm.metrics.FScore(threshold=0.5)
        ]

        # compile the keras model with the defined optimizer, loss and metrics
        self.system_dict["local"]["model"].compile(optim, total_loss, metrics)

        # Dataset for train images
        train_dataset = Dataset(
            self.system_dict["dataset"]["train"]["img_dir"],
            self.system_dict["dataset"]["train"]["mask_dir"],
            self.system_dict["dataset"]["train"]["classes_dict"],
            classes_to_train=self.system_dict["dataset"]["train"]
            ["classes_to_train"],
            augmentation=get_training_augmentation(),
            preprocessing=get_preprocessing(preprocess_input),
        )

        if (self.system_dict["params"]["image_shape"][0] % 32 != 0):
            self.system_dict["params"]["image_shape"][0] += (
                32 - self.system_dict["params"]["image_shape"][0] % 32)

        if (self.system_dict["params"]["image_shape"][1] % 32 != 0):
            self.system_dict["params"]["image_shape"][1] += (
                32 - self.system_dict["params"]["image_shape"][1] % 32)

        # Dataset for validation images
        if (self.system_dict["dataset"]["val"]["status"]):
            valid_dataset = Dataset(
                self.system_dict["dataset"]["val"]["img_dir"],
                self.system_dict["dataset"]["val"]["mask_dir"],
                self.system_dict["dataset"]["train"]["classes_dict"],
                classes_to_train=self.system_dict["dataset"]["train"]
                ["classes_to_train"],
                augmentation=get_validation_augmentation(
                    self.system_dict["params"]["image_shape"][0],
                    self.system_dict["params"]["image_shape"][1]),
                preprocessing=get_preprocessing(preprocess_input),
            )
        else:
            valid_dataset = Dataset(
                self.system_dict["dataset"]["train"]["img_dir"],
                self.system_dict["dataset"]["train"]["mask_dir"],
                self.system_dict["dataset"]["train"]["classes_dict"],
                classes_to_train=self.system_dict["dataset"]["train"]
                ["classes_to_train"],
                augmentation=get_validation_augmentation(
                    self.system_dict["params"]["image_shape"][0],
                    self.system_dict["params"]["image_shape"][1]),
                preprocessing=get_preprocessing(preprocess_input),
            )

        self.system_dict["local"]["train_dataloader"] = Dataloder(
            train_dataset,
            batch_size=self.system_dict["params"]["batch_size"],
            shuffle=True)
        self.system_dict["local"]["valid_dataloader"] = Dataloder(
            valid_dataset, batch_size=1, shuffle=False)
Example #20
 def create_model(self):
     return sm.FPN(backbone_name=self._backbone,
                   activation="softmax",
                   classes=self._data.get_n_classes(),
                   encoder_weights=self._encoder_weights,
                   input_shape=self._input_shape)
Example #21
BACKBONE = 'efficientnetb3'
CLASSES = ['car']
n_classes = 1 if len(CLASSES) == 1 else (
    len(CLASSES) + 1)  # case for binary and multiclass segmentation
activation = 'sigmoid' if n_classes == 1 else 'softmax'
preprocess_input = sm.get_preprocessing(BACKBONE)

# models are pre-loaded for faster execution
print("Loading Models. This might take some time...")
modelUnet = sm.Unet(BACKBONE, classes=n_classes, activation=activation)
model_c = config.STYLES["unet"]
model_path = os.path.join(f"{config.MODEL_PATH}", f"{model_c}.h5")
modelUnet.load_weights(model_path)
print("Loaded Unet.")

modelFPN = sm.FPN(BACKBONE, classes=n_classes, activation=activation)
model_c = config.STYLES["featurepyramidnetwork"]
model_path = os.path.join(f"{config.MODEL_PATH}", f"{model_c}.h5")
modelFPN.load_weights(model_path)
print("Loaded FPN.")

modelLinknet = sm.Linknet(BACKBONE, classes=n_classes, activation=activation)
model_c = config.STYLES["linknet"]
model_path = os.path.join(f"{config.MODEL_PATH}", f"{model_c}.h5")
modelLinknet.load_weights(model_path)
print("Loaded Linknet.")


# the code below was part of the pipeline used for training and preprocessing
# TODO: replace this pipeline with a custom one for faster inference.
# helper function for data visualization
Example #22
    def Setup(self):
        '''
        User function: Setup all the parameters

        Args:
            None

        Returns:
            None
        '''
        # define network parameters
        self.system_dict["local"]["n_classes"] = 1 if len(
            self.system_dict["params"]["classes_to_train"]) == 1 else (
                len(self.system_dict["params"]["classes_to_train"]) +
                1)  # case for binary and multiclass segmentation
        activation = 'sigmoid' if self.system_dict["local"][
            "n_classes"] == 1 else 'softmax'

        #create model
        if (self.system_dict["params"]["model"] == "Unet"):
            self.system_dict["local"]["model"] = sm.Unet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "FPN"):
            self.system_dict["local"]["model"] = sm.FPN(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "Linknet"):
            self.system_dict["local"]["model"] = sm.Linknet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "PSPNet"):
            self.system_dict["local"]["model"] = sm.PSPNet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)

        # define optimizer
        optim = keras.optimizers.Adam(0.0001)

        # segmentation_models losses can be combined with '+' and scaled by an integer or float factor
        dice_loss = sm.losses.DiceLoss()
        focal_loss = sm.losses.BinaryFocalLoss() if self.system_dict["local"][
            "n_classes"] == 1 else sm.losses.CategoricalFocalLoss()
        total_loss = dice_loss + (1 * focal_loss)

        # actually total_loss can be imported directly from the library; the example above just shows how to combine losses
        # total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss

        metrics = [
            sm.metrics.IOUScore(threshold=0.5),
            sm.metrics.FScore(threshold=0.5)
        ]

        # compile the keras model with the defined optimizer, loss and metrics
        self.system_dict["local"]["model"].compile(optim, total_loss, metrics)

        self.system_dict["local"]["model"].load_weights(
            self.system_dict["params"]["path_to_model"])
Example #23
    if 'unet' in config.network:
        model = sm.Unet(backbone_name=config.BACKBONE,
                        input_shape=input_layer,
                        classes=config.nb_classes,
                        activation=config.activation,
                        encoder_weights=config.encoder_weights)
    elif 'pspnet' in config.network:
        model = sm.PSPNet(backbone_name=config.BACKBONE,
                          input_shape=input_layer,
                          classes=config.nb_classes,
                          activation=config.activation,
                          encoder_weights=config.encoder_weights,
                          psp_dropout=config.dropout)
    elif 'fpn' in config.network:
        model = sm.FPN(backbone_name=config.BACKBONE,
                       input_shape=input_layer,
                       classes=config.nb_classes,
                       activation=config.activation,
                       encoder_weights=config.encoder_weights,
                       pyramid_dropout=config.dropout)
    elif 'linknet' in config.network:
        model = sm.Linknet(backbone_name=config.BACKBONE,
                           input_shape=input_layer,
                           classes=config.nb_classes,
                           activation=config.activation,
                           encoder_weights=config.encoder_weights)
    elif 'deeplabv3plus' in config.network:
        model = Deeplabv3(weights=config.encoder_weights,
                          input_shape=input_layer,
                          classes=config.nb_classes,
                          backbone=config.BACKBONE,
                          activation=config.activation)
Example #24
def get_model(model,
              BACKBONE,
              opt,
              loss,
              metric,
              nclass=None,
              freeze_encoder=False,
              batchnormalization=True,
              dropout=None):
    h, w = None, None
    if nclass is None:
        nclass = n_classes  # fall back to the global default

    if model == 'fpn':
        model = sm.FPN(BACKBONE,
                       classes=nclass,
                       input_shape=(h, w, 3),
                       activation='sigmoid',
                       encoder_freeze=freeze_encoder,
                       pyramid_use_batchnorm=batchnormalization,
                       pyramid_dropout=dropout)
        model.compile(optimizer=opt, loss=loss, metrics=metric)

    elif model == 'unet':
        model = sm.Unet(BACKBONE,
                        classes=nclass,
                        input_shape=(h, w, 3),
                        activation='sigmoid')
        model.compile(optimizer=opt, loss=loss, metrics=metric)

    elif model == 'psp':
        model = sm.PSPNet(BACKBONE,
                          classes=nclass,
                          input_shape=(h, w, 3),
                          activation='sigmoid')
        model.compile(optimizer=opt, loss=loss, metrics=metric)

    elif model == 'linknet':
        model = sm.Linknet(BACKBONE,
                           classes=nclass,
                           input_shape=(h, w, 3),
                           activation='sigmoid')
        model.compile(optimizer=opt, loss=loss, metrics=metric)

    elif model == 'xnet':
        model = smx.Xnet(BACKBONE,
                         classes=nclass,
                         input_shape=(h, w, 3),
                         activation='sigmoid')
        model.compile(optimizer=opt, loss=loss, metrics=metric)
    elif model == 'jpu':
        model = JPU_DeepLab(h, w, nclass)
        model.compile(optimizer=opt, loss=loss, metrics=metric)

    elif model == 'deeplab':
        model = Deeplabv3(weights=None,
                          input_shape=(h, w, 3),
                          classes=4,
                          backbone='xception',
                          alpha=1.,
                          activation='sigmoid')
        model.compile(optimizer=opt, loss=loss, metrics=metric)

    else:
        raise ValueError('Unknown network ' + model)

    return model
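
A hypothetical call for get_model above, with loss and metrics taken from segmentation_models (the specific choices are arbitrary):

model = get_model('fpn', 'efficientnetb3', 'adam',
                  sm.losses.bce_jaccard_loss, [sm.metrics.iou_score], nclass=4)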
Example #25
def RunTest(
    params,
    model_name_template='models_3/{model}_{backbone}_{optimizer}_{augmented_image_size}-{padded_image_size}-{nn_image_size}_lrf{lrf}_{metric}_{CC}_f{test_fold_no}_{phash}'
):

    # # Params

    DEV_MODE_RANGE = 0  # off

    def params_dict():
        return {
            x[0]: x[1]
            for x in vars(params).items() if not x[0].startswith('__')
        }

    def params_str():
        return '\n'.join([
            repr(x[0]) + ' : ' + repr(x[1]) + ','
            for x in vars(params).items() if not x[0].startswith('__')
        ])

    def params_hash(shrink_to=6):
        import hashlib
        import json
        return hashlib.sha1(
            json.dumps(params_dict(),
                       sort_keys=True).encode()).hexdigest()[:shrink_to]

    def params_save(fn, verbose=True):
        params_fn = fn + '.param.txt'
        with open(params_fn, 'w+') as f:
            s = params_str()
            hash = params_hash(shrink_to=1000)
            s = '{\n' + s + '\n}\nhash: ' + hash[:6] + ' ' + hash[6:]
            f.write(s)
            if verbose:
                print('params: ' + s + '\nsaved to ' + params_fn)

    # # Imports

    import sys
    from imp import reload
    import numpy as np
    import keras
    import datetime
    import time

    from keras.models import Model, load_model
    from keras.layers import Input, Dropout, BatchNormalization, Activation, Add
    from keras.layers.core import Lambda
    from keras.layers.convolutional import Conv2D, Conv2DTranspose
    from keras.layers.pooling import MaxPooling2D
    from keras.layers.merge import concatenate
    from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
    from keras import backend as K

    import tensorflow as tf

    # # Load data

    import load_data
    load_data = reload(load_data)
    import keras_unet_divrikwicky_model
    keras_unet_divrikwicky_model = reload(keras_unet_divrikwicky_model)

    train_df = load_data.LoadData(train_data=True,
                                  DEV_MODE_RANGE=DEV_MODE_RANGE,
                                  to_gray=False)

    train_images, train_masks, validate_images, validate_masks = load_data.SplitTrainData(
        train_df, params.test_fold_no)

    # # Reproducibility setup:

    import random as rn

    import os
    os.environ['PYTHONHASHSEED'] = '0'

    np.random.seed(params.seed)
    rn.seed(params.seed)

    #session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
    tf.set_random_seed(params.seed)
    #sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    sess = tf.Session(graph=tf.get_default_graph())
    K.set_session(sess)

    # # IOU metric

    thresholds = np.array(
        [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])

    def iou(img_true, img_pred):
        assert (img_true.shape[-1] == 1) and (len(img_true.shape) == 3) or (
            img_true.shape[-1] != 1) and (len(img_true.shape) == 2)
        i = np.sum((img_true * img_pred) > 0)
        u = np.sum((img_true + img_pred) > 0)
        if u == 0:
            return 1
        return i / u

    def iou_metric(img_true, img_pred):
        img_pred = img_pred > 0.5  # added by sgx 20180728
        if img_true.sum() == img_pred.sum() == 0:
            scores = 1
        else:
            scores = (thresholds <= iou(img_true, img_pred)).mean()
        return scores
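    # e.g. an IoU of 0.73 clears thresholds 0.50-0.70 (5 of the 10),
    # so iou_metric returns 0.5 for that image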

    def iou_metric_batch(y_true_in, y_pred_in):
        batch_size = len(y_true_in)
        metric = []
        for batch in range(batch_size):
            value = iou_metric(y_true_in[batch], y_pred_in[batch])
            metric.append(value)
        #print("metric = ",metric)
        return np.mean(metric)

    # adapter for Keras
    def my_iou_metric(label, pred):
        metric_value = tf.py_func(iou_metric_batch, [label, pred], tf.float64)
        return metric_value

    # # Data generator

    mean_val = np.mean(train_images.apply(np.mean))
    mean_std = np.mean(train_images.apply(np.std))

    #####################################
    def FillCoordConvNumpy(imgs):
        print(imgs.shape)
        assert len(imgs.shape) == 4
        assert imgs.shape[3] == 3
        n = imgs.shape[2]
        hor_img = np.linspace(-1., 1., n).reshape((1, 1, n, 1))
        n = imgs.shape[1]
        ver_img = np.linspace(-1., 1., n).reshape((1, n, 1, 1))
        imgs[:, :, :, 0:1] = hor_img
        imgs[:, :, :, 2:3] = ver_img

    def FillCoordConvList(imgs):
        print(imgs.shape)
        assert len(imgs[0].shape) == 3
        assert imgs[0].shape[2] == 3
        for img in imgs:
            n = img.shape[1]
            hor_img = np.linspace(-1., 1., n).reshape((1, n, 1))
            n = img.shape[0]
            ver_img = np.linspace(-1., 1., n).reshape((n, 1, 1))
            img[:, :, 0:1] = hor_img
            img[:, :, 2:3] = ver_img
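
    # Minimal illustration (not from the original pipeline): after filling,
    # channel 0 ramps left-to-right and channel 2 ramps top-to-bottom.
    _demo = np.zeros((1, 2, 2, 3))
    FillCoordConvNumpy(_demo)
    assert (_demo[0, :, :, 0] == [[-1., 1.], [-1., 1.]]).all()
    assert (_demo[0, :, :, 2] == [[-1., -1.], [1., 1.]]).all()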

    if params.coord_conv:
        FillCoordConvList(train_images)
        FillCoordConvList(validate_images)
        print(train_images[0][0, 0, 0], train_images[0][0, 0, 2])
        assert train_images[0][0, 0, 0] == -1.
        assert train_images[0][0, 0, 2] == 1.

    ######################################

    from my_augs import AlbuDataGenerator

    # # Model

    # In[ ]:

    sys.path.append('../3rd_party/segmentation_models')
    import segmentation_models
    segmentation_models = reload(segmentation_models)
    from segmentation_models.utils import set_trainable

    # In[ ]:
    if not hasattr(params, 'model_params'):
        params.model_params = {}

    if params.load_model_from:
        model = load_model(params.load_model_from,
                           custom_objects={'my_iou_metric': my_iou_metric})
        print('MODEL LOADED from: ' + params.load_model_from)
    else:
        model = None
        if params.model == 'FNN':  # the 'FNN' key selects the FPN architecture
            model = segmentation_models.FPN(
                backbone_name=params.backbone,
                input_shape=(None, None, params.channels),
                encoder_weights=params.initial_weightns,
                freeze_encoder=True,
                dropout=params.dropout,
                **params.model_params)
        if params.model == 'FNNdrop':
            model = segmentation_models.FPNdrop(
                backbone_name=params.backbone,
                input_shape=(None, None, params.channels),
                encoder_weights=params.initial_weightns,
                freeze_encoder=True,
                dropout=params.dropout,
                **params.model_params)
        if params.model == 'Unet':
            model = segmentation_models.Unet(
                backbone_name=params.backbone,
                input_shape=(None, None, params.channels),
                encoder_weights=params.initial_weightns,
                freeze_encoder=True,
                **params.model_params)
        if params.model == 'Linknet':
            model = segmentation_models.Linknet(
                backbone_name=params.backbone,
                input_shape=(None, None, params.channels),
                encoder_weights=params.initial_weightns,
                freeze_encoder=True,
                **params.model_params)
        if params.model == 'divrikwicky':
            model = keras_unet_divrikwicky_model.CreateModel(
                params.nn_image_size, **params.model_params)
            params.backbone = ''
        assert model

    for l in model.layers:
        if isinstance(l, (segmentation_models.fpn.layers.UpSampling2D,
                          keras.layers.UpSampling2D)):
            print(l)
            if hasattr(l, 'interpolation'):
                print(l.interpolation)
                # params.model_params is guaranteed to exist (see the guard above).
                if 'interpolation' in params.model_params:
                    l.interpolation = params.model_params['interpolation']
            else:
                print('this UpSampling2D layer has no interpolation attribute')

    if hasattr(params,
               'kernel_constraint_norm') and params.kernel_constraint_norm:
        for l in model.layers:
            if hasattr(l, 'kernel_constraint'):
                print('kernel_constraint for ', l, ' is set to ',
                      params.kernel_constraint_norm)
                l.kernel_constraint = keras.constraints.get(
                    keras.constraints.max_norm(params.kernel_constraint_norm))
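
    # Caveat (version-dependent): in some Keras releases constraints are bound
    # to the weight variable when the layer is built, so assigning
    # l.kernel_constraint after construction may not take effect without
    # rebuilding the layer.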

    # In[ ]:

    model_out_file = model_name_template.format(
        lrf=params.ReduceLROnPlateau['factor'],
        metric=params.monitor_metric[0],
        CC='CC' if params.coord_conv else '',
        **vars(params)) + '_f{test_fold_no}_{phash}'.format(
            test_fold_no=params.test_fold_no, phash=params_hash())
    now = datetime.datetime.now()
    print('model:   ' + model_out_file + '    started at ' +
          now.strftime("%Y.%m.%d %H:%M:%S"))

    assert not os.path.exists(model_out_file + '.model')

    params_save(model_out_file, verbose=True)
    log_out_file = model_out_file + '.log.csv'

    # In[ ]:

    #model = load_model(model1_file, ) #, 'lavazs_loss': lavazs_loss

    # # Train

    # In[ ]:

    optimizer = params.optimizer
    if optimizer == 'adam':
        optimizer = keras.optimizers.adam(**params.optimizer_params)
    elif optimizer == 'sgd':
        optimizer = keras.optimizers.sgd(**params.optimizer_params)
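
    # For reference, params.optimizer_params is a plain kwargs dict; a typical
    # value would be {'lr': 1e-4} (an assumption -- the real values come from
    # the params module loaded elsewhere in this script).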

    model.compile(loss="binary_crossentropy",
                  optimizer=optimizer,
                  metrics=["acc", my_iou_metric])

    # In[ ]:

    if params.coord_conv:
        mean = ((0, mean_val, 0), (1, mean_std, 1))
    else:
        mean = (mean_val, mean_std)
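
    # Assuming AlbuDataGenerator reads `mean` as (per-channel means,
    # per-channel stds): with CoordConv the coordinate channels 0 and 2 are
    # already in [-1, 1], so they get identity normalization (mean 0, std 1)
    # and only the middle image channel is standardized.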

    train_gen = AlbuDataGenerator(train_images,
                                  train_masks,
                                  batch_size=params.batch_size,
                                  nn_image_size=params.nn_image_size,
                                  mode=params.train_augmentation_mode,
                                  shuffle=True,
                                  params=params,
                                  mean=mean)
    val_gen = AlbuDataGenerator(validate_images,
                                validate_masks,
                                batch_size=params.test_batch_size,
                                nn_image_size=params.nn_image_size,
                                mode=params.test_augmentation_mode,
                                shuffle=False,
                                params=params,
                                mean=mean)

    # In[ ]:

    sys.path.append('../3rd_party/keras-tqdm')
    from keras_tqdm import TQDMCallback, TQDMNotebookCallback

    # In[ ]:

    start_t = time.perf_counter()  # time.clock() was removed in Python 3.8

    if params.epochs_warmup:
        history = model.fit_generator(
            train_gen,
            validation_data=None,
            epochs=params.epochs_warmup,
            callbacks=[TQDMNotebookCallback(leave_inner=True)],
            validation_steps=None,
            workers=5,
            use_multiprocessing=False,
            verbose=0)

    set_trainable(model)  # unfreeze the encoder after the warm-up epochs
    batches_per_epoch = len(train_images) // params.batch_size
    print("batches per epoch: ", batches_per_epoch)
    test_epochs = 30
    steps = test_epochs * batches_per_epoch
    val_period = steps // 1000
    print("steps: ", steps, " val_period", val_period)

    lr_scheduler = EvalLrTest(log_out_file,
                              val_gen,
                              val_period=val_period,
                              steps=steps)

    history = model.fit_generator(
        train_gen,
        validation_data=None,
        epochs=params.epochs,
        initial_epoch=params.epochs_warmup,
        callbacks=[TQDMNotebookCallback(leave_inner=True), lr_scheduler],
        validation_steps=None,
        workers=5,
        use_multiprocessing=False,
        verbose=0)

    # In[ ]:

    print(params_str())
    print('done:   ' + model_out_file)
    elapsed = time.perf_counter() - start_t
    print('elapsed: {}s ({}s/iter)'.format(elapsed,
                                           elapsed / len(history.epoch)))

    return model
Пример #26
0
    def _do_make_model_task(self,
                            task,
                            model_name,
                            nb_classes,
                            width=299,
                            height=299,
                            backbone="resnet50",
                            activation="softmax"):
        if task == Task.CLASSIFICATION:
            xception_shape_condition = height >= 71 and width >= 71
            mobilenet_shape_condition = height >= 32 and width >= 32

            if model_name == "xception" and xception_shape_condition:
                model = models.xception(nb_classes=nb_classes,
                                        height=height,
                                        width=width)
            elif model_name == "dilated_xception" and xception_shape_condition:
                model = models.dilated_xception(
                    nb_classes=nb_classes,
                    height=height,
                    width=width,
                    weights_info=self.config.train_params.weights_info)
            elif model_name == "mobilenet" and mobilenet_shape_condition:
                model = models.mobilenet(nb_classes=nb_classes,
                                         height=height,
                                         width=width)
            elif model_name == "mobilenetv2" and mobilenet_shape_condition:
                model = models.mobilenet_v2(
                    nb_classes=nb_classes,
                    height=height,
                    width=width,
                    weights_info=self.config.train_params.weights_info)
            elif model_name.startswith("efficientnetb"):
                model = models.EfficientNet(
                    model_name=model_name,
                    nb_classes=nb_classes,
                    height=height,
                    width=width,
                )
            elif model_name.startswith('resnest'):
                model = models.resnest(
                    nb_classes=nb_classes,
                    model_name=model_name,
                    height=height,
                    width=width,
                )
            else:
                model = models.Model2D(nb_classes, height, width)

        elif task == Task.SEMANTIC_SEGMENTATION:
            print('------------------')
            print('Model:', model_name)
            print('Backbone:', backbone)
            print('------------------')

            if model_name == "unet":
                model = segmentation_models.Unet(
                    backbone_name=backbone,
                    input_shape=(height, width, 3),
                    classes=nb_classes,
                )
            elif model_name == "deeplab_v3":
                model = models.Deeplabv3(
                    weights_info=self.config.train_params.weights_info,
                    input_shape=(height, width, 3),
                    classes=nb_classes,
                    backbone=backbone,
                    activation=activation)
            elif model_name == "pspnet":
                model = segmentation_models.PSPNet(
                    backbone_name=backbone,
                    input_shape=(height, width, 3),
                    classes=nb_classes,
                )
            elif model_name == "fpn":
                model = segmentation_models.FPN(
                    backbone_name=backbone,
                    input_shape=(height, width, 3),
                    classes=nb_classes,
                )
        else:
            raise NotImplementedError

        return model
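
    # A minimal usage sketch (hypothetical caller; Task and the surrounding
    # trainer class are defined elsewhere in this codebase):
    # model = trainer._do_make_model_task(Task.SEMANTIC_SEGMENTATION,
    #                                     model_name="unet", nb_classes=2,
    #                                     width=320, height=320,
    #                                     backbone="resnet34")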