# Example #1 (score: 0)
def init_and_train_model(train_df, train_path, val_df, val_path, model_type, BACKBONE, AUGMENTATIONS, batch_size, epoch_num):
  """Build a segmentation model, wire up the data generators, and train it.

  Args:
      train_df / train_path: training dataframe and image directory.
      val_df / val_path: validation dataframe and image directory.
      model_type: one of 'Linknet', 'Unet', 'FPN', 'PSPNet'.
      BACKBONE: encoder backbone name for segmentation_models.
      AUGMENTATIONS: augmentation pipeline passed to the training generator.
      batch_size / epoch_num: training hyper-parameters.

  Returns:
      (model, history) tuple.

  Raises:
      ValueError: for an unknown model_type (the original fell through and
      crashed later with a NameError on `model`).
  """
  # Dispatch table: constructor, input shape, and whether the generators need
  # the PSPNet flag.  PSPNet uses 240x240 instead of 256x256 — presumably to
  # satisfy PSPNet's spatial-divisibility constraint (TODO confirm).
  # NOTE: the original also computed an unused `preprocess_input`; removed.
  architectures = {
      "Linknet": (sm.Linknet, (256, 256, 3), False),
      "Unet": (sm.Unet, (256, 256, 3), False),
      "FPN": (sm.FPN, (256, 256, 3), False),
      "PSPNet": (sm.PSPNet, (240, 240, 3), True),
  }
  if model_type not in architectures:
    raise ValueError("Unknown model_type: " + repr(model_type))
  constructor, input_shape, needs_pspnet_flag = architectures[model_type]

  model = constructor(BACKBONE, input_shape=input_shape,
                      encoder_weights='imagenet', encoder_freeze=True)
  if needs_pspnet_flag:
    train_gen = keras_generator_with_augs(train_df, batch_size, train_path,
                                          AUGMENTATIONS, PSPNet=True)
    val_gen = keras_generator(val_df, batch_size, val_path, PSPNet=True)
  else:
    train_gen = keras_generator_with_augs(train_df, batch_size, train_path,
                                          AUGMENTATIONS)
    val_gen = keras_generator(val_df, batch_size, val_path)

  model.compile(
    'Adam',
    loss=sm.losses.dice_loss,
    metrics=[sm.metrics.dice_score],
  )

  # Two checkpoints: best-validation-loss weights and latest-epoch weights.
  # NOTE(review): `period` was removed in tf.keras 2.x (use `save_freq`);
  # kept because this file targets the old standalone keras API.
  def _checkpoint(suffix, save_best_only):
    # One-line purpose: build a weights-only val_loss checkpoint callback.
    return keras.callbacks.ModelCheckpoint(
        model_type + '_' + BACKBONE + '_' + suffix + '.h5',
        monitor='val_loss',
        verbose=0,
        save_best_only=save_best_only,
        save_weights_only=True,
        mode='auto',
        period=1)

  callbacks = [_checkpoint('best', True), _checkpoint('last', False)]

  history = model.fit_generator(train_gen,
              steps_per_epoch=50,
              epochs=epoch_num,
              verbose=1,
              callbacks=callbacks,
              validation_data=val_gen,
              validation_steps=50,
              class_weight=None,
              max_queue_size=1,
              workers=1,
              use_multiprocessing=False,
              shuffle=True,
              initial_epoch=0)
  return model, history
  
# Example #2 (score: 0)
 def create_model(self):
     """Instantiate a softmax PSPNet from this object's configured backbone,
     encoder weights, input shape, and the dataset's class count."""
     n_classes = self._data.get_n_classes()
     model_kwargs = dict(
         activation="softmax",
         downsample_factor=16,
         classes=n_classes,
         encoder_weights=self._encoder_weights,
         input_shape=self._input_shape,
     )
     return sm.PSPNet(self._backbone, **model_kwargs)
# Example #3 (score: 0)
def get_model(h=384, w=384, optim=None, dropout=0):
    '''
    Build and compile a PSPNet segmentation model.

    Specifications:-
    backbone = resnet101
    classes = 10
    input_dim = output_dim = h x w (default 384x384)
    activation = softmax
    loss = categorical focal + jaccard
    metric = IoU

    Args:
        h, w: input height/width.
        optim: optimizer to compile with; a fresh Adam() when None.
            (BUG FIX: the original default `optim=Adam()` was evaluated once
            at definition time, sharing a single optimizer instance across
            all calls.)
        dropout: PSP head dropout rate.

    Returns:
        The compiled Keras model.
    '''
    if optim is None:
        optim = Adam()
    # NOTE: the original also created an unused local `optimiser = Adam()`;
    # removed as dead code.
    model = sm.PSPNet(backbone_name='resnet101',
                      input_shape=(h, w, 3),
                      classes=10,
                      activation='softmax',
                      psp_dropout=dropout)
    model.compile(
        optimizer=optim,
        loss=sm.losses.categorical_focal_jaccard_loss,
        metrics=[sm.metrics.iou_score],
    )

    model.summary()

    return model
# Example #4 (score: 0)
def create_model():
    """
    Create a segmentation model based on parameters specified in settings.py.

    Reads the module-level settings: model_type, backbone, image_height,
    image_width, num_classes, decoder_scaler.

    Returns:
        An uncompiled Keras segmentation model.

    Raises:
        ValueError: if model_type is not a supported architecture.
        (BUG FIX: the original printed an error and called exit(0), which
        reports *success* to the calling process.)
    """
    if model_type == 'myresunet':
        model = myresunet.create_model()
    elif model_type == 'unet':
        model = sm.Unet(backbone_name=backbone,
                        input_shape=(image_height, image_width, 3),
                        classes=num_classes,
                        activation='softmax',
                        encoder_weights='imagenet',
                        encoder_freeze=False,
                        encoder_features='default',
                        decoder_block_type='upsampling',
                        # decoder widths scale together via decoder_scaler
                        decoder_filters=(decoder_scaler * 256, decoder_scaler * 128, decoder_scaler * 64,
                                         decoder_scaler * 32, decoder_scaler * 16),
                        decoder_use_batchnorm=True)
    elif model_type == 'fpn':
        model = sm.FPN(backbone_name=backbone,
                       input_shape=(image_height, image_width, 3),
                       classes=num_classes,
                       activation='softmax',
                       encoder_weights='imagenet',
                       encoder_freeze=False,
                       encoder_features='default',
                       pyramid_block_filters=decoder_scaler * 256,
                       pyramid_use_batchnorm=True,
                       pyramid_aggregation='concat',
                       pyramid_dropout=None)
    elif model_type == 'linknet':
        model = sm.Linknet(backbone_name=backbone,
                           input_shape=(image_height, image_width, 3),
                           classes=num_classes,
                           activation='softmax',
                           encoder_weights='imagenet',
                           encoder_freeze=False,
                           encoder_features='default',
                           decoder_block_type='upsampling',
                           decoder_filters=(None, None, None, None, decoder_scaler * 16),
                           decoder_use_batchnorm=True)
    elif model_type == 'pspnet':
        model = sm.PSPNet(backbone_name=backbone,
                          input_shape=(image_height, image_width, 3),
                          classes=num_classes,
                          activation='softmax',
                          encoder_weights='imagenet',
                          encoder_freeze=False,
                          downsample_factor=8,
                          psp_conv_filters=decoder_scaler * 512,
                          psp_pooling_type='avg',
                          psp_use_batchnorm=True,
                          psp_dropout=None)
    else:
        raise ValueError('Invalid segmentation model type: ' + str(model_type))
    return model
def create_model(border=False, trainable_encoder=False):
    """
    Create a binary (or binary + border) segmentation model.

    Args:
        border: when True, predict 2 channels (mask + border) instead of 1.
        trainable_encoder: when False (default), freeze the ImageNet encoder.

    Returns:
        An uncompiled Keras segmentation model built from the module-level
        `model_type`, `backbone`, and `image_size` settings.

    Raises:
        ValueError: for an unsupported model_type.
        (BUG FIX: the original printed and called exit(0) — a success exit
        code on an error path.)
    """
    n_out = 2 if border else 1
    if model_type == 'unet':
        model = sm.Unet(backbone_name=backbone,
                        input_shape=(image_size, image_size, 3),
                        classes=n_out,
                        activation='sigmoid',
                        encoder_weights='imagenet',
                        encoder_freeze=not trainable_encoder,
                        encoder_features='default',
                        decoder_block_type='upsampling',
                        decoder_filters=(256, 128, 64, 32, 16),
                        decoder_use_batchnorm=True)
    elif model_type == 'fpn':
        model = sm.FPN(backbone_name=backbone,
                       input_shape=(image_size, image_size, 3),
                       classes=n_out,
                       activation='sigmoid',
                       encoder_weights='imagenet',
                       encoder_freeze=not trainable_encoder,
                       encoder_features='default',
                       pyramid_block_filters=256,
                       pyramid_use_batchnorm=True,
                       pyramid_aggregation='concat',
                       pyramid_dropout=None)
    elif model_type == 'linknet':
        model = sm.Linknet(backbone_name=backbone,
                           input_shape=(image_size, image_size, 3),
                           classes=n_out,
                           activation='sigmoid',
                           encoder_weights='imagenet',
                           encoder_freeze=not trainable_encoder,
                           encoder_features='default',
                           decoder_block_type='upsampling',
                           decoder_filters=(None, None, None, None, 16),
                           decoder_use_batchnorm=True)
    elif model_type == 'pspnet':
        model = sm.PSPNet(backbone_name=backbone,
                          input_shape=(image_size, image_size, 3),
                          classes=n_out,
                          activation='sigmoid',
                          encoder_weights='imagenet',
                          encoder_freeze=not trainable_encoder,
                          downsample_factor=8,
                          psp_conv_filters=512,
                          psp_pooling_type='avg',
                          psp_use_batchnorm=True,
                          psp_dropout=None)
    else:
        raise ValueError('Invalid segmentation model type: ' + str(model_type))
    return model
def get_backboned_model(model, backbone, freeze=True):
    """Wrap a segmentation_models architecture so it accepts 96x96x1 input.

    A 1x1 Conv2D maps the single input channel to the 3 channels the
    ImageNet-pretrained backbone expects.

    Args:
        model: architecture name — 'Unet', 'FPN', 'Linknet' or 'PSPNet';
            anything else falls back to Unet (with a warning print, as
            before).
        backbone: encoder backbone name.
        freeze: whether to freeze the encoder.

    Returns:
        A Keras Model taking (96, 96, 1) input.
    """
    # Shared constructor kwargs; the original duplicated this call 5 times.
    # NOTE(review): `freeze_encoder` is the pre-1.0 segmentation_models
    # kwarg; 1.x renamed it to `encoder_freeze` — confirm library version.
    common = dict(backbone_name=backbone,
                  encoder_weights='imagenet',
                  classes=1,
                  activation='sigmoid',
                  freeze_encoder=freeze)
    constructors = {
        'Unet': sm.Unet,
        'FPN': sm.FPN,
        'Linknet': sm.Linknet,
        'PSPNet': sm.PSPNet,
    }
    if model not in constructors:
        print('Model not identified! Unet is selected')
    base_model = constructors.get(model, sm.Unet)(**common)

    inp = Input(shape=(96, 96, 1))
    l1 = Conv2D(3, (1, 1))(inp)  # map N channels data to 3 channels
    out = base_model(l1)

    model = Model(inp, out, name=base_model.name)

    # print(model.summary())
    return model
def train(weights_paths, model_name="unet", batch_size=16, loss_name="bce"):
    """Train a segmentation model on the module-level train/val datasets,
    checkpoint the best weights, and save learning-curve plots.

    Args:
        weights_paths: passed to sm.Unet's `weights` argument (unet only).
        model_name: 'unet', 'fpn' or 'psp'.
        batch_size: training batch size.
        loss_name: 'bce', 'bce_jaccard', 'bce_jaccard_focal' or
            'binary_focal_dice'.

    Raises:
        ValueError: for an unknown model_name or loss_name (the original
        left `model`/`loss` unbound and crashed with a NameError).
    """
    BATCH_SIZE = batch_size

    # for reference about the BUFFER_SIZE in shuffle:
    # https://stackoverflow.com/questions/46444018/meaning-of-buffer-size-in-dataset-map-dataset-prefetch-and-dataset-shuffle
    BUFFER_SIZE = 1000

    dataset = {"train": train_dataset, "val": val_dataset}

    # -- Train Dataset --#
    dataset['train'] = dataset['train'].map(
        load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset['train'] = dataset['train'].shuffle(buffer_size=BUFFER_SIZE,
                                                seed=SEED)
    dataset['train'] = dataset['train'].repeat()
    dataset['train'] = dataset['train'].batch(BATCH_SIZE)
    dataset['train'] = dataset['train'].prefetch(buffer_size=AUTOTUNE)

    #-- Validation Dataset --#
    dataset['val'] = dataset['val'].map(load_image_test)
    dataset['val'] = dataset['val'].repeat()
    dataset['val'] = dataset['val'].batch(BATCH_SIZE)
    dataset['val'] = dataset['val'].prefetch(buffer_size=AUTOTUNE)

    print(dataset['train'])
    print(dataset['val'])

    if model_name == "unet":
        model = sm.Unet('efficientnetb4',
                        input_shape=(None, None, 3),
                        classes=N_CLASSES,
                        activation='sigmoid',
                        encoder_weights=None,
                        weights=weights_paths)
    elif model_name == "fpn":
        model = sm.FPN('efficientnetb4',
                       input_shape=(None, None, 3),
                       classes=N_CLASSES,
                       activation='sigmoid',
                       encoder_weights=None)
    elif model_name == "psp":
        # PSPNet needs a fixed spatial size, hence IMG_SIZE instead of None.
        model = sm.PSPNet('efficientnetb4',
                          input_shape=(IMG_SIZE, IMG_SIZE, 3),
                          classes=N_CLASSES,
                          activation='sigmoid',
                          encoder_weights=None)
    else:
        raise ValueError("Unknown model_name: " + repr(model_name))

    optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)  # 0.001

    if loss_name == "bce":
        loss = tf.keras.losses.BinaryCrossentropy()
    elif loss_name == "bce_jaccard":
        loss = sm.losses.bce_jaccard_loss
    elif loss_name == "bce_jaccard_focal":
        loss = sm.losses.binary_focal_jaccard_loss
    elif loss_name == "binary_focal_dice":
        loss = sm.losses.binary_focal_dice_loss
    else:
        raise ValueError("Unknown loss_name: " + repr(loss_name))

    model.compile(optimizer=optimizer,
                  loss=loss,
                  metrics=['accuracy', sm.metrics.iou_score, dice_coe])

    EPOCHS = 50

    STEPS_PER_EPOCH = TRAINSET_SIZE // BATCH_SIZE
    VALIDATION_STEPS = VALSET_SIZE // BATCH_SIZE

    callbacks = [
        tf.keras.callbacks.ModelCheckpoint(
            'results/weights/' + str(model_name) + '_' + str(loss_name) +
            '.h5',
            monitor='val_dice_coe',
            mode='max',
            verbose=1,
            save_best_only=True,
            save_weights_only=False),
        tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                             factor=0.1,
                                             patience=8,
                                             min_lr=0.00001)
    ]

    results = model.fit(dataset['train'],
                        epochs=EPOCHS,
                        steps_per_epoch=STEPS_PER_EPOCH,
                        validation_steps=VALIDATION_STEPS,
                        callbacks=callbacks,
                        validation_data=dataset['val'])

    def _plot_curve(metric, ylabel, tag, best):
        # One-line purpose: plot train/val curves for `metric`, mark the best
        # epoch, and save under ./results/plots/.
        # BUG FIX: the original mixed './results/plots/' and
        # './Results/plots/' — on a case-sensitive filesystem that splits the
        # plots across two directories; unified to lowercase to match the
        # weights path above.
        plt.figure(figsize=(8, 8))
        plt.title("Learning curve")
        plt.plot(results.history[metric], label=metric)
        plt.plot(results.history["val_" + metric], label="val_" + metric)
        vals = results.history["val_" + metric]
        arg_fn, pick_fn = (np.argmax, np.max) if best == "max" else (np.argmin, np.min)
        plt.plot(arg_fn(vals), pick_fn(vals),
                 marker="x", color="r", label="best model")
        plt.xlabel("Epochs")
        plt.ylabel(ylabel)
        plt.legend()
        plt.savefig('./results/plots/train_' + tag + '_' + str(model_name) +
                    '_' + str(loss_name) + '.png')

    _plot_curve("loss", "log_loss", "loss", best="min")
    _plot_curve("dice_coe", "Dice Coeff", "dice", best="max")
    _plot_curve("iou_score", "IOU", "IOU", best="max")
    _plot_curve("accuracy", "accuracy", "accuracy", best="max")
# Example #8 (score: 0)
    # NOTE(review): fragment of a larger function whose definition is outside
    # this chunk — `config` and `model` belong to the enclosing scope.
    # Validates the band list, then builds the configured architecture.
    if len(config.band_list) == 0:
        print("Error: band_list should not be empty!")
        sys.exit(-2)
    # Despite the name, this is a shape tuple (w, h, n_bands), not a layer.
    input_layer = (config.img_w, config.img_h, len(config.band_list))

    # Substring matching: e.g. a network name containing 'unet' selects Unet.
    if 'unet' in config.network:
        model = sm.Unet(backbone_name=config.BACKBONE,
                        input_shape=input_layer,
                        classes=config.nb_classes,
                        activation=config.activation,
                        encoder_weights=config.encoder_weights)
    elif 'pspnet' in config.network:
        model = sm.PSPNet(backbone_name=config.BACKBONE,
                          input_shape=input_layer,
                          classes=config.nb_classes,
                          activation=config.activation,
                          encoder_weights=config.encoder_weights,
                          psp_dropout=config.dropout)
    elif 'fpn' in config.network:
        model = sm.FPN(backbone_name=config.BACKBONE,
                       input_shape=input_layer,
                       classes=config.nb_classes,
                       activation=config.activation,
                       encoder_weights=config.encoder_weights,
                       pyramid_dropout=config.dropout)
    elif 'linknet' in config.network:
        model = sm.Linknet(backbone_name=config.BACKBONE,
                           input_shape=input_layer,
                           classes=config.nb_classes,
                           activation=config.activation,
                           encoder_weights=config.encoder_weights)
# Load the five ground-truth masks from the new test set into a
# (5, reshape, reshape, 1) array, scaled to [0, 1].
test_label_root = pathlib.Path('.\\dataset\\new_test_set\\test_label')
test_label_path = [str(path) for path in test_label_root.glob('*')]
test_label = np.empty((5, reshape, reshape, 1))
for idx in range(5):
    raw_bytes = tf.io.read_file(test_label_path[idx])
    decoded = tf.image.decode_image(raw_bytes)
    resized = tf.image.resize(decoded, [reshape, reshape])
    test_label[idx] = resized / 255.0

# set backbone of the model (extract feature maps)
BACKBONE = 'resnet50'

# define model without pre-trained
model = sm.PSPNet(BACKBONE,
                  encoder_weights=None,
                  input_shape=(reshape, reshape, 1),
                  classes=1)
model.compile('Adam',
              loss=sm.losses.bce_jaccard_loss,
              metrics=[sm.metrics.iou_score])

# fit model (data can be randomized)
model.fit(x=train_data[:20],
          y=train_label[:20],
          batch_size=16,
          epochs=8,
          validation_data=(train_data[20:], train_label[20:]))
# Example #10 (score: 0)
    def Setup(self):
        '''
        User function: Setup all the parameters.

        Builds the segmentation model (Unet/FPN/Linknet/PSPNet) selected in
        self.system_dict["params"], compiles it with a combined dice + focal
        loss, and prepares the train/validation dataloaders.

        Args:
            None

        Returns:
            None
        '''
        # Backbone-specific input preprocessing from segmentation_models.
        preprocess_input = sm.get_preprocessing(
            self.system_dict["params"]["backbone"])
        # define network parameters
        # Binary segmentation -> 1 output channel; multiclass adds a
        # background channel (n classes + 1).
        self.system_dict["local"]["n_classes"] = 1 if len(
            self.system_dict["dataset"]["train"]
            ["classes_to_train"]) == 1 else (
                len(self.system_dict["dataset"]["train"]["classes_to_train"]) +
                1)  # case for binary and multiclass segmentation
        activation = 'sigmoid' if self.system_dict["local"][
            "n_classes"] == 1 else 'softmax'

        #create model
        if (self.system_dict["params"]["model"] == "Unet"):
            self.system_dict["local"]["model"] = sm.Unet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "FPN"):
            self.system_dict["local"]["model"] = sm.FPN(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "Linknet"):
            self.system_dict["local"]["model"] = sm.Linknet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "PSPNet"):
            self.system_dict["local"]["model"] = sm.PSPNet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        # NOTE(review): an unrecognised model name silently leaves
        # self.system_dict["local"]["model"] unset; the compile call below
        # would then fail — consider raising explicitly.

        # define optimizer
        optim = keras.optimizers.Adam(self.system_dict["params"]["lr"])

        # Segmentation models losses can be combined together by '+' and scaled by integer or float factor
        dice_loss = sm.losses.DiceLoss()
        focal_loss = sm.losses.BinaryFocalLoss() if self.system_dict["local"][
            "n_classes"] == 1 else sm.losses.CategoricalFocalLoss()
        total_loss = dice_loss + (1 * focal_loss)

        # actually total_loss can be imported directly from the library; the above just shows how to combine losses
        # total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss

        metrics = [
            sm.metrics.IOUScore(threshold=0.5),
            sm.metrics.FScore(threshold=0.5)
        ]

        # compile keras model with defined optimizer, loss and metrics
        self.system_dict["local"]["model"].compile(optim, total_loss, metrics)

        # Dataset for train images
        train_dataset = Dataset(
            self.system_dict["dataset"]["train"]["img_dir"],
            self.system_dict["dataset"]["train"]["mask_dir"],
            self.system_dict["dataset"]["train"]["classes_dict"],
            classes_to_train=self.system_dict["dataset"]["train"]
            ["classes_to_train"],
            augmentation=get_training_augmentation(),
            preprocessing=get_preprocessing(preprocess_input),
        )

        # Round the requested image shape up to the nearest multiple of 32
        # (presumably an encoder downsampling requirement — confirm).
        if (self.system_dict["params"]["image_shape"][0] % 32 != 0):
            self.system_dict["params"]["image_shape"][0] += (
                32 - self.system_dict["params"]["image_shape"][0] % 32)

        if (self.system_dict["params"]["image_shape"][1] % 32 != 0):
            self.system_dict["params"]["image_shape"][1] += (
                32 - self.system_dict["params"]["image_shape"][1] % 32)

        # Dataset for validation images
        # Falls back to re-using the training set when no validation set is
        # configured.
        if (self.system_dict["dataset"]["val"]["status"]):
            valid_dataset = Dataset(
                self.system_dict["dataset"]["val"]["img_dir"],
                self.system_dict["dataset"]["val"]["mask_dir"],
                self.system_dict["dataset"]["train"]["classes_dict"],
                classes_to_train=self.system_dict["dataset"]["train"]
                ["classes_to_train"],
                augmentation=get_validation_augmentation(
                    self.system_dict["params"]["image_shape"][0],
                    self.system_dict["params"]["image_shape"][1]),
                preprocessing=get_preprocessing(preprocess_input),
            )
        else:
            valid_dataset = Dataset(
                self.system_dict["dataset"]["train"]["img_dir"],
                self.system_dict["dataset"]["train"]["mask_dir"],
                self.system_dict["dataset"]["train"]["classes_dict"],
                classes_to_train=self.system_dict["dataset"]["train"]
                ["classes_to_train"],
                augmentation=get_validation_augmentation(
                    self.system_dict["params"]["image_shape"][0],
                    self.system_dict["params"]["image_shape"][1]),
                preprocessing=get_preprocessing(preprocess_input),
            )

        self.system_dict["local"]["train_dataloader"] = Dataloder(
            train_dataset,
            batch_size=self.system_dict["params"]["batch_size"],
            shuffle=True)
        self.system_dict["local"]["valid_dataloader"] = Dataloder(
            valid_dataset, batch_size=1, shuffle=False)
    # NOTE(review): fragment of a larger function — x_train_path,
    # y_train_path, Dataset, fcn_8 etc. come from the enclosing scope.
    dataset = Dataset(x_train_path, y_train_path, classes=['non-polyp', 'polyp'], augmentation=get_training_augmentation())

    BATCH_SIZE = 8
    CLASSES = ['non-polyp', 'polyp']
    LR = 0.0001
    EPOCHS = 25
    IMAGE_ORDERING = 'channels_last'
    n_classes = 2

    # SOTA
    BACKBONE = 'resnet34'
    # define model
    # NOTE(review): the four sm.* assignments below are dead code — `model`
    # is rebound each time and the fcn_8 assignment is the only one that
    # survives.  Presumably leftovers from experimenting with architectures.
    model = sm.Unet(BACKBONE, encoder_weights='imagenet')
    model = sm.Linknet(BACKBONE, encoder_weights='imagenet')
    model = sm.FPN(BACKBONE, encoder_weights='imagenet')
    model = sm.PSPNet(BACKBONE, encoder_weights='imagenet')

    model = fcn_8.fcn_8(2)

    optim = tf.keras.optimizers.Adam(LR)

    # Segmentation models losses can be combined together by '+' and scaled by integer or float factor
    # set class weights for dice_loss (car: 1.; pedestrian: 2.; background: 0.5;)
    # NOTE(review): the car/pedestrian comment above does not match the
    # polyp classes used here — likely copied from another example.
    dice_loss = sm.losses.DiceLoss(class_weights=np.array([0.5, 1]))
    focal_loss = sm.losses.BinaryFocalLoss()
    # if n_classes == 1 else sm.losses.CategoricalFocalLoss()
    total_loss = dice_loss + (1 * focal_loss)

    # actually total_loss can be imported directly from the library; the above just shows how to combine losses
    # total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss
 def train(self,
           model_name='unet',
           backbone='resnet50',
           fine_tune=False,
           model_path=None,
           opt='adam',
           lr=0.001,
           shape=(256, 256)):
     """Train (or fine-tune) a segmentation model and checkpoint the best one.

     Args:
         model_name: 'unet', 'pspnet', 'fpn' or 'linknet'; ignored when
             fine-tuning an existing model.
         backbone: segmentation_models encoder backbone name.
         fine_tune: with model_path, reload a saved model and continue
             training at lr/10.
         model_path: path of the .h5 model to fine-tune.
         opt: 'adam', 'sgd' or 'adadelta'.
         lr: base learning rate.
         shape: (height, width) of the model input.

     Raises:
         NotImplementedError: for an unknown model_name when not fine-tuning.
     """
     os.makedirs('saved_models/segmentation', exist_ok=True)
     if fine_tune:
         lr = lr / 10
     opt_dict = {'adam': Adam(lr), 'sgd': SGD(lr), 'adadelta': Adadelta(lr)}
     if fine_tune and model_path:
         # BUG FIX: the original used model_path.strip('.h5'), which strips
         # any leading/trailing '.', 'h' or '5' characters (e.g.
         # 'h5net.h5' -> 'net'), not the '.h5' suffix.  splitext removes the
         # extension correctly.
         new_name = os.path.splitext(model_path)[0] + '_fine-tune_{}.h5'.format(opt)
         model = keras.models.load_model(model_path, compile=False)
     else:
         # Build a fresh single-class sigmoid model; unet additionally
         # enables decoder batch-norm, matching the original branches.
         builders = {
             'unet': sm.Unet,
             'pspnet': sm.PSPNet,
             'fpn': sm.FPN,
             'linknet': sm.Linknet,
         }
         key = model_name.lower()
         if key not in builders:
             raise NotImplementedError
         build_kwargs = dict(encoder_weights='imagenet',
                             activation='sigmoid',
                             classes=1,
                             input_shape=(shape[0], shape[1], 3))
         if key == 'unet':
             build_kwargs['decoder_use_batchnorm'] = True
         model = builders[key](backbone, **build_kwargs)
         name_list = [
             model_name, backbone, opt, 'init-training',
             'none' if shape is None else str(shape[0]) + 'x' +
             str(shape[1])
         ]
         if self.shrink is None:
             name_list.append('without-shrink')
         new_name = '_'.join(name_list) + '.h5'
     # Compile / summary / fit were duplicated verbatim in both branches of
     # the original; hoisted here once.
     model.compile(
         optimizer=opt_dict[opt.lower()],
         loss=sm.losses.bce_jaccard_loss,
         metrics=['acc', sm.metrics.iou_score, sm.metrics.f1_score])
     model.summary()
     model.fit_generator(
         generator=self.data_loader.generator(is_train=True,
                                              shape=shape,
                                              shrink=self.shrink),
         steps_per_epoch=self.data_loader.train_steps,
         validation_data=self.data_loader.generator(is_train=False,
                                                    shape=shape,
                                                    shrink=self.shrink),
         validation_steps=self.data_loader.val_steps,
         verbose=1,
         initial_epoch=0,
         epochs=300,
         callbacks=[
             keras.callbacks.TensorBoard('logs'),
             keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                               patience=7,
                                               verbose=1),
             keras.callbacks.EarlyStopping(monitor='val_loss',
                                           patience=38,
                                           verbose=1),
             keras.callbacks.ModelCheckpoint(
                 monitor='val_loss',
                 verbose=1,
                 save_weights_only=False,
                 save_best_only=True,
                 filepath='saved_models/segmentation/' + new_name)
         ])
# Example #13 (score: 0)
def main(model_name, weight_path, train_dir, train_label_dir, val_dir, val_label_dir, test_dir, test_label_dir, is_fine_tuning, batch_size, epochs):
    """Build, train and optionally evaluate a binary building-segmentation model.

    Args:
        model_name: 'unet' or 'pspnet' (case-insensitive); anything else raises.
        weight_path: optional path to weights loaded after compile.
        train_dir/train_label_dir: training images and masks.
        val_dir/val_label_dir: optional validation data; when absent, training
            runs without validation and checkpoints monitor training loss.
        test_dir/test_label_dir: optional held-out data evaluated after training.
        is_fine_tuning: passed to encoder_freeze (True freezes the encoder).
        batch_size: training batch size.
        epochs: number of training epochs.

    Returns:
        The trained keras model.
    """
    BACKBONE = 'resnet50'
    CLASSES = ['background', 'building']
    LR = 0.0001
    preprocess_input = sm.get_preprocessing(BACKBONE)
    n_classes = 1  # binary segmentation: single sigmoid channel
    activation = 'sigmoid'
    input_size = 480
    if model_name.lower() == 'unet':
        model = sm.Unet(BACKBONE,
                        classes=n_classes,
                        activation=activation,
                        input_shape=(input_size, input_size, 3),
                        encoder_weights='imagenet',
                        encoder_freeze=is_fine_tuning,
                        )
    elif model_name.lower() == 'pspnet':
        model = sm.PSPNet(BACKBONE,
                          classes=n_classes,
                          activation=activation,
                          input_shape=(input_size, input_size, 3),
                          encoder_weights='imagenet',
                          encoder_freeze=is_fine_tuning,
                          psp_dropout=0.1,
                          downsample_factor=4)
    else:
        raise Exception(model_name + ' not supported')
    optim = keras.optimizers.SGD(lr=LR, momentum=0.9, nesterov=True)
    # Combined loss: Dice + (binary or categorical) focal.
    dice_loss = sm.losses.DiceLoss()
    focal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()
    total_loss = dice_loss + (1 * focal_loss)
    metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]
    model.compile(optim, total_loss, metrics)
    if weight_path:
        model.load_weights(weight_path)
    train_dataset = Dataset(
        train_dir,
        train_label_dir,
        classes=CLASSES,
        augmentation=get_training_augmentation(input_size)
    )
    train_dataloader = Dataloader(train_dataset, batch_size=batch_size, shuffle=True)
    assert train_dataloader[0][0].shape == (batch_size, input_size, input_size, 3)
    assert train_dataloader[0][1].shape == (batch_size, input_size, input_size, n_classes)

    has_validation = bool(val_dir and val_label_dir)
    # BUGFIX: the checkpoint previously always monitored the default
    # 'val_loss'; without validation data that metric never exists, so
    # best_model.h5 was never written. Monitor training loss in that case.
    monitor = 'val_loss' if has_validation else 'loss'
    callbacks = [
        keras.callbacks.ModelCheckpoint('./best_model.h5', monitor=monitor,
                                        save_weights_only=True, save_best_only=True, mode='min'),
        keras.callbacks.ReduceLROnPlateau(monitor=monitor),
    ]
    if has_validation:
        valid_dataset = Dataset(
            val_dir,
            val_label_dir,
            classes=CLASSES,
            augmentation=get_val_augmentation(input_size)
        )
        valid_dataloader = Dataloader(valid_dataset, batch_size=1, shuffle=False)
        history = model.fit_generator(
            train_dataloader,
            steps_per_epoch=len(train_dataloader),
            epochs=epochs,
            callbacks=callbacks,
            validation_data=valid_dataloader,
            validation_steps=len(valid_dataloader),
        )
    else:
        history = model.fit_generator(
            train_dataloader,
            steps_per_epoch=len(train_dataloader),
            epochs=epochs,
            callbacks=callbacks,
        )
    if test_dir and test_label_dir:
        test_dataset = Dataset(
            test_dir,
            test_label_dir,
            classes=CLASSES,
            augmentation=get_val_augmentation(input_size),
        )
        test_dataloader = Dataloader(test_dataset, batch_size=1, shuffle=False)
        scores = model.evaluate_generator(test_dataloader)
        print("Loss: {:.5}".format(scores[0]))
        for metric, value in zip(metrics, scores[1:]):
            print("mean {}: {:.5}".format(metric.__name__, value))
    return model
Example #14
0
def get_model(model,
              BACKBONE,
              opt,
              loss,
              metric,
              nclass=None,
              freeze_encoder=False,
              batchnormalization=True,
              dropout=None):
    """Build and compile a segmentation model by name.

    Args:
        model: one of 'fpn', 'unet', 'psp', 'linknet', 'xnet', 'jpu',
            'deeplab'.
        BACKBONE: encoder backbone name for segmentation_models.
        opt: keras optimizer (instance or identifier) for compile().
        loss: loss function for compile().
        metric: metrics list for compile().
        nclass: number of output classes; falls back to the module-level
            n_classes when None.
        freeze_encoder: freeze encoder weights (used by the FPN branch only).
        batchnormalization: use batchnorm in the FPN pyramid.
        dropout: pyramid dropout rate for FPN.

    Returns:
        A compiled keras model.

    Raises:
        ValueError: for an unknown model name.
    """
    h, w = None, None  # fully convolutional: spatial size left unspecified
    if nclass is None:
        nclass = n_classes  # module-level default

    if model == 'fpn':
        model = sm.FPN(BACKBONE,
                       classes=nclass,
                       input_shape=(h, w, 3),
                       activation='sigmoid',
                       encoder_freeze=freeze_encoder,
                       pyramid_use_batchnorm=batchnormalization,
                       pyramid_dropout=dropout)
    elif model == 'unet':
        model = sm.Unet(BACKBONE,
                        classes=nclass,
                        input_shape=(h, w, 3),
                        activation='sigmoid')
    elif model == 'psp':
        model = sm.PSPNet(BACKBONE,
                          classes=nclass,
                          input_shape=(h, w, 3),
                          activation='sigmoid')
    elif model == 'linknet':
        model = sm.Linknet(BACKBONE,
                           classes=nclass,
                           input_shape=(h, w, 3),
                           activation='sigmoid')
    elif model == 'xnet':
        model = smx.Xnet(BACKBONE,
                         classes=nclass,
                         input_shape=(h, w, 3),
                         activation='sigmoid')
    elif model == 'jpu':
        model = JPU_DeepLab(h, w, nclass)
    elif model == 'deeplab':
        # NOTE(review): classes is hard-coded to 4 here, ignoring nclass —
        # preserved from the original; confirm whether that is intentional.
        model = Deeplabv3(weights=None,
                          input_shape=(h, w, 3),
                          classes=4,
                          backbone='xception',
                          alpha=1.,
                          activation='sigmoid')
    else:
        raise ValueError('Unknown network ' + model)

    # Compile once for every branch (was duplicated seven times).
    model.compile(optimizer=opt, loss=loss, metrics=metric)
    return model
def create_model(double_size=True, slide_augmentation=True, trainable_encoder=True, n=32, dropout=0.2):
    """Build the segmentation model selected by the module-level ``model_type``.

    Args:
        double_size: upsample the input 2x before the backbone and
            average-pool the output back down (backbone size 256/240
            instead of 128/120).
        slide_augmentation: when False, the graph takes raw 101x101x1 input
            and zero-pads it to the backbone size; when True it expects
            pre-padded 128x128 (120x120 for PSPNet) input.
        trainable_encoder: when False, the backbone encoder is frozen.
        n: base decoder filter count (also forwarded to my_res_unet).
        dropout: dropout rate, used by the my_res_unet branch only.

    Returns:
        A keras model taking single-channel input (except my_res_unet,
        which builds its own input).
    """
    if model_type == 'my_res_unet':
        # Custom architecture defined elsewhere in this file/project.
        model = my_res_unet(n=n, batch_norm=True, dropout=dropout, slide_augmentation=slide_augmentation)
    else:
        image_size = 256 if double_size else 128
        if model_type == 'unet':
            model = sm.Unet(backbone_name=backbone,
                            input_shape=(image_size, image_size, 3),
                            classes=1,
                            activation='sigmoid',
                            encoder_weights='imagenet',
                            encoder_freeze=not trainable_encoder,
                            encoder_features='default',
                            decoder_block_type='upsampling',
                            decoder_filters=(16*n, 8*n, 4*n, 2*n, n),
                            decoder_use_batchnorm=True)
        elif model_type == 'fpn':
            model = sm.FPN(backbone_name=backbone,
                           input_shape=(image_size, image_size, 3),
                           classes=1,
                           activation='sigmoid',
                           encoder_weights='imagenet',
                           encoder_freeze=not trainable_encoder,
                           encoder_features='default',
                           pyramid_block_filters=256,
                           pyramid_use_batchnorm=True,
                           pyramid_dropout=None,
                           final_interpolation='bilinear')
        elif model_type == 'linknet':
            model = sm.Linknet(backbone_name=backbone,
                               input_shape=(image_size, image_size, 3),
                               classes=1,
                               activation='sigmoid',
                               encoder_weights='imagenet',
                               encoder_freeze=not trainable_encoder,
                               encoder_features='default',
                               decoder_block_type='upsampling',
                               decoder_filters=(None, None, None, None, 16),
                               decoder_use_batchnorm=True)
        elif model_type == 'pspnet':
            # PSPNet needs sizes divisible by its pooling grid, hence 240/120.
            image_size = 240 if double_size else 120
            model = sm.PSPNet(backbone_name=backbone,
                              input_shape=(image_size, image_size, 3),
                              classes=1,
                              activation='sigmoid',
                              encoder_weights='imagenet',
                              encoder_freeze=not trainable_encoder,
                              downsample_factor=8,
                              psp_conv_filters=512,
                              psp_pooling_type='avg',
                              psp_use_batchnorm=True,
                              psp_dropout=None,
                              final_interpolation='bilinear')
        else:
            # NOTE(review): exits with status 0 on an invalid model type;
            # a nonzero code (or an exception) would better signal the error.
            print('Invalid segmentation model type')
            exit(0)

        if not slide_augmentation:
            # Pad the raw 101x101 input up to the backbone's expected size
            # (101 + 9 + 10 = 120; 101 + 13 + 14 = 128).
            x = keras.layers.Input(shape=(101, 101, 1), name='input')
            if model_type == 'pspnet':
                y = keras.layers.ZeroPadding2D(((9, 10), (9, 10)), name='zero_pad_input')(x)
            else:
                y = keras.layers.ZeroPadding2D(((13, 14), (13, 14)), name='zero_pad_input')(x)
            # NOTE(review): Cropping2D with default arguments crops
            # ((0, 0), (0, 0)) — a no-op; confirm whether a crop was intended.
            y = keras.layers.Cropping2D()(y)
        else:
            if model_type == 'pspnet':
                x = keras.layers.Input(shape=(120, 120, 1), name='input')
            else:
                x = keras.layers.Input(shape=(128, 128, 1), name='input')
            y = x
        if double_size:
            y = keras.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(y)
        # Replicate the single grayscale channel to the 3 channels the
        # imagenet-pretrained backbones expect.
        y = keras.layers.concatenate([y, y, y], name='channel_x3')
        y = model(y)
        if double_size:
            # Pool the output mask back down to the original resolution.
            y = keras.layers.AvgPool2D(pool_size=(2, 2))(y)
        model = keras.models.Model(x, y)

    return model
 def __init__(self, **kwargs):
     """Wrap a segmentation_models PSPNet under the tf.keras framework.

     The mandatory ``name`` keyword is consumed for the parent constructor;
     every remaining keyword argument is forwarded to ``sm.PSPNet``.
     """
     import segmentation_models as sm
     sm.set_framework('tf.keras')
     model_name = kwargs.pop('name')  # parent wants it; PSPNet must not see it
     super().__init__(name=model_name)
     self.unet = sm.PSPNet(**kwargs)
Example #17
0
    def Setup(self):
        '''
        User function: Setup all the parameters

        Args:
            None

        Returns:
            None

        Raises:
            ValueError: if params["model"] is not one of
                "Unet", "FPN", "Linknet", "PSPNet".
        '''
        # define network parameters
        self.system_dict["local"]["n_classes"] = 1 if len(
            self.system_dict["params"]["classes_to_train"]) == 1 else (
                len(self.system_dict["params"]["classes_to_train"]) +
                1)  # case for binary and multiclass segmentation
        activation = 'sigmoid' if self.system_dict["local"][
            "n_classes"] == 1 else 'softmax'

        # create model
        # BUGFIX: an unrecognized model name previously left
        # system_dict["local"]["model"] unset, causing a confusing KeyError
        # further down; fail fast with a clear error instead.
        architectures = {
            "Unet": sm.Unet,
            "FPN": sm.FPN,
            "Linknet": sm.Linknet,
            "PSPNet": sm.PSPNet,
        }
        model_name = self.system_dict["params"]["model"]
        if model_name not in architectures:
            raise ValueError("Unknown model: " + str(model_name))
        self.system_dict["local"]["model"] = architectures[model_name](
            self.system_dict["params"]["backbone"],
            classes=self.system_dict["local"]["n_classes"],
            activation=activation)

        # define optimizer
        optim = keras.optimizers.Adam(0.0001)

        # Segmentation models losses can be combined together by '+' and scaled by integer or float factor
        dice_loss = sm.losses.DiceLoss()
        focal_loss = sm.losses.BinaryFocalLoss() if self.system_dict["local"][
            "n_classes"] == 1 else sm.losses.CategoricalFocalLoss()
        total_loss = dice_loss + (1 * focal_loss)

        # actually total_loss can be imported directly from the library; the
        # explicit combination above just shows how losses can be manipulated
        # total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss

        metrics = [
            sm.metrics.IOUScore(threshold=0.5),
            sm.metrics.FScore(threshold=0.5)
        ]

        # compile keras model with the defined optimizer, loss and metrics
        self.system_dict["local"]["model"].compile(optim, total_loss, metrics)

        # restore pre-trained weights for inference / fine-tuning
        self.system_dict["local"]["model"].load_weights(
            self.system_dict["params"]["path_to_model"])
Example #18
0
    def _do_make_model_task(self,
                            task,
                            model_name,
                            nb_classes,
                            width=299,
                            height=299,
                            backbone="resnet50",
                            activation="softmax"):
        """Build an (uncompiled) model for the given task.

        Args:
            task: Task.CLASSIFICATION or Task.SEMANTIC_SEGMENTATION.
            model_name: architecture identifier (see branches below).
            nb_classes: number of output classes.
            width, height: input spatial dimensions.
            backbone: encoder backbone for the segmentation architectures.
            activation: output activation (used by deeplab_v3 only).

        Returns:
            A keras model instance.

        Raises:
            NotImplementedError: for an unsupported task or an unknown
                segmentation model name.
        """
        if task == Task.CLASSIFICATION:
            # Some backbones require a minimum input size.
            xception_shape_condition = height >= 71 and width >= 71
            mobilenet_shape_condition = height >= 32 and width >= 32

            if model_name == "xception" and xception_shape_condition:
                model = models.xception(nb_classes=nb_classes,
                                        height=height,
                                        width=width)
            elif model_name == "dilated_xception" and xception_shape_condition:
                model = models.dilated_xception(
                    nb_classes=nb_classes,
                    height=height,
                    width=width,
                    weights_info=self.config.train_params.weights_info)
            elif model_name == "mobilenet" and mobilenet_shape_condition:
                model = models.mobilenet(nb_classes=nb_classes,
                                         height=height,
                                         width=width)
            elif model_name == "mobilenetv2" and mobilenet_shape_condition:
                model = models.mobilenet_v2(
                    nb_classes=nb_classes,
                    height=height,
                    width=width,
                    weights_info=self.config.train_params.weights_info)
            elif model_name.startswith("efficientnetb"):
                model = models.EfficientNet(
                    model_name=model_name,
                    nb_classes=nb_classes,
                    height=height,
                    width=width,
                )
            elif model_name.startswith('resnest'):
                model = models.resnest(
                    nb_classes=nb_classes,
                    model_name=model_name,
                    height=height,
                    width=width,
                )
            else:
                # generic fallback classifier
                model = models.Model2D(nb_classes, height, width)

        elif task == Task.SEMANTIC_SEGMENTATION:
            print('------------------')
            print('Model:', model_name)
            print('Backbone:', backbone)
            print('------------------')

            if model_name == "unet":
                model = segmentation_models.Unet(
                    backbone_name=backbone,
                    input_shape=(height, width, 3),
                    classes=nb_classes,
                )
            elif model_name == "deeplab_v3":
                model = models.Deeplabv3(
                    weights_info=self.config.train_params.weights_info,
                    input_shape=(height, width, 3),
                    classes=nb_classes,
                    backbone=backbone,
                    activation=activation)
            elif model_name == "pspnet":
                model = segmentation_models.PSPNet(
                    backbone_name=backbone,
                    input_shape=(height, width, 3),
                    classes=nb_classes,
                )
            elif model_name == "fpn":
                model = segmentation_models.FPN(
                    backbone_name=backbone,
                    input_shape=(height, width, 3),
                    classes=nb_classes,
                )
            else:
                # BUGFIX: an unknown segmentation model previously fell
                # through and raised UnboundLocalError at `return model`;
                # fail explicitly instead, mirroring the unsupported-task path.
                raise NotImplementedError(
                    'Unsupported segmentation model: ' + str(model_name))
        else:
            raise NotImplementedError

        return model
Example #19
0
# NOTE(review): Y_tst, model_id, preprocess_input and preprocess_output are
# defined earlier in the script — confirm against the full file.
print("there are {} images with target mask in testset".format(len(Y_tst)))

# model construction (keras + sm)
# model_id selects the architecture: 0=Unet, 1=Linknet, 2=FPN, 3=PSPNet.
if model_id == 0:
    model = sm.Unet(classes=1,
                    activation='sigmoid',
                    encoder_weights='imagenet')
elif model_id == 1:
    model = sm.Linknet(classes=1,
                       activation='sigmoid',
                       encoder_weights='imagenet')
elif model_id == 2:
    model = sm.FPN(classes=1, activation='sigmoid', encoder_weights='imagenet')
elif model_id == 3:
    model = sm.PSPNet(classes=1,
                      activation='sigmoid',
                      encoder_weights='imagenet')  # input size must be 384x384
# NOTE(review): any other model_id leaves `model` undefined, failing later
# with a NameError.

# The same augmentation settings are passed to both generators so image and
# mask augmentations are configured identically.
data_gen_args = dict(rotation_range=360,
                     width_shift_range=0.15,
                     height_shift_range=0.15,
                     zoom_range=0.15,
                     brightness_range=[0.7, 1.3],
                     horizontal_flip=True,
                     vertical_flip=False,
                     fill_mode='nearest')

image_generator = ImageDataGenerator(**data_gen_args,
                                     preprocessing_function=preprocess_input)
mask_generator = ImageDataGenerator(**data_gen_args,
                                    preprocessing_function=preprocess_output)
Example #20
0
# NOTE(review): the alias `smp` combined with the torch tensors below
# suggests this was meant to import segmentation_models_pytorch, not the
# keras-based segmentation_models — confirm the import target.
import segmentation_models as smp
import torch

# NOTE(review): this Unet is constructed (with imagenet encoder weights)
# and then immediately overwritten by the PSPNet below — dead assignment?
model = smp.Unet('vgg11', classes=4, encoder_weights='imagenet')

model = smp.PSPNet('vgg11', classes=4, encoder_weights='imagenet')
# Smoke test: push a random batch of 5 RGB 256x1600 images through the model
# and report the class name and output size.
inputs = torch.randn(5, 3, 256, 1600)
outputs = model(inputs)
print(model.__class__.__name__)

print(outputs.size())