Code Example #1
def init_and_train_model(train_df, train_path, val_df, val_path, model_type, BACKBONE, AUGMENTATIONS, batch_size, epoch_num):
  preprocess_input = sm.backbones.get_preprocessing(BACKBONE)  # computed but unused below; newer sm versions expose this as sm.get_preprocessing
  
  if(model_type == "Linknet"):
    model = sm.Linknet(BACKBONE,input_shape = (256, 256, 3), encoder_weights='imagenet', encoder_freeze=True)
    train_gen = keras_generator_with_augs(train_df, batch_size, train_path, AUGMENTATIONS)
    val_gen = keras_generator(val_df, batch_size, val_path)
  elif(model_type == "Unet"):
    model = sm.Unet(BACKBONE,input_shape = (256, 256, 3), encoder_weights='imagenet', encoder_freeze=True)
    train_gen = keras_generator_with_augs(train_df, batch_size, train_path, AUGMENTATIONS)
    val_gen = keras_generator(val_df, batch_size, val_path)
  elif(model_type == "FPN"):
    model = sm.FPN(BACKBONE,input_shape = (256, 256, 3), encoder_weights='imagenet', encoder_freeze=True)
    train_gen = keras_generator_with_augs(train_df, batch_size, train_path, AUGMENTATIONS)
    val_gen = keras_generator(val_df, batch_size, val_path)
  elif(model_type == "PSPNet"):
    model = sm.PSPNet(BACKBONE,input_shape = (240, 240, 3), encoder_weights='imagenet', encoder_freeze=True)
    train_gen = keras_generator_with_augs(train_df, batch_size, train_path, AUGMENTATIONS, PSPNet=True)
    val_gen = keras_generator(val_df, batch_size, val_path, PSPNet = True)
    
  model.compile(
    'Adam',
    loss=sm.losses.dice_loss,
    metrics=[sm.metrics.dice_score],
  )
  
  best_w = keras.callbacks.ModelCheckpoint(model_type + '_' + BACKBONE + '_best.h5',
                                monitor='val_loss',
                                verbose=0,
                                save_best_only=True,
                                save_weights_only=True,
                                mode='auto',
                                period=1)

  last_w = keras.callbacks.ModelCheckpoint(model_type + '_' + BACKBONE + '_last.h5',
                                monitor='val_loss',
                                verbose=0,
                                save_best_only=False,
                                save_weights_only=True,
                                mode='auto',
                                period=1)


  callbacks = [best_w, last_w]
  
  history = model.fit_generator(train_gen,
              steps_per_epoch=50,
              epochs=epoch_num,
              verbose=1,
              callbacks=callbacks,
              validation_data=val_gen,
              validation_steps=50,
              class_weight=None,
              max_queue_size=1,
              workers=1,
              use_multiprocessing=False,
              shuffle=True,
              initial_epoch=0)
  return model, history
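A minimal call sketch for reference; the dataframes, directory paths, and AUGS augmentation object are hypothetical placeholders, and the keras_generator* helpers are assumed to exist in the surrounding project:

# hypothetical usage of init_and_train_model
model, history = init_and_train_model(
    train_df, 'data/train/', val_df, 'data/val/',
    model_type='Unet', BACKBONE='resnet34',
    AUGMENTATIONS=AUGS, batch_size=16, epoch_num=30)
print('best val_loss:', min(history.history['val_loss']))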
  
Code Example #2
def get_sm_model():
    CLASSES = ['sofa']

    sm.set_framework('tf.keras')

    model = sm.Linknet(BACKBONE, classes=len(CLASSES), encoder_weights=None)

    return model
Code Example #3
def inference(model_name, image_folder_path):
    # wrap our image inside the Dataset wrapper used for training,
    # TODO: remove this and add custom pipeline for preprocessing.
    trial_dataset = Dataset(
        image_folder_path,
        image_folder_path,
        classes=CLASSES,
        augmentation=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocess_input),
    )


    print(model_name)
    if model_name=="unet":
        print("Loading Models. This might take some time...")
        modelUnet = sm.Unet(BACKBONE, classes=n_classes, activation=activation)
        model_c = config.STYLES["unet"]
        model_path = os.path.join(f"{config.MODEL_PATH}",f"{model_c}.h5")
        modelUnet.load_weights(model_path)
        print("Loaded Unet.")
        model = modelUnet
    elif model_name=="featurepyramidnetwork":
        modelFPN = sm.FPN(BACKBONE, classes=n_classes, activation=activation) 
        model_c = config.STYLES["featurepyramidnetwork"]
        model_path = os.path.join(f"{config.MODEL_PATH}",f"{model_c}.h5")
        modelFPN.load_weights(model_path)
        print("Loaded FPN.")
        model = modelFPN
    elif model_name=="linknet":
        modelLinknet = sm.Linknet(BACKBONE, classes=n_classes, activation=activation)
        model_c = config.STYLES["linknet"]
        model_path = os.path.join(f"{config.MODEL_PATH}",f"{model_c}.h5")
        modelLinknet.load_weights(model_path)
        print("Loaded Linknet.")
        model = modelLinknet
    else:
        raise ValueError(f"Unknown model_name: {model_name}")
    # model.load_weights(model_path) 

    # trial folder must contain only one image, hence the [0]
    image, gt_mask = trial_dataset[0]
    image = np.expand_dims(image, axis=0)
    pr_mask = model.predict(image).round()
    #print(pr_mask.shape)
    #print(pr_mask[0].shape)
    # make image back to normal
    image=denormalize(image.squeeze())
    gt_mask=gt_mask[..., 0].squeeze()
    pr_mask=pr_mask[..., 0].squeeze()
  
    # DEBUG: 
    # visualize(
    #     image=image,
    #     gt_mask=gt_mask,
    #     pr_mask=pr_mask,
    # )
    del model
    gc.collect()
    return pr_mask,gt_mask
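A hedged usage sketch: the folder path is a placeholder, and CLASSES, BACKBONE, n_classes, activation, config, and numpy-as-np are assumed to be set up as in the module above. It adds a quick plain-NumPy IoU check on the returned masks:

# hypothetical single-image inference plus an IoU check
pr_mask, gt_mask = inference("unet", "data/trial_folder/")
inter = np.logical_and(pr_mask > 0.5, gt_mask > 0.5).sum()
union = np.logical_or(pr_mask > 0.5, gt_mask > 0.5).sum()
print("IoU:", inter / union if union else 1.0)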
Code Example #4
def create_model():
    """
    Function used to create model based on parameters specified in settings.py
    """
    if model_type == 'myresunet':
        model = myresunet.create_model()
    elif model_type == 'unet':
        model = sm.Unet(backbone_name=backbone,
                        input_shape=(image_height, image_width, 3),
                        classes=num_classes,
                        activation='softmax',
                        encoder_weights='imagenet',
                        encoder_freeze=False,
                        encoder_features='default',
                        decoder_block_type='upsampling',
                        decoder_filters=(decoder_scaler * 256, decoder_scaler * 128, decoder_scaler * 64,
                                         decoder_scaler * 32, decoder_scaler * 16),
                        decoder_use_batchnorm=True)
    elif model_type == 'fpn':
        model = sm.FPN(backbone_name=backbone,
                       input_shape=(image_height, image_width, 3),
                       classes=num_classes,
                       activation='softmax',
                       encoder_weights='imagenet',
                       encoder_freeze=False,
                       encoder_features='default',
                       pyramid_block_filters=decoder_scaler * 256,
                       pyramid_use_batchnorm=True,
                       pyramid_aggregation='concat',
                       pyramid_dropout=None)
    elif model_type == 'linknet':
        model = sm.Linknet(backbone_name=backbone,
                           input_shape=(image_height, image_width, 3),
                           classes=num_classes,
                           activation='softmax',
                           encoder_weights='imagenet',
                           encoder_freeze=False,
                           encoder_features='default',
                           decoder_block_type='upsampling',
                           decoder_filters=(None, None, None, None, decoder_scaler * 16),
                           decoder_use_batchnorm=True)
    elif model_type == 'pspnet':
        model = sm.PSPNet(backbone_name=backbone,
                          input_shape=(image_height, image_width, 3),
                          classes=num_classes,
                          activation='softmax',
                          encoder_weights='imagenet',
                          encoder_freeze=False,
                          downsample_factor=8,
                          psp_conv_filters=decoder_scaler * 512,
                          psp_pooling_type='avg',
                          psp_use_batchnorm=True,
                          psp_dropout=None)
    else:
        raise ValueError('Invalid segmentation model type: ' + str(model_type))
    return model
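The function reads module-level globals imported from settings.py; a minimal sketch of what that file might contain (only the names are taken from the code above, the values are illustrative assumptions):

# settings.py (illustrative values)
model_type = 'unet'     # one of: 'myresunet', 'unet', 'fpn', 'linknet', 'pspnet'
backbone = 'resnet34'   # any encoder supported by segmentation_models
image_height = 256
image_width = 256
num_classes = 4
decoder_scaler = 1      # scales the decoder/pyramid/psp filter counts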
Code Example #5
def create_model(border=False, trainable_encoder=False):
    if model_type == 'unet':
        model = sm.Unet(backbone_name=backbone,
                        input_shape=(image_size, image_size, 3),
                        classes=2 if border else 1,
                        activation='sigmoid',
                        encoder_weights='imagenet',
                        encoder_freeze=not trainable_encoder,
                        encoder_features='default',
                        decoder_block_type='upsampling',
                        decoder_filters=(256, 128, 64, 32, 16),
                        decoder_use_batchnorm=True)
    elif model_type == 'fpn':
        model = sm.FPN(backbone_name=backbone,
                       input_shape=(image_size, image_size, 3),
                       classes=2 if border else 1,
                       activation='sigmoid',
                       encoder_weights='imagenet',
                       encoder_freeze=not trainable_encoder,
                       encoder_features='default',
                       pyramid_block_filters=256,
                       pyramid_use_batchnorm=True,
                       pyramid_aggregation='concat',
                       pyramid_dropout=None)
    elif model_type == 'linknet':
        model = sm.Linknet(backbone_name=backbone,
                           input_shape=(image_size, image_size, 3),
                           classes=2 if border else 1,
                           activation='sigmoid',
                           encoder_weights='imagenet',
                           encoder_freeze=not trainable_encoder,
                           encoder_features='default',
                           decoder_block_type='upsampling',
                           decoder_filters=(None, None, None, None, 16),
                           decoder_use_batchnorm=True)
    elif model_type == 'pspnet':
        model = sm.PSPNet(backbone_name=backbone,
                          input_shape=(image_size, image_size, 3),
                          classes=2 if border else 1,
                          activation='sigmoid',
                          encoder_weights='imagenet',
                          encoder_freeze=not trainable_encoder,
                          downsample_factor=8,
                          psp_conv_filters=512,
                          psp_pooling_type='avg',
                          psp_use_batchnorm=True,
                          psp_dropout=None)
    else:
        raise ValueError('Invalid segmentation model type: ' + str(model_type))
    return model
Code Example #6
def get_backboned_model(model, backbone, freeze=True):

    if model == 'Unet':
        base_model = sm.Unet(backbone_name=backbone,
                             encoder_weights='imagenet',
                             classes=1,
                             activation='sigmoid',
                             freeze_encoder=freeze)  # sm 0.x kwarg; renamed to encoder_freeze in 1.x
    elif model == 'FPN':
        base_model = sm.FPN(backbone_name=backbone,
                            encoder_weights='imagenet',
                            classes=1,
                            activation='sigmoid',
                            freeze_encoder=freeze)
    elif model == 'Linknet':
        base_model = sm.Linknet(backbone_name=backbone,
                                encoder_weights='imagenet',
                                classes=1,
                                activation='sigmoid',
                                freeze_encoder=freeze)
    elif model == 'PSPNet':
        base_model = sm.PSPNet(backbone_name=backbone,
                               encoder_weights='imagenet',
                               classes=1,
                               activation='sigmoid',
                               freeze_encoder=freeze)
    else:
        print('Model not identified! Defaulting to Unet.')
        base_model = sm.Unet(backbone_name=backbone,
                             encoder_weights='imagenet',
                             classes=1,
                             activation='sigmoid',
                             freeze_encoder=freeze)

    inp = Input(shape=(96, 96, 1))
    l1 = Conv2D(3, (1, 1))(inp)  # map N channels data to 3 channels
    out = base_model(l1)

    model = Model(inp, out, name=base_model.name)

    # print(model.summary())
    return model
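A hedged usage sketch for the grayscale wrapper above (the compile settings and dummy batch are assumptions; note how the Conv2D(3, (1, 1)) stem maps one channel to the three the ImageNet encoder expects):

import numpy as np

# hypothetical smoke test of the wrapped model
model = get_backboned_model('Unet', 'resnet34', freeze=True)
model.compile('adam', loss='binary_crossentropy', metrics=['accuracy'])
x = np.zeros((4, 96, 96, 1), dtype='float32')  # dummy grayscale batch
print(model.predict(x).shape)  # expected: (4, 96, 96, 1)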
Code Example #7
def get_model_definition():
    backbone = 'mobilenet'
    n_classes = 2
    lr = 0.001
    activation = 'softmax'
    pre_process_input = sm.get_preprocessing(backbone)
    optimizer = keras.optimizers.Adam(lr)
    metrics = [
        sm.metrics.FScore(threshold=0.5),
    ]
    model = sm.Linknet(backbone,
                       classes=n_classes,
                       activation=activation,
                       encoder_freeze=True)
    if n_classes == 1:
        loss = sm.losses.BinaryFocalLoss()
    else:
        loss = sm.losses.CategoricalFocalLoss()
    model.compile(optimizer, loss, metrics)
    return model, pre_process_input
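Usage might look like the following (x_train/y_train are placeholders; the masks must be one-hot with two channels to match the softmax head, and spatial dims should be divisible by 32 for the Linknet decoder):

# hypothetical training call
model, pre_process_input = get_model_definition()
x = pre_process_input(x_train)                   # x_train: (N, H, W, 3)
model.fit(x, y_train, batch_size=8, epochs=10)   # y_train: (N, H, W, 2) one-hot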
Code Example #8
    def _build(self):
        if self.type == 'Unet':
            model = sm.Unet(backbone_name=self.backbone, 
                            classes=self.n_class,
                            activation=self.activate,
                            encoder_weights=self.encoder_weights)

        elif self.type == 'Linknet':
            model = sm.Linknet(backbone_name=self.backbone, 
                               classes=self.n_class, 
                               activation=self.activate, 
                               encoder_weights=self.encoder_weights)

        elif self.type == 'FPN':
            model = sm.FPN(backbone_name=self.backbone, 
                           classes=self.n_class, 
                           activation=self.activate, 
                           encoder_weights=self.encoder_weights)
        
        else:
            raise ValueError('Model type {} is not supported.'.format(self.type))

        return model
Code Example #9
RESULTADOS_finetuning = []

# Define the K-fold Cross Validator
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=1)

# K-fold Cross Validation model evaluation
fold_no = 1
for train, test in kfold.split(imagens, mascaras_medico):
    print("######### KFOLD ", fold_no, "#########")

    ###### CREATE THE MODEL
    BACKBONE = 'resnet34'
    preprocess_input = sm.get_preprocessing(BACKBONE)
    # define model
    model = sm.Linknet(BACKBONE,
                       encoder_weights='imagenet',
                       encoder_freeze=True)
    model.compile(
        'Adam',
        loss=sm.losses.bce_jaccard_loss,
        metrics=[sm.metrics.iou_score],
    )

    ###### split into train and validation: with 5 folds each split is 80% train / 20% test, so taking 25% of the 80% train portion for validation restores the pre-kfold proportions (60% train, 20% test, 20% validation)
    x_train, x_val, y_train, y_val = train_test_split(imagens[train],
                                                      mascaras_medico[train],
                                                      test_size=0.25,
                                                      random_state=11)

    x_train = np.asarray(x_train)
    y_train = (np.asarray(y_train) > threshold_otsu(np.asarray(y_train)))
Code Example #10
with tf.device('/gpu:3'):

    dataset = Dataset(x_train_path, y_train_path, classes=['non-polyp', 'polyp'], augmentation=get_training_augmentation())
    
    BATCH_SIZE = 8
    CLASSES = ['non-polyp', 'polyp']
    LR = 0.0001
    EPOCHS = 25
    IMAGE_ORDERING = 'channels_last'
    n_classes = 2
    
    # SOTA
    BACKBONE = 'resnet34'
    # define model: each assignment below overwrites the previous one,
    # so only the last model built (fcn_8) is actually used; keep the line you need
    model = sm.Unet(BACKBONE, encoder_weights='imagenet')
    model = sm.Linknet(BACKBONE, encoder_weights='imagenet')
    model = sm.FPN(BACKBONE, encoder_weights='imagenet')
    model = sm.PSPNet(BACKBONE, encoder_weights='imagenet')

    model = fcn_8.fcn_8(2)

    optim = tf.keras.optimizers.Adam(LR)

    # segmentation_models losses can be combined with '+' and scaled by an integer or float factor
    # set class weights for dice_loss (non-polyp: 0.5, polyp: 1.0)
    dice_loss = sm.losses.DiceLoss(class_weights=np.array([0.5, 1]))
    focal_loss = sm.losses.BinaryFocalLoss()
    # (use sm.losses.CategoricalFocalLoss() instead when n_classes > 1 with a softmax head)
    total_loss = dice_loss + (1 * focal_loss)

    # actually, total_loss can be imported directly from the library; the lines above just show how to combine losses manually
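As the comment says, the combined loss also ships ready-made with the library; a short equivalent sketch (the 2.0 weighting is an illustrative assumption):

# ready-made combination from segmentation_models
total_loss = sm.losses.binary_focal_dice_loss
# or weight the terms explicitly:
total_loss = sm.losses.DiceLoss(class_weights=np.array([0.5, 1])) + 2.0 * sm.losses.BinaryFocalLoss()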
Code Example #11
    def Setup(self):
        '''
        User function: Setup all the parameters

        Args:
            None

        Returns:
            None
        '''
        # define network parameters
        self.system_dict["local"]["n_classes"] = 1 if len(
            self.system_dict["params"]["classes_to_train"]) == 1 else (
                len(self.system_dict["params"]["classes_to_train"]) +
                1)  # case for binary and multiclass segmentation
        activation = 'sigmoid' if self.system_dict["local"][
            "n_classes"] == 1 else 'softmax'

        #create model
        if (self.system_dict["params"]["model"] == "Unet"):
            self.system_dict["local"]["model"] = sm.Unet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "FPN"):
            self.system_dict["local"]["model"] = sm.FPN(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "Linknet"):
            self.system_dict["local"]["model"] = sm.Linknet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "PSPNet"):
            self.system_dict["local"]["model"] = sm.PSPNet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        else:
            raise ValueError(
                "Unknown model: " + self.system_dict["params"]["model"])

        # define optimizer
        optim = keras.optimizers.Adam(0.0001)

        # segmentation_models losses can be combined with '+' and scaled by an integer or float factor
        dice_loss = sm.losses.DiceLoss()
        focal_loss = sm.losses.BinaryFocalLoss() if self.system_dict["local"][
            "n_classes"] == 1 else sm.losses.CategoricalFocalLoss()
        total_loss = dice_loss + (1 * focal_loss)

        # actually, total_loss can be imported directly from the library; the example above just shows how to combine losses manually
        # total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss

        metrics = [
            sm.metrics.IOUScore(threshold=0.5),
            sm.metrics.FScore(threshold=0.5)
        ]

        # compile keras model with defined optimizer, loss and metrics
        self.system_dict["local"]["model"].compile(optim, total_loss, metrics)

        self.system_dict["local"]["model"].load_weights(
            self.system_dict["params"]["path_to_model"])
Code Example #12
def create_model(double_size=True, slide_augmentation=True, trainable_encoder=True, n=32, dropout=0.2):
    if model_type == 'my_res_unet':
        model = my_res_unet(n=n, batch_norm=True, dropout=dropout, slide_augmentation=slide_augmentation)
    else:
        image_size = 256 if double_size else 128
        if model_type == 'unet':
            model = sm.Unet(backbone_name=backbone,
                            input_shape=(image_size, image_size, 3),
                            classes=1,
                            activation='sigmoid',
                            encoder_weights='imagenet',
                            encoder_freeze=not trainable_encoder,
                            encoder_features='default',
                            decoder_block_type='upsampling',
                            decoder_filters=(16*n, 8*n, 4*n, 2*n, n),
                            decoder_use_batchnorm=True)
        elif model_type == 'fpn':
            model = sm.FPN(backbone_name=backbone,
                           input_shape=(image_size, image_size, 3),
                           classes=1,
                           activation='sigmoid',
                           encoder_weights='imagenet',
                           encoder_freeze=not trainable_encoder,
                           encoder_features='default',
                           pyramid_block_filters=256,
                           pyramid_use_batchnorm=True,
                           pyramid_dropout=None,
                           final_interpolation='bilinear')
        elif model_type == 'linknet':
            model = sm.Linknet(backbone_name=backbone,
                               input_shape=(image_size, image_size, 3),
                               classes=1,
                               activation='sigmoid',
                               encoder_weights='imagenet',
                               encoder_freeze=not trainable_encoder,
                               encoder_features='default',
                               decoder_block_type='upsampling',
                               decoder_filters=(None, None, None, None, 16),
                               decoder_use_batchnorm=True)
        elif model_type == 'pspnet':
            image_size = 240 if double_size else 120
            model = sm.PSPNet(backbone_name=backbone,
                              input_shape=(image_size, image_size, 3),
                              classes=1,
                              activation='sigmoid',
                              encoder_weights='imagenet',
                              encoder_freeze=not trainable_encoder,
                              downsample_factor=8,
                              psp_conv_filters=512,
                              psp_pooling_type='avg',
                              psp_use_batchnorm=True,
                              psp_dropout=None,
                              final_interpolation='bilinear')
        else:
            raise ValueError('Invalid segmentation model type: ' + str(model_type))

        if not slide_augmentation:
            x = keras.layers.Input(shape=(101, 101, 1), name='input')
            if model_type == 'pspnet':
                y = keras.layers.ZeroPadding2D(((9, 10), (9, 10)), name='zero_pad_input')(x)
            else:
                y = keras.layers.ZeroPadding2D(((13, 14), (13, 14)), name='zero_pad_input')(x)
            y = keras.layers.Cropping2D()(y)
        else:
            if model_type == 'pspnet':
                x = keras.layers.Input(shape=(120, 120, 1), name='input')
            else:
                x = keras.layers.Input(shape=(128, 128, 1), name='input')
            y = x
        if double_size:
            y = keras.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(y)
        y = keras.layers.concatenate([y, y, y], name='channel_x3')
        y = model(y)
        if double_size:
            y = keras.layers.AvgPool2D(pool_size=(2, 2))(y)
        model = keras.models.Model(x, y)

    return model
Code Example #13
print("Unet execution time is: ", execution_time_unet)

model1.save('unet_res34_backbone_50epochs.hdf5')

# convert the history.history dict to a pandas DataFrame:
hist1_df = pd.DataFrame(history1.history)
hist1_csv_file = 'history_unet_50epochs.csv'
with open(hist1_csv_file, mode='w') as f:
    hist1_df.to_csv(f)
############################################################
###Model 2
# using the same backbone as the Unet above

# define model (change to Unet or Linknet as needed)
model2 = sm.Linknet(BACKBONE1,
                    encoder_weights='imagenet',
                    classes=n_classes,
                    activation=activation)

# compile keras model with defined optimizer, loss and metrics
model2.compile(optim, total_loss, metrics=metrics)

print(model2.summary())

start2 = datetime.now()

history2 = model2.fit(X_train1,
                      y_train_cat,
                      batch_size=8,
                      epochs=50,
                      verbose=1,
                      validation_data=(X_test1, y_test_cat))
Code Example #14
        ModelCheckpoint(os.path.join(args.model_path,
                                     'centernet_loss_{epoch}.h5'),
                        monitor='loss',
                        verbose=1,
                        save_best_only=True,
                        save_weights_only=False,
                        mode='min'),
        ReduceLROnPlateau(monitor='val_loss',
                          factor=0.25,
                          patience=3,
                          verbose=1,
                          mode='min')
    ]

    model = sm.Linknet('resnet18',
                       input_shape=(args.image_height, args.image_width, 3),
                       classes=train_generator.classes + 2,
                       activation='sigmoid')
    model.summary()

    loss = centernet_loss(train_generator.num_classes, args.alpha_pos,
                          args.alpha_neg, args.near_center_coef,
                          args.negatives_weight, args.regression_weight)

    opt = Nadam(lr=args.lr)

    model.compile(opt,
                  loss=loss,
                  metrics=[
                      center_pos_loss(train_generator.num_classes),
                      center_neg_loss(train_generator.num_classes),
                      reg_loss(train_generator.num_classes)
Code Example #15
def get_model(model,
              BACKBONE,
              opt,
              loss,
              metric,
              nclass=None,
              freeze_encoder=False,
              batchnormalization=True,
              dropout=None):
    h, w = None, None  # fully convolutional: height/width left unspecified
    if nclass is None:
        nclass = n_classes  # fall back to the module-level default

    if model == 'fpn':
        model = sm.FPN(BACKBONE,
                       classes=nclass,
                       input_shape=(h, w, 3),
                       activation='sigmoid',
                       encoder_freeze=freeze_encoder,
                       pyramid_use_batchnorm=batchnormalization,
                       pyramid_dropout=dropout)
        model.compile(optimizer=opt, loss=loss, metrics=metric)

    elif model == 'unet':
        model = sm.Unet(BACKBONE,
                        classes=nclass,
                        input_shape=(h, w, 3),
                        activation='sigmoid')
        model.compile(optimizer=opt, loss=loss, metrics=metric)

    elif model == 'psp':
        # note: PSPNet needs a fixed input size, so (None, None, 3) will fail here
        model = sm.PSPNet(BACKBONE,
                          classes=nclass,
                          input_shape=(h, w, 3),
                          activation='sigmoid')
        model.compile(optimizer=opt, loss=loss, metrics=metric)

    elif model == 'linknet':
        model = sm.Linknet(BACKBONE,
                           classes=nclass,
                           input_shape=(h, w, 3),
                           activation='sigmoid')
        model.compile(optimizer=opt, loss=loss, metrics=metric)

    elif model == 'xnet':
        model = smx.Xnet(BACKBONE,
                         classes=nclass,
                         input_shape=(h, w, 3),
                         activation='sigmoid')
        model.compile(optimizer=opt, loss=loss, metrics=metric)
    elif model == 'jpu':
        model = JPU_DeepLab(h, w, nclass)
        model.compile(optimizer=opt, loss=loss, metrics=metric)

    elif model == 'deeplab':
        model = Deeplabv3(weights=None,
                          input_shape=(h, w, 3),
                          classes=4,
                          backbone='xception',
                          alpha=1.,
                          activation='sigmoid')
        model.compile(optimizer=opt, loss=loss, metrics=metric)

    else:
        raise ValueError('Unknown network ' + model)

    return model
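A hedged call sketch (the optimizer, loss, and metric choices are assumptions; nclass falls back to the module-level n_classes when omitted):

import segmentation_models as sm
from keras.optimizers import Adam

# hypothetical construction of a compiled FPN
model = get_model('fpn', 'resnet34',
                  opt=Adam(1e-4),
                  loss=sm.losses.bce_jaccard_loss,
                  metric=[sm.metrics.iou_score],
                  nclass=4)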
Code Example #16
File: run_seg_eval_LR.py Project: jovahe/KaggleSalt
def RunTest(
    params,
    model_name_template='models_3/{model}_{backbone}_{optimizer}_{augmented_image_size}-{padded_image_size}-{nn_image_size}_lrf{lrf}_{metric}_{CC}_f{test_fold_no}_{phash}'
):

    # # Params

    # In[ ]:

    DEV_MODE_RANGE = 0  # off

    # In[ ]:

    # In[ ]:

    def params_dict():
        return {
            x[0]: x[1]
            for x in vars(params).items() if not x[0].startswith('__')
        }

    def params_str():
        return '\n'.join([
            repr(x[0]) + ' : ' + repr(x[1]) + ','
            for x in vars(params).items() if not x[0].startswith('__')
        ])

    def params_hash(shrink_to=6):
        import hashlib
        import json
        return hashlib.sha1(
            json.dumps(params_dict(),
                       sort_keys=True).encode()).hexdigest()[:shrink_to]

    def params_save(fn, verbose=True):
        params_fn = fn + '.param.txt'
        with open(params_fn, 'w+') as f:
            s = params_str()
            hash = params_hash(shrink_to=1000)
            s = '{\n' + s + '\n}\nhash: ' + hash[:6] + ' ' + hash[6:]
            f.write(s)
            if verbose:
                print('params: ' + s + '\nsaved to ' + params_fn)

    # # Imports

    # In[ ]:

    import sys
    #sys.path.append(r'D:\Programming\3rd_party\keras')

    # In[ ]:

    import sys
    from imp import reload
    import numpy as np
    import keras
    import datetime
    import time

    from keras.models import Model, load_model
    from keras.layers import Input, Dropout, BatchNormalization, Activation, Add
    from keras.layers.core import Lambda
    from keras.layers.convolutional import Conv2D, Conv2DTranspose
    from keras.layers.pooling import MaxPooling2D
    from keras.layers.merge import concatenate
    from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
    from keras import backend as K

    import tensorflow as tf

    # # Load data

    # In[ ]:

    import load_data
    load_data = reload(load_data)
    import keras_unet_divrikwicky_model
    keras_unet_divrikwicky_model = reload(keras_unet_divrikwicky_model)

    # In[ ]:

    train_df = load_data.LoadData(train_data=True,
                                  DEV_MODE_RANGE=DEV_MODE_RANGE,
                                  to_gray=False)

    # In[ ]:

    train_df.images[0].shape

    # In[ ]:

    train_images, train_masks, validate_images, validate_masks = load_data.SplitTrainData(
        train_df, params.test_fold_no)
    train_images.shape, train_masks.shape, validate_images.shape, validate_masks.shape

    # # Reproducibility setup:

    # In[ ]:

    import random as rn

    import os
    os.environ['PYTHONHASHSEED'] = '0'

    np.random.seed(params.seed)
    rn.seed(params.seed)

    #session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
    tf.set_random_seed(params.seed)
    #sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    sess = tf.Session(graph=tf.get_default_graph())
    K.set_session(sess)

    # # IOU metric

    # In[ ]:

    thresholds = np.array(
        [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])

    def iou(img_true, img_pred):
        assert (img_true.shape[-1] == 1) and (len(img_true.shape) == 3) or (
            img_true.shape[-1] != 1) and (len(img_true.shape) == 2)
        i = np.sum((img_true * img_pred) > 0)
        u = np.sum((img_true + img_pred) > 0)
        if u == 0:
            return 1
        return i / u

    def iou_metric(img_true, img_pred):
        img_pred = img_pred > 0.5  # added by sgx 20180728
        if img_true.sum() == img_pred.sum() == 0:
            scores = 1
        else:
            scores = (thresholds <= iou(img_true, img_pred)).mean()
        return scores

    def iou_metric_batch(y_true_in, y_pred_in):
        batch_size = len(y_true_in)
        metric = []
        for batch in range(batch_size):
            value = iou_metric(y_true_in[batch], y_pred_in[batch])
            metric.append(value)
        #print("metric = ",metric)
        return np.mean(metric)

    # adapter for Keras
    def my_iou_metric(label, pred):
        metric_value = tf.py_func(iou_metric_batch, [label, pred], tf.float64)
        return metric_value

    # # Data generator

    # In[ ]:

    mean_val = np.mean(train_images.apply(np.mean))
    mean_std = np.mean(train_images.apply(np.std))
    mean_val, mean_std

    #####################################
    def FillCoordConvNumpy(imgs):
        print(imgs.shape)
        assert len(imgs.shape) == 4
        assert imgs.shape[3] == 3
        n = imgs.shape[2]
        hor_img = np.linspace(-1., 1., n).reshape((1, 1, n, 1))
        n = imgs.shape[1]
        ver_img = np.linspace(-1., 1., n).reshape((1, n, 1, 1))
        imgs[:, :, :, 0:1] = hor_img
        imgs[:, :, :, 2:3] = ver_img

    def FillCoordConvList(imgs):
        print(imgs.shape)
        assert len(imgs[0].shape) == 3
        assert imgs[0].shape[2] == 3
        for img in imgs:
            n = img.shape[1]
            hor_img = np.linspace(-1., 1., n).reshape((1, n, 1))
            n = img.shape[0]
            ver_img = np.linspace(-1., 1., n).reshape((n, 1, 1))
            img[:, :, 0:1] = hor_img
            img[:, :, 2:3] = ver_img

    if params.coord_conv:
        FillCoordConvList(train_images)
        FillCoordConvList(validate_images)
        print(train_images[0][0, 0, 0], train_images[0][0, 0, 2])
        assert train_images[0][0, 0, 0] == -1.
        assert train_images[0][0, 0, 2] == 1.

    ######################################

    from my_augs import AlbuDataGenerator

    # # model

    # In[ ]:

    sys.path.append('../3rd_party/segmentation_models')
    import segmentation_models
    segmentation_models = reload(segmentation_models)
    from segmentation_models.utils import set_trainable

    # In[ ]:
    if not hasattr(params, 'model_params'):
        params.model_params = {}

    if params.load_model_from:
        model = load_model(params.load_model_from,
                           custom_objects={'my_iou_metric': my_iou_metric})
        print('MODEL LOADED from: ' + params.load_model_from)
    else:
        model = None
        if params.model == 'FNN':  # note: the 'FNN' value selects an FPN model
            model = segmentation_models.FPN(
                backbone_name=params.backbone,
                input_shape=(None, None, params.channels),
                encoder_weights=params.initial_weightns,
                freeze_encoder=True,
                dropout=params.dropout,
                **params.model_params)
        if params.model == 'FNNdrop':
            model = segmentation_models.FPNdrop(
                backbone_name=params.backbone,
                input_shape=(None, None, params.channels),
                encoder_weights=params.initial_weightns,
                freeze_encoder=True,
                dropout=params.dropout,
                **params.model_params)
        if params.model == 'Unet':
            model = segmentation_models.Unet(
                backbone_name=params.backbone,
                input_shape=(None, None, params.channels),
                encoder_weights=params.initial_weightns,
                freeze_encoder=True,
                **params.model_params)
        if params.model == 'Linknet':
            model = segmentation_models.Linknet(
                backbone_name=params.backbone,
                input_shape=(None, None, params.channels),
                encoder_weights=params.initial_weightns,
                freeze_encoder=True,
                **params.model_params)
        if params.model == 'divrikwicky':
            model = keras_unet_divrikwicky_model.CreateModel(
                params.nn_image_size, **params.model_params)
            params.backbone = ''
        assert model

    for l in model.layers:
        if isinstance(
                l, segmentation_models.fpn.layers.UpSampling2D) or isinstance(
                    l, keras.layers.UpSampling2D):
            print(l)
            if hasattr(l, 'interpolation'):
                print(l.interpolation)
                if hasattr(params, 'model_params'
                           ) and 'interpolation' in params.model_params:
                    l.interpolation = params.model_params['interpolation']
            else:
                print('qq')

    if hasattr(params,
               'kernel_constraint_norm') and params.kernel_constraint_norm:
        for l in model.layers:
            if hasattr(l, 'kernel_constraint'):
                print('kernel_constraint for ', l, ' is set to ',
                      params.kernel_constraint_norm)
                l.kernel_constraint = keras.constraints.get(
                    keras.constraints.max_norm(params.kernel_constraint_norm))

    # In[ ]:

    model_out_file = model_name_template.format(
        lrf=params.ReduceLROnPlateau['factor'],
        metric=params.monitor_metric[0],
        CC='CC' if params.coord_conv else '',
        **vars(params)) + '_f{test_fold_no}_{phash}'.format(
            test_fold_no=params.test_fold_no, phash=params_hash())
    now = datetime.datetime.now()
    print('model:   ' + model_out_file + '    started at ' +
          now.strftime("%Y.%m.%d %H:%M:%S"))

    assert not os.path.exists(model_out_file + '.model')

    params_save(model_out_file, verbose=True)
    log_out_file = model_out_file + '.log.csv'

    # In[ ]:

    #model = load_model(model1_file, ) #, 'lavazs_loss': lavazs_loss

    # # Train

    # In[ ]:

    optimizer = params.optimizer
    if optimizer == 'adam':
        optimizer = keras.optimizers.adam(**params.optimizer_params)
    elif optimizer == 'sgd':
        optimizer = keras.optimizers.sgd(**params.optimizer_params)

    model.compile(loss="binary_crossentropy",
                  optimizer=optimizer,
                  metrics=["acc", my_iou_metric])  #, my_iou_metric

    # In[ ]:

    if params.coord_conv:
        mean = ((0, mean_val, 0), (1, mean_std, 1))
    else:
        mean = (mean_val, mean_std)

    train_gen = AlbuDataGenerator(train_images,
                                  train_masks,
                                  batch_size=params.batch_size,
                                  nn_image_size=params.nn_image_size,
                                  mode=params.train_augmentation_mode,
                                  shuffle=True,
                                  params=params,
                                  mean=mean)
    val_gen = AlbuDataGenerator(validate_images,
                                validate_masks,
                                batch_size=params.test_batch_size,
                                nn_image_size=params.nn_image_size,
                                mode=params.test_augmentation_mode,
                                shuffle=False,
                                params=params,
                                mean=mean)

    # In[ ]:

    sys.path.append('../3rd_party/keras-tqdm')
    from keras_tqdm import TQDMCallback, TQDMNotebookCallback

    # In[ ]:

    start_t = time.clock()

    if params.epochs_warmup:
        history = model.fit_generator(
            train_gen,
            validation_data=None,
            epochs=params.epochs_warmup,
            callbacks=[TQDMNotebookCallback(leave_inner=True)],
            validation_steps=None,
            workers=5,
            use_multiprocessing=False,
            verbose=0)

    set_trainable(model)
    batches_per_epoch = len(train_images) // params.batch_size
    print("batches per epoch: ", batches_per_epoch)
    test_epochs = 30
    steps = test_epochs * batches_per_epoch
    val_period = steps // 1000
    print("steps: ", steps, " val_period", val_period)

    lr_sheduler = EvalLrTest(log_out_file,
                             val_gen,
                             val_period=val_period,
                             steps=steps)

    history = model.fit_generator(
        train_gen,
        validation_data=None,
        epochs=params.epochs,
        initial_epoch=params.epochs_warmup,
        callbacks=[TQDMNotebookCallback(leave_inner=True), lr_sheduler],
        validation_steps=None,
        workers=5,
        use_multiprocessing=False,
        verbose=0)

    # In[ ]:

    print(params_str())
    print('done:   ' + model_out_file)
    print('elapsed: {}s ({}s/iter)'.format(
        time.clock() - start_t, (time.clock() - start_t) / len(history.epoch)))

    return model
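RunTest expects a params object exposing the attributes referenced throughout the function; a hedged sketch of such a holder (all values are illustrative, and the misspelled initial_weightns is kept because the script reads it under that name):

class params:  # illustrative values only
    model = 'Unet'              # 'FNN' (FPN), 'FNNdrop', 'Unet', 'Linknet' or 'divrikwicky'
    backbone = 'resnet34'
    initial_weightns = 'imagenet'
    channels = 3
    dropout = 0.2
    model_params = {}
    load_model_from = None
    seed = 1
    coord_conv = False
    optimizer = 'adam'
    optimizer_params = {'lr': 1e-4}
    batch_size = 16
    test_batch_size = 32
    nn_image_size = 128
    augmented_image_size = 128  # used only in the model-name template
    padded_image_size = 128
    train_augmentation_mode = 'basic'
    test_augmentation_mode = 'none'
    epochs_warmup = 2
    epochs = 50
    test_fold_no = 0
    monitor_metric = ('my_iou_metric', 'max')
    ReduceLROnPlateau = {'factor': 0.5}
    kernel_constraint_norm = None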
Code Example #17
# models pre-loaded for faster execution.
print("Loading Models. This might take some time...")
modelUnet = sm.Unet(BACKBONE, classes=n_classes, activation=activation)
model_c = config.STYLES["unet"]
model_path = os.path.join(f"{config.MODEL_PATH}", f"{model_c}.h5")
modelUnet.load_weights(model_path)
print("Loaded Unet.")

modelFPN = sm.FPN(BACKBONE, classes=n_classes, activation=activation)
model_c = config.STYLES["featurepyramidnetwork"]
model_path = os.path.join(f"{config.MODEL_PATH}", f"{model_c}.h5")
modelFPN.load_weights(model_path)
print("Loaded FPN.")

modelLinknet = sm.Linknet(BACKBONE, classes=n_classes, activation=activation)
model_c = config.STYLES["linknet"]
model_path = os.path.join(f"{config.MODEL_PATH}", f"{model_c}.h5")
modelLinknet.load_weights(model_path)
print("Loaded Linknet.")


# below is the part of the pipeline used for training and preprocessing
# TODO: replace this pipeline with a custom one for faster inference.
# helper function for data visualization
def visualize(**images):
    """PLot images in one row."""
    n = len(images)
    plt.figure(figsize=(16, 5))
    for i, (name, image) in enumerate(images.items()):
        plt.subplot(1, n, i + 1)
Code Example #18
        target_index.append(index)
    else:
        continue
target_index = np.asarray(target_index)
X_tst = X_tst[target_index]
Y_tst = Y_tst[target_index]
print("there are {} images with target mask in testset".format(len(Y_tst)))

# model construction (keras + sm)
if model_id == 0:
    model = sm.Unet(classes=1,
                    activation='sigmoid',
                    encoder_weights='imagenet')
elif model_id == 1:
    model = sm.Linknet(classes=1,
                       activation='sigmoid',
                       encoder_weights='imagenet')
elif model_id == 2:
    model = sm.FPN(classes=1, activation='sigmoid', encoder_weights='imagenet')
elif model_id == 3:
    model = sm.PSPNet(classes=1,
                      activation='sigmoid',
                      encoder_weights='imagenet')  # input size must be 384x384

data_gen_args = dict(rotation_range=360,
                     width_shift_range=0.15,
                     height_shift_range=0.15,
                     zoom_range=0.15,
                     brightness_range=[0.7, 1.3],
                     horizontal_flip=True,
                     vertical_flip=False,
Code Example #19
 def train(self,
           model_name='unet',
           backbone='resnet50',
           fine_tune=False,
           model_path=None,
           opt='adam',
           lr=0.001,
           shape=(256, 256)):
     os.makedirs('saved_models/segmentation', exist_ok=True)
     if fine_tune: lr = lr / 10
     opt_dict = {'adam': Adam(lr), 'sgd': SGD(lr), 'adadelta': Adadelta(lr)}
     if fine_tune and model_path:
         # str.strip removes a set of characters, not a suffix; use splitext to drop '.h5'
         new_name = os.path.splitext(model_path)[0] + '_fine-tune_{}.h5'.format(opt)
         model = keras.models.load_model(model_path, compile=False)
         model.compile(
             optimizer=opt_dict[opt.lower()],
             loss=sm.losses.bce_jaccard_loss,
             metrics=['acc', sm.metrics.iou_score, sm.metrics.f1_score])
         model.summary()
         model.fit_generator(
             generator=self.data_loader.generator(is_train=True,
                                                  shape=shape,
                                                  shrink=self.shrink),
             steps_per_epoch=self.data_loader.train_steps,
             validation_data=self.data_loader.generator(is_train=False,
                                                        shape=shape,
                                                        shrink=self.shrink),
             validation_steps=self.data_loader.val_steps,
             verbose=1,
             initial_epoch=0,
             epochs=300,
             callbacks=[
                 keras.callbacks.TensorBoard('logs'),
                 keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                   patience=7,
                                                   verbose=1),
                 keras.callbacks.EarlyStopping(monitor='val_loss',
                                               patience=38,
                                               verbose=1),
                 keras.callbacks.ModelCheckpoint(
                     monitor='val_loss',
                     verbose=1,
                     save_weights_only=False,
                     save_best_only=True,
                     filepath='saved_models/segmentation/' + new_name)
             ])
     else:
         if model_name.lower() == 'unet':
             model = sm.Unet(backbone,
                             encoder_weights='imagenet',
                             activation='sigmoid',
                             classes=1,
                             input_shape=(shape[0], shape[1], 3),
                             decoder_use_batchnorm=True)
         elif model_name.lower() == 'pspnet':
             model = sm.PSPNet(backbone,
                               encoder_weights='imagenet',
                               activation='sigmoid',
                               classes=1,
                               input_shape=(shape[0], shape[1], 3))
         elif model_name.lower() == 'fpn':
             model = sm.FPN(backbone,
                            encoder_weights='imagenet',
                            activation='sigmoid',
                            classes=1,
                            input_shape=(shape[0], shape[1], 3))
         elif model_name.lower() == 'linknet':
             model = sm.Linknet(backbone,
                                encoder_weights='imagenet',
                                activation='sigmoid',
                                classes=1,
                                input_shape=(shape[0], shape[1], 3))
         else:
             raise NotImplementedError
         model.compile(
             optimizer=opt_dict[opt.lower()],
             loss=sm.losses.bce_jaccard_loss,
             metrics=['acc', sm.metrics.iou_score, sm.metrics.f1_score])
         model.summary()
         name_list = [
             model_name, backbone, opt, 'init-training',
             'none' if shape is None else str(shape[0]) + 'x' +
             str(shape[1])
         ]
         if self.shrink is None:
             name_list.append('without-shrink')
         new_name = '_'.join(name_list) + '.h5'
         model.fit_generator(
             generator=self.data_loader.generator(is_train=True,
                                                  shape=shape,
                                                  shrink=self.shrink),
             steps_per_epoch=self.data_loader.train_steps,
             validation_data=self.data_loader.generator(is_train=False,
                                                        shape=shape,
                                                        shrink=self.shrink),
             validation_steps=self.data_loader.val_steps,
             verbose=1,
             initial_epoch=0,
             epochs=300,
             callbacks=[
                 keras.callbacks.TensorBoard('logs'),
                 keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                   patience=7,
                                                   verbose=1),
                 keras.callbacks.EarlyStopping(monitor='val_loss',
                                               patience=38,
                                               verbose=1),
                 keras.callbacks.ModelCheckpoint(
                     monitor='val_loss',
                     verbose=1,
                     save_weights_only=False,
                     save_best_only=True,
                     filepath='saved_models/segmentation/' + new_name)
             ])
Code Example #20
 def create_model(self):
     return sm.Linknet(self._backbone,
                       activation="softmax",
                       classes=self._data.get_n_classes(),
                       encoder_weights=self._encoder_weights,
                       input_shape=self._input_shape)
Code Example #21
    def Setup(self):
        '''
        User function: Setup all the parameters

        Args:
            None

        Returns:
            None
        '''
        preprocess_input = sm.get_preprocessing(
            self.system_dict["params"]["backbone"])
        # define network parameters
        self.system_dict["local"]["n_classes"] = 1 if len(
            self.system_dict["dataset"]["train"]
            ["classes_to_train"]) == 1 else (
                len(self.system_dict["dataset"]["train"]["classes_to_train"]) +
                1)  # case for binary and multiclass segmentation
        activation = 'sigmoid' if self.system_dict["local"][
            "n_classes"] == 1 else 'softmax'

        #create model
        if (self.system_dict["params"]["model"] == "Unet"):
            self.system_dict["local"]["model"] = sm.Unet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "FPN"):
            self.system_dict["local"]["model"] = sm.FPN(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "Linknet"):
            self.system_dict["local"]["model"] = sm.Linknet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        elif (self.system_dict["params"]["model"] == "PSPNet"):
            self.system_dict["local"]["model"] = sm.PSPNet(
                self.system_dict["params"]["backbone"],
                classes=self.system_dict["local"]["n_classes"],
                activation=activation)
        else:
            raise ValueError(
                "Unknown model: " + self.system_dict["params"]["model"])

        # define optimizer
        optim = keras.optimizers.Adam(self.system_dict["params"]["lr"])

        # segmentation_models losses can be combined with '+' and scaled by an integer or float factor
        dice_loss = sm.losses.DiceLoss()
        focal_loss = sm.losses.BinaryFocalLoss() if self.system_dict["local"][
            "n_classes"] == 1 else sm.losses.CategoricalFocalLoss()
        total_loss = dice_loss + (1 * focal_loss)

        # actually, total_loss can be imported directly from the library; the example above just shows how to combine losses manually
        # total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss

        metrics = [
            sm.metrics.IOUScore(threshold=0.5),
            sm.metrics.FScore(threshold=0.5)
        ]

        # compile keras model with defined optimizer, loss and metrics
        self.system_dict["local"]["model"].compile(optim, total_loss, metrics)

        # Dataset for train images
        train_dataset = Dataset(
            self.system_dict["dataset"]["train"]["img_dir"],
            self.system_dict["dataset"]["train"]["mask_dir"],
            self.system_dict["dataset"]["train"]["classes_dict"],
            classes_to_train=self.system_dict["dataset"]["train"]
            ["classes_to_train"],
            augmentation=get_training_augmentation(),
            preprocessing=get_preprocessing(preprocess_input),
        )

        if (self.system_dict["params"]["image_shape"][0] % 32 != 0):
            self.system_dict["params"]["image_shape"][0] += (
                32 - self.system_dict["params"]["image_shape"][0] % 32)

        if (self.system_dict["params"]["image_shape"][1] % 32 != 0):
            self.system_dict["params"]["image_shape"][1] += (
                32 - self.system_dict["params"]["image_shape"][1] % 32)

        # Dataset for validation images
        if (self.system_dict["dataset"]["val"]["status"]):
            valid_dataset = Dataset(
                self.system_dict["dataset"]["val"]["img_dir"],
                self.system_dict["dataset"]["val"]["mask_dir"],
                self.system_dict["dataset"]["train"]["classes_dict"],
                classes_to_train=self.system_dict["dataset"]["train"]
                ["classes_to_train"],
                augmentation=get_validation_augmentation(
                    self.system_dict["params"]["image_shape"][0],
                    self.system_dict["params"]["image_shape"][1]),
                preprocessing=get_preprocessing(preprocess_input),
            )
        else:
            valid_dataset = Dataset(
                self.system_dict["dataset"]["train"]["img_dir"],
                self.system_dict["dataset"]["train"]["mask_dir"],
                self.system_dict["dataset"]["train"]["classes_dict"],
                classes_to_train=self.system_dict["dataset"]["train"]
                ["classes_to_train"],
                augmentation=get_validation_augmentation(
                    self.system_dict["params"]["image_shape"][0],
                    self.system_dict["params"]["image_shape"][1]),
                preprocessing=get_preprocessing(preprocess_input),
            )

        self.system_dict["local"]["train_dataloader"] = Dataloder(
            train_dataset,
            batch_size=self.system_dict["params"]["batch_size"],
            shuffle=True)
        self.system_dict["local"]["valid_dataloader"] = Dataloder(
            valid_dataset, batch_size=1, shuffle=False)
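Setup only compiles the model and builds the dataloaders; a hedged sketch of the training step that would typically follow (the method name, epoch count, and fit call are assumptions, written as a hypothetical companion method of the same class):

    def Train(self, epochs=40):
        '''Hypothetical continuation: fit the compiled model on the dataloaders built in Setup.'''
        self.system_dict["local"]["model"].fit_generator(
            self.system_dict["local"]["train_dataloader"],
            steps_per_epoch=len(self.system_dict["local"]["train_dataloader"]),
            epochs=epochs,
            validation_data=self.system_dict["local"]["valid_dataloader"])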
Code Example #22
File: train.py Project: scrssys/SCRS_RS_AI
                          input_shape=input_layer,
                          classes=config.nb_classes,
                          activation=config.activation,
                          encoder_weights=config.encoder_weights,
                          psp_dropout=config.dropout)
    elif 'fpn' in config.network:
        model = sm.FPN(backbone_name=config.BACKBONE,
                       input_shape=input_layer,
                       classes=config.nb_classes,
                       activation=config.activation,
                       encoder_weights=config.encoder_weights,
                       pyramid_dropout=config.dropout)
    elif 'linknet' in config.network:
        model = sm.Linknet(backbone_name=config.BACKBONE,
                           input_shape=input_layer,
                           classes=config.nb_classes,
                           activation=config.activation,
                           encoder_weights=config.encoder_weights)
    elif 'deeplabv3plus' in config.network:
        model = Deeplabv3(weights=config.encoder_weights,
                          input_shape=input_layer,
                          classes=config.nb_classes,
                          backbone=config.BACKBONE,
                          activation=config.activation)

    else:
        print("Error: unknown network type {}".format(config.network))

    print(model.summary())
    print("Train by : {}_{}".format(config.network, config.BACKBONE))
    #