Example #1
import segmentation_models as sm
from segmentation_models import Linknet


def load_model(path, input_shape, num_classes=4, backbone='resnet18'):

    model = Linknet(backbone_name=backbone,
                    input_shape=input_shape,
                    classes=num_classes,
                    activation='softmax')

    model.load_weights(path)

    return model, sm.get_preprocessing(backbone)
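A minimal usage sketch for this helper; the weights path and image size are illustrative assumptions, not part of the original snippet:

import numpy as np

model, preprocess_input = load_model('weights/clothes.hdf5', (256, 256, 3))
x = preprocess_input(np.random.randint(0, 255, (1, 256, 256, 3)).astype('float32'))
y_pred = model.predict(x)  # (1, 256, 256, 4) per-pixel softmax scores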
Example #2
from segmentation_models import Unet, FPN, Linknet, PSPNet


def define_model(architecture='Unet', BACKBONE='resnet34', input_shape=(None, None, 4), encoder_weights=None):
    print('In define_model function')
    if architecture == 'Unet':
        model = Unet(BACKBONE, classes=3, activation='softmax', encoder_weights=encoder_weights, input_shape=input_shape)
        print('Unet model defined')
    elif architecture == 'FPN':
        model = FPN(BACKBONE, classes=3, activation='softmax', encoder_weights=encoder_weights, input_shape=input_shape)
        print('FPN model defined')
    elif architecture == 'Linknet':
        model = Linknet(BACKBONE, classes=3, activation='softmax', encoder_weights=encoder_weights, input_shape=input_shape)
        print('Linknet model defined')
    elif architecture == 'PSPNet':
        model = PSPNet(BACKBONE, classes=3, activation='softmax', encoder_weights=encoder_weights, input_shape=input_shape)
        print('PSPNet model defined')
    else:
        raise ValueError(f'Unsupported architecture: {architecture}')
    return model
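Since the four branches differ only in the model class, the dispatch collapses to a lookup table; a behavior-equivalent sketch (note that PSPNet, unlike the other decoders, additionally requires a fixed input size):

from segmentation_models import Unet, FPN, Linknet, PSPNet

ARCHITECTURES = {'Unet': Unet, 'FPN': FPN, 'Linknet': Linknet, 'PSPNet': PSPNet}

def define_model_compact(architecture='Unet', backbone='resnet34',
                         input_shape=(None, None, 4), encoder_weights=None):
    if architecture not in ARCHITECTURES:
        raise ValueError(f'Unsupported architecture: {architecture}')
    return ARCHITECTURES[architecture](backbone, classes=3, activation='softmax',
                                       encoder_weights=encoder_weights,
                                       input_shape=input_shape)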
Example #3
def get_model(backbone, decoder_type, batch_norm_type, dropout=0.0):
    from segmentation_models import Unet, FPN, Linknet, PSPNet
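    # note: the dropout/norm_type/pyramid_dropout/psp_dropout kwargs used below
    # are not part of mainline segmentation_models; this snippet appears to
    # target a customized fork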

    inp_shape = (SHAPE_SIZE, SHAPE_SIZE, 3)
    classes = 2

    if decoder_type == 'Unet':
        model = Unet(backbone,
                     encoder_weights='imagenet',
                     input_shape=inp_shape,
                     classes=classes,
                     dropout=dropout,
                     norm_type=batch_norm_type,
                     activation='sigmoid')
    elif decoder_type == 'FPN':
        model = FPN(backbone,
                    encoder_weights='imagenet',
                    input_shape=inp_shape,
                    classes=classes,
                    pyramid_dropout=dropout,
                    norm_type=batch_norm_type,
                    activation='sigmoid')
    elif decoder_type == 'Linknet':
        model = Linknet(backbone,
                        encoder_weights='imagenet',
                        input_shape=inp_shape,
                        classes=classes,
                        dropout=dropout,
                        norm_type=batch_norm_type,
                        activation='sigmoid')
    elif decoder_type == 'PSPNet':
        model = PSPNet(backbone,
                       encoder_weights='imagenet',
                       input_shape=inp_shape,
                       classes=classes,
                       psp_dropout=dropout,
                       norm_type=batch_norm_type,
                       activation='sigmoid')
    else:
        raise ValueError(f'Unsupported decoder type: {decoder_type}')
    return model
Example #4
def build_pretrained_model(model_type,
                           backbone_name,
                           encoder_weights,
                           freeze_encoder,
                           activation='sigmoid'):
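    # note: freeze_encoder matches older segmentation_models releases;
    # current versions spell this kwarg encoder_freeze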
    if model_type == "Unet":
        return Unet(backbone_name=backbone_name,
                    encoder_weights=encoder_weights,
                    freeze_encoder=freeze_encoder,
                    activation=activation)
    elif model_type == "FPN":
        return FPN(backbone_name=backbone_name,
                   encoder_weights=encoder_weights,
                   freeze_encoder=freeze_encoder,
                   activation=activation)
    elif model_type == "Linknet":
        return Linknet(backbone_name=backbone_name,
                       encoder_weights=encoder_weights,
                       freeze_encoder=freeze_encoder,
                       activation=activation)
    else:
        print('Pretrained model type is not supported.')
        return None
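Freezing the encoder pairs naturally with two-stage training; a minimal sketch using the helper above (the random arrays and epoch counts are placeholder assumptions, and set_trainable recompiles the model when unfreezing):

import numpy as np
from segmentation_models.utils import set_trainable

x_train = np.random.rand(4, 224, 224, 3).astype('float32')   # placeholder data
y_train = np.random.randint(0, 2, (4, 224, 224, 1)).astype('float32')

model = build_pretrained_model('Unet', 'resnet34', 'imagenet', freeze_encoder=True)
model.compile('Adam', loss='binary_crossentropy')
model.fit(x_train, y_train, batch_size=2, epochs=2)   # warm-up with frozen encoder
set_trainable(model)                                  # unfreeze and recompile
model.fit(x_train, y_train, batch_size=2, epochs=10)  # fine-tune end to end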
Example #5
def get_model(architecture: str,
              input_width: int = None,
              input_height: int = None,
              nb_channels: int = 3,
              nb_classes: int = 1,
              activation: str = 'softmax',
              init_weights_with: str = 'imagenet',
              freeze: bool = False) -> keras.models.Model:
    """
    Get a model.
    
    Args:
        architecture (str): Architecture of the network to create
        input_width (int, optional): Width of the input images. Defaults to None.
        input_height (int, optional): Height of the input images. Defaults to None.
        nb_channels (int, optional): Nb of channels/bands of the input images. Defaults to 3.
        nb_classes (int, optional): Nb of classes to be segmented to. Defaults to 1.
        activation (str, optional): Activation function of the last layer. Defaults to 'softmax'.
        init_weights_with (str, optional): Weights to init the network with. Defaults to 'imagenet'.
        freeze (bool, optional): Freeze the encoder weights during
            training. It is useful to enable this for the first few
            epochs to get a more robust network. Defaults to False.
    
    Raises:
        Exception: If the architecture string cannot be parsed or the decoder is unknown.

    Returns:
        keras.models.Model: The created model.
    """
    # Check architecture
    segment_architecture_parts = architecture.split('+')
    if len(segment_architecture_parts) < 2:
        raise Exception(f"Unsupported architecture: {architecture}")
    encoder = segment_architecture_parts[0]
    decoder = segment_architecture_parts[1]

    if decoder.lower() == 'unet':
        # These two unet variants are implemented in a separate module
        if encoder.lower() == 'standard':
            logger.warning(
                f"Architecture {architecture} not tested in a long time, so use at own risk"
            )
            import orthoseg.model.model_unet_standard as m
            init_weights = init_weights_with is not None
            return m.get_model(input_width=input_width,
                               input_height=input_height,
                               nb_channels=nb_channels,
                               nb_classes=nb_classes,
                               init_model_weights=init_weights)
        elif encoder.lower() == 'ternaus':
            logger.warning(
                f"Architecture {architecture} not tested in a long time, so use at own risk"
            )
            import orthoseg.model.model_unet_ternaus as m
            init_weights = init_weights_with is not None
            return m.get_model(input_width=input_width,
                               input_height=input_height,
                               nb_channels=nb_channels,
                               nb_classes=nb_classes,
                               init_model_weights=init_weights)

        # Other unet variants are implemented using the segmentation_models library
        from segmentation_models import Unet
        #from segmentation_models.backbones import get_preprocessing

        model = Unet(backbone_name=encoder.lower(),
                     input_shape=(input_width, input_height, nb_channels),
                     classes=nb_classes,
                     activation=activation,
                     encoder_weights=init_weights_with,
                     encoder_freeze=freeze)
        return model
    elif decoder.lower() == 'pspnet':
        from segmentation_models import PSPNet
        #from segmentation_models.backbones import get_preprocessing

        model = PSPNet(backbone_name=encoder.lower(),
                       input_shape=(input_width, input_height, nb_channels),
                       classes=nb_classes,
                       activation=activation,
                       encoder_weights=init_weights_with,
                       encoder_freeze=freeze)
        return model
    elif decoder.lower() == 'linknet':
        from segmentation_models import Linknet
        #from segmentation_models.backbones import get_preprocessing

        # First check if input size is compatible with linknet
        if input_width is not None and input_height is not None:
            check_image_size(decoder, input_width, input_height)

        model = Linknet(backbone_name=encoder.lower(),
                        input_shape=(input_width, input_height, nb_channels),
                        classes=nb_classes,
                        activation=activation,
                        encoder_weights=init_weights_with,
                        encoder_freeze=freeze)
        return model
    else:
        raise Exception(f"Unknown decoder architecture: {decoder}")
Example #6
# preprocess input
# from segmentation_models.backbones import get_preprocessing
# preprocess_input = get_preprocessing(BACKBONE)
# x_train = preprocess_input(x_train)
# x_val = preprocess_input(x_val)

# define model: each call below builds a complete network and only the final
# assignment is kept, so pick one decoder and comment out the rest

# model = Unet(BACKBONE, classes=len(class_ids), encoder_weights='imagenet')
# model = FPN(BACKBONE, classes=len(class_ids), encoder_weights='imagenet')
# model = PSPNet(BACKBONE, classes=len(class_ids), encoder_weights='imagenet')
model = Linknet(BACKBONE, classes=len(class_ids), encoder_weights='imagenet')

model.compile('Adam', loss=bce_jaccard_loss, metrics=[iou_score])
model.summary()

modelCheckpoint = keras.callbacks.ModelCheckpoint(filepath='segmod_weights.{epoch:02d}-{val_loss:.4f}.hdf5',
                                                  monitor='val_loss',
                                                  verbose=0, save_best_only=False, save_weights_only=False,
                                                  mode='auto', period=1)
reduceLROnPlateau = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=7, verbose=1,
                                                      mode='auto', min_delta=0.001, cooldown=0, min_lr=1e-6)


model.fit_generator(generator=train_generator, steps_per_epoch=None, epochs=10, verbose=1,
                    callbacks=[reduceLROnPlateau, modelCheckpoint],
                    validation_data=val_generator, validation_steps=None, class_weight=None, max_queue_size=10)
Example #7
    if 'pspnet' in config.network:
        model = PSPNet(backbone_name=config.BACKBONE,
                       input_shape=input_layer,
                       classes=config.nb_classes,
                       activation=config.activation,
                       encoder_weights=config.encoder_weights)
    elif 'fpn' in config.network:
        model = FPN(backbone_name=config.BACKBONE,
                    input_shape=input_layer,
                    classes=config.nb_classes,
                    activation=config.activation,
                    encoder_weights=config.encoder_weights)
    elif 'linknet' in config.network:
        model = Linknet(backbone_name=config.BACKBONE,
                        input_shape=input_layer,
                        classes=config.nb_classes,
                        activation=config.activation,
                        encoder_weights=config.encoder_weights)
    else:
        raise ValueError('Unsupported network: {}'.format(config.network))

    model.summary()
    print("Train by : {}_{}".format(config.network, config.BACKBONE))

    # sys.exit(-1)
    train(model)

    if FLAG_MAKE_TEST:
        print("test ....................predict by trained model .....\n")
        test_img_path = '../../data/test/sample1.png'
        import sys
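Example #8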
#                if N == 3:
#                    model = PSPNet(BACKBONE, input_shape=(size_t, size_t, 3), classes=3, activation='softmax', encoder_weights='imagenet', encoder_freeze=False)
#                else:
#                    base_model = PSPNet(BACKBONE, input_shape=(size_t, size_t, 3), classes=3, activation='softmax', encoder_weights='imagenet', encoder_freeze=False)
#                    inp = Input(shape=(size_t, size_t, N))
#                    bn = BatchNormalization()(inp)
#                    l1 = Conv2D(3, (1, 1))(bn) # map N channels data to 3 channels
#                    out = base_model(l1)
#                    model = Model(inp, out, name=base_model.name)

            elif k_mod == "Linknet":
                # N = x_train.shape[-1]
                if N == 3:
                    model = Linknet(BACKBONE,
                                    input_shape=(size, size, 3),
                                    classes=3,
                                    activation='softmax',
                                    encoder_weights='imagenet',
                                    encoder_freeze=False)
                else:
                    base_model = Linknet(BACKBONE,
                                         input_shape=(size, size, 3),
                                         classes=3,
                                         activation='softmax',
                                         encoder_weights='imagenet',
                                         encoder_freeze=False)
                    inp = Input(shape=(size, size, N))
                    bn = BatchNormalization()(inp)
                    l1 = Conv2D(3, (1, 1))(bn)  # map N-channel data to 3 channels
                    out = base_model(l1)
                    model = Model(inp, out, name=base_model.name)
Example #9
    callbacks_list = [
        ModelCheckpoint('models/linknet_gray' + str(BATCH) + '_batch.h5',
                        verbose=1,
                        save_best_only=True,
                        mode='min',
                        save_weights_only=True),
        TensorBoard(log_dir='./logs',
                    batch_size=BATCH,
                    write_images=True),
        ReduceLROnPlateau(verbose=1, factor=0.25, patience=3, min_lr=1e-6)
    ]

    model = Linknet(
        backbone_name='mobilenetv2',
        input_shape=(HEIGHT, WIDTH, 3),
        activation='sigmoid',
        decoder_block_type='transpose',
        encoder_weights='imagenet',
        decoder_use_batchnorm=True
    )

    model.summary()
    model.compile(optimizer=Adadelta(1e-3), loss=loss, metrics=[dice_score, jaccard_score])

    model_json = model.to_json()
    with open('models/linknet_gray' + str(BATCH) + '_batch.json', 'w') as json_file:
        json_file.write(model_json)
    print('Model saved!')

    model.fit_generator(
        my_generator(train_images, train_masks, BATCH),
Example #10
from segmentation_models import Linknet, get_preprocessing
from segmentation_models.losses import bce_jaccard_loss
from segmentation_models.metrics import iou_score
from tensorflow.keras.datasets import mnist

BACKBONE = 'resnet34'
preprocess_input = get_preprocessing(BACKBONE)

# load your data
# note: MNIST images are 28x28 grayscale digits with scalar labels, which does
# not match the (224, 224, 6) input and 9-channel mask output declared below;
# see the corrected sketch after this example
(x_train, y_train), (x_val, y_val) = mnist.load_data()

# preprocess input
x_train = preprocess_input(x_train)
x_val = preprocess_input(x_val)

# define model
model = Linknet(BACKBONE,
                input_shape=(224, 224, 6),
                classes=9,
                encoder_weights=None)
model.compile('Adam', loss=bce_jaccard_loss, metrics=[iou_score])

# fit model
model.fit(
    x=x_train,
    y=y_train,
    batch_size=16,
    epochs=100,
    validation_data=(x_val, y_val),
)
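The snippet above declares a 6-channel, 9-class Linknet but feeds it raw MNIST digits, so it will not run as written. A shape-consistent sketch with random stand-in data (purely illustrative):

import numpy as np
from segmentation_models import Linknet
from segmentation_models.losses import bce_jaccard_loss
from segmentation_models.metrics import iou_score

BACKBONE = 'resnet34'
# random stand-in data: 8 six-channel 224x224 images with 9-class binary masks
x_train = np.random.rand(8, 224, 224, 6).astype('float32')
y_train = np.random.randint(0, 2, (8, 224, 224, 9)).astype('float32')

model = Linknet(BACKBONE, input_shape=(224, 224, 6), classes=9, encoder_weights=None)
model.compile('Adam', loss=bce_jaccard_loss, metrics=[iou_score])
model.fit(x=x_train, y=y_train, batch_size=2, epochs=1)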
Example #11
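    # note: freeze_encoder below matches older segmentation_models releases;
    # current versions spell this kwarg encoder_freeze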
    if architecture == 'PSP':
        model = PSPNet(backbone, input_shape=dim_image, classes=number_of_classes, encoder_weights='imagenet', activation='softmax', freeze_encoder=freeze_encoder)
    elif architecture == 'FPN':
        model = FPN(backbone, input_shape=dim_image, classes=number_of_classes, encoder_weights='imagenet', activation='softmax', freeze_encoder=freeze_encoder)
    else:
        raise ValueError('Unsupported architecture: ' + architecture)
    model.compile('Adam', loss='categorical_crossentropy', metrics=['categorical_accuracy'])
    model.summary()
else:
    assert 1 == number_of_classes
    if architecture == 'PSP':
        model = PSPNet(backbone, input_shape=dim_image, classes=1, encoder_weights='imagenet', activation='sigmoid', freeze_encoder=freeze_encoder)
    elif architecture == 'FPN':
        model = FPN(backbone, input_shape=dim_image, classes=1, encoder_weights='imagenet', activation='sigmoid', freeze_encoder=freeze_encoder)
    elif architecture == 'Linknet':
        model = Linknet(backbone, input_shape=dim_image, classes=1, encoder_weights='imagenet', activation='sigmoid', freeze_encoder=freeze_encoder)
    elif architecture == 'Unet':
        model = Unet(backbone, input_shape=dim_image, classes=1, encoder_weights='imagenet', activation='sigmoid', freeze_encoder=freeze_encoder)
    else:
        raise ValueError('Unsupported architecture: ' + architecture)
    # model.compile('Adam', loss='binary_crossentropy', metrics=['binary_accuracy'])
    # note: 'jaccard_distance_l' and 'iou_score' are not built-in Keras
    # identifiers; they must be registered custom objects or imported callables
    model.compile('Adam', loss='jaccard_distance_l', metrics=['iou_score'])
    model.summary()

if os.name == 'nt':
    logger.info(f'GPU=({gpu_id})  Architecture={architecture}  Backbone={backbone}  dim_image={dim_image}  '
                f'batch_size/baseline_batch_size=({batch_size}/{baseline_batch_size})  '
                f'model_checkpoint_prefix=({model_checkpoint_prefix})  '
                f'use coco2017 precompiled dataset={precompiled}  #_threads={nb_threads}  '
                f'models_directory={model_dir}  dataset={dataset}')

    model.fit_generator(generator=train_generator, steps_per_epoch=None, epochs=nb_epoch, verbose=1, callbacks=None,
                        validation_data=val_generator, validation_steps=None, class_weight=None, max_queue_size=10)
Example #12
        for (key, value) in dct_data3.items()
    }
    channels3 = preprocess.get_channels(dct3, order_of_chanel)
    del dct3
lst_channels = channels1 + channels2 + channels3
number_sample = len(channels1[0])
del channels1
del channels2
del channels3
masks_lst = preprocess.get_masks(dct_mask)
del dct_mask
print("Data preparing: Done!")

model = Linknet('resnet50',
                classes=1,
                activation='sigmoid',
                input_shape=(sample_size, sample_size, number_chanels),
                encoder_weights=None)
model.compile(
    optimizer='Adam',
    loss='binary_crossentropy',
    metrics=['accuracy',
             keras_metrics.precision(),
             keras_metrics.recall()])
model.summary()

callbacksList = [
    EarlyStopping(patience=10, verbose=1),
    ModelCheckpoint(
        'model_segmentation.h5',
        verbose=1)
]
Example #13
    def train(self):

        seed = 42
        # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
        # config = tf.ConfigProto(gpu_options=gpu_options)
        session_config = tf.ConfigProto(gpu_options=tf.GPUOptions(
            allow_growth=True, per_process_gpu_memory_fraction=0.6))
        session = tf.Session(config=session_config)

        from segmentation_models import get_preprocessing

        self.processer = get_preprocessing(self.backbone)

        X_train = self.processer(self.X_train)

        Y_train = self.Y_train

        print('Done!')

        from tensorflow.keras.preprocessing import image

        # Creating the training Image and Mask generator
        image_datagen = image.ImageDataGenerator(shear_range=0.5,
                                                 rotation_range=50,
                                                 zoom_range=0.2,
                                                 width_shift_range=0.2,
                                                 height_shift_range=0.2,
                                                 fill_mode='reflect')
        mask_datagen = image.ImageDataGenerator(shear_range=0.5,
                                                rotation_range=50,
                                                zoom_range=0.2,
                                                width_shift_range=0.2,
                                                height_shift_range=0.2,
                                                fill_mode='reflect')

        # Keep the same seed for image and mask generators so they fit together

        image_datagen.fit(X_train[:int(X_train.shape[0] * 0.9)],
                          augment=True,
                          seed=seed)
        mask_datagen.fit(Y_train[:int(Y_train.shape[0] * 0.9)],
                         augment=True,
                         seed=seed)

        x = image_datagen.flow(X_train[:int(X_train.shape[0] * 0.9)],
                               batch_size=self.BATCH_SIZE,
                               shuffle=True,
                               seed=seed)
        y = mask_datagen.flow(Y_train[:int(Y_train.shape[0] * 0.9)],
                              batch_size=self.BATCH_SIZE,
                              shuffle=True,
                              seed=seed)

        # Creating the validation Image and Mask generator
        image_datagen_val = image.ImageDataGenerator()
        mask_datagen_val = image.ImageDataGenerator()

        image_datagen_val.fit(X_train[int(X_train.shape[0] * 0.9):],
                              augment=True,
                              seed=seed)
        mask_datagen_val.fit(Y_train[int(Y_train.shape[0] * 0.9):],
                             augment=True,
                             seed=seed)

        x_val = image_datagen_val.flow(X_train[int(X_train.shape[0] * 0.9):],
                                       batch_size=self.BATCH_SIZE,
                                       shuffle=True,
                                       seed=seed)
        y_val = mask_datagen_val.flow(Y_train[int(Y_train.shape[0] * 0.9):],
                                      batch_size=self.BATCH_SIZE,
                                      shuffle=True,
                                      seed=seed)

        train_generator = zip(x, y)
        val_generator = zip(x_val, y_val)

        from segmentation_models import Unet, PSPNet, Linknet, FPN
        from segmentation_models.losses import CategoricalFocalLoss
        from segmentation_models.utils import set_trainable
        import segmentation_models
        from tensorflow.keras.optimizers import RMSprop, SGD
        #model = self.model(self.IMG_HEIGHT,self.IMG_WIDTH,self.IMG_CHANNELS)

        if self.architecture == 'Linknet':

            self.model = Linknet(self.backbone,
                                 classes=self.num_bodyparts + 1,
                                 activation='softmax',
                                 encoder_weights=self.image_net,
                                 input_shape=(self.IMG_WIDTH, self.IMG_HEIGHT,
                                              self.IMG_CHANNELS))

        elif self.architecture == 'unet':

            self.model = Unet(self.backbone,
                              classes=self.num_bodyparts + 1,
                              activation='softmax',
                              encoder_weights=self.image_net,
                              input_shape=(self.IMG_WIDTH, self.IMG_HEIGHT,
                                           self.IMG_CHANNELS))
        elif self.architecture == 'PSPnet':
            self.model = PSPNet(self.backbone,
                                classes=self.num_bodyparts + 1,
                                activation='softmax',
                                encoder_weights=self.image_net,
                                input_shape=(self.IMG_WIDTH, self.IMG_HEIGHT,
                                             self.IMG_CHANNELS))

        elif self.architecture == 'FPN':
            self.model = FPN(self.backbone,
                             classes=self.num_bodyparts + 1,
                             activation='softmax',
                             encoder_weights=self.image_net,
                             input_shape=(self.IMG_WIDTH, self.IMG_HEIGHT,
                                          self.IMG_CHANNELS))

        weights = np.zeros((1, self.num_bodyparts + 1), dtype=float)
        weight = 1.0 / self.num_bodyparts

        num_zeros = 1
        # while (weight * 100 < 1):
        #     weight = weight * 100
        #     num_zeros += 1
        #
        # weight = int(weight * 100) / np.power(100, num_zeros)
        weights[0, 1:] = weight
        weights[0, 0] = 0.01 * len(self.bodyparts)

        while weights[0, 0] > weights[0, 1]:
            weights[0, 0] = weights[0, 0] / 10
            num_zeros += 1

        for i in range(1, len(self.bodyparts) + 1):
            weights[0, i] = weights[0, i] - 10**-(num_zeros + 1)

        if self.loss_function == "Weighted Categorical_cross_entropy":
            loss = self.weighted_categorical_crossentropy(weights)
        else:
            loss = segmentation_models.losses.DiceLoss(class_weights=weights)
        metric = segmentation_models.metrics.IOUScore(class_weights=weights,
                                                      per_image=True)
        self.model.compile(optimizer=RMSprop(lr=self.learning_rate),
                           loss=loss,
                           metrics=[metric])
        earlystopper = EarlyStopping(patience=6, verbose=1)
        #
        checkpointer = ModelCheckpoint(os.path.join(self.address, 'Unet.h5'),
                                       verbose=1,
                                       save_best_only=True)
        reduce_lr = keras.callbacks.LearningRateScheduler(self.lr_scheduler)

        #
        # model.fit_generator(train_generator, validation_data=val_generator, validation_steps=10, steps_per_epoch=50,
        #                                epochs=2, callbacks=[earlystopper, checkpointer],verbose=1)
        # model.load_weights(self.address + 'Temp_weights.h5')

        # set_trainable(model)
        #
        self.model.fit_generator(
            train_generator,
            validation_data=val_generator,
            steps_per_epoch=20,
            validation_steps=5,
            epochs=100,
            callbacks=[earlystopper, checkpointer, reduce_lr],
            verbose=1)
Example #14
    st = stats[:, -1][stats[:, -1] > area_threshold]  # drop areas smaller than threshold

    if nb_components == 1 or len(st) < 2:
        return None, None, None

    if num_blobs <= len(st) - 1:
        n = num_blobs + 1
    else:
        n = len(st)

    blob_index = np.argsort(stats[:, -1])[-n:-1]

    return output, blob_index[::-1], centroids[blob_index[::-1]]

# In[]:
model = Linknet(backbone_name=backbone, input_shape=input_shape, classes=num_classes, activation='softmax')
model.load_weights('weights/clothes.hdf5')

# In[]:
preprocessing_fn = sm.get_preprocessing(backbone)

img_path = "2.jpg"
x = get_image(img_path)
y_pred = np.squeeze(model.predict(np.expand_dims(preprocessing_fn(x), axis=0)))

#y_tshirt = y_pred[0,...,1] > conf
#y_dress = y_pred[0,...,2] > conf
#y_pants = y_pred[0,...,3] > conf
#plt.imshow(y_tshirt)

# In[]: Find contours
Example #15
    train_images_path = 'E:/datasets/parking/images'
    train_masks_path = 'E:/datasets/parking/masks'
    image_h = 288
    image_w = 288

    x_data = prepare_images(train_images_path)
    y_data = prepare_masks(train_masks_path)

    x_train, x_val, y_train, y_val = train_test_split(x_data,
                                                      y_data,
                                                      test_size=0.2,
                                                      random_state=SEED)

    model = Linknet(backbone_name='mobilenetv2',
                    input_shape=(image_h, image_w, 3),
                    encoder_weights='imagenet',
                    decoder_block_type='transpose',
                    activation='sigmoid')
    model.summary()

    callbacks_list = [
        ReduceLROnPlateau(monitor='val_loss',
                          factor=0.25,
                          patience=1,
                          min_lr=1e-6)
    ]

    # model.load_weights('../weights/resnet34_RLE_72_loss.h5')

    model.compile(optimizer=Adam(1e-4),
                  loss=dice_loss)
Example #16
            dct_data3, order_of_chanel)

dct_data1 = tif_read.get_dct_with_names(dct_data1, order_of_chanel)
dct_data2 = tif_read.get_dct_with_names(dct_data2, order_of_chanel)
dct_data3 = tif_read.get_dct_with_names(dct_data3, order_of_chanel)

if len(dct_data1) != len(order_of_chanel) or len(dct_data2) != len(
        order_of_chanel) or len(dct_data3) != len(order_of_chanel):
    raise IncorrectDataFiles(
        "Error while trying to collect paths!  You should have " +
        str(order_of_chanel) + ' in each file under ./Data')

# Load model
model = Linknet('resnet50',
                classes=1,
                activation='sigmoid',
                input_shape=(sample_size, sample_size, number_chanels),
                encoder_weights=None)
Path_to_weights = 'Model\\model_segmentation.h5'
model.load_weights(Path_to_weights)

path = lst_path[0]
imgMatrixRead = point_sumple.ImgMatrixRead(lst_path, mask_path)
imgcord = point_sumple.Imgcord(imgMatrixRead)


def random_generator(number_sample, img_size, sample_size):
    lst = list(range(sample_size, img_size, sample_size))[:number_sample]
    random.shuffle(lst)
    return lst
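Example #17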
    def train(self):

        # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
        # config = tf.ConfigProto(gpu_options=gpu_options)
        session_config = tf.ConfigProto(gpu_options=tf.GPUOptions(
            allow_growth=True, per_process_gpu_memory_fraction=0.6))
        session = tf.Session(config=session_config)

        addresss = self.address

        warnings.filterwarnings('ignore',
                                category=UserWarning,
                                module='skimage')
        seed = 42

        np.random.seed(10)

        print('Getting and resizing train images and masks ... ')
        sys.stdout.flush()
        counter = 0

        self.IMG_WIDTH = 288  # for faster computing on kaggle
        self.IMG_HEIGHT = 288  # for faster computing on kaggle

        counter = 0

        files_original_name = list()

        #self.num_bodyparts =1

        if len(self.annotated) == 0:
            wx.MessageBox(
                'Did you save your annotation?\n '
                'No annotation found in your file, please save and re-run',
                'Error!', wx.OK | wx.ICON_ERROR)
            self.error = 1
            return

        for i in range(0, len(self.annotated)):
            files_original_name.append(self.dataFrame[
                self.dataFrame.columns[0]]._stat_axis[self.annotated[i]][7:])

        img = imread(self.image_folder + os.sep + files_original_name[0])

        IMG_CHANNELS = len(np.shape(img))
        self.IMG_CHANNELS = IMG_CHANNELS

        self.file_name_for_prediction_confidence = files_original_name

        X_train = np.zeros((len(
            self.annotated), self.IMG_HEIGHT, self.IMG_WIDTH, IMG_CHANNELS),
                           dtype=np.uint8)
        Y_train = np.zeros((len(self.annotated), self.IMG_HEIGHT,
                            self.IMG_WIDTH, self.num_bodyparts + 1),
                           dtype=int)
        New_train = np.zeros(
            (len(self.annotated), self.IMG_HEIGHT, self.IMG_WIDTH),
            dtype=int)

        for l in range(0, len(self.annotated)):
            img = imread(self.image_folder + os.sep + files_original_name[l])

            # mask_ = np.zeros((np.shape(img)[0],np.shape(img)[1],self.num_bodyparts))
            mask_ = np.zeros(
                (np.shape(img)[0], np.shape(img)[1], self.num_bodyparts))
            img = resize(img, (self.IMG_HEIGHT, self.IMG_WIDTH),
                         mode='constant',
                         preserve_range=True)

            X_train[counter] = img

            for j in range(0, self.num_bodyparts):
                mask_single_label = np.zeros((mask_.shape[0], mask_.shape[1]))

                #if annotation was assisted, x is negative

                points = np.asarray([
                    self.dataFrame[self.dataFrame.columns[j * 2]].values[
                        self.annotated[l]],
                    self.dataFrame[self.dataFrame.columns[j * 2 + 1]].values[
                        self.annotated[l]]
                ],
                                    dtype=float)
                points = np.abs(points)

                if np.isnan(points[0]):
                    continue

                cv2.circle(mask_single_label, (int(round(
                    (points[0] * (2**4)))), int(round(points[1] * (2**4)))),
                           int(round(self.markerSize * (2**4))),
                           (255, 255, 255),
                           thickness=-1,
                           shift=4)
                mask_[:, :, j] = mask_single_label

            mask_ = resize(mask_, (self.IMG_HEIGHT, self.IMG_WIDTH),
                           mode='constant',
                           preserve_range=True)
            a, mask_ = cv2.threshold(mask_, 200, 255, cv2.THRESH_BINARY)
            mask_ = mask_ / 255.0
            if len(np.shape(mask_)) == 2:
                mask_new = np.zeros(
                    (np.shape(mask_)[0], np.shape(mask_)[1], 1))
                mask_new[:, :, 0] = mask_
                mask_ = mask_new

        for j in range(0, self.num_bodyparts):
            New_train[counter] = New_train[counter] + mask_[:, :, j] * (j + 1)

            # alternative method to build the ground truth
            # temp = temp + 1
            # temp[temp == 0] = 1
            # temp[temp > 1] = 0
            # Y_train[counter, :, :,1:] = mask_
            # Y_train[counter,:,:,0] = temp
            counter += 1
            #

        try:
            Y_train = tf.keras.utils.to_categorical(
                New_train, num_classes=self.num_bodyparts + 1)
        except Exception:
            wx.MessageBox(
                'two or more labels are overlapping!\n '
                'Check annotation or re-perform the labeling operation',
                'Error!', wx.OK | wx.ICON_ERROR)
            self.error = 1
            return

        counter = 0

        from segmentation_models import get_preprocessing

        self.processer = get_preprocessing(self.backbone)

        X_train = self.processer(X_train)

        print('Done!')

        from tensorflow.keras.preprocessing import image

        # Creating the training Image and Mask generator
        image_datagen = image.ImageDataGenerator(shear_range=0.5,
                                                 rotation_range=50,
                                                 zoom_range=0.2,
                                                 width_shift_range=0.2,
                                                 height_shift_range=0.2,
                                                 fill_mode='reflect')
        mask_datagen = image.ImageDataGenerator(shear_range=0.5,
                                                rotation_range=50,
                                                zoom_range=0.2,
                                                width_shift_range=0.2,
                                                height_shift_range=0.2,
                                                fill_mode='reflect')

        # Keep the same seed for image and mask generators so they fit together

        image_datagen.fit(X_train[:int(X_train.shape[0] * 0.9)],
                          augment=True,
                          seed=seed)
        mask_datagen.fit(Y_train[:int(Y_train.shape[0] * 0.9)],
                         augment=True,
                         seed=seed)

        x = image_datagen.flow(X_train[:int(X_train.shape[0] * 0.9)],
                               batch_size=self.BATCH_SIZE,
                               shuffle=True,
                               seed=seed)
        y = mask_datagen.flow(Y_train[:int(Y_train.shape[0] * 0.9)],
                              batch_size=self.BATCH_SIZE,
                              shuffle=True,
                              seed=seed)

        # Creating the validation Image and Mask generator
        image_datagen_val = image.ImageDataGenerator()
        mask_datagen_val = image.ImageDataGenerator()

        image_datagen_val.fit(X_train[int(X_train.shape[0] * 0.9):],
                              augment=True,
                              seed=seed)
        mask_datagen_val.fit(Y_train[int(Y_train.shape[0] * 0.9):],
                             augment=True,
                             seed=seed)

        x_val = image_datagen_val.flow(X_train[int(X_train.shape[0] * 0.9):],
                                       batch_size=self.BATCH_SIZE,
                                       shuffle=True,
                                       seed=seed)
        y_val = mask_datagen_val.flow(Y_train[int(Y_train.shape[0] * 0.9):],
                                      batch_size=self.BATCH_SIZE,
                                      shuffle=True,
                                      seed=seed)

        train_generator = zip(x, y)
        val_generator = zip(x_val, y_val)

        from segmentation_models import Unet, PSPNet, Linknet, FPN
        from segmentation_models.losses import CategoricalFocalLoss
        from segmentation_models.utils import set_trainable
        import segmentation_models
        from tensorflow.keras.optimizers import RMSprop, SGD

        if self.architecture == 'Linknet':

            self.model = Linknet(self.backbone,
                                 classes=self.num_bodyparts + 1,
                                 activation='softmax',
                                 encoder_weights=self.image_net,
                                 input_shape=(self.IMG_WIDTH, self.IMG_HEIGHT,
                                              self.IMG_CHANNELS))

        elif self.architecture == 'unet':

            self.model = Unet(self.backbone,
                              classes=self.num_bodyparts + 1,
                              activation='softmax',
                              encoder_weights=self.image_net,
                              input_shape=(self.IMG_WIDTH, self.IMG_HEIGHT,
                                           self.IMG_CHANNELS))
        elif self.architecture == 'PSPnet':
            self.model = PSPNet(self.backbone,
                                classes=self.num_bodyparts + 1,
                                activation='softmax',
                                encoder_weights=self.image_net,
                                input_shape=(self.IMG_WIDTH, self.IMG_HEIGHT,
                                             self.IMG_CHANNELS))

        elif self.architecture == 'FPN':
            self.model = FPN(self.backbone,
                             classes=self.num_bodyparts + 1,
                             activation='softmax',
                             encoder_weights=self.image_net,
                             input_shape=(self.IMG_WIDTH, self.IMG_HEIGHT,
                                          self.IMG_CHANNELS))

        weights = np.zeros((1, self.num_bodyparts + 1), dtype=float)
        weight = 1.0 / self.num_bodyparts

        num_zeros = 1
        # while (weight * 100 < 1):
        #     weight = weight * 100
        #     num_zeros += 1
        #
        # weight = int(weight * 100) / np.power(100, num_zeros)
        weights[0, 1:] = weight
        weights[0, 0] = 0.01 * len(self.bodyparts)

        while weights[0, 0] > weights[0, 1]:
            weights[0, 0] = weights[0, 0] / 10
            num_zeros += 1

        for i in range(1, len(self.bodyparts) + 1):
            weights[0, i] = weights[0, i] - 10**-(num_zeros + 1)

        if self.loss_function == "Weighted Categorical_cross_entropy":
            loss = self.weighted_categorical_crossentropy(weights)
        else:
            loss = segmentation_models.losses.DiceLoss(class_weights=weights)
        metric = segmentation_models.metrics.IOUScore(class_weights=weights,
                                                      per_image=True)
        self.model.compile(optimizer=RMSprop(lr=self.learning_rate),
                           loss=loss,
                           metrics=[metric])
        earlystopper = EarlyStopping(patience=6, verbose=1)
        #
        checkpointer = ModelCheckpoint(os.path.join(self.address, 'Unet.h5'),
                                       verbose=1,
                                       save_best_only=True)
        reduce_lr = keras.callbacks.LearningRateScheduler(self.lr_scheduler)

        #
        # model.fit_generator(train_generator, validation_data=val_generator, validation_steps=10, steps_per_epoch=50,
        #                                epochs=2, callbacks=[earlystopper, checkpointer],verbose=1)
        # model.load_weights(self.address + 'Temp_weights.h5')

        # set_trainable(model)
        #
        self.model.fit_generator(
            train_generator,
            validation_data=val_generator,
            steps_per_epoch=20,
            validation_steps=5,
            epochs=100,
            callbacks=[earlystopper, checkpointer, reduce_lr],
            verbose=1)
Example #18
    train_images, val_images, train_masks, val_masks = train_test_split(x_data, y_data, shuffle=True, test_size=0.2)
    callbacks_list = [
        ModelCheckpoint('models/linknet_vgg16_' + str(len(CLASSES)) + '_classes.h5',
                        verbose=1,
                        save_best_only=True,
                        mode='min',
                        save_weights_only=True),
        ReduceLROnPlateau(verbose=1, factor=0.25, patience=3, min_lr=1e-6)
    ]

    model = Linknet(
        backbone_name='vgg16',
        input_shape=(HEIGHT, WIDTH, DEPTH),
        classes=len(CLASSES),
        activation='sigmoid',
        decoder_block_type='upsampling',
        encoder_weights='imagenet',
        decoder_use_batchnorm=True
    )

    model.summary()
    model.compile(optimizer=Adam(1e-3), loss=jaccard_loss, metrics=[jaccard_score, dice_score])

    model_json = model.to_json()
    with open('models/linknet_vgg16_' + str(len(CLASSES)) + '_classes.json', 'w') as json_file:
        json_file.write(model_json)
    print('Model saved!')

    model.fit_generator(
Example #19
from segmentation_models import get_preprocessing

backbone = 'resnet18'

preprocessing_fn = get_preprocessing(backbone)

train_gen = custom_generator(images_path=images,
                             labels_path=labels,
                             preprocessing_fn=preprocessing_fn,
                             aug_mode=aug_mode,
                             batch_size=batch_size)

# In[ ]:
# # Define model
from segmentation_models import Linknet

model = Linknet(backbone_name=backbone,
                input_shape=input_shape,
                classes=num_classes,
                activation='softmax')

print("Model summary:")
model.summary()

# In[ ]:
from keras import optimizers
from losses import dice_coef_multiclass_loss

learning_rate = 1e-4
optimizer = optimizers.Adam(learning_rate)

losses = [dice_coef_multiclass_loss]
metrics = ['categorical_accuracy']
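The snippet stops before wiring these pieces together; a plausible continuation under the snippet's own names (steps_per_epoch and epochs are placeholder assumptions, not from the original):

model.compile(optimizer=optimizer, loss=losses, metrics=metrics)
model.fit_generator(train_gen, steps_per_epoch=100, epochs=50, verbose=1)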
Example #20
if model_name == 'Unet':
    model = Unet(backbone_name=backbone_name,
                 classes=n_classes,
                 activation='softmax')
elif model_name == 'PSPNet':
    model = PSPNet(backbone_name=backbone_name,
                   classes=n_classes,
                   activation='softmax')
elif model_name == 'FPN':
    model = FPN(backbone_name=backbone_name,
                classes=n_classes,
                activation='softmax')
elif model_name == 'Linknet':
    model = Linknet(backbone_name=backbone_name,
                    classes=n_classes,
                    activation='softmax')
else:
    raise ValueError('Please provide the right model name')

model.compile('Adam',
              loss='categorical_crossentropy',
              metrics=['categorical_accuracy'])

####################################################
############# Training model #######################
####################################################

for i in range(n_save):
    print('==============================')
    print('in iteration: ', i + 1)
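    # plausible continuation (assumed, not from the original snippet):
    # train a fixed number of epochs per iteration, then checkpoint the weights
    model.fit_generator(train_generator,          # hypothetical generator name
                        validation_data=val_generator,
                        epochs=n_epoch_per_save,  # hypothetical variable
                        verbose=1)
    model.save_weights('model_iter_{}.h5'.format(i + 1))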