Example #1
def train():
    training_generator = get_training_generator()
    for epoch_index in range(epochs):
        # Why iterate over epochs manually instead of passing `epochs` to fit_generator?
        # The problem is that I need to clear the graph regularly, or I get a memory error,
        # and I did not find a way to free the memory of used operations without resetting the whole graph.
        # But resetting the whole graph also requires reloading all the weights.
        # This is obviously not an acceptable long-term solution. See the issue on GitHub:
        # https://github.com/tensorflow/tensorflow/issues/31419
        graph = tf.Graph()
        K.clear_session()
        gen = training_generator(graph,
                                 starting_index=epoch_index * steps_per_epoch)
        with graph.as_default():
            if epoch_index == 0:
                unet = Unet(
                    "resnet34",
                    encoder_weights="imagenet",
                    classes=1,
                    activation="sigmoid",
                    input_shape=(tf_image_size, tf_image_size, 3),
                )
                unet.compile(optimizer=Adam(lr=learning_rate),
                             loss=calculate_loss)
            else:
                unet = load_model(
                    file_path,
                    custom_objects={"calculate_loss": calculate_loss})
            unet.fit_generator(gen, steps_per_epoch=steps_per_epoch, epochs=1)
            save_model(unet, file_path)
Example #2
def train():
    #load images
    images = []
    for image in os.listdir(im_path):
        imi = cv.imread(os.path.join(im_path, image))
        images.append(imi)

    #load masks
    masks = []
    for mask in os.listdir(mask_path):
        mask_in = cv.imread(os.path.join(mask_path, mask), 0)
        ret_val, threshed_mask = cv.threshold(mask_in, 37, 1, cv.THRESH_BINARY)
        masks.append(threshed_mask)

    model = Unet('resnet34',
                 encoder_weights='imagenet',
                 input_shape=(128, 128, 3))
    model.compile('Adam',
                  loss=bce_jaccard_loss,
                  metrics=[iou_score, 'accuracy'])
    model.summary()
    hist = model.fit(x=np.array(images).reshape(-1, 128, 128, 3),
                     y=np.array(masks).reshape(-1, 128, 128, 1),
                     batch_size=10,
                     epochs=15)

    #save model
    filename = 'trained_model.h5'
    model.save(filename, include_optimizer=False)
Example #3
class Dense_Unet():
    def __init__(self, img_shape):
        self.img_shape = img_shape

    def compile_dense(self):

        self.model = Unet(backbone_name='inceptionv3',
                          input_shape=self.img_shape,
                          input_tensor=None,
                          encoder_weights=None,
                          freeze_encoder=False,
                          skip_connections='default',
                          decoder_block_type='upsampling',
                          decoder_filters=(256, 128, 64, 32, 16),
                          decoder_use_batchnorm=True,
                          n_upsample_blocks=5,
                          upsample_rates=(2, 2, 2, 2, 2),
                          classes=4,
                          activation='softmax')

        sgd = SGD(lr=0.1, momentum=0.9, decay=5e-6, nesterov=False)
        adam = Adam(lr=0.01,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=None,
                    decay=0.0,
                    amsgrad=False)

        self.model.compile(
            adam, gen_dice_loss,
            [dice_whole_metric, dice_core_metric, dice_en_metric])

        return self.model
Example #4
    def U_net(self):
        # Build U-Net model
        transfer_model = Unet(backbone_name=backbone, input_shape=(None, None, 3), classes=1,
                              activation='relu', encoder_weights='imagenet', encoder_freeze=True)
        transfer_model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=[self.mean_iou])
        transfer_model.load_weights(self.modelPath)
        transfer_model.summary()
        return transfer_model
Example #5
def train_stage_1(x_train, y_train, x_valid, y_valid):
    opt = optimizers.adam(lr=0.001)
    model = Unet(backbone_name=BACKBONE,
                 encoder_weights='imagenet',
                 freeze_encoder=True)
    model.compile(loss=bce_dice_jaccard_loss,
                  optimizer=opt,
                  metrics=[my_iou_metric])
    model_checkpoint = ModelCheckpoint(
        OUTPUT_DIR + "/{}/models/{}_fold_{}_stage1.model".format(
            BASE_NAME, BASE_NAME, CUR_FOLD_INDEX),
        monitor='val_my_iou_metric',
        mode='max',
        save_best_only=True,
        verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor='val_my_iou_metric',
                                  mode='max',
                                  factor=0.5,
                                  patience=6,
                                  min_lr=0.00001,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_my_iou_metric',
                                   mode='max',
                                   patience=20,
                                   verbose=1)
    logger = CSVLogger(OUTPUT_DIR + '/{}/logs/{}_fold_{}_stage1.log'.format(
        BASE_NAME, BASE_NAME, CUR_FOLD_INDEX))
    model.fit_generator(
        TrainGenerator(x_train,
                       y_train,
                       batch_size=int(np.ceil(BATCH_SIZE / (len(AUGS) + 1))),
                       img_size_target=IMG_SIZE_TARGET),
        steps_per_epoch=int(
            np.ceil(len(x_train) / int(np.ceil(BATCH_SIZE /
                                               (len(AUGS) + 1))))),
        epochs=WARM_EPOCHS,
        validation_data=ValidGenerator(x_valid,
                                       y_valid,
                                       batch_size=BATCH_SIZE,
                                       img_size_target=IMG_SIZE_TARGET),
        callbacks=[model_checkpoint],
        shuffle=True)
    segmentation_utils.set_trainable(model)
    model.fit_generator(
        TrainGenerator(x_train,
                       y_train,
                       batch_size=int(np.ceil(BATCH_SIZE / (len(AUGS) + 1))),
                       img_size_target=IMG_SIZE_TARGET),
        steps_per_epoch=int(
            np.ceil(len(x_train) / int(np.ceil(BATCH_SIZE /
                                               (len(AUGS) + 1))))),
        epochs=EPOCHS,
        validation_data=ValidGenerator(x_valid,
                                       y_valid,
                                       batch_size=BATCH_SIZE,
                                       img_size_target=IMG_SIZE_TARGET),
        callbacks=[early_stopping, model_checkpoint, reduce_lr, logger],
        shuffle=True)
Example #6
def model_init(path_1, path_2):
    model = Unet(BACKBONE_NAME, input_shape=(None, None, 1), classes=1, encoder_weights=None)
    model.compile(optimizer=OPTIMIZER, loss=LOSS, metrics=[my_iou])
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=TRAIN_STEPS_PER_EPOCH,
                                  validation_data=valid_generator,
                                  validation_steps=VALID_STEPS_PER_EPOCH,
                                  callbacks=callbacks,
                                  epochs=50)
    return model, history
Example #7
def get_model(net_name,
              num_class,
              weight_path,
              input_shape=[],
              weighted_loss=False):
    number_class = num_class
    if net_name == 'psp':
        model_name = 'pspnet101_cityscapes'
        input_shape = (473, 473, 3)
        model = pspnet.PSPNet101(nb_classes=num_class,
                                 input_shape=input_shape,
                                 weights=model_name)
        model = model.model
    elif net_name == 'psp_50':
        input_shape = (473, 473, 3)
        model_name = 'pspnet50_ade20k'
        #output_mode='sigmoid'
        model = pspnet.PSPNet50(nb_classes=num_class,
                                input_shape=input_shape,
                                weights=model_name)
        model = model.model

    elif net_name[-1:] == 'c':
        if net_name == 'unet_rgbh_c' or net_name == 'unet_rgbc_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 4]
        elif net_name == 'unet_rgb_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 3]
        elif net_name == 'unet_msi_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 3]
        elif net_name == 'unet_msih_c' or net_name == 'unet_msic_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 9]
        from keras.layers import Input
        input_tensor = Input(shape=(input_shape[0], input_shape[1],
                                    input_shape[2]))
        model = Unet(input_shape=input_shape,
                     input_tensor=input_tensor,
                     backbone_name=params.BACKBONE,
                     encoder_weights=None,
                     classes=num_class)
    if weighted_loss:
        loss = my_class_weighted_loss
    else:
        loss = params.SEMANTIC_LOSS
    lr = params.LEARN_RATE
    optimizer = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    if len(weight_path) > 2:
        model.load_weights(weight_path, True)  # positional arg is by_name=True
        print('use pre-trained weights', weight_path)
    model.compile(optimizer, loss=loss, metrics=[categorical_accuracy])

    model.summary()
    return model, input_shape
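A hypothetical call, for illustration (the argument values are assumptions, not from the original): with an empty weight_path no pre-trained weights are loaded, and input_shape is filled in from the net_name defaults.

model, input_shape = get_model('unet_rgbh_c', num_class=5, weight_path='')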
Example #8
def train_unet_mobilenetv2(saveModelFn, tensorboardPath):
    # train_imgDir = "/home/xiping/mydisk2/imglib/my_imglib/coco/train2014_person"
    train_imgDir = "/coco/train2014_person"
    (train_data, train_mask_data), (val_data,
                                    val_mask_data) = get_data(train_imgDir,
                                                              maxNum=12000,
                                                              valMaxNum=1000)

    # print(train_data.shape)
    # print(mask_data.shape)
    # print(mask_data[0])
    # cv2.imwrite("xx.bmp", mask_data[1]*255)
    # exit(0)

    print("================================")
    BACKBONE = 'mobilenetv2'
    # define model
    model = Unet(
        BACKBONE,
        classes=1,
        input_shape=(224, 224, 3),  # fixed input size so the checkpoint callback can save the model
        activation='sigmoid',  # 'sigmoid' or 'softmax'
        encoder_weights='imagenet')

    # Show network structure.
    # model.summary()

    model.compile('Adam', loss='jaccard_loss', metrics=['iou_score'])
    # model.compile('SGD', loss="bce_dice_loss", metrics=["dice_score"])
    # model.compile('SGD', loss="bce_jaccard_loss", metrics=["iou_score"])
    # model.compile('adam', loss="binary_crossentropy", metrics=["iou_score"])

    checkpointer = ModelCheckpoint(
        filepath=
        "weights.epoch={epoch:02d}-val_loss={val_loss:.2f}-val_iou_score={val_iou_score:.2f}.hdf5",
        verbose=1,
        save_best_only=True)

    print("================================")
    print("Start train...")
    # fit model
    # if you use a data generator, use model.fit_generator(...) instead of model.fit(...);
    # a sketch follows this function. More about `fit_generator`: https://keras.io/models/sequential/#fit_generator
    model.fit(
        x=train_data,
        y=train_mask_data,
        batch_size=32,
        epochs=200,
        validation_data=(
            val_data,
            val_mask_data),  # the checkpoint callback needs validation data to save intermediate models
        callbacks=[TensorBoard(log_dir=tensorboardPath), checkpointer])

    model.save(saveModelFn)
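As the comment above notes, a generator-based pipeline would swap model.fit for model.fit_generator. A minimal sketch, assuming hypothetical train_gen/val_gen generators and sample counts (none of these names appear in the original):

model.fit_generator(
    train_gen,
    steps_per_epoch=num_train_samples // 32,
    epochs=200,
    validation_data=val_gen,
    validation_steps=num_val_samples // 32,
    callbacks=[TensorBoard(log_dir=tensorboardPath), checkpointer])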
Example #9
def main():
    with open('/home/rbuddhad/NIH-XRAY/test_sml.txt') as f1:
        lines1 = f1.readlines()

    test_datagen = ImageDataGenerator()
    test_batches = test_datagen.flow_from_directory(TEST_DATASET_PATH,
                                                    target_size=(1024, 1024),
                                                    shuffle=True,
                                                    class_mode=None,
                                                    batch_size=BATCH_SIZE)

    test_crops_orig = crop_generator(test_batches, CROP_LENGTH, lines1)  # 224

    model = Unet(backbone_name='resnet18', encoder_weights=None)
    model.load_weights('best_model1.h5')
    model.compile(optimizer='Adam',
                  loss='mean_squared_error',
                  metrics=['mae', 'mean_squared_error'])
    model.summary()

    # callbacks = [EarlyStopping(monitor='val_loss', patience=10),
    #              ModelCheckpoint(filepath='best_model1.h5', monitor='val_loss', save_best_only=True),
    #              TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)]
    # model.fit_generator(generator=test_crops_orig,
    #                     steps_per_epoch=100,
    #                     validation_data=valid_crops_orig,
    #                     callbacks=callbacks,
    #                     validation_steps=200,
    #                     epochs=1000,
    #                     shuffle=True)
    # model.predict(generator=test_crops_orig,
    #               steps=2,
    #               verbose=1)

    # model.save('unet2.h5')
    predict = model.predict_generator(generator=test_crops_orig,
                                      steps=1,
                                      verbose=1)
    # predict = model.predict()
    print(predict.shape, 'predict_batch_size')
    for i in range(50):
        plt.imshow(predict[i, :, :, 0], cmap='gray', vmin=0, vmax=1)
        plt.show()
Example #10
def train(x_train: NpArray, x_valid: NpArray, y_train: NpArray, y_valid: NpArray,
          fold: int = -1) -> None:
    preprocessing_fn = get_preprocessing('resnet34')
    x_train = preprocessing_fn(x_train)
    x_valid = preprocessing_fn(x_valid)

    model = Unet(backbone_name='resnet34', encoder_weights='imagenet')
    model.compile('Adam', 'binary_crossentropy', metrics=[my_iou_metric])
    model.summary()

    model_name = make_output_path("models/fold%d.hdf5" % fold)
    model_checkpoint = ModelCheckpoint(model_name, monitor='val_my_iou_metric',
                                       mode='max', save_best_only=True, verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor='val_my_iou_metric', mode='max',
                                  factor=0.5, patience=5, min_lr=3e-6, verbose=1)

    model.fit(x_train, y_train, validation_data=[x_valid, y_valid], epochs=EPOCHS,
              batch_size=BATCH_SIZE, callbacks=[model_checkpoint, reduce_lr],
              verbose=VERBOSE)
Example #11
def main():

    train_datagen = ImageDataGenerator(rescale=1 / 255)
    train_batches = train_datagen.flow_from_directory(DATASET_PATH,
                                                      target_size=(1024, 1024),
                                                      shuffle=True,
                                                      class_mode=None,
                                                      batch_size=BATCH_SIZE)

    valid_datagen = ImageDataGenerator(rescale=1 / 255)
    valid_batches = valid_datagen.flow_from_directory(DATASET_PATH,
                                                      target_size=(1024, 1024),
                                                      shuffle=False,
                                                      class_mode=None,
                                                      batch_size=BATCH_SIZE)

    train_crops = crop_generator(train_batches, CROP_LENGTH)  #224
    valid_crops = crop_generator(valid_batches, CROP_LENGTH)

    batch_x_random_crop, batch_y_targeted_crop = next(train_crops)
    valid_x, valid_y = next(valid_crops)

    in_painted_x = in_painting_mask(batch_x_random_crop, batch_y_targeted_crop)
    valid_in_x = in_painting_mask(valid_x, valid_y)

    batch_x_random_crop = rgb2gray(batch_x_random_crop)
    batch_x_random_crop = np.reshape(
        batch_x_random_crop, (batch_x_random_crop.shape[0], 224, 224, 1))

    valid_x = rgb2gray(valid_x)
    valid_x = np.reshape(valid_x, (valid_x.shape[0], 224, 224, 1))

    model = Unet(backbone_name='resnet18',
                 encoder_weights='imagenet',
                 decoder_block_type='transpose')  # build U-Net
    model.compile(optimizer='Adam', loss='mean_squared_error')
    model.summary()
    model.fit(x=in_painted_x,
              y=batch_x_random_crop,
              validation_data=(valid_in_x, valid_x),
              validation_steps=5,
              steps_per_epoch=5,
              epochs=1)
Example #12
def seg_model(preprocess_type, input_size, pretrained_weights, activation,
              loss):
    classes = 4 if activation == 'sigmoid' else 5

    model = Unet(preprocess_type,
                 encoder_weights='imagenet',
                 input_shape=input_size,
                 classes=classes,
                 activation=activation)

    adam = keras.optimizers.Adam(lr=1e-4)

    model.compile(optimizer=adam, loss=loss, metrics=[dice_coef])

    model.summary()

    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model
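A hypothetical call, for illustration (shape and loss are assumptions): with activation='sigmoid' the model predicts 4 independent masks, while 'softmax' adds a background class for 5 in total.

model = seg_model('resnet34', (256, 1600, 3), None, 'sigmoid', 'binary_crossentropy')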
Example #13
def get_compiled_unet(config, label_encoder, loss='categorical_crossentropy', predict_logits=False, large=True):
    '''
    Input: config dict, label_encoder, loss (string or callable), predict_logits (boolean)
    Output: compiled Unet model
    '''
    activation = 'linear' if predict_logits else 'softmax'
    n_bands = len(config['s1_input_bands']) + len(config['s2_input_bands'])
    decoder_filters = (256, 128, 64, 32, 16) if large else (64, 32, 32, 32, 32)
    model = Unet(
        backbone_name=config['unet_params']['backbone_name'],
        encoder_weights=None,
        activation=activation,
        input_shape=(None, None, n_bands),
        classes=len(label_encoder.classes_),
        decoder_filters=decoder_filters
    )
    model.compile(loss=loss,
                  optimizer=Nadam(lr=config['unet_params']['learning_rate']),
                  metrics=['accuracy'])
    return model
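A hypothetical usage sketch (the band names, backbone, and classes are assumptions, not from the original): n_bands is the total of the S1 and S2 band lists, and the class count comes from the fitted label encoder.

from sklearn.preprocessing import LabelEncoder

config = {
    's1_input_bands': ['VV', 'VH'],
    's2_input_bands': ['B02', 'B03', 'B04'],
    'unet_params': {'backbone_name': 'resnet34', 'learning_rate': 1e-4},
}
label_encoder = LabelEncoder().fit(['background', 'water', 'forest'])
model = get_compiled_unet(config, label_encoder)  # 5-band input, 3 classes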
Example #14
def build_model():

    model = Unet(backbone_name='mobilenetv2',
                 input_shape=(224, 224, 3),
                 classes=1,
                 activation='sigmoid',
                 encoder_weights=weight_mobilenetv2_path,
                 encoder_freeze=True,
                 encoder_features='default',
                 decoder_block_type='upsampling',
                 decoder_filters=(256, 128, 64, 32, 16),
                 decoder_use_batchnorm=True)

    #model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.0001), metrics=['acc'])
    #model.compile(loss='binary_crossentropy', optimizer=SGD(lr=1e-4, momentum=0.9), metrics=['acc'])
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=0.0001),
                  metrics=['acc'])
    model.summary()
    return model
Example #15
def unet_train():
    callbacks = [
        # EarlyStopping(patience=10, verbose=1),
        # ReduceLROnPlateau(factor=0.1, patience=3, min_lr=0.00001, verbose=1),
        ModelCheckpoint('unet_' + checkpoint,
                        verbose=1,
                        monitor='loss',
                        save_best_only=True,
                        save_weights_only=True)
    ]
    model = Unet(backbone_name=backbone,
                 encoder_weights=None,
                 input_shape=input_shape)
    model.compile('Adam', 'binary_crossentropy', ['binary_accuracy'])
    results = model.fit(X_train,
                        y_train,
                        callbacks=callbacks,
                        epochs=100,
                        verbose=1,
                        validation_data=(X_valid, y_valid))
    plotting(results)
Example #16
def generate_compiled_segmentation_model(model_name, model_parameters, num_classes, loss, optimizer,
                                         weights_to_load=None):

    # These are the only model, loss, and optimizer currently supported
    assert model_name == 'Unet'
    assert loss == 'cross_entropy'
    assert optimizer == 'adam'

    model = Unet(input_shape=(None, None, 1), classes=num_classes, **model_parameters)

    crossentropy = binary_crossentropy if num_classes == 1 else categorical_crossentropy
    loss_fn = crossentropy

    model.compile(optimizer=Adam(),
                  loss=loss_fn,
                  metrics=[accuracy, iou_score, jaccard_loss, dice_loss])

    if weights_to_load:
        model.load_weights(weights_to_load)

    return model
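A hypothetical call (the parameter values are assumptions); the asserts above mean only this exact model/loss/optimizer combination is accepted:

model = generate_compiled_segmentation_model(
    model_name='Unet',
    model_parameters={'backbone_name': 'resnet34', 'encoder_weights': None},
    num_classes=1,
    loss='cross_entropy',
    optimizer='adam')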
Example #17
    def get_model(self, net_name, input_shape, number_class, class_weight,
                  weight_path):
        from segmentation_models import pspnet  #PSPNet
        if net_name == 'psp':
            model_name = 'pspnet101_cityscapes'
            input_shape = (473, 473, 9)
            model = pspnet.PSPNet101(nb_classes=number_class,
                                     input_shape=input_shape,
                                     weights=model_name)
            model = model.model
        elif net_name == 'psp_50':
            input_shape = (473, 473, 9)
            model_name = 'pspnet50_ade20k'
            #output_mode='sigmoid'
            model = pspnet.PSPNet50(nb_classes=number_class,
                                    input_shape=input_shape,
                                    weights=model_name)
            model = model.model
        elif net_name == 'unet':
            input_shape = [256, 256, 9]
            from keras.layers import Input
            input_tensor = Input(shape=(input_shape[0], input_shape[1],
                                        input_shape[2]))
            model = Unet(input_shape=input_shape,
                         input_tensor=input_tensor,
                         backbone_name=params.BACKBONE,
                         encoder_weights=None,
                         classes=number_class)
        ##[1.0,10.0,10.0,20.,30.]
        weights = np.array(class_weight)
        #        loss = weighted_categorical_crossentropy(weights)
        loss = my_weighted_loss
        #        loss=params.SEMANTIC_LOSS
        optimizer = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        if (len(weight_path) > 2):
            model.load_weights(weight_path)
        model.compile(optimizer, loss=loss)

        model.summary()
        return model
Example #18
    def resnet(self):
        """
        Load model and weights

        :return: neural network model
        """
        #define model
        # model = Unet(backbone_name='resnet34', input_shape=(None, None, 3), encoder_weights=None, classes=1, encoder_freeze=False)
        # model.load_weights(self.data_path + '/weights/true_weights.hdf5')
        # model.compile('Adam', 'dice_loss', metrics=['iou_score'])

        model = Unet(
            backbone_name='resnet18',
            input_shape=(None, None, 3),
            decoder_filters=(64, 32, 32, 16, 4),
            encoder_weights='imagenet',
            classes=1,
            encoder_freeze=True,
        )
        model.load_weights(self.data_path + '/weights/new.hdf5')
        model.compile('Adam', 'bce_jaccard_loss', metrics=['iou_score'])
        return model
Example #19
def train_net():
    args = parser.parse_args()
    final_path = args.model_final_path
    checkpoint_path = args.model_checkpoint_path
    dataset_path = args.dataset_path
    with tf.device("/gpu:0"):
        backbone = 'resnet50'
        preprocess_input = get_preprocessing(backbone)

        # load your data
        x_train, y_train, x_val, y_val = from_directory_datagen(dataset_path)

        # preprocess input
        x_train = preprocess_input(x_train)
        x_val = preprocess_input(x_val)

        # define model
        model = Unet(backbone, encoder_weights='imagenet', input_shape=(256, 256, 3))
        model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-3), loss=dice_loss,
                      metrics=[f1_score, iou_score])

        check_point = [ModelCheckpoint(checkpoint_path + 'model-{epoch:03d}-{val_f1-score:03f}.h5', verbose=1,
                                       monitor='val_f1-score',
                                       save_best_only=True, mode='max')]

        # fit model
        model.fit(
            x=(pair for pair in zip(x_train, y_train)),
            epochs=10,
            steps_per_epoch=x_train.n // x_train.batch_size,
            validation_data=(pair for pair in zip(x_val, y_val)),
            validation_steps=x_val.n // x_val.batch_size,
            verbose=1,
            shuffle=True,
            callbacks=check_point,
        )
        model.save(final_path + 'final_model.h5')
Example #20
def main():

    image_path = "image"  #加载训练图片
    im_start = 0  #确定编号
    im_end = 29
    im_array = load_png_files(image_path, im_start, im_end)
    im_array = im_array[:, :, :, np.newaxis]  # 需要加代表图片的通道数,这里是黑白图片所以是1,因此直接加一维
    print("train_image shape : " + im_array.shape)

    label_path = "label"  #加载训练图片对应的标签图片
    la_start = 0
    la_end = 29
    la_array = load_png_files(label_path, la_start, la_end)
    la_array = la_array[:, :, :, np.newaxis]
    print("train_label shape : " + la_array.shape)

    test_path = "test"  #加载测试集的图片
    te_start = 0
    te_end = 4
    te_array = load_png_files(test_path, te_start, te_end)
    te_array = te_array[:, :, :, np.newaxis]
    print("test_image shape : " + te_array.shape)

    model = Unet('resnet34', input_shape=(512, 512, 1),
                 encoder_weights=None)  # the trailing 1 is the channel count
    model.compile('Adam', loss='binary_crossentropy', metrics=['accuracy'])
    model.fit(
        x=im_array,
        y=la_array,
        batch_size=10,
        epochs=8,
        validation_split=0.2,  # hold out 20% of the training set for validation
        shuffle=True)
    model.save("model_v1.h5")  # save the model

    print("Saved model to disk")
    print("done!!!!!")
def network_setup():
    global preprocess, model, idx, train_batches, valid_batches
    # LOAD UNET WITH PRETRAINING FROM IMAGENET
    preprocess = get_preprocessing(
        'resnet34')  # for resnet, img = (img-110.0)/1.0
    model = Unet('resnet34',
                 input_shape=(img_resize_shape[0], img_resize_shape[1],
                              in_channels),
                 classes=out_channels,
                 activation='sigmoid')
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=[dice_coef])
    # TRAIN AND VALIDATE MODEL
    idx = int(0.8 * len(train2))
    print()
    train_batches = DataGenerator(train2.iloc[:idx],
                                  shuffle=True,
                                  preprocess=preprocess)
    valid_batches = DataGenerator(train2.iloc[idx:], preprocess=preprocess)
    history = model.fit_generator(train_batches,
                                  validation_data=valid_batches,
                                  epochs=epochs,
                                  verbose=1)
Example #22
from keras import backend as K


def dice_coef(y_true, y_pred, smooth=1.0):
    # The original fragment starts mid-function; the signature and the two
    # flatten lines below are assumed to complete a standard Dice coefficient.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) +
                                           smooth)


from segmentation_models import Unet
from segmentation_models.backbones import get_preprocessing

# LOAD UNET WITH PRETRAINING FROM IMAGENET
preprocess = get_preprocessing('resnet34')  # for resnet, img = (img-110.0)/1.0
model = Unet('resnet34',
             input_shape=(128, 800, 3),
             classes=4,
             activation='sigmoid')
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[dice_coef])

# TRAIN AND VALIDATE MODEL
idx = int(0.8 * len(train2))
print()
train_batches = DataGenerator(train2.iloc[:idx],
                              shuffle=True,
                              preprocess=preprocess)
valid_batches = DataGenerator(train2.iloc[idx:], preprocess=preprocess)
history = model.fit_generator(train_batches,
                              validation_data=valid_batches,
                              epochs=30,
                              verbose=2)
# PLOT TRAINING
plt.figure(figsize=(15, 5))
Example #23
#Data augmentation
x_train = np.append(x_train, [np.fliplr(x) for x in x_train], axis=0)
y_train = np.append(y_train, [np.fliplr(x) for x in y_train], axis=0)

x_train = np.repeat(x_train, 3, axis=3)
x_valid = np.repeat(x_valid, 3, axis=3)

# In[14]:

from segmentation_models import Unet
model1 = Unet(backbone_name='resnet152', encoder_weights='imagenet')

c = optimizers.adam(lr=0.0001)
model1.compile(loss='binary_crossentropy',
               optimizer=c,
               metrics=[my_iou_metric])

# In[15]:

model_checkpoint = ModelCheckpoint(save_model_name,
                                   monitor='val_my_iou_metric',
                                   mode='max',
                                   save_best_only=True,
                                   verbose=1)

epochs = 60
BATCH_SIZE = 8
steps_per_epoch = x_train.shape[0] // BATCH_SIZE

schedule = SGDRScheduler(min_lr=1e-5,
Example #24
def train(backbone,
          load_pretrain,
          data_path,
          split_path,
          save_path,
          n_split=5,
          seed=960630,
          batch_size=4,
          fold=0):

    # split by all data
    get_train_val_split(data_path=data_path + 'image_set/',
                        save_path=split_path,
                        n_splits=n_split,
                        seed=seed)

    # split by folders
    # get_train_val_split(data_path=data_path+'images/',
    #                     save_path=split_path,
    #                     n_splits=n_split,
    #                     seed=seed)

    if load_pretrain is not None:
        model = load_model(load_pretrain, compile=False)
    elif backbone is not None:
        model = Unet(backbone, classes=1, encoder_weights='imagenet')
    else:
        model = Unet(classes=1, encoder_weights='imagenet')

    model.compile('Adam', loss=bce_jaccard_loss, metrics=[iou_score])
    model.summary()

    # split by all images
    train_data = Carotid_DataGenerator(
        df_path=split_path +
        'split/train_fold_{}_seed_{}.csv'.format(fold, seed),
        image_path=data_path + 'image_set/',
        mask_path=data_path + '/mask_set/',
        batch_size=batch_size,
        target_shape=(512, 512),
        augmentation=True,
        shuffle=False)
    val_data = Carotid_DataGenerator(
        df_path=split_path +
        'split/val_fold_{}_seed_{}.csv'.format(fold, seed),
        image_path=data_path + 'image_set/',
        mask_path=data_path + '/mask_set/',
        batch_size=batch_size,
        target_shape=(512, 512),
        augmentation=True,
        shuffle=False)

    # split by folder
    # train_data = Carotid_DataGenerator(
    #     df_path=split_path+'split/train_fold_{}_seed_{}.csv'.format(fold, seed),
    #     image_path=data_path + 'images/',
    #     mask_path=data_path + '/masks/',
    #     batch_size=batch_size,
    #     target_shape=(512, 512),
    #     augmentation=True,
    #     shuffle=False)
    # val_data = Carotid_DataGenerator(
    #     df_path=split_path + 'split/val_fold_{}_seed_{}.csv'.format(fold, seed),
    #     image_path=data_path + 'images/',
    #     mask_path=data_path + '/masks/',
    #     batch_size=batch_size,
    #     target_shape=(512, 512),
    #     augmentation=True,
    #     shuffle=False)

    callbacks = [
        EarlyStopping(monitor='val_loss',
                      patience=8,
                      verbose=1,
                      min_delta=1e-4),
        ReduceLROnPlateau(monitor='val_loss',
                          factor=0.1,
                          patience=4,
                          verbose=1,
                          epsilon=1e-4),
        ModelCheckpoint(monitor='val_loss',
                        filepath=save_path,
                        verbose=True,
                        save_best_only=True)
    ]

    model.fit_generator(train_data,
                        validation_data=val_data,
                        epochs=10,
                        callbacks=callbacks,
                        verbose=1)
Example #25
    iou = K.mean(iou * class_weights)

    return iou


test_images = []
labels = []
for i in range(100):
    test_images.append(np.random.rand(128, 128, 3))
    rand = np.random.randint(10, size=(128, 128))
    labels.append(rand)
test_images = np.array(test_images)
labels = np.array(labels)
x_train, x_val, y_train, y_val = train_test_split(test_images,
                                                  labels,
                                                  test_size=0.33,
                                                  random_state=42)
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)
print(x_train.shape)
print(y_train.shape)
# `model` is not defined in this fragment; a plain Unet, as below, is an
# assumption (10 classes to match the random labels generated above).
model = Unet('resnet34', classes=10, activation='softmax')
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=[iou_score])
model.fit(
    x=x_train,
    y=y_train,
    batch_size=16,
    epochs=100,
    validation_data=(x_val, y_val),
)
Example #26
# The leading "Unet" branch is assumed; the original fragment begins at the elif.
if config.model == "Unet":
    model = Unet(backbone_name=config.backbone,
                 encoder_weights=config.weights,
                 decoder_block_type=config.decoder_block_type,
                 classes=config.nb_class,
                 activation=config.activation)
elif config.model == "Nestnet":
    model = Nestnet(backbone_name=config.backbone,
                    encoder_weights=config.weights,
                    decoder_block_type=config.decoder_block_type,
                    classes=config.nb_class,
                    activation=config.activation)
elif config.model == "Xnet":
    model = Xnet(backbone_name=config.backbone,
                 encoder_weights=config.weights,
                 decoder_block_type=config.decoder_block_type,
                 classes=config.nb_class,
                 activation=config.activation)
else:
    raise ValueError("unsupported model: " + config.model)
model.compile(optimizer="Adam",
              loss=bce_dice_loss,
              metrics=["binary_crossentropy", mean_iou, dice_coef])

# plot_model(model, to_file=os.path.join(model_path, config.exp_name+".png"))
if os.path.exists(os.path.join(model_path, config.exp_name + ".txt")):
    os.remove(os.path.join(model_path, config.exp_name + ".txt"))
with open(os.path.join(model_path, config.exp_name + ".txt"), 'w') as fh:
    model.summary(positions=[.3, .55, .67, 1.],
                  print_fn=lambda x: fh.write(x + '\n'))

shutil.rmtree(os.path.join(logs_path, config.exp_name), ignore_errors=True)
if not os.path.exists(os.path.join(logs_path, config.exp_name)):
    os.makedirs(os.path.join(logs_path, config.exp_name))
tbCallBack = TensorBoard(
    log_dir=os.path.join(logs_path, config.exp_name),
    histogram_freq=0,
Example #27
####################################################### IMPORTANT SETTINGS!!!!!
learning_rate = 0.002
batch_size = 100
epoch_no = 10
####################################################### IMPORTANT SETTINGS!!!!!

optimizer = Adam(lr=learning_rate)

model = Unet(backbone_name='efficientnetb7', 
             classes = len(mask_label_dict.keys()), 
             activation = 'softmax', 
             encoder_weights = 'imagenet', 
             encoder_freeze = True)    # freezing weights as pre-trained weights are used

model.compile(optimizer, 
              loss = JaccardLoss(per_image = False), 
              metrics = ['categorical_accuracy', IOUScore(per_image = False, threshold = 0.5)])

# creating generators for the image augmentation
train_generator = backroom.UnetSequence(X_train, y_train_multidim, batch_size, augmentations = backroom.train_augmentation) 
test_generator = backroom.UnetSequence(X_test, y_test_multidim, batch_size, augmentations = None)


start_time = time.time() # measuring modelling time

# basic .fit method
model.fit(X_train, y_train_multidim, epochs = 2, batch_size = batch_size, validation_data = (X_test, y_test_multidim)) 

set_trainable(model, recompile = False) # make all layers trainable so the encoder_freeze is lifted; recompile=True does not work with Tensorflow 2.0
model.compile(optimizer, 
              loss = JaccardLoss(per_image = False), 
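The example is cut off above in the middle of the second compile call. A sketch of how the unfrozen fine-tuning stage would typically continue under the same settings (the metrics list and the epoch count below are assumptions, not from the original):

model.compile(optimizer, 
              loss = JaccardLoss(per_image = False), 
              metrics = ['categorical_accuracy', IOUScore(per_image = False, threshold = 0.5)])
model.fit(X_train, y_train_multidim, epochs = epoch_no, batch_size = batch_size, 
          validation_data = (X_test, y_test_multidim))
print('modelling took %.1f seconds' % (time.time() - start_time))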
Example #28
num_val_samples = len(df_val)
train_batch_size = BATCH_SIZE
val_batch_size = BATCH_SIZE

# determine number of training steps
train_steps = np.ceil(num_train_samples / train_batch_size)
# determine number of validation steps
val_steps = np.ceil(num_val_samples / val_batch_size)

# Initialize the generators
train_gen = train_generator(batch_size=BATCH_SIZE)
val_gen = val_generator(batch_size=BATCH_SIZE)

model.compile(
    Adam(lr=0.0001),
    loss=dice_loss,
    metrics=[iou_score],
)



filepath = "model.h5"

earlystopper = EarlyStopping(patience=5, verbose=1)

checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, 
                             save_best_only=True, mode='min')

reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, 
                                   verbose=1, mode='min')
Example #29
def build_model(BACKBONE, Config, encoder_weights=None, freeze_encoder=False, inference=False):
    from segmentation_models import Unet

    if Config.num_classes == 1:
        activation = 'sigmoid'
    else:
        activation = 'softmax'

    # define model
    model = Unet(BACKBONE, input_shape=Config.input_shape, classes=Config.num_classes, activation=activation, encoder_weights=encoder_weights, encoder_freeze=freeze_encoder)

    if Config.class_mode == 'liver_lesion_pyramid':
        out3 = model.layers[-1].output
        out2 = model.get_layer('decoder_stage3_relu2').output
        out2 = Conv2D(Config.num_classes, (1, 1), activation='sigmoid', name='pred2')(out2)
        out1 = model.get_layer('decoder_stage2_relu2').output
        out1 = Conv2D(Config.num_classes, (1, 1), activation='sigmoid', name='pred1')(out1)
        out0 = model.get_layer('decoder_stage1_relu2').output
        out0 = Conv2D(Config.num_classes, (1, 1), activation='sigmoid', name='pred0')(out0)

        model = Model(inputs=[model.input], outputs=[out0, out1, out2, out3])

    # Compile (Training mode)
    if inference:
        return model
    else:
        if Config.class_mode == 'liver' or Config.class_mode == 'lesion':
            loss = focal_tversky
            # optimizer = 'Adam'
            optimizer = SGD(lr=Config.init_lr, momentum=0.9, decay=1e-4, nesterov=False)
            metrics = [dice_coef]

        elif Config.class_mode == 'liver_lesion':
            loss = weighted_categorical_crossentropy(Config.weights)
            # loss = combined_dice_wp_crossentropy(Config.weights)
            optimizer = 'Adam'
            # optimizer = SGD(lr=Config.init_lr, momentum=0.9, decay=1e-4, nesterov=False)
            metrics = ['accuracy', dice_coef_liver, dice_coef_lesion]

        elif Config.class_mode == 'lesion_combined':
            loss1 = weighted_categorical_crossentropy(Config.weights)
            loss2 = dice_coef_loss
            # loss2 = tversky_loss
            loss = {'softmax_output': loss1, 'sigmoid_output': loss2}  # loss_weights
            optimizer = 'Adam'
            metrics = {'softmax_output': [dice_coef_liver, dice_coef_lesion],
                        'sigmoid_output': [dice_coef, dice_coef_lesion]}
            model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
            return model

        elif Config.class_mode == 'liver_lesion_pyramid':
            loss = {'pred0': weighted_categorical_crossentropy(Config.weights),
                    'pred1': weighted_categorical_crossentropy(Config.weights),
                    'pred2': weighted_categorical_crossentropy(Config.weights),
                    'softmax': weighted_categorical_crossentropy(Config.weights)}

            optimizer = 'Adam'
            metrics = [dice_coef_lesion]

        else:
            raise ValueError('Please define a valid class_mode in config: liver / lesion / liver_lesion / lesion_pyramid / liver_lesion_pyramid / lesion_combined')

        model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    return model
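For illustration, a minimal hypothetical Config satisfying the attributes the function reads (all values below are assumptions, not from the original):

class Config:
    num_classes = 3
    input_shape = (256, 256, 3)
    class_mode = 'liver_lesion'
    weights = [1.0, 5.0, 10.0]  # per-class weights for the weighted crossentropy
    init_lr = 0.01

model = build_model('resnet34', Config, encoder_weights='imagenet')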
Example #30
    def train_track3(self, net='unet', check_folder=params.CHECKPOINT_DIR):
        os.environ["CUDA_VISIBLE_DEVICES"] = params.GPUS
        if not os.path.exists(check_folder):
            os.mkdir(check_folder)
        CHECKPOINT_DIR = check_folder
        CHECKPOINT_PATH = os.path.join(check_folder,
                                       'weights.{epoch:02d}.hdf5')

        data_folder = 'C:/TrainData/Track3/Train/patch_473/'
        img_train, dsm_train, label_train, img_val, dsm_val, label_val = load_all_data_files(
            data_folder)

        num_training_sample = len(img_train)
        batch_size = 1
        n_batch_per_epoch = num_training_sample // batch_size

        num_val_sample = len(img_val)
        n_batch_per_epoch_val = num_val_sample // batch_size

        nb_epoch = 200
        NUM_CATEGORIES = 5
        train_generator = input_generator_RGBH(img_train, dsm_train,
                                               label_train, batch_size)
        val_generator = input_generator_RGBH(img_val, dsm_val, label_val,
                                             batch_size)

        if net == 'psp':
            from segmentation_models import pspnet  #PSPNet
            model_name = 'pspnet101_cityscapes'
            input_shape = (473, 473, 9)
            model = pspnet.PSPNet101(nb_classes=NUM_CATEGORIES,
                                     input_shape=input_shape,
                                     weights=model_name)
            model = model.model
        elif net == 'unet':
            input_shape = [256, 256, 9]
            from keras.layers import Input
            input_tensor = Input(shape=(input_shape[0], input_shape[1],
                                        input_shape[2]))
            model = Unet(input_shape=input_shape,
                         input_tensor=input_tensor,
                         backbone_name=params.BACKBONE,
                         encoder_weights=None,
                         classes=2)

            model.load_weights(
                os.path.join('./checkpoint_track3-1/', 'weights.80.hdf5'))

        from keras.optimizers import Adam, SGD
        from keras.callbacks import ModelCheckpoint, CSVLogger
        #loss=params.SEMANTIC_LOSS
        #   loss=my_weighted_loss
        weights = np.array([1.0, 10.0, 10.0, 20., 30.])
        loss = weighted_categorical_crossentropy(weights)

        optimizer = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        model.compile(optimizer, loss=loss)
        model.summary()
        csv_logger = CSVLogger(os.path.join(CHECKPOINT_DIR, 'train.csv'))

        checkpoint = ModelCheckpoint(filepath=CHECKPOINT_PATH,
                                     monitor='loss',
                                     verbose=1,
                                     save_best_only=False,
                                     save_weights_only=True,
                                     mode='auto',
                                     period=params.MODEL_SAVE_PERIOD)
        callbacks = [csv_logger, checkpoint]

        model.fit_generator(train_generator,
                            steps_per_epoch=n_batch_per_epoch,
                            validation_data=val_generator,
                            validation_steps=n_batch_per_epoch_val,
                            epochs=nb_epoch,
                            callbacks=callbacks)