Example #1
def train():
    #load images
    images = []
    for image in os.listdir(im_path):
        imi = cv.imread(os.path.join(im_path, image))
        images.append(imi)

    #load masks
    masks = []
    for mask in os.listdir(mask_path):
        mask_in = cv.imread(os.path.join(mask_path, mask), 0)
        ret_val, threshed_mask = cv.threshold(mask_in, 37, 1, cv.THRESH_BINARY)
        masks.append(threshed_mask)

    model = Unet('resnet34',
                 encoder_weights='imagenet',
                 input_shape=(128, 128, 3))
    model.compile('Adam',
                  loss=bce_jaccard_loss,
                  metrics=[iou_score, 'accuracy'])
    model.summary()
    hist = model.fit(x=np.array(images).reshape(-1, 128, 128, 3),
                     y=np.array(masks).reshape(-1, 128, 128, 1),
                     batch_size=10,
                     epochs=15)

    #save model
    filename = 'trained_model.h5'
    model.save(filename, include_optimizer=False)
Example #2
    def U_net(self):
        # Build U-Net model
        transfer_model = Unet(backbone_name=backbone, input_shape=(None, None, 3), classes=1,
                              activation='relu', encoder_weights='imagenet', encoder_freeze=True)
        transfer_model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=[self.mean_iou])
        transfer_model.load_weights(self.modelPath)
        transfer_model.summary()
        return transfer_model
Example #3
def get_model(net_name,
              num_class,
              weight_path,
              input_shape=[],
              weighted_loss=False):
    number_class = num_class
    if net_name == 'psp':
        model_name = 'pspnet101_cityscapes'
        input_shape = (473, 473, 3)
        model = pspnet.PSPNet101(nb_classes=num_class,
                                 input_shape=input_shape,
                                 weights=model_name)
        model = model.model
    elif net_name == 'psp_50':
        input_shape = (473, 473, 3)
        model_name = 'pspnet50_ade20k'
        #output_mode='sigmoid'
        model = pspnet.PSPNet50(nb_classes=num_class,
                                input_shape=input_shape,
                                weights=model_name)
        model = model.model

    elif net_name[-1:] == 'c':
        if net_name == 'unet_rgbh_c' or net_name == 'unet_rgbc_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 4]
        elif net_name == 'unet_rgb_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 3]
        elif net_name == 'unet_msi_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 3]
        elif net_name == 'unet_msih_c' or net_name == 'unet_msic_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 9]
        from keras.layers import Input
        input_tensor = Input(shape=(input_shape[0], input_shape[1],
                                    input_shape[2]))
        model = Unet(input_shape=input_shape,
                     input_tensor=input_tensor,
                     backbone_name=params.BACKBONE,
                     encoder_weights=None,
                     classes=num_class)
    if weighted_loss:
        loss = my_class_weighted_loss
    else:
        loss = params.SEMANTIC_LOSS
    lr = params.LEARN_RATE
    optimizer = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    if (len(weight_path) > 2):
        model.load_weights(weight_path, True)
        print('use pre-trained weights', weight_path)
    model.compile(optimizer, loss=loss, metrics=[categorical_accuracy])

    model.summary()
    return model, input_shape
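
A hedged usage sketch of the function above; it assumes the surrounding `params` module is configured, and the placeholder arguments are illustrative only:

# Build an RGB U-Net with 5 classes; weight_path='' skips weight loading
model, shape = get_model('unet_rgb_c', num_class=5, weight_path='', input_shape=[])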
Example #4
def main():

    train_datagen = ImageDataGenerator(rescale=1 / 255)
    train_batches = train_datagen.flow_from_directory(DATASET_PATH,
                                                      target_size=(1024, 1024),
                                                      shuffle=True,
                                                      class_mode=None,
                                                      batch_size=BATCH_SIZE)

    valid_datagen = ImageDataGenerator(rescale=1 / 255)
    valid_batches = valid_datagen.flow_from_directory(DATASET_PATH,
                                                      target_size=(1024, 1024),
                                                      shuffle=False,
                                                      class_mode=None,
                                                      batch_size=BATCH_SIZE)

    train_crops = crop_generator(train_batches, CROP_LENGTH)  #224
    valid_crops = crop_generator(valid_batches, CROP_LENGTH)

    batch_x_random_crop, batch_y_targeted_crop = next(train_crops)
    valid_x, valid_y = next(valid_crops)

    in_painted_x = in_painting_mask(batch_x_random_crop, batch_y_targeted_crop)
    valid_in_x = in_painting_mask(valid_x, valid_y)

    batch_x_random_crop = rgb2gray(batch_x_random_crop)
    batch_x_random_crop = np.reshape(
        batch_x_random_crop, (batch_x_random_crop.shape[0], 224, 224, 1))

    valid_x = rgb2gray(valid_x)
    valid_x = np.reshape(valid_x, (valid_x.shape[0], 224, 224, 1))

    model = Unet(backbone_name='resnet18',
                 encoder_weights='imagenet',
                 decoder_block_type='transpose')  # build U-Net
    model.compile(optimizer='Adam', loss='mean_squared_error')
    model.summary()
    model.fit(x=in_painted_x,
              y=batch_x_random_crop,
              validation_data=(valid_in_x, valid_x),
              validation_steps=5,
              steps_per_epoch=5,
              epochs=1)
Example #5
def main():
    with open('/home/rbuddhad/NIH-XRAY/test_sml.txt') as f1:
        lines1 = f1.readlines()

    test_datagen = ImageDataGenerator()
    test_batches = test_datagen.flow_from_directory(TEST_DATASET_PATH,
                                                    target_size=(1024, 1024),
                                                    shuffle=True,
                                                    class_mode=None,
                                                    batch_size=BATCH_SIZE)

    test_crops_orig = crop_generator(test_batches, CROP_LENGTH, lines1)  # 224

    model = Unet(backbone_name='resnet18', encoder_weights=None)
    model.load_weights('best_model1.h5')
    model.compile(optimizer='Adam',
                  loss='mean_squared_error',
                  metrics=['mae', 'mean_squared_error'])
    model.summary()

    # callbacks = [EarlyStopping(monitor='val_loss', patience=10),
    #              ModelCheckpoint(filepath='best_model1.h5', monitor='val_loss', save_best_only=True),
    #              TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)]
    # model.fit_generator(generator=test_crops_orig,
    #                     steps_per_epoch=100,
    #                     validation_data=valid_crops_orig,
    #                     callbacks=callbacks,
    #                     validation_steps=200,
    #                     epochs=1000,
    #                     shuffle=True)
    # model.predict(generator=test_crops_orig,
    #               steps=2,
    #               verbose=1)

    # model.save('unet2.h5')
    predict = model.predict_generator(generator=test_crops_orig,
                                      steps=1,
                                      verbose=1)
    # predict = model.predict()
    print(predict.shape, 'predict_batch_size')
    for i in range(50):
        plt.imshow(predict[i, :, :, 0], cmap='gray', vmin=0, vmax=1)
        plt.show()
Example #6
def train(x_train: NpArray, x_valid: NpArray, y_train: NpArray, y_valid: NpArray,
          fold: int = -1) -> None:
    preprocessing_fn = get_preprocessing('resnet34')
    x_train = preprocessing_fn(x_train)
    x_valid = preprocessing_fn(x_valid)

    model = Unet(backbone_name='resnet34', encoder_weights='imagenet')
    model.compile('Adam', 'binary_crossentropy', metrics=[my_iou_metric])
    model.summary()

    model_name = make_output_path("models/fold%d.hdf5" % fold)
    model_checkpoint = ModelCheckpoint(model_name, monitor='val_my_iou_metric',
                                       mode='max', save_best_only=True, verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor='val_my_iou_metric', mode='max',
                                  factor=0.5, patience=5, min_lr=3e-6, verbose=1)

    model.fit(x_train, y_train, validation_data=[x_valid, y_valid], epochs=EPOCHS,
              batch_size=BATCH_SIZE, callbacks=[model_checkpoint, reduce_lr],
              verbose=VERBOSE)
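
For context: `get_preprocessing('resnet34')` returns the backbone-specific input normalizer from segmentation_models, so the arrays fed to fit() match what the ImageNet encoder was trained on. A hedged sketch (the import path varies between library versions):

from segmentation_models import get_preprocessing  # in 0.x releases: segmentation_models.backbones.get_preprocessing
preprocess = get_preprocessing('resnet34')
x_train = preprocess(x_train.astype('float32'))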
Example #7
def build_model():

    model = Unet(backbone_name='mobilenetv2',
                 input_shape=(224, 224, 3),
                 classes=1,
                 activation='sigmoid',
                 encoder_weights=weight_mobilenetv2_path,
                 encoder_freeze=True,
                 encoder_features='default',
                 decoder_block_type='upsampling',
                 decoder_filters=(256, 128, 64, 32, 16),
                 decoder_use_batchnorm=True)

    #model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.0001), metrics=['acc'])
    #model.compile(loss='binary_crossentropy', optimizer=SGD(lr=1e-4, momentum=0.9), metrics=['acc'])
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=0.0001),
                  metrics=['acc'])
    model.summary()
    return model
Example #8
def seg_model(preprocess_type, input_size, pretrained_weights, activation,
              loss):
    classes = 4 if activation == 'sigmoid' else 5

    model = Unet(preprocess_type,
                 encoder_weights='imagenet',
                 input_shape=input_size,
                 classes=classes,
                 activation=activation)

    adam = keras.optimizers.Adam(lr=1e-4)

    model.compile(optimizer=adam, loss=loss, metrics=[dice_coef])

    model.summary()

    if (pretrained_weights):
        model.load_weights(pretrained_weights)

    return model
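
A hedged call to the helper above (assumes `dice_coef` is defined in scope); note that a sigmoid activation yields 4 output classes, anything else 5:

model = seg_model('resnet34', input_size=(256, 256, 3), pretrained_weights=None,
                  activation='sigmoid', loss='binary_crossentropy')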
Example #9
    def get_model(self, net_name, input_shape, number_class, class_weight,
                  weight_path):
        from segmentation_models import pspnet  #PSPNet
        if net_name == 'psp':
            model_name = 'pspnet101_cityscapes'
            input_shape = (473, 473, 9)
            model = pspnet.PSPNet101(nb_classes=number_class,
                                     input_shape=input_shape,
                                     weights=model_name)
            model = model.model
        elif net_name == 'psp_50':
            input_shape = (473, 473, 9)
            model_name = 'pspnet50_ade20k'
            # output_mode='sigmoid'
            model = pspnet.PSPNet50(nb_classes=number_class,
                                    input_shape=input_shape,
                                    weights=model_name)
            model = model.model
        elif net_name == 'unet':
            input_shape = [256, 256, 9]
            from keras.layers import Input
            input_tensor = Input(shape=(input_shape[0], input_shape[1],
                                        input_shape[2]))
            model = Unet(input_shape=input_shape,
                         input_tensor=input_tensor,
                         backbone_name=params.BACKBONE,
                         encoder_weights=None,
                         classes=number_class)
        ##[1.0,10.0,10.0,20.,30.]
        weights = np.array(class_weight)
        #        loss = weighted_categorical_crossentropy(weights)
        loss = my_weighted_loss
        #        loss=params.SEMANTIC_LOSS
        optimizer = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        if (len(weight_path) > 2):
            model.load_weights(weight_path)
        model.compile(optimizer, loss=loss)

        model.summary()
        return model
Example #10
def buildUNetModel(pathToSave,
                   width,
                   height,
                   outputChannelCount,
                   backbone='resnet50'):
    base_model = Unet(backbone,
                      input_shape=(width, height, 3),
                      classes=outputChannelCount,
                      activation='sigmoid',
                      freeze_encoder=True)
    base_model.summary()
    input = Input(shape=(None, None, 1))
    # This adapter layer map grayscale to RGB space since all these public models 3 channels only.
    adapter = Conv2D(3, (1, 1),
                     trainable=False,
                     name="AdapterLayer",
                     kernel_initializer="ones")(input)
    out = base_model(adapter)

    model = Model(input, out, name=base_model.name)

    model.save(pathToSave)
    return model
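
The 1x1 "AdapterLayer" with an all-ones kernel copies the single grayscale channel into R, G, and B, so a 3-channel ImageNet encoder can consume 1-channel input unchanged. A hedged usage sketch (file name and sizes are placeholders):

import numpy as np
model = buildUNetModel('unet_gray.h5', 128, 128, outputChannelCount=1)
gray_batch = np.random.rand(1, 128, 128, 1).astype('float32')  # dummy 1-channel batch
mask = model.predict(gray_batch)  # the encoder still sees a 3-channel tensor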
Example #11
app.config['CACHE_TYPE'] = 'null'
cache.init_app(app)

# Load pretrained model
print('Loading the model')

model = Unet('resnet34')

graph = tf.get_default_graph()

model_filename = 'weights_model_IOU_DICE.h5'

model.load_weights(model_filename)

model.summary()


@app.route("/")
def index():
    return render_template('index.html')


@app.route("/", methods=['POST'])
def imagem_post():
    lat = request.form['lat']
    lon = request.form['lon']

    coor, contador = ML(lat, lon)
    url_tes = "./static/test.png?" + coor
    url_res = "./static/resultado.png?" + coor
Example #12
    def train_track3(self, net='unet', check_folder=params.CHECKPOINT_DIR):
        os.environ["CUDA_VISIBLE_DEVICES"] = params.GPUS
        if os.path.exists(check_folder) == 0:
            os.mkdir(check_folder)
        CHECKPOINT_DIR = check_folder
        CHECKPOINT_PATH = os.path.join(check_folder,
                                       'weights.{epoch:02d}.hdf5')

        data_folder = 'C:/TrainData/Track3/Train/patch_473/'
        img_train, dsm_train, label_train, img_val, dsm_val, label_val = load_all_data_files(
            data_folder)

        num_training_sample = len(img_train)
        batch_size = 1
        n_batch_per_epoch = num_training_sample // batch_size

        num_val_sample = len(img_val)
        n_batch_per_epoch_val = num_val_sample // batch_size

        nb_epoch = 200
        NUM_CATEGORIES = 5
        train_generator = input_generator_RGBH(img_train, dsm_train,
                                               label_train, batch_size)
        val_generator = input_generator_RGBH(img_val, dsm_val, label_val,
                                             batch_size)

        if net == 'psp':
            from segmentation_models import pspnet  #PSPNet
            model_name = 'pspnet101_cityscapes'
            input_shape = (473, 473, 9)
            model = pspnet.PSPNet101(nb_classes=NUM_CATEGORIES,
                                     input_shape=input_shape,
                                     weights=model_name)
            model = model.model
        elif net == 'unet':
            input_shape = [256, 256, 9]
            from keras.layers import Input
            input_tensor = Input(shape=(input_shape[0], input_shape[1],
                                        input_shape[2]))
            model = Unet(input_shape=input_shape,
                         input_tensor=input_tensor,
                         backbone_name=params.BACKBONE,
                         encoder_weights=None,
                         classes=2)

            model.load_weights(
                os.path.join('./checkpoint_track3-1/', 'weights.80.hdf5'))

        from keras.optimizers import Adam, SGD
        from keras.callbacks import ModelCheckpoint, CSVLogger
        #loss=params.SEMANTIC_LOSS
        #   loss=my_weighted_loss
        weights = np.array([1.0, 10.0, 10.0, 20., 30.])
        loss = weighted_categorical_crossentropy(weights)

        optimizer = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        model.compile(optimizer, loss=loss)
        model.summary()
        csv_logger = CSVLogger(os.path.join(CHECKPOINT_DIR, 'train.csv'))

        checkpoint = ModelCheckpoint(filepath=CHECKPOINT_PATH,
                                     monitor='loss',
                                     verbose=1,
                                     save_best_only=False,
                                     save_weights_only=True,
                                     mode='auto',
                                     period=params.MODEL_SAVE_PERIOD)
        callbacks = [csv_logger, checkpoint]

        model.fit_generator(train_generator,
                            steps_per_epoch=n_batch_per_epoch,
                            validation_data=val_generator,
                            validation_steps=n_batch_per_epoch_val,
                            epochs=nb_epoch,
                            callbacks=callbacks)
Example #13
    model = Xnet(backbone_name=config.backbone,
                 encoder_weights=config.weights,
                 decoder_block_type=config.decoder_block_type,
                 classes=config.nb_class,
                 activation=config.activation)
else:
    raise
model.compile(optimizer="Adam",
              loss=bce_dice_loss,
              metrics=["binary_crossentropy", mean_iou, dice_coef])

# plot_model(model, to_file=os.path.join(model_path, config.exp_name+".png"))
if os.path.exists(os.path.join(model_path, config.exp_name + ".txt")):
    os.remove(os.path.join(model_path, config.exp_name + ".txt"))
with open(os.path.join(model_path, config.exp_name + ".txt"), 'w') as fh:
    model.summary(positions=[.3, .55, .67, 1.],
                  print_fn=lambda x: fh.write(x + '\n'))

shutil.rmtree(os.path.join(logs_path, config.exp_name), ignore_errors=True)
if not os.path.exists(os.path.join(logs_path, config.exp_name)):
    os.makedirs(os.path.join(logs_path, config.exp_name))
tbCallBack = TensorBoard(
    log_dir=os.path.join(logs_path, config.exp_name),
    histogram_freq=0,
    write_graph=True,
    write_images=True,
)
tbCallBack.set_model(model)

early_stopping = keras.callbacks.EarlyStopping(
    monitor='val_loss',
    patience=config.patience,
Example #14
    return X_test

TEST_PATH = '../s2_data/data/test/'
X_test = Test2Npy(TEST_PATH)



backbone_name = 'efficientnetb3'
weight = '20201122-170215_Unet_efficientnetb3_model.h5'
model = Unet(backbone_name, classes=1, activation='sigmoid')

model_path ='../user_data/model/'  + weight
model.load_weights(model_path)

# model summary
print(model.summary(line_length=120))



TEST_MASK_PATH = '../prediction_result/images/'
predicted_test = model.predict(X_test)




# Save test mask
print('Get img name and path')
each_test_name = []
each_test_path = []

for i in tqdm(list({i.split('.')[0] for i in os.listdir(TEST_PATH)})):
Example #15
'''
    Unet:  from segmentation_models
    unet & unet_noskip:  implemented in the models module (warning when calculating the ERF)
'''
if (args.network == 'Unet'):

    m = Unet(classes=cl, input_shape=(256, 256, 3), activation='softmax')
#     m = get_unet()
elif (args.network == 'unet_noskip'):
    m = unet_noskip()
else:
    m = Unet('resnet18',
             classes=cl,
             input_shape=(256, 256, 3),
             activation='softmax')
m.summary()
'''Load data'''
train_x, train_y, val_x, val_y = load_data(frame_path, mask_path, 256, cl)
print('train_y.shape:', train_y.shape)
# val_y = np.eye(cl)[val_y]

NO_OF_TRAINING_IMAGES = train_x.shape[0]
NO_OF_VAL_IMAGES = val_x.shape[0]
print('train: val: test', NO_OF_TRAINING_IMAGES, NO_OF_VAL_IMAGES)
'''Data generator'''
#DATA AUGMENTATION
train_gen = trainGen(train_x, train_y, BATCH_SIZE)

#optimizer
if args.opt == 1:
    opt = Adam(lr=1e-4)
Example #16
def get_model(net_name, num_class, weight_path, input_shape=[]):
    from segmentation_models import pspnet  #PSPNet
    number_class = num_class

    if net_name == 'psp':
        model_name = 'pspnet101_cityscapes'
        input_shape = (473, 473, 3)
        model = pspnet.PSPNet101(nb_classes=number_class,
                                 input_shape=input_shape,
                                 weights=model_name)
        model = model.model
    elif net_name == 'psp_50':
        input_shape = (473, 473, 3)
        model_name = 'pspnet50_ade20k'
        #output_mode='sigmoid'
        model = pspnet.PSPNet50(nb_classes=number_class,
                                input_shape=input_shape,
                                weights=model_name)
        model = model.model

    elif net_name[-1:] == 'c':
        if net_name == 'unet_rgbh_c' or net_name == 'unet_rgbc_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 4]
        elif net_name == 'unet_rgb_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 3]
        elif net_name == 'unet_msi_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 3]
        elif net_name == 'unet_msih_c' or net_name == 'unet_msic_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 9]
        from keras.layers import Input
        input_tensor = Input(shape=(input_shape[0], input_shape[1],
                                    input_shape[2]))
        model = Unet(input_shape=input_shape,
                     input_tensor=input_tensor,
                     backbone_name=params.BACKBONE,
                     encoder_weights=None,
                     classes=number_class)
    elif net_name[-1:] == 'h':
        if net_name == 'unet_rgbh_h' or net_name == 'unet_rgbc_h':
            if len(input_shape) < 3:
                input_shape = [512, 512, 4]
        elif net_name == 'unet_rgb_h':
            if len(input_shape) < 3:
                input_shape = [512, 512, 3]
        elif net_name == 'unet_msi_h' or net_name == 'unet_msi_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 8]
        elif net_name == 'unet_msih_h' or net_name == 'unet_msic_h':
            if len(input_shape) < 3:
                input_shape = [512, 512, 9]
        from keras.layers import Input
        input_tensor = Input(shape=(input_shape[0], input_shape[1],
                                    input_shape[2]))
        model = UnetRegressor(input_shape=input_shape,
                              input_tensor=input_tensor,
                              backbone_name=params.BACKBONE)

    if net_name[-1:] == 'h':
        loss = no_nan_mse_evenloss
    elif number_class == 2:
        loss = my_weighted_loss
    elif number_class == 5:
        loss = my_weighted_loss_5_classes
    elif number_class == 3:
        loss = my_weighted_loss_3_classes
        #loss='categorical_crossentropy'
        #loss=my_tf_balanced_loss
    optimizer = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    if (len(weight_path) > 2):
        model.load_weights(weight_path)
        print('use pre-trained weights', weight_path)
    model.compile(optimizer, loss=loss)

    model.summary()
    return model, input_shape
Example #17
    def train(self, optimizer, loss, metrics, epoch=10, restore=False):
        self.initialize(restore)

        if restore:
            set_custom_object(loss, metrics)
        else:
            size = "{:}*{:}".format(cfgs.train_size_ny, cfgs.train_size_nx)
            detail_writer(cfgs.base_model_path, self.backbone_name, optimizer,
                          loss, size, cfgs.divide_stride, metrics)

        model = Unet(backbone_name=self.backbone_name,
                     input_shape=(cfgs.train_size_ny, cfgs.train_size_nx, 1),
                     freeze_encoder=False,
                     decoder_use_batchnorm=True,
                     classes=cfgs.n_classes,
                     activation="sigmoid",
                     encoder_weights=None)

        model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

        if restore:
            last_model = all_path(cfgs.last_model_path + "*")

            self.restore_from_epoch, path = find_best_path(last_model)
            if path != "":
                epoch = epoch + self.restore_from_epoch
                model.load_weights(path)

        model.summary()

        last_model = ModelCheckpoint(filepath=cfgs.last_model_path +
                                     cfgs.base_model_name,
                                     monitor="val_" + cfgs.save_model_monitor,
                                     verbose=1,
                                     save_best_only=False,
                                     save_weights_only=True,
                                     mode='max')

        best_model = ModelCheckpoint(filepath=cfgs.best_model_path +
                                     cfgs.base_model_name,
                                     monitor="val_" + cfgs.save_model_monitor,
                                     verbose=1,
                                     save_best_only=True,
                                     save_weights_only=False,
                                     mode='max')

        csv_logger = CSVLogger(cfgs.base_model_path + 'train_history.csv',
                               append=restore,
                               separator="/")

        pred_each_epoch = pc.Histories(cfgs, with_gt=True)

        time = datetime.datetime.now().strftime('%Y-%m-%d_%I-%M-%p')

        tb = TensorBoard(log_dir=cfgs.outpath_tensorboard + "{:}".format(time))

        if cfgs.lr_schedule_mode is False:
            callbacks_list = [
                best_model, last_model, csv_logger, pred_each_epoch, tb
            ]
        else:
            lr_decay = LearningRateScheduler(lr_scheduler)
            callbacks_list = [
                best_model, last_model, csv_logger, pred_each_epoch, tb,
                lr_decay
            ]

        model.fit_generator(generator=self.train_gen,
                            validation_data=self.val_gen,
                            steps_per_epoch=self.train_gen.__len__(),
                            epochs=epoch,
                            initial_epoch=self.restore_from_epoch,
                            callbacks=callbacks_list,
                            workers=4)

        return model
Example #18
def generate_compiled_segmentation_model(
        model_name,
        model_parameters,
        num_classes,
        loss,
        optimizer,
        weights_to_load=None,
        optimizing_threshold_class_metric=None,
        optimizing_class_id=None,
        optimizing_input_threshold=None,
        optimized_class_thresholds=None):

    # These are the only model, loss, and optimizer currently supported
    assert model_name == 'Unet'
    assert loss == 'cross_entropy'
    assert optimizer == 'adam'

    loss_fn = BinaryCrossentropyL()

    all_metrics = [
    ]  # one-hot versions are generally preferred for given metric
    # make first metric a copy of loss, to continually verify `val_loss` is correct
    if isinstance(loss_fn, BinaryCrossentropyL):
        all_metrics.append(BinaryCrossentropyM(name='binary_ce_metric'))
    else:
        all_metrics.append(CategoricalCrossentropyM(name='categ_ce_metric'))

    # standard thresholded version (default threshold is 0.5) also kept below, in case it's desired in certain scenario
    for class_num in range(num_classes + 1):
        if class_num == 0 and optimizing_threshold_class_metric is None:  # all class metrics
            # note, `loss_fn` for all classes placed before `all_metrics` in lineup of command window metrics and plots
            if not isinstance(loss_fn, BinaryCrossentropyL):
                all_metrics.extend([CategoricalCELoss()])
                all_metrics[1].name = str('categ_cross_entropy_sm')
            all_metrics.extend([
                AccuracyTfKeras(),
                # OneHotAccuracyTfKeras(),  # `global_threshold` built-in
                ClassBinaryAccuracyTfKeras(thresholds=global_threshold),
                # OneHotClassBinaryAccuracyTfKeras(thresholds=global_threshold),
                ClassBinaryAccuracySM(threshold=global_threshold),
                # OneHotClassBinaryAccuracySM(threshold=global_threshold),
                BinaryAccuracy(threshold=global_threshold),
                CategoricalAccuracy(),
                FalseNegatives(name='false_neg', thresholds=global_threshold),
                # OneHotFalseNegatives(name='false_neg_1H', thresholds=global_threshold),
                TrueNegatives(name='true_neg', thresholds=global_threshold),
                # OneHotTrueNegatives(name='true_neg_1H', thresholds=global_threshold),
                FalsePositives(name='false_pos', thresholds=global_threshold),
                # OneHotFalsePositives(name='false_pos_1H', thresholds=global_threshold),
                TruePositives(name='true_pos', thresholds=global_threshold),
                # OneHotTruePositives(name='true_pos_1H', thresholds=global_threshold),
                Recall(name='recall', thresholds=global_threshold),
                # OneHotRecall(name='recall_1H', thresholds=global_threshold),
                Precision(name='precision', thresholds=global_threshold),
                # OneHotPrecision(name='precision_1H', thresholds=global_threshold),
                FBetaScore(name='f1_score',
                           beta=1,
                           thresholds=global_threshold),
                # OneHotFBetaScore(name='f1_score_1H', beta=1, thresholds=global_threshold),
                IoUScore(name='iou_score', thresholds=global_threshold),
                # OneHotIoUScore(name='iou_score_1H', thresholds=global_threshold)
            ])
        elif class_num == 0 and optimizing_threshold_class_metric is not None:  # all class metrics
            continue
        else:  # per class metrics
            if optimizing_threshold_class_metric is not None:
                class_threshold = optimizing_input_threshold
                class_num = optimizing_class_id + 1
            elif optimized_class_thresholds is None:
                class_threshold = global_threshold
            else:
                class_threshold = optimized_class_thresholds[str(
                    'class' + str(class_num - 1))]

            all_metrics.append(CategoricalCELoss(class_indexes=class_num - 1))
            all_metrics[-1].name = str('class' + str(class_num - 1) +
                                       '_binary_cross_entropy')
            all_metrics.append(
                ClassBinaryAccuracySM(name=str('class' + str(class_num - 1) +
                                               '_binary_accuracy_sm'),
                                      class_indexes=class_num - 1,
                                      threshold=class_threshold))
            all_metrics.append(
                ClassBinaryAccuracyTfKeras(
                    name=str('class' + str(class_num - 1) +
                             '_binary_accuracy_tfkeras'),
                    class_id=class_num - 1,
                    thresholds=class_threshold))
            all_metrics.append(
                IoUScore(name=str('class' + str(class_num - 1) + '_iou_score'),
                         class_id=class_num - 1,
                         thresholds=class_threshold))
            all_metrics.append(
                FBetaScore(name=str('class' + str(class_num - 1) +
                                    '_f1_score'),
                           class_id=class_num - 1,
                           beta=1,
                           thresholds=class_threshold))
            all_metrics.append(
                Precision(name=str('class' + str(class_num - 1) +
                                   '_precision'),
                          class_id=class_num - 1,
                          thresholds=class_threshold))
            all_metrics.append(
                Recall(name=str('class' + str(class_num - 1) + '_recall'),
                       class_id=class_num - 1,
                       thresholds=class_threshold))

            if optimizing_threshold_class_metric is not None:
                break

        if num_classes == 1:
            break

    # strategy = tf.distribute.MirroredStrategy()
    # with strategy.scope():
    model = Unet(input_shape=(None, None, 1),
                 classes=num_classes,
                 **model_parameters)
    model.compile(optimizer=Adam(), loss=loss_fn, metrics=all_metrics)

    if weights_to_load:
        model.load_weights(weights_to_load)

    if optimizing_threshold_class_metric is None:
        print(model.summary())

    return model
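
A hedged invocation under the function's own asserts (model 'Unet', loss 'cross_entropy', optimizer 'adam'); the `model_parameters` dict is forwarded straight into Unet, so its keys are ordinary Unet kwargs:

model = generate_compiled_segmentation_model(
    'Unet',
    {'backbone_name': 'resnet34', 'encoder_weights': None},  # illustrative Unet kwargs
    num_classes=1,
    loss='cross_entropy',
    optimizer='adam')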
Example #19
train_gen, valid_gen = utils.preproc_data_with_masks(BATCH_SIZE, TARGET_SIZE)

# If loading actual numpy arrays, need:
# x_val = preprocess_input(x_val)

# define model
model = Unet(
    BACKBONE,
    encoder_weights='imagenet',
    classes=1,
    activation='sigmoid',
    encoder_freeze=True,
)
model.compile('Adam', loss=bce_jaccard_loss, metrics=[iou_score])
# model.compile('Adadelta', loss='binary_crossentropy')
print(model.summary())

callbacks = [
    ModelCheckpoint('model_weights.h5', monitor='val_loss', save_best_only=True, verbose=0)
]

# fit model
model.fit_generator(
    train_gen,
    steps_per_epoch=80,
    epochs=50,
    callbacks=callbacks,
    validation_data=valid_gen,
)
model.save("unet.h5")
Example #20
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    X_test[n] = img

print('Done importing images')

#Define IoU metric
def mean_iou(y_true, y_pred):
    prec = []
    for t in np.arange(0.5, 1.0, 0.05):
        y_pred_ = tf.to_int32(y_pred > t)
        score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
        K.get_session().run(tf.local_variables_initializer())
        with tf.control_dependencies([up_opt]):
            score = tf.identity(score)
        prec.append(score)
    return K.mean(K.stack(prec), axis=0)
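
For intuition, a rough NumPy analogue of the metric above: threshold the prediction at 0.5, 0.55, ..., 0.95 and average the resulting IoUs (the TF1 version computes a confusion-matrix mean over both classes, so its numbers differ slightly):

import numpy as np
def mean_iou_np(y_true, y_pred):
    scores = []
    for t in np.arange(0.5, 1.0, 0.05):
        p = y_pred > t
        union = np.logical_or(y_true, p).sum()
        inter = np.logical_and(y_true, p).sum()
        scores.append(inter / union if union else 1.0)
    return float(np.mean(scores))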

# Build U-Net model (transfer model version, must match model in transfer_test_loop.py)
logdir="logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard = TensorBoard(log_dir=logdir)
transfer_model = Unet(backbone_name=backbone, input_shape=(None,None,3), classes=1,
             activation='relu', encoder_weights='imagenet', encoder_freeze=True)
transfer_model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=[mean_iou])
transfer_model.summary()
earlystopper = EarlyStopping(patience=patience, verbose=1)
checkpointer = ModelCheckpoint((WorkingDir + '/' + model_name), verbose=1, save_best_only=True)
results = transfer_model.fit(X_train, Y_train, validation_split=val_split, batch_size=batch, epochs=epochs, callbacks=[earlystopper, checkpointer, tensorboard])

# Evaluate how well training went in tensorboard
%load_ext tensorboard
%tensorboard --logdir logs --host=127.0.0.1
Example #21
def train(backbone,
          load_pretrain,
          data_path,
          split_path,
          save_path,
          n_split=5,
          seed=960630,
          batch_size=4,
          fold=0):

    # split by all data
    get_train_val_split(data_path=data_path + 'image_set/',
                        save_path=split_path,
                        n_splits=n_split,
                        seed=seed)

    # split by folders
    # get_train_val_split(data_path=data_path+'images/',
    #                     save_path=split_path,
    #                     n_splits=n_split,
    #                     seed=seed)

    if load_pretrain is not None:
        model = load_model(load_pretrain, compile=False)
    elif backbone is not None:
        model = Unet(backbone, classes=1, encoder_weights='imagenet')
    else:
        model = Unet(classes=1, encoder_weights='imagenet')

    model.compile('Adam', loss=bce_jaccard_loss, metrics=[iou_score])
    model.summary()

    # split by all images
    train_data = Carotid_DataGenerator(
        df_path=split_path +
        'split/train_fold_{}_seed_{}.csv'.format(fold, seed),
        image_path=data_path + 'image_set/',
        mask_path=data_path + '/mask_set/',
        batch_size=batch_size,
        target_shape=(512, 512),
        augmentation=True,
        shuffle=False)
    val_data = Carotid_DataGenerator(
        df_path=split_path +
        'split/val_fold_{}_seed_{}.csv'.format(fold, seed),
        image_path=data_path + 'image_set/',
        mask_path=data_path + '/mask_set/',
        batch_size=batch_size,
        target_shape=(512, 512),
        augmentation=True,
        shuffle=False)

    # split by folder
    # train_data = Carotid_DataGenerator(
    #     df_path=split_path+'split/train_fold_{}_seed_{}.csv'.format(fold, seed),
    #     image_path=data_path + 'images/',
    #     mask_path=data_path + '/masks/',
    #     batch_size=batch_size,
    #     target_shape=(512, 512),
    #     augmentation=True,
    #     shuffle=False)
    # val_data = Carotid_DataGenerator(
    #     df_path=split_path + 'split/val_fold_{}_seed_{}.csv'.format(fold, seed),
    #     image_path=data_path + 'images/',
    #     mask_path=data_path + '/masks/',
    #     batch_size=batch_size,
    #     target_shape=(512, 512),
    #     augmentation=True,
    #     shuffle=False)

    callbacks = [
        EarlyStopping(monitor='val_loss',
                      patience=8,
                      verbose=1,
                      min_delta=1e-4),
        ReduceLROnPlateau(monitor='val_loss',
                          factor=0.1,
                          patience=4,
                          verbose=1,
                          epsilon=1e-4),
        ModelCheckpoint(monitor='val_loss',
                        filepath=save_path,
                        verbose=True,
                        save_best_only=True)
    ]

    model.fit_generator(train_data,
                        validation_data=val_data,
                        epochs=10,
                        callbacks=callbacks,
                        verbose=1)
Example #22
def main():
    # with open('/home/kunal/Desktop/Feature-Learning-for-Disease-Classification/temp_patch.txt') as f:
    # 	lines = f.readlines()

    with open('/home/rbuddhad/NIH-XRAY/train_sml.txt') as f1:
        lines1 = f1.readlines()

    with open('/home/rbuddhad/NIH-XRAY/validation_sml.txt') as f2:
        lines2 = f2.readlines()

    # print((lines1))

    train_datagen = ImageDataGenerator()
    train_batches = train_datagen.flow_from_directory(TRAIN_DATASET_PATH,
                                                      target_size=(1024, 1024),
                                                      shuffle=True,
                                                      class_mode=None,
                                                      batch_size=BATCH_SIZE)

    valid_datagen = ImageDataGenerator()
    valid_batches = valid_datagen.flow_from_directory(VALID_DATASET_PATH,
                                                      target_size=(1024, 1024),
                                                      shuffle=False,
                                                      class_mode=None,
                                                      batch_size=BATCH_SIZE)

    train_crops_orig = crop_generator(train_batches, CROP_LENGTH,
                                      lines1)  # 224
    valid_crops_orig = crop_generator(valid_batches, CROP_LENGTH, lines2)

    # batch_x_random_crop, batch_y_targeted_crop = next(train_crops)
    # valid_x, valid_y = next(valid_crops)
    # print(train_crops_orig.shape)
    # train_crops_orig=np.reshape(train_crops_orig,(train_crops_orig.shape[0]*train_crops_orig.shape[1],224,224,3))
    # print(train_crops_orig.shape)
    # in_painted_x= out_painting_mask(train_crops_orig)
    # valid_in_x=in_painting_mask(valid_x,valid_y)

    # train_crops_1_ch=rgb2gray(train_crops_orig)
    # train_crops_1_ch=np.reshape(train_crops_1_ch,(train_crops_1_ch.shape[0],224,224,1))

    # valid_x=rgb2gray(valid_x)
    # valid_x=np.reshape(valid_x,(valid_x.shape[0],224,224,1))

    # model = Unet(backbone_name='resnet18', encoder_weights='imagenet', decoder_block_type='transpose') # build U-Net
    model = Unet(backbone_name='resnet18', encoder_weights=None)  # build U-Net
    model.load_weights('best_model.h5')
    model.compile(optimizer='Adam', loss='mean_squared_error')
    model.summary()
    # print('inpaited',in_painted_x.shape)
    # print('1 channel y',train_crops_1_ch.shape)
    # print(in_painted_x.shape)
    # print(train_crops_1_ch.shape)

    callbacks = [
        EarlyStopping(monitor='val_loss', patience=70),
        ModelCheckpoint(filepath='best_model70_withgray_finetuned.h5',
                        monitor='val_loss',
                        save_best_only=True),
        TensorBoard(log_dir='./logs',
                    histogram_freq=0,
                    write_graph=True,
                    write_images=True)
    ]
    model.fit_generator(generator=train_crops_orig,
                        steps_per_epoch=100,
                        validation_data=valid_crops_orig,
                        callbacks=callbacks,
                        validation_steps=200,
                        epochs=300)
    model.save('outpaint70_withgray_finetuned.h5')
Example #23
    def workflow(self):
        # define model
        model = Unet(backbone_name='resnet50', encoder_weights='imagenet')
        adam = keras.optimizers.Adam(lr=self.cfgs["LEARNING_RATE"])
        model.summary()
        # model.compile('Adam', sigmoid_cross_entropy_balanced)
        model.compile(
            'Adam',
            # cross_entropy_balanced
            loss=self.define_loss(),
            # 'binary_crossentropy'
        )

        test_images, test_ulabels, test_elabels, test_rlabels, filelist = self.dl.get_test_data(
        )

        if self.cfgs["RESTORE"]:
            model.load_weights(
                os.path.join(self.cfgs["SAVE_DIR"], "weights", "epoch150.h5"))
            print("RETORE SUCCESSFULLY!")

        callback = TensorBoard('./graph')
        callback.set_model(model)
        train_names = [
            'loss', 'u_outputs_sig_loss', 'e_fuse_sig_loss', 'r_fuse_sig_loss',
            'fuse_dir_loss'
        ]

        current_learning_rate = self.cfgs["LEARNING_RATE"]
        K.set_value(model.optimizer.lr, current_learning_rate)
        for i in range(self.cfgs["EPOCH"]):
            print("[I] EPOCH {}".format(i))
            # TRAIN
            for j in tqdm(range(self.cfgs["STEP"])):
                images_batch, ulabels_batch, elabels_batch, rlabels_batch, d_labels_batch = self.dl.next_batch(
                    "train")
                Logs = model.train_on_batch(
                    images_batch,
                    self.define_train_y(ulabels_batch, elabels_batch,
                                        rlabels_batch, d_labels_batch),
                )

            write_log(callback, train_names, Logs, i)
            if i % self.cfgs["INTERVAL"] == 0 and i >= 0:

                # TEST:
                results = model.predict(test_images, batch_size=10, verbose=0)
                logits = results[-1]
                r_logits = results[-2]

                # result analyse and show
                rlt_worker = ResultManager(i, logits, test_ulabels)
                # r_analyst.compute_roc(savename='roc_vegas_{}.csv'.format(i))
                # rlt_worker_r = ResultManager(i, r_logits, test_rlabels)

                rlt_worker.run()
                # rlt_worker_r.run()

                for ii in range(results[0].shape[0]):
                    #                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/images/{}'.format(filelist[ii][0])),
                    #                                 test_images[ii, :] * 255)
                    #                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/labels/{}'.format(filelist[ii][1])),
                    #                                 test_ulabels[ii, :] * 255)
                    #                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/labels_e/{}'.format(filelist[ii][1])),
                    #                                 test_elabels[ii, :] * 255)
                    #                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/labels_r/{}'.format(filelist[ii][1])),
                    #                                 test_rlabels[ii, :] * 255)

                    #cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/preds/{}'.format(filelist[ii][1])),
                    #           results[-1][ii, :])
                    pred_threshold = threshold(results[-1][ii, :])
                    cv2.imwrite(
                        os.path.join(
                            self.cfgs["SAVE_DIR"],
                            'main_outputs/preds_threshold/{}'.format(
                                filelist[ii][1])), pred_threshold * 255)


#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_e1/{}'.format(filelist[ii][1])),
#                                 threshold(results[1][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_e2/{}'.format(filelist[ii][1])),
#                                 threshold(results[2][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_e3/{}'.format(filelist[ii][1])),
#                                 threshold(results[3][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_e4/{}'.format(filelist[ii][1])),
#                                 threshold(results[4][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_e5/{}'.format(filelist[ii][1])),
#                                 threshold(results[5][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_r1/{}'.format(filelist[ii][1])),
#                                 threshold(results[6][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_r2/{}'.format(filelist[ii][1])),
#                                 threshold(results[7][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_r3/{}'.format(filelist[ii][1])),
#                                 threshold(results[8][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_r4/{}'.format(filelist[ii][1])),
#                                 threshold(results[9][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_r5/{}'.format(filelist[ii][1])),
#                                 threshold(results[10][ii, :]) * 255)

# SAVE WEIGHTS
                current_learning_rate = current_learning_rate * self.cfgs[
                    "LEARNING_RATE_DECAY"]
                K.set_value(model.optimizer.lr, current_learning_rate)
                print('[I] Current Learning Rate: ', current_learning_rate)
                model_json = model.to_json()
                with open("model.json", "w") as json_file:
                    json_file.write(model_json)
                model.save_weights(
                    os.path.join(self.cfgs["SAVE_DIR"],
                                 "epoch{}.h5".format(i)))