Example #1
#    epoch_log=epoch_log,
#    x_test=x_test,
#    on_epoch_end=lambda epoch, logs, epoch_log, x_test: eval_epoch(epoch_log, x_test, epoch, logs),
#    on_train_end=lambda logs: epoch_log.close()
#)


#def eval_epoch(epoch_log, x_test, epoch, logs):
#    """ Write loss to a log and predict on sample images """
#    epoch_log.write(
#        json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n')
#    cv2.imwrite("../training_visualisations/test_epoch_{}.png".format(epoch), model.predict(x_test[1]))
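# The commented-out fragment above logs the epoch loss and saves a sample
# prediction each epoch. A minimal self-contained sketch of that idea as a
# Keras callback (an assumption; `PredOnEpochEnd` used in the fit call
# below is the project's own, richer implementation):
import json

import cv2
import numpy as np
from tensorflow import keras


class EpochLossLogger(keras.callbacks.Callback):
    """Append per-epoch loss to a JSON-lines file and save one prediction."""

    def __init__(self, log_path, x_test, vis_dir):
        super().__init__()
        self.log_path = log_path  # hypothetical, e.g. 'epoch_log.jsonl'
        self.x_test = x_test      # test images, shape (N, H, W, C)
        self.vis_dir = vis_dir    # e.g. '../training_visualisations'

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        with open(self.log_path, 'a') as f:
            f.write(json.dumps({'epoch': epoch, 'loss': logs.get('loss')}) + '\n')
        pred = self.model.predict(self.x_test[:1])[0]  # (H, W, n_classes)
        cv2.imwrite('{}/test_epoch_{}.png'.format(self.vis_dir, epoch),
                    (pred * 255).astype(np.uint8))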


# Define model
model = sm.Unet(BACKBONE, encoder_weights='imagenet', input_shape=(None, None, img_n_channels))
model.compile('Adam', loss=sm.losses.bce_jaccard_loss, metrics=[sm.metrics.iou_score])


print("fitting model...")
# fit model
model.fit_generator(
    generator=training_generator,
    epochs=200,
    steps_per_epoch=50,
    validation_data=validation_generator,
    validation_steps=10,
    callbacks=[PredOnEpochEnd(logs_dir, x_train=train_sample, x_test=test_sample,
                              pred_path=train_vis_dir, run_id=run_id), model_save_checkpoint],
    use_multiprocessing=True
)
Example #2
import keras
import cv2
import pandas as pd
from tta_wrapper import tta_segmentation
import segmentation_models as sm
from data import DataGenerator
from utils import post_process, mask2rle

sub_df = pd.read_csv('../data/sample_submission.csv')
sub_df['ImageId'] = sub_df['Image_Label'].apply(lambda x: x.split('_')[0])
test_imgs = pd.DataFrame(sub_df['ImageId'].unique(), columns=['ImageId'])
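# `mask2rle` and `post_process` come from the project's own `utils` module
# and are not shown here. For reference, a common Kaggle-style run-length
# encoder (an assumption about what `mask2rle` does; the real helper may
# differ) looks like this:
import numpy as np

def rle_encode_sketch(mask):
    """RLE-encode a binary mask, column-major, as 'start length start length ...'."""
    pixels = mask.T.flatten()
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1  # 1-indexed change points
    runs[1::2] -= runs[::2]                            # turn end indices into lengths
    return ' '.join(str(x) for x in runs)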

# Predict on Test Set
model1 = sm.Unet('efficientnetb4',
                 classes=4,
                 input_shape=(320, 480, 3),
                 activation='sigmoid')
model1.load_weights('../efn_unet.h5')
model2 = sm.Unet('resnet18',
                 classes=4,
                 input_shape=(320, 480, 3),
                 activation='sigmoid')
model2.load_weights('../resnet18_unet.h5')
model3 = sm.Unet('densenet121',
                 classes=4,
                 input_shape=(320, 480, 3),
                 activation='sigmoid')
model3.load_weights('../dense_unet.h5')

model1 = tta_segmentation(model1, h_flip=True, h_shift=(-10, 10), merge='mean')
model2 = tta_segmentation(model2, h_flip=True, h_shift=(-10, 10), merge='mean')
model3 = tta_segmentation(model3, h_flip=True, h_shift=(-10, 10), merge='mean')
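# With the three TTA-wrapped models in hand, a plausible next step (not
# shown in this snippet) is to average their per-pixel probabilities and
# threshold the result; `ensemble_predict` below is a hypothetical helper:
import numpy as np

def ensemble_predict(models, batch, threshold=0.5):
    """Mean of the models' sigmoid outputs, binarized at `threshold`."""
    probs = np.mean([m.predict(batch) for m in models], axis=0)
    return (probs > threshold).astype(np.uint8)

# usage (hypothetical): masks = ensemble_predict([model1, model2, model3], x_batch)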
Example #3
        n += 1
    return 'weights/best_weights_{}.hdf5'.format(n)
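# The `verify_model` helper is truncated above; a plausible full version
# (an assumption about the missing lines) returns the first unused
# weights path:
import os

def verify_model():
    """Return 'weights/best_weights_{n}.hdf5' for the first free n."""
    n = 1
    while os.path.exists('weights/best_weights_{}.hdf5'.format(n)):
        n += 1
    return 'weights/best_weights_{}.hdf5'.format(n)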


callbacks = [
    ModelCheckpoint(monitor='val_loss',
                    filepath=verify_model(),
                    save_best_only=True,
                    save_weights_only=True,
                    verbose=1),
    TensorBoard(log_dir='logs')
]

data_train = list_dir('input/train')
data_valid = list_dir('input/valid')

model = sm.Unet('resnet101', encoder_weights='imagenet')
model.compile(
    'Adam',
    loss=sm.losses.bce_jaccard_loss,
    metrics=[sm.metrics.iou_score],
)

model.fit(train_generator(),
          callbacks=callbacks,
          verbose=1,
          epochs=100,
          steps_per_epoch=np.ceil(float(len(data_train)) / float(16)),
          validation_data=valid_generator(),
          validation_steps=np.ceil(float(len(data_valid)) / float(16)))
Example #4
########################################################################

BACKBONE1 = 'resnet34'
preprocess_input1 = sm.get_preprocessing(BACKBONE1)

# preprocess input
X_train1 = preprocess_input1(X_train)
X_test1 = preprocess_input1(X_test)
#####################################################################
###Model 1
#Using same backbone for both models

# define model (change to Unet or Linknet as needed)
model1 = sm.Unet(BACKBONE1,
                 encoder_weights='imagenet',
                 classes=n_classes,
                 activation=activation)

# compile the keras model with the defined optimizer, loss and metrics
model1.compile(optim, total_loss, metrics=metrics)

#model1.compile(optimizer='adam', loss='categorical_crossentropy', metrics=metrics)

model1.summary()

start1 = datetime.now()

history1 = model1.fit(X_train1,
                      y_train_cat,
                      batch_size=8,
                      epochs=50,
Example #5
if gpus:
    # Restrict TensorFlow to only use the first GPU
    try:
        tf.config.experimental.set_visible_devices(gpus[GPU_GLOBAL], 'GPU')
    except RuntimeError as e:
        # Visible devices must be set at program startup
        print(e)
print(gpus)
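# Optional addition (not in the original): enabling memory growth makes
# TensorFlow allocate GPU memory on demand instead of grabbing it all up front.
for gpu in gpus:
    try:
        tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
        # like set_visible_devices, this must be called at program startup
        print(e)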

##################################################

BACKBONE = 'resnet34'
preprocess_input = sm.get_preprocessing(BACKBONE)

# define model
model = sm.Unet(BACKBONE, encoder_weights='imagenet')
model.compile(
    'Adam',
    loss=sm.losses.bce_jaccard_loss,
    metrics=[sm.metrics.iou_score],
)

inicio = time.time()

####### training
model.fit(
    x=x_train,
    y=y_train,
    batch_size=BATCH_SIZE_GLOBAL,
    epochs=NUMERO_EPOCAS_GLOBAL,
    validation_data=(x_val, y_val),
Example #6
! unzip Val.zip

! pip install -U segmentation-models

import segmentation_models as sm
import keras
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
keras.backend.set_image_data_format('channels_last')
import os

!nvidia-smi

############ online learning ##########################
############ run the block at the bottom first ##########
model = sm.Unet('resnet34', classes=1, activation='sigmoid', input_shape=(None, None, 3))
# for layer in model.layers:
#   layer.trainable=True
#   layerName=str(layer.name)
#   if layerName.startswith("decoder") or layerName.startswith("Final_"):
#     layer.trainable=True
#   else: layer.trainable=False       #freeze/unfreeze the encoder

folder = 'kite-surf'
sgd = optimizers.SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,
              loss=sm.losses.bce_jaccard_loss,
              metrics=[sm.metrics.iou_score])
load_shot(folder,'00')
train_generator, val_generator = val_loader(folder)
model.load_weights('drive/My Drive/65iou.h5') #load parent model
history = model.fit_generator(
Example #7
def TheModel(batch_size_, restore):

    dataset = input_fn(batch_size_, datasettype='train')
    datasetval = input_fn(batch_size_, datasettype='validation')

    train_set = 'PASCAL'
    set_partition = 1
    if train_set == 'LIP':
        t_steps = int(set_partition * (30462 / batch_size_))
        v_steps = int(set_partition * (10000 / batch_size_))
    elif train_set == 'PASCAL':
        t_steps = int(set_partition * (1716 / batch_size_))
        v_steps = int(set_partition * (1817 / batch_size_))

    BACKBONE = 'efficientnetb3'
    CLASSES = ['background', 'torso', 'head', 'arms', 'hands', 'legs', 'feet']
    LR = 0.0001
    EPOCHS = 10

    # define network parameters
    n_classes = (len(CLASSES) + 1)  # case for binary and multiclass segmentation
    activation = 'softmax'

    optim = k.optimizers.Adam(LR)
    #create model
    model = sm.Unet(BACKBONE, classes=n_classes, activation=activation)

    # Segmentation models losses can be combined with '+' and scaled by an integer or float factor
    # set class weights for dice_loss (car: 1.; pedestrian: 2.; background: 0.5;)
    #dice_loss = sm.losses.DiceLoss(class_weights=np.array([0.3, 1, 1, 1, 1, 1, 1]))
    #focal_loss = sm.losses.CategoricalFocalLoss()
    #total_loss = dice_loss + focal_loss
    total_loss = sm.losses.categorical_focal_dice_loss

    # actually, total_loss can be imported directly from the library; the example above just shows how losses can be combined
    # total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss

    metrics = [
        sm.metrics.IOUScore(threshold=0.5),
        sm.metrics.FScore(threshold=0.5)
    ]

    # compile the keras model with the defined optimizer, loss and metrics
    model.compile(optim, total_loss, metrics)
    customcallbacks = [
        k.callbacks.ModelCheckpoint('./best_model.h5',
                                    save_weights_only=True,
                                    save_best_only=True,
                                    mode='min'),
        k.callbacks.ReduceLROnPlateau(),
    ]

    # train model
    history = model.fit_generator(
        dataset,
        steps_per_epoch=t_steps,
        epochs=EPOCHS,
        callbacks=customcallbacks,
        validation_data=datasetval,
        validation_steps=v_steps,
    )

    # Plot training & validation iou_score values
    plt.figure(figsize=(30, 5))
    plt.subplot(121)
    plt.plot(history.history['iou_score'])
    plt.plot(history.history['val_iou_score'])
    plt.title('Model iou_score')
    plt.ylabel('iou_score')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')

    # Plot training & validation loss values
    plt.subplot(122)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
    #  #create model
    #  model = sm.Unet(BACKBONE, classes=n_classes, activation=activation)
    #  #Get training dataset
    #  dataset=input_fn(batch_size_,datasettype = 'train')
    #  #Get Validation dataset
    #  datasetval = input_fn(batch_size_,datasettype = 'validation')
    #  #Defining the resulting metrics
    #  metrics =[fn1,fn2,fn3,fn4,fn5,fn6,fn7]
    #  train_set = 'PASCAL' # LIP or PASCAL
    #  set_partition = 1
    #  if train_set == 'LIP':
    #      t_steps = int(set_partition*(30462/batch_size_))
    #      v_steps = int(set_partition*(10000/batch_size_))
    #  elif train_set == 'PASCAL':
    #      t_steps = int(set_partition*(1716/batch_size_))
    #      v_steps = int(set_partition*(1817/batch_size_))
    #  #Restoring model possibility
    #  if restore == True:
    #      model = tf.contrib.saved_model.load_keras_model('./saved_models/1561026721')
    #  else:
    #      #The Model with Resnet50, BatchNorm, SGD optimizer and Categorical Cross-entropy Loss
    #      model=tf.keras.models.Sequential()
    #      inputs=tf.keras.layers.Input(shape=(250,250,3))
    #      model.add(ResNet50(include_top=False, weights='imagenet',input_tensor=inputs, pooling=None, classes = 7))
    #      model.add(tf.keras.layers.Conv2D(7,(1,1)))
    #      model.add(layers.BatchNormalization())
    #      model.add(tf.keras.layers.Lambda(lambda x: tf.image.resize_bilinear(x,(250,250))))
    #      model.summary()
    #  learning_rate = 0.002
    #  sgd = tf.keras.optimizers.SGD(learning_rate, momentum=0.8, nesterov=True)
    #  model.compile(optimizer=sgd,   # SGD with momentum usually gives good enough results without too many parameters, as is the case for the Adam optimizer
    #                loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits = True),
    #                metrics=metrics)
    #  history=model.fit(dataset,validation_data = datasetval, epochs=10, steps_per_epoch=t_steps,validation_steps=v_steps) # 2975 Training Images - 1525 Test Images - 500 validation Images
    #  #Saving the model after training
    #  saved_model_path = tf.contrib.saved_model.save_keras_model(model, "./saved_models/saved_model_test7bigsparse")

    return model
Example #8
File: train.py Project: Bill-Bi/ADAS
    FScore(threshold=cfg.metric_threshold, per_image=cfg.metric_per_image),
    Precision(threshold=cfg.metric_threshold, per_image=cfg.metric_per_image),
    Recall(threshold=cfg.metric_threshold, per_image=cfg.metric_per_image),
    TruePositives(thresholds=cfg.metric_threshold),
    TrueNegatives(thresholds=cfg.metric_threshold),
    FalsePositives(thresholds=cfg.metric_threshold),
    FalseNegatives(thresholds=cfg.metric_threshold)
]

# with mirrored_strategy.scope():
model = sm.Unet(backbone_name=cfg.backbone_name,
                input_shape=cfg.input_shape,
                classes=n_classes,
                activation='sigmoid' if n_classes == 1 else 'softmax',
                weights=None,
                encoder_weights=cfg.encoder_weights,
                encoder_freeze=cfg.encoder_freeze,
                encoder_features=cfg.encoder_features,
                decoder_block_type=cfg.decoder_block_type,
                decoder_filters=cfg.decoder_filters,
                decoder_use_batchnorm=True)

model.compile(optim, loss, metrics)

callbacks = [
    ModelCheckpoint(
        cfg.base_model_path + ".{epoch:03d}-{val_loss:.6f}.h5",
        monitor='val_loss',
        # verbose=1,
        save_best_only=(not cfg.SAVE_ALL_MODELS),
        save_weights_only=False,
Example #9
BACKBONE = 'densenet121'
# 'densenet121' is not bad
#'resnet101'
#'resnet34'
#'efficientnetb6'
#'inceptionresnetv2'
BATCH_SIZE = 16
CLASSES = ['dolins']
LR = 0.0001  # 0.0001 works best for Adam
EPOCHS = 100

preprocess_input = sm.get_preprocessing(BACKBONE)

#create model
model = sm.Unet(BACKBONE, classes=1, activation="sigmoid")

# load best weights
model.load_weights("/content/drive/MyDrive/Dolin_Segmentation/results/densenet121/best_model.h5")

image_path="/content/drive/MyDrive/Dolin_Segmentation/test_image.png"
c=cv2.imread(image_path)
plt.imshow(c)
plt.show()
print(c.shape)
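# The snippet breaks off here; a sketch of single-image inference (an
# assumption, not the original code). U-Net decoders need 32-divisible
# spatial dims, and the input should go through the backbone preprocessing:
import numpy as np

img = cv2.cvtColor(c, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (480, 320))  # (width, height), both divisible by 32
x = preprocess_input(img.astype(np.float32))
pred = model.predict(np.expand_dims(x, axis=0))[0, ..., 0]
plt.imshow(pred > 0.5, cmap='gray')
plt.show()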

#----------------------------------------------------------------------------------------------------
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
Example #10
    n_classes=4
)

val_generator = DataGenerator(
    val_idx, 
    df=mask_count_df,
    target_df=train_df,
    batch_size=BATCH_SIZE, 
    reshape=(320, 480),
    augment=False,
    n_channels=3,
    n_classes=4
)
model = sm.Unet(
    'resnet34', 
    classes=4,
    input_shape=(320, 480, 3),
    activation='sigmoid'
)
model.compile(optimizer=Nadam(lr=0.0002), loss=bce_dice_loss, metrics=[dice_coef])
model.summary()
checkpoint = ModelCheckpoint('model.h5', save_best_only=True)

history = model.fit_generator(
    train_generator,
    validation_data=val_generator,
    callbacks=[checkpoint],
    epochs=30
)
with open('history.json', 'w') as f:
    json.dump(history.history, f)
Example #11
BACKBONE = 'resnet34'
preprocess_input = sm.get_preprocessing(BACKBONE)

# load your data
x_train, y_train = ld_data(
    "/Users/abhinav/Documents/Foot Data and Manipulations/Training/AugmentedDataset/"
), ld_data(
    "/Users/abhinav/Documents/Foot Data and Manipulations/Training/AugmentedAnnotations/"
)

# preprocess input
# x_train = preprocess_input(x_train)
# x_val = preprocess_input(x_val)

# define model
model = sm.Unet(BACKBONE, classes=2, encoder_weights='imagenet')
model.compile(
    'Adam',
    loss=sm.losses.bce_jaccard_loss,
    metrics=[sm.metrics.iou_score],
)

# fit model
# if you use data generator use model.fit_generator(...) instead of model.fit(...)
# more about `fit_generator` here: https://keras.io/models/sequential/#fit_generator
model.fit(
    x=x_train,
    y=y_train,
    #batch_size=16,
    epochs=100,
    steps_per_epoch=5
Example #12
File: train_2.py Project: boeselfr/u-net
def unet_train():

    BACKBONE = 'efficientnetb0'
    model = sm.Unet(BACKBONE, encoder_weights='imagenet', classes=1)
    image_datagen = ImageDataGenerator(
        **aug_dict, preprocessing_function=normalize_classif)
    mask_datagen = ImageDataGenerator(**aug_dict,
                                      preprocessing_function=normalize_classif)

    val_image_datagen = ImageDataGenerator(
        preprocessing_function=normalize_classif)
    val_mask_datagen = ImageDataGenerator(
        preprocessing_function=normalize_classif)
    seed = 1
    STEP_SIZE_TRAIN = TRAIN_SIZE // BATCH_SIZE
    STEP_SIZE_VALID = VALID_SIZE // BATCH_SIZE

    image_generator = image_datagen.flow_from_directory(
        join(data_path, 'classification'),
        target_size=IMG_SIZE,
        classes=['training_images'],
        class_mode=None,
        seed=seed,
        batch_size=BATCH_SIZE,
        color_mode='rgb')
    mask_generator = mask_datagen.flow_from_directory(
        join(data_path, 'classification'),
        classes=['training_labels'],
        target_size=IMG_SIZE,
        class_mode=None,
        seed=seed,
        batch_size=BATCH_SIZE,
        color_mode='rgb')

    val_image_generator = val_image_datagen.flow_from_directory(
        data_path,
        classes=['validation_images_cancer'],
        target_size=IMG_SIZE,
        class_mode=None,
        seed=seed,
        batch_size=BATCH_SIZE,
        color_mode='rgb')
    val_mask_generator = val_mask_datagen.flow_from_directory(
        data_path,
        classes=['validation_labels_cancer'],
        target_size=IMG_SIZE,
        class_mode=None,
        seed=seed,
        batch_size=BATCH_SIZE,
        color_mode='rgb')

    train_generator = zip(image_generator, mask_generator)
    val_generator = zip(val_image_generator, val_mask_generator)
    model_checkpoint = ModelCheckpoint('unet_16_efc_all.hdf5',
                                       monitor='loss',
                                       save_best_only=True,
                                       period=1)
    tensorboard_callback = TensorBoard(log_dir='./logs/unet_16')
    drop_alpha = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5)
    model.compile(optimizer=Adam(lr=learning_rate),
                  loss=wbce_loss,
                  metrics=my_metrics)
    model.fit(train_generator,
              validation_data=val_generator,
              epochs=EPOCHS,
              verbose=1,
              steps_per_epoch=STEP_SIZE_TRAIN,
              validation_steps=STEP_SIZE_VALID,
              callbacks=[model_checkpoint, tensorboard_callback, drop_alpha])
Example #13
if not os.path.exists(y_valid_dir):
    raise ValueError(
        'The dataset path \'{dataset_path}\' does not contain a folder named validation_annotated.'
        .format(dataset_path=args.dataset_path))

# Build the model
backbones = ['efficientnetb5', 'mobilenetv2', 'efficientnetb3']

if (args.backbone not in backbones):
    raise ValueError(
        'The backbone \'{backbone}\' does not exist. Choose one of: {backbones}.'
        .format(backbone=args.backbone, backbones=', '.join(backbones)))

model = sm.Unet(args.backbone,
                input_shape=(480, 320, 3),
                classes=1,
                activation='sigmoid',
                decoder_filters=(128, 64, 32, 16, 8))

preprocess_input = sm.get_preprocessing(args.backbone)

optimizer = keras.optimizers.Adam(args.learning_rate)

model.compile(optimizer, sm.losses.binary_focal_dice_loss,
              [sm.metrics.IOUScore(threshold=0.5)])

# Dataset for training images
train_dataset = Dataset(
    x_train_dir,
    y_train_dir,
    classes=['Avalanche'],
Example #14
batch_size = 8
myGene = trainGenerator(batch_size,
                        data_dir,
                        'train_images',
                        'train_labels',
                        data_gen_args_val,
                        save_to_dir=None)
myGeneval = trainGenerator(batch_size,
                           data_dir,
                           'val_images',
                           'val_labels',
                           data_gen_args_val,
                           save_to_dir=None)
model = sm.Unet('efficientnetb4',
                input_shape=(512, 512, 3),
                classes=1,
                activation='sigmoid',
                encoder_weights='imagenet')
model.summary()
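# `dice_coef`, used as a metric in the compile call below, is a project
# helper that is not shown; a common Keras implementation (an assumption)
# looks like this:
from tensorflow.keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    """Soft Dice coefficient over the flattened masks."""
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)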
model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-3),
              loss=sm.losses.DiceLoss(),
              metrics=[sm.metrics.iou_score, dice_coef, 'accuracy'])
model_checkpoint = ModelCheckpoint('segmentation.h5',
                                   monitor='val_loss',
                                   verbose=1,
                                   save_best_only=True)
model.fit_generator(myGene,
                    validation_data=myGeneval,
                    epochs=100,
                    callbacks=[model_checkpoint],
                    steps_per_epoch=int((2075 * 2) // batch_size),
Example #15
File: train.py Project: scrssys/SCRS_RS_AI
if __name__ == '__main__':

    if not os.path.isdir(config.train_data_path):
        print("train data does not exist in the path:\n {}".format(
            config.train_data_path))
        sys.exit(-1)

    if len(config.band_list) == 0:
        print("Error: band_list should not be empty!")
        sys.exit(-2)
    input_layer = (config.img_w, config.img_h, len(config.band_list))

    if 'unet' in config.network:
        model = sm.Unet(backbone_name=config.BACKBONE,
                        input_shape=input_layer,
                        classes=config.nb_classes,
                        activation=config.activation,
                        encoder_weights=config.encoder_weights)
    elif 'pspnet' in config.network:
        model = sm.PSPNet(backbone_name=config.BACKBONE,
                          input_shape=input_layer,
                          classes=config.nb_classes,
                          activation=config.activation,
                          encoder_weights=config.encoder_weights,
                          psp_dropout=config.dropout)
    elif 'fpn' in config.network:
        model = sm.FPN(backbone_name=config.BACKBONE,
                       input_shape=input_layer,
                       classes=config.nb_classes,
                       activation=config.activation,
                       encoder_weights=config.encoder_weights,
Example #16
           height_shift_range=0.2,
           shear_range=0.15,
           horizontal_flip=True,
           fill_mode="nearest")

with h5py.File("dataset/Training/bgr_comp_hand_segmentation_data.h5",
               "r") as hdf:
    data = hdf.get("images")
    images = np.array(data)

    data2 = hdf.get("masks")
    annotations = np.array(data2)

# model = unet2(input_size=(128, 128, 3))

model = sm.Unet(BACKBONE, classes=1, input_shape=(256, 256, 3))

# model = sm.FPN(backbone_name=BACKBONE, encoder_weights='imagenet',
#                activation='sigmoid', classes=1, pyramid_dropout=0.5)
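# `mean_iou`, used as a metric below, is not defined in this snippet; a
# plausible stand-in (an assumption) is a thresholded binary IoU:
import tensorflow as tf

def mean_iou(y_true, y_pred, threshold=0.5, eps=1e-7):
    """Binary IoU after thresholding the predicted probabilities."""
    y_pred_bin = tf.cast(y_pred > threshold, tf.float32)
    inter = tf.reduce_sum(y_true * y_pred_bin)
    union = tf.reduce_sum(y_true) + tf.reduce_sum(y_pred_bin) - inter
    return (inter + eps) / (union + eps)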

model.compile(optimizer=Adam(lr=0.0001),
              loss=sm.losses.jaccard_loss,
              metrics=[mean_iou])

train_steps = len(images) // batch_size
test_steps = len(annotations) // batch_size

train_generator = get_segmentation_generator_flow(images[:750],
                                                  annotations[:750],
                                                  target_size,
                                                  batch_size,
Example #17
data_manager = BatchManager(256, 256, './dataset/data/images/',
                            './dataset/data/masks/')
x_train, y_train = data_manager.get_data()

valid_manager = BatchManager(256, 256, './dataset/valid/images/',
                             './dataset/valid/masks/')
x_val, y_val = valid_manager.get_data()

print('Reading complete')

x_train = preprocess_input(x_train)
y_train = preprocess_input(y_train)

model = sm.Unet(backbone,
                classes=1,
                activation='sigmoid',
                encoder_weights='imagenet')

# dice_loss = sm.losses.DiceLoss()
# focal_loss = sm.losses.BinaryFocalLoss()
# total_loss = dice_loss + (1 * focal_loss)

# metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]

# model.compile('Adam', total_loss, metrics)
model.compile(
    'Adam',
    loss=sm.losses.bce_jaccard_loss,
    metrics=[sm.metrics.iou_score],
)
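# The snippet ends at compile; a minimal continuation (an assumption, not
# part of the original) would train on the arrays loaded above:
model.fit(x_train, y_train,
          batch_size=8,  # hypothetical value
          epochs=50,     # hypothetical value
          validation_data=(x_val, y_val))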
Example #18
    #orig = cv2.imread('./data/test_images/test_2.jpg')
    #orig = cv2.imread('./data/test_images/58-05-008.png')
    orig = cv2.imread('./data/test_images/geo-eye.tif')
    #image = cv2.merge((orig, orig, orig))
    
    #orig = cv2.imread('./data/test_images/yangambi_orthomosaic_modified.jpg')
    image = cv2.cvtColor(orig, cv2.COLOR_BGR2RGB)

    # define network parameters
    n_classes = 1 if len(CLASSES) == 1 else (len(CLASSES) + 1)  # case for binary and multiclass segmentation
    activation = 'sigmoid' if n_classes == 1 else 'softmax'
    
    #create model
    model = sm.Unet(
      BACKBONE,
      classes=n_classes,
      activation=activation,
      #encoder_weights='imagenet'
    )
    
    # define optimizer
    optim = keras.optimizers.Adam(LR)
    
    # Segmentation models losses can be combined with '+' and scaled by an integer or float factor
    dice_loss = sm.losses.DiceLoss()
    focal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()
    total_loss = dice_loss + (1 * focal_loss)
    metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]
    
    # compile the keras model with the defined optimizer, loss and metrics
    model.compile(optim, total_loss, metrics)
Example #19
File: predict.py Project: kbrodt/severstal
def main():
    global args

    args = parse_args()

    torch.backends.cudnn.benchmark = True

    args.distributed = False
    if 'WORLD_SIZE' in os.environ:
        args.distributed = int(os.environ['WORLD_SIZE']) > 1

    args.gpu = 0
    args.world_size = 1
    if args.distributed:
        args.gpu = args.local_rank
        torch.cuda.set_device(args.gpu)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()

    assert torch.backends.cudnn.enabled, 'Amp requires cudnn backend to be enabled.'

    to_save = Path(args.save)
    path_to_load = Path(args.load)
    if path_to_load.is_file():
        print(f"=> Loading checkpoint '{path_to_load}'")
        checkpoint = torch.load(
            path_to_load,
            map_location=lambda storage, loc: storage.cuda(args.gpu))
        print(f"=> Loaded checkpoint '{path_to_load}'")
    else:
        raise FileNotFoundError(f"no checkpoint found at '{path_to_load}'")
    args = checkpoint['args']
    print(args)

    n_classes = args.n_classes
    if args.cls:
        print('With classification')
    else:
        print('Without classification')

    model = smp.Unet(encoder_name=args.encoder,
                     encoder_weights='imagenet',
                     classes=n_classes,
                     activation='sigmoid',
                     n_classes=n_classes if args.cls else None)

    if args.sync_bn:
        print('using apex synced BN')
        model = apex.parallel.convert_syncbn_model(model)

    model.cuda()

    # Initialize Amp. Amp accepts either values or strings for the optional override arguments,
    # for convenient interoperation with argparse.
    if args.fp16:
        model = apex.amp.initialize(
            model,
            opt_level=args.opt_level,
            keep_batchnorm_fp32=args.keep_batchnorm_fp32,
            loss_scale=args.loss_scale)

    # For distributed training, wrap the model with apex.parallel.DistributedDataParallel.
    # This must be done AFTER the call to amp.initialize.  If model = DDP(model) is called
    # before model, ... = amp.initialize(model, ...), the call to amp.initialize may alter
    # the types of model's parameters in a way that disrupts or destroys DDP's allreduce hooks.
    if args.distributed:
        # By default, apex.parallel.DistributedDataParallel overlaps communication with
        # computation in the backward pass.
        # model = DDP(model)
        # delay_allreduce delays all communication to the end of the backward pass.
        model = apex.parallel.DistributedDataParallel(model,
                                                      delay_allreduce=True)

    # work_dir = Path(args.work_dir)
    work_dir = path_to_load.parent

    model.load_state_dict(checkpoint['state_dict'])

    path_to_data = Path(args.data)
    _, dev_gps = get_data_groups(path_to_data / 'train.csv.zip', args)

    dev_ds = SeverStalDS(dev_gps,
                         root=path_to_data / 'train',
                         transform=dev_transform)
    dev_sampler = None
    if args.distributed:
        dev_sampler = torch.utils.data.distributed.DistributedSampler(dev_ds)

    batch_size = args.batch_size
    dev_loader = torch.utils.data.DataLoader(dev_ds,
                                             batch_size=min(batch_size, 16),
                                             shuffle=False,
                                             sampler=dev_sampler,
                                             num_workers=4,
                                             collate_fn=collate_fn,
                                             pin_memory=True)

    metric = Dice(n_classes=n_classes, thresh=0.5)

    x = torch.rand(2, 3, 256, 256).cuda()
    model = model.eval()
    model.encoder.set_swish(memory_efficient=False)
    with torch.no_grad():
        traced_model = torch.jit.trace(model, x)

    traced_model.save(str(work_dir / f'model_{path_to_load.stem}.pt'))
    del traced_model
    del model

    model = torch.jit.load(str(work_dir /
                               f'model_{path_to_load.stem}.pt')).cuda().eval()

    with torch.no_grad():
        metric.clean()
        trgs, preds, preds_cls = epoch_step(dev_loader,
                                            '[ Validating dev.. ]',
                                            model=model,
                                            metric=metric)
        print(f'dice dev {metric.evaluate()}')
    if str(to_save) == '':
        return
    to_save1 = to_save / 'pred_masks_tta'
    if not to_save1.exists():
        to_save1.mkdir(parents=True)
    to_save2 = to_save / 'pred_clss_tta'
    if not to_save2.exists():
        to_save2.mkdir(parents=True)
    with tqdm.tqdm(zip(dev_gps, preds, preds_cls), total=len(preds)) as pbar:
        for (fname, _), p1, p2 in pbar:
            np.save(to_save1 / fname, p1)
            np.save(to_save2 / fname, p2)
Example #20
import matplotlib.pyplot as plt

from sklearn.model_selection import train_test_split
from data import *
from unet import *
from dense_unet import *
from config import *
from metrics import *
from pretrained_backbone_unet import *
import segmentation_models as sm
sm.set_framework('tf.keras')

#STANDARD UNET
#model = create_unet_model(SHAPE)

MODEL_ARCHITECTURE = sm.Unet('vgg19', encoder_weights='imagenet')


def plot_images(dataset):

    for x, y in dataset:
        print(x.shape)
        print(y.shape)

        print(x[0][125])
        print(y[0][125])

        plt.figure(figsize=(10, 10))
        img = x[0]

        plt.subplot(1, 2, 1)
Example #21
                     n_channels=3,
                     n_classes=4)

   val_generator = DataGenerator(
                   val_idx,
                   df=mask_count_df,
                   target_df=train_df,
                   batch_size=BATCH_SIZE,
                   reshape=(320, 480),
                   augment=False,
                   n_channels=3,
                   n_classes=4)
   
   model = sm.Unet(
   'efficientnetb3',
   classes=4,
   input_shape=(320, 480, 3),
   activation='sigmoid' )
   model.compile(optimizer=Nadam(lr=0.0002), loss=loss.bce_dice_loss, metrics=[loss.dice_coef])
   model.summary()

   train_metric_callback = PrAucCallback(train_generator)
   val_callback = PrAucCallback(val_generator, stage='val')
   #fpath = 'weights.{epoch:02d}-{loss:.2f}-{dice_coef:.2f}-{val_loss:.2f}-{val_dice_coef:.2f}.hdf5'
   #checkpoint = ModelCheckpoint(os.path.join(args.out_path, fpath), save_best_only=True, monitor='val_loss', period=1)

   history = model.fit_generator(
             train_generator,
             validation_data=val_generator,
             callbacks=[train_metric_callback, val_callback],
             epochs=30,
Example #22
tf.keras.backend.set_image_data_format('channels_last')

img_w = 256
img_h = 256

models_string = ['inceptionv3', 'senet154', 'vgg16']

models = []
preproc_fs = []

for model_string in models_string:
    models.append(
        sm.Unet(model_string,
                classes=3,
                activation='softmax',
                input_shape=(img_h, img_w, 3),
                encoder_weights=None))
    preproc_fs.append(sm.get_preprocessing(model_string))

firstTentative = NeuralNetworkFlow(
    seed=1996,
    dataset_path='/content/Development_Dataset/Training',
    n_classes=3,
    out_h=img_h,
    out_w=img_w,
    img_h=img_h,
    img_w=img_w,
    batch_size=32,
    n_test_images=15)
firstTentative.apply_data_augmentation()
Example #23
def train(weights_paths, model_name="unet", batch_size=16, loss_name="bce"):
    BATCH_SIZE = batch_size

    # for reference about the BUFFER_SIZE in shuffle:
    # https://stackoverflow.com/questions/46444018/meaning-of-buffer-size-in-dataset-map-dataset-prefetch-and-dataset-shuffle
    BUFFER_SIZE = 1000

    dataset = {"train": train_dataset, "val": val_dataset}

    # -- Train Dataset --#
    dataset['train'] = dataset['train'].map(
        load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset['train'] = dataset['train'].shuffle(buffer_size=BUFFER_SIZE,
                                                seed=SEED)
    dataset['train'] = dataset['train'].repeat()
    dataset['train'] = dataset['train'].batch(BATCH_SIZE)
    dataset['train'] = dataset['train'].prefetch(buffer_size=AUTOTUNE)

    #-- Validation Dataset --#
    dataset['val'] = dataset['val'].map(load_image_test)
    dataset['val'] = dataset['val'].repeat()
    dataset['val'] = dataset['val'].batch(BATCH_SIZE)
    dataset['val'] = dataset['val'].prefetch(buffer_size=AUTOTUNE)

    print(dataset['train'])
    print(dataset['val'])

    if model_name == "unet":
        model = sm.Unet('efficientnetb4',
                        input_shape=(None, None, 3),
                        classes=N_CLASSES,
                        activation='sigmoid',
                        encoder_weights=None,
                        weights=weights_paths)
    if model_name == "fpn":
        model = sm.FPN('efficientnetb4',
                       input_shape=(None, None, 3),
                       classes=N_CLASSES,
                       activation='sigmoid',
                       encoder_weights=None)
    if model_name == "psp":
        model = sm.PSPNet('efficientnetb4',
                          input_shape=(IMG_SIZE, IMG_SIZE, 3),
                          classes=N_CLASSES,
                          activation='sigmoid',
                          encoder_weights=None)

    optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)  # 0.001

    if loss_name == "bce":
        loss = tf.keras.losses.BinaryCrossentropy()
    elif loss_name == "bce_jaccard":
        loss = sm.losses.bce_jaccard_loss
    elif loss_name == "bce_jaccard_focal":
        loss = sm.losses.binary_focal_jaccard_loss
    elif loss_name == "binary_focal_dice":
        loss = sm.losses.binary_focal_dice_loss

    model.compile(optimizer=optimizer,
                  loss=loss,
                  metrics=['accuracy', sm.metrics.iou_score, dice_coe])

    EPOCHS = 50

    STEPS_PER_EPOCH = TRAINSET_SIZE // BATCH_SIZE
    VALIDATION_STEPS = VALSET_SIZE // BATCH_SIZE

    callbacks = [
        tf.keras.callbacks.ModelCheckpoint(
            'results/weights/' + str(model_name) + '_' + str(loss_name) +
            '.h5',
            monitor='val_dice_coe',
            mode='max',
            verbose=1,
            save_best_only=True,
            save_weights_only=False),
        tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                             factor=0.1,
                                             patience=8,
                                             min_lr=0.00001)
    ]

    results = model.fit(dataset['train'],
                        epochs=EPOCHS,
                        steps_per_epoch=STEPS_PER_EPOCH,
                        validation_steps=VALIDATION_STEPS,
                        callbacks=callbacks,
                        validation_data=dataset['val'])

    plt.figure(figsize=(8, 8))
    plt.title("Learning curve")
    plt.plot(results.history["loss"], label="loss")
    plt.plot(results.history["val_loss"], label="val_loss")
    plt.plot(np.argmin(results.history["val_loss"]),
             np.min(results.history["val_loss"]),
             marker="x",
             color="r",
             label="best model")
    plt.xlabel("Epochs")
    plt.ylabel("log_loss")
    plt.legend()
    plt.savefig('./results/plots/train_loss_' + str(model_name) + '_' +
                str(loss_name) + '.png')

    plt.figure(figsize=(8, 8))
    plt.title("Learning curve")
    plt.plot(results.history["dice_coe"], label="dice_coe")
    plt.plot(results.history["val_dice_coe"], label="val_dice_coe")
    plt.plot(np.argmax(results.history["val_dice_coe"]),
             np.max(results.history["val_dice_coe"]),
             marker="x",
             color="r",
             label="best model")
    plt.xlabel("Epochs")
    plt.ylabel("Dice Coeff")
    plt.legend()
    plt.savefig('./results/plots/train_dice_' + str(model_name) + '_' +
                str(loss_name) + '.png')

    plt.figure(figsize=(8, 8))
    plt.title("Learning curve")
    plt.plot(results.history["iou_score"], label="iou_score")
    plt.plot(results.history["val_iou_score"], label="val_iou_score")
    plt.plot(np.argmax(results.history["val_iou_score"]),
             np.max(results.history["val_iou_score"]),
             marker="x",
             color="r",
             label="best model")
    plt.xlabel("Epochs")
    plt.ylabel("IOU")
    plt.legend()
    plt.savefig('./results/plots/train_IOU_' + str(model_name) + '_' +
                str(loss_name) + '.png')

    plt.figure(figsize=(8, 8))
    plt.title("Learning curve")
    plt.plot(results.history["accuracy"], label="accuracy")
    plt.plot(results.history["val_accuracy"], label="val_accuracy")
    plt.plot(np.argmax(results.history["val_accuracy"]),
             np.max(results.history["val_accuracy"]),
             marker="x",
             color="r",
             label="best model")
    plt.xlabel("Epochs")
    plt.ylabel("accuracy")
    plt.legend()
    plt.savefig('./results/plots/train_accuracy_' + str(model_name) + '_' +
                str(loss_name) + '.png')
Example #24
RESULTADOS_finetuning = []

# Define the K-fold Cross Validator
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=1)

# K-fold Cross Validation model evaluation
fold_no = 1
for train, test in kfold.split(imagens, mascaras_medico):
    print("######### KFOLD ", fold_no, "#########")

    ###### BUILD THE MODEL
    BACKBONE = 'resnet34'
    preprocess_input = sm.get_preprocessing(BACKBONE)
    # define model
    model = sm.Unet(BACKBONE, encoder_weights='imagenet', encoder_freeze=True)
    model.compile(
        'Adam',
        loss=sm.losses.bce_jaccard_loss,
        metrics=[sm.metrics.iou_score],
    )

    ###### split into train and validation; with 5 folds each split is 80% train and 20% test, so take 25% of the 80% train portion for validation, keeping the same proportions as before the k-fold (60% train, 20% test, 20% validation)
    x_train, x_val, y_train, y_val = train_test_split(imagens[train],
                                                      mascaras_medico[train],
                                                      test_size=0.25,
                                                      random_state=11)

    x_train = np.asarray(x_train)
    y_train = (np.asarray(y_train) > threshold_otsu(np.asarray(y_train)))
    x_val = np.asarray(x_val)
Example #25
        weight_mask = torch.zeros_like(y_true).float()
        unique_object_labels = torch.unique(y_true)
        for obj in unique_object_labels:
            num_pixels = torch.sum(y_true == obj, dtype=torch.float)
            weight_mask[y_true == obj] = 1 / num_pixels
        loss = torch.sum(cce_loss * weight_mask**2) / torch.sum(weight_mask**2)
        return loss

    else:
        print("ENTERED WRONG")
        return cce_loss


# Initialize Network Architecture and Start From Pretrained Baseline
model_unet = sm.Unet(backbone_name=BACKBONE,
                     encoder_weights=None,
                     activation=ACTIVATION_FN,
                     classes=N_CLASSES)
# TODO: choose loss
model_unet.compile(
    RMSprop(), loss=custom_loss
)  #loss=JaccardLoss(class_weights=LOSS_WEIGHTS), metrics=[IOUScore()]
callbacks_unet = [
    ReduceLROnPlateau(factor=0.1, patience=5, min_lr=0.00001, verbose=1),
    ModelCheckpoint(CHECKPOINT_FILE,
                    verbose=1,
                    save_best_only=True,
                    save_weights_only=True)
]

# TODO: Selectively load weights based off testing
# model_unet.load_weights(BASELINE_FILE)
Example #26
#            print(mask1)
#            plt.imshow(mask1)
#            plt.show()
#    break
#    if i==1:
#        break
#    i=i+1
# define network parameters
n_classes = 4
#if len(CLASSES) == 1 else (len(CLASSES) + 1)  # case for binary and multiclass segmentation
activation = 'softmax'

#create model
model = sm.Unet(BACKBONE,
                classes=n_classes,
                activation=activation,
                encoder_weights='imagenet',
                encoder_freeze=True)
#model.load_weights('C:/Users/guptav/Desktop/new 22.11/logs/ep01-val_loss0.72.h5')
# define optimizer
optim = keras.optimizers.Adam(lr=0.0)

# Segmentation models losses can be combined with '+' and scaled by an integer or float factor
dice_loss = sm.losses.DiceLoss()
focal_loss = sm.losses.BinaryFocalLoss()
total_loss = dice_loss + (1 * focal_loss)

# actually, total_loss can be imported directly from the library; the example above just shows how losses can be combined
# total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss

metrics = [
Example #27
CLASSES = ['background', 'current', 'neighbour']
LR = 1e-4
EPOCHS = 5

preprocess_input = sm.get_preprocessing(BACKBONE)


# In[12]:


# define network parameters
n_classes = len(CLASSES)  # case for binary and multiclass segmentation
activation = 'sigmoid' if n_classes == 1 else 'softmax'

#create model
model = sm.Unet(BACKBONE, classes=n_classes, activation=activation)


# In[13]:


# define optimizer
optim = keras.optimizers.Adam(LR)

# Segmentation models losses can be combined with '+' and scaled by an integer or float factor
# set class weights for dice_loss (background: 0.5; current: 2.; neighbour: 1.)
dice_loss = sm.losses.DiceLoss(class_weights=np.array([0.5, 2, 1]))
focal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()
total_loss = dice_loss + (1 * focal_loss)

# actually, total_loss can be imported directly from the library; the example above just shows how losses can be combined
Example #28
val_img_data_gen = ImageDataAugmentor(augment=AUGMENTATIONS, augment_seed=123)
val_img_gen = val_img_data_gen.flow_from_directory(
    'temp_training/Validation/images/',
    target_size=(img_height, img_width),
    class_mode=None,
    shuffle=True,
    seed=123,
    batch_size=batch_size)
val_mask_data_gen = ImageDataAugmentor(augment=AUGMENTATIONS, augment_seed=123, augment_mode='mask')
val_mask_gen = val_mask_data_gen.flow_from_directory(
    'temp_training/Validation/masks/',
    target_size=(img_height, img_width),
    class_mode=None,
    shuffle=True,
    seed=123,
    batch_size=batch_size)
val_gen = my_image_mask_generator(val_img_gen, val_mask_gen)

IMG_SIZE = 512
# stop training the model when the mean_iou on the validation set starts to stagnate
early_stopping_callback = EarlyStopping(monitor='val_loss', patience=15, mode='min')  # or max??
# reduce the learning rate during training
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.4, patience=7, verbose=1, min_lr=0.00005, mode='min')
#optimizeAdam = Adam(lr=0.00075, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
optimizeAdam = Adam(lr=0.00075)
input_layer = Input(shape=(IMG_SIZE, IMG_SIZE,3), name='image_in')
unetmodel = sm.Unet(backbone_name='efficientnetb2', encoder_weights='imagenet',input_shape=(IMG_SIZE, IMG_SIZE, 3),classes=1)
out = unetmodel(input_layer)
model = Model(inputs=[input_layer], outputs=out)
#model = multi_gpu_model(model)

lrIni = 0.00075
numLoop=1

for i in range(0,numLoop):
    my_file = Path("temp_training/checkpoint{}.h5".format(i-1))
    if my_file.is_file():
        print("training round {}".format(i+1))
        print("loading checkpoint ... ")
        model = load_model(Path("temp_training/checkpoint{}.h5".format(i-1)))
        print("checkpoint loaded !")
    else:
Example #29
"""
@Author Willian Antunes
"""
import os
import cv2
import sys
import numpy as np
from model.model import unet_256
import tensorflow as tf
import segmentation_models as sm

model = sm.Unet('vgg16', encoder_weights='imagenet')

model.load_weights(filepath='weights/best_weights_1.hdf5')

os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
model.summary()


def load_image_for_predict(img, input_size=(256, 256)):
    data = []
    data.append(img)
    img_name = list(map(lambda s: s.split('/')[-1], data))
    img_name = list(map(lambda s: s.split('.')[0], img_name))
    img = cv2.imread(img)
    #img = cv2.resize(img, (625, 352))
    img = img[:405, ]
    img = cv2.resize(img, input_size)
    img = img.reshape((1,) + img.shape)
    img = np.array(img, np.float32) / 255
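    # plausible completion (an assumption; the original is truncated here):
    # hand back the prepared batch together with the parsed image name
    return img, img_name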
Example #30
                                               self.batch_size]
        batch_y = self.y[idx * self.batch_size:(idx + 1) *
                                               self.batch_size]

        batch_y = tf.keras.utils.to_categorical(y=batch_y,
                                                num_classes=N_CLASSES)

        return batch_x, batch_y
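# For reference, a self-contained reconstruction of the generator this
# fragment belongs to, written as a `tf.keras.utils.Sequence` (names and
# details are assumptions inferred from the lines above):
import numpy as np
import tensorflow as tf

class CustomDataGenerator(tf.keras.utils.Sequence):
    def __init__(self, x, y, batch_size, n_classes=169):
        self.x, self.y = x, y
        self.batch_size = batch_size
        self.n_classes = n_classes

    def __len__(self):
        # number of batches per epoch
        return int(np.ceil(len(self.x) / self.batch_size))

    def __getitem__(self, idx):
        batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
        batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
        batch_y = tf.keras.utils.to_categorical(y=batch_y, num_classes=self.n_classes)
        return batch_x, batch_y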


training_generator = CustomDataGenerator(x_train, y_train, batch_size=BATCH_SIZE)
validation_generator = CustomDataGenerator(x_valid, y_valid, batch_size=BATCH_SIZE)
testing_generator = CustomDataGenerator(x_test, y_test, batch_size=BATCH_SIZE)

# %% -------------------------------------- UNet Model -----------------------------------------------------------------
unet_model = sm.Unet(backbone_name='resnet101', encoder_weights='imagenet',
                     classes=169, input_shape=(224, 224, 3), activation='softmax')
unet_model.summary()


# %% -------------------------------------- Training Prep ----------------------------------------------------------

def dice_loss(y_true, y_pred, eps=1e-6, spatial_axes=(1, 2), from_logits=False):
    num_classes = y_pred.shape[-1]

    # Transform logits in probabilities, and one-hot the ground-truth:
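    # plausible fill for the step named above (an assumption); the data
    # generator already one-hot encodes y_true, so only logits need converting:
    if from_logits:
        y_pred = tf.nn.softmax(y_pred, axis=-1)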

    # Compute Dice numerator and denominator:
    num_perclass = 2 * tf.math.reduce_sum(y_pred * y_true, axis=spatial_axes)
    den_perclass = tf.math.reduce_sum(y_pred + y_true, axis=spatial_axes)

    # Compute Dice and average over batch and classes:
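    # plausible completion (an assumption; the snippet is truncated here):
    dice = tf.math.reduce_mean((num_perclass + eps) / (den_perclass + eps))
    return 1.0 - dice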