Exemple #1
0
def get_sm_model():
    """Build an un-pretrained Linknet segmentation model for the 'sofa' class.

    Returns:
        A compiled-ready Keras model from segmentation_models (one output
        channel per class; encoder starts from random weights).
    """
    class_names = ['sofa']

    # segmentation_models must be pointed at tf.keras before model creation.
    sm.set_framework('tf.keras')

    return sm.Linknet(BACKBONE, classes=len(class_names), encoder_weights=None)
def main(model_name, model, num_epochs, batch_size):
    '''Trains model.

    Args:
        model_name: prefix used for the checkpoint file and TensorBoard log dir.
        model: zero-argument callable that builds and returns the Keras model.
        num_epochs: number of training epochs.
        batch_size: samples per batch for both training and validation.

    Returns:
        The trained Keras model.
    '''
    segmentation_models.set_framework('tf.keras')

    # Collect training image/label filenames.
    X_filelist = glob(r"data\train\X\\" + '*.png')
    y_filelist = glob(r"data\train\y\\" + '*.png')

    # Collect validation image/label filenames.
    X_val_filelist = glob(r"data\val\X\\" + '*.png')
    y_val_filelist = glob(r"data\val\y\\" + '*.png')

    # Build and compile the network with a plain MSE regression objective.
    net = model()
    net.compile(loss="mean_squared_error", optimizer=Adam(lr=5e-6))

    # Keep only the weights with the best (lowest) validation loss.
    checkpoint = ModelCheckpoint(f"{model_name}_weights_best.hdf5",
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')

    # Timestamped TensorBoard run directory so reruns do not collide.
    stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = TensorBoard(log_dir=f"logs\\{model_name}\\" + stamp,
                                       write_images=True)

    net.fit_generator(
        _batchGenerator(X_filelist, y_filelist, batch_size),
        epochs=num_epochs,
        steps_per_epoch=len(X_filelist) // batch_size,
        validation_data=_valBatchGenerator(X_val_filelist, y_val_filelist,
                                           batch_size),
        validation_steps=len(X_val_filelist) // batch_size,
        max_queue_size=1,
        callbacks=[checkpoint, tensorboard_callback],
        verbose=2)

    return net
def main(model_name, model=models.wnet_connected, num_epochs=5, batch_size=2):
    '''Trains depth estimation model.

    Args:
        model_name: prefix for the weights checkpoint and TensorBoard log dir.
        model: zero-argument callable returning the Keras model to train.
        num_epochs: number of training epochs.
        batch_size: samples per batch for training and validation.

    Returns:
        The trained Keras model.
    '''
    segmentation_models.set_framework('tf.keras')
    print(segmentation_models.framework())

    # Training image/label filenames (KITTI layout on a local drive).
    X_filelist = glob(r"G:\Documents\KITTI\data\train\X\\" + '*.png')
    y_filelist = glob(r"G:\Documents\KITTI\data\train\y\\" + '*.png')

    # Validation image/label filenames.
    X_val_filelist = glob(r"G:\Documents\KITTI\data\val\X\\" + '*.png')
    y_val_filelist = glob(r"G:\Documents\KITTI\data\val\y\\" + '*.png')

    # Build and compile with an MSE regression objective.
    net = model()
    net.compile(loss='mean_squared_error', optimizer=Adam(lr=1e-4))

    # Keep only the weights with the best (lowest) validation loss.
    checkpoint = ModelCheckpoint(f"{model_name}_weights_best.hdf5",
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')

    # Timestamped TensorBoard run directory so reruns do not collide.
    stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = TensorBoard(log_dir=f"logs\\{model_name}\\" + stamp)

    net.fit_generator(
        _batchGenerator(X_filelist, y_filelist, batch_size),
        epochs=num_epochs,
        steps_per_epoch=len(X_filelist) // batch_size,
        validation_data=_valBatchGenerator(X_val_filelist, y_val_filelist,
                                           batch_size),
        validation_steps=len(X_val_filelist) // batch_size,
        max_queue_size=1,
        callbacks=[checkpoint, tensorboard_callback],
        verbose=2)

    return net
def main(model_name, model, num_epochs, batch_size):
    '''Trains model.

    Args:
        model_name: prefix for the weights checkpoint and TensorBoard log dir.
        model: zero-argument callable returning a model with 'rpy_output'
            and 'xyz_output' heads.
        num_epochs: number of training epochs.
        batch_size: samples per batch for training and validation.

    Returns:
        The trained Keras model.
    '''
    segmentation_models.set_framework('tf.keras')

    # Optical-flow input frames for training and validation.
    X_filelist = glob(r"data\train\flow\\" + '*.png')
    X_val_filelist = glob(r"data\val\flow\\" + '*.png')

    # MSLE on both pose heads; tiny LR (val_loss was 21.76883 at LR=0.001).
    net = model()
    net.compile(loss={'rpy_output': 'msle', 'xyz_output': 'msle'},
                optimizer=Adam(1e-7))

    # Keep only the weights with the best (lowest) validation loss.
    checkpoint = ModelCheckpoint(f"{model_name}_weights_best.hdf5",
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')

    # Timestamped TensorBoard run directory so reruns do not collide.
    stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = TensorBoard(log_dir=f"logs\\{model_name}\\" + stamp)

    net.fit_generator(
        _batchGenerator(X_filelist, batch_size),
        epochs=num_epochs,
        steps_per_epoch=len(X_filelist) // batch_size,
        validation_data=_valBatchGenerator(X_val_filelist, batch_size),
        validation_steps=len(X_val_filelist) // batch_size,
        max_queue_size=1,
        callbacks=[checkpoint, tensorboard_callback],
        verbose=2)

    return net
Exemple #5
0
def main(model_name, model, num_epochs, batch_size):
    '''Trains model.

    Args:
        model_name: prefix for the checkpoint files and TensorBoard log dir.
        model: zero-argument callable returning a model with 'rpy_output'
            and 'xyz_output' heads.
        num_epochs: number of training epochs.
        batch_size: samples per batch for training and validation.

    Returns:
        The trained Keras model.
    '''
    segmentation_models.set_framework('tf.keras')

    # Training image/label filenames.
    X_filelist = glob(r"data\train\X\\" + '*.png')
    y_filelist = glob(r"data\train\y\\" + '*.png')

    # Validation image/label filenames.
    X_val_filelist = glob(r"data\val\X\\" + '*.png')
    y_val_filelist = glob(r"data\val\y\\" + '*.png')

    # RMSE objective on both pose heads.
    net = model()
    net.compile(loss={'rpy_output': root_mean_squared_error,
                      'xyz_output': root_mean_squared_error},
                optimizer=Adam(1e-5))

    # Two checkpoints: best validation loss and best training loss.
    best_val = ModelCheckpoint(f"{model_name}_weights_bestvalloss.hdf5",
                               monitor='val_loss', verbose=1,
                               save_best_only=True, mode='min')
    best_train = ModelCheckpoint(f"{model_name}_weights_bestloss.hdf5",
                                 monitor='loss', verbose=1,
                                 save_best_only=True, mode='min')

    # Timestamped TensorBoard run directory so reruns do not collide.
    stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = TensorBoard(log_dir=f"logs\\{model_name}\\" + stamp,
                                       write_images=True)

    net.fit_generator(
        _batchGenerator(X_filelist, y_filelist, batch_size),
        epochs=num_epochs,
        steps_per_epoch=len(X_filelist) // batch_size,
        validation_data=_valBatchGenerator(X_val_filelist, y_val_filelist,
                                           batch_size),
        validation_steps=len(X_val_filelist) // batch_size,
        max_queue_size=1,
        callbacks=[best_val, best_train, tensorboard_callback],
        verbose=2)

    return net
Exemple #6
0
import tensorflow as tf
import segmentation_models as sm

saved_model_dir = 'h5_model'
sm.set_framework('tf.keras')  ## segmentation_model 2.0 support feature..

# Rebuild the U-Net architecture (random weights) and then load trained
# weights from the .h5 file — the architecture must match the checkpoint.
backbone = 'mobilenetv2'
model = sm.Unet(backbone, input_shape=(256, 256, 3), encoder_weights=None, activation='sigmoid')#activation='identity')#, decoder_attention_type='scse')  # 'imagenet')
model.summary()

file_name = "0_0.9767963647842407_model.h5"
model.load_weights(file_name)

# Convert the model.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()

# BUGFIX: split(".")[0] truncated the name at the FIRST dot, so a filename
# containing dots (like this one) lost most of its stem and the output became
# "0_0.tflite". rsplit with maxsplit=1 strips only the ".h5" extension.
# Also write through a context manager so the file handle is closed reliably.
out_path = "./tflite/" + file_name.rsplit(".", 1)[0] + ".tflite"
with open(out_path, "wb") as out_file:
    out_file.write(tflite_model)
print('done')
import pandas as pd
import geopandas as gpd
import numpy as np
import rasterio as rio
from rasterio.windows import Window
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
import segmentation_models as sm
from segmentation_models.metrics import iou_score
from segmentation_models.losses import jaccard_loss, categorical_focal_jaccard_loss
from sklearn.preprocessing import MinMaxScaler

from models import regression_head, unet_model, unet_reg
import argparse

sm.set_framework('tf.keras')
sm.framework()

# indices https://www.sciencedirect.com/science/article/pii/S0303243422000290

## look into switching loss function to categorical_cross_entropy and maybe built in keras metric IOU from keras (done)

### change training data file to csv files (done mostly)
## change to 256 chip size, see if the csv files make a difference (done)
## if not get generators working (not needed yet)
### generate thresholded impoervious data maybe across all datasets (all years) (done)
#### create indices (ASI, NVDI, .....) (in progress)
### look into dice joccard loss as improvement
### when reading impervious mask REMEMBER data[data == 127] = 0 (done)

## Do I calculate indices before scaling by 16 bit scaler???
 def __init__(self, **kwargs):
     """Build a PSPNet-backed wrapper.

     ``kwargs`` must contain ``name`` (consumed by the base initializer);
     every remaining keyword is forwarded to ``sm.PSPNet``.
     """
     # Imported locally so the framework can be set before model creation.
     import segmentation_models as sm
     sm.set_framework('tf.keras')
     # Base class only takes the name; strip it before forwarding kwargs.
     super().__init__(name=kwargs['name'])
     del kwargs['name']
     # NOTE(review): attribute is called 'unet' but holds a PSPNet.
     self.unet = sm.PSPNet(**kwargs)
@author: Szedlák Barnabás
"""

#------------------------------------------------------------------------------
# 0. STEP
# Importing libraries

import numpy as np
import matplotlib.pyplot as plt
import pickle
import time

from tensorflow.keras.optimizers import Adam

import segmentation_models
segmentation_models.set_framework('tf.keras') # manually setting tensorflow.keras framework to avoid package collision

from segmentation_models.losses import JaccardLoss # other losses may be imported here, as well
from segmentation_models.utils import set_trainable
from segmentation_models import Unet
from segmentation_models.metrics import IOUScore # other metrics may be imported here, as well

from vectorizer import train_test_generator # importing functions from our own scripts
import model_backroom as backroom  # importing functions from our own scripts



#------------------------------------------------------------------------------
# 1. STEP
# Initialising vector parameters
Exemple #10
0
import segmentation_models
from segmentation_models.base import Loss
from segmentation_models.losses import CategoricalCELoss
from ..losses import functional as F

segmentation_models.set_framework('tf.keras')


class DiceLoss(Loss):
    """Dice loss that delegates the actual computation to ``F.dice_loss``.

    Args:
        beta: F-score weighting between precision and recall.
        class_weights: per-class weights, or None for uniform weighting.
    """

    def __init__(self, beta=1, class_weights=None):
        super().__init__(name='dice_loss')
        self.beta = beta
        # A scalar 1 is the neutral weight when no per-class weights are given.
        self.class_weights = 1 if class_weights is None else class_weights

    def __call__(self, gt, pr):
        """Compute the dice loss between ground truth ``gt`` and prediction ``pr``."""
        return F.dice_loss(gt=gt, pr=pr, beta=self.beta,
                           class_weights=self.class_weights)


class JaccardLoss(Loss):
    """Jaccard (IoU) loss that delegates the computation to ``F.jaccard_loss``.

    Args:
        class_weights: per-class weights, or None for uniform weighting.
    """

    def __init__(self, class_weights=None):
        super().__init__(name='jaccard_loss')
        # A scalar 1 is the neutral weight when no per-class weights are given.
        self.class_weights = 1 if class_weights is None else class_weights

    def __call__(self, gt, pr):
        """Compute the jaccard loss between ground truth ``gt`` and prediction ``pr``."""
        return F.jaccard_loss(gt=gt, pr=pr, class_weights=self.class_weights)


class TverskyLoss(Loss):
Exemple #11
0
def run(config):
    """Train a binary-segmentation U-Net end to end from a config object.

    Builds the model, optimizer and focal loss, optionally resumes from a
    checkpoint, wires up file loggers and a summary writer, then hands
    everything to ``train`` and finally saves the weights to ``model.h5``.

    Args:
        config: project config with TRAIN/EVAL batch sizes, OPTIMIZER.LR,
            TRAIN_DIR and RECIPE attributes.
    """
    ## TODO change to get model

    sm.set_framework('tf.keras')  ## segmentation_model 2.0 support feature..
    backbone = 'mobilenetv2'
    # Un-pretrained U-Net with a sigmoid head (single-channel output).
    model = sm.Unet(
        backbone,
        input_shape=(256, 256, 3),
        encoder_weights=None,
        activation='sigmoid'
    )  #activation='identity')#, decoder_attention_type='scse')  # 'imagenet')
    model.summary()

    ## TODO optimizer change
    # optimizer = tf.keras.optimizers.Adam(learning_rate_schedule)#learning_rate=config.OPTIMIZER.LR) #get_optimizer(config, model.parameters())
    optimizer = tf.keras.optimizers.Adam(
        learning_rate=config.OPTIMIZER.LR
    )  #config.OPTIMIZER.LR) #get_optimizer(config, model.parameters())
    ##loss ##
    criterion = FocalLoss()  #DiceLoss()#tf.keras.losses.BinaryCrossentropy()

    # Checkpoint resume is currently disabled (always None), so training
    # starts fresh with sentinel epoch/score/loss values.
    checkpoint = None
    # checkpoint = utils.checkpoint.get_initial_checkpoint(config)
    if checkpoint is not None:
        last_epoch, score, loss = utils.checkpoint.load_checkpoint(
            config, model, checkpoint)
        # utils.checkpoint.load_checkpoint_legacy(config, model, checkpoint)
    else:
        print('[*] no checkpoint found')
        last_epoch, score, loss = -1, -1, float('inf')
    print('last epoch:{} score:{:.4f} loss:{:.4f}'.format(
        last_epoch, score, loss))

    # optimizer.param_groups[0]['initial_lr'] = config.OPTIMIZER.LR

    # Summary writer plus append-mode text logs under TRAIN_DIR+RECIPE.
    writer = SummaryWriter(
        os.path.join(config.TRAIN_DIR + config.RECIPE, 'logs'))
    log_train = Logger()
    log_val = Logger()
    log_train.open(os.path.join(config.TRAIN_DIR + config.RECIPE,
                                'log_train.txt'),
                   mode='a')
    log_val.open(os.path.join(config.TRAIN_DIR + config.RECIPE, 'log_val.txt'),
                 mode='a')
    # Train/val batch generators; the final None is an unused transform slot.
    train_loader = BatchGenerator(config, 'train', config.TRAIN.BATCH_SIZE,
                                  None)
    # train_dataset = Dataset(config, 'train', None)
    # train_loader = train_dataset.DataGenerator(config.DATA_DIR, batch_size=config.TRAIN.BATCH_SIZE, shuffle = True)
    train_datasize = len(train_loader)  #train_dataset.get_length()

    # val_dataset = Dataset(config, 'val', None)
    # val_loader = val_dataset.DataGenerator(config.DATA_DIR, batch_size=config.TRAIN.BATCH_SIZE, shuffle=False)

    val_loader = BatchGenerator(config, 'val', config.EVAL.BATCH_SIZE, None)
    val_datasize = len(val_loader)  #val_dataset.get_length()

    ### TODO: add transform

    # last_epoch+1 makes a fresh run start at epoch 0.
    train(config, model, train_loader, val_loader, optimizer, log_train,
          log_val, last_epoch + 1, score, loss, writer,
          (train_datasize, val_datasize), criterion)

    model.save_weights("model.h5")
Exemple #12
0
--train: Flag. Add if training.
--test: Flag. Add if testing.
--weights (required): path to weights file, either to write to for training, or to use for testing (.h5)
--backbone (required): name of backbone to use, ex: resnet34, vgg16

For training it should be sufficient to just call the script using the list of rasters and vectors (and other required arguments), 
and currently you have to manually set the hyperparams in the code, but this should eventually be offloaded to a settings file or 
command line arguments. This will result in the training weights being saved in the specified .h5 file.

For testing you just need to call the script on the list of rasters and it will produce a mask of the entire
orthomosaic.
'''

#keras.backend.set_image_data_format('channels_first')
# segmentation_models must be told to use the tf.keras implementation.
sm.set_framework(
    'tf.keras'
)  # need this otherwise currently a bug in model.fit when used with tf.Datasets

# Globals
N_CHANNELS = 3  # RGB input channels
WIDTH = 256  # image width in pixels -- NOTE(review): confirm against dataset tiles
HEIGHT = 256  # image height in pixels


def parse_image(img_path: str) -> dict:
    """Load an image and its annotation (mask) and returning
    a dictionary.

    Parameters
    ----------
    img_path : str
def main(model_name, model, num_epochs, batch_size):
    '''Trains model.

    Args:
        model_name: prefix for the checkpoint files and TensorBoard log dir.
        model: zero-argument callable returning a model with 'rpy_output'
            and 'xyz_output' heads fed by RGB frames and optical flow.
        num_epochs: number of training epochs.
        batch_size: samples per batch for training and validation.

    Returns:
        The trained Keras model.
    '''
    segmentation_models.set_framework('tf.keras')

    # Training filenames: RGB frames, optical flow, and labels.
    X_filelist = glob(r"data\train\X\\" + '*.png')
    X_flow_filelist = glob(r"data\train\flow\\" + '*.png')
    y_filelist = glob(r"data\train\y\\" + '*.png')

    # Validation filenames.
    X_val_filelist = glob(r"data\val\X\\" + '*.png')
    X_val_flow_filelist = glob(r"data\val\flow\\" + '*.png')
    y_val_filelist = glob(r"data\val\y\\" + '*.png')

    # UnDeepVO-style per-head pose losses. DeepVO itself uses Adagrad(0.001).
    net = model()
    net.compile(loss={'rpy_output': undeepvo_rpy_mse,
                      'xyz_output': undeepvo_xyz_mse},
                optimizer=Adam(0.001))  #UnDeepVO uses beta_2=0.99

    # Two checkpoints: best validation loss and best training loss.
    best_val = ModelCheckpoint(f"{model_name}_weights_best.hdf5",
                               monitor='val_loss',
                               verbose=1,
                               save_best_only=True,
                               mode='min')
    best_train = ModelCheckpoint(f"{model_name}_weights_best_trainingloss.hdf5",
                                 monitor='loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')

    # Timestamped TensorBoard run directory so reruns do not collide.
    stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = TensorBoard(log_dir=f"logs\\{model_name}\\" + stamp)

    net.fit_generator(
        _batchGenerator(X_filelist, X_flow_filelist, y_filelist, batch_size),
        epochs=num_epochs,
        steps_per_epoch=len(X_filelist) // batch_size,
        validation_data=_valBatchGenerator(X_val_filelist, X_val_flow_filelist,
                                           y_val_filelist, batch_size),
        validation_steps=len(X_val_filelist) // batch_size,
        max_queue_size=1,
        callbacks=[best_val, best_train, tensorboard_callback],
        verbose=2)

    return net
Exemple #14
0
val_iters = 1280 // bs_v
# grid search
for k in range(31, 100):
    # continue each model checkpoint
    start_path = model_dir + "%s-%s__%s_%s_%d_lr%s_ep%02d+%02d.h5" % (
        framework,
        model_name,
        data_name,
        loss_name,
        edge_size,
        lrstr,
        continue_step[0] + continue_step[1],
        k * checkpoint_period,
    )
    sm.set_framework('keras')
    model = smunet(loss=loss_name,
                   pretrained_weights=start_path,)
    # model = unet(
    #     pretrained_weights=start_path,
    #     input_size=(target_size[0], target_size[1], 3),
    #     lr=lr,
    #     multi_gpu=flag_multi_gpu,
    # )
    # model = denseunet(start_path)
    # model = unetxx(start_path,
    #                lr=lr)
    """
    for k_val, (x, y) in zip(tqdm(range(val_iters)), valGene):
        f = model.predict(x, batch_size=bs_v)
        # plt.show()
Exemple #15
0
import numpy as np
import tensorflow as tf
import segmentation_models as sm
import matplotlib.pyplot as plt

sm.set_framework("tf.keras")


def build_model():
    """Wrap a 7-class FPN segmentation head in a Keras model.

    Returns:
        A ``tf.keras.Model`` whose input is a dict with a single "image"
        entry (float32, any spatial size, 3 channels) and whose output is
        the sigmoid-activated FPN prediction.
    """
    image_input = tf.keras.Input(shape=[None, None, 3],
                                 name="image",
                                 dtype=tf.float32)

    # FPN head with an ImageNet-pretrained MobileNetV2 encoder.
    fpn = sm.FPN(
        backbone_name="mobilenetv2",
        input_shape=(None, None, 3),
        classes=7,
        activation="sigmoid",
        weights=None,
        encoder_weights="imagenet",
        encoder_features="default",
        pyramid_block_filters=256,
        pyramid_use_batchnorm=True,
        pyramid_aggregation="concat",
        pyramid_dropout=None,
    )
    outputs = fpn(image_input)

    return tf.keras.Model(inputs={"image": image_input}, outputs=outputs)


def input_fn(image, label=None):
    return tf.estimator.inputs.numpy_input_fn({"image": image},
def main():
    '''Train a U-Net with a manual tf.GradientTape loop, save the weights,
    then display predicted vs. ground-truth masks with OpenCV.

    NOTE(review): the visual "test" pass at the end reuses the same
    train_dataloader -- there is no held-out evaluation here.
    '''
    yml = 'configs/fastscnn_mv3_sj_add_data_1024.yml'
    config = utils.config.load(yml)
    seed_everything()
    # train_dataloader, suffle_size = dataset.get_dataloader()
    # train_dataloader = train_dataloader.shuffle(suffle_size).batch(2)
    train_dataset = Dataset(config, 'train', None)
    train_dataloader = train_dataset.DataGenerator(config.DATA_DIR, batch_size=config.TRAIN.BATCH_SIZE, shuffle=True)
    # Hard-coded size cap used below to end each epoch early.
    dataset_size = 100 # train_dataset.get_length()
    # model = MyModel()
    # h w c
    # model = UNet2(input_dims = [256,256,3], num_classes= 1)
    sm.set_framework('tf.keras') ## segmentation_model 2.0 support feature..
    # Un-pretrained ResNet18 U-Net over 256x256 RGB inputs.
    model = sm.Unet('resnet18', input_shape=(256, 256, 3),  encoder_weights=None)#'imagenet')
    model.summary()

    # model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
    # model.fit_generator(train_dataloader, epochs=50)
    # model.save_weights("model.h5")

    epochs = config.TRAIN.NUM_EPOCHS
    loss_object = tf.keras.losses.BinaryCrossentropy()
    # Running metrics accumulated across all steps (never reset per epoch).
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_accuracy = tf.keras.metrics.BinaryAccuracy(name='train_accuracy')
    learning_rate = 1e-4
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    ### train
    for epoch in range(epochs):
        # NOTE(review): 'start' is recorded but never used.
        start = time.time()

        for step, (input_image,mask) in enumerate(train_dataloader):
            with tf.GradientTape() as grad_tape:
                output = model(input_image)
                # mask = tf.expand_dims(mask,-1)
                loss = loss_object(mask, output)

                # cv2.imshow('mask',mask.numpy()[0]*255)
                # cv2.waitKey()
                # print(loss.numpy())
                ## get a loss
            gradients = grad_tape.gradient(loss, model.trainable_variables)
            ## backward pass via the tape -- the tf equivalent of loss.backward()
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))
            train_loss(loss)
            train_accuracy(mask, output)
            print(epoch, "loss: ",train_loss.result().numpy(), "acc: ",train_accuracy.result().numpy(),"step: ",step)

            ## end of epoch. break..
            if step > dataset_size / config.TRAIN.BATCH_SIZE: break
    print('save_model')
    model.save_weights("model.h5")



    # model.fit_generator(train_dataloader, epochs=50)
    # model.save_weights("model.h5")

    ### test
    # NOTE(review): 'alpha' was for the commented-out overlay and is unused.
    alpha = 0.3
    model.load_weights("model.h5")
    if not os.path.exists("./results"): os.mkdir("./results")

    for idx, (img, mask) in enumerate(train_dataloader):
        # Binarise the predicted mask at a 0.5 threshold.
        pred_mask = model(img).numpy()[0]
        pred_mask[pred_mask > 0.5] = 1
        pred_mask[pred_mask <= 0.5] = 0
        # img = cv2.cvtColor(pred_mask, cv2.COLOR_GRAY2RGB)
        # H, W, C = img.shape
        # for i in range(H):
        #     for j in range(W):
        #         if pred_mask[i][j][0] <= 0.5:
        #             img[i][j] = (1 - alpha) * img[i][j] * 255 + alpha * np.array([0, 0, 255])
        #         else:
        #             img[i][j] = img[i][j] * 255
        # image_accuracy = np.mean(mask == pred_mask)
        # image_path = "./results/pred_" + str(idx) + ".png"
        # print("=> accuracy: %.4f, saving %s" % (image_accuracy, image_path))
        # Show ground truth and prediction side by side; blocks on keypress.
        cv2.imshow("t_mask", np.uint8(mask[0]) * 255)
        cv2.imshow("mask", np.uint8(pred_mask* 255) )
        cv2.waitKey()