Example #1
    def model_arch():
        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
        model.add(Activation('relu'))
        model.add(Conv2D(32, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(64, (3, 3), padding='same'))
        model.add(Activation('relu'))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(10))
        model.add(Activation('softmax'))
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adadelta(),
                      metrics=['accuracy'])
        return model
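A minimal usage sketch for model_arch above; the input_shape value, the dummy CIFAR-10-style data, and the training settings below are assumptions for illustration, not part of the original snippet.

import keras
import numpy as np

input_shape = (32, 32, 3)  # assumed; the original defines input_shape elsewhere
model = model_arch()
# dummy data: 100 RGB images scaled to [0, 1] and one-hot labels for the 10 output classes
x_train = np.random.rand(100, 32, 32, 3).astype('float32')
y_train = keras.utils.to_categorical(np.random.randint(10, size=100), 10)
model.fit(x_train, y_train, batch_size=32, epochs=1)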
Example #2
def model():
    # This is our LSTM model, built with Keras layers. The loss function is mean squared error and the optimizer is Adam.
    mod = Sequential()
    mod.add(
        LSTM(units=64,
             return_sequences=True,
             input_shape=(X_train.shape[1], 9)))
    mod.add(Dropout(0.2))
    mod.add(BatchNormalization())
    mod.add(LSTM(units=64, return_sequences=True))
    mod.add(Dropout(0.1))
    mod.add(BatchNormalization())

    mod.add(LSTM(units=64))
    mod.add(Dropout(0.1))
    mod.add(BatchNormalization())
    mod.add(Dense(units=16, activation='tanh'))
    mod.add(BatchNormalization())
    mod.add(Dense(units=4, activation='tanh'))
    mod.compile(loss='mean_squared_error',
                optimizer='adam',
                metrics=['accuracy', 'mean_squared_error'])
    mod.summary()

    return mod
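A hedged sketch of the input this LSTM expects: X_train must be a 3-D array of shape (samples, timesteps, 9), and the targets need 4 values per sample to match the final Dense(4) layer. The shapes and dummy data below are illustrative assumptions.

import numpy as np

# 200 samples, 30 timesteps, 9 features per timestep (all sizes assumed)
X_train = np.random.rand(200, 30, 9).astype('float32')
y_train = np.random.rand(200, 4).astype('float32')

mod = model()  # model() reads X_train.shape[1], so X_train must exist first
mod.fit(X_train, y_train, batch_size=32, epochs=1, validation_split=0.1)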
Example #3
def train_model(model, X_train, y_train, name, config, data):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameters for training.
        data: String, dataset name ('pems' or 'nyc').
    """

    if name in ['lstm', 'gru', 'saes', 'cnn_lstm', 'en_1', 'en_2', 'en_3']:
        #model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
        model.compile(loss="mse", optimizer="adam", metrics=['mse'])
        es = EarlyStopping(monitor='val_loss',
                           patience=10,
                           verbose=0,
                           mode='min')
        if data == "pems":
            mc = ModelCheckpoint('model_pems/' + name + '.h5',
                                 monitor='val_loss',
                                 mode='auto',
                                 verbose=1,
                                 save_best_only=True)
        elif data == "nyc":
            mc = ModelCheckpoint('model_nyc/' + name + '.h5',
                                 monitor='val_loss',
                                 mode='auto',
                                 verbose=1,
                                 save_best_only=True)

        hist = model.fit(X_train,
                         y_train,
                         batch_size=config["batch"],
                         epochs=config["epochs"],
                         validation_split=0.05,
                         callbacks=[es, mc])

        #model.save('model/' + name + '.h5')
        df = pd.DataFrame.from_dict(hist.history)
        if data == "pems":
            df.to_csv('model_pems/' + name + ' loss.csv',
                      encoding='utf-8',
                      index=False)
        elif data == "nyc":
            df.to_csv('model_nyc/' + name + ' loss.csv',
                      encoding='utf-8',
                      index=False)

    elif name == 'rf':
        model.fit(X_train, y_train)

        if data == "pems":
            with open('model_pems/' + name + '.h5', 'wb') as f:
                cPickle.dump(model, f)
        elif data == "nyc":
            with open('model_nyc/' + name + '.h5', 'wb') as f:
                cPickle.dump(model, f)
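A sketch of how this train_model could be driven; only the config keys 'batch' and 'epochs' are read by the function, while the toy model, the lag count, and the dummy data below are assumptions.

import os
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

# dummy lagged data: 500 samples with 12 lags each (sizes assumed)
X_train = np.random.rand(500, 12)
y_train = np.random.rand(500)

net = Sequential([Dense(8, activation='relu', input_shape=(12,)), Dense(1)])
config = {"batch": 64, "epochs": 2}
os.makedirs('model_pems', exist_ok=True)  # the function writes its checkpoint and loss CSV here
train_model(net, X_train, y_train, name='lstm', config=config, data='pems')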
Example #4
def build(params, batch_size=None):
    """
    Build the LSTM according to the parameters passed. The general
    architecture is set in the code.
    :param batch_size: If this param is not None, it is used to override the
                       value set in the parameters dictionary. This is useful
                       when building a network to make 1-step predictions.
    """
    # Always use the batch_size value passed to the method. If it is not set,
    # copy it from the params.
    if batch_size is None:
        batch_size = params['lstm_batch_size']
    # Build the LSTM.
    model = Sequential()
    # Check if my design has more than 1 layer.
    ret_seq_flag = False
    if params['lstm_numlayers'] > 1:
        ret_seq_flag = True
    # Add input layer.
    print('Adding layer #{:d} [{:d}]'
          .format(1, params['lstm_layer{:d}'.format(1)]))
    model.add(LSTM(
            params['lstm_layer1'],
            input_shape=(params['lstm_timesteps'], params['num_features']),
            stateful=params['lstm_stateful'],
            unit_forget_bias=params['lstm_forget_bias'],
            unroll=params['lstm_unroll'],
            batch_input_shape=(batch_size,
                               params['lstm_timesteps'],
                               params['num_features']),
            return_sequences=ret_seq_flag))
    model.add(Dropout(params['lstm_dropout1']))
    # Add additional hidden layers.
    for layer in range(1, params['lstm_numlayers']):
        if (layer + 1) == params['lstm_numlayers']:
            ret_seq_flag = False
        print('Adding layer #{:d} [{:d}]'.format(
            layer+1, params['lstm_layer{:d}'.format(layer+1)]))
        model.add(LSTM(
            params['lstm_layer{:d}'.format(layer+1)],
            input_shape=(params['lstm_timesteps'], params['num_features']),
            stateful=params['lstm_stateful'],
            unit_forget_bias=params['lstm_forget_bias'],
            unroll=params['lstm_unroll'],
            batch_input_shape=(batch_size,
                               params['lstm_timesteps'],
                               params['num_features']),
            return_sequences=ret_seq_flag))
        model.add(Dropout(params['lstm_dropout{:d}'.format(layer+1)]))

    # Output layer.
    model.add(Dense(units=1, input_dim=params['lstm_layer{:d}'.format(
        params['lstm_numlayers'])]))
    #model.add(Activation('linear'))
    model.compile(
        loss=params['lstm_loss'],
        optimizer=params['lstm_optimizer'])

    return model
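The build function above is driven entirely by a params dictionary. A plausible minimal dictionary for a 2-layer LSTM over 10 timesteps of 4 features might look like the following; the key names come from the code, the values are illustrative assumptions.

params = {
    'lstm_batch_size': 32,
    'lstm_timesteps': 10,
    'num_features': 4,
    'lstm_numlayers': 2,
    'lstm_layer1': 64,
    'lstm_layer2': 32,
    'lstm_dropout1': 0.2,
    'lstm_dropout2': 0.2,
    'lstm_stateful': False,
    'lstm_forget_bias': True,
    'lstm_unroll': False,
    'lstm_loss': 'mse',
    'lstm_optimizer': 'adam',
}
model = build(params)                      # training-time network
pred_model = build(params, batch_size=1)   # same architecture for 1-step predictions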
Example #5
def train_model(X_train, X_test, y_train, y_test, model):
    X_train = X_train.reshape(X_train.shape[0], 300, 300, 3)
    X_test = X_test.reshape(X_test.shape[0], 300, 300, 3)

    print("X_train.shape=", X_train.shape)
    print("y_train.shape", y_train.shape)

    print("X_test.shape=", X_test.shape)
    print("y_test.shape", y_test.shape)

    # print(y_train[0])
    '''
    softmax layer -> output = 10 nodes, each representing one of the digit classes 0 through 9.

    To do this, the y values are converted to a one-hot encoding:
    0: 1,0,0,0,0,0,0,0,0,0
    1: 0,1,0,0,0,0,0,0,0,0
    ...
    5: 0,0,0,0,0,1,0,0,0,0
    '''
    # reformat via one-hot encoding
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    # print(y_train[0])

    # categorical_crossentropy is used for multi-class classification
    # metrics: quantities reported during training and evaluation
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # batch_size: number of samples used to compute each gradient descent update
    history = model.fit(X_train,
                        y_train,
                        validation_data=(X_test, y_test),
                        batch_size=16,
                        epochs=30,
                        verbose=1)

    plot_loss_curve(history.history)

    # print(history.history)
    print("train loss=", history.history['loss'][-1])
    print("validation loss=", history.history['val_loss'][-1])

    # save model in file
    # offering in KERAS
    model.save('model-201611263.model')

    history_df = pd.DataFrame(history.history)
    with open("history_data.csv", mode='w') as file:
        history_df.to_csv(file)

    return model
Example #6
def train_model(model, X_train, y_train, name, config):
    model.compile(loss="mse", optimizer="adadelta", metrics=['mape'])
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.1)  # training

    model.save('models/' + name + '.h5')
    # df = pd.DataFrame.from_dict(hist.history)
    # df.to_csv('models/' + name + ' loss.csv', encoding='utf-8', index=False)
    return model
Example #7
def train_model(model,
                x_train,
                y_train,
                out_dir,
                validation_data,
                n_epochs,
                batch_size,
                learning_rate,
                loss="binary_crossentropy",
                early_stopping=True,
                save_checkpoint=True,
                verbose=1,
                ckpt_name_prefix=""):
    print("Model summary:")
    print(model.model.summary())
    callbacks = []
    if save_checkpoint:
        # Save a checkpoint at the end of each epoch; 'val_loss' is the monitored quantity.
        # With save_best_only=True, the checkpoint with the best monitored quantity is not overwritten.
        # With save_weights_only=True, only the model weights are saved, via model.save_weights.
        checkpoint = ModelCheckpoint(os.path.join(
            out_dir, ckpt_name_prefix + ".{epoch:02d}-{val_loss:.3f}.hdf5"),
                                     verbose=verbose,
                                     monitor='val_loss',
                                     save_weights_only=True,
                                     save_best_only=True)
        callbacks.append(checkpoint)
    if early_stopping:
        # Training stops when the monitored quantity (val_loss) stops improving.
        # patience is the number of epochs with no improvement after which training is stopped.
        stopping = EarlyStopping(monitor="val_loss",
                                 min_delta=0,
                                 patience=6,
                                 verbose=verbose,
                                 mode='auto')
        callbacks.append(stopping)
    adam = Adagrad(lr=learning_rate, epsilon=1e-08, decay=0.0, clipnorm=1.)  # note: an Adagrad optimizer, despite the variable name
    model.compile(metrics=[], optimizer=adam, loss=loss)
    print("Training of model '%s' started." % model.model_name)
    start_time = time.time()
    history = model.fit(x_train,
                        y_train,
                        validation_data=validation_data,
                        n_epochs=n_epochs,
                        batch_size=batch_size,
                        callbacks=callbacks,
                        verbose=verbose)
    print("Training of model '%s' finished in %s." %
          (model.model_name,
           time.strftime("%H:%M:%S", time.gmtime(time.time() - start_time))))
    return history
Example #8
def setup_to_finetune(model):
    """Freeze the bottom NB_IV3_LAYERS and retrain the remaining top 
      layers.
   note: NB_IV3_LAYERS corresponds to the top 2 inception blocks in 
         the inceptionv3 architecture
   Args:
     model: keras model
   """
    NB_IV3_LAYERS_TO_FREEZE = 172
    for layer in model.layers[:NB_IV3_LAYERS_TO_FREEZE]:
        layer.trainable = False
    for layer in model.layers[NB_IV3_LAYERS_TO_FREEZE:]:
        layer.trainable = True
    model.compile(optimizer=SGD(lr=0.001, momentum=0.9),
                  loss='categorical_crossentropy')
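A hedged sketch of how setup_to_finetune is typically paired with an InceptionV3 base from keras.applications; the classification head (pooling, dense size, class count) is an assumption, not part of the original snippet.

from keras.applications.inception_v3 import InceptionV3
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model

base = InceptionV3(weights='imagenet', include_top=False)
x = GlobalAveragePooling2D()(base.output)
x = Dense(1024, activation='relu')(x)             # assumed head size
predictions = Dense(5, activation='softmax')(x)   # assumed number of classes
model = Model(inputs=base.input, outputs=predictions)

setup_to_finetune(model)  # freezes the first 172 layers and recompiles with SGD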
Example #9
def simple_model(pretrained_weights=None, input_size=(256, 256, 1)):
    inputs = tf.keras.Input(input_size)
    conv1 = tf.keras.layers.Conv2D(64,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(
                                       inputs)  # 256
    conv1 = tf.keras.layers.Conv2D(32,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(conv1)
    conv1 = tf.keras.layers.Conv2D(32,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(conv1)
    conv1 = tf.keras.layers.Conv2D(16,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(conv1)
    conv1 = tf.keras.layers.Conv2D(8,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(conv1)
    conv1 = tf.keras.layers.Conv2D(1,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(conv1)

    model = tf.keras.models.Model(inputs=inputs, outputs=conv1)

    model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-4),
                  loss="mean_absolute_error",
                  metrics=['accuracy'],
                  run_eagerly=True)

    model.summary()

    if (pretrained_weights):
        model.load_weights(pretrained_weights)

    return model
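A short usage sketch for simple_model, assuming single-channel 256x256 inputs and targets of the same shape; the dummy arrays are placeholders.

import numpy as np

model = simple_model()  # input_size defaults to (256, 256, 1)
x = np.random.rand(4, 256, 256, 1).astype('float32')
y = np.random.rand(4, 256, 256, 1).astype('float32')
model.fit(x, y, batch_size=2, epochs=1)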
Example #10
def model_arch():
    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape))  #(?,image_width,image_height,1)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])
    return model
Example #11
def model_arch():
    model = Sequential()
    model.add(Conv2D(50, kernel_size=(5, 5), activation='relu', input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(100, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(200, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(400, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(200, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])
    return model
Example #12
    def train(self, model):

        opt = self.k.optimizers.Adam(lr=0.00003, epsilon=0.1)
        # model.compile(loss='categorical_crossentropy',
        model.compile(loss='categorical_hinge',
                      optimizer=opt,
                      metrics=['accuracy'])

        print("Load data ...")
        x_train = []
        y_train = []
        allSize = len(os.listdir(self.folderPath))
        for cur, subfolder in enumerate(os.listdir(self.folderPath)):
            if subfolder[0] != '.':
                # print ("Load folder: " + folder)
                (x, y) = tfHelper.get_dataset_with_one_folder(
                    self.folderPath, subfolder, self.c.convertColor,
                    self.c.allOutput)

                x = self.c.normalize(x)

                if len(y[0]) == self.c.num_classes:
                    for i in x:
                        x_train.append(i)
                    for i in y:
                        y_train.append(i)

            if (cur + 1) % self.c.batchSize == 0:
                print("Batch " + str(cur + 1 - self.c.batchSize) + '-' +
                      str(cur + 1) + '/' + str(allSize))
                model = self.fit(model, x_train, y_train)
                x_train = []
                y_train = []

        print("Batch " + str(cur + 1 - self.c.batchSize) + '-' + str(cur + 1) +
              '/' + str(allSize))
        model = self.fit(model, x_train, y_train)
        return model
Example #13
def train_model(model, X_train, y_train, name, config):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
    """

    model.compile(loss="mse", optimizer="adadelta", metrics=['mape'])
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.1)  # training

    model.save('models/' + name + '.h5')
    # df = pd.DataFrame.from_dict(hist.history)
    # df.to_csv('models/' + name + ' loss.csv', encoding='utf-8', index=False)
    return model
Example #14
import model
import data

# responsible for generating the training data
g = data.Data()
# option so that not all GPU memory is allocated up front
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
# build the model
model = model.make(tflite=False)
# define the optimizer
optimizer = tf.keras.optimizers.Adam(lr=0.001)
model.compile(optimizer=optimizer,
              loss="categorical_crossentropy",
              metrics=["categorical_accuracy"])


# callback
class Callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        "Save the weights at the end of each epoch"
        model.save("weight.hdf5")


cb = Callback()
# when resuming training from a previous run
initial_epoch = 0
if initial_epoch >= 1:
    model.load_weights("weight.hdf5")
Example #15
def my_msle(y_true, y_pred):
    msle = tf.reduce_mean(tf.square(tf.math.log(y_true + 1) - tf.math.log(y_pred + 1)))
    return msle

def my_mape(y_true, y_pred):
    mape = 100 * tf.reduce_mean(tf.abs(y_true - y_pred) / y_true)
    return mape

def my_logcosh(y_true, y_pred):
    logcosh = tf.math.log((tf.math.exp(y_pred - y_true) + tf.math.exp(y_true - y_pred))/2)
    return logcosh
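
# The compile() call below references `my_mae`, which is not defined in this
# snippet. A minimal sketch by analogy with the custom losses above (this
# definition is an assumption, not the original code):
def my_mae(y_true, y_pred):
    mae = tf.reduce_mean(tf.abs(y_true - y_pred))
    return mae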


# Step 2: Define Metrics
model.compile(optimizer= tf.keras.optimizers.RMSprop(learning_rate = lr_rate),
              loss     = my_mae,
              metrics  = ['mse'])
# print(model.summary())
# sys.exit()

if sys.argv[1] == "train":
    # Step 3: Load data
    X_train, Y_train, X_valid, Y_valid,stats = loader.load_data(data_path,True,0.8)
    stats.to_csv("data/wine_stats.csv", sep=',', encoding='utf-8')

    # Step 4: Training
    # Create a function that saves the model's weights
    cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath = model_name,
                                                     save_weights_only=True,
                                                     verbose=0, save_freq="epoch")
    # model.load_weights(model_name)
Example #16
def main():
    """main function

    Main function... (what do you expect me to say...)

    Args:
        - none

    Returns:
        - none
    """

    # Main function for evaluate
    parser = argparse.ArgumentParser(description="Evaluate the model")
    parser.add_argument(
        "--net",
        help=
        "The type of net work which is either mrcnn, unet, deeplab or custom.",
        required=True,
        default="unet")
    parser.add_argument("--img_size",
                        required=True,
                        type=int,
                        help="The size of input image")
    parser.add_argument("--gpu_id",
                        required=False,
                        default="0",
                        type=str,
                        help="The id of the gpu used when training.")
    parser.add_argument(
        "--weight_path",
        required=False,
        default=None,
        type=str,
        help=
        "The name of trained weights. If not specified, the model will use 'net_imgSize.h5'. "
    )

    # Parse argument
    args = parser.parse_args()
    net_type = args.net
    gpu_number = args.gpu_id
    img_size = args.img_size
    weight_path = args.weight_path

    import os
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_number
    # Argument check
    if not (net_type in {"unet", "deeplab", "custom", "mrcnn"}):
        raise ValueError(
            "netType should be either unet, deeplab, mrcnn and custom.")

    if net_type in {"unet", "deeplab", "custom"}:

        # Get config
        Config = cfg.Config()

        # COCO instance
        print("Reading COCO ground truth...")
        cocoGt = COCO(Config.COCO_training_ann_path)
        cocoValGt = COCO(Config.COCO_validation_ann_path)
        print("Finished")

        # Get all classes
        classes = len(cocoGt.getCatIds())

        id_to_index = dict()
        # There is a weird class of 0 in the feature map of type zero
        index_to_id = dict()

        # Because the ids in the COCO dataset start from 92, we map those ids to indices so that the
        # Keras utils can convert the segmentation map into a one-hot categorical encoding.
        for index, id in enumerate(cocoGt.getCatIds()):
            id_to_index[id] = index
            index_to_id[index] = id

        if net_type == "unet":
            model = basic_model.unet(input_size=(img_size, img_size, 3),
                                     classes=len(id_to_index))
        elif net_type == "deeplab":
            deeplab_model = basic_model.Deeplabv3(input_shape=(img_size,
                                                               img_size, 3),
                                                  classes=len(id_to_index),
                                                  backbone="xception")
            output = KL.Activation("softmax")(deeplab_model.output)
            model = KM.Model(deeplab_model.input, output)
        elif net_type == "custom":
            model = model.custom_model(input_shape=(img_size, img_size, 3),
                                       classes=len(id_to_index))

    if net_type in {"unet", "deeplab", "custom"}:

        file_list = glob(Config.COCO_training_path + '*')
        val_list = glob(Config.COCO_validation_path + '*')

        if weight_path is None:
            model.load_weights(net_type + "_" + str(img_size) + ".h5")
            print("weights loaded!")
        else:
            model.load_weights(weight_path)
            print("weights loaded!")

        #model.compile(optimizer = KO.Adam(clipvalue=2.), loss="categorical_crossentropy", metrics=["accuracy"])
        model.compile(optimizer=KO.Adam(),
                      loss="categorical_crossentropy",
                      metrics=["accuracy"])
        print("Prediction start...")

        vfunc = np.vectorize(lambda index: index_to_id[index])

        anns = []

        # Convert into COCO annotation
        for i in trange(len(val_list)):
            image = val_list[i]
            image_id = int(image.replace(".jpg", '')[-12:])

            cropping_image, padding_dims, original_size = utils.padding_and_cropping(
                image, (img_size, img_size))
            cropping_image = preprocess_input(cropping_image, mode="torch")

            result = model.predict(cropping_image)
            result = np.argmax(result, axis=3)

            seg_result = utils.reverse_padding_and_cropping(
                result, padding_dims, original_size)
            seg_result = vfunc(seg_result)
            COCO_ann = cocostuffhelper.segmentationToCocoResult(seg_result,
                                                                imgId=image_id)
            for ann in COCO_ann:
                ann["segmentation"]["counts"] = ann["segmentation"][
                    "counts"].decode("ascii")  # json can't dump byte string
            anns += COCO_ann

        with open("result.json", "w") as file:
            json.dump(anns, file)

        # Evaluate result
        resFile = "result.json"
        cocoDt = cocoValGt.loadRes(resFile)
        cocoEval = COCOStuffeval(cocoValGt, cocoDt)
        cocoEval.evaluate()
        cocoEval.summarize()
Example #17
    elif (opt.version == 2):
        model = model.get_Fast_ARCNN((None,None,1))
    elif (opt.version == 3):
        model = model.get_ARCNN_lite((None,None,1))
    elif (opt.version == 4):
        model = model.get_ARCNN_att((None,None,1))

    #Load Dataset
    data = create_artifact_dataset(fpath=opt.dataset,
        batch_size=opt.batch_size,
        p=opt.patch_size,
        s=opt.stride_size,
        jpq=(opt.jpq_lower, opt.jpq_upper))
    data = data.prefetch(tf.data.experimental.AUTOTUNE)
    
    #Set callbacks
    tboard = tf.keras.callbacks.TensorBoard(log_dir="./logs/ARCNN_ssim",write_images=True)
    filepath="./checkpoints/ARCNN_ssim/weights-improvement-{epoch:02d}-{ssim:.2f}.hdf5"
    cp = tf.keras.callbacks.ModelCheckpoint(filepath,monitor="ssim",verbose=1,save_weights_only=True)
    lr_reduce = tf.keras.callbacks.ReduceLROnPlateau(monitor='ssim', factor=0.1, patience=5, verbose=1,mode='max',
                                                    min_delta=0.001, 
                                                    cooldown=2, 
                                                    min_lr=1e-6)

    #Train Model
    optim = tf.keras.optimizers.Adam(learning_rate=1e-3)
    model.compile(optimizer=optim,loss=custom_loss,metrics=[ssim,psnr])
    model.fit(data,epochs=opt.epochs,callbacks=[tboard,cp,lr_reduce])

    #SaveModel
    model.save(opt.model_save_path,save_format="tf")
Example #18
def main():
    """main function

    Main function... (what do you expect me to say...)

    Args:
        - none

    Returns:
        - none
    """

    # Main function for evaluate
    parser = argparse.ArgumentParser(
        description="A testing framework for semantic segmentation.")
    parser.add_argument(
        "--net",
        required=True,
        default="unet",
        type=str,
        help=
        "(str) The type of net work which is either unet, deeplab or custom.")
    parser.add_argument("--epochs", required=False, default=500, type=int)
    parser.add_argument("--batch_size", required=False, default=16, type=int)
    parser.add_argument("--gpu_id",
                        required=False,
                        default="0",
                        type=str,
                        help="(str) The id of the gpu used when training.")
    parser.add_argument("--img_size",
                        required=False,
                        default=192,
                        type=int,
                        help="(int) The size of input image")
    parser.add_argument(
        "--load_weights",
        required=False,
        default=False,
        type=bool,
        help="(bool) Use old weights or not (named net_imgSize.h5)")

    # Parse argument
    args = parser.parse_args()
    net_type = args.net
    epochs = args.epochs
    batch_size = args.batch_size
    gpu_number = args.gpu_id
    img_size = args.img_size

    import os
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_number
    # Argument check
    if not (net_type in {"unet", "deeplab", "custom"}):
        raise ValueError("netType should be either unet, deeplab and custom.")

    # Get config
    Config = cfg.Config()

    # COCO instance
    print("Reading COCO ground truth...")
    cocoGt = COCO(Config.COCO_training_ann_path)
    cocoValGt = COCO(Config.COCO_validation_ann_path)
    print("Finished")

    # Get all classes
    classes = len(cocoGt.getCatIds())

    id_to_index = dict()
    # There is a weird class of 0 in the feature map of type zero
    index_to_id = dict()

    # Because the ids in the COCO dataset start from 92, we map those ids to indices so that the
    # Keras utils can convert the segmentation map into a one-hot categorical encoding.
    for index, id in enumerate(cocoGt.getCatIds()):
        id_to_index[id] = index
        index_to_id[index] = id

    if net_type == "unet":
        model = basic_model.unet(input_size=(img_size, img_size, 3),
                                 classes=len(id_to_index))
    elif net_type == "deeplab":
        deeplab_model = basic_model.Deeplabv3(input_shape=(img_size, img_size,
                                                           3),
                                              classes=len(id_to_index),
                                              backbone="xception")
        output = KL.Activation("softmax")(deeplab_model.output)
        model = KM.Model(deeplab_model.input, output)
    elif net_type == "custom":
        model = model.custom_model(input_shape=(img_size, img_size, 3),
                                   classes=len(id_to_index))

    file_list = glob(Config.COCO_training_path + '*')
    val_list = glob(Config.COCO_validation_path + '*')

    if args.load_weights:
        try:
            model.load_weights(net_type + "_" + str(img_size) + ".h5")
            print("weights loaded!")
        except:
            print("weights not found!")

    checkpointer = KC.ModelCheckpoint(filepath=net_type + "_" + str(img_size) +
                                      ".h5",
                                      verbose=1,
                                      save_best_only=True)

    model.compile(optimizer=KO.Adam(),
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    model.fit_generator(data.generator(batch_size, file_list,
                                       (img_size, img_size), cocoGt,
                                       id_to_index, True),
                        validation_data=data.generator(batch_size, val_list,
                                                       (img_size, img_size),
                                                       cocoValGt, id_to_index,
                                                       False),
                        validation_steps=10,
                        steps_per_epoch=100,
                        epochs=epochs,
                        use_multiprocessing=True,
                        workers=8,
                        callbacks=[checkpointer])
    print("Prediction start...")

    vfunc = np.vectorize(lambda index: index_to_id[index])

    anns = []

    # Convert into COCO annotation
    for i in trange(len(val_list)):
        image = val_list[i]
        image_id = int(image.replace(".jpg", '')[-12:])

        cropping_image, padding_dims, original_size = utils.padding_and_cropping(
            image, (img_size, img_size))
        cropping_image = preprocess_input(cropping_image, mode="torch")

        result = model.predict(cropping_image)
        result = np.argmax(result, axis=3)

        seg_result = utils.reverse_padding_and_cropping(
            result, padding_dims, original_size)
        seg_result = vfunc(seg_result)
        COCO_ann = cocostuffhelper.segmentationToCocoResult(seg_result,
                                                            imgId=image_id)
        for ann in COCO_ann:
            ann["segmentation"]["counts"] = ann["segmentation"][
                "counts"].decode("ascii")  # json can't dump byte string
        anns += COCO_ann

    with open("result.json", "w") as file:
        json.dump(anns, file)

    # Read result file
    # Test for fake result
    #resFile = Config.fake_result

    # Evaluate result
    resFile = "result.json"
    cocoDt = cocoValGt.loadRes(resFile)
    cocoEval = COCOStuffeval(cocoValGt, cocoDt)
    cocoEval.evaluate()
    cocoEval.summarize()
Example #19
from keras import backend as K
from keras.layers import Activation
from keras.layers import Input, Lambda, Dense, Dropout, Convolution2D, MaxPooling2D, Flatten
from keras.models import Sequential, Model
from keras.optimizers import RMSprop
from keras.callbacks import EarlyStopping

import features
import model

train_dir = "train"
test_dir = "test"
batch_size = 64
epochs = 30
input_img_dim = (1, 28, 28)
input_aud_dim = (1, 1025, 47)

img_a, aud_a, img_b, aud_b, labels = features.data_generate(train_dir)
opt, model = model.siamese_model(input_img_dim, input_aud_dim)

model.compile(loss=model.contrastive_loss, optimizer=opt)
model.summary()

es = EarlyStopping(monitor='val_loss', mode='min', verbose=1)
model.fit([img_a, aud_a, img_b, aud_b],
          labels,
          validation_split=.25,
          batch_size=batch_size,
          verbose=2,
          nb_epoch=epochs,
          callbacks=[es])
Example #20
# initialize the model
print("[INFO] compiling model...")
cnn1 = model.create_cnn(56, 56, 3)
cnn2 = model.create_cnn(56, 56, 3)

# combining tensor output of the two cnn models
combinedInput = concatenate([cnn1.output, cnn2.output])


x = Dense(16, activation="relu")(combinedInput)

# binary predictor
y = Dense(1, activation="sigmoid")(x)

model = Model(inputs=[cnn1.input, cnn2.input], outputs=y)

opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt,	metrics=["accuracy"])

# train the network
print("[INFO] training network...")

reduce_lr_on_plateau = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, verbose=0, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0.001)
tensorboard = TensorBoard(log_dir="logs",write_graph=True)
filepath="binary_classifier_multi_input.best_val_acc.hdf5"
model_check_point = ModelCheckpoint(filepath, monitor='val_acc', verbose=0, save_best_only=True, mode='max', period=1)


model.fit([trainX, trainXX], trainY,
          callbacks=[reduce_lr_on_plateau, model_check_point, tensorboard],
          validation_data=([testX, testXX], testY),
          epochs=EPOCHS, batch_size=BS)

# the model is serialized by the callback function model_check_point
Example #21
if args.model == 'CNN':
    MOSNet = model.CNN()
elif args.model == 'BLSTM':
    MOSNet = model.BLSTM()
elif args.model == 'CNN-BLSTM':
    MOSNet = model.CNN_BLSTM()
else:
    raise ValueError(
        'please specify model to train with, CNN, BLSTM or CNN-BLSTM')

model = MOSNet.build()

model.compile(
    optimizer=tf.keras.optimizers.Adam(1e-4),
    loss={
        'avg': 'mse',
        'frame': 'mse'
    },
    loss_weights=[1, alpha],
)

CALLBACKS = [
    keras.callbacks.ModelCheckpoint(filepath=os.path.join(
        OUTPUT_DIR, 'mosnet.h5'),
                                    save_best_only=True,
                                    monitor='val_loss',
                                    verbose=1),
    keras.callbacks.TensorBoard(log_dir=os.path.join(OUTPUT_DIR,
                                                     'tensorboard.log'),
                                update_freq='epoch'),
    keras.callbacks.EarlyStopping(monitor='val_loss',
                                  mode='min',
Example #22
def load_model(ckpt_weights_file, model, learning_rate):
    adam = Adagrad(lr=learning_rate, epsilon=1e-08, decay=0.0, clipnorm=1.)
    model.compile(metrics=[], optimizer=adam)
    model.load_model_weights(ckpt_weights_file)
Example #23
x = numpy.empty((len(games),m.input_dim()))
ywin = numpy.empty((len(games),1))
yscore = numpy.empty((len(games),2))
for i in range(len(games)):
    g = games[i]
    (year, week, date) = g.game_time()
    (road_team_id,home_team_id) = g.teams()
    m.set_input_data(year, week, date, road_team_id, home_team_id, x, i)
    ywin[i,0] = g.target_data_win()
    yscore[i,0] = g.score()[0]
    yscore[i,1] = g.score()[1]
    pass

model = tensorflow.keras.models.Sequential()
model.add(tensorflow.keras.layers.BatchNormalization(input_shape=(m.input_dim(),)))
model.add(tensorflow.keras.layers.Dense(m.neurons()[0], activation='relu'))
model.add(tensorflow.keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['binary_accuracy'])
model.fit(x, ywin, epochs=m.epochs()[0], batch_size=1024)
model.save(m.name()+".win.h5")
del model

model = tensorflow.keras.models.Sequential()
model.add(tensorflow.keras.layers.BatchNormalization(input_shape=(m.input_dim(),)))
model.add(tensorflow.keras.layers.Dense(m.neurons()[1], activation='relu'))
model.add(tensorflow.keras.layers.Dense(2, activation='linear'))
model.compile(optimizer='nadam', loss='mean_squared_error')
model.fit(x, yscore, epochs=m.epochs()[1], batch_size=1024)
model.save(m.name()+".score.h5")
del model
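Both networks are saved with model.save above, so they can be reloaded later for prediction; a minimal sketch, where the feature row x_new is an assumed placeholder built the same way as x above.

import numpy
import tensorflow

win_model = tensorflow.keras.models.load_model(m.name() + ".win.h5")
score_model = tensorflow.keras.models.load_model(m.name() + ".score.h5")

x_new = numpy.zeros((1, m.input_dim()))   # one row in the same feature layout as x
print(win_model.predict(x_new))           # win prediction (encoding from g.target_data_win())
print(score_model.predict(x_new))         # predicted two-value score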
Example #24
def preprocess():
    batch_size = 32
    epochs = 100
    model_name = 'model.h5'
    left_correction = 0.2
    right_correction = -0.2

    print('Training Batch size: ', batch_size, '\nNo of epochs: ', epochs)
    print('Model is saved as: ', model_name)

    source_dir = os.path.join(os.getcwd(), 'recordings/IMG/')
    data_dir = os.path.join(os.getcwd(), 'recordings/')
    print('recorded data is available in: ', source_dir)
    preprocessed_data_dir = os.path.join(os.getcwd(), 'processed_data/IMG/')

    pics_location = os.listdir(source_dir)
    print('Preprocessed data is stored in: ', preprocessed_data_dir)

    new_pics = pics_location[0:20000]

    columns = [
        'center', 'left', 'right', 'steering_angle', 'throttle', 'brake',
        'speed'
    ]
    df = pd.read_csv(data_dir + "driving_log.csv", names=columns)
    print(df.shape)

    xs = df['center']
    ys = df['steering_angle']
    xs_r = df['right']
    ys_r = df['steering_angle'] + right_correction

    xs_l = df['left']
    ys_l = df['steering_angle'] + left_correction

    xs = pd.concat([xs, xs_l], axis=0)
    xs = pd.concat([xs, xs_r], axis=0)
    ys = pd.concat([ys, ys_l], axis=0)
    ys = pd.concat([ys, ys_r], axis=0)
    num_images, cols = df.shape

    print('len of xs', len(xs))
    print('len of ys', len(ys))
    #shuffle list of images
    c = list(zip(xs, ys))
    random.shuffle(c)
    xs, ys = zip(*c)

    train_xs = xs[:int(len(xs) * 0.6)]
    train_ys = ys[:int(len(xs) * 0.6)]

    val_xs = xs[-int(len(xs) * 0.4):]
    val_ys = ys[-int(len(xs) * 0.4):]

    num_train_images = len(train_xs)
    num_val_images = len(val_xs)

    print('No of training images: ', num_train_images)
    print('No of validation images: ', num_val_images)

    # Images are read in the order center, left and right

    k = 0
    images = []
    steering_angles = []

    for i in new_pics:
        img = misc.imread(source_dir + i)
        image_flipped = np.fliplr(img)
        image_normalized = image_flipped / 255.0
        salt_pepper_img = sp_noise(img, 0.05)

        images.append(img)
        images.append(image_flipped)
        images.append(salt_pepper_img)
        steering_angles.append(ys[k])   # original image
        steering_angles.append(-ys[k])  # flipped image
        steering_angles.append(ys[k])   # salt and pepper image

        k = k + 1

    model = model.build_predict_steering_model()
    model.compile(loss="mse", optimizer=Adam)

    ckpt = ModelCheckpoint('model.h5')

    # for i in new_pics:
    #      #print(i)
    #      image = misc.imread(source_dir+i)
    #      image_flipped = np.fliplr(image)
    #      image_normalized = image_flipped/255.0
    #      salt_pepper_img = sp_noise(image, 0.05)
    #
    #      misc.imsave(preprocessed_data_dir+'img_'+i+'_Orig'+'.jpg', image)
    #      img_loc=pd.DataFrame([preprocessed_data_dir+'img_'+i+'_Orig'+'.jpg'])
    #
    #      print(len(xs))
    #      print(ys[k])
    #      ys=pd.concat([ys, ys[k]], axis=0)
    #
    #      misc.imsave(preprocessed_data_dir+'img_'+i+'_F'+'.jpg', image_flipped)
    #      img_loc=preprocessed_data_dir+'img_'+i+'_F'+'.jpg'
    #      ar= np.array(img_loc)
    #      xs= pd.concat([xs, img_loc], axis=0)
    #      ys.append(ys[i])
    #
    #      misc.imsave(preprocessed_data_dir+'img_'+i+'_N'+'.jpg', image_normalized)
    #      img_loc=preprocessed_data_dir+'img_'+i+'_N'+'.jpg'
    #      ar= np.array(img_loc)
    #      xs= pd.concat([xs, img_loc], axis=0)
    #      ys.append(ys[i])
    #
    #      misc.imsave(preprocessed_data_dir+'img_'+i+'_SP'+'.jpg', salt_pepper_img)
    #      img_loc=preprocessed_data_dir+'img_'+i+'_SP'+'.jpg'
    #      ar= np.array(img_loc)
    #      xs= pd.concat([xs, img_loc], axis=0)
    #      ys.append(ys[i])

    print('preprocessing is successfully done')
Example #25
        if (epoch + 1) % 100 == 0:

            generator.save_weights("Generator{}.h5".format(epoch))
            discriminator.save_weights(
                "Discriminator_weights{}.h5".format(epoch))
            model.save_weights("Model{}.h5".format(epoch))
            from google.colab.patches import cv2_imshow

            path = "/content/drive/MyDrive/cars_train/07336.jpg"

            X = cv2.imread(path)
            X = cv2.resize(X, (24, 24))
            X = np.reshape(X, (1, 24, 24, 3))
            X_batch = tf.cast(X, tf.float32)

            Y = generator(X_batch)
            cv2_imshow(X[0])
            cv2_imshow(Y[0].numpy())


generator().summary()
discriminator().summary()
model = tf.keras.models.Sequential()
model.add(generator())
model.add(discriminator())
model.summary()
discriminator().compile(loss="binary_crossentropy", optimizer="rmsprop")
discriminator().trainable = False
model.compile(loss="binary_crossentropy", optimizer="rmsprop")

train_dcgan(model, epochs=2200)
Example #26
def train_dcgan(model, epochs=5):

    print("done")
    generator, discriminator = model.layers
    discriminator.compile(loss="binary_crossentropy", optimizer="rmsprop")
    discriminator.trainable = False
    model.compile(loss="binary_crossentropy", optimizer="rmsprop")
    path = '/content/drive/MyDrive/cars_train/'
    for epoch in tqdm(range(epochs)):
        print("Epoch {}/{}".format(epoch + 1, epochs))
        for root, dirnames, filenames in os.walk(path):
            i = 0
            j = 0
            x_train_x = np.zeros((32, 24, 24, 3))
            x_train_y = np.zeros((32, 96, 96, 3))
            for filename in filenames:
                img_path = os.path.join(path, filename)
                x_train = cv2.imread(img_path)
                x_trainx = cv2.resize(x_train, (24, 24))
                x_trainy = cv2.resize(x_train, (96, 96))
                x_train_x[i] = x_trainx
                x_train_y[i] = x_trainy
                i = i + 1
                if i == 32:
                    j = j + 1
                    print("batch {}/254".format(j))
                    X_batch, Y_batch = x_train_x, x_train_y
                    X_batch = tf.cast(X_batch, tf.float32)
                    Y_batch = tf.cast(Y_batch, tf.float32)
                    generated_images = generator(X_batch)
                    X = tf.cast(generated_images, tf.float32)
                    X_fake_and_real = tf.concat([X, Y_batch], axis=0)
                    y1 = tf.constant([[0.]] * batch_size + [[1.]] * batch_size)
                    discriminator.trainable = True
                    discriminator.train_on_batch(X_fake_and_real, y1)
                    y2 = tf.constant([[1.]] * batch_size)
                    discriminator.trainable = False
                    model.train_on_batch(X_batch, y2)
                    i = 0
                    x_train_x = np.zeros((32, 24, 24, 3))
                    x_train_y = np.zeros((32, 96, 96, 3))

        #Check result after every 100 epochs
        if (epoch + 1) % 100 == 0:

            generator.save_weights("Generator{}.h5".format(epoch))
            discriminator.save_weights(
                "Discriminator_weights{}.h5".format(epoch))
            model.save_weights("Model{}.h5".format(epoch))
            from google.colab.patches import cv2_imshow

            path = "/content/drive/MyDrive/cars_train/07336.jpg"

            X = cv2.imread(path)
            X = cv2.resize(X, (24, 24))
            X = np.reshape(X, (1, 24, 24, 3))
            X_batch = tf.cast(X, tf.float32)

            Y = generator(X_batch)
            cv2_imshow(X[0])
            cv2_imshow(Y[0].numpy())
Example #27
	test_label = to_categorical(y_test, 2)
	print('Build model...')

	# build the model
	# model = model.buildCNN(MAX_SEQUENCE_LENGTH, nb_words,
	# 					   word_embedding_matrix, FILTER_LENGTH, NB_FILTER)
	# model = model.buildLstmCnn(MAX_SEQUENCE_LENGTH, nb_words,
	# 					   word_embedding_matrix, FILTER_LENGTH, NB_FILTER)
	# model = model.buildCnnLSTM(MAX_SEQUENCE_LENGTH, nb_words,
	# 						   word_embedding_matrix, NB_FILTER)
	# model = model.buildLstmPool(nb_words, word_embedding_matrix ,MAX_SEQUENCE_LENGTH)
	model = model.LSTM3(nb_words, word_embedding_matrix, MAX_SEQUENCE_LENGTH)
	# model = model.BiLSTM(nb_words, word_embedding_matrix, MAX_SEQUENCE_LENGTH)
	# model = model.BiLstmPool(nb_words, word_embedding_matrix, MAX_SEQUENCE_LENGTH, POOL_LENGTH)

	model.compile(loss='categorical_crossentropy', optimizer='adagrad',  # adam
				  metrics=['accuracy'])
	model.summary()  # print a summary of the model
	callbacks = [ModelCheckpoint(MODEL_WEIGHTS_FILE,
								 monitor='val_acc', save_best_only=True)]

	t0 = time.time()
	history = model.fit(X_train, train_label,
						batch_size=BATCH_SIZE,
						verbose=1,
						validation_split=VALIDATION_SPLIT, # (X_test, test_label)
						callbacks=callbacks,
						nb_epoch=NB_EPOCHS)
	t1 = time.time()
	print("Minutes elapsed: %f" % ((t1 - t0) / 60.))

	# save the model and weights to the specified path
Example #28
from dense import DenseLayer
import model
import numpy as np

if __name__ == "__main__":
    model = model.Model()
    x = np.array([[1, 1], [1, 0], [0, 1], [0, 0]])
    y = np.array([[0], [1], [1], [0]])
    model.add(DenseLayer((2, 2), 'relu'))
    model.add(DenseLayer((2, 4), 'relu'))
    model.add(DenseLayer((4, 1), 'sigmoid'))
    model.compile("mse")
    model.fit(x, y, 0.1, 4, 2000)
    #model.printm()
    print(model.predict(np.array([[1, 1], [0, 1], [1, 0], [0, 0]])))
Example #29
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
y_train = np.array(y_train)
y_test = np.array(y_test)
print(y_train.shape)
print(y_train)

model = model.creat_model(max_features, maxlen)

# try using different optimizers and different optimizer configs
model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])

filepath = "weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True,
                             mode='max')
callbacks_list = [checkpoint]

print('Train...')
model.fit(x_train,
          y_train,
          batch_size=batch_size,
          nb_epoch=10,
          validation_split=0.33,
Example #30
tab_labels = np.array(tab_labels, dtype=np.float32)
tab_images = np.array(tab_images, dtype=np.float32) / 255

indices = np.random.permutation(len(tab_labels))
tab_labels = tab_labels[indices]
tab_images = tab_images[indices]

print("SOMME", np.sum(tab_labels, axis=0))

model = model.model(7, 8)
optimizer = tf.keras.optimizers.RMSprop(learning_rate=1E-4)
csv_logger = tf.keras.callbacks.CSVLogger('training.log')


class my_callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        if epoch >= 30 and not epoch % 10:
            model.save('my_model/{:d}'.format(epoch))


model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(tab_images,
          tab_labels,
          validation_split=0.05,
          batch_size=64,
          epochs=300,
          callbacks=[csv_logger, my_callback()])
Example #31
def full_model(pretrained_weights=None, input_size=(256, 256, 1)):

    inputs = tf.keras.Input(input_size)
    conv1 = tf.keras.layers.Conv2D(64,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(
                                       inputs)  # 256
    conv1 = tf.keras.layers.Conv2D(64,
                                   3,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool1)  # 128
    conv2 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool2)  # 64
    conv3 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool3)  # 32
    conv4 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    conv5 = Conv2D(1024,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool4)  # 16
    conv5 = Conv2D(1024,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)

    up6 = Conv2D(512,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(drop5))
    merge6 = concatenate([drop4, up6], axis=3)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv6)

    up7 = Conv2D(256,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv7)

    up8 = Conv2D(128,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv8)

    up9 = Conv2D(64,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv9)
    conv9 = Conv2D(2,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv9)
    # conv10 = Conv2D(1, 1, activation = 'relu')(conv9)
    # conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv10)
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

    model = tf.keras.models.Model(inputs=inputs, outputs=conv10)

    model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-4),
                  loss="mean_absolute_error",
                  metrics=['accuracy'],
                  run_eagerly=True)

    model.summary()

    if (pretrained_weights):
        model.load_weights(pretrained_weights)

    return model
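As with simple_model earlier, a brief usage sketch for full_model, assuming grayscale 256x256 images and same-sized masks; the dummy arrays are illustrative only.

import numpy as np

model = full_model()  # input_size defaults to (256, 256, 1)
images = np.random.rand(2, 256, 256, 1).astype('float32')
masks = np.random.randint(0, 2, size=(2, 256, 256, 1)).astype('float32')
model.fit(images, masks, batch_size=1, epochs=1)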