Example #1
def main(args):
    # set the necessary list
    train_list = pd.read_csv(args.train_list, header=None)
    val_list = pd.read_csv(args.val_list, header=None)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    train_gen = data_gen_small(trainimg_dir, trainmsk_dir, train_list,
                               args.batch_size,
                               [args.input_shape[0], args.input_shape[1]],
                               args.n_labels)
    val_gen = data_gen_small(valimg_dir, valmsk_dir, val_list, args.batch_size,
                             [args.input_shape[0], args.input_shape[1]],
                             args.n_labels)

    model = segnet(args.input_shape, args.n_labels, args.kernel,
                   args.pool_size, args.output_mode)
    print(model.summary())

    model.compile(loss=args.loss,
                  optimizer=args.optimizer,
                  metrics=["accuracy"])
    model.fit_generator(train_gen,
                        steps_per_epoch=args.epoch_steps,
                        epochs=args.n_epochs,
                        validation_data=val_gen,
                        validation_steps=args.val_steps)

    model.save_weights(args.save_dir + str(args.n_epochs) + ".hdf5")
    print("sava weight done..")
Example #2
def train(args):
    train_list = range(args.train_size)
    val_list = range(args.val_size)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    train_gen = data_gen_small(trainimg_dir, trainmsk_dir, train_list,
                               args.batch_size,
                               [args.input_shape[0], args.input_shape[1]],
                               args.n_labels)
    val_gen = data_gen_small(valimg_dir, valmsk_dir, val_list, args.batch_size,
                             [args.input_shape[0], args.input_shape[1]],
                             args.n_labels)

    model = segnet(args.input_shape, args.n_labels, args.kernel,
                   args.pool_size, args.output_mode, args.model_number)
    print(model.summary())

    with open(args.save_dir + 'model' + str(args.model_number) + '.txt',
              'w') as fh:
        model.summary(print_fn=lambda x: fh.write(x + '\n'))

    model.compile(loss=args.loss,
                  optimizer=args.optimizer,
                  metrics=["accuracy"])

    print("Model Compiled")
    tensorboard = TensorBoard(log_dir='logs/{}'.format(time()))
    model.fit_generator(train_gen,
                        steps_per_epoch=args.epoch_steps,
                        epochs=args.n_epochs,
                        validation_data=val_gen,
                        validation_steps=args.val_steps,
                        callbacks=[tensorboard])
    model.save(args.save_dir + args.new_model_name + ".hdf5")
    print("Model Saved")

    K.set_learning_phase(0)
    session = K.get_session()
    init = tf.global_variables_initializer()
    session.run(init)
    frozen_graph = freeze_session(
        session, output_names=[out.op.name for out in model.outputs])
    print([out.op.name for out in model.outputs])
    tf.train.write_graph(frozen_graph,
                         args.save_dir,
                         args.new_model_name + ".pb",
                         as_text=False)
    session.close()
    with open(args.save_dir + "recent.txt", "w+") as file1:
        file1.write(args.new_model_name)
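
Example #2 exports a frozen .pb graph via a freeze_session helper that is not defined in the snippet. A common TF1-era implementation of that helper looks roughly like the sketch below; treat it as an assumption about what the script imports, not as the project's own code.

import tensorflow as tf


def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
    # Convert every variable in the session's graph to a constant so the graph
    # can be written out as a self-contained .pb file.
    graph = session.graph
    with graph.as_default():
        freeze_var_names = list(
            set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
        output_names = list(output_names or [])
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            for node in input_graph_def.node:
                node.device = ""  # strip device placements so the graph is portable
        return tf.graph_util.convert_variables_to_constants(
            session, input_graph_def, output_names, freeze_var_names)
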
Example #3
def main(args):
    # set the necessary list
    with open(args.train_list, "r") as train_file:
        train_list = train_file.readlines()
    random.shuffle(train_list)

    with open(args.val_list, "r") as val_file:
        val_list = val_file.readlines()

    with open(args.test_list, "r") as test_file:
        test_list = test_file.readlines()

    train_gen = data_gen_small(
        train_list,
        args.batch_size,
        [args.input_shape[0], args.input_shape[1]],
        args.n_labels,
    )
    val_gen = data_gen_small(
        val_list,
        args.batch_size,
        [args.input_shape[0], args.input_shape[1]],
        args.n_labels,
    )

    model = segnet(args.input_shape, args.n_labels, args.kernel,
                   args.pool_size, args.output_mode)
    print(model.summary())

    model.compile(loss=args.loss,
                  optimizer=Adam(lr=0.001),
                  metrics=["accuracy"])

    if args.resume:
        print("Load Model: " + args.resume)
        model.load_weights(args.resume)

    model.fit_generator(
        train_gen,
        steps_per_epoch=args.epoch_steps,
        epochs=args.n_epochs,
        validation_data=val_gen,
        validation_steps=args.val_steps,
    )

    model.save_weights(args.save_dir + str(args.n_epochs) + ".hdf5")
    print("save weight done..")

    test_gen = test_data_generator(
        test_list,
        #args.batch_size,
        [args.input_shape[0], args.input_shape[1]],
        args.n_labels,
    )
    model.evaluate(test_gen)
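
All of these scripts receive their settings through an args namespace. Below is a minimal argparse sketch for the flags Example #3 reads; the flag names follow the attribute accesses in the code, while the defaults and help strings are assumptions.

import argparse


def parse_args():
    parser = argparse.ArgumentParser(description="SegNet training (flag names inferred from the example)")
    parser.add_argument("--train_list", required=True, help="text file listing training samples")
    parser.add_argument("--val_list", required=True)
    parser.add_argument("--test_list", required=True)
    parser.add_argument("--save_dir", default="./weights/")
    parser.add_argument("--resume", default=None, help="path of weights to resume from")
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--n_epochs", type=int, default=50)
    parser.add_argument("--epoch_steps", type=int, default=100)
    parser.add_argument("--val_steps", type=int, default=10)
    parser.add_argument("--n_labels", type=int, default=21)
    parser.add_argument("--input_shape", type=int, nargs=3, default=(256, 256, 3))
    parser.add_argument("--kernel", type=int, default=3)
    parser.add_argument("--pool_size", type=int, nargs=2, default=(2, 2))
    parser.add_argument("--output_mode", default="softmax")
    parser.add_argument("--loss", default="categorical_crossentropy")
    return parser.parse_args()

An entry point would then simply call main(parse_args()).
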
Example #4
def main(args):
    # set the necessary list
    train_list = pd.read_csv(args.train_list, header=None)
    val_list = pd.read_csv(args.val_list, header=None)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    train_gen = data_gen_small(trainimg_dir, trainmsk_dir, train_list,
                               args.batch_size,
                               (args.input_shape[0], args.input_shape[1]),
                               args.n_labels)
    val_gen = data_gen_small(valimg_dir, valmsk_dir, val_list, args.batch_size,
                             (args.input_shape[0], args.input_shape[1]),
                             args.n_labels)

    segnet = CreateSegNet(args.input_shape, args.n_labels, args.kernel,
                          args.pool_size, args.output_mode)
    #print(segnet.summary())

    segnet.compile(loss=args.loss,
                   optimizer=args.optimizer,
                   metrics=["categorical_accuracy"])

    segnet.load_weights('../weights/MTI_SegNet.hdf5')

    segnet.fit_generator(train_gen,
                         steps_per_epoch=args.epoch_steps,
                         epochs=args.n_epochs,
                         validation_data=val_gen,
                         validation_steps=args.val_steps,
                         workers=1,
                         max_queue_size=20)

    segnet.save_weights("../weights/MTI_SegNet.hdf5")
    print("saving weights done..")
Example #5
def main(args):
    # set the necessary list
    train_list = pd.read_csv(args.train_list, header=None)
    val_list = pd.read_csv(args.val_list, header=None)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    train_gen = data_gen_small(trainimg_dir, trainmsk_dir, train_list,
                               args.batch_size,
                               [args.input_shape[0], args.input_shape[1]],
                               args.n_labels)
    val_gen = data_gen_small(valimg_dir, valmsk_dir, val_list, args.batch_size,
                             [args.input_shape[0], args.input_shape[1]],
                             args.n_labels)

    segnet = CreateSegNet(args.input_shape, args.n_labels, args.kernel,
                          args.pool_size, args.output_mode)
    print(segnet.summary())

    segnet.compile(loss=args.loss,
                   optimizer=args.optimizer,
                   metrics=["accuracy"])
    segnet.fit_generator(train_gen,
                         steps_per_epoch=args.epoch_steps,
                         epochs=args.n_epochs,
                         validation_data=val_gen,
                         validation_steps=args.val_steps)

    segnet.save_weights("../LIP/pretrained/LIP_SegNet" + str(args.n_epochs) +
                        ".hdf5")
    print("sava weight done..")

    json_string = segnet.to_json()
    with open("../LIP/pretrained/LIP_SegNet.json", "w") as json_file:
        json_file.write(json_string)
Example #6
def main(args):
    # set the necessary list i.e. list of names of images to be read
    test_list = pd.read_csv(args.test_list, header=None)

    # set the necessary directories
    testimg_dir = args.testimg_dir
    testmsk_dir = args.testmsk_dir

    # Generate batch data for SGD
    # NOTE: This helps control the batch size for each test set
    test_gen = data_gen_small(testimg_dir, testmsk_dir, test_list,
                              args.batch_size,
                              [args.input_shape[0], args.input_shape[1]],
                              args.n_labels)

    # Create a model
    if args.model == 'unet':
        model = unet(\
         args.input_shape, args.n_labels, args.kernel, \
         args.pool_size, args.output_mode, args.gpus)
    elif args.model == 'segnet':
        model = segnet(\
         args.input_shape, args.n_labels, args.kernel, \
         args.pool_size, args.output_mode, args.gpus)
    elif args.model == 'segunet':
        model = segunet(\
         args.input_shape, args.n_labels, args.kernel, \
         args.pool_size, args.output_mode, args.gpus)

    # TODO: Configure the model for training
    optimizer = Adadelta(
        lr=1.0, rho=0.95, epsilon=None, decay=0.01
    )  # NOTE: The optimizer details don't matter here because we are not training the model
    model.compile(loss=args.loss, optimizer=optimizer, metrics=[args.metrics])

    # Set model trained weights
    model.load_weights(args.model_weights)

    # Keras model summary, for confirmation
    print(model.summary())

    # Test the model on data generated batch-by-batch by a Python generator
    # NOTE: We use evaluate_generator because the data comes from a generator with a fixed batch size
    tensorboard = TensorBoard(log_dir=args.save_dir)
    fit_start_time = time()
    model.evaluate_generator(test_gen,
                             steps=args.epoch_steps,
                             verbose=args.verbose,
                             callbacks=[tensorboard])
    print('### Model fit time (s): ', time() - fit_start_time)
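
On newer tf.keras releases the *_generator methods are deprecated, and evaluate() accepts generators directly; the same evaluation from Example #6 can then be written as the sketch below (same variables as above assumed).

# Equivalent call on recent tf.keras, where evaluate() takes a generator plus steps:
scores = model.evaluate(test_gen, steps=args.epoch_steps,
                        verbose=args.verbose, callbacks=[tensorboard])
print(dict(zip(model.metrics_names, scores)))
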
Example #7
def main(args):
    # device number
    if args.gpu_num:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_num

    # set the necessary list
    train_list = pd.read_csv(args.train_list, header=None)
    val_list = pd.read_csv(args.val_list, header=None)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    # get old session
    # old_session = KTF.get_session()

    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        KTF.set_learning_phase(1)

        # set callbacks
        cp_cb = ModelCheckpoint(filepath=args.log_dir,
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=True,
                                mode='auto',
                                period=2)
        es_cb = EarlyStopping(monitor='val_loss',
                              patience=2,
                              verbose=1,
                              mode='auto')
        tb_cb = TensorBoard(log_dir=args.log_dir, write_images=True)

        # set generators
        train_gen = data_gen_small(trainimg_dir, trainmsk_dir, train_list,
                                   args.batch_size,
                                   [args.input_shape[0], args.input_shape[1]],
                                   args.n_labels)
        val_gen = data_gen_small(valimg_dir, valmsk_dir, val_list,
                                 args.batch_size,
                                 [args.input_shape[0], args.input_shape[1]],
                                 args.n_labels)

        # set model
        pspnet = PSPNet50(input_shape=args.input_shape,
                          n_labels=args.n_labels,
                          output_mode=args.output_mode,
                          upsample_type=args.upsample_type)
        print(pspnet.summary())

        # compile model
        pspnet.compile(loss=args.loss,
                       optimizer=args.optimizer,
                       metrics=["accuracy"])

        # fit with generator
        pspnet.fit_generator(generator=train_gen,
                             steps_per_epoch=args.epoch_steps,
                             epochs=args.n_epochs,
                             validation_data=val_gen,
                             validation_steps=args.val_steps,
                             callbacks=[cp_cb, es_cb, tb_cb])
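
Note that Example #7 hands args.log_dir, a directory, to ModelCheckpoint's filepath, which expects a file path and supports formatting fields. A sketch of a more explicit checkpoint path follows; the file name pattern and the stand-in log_dir value are assumptions.

from keras.callbacks import ModelCheckpoint

log_dir = "./logs/"  # stands in for args.log_dir
cp_cb = ModelCheckpoint(
    filepath=log_dir + "pspnet50_{epoch:02d}_{val_loss:.3f}.hdf5",
    monitor="val_loss", verbose=1, save_best_only=True, mode="auto", period=2)
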
Example #8
def main(args):
    # set the necessary list
    train_list = pd.read_csv(args.train_list, header=None)
    val_list = pd.read_csv(args.val_list, header=None)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    # get old session
    # old_session = KTF.get_session()

    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        KTF.set_learning_phase(1)

        # class weights
        classes = ['background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes',
                'dress', 'coat', 'socks', 'pants', 'jumpsuits', 'scarf', 'skirt',
                'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe','rightShoe']
        class_weights = None
        if args.class_weights:
            """
            class_weights = {0:1, 1:40, 2:1, 3:114, 4:151, 5:3, 6:53, 7:7, 8:165, 9:7, 10:106,
                    11:249, 12:150, 13:1, 14:1, 15:1, 16:1, 17:1, 18:114, 19:118}
            """
            class_weights = [1, 40, 1, 114, 151, 3, 53, 7, 165, 7, 106, 249, 150, 1, 1, 1, 1, 1, 114, 118]

        # set callbacks
        fpath = "./pretrained_class_weights/LIP_PSPNet50_class_weights{epoch:02d}.hdf5"
        cp_cb = ModelCheckpoint(filepath = fpath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto', period=2)
        es_cb = EarlyStopping(monitor='val_loss', patience=2, verbose=1, mode='auto')
        tb_cb = TensorBoard(log_dir="./pretrained_class_weights", write_images=True)

        # set generators
        train_gen = data_gen_small(
                trainimg_dir,
                trainmsk_dir,
                train_list,
                args.batch_size,
                [args.input_shape[0], args.input_shape[1]],
                args.n_labels)
        val_gen = data_gen_small(
                valimg_dir,
                valmsk_dir,
                val_list,
                args.batch_size,
                [args.input_shape[0], args.input_shape[1]],
                args.n_labels)

        # set model
        pspnet = PSPNet50(
                input_shape=args.input_shape,
                n_labels=args.n_labels,
                output_mode=args.output_mode,
                upsample_type=args.upsample_type)
        print(pspnet.summary())

        # compile model
        pspnet.compile(
                loss=args.loss,
                optimizer=args.optimizer,
                metrics=["accuracy"])

        # fit with generator
        pspnet.fit_generator(
                generator=train_gen,
                steps_per_epoch=args.epoch_steps,
                epochs=args.n_epochs,
                validation_data=val_gen,
                validation_steps=args.val_steps,
                class_weight=class_weights,
                callbacks=[cp_cb, es_cb, tb_cb])

    # save model
    with open("./pretrained_class_weights/LIP_PSPNet50.json", "w") as json_file:
        json_file.write(json.dumps(json.loads(pspnet.to_json()), indent=2))
    print("save json model done...")
Example #9
def main(args):
    # set the necessary list i.e. list of names of images to be read
    train_list = pd.read_csv(args.train_list, header=None)
    train_list_len = len(train_list)
    epoch_steps = int(train_list_len / args.batch_size)
    val_list = pd.read_csv(args.val_list, header=None)
    val_list_len = len(val_list)
    val_steps = int(val_list_len / args.batch_size)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    # Generate batch data for SGD
    # NOTE: This helps control the batch size for each training and validation set
    train_gen = data_gen_small(trainimg_dir, trainmsk_dir, train_list,
                               args.batch_size,
                               [args.input_shape[0], args.input_shape[1]],
                               args.n_labels)
    val_gen = data_gen_small(valimg_dir, valmsk_dir, val_list, args.batch_size,
                             [args.input_shape[0], args.input_shape[1]],
                             args.n_labels)

    # Create a model
    if args.model == 'unet':
        model = unet(\
         args.input_shape, args.n_labels, args.kernel, \
         args.pool, args.output_mode, args.gpus)
    elif args.model == 'segnet':
        model = segnet(\
         args.input_shape, args.n_labels, args.kernel, \
         args.pool, args.output_mode, args.gpus)
    elif args.model == 'segunet':
        model = segunet(\
         args.input_shape, args.n_labels, args.kernel, \
         args.pool, args.output_mode, args.gpus)
    elif args.model == 'fastnet':
        model = fastnet(\
         args.input_shape, args.n_labels, args.kernel, \
         args.pool, args.output_mode, args.gpus)

    # Keras model summary
    print(model.summary())

    # TODO: Configure the model for training
    optimizer = Adadelta(lr=args.lr,
                         rho=args.rho,
                         epsilon=args.epsilon,
                         decay=args.decay)
    model.compile(loss=args.loss, optimizer=optimizer, metrics=[args.metrics])
    # model.compile(loss=args.loss, optimizer=args.optimizer, metrics=[args.metrics])

    # If pre-trained weights available, use those
    if (args.initial_weights):
        model.load_weights(args.initial_weights)
        print('Initial weights loaded')

    # Generate log for tensorboard (currently only useful parameters are added to the dir name)
    log_dir = args.save_dir + \
        'model_' + args.model + \
        '_batch_size_' + str(args.batch_size) + \
        '_epoch_steps_' + str(epoch_steps) + \
        '_n_epochs_' + str(args.n_epochs) + \
        '_lr_' + str(args.lr) + \
        '_decay_' + str(args.decay) + \
        '_labels_' + str(args.n_labels) + \
        '/'
    tensorboard = TensorBoard(log_dir=log_dir)

    # Generate checkpoints
    checkpoint = ModelCheckpoint(
        log_dir + 'weights_at_epoch_{epoch:03d}.hdf5',
        verbose=1,
        save_best_only=False,
        save_weights_only=True,
        period=args.period)  # Save a checkpoint every args.period epochs

    # Train the model on data generated batch-by-batch by a Python generator
    # NOTE: We use fit_generator because the data comes from a generator with a fixed batch size
    fit_start_time = time()
    model.fit_generator(train_gen,
                        steps_per_epoch=epoch_steps,
                        epochs=args.n_epochs,
                        validation_data=val_gen,
                        validation_steps=val_steps,
                        verbose=args.verbose,
                        callbacks=[checkpoint, tensorboard])
    print('### Model fit time (s): ', time() - fit_start_time)

    # # NOTE: Cannot save the whole model OR the model structure if the model is not Keras Sequential; it throws "AttributeError: 'NoneType' object has no attribute 'update'"
    # # This is a bug in Keras (not sure about TensorFlow)
    # # Therefore, the model structure must be regenerated every time it is needed outside of the current script
    # model.save(log_dir + 'model.hdf5') # Model architecture and final weights

    # Save final weights
    model.save_weights(log_dir + 'weights_at_epoch_%03d.hdf5' %
                       (args.n_epochs))
    print('Final model weights saved.')
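
Example #9 derives epoch_steps and val_steps with integer division, which silently drops the final partial batch whenever the list length is not a multiple of batch_size. Ceiling division keeps it; a small self-contained sketch:

import math

train_list_len, batch_size = 1050, 8
epoch_steps = math.ceil(train_list_len / batch_size)  # 132 steps; int(1050 / 8) would give 131
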
Example #10
def main(args):
    # set the necessary list
    #train_list = pd.read_csv(args.train_list, header=None)
    train_list = pd.read_csv(
        'gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/test_2/train.csv'
    )
    #val_list = pd.read_csv(args.val_list, header=None)

    # set the necessary directories
    #trainimg_dir = args.trainimg_dir
    trainimg_dir = 'gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/multiclassification/data/SequoiaMulti_30/train_partition_images/'
    trainmsk_dir = 'gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/multiclassification/data/SequoiaMulti_30/train_partition_ground_truth/'
    #trainmsk_dir = args.trainmsk_dir
    #valimg_dir = args.valimg_dir
    #valmsk_dir = args.valmsk_dir

    train_gen = data_gen_small(
        trainimg_dir,
        trainmsk_dir,
        train_list,
        args.batch_size,
        #[args.input_shape[0], args.input_shape[1]],
        (args.input_shape[0], args.input_shape[1]),
        args.n_labels,
    )
    '''
    val_gen = data_gen_small(
        valimg_dir,
        valmsk_dir,
        val_list,
        args.batch_size,
        [args.input_shape[0], args.input_shape[1]],
        args.n_labels,
    )
    '''

    model = segnet(args.input_shape, args.n_labels, args.kernel,
                   args.pool_size, args.output_mode)
    print(model.summary())

    model.compile(loss=args.loss,
                  optimizer=args.optimizer,
                  metrics=["accuracy"])

    additional = 50
    train_resume = True
    weights_path = 'gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/test_2/' + str(
        600) + ".hdf5"
    print(weights_path)
    if train_resume:
        model.load_weights(weights_path)
        print("\nTraining resumed...")
    else:
        print("\nTraining started from scratch... ")

    model.fit_generator(
        train_gen,
        steps_per_epoch=args.epoch_steps,
        epochs=args.n_epochs,
        #validation_data=val_gen,
        #validation_steps=args.val_steps,
    )

    model.save_weights(
        'gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/test_2/' +
        str(650) + ".hdf5")
    print("sava weight done..")
Example #11
        cp_cb = ModelCheckpoint(filepath=fpath,
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=True,
                                mode='auto',
                                period=2)
        es_cb = EarlyStopping(monitor='val_loss',
                              patience=2,
                              verbose=1,
                              mode='auto')
        tb_cb = TensorBoard(log_dir="./pretrained_class_weights",
                            write_images=True)

        # set generators
        train_gen = data_gen_small(trainimg_dir, trainmsk_dir, train_list,
                                   args.batch_size,
                                   [args.input_shape[0], args.input_shape[1]],
                                   args.n_labels)
        val_gen = data_gen_small(valimg_dir, valmsk_dir, val_list,
                                 args.batch_size,
                                 [args.input_shape[0], args.input_shape[1]],
                                 args.n_labels)

        # set model
        segunet = CreateSegUNet(args.input_shape, args.n_labels, args.kernel,
                                args.pool_size, args.output_mode)
        print(segunet.summary())

        # compile model
        segunet.compile(loss=args.loss,
                        optimizer=args.optimizer,
                        metrics=["accuracy"])
Example #12
              include_top=False,
              input_shape=(256, 256, 3),
              weights='imagenet')
#model.summary()

train_list = pd.read_csv('./train.csv', header=None)
val_list = pd.read_csv('./test.csv', header=None)

trainimg_dir = './training_dataset'
trainmsk_dir = './training_mask'
valimg_dir = './validating_dataset'
valmsk_dir = './validating_mask'

x, y = data_gen_small(trainimg_dir,
                      trainmsk_dir,
                      6,
                      batch_size=1,
                      dims=[256, 256],
                      n_labels=2)
#print(x.shape)
x1, y1 = data_gen_small(valimg_dir,
                        valmsk_dir,
                        3,
                        batch_size=1,
                        dims=[256, 256],
                        n_labels=2)
stochastic = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)
callbacks = [
    ModelCheckpoint('bestmodel.h5', monitor='val_loss', save_best_only=True)
]

model.compile(loss="categorical_crossentropy",
Example #13
def main(args):
    # device number
    if args.gpu_num:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_num

    print(os.environ.get("CUDA_VISIBLE_DEVICES"))
    # set the necessary directories
    train_dir = 'resources/train'
    test_dir = 'resources/test'

    train_list_file = "resources/train_list.txt"
    val_list_file = "resources/val_list.txt"

    with open(train_list_file, "r") as f:
        train_list = [line.rstrip("\n") for line in f]
    with open(val_list_file, "r") as f:
        val_list = [line.rstrip("\n") for line in f]

    with tf.Graph().as_default():
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 1.0
        session = tf.Session(config=config)
        KTF.set_session(session)
        KTF.set_learning_phase(1)

        # set callbacks
        cp_cb = ModelCheckpoint(filepath='resources/checkpoints/checkpoint',
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=True,
                                mode='auto',
                                period=2)
        es_cb = EarlyStopping(monitor='val_loss',
                              patience=2,
                              verbose=1,
                              mode='auto')
        tb_cb = TensorBoard(log_dir='resources/logs/', write_images=True)
        csv_logger = CSVLogger('resources/logs/training.log')

        # set generators
        train_gen = data_gen_small('resources/train/', 'resources/train/',
                                   train_list, args.batch_size,
                                   [args.input_shape[0], args.input_shape[1]],
                                   args.n_labels)
        val_gen = data_gen_small('resources/val/', 'resources/val/', val_list,
                                 args.batch_size,
                                 [args.input_shape[0], args.input_shape[1]],
                                 args.n_labels)

        # set model
        model = segunet(args.input_shape, args.n_labels, args.kernel,
                        args.pool_size, args.output_mode)
        print(model.summary())

        # compile model
        model.compile(loss=args.loss,
                      optimizer=args.optimizer,
                      metrics=["accuracy"])

        # fit with generator
        model.fit_generator(generator=train_gen,
                            steps_per_epoch=args.epoch_steps,
                            epochs=args.n_epochs,
                            validation_data=val_gen,
                            validation_steps=args.val_steps,
                            callbacks=[cp_cb, es_cb, tb_cb, csv_logger])

        model.save_weights("resources/weights/weights_01.hdf5")