Example #1
0
def main(args):
    """Evaluate a trained segmentation model on a generated test set.

    Reads the test image-name list, builds the requested network,
    restores the trained weights and runs ``evaluate_generator`` while
    logging to TensorBoard.
    """
    # List of names of the images to be read.
    test_list = pd.read_csv(args.test_list, header=None)

    # Batch generator for the test set; it controls the batch size used
    # during evaluation.
    test_gen = data_gen_small(args.testimg_dir, args.testmsk_dir, test_list,
                              args.batch_size,
                              [args.input_shape[0], args.input_shape[1]],
                              args.n_labels)

    # Dispatch on the model name.  The original if/elif chain had no else
    # branch, so an unknown name left `model` undefined and crashed later
    # with a NameError; fail fast with a clear message instead.
    builders = {'unet': unet, 'segnet': segnet, 'segunet': segunet}
    if args.model not in builders:
        raise ValueError('Unknown model %r; expected one of %s'
                         % (args.model, sorted(builders)))
    model = builders[args.model](args.input_shape, args.n_labels, args.kernel,
                                 args.pool_size, args.output_mode, args.gpus)

    # Compile so the model can be evaluated.  The optimizer settings do
    # not matter here because this script never trains the model.
    optimizer = Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.01)
    model.compile(loss=args.loss, optimizer=optimizer, metrics=[args.metrics])

    # Restore the trained weights.
    model.load_weights(args.model_weights)

    # Keras model summary, for confirmation.
    print(model.summary())

    # Evaluate on data generated batch-by-batch by a Python generator;
    # evaluate_generator is used because the generator fixes the batch size.
    tensorboard = TensorBoard(log_dir=args.save_dir)
    fit_start_time = time()
    model.evaluate_generator(test_gen,
                             steps=args.epoch_steps,
                             verbose=args.verbose,
                             callbacks=[tensorboard])
    print('### Model fit time (s): ', time() - fit_start_time)
Example #2
0
def main(args):
    """Train a segmentation model on batches from a Python generator.

    Builds the requested network, compiles it with Adadelta, optionally
    restores initial weights, then fits with ``fit_generator`` while
    writing TensorBoard logs and periodic weight checkpoints; the final
    weights are saved under the run's log directory.
    """
    # Image-name lists; steps per epoch derive from list length / batch size.
    train_list = pd.read_csv(args.train_list, header=None)
    epoch_steps = len(train_list) // args.batch_size
    val_list = pd.read_csv(args.val_list, header=None)
    val_steps = len(val_list) // args.batch_size

    # Batch generators for SGD; they control the batch size for the
    # training and validation sets.
    train_gen = data_gen_small(args.trainimg_dir, args.trainmsk_dir,
                               train_list, args.batch_size,
                               [args.input_shape[0], args.input_shape[1]],
                               args.n_labels)
    val_gen = data_gen_small(args.valimg_dir, args.valmsk_dir, val_list,
                             args.batch_size,
                             [args.input_shape[0], args.input_shape[1]],
                             args.n_labels)

    # Dispatch on the model name.  The original if/elif chain had no else
    # branch, so a typo in --model left `model` undefined and crashed
    # later with a NameError; fail fast with a clear message instead.
    builders = {'unet': unet, 'segnet': segnet,
                'segunet': segunet, 'fastnet': fastnet}
    if args.model not in builders:
        raise ValueError('Unknown model %r; expected one of %s'
                         % (args.model, sorted(builders)))
    model = builders[args.model](args.input_shape, args.n_labels, args.kernel,
                                 args.pool, args.output_mode, args.gpus)

    # Keras model summary, for confirmation.
    print(model.summary())

    # Configure the model for training.
    optimizer = Adadelta(lr=args.lr,
                         rho=args.rho,
                         epsilon=args.epsilon,
                         decay=args.decay)
    model.compile(loss=args.loss, optimizer=optimizer, metrics=[args.metrics])

    # If pre-trained weights are available, use those.
    if args.initial_weights:
        model.load_weights(args.initial_weights)
        print('Initial weights loaded')

    # TensorBoard log dir name encodes the parameters that matter for a run.
    log_dir = ('{}model_{}_batch_size_{}_epoch_steps_{}_n_epochs_{}'
               '_lr_{}_decay_{}_labels_{}/').format(
                   args.save_dir, args.model, args.batch_size, epoch_steps,
                   args.n_epochs, args.lr, args.decay, args.n_labels)
    tensorboard = TensorBoard(log_dir=log_dir)

    # Checkpoint the weights every `period` epochs alongside the logs.
    checkpoint = ModelCheckpoint(
        log_dir + 'weights_at_epoch_{epoch:03d}.hdf5',
        verbose=1,
        save_best_only=False,
        save_weights_only=True,
        period=args.period)

    # Train on data generated batch-by-batch by a Python generator;
    # fit_generator is used because the generator fixes the batch size.
    fit_start_time = time()
    model.fit_generator(train_gen,
                        steps_per_epoch=epoch_steps,
                        epochs=args.n_epochs,
                        validation_data=val_gen,
                        validation_steps=val_steps,
                        verbose=args.verbose,
                        callbacks=[checkpoint, tensorboard])
    print('### Model fit time (s): ', time() - fit_start_time)

    # NOTE: Saving the whole model (model.save) fails for non-Sequential
    # models with "AttributeError: 'NoneType' object has no attribute
    # 'update'" (Keras bug), so only the weights are saved and the model
    # structure must be rebuilt wherever it is reused.

    # Save final weights.
    model.save_weights(log_dir + 'weights_at_epoch_%03d.hdf5' %
                       (args.n_epochs))
    print('Final model weights saved.')
Example #3
0
def main(args):
    """Train SegUNet on the image lists under ``resources/``.

    Reads train/val image-name lists, builds batch generators, compiles
    the model and fits it with checkpointing, early stopping, TensorBoard
    and CSV logging; final weights go to ``resources/weights/``.
    """
    # Restrict the visible GPUs when a device number is given.
    if args.gpu_num:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_num
    # .get() avoids a KeyError when CUDA_VISIBLE_DEVICES was never set
    # (the original indexed os.environ unconditionally and crashed when
    # args.gpu_num was empty and the variable was absent).
    print(os.environ.get("CUDA_VISIBLE_DEVICES"))

    def read_name_list(path):
        # One image name per line; strip the newline characters.
        with open(path, "r") as f:
            return [line.replace("\n", "") for line in f]

    train_list = read_name_list("resources/train_list.txt")
    val_list = read_name_list("resources/val_list.txt")

    with tf.Graph().as_default():
        # Let TF grow GPU memory on demand instead of grabbing it all up front.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 1.0
        session = tf.Session(config=config)
        KTF.set_session(session)
        KTF.set_learning_phase(1)

        # Callbacks: checkpoint every 2 epochs (best val_loss only),
        # early stopping, TensorBoard and a CSV training log.
        cp_cb = ModelCheckpoint(filepath='resources/checkpoints/checkpoint',
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=True,
                                mode='auto',
                                period=2)
        es_cb = EarlyStopping(monitor='val_loss',
                              patience=2,
                              verbose=1,
                              mode='auto')
        tb_cb = TensorBoard(log_dir='resources/logs/', write_images=True)
        csv_logger = CSVLogger('resources/logs/training.log')

        # Batch generators; images and masks live in the same directory here.
        train_gen = data_gen_small('resources/train/', 'resources/train/',
                                   train_list, args.batch_size,
                                   [args.input_shape[0], args.input_shape[1]],
                                   args.n_labels)
        val_gen = data_gen_small('resources/val/', 'resources/val/', val_list,
                                 args.batch_size,
                                 [args.input_shape[0], args.input_shape[1]],
                                 args.n_labels)

        # Build the model and print its summary for confirmation.
        model = segunet(args.input_shape, args.n_labels, args.kernel,
                        args.pool_size, args.output_mode)
        print(model.summary())

        # Compile the model.
        model.compile(loss=args.loss,
                      optimizer=args.optimizer,
                      metrics=["accuracy"])

        # Fit with the generators.
        model.fit_generator(generator=train_gen,
                            steps_per_epoch=args.epoch_steps,
                            epochs=args.n_epochs,
                            validation_data=val_gen,
                            validation_steps=args.val_steps,
                            callbacks=[cp_cb, es_cb, tb_cb, csv_logger])

        model.save_weights("resources/weights/weights_01.hdf5")
Example #4
0
def main(args):
    """Train SegUNet with checkpointing, early stopping and TensorBoard.

    Image/mask list files and directories come from the parsed arguments.
    """
    # Restrict the visible GPUs when a device number is given.
    if args.gpu_num:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_num

    # Image-name lists.
    train_list = pd.read_csv(args.train_list, header=None)
    val_list = pd.read_csv(args.val_list, header=None)

    # Image / mask directories.
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    # Keep a handle on the current session.  NOTE(review): it is fetched
    # but never restored in this function — presumably meant to be re-set
    # after training; confirm against the caller.
    old_session = KTF.get_session()

    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        KTF.set_learning_phase(1)

        # Checkpoint path.  The original referenced an undefined `fpath`
        # (NameError on the ModelCheckpoint line); derive it from the log
        # directory, one file per saved epoch.
        fpath = args.log_dir + 'checkpoint_e{epoch:02d}.hdf5'

        # Callbacks: checkpoint every 2 epochs (best val_loss only),
        # early stopping and TensorBoard.
        cp_cb = ModelCheckpoint(filepath=fpath,
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=True,
                                mode='auto',
                                period=2)
        es_cb = EarlyStopping(monitor='val_loss',
                              patience=2,
                              verbose=1,
                              mode='auto')
        tb_cb = TensorBoard(log_dir=args.log_dir, write_images=True)

        # Batch generators for the training and validation sets.
        train_gen = data_gen_small(trainimg_dir, trainmsk_dir, train_list,
                                   args.batch_size,
                                   [args.input_shape[0], args.input_shape[1]],
                                   args.n_labels)
        val_gen = data_gen_small(valimg_dir, valmsk_dir, val_list,
                                 args.batch_size,
                                 [args.input_shape[0], args.input_shape[1]],
                                 args.n_labels)

        # Build the model.  The original printed `segunet.summary()`,
        # calling .summary() on the constructor *function* (AttributeError);
        # the summary belongs to the built model instance.
        model = segunet(args.input_shape, args.n_labels, args.kernel,
                        args.pool_size, args.output_mode)
        print(model.summary())

        # Compile the model.
        model.compile(loss=args.loss,
                      optimizer=args.optimizer,
                      metrics=["accuracy"])

        # Fit with the generators.
        model.fit_generator(generator=train_gen,
                            steps_per_epoch=args.epoch_steps,
                            epochs=args.n_epochs,
                            validation_data=val_gen,
                            validation_steps=args.val_steps,
                            callbacks=[cp_cb, es_cb, tb_cb])
Example #5
0
 def __init__(self, input_shape = (512, 512, 3)):
     """Set up the SegUNet predictor: build the network, restore trained
     weights and load the full-body Haar cascade classifier."""
     self.input_shape = input_shape
     # SegUNet with 20 labels, 3x3 kernels, 2x2 pooling, softmax output.
     net = segunet(input_shape, 20, 3, (2, 2), "softmax")
     net.load_weights("resources/checkpoints/checkpoint_e02.hdf5")
     self.model = net
     self.cascade = cv2.CascadeClassifier("resources/haarcascade_fullbody.xml")
     # Print the architecture for confirmation.
     print(self.model.summary())