Example #1
0
def main():
    """Train a denoising model from CLI arguments.

    Parses args, builds/compiles the model, wires up data generators and
    callbacks, fits, and saves the training history to ``history.npz``.
    """
    args = get_args()
    image_dir = args.image_dir
    test_dir = args.test_dir
    image_size = args.image_size
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    steps = args.steps
    loss_type = args.loss
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)

    # Create the output directory once, idempotently, before any file writes.
    # (The original split this across a non-exist_ok os.makedirs() and a
    # later redundant mkdir(), which is race-prone.)
    output_path.mkdir(parents=True, exist_ok=True)

    save_arg_filename = output_path / 'args.txt'
    save_model_plot_filename = output_path / 'model_plot.png'

    # Persist the full argument set for reproducibility.
    with open(str(save_arg_filename), 'w') as f:
        json.dump(args.__dict__, f, indent=2)

    # Optionally resume from pre-trained weights.
    if args.weight is not None:
        model.load_weights(args.weight)

    opt = Adam(lr=lr)
    callbacks = []

    # "l0" selects an annealed L0 loss; the callback decays gamma per epoch
    # and loss_type becomes the callable loss function.
    if loss_type == "l0":
        l0 = L0Loss()
        callbacks.append(UpdateAnnealingParameter(l0.gamma, nb_epochs, verbose=1))
        loss_type = l0()

    model.compile(optimizer=opt, loss=loss_type, metrics=[PSNR])
    plot_model(model, to_file=str(save_model_plot_filename), show_shapes=True, show_layer_names=True)

    source_noise_model = get_noise_model(args.source_noise_model)
    target_noise_model = get_noise_model(args.target_noise_model)
    val_noise_model = get_noise_model(args.val_noise_model)
    generator = NoisyImageGenerator(image_dir, source_noise_model, target_noise_model, batch_size=batch_size,
                                    image_size=image_size)
    val_generator = ValGenerator(test_dir, val_noise_model)
    callbacks.append(LearningRateScheduler(schedule=Schedule(nb_epochs, lr)))
    # Keep only the best checkpoint, judged by validation PSNR (higher is better).
    callbacks.append(ModelCheckpoint(str(output_path) + "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
                                     monitor="val_PSNR",
                                     verbose=1,
                                     mode="max",
                                     save_best_only=True))

    hist = model.fit_generator(generator=generator,
                               steps_per_epoch=steps,
                               epochs=nb_epochs,
                               validation_data=val_generator,
                               verbose=1,
                               callbacks=callbacks)

    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)
def get_data(path, batch_size, image_size, number_classes=None):
    """Build the training and validation generators over *path*.

    Args:
        path: directory containing the images.
        batch_size: batch size for both generators.
        image_size: target image size for both generators.
        number_classes: class count; defaults to the module-level
            ``nb_classes`` global, which the original version depended on
            implicitly (NameError if it was never defined).

    Returns:
        (train_generator, val_generator) tuple.
    """
    if number_classes is None:
        number_classes = nb_classes  # module-level global, as before

    train_generator = FaceGenerator(
        path, batch_size=batch_size, image_size=image_size, number_classes=number_classes
    )

    val_generator = ValGenerator(
        path, batch_size=batch_size, image_size=image_size, number_classes=number_classes
    )

    return train_generator, val_generator
Example #3
0
def main():
    """Train a denoising model and save the best PSNR checkpoint plus history.

    Fix: the original passed a stray bare name ``but`` as an extra positional
    argument to NoisyImageGenerator, which raises NameError at runtime.
    """
    args = get_args()
    image_dir = args.image_dir
    test_dir = args.test_dir
    image_size = args.image_size
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    steps = args.steps
    loss_type = args.loss
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)

    # Optionally resume from pre-trained weights.
    if args.weight is not None:
        model.load_weights(args.weight)

    opt = Adam(lr=lr)
    callbacks = []

    # "l0" selects an annealed L0 loss; loss_type becomes the callable loss.
    if loss_type == "l0":
        l0 = L0Loss()
        callbacks.append(
            UpdateAnnealingParameter(l0.gamma, nb_epochs, verbose=1))
        loss_type = l0()

    model.compile(optimizer=opt, loss=loss_type, metrics=[PSNR])
    source_noise_model = get_noise_model(args.source_noise_model)
    target_noise_model = get_noise_model(args.target_noise_model)
    val_noise_model = get_noise_model(args.val_noise_model)
    generator = NoisyImageGenerator(image_dir,
                                    source_noise_model,
                                    target_noise_model,
                                    batch_size=batch_size,
                                    image_size=image_size)
    val_generator = ValGenerator(test_dir, val_noise_model)
    output_path.mkdir(parents=True, exist_ok=True)
    callbacks.append(LearningRateScheduler(schedule=Schedule(nb_epochs, lr)))
    # Keep only the best checkpoint, judged by validation PSNR.
    callbacks.append(
        ModelCheckpoint(
            str(output_path) +
            "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
            monitor="val_PSNR",
            verbose=1,
            mode="max",
            save_best_only=True))

    hist = model.fit_generator(generator=generator,
                               steps_per_epoch=steps,
                               epochs=nb_epochs,
                               validation_data=val_generator,
                               verbose=1,
                               callbacks=callbacks)

    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)
Example #4
0
def main():
    """Train an age-estimation model (ResNet50 or Xception) and save history.

    Fix: the original left ``image_size`` unbound for any other model name,
    which surfaced later as an opaque NameError; now it fails fast.
    """
    args = get_args()
    appa_dir = args.appa_dir
    utk_dir = args.utk_dir
    model_name = args.model_name
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    opt_name = args.opt

    # Each backbone expects a fixed input resolution.
    if model_name == "ResNet50":
        image_size = 224
    elif model_name == "Xception":
        image_size = 299
    else:
        raise ValueError("unsupported model_name: {}".format(model_name))

    train_gen = FaceGenerator(appa_dir,
                              utk_dir=utk_dir,
                              batch_size=batch_size,
                              image_size=image_size)
    val_gen = ValGenerator(appa_dir,
                           batch_size=batch_size,
                           image_size=image_size)
    model = get_model(model_name=model_name)
    opt = get_optimizer(opt_name, lr)
    model.compile(optimizer=opt,
                  loss="categorical_crossentropy",
                  metrics=[age_mae])
    model.summary()
    output_dir = Path(__file__).resolve().parent.joinpath(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs, initial_lr=lr)),
        # Keep only the best checkpoint, judged by validation age MAE (lower is better).
        ModelCheckpoint(
            str(output_dir) +
            "/weights.{epoch:03d}-{val_loss:.3f}-{val_age_mae:.3f}.hdf5",
            monitor="val_age_mae",
            verbose=1,
            save_best_only=True,
            mode="min")
    ]

    hist = model.fit_generator(generator=train_gen,
                               epochs=nb_epochs,
                               validation_data=val_gen,
                               verbose=1,
                               callbacks=callbacks)

    np.savez(str(output_dir.joinpath("history.npz")), history=hist.history)
def main():
    """Train a denoiser with multiprocessing generators; checkpoint on the
    best validation PSNR and persist the Keras history to history.npz."""
    args = get_args()
    save_root = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)
    model.compile(optimizer=Adam(lr=args.lr), loss=args.loss, metrics=[PSNR])

    noise_src = get_noise_model(args.source_noise_model)
    noise_tgt = get_noise_model(args.target_noise_model)
    noise_val = get_noise_model(args.val_noise_model)

    train_gen = NoisyImageGenerator(args.image_dir,
                                    noise_src,
                                    noise_tgt,
                                    batch_size=args.batch_size,
                                    image_size=args.image_size)
    valid_gen = ValGenerator(args.test_dir, noise_val)

    save_root.mkdir(parents=True, exist_ok=True)
    checkpoint_pattern = (str(save_root) +
                          "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5")
    callbacks = [
        LearningRateScheduler(schedule=Schedule(args.nb_epochs, args.lr)),
        ModelCheckpoint(checkpoint_pattern,
                        monitor="val_PSNR",
                        verbose=1,
                        mode="max",
                        save_best_only=True),
    ]

    hist = model.fit_generator(generator=train_gen,
                               steps_per_epoch=args.steps,
                               epochs=args.nb_epochs,
                               validation_data=valid_gen,
                               verbose=1,
                               callbacks=callbacks,
                               use_multiprocessing=True,
                               workers=8)

    np.savez(str(save_root.joinpath("history.npz")), history=hist.history)
Example #6
0
def main():
    """Train a denoiser under TF1-style session config.

    Logs history to CSV and TensorBoard, checkpoints on validation PSNR,
    and saves the Keras history. The ModelCheckpoint setup, previously
    duplicated byte-for-byte in both loss branches, is built exactly once.
    """
    # DEBUG, INFO, WARN, ERROR, or FATAL
    tf.compat.v1.logging.set_verbosity(
        tf.compat.v1.logging.FATAL)  # silence deprecation warning messages
    config = tf.ConfigProto()  # for ubuntu nvidia driver
    config.gpu_options.allow_growth = True  # for ubuntu nvidia driver
    config.gpu_options.per_process_gpu_memory_fraction = 0.9  # for ubuntu nvidia driver
    tf.keras.backend.set_session(
        tf.Session(config=config))  # for ubuntu nvidia driver
    args = get_args()
    image_dir = args.image_dir
    test_dir = args.test_dir
    image_size = args.image_size
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    init_lr = float(args.lr)
    steps = args.steps
    loss_type = args.loss
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)

    # Save the training arguments as JSON for reproducibility.
    save_arg_filename = Path(output_path) / 'args.txt'
    if not output_path.expanduser().exists():
        os.makedirs(output_path)
    with open(str(save_arg_filename), 'w') as f:
        json.dump(args.__dict__, f, indent=2)

    # Optionally resume from pre-trained weights.
    if args.weight is not None:
        model.load_weights(args.weight)

    callbacks = []

    # "l0" selects an annealed L0 loss; the callback decays gamma per epoch.
    if loss_type == "l0":
        l0 = L0Loss()
        callbacks.append(
            UpdateAnnealingParameter(l0.gamma, nb_epochs, verbose=1))
        loss_type = l0()  # becomes a callable loss function

    source_noise_model = get_noise_model(args.source_noise_model)
    target_noise_model = get_noise_model(args.target_noise_model)
    val_noise_model = get_noise_model(args.val_noise_model)
    generator = NoisyImageGenerator(image_dir,
                                    source_noise_model,
                                    target_noise_model,
                                    batch_size=batch_size,
                                    image_size=image_size)
    val_generator = ValGenerator(test_dir, val_noise_model)
    output_path.mkdir(parents=True, exist_ok=True)

    # Select the loss function; everything else (optimizer, metrics,
    # checkpointing) is identical for both cases.
    if loss_type == "mssssim":
        print('Choose mean ssim loss')
        loss_fn = Mean_MSSSIM_loss
    else:
        loss_fn = loss_type
    model.compile(optimizer=Adam(), loss=loss_fn, metrics=[PSNR])
    # Create the checkpoint: keep only the best model by validation PSNR.
    callbacks.append(
        ModelCheckpoint(
            str(output_path) +
            "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
            monitor="val_PSNR",
            verbose=1,
            mode="max",
            save_best_only=True))

    # Update the learning rate with an exponential schedule from init_lr.
    my_lr_schedule_exponential = LearningRateScheduler(
        schedule=MyExponentialScheduler(nb_epochs, init_lr), verbose=1)
    callbacks.append(my_lr_schedule_exponential)

    # Record training history as CSV.
    callbacks.append(
        CSVLogger(filename=str(output_path) + "/TrainingLogCsv.csv",
                  append=True))
    # Record training history with TensorBoard.
    callbacks.append(
        TensorBoard(log_dir=str(output_path) + str('/logs'),
                    histogram_freq=0,
                    write_graph=True,
                    write_grads=False,
                    embeddings_freq=0,
                    embeddings_layer_names=None,
                    embeddings_metadata=None))

    callbacks.append(LRTensorBoard(log_dir=str(output_path) + str('/logs')))

    # Train the model.
    hist = model.fit_generator(
        generator=generator,
        steps_per_epoch=steps,
        epochs=nb_epochs,
        validation_data=val_generator,
        verbose=1,
        callbacks=callbacks,
    )
    # Save the training history.
    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)
Example #7
0
    # print (val_generator.__getitem__(1))
    nb_epochs = 100
    lr = 0.001
    steps = 1500
    loss_type = "mse"
    history = LossHistory()
    KTF.set_session(get_session(0.6))  # using 40% of total GPU Memory
    output_path = Path(__file__).resolve().parent.joinpath("checkpoints")
    model = get_DronNet_model(3)
    pre_train_model = 'model.hdf5'
    if os.path.exists(pre_train_model):
        model = K.models.load_model(pre_train_model)
    opt = Adam(lr=lr)
    model.compile(optimizer=opt, loss=loss_type, metrics=['mae'])
    
    generator = TrainImageGenerator(["..\\data\\2019-03-21-14-05-35\\"], batch_size=1,label_size=4)
    val_generator = ValGenerator("..\\data\\test\\")
    output_path.mkdir(parents=True, exist_ok=True)

    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs, lr)),
        ModelCheckpoint(str(output_path) + "/weights.{epoch:03d}-{val_loss:.3f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        mode="auto",
                        save_best_only=False),
    ]

    hist = model.fit_generator(generator=generator,
                               steps_per_epoch=steps,
                               epochs=nb_epochs,
def main():
    """Train an age-estimation ResNet50, optionally logging to Neptune.

    Saves per-run checkpoints under ``checkpoints/<run-name>`` next to this
    script and the Keras history as ``history_<run-name>.npz``.

    Fix: the run directory was previously created with a cwd-relative
    ``os.mkdir('checkpoints/<name>')`` while ModelCheckpoint wrote to the
    absolute ``output_dir/name`` — so checkpointing failed whenever the
    process was not started from the script's directory. The run directory
    is now derived from output_dir itself.
    """
    nb_epochs = config.MAXIMUM_EPOCHS
    batch_size = config.batch_size
    lr = 0.1
    momentum = 0.9
    model_name = 'ResNet50'
    image_size = config.IMAGE_SIZE
    output_dir = 'checkpoints'

    experiment_name = 'yu4u'
    early_stop_patience = config.EARLY_STOP_EPOCHS

    PARAMS = {
        'epoch_nr': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': lr,
        'early_stop': early_stop_patience,
        'image_size': config.IMAGE_SIZE,
        'network': model_name
    }

    # With logging enabled the run is named after the Neptune experiment id;
    # otherwise a fixed placeholder keeps the output layout identical.
    if config.LOG_RESULTS:
        neptune.init(project_qualified_name='4ND4/sandbox')
        neptune_tb.integrate_with_keras()
        result = neptune.create_experiment(name=experiment_name, params=PARAMS)

        name = result.id

    else:
        name = 'no_log'

    train_gen = FaceGenerator(image_directory,
                              batch_size=batch_size,
                              image_size=image_size,
                              number_classes=nb_classes)
    val_gen = ValGenerator(image_directory,
                           batch_size=batch_size,
                           image_size=image_size,
                           number_classes=nb_classes)

    model = get_model(model_name=model_name,
                      image_size=image_size,
                      number_classes=nb_classes)

    sgd = SGD(lr=lr, momentum=momentum, nesterov=True)

    model.compile(optimizer=sgd,
                  loss="categorical_crossentropy",
                  metrics=[age_mae])

    model.summary()

    output_dir = Path(__file__).resolve().parent.joinpath(output_dir)

    # One idempotent call creates both the base and the per-run directory.
    run_dir = output_dir / name
    run_dir.mkdir(parents=True, exist_ok=True)

    callbacks = [
        EarlyStopping(monitor='val_age_mae',
                      mode='min',
                      verbose=1,
                      patience=early_stop_patience),
        LearningRateScheduler(schedule=Schedule(nb_epochs, initial_lr=lr)),
        ModelCheckpoint(str(run_dir) +
                        "/weights.{epoch:03d}-{val_loss:.3f}-{"
                        "val_age_mae:.3f}.hdf5",
                        monitor="val_age_mae",
                        verbose=1,
                        save_best_only=True,
                        mode="min")
    ]

    hist = model.fit_generator(generator=train_gen,
                               epochs=nb_epochs,
                               validation_data=val_gen,
                               verbose=1,
                               callbacks=callbacks)

    np.savez(str(output_dir.joinpath("history_{}.npz".format(name))),
             history=hist.history)
Example #9
0
    lr = 0.0001
    steps = 5000

    history = LossHistory()
    #KTF.set_session(get_session(0.6))  # using 40% of total GPU Memory
    output_path = Path(__file__).resolve().parent.joinpath("checkpoints")
    model = get_DronNet_model(3, lr)
    pre_train_model = 'model.hdf5'
    if os.path.exists(pre_train_model):
        model = K.models.load_model(pre_train_model)

    generator = TrainImageGenerator(
        ["../data/2019-06-16-17-38-29/", "../data/2019-06-26-17-34-32/"],
        batch_size=1,
        label_size=4)
    val_generator = ValGenerator("../data/test-real/")
    output_path.mkdir(parents=True, exist_ok=True)

    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs, lr)),
        ModelCheckpoint(str(output_path) +
                        "/weights.{epoch:03d}-{val_loss:.3f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        mode="auto",
                        save_best_only=False),
    ]

    hist = model.fit_generator(generator=generator,
                               steps_per_epoch=steps,
                               epochs=nb_epochs,
Example #10
0
def main():
    """Train a two-headed denoising model.

    The auxiliary 'subtract_1' head and the main 'add_36' head each use MSE
    (weighted 0.1 / 1); checkpoints track the main head's validation PSNR.
    """
    args = get_args()
    out_dir = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)

    callbacks = []
    chosen_loss = args.loss
    if chosen_loss == "l0":
        # Annealed L0 loss; the callback decays gamma across epochs.
        # (Note: compile() below hard-wires MSE per head, so this only
        # registers the annealing callback — matching the original.)
        l0 = L0Loss()
        callbacks.append(
            UpdateAnnealingParameter(l0.gamma, args.nb_epochs, verbose=1))
        chosen_loss = l0()

    model.compile(optimizer=Adam(lr=args.lr),
                  loss={
                      "subtract_1": "mse",
                      "add_36": "mse"
                  },
                  loss_weights={
                      'subtract_1': 0.1,
                      'add_36': 1
                  },
                  metrics=[PSNR])
    model.summary()

    train_gen = NoisyImageGenerator(args.image_dir_noise,
                                    args.image_dir_original,
                                    if_n=args.If_n,
                                    batch_size=args.batch_size,
                                    image_size=args.image_size)
    valid_gen = ValGenerator(args.test_dir_noise, args.test_dir_original,
                             if_n=args.If_n)
    out_dir.mkdir(parents=True, exist_ok=True)

    callbacks.append(
        LearningRateScheduler(schedule=Schedule(args.nb_epochs, args.lr)))
    callbacks.append(
        TensorBoard(log_dir='./log',
                    histogram_freq=0,
                    batch_size=args.batch_size,
                    write_images=True))
    # Keep only the best checkpoint by the main head's validation PSNR.
    callbacks.append(
        ModelCheckpoint(
            str(out_dir) +
            "/weights.{epoch:03d}-{val_add_36_loss:.3f}-{val_add_36_PSNR:.5f}.hdf5",
            monitor="val_add_36_PSNR",
            verbose=1,
            mode="max",
            save_best_only=True))

    hist = model.fit_generator(generator=train_gen,
                               steps_per_epoch=args.steps,
                               epochs=args.nb_epochs,
                               validation_data=valid_gen,
                               verbose=1,
                               callbacks=callbacks)

    np.savez(str(out_dir.joinpath("history.npz")), history=hist.history)
Example #11
0
def main():
    """Train a deblurring model (L1 loss, PSNR/SSIM metrics) with TensorBoard
    logging and best-PSNR checkpointing.

    Fix: the original constructed DeblurImageGenerator twice back-to-back;
    the first instance was dead (immediately overwritten) and has been removed.
    """
    args = get_args()
    blur_image_dir = args.blur_image_dir
    clear_image_dir = args.clear_image_dir
    val_dir = args.val_dir
    image_size = args.image_size
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    steps = args.steps
    loss_type = args.loss
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)  # srresnet output

    # Optionally resume from pre-trained weights.
    if args.weight is not None:
        model.load_weights(args.weight)

    opt = Adam(lr=lr)
    callbacks = []

    # "l0" registers the annealing callback; note compile() below uses
    # L1_loss regardless, matching the original behavior.
    if loss_type == "l0":
        l0 = L0Loss()
        callbacks.append(UpdateAnnealingParameter(l0.gamma, nb_epochs, verbose=1))
        loss_type = l0()

    model.compile(optimizer=opt,
                  loss=L1_loss,
                  metrics=[PSNR, SSIM])

    generator = DeblurImageGenerator(blur_image_dir, clear_image_dir, batch_size=batch_size,
                                    image_size=image_size)

    val_generator = ValGenerator(val_dir)
    output_path.mkdir(parents=True, exist_ok=True)

    callbacks.append(LearningRateScheduler(schedule=Schedule(nb_epochs, lr)))
    # Keep only the best checkpoint by validation PSNR.
    callbacks.append(ModelCheckpoint(str(output_path) + "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
                                     monitor="val_PSNR",
                                     verbose=1,
                                     mode="max",
                                     save_best_only=True))

    callbacks.append(TensorBoard(log_dir='./logs',
                                 histogram_freq=0,
                                 write_graph=True, 
                                 write_grads=True,  
                                 write_images=True,  
                                 embeddings_freq=0,
                                 embeddings_layer_names=None,
                                 embeddings_metadata=None)
                     )

    hist = model.fit_generator(generator=generator,
                               steps_per_epoch=steps,
                               epochs=nb_epochs,
                               validation_data=val_generator,
                               verbose=1,
                               callbacks=callbacks)

    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)
Example #12
0
def main():
    """End-to-end denoiser training entry point.

    Streams noisy patches from the training directory, validates against a
    fixed noise model, logs to TensorBoard, checkpoints on the best
    validation PSNR, and saves the final Keras history.
    """
    args = get_args()
    # Checkpoint/output root, resolved relative to this script.
    save_root = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)

    callbacks = []
    loss_fn = args.loss
    if loss_fn == "l0":
        # Annealed L0 loss: the callback decays gamma per epoch and loss_fn
        # becomes the callable loss (i.e., calc_loss).
        l0 = L0Loss()
        callbacks.append(
            UpdateAnnealingParameter(l0.gamma, args.nb_epochs, verbose=1))
        loss_fn = l0()

    model.compile(optimizer=Adam(lr=args.lr), loss=loss_fn, metrics=[PSNR])

    # Training-set generator: corrupts clean patches with the source/target
    # noise models on the fly.
    train_gen = NoisyImageGenerator(args.image_dir,
                                    get_noise_model(args.source_noise_model),
                                    get_noise_model(args.target_noise_model),
                                    batch_size=args.batch_size,
                                    image_size=args.image_size)
    valid_gen = ValGenerator(args.test_dir,
                             get_noise_model(args.val_noise_model))

    save_root.mkdir(parents=True, exist_ok=True)
    callbacks.append(
        LearningRateScheduler(schedule=Schedule(args.nb_epochs, args.lr)))
    callbacks.append(
        ModelCheckpoint(
            str(save_root) +
            "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
            monitor="val_PSNR",
            verbose=1,
            mode="max",
            save_best_only=True))
    callbacks.append(
        TensorBoard(log_dir="./tf-logs",
                    histogram_freq=0,
                    write_graph=True,
                    write_images=True))

    hist = model.fit_generator(generator=train_gen,
                               steps_per_epoch=args.steps,
                               epochs=args.nb_epochs,
                               validation_data=valid_gen,
                               verbose=1,
                               callbacks=callbacks)

    np.savez(str(save_root.joinpath("history.npz")), history=hist.history)