Example no. 1
def main():
    args = get_args()
    # print('args=', args.__dict__)
    image_dir = args.image_dir
    test_dir = args.test_dir
    image_size = args.image_size
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    steps = args.steps
    loss_type = args.loss
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)

    save_arg_filename = Path(output_path) / 'args.txt'
    save_model_plot_filename = Path(output_path) / 'model_plot.png'

    if not output_path.expanduser().exists():
        os.makedirs(output_path)
    with open(str(save_arg_filename), 'w') as f:
        json.dump(args.__dict__, f, indent=2)

    if args.weight is not None:
        model.load_weights(args.weight)

    opt = Adam(lr=lr)
    callbacks = []

    if loss_type == "l0":
        l0 = L0Loss()
        callbacks.append(UpdateAnnealingParameter(l0.gamma, nb_epochs, verbose=1))
        loss_type = l0()

    model.compile(optimizer=opt, loss=loss_type, metrics=[PSNR])
    plot_model(model, to_file=str(save_model_plot_filename), show_shapes=True, show_layer_names=True)

    source_noise_model = get_noise_model(args.source_noise_model)
    target_noise_model = get_noise_model(args.target_noise_model)
    val_noise_model = get_noise_model(args.val_noise_model)
    generator = NoisyImageGenerator(image_dir, source_noise_model, target_noise_model, batch_size=batch_size,
                                    image_size=image_size)
    val_generator = ValGenerator(test_dir, val_noise_model)
    output_path.mkdir(parents=True, exist_ok=True)
    callbacks.append(LearningRateScheduler(schedule=Schedule(nb_epochs, lr)))
    callbacks.append(ModelCheckpoint(str(output_path) + "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
                                     monitor="val_PSNR",
                                     verbose=1,
                                     mode="max",
                                     save_best_only=True))

    hist = model.fit_generator(generator=generator,
                               steps_per_epoch=steps,
                               epochs=nb_epochs,
                               validation_data=val_generator,
                               verbose=1,
                               callbacks=callbacks)

    #callbacks.append(TensorBoard(Path(output_path) / 'logs', histogram_freq=1))
    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)
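
The Schedule object passed to LearningRateScheduler in these examples is defined elsewhere and not shown. The sketch below is a hypothetical step-decay implementation, assumed only from how it is constructed (Schedule(nb_epochs, lr)) and called once per epoch with the epoch index.

class Schedule:
    # Hypothetical step decay: keep the initial learning rate for the first
    # quarter of training, then halve it again at 25%, 50% and 75% of nb_epochs.
    def __init__(self, nb_epochs, initial_lr):
        self.epochs = nb_epochs
        self.initial_lr = initial_lr

    def __call__(self, epoch_idx):
        # LearningRateScheduler calls this with the current (0-based) epoch index
        if epoch_idx < self.epochs * 0.25:
            return self.initial_lr
        elif epoch_idx < self.epochs * 0.50:
            return self.initial_lr * 0.5
        elif epoch_idx < self.epochs * 0.75:
            return self.initial_lr * 0.25
        return self.initial_lr * 0.125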
Example no. 2
def main():
    args = get_args()
    image_dir = args.image_dir
    test_dir = args.test_dir
    image_size = args.image_size
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    steps = args.steps
    loss_type = args.loss
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)

    if args.weight is not None:
        model.load_weights(args.weight)

    opt = Adam(lr=lr)
    callbacks = []

    if loss_type == "l0":
        l0 = L0Loss()
        callbacks.append(
            UpdateAnnealingParameter(l0.gamma, nb_epochs, verbose=1))
        loss_type = l0()

    model.compile(optimizer=opt, loss=loss_type, metrics=[PSNR])
    source_noise_model = get_noise_model(args.source_noise_model)
    target_noise_model = get_noise_model(args.target_noise_model)
    val_noise_model = get_noise_model(args.val_noise_model)
    generator = NoisyImageGenerator(image_dir,
                                    source_noise_model,
                                    target_noise_model,
                                    batch_size=batch_size,
                                    image_size=image_size)
    val_generator = ValGenerator(test_dir, val_noise_model)
    output_path.mkdir(parents=True, exist_ok=True)
    callbacks.append(LearningRateScheduler(schedule=Schedule(nb_epochs, lr)))
    callbacks.append(
        ModelCheckpoint(
            str(output_path) +
            "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
            monitor="val_PSNR",
            verbose=1,
            mode="max",
            save_best_only=True))

    hist = model.fit_generator(generator=generator,
                               steps_per_epoch=steps,
                               epochs=nb_epochs,
                               validation_data=val_generator,
                               verbose=1,
                               callbacks=callbacks)

    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)
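
L0Loss and UpdateAnnealingParameter are referenced but not defined in these snippets. A minimal sketch, assuming standalone Keras and inferring the interface from the usage above (l0.gamma is a backend variable annealed over nb_epochs, and l0() returns the actual loss function), could look like this:

from keras import backend as K
from keras.callbacks import Callback


class L0Loss:
    # "L0-like" loss: |y_true - y_pred| ** gamma, where gamma is annealed during training.
    def __init__(self):
        self.gamma = K.variable(2.0)

    def __call__(self):
        def calc_loss(y_true, y_pred):
            return K.pow(K.abs(y_true - y_pred) + 1e-8, self.gamma)
        return calc_loss


class UpdateAnnealingParameter(Callback):
    # Linearly anneals gamma from 2 towards 0 over the course of training.
    def __init__(self, gamma, nb_epochs, verbose=0):
        super().__init__()
        self.gamma = gamma
        self.nb_epochs = nb_epochs
        self.verbose = verbose

    def on_epoch_begin(self, epoch, logs=None):
        new_gamma = 2.0 * (self.nb_epochs - epoch) / self.nb_epochs
        K.set_value(self.gamma, new_gamma)
        if self.verbose > 0:
            print("Epoch %d: setting gamma to %.4f" % (epoch + 1, new_gamma))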
Example no. 3
def main():
    args = get_args()
    image_dir = args.image_dir
    test_dir = args.test_dir
    image_size = args.image_size
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    steps = args.steps
    loss_type = args.loss
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)
    opt = Adam(lr=lr)
    model.compile(optimizer=opt, loss=loss_type, metrics=[PSNR])
    source_noise_model = get_noise_model(args.source_noise_model)
    target_noise_model = get_noise_model(args.target_noise_model)
    val_noise_model = get_noise_model(args.val_noise_model)
    generator = NoisyImageGenerator(image_dir,
                                    source_noise_model,
                                    target_noise_model,
                                    batch_size=batch_size,
                                    image_size=image_size)
    val_generator = ValGenerator(test_dir, val_noise_model)
    output_path.mkdir(parents=True, exist_ok=True)
    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs, lr)),
        ModelCheckpoint(
            str(output_path) +
            "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
            monitor="val_PSNR",
            verbose=1,
            mode="max",
            save_best_only=True)
    ]

    hist = model.fit_generator(generator=generator,
                               steps_per_epoch=steps,
                               epochs=nb_epochs,
                               validation_data=val_generator,
                               verbose=1,
                               callbacks=callbacks,
                               use_multiprocessing=True,
                               workers=8)

    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)
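
The PSNR metric passed to model.compile is also external to these examples; a sketch, assuming 8-bit (0-255) image tensors, follows.

import numpy as np
from keras import backend as K


def PSNR(y_true, y_pred):
    # Peak signal-to-noise ratio for 8-bit images; predictions are clipped to the valid range.
    max_pixel = 255.0
    y_pred = K.clip(y_pred, 0.0, 255.0)
    mse = K.mean(K.square(y_pred - y_true))
    return 10.0 * K.log(max_pixel ** 2 / mse) / np.log(10.0)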
Example no. 4
def main():
    # DEBUG, INFO, WARN, ERROR, or FATAL
    tf.compat.v1.logging.set_verbosity(
        tf.compat.v1.logging.FATAL)  # silence deprecation warning messages
    config = tf.ConfigProto()  # for ubuntu nvidia driver
    config.gpu_options.allow_growth = True  # for ubuntu nvidia driver
    config.gpu_options.per_process_gpu_memory_fraction = 0.9  # for ubuntu nvidia driver
    tf.keras.backend.set_session(
        tf.Session(config=config))  # for ubuntu nvidia driver
    args = get_args()
    image_dir = args.image_dir
    test_dir = args.test_dir
    image_size = args.image_size
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    init_lr = float(args.lr)
    steps = args.steps
    loss_type = args.loss
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)

    # save the training arguments as JSON
    save_arg_filename = Path(output_path) / 'args.txt'
    if not output_path.expanduser().exists():
        os.makedirs(output_path)
    with open(str(save_arg_filename), 'w') as f:
        json.dump(args.__dict__, f, indent=2)

    if args.weight is not None:
        model.load_weights(args.weight)

    callbacks = []

    if loss_type == "l0":
        l0 = L0Loss()
        callbacks.append(
            UpdateAnnealingParameter(l0.gamma, nb_epochs, verbose=1))
        loss_type = l0()

    source_noise_model = get_noise_model(args.source_noise_model)
    target_noise_model = get_noise_model(args.target_noise_model)
    val_noise_model = get_noise_model(args.val_noise_model)
    generator = NoisyImageGenerator(image_dir,
                                    source_noise_model,
                                    target_noise_model,
                                    batch_size=batch_size,
                                    image_size=image_size)
    val_generator = ValGenerator(test_dir, val_noise_model)
    output_path.mkdir(parents=True, exist_ok=True)

    if loss_type == "mssssim":
        print('Choose mean ssim loss')
        my_opt = Adam()
        model.compile(optimizer=my_opt, loss=Mean_MSSSIM_loss, metrics=[PSNR])
        # checkpoint callback
        callbacks.append(
            ModelCheckpoint(
                str(output_path) +
                "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
                monitor="val_PSNR",
                verbose=1,
                mode="max",
                save_best_only=True))

    else:
        my_opt = Adam()
        model.compile(optimizer=my_opt, loss=loss_type, metrics=[PSNR])
        # checkpoint callback
        callbacks.append(
            ModelCheckpoint(
                str(output_path) +
                "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
                monitor="val_PSNR",
                verbose=1,
                mode="max",
                save_best_only=True))

    # update the learning rate each epoch
    # my_lr_schedule_stepwise = LearningRateScheduler(schedule=MyStepwiseScheduler(nb_epochs, init_lr), verbose=1)
    my_lr_schedule_exponential = LearningRateScheduler(
        schedule=MyExponentialScheduler(nb_epochs, init_lr), verbose=1)
    callbacks.append(my_lr_schedule_exponential)

    # plot_model(model, to_file=str(output_path) + "/model.png", show_shapes=True,dpi=200)
    # stop training after 10 consecutive epochs without improvement
    # callbacks.append(EarlyStopping(patience=10))
    # terminate training if the loss becomes NaN
    # callbacks.append(TerminateOnNaN())

    # log training history to a CSV file
    callbacks.append(
        CSVLogger(filename=str(output_path) + "/TrainingLogCsv.csv",
                  append=True))
    # log training history with TensorBoard
    callbacks.append(
        TensorBoard(log_dir=str(output_path) + str('/logs'),
                    histogram_freq=0,
                    write_graph=True,
                    write_grads=False,
                    embeddings_freq=0,
                    embeddings_layer_names=None,
                    embeddings_metadata=None))

    callbacks.append(LRTensorBoard(log_dir=str(output_path) + str('/logs')))

    # train the model
    hist = model.fit_generator(
        generator=generator,
        steps_per_epoch=steps,
        epochs=nb_epochs,
        validation_data=val_generator,
        verbose=1,
        callbacks=callbacks,
    )
    # save the training history
    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)
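
MyExponentialScheduler and LRTensorBoard in this example are custom helpers that are not included here. The sketch below is a hypothetical implementation consistent with their usage: an epoch-indexed callable returning an exponentially decayed learning rate, and a TensorBoard subclass that additionally logs the optimizer's current learning rate.

from keras import backend as K
from keras.callbacks import TensorBoard


class MyExponentialScheduler:
    # Hypothetical exponential decay from init_lr down to init_lr * final_factor over nb_epochs.
    def __init__(self, nb_epochs, init_lr, final_factor=0.01):
        self.nb_epochs = nb_epochs
        self.init_lr = init_lr
        self.final_factor = final_factor

    def __call__(self, epoch_idx):
        return self.init_lr * self.final_factor ** (epoch_idx / float(self.nb_epochs))


class LRTensorBoard(TensorBoard):
    # Adds the current learning rate to the metrics written by the standard TensorBoard callback.
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs.update({"lr": K.get_value(self.model.optimizer.lr)})
        super().on_epoch_end(epoch, logs)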
Example no. 5
def main():
    args = get_args()
    image_dir_noise = args.image_dir_noise
    image_dir_original = args.image_dir_original
    test_dir_noise = args.test_dir_noise
    test_dir_original = args.test_dir_original
    image_size = args.image_size
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    if_n = args.If_n
    lr = args.lr
    steps = args.steps
    loss_type = args.loss
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)
    opt = Adam(lr=lr)
    callbacks = []

    if loss_type == "l0":
        l0 = L0Loss()
        callbacks.append(
            UpdateAnnealingParameter(l0.gamma, nb_epochs, verbose=1))
        loss_type = l0()

    # model.compile(optimizer=opt, loss=loss_type, metrics=[PSNR])
    model.compile(optimizer=opt,
                  loss={
                      "subtract_1": "mse",
                      "add_36": "mse"
                  },
                  loss_weights={
                      'subtract_1': 0.1,
                      'add_36': 1
                  },
                  metrics=[PSNR])
    model.summary()
    generator = NoisyImageGenerator(image_dir_noise,
                                    image_dir_original,
                                    if_n=if_n,
                                    batch_size=batch_size,
                                    image_size=image_size)
    val_generator = ValGenerator(test_dir_noise, test_dir_original, if_n=if_n)
    output_path.mkdir(parents=True, exist_ok=True)
    # callbacks.append(ReduceLROnPlateau(monitor='val_add_35_loss', factor=0.5, patience=5, verbose=1, mode='min',
    #                                    cooldown=0, min_lr=0.000000001))
    callbacks.append(LearningRateScheduler(schedule=Schedule(nb_epochs, lr)))
    callbacks.append(
        TensorBoard(log_dir='./log',
                    histogram_freq=0,
                    batch_size=batch_size,
                    write_images=True))
    callbacks.append(
        ModelCheckpoint(
            str(output_path) +
            "/weights.{epoch:03d}-{val_add_36_loss:.3f}-{val_add_36_PSNR:.5f}.hdf5",
            monitor="val_add_36_PSNR",
            verbose=1,
            mode="max",
            save_best_only=True))

    hist = model.fit_generator(generator=generator,
                               steps_per_epoch=steps,
                               epochs=nb_epochs,
                               validation_data=val_generator,
                               verbose=1,
                               callbacks=callbacks)

    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)
Example no. 6
def main():
    args = get_args()
    # train image dir
    image_dir = args.image_dir
    # test image dir
    test_dir = args.test_dir
    # training image patch size
    image_size = args.image_size
    # training batch size
    batch_size = args.batch_size
    # number of epochs
    nb_epochs = args.nb_epochs
    # learning rate
    lr = args.lr
    # steps per epoch
    steps = args.steps
    loss_type = args.loss
    # checkpoints path
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)
    opt = Adam(lr=lr)
    callbacks = []

    if loss_type == "l0":
        l0 = L0Loss()
        callbacks.append(
            UpdateAnnealingParameter(l0.gamma, nb_epochs, verbose=1))
        # loss_type is a function, i.e., calc_loss
        loss_type = l0()

    model.compile(optimizer=opt, loss=loss_type, metrics=[PSNR])
    source_noise_model = get_noise_model(args.source_noise_model)
    target_noise_model = get_noise_model(args.target_noise_model)
    val_noise_model = get_noise_model(args.val_noise_model)
    # training set generator
    generator = NoisyImageGenerator(image_dir,
                                    source_noise_model,
                                    target_noise_model,
                                    batch_size=batch_size,
                                    image_size=image_size)
    val_generator = ValGenerator(test_dir, val_noise_model)
    output_path.mkdir(parents=True, exist_ok=True)
    callbacks.append(LearningRateScheduler(schedule=Schedule(nb_epochs, lr)))
    callbacks.append(
        ModelCheckpoint(
            str(output_path) +
            "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
            monitor="val_PSNR",
            verbose=1,
            mode="max",
            save_best_only=True))
    callbacks.append(
        TensorBoard(log_dir="./tf-logs",
                    histogram_freq=0,
                    write_graph=True,
                    write_images=True))

    hist = model.fit_generator(generator=generator,
                               steps_per_epoch=steps,
                               epochs=nb_epochs,
                               validation_data=val_generator,
                               verbose=1,
                               callbacks=callbacks)

    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)