Example #1
def main():
    args = get_args()
    # print('args=', args.__dict__)
    image_dir = args.image_dir
    test_dir = args.test_dir
    image_size = args.image_size
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    steps = args.steps
    loss_type = args.loss
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)

    save_arg_filename = Path(output_path) / 'args.txt'
    save_model_plot_filename = Path(output_path) / 'model_plot.png'

    if not output_path.expanduser().exists():
        os.makedirs(output_path)
    with open(str(save_arg_filename), 'w') as f:
        json.dump(args.__dict__, f, indent=2)

    if args.weight is not None:
        model.load_weights(args.weight)

    opt = Adam(lr=lr)
    callbacks = []

    if loss_type == "l0":
        l0 = L0Loss()
        callbacks.append(UpdateAnnealingParameter(l0.gamma, nb_epochs, verbose=1))
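        # calling the L0Loss instance returns the loss function; its gamma is annealed each epoch by the callback above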
        loss_type = l0()

    model.compile(optimizer=opt, loss=loss_type, metrics=[PSNR])
    plot_model(model, to_file=str(save_model_plot_filename), show_shapes=True, show_layer_names=True)

    source_noise_model = get_noise_model(args.source_noise_model)
    target_noise_model = get_noise_model(args.target_noise_model)
    val_noise_model = get_noise_model(args.val_noise_model)
    generator = NoisyImageGenerator(image_dir, source_noise_model, target_noise_model, batch_size=batch_size,
                                    image_size=image_size)
    val_generator = ValGenerator(test_dir, val_noise_model)
    output_path.mkdir(parents=True, exist_ok=True)
    callbacks.append(LearningRateScheduler(schedule=Schedule(nb_epochs, lr)))
    callbacks.append(ModelCheckpoint(str(output_path) + "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
                                     monitor="val_PSNR",
                                     verbose=1,
                                     mode="max",
                                     save_best_only=True))

    hist = model.fit_generator(generator=generator,
                               steps_per_epoch=steps,
                               epochs=nb_epochs,
                               validation_data=val_generator,
                               verbose=1,
                               callbacks=callbacks)

    #callbacks.append(TensorBoard(Path(output_path) / 'logs', histogram_freq=1))
    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)
Example #2
def main():
    args = get_args()
    image_dir = args.image_dir
    test_dir = args.test_dir
    image_size = args.image_size
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    steps = args.steps
    loss_type = args.loss
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)

    if args.weight is not None:
        model.load_weights(args.weight)

    opt = Adam(lr=lr)
    callbacks = []

    if loss_type == "l0":
        l0 = L0Loss()
        callbacks.append(
            UpdateAnnealingParameter(l0.gamma, nb_epochs, verbose=1))
        loss_type = l0()

    model.compile(optimizer=opt, loss=loss_type, metrics=[PSNR])
    source_noise_model = get_noise_model(args.source_noise_model)
    target_noise_model = get_noise_model(args.target_noise_model)
    val_noise_model = get_noise_model(args.val_noise_model)
    generator = NoisyImageGenerator(image_dir,
                                    source_noise_model,
                                    target_noise_model,
                                    batch_size=batch_size,
                                    image_size=image_size)
    val_generator = ValGenerator(test_dir, val_noise_model)
    output_path.mkdir(parents=True, exist_ok=True)
    callbacks.append(LearningRateScheduler(schedule=Schedule(nb_epochs, lr)))
    callbacks.append(
        ModelCheckpoint(
            str(output_path) +
            "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
            monitor="val_PSNR",
            verbose=1,
            mode="max",
            save_best_only=True))

    hist = model.fit_generator(generator=generator,
                               steps_per_epoch=steps,
                               epochs=nb_epochs,
                               validation_data=val_generator,
                               verbose=1,
                               callbacks=callbacks)

    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)
Example #3
def main():
    args = get_args()
    image_dir = args.image_dir
    weight_file = args.weight_file
    val_noise_model = get_noise_model(args.test_noise_model)
    model = get_srresnet_model()
    model.load_weights(weight_file)

    if args.output_dir:
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    image_paths = list(Path(image_dir).glob("*.*"))

    for image_path in image_paths:
        image = cv2.imread(str(image_path))
        h, w, _ = image.shape
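        # side-by-side canvas: original | noisy | denoised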
        out_image = np.zeros((h, w * 3, 3), dtype=np.uint8)
        noise_image = val_noise_model(image)
        pred = model.predict(np.expand_dims(noise_image, 0))
        denoised_image = get_image(pred[0])
        out_image[:, :w] = image
        out_image[:, w:w * 2] = noise_image
        out_image[:, w * 2:] = denoised_image

        if args.output_dir:
            cv2.imwrite(str(output_dir.joinpath(image_path.name))[:-4] + ".png", out_image)
        else:
            cv2.imshow("result", out_image)
            key = cv2.waitKey(-1)
            # "q": quit
            if key == 113:
                return 0
Example #4
def main():
    args = get_args()
    image_dir = args.image_dir
    weight_file = args.weight_file
    val_noise_model = get_noise_model(args.test_noise_model)
    model = get_model(args.model)
    model.load_weights(weight_file)

    if args.output_dir:
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    image_paths = list(Path(image_dir).glob("*.*"))

    for image_path in image_paths:
        image = cv2.imread(str(image_path))
        # patch = image_pre.load_img(str(image_path), color_mode='rgb', target_size=(256, 256))
        # noise_image = image_pre.img_to_array(patch).astype('float32')[:,:,::-1]
        h, w, _ = image.shape
        noise_image = image[:(h // 16) * 16, :(w // 16) *
                            16]  # for stride (maximum 16)
        h, w, _ = noise_image.shape

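        # two-panel canvas: noisy input | denoised output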
        out_image = np.zeros((h, w * 2, 3), dtype=np.uint8)
        # noise_image = val_noise_model(moi_image)

        pred = model.predict(np.expand_dims(noise_image, 0))
        denoised_image = get_image(pred[0])
        print(noise_image.shape)
        print(denoised_image.shape)
        out_image[:, :w] = noise_image
        out_image[:, w:w * 2] = denoised_image

        cv2.imwrite(
            str(output_dir.joinpath(image_path.name))[:-4] + ".png", out_image)
Example #5
def main():
    args = get_args()
    image_dir = args.image_dir
    weight_file = args.weight_file
    val_noise_model = get_noise_model(args.test_noise_model)
    model = get_model(args.model)
    model.load_weights(weight_file)

    if args.output_dir:
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    save_arg_filename = Path(output_dir) / 'args.txt'
    with open(str(save_arg_filename), 'w') as f:
        json.dump(args.__dict__, f, indent=2)

    # --kw: glob -> rglob--
    image_paths = list(Path(image_dir).rglob("*.png"))
    image_paths += list(Path(image_dir).rglob("*.tif"))
    image_paths += list(Path(image_dir).rglob("*.jpg"))
    image_paths += list(Path(image_dir).rglob("*.bmp"))

    for image_path in tqdm(image_paths):
        # print('1', image_path)
        image = cv2.imread(str(image_path))
        h, w, _ = image.shape
        image = image[:(h // 16) * 16, :(w // 16) *
                      16]  # for stride (maximum 16)
        h, w, _ = image.shape

        out_image = np.zeros((h, w * 3, 3), dtype=np.uint8)
        noise_image = val_noise_model(image)
        pred = model.predict(np.expand_dims(noise_image, 0))
        denoised_image = get_image(pred[0])
        out_image[:, :w] = image
        out_image[:, w:w * 2] = noise_image
        out_image[:, w * 2:] = denoised_image

        if args.output_dir:
            # print('2', Path(output_dir))
            # print('3', image_path)
            save_dir = Path(output_dir).joinpath(
                Path(image_path).parent).joinpath('denoised')
            # save_dir = Path(output_dir).joinpath(Path(image_path).parent.name)

            if not Path(save_dir).exists():
                os.makedirs(save_dir)

            save_img_name = str(
                Path(save_dir).joinpath(Path(image_path).stem)) + '.png'
            # print('Dir:', save_dir)
            print('save_file_name:', save_img_name)
            cv2.imwrite(save_img_name, out_image)
        else:
            cv2.imshow("result", out_image)
            key = cv2.waitKey(-1)
            # "q": quit
            if key == 113:
                return 0
Example #6
def main():
    args = get_args()
    image_dir = args.image_dir
    test_dir = args.test_dir
    image_size = args.image_size
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    steps = args.steps
    loss_type = args.loss
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)
    opt = Adam(lr=lr)
    model.compile(optimizer=opt, loss=loss_type, metrics=[PSNR])
    source_noise_model = get_noise_model(args.source_noise_model)
    target_noise_model = get_noise_model(args.target_noise_model)
    val_noise_model = get_noise_model(args.val_noise_model)
    generator = NoisyImageGenerator(image_dir,
                                    source_noise_model,
                                    target_noise_model,
                                    batch_size=batch_size,
                                    image_size=image_size)
    val_generator = ValGenerator(test_dir, val_noise_model)
    output_path.mkdir(parents=True, exist_ok=True)
    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs, lr)),
        ModelCheckpoint(
            str(output_path) +
            "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
            monitor="val_PSNR",
            verbose=1,
            mode="max",
            save_best_only=True)
    ]

    hist = model.fit_generator(generator=generator,
                               steps_per_epoch=steps,
                               epochs=nb_epochs,
                               validation_data=val_generator,
                               verbose=1,
                               callbacks=callbacks,
                               use_multiprocessing=True,
                               workers=8)

    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)
Example #7
def main():
    start = time.time()
    img_count = 0
    args = get_args()
    image_dir = args.image_dir
    weight_file = args.weight_file
    val_noise_model = get_noise_model(args.test_noise_model)
    model = get_model(args.model)
    model.load_weights(weight_file)

    if args.output_dir:
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    image_paths = list(Path(image_dir).glob("*.*"))

    for image_path in image_paths:
        image = cv2.imread(str(image_path))
        h, w, _ = image.shape
        image = image[:(h // 16) * 16, :(w // 16) *
                      16]  # for stride (maximum 16)
        h, w, _ = image.shape

        out_image = np.zeros((h, w * 3, 3), dtype=np.uint8)
        noise_image = val_noise_model(image)
        pred = model.predict(np.expand_dims(noise_image, 0))
        denoised_image = get_image(pred[0])
        out_image[:, :w] = image
        out_image[:, w:w * 2] = noise_image
        out_image[:, w * 2:] = denoised_image

        if args.output_dir:
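            # write both the 3-panel comparison and the stand-alone denoised image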
            cv2.imwrite(
                str(output_dir) + "/compared/" + str(image_path.name)[:-4] +
                ".png", out_image)
            cv2.imwrite(
                str(output_dir) + "/denoised/" + str(image_path.name)[:-4] +
                ".png", denoised_image)
            img_count += 1
        else:
            cv2.imshow("result", out_image)
            key = cv2.waitKey(-1)
            # "q": quit
            if key == 113:
                return 0
    print('Time: {:05.2f}'.format(time.time() - start), 'seconds', 'Image:',
          img_count)
Example #8
def main():
    # receive the arguments from get_args()
    args = get_args()
    image_dir = args.image_dir
    weight_file = args.weight_file
    val_noise_model = get_noise_model(args.test_noise_model)
    model = get_model(args.model)
    model.load_weights(weight_file)

    if args.output_dir:  # if an output path was given
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    image_paths = list(Path(image_dir).glob("*.*"))

    for image_path in image_paths:  # process every image file under image_dir in order
        image = cv2.imread(str(image_path))
        h, w, _ = image.shape
        image = image[:(h // 16) * 16, :(w // 16) * 16]
        h, w, _ = image.shape

        out_image = np.zeros((h, w * 3, 3), dtype=np.uint8)
        noise_image = val_noise_model(image)
        pred = model.predict(np.expand_dims(noise_image, 0))
        denoised_image = get_image(pred[0])

        # place image, noise_image and denoised_image into out_image (not used)
        execution_path = os.getcwd()
        out_image[:, :w] = image
        out_image[:, w:w * 2] = noise_image
        out_image[:, w * 2:] = denoised_image
        #cv2.imwrite("denoised.jpg", out_image)
        cv2.imwrite("denoised.jpg", denoised_image)  #denoised된 이미지를 기본경로에 저장
        #img = cv2.imread(execution_path, "denoised.jpg")
        #경로 내에 존재하는 denoised.jpg파일을 tesseract로 불러옴
        data = pytesseract.image_to_string(Image.open('denoised.jpg'),
                                           lang='eng')
        #불러온 image 내의 문자열을 data 변수에 string으로 입력
        f = open("result.txt", 'w', encoding='UTF8')
        f.write(data)  #result.txt파일 안에 해당 string을 저장하고 기본 경로에 저장.
        f.close()

        if args.output_dir:  #output_path를 입력받았다면 그 경로에 저장
            cv2.imwrite("denoised.png", out_image)
Example #9
def main():
    args = get_args()
    image_dir = args.image_dir
    weight_file = args.weight_file
    val_noise_model = get_noise_model(args.test_noise_model)
    model = get_model(args.model)
    model.load_weights(weight_file)

    if args.output_dir:
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    image_paths = list(Path(image_dir).glob("*.*"))

    for image_path in image_paths:
        # image = cv2.imread(str(image_path))
        patch = image_pre.load_img(str(image_path),
                                   color_mode='rgb',
                                   target_size=(256, 256))
        image = image_pre.img_to_array(patch).astype('float32')[:, :, ::-1]
        # load the corresponding noisy image (path obtained by replacing 'clean' with 'noise')
        moi_image = cv2.imread(str(image_path).replace('clean', 'noise'))
        h, w, _ = image.shape
        # image = image[:(h // 16) * 16, :(w // 16) * 16]  # for stride (maximum 16)
        # h, w, _ = image.shape
        out_image = np.zeros((h, w * 3, 3), dtype=np.uint8)
        noise_image = val_noise_model(moi_image)
        pred = model.predict(np.expand_dims(noise_image, 0))
        denoised_image = get_image(pred[0])
        out_image[:, :w] = image
        out_image[:, w:w * 2] = noise_image
        out_image[:, w * 2:] = denoised_image

        if args.output_dir:
            cv2.imwrite(
                str(output_dir.joinpath(image_path.name))[:-4] + ".png",
                out_image)
        else:
            cv2.imshow("result", out_image)
            key = cv2.waitKey(-1)
            # "q": quit
            if key == 113:
                return 0
Example #10
def main():
    args = get_args()
    image_dir = args.image_dir
    weight_file = args.weight_file
    val_noise_model = get_noise_model(args.test_noise_model)
    model = get_model(args.model)
    model.load_weights(weight_file)

    if args.output_dir:
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    # list all images
    image_paths = list(Path(image_dir).glob("*.*"))

    for image_path in image_paths:
        image = cv2.imread(str(image_path))
        h, w, _ = image.shape
        image = image[:(h // 16) * 16, :(w // 16) *
                      16]  # for stride (maximum 16)
        h, w, _ = image.shape

        out_image = np.zeros((h, w * 3, 3), dtype=np.uint8)
        noise_image = val_noise_model(image)
        pred = model.predict(np.expand_dims(noise_image, 0))
        denoised_image = get_image(pred[0])
        # stack original image, noise_image, denoised image together
        out_image[:, :w] = image
        out_image[:, w:w * 2] = noise_image
        out_image[:, w * 2:] = denoised_image

        if args.output_dir:
            # -4 to exclude extension
            cv2.imwrite(
                str(output_dir.joinpath(image_path.name))[:-4] + ".png",
                out_image)
        else:
            cv2.imshow("result", out_image)
            key = cv2.waitKey(-1)
            # "q": quit
            if key == 113:
                return 0
Example #11
def main():
    args = get_args()
    image_dir = args.image_dir
    weight_file = args.weight_file
    image_size = args.image_size
    val_noise_model = get_noise_model(args.test_noise_model)
    model = get_model(args.model)
    model.load_weights(weight_file)

    if args.output_dir:
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    image_paths = list(Path(image_dir).glob("*.*"))

    for image_path in image_paths:
        img_name = os.path.basename(str(image_path))
        image = cv2.imread(str(image_path))
        #h, w, _ = image.shape
        #image = image[:(h // 16) * 16, :(w // 16) * 16]  # for stride (maximum 16)
        img = cv2.resize(image, (image_size, image_size))
        #img = image
        h, w, _ = img.shape

        out_image = np.zeros((h, w * 2, 3), dtype=np.uint8)
        #noise_image = val_noise_model(image)
        pred = model.predict(np.expand_dims(img, 0))
        denoised_image = get_image(pred[0])
        out_image[:, :w] = img
        out_image[:, w:w * 2] = denoised_image
        #out_image[:, w * 2:] = denoised_image

        if args.output_dir:
            #cv2.imwrite(str(output_dir.joinpath(image_path.name))[:-4] + ".png", out_image)
            cv2.imwrite(str(output_dir) + '/' + img_name, denoised_image)
        else:
            cv2.imshow("result", out_image)
            key = cv2.waitKey(-1)
            # "q": quit
            if key == 113:
                return 0
Example #12
def start():
    image_dir = './after.png'
    weight_file = './denoise/weights.gauss_clean.hdf5'
    val_noise_model = get_noise_model("clean")
    model = 'srresnet'
    output_dir = './denoise/results'
    model = get_model(model)
    model.load_weights(weight_file)

    image = cv2.imread(image_dir)
    h, w, _ = image.shape
    image = image[:(h // 16) * 16, :(w // 16) * 16]  # for stride (maximum 16)
    h, w, _ = image.shape

    out_image = np.zeros((h, w * 3, 3), dtype=np.uint8)
    noise_image = val_noise_model(image)
    pred = model.predict(np.expand_dims(noise_image, 0))
    denoised_image = get_image(pred[0])
    out_image[:, :w] = image
    out_image[:, w:w * 2] = noise_image
    out_image[:, w * 2:] = denoised_image
    cv2.imwrite('./denoise/results/denoise.png', out_image[:, w * 2:])
Example #13
def main():
    # DEBUG, INFO, WARN, ERROR, or FATAL
    tf.compat.v1.logging.set_verbosity(
        tf.compat.v1.logging.FATAL)  # silence deprecate warning message
    config = tf.ConfigProto()  # for ubuntu nvidia driver
    config.gpu_options.allow_growth = True  # for ubuntu nvidia driver
    config.gpu_options.per_process_gpu_memory_fraction = 0.9  # for ubuntu nvidia driver
    tf.keras.backend.set_session(
        tf.Session(config=config))  # for ubuntu nvidia driver
    args = get_args()
    image_dir = args.image_dir
    test_dir = args.test_dir
    image_size = args.image_size
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    init_lr = float(args.lr)
    steps = args.steps
    loss_type = args.loss
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)

    # save the training arguments as JSON
    save_arg_filename = Path(output_path) / 'args.txt'
    if not output_path.expanduser().exists():
        os.makedirs(output_path)
    with open(str(save_arg_filename), 'w') as f:
        json.dump(args.__dict__, f, indent=2)

    if args.weight is not None:
        model.load_weights(args.weight)

    callbacks = []

    if loss_type == "l0":
        l0 = L0Loss()
        callbacks.append(
            UpdateAnnealingParameter(l0.gamma, nb_epochs, verbose=1))
        loss_type = l0()

    source_noise_model = get_noise_model(args.source_noise_model)
    target_noise_model = get_noise_model(args.target_noise_model)
    val_noise_model = get_noise_model(args.val_noise_model)
    generator = NoisyImageGenerator(image_dir,
                                    source_noise_model,
                                    target_noise_model,
                                    batch_size=batch_size,
                                    image_size=image_size)
    val_generator = ValGenerator(test_dir, val_noise_model)
    output_path.mkdir(parents=True, exist_ok=True)

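    # pick the loss: the custom mean MS-SSIM loss or the loss name passed on the command line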
    if loss_type == "mssssim":
        print('Choose mean ssim loss')
        my_opt = Adam()
        model.compile(optimizer=my_opt, loss=Mean_MSSSIM_loss, metrics=[PSNR])
        # # create a checkpoint callback
        callbacks.append(
            ModelCheckpoint(
                str(output_path) +
                "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
                monitor="val_PSNR",
                verbose=1,
                mode="max",
                save_best_only=True))

    else:
        my_opt = Adam()
        model.compile(optimizer=my_opt, loss=loss_type, metrics=[PSNR])
        # # create a checkpoint callback
        callbacks.append(
            ModelCheckpoint(
                str(output_path) +
                "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
                monitor="val_PSNR",
                verbose=1,
                mode="max",
                save_best_only=True))

    # # update the learning rate
    # my_lr_schedule_stepwise = LearningRateScheduler(schedule=MyStepwiseScheduler(nb_epochs, init_lr), verbose=1)
    my_lr_schedule_exponential = LearningRateScheduler(
        schedule=MyExponentialScheduler(nb_epochs, init_lr), verbose=1)
    callbacks.append(my_lr_schedule_exponential)

    # plot_model(model, to_file=str(output_path) + "/model.png", show_shapes=True,dpi=200)
    # # stop training after 10 consecutive epochs without improvement
    # callbacks.append(EarlyStopping(patience=10))
    # # stop training if the loss fails to converge (NaN)
    # callbacks.append(TerminateOnNaN())

    # # record the training history to a CSV file
    callbacks.append(
        CSVLogger(filename=str(output_path) + "/TrainingLogCsv.csv",
                  append=True))
    # # record the training history with TensorBoard
    callbacks.append(
        TensorBoard(log_dir=str(output_path) + str('/logs'),
                    histogram_freq=0,
                    write_graph=True,
                    write_grads=False,
                    embeddings_freq=0,
                    embeddings_layer_names=None,
                    embeddings_metadata=None))

    callbacks.append(LRTensorBoard(log_dir=str(output_path) + str('/logs')))

    # # train the model
    hist = model.fit_generator(
        generator=generator,
        steps_per_epoch=steps,
        epochs=nb_epochs,
        validation_data=val_generator,
        verbose=1,
        callbacks=callbacks,
    )
    # # save the training history
    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)
Example #14
def main():

    args = get_args()
    image_dir = args.image_dir
    weight_file = args.weight_file
    val_noise_model = get_noise_model(args.test_noise_model)
    model = get_model(args.model)
    model.load_weights(weight_file)
    is_sharpen_needed = args.sharpen
    print('sharpen=', is_sharpen_needed)

    if args.output_dir:
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    save_arg_filename = Path(output_dir) / 'args.txt'
    with open(str(save_arg_filename), 'w') as f:
        json.dump(args.__dict__, f, indent=2)

    # --kw: glob -> rglob--
    image_paths = list(Path(image_dir).rglob("*.png"))
    image_paths += list(Path(image_dir).rglob("*.tif"))
    image_paths += list(Path(image_dir).rglob("*.jpg"))
    image_paths += list(Path(image_dir).rglob("*.bmp"))

    for image_path in tqdm(image_paths):
        image = cv2.imread(str(image_path))
        h, w, _ = image.shape
        image = image[:(h // 16) * 16, :(w // 16) *
                      16]  # for stride (maximum 16)
        h, w, _ = image.shape

        noise_image = val_noise_model(image)
        pred = model.predict(np.expand_dims(noise_image, 0))
        denoised_image = get_image(pred[0])
        denoise_gray = denoised_image[:, :, 0]

        if is_sharpen_needed:
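            # high-boost sharpening kernel; the result is min-max normalized to 8 bit below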
            kernel = [[-1, -1, -1], [-1, 27, -1], [-1, -1, -1]]
            kernel = np.asarray(kernel, dtype=np.float32)
            sharpen = cv2.filter2D(denoise_gray,
                                   ddepth=cv2.CV_32F,
                                   kernel=kernel,
                                   borderType=cv2.BORDER_DEFAULT)
            output_image = cv2.normalize(src=sharpen,
                                         dst=None,
                                         alpha=0,
                                         beta=255,
                                         norm_type=cv2.NORM_MINMAX,
                                         dtype=cv2.CV_8U)
        else:
            output_image = cv2.normalize(src=denoise_gray,
                                         dst=None,
                                         alpha=0,
                                         beta=255,
                                         norm_type=cv2.NORM_MINMAX,
                                         dtype=cv2.CV_8U)

        # cv2.imshow('RAW', image)
        # cv2.imshow('De-noised', output_image)
        # cv2.waitKey(0)

        if args.output_dir:
            save_dir = Path(output_dir).joinpath(
                Path(image_path).parent).joinpath('denoised')
            if not Path(save_dir).exists():
                os.makedirs(save_dir)

            save_img_name = str(
                Path(save_dir).joinpath(Path(image_path).stem)) + '.png'
            cv2.imwrite(save_img_name, output_image)
        else:
            cv2.imshow("result", output_image)
            key = cv2.waitKey(-1)
            # "q": quit
            if key == 113:
                return 0
Example #15
def main():
    height = 512
    width = 512

    noise = 'gauss'
    mode = 'clean'

    args = get_args()
    image_dir = args.image_dir
    weight_file = 'weights_{}_{}.hdf5'.format(noise, mode)  #args.weight_file

    if mode != 'clean':
        val_noise_model = get_noise_model(args.test_noise_model)
    else:
        model = get_model(height, width, args.model)
    model.load_weights(weight_file)
    model.summary()

    # saved_model
    tf.saved_model.save(
        model, 'saved_model_{}_{}_{}_{}x{}'.format(args.model, noise, mode,
                                                   height, width))

    # pb
    full_model = tf.function(lambda inputs: model(inputs))
    full_model = full_model.get_concrete_function(
        inputs=[tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype)])
    frozen_func = convert_variables_to_constants_v2(full_model,
                                                    lower_control_flow=False)
    frozen_func.graph.as_graph_def()
    tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                      logdir=".",
                      name="noise2noise_{}_{}_{}_{}x{}_float32.pb".format(
                          args.model, noise, mode, height, width),
                      as_text=False)

    # No Quantization - Input/Output=float32
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    tflite_model = converter.convert()
    with open(
            'noise2noise_{}_{}_{}_{}x{}_float32.tflite'.format(
                args.model, noise, mode, height, width), 'wb') as w:
        w.write(tflite_model)
    print(
        "tflite convert complete! - noise2noise_{}_{}_{}_{}x{}_float32.tflite".
        format(args.model, noise, mode, height, width))

    # Weight Quantization - Input/Output=float32
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
    tflite_model = converter.convert()
    with open(
            'noise2noise_{}_{}_{}_{}x{}_weight_quant.tflite'.format(
                args.model, noise, mode, height, width), 'wb') as w:
        w.write(tflite_model)
    print(
        'Weight Quantization complete! - noise2noise_{}_{}_{}_{}x{}_weight_quant.tflite'
        .format(args.model, noise, mode, height, width))

    # Float16 Quantization - Input/Output=float32
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.target_spec.supported_types = [tf.float16]
    tflite_quant_model = converter.convert()
    with open(
            'noise2noise_{}_{}_{}_{}x{}_float16_quant.tflite'.format(
                args.model, noise, mode, height, width), 'wb') as w:
        w.write(tflite_quant_model)
    print(
        'Float16 Quantization complete! - noise2noise_{}_{}_{}_{}x{}_float16_quant.tflite'
        .format(args.model, noise, mode, height, width))

    def representative_dataset_gen():
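        # yield sample images so the TFLite converter can calibrate activation ranges for quantization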
        for data in raw_test_data.take(10):
            image = data['image'].numpy()
            image = tf.image.resize(image, (height, width))
            image = image[np.newaxis, :, :, :]
            # image = image / 127.5 - 1.0
            yield [image]

    raw_test_data, info = tfds.load(name="coco/2017",
                                    with_info=True,
                                    split="test",
                                    data_dir="~/TFDS",
                                    download=False)

    # Integer Quantization - Input/Output=float32
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset_gen
    tflite_quant_model = converter.convert()
    with open(
            'noise2noise_{}_{}_{}_{}x{}_integer_quant.tflite'.format(
                args.model, noise, mode, height, width), 'wb') as w:
        w.write(tflite_quant_model)
    print(
        'Integer Quantization complete! - noise2noise_{}_{}_{}_{}x{}_integer_quant.tflite'
        .format(args.model, noise, mode, height, width))

    # Full Integer Quantization - Input/Output=int8
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    converter.inference_input_type = tf.uint8
    converter.inference_output_type = tf.uint8
    converter.representative_dataset = representative_dataset_gen
    tflite_quant_model = converter.convert()
    with open(
            'noise2noise_{}_{}_{}_{}x{}_full_integer_quant.tflite'.format(
                args.model, noise, mode, height, width), 'wb') as w:
        w.write(tflite_quant_model)
    print(
        'Integer Quantization complete! - noise2noise_{}_{}_{}_{}x{}_full_integer_quant.tflite'
        .format(args.model, noise, mode, height, width))

    # # EdgeTPU
    # import subprocess
    # result = subprocess.check_output(["edgetpu_compiler", "-s", "noise2noise_{}_{}_{}_{}x{}_full_integer_quant.tflite".format(args.model, noise, mode, height, width)])
    # print(result)

    sys.exit(0)

    if args.output_dir:
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    image_paths = list(Path(image_dir).glob("*.*"))

    for image_path in image_paths:
        image = cv2.imread(str(image_path))
        h, w, _ = image.shape
        image = image[:(h // 16) * 16, :(w // 16) *
                      16]  # for stride (maximum 16)
        h, w, _ = image.shape

        out_image = np.zeros((h, w * 3, 3), dtype=np.uint8)
        noise_image = val_noise_model(image)
        pred = model.predict(np.expand_dims(noise_image, 0))
        denoised_image = get_image(pred[0])
        out_image[:, :w] = image
        out_image[:, w:w * 2] = noise_image
        out_image[:, w * 2:] = denoised_image

        if args.output_dir:
            cv2.imwrite(
                str(output_dir.joinpath(image_path.name))[:-4] + ".png",
                out_image)
        else:
            cv2.imshow("result", out_image)
            key = cv2.waitKey(-1)
            # "q": quit
            if key == 113:
                return 0
Example #16
    def denoise(self):
        # DEBUG, INFO, WARN, ERROR, or FATAL
        tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)  # silence deprecate warning message
        config = tf.ConfigProto()  # for ubuntu nvidia driver
        config.gpu_options.allow_growth = True  # for ubuntu nvidia driver
        tf.keras.backend.set_session(tf.Session(config=config))  # for ubuntu nvidia driver

        image_dir =  self.input_dir
        weight_file = self.weight_path
        is_sharpening = False
        val_noise_model = get_noise_model('gaussian,20,20')
        model = get_model(self.model_category)

        model.load_weights(weight_file)

        # if args.output_dir:
        #     output_dir = Path(args.output_dir)
        #     output_dir.mkdir(parents=True, exist_ok=True)
        #
        # save_arg_filename = Path(output_dir) / 'args.txt'
        # with open(str(save_arg_filename), 'w') as f:
        #     json.dump(args.__dict__, f, indent=2)

        # --kw: glob -> rglob--
        image_paths = list(Path(image_dir).expanduser().rglob("*.png"))
        image_paths += list(Path(image_dir).expanduser().rglob("*.tif"))
        image_paths += list(Path(image_dir).expanduser().rglob("*.jpg"))
        image_paths += list(Path(image_dir).expanduser().rglob("*.bmp"))

        for image_path in tqdm(image_paths):
            image_gray = cv2.imread(str(image_path), cv2.IMREAD_GRAYSCALE)
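            # replicate the grayscale channel so the input matches the model's 3-channel format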
            image = cv2.merge([image_gray, image_gray, image_gray])
            h, w, _ = image.shape
            image = image[:(h // 16) * 16, :(w // 16) * 16]  # for stride (maximum 16)
            h, w, _ = image.shape

            out_image = np.zeros((h, w * 3, 3), dtype=np.uint8)
            noise_image = val_noise_model(image)
            prediction = model.predict(np.expand_dims(noise_image, 0))
            denoised_image = self.clip_image(prediction[0])

            # if is_sharpening == 'True':
            #     print('sharpening')
            #     dst = cv2.Laplacian(denoised_image, cv2.CV_64F, ksize=1)
            #     denoised_image_float = denoised_image.astype(np.float64)
            #     denoised_image = denoised_image_float - dst
            #     denoised_image = np.clip(denoised_image, 0, 255).astype(np.uint8)

            out_image[:, :w] = image
            out_image[:, w:w * 2] = noise_image
            out_image[:, w * 2:] = denoised_image
            out_image = cv2.cvtColor(out_image, cv2.COLOR_BGR2GRAY)

            # diff = cv2.absdiff(out_image[:, w * 2:], out_image[:, :w])
            # fig_diff, ax = plt.subplots(figsize=(10, 5))
            # fig_diff.subplots_adjust(hspace=0.2, wspace=0.2)  # set the spacing between subplots
            # plt.subplot(1, 2, 1)
            # plt.imshow(diff, vmin=0, vmax=255)
            # plt.subplot(1, 2, 2)
            # plt.hist(diff.ravel(), bins=256, range=[0, 256])
            # plt.show()

            if not self.debugMode:
                output_dir = Path(self.output_dir).expanduser().resolve()
                # print('output_dir', output_dir)
                rawImgNameWithoutExtension = str(Path(image_path).stem)
                save_file_name = output_dir / str('deN-'+rawImgNameWithoutExtension+'.png')
                # save_histogram_name = str(save_img_name)[:-4] + '_histogram.png'
                # print('save_img_name', save_img_name)
                # print('save_histogram_name', save_histogram_name)

                if not Path(output_dir).exists():
                    os.makedirs(output_dir)

                cv2.imwrite(str(save_file_name), out_image)
                # plt.savefig(save_histogram_name, dpi=300)
                # plt.close()
            else:
                cv2.imshow("result", out_image)
                key = cv2.waitKey(-1)
                # "q": quit
                if key == 113:
                    return 0
Example #17
def main(): 
    # receive the arguments from get_args()
    args = get_args()
    image_dir = args.image_dir
    weight_file = args.weight_file
    val_noise_model = get_noise_model(args.test_noise_model)
    model = get_model(args.model)
    model.load_weights(weight_file)

    if args.output_dir:  # if an output path was given
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    image_paths = list(Path(image_dir).glob("*.*"))

    for image_path in image_paths:  # process every image file under image_dir in order
        image = cv2.imread(str(image_path))
        h, w, _ = image.shape
        image = image[:(h // 16) * 16, :(w // 16) * 16] 
        h, w, _ = image.shape
        
        out_image = np.zeros((h, w * 3, 3), dtype=np.uint8)
        noise_image = val_noise_model(image)
        pred = model.predict(np.expand_dims(noise_image, 0))
        denoised_image = get_image(pred[0])
 
        # place image, noise_image and denoised_image into out_image (not used)
        execution_path = os.getcwd()
        out_image[:, :w] = image
        out_image[:, w:w * 2] = noise_image
        out_image[:, w * 2:] = denoised_image
        #cv2.imwrite("denoised.jpg", out_image)
        cv2.imwrite("denoised.jpg", denoised_image) #denoised된 이미지를 기본경로에 저장
        #img = cv2.imread(execution_path, "denoised.jpg")
        
        if args.output_dir:  # if an output path was given, save there
            cv2.imwrite("denoised.png", out_image)
        else:
            #cv2.imshow("result", out_image)

            detector = ObjectDetection()    # create an ObjectDetection() instance to run object detection
            detector.setModelTypeAsRetinaNet() 
            detector.setModelPath( os.path.join(execution_path , "resnet50_coco_best_v2.0.1.h5"))
            detector.loadModel()
            # run object detection on the denoised image and save the annotated result as detectedImage.jpg
            detections = detector.detectObjectsFromImage(input_image=os.path.join(execution_path , "denoised.jpg"), output_image_path=os.path.join(execution_path , "detectedImage.jpg"))

            for eachObject in detections:
                # also print each detected object's name and confidence to the console
                print(eachObject["name"] , " : " , eachObject["percentage_probability"] )
            
            # load detectedImage with imread so it can be shown in a new window
            img = cv2.imread('detectedImage.jpg', cv2.IMREAD_UNCHANGED)
            #resized_img = cv2.resize(img, (680, 680))
            # display it in a window named "result"
            cv2.namedWindow("result", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("result", 680,680)
            cv2.imshow('result', img)
            
            print ("Press q if your works are done")
            key = cv2.waitKey(-1)
            # "q": quit
            if key == 113:
                return 0
Example #18
def main():
    args = get_args()
    image_dir = args.image_dir
    weight_file = args.weight_file
    val_noise_model = get_noise_model(args.test_noise_model)
    model = get_model(args.model)
    model.load_weights(weight_file)

    if args.output_dir:
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    save_arg_filename = Path(output_dir) / 'args.txt'
    with open(str(save_arg_filename), 'w') as f:
        json.dump(args.__dict__, f, indent=2)

    # --kw: glob -> grab DCM file Only--
    image_paths = list(Path(image_dir).rglob("*.dcm"))

    for image_path in tqdm(image_paths):

        dcm_ds = pydicom.dcmread(str(image_path))
        dcm_ds_copy = dcm_ds
        default_wl = 0
        default_ww = 0
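        # DICOM WindowCenter/WindowWidth may be single- or multi-valued; handle both cases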
        if type(dcm_ds.WindowCenter) is pydicom.multival.MultiValue:
            default_wl = float(dcm_ds.WindowCenter[0])
        elif type(dcm_ds.WindowCenter) is pydicom.valuerep.DSfloat:
            default_wl = float(dcm_ds.WindowCenter)
        if type(dcm_ds.WindowWidth) is pydicom.multival.MultiValue:
            default_ww = float(dcm_ds.WindowWidth[0])
        elif type(dcm_ds.WindowWidth) is pydicom.valuerep.DSfloat:
            default_ww = float(dcm_ds.WindowWidth)

        rescale_intercept = float(dcm_ds.RescaleIntercept)
        rescale_slope = float(dcm_ds.RescaleSlope)

        dcm_image_scaled = ct_windowed(dcm_ds, default_wl, default_ww,
                                       np.uint8, (0, 255))
        image = cv2.cvtColor(dcm_image_scaled, cv2.COLOR_GRAY2RGB)

        h, w, _ = image.shape
        image = image[:(h // 16) * 16, :(w // 16) *
                      16]  # for stride (maximum 16)
        h, w, _ = image.shape

        noise_image = val_noise_model(image)
        predicts = model.predict(np.expand_dims(noise_image, 0))
        de_noised_rgb = get_image(predicts[0])
        de_noised_gray = cv2.cvtColor(de_noised_rgb, cv2.COLOR_RGB2GRAY)
        de_noised_gray_arr = np.asarray(de_noised_gray, dtype=np.int16)
        # cv2.imshow('RAW   wl:{}   ww:{}'.format(default_wl, default_ww), image)
        # cv2.imshow('De-noised', de_noised_gray)
        # cv2.waitKey(0)

        # x = np.kron([[1, 0] * 4, [0, 1] * 4] * 4, np.ones((100, 100)))

        dcm_ds_copy.WindowCenter = 127
        dcm_ds_copy.WindowWidth = 256
        dcm_ds_copy.RescaleIntercept = 0
        dcm_ds_copy.RescaleSlope = 1
        new_Patient_name = str(dcm_ds_copy.PatientName) + "-DeNoise"
        dcm_ds_copy.PatientName = new_Patient_name
        dcm_ds_copy.PixelData = de_noised_gray_arr.tostring()

        if args.output_dir:
            save_dir = Path(output_dir).joinpath(
                Path(image_path).parent).joinpath('denoised')
            if not Path(save_dir).exists():
                os.makedirs(save_dir)
            save_img_name = str(
                Path(save_dir).joinpath(Path(image_path).stem)) + '_dn.dcm'
            dcm_ds_copy.save_as(save_img_name)
        else:
            cv2.imshow("result", de_noised_gray)
            key = cv2.waitKey(-1)
            # "q": quit
            if key == 113:
                return 0
Example #19
def main():
    args = get_args()
    # train image dir
    image_dir = args.image_dir
    # test image dir
    test_dir = args.test_dir
    # training image patch size
    image_size = args.image_size
    # training batch size
    batch_size = args.batch_size
    # number of epochs
    nb_epochs = args.nb_epochs
    # learning rate
    lr = args.lr
    # steps per epoch
    steps = args.steps
    loss_type = args.loss
    # checkpoints path
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    model = get_model(args.model)
    opt = Adam(lr=lr)
    callbacks = []

    if loss_type == "l0":
        l0 = L0Loss()
        callbacks.append(
            UpdateAnnealingParameter(l0.gamma, nb_epochs, verbose=1))
        # loss_type is a function, i.e., calc_loss
        loss_type = l0()

    model.compile(optimizer=opt, loss=loss_type, metrics=[PSNR])
    source_noise_model = get_noise_model(args.source_noise_model)
    target_noise_model = get_noise_model(args.target_noise_model)
    val_noise_model = get_noise_model(args.val_noise_model)
    # training set generator
    generator = NoisyImageGenerator(image_dir,
                                    source_noise_model,
                                    target_noise_model,
                                    batch_size=batch_size,
                                    image_size=image_size)
    val_generator = ValGenerator(test_dir, val_noise_model)
    output_path.mkdir(parents=True, exist_ok=True)
    callbacks.append(LearningRateScheduler(schedule=Schedule(nb_epochs, lr)))
    callbacks.append(
        ModelCheckpoint(
            str(output_path) +
            "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.5f}.hdf5",
            monitor="val_PSNR",
            verbose=1,
            mode="max",
            save_best_only=True))
    callbacks.append(
        TensorBoard(log_dir="./tf-logs",
                    histogram_freq=0,
                    write_graph=True,
                    write_images=True))

    hist = model.fit_generator(generator=generator,
                               steps_per_epoch=steps,
                               epochs=nb_epochs,
                               validation_data=val_generator,
                               verbose=1,
                               callbacks=callbacks)

    np.savez(str(output_path.joinpath("history.npz")), history=hist.history)