Example #1
import gc

import librosa
import numpy as np
from scipy.signal import decimate
from tqdm import tqdm

# `edsr` (model builder) and `resolve` (SR inference) are project-local helpers.

def evaluate_val(files, person, res_blocks, save_files=False):
    # build the model and load the pre-trained weights
    edsr_model = edsr(scale=4, res_blocks=res_blocks, res_block_scaling=0.5)
    edsr_model.load_weights('./weights/EDSR_16000_20res_8batch_10epochs_TV+abserr.h5')

    for idx, file in enumerate(tqdm(files)):
        data, rate = librosa.load(file, sr=None)  # keep the file's native sampling rate
        # data = normalize(data)
        # print(f'Original rate : {rate}')
        # trim so the length divides evenly, then decimate by 3 to get the HR signal
        data = data[:len(data) - (len(data) % 3)]
        data = np.asarray(decimate(data, 3), dtype=np.float32)
        hr_rate = int(rate / 3)
        # print(f'High Resolution rate : {hr_rate}')
        # downsample by a further factor of 4 to get the LR input
        data = data[:len(data) - (len(data) % 4)]
        lr = np.asarray(decimate(data, 4), dtype=np.float32)
        lr_rate = int(hr_rate / 4)
        # print(f'Low Resolution rate : {lr_rate}')
        # reshape
        lr = np.asarray(lr).reshape((-1, lr.shape[0], 1))
        hr = np.asarray(data).reshape((-1, data.shape[0], 1))
        # resolve
        sr = resolve(edsr_model, lr)
        sr = sr.numpy()
        sr = np.reshape(sr, (sr.shape[1],))

        if save_files:
            lr = lr.reshape((lr.shape[1],))
            # note: librosa.output was removed in librosa 0.8; on newer
            # versions use soundfile.write() instead
            librosa.output.write_wav(f'./result/{person}_{idx + 1}_LR_data_{lr_rate}.wav', lr, lr_rate)
            librosa.output.write_wav(f'./result/{person}_{idx + 1}_HR_data_{hr_rate}.wav', data, hr_rate)
            librosa.output.write_wav(f'./result/{person}_{idx + 1}_SR_data_{hr_rate}.wav', sr, hr_rate)

        gc.collect()
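
A minimal usage sketch, assuming the speaker's WAV files live under a hypothetical ./data directory (the weights filename above suggests a 20-res-block model):

# Hypothetical usage: paths and speaker label are placeholders.
import glob

files = sorted(glob.glob('./data/speaker1/*.wav'))
evaluate_val(files, person='speaker1', res_blocks=20, save_files=True)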
Example #2
def train(depth, scale, downgrade):
    weights_dir = f'weights/edsr-{depth}-x{scale}'
    weights_file = os.path.join(weights_dir, 'weights.h5')
    os.makedirs(weights_dir, exist_ok=True)

    div2k_train = DIV2K(scale=scale, subset='train', downgrade=downgrade)  # images 1-800
    div2k_valid = DIV2K(scale=scale, subset='valid', downgrade=downgrade)  # images 801-900

    train_batch_size = 16
    train_ds = div2k_train.dataset(batch_size=train_batch_size, random_transform=True)
    valid_ds = div2k_valid.dataset(batch_size=1, random_transform=False, repeat_count=1)

    trainer = EdsrTrainer(model=edsr(scale=scale, num_res_blocks=depth), 
                          checkpoint_dir=f'.ckpt/edsr-{depth}-x{scale}')

    steps_epoch = int(800 / train_batch_size)  # 800 training images / batch size 16 = 50 steps/epoch
    # Train the EDSR model for 300,000 steps (6000 epochs x 50 steps) and
    # evaluate it every 25,000 steps on the first 10 images of the DIV2K
    # validation set.
    trainer.train(train_ds,
                  valid_ds.take(10),
                  steps=6000*steps_epoch, 
                  evaluate_every=500*steps_epoch, 
                  save_best_only=True)

    # Restore from checkpoint with highest PSNR
    trainer.restore()

    # Evaluate model on full validation set
    psnrv = trainer.evaluate(valid_ds)
    print(f'PSNR = {psnrv.numpy():.3f}')

    # Save weights
    trainer.model.save_weights(weights_file)
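
Called with the configuration used elsewhere in these examples (16 residual blocks, 4x scale, bicubic downgrade), this trains for 300,000 steps and writes weights/edsr-16-x4/weights.h5:

train(depth=16, scale=4, downgrade='bicubic')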
Example #3
def test():
    # `scale`, `depth`, and `channels` are read from module scope
    model = edsr(scale=scale, num_res_blocks=depth, num_filters=channels)
    checkpoint = tf.train.Checkpoint(step=tf.Variable(0),
                                     psnr=tf.Variable(-1.0),
                                     optimizer=Adam(1e-04),
                                     model=model)
    checkpoint_manager = tf.train.CheckpointManager(checkpoint=checkpoint,
                                                    directory='./ckpt',
                                                    max_to_keep=3)
    restore(checkpoint, checkpoint_manager)

    video_valid = video_ds(subset='valid')
    valid_ds = video_valid.dataset(batch_size=1,
                                   random_transform=False,
                                   repeat_count=1)

    psnr, ssim = evaluate(checkpoint.model, valid_ds)
    print('PSNR:%.3f, SSIM:%.3f' % (psnr, ssim))

    psnr_b = bilinear_upscale('../image_240', '../image_960', scale=scale)
    print('bilinear upscale PSNR:%.3f' % psnr_b)

    lr = load_image('../image_240/frame1500.jpg')
    sr = resolve_single(checkpoint.model, lr)
    plt.imshow(sr)
    plt.show()
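
test() depends on module-level configuration; a minimal sketch of the assumed values (the channels value is illustrative; 64 filters is the EDSR baseline default):

depth = 16      # number of residual blocks
scale = 4       # super-resolution factor
channels = 64   # filters per convolution (assumed)
test()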
Example #4
import os
import time

import cv2
import numpy as np

# `edsr`, `load_image`, `resolve_single`, and the path/metric helpers used
# below are project-local utilities.

def test(depth, scale, inputs_path, outputs_path, is_validate, weight_file):
    weights_dir = f'weights/edsr-{depth}-x{scale}'
    weights_file = os.path.join(weights_dir, weight_file)

    model = edsr(scale=scale, num_res_blocks=depth)
    model.load_weights(weights_file)
    print("[*] weights loaded: ",weight_file)

    if is_validate == 0:
        print("[*] inferring, scale =", scale)
        image_path_list = get_image_path(inputs_path)
        total_num = len(image_path_list)
        cnt = 0
        for img_path in image_path_list:
            t_start = time.time()
            cnt += 1
            img_name = get_image_name(img_path)
            print("[*] processing[%d/%d]:%s"%(cnt,total_num,img_name))
            lr_img = load_image(img_path)
            print("   [*] low res image shape = ",lr_img.shape)
            sr_img = resolve_single(model, lr_img)
            # sr_img = tensor2img(model(lr_img[np.newaxis, :] / 255))
            img_name_before, img_name_after = divide_image_name(img_name)
            output_img_name = img_name_before + "_EDSR_4x" + img_name_after
            output_img_path = os.path.join(outputs_path, output_img_name)
            outputs_img = sr_img
            print("output_img_name = ",output_img_name)
            cv2.imwrite(output_img_path, outputs_img)
            t_end = time.time()
            print("   [*] done! Time = %.1fs"%(t_end - t_start))
    else:
        print("   image_name                   PSNR/SSIM        PSNR/SSIM (higher,better)")
        image_path_list = get_image_path(inputs_path)
        for img_path in image_path_list:
            img_name = get_image_name(img_path)
            raw_img = cv2.imread(img_path)
            # Generate a low-resolution image from the original image
            lr_img, hr_img = create_lr_hr_pair(raw_img, 4)  # scale = 4
            sr_img = resolve_single(model, lr_img)
            bic_img = imresize_np(lr_img, 4).astype(np.uint8)
            str_format = "  [{}] Bic={:.2f}dB/{:.2f}, SR={:.2f}dB/{:.2f}"
            sr_img = sr_img.numpy()
            print(str_format.format(
                img_name + ' ' * max(0, 20 - len(img_name)),
                calculate_psnr(rgb2ycbcr(bic_img), rgb2ycbcr(hr_img)),
                calculate_ssim(rgb2ycbcr(bic_img), rgb2ycbcr(hr_img)),
                calculate_psnr(rgb2ycbcr(sr_img), rgb2ycbcr(hr_img)),
                calculate_ssim(rgb2ycbcr(sr_img), rgb2ycbcr(hr_img))))
            img_name_before, img_name_after = divide_image_name(img_name)
            output_img_name = img_name_before + "_ESRGAN_025x_4x" + img_name_after
            output_img_path = os.path.join(outputs_path, output_img_name)
            # outputs_img = np.concatenate((bic_img, sr_img, hr_img), 1)
            outputs_img = sr_img
            # cv2.imwrite(output_img_path, outputs_img)  # write super-resolution images
            img_name_before, img_name_after = divide_image_name(img_name)
            output_img_name = img_name_before + "_ESRGAN_025x" + img_name_after
            output_lr_img_path = os.path.join(outputs_path, output_img_name)
            outputs_lr_img = lr_img
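
A hypothetical call for the validation branch (directory names are placeholders; is_validate != 0 selects the PSNR/SSIM comparison path):

test(depth=16, scale=4,
     inputs_path='./inputs', outputs_path='./outputs',
     is_validate=1, weight_file='weights.h5')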
Example #5
def train(train_ds, valid_ds, ckpt_dir):
    # note: the ckpt_dir argument is ignored; './ckpt/' is hardcoded below
    model = Trainer(model=edsr(scale=scale, num_res_blocks=depth, num_filters=channels),
                    learning_rate=1e-04,
                    checkpoint_dir='./ckpt/')

    model.train(train_ds,
                valid_ds,
                steps=200000,
                evaluate_every=10000,
                save_best_only=True)

    model.restore()

def load_pre_trained_model():
    """
    Defines the CNN architecture and loads the pre-trained weights from:
    https://github.com/krasserm/super-resolution

    Args:
        None

    Returns:
        model (tensorflow/keras): CNN
    """
    scale = 4
    depth = 16
    model = edsr(scale=scale, num_res_blocks=depth)
    model.load_weights(weights_file)  # `weights_file` is expected at module scope
    return model
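
A sketch of using the loaded model with resolve_single from the same repository (the demo image path is illustrative):

model = load_pre_trained_model()
lr = load_image('demo/image_0001.png')  # `load_image` is a helper from the same repo
sr = resolve_single(model, lr)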
Example #7
import os
from pathlib import Path, PureWindowsPath

from PIL import Image

# Scale the image to manageable dimensions (converting it to PNG first if needed)
def imageScale(lr_image_path):
    path = os.path.splitext(str(lr_image_path))
    if path[1].lower() != ".png":  # splitext keeps the leading dot
        foo = Image.open(lr_image_path)
        foo.save(path[0] + ".png")

    foo = Image.open(path[0] + ".png")
    if foo.size[0] > 96 * 3:
        # halve both dimensions for wide inputs
        foo = foo.resize((foo.size[0] // 2, foo.size[1] // 2), Image.LANCZOS)  # ANTIALIAS was renamed LANCZOS in newer Pillow
    foo.save("./rescale/temp.png", optimize=True)  # the PNG writer's flag is `optimize`; `quality` applies to JPEG
    return "./rescale/temp.png"


print("\n**************************************************")
print("\t\tWelcome to Lucid")
image_path = Path(PureWindowsPath(input("Enter Image path : ")))

weights_dir = 'weights/edsr-16-x4'
weights_file = os.path.join(weights_dir, 'weights.h5')

os.makedirs(weights_dir, exist_ok=True)

model = edsr(scale=4, num_res_blocks=16)
model.load_weights(weights_file)

new_path = imageScale(image_path)
generateSR(new_path)  # `generateSR` and `edsr` are defined elsewhere in the project

print("\n**************************************************")
Example #8
def main(args):
    train_dir, models_dir = create_train_workspace(args.outdir)
    write_args(train_dir, args)
    logger.info('Training workspace is %s', train_dir)

    training_generator = cropped_sequence(args.dataset,
                                          scale=args.scale,
                                          subset='train',
                                          downgrade=args.downgrade,
                                          image_ids=args.training_images,
                                          batch_size=args.batch_size)

    if args.benchmark:
        logger.info('Validation with DIV2K benchmark')
        validation_steps = len(args.validation_images)
        validation_generator = fullsize_sequence(
            args.dataset,
            scale=args.scale,
            subset='valid',
            downgrade=args.downgrade,
            image_ids=args.validation_images)
    else:
        logger.info(
            'Validation with randomly cropped images from DIV2K validation set'
        )
        validation_steps = args.validation_steps
        validation_generator = cropped_sequence(
            args.dataset,
            scale=args.scale,
            subset='valid',
            downgrade=args.downgrade,
            image_ids=args.validation_images,
            batch_size=args.batch_size)

    if args.initial_epoch:
        logger.info('Resume training of model %s', args.pretrained_model)
        model = _load_model(args.pretrained_model)

    else:
        if args.model == "edsr":
            loss = mean_absolute_error
            model = edsr.edsr(scale=args.scale,
                              num_filters=args.num_filters,
                              num_res_blocks=args.num_res_blocks,
                              res_block_scaling=args.res_scaling)
        else:
            loss = mae
            model_fn = wdsr.wdsr_b if args.model == 'wdsr-b' else wdsr.wdsr_a
            model = model_fn(scale=args.scale,
                             num_filters=args.num_filters,
                             num_res_blocks=args.num_res_blocks,
                             res_block_expansion=args.res_expansion,
                             res_block_scaling=args.res_scaling)

        if args.weightnorm:
            model.compile(
                optimizer=wn.AdamWithWeightnorm(lr=args.learning_rate),
                loss=loss,
                metrics=[psnr])
            if args.num_init_batches > 0:
                logger.info(
                    'Data-based initialization of weights with %d batches',
                    args.num_init_batches)
                model_weightnorm_init(model, training_generator,
                                      args.num_init_batches)
        else:
            model.compile(optimizer=Adam(lr=args.learning_rate),
                          loss=loss,
                          metrics=[psnr])

        if args.pretrained_model:
            logger.info(
                'Initialization with weights from pre-trained model %s',
                args.pretrained_model)
            copy_weights(from_model=_load_model(args.pretrained_model),
                         to_model=model)

    if args.print_model_summary:
        model.summary()

    callbacks = [
        tensor_board(train_dir),
        learning_rate(step_size=args.learning_rate_step_size,
                      decay=args.learning_rate_decay),
        model_checkpoint_after(args.save_models_after_epoch,
                               models_dir,
                               monitor='val_psnr',
                               save_best_only=args.save_best_models_only
                               or args.benchmark)
    ]

    model.fit_generator(training_generator,
                        epochs=args.epochs,
                        initial_epoch=args.initial_epoch,
                        steps_per_epoch=args.steps_per_epoch,
                        validation_data=validation_generator,
                        validation_steps=validation_steps,
                        use_multiprocessing=args.use_multiprocessing,
                        max_queue_size=args.max_queue_size,
                        workers=args.num_workers,
                        callbacks=callbacks)
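
main() only reads attributes from args, so a SimpleNamespace with the expected fields is enough for a smoke test; the values below are illustrative, not the project's defaults:

from types import SimpleNamespace

args = SimpleNamespace(
    outdir='./out', dataset='./DIV2K', scale=4, downgrade='bicubic',
    training_images=range(1, 801), validation_images=range(801, 901),
    batch_size=16, benchmark=False, validation_steps=100,
    initial_epoch=0, pretrained_model=None, model='edsr',
    num_filters=64, num_res_blocks=16, res_scaling=None,
    weightnorm=False, learning_rate=1e-4, print_model_summary=False,
    learning_rate_step_size=200000, learning_rate_decay=0.5,
    save_models_after_epoch=0, save_best_models_only=True,
    epochs=300, steps_per_epoch=1000, use_multiprocessing=False,
    max_queue_size=16, num_workers=1)
main(args)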
Example #9
def Use_EDSR():
    model = edsr(scale=4, num_res_blocks=16)
    model.load_weights('weights/edsr-16-x4/weights.h5')
    lr = Load_Image(filename)  # `filename` is expected at module scope
    sr = resolve_single(model, lr)
    Save_Image(sr)
Example #10
parser.add_argument("--jpg_quality", type=int, default=100)
cfg = parser.parse_args()

if cfg.method in SR_METHODS:
	if cfg.method == 'sr':
		neural_net = model().cuda()
		neural_net.load_state_dict(torch.load("checkpoint/checkpoint.pth"))
	elif cfg.method == 'fsrcnn':
		neural_net = fsrcnn().cuda()
		neural_net.load_state_dict(torch.load("checkpoint/fsrcnn.pth"))
	elif cfg.method == 'vdsr':
		sys.path.append('model')
		neural_net = torch.load('checkpoint/vdsr.pth')["model"]
		neural_net.cuda()
	elif cfg.method == 'edsr':
		neural_net = edsr().cuda()
		neural_net.load_state_dict(torch.load("checkpoint/edsr.pth"))
	elif cfg.method == 'carn':
		neural_net = carn().cuda()
		neural_net.load_state_dict(torch.load("checkpoint/carn_m.pth"))
	neural_net.eval()   # inference mode
	neural_net.half()   # convert weights to FP16 for faster inference

vpaths = glob.glob(os.path.join('dataset/PEXELS',"*.mp4"))
vpaths.sort()

MASK = cv2.imread('./dataset/mask.png')
MASK = cv2.resize(MASK,(cfg.image_width,cfg.image_height))
MASK = Transform(MASK)
center = np.zeros((cfg.image_height,cfg.image_width,3),np.uint8)
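
A hypothetical single-frame pass through the half-precision network (Transform is the project's preprocessing; it is assumed here to return a CHW float tensor):

with torch.no_grad():
    frame = cv2.imread('dataset/frame.png')          # placeholder path
    x = Transform(frame).unsqueeze(0).cuda().half()  # 1 x C x H x W, FP16
    sr_frame = neural_net(x).float().clamp(0, 1)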
Example #11
import os

import tensorflow as tf

# The original snippet begins mid-`try`; a standard TensorFlow GPU
# memory-growth guard is assumed around it.
try:
    for gpu in tf.config.experimental.list_physical_devices('GPU'):
        tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
    # Virtual devices must be set before GPUs have been initialized
    print(e)


# Number of residual blocks
depth = 16

# Super-resolution factor
scale = 4

# Downgrade operator
downgrade = 'bicubic'

# Location of model weights (needed for demo)
weights_dir = 'weights/article'
weights_file = os.path.join(weights_dir, 'weights.h5')

# Run the demo

def resolve_and_plot(lr_image_path):
    lr = load_image(lr_image_path)
    # plot_sample_no_treatment(lr)
    sr = resolve_single(model, lr)
    plot_sample(lr, sr)

model = edsr(scale=scale, num_res_blocks=depth)
model.load_weights(weights_file)

resolve_and_plot('demo/image_0001.png')

    # (The indented block below is a separate fragment from the same example:
    # fine-tuning EDSR on a CATESR dataset inside an enclosing function.)
    catesr_train = CATESR(subset='train',
                          images_dir='/home/ec2-user/gans/data/images_rgb',
                          caches_dir='/home/ec2-user/gans/data/caches_rgb')
                          images_dir='/home/ec2-user/gans/data/images_rgb',
                          caches_dir='/home/ec2-user/gans/data/caches_rgb')
    catesr_valid = CATESR(subset='valid',
                          images_dir='/home/ec2-user/gans/data/images_rgb',
                          caches_dir='/home/ec2-user/gans/data/caches_rgb')

    train_ds = catesr_train.dataset(batch_size=1,
                                    random_transform=True,
                                    shuffle_buffer_size=500)
    valid_ds = catesr_valid.dataset(batch_size=1,
                                    random_transform=False,
                                    repeat_count=1)

    generator_model = edsr(scale=scale, num_res_blocks=depth)
    generator_model.load_weights(
        os.path.join(weights_dir,
                     'pretrained_weights-edsr-16-x4-fine-tuned.h5'))

    trainer = EdsrTrainer(model=generator_model,
                          checkpoint_dir=f'.ckpt/edsr-{depth}-x{scale}')

    # Train the EDSR model for 300,000 steps; save a checkpoint only if the
    # evaluation PSNR improves.
    trainer.train(train_ds,
                  valid_ds.take(20),
                  steps=300000,
                  evaluate_every=1000,
                  save_best_only=True)

    # Restore from checkpoint with highest PSNR
    trainer.restore()