Example #1
def test_dvdnet(**args):
	"""Denoises all sequences present in a given folder. Sequences must be stored as numbered
	image sequences. The different sequences must be stored in subfolders under the "test_path" folder.

	Inputs:
		args (dict) fields:
			"model_spatial_file": path to model of the pretrained spatial denoiser
			"model_temp_file": path to model of the pretrained temporal denoiser
			"test_path": path to sequence to denoise
			"suffix": suffix to add to output name
			"max_num_fr_per_seq": max number of frames to load per sequence
			"noise_sigma": noise level used on test set
			"dont_save_results: if True, don't save output images
			"no_gpu": if True, run model on CPU
			"save_path": where to save outputs as png
	"""
	start_time = time.time()

	# If save_path does not exist, create it
	if not os.path.exists(args['save_path']):
		os.makedirs(args['save_path'])
	logger = init_logger_test(args['save_path'])

	# Select the device: GPU if CUDA is enabled, CPU otherwise
	if args['cuda']:
		device = torch.device('cuda')
	else:
		device = torch.device('cpu')

	# Create models
	model_spa = DVDnet_spatial()
	model_temp = DVDnet_temporal(num_input_frames=NUM_IN_FRAMES)

	# Load saved weights
	state_spatial_dict = torch.load(args['model_spatial_file'])
	state_temp_dict = torch.load(args['model_temp_file'])
	if args['cuda']:
		device_ids = [0]
		model_spa = nn.DataParallel(model_spa, device_ids=device_ids).cuda()
		model_temp = nn.DataParallel(model_temp, device_ids=device_ids).cuda()
	else:
		# CPU mode: remove the DataParallel wrapper
		state_spatial_dict = remove_dataparallel_wrapper(state_spatial_dict)
		state_temp_dict = remove_dataparallel_wrapper(state_temp_dict)
	model_spa.load_state_dict(state_spatial_dict)
	model_temp.load_state_dict(state_temp_dict)

	# Set the models to evaluation mode (e.g. BatchNorm/Dropout switch to inference behavior)
	model_spa.eval()
	model_temp.eval()

	with torch.no_grad():
		# process data
		seq, _, _ = open_sequence(args['test_path'],\
									False,\
									expand_if_needed=False,\
									max_num_fr=args['max_num_fr_per_seq'])
		seq = torch.from_numpy(seq[:, np.newaxis, :, :, :]).to(device)

		seqload_time = time.time()

		# Add noise
		noise = torch.empty_like(seq).normal_(mean=0, std=args['noise_sigma']).to(device)
		seqn = seq + noise
		noisestd = torch.FloatTensor([args['noise_sigma']]).to(device)

		denframes = denoise_seq_dvdnet(seq=seqn,\
										noise_std=noisestd,\
										temp_psz=NUM_IN_FRAMES,\
										model_temporal=model_temp,\
										model_spatial=model_spa,\
										mc_algo=MC_ALGO)
		den_time = time.time()

	# Compute PSNR and log it
	psnr = batch_psnr(denframes, seq.squeeze(), 1.)
	psnr_noisy = batch_psnr(seqn.squeeze(), seq.squeeze(), 1.)
	print("\tPSNR on {} : {}\n".format(os.path.split(args['test_path'])[-1], psnr))
	print("\tDenoising time: {:.2f}s".format(den_time - seqload_time))
	print("\tSequence loaded in : {:.2f}s".format(seqload_time - start_time))
	print("\tTotal time: {:.2f}s\n".format(den_time - start_time))
	logger.info("%s, %s, PSNR noisy %fdB, PSNR %f dB" % \
			 (args['test_path'], args['suffix'], psnr_noisy, psnr))

	# Save outputs
	if not args['dont_save_results']:
		# Save sequence
		save_out_seq(seqn, denframes, args['save_path'], int(args['noise_sigma']*255), \
					   args['suffix'], args['save_noisy'])

	# close logger
	close_logger(logger)
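
A hypothetical invocation sketch (not part of the original source): the keyword arguments mirror the docstring fields above, except that the body reads args['cuda'] and args['save_noisy'], so those keys are passed directly; every path and the noise level below are placeholders.

if __name__ == "__main__":
    test_dvdnet(model_spatial_file="model_spatial.pth",   # placeholder paths
                model_temp_file="model_temp.pth",
                test_path="data/rgb/tennis",
                suffix="",
                max_num_fr_per_seq=25,
                noise_sigma=25. / 255,   # sigma given on the [0, 1] intensity scale
                dont_save_results=False,
                cuda=True,               # set False to run on the CPU
                save_path="results",
                save_noisy=True)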
Example #2
def test_fastdvdnet(**args):
    """Denoises all sequences present in a given folder. Sequences must be stored as numbered
    image sequences. The different sequences must be stored in subfolders under the "test_path" folder.

    Inputs:
            args (dict) fields:
                    "model_file": path to model
                    "test_path": path to sequence to denoise
                    "suffix": suffix to add to output name
                    "max_num_fr_per_seq": max number of frames to load per sequence
                    "noise_sigma": noise level used on test set
                    "dont_save_results: if True, don't save output images
                    "no_gpu": if True, run model on CPU
                    "save_path": where to save outputs as png
                    "gray": if True, perform denoising of grayscale images instead of RGB
    """
    # Start time
    start_time = time.time()

    # Init logger in the directory containing save_path
    logger = init_logger_test(os.path.dirname(args['save_path']))

    # Select the device: GPU if CUDA is enabled, CPU otherwise
    if args['cuda']:
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    # Create models
    print('Loading models ...')
    model_temp = FastDVDnet(num_input_frames=NUM_IN_FR_EXT)

    # Load saved weights
    state_temp_dict = torch.load(args['model_file'])
    if args['cuda']:
        device_ids = [0]
        model_temp = nn.DataParallel(model_temp, device_ids=device_ids).cuda()
    else:
        # CPU mode: remove the DataParallel wrapper
        state_temp_dict = remove_dataparallel_wrapper(state_temp_dict)
    model_temp.load_state_dict(state_temp_dict)

    # Set the model to evaluation mode (e.g. BatchNorm/Dropout switch to inference behavior)
    model_temp.eval()

    with torch.no_grad():
        # process data
        seq = open_sequence(args['test_path'], args['first'], args['last'],
                            args['already_norm'])
        seq = torch.from_numpy(seq).to(device)
        seq_time = time.time()

        # Add noise
        if not args['already_noisy']:
            noise = torch.empty_like(seq).normal_(
                mean=0, std=args['noise_sigma']).to(device)
            seqn = seq + noise
        else:
            seqn = seq
        noisestd = torch.FloatTensor([args['noise_sigma']]).to(device)

        denframes = denoise_seq_fastdvdnet(seq=seqn,
                                           noise_std=noisestd,
                                           temp_psz=NUM_IN_FR_EXT,
                                           model_temporal=model_temp)

    # Compute PSNR and log it
    stop_time = time.time()
    psnr = compute_psnr(denframes.cpu().numpy(), seq.cpu().numpy(), 1.)
    psnr_noisy = compute_psnr(seqn.cpu().numpy().squeeze(),
                              seq.cpu().numpy(), 1.)
    loadtime = (seq_time - start_time)
    runtime = (stop_time - seq_time)
    seq_length = seq.size()[0]
    logger.info("Finished denoising {}".format(args['test_path']))
    logger.info(
        "\tDenoised {} frames in {:.3f}s, loaded seq in {:.3f}s".format(
            seq_length, runtime, loadtime))
    logger.info("\tPSNR noisy {:.4f}dB, PSNR result {:.4f}dB".format(
        psnr_noisy, psnr))

    # Save outputs
    if not args['dont_save_results']:
        # Save sequence
        save_out_seq(denframes.cpu(), args['save_path'], args['first'])

    # close logger
    close_logger(logger)
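
Both CPU branches above pass the loaded checkpoint through remove_dataparallel_wrapper. A minimal sketch of what such a helper typically does, assuming the checkpoint was saved from an nn.DataParallel model (whose parameter keys carry a 'module.' prefix); this is an illustrative stand-in, not necessarily the repository's own implementation.

from collections import OrderedDict

def remove_dataparallel_wrapper(state_dict):
    """Strip the 'module.' prefix that nn.DataParallel adds to parameter names,
    so the weights can be loaded into a plain, single-device model."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        name = key[len('module.'):] if key.startswith('module.') else key
        new_state_dict[name] = value
    return new_state_dict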
Example #3
def test_ffdnet(**args):
    r"""Denoises an input image with FFDNet
	"""
    # Init logger
    logger = init_logger_ipol()

    # Check if input exists and if it is RGB
    try:
        rgb_den = is_rgb(args['input'])
    except:
        raise Exception('Could not open the input image')

    # Open image as a CxHxW torch.Tensor
    if rgb_den:
        in_ch = 3
        model_fn = 'models/net_rgb.pth'
        imorig = cv2.imread(args['input'])
        # from HxWxC to CxHxW, RGB image
        imorig = (cv2.cvtColor(imorig, cv2.COLOR_BGR2RGB)).transpose(2, 0, 1)
    else:
        # from HxWxC to  CxHxW grayscale image (C=1)
        in_ch = 1
        model_fn = 'logs/net.pth'
        imorig = cv2.imread(args['input'], cv2.IMREAD_GRAYSCALE)
        imorig_copy = imorig.copy()
        imorig = np.expand_dims(imorig, 0)
    imorig = np.expand_dims(imorig, 0)

    # Handle odd sizes
    expanded_h = False
    expanded_w = False
    sh_im = imorig.shape
    if sh_im[2] % 2 == 1:
        expanded_h = True
        imorig = np.concatenate((imorig, \
          imorig[:, :, -1, :][:, :, np.newaxis, :]), axis=2)

    if sh_im[3] % 2 == 1:
        expanded_w = True
        imorig = np.concatenate((imorig, \
          imorig[:, :, :, -1][:, :, :, np.newaxis]), axis=3)

    imorig = normalize(imorig)
    imorig = torch.Tensor(imorig)

    # Absolute path to model file
    model_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), \
       model_fn)

    # Create model
    print('Loading model ...\n')
    net = FFDNet(num_input_channels=in_ch)

    # Load saved weights
    if args['cuda']:
        state_dict = torch.load(model_fn)
        device_ids = [0]
        model = nn.DataParallel(net, device_ids=device_ids).cuda()
    else:
        state_dict = torch.load(model_fn, map_location='cpu')
        # CPU mode: remove the DataParallel wrapper
        state_dict = remove_dataparallel_wrapper(state_dict)
        model = net
    model.load_state_dict(state_dict)

    # Set the model to evaluation mode (e.g. BatchNorm/Dropout switch to inference behavior)
    model.eval()

    # Sets data type according to CPU or GPU modes
    if args['cuda']:
        dtype = torch.cuda.FloatTensor
    else:
        dtype = torch.FloatTensor

    # Add noise
    if args['add_noise']:
        noise = torch.FloatTensor(imorig.size()).\
          normal_(mean=0, std=args['noise_sigma'])
        imnoisy = imorig + noise
    else:
        imnoisy = imorig.clone()

    # Test mode
    with torch.no_grad():  # PyTorch v0.4.0
        imorig, imnoisy = Variable(imorig.type(dtype)), \
            Variable(imnoisy.type(dtype))
        nsigma = Variable(torch.FloatTensor([args['noise_sigma']]).type(dtype))

    # Measure runtime
    start_t = time.time()

    # Estimate the noise and subtract it from the input image
    with torch.no_grad():
        im_noise_estim = model(imnoisy, nsigma)
        outim = torch.clamp(imnoisy - im_noise_estim, 0., 1.)
    stop_t = time.time()

    if expanded_h:
        imorig = imorig[:, :, :-1, :]
        outim = outim[:, :, :-1, :]
        imnoisy = imnoisy[:, :, :-1, :]

    if expanded_w:
        imorig = imorig[:, :, :, :-1]
        outim = outim[:, :, :, :-1]
        imnoisy = imnoisy[:, :, :, :-1]

    # Compute PSNR and log it
    if rgb_den:
        print("### RGB denoising ###")
    else:
        print("### Grayscale denoising ###")
    if args['add_noise']:
        psnr = batch_psnr(outim, imorig, 1.)
        psnr_noisy = batch_psnr(imnoisy, imorig, 1.)

        print("----------PSNR noisy    {0:0.2f}dB".format(psnr_noisy))
        print("----------PSNR denoised {0:0.2f}dB".format(psnr))
    else:
        logger.info("\tNo noise was added, cannot compute PSNR")
    print("----------Runtime     {0:0.4f}s".format(stop_t - start_t))

    # Compute difference
    diffout = 2 * (outim - imorig) + .5
    diffnoise = 2 * (imnoisy - imorig) + .5

    # Save images
    noisyimg = variable_to_cv2_image(imnoisy)
    outimg = variable_to_cv2_image(outim)
    if not args['dont_save_results']:
        cv2.imwrite(
            "bfffd/noisy-" + str(int(args['noise_sigma'] * 255)) + '-' +
            args['input'], noisyimg)
        cv2.imwrite(
            "bfffd/ffdnet-" + str(int(args['noise_sigma'] * 255)) + '-' +
            args['input'], outimg)

        if args['add_noise']:
            cv2.imwrite("noisy_diff.png", variable_to_cv2_image(diffnoise))
            cv2.imwrite("ffdnet_diff.png", variable_to_cv2_image(diffout))

    # SSIM is computed against imorig_copy, which is only defined for grayscale inputs
    if not rgb_den:
        (score, diff) = compare_ssim(noisyimg, imorig_copy, full=True)
        (score2, diff) = compare_ssim(outimg, imorig_copy, full=True)
        print("----------Noisy SSIM:     {0:0.4f}".format(score))
        print("----------Denoised SSIM:  {0:0.4f}".format(score2))
Example #4
def test_ffdnet(**args):
    r"""Denoises an input image with FFDNet
	"""
    # Init logger
    logger = init_logger_ipol()

    # Check if input exists and if it is RGB
    try:
        rgb_den = is_rgb(args['input'])
    except:
        raise Exception('Could not open the input image')

    # Measure runtime
    start_t = time.time()

    # Open image as a CxHxW torch.Tensor
    if rgb_den:
        in_ch = 3
        model_fn = 'net_rgb.pth'
        imorig = Image.open(args['input'])
        imorig = np.array(imorig, dtype=np.float32).transpose(2, 0, 1)
    else:
        # from HxWxC to CxHxW grayscale image (C=1)
        in_ch = 1
        model_fn = 'models/net_gray.pth'
        imorig = cv2.imread(args['input'], cv2.IMREAD_GRAYSCALE)
        imorig = np.expand_dims(imorig, 0)
    imorig = np.expand_dims(imorig, 0)

    # Handle odd sizes
    expanded_h = False
    expanded_w = False
    sh_im = imorig.shape
    if sh_im[2] % 2 == 1:
        expanded_h = True
        imorig = np.concatenate((imorig, \
          imorig[:, :, -1, :][:, :, np.newaxis, :]), axis=2)

    if sh_im[3] % 2 == 1:
        expanded_w = True
        imorig = np.concatenate((imorig, \
          imorig[:, :, :, -1][:, :, :, np.newaxis]), axis=3)

    imorig = normalize(imorig)
    imorig = torch.Tensor(imorig)
    # Absolute path to model file
    model_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), \
       model_fn)

    # Create model
    print('Loading model ...\n')
    net = FFDNet(num_input_channels=in_ch)

    # Load saved weights
    if args['cuda']:
        state_dict = torch.load(model_fn)
        #device_ids = [0,1,2,3]
        #model = nn.DataParallel(net, device_ids=device_ids).cuda()
        #state_dict = remove_dataparallel_wrapper(state_dict)
        model = net
    else:
        state_dict = torch.load(model_fn, map_location='cpu')
        # CPU mode: remove the DataParallel wrapper
        state_dict = remove_dataparallel_wrapper(state_dict)
        model = net
    model.load_state_dict(state_dict)

    # Set the model to evaluation mode (e.g. BatchNorm/Dropout switch to inference behavior)
    model.eval()

    # Sets data type according to CPU or GPU modes
    if args['cuda']:
        dtype = torch.cuda.FloatTensor
    else:
        dtype = torch.FloatTensor

    # Test mode
    with torch.no_grad():  # PyTorch v0.4.0
        imorig = Variable(imorig.type(dtype))
        nsigma = Variable(torch.FloatTensor([args['noise_sigma']]).type(dtype))

    # # Measure runtime
    # start_t = time.time()

    # Estimate the noise in the input image
    with torch.no_grad():
        im_noise_estim = model(imorig, nsigma)
    stop_t = time.time()

    # log time
    if rgb_den:
        print("### RGB denoising ###")
    else:
        print("### Grayscale denoising ###")
    print("\tRuntime {0:0.4f}s".format(stop_t - start_t))

    # Save the estimated noise (keep both tensors on the same device for the subtraction)
    noise = variable_to_numpy(imorig - im_noise_estim).transpose(1, 2, 0)
    filename = args['input'].split('/')[-1].split('.')[0]
    if args['save_path']:
        sio.savemat(
            './output_noise/' + args['save_path'] + '/' + filename + '.mat',
            {'Noisex': noise})
    else:
        sio.savemat('./output_noise/' + filename + '.mat', {'Noisex': noise})
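
The FFDNet examples call normalize on the raw image before building the tensor and expect noise_sigma on the same [0, 1] scale (output names use int(noise_sigma * 255)). A minimal sketch of that convention, assuming 8-bit inputs; the helper below is illustrative, not the project's own normalize.

import numpy as np

def normalize_uint8(img):
    """Map an 8-bit image (values in 0-255) to float32 values in [0, 1]."""
    return np.asarray(img).astype(np.float32) / 255.

# A noise level of 25 on the 0-255 scale becomes 25/255 on the normalized scale,
# which is the value these scripts expect in args['noise_sigma'].
sigma_255 = 25
sigma_norm = sigma_255 / 255.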
Example #5
model_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), model_fn)
# Create model
print('Loading model ...\n')
net = FFDNet(num_input_channels=in_ch)

# Load saved weights
if cuda:
    state_dict = torch.load(model_fn)
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    # Sets data type according to CPU or GPU modes
    dtype = torch.cuda.FloatTensor
else:
    state_dict = torch.load(model_fn, map_location='cpu')
    # CPU mode: remove the DataParallel wrapper
    state_dict = remove_dataparallel_wrapper(state_dict)
    model = net
    dtype = torch.FloatTensor
model.load_state_dict(state_dict)
# If loading a checkpoint saved mid-training, uncomment the following line instead
# model.load_state_dict(state_dict['state_dict'])
# Set the model to evaluation mode (e.g. BatchNorm/Dropout switch to inference behavior)
model.eval()
# Load the fingerprint generated with a set of flat images
data = loadmat('mat' + os.sep + 'FlatFingerprint1.mat')
Fingerprint = data['Fingerprint']
[M, N] = [Fingerprint.shape[0], Fingerprint.shape[1]]
up = int(M / 2 - B / 2)
down = int(M / 2 + B / 2)
left = int(N / 2 - B / 2)
right = int(N / 2 + B / 2)
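
The four indices computed above delimit a B x B window centered in the fingerprint. A minimal usage sketch, assuming B does not exceed M or N:

# Center-crop the fingerprint to the B x B region delimited above
fingerprint_crop = Fingerprint[up:down, left:right]
assert fingerprint_crop.shape[:2] == (B, B)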
Example #6
def test_fastdvdnet(**args):
    """Denoises all sequences present in a given folder. Sequences must be stored as numbered
	image sequences. The different sequences must be stored in subfolders under the "test_path" folder.

	Inputs:
			args (dict) fields:
					"model_file": path to model
					"test_path": path to sequence to denoise
					"suffix": suffix to add to output name
					"max_num_fr_per_seq": max number of frames to load per sequence
					"dont_save_results: if True, don't save output images
					"no_gpu": if True, run model on CPU
					"save_path": where to save outputs as png
					"gray": if True, perform denoising of grayscale images instead of RGB
	"""
    # Start time
    start_time = time.time()

    # If save_path does not exist, create it
    if not os.path.exists(args['save_path']):
        os.makedirs(args['save_path'])
    logger = init_logger_test(args['save_path'])

    # Select the device: the first GPU id if CUDA is enabled, CPU otherwise
    if args['cuda']:
        device = args['device_id'][0]
    else:
        device = torch.device('cpu')

    # Create models
    print('Loading models ...')
    model_temp = FastDVDnet(num_input_frames=NUM_IN_FR_EXT)

    # Load saved weights
    state_temp_dict = torch.load(args['model_file'])
    if args['cuda']:
        device_ids = args['device_id']
        model_temp = nn.DataParallel(model_temp,
                                     device_ids=device_ids).cuda(device)
    else:
        # CPU mode: remove the DataParallel wrapper
        state_temp_dict = remove_dataparallel_wrapper(state_temp_dict)
    model_temp.load_state_dict(state_temp_dict)

    # Set the model to evaluation mode (e.g. BatchNorm/Dropout switch to inference behavior)
    model_temp.eval()

    gt = None
    with torch.no_grad():
        # process data
        seq, _, _ = open_sequence(args['test_path'],
                                  args['gray'],
                                  expand_if_needed=False,
                                  max_num_fr=args['max_num_fr_per_seq'])
        seq = torch.from_numpy(seq).to(device)
        seq_time = time.time()

        denframes = denoise_seq_fastdvdnet(seq=seq,
                                           temp_psz=NUM_IN_FR_EXT,
                                           model_temporal=model_temp)

        if args['gt_path'] is not None:
            gt, _, _ = open_sequence(args['gt_path'],
                                     args['gray'],
                                     expand_if_needed=False,
                                     max_num_fr=args['max_num_fr_per_seq'])
            gt = torch.from_numpy(gt).to(device)

    # Compute PSNR and log it
    stop_time = time.time()
    if gt is None:
        psnr = 0
        psnr_noisy = 0
    else:
        psnr = batch_psnr(denframes, gt, 1.)
        psnr_noisy = batch_psnr(seq.squeeze(), gt, 1.)
    loadtime = (seq_time - start_time)
    runtime = (stop_time - seq_time)
    seq_length = seq.size()[0]
    logger.info("Finished denoising {}".format(args['test_path']))
    logger.info(
        "\tDenoised {} frames in {:.3f}s, loaded seq in {:.3f}s".format(
            seq_length, runtime, loadtime))
    logger.info("\tPSNR noisy {:.4f}dB, PSNR result {:.4f}dB".format(
        psnr_noisy, psnr))

    # Save outputs
    if not args['dont_save_results']:
        # Save sequence
        save_out_seq(seq, denframes, args['save_path'], 0, args['suffix'],
                     args['save_noisy'])

    # close logger
    close_logger(logger)
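
A hypothetical call for this variant (all paths are placeholders): unlike the other examples it denoises the input sequence as-is, optionally scoring against a ground-truth folder, and the body reads args['cuda'], args['device_id'], and args['save_noisy'] directly, so those keys are passed here.

if __name__ == "__main__":
    test_fastdvdnet(model_file="model.pth",          # placeholder paths
                    test_path="data/noisy_seq",
                    gt_path="data/clean_seq",        # or None to skip the PSNR computation
                    suffix="",
                    max_num_fr_per_seq=25,
                    dont_save_results=False,
                    cuda=True,                       # set False to run on the CPU
                    device_id=[0],
                    gray=False,
                    save_path="results",
                    save_noisy=False)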
Example #7
def test_fastdvdnet(**args):
    """Denoises all sequences present in a given folder. Sequences must be stored as numbered
    image sequences. The different sequences must be stored in subfolders under the "test_path" folder.

    Inputs:
            args (dict) fields:
                    "model_file": path to model
                    "test_path": path to sequence to denoise
                    "suffix": suffix to add to output name
                    "max_num_fr_per_seq": max number of frames to load per sequence
                    "noise_sigma": noise level used on test set
                    "dont_save_results: if True, don't save output images
                    "no_gpu": if True, run model on CPU
                    "save_path": where to save outputs as png
                    "gray": if True, perform denoising of grayscale images instead of RGB
    """


    # If save_path does not exist, create it
    if not os.path.exists(args['save_path']):
        os.makedirs(args['save_path'])
    logger = init_logger_test(args['save_path'])

    # Select the device: GPU if CUDA is enabled, CPU otherwise
    if args['cuda']:
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    # Create models
    print('Loading models ...')
    model_temp = FastDVDnet(num_input_frames=NUM_IN_FR_EXT)

    # Load saved weights
    state_temp_dict = torch.load(args['model_file'])
    if args['cuda']:
        device_ids = [0]
        model_temp = nn.DataParallel(model_temp, device_ids=device_ids).cuda()
    else:
        # CPU mode: remove the DataParallel wrapper
        state_temp_dict = remove_dataparallel_wrapper(state_temp_dict)
    model_temp.load_state_dict(state_temp_dict)

    # Set the model to evaluation mode (e.g. BatchNorm/Dropout switch to inference behavior)
    model_temp.eval()
    processed_count = 0
    # To avoid out of memory issues, we only process one folder at a time.
    for tmp_folder in get_next_folder(args['test_path'], args['max_num_fr_per_seq']):
        folder = tmp_folder.name
        # Start time
        print("Processing {}".format(os.listdir(tmp_folder.name)))
        logger.info("Processing {}".format(os.listdir(folder)))
        start_time = time.time()
        with torch.no_grad():
            # process data
            seq, _, _ = open_sequence(folder,
                                      args['gray'],
                                      expand_if_needed=False,
                                      max_num_fr=args['max_num_fr_per_seq'])
            seq = torch.from_numpy(seq).to(device)
            seq_time = time.time()

            # Add noise
            noise = torch.empty_like(seq).normal_(
                mean=0, std=args['noise_sigma']).to(device)
            seqn = seq + noise
            noisestd = torch.FloatTensor([args['noise_sigma']]).to(device)

            denframes = denoise_seq_fastdvdnet(seq=seqn,
                                               noise_std=noisestd,
                                               temp_psz=NUM_IN_FR_EXT,
                                               model_temporal=model_temp)

            # Compute PSNR and log it
            stop_time = time.time()
            psnr = batch_psnr(denframes, seq, 1.)
            psnr_noisy = batch_psnr(seqn.squeeze(), seq, 1.)
            loadtime = (seq_time - start_time)
            runtime = (stop_time - seq_time)
            seq_length = seq.size()[0]
            logger.info("Finished denoising {}".format(args['test_path']))
            logger.info("\tDenoised {} frames in {:.3f}s, loaded seq in {:.3f}s".
                        format(seq_length, runtime, loadtime))
            logger.info(
                "\tPSNR noisy {:.4f}dB, PSNR result {:.4f}dB".format(psnr_noisy, psnr))

            # Save outputs
            if not args['dont_save_results']:
                # Save sequence
                save_out_seq(seqn, denframes, args['save_path'],
                             int(args['noise_sigma']*255), args['suffix'],
                             args['save_noisy'], processed_count)
                # subtract half stride because of the half-steps get_next_folder takes.
                processed_count += seqn.size()[0]

    # close logger
    close_logger(logger)
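
get_next_folder is not shown in this example; from its use above it yields objects with a .name attribute (like tempfile.TemporaryDirectory) holding one chunk of frames at a time, and the comment near save_out_seq suggests the real helper steps in overlapping half-strides. A purely hypothetical, non-overlapping sketch of that idea:

import os
import shutil
import tempfile

def get_next_folder(test_path, chunk_size):
    """Hypothetical helper: yield temporary folders, each holding at most
    chunk_size consecutive frames copied from test_path, so long sequences
    can be denoised one chunk at a time without exhausting memory."""
    frames = sorted(os.listdir(test_path))
    for start in range(0, len(frames), chunk_size):
        tmp_folder = tempfile.TemporaryDirectory()
        for fname in frames[start:start + chunk_size]:
            shutil.copy(os.path.join(test_path, fname), tmp_folder.name)
        yield tmp_folder
        tmp_folder.cleanup()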