Example #1
0
def test_ffdnet(**args):
    r"""Denoises an input image with FFDNet and saves the estimated
    noise map as a .mat file under ./output_noise/.

    Expected keys in ``args``:
        input (str): path to the input image.
        noise_sigma (float): noise level conditioning the network.
        cuda (bool): run inference on GPU when True.
        save_path (str): optional sub-directory of ./output_noise/.
    """
    # Init logger
    logger = init_logger_ipol()

    # Check if input exists and if it is RGB
    try:
        rgb_den = is_rgb(args['input'])
    except Exception as exc:
        # FIX: bare except replaced; chain the cause instead of hiding it
        raise Exception('Could not open the input image') from exc

    # Measure runtime
    start_t = time.time()

    # Open image as a CxHxW torch.Tensor
    if rgb_den:
        in_ch = 3
        model_fn = 'net_rgb.pth'
        imorig = Image.open(args['input'])
        # HxWxC -> CxHxW, RGB image
        imorig = np.array(imorig, dtype=np.float32).transpose(2, 0, 1)
    else:
        # Grayscale: load as HxW, then expand to CxHxW (C=1)
        in_ch = 1
        model_fn = 'models/net_gray.pth'
        # FIX: the image load had been commented out, so 'imorig' was
        # used before assignment on this path (NameError at runtime).
        imorig = np.array(Image.open(args['input']).convert('L'),
                          dtype=np.float32)
        imorig = np.expand_dims(imorig, 0)
    # Add batch dimension: CxHxW -> 1xCxHxW
    imorig = np.expand_dims(imorig, 0)

    # Handle odd sizes: the network needs even H and W, so replicate the
    # last row/column when needed and remember to crop back afterwards.
    expanded_h = False
    expanded_w = False
    sh_im = imorig.shape
    if sh_im[2] % 2 == 1:
        expanded_h = True
        imorig = np.concatenate((imorig,
                                 imorig[:, :, -1, :][:, :, np.newaxis, :]),
                                axis=2)

    if sh_im[3] % 2 == 1:
        expanded_w = True
        imorig = np.concatenate((imorig,
                                 imorig[:, :, :, -1][:, :, :, np.newaxis]),
                                axis=3)

    # Scale to [0, 1] and convert to a torch tensor
    imorig = normalize(imorig)
    imorig = torch.Tensor(imorig)

    # Absolute path to model file
    model_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                            model_fn)

    # Create model
    print('Loading model ...\n')
    net = FFDNet(num_input_channels=in_ch)

    # Load saved weights
    if args['cuda']:
        state_dict = torch.load(model_fn)
        model = net
    else:
        state_dict = torch.load(model_fn, map_location='cpu')
        # CPU mode: remove the DataParallel wrapper
        state_dict = remove_dataparallel_wrapper(state_dict)
        model = net
    model.load_state_dict(state_dict)

    # Sets the model in evaluation mode (e.g. it removes BN)
    model.eval()

    # Sets data type according to CPU or GPU modes
    if args['cuda']:
        dtype = torch.cuda.FloatTensor
    else:
        dtype = torch.FloatTensor

    # Test mode: run the forward pass without building a graph.
    # FIX: the model call previously ran outside torch.no_grad(),
    # keeping gradient buffers alive during inference.
    with torch.no_grad():
        imorig = Variable(imorig.type(dtype))
        nsigma = Variable(torch.FloatTensor([args['noise_sigma']]).type(dtype))

        # Estimate the noise present in the input image
        im_noise_estim = model(imorig, nsigma)
    stop_t = time.time()

    # Undo the odd-size padding so the saved noise matches the input.
    # FIX: the padded rows/columns were previously never cropped.
    if expanded_h:
        imorig = imorig[:, :, :-1, :]
        im_noise_estim = im_noise_estim[:, :, :-1, :]
    if expanded_w:
        imorig = imorig[:, :, :, :-1]
        im_noise_estim = im_noise_estim[:, :, :, :-1]

    # log time
    if rgb_den:
        print("### RGB denoising ###")
    else:
        print("### Grayscale denoising ###")
    print("\tRuntime {0:0.4f}s".format(stop_t - start_t))

    # Save the noise residual as HxWxC.
    # FIX: 'imorig.to(3)' moved the tensor to CUDA device 3 (and crashed
    # in CPU mode); the residual is simply imorig - im_noise_estim.
    noise = variable_to_numpy(imorig - im_noise_estim).transpose(1, 2, 0)
    filename = args['input'].split('/')[-1].split('.')[0]
    if args['save_path']:
        sio.savemat(
            './output_noise/' + args['save_path'] + '/' + filename + '.mat',
            {'Noisex': noise})
    else:
        sio.savemat('./output_noise/' + filename + '.mat', {'Noisex': noise})
Example #2
0
def test_ffdnet(**args):
    r"""Denoises an input image with FFDNet, logs PSNR/SSIM and runtime,
    and optionally writes the noisy/denoised images to disk.

    Expected keys in ``args``:
        input (str): path to the input image.
        noise_sigma (float): noise standard deviation (in [0, 1] range).
        cuda (bool): run inference on GPU when True.
        add_noise (bool): synthesize AWGN on the input before denoising.
        dont_save_results (bool): skip writing output images when True.
    """
    # Init logger
    logger = init_logger_ipol()

    # Check if input exists and if it is RGB
    try:
        rgb_den = is_rgb(args['input'])
    except Exception as exc:
        # FIX: bare except replaced; chain the cause instead of hiding it
        raise Exception('Could not open the input image') from exc

    # Open image as a CxHxW torch.Tensor
    if rgb_den:
        in_ch = 3
        model_fn = 'models/net_rgb.pth'
        imorig = cv2.imread(args['input'])
        # FIX: keep a uint8 copy for SSIM; previously only the grayscale
        # branch defined imorig_copy, causing a NameError on RGB input.
        imorig_copy = imorig.copy()
        # from HxWxC to CxHxW, RGB image
        imorig = (cv2.cvtColor(imorig, cv2.COLOR_BGR2RGB)).transpose(2, 0, 1)
    else:
        # from HxWxC to  CxHxW grayscale image (C=1)
        in_ch = 1
        model_fn = 'logs/net.pth'
        imorig = cv2.imread(args['input'], cv2.IMREAD_GRAYSCALE)
        imorig_copy = imorig.copy()
        imorig = np.expand_dims(imorig, 0)
    # Add batch dimension: CxHxW -> 1xCxHxW
    imorig = np.expand_dims(imorig, 0)

    # Handle odd sizes: the network needs even H and W, so replicate the
    # last row/column when needed and crop back after inference.
    expanded_h = False
    expanded_w = False
    sh_im = imorig.shape
    if sh_im[2] % 2 == 1:
        expanded_h = True
        imorig = np.concatenate((imorig,
                                 imorig[:, :, -1, :][:, :, np.newaxis, :]),
                                axis=2)

    if sh_im[3] % 2 == 1:
        expanded_w = True
        imorig = np.concatenate((imorig,
                                 imorig[:, :, :, -1][:, :, :, np.newaxis]),
                                axis=3)

    # Scale to [0, 1] and convert to a torch tensor
    imorig = normalize(imorig)
    imorig = torch.Tensor(imorig)

    # Absolute path to model file
    model_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                            model_fn)

    # Create model
    print('Loading model ...\n')
    net = FFDNet(num_input_channels=in_ch)

    # Load saved weights
    if args['cuda']:
        state_dict = torch.load(model_fn)
        device_ids = [0]
        model = nn.DataParallel(net, device_ids=device_ids).cuda()
    else:
        state_dict = torch.load(model_fn, map_location='cpu')
        # CPU mode: remove the DataParallel wrapper
        state_dict = remove_dataparallel_wrapper(state_dict)
        model = net
    model.load_state_dict(state_dict)

    # Sets the model in evaluation mode (e.g. it removes BN)
    model.eval()

    # Sets data type according to CPU or GPU modes
    if args['cuda']:
        dtype = torch.cuda.FloatTensor
    else:
        dtype = torch.FloatTensor

    # Add synthetic white Gaussian noise, or denoise the input as-is
    if args['add_noise']:
        noise = torch.FloatTensor(imorig.size()).\
          normal_(mean=0, std=args['noise_sigma'])
        imnoisy = imorig + noise
    else:
        imnoisy = imorig.clone()

    # Test mode
    with torch.no_grad():  # PyTorch v0.4.0
        imorig, imnoisy = Variable(imorig.type(dtype)), \
            Variable(imnoisy.type(dtype))
        nsigma = Variable(torch.FloatTensor([args['noise_sigma']]).type(dtype))

    # Measure runtime
    start_t = time.time()

    # Estimate noise and subtract it to the input image
    im_noise_estim = model(imnoisy, nsigma)
    outim = torch.clamp(imnoisy - im_noise_estim, 0., 1.)
    stop_t = time.time()

    # Crop the replicated row/column added for odd sizes
    if expanded_h:
        imorig = imorig[:, :, :-1, :]
        outim = outim[:, :, :-1, :]
        imnoisy = imnoisy[:, :, :-1, :]

    if expanded_w:
        imorig = imorig[:, :, :, :-1]
        outim = outim[:, :, :, :-1]
        imnoisy = imnoisy[:, :, :, :-1]

    # Compute PSNR and log it
    if rgb_den:
        print("### RGB denoising ###")
    else:
        print("### Grayscale denoising ###")
    if args['add_noise']:
        psnr = batch_psnr(outim, imorig, 1.)
        psnr_noisy = batch_psnr(imnoisy, imorig, 1.)

        print("----------PSNR noisy    {0:0.2f}dB".format(psnr_noisy))
        print("----------PSNR denoised {0:0.2f}dB".format(psnr))
    else:
        logger.info("\tNo noise was added, cannot compute PSNR")
    print("----------Runtime     {0:0.4f}s".format(stop_t - start_t))

    # Compute difference
    diffout = 2 * (outim - imorig) + .5
    diffnoise = 2 * (imnoisy - imorig) + .5

    # Convert to uint8 images once; needed both for saving and for SSIM.
    # FIX: these were previously only defined when results were saved,
    # so the SSIM computation crashed with dont_save_results=True.
    noisyimg = variable_to_cv2_image(imnoisy)
    outimg = variable_to_cv2_image(outim)

    # Save images
    if not args['dont_save_results']:
        cv2.imwrite(
            "bfffd/noisy-" + str(int(args['noise_sigma'] * 255)) + '-' +
            args['input'], noisyimg)
        cv2.imwrite(
            "bfffd/ffdnet-" + str(int(args['noise_sigma'] * 255)) + '-' +
            args['input'], outimg)

        if args['add_noise']:
            cv2.imwrite("noisy_diff.png", variable_to_cv2_image(diffnoise))
            cv2.imwrite("ffdnet_diff.png", variable_to_cv2_image(diffout))

    # NOTE(review): RGB images need multichannel SSIM; grayscale keeps the
    # default (False), matching the original call. Confirm the installed
    # compare_ssim version accepts the 'multichannel' keyword.
    (score, diff) = compare_ssim(noisyimg, imorig_copy, full=True,
                                 multichannel=rgb_den)
    (score2, diff) = compare_ssim(outimg, imorig_copy, full=True,
                                  multichannel=rgb_den)
    print("----------Noisy ssim:    {0:0.4f}".format(score))
    print("----------Denoisy ssim: {0:0.4f}".format(score2))
Example #3
0
from torch.autograd import Variable
from models import FFDNet
from utils import batch_psnr, normalize, init_logger_ipol, \
    variable_to_cv2_image, remove_dataparallel_wrapper, is_rgb
from sanic import Sanic, response

from srpgan_api import evaluate

app = Sanic(__name__)

# Pin the process to GPU 0 (PCI bus ordering makes the index stable)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Module-level setup: build the FFDNet RGB denoiser once at import time
# so every request served by the app reuses the same loaded weights.

# Init logger
logger = init_logger_ipol()

# Absolute path to the RGB model file.
# FIX: model_fn was previously joined with the script directory twice;
# the second join was a no-op only because the path was already absolute.
model_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                        'models/net_rgb.pth')

# Create model
print('Loading model ...\n')
net = FFDNet(num_input_channels=3, test_mode=True)

# Load saved weights
print(model_fn)
state_dict = torch.load(model_fn)