Example #1
class OptimizerNet(nn.Module):

    def __init__(self, num_steps,
                 use_grad_adj=True,
                 use_grad_scaler=True,
                 use_reg=True,
                 share_parameter=True,
                 use_cuda=True):

        super(OptimizerNet, self).__init__()
        # basic configuration
        input_depth = 1
        pad = 'reflection'
        self.num_steps = num_steps
        self.momen = 0.8
        self.grad_datafitting_cal = GradDataFitting()
        self.use_grad_adj = use_grad_adj
        self.use_reg = use_reg
        self.use_grad_scaler = use_grad_scaler
        self.share_parameter = share_parameter
        if self.share_parameter:
            if self.use_reg:
                # regularization sub-network
                self.rnet = skip(input_depth, 1,
                                 num_channels_down=[128, 128, 128, 128, 128],
                                 num_channels_up=[128, 128, 128, 128, 128],
                                 num_channels_skip=[16, 16, 16, 16, 16],
                                 upsample_mode='bilinear',
                                 need_sigmoid=True, need_bias=True,
                                 pad=pad, act_fun='LeakyReLU')
            if self.use_grad_adj:
                # gradient-adjustment sub-network
                self.fnet = skip(input_depth, 1,
                                 num_channels_down=[128, 128, 128, 128, 128],
                                 num_channels_up=[128, 128, 128, 128, 128],
                                 num_channels_skip=[16, 16, 16, 16, 16],
                                 upsample_mode='bilinear',
                                 need_sigmoid=True, need_bias=True,
                                 pad=pad, act_fun='LeakyReLU')
            if self.use_grad_scaler:
                # gradient-scaling sub-network
                self.dnet = ConvBlock()
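
Example #1 only shows the constructor of the OptimizerNet module; a minimal instantiation sketch, assuming GradDataFitting, skip and ConvBlock come from the surrounding project and that the forward pass is defined elsewhere, could look like this:

# Hypothetical instantiation; num_steps is an arbitrary placeholder and the
# flags simply mirror the constructor defaults shown above.
optimizer_net = OptimizerNet(num_steps=10,
                             use_grad_adj=True,
                             use_grad_scaler=True,
                             use_reg=True,
                             share_parameter=True,
                             use_cuda=torch.cuda.is_available())
if torch.cuda.is_available():
    optimizer_net = optimizer_net.cuda()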
Example #2
    # ######################################################################
    # Partial snippet: opt, img_size, pad, INPUT, dtype, get_noise, skip and
    # Predictor are defined elsewhere in the surrounding project.
    padh, padw = opt.kernel_size[0] - 1, opt.kernel_size[1] - 1
    opt.img_size[0], opt.img_size[1] = img_size[1] + padh, img_size[2] + padw
    '''
    x_net:
    '''
    input_depth = 8

    net_input = get_noise(input_depth, INPUT,
                          (opt.img_size[0], opt.img_size[1])).type(dtype)

    net = skip(input_depth,
               1,
               num_channels_down=[128, 128, 128, 128, 128],
               num_channels_up=[128, 128, 128, 128, 128],
               num_channels_skip=[16, 16, 16, 16, 16],
               upsample_mode='bilinear',
               need_sigmoid=True,
               need_bias=True,
               pad=pad,
               act_fun='LeakyReLU')

    net = net.type(dtype)
    '''
    k_net:
    '''
    # The fcn-style kernel input below is left disabled here; a Predictor
    # network is used for the kernel instead (see net_kernel below).
    # n_k = 200
    # net_input_kernel = get_noise(n_k, INPUT, (1, 1)).type(dtype)
    # net_input_kernel.squeeze_()

    net_kernel = Predictor(1, 128, opt.kernel_size[0] * opt.kernel_size[1])
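
Example #2 stops right after the two networks are built. A minimal continuation sketch, assuming Predictor is an ordinary nn.Module (its input is not shown above, so only the image branch is exercised):

    # Hypothetical continuation: cast the kernel network and run the image
    # branch once. How net_kernel is fed is not shown in the snippet above.
    net_kernel = net_kernel.type(dtype)
    out_x = net(net_input)   # latent image estimate, padded by (padh, padw)
    print(out_x.shape)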
Example #3
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import MultiStepLR
from tqdm import tqdm

# skip, fcn, get_noise, np_to_torch, torch_to_np, TVLoss and dtype are
# assumed to come from the surrounding DIP/SelfDeblur codebase.


def deblur(input,
           kernel_size,
           output,
           outputk=None,
           sigma=0,
           lr=0.01,
           reg_noise_std=0.001,
           num_iter=5000,
           normalization=1):
    INPUT = 'noise'
    pad = 'reflection'

    kernel_size = [kernel_size, kernel_size]

    import iio
    imgs = iio.read(input) / normalization
    imgs = imgs.transpose((2, 0, 1))
    y = np_to_torch(imgs).type(dtype)

    img_size = imgs.shape

    padh, padw = kernel_size[0] - 1, kernel_size[1] - 1
    '''
    x_net:
    '''
    input_depth = 8

    net_input = get_noise(
        input_depth, INPUT,
        (img_size[1] + padh, img_size[2] + padw)).type(dtype).detach()
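    # the x-net input is padded by (kernel_size - 1) in each direction so that
    # the valid convolution with the kernel estimate below recovers img_size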

    net = skip(input_depth,
               3,
               num_channels_down=[128, 128, 128, 128, 128],
               num_channels_up=[128, 128, 128, 128, 128],
               num_channels_skip=[16, 16, 16, 16, 16],
               upsample_mode='bilinear',
               need_sigmoid=True,
               need_bias=True,
               pad=pad,
               act_fun='LeakyReLU')
    net = net.type(dtype)
    '''
    k_net:
    '''
    n_k = 200
    net_input_kernel = get_noise(n_k, INPUT, (1, 1)).type(dtype).detach()
    net_input_kernel = net_input_kernel.squeeze()

    net_kernel = fcn(n_k, kernel_size[0] * kernel_size[1])
    net_kernel = net_kernel.type(dtype)
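    # net_kernel maps the n_k-dimensional noise vector to a flattened kernel
    # of kernel_size[0] * kernel_size[1] entries (reshaped into out_k_m below)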

    # Losses
    mse = torch.nn.MSELoss().type(dtype)
    L1 = torch.nn.L1Loss(reduction='sum').type(dtype)
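    # the TV regularization weight scales with the (normalized) noise level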
    lambda_ = 0.1 * sigma / normalization
    tv_loss = TVLoss(tv_loss_weight=lambda_).type(dtype)

    # optimizer
    optimizer = torch.optim.Adam([{
        'params': net.parameters()
    }, {
        'params': net_kernel.parameters(),
        'lr': 1e-4
    }],
                                 lr=lr)
    ml = [
        int(num_iter * 2000 / 5000),
        int(num_iter * 3000 / 5000),
        int(num_iter * 4000 / 5000)
    ]
    scheduler = MultiStepLR(optimizer, milestones=ml,
                            gamma=0.5)  # halve the learning rate at each milestone

    # save the initial inputs (used for the per-step input perturbation)
    net_input_saved = net_input.detach().clone()
    net_input_kernel_saved = net_input_kernel.detach().clone()

    ### start SelfDeblur
    for step in tqdm(range(num_iter)):

        # input regularization
        net_input = net_input_saved + reg_noise_std * torch.randn_like(
            net_input_saved)

        optimizer.zero_grad()

        # get the network output
        out_x = net(net_input)
        out_k = net_kernel(net_input_kernel)

        out_k_m = out_k.view(-1, 1, kernel_size[0], kernel_size[1])
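        # reblur the current estimate: each of the 3 channels is convolved
        # with the same kernel estimate (depthwise convolution via groups=3)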
        out_y = nn.functional.conv2d(out_x,
                                     out_k_m.expand((3, -1, -1, -1)),
                                     padding=0,
                                     bias=None,
                                     groups=3)

        total_loss = mse(out_y, y) + tv_loss(out_x)
        total_loss.backward()
        optimizer.step()

        # adjust the learning rate after the optimizer update
        scheduler.step()

    out_x_np = torch_to_np(out_x)
    out_x_np = out_x_np.squeeze()
    out_x_np = out_x_np[:, padh // 2:padh // 2 + img_size[1],
                        padw // 2:padw // 2 + img_size[2]]
    out_x_np = out_x_np.transpose((1, 2, 0))
    iio.write(output, out_x_np * normalization)

    if outputk:
        out_k_np = torch_to_np(out_k_m)
        out_k_np = out_k_np.squeeze()
        iio.write(outputk, out_k_np)
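
A possible way to call the deblur entry point above; the file names, the 31x31 kernel size, sigma and normalization are placeholder assumptions, not values taken from the source:

# Hypothetical invocation; every argument value here is a placeholder.
deblur('blurry.png', 31, 'deblurred.png',
       outputk='kernel.tif',
       sigma=2.55,          # assumed noise std in 8-bit units
       normalization=255,   # assumed 8-bit input range
       num_iter=5000)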