def forward(self, input, blurKernel, stdn):

    if self.pad:
        pad = getPad2RetainShape(blurKernel.shape)
        input = Pad2D.apply(input, pad, self.padType)

    if self.edgetaper:
        input = EdgeTaper.apply(input, blurKernel)

    if self.sharedWienerFilters:
        conv_weights = WeightNormalization.apply(
            self.conv_weights, self.scale,
            self.normalizedWeights, self.zeroMeanWeights)
    else:
        conv_weights = WeightNormalization5D.apply(
            self.conv_weights, self.scale,
            self.normalizedWeights, self.zeroMeanWeights)

    output, cstdn = WienerFilter.apply(input, blurKernel, conv_weights,
                                       self.alpha)

    # Compute the standard deviation of the remaining colored noise in the
    # output.
    cstdn = th.sqrt(
        stdn.type_as(cstdn).unsqueeze(-1).pow(2).mul(cstdn.mean(dim=2)))

    if self.pad:
        output = Crop2D.apply(output, pad)

    return output, cstdn
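
A minimal shape sketch for this Wiener forward pass. The enclosing module is not named in the snippet, so `model` below is a placeholder, and all shapes are assumptions inferred from the calls to Pad2D, EdgeTaper, and WienerFilter.

import torch as th

x = th.randn(4, 3, 64, 64)    # B x C x H x W blurred observations (assumed layout)
k = th.ones(9, 9) / 81.0      # illustrative 9x9 box PSF
stdn = th.full((4,), 5.0)     # one noise std per image (assumed)
# out, cstdn = model(x, k, stdn)
# `out` is the Wiener-filtered batch and `cstdn` the estimated standard
# deviation of the colored noise left in it.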
Example #2
    def forward(self, input, blurKernel, stdn):

        if self.wiener_pad:
            padding = getPad2RetainShape(blurKernel.shape)
            input = Pad2D.apply(input, padding, self.wiener_padType)

        if self.edgetaper:
            input = EdgeTaper.apply(input, blurKernel)

        if self.wienerWeightSharing:
            wiener_conv_weights = WeightNormalization.apply(
                self.wiener_conv_weights, self.wiener_scale,
                self.wiener_normalizedWeights, self.wiener_zeroMeanWeights)
        else:
            wiener_conv_weights = WeightNormalization5D.apply(
                self.wiener_conv_weights, self.wiener_scale,
                self.wiener_normalizedWeights, self.wiener_zeroMeanWeights)

        if not self.alpha_update:
            self.alpha = self.alpha.type_as(wiener_conv_weights)
        input, cstdn = WienerFilter.apply(input, blurKernel,
                                          wiener_conv_weights, self.alpha)

        # Compute the standard deviation of the remaining colored noise in
        # the output; cstdn is of size batch x numWienerFilters.
        cstdn = th.sqrt(
            stdn.type_as(cstdn).unsqueeze(-1).pow(2).mul(cstdn.mean(dim=2)))

        batch, numWienerFilters = input.shape[0:2]

        cstdn = cstdn.view(-1)  # size: batch*numWienerFilters
        # input now has size batch*numWienerFilters x C x H x W
        input = input.view(batch * numWienerFilters, *input.shape[2:])

        output = nconv2D(input, self.conv_weights, bias=self.bias_f,
                         stride=1, pad=self.pad, padType=self.padType,
                         dilation=1, scale=self.scale_f,
                         normalizedWeights=self.normalizedWeights,
                         zeroMeanWeights=self.zeroMeanWeights)

        for m in self.resPA:
            output = m(output)

        if self.convWeightSharing:
            output = nconv_transpose2D(output, self.conv_weights,
                                       bias=self.bias_t, stride=1,
                                       pad=self.pad, padType=self.padType,
                                       dilation=1, scale=self.scale_f,
                                       normalizedWeights=self.normalizedWeights,
                                       zeroMeanWeights=self.zeroMeanWeights)
        else:
            output = nconv_transpose2D(output, self.convt_weights,
                                       bias=self.bias_t, stride=1,
                                       pad=self.pad, padType=self.padType,
                                       dilation=1, scale=self.scale_t,
                                       normalizedWeights=self.normalizedWeights,
                                       zeroMeanWeights=self.zeroMeanWeights)

        output = L2Proj.apply(output, self.alpha_proj, cstdn)
        output = self.bbproj(input - output)
        # Undo the padding added before the Wiener filtering step. (Guarding
        # the crop avoids a NameError when wiener_pad is False, in which case
        # `padding` is never defined.)
        if self.wiener_pad:
            output = Crop2D.apply(output, padding)

        # size: batch x numWienerFilters x C x H x W
        output = output.view(batch, numWienerFilters, *output.shape[1:])

        return output.mul(self.weights).sum(dim=1)
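
Judging by the attribute names, this forward() belongs to WienerDeblurNet, whose constructor appears in Example #5 below. A hedged end-to-end sketch; the shapes and values are assumptions, not documented by the snippet.

import torch as th

y = th.randn(2, 3, 64, 64)     # blurred batch, B x C x H x W (assumed layout)
psf = th.ones(9, 9) / 81.0     # illustrative 9x9 box PSF
stdn = th.full((2,), 5.0)      # assumed per-image noise std
# net = WienerDeblurNet(input_channels=3)   # constructor shown in Example #5
# out = net(y, psf, stdn)      # weighted sum over the numWienerFilters branches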
def residualPreActivation(input, conv1_weights, conv2_weights,
                          prelu1_weights, prelu2_weights, bias1=None,
                          scale1=None, dilation1=1, bias2=None, scale2=None,
                          dilation2=1, normalizedWeights=False,
                          zeroMeanWeights=False, shortcut=False,
                          padType='symmetric'):

    normalizedWeights = formatInput2Tuple(normalizedWeights, bool, 2)
    zeroMeanWeights = formatInput2Tuple(zeroMeanWeights, bool, 2)

    assert any(prelu1_weights.numel() == i for i in (1, input.size(1))), \
        "Dimensions mismatch between input and prelu1_weights."
    assert any(prelu2_weights.numel() == i for i in (1, conv1_weights.size(0))), \
        "Dimensions mismatch between conv1_weights and prelu2_weights."
    assert conv1_weights.size(0) == conv2_weights.size(1), \
        "Dimensions mismatch between conv1_weights and conv2_weights."
    assert conv2_weights.size(0) == input.size(1), \
        "Dimensions mismatch between conv2_weights and input."

    if (normalizedWeights[0] and scale1 is not None) or zeroMeanWeights[0]:
        conv1_weights = weightNormalization(conv1_weights, scale1,
                                            normalizedWeights[0],
                                            zeroMeanWeights[0])

    if (normalizedWeights[1] and scale2 is not None) or zeroMeanWeights[1]:
        conv2_weights = weightNormalization(conv2_weights, scale2,
                                            normalizedWeights[1],
                                            zeroMeanWeights[1])

    pad1 = getPad2RetainShape(conv1_weights.shape[2:4], dilation1)
    pad2 = getPad2RetainShape(conv2_weights.shape[2:4], dilation2)

    out = th.nn.functional.prelu(input, prelu1_weights)
    out = conv2d(pad2D(out, pad1, padType), conv1_weights, bias=bias1,
                 dilation=dilation1)
    out = th.nn.functional.prelu(out, prelu2_weights)
    out = conv2d(pad2D(out, pad2, padType), conv2_weights, bias=bias2,
                 dilation=dilation2)

    if shortcut:
        return out.add(input)
    else:
        return out
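
A small sanity-check sketch for residualPreActivation. The weight shapes follow directly from the asserts above; th is torch, and conv2d, pad2D, and getPad2RetainShape are assumed to be importable from the same repo.

import torch as th

C, F, k = 3, 8, 3
x = th.randn(2, C, 32, 32)
w1 = th.randn(F, C, k, k) * 0.1   # conv1: C -> F channels
w2 = th.randn(C, F, k, k) * 0.1   # conv2: F -> C channels, so the shortcut fits
p1 = th.full((C,), 0.1)           # one PReLU slope per input channel
p2 = th.full((F,), 0.1)           # one PReLU slope per conv1 output channel

out = residualPreActivation(x, w1, w2, p1, p2, shortcut=True)
assert out.shape == x.shape       # 'same'-style padding retains the spatial size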
    def __init__(self, kernel_size,
                 input_channels,
                 output_features,
                 bias=False,
                 stride=1,
                 pad='same',
                 padType='symmetric',
                 conv_init='dct',
                 scale=False,
                 normalizedWeights=False,
                 zeroMeanWeights=False):

        super(NConv2D, self).__init__()

        kernel_size = formatInput2Tuple(kernel_size, int, 2)

        if isinstance(pad, str) and pad == 'same':
            pad = getPad2RetainShape(kernel_size)

        self.pad = formatInput2Tuple(pad, int, 4)
        self.padType = padType
        self.stride = formatInput2Tuple(stride, int, 2)

        # Initialize conv weights
        shape = (output_features, input_channels) + kernel_size
        self.conv_weights = nn.Parameter(th.Tensor(th.Size(shape)))
        init.convWeights(self.conv_weights, conv_init)

        # Initialize the scaling coefficients for the conv weight normalization
        if scale and normalizedWeights:
            self.scale = nn.Parameter(th.Tensor(output_features).fill_(1))
        else:
            self.register_parameter('scale', None)

        if bias:
            self.bias = nn.Parameter(th.Tensor(output_features).fill_(0))
        else:
            self.register_parameter('bias', None)
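
A hedged construction example for NConv2D; every argument mirrors the signature above, and the values themselves are arbitrary.

conv = NConv2D(kernel_size=5, input_channels=3, output_features=32,
               bias=True, pad='same', padType='symmetric',
               conv_init='dct', scale=True,
               normalizedWeights=True, zeroMeanWeights=True)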
Example #5
    def __init__(self, input_channels,
                 wiener_kernel_size=(5, 5),
                 wiener_output_features=24,
                 numWienerFilters=4,
                 wienerWeightSharing=True,
                 wienerChannelSharing=False,
                 alphaChannelSharing=True,
                 alpha_update=True,
                 lb=1e-3,
                 ub=1e-1,
                 wiener_pad=True,
                 wiener_padType='symmetric',
                 edgeTaper=True,
                 wiener_scale=True,
                 wiener_normalizedWeights=True,
                 wiener_zeroMeanWeights=True,
                 kernel_size=(5, 5),
                 output_features=32,
                 convWeightSharing=True,
                 pad='same',
                 padType='symmetric',
                 conv_init='dct',
                 bias_f=True,
                 bias_t=True,
                 scale_f=True,
                 scale_t=True,
                 normalizedWeights=True,
                 zeroMeanWeights=True,
                 alpha_proj=True,
                 rpa_depth=5,
                 rpa_kernel_size1=(3, 3),
                 rpa_kernel_size2=(3, 3),
                 rpa_output_features=64,
                 rpa_init='msra',
                 rpa_bias1=True,
                 rpa_bias2=True,
                 rpa_prelu1_mc=True,
                 rpa_prelu2_mc=True,
                 prelu_init=0.1,
                 rpa_scale1=True,
                 rpa_scale2=True,
                 rpa_normalizedWeights=True,
                 rpa_zeroMeanWeights=True,
                 shortcut=(True, False),
                 clb=0,
                 cub=255):

        super(WienerDeblurNet, self).__init__()

        # Initialize the Wiener filters used for deconvolution
        self.wiener_pad = wiener_pad
        self.wiener_padType = wiener_padType
        self.edgetaper = edgeTaper
        self.wienerWeightSharing = wienerWeightSharing
        self.wiener_normalizedWeights = wiener_normalizedWeights
        self.wiener_zeroMeanWeights = wiener_zeroMeanWeights
        self.alpha_update = alpha_update

        assert (numWienerFilters >
                1), "More than one Wiener filter is expected."

        wchannels = 1 if wienerChannelSharing else input_channels

        wiener_kernel_size = formatInput2Tuple(wiener_kernel_size, int, 2)

        if self.wienerWeightSharing:
            shape = (wiener_output_features, wchannels) + wiener_kernel_size
        else:
            shape = (numWienerFilters, wiener_output_features,
                     wchannels) + wiener_kernel_size

        self.wiener_conv_weights = nn.Parameter(th.Tensor(th.Size(shape)))
        init.dctMultiWiener(self.wiener_conv_weights)

        if wiener_scale and wiener_normalizedWeights:
            if self.wienerWeightSharing:
                self.wiener_scale = nn.Parameter(
                    th.Tensor(wiener_output_features).fill_(0.1))
            else:
                self.wiener_scale = nn.Parameter(
                    th.Tensor(numWienerFilters,
                              wiener_output_features).fill_(0.1))
        else:
            self.register_parameter('wiener_scale', None)

        assert lb > 0 and ub > 0, "Lower (lb) and upper (ub) bounds of the " \
            "beta parameter must be positive numbers."
        alpha = th.logspace(log10(lb), log10(ub),
                            numWienerFilters).unsqueeze(-1).log()
        if alphaChannelSharing:
            shape = (numWienerFilters, 1)
        else:
            alpha = alpha.repeat(1, input_channels)
            shape = (numWienerFilters, input_channels)

        if self.alpha_update:
            self.alpha = nn.Parameter(th.Tensor(th.Size(shape)))
            self.alpha.data.copy_(alpha)
        else:
            self.alpha = alpha

        # Initialize the Residual Denoising Network
        kernel_size = formatInput2Tuple(kernel_size, int, 2)

        if isinstance(pad, str) and pad == 'same':
            pad = getPad2RetainShape(kernel_size)

        self.pad = formatInput2Tuple(pad, int, 4)
        self.padType = padType
        self.normalizedWeights = normalizedWeights
        self.zeroMeanWeights = zeroMeanWeights
        self.convWeightSharing = convWeightSharing

        # Initialize conv weights
        shape = (output_features, input_channels) + kernel_size
        self.conv_weights = nn.Parameter(th.Tensor(th.Size(shape)))
        init.convWeights(self.conv_weights, conv_init)

        # Initialize the scaling coefficients for the conv weight normalization
        if scale_f and normalizedWeights:
            self.scale_f = nn.Parameter(th.Tensor(output_features).fill_(1))
        else:
            self.register_parameter('scale_f', None)

        # Initialize the bias for the conv layer
        if bias_f:
            self.bias_f = nn.Parameter(th.Tensor(output_features).fill_(0))
        else:
            self.register_parameter('bias_f', None)

        # Initialize the bias for the transpose conv layer
        if bias_t:
            self.bias_t = nn.Parameter(th.Tensor(input_channels).fill_(0))
        else:
            self.register_parameter('bias_t', None)

        if not self.convWeightSharing:
            self.convt_weights = nn.Parameter(th.Tensor(th.Size(shape)))
            init.convWeights(self.convt_weights, conv_init)

            if scale_t and normalizedWeights:
                self.scale_t = nn.Parameter(
                    th.Tensor(output_features).fill_(1))
            else:
                self.register_parameter('scale_t', None)

        numparams_prelu1 = output_features if rpa_prelu1_mc else 1
        numparams_prelu2 = rpa_output_features if rpa_prelu2_mc else 1

        self.rpa_depth = rpa_depth
        self.shortcut = formatInput2Tuple(shortcut,
                                          bool,
                                          rpa_depth,
                                          strict=False)
        self.resPA = nn.ModuleList([
            modules.ResidualPreActivationLayer(
                rpa_kernel_size1, rpa_kernel_size2, output_features,
                rpa_output_features, rpa_bias1, rpa_bias2, 1, 1,
                numparams_prelu1, numparams_prelu2, prelu_init, padType,
                rpa_scale1, rpa_scale2, rpa_normalizedWeights,
                rpa_zeroMeanWeights, rpa_init, self.shortcut[i])
            for i in range(self.rpa_depth)])

        self.bbproj = nn.Hardtanh(min_val=clb, max_val=cub)

        # Initialize the parameter for the L2Proj layer
        if alpha_proj:
            self.alpha_proj = nn.Parameter(th.Tensor(1).fill_(0))
        else:
            self.register_parameter('alpha_proj', None)

        # Initialize the parameter for weighting the outputs of each Residual
        # Denoising Network
        self.weights = nn.Parameter(
            th.Tensor(1, numWienerFilters, 1, 1,
                      1).fill_(1 / numWienerFilters))
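
Constructing the network with its defaults only requires the channel count; everything else (the Wiener filter bank, the residual denoiser depth, the projection bounds) keeps the values declared in the signature above.

net = WienerDeblurNet(input_channels=3)   # RGB input, all defaults
# Sharing one filter bank across the color channels could look like:
# net = WienerDeblurNet(input_channels=3, wienerChannelSharing=True)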
    def __init__(self, kernel_size,
                 input_channels,
                 output_features,
                 rbf_mixtures,
                 rbf_precision,
                 pad='same',
                 convWeightSharing=True,
                 alpha=True,
                 lb=-100,
                 ub=100,
                 padType='symmetric',
                 scale_f=True,
                 scale_t=True,
                 normalizedWeights=True,
                 zeroMeanWeights=True):

        super(ResidualRBFLayer, self).__init__()

        kernel_size = formatInput2Tuple(kernel_size, int, 2)

        if isinstance(pad, str) and pad == 'same':
            pad = getPad2RetainShape(kernel_size)

        self.pad = formatInput2Tuple(pad, int, 4)
        self.padType = padType
        self.normalizedWeights = normalizedWeights
        self.zeroMeanWeights = zeroMeanWeights
        self.lb = lb
        self.ub = ub

        # Initialize conv weights
        shape = (output_features, input_channels) + kernel_size
        self.conv_weights = nn.Parameter(th.Tensor(th.Size(shape)))
        init.dct(self.conv_weights)

        if convWeightSharing:
            self.convt_weights = self.conv_weights
        else:
            self.convt_weights = nn.Parameter(th.Tensor(th.Size(shape)))
            init.dct(self.convt_weights)

        # Initialize the scaling coefficients for the conv weight normalization
        if scale_f and normalizedWeights:
            self.scale_f = nn.Parameter(th.Tensor(output_features).fill_(1))
        else:
            self.register_parameter('scale_f', None)

        if scale_t and normalizedWeights:
            if convWeightSharing and scale_f:
                self.scale_t = self.scale_f
            else:  # no weight sharing, or shared weights without scale_f
                self.scale_t = nn.Parameter(th.Tensor(output_features).fill_(1))
        else:
            self.register_parameter('scale_t', None)

        # Initialize the params for the proxL2
        if alpha:
            self.alpha_prox = nn.Parameter(th.Tensor(1).fill_(0))
        else:
            self.register_parameter('alpha_prox', None)

        # Initialize the rbf_weights
        self.rbf_weights = nn.Parameter(
            th.Tensor(output_features, rbf_mixtures).fill_(1e-4))
        self.rbf_centers = th.linspace(lb, ub,
                                       rbf_mixtures).type_as(self.rbf_weights)
        self.rbf_precision = rbf_precision
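
A hedged construction sketch for ResidualRBFLayer; rbf_mixtures and rbf_precision have no defaults in the signature, so the values below are illustrative only.

layer = ResidualRBFLayer(kernel_size=(5, 5), input_channels=1,
                         output_features=32, rbf_mixtures=51,
                         rbf_precision=4)   # 51 RBF centers on [-100, 100] (assumed values)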
Example #7
    def __init__(self, kernel_size,
                 input_channels,
                 output_features,
                 convWeightSharing=True,
                 pad='same',
                 padType='symmetric',
                 conv_init='dct',
                 bias_f=True,
                 bias_t=True,
                 scale_f=True,
                 scale_t=True,
                 normalizedWeights=True,
                 zeroMeanWeights=True,
                 alpha=True,
                 rpa_depth=5,
                 rpa_kernel_size1=(3, 3),
                 rpa_kernel_size2=(3, 3),
                 rpa_output_features=64,
                 rpa_init='msra',
                 rpa_bias1=True,
                 rpa_bias2=True,
                 rpa_prelu1_mc=True,
                 rpa_prelu2_mc=True,
                 prelu_init=0.1,
                 rpa_scale1=True,
                 rpa_scale2=True,
                 rpa_normalizedWeights=True,
                 rpa_zeroMeanWeights=True,
                 shortcut=(True, False),
                 clb=0,
                 cub=255):

        super(UDNetPA, self).__init__()

        kernel_size = formatInput2Tuple(kernel_size, int, 2)

        if isinstance(pad, str) and pad == 'same':
            pad = getPad2RetainShape(kernel_size)
            pad = (pad[1], pad[2])

        self.pad = formatInput2Tuple(pad, int, 2)
        self.padType = padType
        self.normalizedWeights = normalizedWeights
        self.zeroMeanWeights = zeroMeanWeights
        self.convWeightSharing = convWeightSharing

        # Initialize conv weights
        shape = (output_features, input_channels) + kernel_size
        self.conv_weights = nn.Parameter(th.Tensor(th.Size(shape)))
        init.convWeights(self.conv_weights, conv_init)

        # Initialize the scaling coefficients for the conv weight normalization
        if scale_f and normalizedWeights:
            self.scale_f = nn.Parameter(th.Tensor(output_features).fill_(1))
        else:
            self.register_parameter('scale_f', None)

        # Initialize the bias for the conv layer
        if bias_f:
            self.bias_f = nn.Parameter(th.Tensor(output_features).fill_(0))
        else:
            self.register_parameter('bias_f', None)

        # Initialize the bias for the transpose conv layer
        if bias_t:
            self.bias_t = nn.Parameter(th.Tensor(input_channels).fill_(0))
        else:
            self.register_parameter('bias_t', None)

        if not self.convWeightSharing:
            self.convt_weights = nn.Parameter(th.Tensor(th.Size(shape)))
            init.convWeights(self.convt_weights, conv_init)

            if scale_t and normalizedWeights:
                self.scale_t = nn.Parameter(
                    th.Tensor(output_features).fill_(1))
            else:
                self.register_parameter('scale_t', None)

        numparams_prelu1 = output_features if rpa_prelu1_mc else 1
        numparams_prelu2 = rpa_output_features if rpa_prelu2_mc else 1

        self.rpa_depth = rpa_depth
        self.shortcut = formatInput2Tuple(shortcut,
                                          bool,
                                          rpa_depth,
                                          strict=False)
        self.resPA = nn.Sequential(*[
            modules.ResidualPreActivationLayer(
                rpa_kernel_size1, rpa_kernel_size2, output_features,
                rpa_output_features, rpa_bias1, rpa_bias2, 1, 1,
                numparams_prelu1, numparams_prelu2, prelu_init, padType,
                rpa_scale1, rpa_scale2, rpa_normalizedWeights,
                rpa_zeroMeanWeights, rpa_init, self.shortcut[i])
            for i in range(self.rpa_depth)])

        self.bbproj = nn.Hardtanh(min_val=clb, max_val=cub)

        # Initialize the parameter for the L2Proj layer
        if alpha:
            self.alpha = nn.Parameter(th.Tensor(1).fill_(0))
        else:
            self.register_parameter('alpha', None)
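
UDNetPA sets up the same residual pre-activation stack as WienerDeblurNet but without the Wiener front end; a minimal construction sketch with arbitrary sizes:

net = UDNetPA(kernel_size=(5, 5), input_channels=1, output_features=32)
# rpa_depth=5 residual pre-activation layers and a [0, 255] box projection
# (the clb/cub defaults) are created by the constructor above.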
Example #8
    def __init__(self, kernel, stdn, random_seed=20180102, filepath='',
                 train=True, color=False, shape=(256, 256), padType='valid',
                 batchSize=40, mask=None):
        r"""batchSize is used internally when creating the blurred data, so
        that system memory suffices.
        mask: if we don't want to use all of the training or testing ground
        truth data, mask should contain the indices of the images to be used.
        Otherwise it should be set to None.
        """
        assert isinstance(stdn, (float, int, tuple)), \
            "stdn is expected to be either a float, an int, or a tuple."

        # Normalize stdn to a tuple of floats. (An int would otherwise fall
        # through to the tuple conversion below and fail, since it is not
        # iterable.)
        if isinstance(stdn, (float, int)):
            stdn = (stdn, )
        stdn = tuple(float(i) for i in stdn)

        assert th.is_tensor(kernel), 'The blur kernel must be a torch tensor.'

        self.kernel = kernel
        self.padType = padType
        self.stdn = th.tensor(stdn).type_as(kernel)
        self.train = train
        self.rnd_seed = random_seed
        self.batchSize = batchSize

        crop = None
        if self.padType == "valid":
            crop = getPad2RetainShape(kernel.shape)

        fshape = (3, 2, 0, 1)

        if self.train:
            if os.path.isfile(filepath):
                f = np.load(filepath)
                self.train_gt = f['train_set'].transpose(fshape)
            else:
                currentPath = os.path.dirname(os.path.realpath(__file__))
                listPath = os.path.join(
                    currentPath,
                    "../../datasets/BSDS500/BSDS_validation_list.txt")
                imdbPath = os.path.join(currentPath, "../../datasets/BSDS500/")
                self.train_gt = gen_imdb_BSDS500_fromList(
                    color=color, listPath=listPath, imdbPath=imdbPath,
                    shape=shape, data='train').transpose(fshape)

            if mask is None:
                self.train_gt = th.from_numpy(self.train_gt).type_as(kernel)
            else:
                self.train_gt = th.from_numpy(
                    self.train_gt[mask, ...]).type_as(kernel)

            self.train_data = self.generate_BlurredNoisyData()
            if crop is not None:
                self.train_gt = crop2D(self.train_gt, crop)
        else:
            if os.path.isfile(filepath):
                f = np.load(filepath)
                self.test_gt = f['test_set'].transpose(fshape)
            else:
                currentPath = os.path.dirname(os.path.realpath(__file__))
                listPath = os.path.join(
                    currentPath,
                    "../../datasets/BSDS500/BSDS_validation_list.txt")
                imdbPath = os.path.join(currentPath, "../../datasets/BSDS500/")
                self.test_gt = gen_imdb_BSDS500_fromList(
                    color=color, listPath=listPath, imdbPath=imdbPath,
                    shape=shape, data='test').transpose(fshape)
            if mask is None:
                self.test_gt = th.from_numpy(self.test_gt).type_as(kernel)
            else:
                self.test_gt = th.from_numpy(self.test_gt[mask,
                                                          ...]).type_as(kernel)

            self.test_data = self.generate_BlurredNoisyData()
            if crop is not None:
                self.test_gt = crop2D(self.test_gt, crop)
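
The class that owns this __init__ is not visible in the snippet, so `BlurredBSDS500` below is a hypothetical name; the arguments themselves follow the signature above.

import torch as th

psf = th.ones(9, 9).div(81)   # illustrative 9x9 box blur kernel
# ds = BlurredBSDS500(psf, stdn=(2.55, 5.10), train=True, padType='valid')
# ds.train_gt / ds.train_data would then hold the (cropped) ground truth and
# the blurred, noisy observations built by generate_BlurredNoisyData().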
    def __init__(self, kernel_size,
                 input_channels,
                 output_features,
                 rbf_mixtures,
                 rbf_precision,
                 pad='same',
                 convWeightSharing=True,
                 lb=-100,
                 ub=100,
                 padType='symmetric',
                 scale_f=True,
                 scale_t=True,
                 normalizedWeights=True,
                 zeroMeanWeights=True,
                 prox_param=True):

        super(ResRBFPoisLayer, self).__init__()

        kernel_size = formatInput2Tuple(kernel_size, int, 2)

        if isinstance(pad, str) and pad == 'same':
            pad = getPad2RetainShape(kernel_size)

        self.pad = formatInput2Tuple(pad, int, 4)
        self.padType = padType
        self.normalizedWeights = normalizedWeights
        self.zeroMeanWeights = zeroMeanWeights
        self.convWeightSharing = convWeightSharing
        self.lb = lb
        self.ub = ub

        # Initialize conv weights
        shape = (output_features, input_channels) + kernel_size
        self.conv_weights = nn.Parameter(th.Tensor(th.Size(shape)))

        init.dct(self.conv_weights)

        # Initialize the scaling coefficients for the conv weight normalization
        if scale_f and normalizedWeights:
            self.scale_f = nn.Parameter(th.Tensor(output_features).fill_(0.1))
        else:
            self.register_parameter('scale_f', None)

        if not self.convWeightSharing:
            self.convt_weights = nn.Parameter(th.Tensor(th.Size(shape)))
            init.dct(self.convt_weights)

            if scale_t and normalizedWeights:
                self.scale_t = nn.Parameter(
                    th.Tensor(output_features).fill_(0.1))
            else:
                self.register_parameter('scale_t', None)

        # Initialize the params for the PoisProx.
        # Projection condition multiplier.
        if prox_param:
            self.prox_param = nn.Parameter(th.Tensor(1).fill_(0.1))
        else:
            self.register_parameter('prox_param', None)

        # Initialize the rbf_weights
        self.rbf_weights = nn.Parameter(
            th.Tensor(output_features, rbf_mixtures).fill_(1e-4))
        self.rbf_centers = th.linspace(lb, ub,
                                       rbf_mixtures).type_as(self.rbf_weights)
        self.rbf_precision = rbf_precision
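
ResRBFPoisLayer mirrors ResidualRBFLayer but replaces the proxL2 parameter with the PoisProx projection multiplier (prox_param); a hedged construction sketch with the same illustrative RBF settings as before:

layer = ResRBFPoisLayer(kernel_size=(5, 5), input_channels=1,
                        output_features=32, rbf_mixtures=51,
                        rbf_precision=4, prox_param=True)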