Example #1
    def __init__(self, kernel_size,\
                 input_channels,\
                 output_features,\
                 bias=False,\
                 stride=1,\
                 pad='same',\
                 padType='symmetric',\
                 conv_init='dct',\
                 scale=False,\
                 normalizedWeights=False,\
                 zeroMeanWeights=False):

        super(NConv_transpose2D, self).__init__()

        kernel_size = formatInput2Tuple(kernel_size, int, 2)

        if isinstance(pad, str) and pad == 'same':
            # center of the kernel
            Kc = th.Tensor(kernel_size).add(1).div(2).floor()
            pad = (int(Kc[0])-1, kernel_size[0]-int(Kc[0]),\
                   int(Kc[1])-1, kernel_size[1]-int(Kc[1]))

        self.pad = formatInput2Tuple(pad, int, 4)
        self.padType = padType
        self.stride = formatInput2Tuple(stride, int, 2)

        # Initialize conv weights
        shape = (output_features, input_channels) + kernel_size
        self.conv_weights = nn.Parameter(th.Tensor(th.Size(shape)))
        init.convWeights(self.conv_weights, conv_init)

        # Initialize the scaling coefficients for the conv weight normalization
        if scale and normalizedWeights:
            self.scale = nn.Parameter(th.Tensor(output_features).fill_(1))
        else:
            self.register_parameter('scale', None)

        # Initialize the bias for the transposed conv layer
        if bias:
            self.bias = nn.Parameter(th.Tensor(input_channels).fill_(0))
        else:
            self.register_parameter('bias', None)
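
As a minimal standalone sketch of the 'same' padding rule used above (pure PyTorch, no pydl helpers; note the asymmetric result for even kernels):

import torch as th

def same_pad(kernel_size):
    # center of the kernel, exactly as computed in the constructor above
    Kc = th.Tensor(kernel_size).add(1).div(2).floor()
    return (int(Kc[0]) - 1, kernel_size[0] - int(Kc[0]),
            int(Kc[1]) - 1, kernel_size[1] - int(Kc[1]))

print(same_pad((5, 5)))   # (2, 2, 2, 2) -- symmetric padding for odd kernels
print(same_pad((4, 4)))   # (1, 2, 1, 2) -- asymmetric padding for even kernels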
Example #2
def nconv_transpose2D(input,weights,bias=None,stride=1,pad=0,padType='zero',\
                      dilation=1,scale=None,normalizedWeights=False,\
                      zeroMeanWeights=False):
    r"""Transpose 2D normalized convolution."""
    
    stride = formatInput2Tuple(stride,int,2)
    pad = formatInput2Tuple(pad,int,4)
    dilation = formatInput2Tuple(dilation,int,2)
    
    assert(input.dim() == 4), "The input tensor is expected to be "\
        +"4-dimensional."
    
    weights = weightNormalization(weights,scale,normalizedWeights,zeroMeanWeights)
    
    out = th.nn.functional.conv_transpose2d(input,weights,bias,stride=stride,\
                                            dilation=dilation)
    
    if sum(pad) != 0:
        # apply the transpose (adjoint) of the forward padding
        out = pad_transpose2D(out,pad,padType)
        
    return out
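
A hedged usage sketch for nconv_transpose2D; the import path is an assumption about the pydl package layout, and the expected output shape assumes pad_transpose2D crops the forward padding back out:

import torch as th
from pydl.nnLayers.functional.functional import nconv_transpose2D  # hypothetical path

x = th.randn(1, 8, 32, 32)   # B x C x H x W
w = th.randn(8, 3, 5, 5)     # conv_transpose2d weights: (in_channels, out_channels, kH, kW)
y = nconv_transpose2D(x, w, pad=(2, 2, 2, 2), padType='symmetric')
print(y.shape)               # (1, 3, 32, 32) if the pad is the adjoint of 'same' padding (assumption)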
Example #3
def nconv2D(input,weights,bias=None,stride=1,pad=0,padType='zero',\
            dilation=1,scale=None,normalizedWeights=False,\
            zeroMeanWeights=False):
    r"""2D Convolution of an input tensor of size B x C X H x W where the 
    weights of the filters are normalized. 
    """
    
    stride = formatInput2Tuple(stride,int,2)
    pad = formatInput2Tuple(pad,int,4)
    dilation = formatInput2Tuple(dilation,int,2)
    
    assert(input.dim() == 4), "The input tensor is expected to be "\
        +"4-dimensional."
    
    if sum(pad) != 0:
        input = pad2D(input,pad,padType)
    
    weights = weightNormalization(weights,scale,normalizedWeights,zeroMeanWeights)
    
    return th.nn.functional.conv2d(input,weights,bias,stride=stride,\
                                   dilation=dilation)
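
Conceptually, nconv2D is pre-padding followed by an unpadded convolution, so a 'same' pad preserves the spatial size. A plain-PyTorch equivalent using zero padding (pydl's 'symmetric' padType would fill the border differently):

import torch as th
import torch.nn.functional as F

x = th.randn(1, 3, 64, 64)
w = th.randn(16, 3, 5, 5)
x_p = F.pad(x, (2, 2, 2, 2))   # F.pad order is (left, right, top, bottom)
y = F.conv2d(x_p, w)
print(y.shape)                 # (1, 16, 64, 64): same spatial size as the input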
Example #4
def residualPreActivation(input, conv1_weights, conv2_weights, \
      prelu1_weights, prelu2_weights, bias1 = None, scale1 = None, \
      dilation1 = 1, bias2 = None, scale2 = None, dilation2 = 1, \
      normalizedWeights = False, zeroMeanWeights = False, shortcut = False,\
      padType = 'symmetric'):
        
    normalizedWeights = formatInput2Tuple(normalizedWeights,bool,2)    
    zeroMeanWeights = formatInput2Tuple(zeroMeanWeights,bool,2)    

    assert(any(prelu1_weights.numel() == i for i in (1,input.size(1)))),\
        "Dimension mismatch between input and prelu1_weights."
    assert(any(prelu2_weights.numel() == i for i in (1,conv1_weights.size(0)))),\
        "Dimension mismatch between conv1_weights and prelu2_weights."
    assert(conv1_weights.size(0) == conv2_weights.size(1)), "Dimension "+\
        "mismatch between conv1_weights and conv2_weights."
    assert(conv2_weights.size(0) == input.size(1)), "Dimension "+\
        "mismatch between conv2_weights and input."
    
    if (normalizedWeights[0] and scale1 is not None) or zeroMeanWeights[0]:
        conv1_weights = weightNormalization(conv1_weights,scale1,\
                                    normalizedWeights[0],zeroMeanWeights[0])

    if (normalizedWeights[1] and scale2 is not None) or zeroMeanWeights[1]:
        conv2_weights = weightNormalization(conv2_weights,scale2,\
                                    normalizedWeights[1],zeroMeanWeights[1])
    
    pad1 = getPad2RetainShape(conv1_weights.shape[2:4],dilation1)
    pad2 = getPad2RetainShape(conv2_weights.shape[2:4],dilation2)
    
    out = th.nn.functional.prelu(input,prelu1_weights)
    out = conv2d(pad2D(out,pad1,padType),conv1_weights,bias = bias1, dilation = dilation1)
    out = th.nn.functional.prelu(out,prelu2_weights)
    out = conv2d(pad2D(out,pad2,padType),conv2_weights,bias = bias2, dilation = dilation2)
    
    if shortcut:
        return out.add(input)
    else:
        return out
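
The dataflow above is a standard pre-activation residual block: PReLU -> pad -> conv, applied twice, plus an optional identity shortcut. A self-contained plain-PyTorch sketch of the same structure (zero padding instead of pydl's pad2D, and no weight normalization):

import torch as th
import torch.nn.functional as F

x = th.randn(1, 3, 32, 32)
w1 = th.randn(16, 3, 3, 3)   # conv1: 3 -> 16 channels
w2 = th.randn(3, 16, 3, 3)   # conv2: 16 -> 3 channels, back to the input size
a1 = th.full((1,), 0.1)      # single-parameter PReLU slopes
a2 = th.full((1,), 0.1)

out = F.conv2d(F.pad(F.prelu(x, a1), (1, 1, 1, 1)), w1)
out = F.conv2d(F.pad(F.prelu(out, a2), (1, 1, 1, 1)), w2)
out = out + x                # the shortcut=True branch
print(out.shape)             # (1, 3, 32, 32)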
Example #5
    def __init__(self, input_channels,\
                 wiener_kernel_size = (5,5),\
                 wiener_output_features = 24,\
                 numWienerFilters = 4,\
                 wienerWeightSharing = True,\
                 wienerChannelSharing = False,\
                 alphaChannelSharing = True,\
                 alpha_update = True,\
                 lb = 1e-3,\
                 ub = 1e-1,\
                 wiener_pad = True,\
                 wiener_padType = 'symmetric',\
                 edgeTaper = True,\
                 wiener_scale = True,\
                 wiener_normalizedWeights = True,\
                 wiener_zeroMeanWeights = True,\
                 kernel_size = (5,5),\
                 output_features = 32,\
                 convWeightSharing = True,\
                 pad = 'same',\
                 padType = 'symmetric',\
                 conv_init = 'dct',\
                 bias_f= True,\
                 bias_t = True,\
                 scale_f = True,\
                 scale_t = True,\
                 normalizedWeights = True,\
                 zeroMeanWeights = True,\
                 alpha_proj = True,\
                 rpa_depth = 5,\
                 rpa_kernel_size1 = (3,3),\
                 rpa_kernel_size2 = (3,3),\
                 rpa_output_features = 64,\
                 rpa_init = 'msra',\
                 rpa_bias1 = True,\
                 rpa_bias2 = True,\
                 rpa_prelu1_mc = True,\
                 rpa_prelu2_mc = True,\
                 prelu_init = 0.1,\
                 rpa_scale1 = True,\
                 rpa_scale2 = True,\
                 rpa_normalizedWeights = True,\
                 rpa_zeroMeanWeights = True,\
                 shortcut = (True,False),\
                 clb = 0,\
                 cub = 255):

        super(WienerDeblurNet, self).__init__()

        # Initialize the Wiener filters used for deconvolution
        self.wiener_pad = wiener_pad
        self.wiener_padType = wiener_padType
        self.edgetaper = edgeTaper
        self.wienerWeightSharing = wienerWeightSharing
        self.wiener_normalizedWeights = wiener_normalizedWeights
        self.wiener_zeroMeanWeights = wiener_zeroMeanWeights
        self.alpha_update = alpha_update

        assert (numWienerFilters >
                1), "More than one Wiener filter is expected."

        wchannels = 1 if wienerChannelSharing else input_channels

        wiener_kernel_size = formatInput2Tuple(wiener_kernel_size, int, 2)

        if self.wienerWeightSharing:
            shape = (wiener_output_features, wchannels) + wiener_kernel_size
        else:
            shape = (numWienerFilters, wiener_output_features,
                     wchannels) + wiener_kernel_size

        self.wiener_conv_weights = nn.Parameter(th.Tensor(th.Size(shape)))
        init.dctMultiWiener(self.wiener_conv_weights)

        if wiener_scale and wiener_normalizedWeights:
            if self.wienerWeightSharing:
                self.wiener_scale = nn.Parameter(
                    th.Tensor(wiener_output_features).fill_(0.1))
            else:
                self.wiener_scale = nn.Parameter(
                    th.Tensor(numWienerFilters,
                              wiener_output_features).fill_(0.1))
        else:
            self.register_parameter('wiener_scale', None)

        assert(lb > 0 and ub > 0),"Lower (lb) and upper (ub) bounds of the "\
            +"alpha parameter must be positive numbers."
        alpha = th.logspace(log10(lb), log10(ub),
                            numWienerFilters).unsqueeze(-1).log()
        if alphaChannelSharing:
            shape = (numWienerFilters, 1)
        else:
            alpha = alpha.repeat(1, input_channels)
            shape = (numWienerFilters, input_channels)

        if self.alpha_update:
            self.alpha = nn.Parameter(th.Tensor(th.Size(shape)))
            self.alpha.data.copy_(alpha)
        else:
            self.alpha = alpha

        # Initialize the Residual Denoising Network
        kernel_size = formatInput2Tuple(kernel_size, int, 2)

        if isinstance(pad, str) and pad == 'same':
            pad = getPad2RetainShape(kernel_size)

        self.pad = formatInput2Tuple(pad, int, 4)
        self.padType = padType
        self.normalizedWeights = normalizedWeights
        self.zeroMeanWeights = zeroMeanWeights
        self.convWeightSharing = convWeightSharing

        # Initialize conv weights
        shape = (output_features, input_channels) + kernel_size
        self.conv_weights = nn.Parameter(th.Tensor(th.Size(shape)))
        init.convWeights(self.conv_weights, conv_init)

        # Initialize the scaling coefficients for the conv weight normalization
        if scale_f and normalizedWeights:
            self.scale_f = nn.Parameter(th.Tensor(output_features).fill_(1))
        else:
            self.register_parameter('scale_f', None)

        # Initialize the bias for the conv layer
        if bias_f:
            self.bias_f = nn.Parameter(th.Tensor(output_features).fill_(0))
        else:
            self.register_parameter('bias_f', None)

        # Initialize the bias for the transpose conv layer
        if bias_t:
            self.bias_t = nn.Parameter(th.Tensor(input_channels).fill_(0))
        else:
            self.register_parameter('bias_t', None)

        if not self.convWeightSharing:
            self.convt_weights = nn.Parameter(th.Tensor(th.Size(shape)))
            init.convWeights(self.convt_weights, conv_init)

            if scale_t and normalizedWeights:
                self.scale_t = nn.Parameter(
                    th.Tensor(output_features).fill_(1))
            else:
                self.register_parameter('scale_t', None)

        numparams_prelu1 = output_features if rpa_prelu1_mc else 1
        numparams_prelu2 = rpa_output_features if rpa_prelu2_mc else 1

        self.rpa_depth = rpa_depth
        self.shortcut = formatInput2Tuple(shortcut,
                                          bool,
                                          rpa_depth,
                                          strict=False)
        self.resPA = nn.ModuleList([modules.ResidualPreActivationLayer(\
                        rpa_kernel_size1,rpa_kernel_size2,output_features,\
                        rpa_output_features,rpa_bias1,rpa_bias2,1,1,\
                        numparams_prelu1,numparams_prelu2,prelu_init,padType,\
                        rpa_scale1,rpa_scale2,rpa_normalizedWeights,\
                        rpa_zeroMeanWeights,rpa_init,self.shortcut[i]) \
                        for i in range(self.rpa_depth)])

        self.bbproj = nn.Hardtanh(min_val=clb, max_val=cub)

        # Initialize the parameter for the L2Proj layer
        if alpha_proj:
            self.alpha_proj = nn.Parameter(th.Tensor(1).fill_(0))
        else:
            self.register_parameter('alpha_proj', None)

        # Initialize the parameter for weighting the outputs of each Residual
        # Denoising Network
        self.weights = nn.Parameter(
            th.Tensor(1, numWienerFilters, 1, 1,
                      1).fill_(1 / numWienerFilters))
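
For the default bounds lb=1e-3, ub=1e-1 and numWienerFilters=4, the alpha initialization above works out to:

import torch as th
from math import log10

alpha = th.logspace(log10(1e-3), log10(1e-1), 4)  # tensor([0.0010, 0.0046, 0.0215, 0.1000])
alpha = alpha.unsqueeze(-1).log()                 # natural log, shape (4, 1)
print(alpha.squeeze())                            # approx. [-6.91, -5.37, -3.84, -2.30]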
Example #6
    def __init__(self, kernel1_size,\
                 kernel2_size,\
                 input_channels,\
                 output_features,\
                 bias1 = False,\
                 bias2 = False,\
                 dilation1 = 1,\
                 dilation2 = 1,\
                 numparams_prelu1 = 1,\
                 numparams_prelu2 = 1,\
                 prelu_init = 0.1,\
                 padType = 'symmetric',\
                 scale1 = True,\
                 scale2 = True,\
                 normalizedWeights = True,\
                 zeroMeanWeights = True,\
                 weights_init = 'msra',\
                 shortcut = False):
        
        super(ResidualPreActivationLayer, self).__init__()
        
        self.normalizedWeights = formatInput2Tuple(normalizedWeights,bool,2)
        self.zeroMeanWeights = formatInput2Tuple(zeroMeanWeights,bool,2)
        self.shortcut = shortcut
        self.dilation1 = formatInput2Tuple(dilation1,int,2)
        self.dilation2 = formatInput2Tuple(dilation2,int,2)
        self.padType = padType
        
        kernel1_size = formatInput2Tuple(kernel1_size,int,2)
        kernel2_size = formatInput2Tuple(kernel2_size,int,2)

        # Init of conv1 weights
        shape = (output_features,input_channels)+kernel1_size
        self.conv1_weights = nn.Parameter(th.Tensor(th.Size(shape)))
        init.msra(self.conv1_weights)
        # Init of conv2 weights
        shape = (input_channels,output_features)+kernel2_size
        self.conv2_weights = nn.Parameter(th.Tensor(th.Size(shape)))
        init.convWeights(self.conv2_weights,weights_init)
        
        # Initialize the scaling coefficients for the conv weights normalization
        if scale1 and self.normalizedWeights[0]:
            self.scale1 = nn.Parameter(th.Tensor(output_features).fill_(0.1))
        else:
            self.register_parameter('scale1', None)        
        
        if scale2 and self.normalizedWeights[1]:
            self.scale2 = nn.Parameter(th.Tensor(input_channels).fill_(0.1))
        else:
            self.register_parameter('scale2', None)        

        if bias1:
            self.bias1 = nn.Parameter(th.Tensor(output_features).fill_(0))
        else:
            self.register_parameter('bias1', None)    

        if bias2:
            self.bias2 = nn.Parameter(th.Tensor(input_channels).fill_(0))
        else:
            self.register_parameter('bias2', None)               

        # Init of prelu weights
        self.prelu1_weights = nn.Parameter(th.Tensor(numparams_prelu1).fill_(prelu_init))
        self.prelu2_weights = nn.Parameter(th.Tensor(numparams_prelu2).fill_(prelu_init))
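
A minimal construction sketch (the import path is a guess at the pydl layout, and the forward call assumes the module wraps residualPreActivation from Example #4):

import torch as th
from pydl.nnLayers.modules import ResidualPreActivationLayer  # hypothetical path

layer = ResidualPreActivationLayer((3, 3), (3, 3),
                                   input_channels=3,
                                   output_features=16,
                                   shortcut=True)
y = layer(th.randn(1, 3, 32, 32))  # expected to preserve the input shape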
Example #7
    def __init__(self, kernel_size,\
                 input_channels,\
                 output_features,\
                 numWienerFilters = 4,\
                 sharedWienerFilters = False,\
                 sharedChannels = True,\
                 sharedAlphaChannels = True,\
                 alpha_update = True,\
                 lb = 1e-4,\
                 ub = 1e-2,\
                 pad = True,\
                 padType = 'symmetric',\
                 edgeTaper = True,\
                 scale = True,\
                 normalizedWeights = True,\
                 zeroMeanWeights = True):

        super(WienerDeconvLayer, self).__init__()

        self.pad = pad
        self.padType = padType
        self.edgetaper = edgeTaper
        self.sharedWienerFilters = sharedWienerFilters
        self.normalizedWeights = normalizedWeights
        self.zeroMeanWeights = zeroMeanWeights

        assert(numWienerFilters > 1), "More than one Wiener filter is expected."

        # Initialize conv regularization weights
        channels = 1 if sharedChannels else input_channels

        kernel_size = formatInput2Tuple(kernel_size, int, 2)

        if sharedWienerFilters:
            shape = (output_features, channels) + kernel_size
        else:
            shape = (numWienerFilters, output_features, channels) + kernel_size

        self.conv_weights = nn.Parameter(th.Tensor(th.Size(shape)))
        init.dctMultiWiener(self.conv_weights)

        if scale and normalizedWeights:
            if sharedWienerFilters:
                self.scale = nn.Parameter(th.Tensor(output_features).fill_(0.1))
            else:
                self.scale = nn.Parameter(th.Tensor(numWienerFilters, output_features).fill_(0.1))
        else:
            self.register_parameter('scale', None)

        assert(lb > 0 and ub > 0), "Lower (lb) and upper (ub) bounds of the "\
            +"alpha parameter must be positive numbers."
        alpha = th.logspace(log10(lb), log10(ub), numWienerFilters).unsqueeze(-1).log()
        if sharedAlphaChannels:
            shape = (numWienerFilters, 1)
        else:
            alpha = alpha.repeat(1, input_channels)
            shape = (numWienerFilters, input_channels)

        if alpha_update:
            self.alpha = nn.Parameter(th.Tensor(th.Size(shape)))
            self.alpha.data.copy_(alpha)
        else:
            self.alpha = alpha
Example #8
    def __init__(self, kernel_size,\
                 input_channels,\
                 output_features,\
                 rbf_mixtures,\
                 rbf_precision,\
                 pad = 'same',\
                 convWeightSharing = True,\
                 alpha = True,\
                 lb = -100,\
                 ub = 100,\
                 padType = 'symmetric',\
                 scale_f = True,\
                 scale_t = True,\
                 normalizedWeights = True,\
                 zeroMeanWeights = True):
        
        super(ResidualRBFLayer, self).__init__()
        
        kernel_size = formatInput2Tuple(kernel_size,int,2)       
        
        if isinstance(pad,str) and pad == 'same':
            pad = getPad2RetainShape(kernel_size)

        self.pad = formatInput2Tuple(pad,int,4)
        self.padType = padType
        self.normalizedWeights = normalizedWeights
        self.zeroMeanWeights = zeroMeanWeights
        self.lb = lb
        self.ub = ub
        
        # Initialize conv weights
        shape = (output_features,input_channels)+kernel_size
        self.conv_weights = nn.Parameter(th.Tensor(th.Size(shape)))
        init.dct(self.conv_weights)
        
        if convWeightSharing:
            self.convt_weights = self.conv_weights
        else:
            self.convt_weights = nn.Parameter(th.Tensor(th.Size(shape)))
            init.dct(self.convt_weights)
        
        # Initialize the scaling coefficients for the conv weight normalization
        if scale_f and normalizedWeights:
            self.scale_f = nn.Parameter(th.Tensor(output_features).fill_(1))
        else:
            self.register_parameter('scale_f', None)
        
        if scale_t and normalizedWeights:
            if convWeightSharing and scale_f:
                self.scale_t = self.scale_f
            else:
                self.scale_t = nn.Parameter(th.Tensor(output_features).fill_(1))
        else:
            self.register_parameter('scale_t', None)
        
        # Initialize the params for the proxL2
        if alpha:
            self.alpha_prox = nn.Parameter(th.Tensor(1).fill_(0))
        else:
            self.register_parameter('alpha_prox', None)
        
        # Initialize the rbf_weights
        self.rbf_weights = nn.Parameter(th.Tensor(output_features,rbf_mixtures).fill_(1e-4))
        self.rbf_centers = th.linspace(lb,ub,rbf_mixtures).type_as(self.rbf_weights)
        self.rbf_precision = rbf_precision
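
The RBF parameters define a uniform grid of centers between lb and ub. A sketch of the Gaussian mixture such layers typically evaluate; the exact kernel used by pydl is an assumption here:

import torch as th

lb, ub, rbf_mixtures, rbf_precision = -100, 100, 5, 0.01
centers = th.linspace(lb, ub, rbf_mixtures)   # tensor([-100., -50., 0., 50., 100.])
weights = th.full((rbf_mixtures,), 1e-4)
x = th.tensor(25.0)
# assumed form: sum_i w_i * exp(-precision * (x - c_i)^2)
y = (weights * th.exp(-rbf_precision * (x - centers) ** 2)).sum()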
Example #9
File: pydl.py Project: keqpan/ExpPyTorch
    def __init__(self, kernel_size,\
                 input_channels,\
                 output_features,\
                 convWeightSharing = True,\
                 pad = 'same',\
                 padType = 'symmetric',\
                 conv_init = 'dct',\
                 bias_f= True,\
                 bias_t = True,\
                 scale_f = True,\
                 scale_t = True,\
                 normalizedWeights = True,\
                 zeroMeanWeights = True,\
                 alpha = True,\
                 rpa_depth = 5,\
                 rpa_kernel_size1 = (3,3),\
                 rpa_kernel_size2 = (3,3),\
                 rpa_output_features = 64,\
                 rpa_init = 'msra',\
                 rpa_bias1 = True,\
                 rpa_bias2 = True,\
                 rpa_prelu1_mc = True,\
                 rpa_prelu2_mc = True,\
                 prelu_init = 0.1,\
                 rpa_scale1 = True,\
                 rpa_scale2 = True,\
                 rpa_normalizedWeights = True,\
                 rpa_zeroMeanWeights = True,\
                 shortcut = (True,False),\
                 clb = 0,\
                 cub = 255):

        super(UDNetPA, self).__init__()

        kernel_size = formatInput2Tuple(kernel_size, int, 2)

        if isinstance(pad, str) and pad == 'same':
            pad = getPad2RetainShape(kernel_size)
            pad = (pad[1], pad[2])

        self.pad = formatInput2Tuple(pad, int, 2)
        self.padType = padType
        self.normalizedWeights = normalizedWeights
        self.zeroMeanWeights = zeroMeanWeights
        self.convWeightSharing = convWeightSharing

        # Initialize conv weights
        shape = (output_features, input_channels) + kernel_size
        self.conv_weights = nn.Parameter(th.Tensor(th.Size(shape)))
        init.convWeights(self.conv_weights, conv_init)

        # Initialize the scaling coefficients for the conv weight normalization
        if scale_f and normalizedWeights:
            self.scale_f = nn.Parameter(th.Tensor(output_features).fill_(1))
        else:
            self.register_parameter('scale_f', None)

        # Initialize the bias for the conv layer
        if bias_f:
            self.bias_f = nn.Parameter(th.Tensor(output_features).fill_(0))
        else:
            self.register_parameter('bias_f', None)

        # Initialize the bias for the transpose conv layer
        if bias_t:
            self.bias_t = nn.Parameter(th.Tensor(input_channels).fill_(0))
        else:
            self.register_parameter('bias_t', None)

        if not self.convWeightSharing:
            self.convt_weights = nn.Parameter(th.Tensor(th.Size(shape)))
            init.convWeights(self.convt_weights, conv_init)

            if scale_t and normalizedWeights:
                self.scale_t = nn.Parameter(
                    th.Tensor(output_features).fill_(1))
            else:
                self.register_parameter('scale_t', None)

        numparams_prelu1 = output_features if rpa_prelu1_mc else 1
        numparams_prelu2 = rpa_output_features if rpa_prelu2_mc else 1

        self.rpa_depth = rpa_depth
        self.shortcut = formatInput2Tuple(shortcut,
                                          bool,
                                          rpa_depth,
                                          strict=False)
        self.resPA = nn.Sequential(*nn.ModuleList([modules.ResidualPreActivationLayer(\
                        rpa_kernel_size1,rpa_kernel_size2,output_features,\
                        rpa_output_features,rpa_bias1,rpa_bias2,1,1,\
                        numparams_prelu1,numparams_prelu2,prelu_init,padType,\
                        rpa_scale1,rpa_scale2,rpa_normalizedWeights,\
                        rpa_zeroMeanWeights,rpa_init,self.shortcut[i]) \
                        for i in range(self.rpa_depth)]))

        self.bbproj = nn.Hardtanh(min_val=clb, max_val=cub)

        # Initialize the parameter for the L2Proj layer
        if alpha:
            self.alpha = nn.Parameter(th.Tensor(1).fill_(0))
        else:
            self.register_parameter('alpha', None)
Example #10
if isinstance(opt.lr_milestones, tuple):
    opt.lr_milestones = list(opt.lr_milestones)
else:
    opt.lr_milestones = [opt.lr_milestones]
if opt.convWeightSharing:
    str_ws = '-WS'
else:
    str_ws = '-NoWS'

if opt.color:
    strc = 'color'
else:
    strc = 'gray'

opt.kernel_size = formatInput2Tuple(opt.kernel_size, int, 2)

if isinstance(opt.stdn, tuple) and len(opt.stdn) == 1:
    stdn = opt.stdn[0]
else:
    stdn = opt.stdn

if opt.xid == '':
    opt.xid = 'UDNet'
else:
    opt.xid = 'UDNet_' + opt.xid

dirname = "{}_{}_stages:{}_kernel:{}x{}_filters:{}{}_stdn:{}_joint_train".format(opt.xid,\
                 strc,opt.stages,opt.kernel_size[0],opt.kernel_size[1],\
                 opt.num_filters,str_ws,stdn)
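
For illustration, with opt.xid = 'UDNet', grayscale input, opt.stages = 5, a 5x5 kernel, opt.num_filters = 32, weight sharing enabled, and stdn = 25 (hypothetical values), dirname evaluates to:

UDNet_gray_stages:5_kernel:5x5_filters:32-WS_stdn:25_joint_train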
Example #11
if isinstance(opt.lr_milestones,tuple): 
    opt.lr_milestones = list(opt.lr_milestones)
else:
    opt.lr_milestones = [opt.lr_milestones]

str_cws = '-CWS' if opt.convWeightSharing else ''
str_wws = '-WWS' if opt.wienerWeightSharing else ''
str_wcs = '-WCS' if opt.wienerChannelSharing and opt.color else ''
strc = 'color' if opt.color else 'gray'


opt.wiener_kernel_size = formatInput2Tuple(opt.wiener_kernel_size,int,2)
opt.kernel_size = formatInput2Tuple(opt.kernel_size,int,2)
opt.rpa_kernel_size1 = formatInput2Tuple(opt.rpa_kernel_size1,int,2)
opt.rpa_kernel_size2 = formatInput2Tuple(opt.rpa_kernel_size2,int,2)

if isinstance(opt.stdn,tuple) :
    if len(opt.stdn) == 1:
        stdn = str(opt.stdn[0])
    else:
        stdn = "("+str(opt.stdn[0])+"->"+str(opt.stdn[-1])+")"
else:
    stdn = str(opt.stdn)
    
if opt.xid == '':
    opt.xid = 'WDNet'
else:
    opt.xid = 'WDNet_' + opt.xid  # assumed completion: the snippet is truncated here; mirrors the UDNet pattern in Example #10
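
Worked through for two hypothetical settings, the stdn label above becomes:

opt.stdn = (5.0,)       ->  stdn = '5.0'
opt.stdn = (1.0, 15.0)  ->  stdn = '(1.0->15.0)'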
Example #12
    def __init__(self, kernel_size,\
                 input_channels,\
                 output_features,\
                 rbf_mixtures,\
                 rbf_precision,\
                 pad='same',\
                 convWeightSharing=True,\
                 lb=-100,\
                 ub=100,\
                 padType='symmetric',\
                 scale_f=True,\
                 scale_t=True,\
                 normalizedWeights=True,\
                 zeroMeanWeights=True,\
                 prox_param=True):

        super(ResRBFPoisLayer, self).__init__()

        kernel_size = formatInput2Tuple(kernel_size, int, 2)

        if isinstance(pad, str) and pad == 'same':
            pad = getPad2RetainShape(kernel_size)

        self.pad = formatInput2Tuple(pad, int, 4)
        self.padType = padType
        self.normalizedWeights = normalizedWeights
        self.zeroMeanWeights = zeroMeanWeights
        self.convWeightSharing = convWeightSharing
        self.lb = lb
        self.ub = ub

        # Initialize conv weights
        shape = (output_features, input_channels) + kernel_size
        self.conv_weights = nn.Parameter(th.Tensor(th.Size(shape)))
        init.dct(self.conv_weights)

        # Initialize the scaling coefficients for the conv weight normalization
        if scale_f and normalizedWeights:
            self.scale_f = nn.Parameter(th.Tensor(output_features).fill_(0.1))
        else:
            self.register_parameter('scale_f', None)

        if not self.convWeightSharing:
            self.convt_weights = nn.Parameter(th.Tensor(th.Size(shape)))
            init.dct(self.convt_weights)

            if scale_t and normalizedWeights:
                self.scale_t = nn.Parameter(
                    th.Tensor(output_features).fill_(0.1))
            else:
                self.register_parameter('scale_t', None)

        # Initialize the params for the PoisProx.
        # Projection condition multiplier.
        if prox_param:
            self.prox_param = nn.Parameter(th.Tensor(1).fill_(0.1))
        else:
            self.register_parameter('prox_param', None)

        # Initialize the rbf_weights
        self.rbf_weights = nn.Parameter(
            th.Tensor(output_features, rbf_mixtures).fill_(1e-4))
        self.rbf_centers = th.linspace(lb, ub,
                                       rbf_mixtures).type_as(self.rbf_weights)
        self.rbf_precision = rbf_precision