Example #1
 def reset_parameters(self):
     init._no_grad_normal_(self.warp, 0, 0.01)
     init._no_grad_normal_(self.latent_params, 0, 1 / self.out_features)
     if self.bias is not None:
         weight = self.get_weight()
         fan_in, _ = init._calculate_fan_in_and_fan_out(weight)
         bound = 1 / math.sqrt(fan_in)
         init.uniform_(self.bias, -bound, bound)
Example #2
 def reset_parameters(self):
     init._no_grad_normal_(self.w, 0, 1)
     # std assumed to be 1 here; _no_grad_normal_ requires (tensor, mean, std)
     # and the original call passed only the mean.
     init._no_grad_normal_(self.p, 10, 1)
     if self.bias is not None:
         y = self.pooling()
         fan_in, _ = init._calculate_fan_in_and_fan_out(y)
         bound = 1 / math.sqrt(fan_in)
         init.uniform_(self.bias, -bound, bound)
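
Examples #1 and #2 initialize the bias the same way nn.Linear does, except that the fan-in is computed from a derived tensor (the effective weight in #1, the pooled output in #2) rather than a stored weight matrix. Below is a minimal, self-contained sketch of that pattern built around a hypothetical LatentLinear module; the get_weight() composition and forward() are assumptions for illustration, not the original module's definition.

import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init


class LatentLinear(nn.Module):
    """Hypothetical module illustrating the fan-in-bounded bias init above."""

    def __init__(self, in_features, out_features, bias=True):
        super().__init__()
        self.out_features = out_features
        self.latent_params = nn.Parameter(torch.empty(out_features, in_features))
        self.warp = nn.Parameter(torch.empty(out_features, in_features))
        self.bias = nn.Parameter(torch.empty(out_features)) if bias else None
        self.reset_parameters()

    def get_weight(self):
        # Assumed composition; the real module computes its effective weight differently.
        return self.latent_params * (1.0 + self.warp)

    def reset_parameters(self):
        init._no_grad_normal_(self.warp, 0, 0.01)
        init._no_grad_normal_(self.latent_params, 0, 1 / self.out_features)
        if self.bias is not None:
            # Bound the bias by the fan-in of the effective weight, as nn.Linear does.
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.get_weight())
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)

    def forward(self, x):
        return F.linear(x, self.get_weight(), self.bias)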
Example #3
 def reset_parameters(self):
     if self.init_method == "kaiming":
         kaiming_uniform_(self.weight, a=math.sqrt(5))
         if self.bias is not None:
             fan_in, _ = _calculate_fan_in_and_fan_out(self.weight)
             bound = 1 / math.sqrt(fan_in)
             init.uniform_(self.bias, -bound, bound)
     elif self.init_method == "normal":
         init._no_grad_normal_(self.weight, 0, 1)
         if self.bias is not None:
             init._no_grad_normal_(self.bias, 0, 1)
     else:
         raise ValueError(f"Unsupported init method {self.init_method}.")
Example #4
def variance_scaling_(tensor, gain=1.):
    """
    VarianceScaling in https://keras.io/zh/initializers/
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = math.sqrt(gain / float(fan_in))
    var_scaled = _no_grad_normal_(tensor, 0., std)
    return var_scaled
Example #5
def variance_scaling_(tensor, gain=1.):
    # type: (Tensor, float) -> Tensor
    r"""
    initializer for SeparableConv in Regressor/Classifier
    reference: https://keras.io/zh/initializers/  VarianceScaling
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = math.sqrt(gain / float(fan_in))

    return _no_grad_normal_(tensor, 0., std)
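
Examples #4 and #5 are the same Keras-style VarianceScaling initializer (fan-in mode with a normal distribution); fan_out is computed but unused. A small usage sketch, assuming variance_scaling_ is defined as above and using arbitrary layer shapes:

import torch.nn as nn

conv = nn.Conv2d(32, 64, kernel_size=3, padding=1)
variance_scaling_(conv.weight, gain=2.0)  # gain=2.0 matches He/Kaiming-normal scaling
nn.init.zeros_(conv.bias)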
Example #6
def enhanceNormal(tensor, dim, baseInitMethod, baseInitMethodParams):
	#Calculate the std of the base init method
	if baseInitMethod == 'kaiming':
		mean, std = getNormalKaimingStd(tensor, **baseInitMethodParams)
	elif baseInitMethod == 'xavier':
		mean, std = getNormalKaimingStd(tensor, **baseInitMethodParams)
	else:
		raise UnsupportedInitMethod('enhanceNormal: ' + str(baseInitMethod) + ' is not a supported method. Use \'kaiming\' or \'xavier\'.')

	if dim == 0:		
		#Regular case: initialize the entire tensor from a single normal distribution, exactly as baseInitMethod would
		torchInit._no_grad_normal_(tensor, mean = 0, std = std)
		return

	for filt in tensor.data:
		#If dim is 1, then we initialize each filter
		if dim == 1:
			
			#Sample this filter's variance from a Gamma(1/2, 2*std**2) distribution (its mean is std**2)
			r = gengamma.rvs(a  = 0.5, c = 1, loc = 0, scale = 2*((std)**2), size=1)[0]
			
			torchInit._no_grad_normal_(filt, mean = 0, std = r**0.5)

			continue

		#Otherwise dim == 2: initialize each sub-filter separately

		#Sample one variance per sub-filter from the same gamma distribution
		r = gengamma.rvs(a  = 0.5, c = 1, loc = 0, scale = 2*((std)**2), size=filt.shape[0])
		
		for subfiltIdx, subfilt in enumerate(filt):			
			torchInit._no_grad_normal_(subfilt, mean = 0, std = r[subfiltIdx]**0.5)	

	return
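
The per-filter variance in Example #6 is drawn from gengamma with a=0.5 and c=1, which reduces to a Gamma(shape=1/2, scale=2*std**2) distribution; its mean is std**2, so each filter's variance matches the base Kaiming/Xavier variance in expectation. A quick numerical check of that reduction (SciPy assumed available):

from scipy.stats import gengamma

std = 0.1
samples = gengamma.rvs(a=0.5, c=1, loc=0, scale=2 * std**2, size=200_000)
print(samples.mean())  # approximately std**2 == 0.01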
Example #7
 def initializer(self, tensor):
     fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
     factor = (fan_in + fan_out) / 2.0
     scale = math.sqrt(self.magnitude / factor)
     _no_grad_normal_(tensor, 0, scale)
Example #8
def xavier_normal_small_init_(tensor, gain=1.):
    # type: (Tensor, float) -> Tensor
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / float(fan_in + 4 * fan_out))

    return _no_grad_normal_(tensor, 0., std)
Example #9
def _resnet_normal(tensor):
	fan_in, fan_out = init._calculate_fan_in_and_fan_out(tensor)
	std = math.sqrt(2.0 / float(fan_out))
	return init._no_grad_normal_(tensor, 0., std)
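
Example #9 is He/Kaiming-normal initialization in fan_out mode, as prescribed for ResNet convolutions. A hedged usage sketch, applying it to every Conv2d in a model via Module.apply; the model itself is a placeholder:

import math

import torch.nn as nn
from torch.nn import init


def _resnet_normal(tensor):
	# Example #9's definition, repeated so the sketch runs on its own.
	fan_in, fan_out = init._calculate_fan_in_and_fan_out(tensor)
	std = math.sqrt(2.0 / float(fan_out))
	return init._no_grad_normal_(tensor, 0., std)


def init_conv_weights(module):
	# Hypothetical helper: fan-out normal init for conv weights, zeros for biases.
	if isinstance(module, nn.Conv2d):
		_resnet_normal(module.weight)
		if module.bias is not None:
			nn.init.zeros_(module.bias)


model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Conv2d(16, 32, 3))
model.apply(init_conv_weights)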