Example #1
    def reset_parameters(self):
        std = math.sqrt(2.0 / float(self.input_dim + self.output_dim))
        a = math.sqrt(3.0) * std  # uniform bound derived from the standard deviation

        init._no_grad_uniform_(self.W.weight, -a, a)

        if self.W.bias is not None:
            init._no_grad_zero_(self.W.bias)
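
All of the snippets on this page call the private helper `_no_grad_uniform_` from `torch.nn.init`. For reference, a minimal sketch of what it does (based on recent PyTorch sources; being a private API, its signature may change between releases):

import torch

def no_grad_uniform_(tensor, a, b):
    # Fill `tensor` in place from U(a, b) without recording the op in autograd.
    with torch.no_grad():
        return tensor.uniform_(a, b)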
Example #2
def xavier_uniform_(tensor, gain=1.):
    # type: (Tensor, float) -> Tensor
    r"""Fills the input `Tensor` with values according to the method
    described in `Understanding the difficulty of training deep feedforward
    neural networks` - Glorot, X. & Bengio, Y. (2010), using a uniform
    distribution. The resulting tensor will have values sampled from
    :math:`\mathcal{U}(-a, a)` where

    .. math::
        a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}}

    Also known as Glorot initialization.
    Args:
        tensor: an n-dimensional `torch.Tensor`
        gain: an optional scaling factor
    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
    a = math.sqrt(3.0) * std  # uniform bound that yields this standard deviation

    return _no_grad_uniform_(tensor, -a, a)
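
To sanity-check the bound, a quick test against stock `torch.nn.init` (assumes a standard PyTorch install; the shape and gain are arbitrary):

import math
import torch
import torch.nn as nn

w = torch.empty(3, 5)  # fan_in=5, fan_out=3
nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))

bound = math.sqrt(2.0) * math.sqrt(6.0 / (5 + 3))  # gain * sqrt(6/(fan_in+fan_out))
assert w.abs().max().item() <= bound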
Example #3
def xavier_uniform_small_init_(tensor, gain=1.):
    # type: (Tensor, float) -> Tensor
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / float(fan_in + 4 * fan_out))
    a = math.sqrt(3.0) * std  # uniform bound derived from the standard deviation

    return _no_grad_uniform_(tensor, -a, a)
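
For intuition, weighting `fan_out` by 4 in the denominator shrinks the bound relative to plain Xavier; e.g. with fan_in = fan_out = 512 (values chosen arbitrarily):

import math

fan_in = fan_out = 512
xavier_bound = math.sqrt(6.0 / (fan_in + fan_out))                       # ~0.0765
small_bound = math.sqrt(3.0) * math.sqrt(2.0 / (fan_in + 4 * fan_out))   # ~0.0484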
Example #4
    def reset_parameters(self):
        fan_in = sum(self.input_dims)

        std = math.sqrt(2.0 / float(fan_in + self.output_dim))
        a = math.sqrt(3.0) * std  # uniform bound derived from the standard deviation

        for transform in self.transforms:
            if self.init_type == "xavier":
                init._no_grad_uniform_(transform.weight, -a, a)
            else:
                raise NotImplementedError("he init not implemented")

        init.zeros_(self.full_bias)

        for layer_norm in self.layer_norms:
            layer_norm.reset_parameters()
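
Had the He branch been implemented, its uniform bound would follow the same pattern but with gain sqrt(2) over fan_in alone; a hedged sketch (the helper name is hypothetical, not part of the project):

import math

def he_uniform_bound(fan_in):
    # Hypothetical bound for the missing branch: std = sqrt(2 / fan_in),
    # so bound = sqrt(3) * std = sqrt(6 / fan_in), matching what
    # torch.nn.init.kaiming_uniform_ uses with nonlinearity='relu'.
    return math.sqrt(6.0 / float(fan_in))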
Example #5
File: base.py  Project: kclip/snn
    def __init__(self,
                 n_inputs,
                 n_outputs,
                 synaptic_filter=filters.raised_cosine_pillow_08,
                 n_basis_feedforward=1,
                 n_basis_feedback=1,
                 tau_ff=1,
                 tau_fb=1,
                 mu=0.5,
                 device='cpu'):
        super(SNNLayer, self).__init__()

        self.device = device

        self.n_inputs = n_inputs
        self.n_outputs = n_outputs

        ### Feedforward connections
        self.n_basis_feedforward = n_basis_feedforward
        self.feedforward_filter = synaptic_filter(
            tau_ff, self.n_basis_feedforward, mu).transpose(0, 1).to(self.device)
        self.feedforward_filter.requires_grad = False
        self.tau_ff = tau_ff

        ### Feedback connections
        self.n_basis_feedback = n_basis_feedback
        self.feedback_filter = synaptic_filter(
            tau_fb, self.n_basis_feedback, mu).transpose(0, 1).to(self.device)
        self.feedback_filter.requires_grad = False
        self.tau_fb = tau_fb

        self.ff_weights = torch.nn.parameter.Parameter(
            torch.Tensor(n_outputs, n_inputs, n_basis_feedforward))

        self.fb_weights = torch.nn.parameter.Parameter(
            torch.Tensor(n_outputs, n_basis_feedback))

        self.bias = torch.nn.parameter.Parameter(torch.Tensor(n_outputs))

        a = self.get_xavier()
        _no_grad_uniform_(self.ff_weights, -a, a)
        _no_grad_uniform_(self.fb_weights, -a, a)
        _no_grad_uniform_(self.bias, -a, a)

        self.spiking_history = torch.zeros([self.n_outputs, 2],
                                           requires_grad=False).to(self.device)

        self.potential = None

        ### Number of timesteps to keep in synaptic memory
        self.memory_length = max(self.tau_ff, self.tau_fb)
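
Note that `torch.Tensor(n, m)` allocates uninitialized storage, which is why every parameter above is filled explicitly before use; a minimal illustration (the bound is an arbitrary placeholder):

import torch
from torch.nn.init import _no_grad_uniform_

w = torch.nn.Parameter(torch.Tensor(4, 3, 2))  # uninitialized memory
_no_grad_uniform_(w, -0.1, 0.1)                # fill before the first forward pass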
Example #6
def enhanceUniform(tensor, dim, baseInitMethod, baseInitMethodParams):
	# Calculate the bound
	if baseInitMethod == 'kaiming':
		bound = getUniformKaimingBound(tensor, **baseInitMethodParams)
	elif baseInitMethod == 'xavier':
		bound = getUniformXavierBound(tensor, **baseInitMethodParams)
	else:
		raise UnsupportedInitMethod('enhanceUniform.'+ str(baseInitMethod) +' unsupported method. Use \'kaiming\' or \'xavier\'')

	if dim == 0:
		# Regular case: initialize the entire tensor from a single uniform
		# distribution, exactly as baseInitMethod would
		torchInit._no_grad_uniform_(tensor, -bound, bound)
		return

	for filt in tensor.data:
		# If dim is 1, initialize each filter from its own rescaled distribution
		if dim == 1:

			# Rescale the bound by inverse-transform sampling from a uniform draw
			unif_1 = uniform.rvs(loc=0,scale = 1, size=1)[0]
			enhanceBound = (bound*unif_1)**2
			enhanceBound = (enhanceBound*3)**0.5
			
			torchInit._no_grad_uniform_(filt, -enhanceBound, enhanceBound)
			
			#Continue to next filter
			continue

		# If we got here, dim is 2: initialize each sub-filter separately.
		# Rescale the bounds by inverse-transform sampling from uniform draws.
		unif_1 = uniform.rvs(loc=0,scale = 1, size=filt.shape[0])
		enhanceBound = (bound*unif_1)**2
		enhanceBound = (enhanceBound*3)**0.5
		
		for subfiltIdx, subfilt in enumerate(filt):
			torchInit._no_grad_uniform_(subfilt, -enhanceBound[subfiltIdx], enhanceBound[subfiltIdx])
	
	return
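
The two-step bound computation above collapses algebraically to sqrt(3) * bound * U with U ~ Uniform(0, 1); a quick equivalent formulation (placeholder bound, 8 filters):

import numpy as np
from scipy.stats import uniform

bound = 0.1                              # base Xavier/Kaiming bound (placeholder)
u = uniform.rvs(loc=0, scale=1, size=8)  # one uniform draw per filter
per_filter = np.sqrt(3.0) * bound * u    # == ((bound * u) ** 2 * 3) ** 0.5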