Example #1
def backward(ctx, grad_spike_seq, grad_v_next):
    # ctx.saved_tensors holds the grad_s_to_h and grad_v_to_h factors saved by
    # the matching forward; the compiled kernel back-propagates through all
    # time steps at once.
    grad_x, grad_v = _C_neuron.LIF_bptt(grad_spike_seq, grad_v_next,
                                        ctx.saved_tensors[0],
                                        ctx.saved_tensors[1],
                                        ctx.reciprocal_tau,
                                        ctx.detach_input)
    # One None per remaining forward argument that needs no gradient.
    return grad_x, grad_v, None, None, None, None, None, None, None
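For context, this backward belongs to a torch.autograd.Function wrapping a multi-step LIF neuron: ctx.saved_tensors[0] and ctx.saved_tensors[1] hold the grad_s_to_h and grad_v_to_h factors stashed by the matching forward, and the trailing Nones are gradients for the remaining, non-differentiable forward arguments. Below is a rough pure-Python stand-in for what _C_neuron.LIF_bptt is expected to return here; it is only a sketch built from the derivatives documented in Example #2, not the extension's actual kernel, and the handling of detach_input is an assumption.

import torch

def lif_bptt_reference(grad_spike_seq: torch.Tensor, grad_v_next: torch.Tensor,
                       grad_s_to_h: torch.Tensor, grad_v_to_h: float,
                       reciprocal_tau: float, detach_input: bool):
    # Hypothetical pure-Python sketch of the BPTT recursion that
    # _C_neuron.LIF_bptt presumably performs, based on the derivatives
    # documented below: dH_t/dX_t = 1/tau and dH_t/dV_{t-1} = 1 - 1/tau.
    grad_x_seq = torch.zeros_like(grad_spike_seq)
    grad_v = grad_v_next  # dL/dV_t, propagated backwards through time
    for t in range(grad_spike_seq.shape[0] - 1, -1, -1):
        # dL/dH_t = dL/dS_t * dS_t/dH_t + dL/dV_t * dV_t/dH_t
        grad_h = grad_spike_seq[t] * grad_s_to_h[t] + grad_v * grad_v_to_h
        # Assumption: detach_input removes the 1/tau factor from the input path.
        grad_x_seq[t] = grad_h if detach_input else grad_h * reciprocal_tau
        # dL/dV_{t-1} = dL/dH_t * (1 - 1/tau)
        grad_v = grad_h * (1.0 - reciprocal_tau)
    return grad_x_seq, grad_v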
Example #2
def LIF_bptt(grad_spike: torch.Tensor, grad_v_next: torch.Tensor,
             grad_s_to_h: torch.Tensor, grad_v_to_h: float,
             reciprocal_tau: float, detach_input: bool):
    '''
    * :ref:`API in English <LIF_bptt-en>`

    .. _LIF_bptt-cn:

    :param reciprocal_tau: :math:`\\frac{1}{\\tau}`
    :type reciprocal_tau: float

    See :ref:`bptt_template <bptt_template-cn>` for the remaining arguments.

    The multi-step version of :ref:`LIF_backward <LIF_backward-cn>`.

    The gradients are calculated by

    .. math::
        \\frac{\\partial H_{t}}{\\partial X_{t}} & = \\frac{1}{\\tau}

        \\frac{\\partial H_{t}}{\\partial V_{t-1}} & = 1 - \\frac{1}{\\tau}

    * :ref:`API in Chinese <LIF_bptt-cn>`

    .. _LIF_bptt-en:

    :param reciprocal_tau: :math:`\\frac{1}{\\tau}`
    :type reciprocal_tau: float

    See :ref:`bptt_template <bptt_template-en>` for more details about the other arguments.

    The multi-step version of :ref:`LIF_backward <LIF_backward-en>`.

    The gradients are calculated by

    .. math::
        \\frac{\\partial H_{t}}{\\partial X_{t}} & = \\frac{1}{\\tau}

        \\frac{\\partial H_{t}}{\\partial V_{t-1}} & = 1 - \\frac{1}{\\tau}

    '''
    return _C_neuron.LIF_bptt(grad_spike, grad_v_next, grad_s_to_h,
                              grad_v_to_h, reciprocal_tau, detach_input)
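
As a quick sanity check of the documented derivatives, one can differentiate the usual LIF charging equation with PyTorch autograd and compare the results against 1/tau and 1 - 1/tau. The charging equation H_t = V_{t-1} + (X_t - (V_{t-1} - V_reset)) / tau used below is an assumption (it is the standard form consistent with these derivatives); the snippet does not touch the compiled extension.

import torch

# Assumed LIF charging equation, consistent with the documented derivatives:
#   H_t = V_{t-1} + (X_t - (V_{t-1} - V_reset)) / tau
tau, v_reset = 4.0, 0.0
x = torch.tensor(0.7, requires_grad=True)
v_prev = torch.tensor(0.3, requires_grad=True)

h = v_prev + (x - (v_prev - v_reset)) / tau
h.backward()

print(x.grad)       # dH_t/dX_t     = 1/tau      -> 0.25
print(v_prev.grad)  # dH_t/dV_{t-1} = 1 - 1/tau  -> 0.75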