def backward(ctx, gradOutput):
    """Surrogate-gradient backward pass for the spike function.

    Approximates the (non-existent) derivative of the spike operation with an
    escape-rate style probability density
        pdf(u) = pdfScale / pdfTimeConstant * exp(-|u - threshold| / pdfTimeConstant)
    evaluated at the saved membrane potential, then correlates the scaled
    gradient with the SRM kernel via the CUDA extension.

    Returns a gradient for the first forward input only; the remaining three
    forward inputs receive ``None``.
    """
    (membranePotential, threshold, pdfTimeConstant,
     pdfScale, srmKernel, Ts) = ctx.saved_tensors
    # Distance of the membrane potential from the firing threshold.
    distance = torch.abs(membranePotential - threshold)
    spikePdf = pdfScale / pdfTimeConstant * torch.exp(-distance / pdfTimeConstant)
    gradInput = slayerCuda.corr(gradOutput * spikePdf, srmKernel, Ts)
    return gradInput, None, None, None
def backward(ctx, gradOutput):
    """Backward pass for the temporal convolution operation.

    Computes the gradient w.r.t. the input signal by correlating the incoming
    gradient with the saved filter using the CUDA extension.

    Returns ``(gradInput, gradFilter, None)`` to match the three forward
    inputs; ``gradFilter`` is always ``None`` (see NOTE below).
    """
    (filter, Ts) = ctx.saved_tensors
    # corr requires a contiguous tensor; autograd may hand us a
    # non-contiguous gradient.
    gradInput = slayerCuda.corr(gradOutput.contiguous(), filter, Ts)
    # NOTE: the original code branched on filter.requires_grad but assigned
    # None on BOTH paths — the filter gradient has never been implemented.
    # Kept as None (behavior unchanged); TODO: implement gradFilter if
    # learnable filters are ever needed.
    gradFilter = None
    return gradInput, gradFilter, None
def backward(ctx, gradOutput):
    """Backward pass for the temporal correlation operation.

    Propagates the gradient to the input signal by correlating the incoming
    gradient with the saved filter; the filter and sampling-time inputs
    receive no gradient.
    """
    filter, Ts = ctx.saved_tensors
    gradInput = slayerCuda.corr(gradOutput, filter, Ts)
    return gradInput, None, None