Code example #1
 def forward(ctx, input, delay, Ts):
     # shift the input along the time (last) axis by a learnable per-neuron delay
     device = input.device
     dtype  = input.dtype
     output = slayerCuda.shift(input, delay.data, Ts)
     # keep the tensors needed for gradients w.r.t. both the input and the delay
     Ts = torch.autograd.Variable(torch.tensor(Ts, device=device, dtype=dtype), requires_grad=False)
     ctx.save_for_backward(output, delay.data, Ts)
     return output
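
Judging from the per-neuron test in code example #4, slayerCuda.shift moves each neuron's signal along the last (time) axis by delay / Ts samples. The sketch below is a minimal pure-PyTorch stand-in for that call, under the assumptions that tensors are laid out as (N, C, H, W, T) with a (C, H, W) delay, that vacated samples are zero-padded, and that delay / Ts is truncated toward zero as the test does; the CUDA kernel's exact rounding and boundary handling are not shown in these excerpts.

import torch

def reference_shift(input, delay, Ts=1.0):
    # Pure-PyTorch sketch of the assumed behaviour of slayerCuda.shift:
    # each neuron's signal is shifted along the last (time) axis by
    # int(delay / Ts) samples, with zeros filling the vacated samples.
    # input: (N, C, H, W, T), delay: (C, H, W) -- shapes assumed from the test below.
    output = torch.zeros_like(input)
    steps = (delay / Ts).to(torch.int64)   # truncation toward zero, as in the test
    for c in range(input.shape[1]):
        for h in range(input.shape[2]):
            for w in range(input.shape[3]):
                s = int(steps[c, h, w])
                if s > 0:
                    output[:, c, h, w, s:] = input[:, c, h, w, :-s]
                elif s < 0:
                    output[:, c, h, w, :s] = input[:, c, h, w, -s:]
                else:
                    output[:, c, h, w, :] = input[:, c, h, w, :]
    return output
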
Code example #2
    def backward(ctx, gradOutput):
        (output, delay, Ts) = ctx.saved_tensors
        diffFilter = torch.tensor([-1, 1], dtype=gradOutput.dtype).to(gradOutput.device) / Ts
        outputDiff = slayerCuda.conv(output, diffFilter, 1)
        # the conv operation should not be scaled by Ts. 
        # As such, the output is -( x[k+1]/Ts - x[k]/Ts ) which is what we want.
        gradDelay  = torch.sum(gradOutput * outputDiff, [0, -1], keepdim=True).reshape(gradOutput.shape[1:-1]) * Ts
        # no minus needed here, as it is included in diffFilter which is -1 * [1, -1]

        return slayerCuda.shift(gradOutput, -delay, Ts), gradDelay, None
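
The delay gradient above follows from output(t) = input(t - delay): differentiating with respect to the delay gives -d(output)/dt, so the per-neuron gradient is the sum over batch and time of gradOutput times the negated time derivative of the output, with the trailing * Ts turning the sample sum into an approximation of the time integral. The gradient with respect to the input is simply gradOutput shifted back by -delay. Below is a pure-PyTorch sketch of the delay-gradient term, mirroring what the comments say the [-1, 1]/Ts filter produces; the zero-padded boundary sample is an assumption.

import torch

def reference_grad_delay(gradOutput, output, Ts=1.0):
    # Sketch of the per-neuron delay gradient from the backward pass above,
    # assuming (N, C, H, W, T) tensors.
    # Since output(t) = input(t - delay), d(output)/d(delay) = -d(output)/dt;
    # the derivative is approximated by a negated forward difference
    # (the last sample is zero-padded here -- an assumption about the boundary).
    outputDiff = torch.zeros_like(output)
    outputDiff[..., :-1] = -(output[..., 1:] - output[..., :-1]) / Ts
    # chain rule: sum gradOutput * d(output)/d(delay) over batch and time;
    # the * Ts turns the sample sum into an approximation of the time integral
    return torch.sum(gradOutput * outputDiff, dim=[0, -1]) * Ts   # shape (C, H, W)
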
Code example #3
 def forward(ctx, input, delay, Ts=1):
     '''
     Shift the input along the time (last) axis by a fixed delay; no gradient is
     propagated to the delay (see the matching backward in code example #5).
     '''
     device = input.device
     dtype  = input.dtype
     output = slayerCuda.shift(input.contiguous(), delay, Ts)
     Ts     = torch.autograd.Variable(torch.tensor(Ts   , device=device, dtype=dtype), requires_grad=False)
     delay  = torch.autograd.Variable(torch.tensor(delay, device=device, dtype=dtype), requires_grad=False)
     ctx.save_for_backward(delay, Ts)
     return output
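
The forward/backward pairs above follow the torch.autograd.Function pattern (ctx, save_for_backward), so in use they would be invoked through .apply rather than called directly; the enclosing class names are not part of these excerpts. The toy below is a self-contained, CPU-only illustration of that calling convention, with a hypothetical class name and a pure-PyTorch shift standing in for slayerCuda; as in this no-gradient variant, the delay itself receives no gradient.

import torch

def shift_time(x, steps):
    # shift x along the last (time) axis by `steps` samples, padding with zeros
    out = torch.zeros_like(x)
    if steps > 0:
        out[..., steps:] = x[..., :-steps]
    elif steps < 0:
        out[..., :steps] = x[..., -steps:]
    else:
        out = x.clone()
    return out

class ToyDelayNoGradient(torch.autograd.Function):
    # hypothetical stand-in for the class holding the forward above;
    # slayerCuda is replaced by the pure-PyTorch shift_time helper
    @staticmethod
    def forward(ctx, input, delay, Ts=1):
        ctx.steps = int(delay / Ts)
        return shift_time(input, ctx.steps)

    @staticmethod
    def backward(ctx, gradOutput):
        # shift the gradient back by -delay; None for delay and Ts (cf. code example #5)
        return shift_time(gradOutput, -ctx.steps), None, None

x = torch.rand(2, 3, 4, 4, 10, requires_grad=True)
y = ToyDelayNoGradient.apply(x, 3.0, 1.0)
y.sum().backward()   # x.grad is the back-shifted gradient; the delay is not trained
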
Code example #4
	def testShiftPerNeuron(self):
		# inTensor, delay, Ts, N, C, H, W, device and checkShift are assumed to be
		# set up elsewhere in the test module
		delay.delay.data = torch.rand(delay.delay.data.shape).to(device) * 2 - 1
		outTensor = slayerCuda.shift(inTensor, delay.delay.data, Ts)

		netError = 0

		for n in range(N):
			for c in range(C):
				for h in range(H):
					for w in range(W):
						shift = int( (delay.delay.data[c, h, w] / Ts).item() )
						error = checkShift(inTensor[n, c, h, w], outTensor[n, c, h, w], shift)
						# print (n, c, h, w, error)
						netError += error

		# print('Shift (per neuron) Error:', netError)
		self.assertEqual(netError, 0, 'Shift (per neuron) error must be zero.')
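
The checkShift helper used in this test is not included in the excerpt. From its usage it takes one neuron's input trace, the corresponding output trace, and the expected integer shift, and returns the number of mismatching samples, so that a correct shift accumulates zero error. A hypothetical re-implementation under those assumptions:

import torch

def checkShift(inSignal, outSignal, shift):
    # Hypothetical re-implementation of the helper used in the test above:
    # count the samples where outSignal differs from inSignal shifted by `shift`
    # (vacated samples are expected to be zero).
    expected = torch.zeros_like(inSignal)
    if shift > 0:
        expected[shift:] = inSignal[:-shift]
    elif shift < 0:
        expected[:shift] = inSignal[-shift:]
    else:
        expected = inSignal.clone()
    return int((expected != outSignal).sum().item())
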
Code example #5
    def backward(ctx, gradOutput):
        '''
        Shift the incoming gradient back by the (fixed) delay; no gradients are
        returned for the delay or Ts.
        '''
        (delay, Ts) = ctx.saved_tensors
        return slayerCuda.shift(gradOutput, -delay, Ts), None, None