Example No. 1
0
	def forward(ctx, membranePotential, refractoryResponse, neuron, Ts):
		'''
		Compute output spikes from the membrane potential and save the
		tensors required by the surrogate-gradient backward pass.

		Arguments:
			ctx                : autograd context; tensors for backward are stored on it.
			membranePotential  : membrane potential tensor; its device and dtype are
			                     used for the saved hyperparameter tensors.
			refractoryResponse : refractory kernel passed to the CUDA spike routine.
			neuron             : dict with keys 'theta' (spiking threshold),
			                     'scaleRho' and 'tauRho' (surrogate-gradient PDF
			                     scale and time constant).
			Ts                 : sampling time step.

		Returns:
			Spike tensor produced by ``slayerCuda.getSpikes``.

		NOTE(review): the CUDA routine presumably updates membranePotential in
		place (the saved tensor reflects post-spike potentials) — confirm against
		the slayerCuda kernel.
		'''
		device = membranePotential.device
		dtype  = membranePotential.dtype

		spikes = slayerCuda.getSpikes(membranePotential, refractoryResponse, neuron['theta'], Ts)

		# Hyperparameters for the backward pass. torch.tensor already defaults to
		# requires_grad=False, so the deprecated torch.autograd.Variable wrapper
		# is unnecessary.
		pdfScale        = torch.tensor(neuron['scaleRho'], device=device, dtype=dtype)
		# tauRho is scaled by theta so the surrogate PDF width tracks the threshold.
		pdfTimeConstant = torch.tensor(neuron['tauRho'] * neuron['theta'], device=device, dtype=dtype)
		threshold       = torch.tensor(neuron['theta'], device=device, dtype=dtype)
		ctx.save_for_backward(membranePotential, threshold, pdfTimeConstant, pdfScale)

		return spikes
Example No. 2
0
    def forward(ctx, membranePotential, refractoryResponse, neuron, Ts):
        '''
        Generate spikes from the membrane potential and stash the tensors the
        surrogate-gradient backward pass needs.

        Arguments:
            ctx                : autograd context used for save_for_backward.
            membranePotential  : membrane potential tensor; supplies the device
                                 and dtype for the saved hyperparameter tensors.
            refractoryResponse : refractory kernel consumed by the CUDA routine.
            neuron             : dict providing 'theta' (threshold), 'scaleRho'
                                 and 'tauRho' (surrogate-gradient PDF scale and
                                 time constant).
            Ts                 : sampling time step.

        Returns:
            Spike tensor from ``slayerCuda.getSpikes``.
        '''
        device = membranePotential.device
        dtype  = membranePotential.dtype

        spikes = slayerCuda.getSpikes(membranePotential, refractoryResponse, neuron['theta'], Ts)

        # Backward-pass hyperparameters. Plain torch.tensor suffices: it defaults
        # to requires_grad=False, making the deprecated Variable wrapper redundant.
        pdfScale        = torch.tensor(neuron['scaleRho'], device=device, dtype=dtype)
        # tauRho is multiplied by theta so the PDF width scales with the threshold.
        pdfTimeConstant = torch.tensor(neuron['tauRho'] * neuron['theta'], device=device, dtype=dtype)
        threshold       = torch.tensor(neuron['theta'], device=device, dtype=dtype)
        ctx.save_for_backward(membranePotential, threshold, pdfTimeConstant, pdfScale)
        return spikes