def _conv2d_connection_update(self, **kwargs) -> None:
    # language=rst
    """
    Hebbian learning rule for ``Conv2dConnection`` subclass of
    ``AbstractConnection`` class.
    """
    n_out, _, kh, kw = self.connection.w.size()
    pad, strd = self.connection.padding, self.connection.stride
    batch = self.source.batch_size
    w_shape = self.connection.w.size()

    # Unfold pre-synaptic traces/spikes into columns; flatten post-synaptic ones.
    unfolded_x = im2col_indices(
        self.source.x, kh, kw, padding=pad, stride=strd
    )
    unfolded_s = im2col_indices(
        self.source.s.float(), kh, kw, padding=pad, stride=strd
    )
    flat_target_x = self.target.x.view(batch, n_out, -1)
    flat_target_s = self.target.s.view(batch, n_out, -1).float()

    # Pre-synaptic update: post trace paired with pre spikes.
    pre_term = self.reduction(
        torch.bmm(flat_target_x, unfolded_s.permute((0, 2, 1))), dim=0
    )
    self.connection.w += self.nu[0] * pre_term.view(w_shape)

    # Post-synaptic update: post spikes paired with pre trace.
    post_term = self.reduction(
        torch.bmm(flat_target_s, unfolded_x.permute((0, 2, 1))), dim=0
    )
    self.connection.w += self.nu[1] * post_term.view(w_shape)

    super().update()
def _conv2d_connection_update(self, **kwargs) -> None:
    # language=rst
    """
    Post-pre learning rule for ``Conv2dConnection`` subclass of
    ``AbstractConnection`` class.

    Weight updates are soft-bounded: depression scales with the distance of the
    weight from ``wmin`` and potentiation with its distance from ``wmax``.
    """
    # Get convolutional layer parameters.
    (
        out_channels,
        in_channels,
        kernel_height,
        kernel_width,
    ) = self.connection.w.size()
    padding, stride = self.connection.padding, self.connection.stride
    batch_size = self.source.batch_size

    # Reshaping spike traces and spike occurrences.
    source_x = im2col_indices(
        self.source.x, kernel_height, kernel_width, padding=padding, stride=stride
    )
    target_x = self.target.x.view(batch_size, out_channels, -1)
    source_s = im2col_indices(
        self.source.s.float(),
        kernel_height,
        kernel_width,
        padding=padding,
        stride=stride,
    )
    target_s = self.target.s.view(batch_size, out_channels, -1).float()

    update = 0

    # Pre-synaptic update: depress proportionally to (w - wmin).
    if self.nu[0]:
        pre = self.reduction(
            torch.bmm(target_x, source_s.permute((0, 2, 1))), dim=0
        )
        update -= (
            self.nu[0]
            * pre.view(self.connection.w.size())
            * (self.connection.w - self.wmin)
        )

    # Post-synaptic update: potentiate proportionally to (wmax - w).
    # BUGFIX: was `(self.wmax - self.connection.wmin)` — a constant — which
    # removed the weight dependence; must mirror the pre-synaptic branch and
    # use the current weight.
    if self.nu[1]:
        post = self.reduction(
            torch.bmm(target_s, source_x.permute((0, 2, 1))), dim=0
        )
        update += (
            self.nu[1]
            * post.view(self.connection.w.size())
            * (self.wmax - self.connection.w)
        )

    self.connection.w += update

    super().update()
def _conv2d_connection_update(self, **kwargs) -> None:
    # language=rst
    """
    MSTDPET learning rule for ``Conv2dConnection`` subclass of
    ``AbstractConnection`` class.

    Keyword arguments:

    :param Union[float, torch.Tensor] reward: Reward signal from reinforcement
        learning task.
    :param float a_plus: Learning rate (post-synaptic).
    :param float a_minus: Learning rate (pre-synaptic).
    """
    batch_size = self.source.batch_size

    # Initialize eligibility and eligibility trace (one slice per batch entry).
    if not hasattr(self, "eligibility"):
        self.eligibility = torch.zeros(batch_size, *self.connection.w.shape)
    if not hasattr(self, "eligibility_trace"):
        self.eligibility_trace = torch.zeros(batch_size, *self.connection.w.shape)

    # Parse keyword arguments.
    reward = kwargs["reward"]
    a_plus = torch.tensor(kwargs.get("a_plus", 1.0))
    a_minus = torch.tensor(kwargs.get("a_minus", -1.0))

    # Decay the eligibility trace, then fold in the point eligibility from the
    # previous timestep.
    # BUGFIX: the decay called a non-existent `error_exp(..., error=self.neederror)`
    # instead of `torch.exp`, and the trace never accumulated `self.eligibility`,
    # so it stayed zero and the weight update was always zero.
    self.eligibility_trace *= torch.exp(-self.connection.dt / self.tc_e_trace)
    self.eligibility_trace += self.eligibility / self.tc_e_trace

    # Compute weight update: reward-modulated trace, summed over the batch.
    update = reward * self.eligibility_trace
    self.connection.w += self.nu[0] * self.connection.dt * torch.sum(update, dim=0)

    out_channels, _, kernel_height, kernel_width = self.connection.w.size()
    padding, stride = self.connection.padding, self.connection.stride

    # Initialize P^+ and P^-.
    if not hasattr(self, "p_plus"):
        self.p_plus = torch.zeros(batch_size, *self.source.shape)
        self.p_plus = im2col_indices(
            self.p_plus, kernel_height, kernel_width, padding=padding, stride=stride
        )
    if not hasattr(self, "p_minus"):
        self.p_minus = torch.zeros(batch_size, *self.target.shape)
        self.p_minus = self.p_minus.view(batch_size, out_channels, -1).float()

    # Reshaping spike occurrences.
    source_s = im2col_indices(
        self.source.s.float(),
        kernel_height,
        kernel_width,
        padding=padding,
        stride=stride,
    )
    target_s = (
        self.target.s.permute(1, 2, 3, 0).view(batch_size, out_channels, -1).float()
    )

    # Update P^+ and P^- values: exponential decay plus spike contributions.
    # BUGFIX: both decays also called the non-existent `error_exp`.
    self.p_plus *= torch.exp(-self.connection.dt / self.tc_plus)
    self.p_plus += a_plus * source_s
    self.p_minus *= torch.exp(-self.connection.dt / self.tc_minus)
    self.p_minus += a_minus * target_s

    # Calculate point eligibility value.
    self.eligibility = torch.bmm(
        target_s, self.p_plus.permute((0, 2, 1))
    ) + torch.bmm(self.p_minus, source_s.permute((0, 2, 1)))
    # BUGFIX: keep the batch dimension — `.view(self.connection.w.size())`
    # fails for batch_size > 1 and must match the (batch, *w.shape)
    # initialization used above.
    self.eligibility = self.eligibility.view(batch_size, *self.connection.w.shape)

    super().update()