def asynchronous_chl(self, checkpoint=None, skip_learning=False
                     ) -> tuple[np.ndarray, np.ndarray, np.ndarray, int, dict]:
    """Learns associations by applying CHL asynchronously."""
    if checkpoint:
        epoch = checkpoint['epoch']
        E = checkpoint['E']
        P = checkpoint['P']
        A = checkpoint['A']
    else:
        self.start_time = time.time()
        self.time_since_statistics = self.start_time
        self.data = dict()

        # Error values. Initial value > min_error so the loop runs at least once.
        E = [self.config.min_error * np.size(self.patterns, 0) + 1]
        P = [0]  # Number of patterns correct
        A = [0]  # Number of analogies correct
        epoch = 0

    while (E[-1] > self.config.min_error * np.size(self.patterns, 0)
           and epoch < self.config.max_epochs):
        try:
            # Calculate and record statistics for this epoch.
            # (collect_statistics is assumed to be a plain callback stored on
            # the instance, hence the explicit `self` argument.)
            self.collect_statistics(self, E, P, A, epoch, self.data)

            if skip_learning:
                break

            for p in self.patterns:
                # I cannot get it to converge with positive phase first.
                # Maybe that's ok. Movellan (1990) suggests it won't converge
                # without negative phase first. Also, Leech PhD (2008)
                # Simulation 5 does negative first, and so does
                # Detorakis et al (2019).

                # add noise
                p = add_noise(p, self.config.noise)

                if self.config.learn_patterns_explicitly:
                    # negative phase (expectation)
                    if self.config.unlearn_clamp == 'input':
                        self.unlearn(p)
                    elif self.config.unlearn_clamp == 'transformation':
                        self.unlearn_t(p)
                    else:  # 'none'
                        self.unlearn_x(p)
                    self.update_weights_negative()
                    if self.config.adaptive_bias:
                        self.update_biases_negative()

                    # positive phase (confirmation)
                    self.learn(p)
                    self.update_weights_positive()
                    if self.config.adaptive_bias:
                        self.update_biases_positive()

            epoch += 1
        except KeyboardInterrupt:
            break

    return E[1:], P[1:], A[1:], epoch, self.data
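
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the class above): the split
# update_weights_negative() / update_weights_positive() calls are assumed to
# implement the two halves of the contrastive Hebbian rule,
#     dW = eta * (a_plus b_plus^T - a_minus b_minus^T),
# applied as an anti-Hebbian step after the free (negative) phase and a
# Hebbian step after the clamped (positive) phase, per Movellan (1990).
# All names below (`eta`, `w`, `pre`, `post`) are assumptions for
# illustration only; the real methods may differ.

def chl_negative_step(w: np.ndarray, pre: np.ndarray, post: np.ndarray,
                      eta: float) -> np.ndarray:
    """Anti-Hebbian half-step: unlearn the network's own prediction."""
    return w - eta * np.outer(pre, post)


def chl_positive_step(w: np.ndarray, pre: np.ndarray, post: np.ndarray,
                      eta: float) -> np.ndarray:
    """Hebbian half-step: learn the clamped (target) co-activation."""
    return w + eta * np.outer(pre, post)
# ---------------------------------------------------------------------------
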
def synchronous_chl(self, checkpoint=None, skip_learning=False
                    ) -> tuple[np.ndarray, np.ndarray, np.ndarray, int, dict]:
    """Learns associations by applying CHL synchronously."""
    if checkpoint:
        epoch = checkpoint['epoch']
        E = checkpoint['E']
        P = checkpoint['P']
        A = checkpoint['A']
    else:
        self.start_time = time.time()
        self.time_since_statistics = self.start_time
        self.data = dict()

        # Error values. Initial value > min_error so the loop runs at least once.
        E = [self.config.min_error * np.size(self.patterns, 0) + 1]
        P = [0]  # Number of patterns correct
        A = [0]  # Number of analogies correct
        epoch = 0

    def snapshot():
        # Copy the current activation of every layer.
        return (np.copy(self.x), np.copy(self.t), np.copy(self.h),
                np.copy(self.o), np.copy(self.z))

    while (E[-1] > self.config.min_error * np.size(self.patterns, 0)
           and epoch < self.config.max_epochs):
        try:
            # Calculate and record statistics for this epoch.
            self.collect_statistics(self, E, P, A, epoch, self.data)

            if skip_learning:
                break

            for p in self.patterns:
                # add noise
                p = add_noise(p, self.config.noise)

                # positive phase (confirmation)
                self.learn(p)
                x_plus, t_plus, h_plus, o_plus, z_plus = snapshot()

                # negative phase (expectation)
                self.unlearn(p, epoch)
                x_minus, t_minus, h_minus, o_minus, z_minus = snapshot()

                self.update_weights_synchronous(
                    x_plus, x_minus, t_plus, t_minus, h_plus, h_minus,
                    o_plus, o_minus, z_plus, z_minus)
                if self.config.adaptive_bias:
                    self.update_biases_synchronous(
                        x_plus, x_minus, t_plus, t_minus, h_plus, h_minus,
                        o_plus, o_minus, z_plus, z_minus)

                if self.config.learn_transformations_explicitly:
                    # Second pass with the transformation clamped.
                    # positive phase (confirmation)
                    self.learn(p)
                    x_plus, t_plus, h_plus, o_plus, z_plus = snapshot()

                    # negative phase (expectation)
                    self.unlearn_t(p)
                    x_minus, t_minus, h_minus, o_minus, z_minus = snapshot()

                    self.update_weights_synchronous(
                        x_plus, x_minus, t_plus, t_minus, h_plus, h_minus,
                        o_plus, o_minus, z_plus, z_minus)
                    if self.config.adaptive_bias:
                        self.update_biases_synchronous(
                            x_plus, x_minus, t_plus, t_minus, h_plus, h_minus,
                            o_plus, o_minus, z_plus, z_minus)

            epoch += 1
        except KeyboardInterrupt:
            break

    return E[1:], P[1:], A[1:], epoch, self.data
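
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the class above): for a single pair of
# layers, update_weights_synchronous() is assumed to apply the combined
# contrastive Hebbian delta computed from the saved plus/minus snapshots in
# one step, rather than as two half-steps. `w` and `eta` are assumptions for
# illustration only.

def chl_synchronous_step(w: np.ndarray, pre_plus: np.ndarray,
                         post_plus: np.ndarray, pre_minus: np.ndarray,
                         post_minus: np.ndarray, eta: float) -> np.ndarray:
    """One combined CHL update: dW = eta * (plus outer - minus outer)."""
    return w + eta * (np.outer(pre_plus, post_plus)
                      - np.outer(pre_minus, post_minus))

# Hypothetical usage of the methods above (the constructor and config fields
# are assumptions; adapt to the actual Network/Config definitions):
#
#     net = Network(config, patterns)
#     E, P, A, epoch, data = net.synchronous_chl()
#     # Resume from where a previous run stopped:
#     E, P, A, epoch, data = net.synchronous_chl(
#         checkpoint={'epoch': epoch, 'E': E, 'P': P, 'A': A})
# ---------------------------------------------------------------------------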