def learn(self, examples, max_iterations=100):
    """Learn a perceptron from [([feature], class)].

    Set the weights member variable to a list of numbers corresponding
    to the weights learned by the perceptron algorithm from the training
    examples.  The number of weights is one more than the number of
    features in each example (the extra weight is the bias term).

    Args:
        examples: a list of pairs of a list of features and a class
            variable.  Features should be numbers, class should be 0 or 1.
        max_iterations: number of iterations to train.  Gives up afterwards.

    Raises:
        NotConverged: if training did not converge within the provided
            number of iterations.

    Returns:
        This object.
    """
    # n + 1 weights (one per feature plus a bias), all initialized to 0.
    # BUG FIX: the original appended n + 1 zeros once PER EXAMPLE, producing
    # len(examples) * (n + 1) weights instead of the documented n + 1.
    num_features = len(examples[0][0]) if examples else 0
    self.weights = [0] * (num_features + 1)

    # Train until every example is classified correctly or the iteration
    # budget runs out.  BUG FIX: `max_iterations >= 0` ran one extra pass.
    while not self.isConverged(examples) and max_iterations > 0:
        for features, label in examples:
            prediction = self.predict(features)
            if prediction != label:
                # Augment with a constant-1 input so the bias weight is
                # updated too.  `features + [1]` copies, so the caller's
                # example lists are never mutated.  (The original only
                # appended the bias conditionally, which only "worked"
                # because of the oversized weight vector.)
                augmented = features + [1]
                # Move the weights toward a missed positive (p < y) and
                # away from a missed negative (p > y).  scale_and_add is
                # used as an in-place mutator, matching the original.
                common.scale_and_add(self.weights, 1 if prediction < label else -1, augmented)
        max_iterations -= 1

    if not self.isConverged(examples):
        raise NotConverged()
    return self
def learn(self, examples, max_iterations=100):
    """Learn a perceptron from [([feature], class)].

    Set the weights member variable to a list of numbers corresponding
    to the weights learned by the perceptron algorithm from the training
    examples.  The number of weights is one more than the number of
    features in each example (the extra weight is the bias term).

    Args:
        examples: a list of pairs of a list of features and a class
            variable.  Features should be numbers, class should be 0 or 1.
        max_iterations: number of iterations to train.  Gives up afterwards.

    Raises:
        NotConverged: if training did not converge within the provided
            number of iterations.

    Returns:
        This object.
    """
    # n + 1 weights, all zero.  BUG FIXES: the original created only n
    # weights (the docstring requires one extra for the bias) and used the
    # Python-2-only xrange.
    num_features = len(examples[0][0]) if examples else 0
    self.weights = [0] * (num_features + 1)

    for _ in range(max_iterations):
        converged = True
        for features, label in examples:
            # BUG FIX: call predict once per example (it was called twice),
            # and compare values with != / == rather than `is` — identity
            # checks on small ints are a CPython implementation detail.
            prediction = self.predict(features)
            if prediction != label:
                converged = False
                # Augment with a constant-1 input so the bias weight
                # participates in the update (the original updated with the
                # raw feature list, leaving no bias at all).
                augmented = features + [1]
                if prediction == 0 and label == 1:
                    common.scale_and_add(self.weights, 1, augmented)
                else:
                    common.scale_and_add(self.weights, -1, augmented)
        if converged:
            return self

    # BUG FIX: raise an instance, not the exception class itself.
    raise NotConverged()
def learn(self, examples, max_iterations=100):
    """Learn a perceptron from [([feature], class)].

    Set the weights member variable to a list of numbers corresponding
    to the weights learned by the perceptron algorithm from the training
    examples.  The number of weights is one more than the number of
    features in each example (the extra weight is the bias term).

    Args:
        examples: a list of pairs of a list of features and a class
            variable.  Features should be numbers, class should be 0 or 1.
        max_iterations: number of iterations to train.  Gives up afterwards.

    Raises:
        NotConverged: if training did not converge within the provided
            number of iterations.

    Returns:
        This object.
    """
    # n + 1 weights, all zero.  BUG FIX: the original sized the list from
    # len(examples[0]) — the (features, class) PAIR, i.e. always 2 — so it
    # made 3 weights regardless of the actual feature count.
    num_features = len(examples[0][0]) if examples else 0
    self.weights = [0] * (num_features + 1)

    iterations = 0
    while True:
        iterations += 1
        for features, label in examples:
            prediction = self.predict(features)
            # (label - prediction) is 0 for a correct prediction, so this
            # update is a no-op unless the example is misclassified.
            # BUG FIX: build the bias-augmented input as a copy instead of
            # append()/del, so the caller's feature lists are never mutated.
            # NOTE(review): keeps the original's assignment form — assumes
            # common.scale_and_add returns the updated list; confirm against
            # the common module.
            self.weights = common.scale_and_add(self.weights, label - prediction, features + [1])
        # Converged: every example now classifies correctly.
        if all(label == self.predict(features) for features, label in examples):
            break
        # BUG FIX: the original's `>` test allowed max_iterations + 1 passes.
        if iterations >= max_iterations:
            raise NotConverged()
    return self