def train(self, data, num_iters):
    """Run the classic perceptron learning loop.

    Makes at most ``num_iters`` passes over ``data`` (an iterable of
    ``(feature_vector, label)`` pairs, labels in {-1, +1}), applying the
    standard mistake-driven update whenever an example is misclassified.
    After each pass the optional ``self.onIteration`` callback is invoked
    with ``(pass_index, weights, bias, mistakes, len(data))``; a callback
    returning ``False`` stops training early.

    Returns the final ``(weights, bias)`` pair (mutated in place).
    """
    keep_going = True
    for epoch in xrange(num_iters):
        mistakes = 0
        for sample, target in data:
            score = dot(self.weights, sample) + self.bias
            # A non-positive margin means the prediction disagrees with
            # (or sits exactly on) the decision boundary: update.
            if target * score <= 0:
                for idx in xrange(self.dimensions):
                    self.weights[idx] += target * sample[idx]
                self.bias += target
                mistakes += 1
        if self.onIteration is not None:
            keep_going = self.onIteration(epoch, self.weights, self.bias, mistakes, len(data))
        # Only an explicit False aborts; any other return keeps looping.
        if keep_going is False:
            break
    return self.weights, self.bias
def train(self, data, num_iters):
    """Train an *averaged* perceptron.

    Same mistake-driven loop as the plain perceptron, but every update is
    also accumulated into ``self.cached_weights`` / ``self.cached_bias``
    scaled by ``self.counter``, so the return value is the averaged
    hypothesis rather than the final one (reduces variance of the plain
    perceptron; see Daume, CIML, averaged perceptron).

    Returns ``(averaged_weights, averaged_bias)``; ``self.weights`` and
    ``self.bias`` keep the *unaveraged* values.
    """
    shouldContinue = True
    for iteration_count in xrange(num_iters):
        num_errors = 0
        for vector, label in data:
            activation = dot(self.weights, vector) + self.bias
            if label * activation <= 0:
                # Update weights, and fold the update into the running
                # cache weighted by the current counter value.
                for i in xrange(self.dimensions):
                    self.weights[i] += label * vector[i]
                    self.cached_weights[i] += label * vector[i] * self.counter
                self.bias += label
                self.cached_bias += label * self.counter
                num_errors += 1
                # NOTE(review): counter advances only on mistakes here;
                # the textbook averaged perceptron advances it once per
                # example — confirm against the original intent.
                self.counter += 1
        if self.onIteration is not None:
            shouldContinue = self.onIteration(iteration_count, self.weights, self.bias, num_errors, len(data))
        if shouldContinue is False:
            break
    # BUG FIX: this file is Python 2 (it uses xrange), where `/` on two
    # ints truncates. `1 / self.counter` was 0 for any counter > 1, so the
    # cached-weight correction was silently dropped, and
    # `self.cached_bias / self.counter` truncated the bias correction.
    # Force float division so the averaging is actually applied.
    return (
        array_subtract(self.weights, scale_array(self.cached_weights, 1.0 / self.counter)),
        self.bias - self.cached_bias / float(self.counter),
    )
def classify(self, input_vector):
    """Classify ``input_vector`` with the *averaged* perceptron hypothesis.

    Recomputes the averaged weights/bias from the cached accumulators on
    every call, then returns ``sign`` of the affine score.
    """
    # BUG FIX: under Python 2 (this file uses xrange) `1 / self.counter`
    # is integer division and evaluates to 0 for any counter > 1, so the
    # averaged correction was never applied; `self.cached_bias /
    # self.counter` likewise truncated. Use float division.
    weights = array_subtract(self.weights, scale_array(self.cached_weights, 1.0 / self.counter))
    bias = self.bias - self.cached_bias / float(self.counter)
    return sign(dot(weights, input_vector) + bias)
def classify(self, input_vector):
    """Predict the label of ``input_vector``.

    Returns ``sign`` of the affine score computed from the model's
    current weights and bias.
    """
    score = dot(self.weights, input_vector) + self.bias
    return sign(score)
def classify(vector, weights, bias):
    """Return the sign of the affine score ``dot(vector, weights) + bias``.

    Free-function counterpart of the method version: caller supplies the
    model parameters explicitly.
    """
    return sign(dot(vector, weights) + bias)