def train(self, X, y, learning_rate=0.1, cost_threshold=1e-6, diff_threshold=1e-16, max_iter=10000, min_iter=0, lambda_=3):
    """Fit the network's weights to (X, y) via gradient descent.

    Flattens the current theta matrices, minimizes the classification cost
    with ``gradient_descent``, and stores the reshaped result back on
    ``self.thetas``.

    Args:
        X: training inputs.
        y: training targets.
        learning_rate: step size passed to the optimizer.
        cost_threshold: stop once the cost drops below this value.
        diff_threshold: stop once successive costs differ by less than this.
        max_iter: upper bound on optimizer iterations.
        min_iter: lower bound on optimizer iterations.
        lambda_: regularization strength, forwarded to the cost/gradient
            callbacks through ``args``.
    """
    nclass = NeuralNetClassification(self)
    # NOTE(review): lambda_ is forwarded in ``args`` but the cost callback is
    # the unregularized ``_flat_cost`` (it ignores extras via *args) — confirm
    # ``_flat_gradients`` matches (regularized vs. not), otherwise the cost
    # and gradient the optimizer sees would disagree.
    self.thetas = npe.reshape_for_neuralnet(
        gradient_descent(start_thetas=npe.flatten(self.thetas),
                         cost_func=nclass._flat_cost,
                         gradient_func=nclass._flat_gradients,
                         args=(X, y, lambda_),
                         learning_rate=learning_rate,
                         min_iter=min_iter,
                         max_iter=max_iter,
                         cost_threshold=cost_threshold,
                         diff_threshold=diff_threshold),
        self)
def _flat_forwardpropagate(self, flat_thetas, X, *args):
    """Forward-propagate X using flattened weights; return the flattened output.

    Reshapes ``flat_thetas`` into the network's per-layer matrices, runs a
    full forward pass, and flattens the resulting activations for use by
    flat-vector optimizers. Extra positional args are accepted and ignored.
    """
    shaped = npe.reshape_for_neuralnet(flat_thetas, self.neuralnet)
    output, _, _ = self._forwardpropagate(shaped, self.neuralnet.num_layers(), X)
    return npe.flatten(output)
def _flat_regularized_gradients(self, flat_thetas, X, y, l, *args):
    """Regularized gradients for flattened weights, returned flattened.

    Adapter for flat-vector optimizers: reshapes ``flat_thetas`` into the
    network's matrices, delegates to ``_regularized_gradients`` with
    regularization strength ``l``, and flattens the result. Extra positional
    args are accepted and ignored.
    """
    shaped = npe.reshape_for_neuralnet(flat_thetas, self.neuralnet)
    return npe.flatten(self._regularized_gradients(shaped, X, y, l))
def _flat_regularized_cost(self, flat_thetas, X, y, l, *args):
    """Regularized cost evaluated on flattened weights.

    Reshapes ``flat_thetas`` to the network's matrices and delegates to
    ``_regularized_cost`` with regularization strength ``l``. Extra
    positional args are accepted and ignored.
    """
    shaped = npe.reshape_for_neuralnet(flat_thetas, self.neuralnet)
    return self._regularized_cost(shaped, X, y, l)
def _flat_cost(self, flat_thetas, X, y, *args):
    """Unregularized cost evaluated on flattened weights.

    Reshapes ``flat_thetas`` to the network's matrices and delegates to
    ``_cost``. Extra positional args (e.g. a lambda_ forwarded by the
    optimizer) are accepted and ignored.
    """
    return self._cost(npe.reshape_for_neuralnet(flat_thetas, self.neuralnet), X, y)