Example #1
def train(self,
          X,
          y,
          learning_rate=0.1,
          cost_threshold=1e-6,
          diff_threshold=1e-16,
          max_iter=10000,
          min_iter=0,
          lambda_=3):
    nclass = NeuralNetClassification(self)
    # Alternative optimizer, kept for reference: scipy.optimize.fmin_cg.
    # self.thetas = npe.reshape_for_neuralnet(fmin_cg(
    #     f=nclass._flat_cost,
    #     x0=npe.flatten(self.thetas),
    #     fprime=nclass._flat_gradients,
    #     args=(X, y),
    #     epsilon=learning_rate,
    #     # gtol=gtol,
    #     disp=False,
    #     maxiter=50), self)
    # Minimize the regularized cost with batch gradient descent over the
    # flattened weights, then unflatten the result back into per-layer
    # matrices.
    self.thetas = npe.reshape_for_neuralnet(
        gradient_descent(start_thetas=npe.flatten(self.thetas),
                         cost_func=nclass._flat_cost,
                         gradient_func=nclass._flat_gradients,
                         args=(X, y, lambda_),
                         learning_rate=learning_rate,
                         min_iter=min_iter,
                         max_iter=max_iter,
                         cost_threshold=cost_threshold,
                         diff_threshold=diff_threshold), self)
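The `gradient_descent` helper itself is not shown in this example. A minimal sketch of what it might look like, inferred from the keyword arguments used above; the exact stopping logic around `cost_threshold` and `diff_threshold` is an assumption:

import numpy as np

def gradient_descent(start_thetas, cost_func, gradient_func, args=(),
                     learning_rate=0.1, min_iter=0, max_iter=10000,
                     cost_threshold=1e-6, diff_threshold=1e-16):
    """Plain batch gradient descent over a flat parameter vector."""
    thetas = np.array(start_thetas, dtype=float)
    prev_cost = cost_func(thetas, *args)
    for i in range(max_iter):
        thetas -= learning_rate * gradient_func(thetas, *args)
        cost = cost_func(thetas, *args)
        # Stop once past min_iter and either the cost or its
        # per-iteration improvement has become negligible.
        if i >= min_iter and (cost < cost_threshold
                              or abs(prev_cost - cost) < diff_threshold):
            break
        prev_cost = cost
    return thetas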
Example #2
def _flat_forwardpropagate(self, flat_thetas, X, *args):
    # Unpack the flat weight vector into per-layer matrices, run forward
    # propagation through all layers, and return the output flattened.
    thetas = npe.reshape_for_neuralnet(flat_thetas, self.neuralnet)
    r, _, _ = self._forwardpropagate(thetas, self.neuralnet.num_layers(), X)
    return npe.flatten(r)
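Flat-vector wrappers like this one exist because generic optimizers operate on a single 1-D parameter vector rather than a list of weight matrices. A sketch of what the `npe.flatten` / `npe.reshape_for_neuralnet` pair might do, assuming one weight matrix per layer; the real `reshape_for_neuralnet` takes the network object rather than a list of shapes, so this signature is an assumption:

import numpy as np

def flatten(thetas):
    # Concatenate all weight matrices into one 1-D vector.
    return np.concatenate([t.ravel() for t in thetas])

def reshape_for_neuralnet(flat, shapes):
    # Inverse of flatten: slice the vector back into matrices of the
    # given shapes, e.g. shapes = [(25, 401), (10, 26)].
    thetas, offset = [], 0
    for rows, cols in shapes:
        size = rows * cols
        thetas.append(flat[offset:offset + size].reshape(rows, cols))
        offset += size
    return thetas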
Example #3
def numerical_gradients(self, X, y, epsilon=1e-4, lambda_=3):
    # Central-difference approximation of the gradient, one component
    # at a time: dJ/dtheta_i ~= (J(theta + eps*e_i) - J(theta - eps*e_i)) / (2*eps)
    flat_thetas = npe.flatten(self.thetas)
    numgrad = np.zeros(flat_thetas.shape)
    perturb = np.zeros(flat_thetas.shape)
    for i in range(flat_thetas.size):
        perturb[i] = epsilon
        loss1 = self.get_algorithms()._flat_cost(flat_thetas - perturb, X,
                                                 y, lambda_)
        loss2 = self.get_algorithms()._flat_cost(flat_thetas + perturb, X,
                                                 y, lambda_)
        numgrad[i] = (loss2 - loss1) / (2 * epsilon)
        perturb[i] = 0
    return numgrad
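The same central-difference idea works on any differentiable cost. A self-contained sanity check on a simple quadratic, independent of the neural-net code above:

import numpy as np

def numerical_gradient(cost, thetas, epsilon=1e-4):
    grad = np.zeros_like(thetas)
    perturb = np.zeros_like(thetas)
    for i in range(thetas.size):
        perturb[i] = epsilon
        grad[i] = (cost(thetas + perturb) - cost(thetas - perturb)) / (2 * epsilon)
        perturb[i] = 0
    return grad

cost = lambda t: np.sum(t ** 2)          # J(theta) = sum(theta_i^2)
thetas = np.array([1.0, -2.0, 3.0])
print(numerical_gradient(cost, thetas))  # ~[2, -4, 6], i.e. 2 * thetas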
Example #4
def check_gradients(self,
                    X,
                    y,
                    do_print=False,
                    imprecision=1e-8,
                    epsilon=1e-8,
                    lambda_=3):
    # Compare backpropagation gradients against numerical gradients;
    # every component must agree to within `imprecision`.
    flat_thetas = npe.flatten(self.thetas)
    algorithmic = self.get_algorithms()._flat_gradients(
        flat_thetas, X, y, lambda_)
    numerical = self.numerical_gradients(X, y, epsilon, lambda_)
    all_match = True
    for i in range(flat_thetas.size):
        is_match = abs(algorithmic[i] - numerical[i]) < imprecision
        all_match = all_match and is_match
        if do_print:
            print(i, numerical[i], algorithmic[i], is_match)
    if do_print:
        if all_match:
            print("All gradients are matching")
        else:
            print("Not all gradients are matching")
    return all_match
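An absolute tolerance like `imprecision` can be too strict for large gradient components and too loose for tiny ones. A common alternative, not used in the original code, is a single relative-difference norm over the whole gradient vector:

import numpy as np

def gradients_match(algorithmic, numerical, tol=1e-9):
    # Relative difference: ||a - n|| / (||a|| + ||n||),
    # scale-invariant in the magnitude of the gradients.
    diff = np.linalg.norm(algorithmic - numerical)
    scale = np.linalg.norm(algorithmic) + np.linalg.norm(numerical)
    return diff / scale < tol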
Example #5
def _flat_regularized_gradients(self, flat_thetas, X, y, l, *args):
    # Flat-vector wrapper: unpack the weights, compute the regularized
    # gradients per layer, and flatten them back for the optimizer.
    thetas = npe.reshape_for_neuralnet(flat_thetas, self.neuralnet)
    gradients = self._regularized_gradients(thetas, X, y, l)
    return npe.flatten(gradients)
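For reference, the usual L2 regularization step that `_regularized_gradients` presumably applies on top of the plain backpropagation gradients; both the helper name and the bias-in-column-0 convention are assumptions:

import numpy as np

def regularize_gradients(gradients, thetas, lambda_, m):
    # Add (lambda / m) * theta to each layer's gradient, skipping the
    # bias column (column 0), which is conventionally not regularized.
    regularized = []
    for grad, theta in zip(gradients, thetas):
        reg = theta.copy()
        reg[:, 0] = 0
        regularized.append(grad + (lambda_ / m) * reg)
    return regularized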