Example #1
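 # Assumed context (not shown on this page): a module-level
 # `import numpy as np` plus the softmax, gradient, hessian and
 # newton_raphson helpers used below, all from the esnlm codebase.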
 def fit(self, x, y, method='Newton-Raphson', max_iter=20):
     """ Learn the parameters of the model, i.e. self.params.
     
     Parameters
     ----------
     x : array of shape (nb_samples, nb_features)
     y : array of shape (nb_samples, output_dim), or a list of integer
         class labels (converted to one-hot internally)
     method : string indicating the type of optimization
         - 'Newton-Raphson' (default); any other value is forwarded to
           scipy.optimize.minimize
     max_iter : the maximum number of Newton-Raphson iterations
     
     Returns
     -------
     params : the matrix of learned parameters (also stored in self.params)
     
     Examples
     --------
         >>> import numpy as np
         >>> from esnlm.nodes import LogisticRegression
         >>> x = np.array([[1., 0.], [0., 1.]])
         >>> y = np.array([[0., 1.], [1., 0.]])
         >>> params = LogisticRegression(2, 2).fit(x, y)
     """
     if isinstance(y, list):
         # Integer class labels: convert to a one-hot encoding.
         y = np.eye(self.output_dim)[y]
     
     def _objective_function(params):
         """Log-likelihood of the data under the current parameters."""
         py_given_x = softmax(np.dot(x, params.reshape(self.params.shape)))
         # For one-hot y, prod(p**y, axis=1) selects the probability the
         # model assigns to the true class; the epsilon guards log(0).
         lik = np.prod(py_given_x**y, axis=1)
         return np.sum(np.log(lik + 1e-7))
     
     params = np.array(self.params)
     old_value = _objective_function(params)
     
     if method == 'Newton-Raphson':
         if self.verbose:
             print("... Newton-Raphson:", end=' ')
         for i in range(max_iter):
             if self.verbose:
                 print(i, end=' ')
             
             # Gradient and Hessian of the log-likelihood, with uniform
             # per-sample weights.
             post = softmax(np.dot(x, params))
             grad = gradient(x, y, post, np.ones((y.shape[0],)))
             hess = hessian(x, y, post, np.ones((y.shape[0],)))
     
             # Newton-style update; newton_raphson is assumed to use
             # _objective_function to keep the step from overshooting.
             params = newton_raphson(grad, hess, params, _objective_function)
         
             new_value = _objective_function(params)
             # Stop once the log-likelihood gains less than 1 per iteration
             # (the `+ 1` acts as a coarse convergence tolerance).
             if new_value < old_value + 1:
                 break
             old_value = new_value
         
         self.params = params.reshape(self.params.shape)
         if self.verbose:
             print("The End.")
     
     else:
         from scipy.optimize import minimize
         
         # scipy.optimize.minimize minimizes, so the log-likelihood and its
         # derivatives are negated.
         def obj(params):
             return -_objective_function(params)
         
         def grd(params):
             post = softmax(np.dot(x, params.reshape(self.params.shape)))
             return -gradient(x, y, post, np.ones((y.shape[0],))).squeeze()
         
         def hsn(params):
             post = softmax(np.dot(x, params.reshape(self.params.shape)))
             return -hessian(x, y, post, np.ones((y.shape[0],)))
         
         # minimize works on a flat parameter vector; note that 'xtol' is
         # only recognized by some methods (e.g. Newton-CG).
         params = params.reshape(params.size)
         res = minimize(obj, params, jac=grd, hess=hsn, method=method,
                        options={'maxiter': 100, 'xtol': 1e-4, 'disp': True})
         params = res.x
         self.params = params.reshape(self.params.shape)
     
     # Return the matrix-shaped parameters in both branches (the scipy
     # branch otherwise leaves `params` flattened).
     return self.params
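
The snippet relies on four helpers (softmax, gradient, hessian, newton_raphson) defined elsewhere in esnlm and not shown on this page. Below is a minimal, hypothetical sketch of what they could look like for multinomial logistic regression, assuming a parameter matrix of shape (nb_features, output_dim) flattened row-major and per-sample weights; the actual esnlm implementations may differ.

import numpy as np

def softmax(a):
    # Row-wise softmax; subtracting the row max avoids overflow in exp.
    e = np.exp(a - a.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

def gradient(x, y, post, weights):
    # Gradient of the weighted log-likelihood w.r.t. the (d, k) parameter
    # matrix: X^T diag(w) (Y - P), returned as a flattened column vector.
    g = np.dot(x.T, weights[:, None] * (y - post))
    return g.reshape(-1, 1)

def hessian(x, y, post, weights):
    # Hessian of the weighted log-likelihood: a (d*k, d*k) matrix assembled
    # from per-sample blocks -w_i * kron(x_i x_i^T, diag(p_i) - p_i p_i^T).
    d, k = x.shape[1], post.shape[1]
    H = np.zeros((d * k, d * k))
    for xi, pi, wi in zip(x, post, weights):
        S = np.diag(pi) - np.outer(pi, pi)
        H -= wi * np.kron(np.outer(xi, xi), S)
    return H

def newton_raphson(grad, hess, params, objective):
    # One damped Newton step for a maximization problem: solve H s = g,
    # then halve the step while it makes the objective worse.
    step = np.linalg.solve(hess, grad.ravel()).reshape(params.shape)
    t, current = 1.0, objective(params)
    while t > 1e-4 and objective(params - t * step) < current:
        t /= 2.0
    return params - t * step

Passing _objective_function into newton_raphson suggests the solver damps its step against the objective, as sketched above; fit itself re-evaluates the objective only in its stopping test.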
         
         