Example #1
def gradient(self, p):
    # Gradient of the smooth approximation to the l1 norm of diffs * p.
    derivs = safe_dot(self.diffs, p)
    q = derivs / np.sqrt(derivs**2 + self.beta)
    grad = safe_dot(self.diffs.T, q)
    # safe_dot may return a matrix; flatten it to a 1d array.
    if len(grad.shape) > 1:
        grad = np.array(grad.T).ravel()
    return grad
Example #2
def value(self, p):
    # Sum of squared residuals, weighted by self.weights when given.
    pred = self.from_cache(p, 'predict')
    residuals = self.data - pred
    if self.weights is None:
        return np.linalg.norm(residuals)**2
    else:
        return safe_dot(residuals.T, safe_dot(self.weights, residuals))
Example #3
def minimize(self, objective):
    stats = dict(method="Newton's method",
                 iterations=0,
                 objective=[])
    p = np.array(self.initial)
    value = objective.value(p)
    stats['objective'].append(value)
    for iteration in range(self.maxit):
        grad = objective.gradient(p)
        hess = objective.hessian(p)
        if self.precondition:
            # Jacobi (diagonal) preconditioning of the Newton system.
            diag = np.abs(safe_diagonal(hess))
            diag[diag < 1e-10] = 1e-10
            precond = sp.diags(1/diag, 0).tocsr()
            hess = safe_dot(precond, hess)
            grad = safe_dot(precond, grad)
        # Newton step: solve hess * dp = -grad and update the estimate.
        p = p + safe_solve(hess, -grad)
        new_value = objective.value(p)
        stats['objective'].append(new_value)
        stats['iterations'] += 1
        # Stop when the objective increases or its relative change is
        # smaller than the tolerance.
        stop = (new_value > value
                or abs(new_value - value) < self.tol*abs(value))
        if stop:
            break
        value = new_value
    if iteration == self.maxit - 1:
        warnings.warn(
            'Newton optimizer exited because maximum iterations reached. '
            + 'Might not have achieved convergence. '
            + 'Try increasing the maximum number of iterations allowed.',
            RuntimeWarning)
    self.stats = stats
    return p
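To see what the loop in Example #3 is doing, here is a minimal, self-contained sketch of the same Newton iteration applied to a simple quadratic objective. Plain NumPy calls stand in for safe_dot and safe_solve, the objective is a made-up quadratic rather than anything from the library, and the stopping rule mirrors the relative-change test above; treat it as an illustration, not as the library's API.

import numpy as np

# A made-up quadratic objective 0.5 * p.T A p - b.T p; its minimum is A^-1 b.
A = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([1.0, 1.0])

def value(p):
    return 0.5 * p @ A @ p - b @ p

def gradient(p):
    return A @ p - b

def hessian(p):
    return A

p = np.zeros(2)
old = value(p)
for iteration in range(20):
    # Newton step: solve H dp = -g and update the estimate.
    p = p + np.linalg.solve(hessian(p), -gradient(p))
    new = value(p)
    # Same stopping rule as above: objective increased or changed very little.
    if new > old or abs(new - old) < 1e-8*abs(old):
        break
    old = new

print(p)                      # converges in one step for a quadratic
print(np.linalg.solve(A, b))  # same point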
Example #4
def gradient_at_null(self):
    # Need the gradient evaluated at the null vector for the linear
    # least-squares solver.
    jac = self.from_cache(None, 'jacobian')
    if self.weights is None:
        grad = -2 * safe_dot(jac.T, self.data)
    else:
        grad = -2 * safe_dot(jac.T, safe_dot(self.weights, self.data))
    return self._grad_to_1d(grad)
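For context on the comment above: when the forward model is linear, f(p) = J p, the gradient of the (weighted) misfit in Example #5 below evaluated at the null vector reduces to

    \nabla\phi(0) = -2\,J^T W d,

so, using the quantities of Examples #4 and #6, the linear least-squares solution satisfies the normal equations hessian(p) \, p = -gradient_at_null(). Here d stands for self.data and W for self.weights (the identity when it is None); this is a reading of the snippets, not a description of the solver internals.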
Example #5
def gradient(self, p):
    # Gradient of the (weighted) sum of squared residuals: -2 * J.T * r.
    jac = self.from_cache(p, 'jacobian')
    pred = self.from_cache(p, 'predict')
    residuals = self.data - pred
    if self.weights is None:
        grad = -2 * safe_dot(jac.T, residuals)
    else:
        grad = -2 * safe_dot(jac.T, safe_dot(self.weights, residuals))
    return self._grad_to_1d(grad)
Example #6
def hessian(self, p):
    # Gauss-Newton approximation of the Hessian: 2 * J.T * J.
    jac = self.from_cache(p, 'jacobian')
    if self.weights is None:
        return 2 * safe_dot(jac.T, jac)
    else:
        return 2 * safe_dot(jac.T, safe_dot(self.weights, jac))
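Taken together, Examples #2, #5, and #6 implement a weighted least-squares data misfit and its derivatives. Writing d for self.data, W for self.weights (identity when None), r(p) = d - f(p) for the residuals, and J for the Jacobian of the predicted data f(p), the three snippets correspond to

    \phi(p) = r^T W r, \qquad
    \nabla\phi(p) = -2\,J^T W r, \qquad
    \nabla^2\phi(p) \approx 2\,J^T W J,

where the Hessian is the Gauss-Newton approximation that drops the second-derivative term of f.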
Example #7
def hessian(self, p):
    # Hessian of the smooth approximation to the l1 norm of diffs * p.
    derivs = safe_dot(self.diffs, p)
    q = self.beta / ((derivs**2 + self.beta)**1.5)
    q_matrix = sp.diags(q, 0).tocsr()
    return safe_dot(self.diffs.T, q_matrix * self.diffs)
Example #8
def value(self, p):
    return np.linalg.norm(safe_dot(self.diffs, p), 1)
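Examples #1, #7, and #8 belong to a total-variation style regularization: the value is the l1 norm of the finite differences of the parameters, while the gradient and Hessian use a smooth approximation of the absolute value controlled by the small positive parameter self.beta. With D = self.diffs and v = D p, the code above matches

    \phi(p) = \|D p\|_1 \approx \sum_i \sqrt{v_i^2 + \beta}, \qquad
    \nabla\phi(p) = D^T q, \quad q_i = \frac{v_i}{\sqrt{v_i^2 + \beta}}, \qquad
    \nabla^2\phi(p) \approx D^T \operatorname{diag}\!\left(\frac{\beta}{(v_i^2 + \beta)^{3/2}}\right) D.

These expressions are read directly from the snippets; β keeps the derivatives finite where v_i = 0.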
Example #9
def gradient(self, p):
    # Gradient of the quadratic regularization p.T R.T R p.
    return 2 * safe_dot(self.RtR, p)
Example #10
def value(self, p):
    return safe_dot(p.T, safe_dot(self.RtR, p))
Example #11
def __init__(self, diffs):
    self.islinear = True
    self.diffs = diffs
    # Precompute R.T R so value, gradient, and Hessian are cheap to evaluate.
    self.RtR = safe_dot(diffs.T, diffs)
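Examples #9, #10, and #11 form a quadratic (damping/Tikhonov-style) regularization built from the precomputed R^T R: the value is p^T R^T R p = ||R p||^2 and the gradient is 2 R^T R p. The snippet below is a quick numerical check of those two identities using a small dense matrix in place of diffs; the matrix and vector are invented for the check and nothing here comes from the library.

import numpy as np

R = np.array([[1.0, -1.0, 0.0],
              [0.0, 1.0, -1.0]])   # a toy first-difference operator
RtR = R.T @ R
p = np.array([1.0, 2.0, 4.0])

# value: p.T RtR p equals the squared 2-norm of R p
assert np.isclose(p @ RtR @ p, np.linalg.norm(R @ p)**2)

# gradient: 2 RtR p matches a central finite-difference estimate
eps = 1e-6
fd = np.array([((p + eps*e) @ RtR @ (p + eps*e)
                - (p - eps*e) @ RtR @ (p - eps*e)) / (2*eps)
               for e in np.eye(3)])
assert np.allclose(2 * RtR @ p, fd)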
Example #12
def minimize(self, objective):
    stats = dict(method="Levenberg-Marquardt",
                 iterations=0,
                 objective=[],
                 step_attempts=[],
                 step_size=[])
    p = np.array(self.initial)
    value = objective.value(p)
    lamb = self.lamb
    stats['objective'].append(value)
    stats['step_attempts'].append(0)
    stats['step_size'].append(lamb)
    for iteration in range(self.maxit):
        grad = objective.gradient(p)
        hess = objective.hessian(p)
        if self.precondition:
            # Jacobi (diagonal) preconditioning of the Newton system.
            diag = np.abs(safe_diagonal(hess))
            diag[diag < 1e-10] = 1e-10
            precond = sp.diags(1/diag, 0).tocsr()
            hess = safe_dot(precond, hess)
            grad = safe_dot(precond, grad)
        diag = sp.diags(safe_diagonal(hess), 0).tocsr()
        # Try to take a step, adjusting the damping until the objective
        # decreases or the maximum number of attempts is reached.
        took_step = False
        for step in range(self.maxsteps):
            newp = p + safe_solve(hess + lamb*diag, -grad)
            newvalue = objective.value(newp)
            decrease = newvalue < value
            if not decrease:
                # Step failed: increase the damping (closer to gradient descent).
                if lamb < 1e15:
                    lamb = lamb*self.dlamb
            else:
                # Step succeeded: relax the damping (closer to Newton's method).
                if lamb > 1e-15:
                    lamb = lamb/self.dlamb
                took_step = True
                break
        if not took_step:
            stop = True
            warnings.warn(
                "LevMarq optimizer exited because couldn't take a step "
                + 'without increasing the objective function. '
                + 'Might not have achieved convergence. '
                + 'Try increasing the max number of step attempts.',
                RuntimeWarning)
        else:
            stop = abs(newvalue - value) < self.tol*abs(value)
            p = newp
            value = newvalue
            stats['objective'].append(value)
            stats['iterations'] += 1
            stats['step_attempts'].append(step + 1)
            stats['step_size'].append(lamb)
        if stop:
            break
    if iteration == self.maxit - 1:
        warnings.warn(
            'LevMarq optimizer exited because maximum iterations reached. '
            + 'Might not have achieved convergence. '
            + 'Try increasing the maximum number of iterations allowed.',
            RuntimeWarning)
    self.stats = stats
    return p
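In matrix form, each attempted step in Example #12 solves a damped version of the Newton system,

    (H + \lambda\,\operatorname{diag}(H))\,\Delta p = -\nabla\phi(p), \qquad p \leftarrow p + \Delta p,

where H is the (possibly preconditioned) Hessian, diag(H) the sparse diagonal built just before the inner loop, and λ the damping parameter lamb. λ is multiplied by dlamb when a step fails to decrease the objective, pushing the update toward a short gradient-descent-like step, and divided by dlamb when it succeeds, letting the update approach a full Newton step.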