Example #1
 def updateAg(self):
     tm = timer()
     pif('updating A and g...')
     JT = self.J.T
     # Gauss-Newton normal equations: A = J^T J approximates the Hessian
     # and g = -J^T r is the negated gradient of the least-squares objective.
     self.A = JT.dot(self.J)
     self.g = JT.dot(-self.r).reshape((-1, 1))
     pif('A and g updated in %.2fs' % tm())
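
For context, here is a minimal NumPy sketch of the same normal-equation assembly; `J` and `r` are stand-in names for a Jacobian and residual vector, not part of the original class:

import numpy as np

# Toy Jacobian J and residual r for a 2-parameter least-squares problem.
J = np.array([[1.0, 2.0],
              [3.0, 4.0],
              [5.0, 6.0]])
r = np.array([0.5, -1.0, 2.0])

A = J.T.dot(J)                    # Gauss-Newton approximation of the Hessian
g = J.T.dot(-r).reshape((-1, 1))  # negated gradient, as a column vector

# The Gauss-Newton step then solves A d = g.
d_gn = np.linalg.solve(A, g).ravel()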
Example #2
 def updateJ(self, obj):
     tm = timer()
     pif('computing Jacobian...')
     self.J = obj.J
     if self.J is None:
         raise Exception("Computing Jacobian failed!")
     if sp.issparse(self.J):
         tm2 = timer()
         self.J = self.J.tocsr()
         pif('converted to csr in {}secs'.format(tm2()))
         assert self.J.nnz > 0  # an all-zero Jacobian indicates a broken derivative
     elif ch.VERBOSE:
         nonzero = np.count_nonzero(self.J)
         pif('dense Jacobian with nonzero density %.3f' % (nonzero / self.J.size))
     pif('Jacobian (%dx%d) computed in %.2fs' %
         (self.J.shape[0], self.J.shape[1], tm()))
     if self.J.shape[1] != self.p.size:
         raise Exception('Jacobian size mismatch with objective input')
     return self.J
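
A self-contained sketch of the same dense-versus-sparse handling; `normalize_jacobian` is a hypothetical helper name, not part of the original code:

import numpy as np
import scipy.sparse as sp

def normalize_jacobian(J):
    # Mirror updateJ: fail on a missing Jacobian, convert sparse input to
    # CSR (fast row slicing and matvec), and report density when dense.
    if J is None:
        raise ValueError('computing the Jacobian failed')
    if sp.issparse(J):
        J = J.tocsr()
        assert J.nnz > 0
    else:
        density = np.count_nonzero(J) / float(J.size)
        print('dense Jacobian with nonzero density %.3f' % density)
    return J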
Example #3
 def updateGN(self):
     tm = timer()
     if sp.issparse(self.A):
         self.A.eliminate_zeros()
         pif('sparse solve...nonzero density is %.3f%% (hessian %dx%d)' %
             (100. * self.A.nnz / (self.A.shape[0] * self.A.shape[1]),
              self.A.shape[0], self.A.shape[1]))
         if self.g.size > 1:
             self.d_gn = self.solve(self.A, self.g).ravel()
             if np.any(np.isnan(self.d_gn)) or np.any(np.isinf(self.d_gn)):
                 from scipy.sparse.linalg import lsqr
                 warnings.warn("sparse solve failed, falling back to lsqr")
                 self.d_gn = lsqr(self.A, self.g)[0].ravel()
         else:
             self.d_gn = np.atleast_1d(self.g.ravel()[0] / self.A[0, 0])
         pif('sparse solve...done in %.2fs' % tm())
     else:
         pif('dense solve...')
         try:
             self.d_gn = np.linalg.solve(self.A, self.g).ravel()
         except Exception:
             warnings.warn("dense solve failed, falling back to lsqr")
             self.d_gn = np.linalg.lstsq(self.A, self.g)[0].ravel()
         pif('dense solve...done in %.2fs' % tm())
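
A minimal sketch of the same solve-with-fallback logic using SciPy's public API; `gauss_newton_step` is a hypothetical name, and the finiteness check stands in for the class's `self.solve` wrapper:

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import spsolve, lsqr

def gauss_newton_step(A, g):
    # Solve A d = g; fall back to a least-squares solver when the
    # direct solve fails or produces non-finite values.
    if sp.issparse(A):
        d = spsolve(A.tocsr(), np.ravel(g))
        if np.any(~np.isfinite(d)):  # NaN/inf signals a singular or ill-conditioned A
            d = lsqr(A, np.ravel(g))[0]
        return np.ravel(d)
    try:
        return np.linalg.solve(A, g).ravel()
    except np.linalg.LinAlgError:
        return np.linalg.lstsq(A, g, rcond=None)[0].ravel()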
Example #4
    def dr_wrt(self, wrt, profiler=None):
        '''
        Loop over the free variables, stacking a Jacobian block for each,
        then clear the cache for the whole tree once all of them are done.
        '''
        if wrt is self.x:
            jacs = []
            for fvi, freevar in enumerate(self.free_variables):
                tm = timer()
                if isinstance(freevar, ch.Select):
                    new_jac = self.obj.dr_wrt(freevar.a, profiler=profiler)
                    try:
                        new_jac = new_jac[:, freevar.idxs]
                    except Exception:
                        # non-CSC sparse matrices may not support column-wise indexing
                        new_jac = new_jac.tocsc()[:, freevar.idxs]
                else:
                    new_jac = self.obj.dr_wrt(freevar, profiler=profiler)

                # fail fast before the conversions below: todense()/csc_matrix
                # on a None Jacobian would crash with a cryptic error
                if new_jac is None:
                    raise Exception(
                        'Objective has no derivative wrt free variable {}. '
                        'You should likely remove it.'.format(fvi))

                pif('dx wrt {} in {}sec, sparse: {}'.format(
                    freevar.short_name, tm(), sp.issparse(new_jac)))

                if self._make_dense and sp.issparse(new_jac):
                    new_jac = new_jac.todense()
                if self._make_sparse and not sp.issparse(new_jac):
                    new_jac = sp.csc_matrix(new_jac)

                jacs.append(new_jac)
            tm = timer()
            utils.dfs_do_func_on_graph(self.obj, clear_cache_single)
            pif('dfs_do_func_on_graph in {}sec'.format(tm()))
            tm = timer()
            J = hstack(jacs)
            pif('hstack in {}sec'.format(tm()))
            return J
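
The final hstack has to cope with a mix of dense and sparse per-variable blocks; a sketch of one reasonable policy (the helper name `hstack_jacobians` is illustrative, not from the original source):

import numpy as np
import scipy.sparse as sp

def hstack_jacobians(jacs):
    # Promote everything to CSC and use the sparse hstack if any block
    # is sparse; otherwise stack densely.
    if any(sp.issparse(j) for j in jacs):
        return sp.hstack([sp.csc_matrix(j) for j in jacs], format='csc')
    return np.hstack(jacs)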
Example #5
 def update_step(self):
     # if the Cauchy point is outside the trust region,
     # take that direction but only to the edge of the trust region
     if self.delta is not None and np.linalg.norm(self.d_sd) >= self.delta:
         pif('PROGRESS: Using stunted cauchy')
         self.d_dl = np.array(self.delta / np.linalg.norm(self.d_sd) *
                              self.d_sd).ravel()
     else:
         if self.d_gn is None:
             # We only need to compute this once per iteration
             self.updateGN()
         # if the gauss-newton solution is within the trust region, use it
         if self.delta is None or np.linalg.norm(self.d_gn) <= self.delta:
             pif('PROGRESS: Using gauss-newton solution')
             self.d_dl = np.array(self.d_gn).ravel()
             if self.delta is None:
                 self.delta = np.linalg.norm(self.d_gn)
         else:  # between the cauchy step and the gauss-newton step
             pif('PROGRESS: between cauchy and gauss-newton')
             # walk along the dogleg path from the cauchy point toward the
             # gauss-newton step until it meets the trust-region boundary
             self.d_dl = self.d_sd + self.beta_multiplier * (self.d_gn -
                                                             self.d_sd)
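
`beta_multiplier` is computed elsewhere in the class; the standard dogleg choice solves ||d_sd + beta * (d_gn - d_sd)|| = delta for beta, so the step lands exactly on the trust-region boundary. A minimal sketch of that computation (hypothetical helper name):

import numpy as np

def dogleg_beta(d_sd, d_gn, delta):
    # Solve ||d_sd + beta * (d_gn - d_sd)||^2 = delta^2, a quadratic in
    # beta; the positive root keeps beta in [0, 1] when
    # ||d_sd|| < delta < ||d_gn||.
    diff = d_gn - d_sd
    a = diff.dot(diff)
    b = 2.0 * d_sd.dot(diff)
    c = d_sd.dot(d_sd) - delta ** 2
    return (-b + np.sqrt(b * b - 4.0 * a * c)) / (2.0 * a)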
Example #6
 def stop(msg):
     if not state.done:
         pif(msg)
     state.done = True
Example #7
def minimize_dogleg(obj,
                    free_variables,
                    on_step=None,
                    maxiter=200,
                    max_fevals=np.inf,
                    sparse_solver='spsolve',
                    disp=True,
                    e_1=1e-15,
                    e_2=1e-15,
                    e_3=0.,
                    delta_0=None,
                    treat_as_dense=False):
    """"Nonlinear optimization using Powell's dogleg method.
    See Lourakis et al, 2005, ICCV '05, "Is Levenberg-Marquardt the
    Most Efficient Optimization for Implementing Bundle Adjustment?":
    http://www.ics.forth.gr/cvrl/publications/conferences/0201-P0401-lourakis-levenberg.pdf

    e_N are stopping conditions:
    e_1 is gradient magnatude threshold
    e_2 is step size magnatude threshold
    e_3 is improvement threshold (as a ratio; 0.1 means it must improve by 10%% at each step)

    maxiter and max_fevals are also stopping conditions. Note that they're not quite the same,
    as an iteration may evaluate the function more than once.

    sparse_solver is the solver to use to calculate the Gauss-Newton step in the common case
    that the Jacobian is sparse. It can be 'spsolve' (in which case scipy.sparse.linalg.spsolve
    will be used), 'cg' (in which case scipy.sparse.linalg.cg will be used), or any callable
    that matches the api of scipy.sparse.linalg.spsolve to solve `A x = b` for x where A is sparse.

    cg, uses a Conjugate Gradient method, and will be faster if A is sparse but x is dense.
    spsolve will be faster if x is also sparse.

    delta_0 defines the initial trust region. Generally speaking, if this is set too low then
    the optimization will never really go anywhere (to small a trust region to make any real
    progress before running out of iterations) and if it's set too high then the optimization
    will diverge immidiately and go wild (such a large trust region that the initial step so
    far overshoots that it can't recover). If it's left as None, it will be automatically
    estimated on the first iteration; it's always updated at each iteration, so this is treated
    only as an initialization.

    handle_as_dense explicitly converts all Jacobians of obj to dense matrices
    """

    solve = setup_sparse_solver(sparse_solver)
    obj, callback = setup_objective(obj,
                                    free_variables,
                                    on_step=on_step,
                                    disp=disp,
                                    make_dense=treat_as_dense)

    state = DoglegState(delta=delta_0, solve=solve)
    state.p = obj.x.r

    # inject profiler if in DEBUG mode
    if ch.DEBUG:
        from .monitor import DrWrtProfiler
        obj.profiler = DrWrtProfiler(obj)

    callback()
    state.updateJ(obj)
    state.r = obj.r

    def stop(msg):
        if not state.done:
            pif(msg)
        state.done = True

    if np.linalg.norm(state.g, np.inf) < e_1:
        stop('stopping because norm(g, np.inf) < %.2e' % e_1)
    while not state.done:
        state.start_iteration()
        while True:
            state.update_step()
            if state.step_size <= e_2 * np.linalg.norm(state.p):
                stop('stopping because of small step size (norm_dl < %.2e)' %
                     (e_2 * np.linalg.norm(state.p)))
            else:
                tm = timer()
                obj.x = state.p + state.step
                trial = state.trial_r(obj.r)
                pif('Residuals computed in %.2fs' % tm())
                # if the objective function improved, update input parameter estimate.
                # Note that the obj.x already has the new parms,
                # and we should not set them again to the same (or we'll bust the cache)
                if trial.is_improvement:
                    state.p = state.p + state.step
                    callback()
                    if e_3 > 0. and trial.improvement < e_3:
                        stop('stopping because improvement < %.1e%%' %
                             (100 * e_3))
                    else:
                        state.updateJ(obj)
                        state.r = trial.r
                        if np.linalg.norm(state.g, np.inf) < e_1:
                            stop('stopping because norm(g, np.inf) < %.2e' %
                                 e_1)
                else:  # Put the old parms back
                    obj.x = ch.Ch(state.p)
                    # copy from the flat vector back into the free variables
                    obj.on_changed('x')
                # update our trust region
                state.updateRadius(trial.rho)
                if state.delta <= e_2 * np.linalg.norm(state.p):
                    stop('stopping because trust region is too small')
            if state.done or trial.is_improvement or (obj.fevals >=
                                                      max_fevals):
                break
        if state.iteration >= maxiter:
            stop('stopping because the user-specified maximum number of '
                 'iterations (%d) has been reached' % maxiter)
        elif obj.fevals >= max_fevals:
            stop('stopping because the user-specified maximum number of '
                 'function evaluations (%d) has been reached' % max_fevals)
    return obj.free_variables
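
A hedged usage sketch: the variable names below are illustrative assumptions, not from the original source. It assumes chumpy is installed and that `minimize_dogleg` is importable from wherever the code above lives:

import numpy as np
import chumpy as ch

# Fit x to a target by driving the residual x - target to zero.
x = ch.array(np.zeros(3))
target = np.array([1.0, 2.0, 3.0])
residual = x - target  # a chumpy expression providing residuals and Jacobians

free_vars = minimize_dogleg(residual,
                            free_variables=[x],
                            maxiter=50,
                            sparse_solver='spsolve',
                            disp=False)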
Example #8
 def start_iteration(self):
     self.iteration += 1
     pif('beginning iteration %d' % (self.iteration, ))
     # steepest-descent (Cauchy) step: alpha = ||g||^2 / ||J g||^2
     self.d_sd = (np.linalg.norm(self.g)**2 /
                  np.linalg.norm(self.J.dot(self.g))**2 * self.g).ravel()
     self.d_gn = None
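
The step length follows from minimizing the Gauss-Newton quadratic model along the gradient direction: alpha = g^T g / (g^T J^T J g) = ||g||^2 / ||J g||^2. A one-function sketch (hypothetical helper name, 1-D arrays assumed):

import numpy as np

def cauchy_step(J, g):
    # Steepest-descent step that minimizes the quadratic model
    # 0.5 * ||J d - r||^2 along the descent direction g = -J^T r.
    Jg = J.dot(g)
    alpha = g.dot(g) / Jg.dot(Jg)
    return alpha * g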