Example 1
 def doneMsg(self, where):
     if self.code > 0:
         msg('done at %s with error status: %s', where, self.errMsg)
     else:
         if self.params.verbose:
             msg('done at %s with val=%g (%g, %g)', where, self.value.t,
                 self.value.F, self.value.Fp)
Example 2
  def ushrink(self,a,b):
    abar = a;
    bbar = b;

    count = 0;
    while True:
      count += 1;
      
      if self.params.verbose:
        msg('in ushrink')
        self.printBracket(abar,bbar)
      if count > self.params.nshrink:
        self.setError('too many contractions in ushrink')
        return (abar,bbar)
    
      d=self.eval( (1-self.params.theta)*abar.t+self.params.theta*bbar.t );
      if self.wolfe(d):
        self.setDone(d)
        return (abar,bbar)

      if d.Fp>=0:
        bbar = d;
        return (abar,bbar)
    
      if d.F <= self.fpert:
        abar=d;
      else:
        bbar=d;
Example 3
  def update(self, a, b, ct ):
    abar = a
    bbar = b
    
    params = self.params
    
    if params.verbose: msg('update %g %g %g', a.t, b.t, ct);
    if  (ct<=a.t) or (ct>=b.t):
      if params.verbose: msg('midpoint out of interval')
      return (abar,bbar)

    c = self.eval(ct)

    if self.wolfe(c):
      self.setDone(c)
      return (abar,bbar)

    if c.Fp >= 0:
      if params.verbose: msg('midpoint with non-negative slope. Becomes b.')
      abar = a;
      bbar = c;
      if params.debug: self.verifyBracket(abar,bbar)
      return (abar,bbar)

    if c.F <= self.fpert:
      if params.verbose: msg('midpoint with negative slope, small value. Becomes a.')
      abar = c;
      bbar = b;
      if params.debug: self.verifyBracket(abar,bbar)
      return (abar,bbar)

    if params.verbose: msg('midpoint with negative slope, large value. Shrinking to left.')
    (abar,bbar) = self.ushrink( a, c )
    if params.debug: self.verifyBracket(abar,bbar)

    return (abar,bbar)
Example 4
    def ushrink(self, a, b):
        abar = a
        bbar = b

        count = 0
        while True:
            count += 1

            if self.params.verbose:
                msg('in ushrink')
                self.printBracket(abar, bbar)
            if count > self.params.nshrink:
                self.setError('too many contractions in ushrink')
                return (abar, bbar)

            d = self.eval((1 - self.params.theta) * abar.t +
                          self.params.theta * bbar.t)
            if self.wolfe(d):
                self.setDone(d)
                return (abar, bbar)

            if d.Fp >= 0:
                bbar = d
                return (abar, bbar)

            if d.F <= self.fpert:
                abar = d
            else:
                bbar = d
Example 5
 def verifyBracket(self,a,b):
   good = (a.Fp <= 0) and (b.Fp >= 0) and (a.F <= self.fpert)
   if not good:
     msg( 'bracket inconsistent: a %g b %g f(a) %g fp(a) %g f(b) %g fp(b) %g fpert %g', a.t, b.t, a.F, a.Fp, b.F, b.Fp, self.fpert )
     pause()
   if(a.t>=b.t):
     msg('bracket not a bracket (a>=b): a %g b %g f(a) %g fp(a) %g f(b) %g fp(b) %g fpert %g', a.t, b.t, a.F, a.Fp, b.F, b.Fp, self.fpert )
Example 6
  def eval(self,x,d,y,t):
    """
    Efficiently evaluate the function Phi(t) and its derivative Phi'(t), where

        Phi(t) = 0.5 * || y - F(x+td) ||_Y^2

    as described in the class-level documentation.
    """

    if self.z is None:
      self.z = x.copy()
      self.r = y.copy()
      # self.Fz = dolfinutils.vectorLike(y)
      self.Td = y.vector_like()
    else:
      self.z.set(x)
      self.r.set(y)

    forward_problem = self.forward_problem

    self.z.axpy(t,d)

    try:
      self.Fz = forward_problem.evalFandLinearize(self.z,out=self.Fz,guess=self.Fz)
    except Exception as e:
      msg('Exception during evaluation of linesearch at t=%g;\n%s',t,str(e))
      return (numpy.nan,numpy.nan,None)
    self.Td=forward_problem.T(d,out=self.Td)

    self.r -= self.Fz
    J = 0.5*forward_problem.rangeIP(self.r,self.r)
    Jp = -self.forward_problem.rangeIP(self.Td,self.r)

    # Extra return value 'None' can be used to store extra data.
    return (J,Jp,None)
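For intuition, a self-contained finite-difference check of the Phi/Phi' pair computed above, using a toy linear forward map F(x) = A x (so its linearization T is simply A) and the Euclidean inner product. The numpy arrays are made-up stand-ins for the forward_problem machinery.

import numpy as np

# Toy linear forward map F(x) = A @ x, so the linearization T is simply A.
A = np.array([[2.0, 0.0], [1.0, 3.0]])
x = np.array([1.0, -1.0])
d = np.array([0.5, 0.25])
y = np.array([1.0, 2.0])

def phi(t):
    r = y - A @ (x + t*d)      # residual y - F(x + t d)
    J = 0.5 * r @ r            # Phi(t)  = 0.5*||y - F(x+td)||^2
    Jp = -(A @ d) @ r          # Phi'(t) = -<T d, y - F(x+td)>, as in eval() above
    return J, Jp

t, h = 0.3, 1e-6
J, Jp = phi(t)
fd = (phi(t + h)[0] - phi(t - h)[0]) / (2*h)   # central difference
print(J, Jp, fd)               # Jp and fd agree to roughly 1e-9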
Example 7
  def solve(self,x0,y,*args):
    (x,y) = self.initialize(x0,y,*args)

    cg_reset = x.size()
    if( self.params.cg_reset != 0): cg_reset = self.params.cg_reset

    forward_problem = self.forwardProblem()
    r=y.copy()
    Tx = forward_problem.T(x)
    r -= Tx

    normsq_r = forward_problem.domainIP(r,r)

    # d = r
    d = r.copy()

    # Eventually will contain T(d)
    Td = None

    count = 0
    while True:
      if count > self.params.ITER_MAX:
        raise IterationCountFailure(self.params.ITER_MAX)
      count += 1

      if self.stopConditionMet(count,x,y,r):
        msg('done at iteration %d', count)
        break

      Td = forward_problem.T(d,out=Td)

      self.iterationHook( count, x, y, d, r, Td )

      alpha = normsq_r/forward_problem.domainIP(d,Td)
      if ((count + 1) % cg_reset) == 0:
         msg('resetting cg via steepest descent')
         alpha = 1
      
      # x = x + alpha*d
      x.axpy(alpha,d)

      # r = r - alpha*Td
      r.axpy(-alpha,Td)

      prev_normsq_r = normsq_r
      normsq_r = forward_problem.domainIP(r,r)
      beta = normsq_r / prev_normsq_r
      if ((count + 1) % cg_reset) == 0: beta = 0
      
      if(self.params.steepest_descent):
        beta = 0
      # d = T*r + beta*d
      d *= beta
      d += r


    y = forward_problem.T(x)
    return self.finalize(x,y)
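One note on the reset test in the loop above: in Python % binds tighter than +, so count + 1 % cg_reset parses as count + (1 % cg_reset) and the comparison with 0 never fires. The parenthesized form used here is the intended periodic reset:

count, cg_reset = 9, 10
print(count + 1 % cg_reset)    # 10: count + (1 % cg_reset), never 0 for count >= 0
print((count + 1) % cg_reset)  # 0:  the intended "every cg_reset iterations" test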
Example 8
 def stopConditionMet(self, iter, x, y, r):
     """
 Given the current iteration number, the current value of x, the desired value y of F(x), and the current residual,
 returns whether the stop condition has been met.
 """
     disc = sqrt(self.forward_problem.rangeIP(r, r))
     target = self.params.mu * self.targetDiscrepancy
     if self.params.verbose:
         msg('Iteration %d: discrepancy %g target %g', iter, disc, target)
     return disc <= target
Example 9
 def stopConditionMet(self,iter,x,y,r):
   """
    Given the current iteration number, the current value of x, the desired value y of F(x), and the current residual,
    returns whether the stop condition has been met.
   """
   disc = sqrt(self.forward_problem.rangeIP(r,r))
   target = self.params.mu*self.targetDiscrepancy
   if self.params.verbose:
     msg('Iteration %d: discrepancy %g target %g',iter,disc,target)
   return disc <= target
Example 10
 def secant(self,a,b):
   # % What if a'=b'?  We'll generate a +/-Inf, which will subsequently test as being out
   # % of any interval when 'update' is subsequently called.  So this seems safe.
   
   if self.params.verbose: msg( 'secant: a %g fp(a) %4.8g b %g fp(b) %4.8g',a.t,a.Fp, b.t, b.Fp)
   if a.t == b.t:
     msg('a=b, inconceivable!')
   if -a.Fp <= b.Fp:
     return a.t - (a.t-b.t)*(a.Fp/(a.Fp-b.Fp))
   else:
     return b.t - (a.t-b.t)*(b.Fp/(a.Fp-b.Fp))
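A tiny self-contained illustration of the secant step in the first branch above: when Fp is linear in t (Phi quadratic), the step lands exactly on the root of the derivative. The namedtuple is only a stand-in for the t/F/Fp fields of the bracket endpoints used here.

from collections import namedtuple

Pt = namedtuple('Pt', 't F Fp')     # stand-in for a bracket endpoint

def phi(t):                         # toy quadratic with minimizer at t = 1.5
    return 0.5*(t - 1.5)**2

def phip(t):
    return t - 1.5

a = Pt(1.0, phi(1.0), phip(1.0))    # negative slope at a
b = Pt(3.0, phi(3.0), phip(3.0))    # positive slope at b

# Same formula as the first branch above: secant approximation to the root of Fp on [a, b].
c = a.t - (a.t - b.t)*(a.Fp/(a.Fp - b.Fp))
print(c)                            # 1.5: exact, since Fp is linear here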
Example 11
 def verifyBracket(self, a, b):
     good = (a.Fp <= 0) and (b.Fp >= 0) and (a.F <= self.fpert)
     if not good:
         msg(
             'bracket inconsistent: a %g b %g f(a) %g fp(a) %g f(b) %g fp(b) %g fpert %g',
             a.t, b.t, a.F, a.Fp, b.F, b.Fp, self.fpert)
         pause()
     if (a.t >= b.t):
         msg(
             'bracket not a bracket (a>=b): a %g b %g f(a) %g fp(a) %g f(b) %g fp(b) %g fpert %g',
             a.t, b.t, a.F, a.Fp, b.F, b.Fp, self.fpert)
Example 12
    def secant(self, a, b):
        # % What if a'=b'?  We'll generate a +/-Inf, which will subsequently test as being out
        # % of any interval when 'update' is subsequently called.  So this seems safe.

        if self.params.verbose:
            msg('secant: a %g fp(a) %4.8g b %g fp(b) %4.8g', a.t, a.Fp, b.t,
                b.Fp)
        if a.t == b.t:
            msg('a=b, inconceivable!')
        if -a.Fp <= b.Fp:
            return a.t - (a.t - b.t) * (a.Fp / (a.Fp - b.Fp))
        else:
            return b.t - (a.t - b.t) * ((b.Fp) / (a.Fp - b.Fp))
Example 13
    def secantsq(self, a, b):
        ct = self.secant(a, b)
        if self.params.verbose: msg('first secant to %g', ct)
        (A, B) = self.update(a, b, ct)
        if self.code >= 0:
            return (A, B)

        if B.t == ct:
            ct2 = self.secant(b, B)
            if self.params.verbose:
                msg('second secant on left half A %g B %g with c=%g', A.t, B.t,
                    ct2)
            (abar, bbar) = self.update(A, B, ct2)
        elif A.t == ct:
            ct2 = self.secant(a, A)
            if self.params.verbose:
                msg('second secant on right half A %g B %g with c=%g', A.t,
                    B.t, ct2)
            (abar, bbar) = self.update(A, B, ct2)
        else:
            if self.params.verbose:
                msg('first secant gave a shrink in update. Keeping A %g B %g',
                    A.t, B.t)
            abar = A
            bbar = B

        return (abar, bbar)
Example 14
    def stopConditionMet(self, count, x, Fx, y, r):
        """
    Determines whether minimization should be halted (based, e.g., on the Morozov discrepancy principle).

    In: 
        * count: current iteration count
        * x:     point in domain of potential minimizer.
        * Fx:    value of nonlinear function at x
        * y:     desired value of F(x)
        * r:     current residual    
    """
        J = sqrt(abs(self.forward_problem.rangeIP(r, r)))
        Jgoal = self.params.mu * self.targetDiscrepancy

        msg('(%d) J=%g goal=%g', count, J, Jgoal)

        return J <= Jgoal
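The test above is Morozov's discrepancy principle: stop as soon as the residual norm drops to mu times the target discrepancy. A minimal stand-alone version, with the Euclidean norm in place of rangeIP and made-up values for mu and the target:

import numpy as np

mu = 1.1                    # safety factor, playing the role of params.mu
target_discrepancy = 0.05   # estimated noise level, playing the role of self.targetDiscrepancy

def stop_condition_met(r):
    disc = np.sqrt(r @ r)   # stands in for sqrt(rangeIP(r, r))
    return disc <= mu * target_discrepancy

print(stop_condition_met(np.array([0.3, 0.4])))    # False: disc = 0.5
print(stop_condition_met(np.array([0.03, 0.04])))  # True:  disc = 0.05 <= 0.055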
Example 15
  def stopConditionMet(self,count,x,Fx,y,r):
    """
    Determines whether minimization should be halted (based, e.g., on the Morozov discrepancy principle).

    In: 
        * count: current iteration count
        * x:     point in domain of potential minimizer.
        * Fx:    value of nonlinear function at x
        * y:     desired value of F(x)
        * r:     current residual    
    """
    J = sqrt(abs(self.forward_problem.rangeIP(r,r)));
    Jgoal = self.params.mu*self.targetDiscrepancy
    
    msg('(%d) J=%g goal=%g',count,J,Jgoal)
    
    return J <= Jgoal
Example 16
    def update(self, a, b, ct):
        abar = a
        bbar = b

        params = self.params

        if params.verbose: msg('update %g %g %g', a.t, b.t, ct)
        if (ct <= a.t) or (ct >= b.t):
            if params.verbose: msg('midpoint out of interval')
            return (abar, bbar)

        c = self.eval(ct)

        if self.wolfe(c):
            self.setDone(c)
            return (abar, bbar)

        if c.Fp >= 0:
            if params.verbose:
                msg('midpoint with non-negative slope. Becomes b.')
            abar = a
            bbar = c
            if params.debug: self.verifyBracket(abar, bbar)
            return (abar, bbar)

        if c.F <= self.fpert:
            if params.verbose:
                msg('midpoint with negative slope, small value. Becomes a.')
            abar = c
            bbar = b
            if params.debug: self.verifyBracket(abar, bbar)
            return (abar, bbar)

        if params.verbose:
            msg('midpoint with negative slope, large value. Shrinking to left.'
                )
        (abar, bbar) = self.ushrink(a, c)
        if params.debug: self.verifyBracket(abar, bbar)

        return (abar, bbar)
Example 17
    def solve(self, x0, y, *args):
        (x, y) = self.initialize(x0, y, *args)

        cg_reset = x.size()
        if (self.params.cg_reset != 0): cg_reset = self.params.cg_reset

        forward_problem = self.forwardProblem()
        r = y.copy()
        Tx = forward_problem.T(x)
        r -= Tx

        normsq_r = forward_problem.domainIP(r, r)

        # d = r
        d = r.copy()

        # Eventually will contain T(d)
        Td = None

        count = 0
        while True:
            if count > self.params.ITER_MAX:
                raise IterationCountFailure(self.params.ITER_MAX)
            count += 1

            if self.stopConditionMet(count, x, y, r):
                msg('done at iteration %d', count)
                break

            if self.params.verbose:
                msg('solving linear problem')
            Td = forward_problem.T(d, out=Td)

            self.iterationHook(count, x, y, d, r, Td)

            alpha = normsq_r / forward_problem.domainIP(d, Td)
            if ((count + 1) % cg_reset) == 0:
                msg('resetting cg via steepest descent')
                alpha = 1

            # x = x + alpha*d
            x.axpy(alpha, d)

            # r = r - alpha*Td
            r.axpy(-alpha, Td)

            prev_normsq_r = normsq_r
            normsq_r = forward_problem.domainIP(r, r)
            beta = normsq_r / prev_normsq_r
            if ((count + 1) % cg_reset) == 0: beta = 0

            if (self.params.steepest_descent):
                beta = 0
            # d = T*r + beta*d
            d *= beta
            d += r

        y = forward_problem.T(x)
        return self.finalize(x, y)
Example 18
  def secantsq(self,a,b):
    ct = self.secant(a,b)
    if self.params.verbose: msg('first secant to %g', ct)
    (A,B) = self.update(a,b,ct)
    if self.code >= 0:
      return (A,B)

    if B.t == ct:
      ct2 = self.secant(b,B);
      if self.params.verbose: msg('second secant on left half A %g B %g with c=%g',A.t, B.t, ct2)
      (abar,bbar) = self.update(A,B,ct2)
    elif A.t == ct:
      ct2 = self.secant(a,A);
      if self.params.verbose: msg('second secant on right half A %g B %g with c=%g',A.t, B.t, ct2)
      (abar,bbar) = self.update(A,B,ct2)
    else:
      if self.params.verbose: msg('first secant gave a shrink in update. Keeping A %g B %g',A.t, B.t)
      abar = A; bbar = B
    
    return (abar,bbar)
Example 19
    def bracket(self, z, c):
        a = z
        b = c

        count = 0
        while True:
            if count > self.params.nshrink:
                self.setError('Too many expansions in bracket')
                return (a, b)
            count += 1

            if b.Fp >= 0:
                if self.params.verbose:
                    msg('initial bracket ends with expansion: b has positive slope'
                        )
                return (a, b)

            if b.F > self.fpert:
                if self.params.verbose: msg('initial bracket contraction')
                return self.ushrink(a, b)

            if self.params.verbose: msg('initial bracket expanding')
            a = b
            rho = self.params.rho
            while True:
                if count > self.params.nshrink:
                    self.setError('Unable to find a valid input')
                    return (a, b)
                c = self.eval(rho * b.t)
                if not numpy.isnan(c.F):
                    b = c
                    break
                msg('Hit a NaN at t=%g', rho * b.t)
                rho *= 0.5
                count += 1

            if self.wolfe(b):
                #msg('decrease %g slope %g f0 %g fpert %g', b.F-params.f0, b.t*params.wolfe_hi, params.f0, params.fpert)
                self.setDone(b)
                return (a, b)
Example 20
  def bracket( self, z, c ):
    a = z
    b = c
    
    count = 0
    while True:
      if count > self.params.nshrink:
        self.setError('Too many expansions in bracket')
        return (a,b)
      count += 1

      if b.Fp >= 0:
        if self.params.verbose: msg('initial bracket ends with expansion: b has positive slope')
        return (a,b)
    
      if b.F > self.fpert:
        if self.params.verbose: msg('initial bracket contraction')
        return self.ushrink(a,b);

      if self.params.verbose: msg('initial bracket expanding')
      a = b;
      rho = self.params.rho
      while True:
        if count > self.params.nshrink:
          self.setError('Unable to find a valid input')
          return (a,b)
        c = self.eval(rho*b.t)
        if not numpy.isnan(c.F):
          b = c
          break
        msg('Hit a NaN at t=%g',rho*b.t)
        rho*=0.5
        count += 1

      if self.wolfe(b):
        #msg('decrease %g slope %g f0 %g fpert %g', b.F-params.f0, b.t*params.wolfe_hi, params.f0, params.fpert)
        self.setDone(b)
        return (a,b)
Example 21
  def wolfe(self,c):
    if self.params.verbose: msg('checking wolfe of c=%g (%g,%g)',c.t,c.F,c.Fp)

    if c.Fp >= self.wolfe_lo:
      if (c.F-self.f0) <= c.t*self.wolfe_hi:
        return True

      if self.params.verbose: msg('failed sufficient decrease' )

      # % if (  (c.F <= params.fpert) && (c.Fp <= params.awolfe_hi ) )
      # %   msg('met awolfe')
      # %   met = true;
      # %   return;
      # % end
      # if params.verbose: msg('failed awolfe sufficient decrease' )
    else:
      if self.params.verbose: msg('failed slope flatness')

    return False
Example 22
    def wolfe(self, c):
        if self.params.verbose:
            msg('checking wolfe of c=%g (%g,%g)', c.t, c.F, c.Fp)

        if c.Fp >= self.wolfe_lo:
            if (c.F - self.f0) <= c.t * self.wolfe_hi:
                return True

            if self.params.verbose: msg('failed sufficient decrease')

            # % if ((c.F <= params.fpert) && (c.Fp <= params.awolfe_hi))
            # %   msg('met awolfe')
            # %   met = true;
            # %   return;
            # % end
            # if params.verbose: msg('failed awolfe sufficient decrease')
        else:
            if self.params.verbose: msg('failed slope flatness')

        return False
Example 23
  def search(self,F,F0,F0p,t0):
    self.code = -1
    self.errMsg = 'no error'
    self.F = F

    params = self.params

    z = Bunch(F=F0,Fp=F0p,t=0,data=None)
    assert F0p <= 0

    # % Set up constants for checking Wolfe conditions.
    self.wolfe_lo = params.sigma*z.Fp
    self.wolfe_hi = params.delta*z.Fp
    self.awolfe_hi = (2*params.delta-1)*z.Fp
    self.fpert = z.F + params.epsilon
    self.f0 = z.F

    if params.verbose: msg('starting at z=%g (%g,%g)', z.t, z.F, z.Fp )

    while True:
      c = self.eval(t0)
      if not numpy.isnan(c.F):
        break
      msg('Hit a NaN in initial evaluation at t=%g',t0)        
      t0 *= 0.5
    
    if params.verbose: msg('initial guess c=%g (%g,%g)', c.t, c.F, c.Fp )

    if self.wolfe(c):
      if params.verbose: msg('done at init')
      self.setDone(c)
      return

    (aj,bj) = self.bracket(z,c)
    if params.verbose: msg('initial bracket %g %g',aj.t,bj.t)
   
    if self.code >= 0:
      self.doneMsg('initial bracket')
      return

    if params.debug: self.verifyBracket(aj,bj)

    count = 0

    while True:
      count += 1

      if count > params.nsecant:
        self.setError('too many bisections in main loop')
        return

      (a,b) = self.secantsq(aj,bj)
      if params.verbose: msg('secantsq a %g b %g', a.t, b.t)
      if params.verbose: self.printBracket(a,b)
      if self.code >= 0:
        self.doneMsg('secant')
        return

      if (b.t-a.t) > params.gamma*(bj.t-aj.t):
        (a,b) = self.update(a, b, (a.t+b.t)/2)
        if params.verbose: msg('update to a %g b %g', a.t, b.t)
        if params.verbose: self.printBracket(a,b)
        if self.code >= 0:
          self.doneMsg('bisect')
          return
      aj = a
      bj = b
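A hedged usage sketch of the driver above, mirroring the call pattern in the solver examples below (Phi = lambda t: ...; line_search.search(Phi, Jx, Jp, t0)). The callback returns a (value, slope, data) triple; the parameter fields are the ones this class reads, but the numeric values and the Bunch-based construction are illustrative assumptions, not library defaults.

# Sketch only: assumes linesearchHZ and Bunch are importable as in the solver examples.
def phi(t):
    # toy 1-D objective 0.5*(t - 2)^2, returned as (value, slope, data)
    return (0.5*(t - 2.0)**2, t - 2.0, None)

params = Bunch(sigma=0.9, delta=0.1, epsilon=1e-10, theta=0.5, gamma=0.66,
               rho=5.0, nshrink=50, nsecant=50, verbose=False, debug=False)
ls = linesearchHZ.LinesearchHZ(params=params)

F0, F0p, _ = phi(0.0)
ls.search(phi, F0, F0p, 1.0)
if not ls.error():
    print('accepted t=%g with F=%g' % (ls.value.t, ls.value.F))
else:
    print('linesearch failed: %s' % ls.errMsg)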
Example 24
  def solve(self, x0, y, *args):
    """Solve the inverse problem F(x)=y.  The initial estimate is x=x0.
    Any extra arguments are passed to :func:`initialize`.
    """
    (x,y) = self.initialize(x0,y,*args)

    # self.discrepancy_history=[]

    forward_problem = self.forwardProblem()
    cg_reset = x.size()
    if (self.params.cg_reset != 0): cg_reset = self.params.cg_reset

    # Initial functional evaluation
    if self.params.verbose: msg('initial evaluation')
    Fx = forward_problem.evalFandLinearize(x)

    residual = y.copy()
    residual.axpy(-1, Fx)

    Jx = 0.5*forward_problem.rangeIP(residual,residual)

    TStarR = forward_problem.TStar(residual)
    TStarRLast = TStarR.copy()

    # Compute our first guess for the descent direction.  
    d = TStarR.copy();
    Jp = -forward_problem.domainIP(TStarR,d)

    if self.params.verbose: msg('initial J %g Jp %g', Jx, Jp)

    # We need to give an initial guess for the stepsize in the linesearch routine
    # t0 = Jx/(1-Jp);
    t0 = Jx/(self.params.deriv_eps-Jp)

    # An analog of matlab's realmin
    realmin = np.finfo(np.double).tiny
    
    # The class that performs our linesearches.
    line_search = linesearchHZ.LinesearchHZ(params=self.params.linesearch)
    line_searchee = ForwardProblemLineSearchAdaptor(forward_problem)

    # Keep track of multiple line search failures.
    prevLineSearchFailed = False

    try:
      # Main loop
      count = 0
      while True:
        # self.discrepancy_history.append(sqrt(2*Jx))

        if count > self.params.ITER_MAX:
          raise IterationCountFailure(self.params.ITER_MAX)
        count += 1

        self.iterationHook(count,x,Fx,y,d,residual,TStarR)

        if self.stopConditionMet(count,x,Fx,y,residual):
          msg('done at iteration %d', count)
          break

        # Phi = lambda t: self.evalPhiAndPhiPrime(forward_problem,x,d,y,t)
        Phi = lambda t: line_searchee.eval(x,d,y,t)
        line_search.search(Phi,Jx,Jp,t0)
        if line_search.error():
          if prevLineSearchFailed:
            raise NumericalFailure('linesearch failed twice in a row: %s' % line_search.errMsg);
          else:
            msg('linesearch failed: %s, switching to steepest descent', line_search.errMsg);
            d = TStarR;
            t = 1/(self.params.deriv_eps-Jp);
            prevLineSearchFailed = True;
            continue      

        prevLineSearchFailed = False;

        t = line_search.value.t;
        x.axpy(t,d)
        TStarRLast.set(TStarR)

        Fx = forward_problem.evalFandLinearize(x,out=Fx,guess=Fx)
        residual.set(y)
        residual -= Fx

        self.xUpdateHook(count,x,Fx,y,residual)

        TStarR = forward_problem.TStar(residual,out=TStarR)

        Jx = 0.5*forward_problem.rangeIP(residual,residual)

        if self.params.steepest_descent:
          beta = 0
        else:
          # Polak-Ribiere
          beta = forward_problem.domainIP(TStarR,TStarR-TStarRLast)/forward_problem.domainIP(TStarRLast,TStarRLast)

          # If we have done more iterations than we have points, reset the conjugate gradient method.
          if count > cg_reset:
            beta = 0

        d *= beta
        d += TStarR

        JpLast = Jp
        Jp =  -forward_problem.domainIP(TStarR,d)
        if Jp >= 0:
          if self.params.verbose:
            msg('found an uphill direction; resetting!')
          d.set(TStarR)
          Jp = -forward_problem.domainIP(TStarR,d)
          t0 = Jx/(self.params.deriv_eps-Jp)
        else:
          t0 = t*min(10, JpLast/(Jp - realmin))
    except Exception as e:
      # Store the current x and y values in case they are interesting to the caller, then
      # re-raise the exception.
      import traceback
      traceback.print_exc()
      self.finalState = self.finalize(x,Fx)
      raise e

    return self.finalize(x, Fx)
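The Polak-Ribiere update used above, spelled out on plain numpy vectors with the Euclidean inner product standing in for domainIP; g and g_last are stand-ins for the current and previous TStarR, and the numbers are arbitrary.

import numpy as np

g_last = np.array([1.0, -2.0, 0.5])   # previous T*r
g      = np.array([0.8, -1.5, 0.7])   # current  T*r
d_prev = g_last.copy()                # previous direction (on the first step d = T*r)

# beta_PR = <g, g - g_last> / <g_last, g_last>; set to 0 to restart with steepest descent.
beta = g @ (g - g_last) / (g_last @ g_last)
d = g + beta*d_prev                   # d <- T*r + beta*d, as in the loop above
print(beta, d)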
Example 25
    def solve(self, x0, y, *args):
        """Solve the inverse problem F(x)=y.  The initial estimate is x=x0.
    Any extra arguments are passed to :func:`initialize`.
    """
        (x, y) = self.initialize(x0, y, *args)

        # self.discrepancy_history=[]

        forward_problem = self.forwardProblem()
        cg_reset = x.size()
        if (self.params.cg_reset != 0): cg_reset = self.params.cg_reset

        # Initial functional evaluation
        if self.params.verbose: msg('initial evaluation')
        Fx = forward_problem.evalFandLinearize(x)

        residual = y.copy()
        residual.axpy(-1, Fx)

        Jx = 0.5 * forward_problem.rangeIP(residual, residual)

        TStarR = forward_problem.TStar(residual)
        TStarRLast = TStarR.copy()

        # Compute our first guess for the descent direction.
        d = TStarR.copy()
        Jp = -forward_problem.domainIP(TStarR, d)

        if self.params.verbose: msg('initial J %g Jp %g', Jx, Jp)

        # We need to give an initial guess for the stepsize in the linesearch routine
        # t0 = Jx/(1-Jp);
        t0 = Jx / (self.params.deriv_eps - Jp)

        # An analog of matlab's realmin
        realmin = np.finfo(np.double).tiny

        # The class that performs our linesearches.
        line_search = linesearchHZ.LinesearchHZ(params=self.params.linesearch)
        line_searchee = ForwardProblemLineSearchAdaptor(forward_problem)

        # Keep track of multiple line search failures.
        prevLineSearchFailed = False

        try:
            # Main loop
            count = 0
            while True:
                # self.discrepancy_history.append(sqrt(2*Jx))

                if count > self.params.ITER_MAX:
                    raise IterationCountFailure(self.params.ITER_MAX)
                count += 1

                self.iterationHook(count, x, Fx, y, d, residual, TStarR)

                if self.stopConditionMet(count, x, Fx, y, residual):
                    msg('done at iteration %d', count)
                    break

                # Phi = lambda t: self.evalPhiAndPhiPrime(forward_problem,x,d,y,t)
                Phi = lambda t: line_searchee.eval(x, d, y, t)
                line_search.search(Phi, Jx, Jp, t0)
                if line_search.error():
                    if prevLineSearchFailed:
                        raise NumericalFailure(
                            'linesearch failed twice in a row: %s' %
                            line_search.errMsg)
                    else:
                        msg(
                            'linesearch failed: %s, switching to steepest descent',
                            line_search.errMsg)
                        d = TStarR
                        t = 1 / (self.params.deriv_eps - Jp)
                        prevLineSearchFailed = True
                        continue

                prevLineSearchFailed = False

                t = line_search.value.t
                x.axpy(t, d)
                TStarRLast.set(TStarR)

                Fx = forward_problem.evalFandLinearize(x, out=Fx, guess=Fx)
                residual.set(y)
                residual -= Fx

                self.xUpdateHook(count, x, Fx, y, residual)

                TStarR = forward_problem.TStar(residual, out=TStarR)

                Jx = 0.5 * forward_problem.rangeIP(residual, residual)

                if self.params.steepest_descent:
                    beta = 0
                else:
                    # Polak-Ribiere
                    beta = forward_problem.domainIP(
                        TStarR,
                        TStarR - TStarRLast) / forward_problem.domainIP(
                            TStarRLast, TStarRLast)

                    # If we have done more iterations than we have points, reset the conjugate gradient method.
                    if count > cg_reset:
                        beta = 0

                d *= beta
                d += TStarR

                JpLast = Jp
                Jp = -forward_problem.domainIP(TStarR, d)
                if Jp >= 0:
                    if self.params.verbose:
                        msg('found an uphill direction; resetting!')
                    d.set(TStarR)
                    Jp = -forward_problem.domainIP(TStarR, d)
                    t0 = Jx / (self.params.deriv_eps - Jp)
                else:
                    t0 = t * min(10, JpLast / (Jp - realmin))
        except Exception as e:
            # Store the current x and y values in case they are interesting to the caller, then
            # re-raise the exception.
            import traceback
            traceback.print_exc()
            self.finalState = self.finalize(x, Fx)
            raise e

        return self.finalize(x, Fx)
Example 26
    def solve(self, x0, y, *args):
        (x, y) = self.initialize(x0, y, *args)

        forward_problem = self.forwardProblem()
        Tx = forward_problem.T(x)
        r = y.copy()
        r -= Tx

        cg_reset = x.size()
        if self.params.cg_reset != 0: cg_reset = self.params.cg_reset

        TStarR = forward_problem.TStar(r)
        normsq_TStarR = forward_problem.domainIP(TStarR, TStarR)

        # d = T^* r
        d = TStarR.copy()

        # Eventual storage for T(d)
        Td = None

        count = 0
        while True:
            if count > self.params.ITER_MAX:
                raise IterationCountFailure(self.params.ITER_MAX)
            count += 1

            if self.stopConditionMet(count, x, y, r):
                msg('done at iteration %d', count)
                break

            Td = forward_problem.T(d, out=Td)

            self.iterationHook(count, x, y, d, r, Td, TStarR)

            alpha = normsq_TStarR / forward_problem.rangeIP(Td, Td)
            if ((count + 1) % cg_reset) == 0:
                msg('resetting cg via steepest descent')
                alpha = 1

            # x = x + alpha*d
            x.axpy(alpha, d)

            # r = r - alpha*Td
            r.axpy(-alpha, Td)

            # beta = ||r_{k+1}||^2 / ||r_k||^2
            prev_normsq_TStarR = normsq_TStarR
            TStarR = forward_problem.TStar(r, out=TStarR)
            normsq_TStarR = forward_problem.domainIP(TStarR, TStarR)
            beta = normsq_TStarR / prev_normsq_TStarR

            if ((count + 1) % cg_reset) == 0: beta = 0

            if (self.params.steepest_descent):
                beta = 0

            # d = T*r + beta*d
            d *= beta
            d += TStarR

        Tx = forward_problem.T(x, out=Tx)
        return self.finalize(x, Tx)
Example 27
  def solve(self, x0, y, *args):
    """Main routine to solve the inverse problem F(x)=y.  Initial guess is x=x0.
    Extra arguments are passed to :func:`initialize`."""
    (x,y,targetDiscrepancy) = self.initialize(x0,y,*args)

    self.discrepancy_history=[]

    forward_problem = self.forwardProblem()
    params = self.params
    
    cg_reset = x.size()
    if (self.params.cg_reset != 0): cg_reset = self.params.cg_reset

    # Initial functional evaluation
    Fx = forward_problem.evalFandLinearize(x)

    # Prepare some storage
    Td = None

    residual = y.copy()
    residual.axpy(-1, Fx)

    discrepancy = self.discrepancy(x,y,residual);
    
    # The class that performs our linesearches.
    line_search = linesearchHZ.LinesearchHZ(params=self.params.linesearch)
    line_searchee = ForwardProblemLineSearchAdaptor(forward_problem)

    # Main loop
    count = 0
    theta = params.thetaMax;
    # try:
    for kkkkk in range(1):
      while True:
        self.discrepancy_history.append(discrepancy)

        if count > self.params.ITER_MAX:
          raise IterationCountFailure(self.params.ITER_MAX)
        count += 1

        if theta < self.params.thetaMin:
          raise NumericalFailure('Reached smallest trust region size.')

        # Error to correct:
        discrepancyLin = (1-theta)*discrepancy + theta*targetDiscrepancy
        msg('(%d) discrepancy: current %g linear goal:%g goal: %g\n---', count, discrepancy, discrepancyLin, targetDiscrepancy)

        if discrepancy <= self.params.mu*targetDiscrepancy:
          msg('done at iteration %d', count)
          break

        # try:
        d = self.linearInverseSolve(x,y,residual,discrepancyLin)
        # except Exception as e:
        #   theta *= self.params.kappaTrust
        #   msg('Exception during linear inverse solve:\n%s\nShriking theta to %g.',str(e),theta)
        #   continue

        # forward_problem.evalFandLinearize(x,out=Fx)
        # residual[:] = y
        # residual -= Fx
        Td = forward_problem.T(d,out=Td)
        Jp = -forward_problem.rangeIP(Td,residual)

        if Jp >= 0:
          theta *= self.params.kappaTrust
          msg('Model problem found an uphill direction.  Shrinking theta to %g.',theta)
          continue

          # % Sometimes in the initial stages, the linearization asks for an adjustment many orders of magnitude
          # % larger than the size of the coefficient gamma.  This is due to the very shallow derivatives
          # % in the coefficient function (and hence large derivatives in its inverse).  We've been 
          # % guaranteed that dh is pointing downhill, so scale it so that its size is on the order 
          # % of the size of the current gamma and try it out.  If this doesn't do a good job, we'll end
          # % up reducing theta later.
          # if (params.forceGammaPositive)

        self.temper_d(x,d,y,residual)

        self.iterationHook(count,x,Fx,y,d,residual,Td)

        # Do a linesearch in the determined direction.
        Phi = lambda t: line_searchee.eval(x,d,y,t)
        Jx = 0.5*forward_problem.rangeIP(residual,residual)
        line_search.search(Phi,Jx,Jp,1)
        if line_search.error():
          msg('Linesearch failed: %s. Shrinking theta.', line_search.errMsg);        
          theta *= self.params.kappaTrust
          continue

        discrepancyPrev = discrepancy
        t = line_search.value.t
        x.axpy(t,d)

        forward_problem.evalFandLinearize(x,out=Fx,guess=Fx)
        residual.set(y)
        residual -= Fx
        discrepancy = self.discrepancy(x,y,residual);

        self.xUpdateHook(count,x,Fx,y,residual)
        
        # Check to see if we did a good job reducing the misfit (compared to the amount that we asked
        # the linearized problem to correct).
        rho = (discrepancy-discrepancyPrev)/(discrepancyLin-discrepancyPrev)
      
        # assert(rho>0)

        # Determine if the trust region requires any adjustment.
        if rho > self.params.rhoLow:
          # We have a good fit.
          if rho > self.params.rhoHigh:
            if theta < self.params.thetaMax:
              theta = min(theta/self.params.kappaTrust, self.params.thetaMax);
              msg('Very good decrease (rho=%g).  New theta %g', rho, theta);
            else:
              msg('Very good decrease (rho=%g).  Keeping theta %g', rho, theta);
          else:
            msg('Reasonable decrease (rho=%g). Keeping theta %g',rho, theta);
        else:
          # We have a lousy fit.  Try asking for less correction.
          theta = theta * params.kappaTrust;
          msg('Poor decrease (rho=%g) from %g to %g;', rho, discrepancyPrev, discrepancy)
          msg('wanted %g.  New theta %g', discrepancyLin, theta);
    # except Exception as e:
    #   # Store the current x and y values in case they are interesting to the caller, then
    #   # re-raise the exception.
    #   self.finalState = self.finalize(x,Fx)
    #   raise e
      
    return self.finalize(x, Fx)
Example 28
    def search(self, F, F0, F0p, t0):
        self.code = -1
        self.errMsg = 'no error'
        self.F = F

        params = self.params

        z = Bunch(F=F0, Fp=F0p, t=0, data=None)
        assert F0p <= 0

        # % Set up constants for checking Wolfe conditions.
        self.wolfe_lo = params.sigma * z.Fp
        self.wolfe_hi = params.delta * z.Fp
        self.awolfe_hi = (2 * params.delta - 1) * z.Fp
        self.fpert = z.F + params.epsilon
        self.f0 = z.F

        if params.verbose: msg('starting at z=%g (%g,%g)', z.t, z.F, z.Fp)

        while True:
            c = self.eval(t0)
            if not numpy.isnan(c.F):
                break
            msg('Hit a NaN in initial evaluation at t=%g', t0)
            t0 *= 0.5

        if params.verbose: msg('initial guess c=%g (%g,%g)', c.t, c.F, c.Fp)

        if self.wolfe(c):
            if params.verbose: msg('done at init')
            self.setDone(c)
            return

        (aj, bj) = self.bracket(z, c)
        if params.verbose: msg('initial bracket %g %g', aj.t, bj.t)

        if self.code >= 0:
            self.doneMsg('initial bracket')
            return

        if params.debug: self.verifyBracket(aj, bj)

        count = 0

        while True:
            count += 1

            if count > params.nsecant:
                self.setError('too many bisections in main loop')
                return

            (a, b) = self.secantsq(aj, bj)
            if params.verbose: msg('secantsq a %g b %g', a.t, b.t)
            if params.verbose: self.printBracket(a, b)
            if self.code >= 0:
                self.doneMsg('secant')
                return

            if (b.t - a.t) > params.gamma * (bj.t - aj.t):
                (a, b) = self.update(a, b, (a.t + b.t) / 2)
                if params.verbose: msg('update to a %g b %g', a.t, b.t)
                if params.verbose: self.printBracket(a, b)
                if self.code >= 0:
                    self.doneMsg('bisect')
                    return
            aj = a
            bj = b
Example 29
 def printBracket(self,a,b):
   msg('a %g b %g f(a) %g fp(a) %g f(b) %g fp(b) %g fpert %g', a.t, b.t, a.F, a.Fp, b.F, b.Fp, self.fpert )
Example 30
  def search(self,f,f0,df0,t0=None):
    INT = self.params.INT      # don't re-evaluate within 0.1 of the limit of the current bracket
    EXT = self.params.EXT      # extrapolate maximum 3 times the current step-size
    MAX = self.params.MAX      # max 20 function evaluations per line search
    RATIO = self.params.RATIO  # maximum allowed slope ratio
    SIG = self.params.SIG
    RHO = SIG/2
    SMALL = 10.**-16           # minimize.m uses MATLAB's realmin

    # SIG and RHO are the constants controlling the Wolfe-
    #Powell conditions. SIG is the maximum allowed absolute ratio between
    #previous and new slopes (derivatives in the search direction), thus setting
    #SIG to low (positive) values forces higher precision in the line-searches.
    #RHO is the minimum allowed fraction of the expected (from the slope at the
    #initial point in the linesearch). Constants must satisfy 0 < RHO < SIG < 1.
    #Tuning of SIG (depending on the nature of the function to be optimized) may
    #speed up the minimization; it is probably not worth playing much with RHO.

    d0 = df0;
    fdata0 = None
    if t0 is None:
      t0 = 1/(1.-d0)
    x3 = t0                                   # initial step is red/(|s|+1)
    X0 = 0; F0 = f0; dF0 = df0; Fdata0=fdata0              # make a copy of current values
    M = MAX

    x2 = 0; f2 = f0; d2 = d0; 
    while True:                      # keep extrapolating as long as necessary
      # x2 = 0; f2 = f0; d2 = d0; 
      f3 = f0; df3 = df0; fdata3 = fdata0;
      success = 0
      while (not success) and (M > 0):
        try:
          M = M - 1
          (f3, df3, fdata3) = f(x3)
          if isnan(f3) or isinf(f3) or any(isnan(df3)+isinf(df3)):
            raise Exception('nan')
          success = 1
        except Exception:          # catch any error which occurred in f
          if self.params.verbose: msg('error on extrapolate. shrinking %g to %g', x3, (x2+x3)/2)
          x3 = (x2+x3)/2                       # bisect and try again

      if f3 < F0:
        X0 = x3; F0 = f3; dF0 = df3; Fdata0=fdata3   # keep best values
      d3 = df3                                         # new slope
      if d3 > SIG*d0 or f3 > f0+x3*RHO*d0 or M == 0:  break  # are we done extrapolating?

      x1 = x2; f1 = f2; d1 = d2                 # move point 2 to point 1
      x2 = x3; f2 = f3; d2 = d3                 # move point 3 to point 2
      A = 6*(f1-f2)+3*(d2+d1)*(x2-x1)          # make cubic extrapolation
      B = 3*(f2-f1)-(2*d1+d2)*(x2-x1)
      Z = B+sqrt(complex(B*B-A*d1*(x2-x1)))
      if Z != 0.0:
          x3 = x1-d1*(x2-x1)**2/Z              # num. error possible, ok!
      else: 
          x3 = inf
      if (not isreal(x3)) or isnan(x3) or isinf(x3) or (x3 < 0): 
                                                 # num prob | wrong sign?
          x3 = x2*EXT                        # extrapolate maximum amount
      elif x3 > x2*EXT:           # new point beyond extrapolation limit?
          x3 = x2*EXT                        # extrapolate maximum amount
      elif x3 < x2+INT*(x2-x1):  # new point too close to previous point?
          x3 = x2+INT*(x2-x1)
      x3 = real(x3)
      msg('extrapolating: x1 %g d1 %g x2 %g d2 %g x3 %g',x1,d1,x2,d3,x3)


    while (abs(d3) > -SIG*d0 or f3 > f0+x3*RHO*d0) and M > 0:  
      if (d3 > 0) or (f3 > f0+x3*RHO*d0):            # choose subinterval
        x4 = x3; f4 = f3; d4 = d3             # move point 3 to point 4
      else:
        x2 = x3; f2 = f3; d2 = d3             # move point 3 to point 2
      if self.params.verbose: msg('interpolating x2 %g x4 %g f2 %g f4 %g wolfef %g d2 %g d4 %g wolfed %g ',x2,x4,f2,f4,f0+x3*RHO*d0,d2,d4,-SIG*d0)

      if f4 > f0:           
        x3 = x2-(0.5*d2*(x4-x2)**2)/(f4-f2-d2*(x4-x2)) # quadratic interpolation
      else:
        A = 6*(f2-f4)/(x4-x2)+3*(d4+d2)           # cubic interpolation
        B = 3*(f4-f2)-(2*d2+d4)*(x4-x2)
        if A != 0:
          x3=x2+(sqrt(B*B-A*d2*(x4-x2)**2)-B)/A # num. error possible, ok!
        else:
          x3 = inf
      if isnan(x3) or isinf(x3):
        x3 = (x2+x4)/2      # if we had a numerical problem then bisect
      x3 = max(min(x3, x4-INT*(x4-x2)),x2+INT*(x4-x2))  # don't accept too close
      (f3, df3, fdata3) = f(x3);
      d3 =df3;
      M = M - 1;                      # count epochs
      if f3 < F0:
        X0 = x3; F0 = f3; dF0 = df3; Fdata0 = fdata3              # keep best values

    if (abs(d3) < -SIG*d0) and (f3 < f0+x3*RHO*d0):          # if line search succeeded
      self.code = 0
      self.value = Bunch(F=f3,Fp=d3,t=x3,data=fdata3)
      self.errMsg = ""
    else:
      self.code = 1
      if M == 0:
        self.errMsg = 'Too many function evaluations (>%d)' % MAX
      else:
        self.errMsg = 'unknown error';
      self.value = Bunch(F=f0,Fp=dF0,t=X0,data=Fdata0)
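For reference, a self-contained version of the two Wolfe-Powell tests the interpolation loop keeps checking, namely |phi'(t)| <= -SIG*phi'(0) and phi(t) <= phi(0) + t*RHO*phi'(0), on a toy quadratic; the SIG and RHO values are illustrative and only need to satisfy 0 < RHO < SIG < 1.

SIG, RHO = 0.5, 0.25

def phi(t):                  # toy 1-D objective and its slope along the search direction
    return 0.5*(t - 2.0)**2, t - 2.0

f0, d0 = phi(0.0)            # d0 < 0: a descent direction

def wolfe_powell_ok(t):
    f3, d3 = phi(t)
    curvature = abs(d3) <= -SIG*d0       # slope has flattened enough
    decrease = f3 <= f0 + t*RHO*d0       # sufficient decrease from f0
    return curvature and decrease

print(wolfe_powell_ok(0.1))  # False: slope still steep
print(wolfe_powell_ok(1.5))  # True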
Example 31
 def printBracket(self, a, b):
     msg('a %g b %g f(a) %g fp(a) %g f(b) %g fp(b) %g fpert %g', a.t, b.t,
         a.F, a.Fp, b.F, b.Fp, self.fpert)
Example 32
  def solve(self,x0,y,*args):
    (x,y) = self.initialize(x0,y,*args)

    forward_problem = self.forwardProblem()    
    Tx = forward_problem.T(x)
    r = y.copy()
    r -= Tx

    cg_reset = x.size()
    if self.params.cg_reset != 0: cg_reset = self.params.cg_reset

    TStarR = forward_problem.TStar(r)
    normsq_TStarR = forward_problem.domainIP(TStarR,TStarR)

    # d = T^* r
    d = TStarR.copy()

    # Eventual storage for T(d)
    Td = None

    count = 0
    while True:
      if count > self.params.ITER_MAX:
        raise IterationCountFailure(self.params.ITER_MAX)
      count += 1

      if self.stopConditionMet(count,x,y,r):
        msg('done at iteration %d', count)
        break

      Td = forward_problem.T(d,out=Td)

      self.iterationHook(count, x, y, d, r, Td, TStarR)

      alpha = normsq_TStarR/forward_problem.rangeIP(Td,Td)
      if ((count + 1) % cg_reset) == 0:
        msg('resetting cg via steepest descent')
        alpha = 1

      # x = x + alpha*d
      x.axpy(alpha,d)

      # r = r - alpha*Td
      r.axpy(-alpha,Td)

      # beta = ||r_{k+1}||^2 / ||r_k||^2
      prev_normsq_TStarR = normsq_TStarR
      TStarR = forward_problem.TStar(r,out=TStarR)
      normsq_TStarR = forward_problem.domainIP(TStarR,TStarR)
      beta = normsq_TStarR/prev_normsq_TStarR
   
      if ((count + 1) % cg_reset) == 0: beta = 0

      if (self.params.steepest_descent):
        beta = 0

      # d = T*r + beta*d
      d *= beta
      d += TStarR


    Tx = forward_problem.T(x, out=Tx)
    return self.finalize(x,Tx)
Example 33
    def search(self, f, f0, df0, t0=None):
        INT = self.params.INT
        # don't reevaluate within 0.1 of the limit of the current bracket
        EXT = self.params.EXT
        # extrapolate maximum 3 times the current step-size
        MAX = self.params.MAX
        # max 20 function evaluations per line search
        RATIO = self.params.RATIO
        # maximum allowed slope ratio
        SIG = self.params.SIG
        RHO = SIG / 2
        SMALL = 10.**-16  #minimize.m uses matlab's realmin

        # SIG and RHO are the constants controlling the Wolfe-
        #Powell conditions. SIG is the maximum allowed absolute ratio between
        #previous and new slopes (derivatives in the search direction), thus setting
        #SIG to low (positive) values forces higher precision in the line-searches.
        #RHO is the minimum allowed fraction of the expected (from the slope at the
        #initial point in the linesearch). Constants must satisfy 0 < RHO < SIG < 1.
        #Tuning of SIG (depending on the nature of the function to be optimized) may
        #speed up the minimization; it is probably not worth playing much with RHO.

        d0 = df0
        fdata0 = None
        if t0 is None:
            t0 = 1 / (1. - d0)
        x3 = t0  # initial step is red/(|s|+1)
        X0 = 0
        F0 = f0
        dF0 = df0
        Fdata0 = fdata0  # make a copy of current values
        M = MAX

        x2 = 0
        f2 = f0
        d2 = d0
        while True:  # keep extrapolating as long as necessary
            # x2 = 0; f2 = f0; d2 = d0;
            f3 = f0
            df3 = df0
            fdata3 = fdata0
            success = 0
            while (not success) and (M > 0):
                try:
                    M = M - 1
                    (f3, df3, fdata3) = f(x3)
                    if isnan(f3) or isinf(f3) or any(isnan(df3) + isinf(df3)):
                        raise Exception('nan')
                    success = 1
                except Exception:  # catch any error which occurred in f
                    if self.params.verbose:
                        msg('error on extrapolate. shrinking %g to %g', x3,
                            (x2 + x3) / 2)
                    x3 = (x2 + x3) / 2  # bisect and try again

            if f3 < F0:
                X0 = x3
                F0 = f3
                dF0 = df3
                Fdata0 = fdata3  # keep best values
            d3 = df3  # new slope
            if d3 > SIG * d0 or f3 > f0 + x3 * RHO * d0 or M == 0:
                break  # are we done extrapolating?

            x1 = x2
            f1 = f2
            d1 = d2  # move point 2 to point 1
            x2 = x3
            f2 = f3
            d2 = d3  # move point 3 to point 2
            A = 6 * (f1 - f2) + 3 * (d2 + d1) * (x2 - x1
                                                 )  # make cubic extrapolation
            B = 3 * (f2 - f1) - (2 * d1 + d2) * (x2 - x1)
            Z = B + sqrt(complex(B * B - A * d1 * (x2 - x1)))
            if Z != 0.0:
                x3 = x1 - d1 * (x2 - x1)**2 / Z  # num. error possible, ok!
            else:
                x3 = inf
            if (not isreal(x3)) or isnan(x3) or isinf(x3) or (x3 < 0):
                # num prob | wrong sign?
                x3 = x2 * EXT  # extrapolate maximum amount
            elif x3 > x2 * EXT:  # new point beyond extrapolation limit?
                x3 = x2 * EXT  # extrapolate maximum amount
            elif x3 < x2 + INT * (
                    x2 - x1):  # new point too close to previous point?
                x3 = x2 + INT * (x2 - x1)
            x3 = real(x3)
            msg('extrapolating: x1 %g d1 %g x2 %g d2 %g x3 %g', x1, d1, x2, d3,
                x3)

        while (abs(d3) > -SIG * d0 or f3 > f0 + x3 * RHO * d0) and M > 0:
            if (d3 > 0) or (f3 > f0 + x3 * RHO * d0):  # choose subinterval
                x4 = x3
                f4 = f3
                d4 = d3  # move point 3 to point 4
            else:
                x2 = x3
                f2 = f3
                d2 = d3  # move point 3 to point 2
            if self.params.verbose:
                msg(
                    'interpolating x2 %g x4 %g f2 %g f4 %g wolfef %g d2 %g d4 %g wolfed %g ',
                    x2, x4, f2, f4, f0 + x3 * RHO * d0, d2, d4, -SIG * d0)

            if f4 > f0:
                x3 = x2 - (0.5 * d2 * (x4 - x2)**2) / (
                    f4 - f2 - d2 * (x4 - x2))  # quadratic interpolation
            else:
                A = 6 * (f2 - f4) / (x4 - x2) + 3 * (d4 + d2
                                                     )  # cubic interpolation
                B = 3 * (f4 - f2) - (2 * d2 + d4) * (x4 - x2)
                if A != 0:
                    x3 = x2 + (sqrt(B * B - A * d2 * (x4 - x2)**2) -
                               B) / A  # num. error possible, ok!
                else:
                    x3 = inf
            if isnan(x3) or isinf(x3):
                x3 = (x2 + x4) / 2  # if we had a numerical problem then bisect
            x3 = max(min(x3, x4 - INT * (x4 - x2)),
                     x2 + INT * (x4 - x2))  # don't accept too close
            (f3, df3, fdata3) = f(x3)
            d3 = df3
            M = M - 1
            # count epochs
            if f3 < F0:
                X0 = x3
                F0 = f3
                dF0 = df3
                Fdata0 = fdata3  # keep best values

        if (abs(d3) < -SIG * d0) and (
                f3 < f0 + x3 * RHO * d0):  # if line search succeeded
            self.code = 0
            self.value = Bunch(F=f3, Fp=d3, t=x3, data=fdata3)
            self.errMsg = ""
        else:
            self.code = 1
            if M == 0:
                self.errMsg = 'Too many function evaluations (>%d)' % MAX
            else:
                self.errMsg = 'unknown error'
            self.value = Bunch(F=f0, Fp=dF0, t=X0, data=Fdata0)
Example 34
    def solve(self, x0, y, *args):
        """Main routine to solve the inverse problem F(x)=y.  Initial guess is x=x0.
    Extra arguments are passed to :func:`initialize`."""
        (x, y, targetDiscrepancy) = self.initialize(x0, y, *args)

        self.discrepancy_history = []

        forward_problem = self.forwardProblem()
        params = self.params

        cg_reset = x.size()
        if (self.params.cg_reset != 0): cg_reset = self.params.cg_reset

        # Initial functional evaluation
        Fx = forward_problem.evalFandLinearize(x)

        # Prepare some storage
        Td = None

        residual = y.copy()
        residual.axpy(-1, Fx)

        discrepancy = self.discrepancy(x, y, residual)

        # The class that performs our linesearches.
        line_search = linesearchHZ.LinesearchHZ(params=self.params.linesearch)
        line_searchee = ForwardProblemLineSearchAdaptor(forward_problem)

        # Main loop
        count = 0
        theta = params.thetaMax
        # try:
        for kkkkk in range(1):
            while True:
                self.discrepancy_history.append(discrepancy)

                if count > self.params.ITER_MAX:
                    raise IterationCountFailure(self.params.ITER_MAX)
                count += 1

                if theta < self.params.thetaMin:
                    raise NumericalFailure(
                        'Reached smallest trust region size.')

                # Error to correct:
                discrepancyLin = (
                    1 - theta) * discrepancy + theta * targetDiscrepancy
                msg(
                    '(%d) discrepancy: current %g linear goal:%g goal: %g\n---',
                    count, discrepancy, discrepancyLin, targetDiscrepancy)

                if discrepancy <= self.params.mu * targetDiscrepancy:
                    msg('done at iteration %d', count)
                    break

                # try:
                d = self.linearInverseSolve(x, y, residual, discrepancyLin)
                # except Exception as e:
                #   theta *= self.params.kappaTrust
                #   msg('Exception during linear inverse solve:\n%s\nShriking theta to %g.',str(e),theta)
                #   continue

                # forward_problem.evalFandLinearize(x,out=Fx)
                # residual[:] = y
                # residual -= Fx
                Td = forward_problem.T(d, out=Td)
                Jp = -forward_problem.rangeIP(Td, residual)

                if Jp >= 0:
                    theta *= self.params.kappaTrust
                    msg(
                        'Model problem found an uphill direction.  Shrinking theta to %g.',
                        theta)
                    continue

                    # % Sometimes in the initial stages, the linearization asks for an adjustment many orders of magnitude
                    # % larger than the size of the coefficient gamma.  This is due to the very shallow derivatives
                    # % in the coefficient function (and hence large derivatives in its inverse).  We've been
                    # % guaranteed that dh is pointing downhill, so scale it so that its size is on the order
                    # % of the size of the current gamma and try it out.  If this doesn't do a good job, we'll end
                    # % up reducing theta later.
                    # if (params.forceGammaPositive)

                self.temper_d(x, d, y, residual)

                self.iterationHook(count, x, Fx, y, d, residual, Td)

                # Do a linesearch in the determined direction.
                Phi = lambda t: line_searchee.eval(x, d, y, t)
                Jx = 0.5 * forward_problem.rangeIP(residual, residual)
                line_search.search(Phi, Jx, Jp, 1)
                if line_search.error():
                    msg('Linesearch failed: %s. Shrinking theta.',
                        line_search.errMsg)
                    theta *= self.params.kappaTrust
                    continue

                discrepancyPrev = discrepancy
                t = line_search.value.t
                x.axpy(t, d)

                forward_problem.evalFandLinearize(x, out=Fx, guess=Fx)
                residual.set(y)
                residual -= Fx
                discrepancy = self.discrepancy(x, y, residual)

                self.xUpdateHook(count, x, Fx, y, residual)

                # Check to see if we did a good job reducing the misfit (compared to the amount that we asked
                # the linearized problem to correct).
                rho = (discrepancy - discrepancyPrev) / (discrepancyLin -
                                                         discrepancyPrev)

                # assert(rho>0)

                # Determine if the trust region requires any adjustment.
                if rho > self.params.rhoLow:
                    # We have a good fit.
                    if rho > self.params.rhoHigh:
                        if theta < self.params.thetaMax:
                            theta = min(theta / self.params.kappaTrust,
                                        self.params.thetaMax)
                            msg('Very good decrease (rho=%g).  New theta %g',
                                rho, theta)
                        else:
                            msg(
                                'Very good decrease (rho=%g).  Keeping theta %g',
                                rho, theta)
                    else:
                        msg('Reasonable decrease (rho=%g). Keeping theta %g',
                            rho, theta)
                else:
                    # We have a lousy fit.  Try asking for less correction.
                    theta = theta * params.kappaTrust
                    msg('Poor decrease (rho=%g) from %g to %g;', rho,
                        discrepancyPrev, discrepancy)
                    msg('wanted %g.  New theta %g', discrepancyLin, theta)
        # except Exception as e:
        #   # Store the current x and y values in case they are interesting to the caller, then
        #   # re-raise the exception.
        #   self.finalState = self.finalize(x,Fx)
        #   raise e

        return self.finalize(x, Fx)
Example 35
 def doneMsg(self,where):
   if self.code > 0:
     msg('done at %s with error status: %s', where, self.errMsg);
   else:
     if self.params.verbose: msg('done at %s with val=%g (%g, %g)', where, self.value.t, self.value.F, self.value.Fp );