def mat_solve(self, A, b):
    r"""Solve the linear system ``A x = b`` with the configured solver.

    The solver is chosen by ``self._mat_solver``: ``'mp.lu_solve'``
    (arbitrary-precision mpmath LU), ``'scipy.solve'`` or
    ``'scipy.lstsq'`` (double precision). Non-ndarray inputs on the
    SciPy paths are converted to float64 arrays first; freshly created
    copies may then safely be overwritten in place by SciPy.
    """
    method = self._mat_solver
    if method == 'mp.lu_solve':
        return mp.lu_solve(A, b)

    def _as_float_array(obj):
        # Returns (array, may_overwrite): a fresh conversion is ours to
        # clobber, a caller-supplied ndarray is not.
        if isinstance(obj, np.ndarray):
            return obj, False
        if hasattr(obj, 'tolist'):
            obj = obj.tolist()
        return np.array(obj, dtype=np.float64), True

    A, overwrite_a = _as_float_array(A)
    b, overwrite_b = _as_float_array(b)
    if method == 'scipy.solve':
        return linalg.solve(A,
                            b,
                            overwrite_a=overwrite_a,
                            overwrite_b=overwrite_b)
    if method == 'scipy.lstsq':
        return linalg.lstsq(A,
                            b,
                            overwrite_a=overwrite_a,
                            overwrite_b=overwrite_b)[0]
    raise NotImplementedError(
        "Solver method '%s' not implemented." % method)
Ejemplo n.º 2
0
    def _indirect_transform(self, f_n, lobatto, use_mp):
        r"""Solve a system of equations to obtain the coefficients.

        Given a set of values `f_i` of the function to approximate at the
        collocation points `x_i`, this evaluates and solves a matrix equation
        \f[
            \sum_{j=0}^{\mathrm{num-1}} a_j T_{n(j)}(x_i) = f_i
        \f]
        to get the coefficients `a_n` of the basis functions. The notation
        `n(j)` indicates that this works even if only a subset of basis
        functions is considered.
        """
        num = len(f_n)
        jn = _create_jn(self.sym, num)
        Tn = [mp.one] * (2 * num) if use_mp else np.ones(2 * num)

        def row(xi):
            # One row of the collocation matrix: the selected basis
            # functions evaluated at collocation point `xi`.
            values = evaluate_Tn(xi, Tn, use_mp)
            if use_mp:
                return [values[j] for j in jn]
            return values[jn]

        x = self.collocation_points(num,
                                    internal_domain=True,
                                    lobatto=lobatto,
                                    use_mp=use_mp)
        # Bug fix: `map()` returns a lazy iterator in Python 3, which neither
        # mp.lu_solve nor np.array can interpret as a matrix. Materialize the
        # rows into a list of lists before solving.
        Minv = [row(xi) for xi in x]
        if use_mp:
            a_n = mp.lu_solve(Minv, f_n)
        else:
            a_n = linalg.solve(np.array(Minv), np.array(f_n, dtype=float))
        return a_n
Ejemplo n.º 3
0
 def _from_physical_space(self, a_n, lobatto, use_mp, dps):
     r"""Transform physical-space values `a_n` into basis coefficients.

     The trigonometric basis matrix is evaluated at the collocation
     points and the resulting linear system is solved, either in
     arbitrary precision (`use_mp` true) or in double precision with
     SciPy. `dps` configures the mpmath working precision via
     `self.context`.
     """
     num = len(a_n)
     with self.context(use_mp, dps):
         pts = self.collocation_points(num, internal_domain=True,
                                       lobatto=lobatto, use_mp=use_mp)
         freqs = self.create_frequencies(num)
         mat = self.evaluate_trig_mat(pts, freqs, use_mp)
         if use_mp:
             result = mp.lu_solve(mat, a_n)
         else:
             result = linalg.solve(mat, np.array(a_n, dtype=float))
     return result
Ejemplo n.º 4
0
def pade_ls_coefficients(f, e, n):
    """Find the coefficients of a Pade approximant by the least-squares method.

    Parameters:
        f - values of the complex function to approximate
        e - complex points at which the function is evaluated
        n - number of coefficients; should be less than the number of
            points in `e` (n < m)

    Returns:
        (pq, success, solver) where `pq` is the coefficient column vector
        with the fixed trailing coefficient 1 appended (or the sentinel
        value 123456.7 on failure), `success` reports whether any solve
        succeeded, and `solver` names the method used ('LU solver' or
        'QR solver').
    """
    m = len(e)
    # Bug fix: use integer division -- `n / 2` is a float in Python 3,
    # which makes the range() calls below raise TypeError and silently
    # changes the e[i]**r exponent.
    r = n // 2
    # Preparation of arrays to calc Pade coefficients
    s = mp.zeros(m, 1)
    x = mp.zeros(m, n)
    for i in range(0, m):
        s[i] = f[i] * e[i]**r
        for j in range(0, r):
            x[i, j] = e[i]**j
        for j in range(r, 2 * r):
            x[i, j] = -f[i] * e[i]**(j - r)
    # Solving the equation: aX=b, where
    # a=x, b=s,
    #                       |p|
    #                   X = | |
    #                       |q|

    success = True
    solver = 'LU solver'

    try:
        pq = mp.lu_solve(x, s)
    except ZeroDivisionError:
        # mpmath raises ZeroDivisionError when the matrix is numerically
        # singular; fall back to the more robust QR solve (which returns
        # a (solution, residual) pair).
        try:
            pq = mp.qr_solve(x, s)
            pq = pq[0]
            solver = 'QR solver'
        except ValueError:
            # QR also failed: report failure via a sentinel value.
            success = False
            pq = 123456.7
    if success is True:
        # Append the fixed leading denominator coefficient (= 1).
        pq.rows += 1
        pq[n, 0] = mp.mpc(1, 0)
    return pq, success, solver
Ejemplo n.º 5
0
def get_Tvec(ZTT, ZTS_S):
    """Solve ``ZTT * Tvec = ZTS_S`` via mpmath LU decomposition and return Tvec."""
    return mp.lu_solve(ZTT, ZTS_S)
Ejemplo n.º 6
0
 # NOTE(review): this is the interior of a larger fitting loop -- `flat_x`,
 # `rtswts`, `fit_degree`, `two`, `zero`, `acc`, `l`, `l_min`, `l_save`,
 # `l_max` and the loop that `break` exits are defined in the enclosing
 # (not visible) scope. Fits polynomials in (x - x0) to precomputed
 # roots/weights data and shrinks the number of fit points if the
 # relative error exceeds `acc`.
 # Center the abscissas to improve the conditioning of the Vandermonde fit.
 x0 = (flat_x[0]+flat_x[-1])/two
 xoff = [x-x0 for x in flat_x]
 # Vandermonde-style design matrix, highest power first (matches
 # mp.polyval's coefficient ordering).
 mat = []
 for x in xoff:
   mat.append([x**i for i in range(fit_degree, -1, -1)])
 mat = mp.matrix(mat)
 max_error = zero
 cff = []
 # fit for each root (nn) and for roots (i=0) and weights (i=1)
 for nn in range(n):
   for i in range(2):
     val = [rtswts[x][i][nn] for x in flat_x]
     try:
       # QR is preferred (least-squares for non-square mat); fall back
       # to LU if QR fails.
       fit, _ = mp.qr_solve(mat, val)
     except:
       fit = mp.lu_solve(mat, val)
     cff.append(fit.T.tolist()[0])
     # Maximum relative error of the fit over the sample points.
     error = max([abs((mp.polyval(fit, x)-v)/v) for x,v in zip(xoff, val)])
     max_error = max(max_error, error)
 # if the error is too large, the number of points in the fit may have to be reduced
 if (max_error > acc):
   # error if it's already the minimum
   if (l == l_min):
     raise
   # if the last good fit was just one point less, that's the final one
   if (l-l_save == 1):
     l = l_save
     break
   # otherwise reduce to midpoint between current and last good fit
   l_max = l-1
   l = min(int(mp.ceil((l+l_save)/two)), l-1)
Ejemplo n.º 7
0
def modS_opt_mpmath(initdof,
                    S1list,
                    dgHfunc,
                    mineigfunc,
                    opttol=1e-2,
                    modSratio=1e-2,
                    check_iter_period=20):
    """Optimize the dual function with temporarily added 'modified sources'.

    Phase 1 alternates gradient and Newton steps with backtracking line
    search, adding/adjusting modified sources whenever the iterate hits
    the duality (feasibility) boundary. Phase 2 then gradually reduces
    the modified-source amplitudes toward zero while re-optimizing.

    NOTE(review): semantics of the dual/gradient/Hessian come from the
    externally defined get_spatialProj_dualgradHess_modS, mp_dblabsdot
    and mp_conjdot -- descriptions below are inferred from this body only.

    Parameters
    ----------
    initdof : mp.matrix
        Initial degrees of freedom (column vector); copied, never
        modified in place.
    S1list : list of mp.matrix
        Source vectors, one per mode; only their row counts are used
        here to size the modified-source vectors.
    dgHfunc : callable
        Passed through to get_spatialProj_dualgradHess_modS.
    mineigfunc : callable
        mineigfunc(dof)[1] must return a minimum eigenvalue used as a
        feasibility indicator (> 0 means feasible); with
        eigvals_only=False it also returns (mode, eigenvalue, eigenvector).
    opttol : float
        Relative tolerance for the duality-gap termination test.
    modSratio : float
        Controls the magnitude assigned to a newly added modified source.
    check_iter_period : int
        How often (in iterations) to test for a stuck/converged dual.

    Returns
    -------
    (dof, dofgrad, dualval, objval)
    """

    modenum = len(S1list)

    modSlist = []
    epsSlist = [0] * modenum
    test_modSlist = []
    test_S1list = []
    test_epsSlist = [
        0
    ] * modenum  #these test_ lists are for helping to evaluate how big to set future modS
    for mode in range(modenum):
        modSlist.append(mp.matrix(S1list[mode].rows, 1))
        test_modSlist.append(mp.matrix(S1list[mode].rows, 1))
        test_S1list.append(mp.matrix(S1list[mode].rows, 1))

    dofnum = initdof.rows
    dof = initdof.copy(
    )  #.copy() because we don't want to modify the initialization array in place
    dofgrad = mp.matrix(dofnum, 1)
    dofHess = mp.matrix(dofnum, dofnum)

    tic = time.time()

    iternum = 0
    prevD = mp.inf
    alphaopt_grad = mp.one
    alphaopt_Hess = mp.one
    tol_orthoS = 1e-5
    # Dual value only (no gradient/Hessian) -- used inside the line search.
    dualfunc = lambda d: get_spatialProj_dualgradHess_modS(d, [], [],
                                                           dgHfunc,
                                                           S1list,
                                                           modSlist,
                                                           epsSlist,
                                                           get_grad=False,
                                                           get_Hess=False)

    ####PHASE 1: optimize with modified sources being added as needed####
    while True:
        iternum += 1

        print('the iteration number is:', iternum, flush=True)

        doGD = (
            iternum % 2 == 0
        )  #flag for deciding whether to do a gradient step or a Newton step

        # dofgrad / dofHess are filled in place by the call below.
        dualval = get_spatialProj_dualgradHess_modS(dof,
                                                    dofgrad,
                                                    dofHess,
                                                    dgHfunc,
                                                    S1list,
                                                    modSlist,
                                                    epsSlist,
                                                    get_grad=True,
                                                    get_Hess=(not doGD))
        objval = dualval - (dof.T * dofgrad)[0]
        abs_cstrt_sum = mp_dblabsdot(dof, dofgrad)
        print(
            'current dual, objective, absolute sum of constraint violation are',
            dualval, objval, abs_cstrt_sum)

        if np.abs(dualval - objval) < opttol * min(np.abs(
                objval), np.abs(dualval)) and abs_cstrt_sum < opttol * min(
                    np.abs(objval),
                    np.abs(dualval)):  #objective convergence termination
            break

        if iternum % check_iter_period == 0:
            print('previous dual is', prevD)
            if np.abs(prevD - dualval) < np.abs(
                    dualval
            ) * 1e-3:  #dual convergence / stuck optimization termination
                break
            prevD = dualval

        normgrad = np.linalg.norm(dofgrad)
        if not doGD:
            # Newton direction from the freshly computed Hessian.
            Ndir = mp.lu_solve(dofHess, -dofgrad)
            normNdir = np.linalg.norm(Ndir)
            pdir = Ndir / normNdir
            print('do regular Hessian step')
            print('normNdir is', normNdir)
            print('normgrad is', normgrad)
            print('Ndir dot grad is', np.dot(Ndir, dofgrad))
        if doGD:
            print('do regular gradient step')
            pdir = -dofgrad / normgrad
            print('normgrad is', normgrad)

        c1 = 0.5
        c2 = 0.7  #the parameters for doing line search
        if doGD:
            alpha_start = alphaopt_grad
        else:
            alpha_start = alphaopt_Hess
        alpha = alpha_start

        # Shrink the step until the new iterate is strictly feasible.
        print('alpha before feasibility backtrack', alpha)
        while mineigfunc(dof + alpha * pdir)[1] <= 0:
            alpha *= c2

        alpha_feas = alpha
        print('alpha before backtracking is', alpha_feas)
        alphaopt = alpha
        Dopt = mp.inf
        # Backtracking line search; every exit path leaves alphaopt == alpha.
        while True:
            tmp = dualfunc(dof + alpha * pdir)
            if tmp < Dopt:  #the dual is still decreasing as we backtrack, continue
                Dopt = tmp
                alphaopt = alpha
            else:
                alphaopt = alpha
                break
            if tmp <= dualval + c1 * alpha * np.dot(
                    pdir, dofgrad):  #Armijo backtracking condition
                alphaopt = alpha
                break
            alpha *= c2

        if alphaopt / alpha_start > (
                c2 + 1) / 2:  #in this case can start with bigger step
            alpha_newstart = alphaopt * 2
        else:
            alpha_newstart = alphaopt
            if alpha_feas / alpha_start < (
                    c2 + 1
            ) / 2 and alphaopt / alpha_feas > (
                    c2 + 1
            ) / 2:  #this means we encountered feasibility wall and backtracking linesearch didn't reduce step size, we should add mod source
                singular_dof = dof + (
                    alpha_feas /
                    c2) * pdir  #dof that is roughly on duality boundary
                singular_mode, singular_eigw, singular_eigv = mineigfunc(
                    singular_dof, eigvals_only=False)
                if epsSlist[singular_mode] <= 0:
                    print('new modS aded at mode', singular_mode)
                    # Probe with unit amplitude to calibrate the real epsS.
                    test_modSlist[singular_mode] = singular_eigv
                    test_epsSlist[singular_mode] = 1.0
                    modval = np.abs(
                        get_spatialProj_dualgradHess_modS(singular_dof, [], [],
                                                          dgHfunc,
                                                          test_S1list,
                                                          test_modSlist,
                                                          test_epsSlist,
                                                          get_grad=False,
                                                          get_Hess=False))
                    modSlist[singular_mode] = singular_eigv
                    epsSlist[singular_mode] = mp.sqrt(modSratio *
                                                      np.abs(dualval / modval))
                    # Reset the probe lists for future calibrations.
                    test_modSlist[singular_mode] = mp.zeros(
                        S1list[singular_mode].rows, 1)
                    test_epsSlist[singular_mode] = 0
                elif np.abs(mp_conjdot(singular_eigv,
                                       modSlist[singular_mode])) < tol_orthoS:
                    print('changed modS at mode', singular_mode)
                    modSlist[singular_mode] = mp.sqrt(0.5) * (
                        modSlist[singular_mode] + singular_eigv)

        print('stepsize alphaopt is', alphaopt, '\n')
        # NOTE(review): steps by `alpha`; at every line-search exit
        # alphaopt == alpha, so this equals alphaopt.
        dof += alpha * pdir
        if doGD:
            alphaopt_grad = alpha_newstart
        else:
            alphaopt_Hess = alpha_newstart

    ####NOW WE GRADUALLY BRING THE MAGNITUDES OF THE MODS DOWN TO ZERO####
    alphaopt_grad = max(alphaopt_grad, 5e-5 * mp.one)
    alphaopt_Hess = max(alphaopt_Hess, 5e-5 * mp.one)
    minalphatol = 1e-10
    olddualval = dualval
    reductFactor = 1e-1
    reductCount = 1
    lastReduct = False

    while True:  #gradual reduction of modified source amplitude, outer loop
        if not lastReduct:
            for i in range(len(epsSlist)):
                epsSlist[i] *= reductFactor
            modSratio *= reductFactor
        else:
            # Final pass: turn the modified sources off completely.
            for i in range(len(epsSlist)):
                epsSlist[i] = 0

        iternum = 0
        # Inner loop: same alternating gradient/Newton optimization as
        # phase 1, at the current (reduced) modified-source amplitude.
        while True:
            iternum += 1

            print('reducing modS now, at reduction #',
                  reductCount,
                  'the iteration number is:',
                  iternum,
                  flush=True)

            doGD = (
                iternum % 2 == 0
            )  #flag for deciding whether to do a gradient step or a Newton step

            dualval = get_spatialProj_dualgradHess_modS(dof,
                                                        dofgrad,
                                                        dofHess,
                                                        dgHfunc,
                                                        S1list,
                                                        modSlist,
                                                        epsSlist,
                                                        get_grad=True,
                                                        get_Hess=(not doGD))
            objval = dualval - (dof.T * dofgrad)[0]
            abs_cstrt_sum = mp_dblabsdot(dof, dofgrad)
            print(
                'current dual, objective, absolute sum of constraint violation are',
                dualval, objval, abs_cstrt_sum)

            if np.abs(dualval - objval) < opttol * min(np.abs(
                    objval), np.abs(dualval)) and abs_cstrt_sum < opttol * min(
                        np.abs(objval),
                        np.abs(dualval)):  #objective convergence termination
                break

            if iternum % check_iter_period == 0:
                print('previous dual is', prevD)
                if np.abs(prevD - dualval) < np.abs(
                        dualval
                ) * 1e-3:  #dual convergence / stuck optimization termination
                    break
                if alphaopt_grad < minalphatol and alphaopt_Hess < minalphatol:
                    alphaopt_grad = 5e-5
                    alphaopt_Hess = 5e-5  #periodically boost the max step size since we are gradually turning off the modified sources
                    ##SHOULD MAKE THIS MORE TRANSPARENT IN THE FUTURE##
                prevD = dualval

            normgrad = np.linalg.norm(dofgrad)
            if not doGD:
                Ndir = mp.lu_solve(dofHess, -dofgrad)
                normNdir = np.linalg.norm(Ndir)
                pdir = Ndir / normNdir
                print('do regular Hessian step')
                print('normNdir is', normNdir)
                print('normgrad is', normgrad)
                print('Ndir dot grad is', np.dot(Ndir, dofgrad))
            if doGD:
                print('do regular gradient step')
                pdir = -dofgrad / normgrad
                print('normgrad is', normgrad)

            c1 = 0.5
            c2 = 0.7  #the parameters for doing line search
            if doGD:
                alpha_start = alphaopt_grad
            else:
                alpha_start = alphaopt_Hess
            alpha = alpha_start

            print('alpha before feasibility backtrack', alpha)
            while mineigfunc(dof + alpha * pdir)[1] <= 0:
                alpha *= c2

            alpha_feas = alpha
            print('alpha before backtracking is', alpha_feas)
            alphaopt = alpha
            Dopt = mp.inf
            while True:
                tmp = dualfunc(dof + alpha * pdir)
                if tmp < Dopt:  #the dual is still decreasing as we backtrack, continue
                    Dopt = tmp
                    alphaopt = alpha
                else:
                    alphaopt = alpha
                    break
                if tmp <= dualval + c1 * alpha * np.dot(
                        pdir, dofgrad):  #Armijo backtracking condition
                    alphaopt = alpha
                    break
                alpha *= c2

            if alphaopt / alpha_start > (
                    c2 + 1) / 2:  #in this case can start with bigger step
                alpha_newstart = alphaopt * 2
            else:
                alpha_newstart = alphaopt
                if (not lastReduct) and alpha_feas / alpha_start < (
                        c2 + 1
                ) / 2 and alphaopt / alpha_feas > (
                        c2 + 1
                ) / 2:  #don't bother to modify sources if this is the final reduction iteration
                    singular_dof = dof + (
                        alpha_feas /
                        c2) * pdir  #dof that is roughly on duality boundary
                    singular_mode, singular_eigw, singular_eigv = mineigfunc(
                        singular_dof, eigvals_only=False)
                    if epsSlist[singular_mode] <= 0:
                        print('new modS aded at mode', singular_mode)
                        test_modSlist[singular_mode] = singular_eigv
                        test_epsSlist[singular_mode] = mp.one
                        modval = np.abs(
                            get_spatialProj_dualgradHess_modS(singular_dof, [],
                                                              [],
                                                              dgHfunc,
                                                              test_S1list,
                                                              test_modSlist,
                                                              test_epsSlist,
                                                              get_grad=False,
                                                              get_Hess=False))
                        modSlist[singular_mode] = singular_eigv
                        epsSlist[singular_mode] = mp.sqrt(
                            modSratio * np.abs(dualval / modval))
                        test_modSlist[singular_mode] = mp.zeros(
                            S1list[singular_mode].rows, 1)
                        test_epsSlist[singular_mode] = 0
                    # NOTE(review): phase 1 uses mp_conjdot here while this
                    # phase uses np.vdot -- confirm the two agree for
                    # mp.matrix arguments.
                    elif np.abs(np.vdot(singular_eigv,
                                        modSlist[singular_mode])) < tol_orthoS:
                        print('changed modS at mode', singular_mode)
                        modSlist[singular_mode] = mp.sqrt(0.5) * (
                            modSlist[singular_mode] + singular_eigv)

            print('stepsize alphaopt is', alphaopt, '\n')
            dof += alpha * pdir
            if doGD:
                alphaopt_grad = alpha_newstart
            else:
                alphaopt_Hess = alpha_newstart

        if lastReduct:
            break
        # Once the dual stops moving between reductions, do one final
        # pass with the modified sources fully removed, then stop.
        if np.abs(olddualval - dualval) < opttol * np.abs(dualval):
            lastReduct = True
        olddualval = dualval
        reductCount += 1

    print('time elapsed:', time.time() - tic, flush=True)
    return dof, dofgrad, dualval, objval
Ejemplo n.º 8
0
def speedup_Green_Taylor_Arnoldi_RgNmn_Uconverge(n,
                                                 k,
                                                 R,
                                                 klim=10,
                                                 Taylor_tol=1e-8,
                                                 invchi=0.1,
                                                 Unormtol=1e-4,
                                                 veclim=3,
                                                 delveclim=2,
                                                 plotVectors=False):
    """Set up the Arnoldi matrix and associated unit vector lists for any given RgN.

    Builds Taylor-polynomial representations of the regular (RgN) and
    outgoing (ImN) N-wave radial parts, then runs Arnoldi iterations
    (speedup_Green_Taylor_Arnoldi_step_RgNmn) until the norm of the first
    column of U converges to within `Unormtol`, extending `veclim` by
    `delveclim` whenever it has not yet converged.

    Returns (rmRgN_Bpol, rmRgN_Ppol, rnImN_Bpol, rnImN_Ppol, unitrmnBpols,
    unitrmnPpols, Uinv) -- everything necessary to potentially extend the
    size of the Uinv matrix later.
    """
    #ti = time.time()

    kR = mp.mpf(k * R)
    pow_jndiv, coe_jndiv, pow_djn, coe_djn, pow_yndiv, coe_yndiv, pow_dyn, coe_dyn = get_Taylor_jndiv_djn_yndiv_dyn(
        n, kR, klim, tol=Taylor_tol)
    #print(time.time()-ti,'0')

    #print(len(pow_jndiv))
    pow_jndiv = np.array(pow_jndiv)
    pow_djn = np.array(pow_djn)
    nfac = mp.sqrt(n * (n + 1))
    # mp.one * np.zeros(...) creates an object array of mpf zeros.
    rmRgN_Bpol = mp.one * np.zeros(pow_jndiv[-1] + 1 - (n - 1))
    rmRgN_Ppol = mp.one * np.zeros(pow_jndiv[-1] + 1 - (n - 1))
    rmRgN_Bpol[pow_jndiv - (n - 1)] += coe_jndiv
    rmRgN_Bpol[pow_djn - (n - 1)] += coe_djn
    rmRgN_Ppol[pow_jndiv - (n - 1)] += coe_jndiv
    rmRgN_Ppol *= nfac

    rmRgN_Bpol = po.Polynomial(rmRgN_Bpol)
    rmRgN_Ppol = po.Polynomial(rmRgN_Ppol)

    # Bug fix: the np.complex alias was deprecated in NumPy 1.20 and removed
    # in 1.24; it was simply the builtin `complex`, which is what we use here.
    rnImN_Bpol = mp.one * np.zeros(pow_yndiv[-1] + 1 + (n + 2),
                                   dtype=complex)
    rnImN_Ppol = mp.one * np.zeros(pow_yndiv[-1] + 1 + (n + 2),
                                   dtype=complex)
    pow_yndiv = np.array(pow_yndiv)
    pow_dyn = np.array(pow_dyn)
    rnImN_Bpol[(n + 2) + pow_yndiv] += coe_yndiv
    rnImN_Bpol[(n + 2) + pow_dyn] += coe_dyn
    rnImN_Ppol[(n + 2) + pow_yndiv] += coe_yndiv
    rnImN_Ppol *= nfac

    rnImN_Bpol = po.Polynomial(rnImN_Bpol)
    rnImN_Ppol = po.Polynomial(rnImN_Ppol)

    if plotVectors:
        plot_rmnNpol(n, rmRgN_Bpol.coef, rmRgN_Ppol.coef, kR * 0.01, kR)

    unitrmnBpols = []
    unitrmnPpols = []
    #print(time.time()-ti,'1')
    # Normalize the starting vector of the Arnoldi iteration.
    RgNnorm = mp.sqrt(rmnNnormsqr_Taylor(n, k, R, rmRgN_Bpol, rmRgN_Ppol))
    rmnBpol = rmRgN_Bpol / RgNnorm
    rmnPpol = rmRgN_Ppol / RgNnorm
    unitrmnBpols.append(rmnBpol)
    unitrmnPpols.append(rmnPpol)
    #print(time.time()-ti,'2')
    rmnGBpol, rmnGPpol = rmnGreen_Taylor_Nmn_vec(n, k, R, rmRgN_Bpol,
                                                 rmRgN_Ppol, rnImN_Bpol,
                                                 rnImN_Ppol, rmnBpol, rmnPpol)
    rmnGBpol = rmnGBpol.cutdeg(rmRgN_Bpol.degree())
    rmnGPpol = rmnGPpol.cutdeg(rmRgN_Bpol.degree())
    #print(time.time()-ti,'3')
    prefactpow, Upol = rmnNpol_dot(2 * n - 2, rmnBpol, rmnPpol, rmnGBpol,
                                   rmnGPpol)
    Uinv = mp.matrix([[invchi - kR**prefactpow * po.polyval(kR, Upol) / k**3]])
    unitrmnBpols.append(rmnGBpol)
    unitrmnPpols.append(rmnGPpol)  #set up beginning of Arnoldi iteration
    #print(time.time()-ti,'4')
    b = mp.matrix([mp.one])
    prevUnorm = 1 / Uinv[0, 0]

    i = 1
    while i < veclim:
        # NOTE(review): the step function appears to grow Uinv and the unit
        # vector lists in place -- confirm against its definition.
        speedup_Green_Taylor_Arnoldi_step_RgNmn(n,
                                                k,
                                                R,
                                                invchi,
                                                rmRgN_Bpol,
                                                rmRgN_Ppol,
                                                rnImN_Bpol,
                                                rnImN_Ppol,
                                                unitrmnBpols,
                                                unitrmnPpols,
                                                Uinv,
                                                plotVectors=plotVectors)
        i += 1
        if i == veclim:
            #solve for first column of U and see if its norm has converged
            b.rows = i
            x = mp.lu_solve(Uinv, b)
            Unorm = mp.norm(x)
            if np.abs(prevUnorm - Unorm) > np.abs(Unorm) * Unormtol:
                veclim += delveclim
                prevUnorm = Unorm
            #else: print(x)

    if veclim == 1:
        x = mp.one / Uinv[0, 0]
    #print(time.time()-ti,'5')
    #returns everything necessary to potentially extend size of Uinv matrix later
    return rmRgN_Bpol, rmRgN_Ppol, rnImN_Bpol, rnImN_Ppol, unitrmnBpols, unitrmnPpols, Uinv
Ejemplo n.º 9
0
def speedup_Green_Taylor_Arnoldi_RgMmn_Uconverge(n,
                                                 k,
                                                 R,
                                                 klim=10,
                                                 Taylor_tol=1e-8,
                                                 invchi=0.1,
                                                 Unormtol=1e-4,
                                                 veclim=3,
                                                 delveclim=2,
                                                 plotVectors=False):
    """Set up the Arnoldi matrix and associated unit vector lists for any given RgM.

    Builds Taylor-polynomial representations of the regular (RgM) and
    outgoing (ImM) radial M-wave parts, then runs Arnoldi iterations
    (speedup_Green_Taylor_Arnoldi_step_RgMmn) until the norm of the first
    column of U converges to within `Unormtol`, extending `veclim` by
    `delveclim` whenever it has not yet converged.

    Returns (rmnRgM, rnImM, unitrmnMpols, Uinv) -- everything necessary to
    potentially extend the size of the Uinv matrix later.
    """

    kR = mp.mpf(k * R)
    pow_jn, coe_jn, pow_yn, coe_yn = get_Taylor_jn_yn(n, kR, klim, Taylor_tol)
    #print(len(pow_jn))
    pow_jn = np.array(pow_jn)
    coe_jn = np.array(coe_jn)
    # mp.one * np.zeros(...) creates an object array of mpf zeros; scatter the
    # Taylor coefficients into it by (shifted) power.
    rmnRgM = mp.one * np.zeros(pow_jn[-1] + 1 - n)
    rmnRgM[pow_jn - n] += coe_jn
    rmnRgM = po.Polynomial(rmnRgM)

    pow_yn = np.array(pow_yn)
    coe_yn = np.array(coe_yn)
    rnImM = mp.one * np.zeros(pow_yn[-1] + 1 + n + 1)
    rnImM[pow_yn + n + 1] += coe_yn
    rnImM = po.Polynomial(rnImM)

    if plotVectors:
        plot_rmnMpol(n, rmnRgM.coef, 0.01 * kR, kR)

    unitrmnMpols = []
    # Normalize the starting vector of the Arnoldi iteration.
    RgMnorm = mp.sqrt(rmnMnormsqr_Taylor(n, k, R, rmnRgM))
    rmnMpol = rmnRgM / RgMnorm
    unitrmnMpols.append(rmnMpol)

    rmnGMpol = rmnGreen_Taylor_Mmn_vec(n, k, R, rmnRgM, rnImM, rmnMpol)
    rmnGMpol = rmnGMpol.cutdeg(rmnRgM.degree())
    prefactpow, Upol = rmnMpol_dot(2 * n, rmnMpol, rmnGMpol)
    Uinv = mp.matrix([[invchi - kR**prefactpow * po.polyval(kR, Upol) / k**3]])
    unitrmnMpols.append(rmnGMpol)  #set up beginning of Arnoldi iteration

    b = mp.matrix([mp.one])
    prevUnorm = 1 / Uinv[0, 0]

    i = 1
    while i < veclim:
        # NOTE(review): the step function appears to grow Uinv and
        # unitrmnMpols in place -- confirm against its definition.
        speedup_Green_Taylor_Arnoldi_step_RgMmn(n,
                                                k,
                                                R,
                                                invchi,
                                                rmnRgM,
                                                rnImM,
                                                unitrmnMpols,
                                                Uinv,
                                                plotVectors=plotVectors)
        i += 1
        if i == veclim:
            #solve for first column of U and see if its norm has converged
            b.rows = i
            x = mp.lu_solve(Uinv, b)
            Unorm = mp.norm(x)
            if np.abs(prevUnorm - Unorm) > np.abs(Unorm) * Unormtol:
                veclim += delveclim
                prevUnorm = Unorm
            #else: print(x)

    if veclim == 1:
        x = mp.one / Uinv[0, 0]
    """
    print(x)
    print(mp.norm(x))
    plt.figure()
    plt.matshow(np.abs(np.array((Uinv**-1).tolist(),dtype=np.complex)))
    plt.show()
    #EUdUinv = mpmath.eigh(Uinv.transpose_conj()*Uinv, eigvals_only=True)
    #print(EUdUinv)
    """
    #returns everything necessary to potentially extend size of Uinv matrix later
    return rmnRgM, rnImM, unitrmnMpols, Uinv
Ejemplo n.º 10
0
def shell_Green_grid_Arnoldi_RgandImMmn_Uconverge_mp(n,k,R1,R2, invchi, gridpts=1000, Unormtol=1e-10, veclim=3, delveclim=2, maxveclim=40, plotVectors=False):
    """Grid-based Arnoldi iteration for shell-Green's-function M waves, in mpmath precision.

    Discretizes the radial interval [R1, R2] on `gridpts` points, builds
    orthonormalized RgM/ImM starting vectors, then iterates
    shell_Green_grid_Arnoldi_RgandImMmn_step_mp until the norm of the
    first column of U = invchi*I - Gmat converges to within `Unormtol`
    (growing `veclim` by `delveclim`, capped at `maxveclim`).

    Returns (rgrid, rsqrgrid, rdiffgrid, RgMgrid, ImMgrid, unitMvecs,
    Uinv, Gmat).
    """
    np.seterr(over='raise',under='raise',invalid='raise')
    #for high angular momentum number could have floating point issues; in this case, raise error. Outer method will catch the error and use the mpmath version instead
    rgrid = np.linspace(R1,R2,gridpts)
    rsqrgrid = rgrid**2
    rdiffgrid = np.diff(rgrid)
    """
    RgMgrid = sp.spherical_jn(n, k*rgrid) #the argument for radial part of spherical waves is kr
    ImMgrid = sp.spherical_yn(n, k*rgrid)
    RgMgrid = RgMgrid.astype(np.complex)
    ImMgrid = ImMgrid.astype(np.complex)
    
    RgMgrid = complex_to_mp(RgMgrid)
    ImMgrid = complex_to_mp(ImMgrid)
    """
    RgMgrid = mp_vec_spherical_jn(n, k*rgrid)
    ImMgrid = mp_vec_spherical_yn(n, k*rgrid)
    
    # Normalize RgM, then Gram-Schmidt ImM against it to get the second
    # orthonormal starting vector.
    vecRgMgrid = RgMgrid / mp.sqrt(rgrid_Mmn_normsqr(RgMgrid, rsqrgrid,rdiffgrid))
    
    vecImMgrid = ImMgrid - rgrid_Mmn_vdot(vecRgMgrid, ImMgrid, rsqrgrid,rdiffgrid)*vecRgMgrid
    vecImMgrid /= mp.sqrt(rgrid_Mmn_normsqr(vecImMgrid,rsqrgrid,rdiffgrid))
    
    if plotVectors:
        rgrid_Mmn_plot(vecRgMgrid,rgrid)
        rgrid_Mmn_plot(vecImMgrid,rgrid)
    
    unitMvecs = [vecRgMgrid,vecImMgrid]
    
    # Initial 2x2 projection of the Green's operator onto the two starting vectors.
    GvecRgMgrid = shell_Green_grid_Mmn_vec_mp(n,k, rsqrgrid,rdiffgrid, RgMgrid,ImMgrid, vecRgMgrid)
    GvecImMgrid = shell_Green_grid_Mmn_vec_mp(n,k, rsqrgrid,rdiffgrid, RgMgrid,ImMgrid, vecImMgrid)
    Gmat = mp.zeros(2,2)
    Gmat[0,0] = rgrid_Mmn_vdot(vecRgMgrid, GvecRgMgrid, rsqrgrid,rdiffgrid)
    Gmat[0,1] = rgrid_Mmn_vdot(vecRgMgrid, GvecImMgrid, rsqrgrid,rdiffgrid)
    Gmat[1,0] = Gmat[0,1]  # Gmat is filled in as symmetric
    Gmat[1,1] = rgrid_Mmn_vdot(vecImMgrid,GvecImMgrid, rsqrgrid,rdiffgrid)
    Uinv = mp.eye(2)*invchi-Gmat

    unitMvecs.append(GvecRgMgrid)
    unitMvecs.append(GvecImMgrid) #append unorthogonalized, unnormalized Arnoldi vector for further iterations
    
    b = mp.matrix([mp.one])
    # NOTE(review): seeded with the scalar 1/Uinv[0,0] while later values are
    # vector norms -- looks like a rough first guess; confirm intent.
    prevUnorm = 1 / Uinv[0,0]
    
    i=2
    while i<veclim:
        Gmat = shell_Green_grid_Arnoldi_RgandImMmn_step_mp(n,k,invchi, rgrid,rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, unitMvecs, Gmat, plotVectors=plotVectors)
        i += 1
        print(i)
        if i==maxveclim:
            break
        if i==veclim:
            #solve for first column of U and see if its norm has converged
            Uinv = mp.eye(Gmat.rows)*invchi-Gmat
            b.rows = i
            x = mp.lu_solve(Uinv, b)
            Unorm = mp.norm(x)
            print('Unorm', Unorm, flush=True)
            if np.abs(prevUnorm-Unorm) > np.abs(Unorm)*Unormtol:
                veclim += delveclim
                prevUnorm = Unorm
    
    return rgrid,rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, unitMvecs, Uinv, Gmat
Ejemplo n.º 11
0
    def fit(self, SS, n, constant_bias=1, plot=False):
        """Fit the parameter vector `self.theta` by damped Newton iterations.

        NOTE(review): semantics inferred from the visible body only --
        _compute_log_likelihood, mp_moments and mp_compute_poly are
        defined elsewhere; confirm against them.

        Parameters:
            SS: sufficient statistics array; SS.size - 1 sets the degree k.
            n: number of data samples (scales gradient and Hessian).
            constant_bias: fixed value for theta[0]; if None, theta[0] is
                initialized to 1 and optimized along with the rest.
            plot: unused in the visible body.
        """

        k = SS.size - 1

        self.theta = np.array([mpf(0) for i in range(k + 1)])
        if constant_bias is None:
            self.theta[0] = mpf(1)
        else:
            self.theta[0] = mpf(constant_bias)
        self.current_log_likelihood = None

        for iteration in range(config.logpoly.Newton_max_iter):

            if config.logpoly.verbose:
                print('.')
                print('                                 iteration #',
                      iteration)
                sys.stdout.flush()

            if iteration == 0:
                self.current_log_likelihood, self.logZ, roots = _compute_log_likelihood(
                    SS, self.theta, n)

            ## Compute sufficient statistics and constructing the gradient and the Hessian

            # When theta[0] is fixed, it is excluded from the optimized
            # dimensions.
            if constant_bias is None:
                grad_dimensions = np.arange(k + 1)
            else:
                grad_dimensions = np.arange(1, k + 1)

            #######################

            if config.logpoly.verbose:
                print('compute_Expectations start')
                sys.stdout.flush()

            def func(x):
                # exp(poly(x) - logZ), evaluated elementwise in mpmath
                # precision via the base**exponent trick.
                return (mpmath.exp(1) *
                        np.ones(x.shape))**(mp_compute_poly(x, self.theta) -
                                            self.logZ)

            tmp = mp_moments(
                func, 2 * k + 1,
                np.unique(np.concatenate([np.array([mpf(0)]), roots])),
                config.logpoly.x_lbound, config.logpoly.x_ubound)
            ESS = mpmath.matrix(tmp[grad_dimensions])

            H = mpmath.matrix(len(grad_dimensions))
            for i in range(len(grad_dimensions)):
                for j in range(len(grad_dimensions)):
                    H[i, j] = tmp[grad_dimensions[i] + grad_dimensions[j]]

            H = -n * (H - (ESS * ESS.transpose()))
            grad = mpmath.matrix(SS[grad_dimensions]) - n * ESS

            if config.logpoly.verbose:
                print('compute_Expectations finish')
                sys.stdout.flush()

            if config.logpoly.verbose:
                print('solve_inversion start')
                sys.stdout.flush()

            # Newton direction; lambda2 is the Newton decrement used for
            # the stopping criterion below.
            delta_theta_subset = mp.lu_solve(H, -grad)
            lambda2 = (grad.transpose() * delta_theta_subset)[0]
            delta_theta_subset = np.array(delta_theta_subset)
            delta_theta = np.array([mpf(0) for _ in range(k + 1)])
            delta_theta[grad_dimensions] = delta_theta_subset

            if config.logpoly.verbose:
                print('solve_inversion finish')
                sys.stdout.flush()

            if config.logpoly.verbose:
                print('current_log_likelihood = ', self.current_log_likelihood)
                print('lambda_2 / 2 = ', lambda2 / 2)
                sys.stdout.flush()

            if lambda2 < 0:
                warnings.warn('lambda_2 < 0')
            if lambda2 / 2 < n * config.logpoly.theta_epsilon:
                if config.logpoly.verbose:
                    print('%')
                    sys.stdout.flush()
                break

            ## Line search
            # NOTE(review): Armijo-style backtracking with no iteration cap --
            # could loop for a long time if the condition never holds.
            lam = 1
            alpha = 0.49
            beta = 0.5

            while True:
                tmp_log_likelihood, tmp_logZ, tmp_roots = _compute_log_likelihood(
                    SS, self.theta + lam * delta_theta, n)

                if tmp_log_likelihood < self.current_log_likelihood + alpha * lam * np.inner(
                        grad, delta_theta_subset):
                    if config.logpoly.verbose:
                        print('+', end='')
                        sys.stdout.flush()
                    lam = lam * beta
                else:
                    break

            if tmp_log_likelihood <= self.current_log_likelihood:
                if config.logpoly.verbose:
                    print('*')
                    sys.stdout.flush()
                break

            self.theta = self.theta + lam * delta_theta
            self.current_log_likelihood = tmp_log_likelihood
            self.logZ = tmp_logZ
            roots = tmp_roots