Example #1
def minimal_residual(Afun, B, x0=None, par=None):
    fast = par.get('fast')

    res = {'norm_res': [], 'kit': 0}
    if x0 is None:
        x = B * (1. / par['alpha'])
    else:
        x = x0
    x_sol = x  # solution with minimal residuum

    if 'norm' not in par:
        norm = lambda X: X.norm(normal_domain=False)
    else:
        norm = par['norm']

    residuum = B - Afun(x)
    res['norm_res'].append(norm(residuum))
    beta = Afun(residuum.truncate(tol=par['tol_truncate'], fast=fast))

    M = SparseTensor(kind=x.kind, val=np.ones(x.N.size * [
        3,
    ]), rank=1)  # constant field
    FM = M.fourier().enlarge(x.N)
    minres_fail_counter = 0

    while (res['norm_res'][-1] > par['tol'] and res['kit'] < par['maxiter']):
        res['kit'] += 1

        if par['approx_omega']:
            omega = res['norm_res'][-1] / norm(beta)  # approximate omega
        else:
            omega = beta.inner(residuum) / norm(beta)**2  # exact formula

        x = (x + residuum * omega)

        # setting correct mean
        x = (-FM * x.mean() + x).truncate(rank=par['rank'],
                                          tol=par['tol_truncate'],
                                          fast=fast)

        residuum = B - Afun(x)

        res['norm_res'].append(norm(residuum))

        if res['norm_res'][-1] <= np.min(res['norm_res'][:-1]):
            x_sol = x
        else:
            minres_fail_counter += 1
            if minres_fail_counter >= par['minres_fails']:
                print(
                    'Residual norm has increased {} times -> stopping the solver.'.format(
                        par['minres_fails']))
                break

        beta = Afun(
            residuum.truncate(tol=min([res['norm_res'][-1] / 1e1, par['tol']]),
                              fast=fast))

    return x_sol, res
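For reference, the update above (beta = Afun(residuum), omega = beta.inner(residuum) / norm(beta)**2) is the classical minimal-residual iteration. Below is a minimal dense-NumPy sketch of the same update on an ordinary linear system, leaving out the low-rank truncation and the zero-mean correction; the matrix A, vector b and the solver name are illustrative only.

import numpy as np

def minimal_residual_dense(A, b, x0=None, tol=1e-8, maxiter=500):
    x = np.zeros_like(b) if x0 is None else x0
    for _ in range(maxiter):
        r = b - A @ x                        # residual
        if np.linalg.norm(r) < tol:
            break
        beta = A @ r
        omega = (beta @ r) / (beta @ beta)   # exact step size, as in the 'exact formula' branch
        x = x + omega * r
    return x

rng = np.random.default_rng(0)
M = rng.standard_normal((50, 50))
A = M.T @ M + 50 * np.eye(50)                # symmetric positive definite
b = rng.standard_normal(50)
x = minimal_residual_dense(A, b)
print(np.linalg.norm(b - A @ x))             # close to the tolerance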
Example #2
    def test_qtt_fft(self):
        print('\nChecking QTT FFT functions ...')
        L1 = 3
        L2 = 4
        L3 = 5
        tol = 1e-6
        # v=np.random.rand(2**L1,2**L2)
        v = np.array(list(range(1, 2**(L1 + L2 + L3) + 1)))
        v = np.sin(v) / v  # to increase the rank

        v1 = np.reshape(v, (2**L1, 2**L2, 2**L3), order='F')
        # vFFT= DFT.fftnc(v, [2**L1, 2**L2])
        # start = time.clock()
        v1fft = np.fft.fftn(v1) / 2**(L1 + L2 + L3)
        # print("FFT time:     ", (time.clock() - start))

        vq = np.reshape(v, [2] * (L1 + L2 + L3), order='F')  # a quantic tensor
        vqtt = SparseTensor(kind='tt', val=vq)  # a qtt

        # start = time.clock()
        vqf = vqtt.qtt_fft([L1, L2, L3], tol=tol)
        # print("QTT_FFT time: ", (time.clock() - start))

        vqf_full = vqf.full().reshape((2**L3, 2**L2, 2**L1), order='F')

        print("discrepancy:  ", norm(vqf_full.T - v1fft) / norm(v1fft))
        print("maximum rank of the qtt is:", np.max(vqtt.r))

        self.assertTrue(norm(vqf_full.T - v1fft) / norm(v1fft) < 3 * tol)

        #        qtt_fft_time= timeit.timeit('vqf= vqtt.fourier() ', number=50,
        #              setup="from ffthompy.tensorsLowRank.objects import SparseTensor; import numpy as np; L1=9; L2=8; L3=7; tol=1e-6; v1=np.array(range(1,2**(L1+L2+L3)+1));  v1=np.sin(v1)/v1; vq= np.reshape(v1,[2]*(L1+L2+L3),order='F'); vqtt= SparseTensor(kind='tt', val=vq )")
        #        print("QTT FFT time:",qtt_fft_time)

        tt_fft_time = timeit.timeit(
            'v1f= v1tt.fourier()',
            number=10,
            setup=
            "from ffthompy.tensorsLowRank.objects import SparseTensor; import numpy as np; L1=9; L2=8; L3=7; v1=np.array(range(1,2**(L1+L2+L3)+1)); v1=np.sin(v1)/v1; v1tt= SparseTensor(kind='tt', val=v1,eps=1e-6 )"
        )
        print("  TT FFT time:", tt_fft_time)

        qtt_fft_time = timeit.timeit(
            'vqf= vqtt.qtt_fft( [L1,L2,L3],tol= tol) ',
            number=10,
            setup=
            "from ffthompy.tensorsLowRank.objects import SparseTensor; import numpy as np; L1=9; L2=8; L3=7; tol=1e-6; v1=np.array(range(1,2**(L1+L2+L3)+1)); v1=np.sin(v1)/v1; vq= np.reshape(v1,[2]*(L1+L2+L3),order='F'); vqtt= SparseTensor(kind='tt', val=vq )"
        )
        print("QTT FFT time:", qtt_fft_time)

        self.assertTrue(qtt_fft_time < 0.1 * tt_fft_time)

        print('...ok')
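The reshapes in this test are the core of the quantics (QTT) idea: a vector of length 2**(L1+L2+L3) is viewed as a tensor with L1+L2+L3 binary indices before the TT decomposition. A small stand-alone NumPy sketch of that reshaping, without the library:

import numpy as np

L1, L2, L3 = 3, 4, 5
v = np.arange(1, 2**(L1 + L2 + L3) + 1, dtype=float)
v = np.sin(v) / v                                    # same test vector as above

vq = np.reshape(v, [2] * (L1 + L2 + L3), order='F')  # quantics tensor: 12 binary axes
assert vq.ndim == L1 + L2 + L3
assert np.array_equal(vq.reshape(-1, order='F'), v)  # the reshape is lossless

# reference FFT with the same 1/2**(L1+L2+L3) normalisation as in the test
v1 = np.reshape(v, (2**L1, 2**L2, 2**L3), order='F')
v1fft = np.fft.fftn(v1) / 2**(L1 + L2 + L3)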
Example #3
    def test_tucker(self):
        print('\nChecking tucker ...')

        a = SparseTensor(kind='tucker', val=self.T3d)
        self.assertAlmostEqual(norm(a.full() - self.T3d), 0)

        b = SparseTensor(kind='tucker', val=self.T3dOther)

        self.assertAlmostEqual(norm((a + b).full() - self.T3d - self.T3dOther),
                               0)
        self.assertAlmostEqual(norm((a * b).full() - self.T3d * self.T3dOther),
                               0)
        print('...ok')
Example #4
def get_preconditioner_sparse(N, pars):
    hGrad = grad_tensor(N, pars.Y, fft_form='c')
    k2 = np.einsum('i...,i...', hGrad.val, np.conj(hGrad.val)).real
    k2[mean_index(N, fft_form='c')] = 1.
    Prank = np.min([10, N[0] - 1])
    val = 1. / k2
    Ps = SparseTensor(name='Ps',
                      kind=pars.kind,
                      val=val,
                      rank=Prank,
                      Fourier=True,
                      fft_form='c')
    Ps.set_fft_form()
    return Ps
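The preconditioner built here is the inverse-Laplacian symbol 1/|k|^2 with the zero-frequency entry set to one. A rough dense-NumPy sketch of that scalar field, assuming a unit cell (Y = 1) and the centred ('c') frequency layout, so the zero frequency sits at index N//2:

import numpy as np

N = (15, 15)
# centred integer frequencies, mimicking Grid.get_xil(N, Y, fft_form='c') with Y = 1
xi = [np.fft.fftshift(np.fft.fftfreq(n, d=1. / n)) for n in N]
K = np.meshgrid(*xi, indexing='ij')
k2 = sum((2 * np.pi * k)**2 for k in K)   # |gradient symbol|^2
k2[tuple(n // 2 for n in N)] = 1.         # avoid dividing by zero at the mean frequency
val = 1. / k2                             # dense counterpart of the field fed to Ps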
Example #5
    def test_canoTensor(self):
        print('\nChecking canonical tensor...')

        u1, s1, vt1 = np.linalg.svd(self.T2d, full_matrices=0)
        a = CanoTensor(name='a', core=s1, basis=[u1.T, vt1])
        self.assertAlmostEqual(norm(a.full() - self.T2d), 0)

        a = SparseTensor(kind='cano', val=self.T2d)
        self.assertAlmostEqual(norm(a.full() - self.T2d), 0)

        b = SparseTensor(kind='cano', val=self.T2dOther)

        self.assertAlmostEqual(norm((a + b).full() - self.T2d - self.T2dOther),
                               0)
        self.assertAlmostEqual(norm((a * b).full() - self.T2d * self.T2dOther),
                               0)

        print('...ok')
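The first assertion relies on the fact that an SVD is a canonical decomposition of a 2D array: the singular values act as the core and the singular vectors as the two bases. In plain NumPy the same reconstruction check reads (the random T2d stands in for self.T2d):

import numpy as np

T2d = np.random.rand(15, 20)
u1, s1, vt1 = np.linalg.svd(T2d, full_matrices=False)
T_rec = (u1 * s1) @ vt1        # sum_k s1[k] * outer(u1[:, k], vt1[k, :])
print(np.linalg.norm(T_rec - T2d))   # ~ machine precision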
Example #6
def richardson(Afun, B, x0=None, rank=None, tol=None, par=None, norm=None):
    if isinstance(par['alpha'], float):
        omega = 1. / par['alpha']
    else:
        raise ValueError()
    res = {'norm_res': [], 'kit': 0}
    if x0 is None:
        x = B * omega
    else:
        x = x0

    if norm is None:
        norm = lambda X: X.norm()

    res['norm_res'].append(norm(B))

    M = SparseTensor(kind=x.kind, val=np.ones(x.N.size * [
        3,
    ]), rank=1)  # constant field
    FM = M.fourier().enlarge(x.N)

    norm_res = 1e15
    while (norm_res > par['tol'] and res['kit'] < par['maxiter']):
        res['kit'] += 1
        residuum = B - Afun(x)
        norm_res = norm(residuum)
        if par['divcrit'] and norm_res > res['norm_res'][res['kit'] - 1]:
            break

        x = (x + residuum * omega)
        x = (-FM * x.mean() + x).truncate(rank=rank, tol=tol,
                                          fast=True)  # setting correct mean

        res['norm_res'].append(norm_res)

    return x, res
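The loop above is a plain Richardson iteration with fixed step omega = 1/alpha, plus the low-rank truncation and the mean correction. A dense-NumPy sketch of just the iteration, with an illustrative diagonal test matrix; it converges whenever the spectrum of A lies in (0, 2*alpha):

import numpy as np

def richardson_dense(A, b, alpha, tol=1e-8, maxiter=1000):
    omega = 1. / alpha
    x = b * omega
    for kit in range(1, maxiter + 1):
        r = b - A @ x
        if np.linalg.norm(r) < tol:
            break
        x = x + omega * r
    return x, kit

rng = np.random.default_rng(1)
A = np.diag(rng.uniform(1., 9., size=40))   # eigenvalues in (1, 9)
b = rng.standard_normal(40)
x, kit = richardson_dense(A, b, alpha=5.)   # 0 < eig(A) < 2*alpha
print(kit, np.linalg.norm(b - A @ x))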
Example #7
def grad_tensor(N, Y, kind='TensorTrain'):
    assert kind.lower() in ['cano', 'canotensor', 'tucker', 'tt', 'tensortrain']

    dim = Y.size
    freq = Grid.get_xil(N, Y, fft_form='c')
    hGrad_s = []

    for ii in range(dim):
        basis = []
        for jj in range(dim):
            if ii == jj:
                basis.append(np.atleast_2d(freq[jj] * 2 * np.pi * 1j))
            else:
                basis.append(np.atleast_2d(np.ones(N[jj])))

        if kind.lower() in ['cano', 'canotensor', 'tucker']:
            hGrad_s.append(SparseTensor(kind=kind, name='hGrad({})'.format(ii),
                                        core=np.array([1.]), basis=basis,
                                        Fourier=True, fft_form='c').set_fft_form())
        elif kind.lower() in ['tt', 'tensortrain']:
            cl = [bas.reshape((1, -1, 1)) for bas in basis]
            hGrad_s.append(SparseTensor(kind=kind, core=cl, name='hGrad({})'.format(ii),
                                        Fourier=True, fft_form='c').set_fft_form())

    return hGrad_s
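Each rank-1 factor above carries the Fourier symbol 2*pi*1j*xi of the partial derivative along its own axis (and ones along the other axes). The effect of that symbol can be checked with a one-dimensional NumPy example, assuming a unit cell:

import numpy as np

N = 64
x = np.arange(N) / N                     # periodic grid on [0, 1)
f = np.sin(2 * np.pi * x)
xi = np.fft.fftfreq(N, d=1. / N)         # integer frequencies
df = np.fft.ifft(2 * np.pi * 1j * xi * np.fft.fft(f)).real
print(np.max(np.abs(df - 2 * np.pi * np.cos(2 * np.pi * x))))  # ~ 1e-12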
Example #8
    def test_orthogonalise(self):
        print('\nChecking orthogonalization functions ...')

        a = SparseTensor(kind='cano', val=self.T2d)
        b = SparseTensor(kind='cano', val=self.T2dOther)
        c = a + b
        co = c.orthogonalise()
        for i in range(co.order):
            I = np.eye(co.N[i])
            self.assertAlmostEqual(
                np.dot(co.basis[i], co.basis[i].T).any(), I.any())

        a = SparseTensor(kind='tucker', val=self.T3d)
        b = SparseTensor(kind='tucker', val=self.T3dOther)
        c = a + b
        co = c.orthogonalise()
        for i in range(co.order):
            I = np.eye(co.N[i])
            self.assertAlmostEqual(
                np.dot(co.basis[i], co.basis[i].T).any(), I.any())

        a = SparseTensor(kind='tt', val=self.T3d)
        b = SparseTensor(kind='tt', val=self.T3dOther)
        c = a + b
        co = c.orthogonalise(direction='lr')
        cr = co.to_list(co)
        for i in range(co.d):
            cr[i] = np.reshape(cr[i], (-1, co.r[i + 1]))
            I = np.eye(co.N[i])
            self.assertAlmostEqual(np.dot(cr[i].T, cr[i]).any(), I.any())

        co = c.orthogonalise(direction='rl')
        cr = co.to_list(co)
        for i in range(co.d):
            cr[i] = np.reshape(cr[i], (co.r[i], -1))
            I = np.eye(co.N[i])
            self.assertAlmostEqual(np.dot(cr[i], cr[i].T).any(), I.any())

        aSubTrain = c.tt_chunk(0, 1)
        co, ru = aSubTrain.orthogonalise(direction='rl', r_output=True)
        cr = co.to_list(co)
        for i in range(co.d):
            cr[i] = np.reshape(cr[i], (co.r[i], -1))
            I = np.eye(co.N[i])
            self.assertAlmostEqual(np.dot(cr[i], cr[i].T).any(), I.any())

        print('...ok')
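For reference, the left-to-right ('lr') property checked above can be reproduced on plain NumPy TT cores with a QR sweep; this is a textbook sketch, not the library's orthogonalise routine, and the core shapes are made up for illustration:

import numpy as np

def tt_orthogonalise_lr(cores):
    cores = [c.copy() for c in cores]
    for i in range(len(cores) - 1):
        r0, n, r1 = cores[i].shape
        q, rmat = np.linalg.qr(cores[i].reshape(r0 * n, r1))
        cores[i] = q.reshape(r0, n, q.shape[1])
        # carry the triangular factor into the next core
        cores[i + 1] = np.tensordot(rmat, cores[i + 1], axes=(1, 0))
    return cores

# three random TT cores with ranks 1-3-4-1
cores = [np.random.rand(1, 6, 3), np.random.rand(3, 6, 4), np.random.rand(4, 6, 1)]
for c in tt_orthogonalise_lr(cores)[:-1]:
    m = c.reshape(-1, c.shape[2])
    print(np.allclose(m.T @ m, np.eye(m.shape[1])))   # True, True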
Example #9
    def test_Fourier_truncation(self):
        print('\nChecking TT truncation in Fourier domain ...')
        N = np.random.randint(20, 50, size=3)
        a = np.arange(1, np.prod(N) + 1).reshape(N)
        cases = [[None] * 2, [None] * 2]
        # first a random test case
        cases[0] = [np.random.random(N), np.random.random(N)]
        # this produces a smoother, more realistic tensor with modest rank
        cases[1] = [np.sin(a) / a, np.exp(np.sin(a) / a)]

        for i in range(len(cases)):
            for fft_form in [0, 'c', 'sr']:

                a = cases[i][0]
                b = cases[i][1]
                ta = SparseTensor(
                    kind='tt', val=a, fft_form=fft_form
                )  # Fourier truncation works the best with option 'sr'
                tb = SparseTensor(kind='tt', val=b, fft_form=fft_form)
                tc = ta + tb
                k = tc.r[1:-1].max() // 2 - 5  # integer target rank for truncation

                tct = tc.truncate(rank=k)

                err_normal_truncate = (tct - tc).norm()
                #                print("loss in normal  domain truncation:",norm(tct.full().val-(a+b) ))

                taf = ta.fourier()
                tbf = tb.fourier()
                tcf = taf + tbf
                tcft = tcf.truncate(rank=k)
                tcfti = tcft.fourier()
                #                print("norm of imag part of F inverse tensor",norm(tcfti.full().val.imag))

                err_Fourier_truncate = (tcfti - tc).norm()
                #                print("loss in Fourier domain truncation:",norm(tcfti.full().val-(a+b) ))

                # assert that the two truncation errors are of the same order of magnitude
                self.assertAlmostEqual(err_normal_truncate,
                                       err_Fourier_truncate,
                                       delta=err_normal_truncate * 3)

        print('...ok')
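The property being tested, that truncating after the Fourier transform loses about as much as truncating before it, comes from the DFT being unitary up to scaling, so it preserves the singular values that the truncation discards. A 2D matrix analogue with an SVD truncation in place of the TT truncation:

import numpy as np

def trunc_svd(M, k):                            # best rank-k approximation
    u, s, vt = np.linalg.svd(M, full_matrices=False)
    return (u[:, :k] * s[:k]) @ vt[:k]

rng = np.random.default_rng(2)
A = rng.random((40, 40))
k = 10
err_real = np.linalg.norm(A - trunc_svd(A, k))

F = np.fft.fft(np.eye(40)) / np.sqrt(40)        # unitary DFT matrix
Af = F @ A @ F.T                                # 2D DFT of A (unitary scaling)
A_back = np.conj(F).T @ trunc_svd(Af, k) @ np.conj(F)
err_fourier = np.linalg.norm(A - A_back)
print(err_real, err_fourier)                    # the two errors coincide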
Example #10
def cheby2TERM(Afun, B, x0=None, par={}, callback=None):
    """
    Chebyshev two-term iterative solver

    Parameters
    ----------
    Afun : a function representing the linear operator A in the system Ax = B
    B : tensorsLowRank tensor representing the right-hand side B of the linear system
    x0 : tensorsLowRank tensor representing the initial approximation of the solution
    par : dict
          parameters of the method
    callback : optional function called with the current iterate after each iteration

    Returns
    -------
    x : resulting unknown vector
    res : dict
        results
    """

    if 'tol' not in par:
        par['tol'] = 1e-06
    if 'maxiter' not in par:
        par['maxiter'] = 1e7
    if 'eigrange' not in par:
        raise NotImplementedError("It is necessary to calculate eigenvalues.")
    else:
        Egv = par['eigrange']

    res = {'norm_res': [], 'kit': 0}

    bnrm2 = B.norm()
    if bnrm2 == 0:
        bnrm2 = 1.0
    Ib = 1.0 / bnrm2

    if x0 is None:
        x = B
    else:
        x = x0

    r = B - Afun(x)
    r0 = r.norm()
    res['norm_res'].append(Ib * r0)  # normalised residual norm

    if res['norm_res'][-1] < par['tol']:  # if errnorm is less than tol
        return x, res

    M = SparseTensor(kind=x.kind, val=np.ones(x.N.size * [
        3,
    ]), rank=1)  # constant field
    FM = M.fourier().enlarge(x.N)

    d = (Egv[1] + Egv[0]) / 2.0  # np.mean(par['eigrange'])
    c = (Egv[1] - Egv[0]) / 2.0  # par['eigrange'][1] - d
    v = x * 0.0
    while (res['norm_res'][-1] > par['tol']) and (res['kit'] < par['maxiter']):
        res['kit'] += 1
        x_prev = x
        if res['kit'] == 1:
            p = 0
            w = 1 / d
        elif res['kit'] == 2:
            p = -(1 / 2) * (c / d) * (c / d)
            w = 1 / (d - c * c / 2 / d)
        else:
            p = -(c * c / 4) * w * w
            w = 1 / (d - c * c * w / 4)
        v = (r - p * v).truncate(rank=par['rank'], tol=par['tol_truncate'])
        x = (x_prev + w * v)
        x = (-FM * x.mean() + x).truncate(
            rank=par['rank'], tol=par['tol_truncate'])  # setting correct mean
        r = B - Afun(x)

        res['norm_res'].append((1.0 / r0) * r.norm())

        if callback is not None:
            callback(x)

    if par['tol'] < res['norm_res'][-1]:  # the residual norm still exceeds the tolerance
        print("Chebyshev solver did not converge!")
    else:
        print("Chebyshev solver converged.")

    if res['kit'] == 0:
        res['norm_res'] = 0
    return x, res
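The p/w updates above are the two-term Chebyshev recurrence with d = (lambda_max + lambda_min)/2 and c = (lambda_max - lambda_min)/2. Below is a dense-NumPy transcription of the same recurrence, without the low-rank truncation and the mean correction, on a diagonal test matrix with a known eigenvalue range; the matrix and right-hand side are illustrative only.

import numpy as np

def cheby2term_dense(A, b, eigrange, tol=1e-10, maxiter=200):
    d = (eigrange[1] + eigrange[0]) / 2.
    c = (eigrange[1] - eigrange[0]) / 2.
    x = np.zeros_like(b)
    v = np.zeros_like(b)
    r = b - A @ x
    w = 0.
    for kit in range(1, maxiter + 1):
        if kit == 1:
            p, w = 0., 1. / d
        elif kit == 2:
            p = -(1. / 2.) * (c / d)**2
            w = 1. / (d - c * c / (2. * d))
        else:
            p = -(c * c / 4.) * w * w        # uses the previous w
            w = 1. / (d - c * c * w / 4.)
        v = r - p * v
        x = x + w * v
        r = b - A @ x
        if np.linalg.norm(r) < tol * np.linalg.norm(b):
            break
    return x, kit

rng = np.random.default_rng(3)
lam = rng.uniform(1., 10., size=60)
A, b = np.diag(lam), rng.standard_normal(60)
x, kit = cheby2term_dense(A, b, eigrange=(1., 10.))
print(kit, np.linalg.norm(b - A @ x))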
Example #11
def minimal_residual_debug(Afun, B, x0=None, par=None):
    fast = par.get('fast')

    M = SparseTensor(kind=B.kind, val=np.ones(B.N.size * [
        3,
    ]), rank=1)  # constant field
    FM = M.fourier().enlarge(B.N)

    res = {'norm_res': [], 'kit': 0}
    if x0 is None:
        x = B * (1. / par['alpha'])
    else:
        x = x0

    if 'norm' not in par:
        norm = lambda X: X.norm(normal_domain=False)
    else:
        norm = par['norm']

    residuum = (B - Afun(x)).truncate(rank=None, tol=par['tol'], fast=fast)
    res['norm_res'].append(norm(residuum))
    beta = Afun(residuum)

    norm_res = res['norm_res'][res['kit']]

    while (norm_res > par['tol'] and res['kit'] < par['maxiter']):
        res['kit'] += 1
        print('iteration = {}'.format(res['kit']))

        if par['approx_omega']:
            omega = norm_res / norm(beta)  # approximate omega
        else:
            omega = beta.inner(residuum) / norm(beta)**2  # exact formula

        x = (x + residuum * omega)
        x = (-FM * x.mean() + x).truncate(
            rank=par['rank'], tol=par['tol'])  # setting correct mean

        tic = Timer('compute residuum')
        residuum = (B - Afun(x))
        #         residuum=residuum.truncate(rank=2*rank, tol=tol)
        #         residuum=(B-Afun(x)).truncate(rank=rank, tol=tol)
        #         residuum=(B-Afun(x))
        tic.measure()
        tic = Timer('residuum norm')
        norm_res = norm(residuum)
        tic.measure()
        if par['divcrit'] and norm_res > res['norm_res'][-1]:
            break
        res['norm_res'].append(norm_res)

        tic = Timer('truncate residuum')
        #         residuum_for_beta=residuum.truncate(rank=rank, tol=tol)
        #         residuum_for_beta=residuum.truncate(rank=None, tol=1-4)
        tol = min([norm_res / 1e1, par['tol']])
        residuum_for_beta = residuum.truncate(rank=None, tol=tol, fast=fast)
        tic.measure()
        print('tolerance={}, rank={}'.format(tol, residuum_for_beta.r))
        print('residuum_for_beta.r={}'.format(residuum_for_beta.r))
        tic = Timer('compute beta')
        beta = Afun(residuum_for_beta)
        tic.measure()
        pass
    return x, res
Example #12
    def test_mean(self):
        print('\nChecking method mean() ...')
        a = SparseTensor(kind='cano', val=self.T2d)
        self.assertAlmostEqual(np.mean(self.T2d), a.mean())
        self.assertAlmostEqual(np.mean(self.T2d), a.fourier().mean())

        a = SparseTensor(kind='tucker', val=self.T3d)
        self.assertAlmostEqual(np.mean(self.T3d), a.mean())
        self.assertAlmostEqual(np.mean(self.T3d), a.fourier().mean())

        a = SparseTensor(kind='tt', val=self.T3d)
        self.assertAlmostEqual(np.mean(self.T3d), a.mean())
        self.assertAlmostEqual(np.mean(self.T3d), a.fourier().mean())
        print('...ok')
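The Fourier-side assertions rest on the fact that the mean of a periodic array equals its zero-frequency DFT coefficient divided by the number of entries; in plain NumPy (the random T stands in for self.T3d):

import numpy as np

T = np.random.rand(5, 7, 11)
F = np.fft.fftn(T)
print(np.mean(T), F[0, 0, 0].real / T.size)   # the two values agree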
Example #13
    def test_Fourier(self):
        print('\nChecking Fourier functions ...')

        for opt in [0, 'c']:

            a = SparseTensor(kind='cano', val=self.T2d, fft_form=opt)
            T = Tensor(val=self.T2d,
                       order=0,
                       N=self.T2d.shape,
                       Fourier=False,
                       fft_form=opt)
            self.assertAlmostEqual(
                norm(a.fourier().full(fft_form=opt).val -
                     T.fourier(copy=True).val), 0)
            self.assertEqual(
                norm(a.fourier().fourier(real_output=True).full().val.imag), 0)

            a = SparseTensor(kind='tucker', val=self.T3d, fft_form=opt)
            T = Tensor(val=self.T3d,
                       order=0,
                       N=self.T3d.shape,
                       Fourier=False,
                       fft_form=opt)
            self.assertAlmostEqual(
                norm(a.fourier().full(fft_form=opt) - T.fourier(copy=True)), 0)
            self.assertEqual(
                norm(a.fourier().fourier(real_output=True).full().val.imag), 0)

            a = SparseTensor(kind='tt', val=self.T3d, fft_form=opt)
            T = Tensor(val=self.T3d,
                       order=0,
                       N=self.T3d.shape,
                       Fourier=False,
                       fft_form=opt)
            self.assertAlmostEqual(
                norm(a.fourier().full(fft_form=opt) -
                     T.fourier(copy=True).val), 0)
            self.assertEqual(
                norm(a.fourier().fourier(real_output=True).full().val.imag), 0)
        # checking shifting fft_forms
        sparse_opt = 'sr'
        for full_opt in [0, 'c']:

            a = SparseTensor(kind='cano', val=self.T2d, fft_form=sparse_opt)
            T = Tensor(val=self.T2d,
                       order=0,
                       N=self.T2d.shape,
                       Fourier=False,
                       fft_form=full_opt)
            self.assertAlmostEqual(
                norm(a.fourier().full(fft_form=full_opt) -
                     T.fourier(copy=True)), 0)
            self.assertAlmostEqual((a.fourier().set_fft_form(full_opt) -
                                    a.set_fft_form(full_opt).fourier()).norm(),
                                   0)

            a = SparseTensor(kind='tucker', val=self.T3d, fft_form=sparse_opt)
            T = Tensor(val=self.T3d,
                       order=0,
                       N=self.T3d.shape,
                       Fourier=False,
                       fft_form=full_opt)
            self.assertAlmostEqual(
                norm(a.fourier().full(fft_form=full_opt) -
                     T.fourier(copy=True)), 0)
            self.assertAlmostEqual((a.fourier().set_fft_form(full_opt) -
                                    a.set_fft_form(full_opt).fourier()).norm(),
                                   0)

            a = SparseTensor(kind='tt', val=self.T3d, fft_form=sparse_opt)
            T = Tensor(val=self.T3d,
                       order=0,
                       N=self.T3d.shape,
                       Fourier=False,
                       fft_form=full_opt)
            self.assertAlmostEqual(
                norm(a.fourier().full(fft_form=full_opt) -
                     T.fourier(copy=True)), 0)
            self.assertAlmostEqual((a.fourier().set_fft_form(full_opt) -
                                    a.set_fft_form(full_opt).fourier()).norm(),
                                   0)

        print('...ok')
Example #14
def homog_GaNi_sparse(Aganis, Agas, pars):
    debug = getattr(pars, 'debug', False)

    N = Aganis.N
    dim = N.__len__()
    hGrad_s = sgrad_tensor(N, pars.Y, kind=pars.kind)

    Aniso = getattr(pars, 'Aniso', np.zeros([dim, dim]))

    # creating constant field in tensorsLowRank tensor
    Es = SparseTensor(name='E',
                      kind=pars.kind,
                      val=np.ones(dim * (3, )),
                      rank=1)
    Es = Es.fourier().enlarge(N).fourier()

    material_law = Material_law(Aganis, Aniso, Es)

    def DFAFGfun_s(X, rank=None, tol=None, fast=False):  # linear operator
        assert (X.Fourier)
        FGX = [(hGrad_s[ii] * X).fourier() for ii in range(dim)]
        AFGFx = material_law(FGX, rank=rank, tol=tol, fast=fast)
        # or in following: Fourier, reduce, truncate
        FAFGFx = [AFGFx[ii].fourier() for ii in range(dim)]
        GFAFGFx = hGrad_s[0] * FAFGFx[0]  # div
        for ii in range(1, dim):
            GFAFGFx += hGrad_s[ii] * FAFGFx[ii]
        GFAFGFx = GFAFGFx.truncate(rank=rank, tol=tol, fast=fast)
        GFAFGFx.name = 'fun(x)'
        return -GFAFGFx

    # R.H.S.
    Bs = hGrad_s[0] * (Aganis * Es).fourier()  # minus from B and from div
    Ps = get_preconditioner_sparse(N, pars)

    def PDFAFGfun_s(Fx,
                    rank=pars.solver['rank'],
                    tol=pars.solver['tol_truncate'],
                    fast=pars.solver['fast']):
        R = DFAFGfun_s(Fx, rank=rank, tol=tol, fast=fast)
        R = Ps * R
        R = R.truncate(rank=rank, tol=tol, fast=fast)
        return R

    PBs = Ps * Bs
    PBs2 = PBs.truncate(tol=pars.rhs_tol, fast=False)
    if debug:
        print('r.h.s. norm = {}; error={}; rank={}'.format(
            np.linalg.norm(PBs.full().val),
            np.linalg.norm(PBs.full().val - PBs2.full().val), PBs2.r))

    PBs = PBs2

    tic = Timer(name=pars.solver['method'])
    Fu, ress = linear_solver_lowrank(pars.solver['method'],
                                     Afun=PDFAFGfun_s,
                                     B=PBs,
                                     par=pars.solver)
    tic.measure()

    print('iterations of solver={}'.format(ress['kit']))
    print('norm of residuum={}'.format(ress['norm_res'][-1]))
    Fu.name = 'Fu'
    print('norm(resP)={}'.format(np.linalg.norm(
        (PBs - PDFAFGfun_s(Fu)).full().val)))

    if Agas is None:  # GaNi homogenised properties
        print('!!!!! homogenised properties are GaNi only !!!!!')
        FGX = [(hGrad_s[ii] * Fu).fourier() for ii in range(dim)]
        FGX[0] += Es  # adding mean
        AH = calculate_AH_sparse(Aganis, Aniso, FGX, method='full')
    else:
        Nbar = 2 * np.array(N) - 1
        FGX = [((hGrad_s[ii] * Fu).enlarge(Nbar)).fourier()
               for ii in range(dim)]
        Es = SparseTensor(kind=pars.kind, val=np.ones(Nbar), rank=1)
        FGX[0] += Es  # adding mean
        AH = calculate_AH_sparse(Agas, Aniso, FGX, method='full')

    return Struct(AH=AH, e=FGX, solver=ress, Fu=Fu, time=tic.vals[0][0])