Example #1
def wavelet_tensors(request):
    """Returns the Hamiltonian and MERA tensors for the D=2 wavelet MERA.

  From Evenbly & White, Phys. Rev. Lett. 116, 140403 (2016).
  """
    D = 2
    h = simple_mera.ham_ising()

    E = np.array([[1, 0], [0, 1]])
    X = np.array([[0, 1], [1, 0]])
    Y = np.array([[0, -1j], [1j, 0]])
    Z = np.array([[1, 0], [0, -1]])

    wmat_un = np.real((np.sqrt(3) + np.sqrt(2)) / 4 * np.kron(E, E) +
                      (np.sqrt(3) - np.sqrt(2)) / 4 * np.kron(Z, Z) + 1.j *
                      (1 + np.sqrt(2)) / 4 * np.kron(X, Y) + 1.j *
                      (1 - np.sqrt(2)) / 4 * np.kron(Y, X))

    umat = np.real((np.sqrt(3) + 2) / 4 * np.kron(E, E) +
                   (np.sqrt(3) - 2) / 4 * np.kron(Z, Z) +
                   1.j / 4 * np.kron(X, Y) + 1.j / 4 * np.kron(Y, X))

    w = np.reshape(wmat_un, (D, D, D, D))[:, 0, :, :]
    u = np.reshape(umat, (D, D, D, D))

    w = np.transpose(w, [1, 2, 0])
    u = np.transpose(u, [2, 3, 0, 1])

    return tuple(x.astype(np.complex128) for x in (h, w, u))
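
The shapes below follow directly from the reshapes and transposes above; a hedged sanity-check sketch (passing None for the unused pytest request argument):

h, w, u = wavelet_tensors(None)
assert w.shape == (2, 2, 2)      # isometry
assert u.shape == (2, 2, 2, 2)   # disentangler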
Example #2
    def trainsplit(self, ntrain=1000, tt=4000):
        x_inp_real = np.real(self.denMO)[:, self.rnzl[0], self.rnzl[1]]
        x_inp_imag = np.imag(self.denMO)[:, self.inzl[0], self.inzl[1]]
        self.x_inp = np.hstack([x_inp_real, x_inp_imag])

        self.offset = 2
        self.tt = tt
        self.ntrain = ntrain
        self.x_inp = self.x_inp[self.offset:(self.tt + self.offset), :]

        self.dt = 0.08268
        self.tint_whole = np.arange(self.x_inp.shape[0]) * self.dt

        # training set
        self.x_inp_train = self.x_inp[:ntrain, :]
        self.tint = self.tint_whole[:ntrain]

        # validation set
        self.x_inp_valid = self.x_inp[ntrain:, :]
        self.tint_valid = self.tint_whole[ntrain:]

        # adding field commutator terms
        hpcommute_real = np.real(self.eftraincommuteMOflat)
        hpcommute_imag = np.imag(self.eftraincommuteMOflat)
        self.hpcommute_train = np.hstack([hpcommute_real, hpcommute_imag])
        self.hpcommute_train_loss = self.hpcommute_train[1:(self.ntrain -
                                                            1), :]

        # indicate that the split completed
        return True
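
A hedged usage sketch; the instance name data and its preloaded denMO, rnzl, inzl, and eftraincommuteMOflat attributes are assumptions about the surrounding class:

ok = data.trainsplit(ntrain=1000, tt=4000)
x_train, t_train = data.x_inp_train, data.tint          # first ntrain samples
x_valid, t_valid = data.x_inp_valid, data.tint_valid    # remaining samples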
Example #3
    def _egrad_to_rgrad(self):
        """Checking egrad_to_rgrad method.
        1) rgrad is in the tangent space of a manifold's point
        2) <v1 egrad> = <v1 rgrad>_m (matching between egrad and rgrad)

        Args:

        Returns:
            list with two tf scalars that give maximum
            violation of two conditions
        """

        # vector that plays the role of a gradient
        xi = random.normal(self.key, (*self.u.shape, 2), dtype=jnp.float64)
        xi = xi[..., 0] + 1j * xi[..., 1]


        # rgrad
        rgrad = self.m.egrad_to_rgrad(self.u, xi)

        err1 = rgrad - self.m.proj(self.u, rgrad)
        err1 = jnp.real(jnp.linalg.norm(err1, axis=(-2, -1)))

        err2 = (self.v1.conj() * xi).sum(axis=(-2, -1)) - self.m.inner(self.u, self.v1, rgrad)[..., 0, 0]

        err2 = jnp.abs(jnp.real(err2))

        err1 = err1.max()
        err2 = err2.max()
        return err1, err2
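
A sketch of treating the two returned violations as a tolerance check; checker stands for a hypothetical instance of this test class:

err1, err2 = checker._egrad_to_rgrad()
assert err1 < 1e-10, "rgrad is not in the tangent space"
assert err2 < 1e-10, "egrad and rgrad disagree as linear functionals"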
Example #4
    def body_fun(state: LBFGSResults):
        # find search direction
        p_k = _two_loop_recursion(state)

        # line search
        ls_results = line_search(
            f=fun,
            xk=state.x_k,
            pk=p_k,
            old_fval=state.f_k,
            gfk=state.g_k,
            maxiter=maxls,
        )

        # evaluate at next iterate
        s_k = ls_results.a_k * p_k
        x_kp1 = state.x_k + s_k
        f_kp1 = ls_results.f_k
        g_kp1 = ls_results.g_k
        y_k = g_kp1 - state.g_k
        rho_k_inv = jnp.real(_dot(y_k, s_k))
        rho_k = jnp.reciprocal(rho_k_inv)
        gamma = rho_k_inv / jnp.real(_dot(jnp.conj(y_k), y_k))

        # replacements for next iteration
        status = 0
        status = jnp.where(state.f_k - f_kp1 < ftol, 4, status)
        status = jnp.where(state.ngev >= maxgrad, 3, status)  # type: ignore
        status = jnp.where(state.nfev >= maxfun, 2, status)  # type: ignore
        status = jnp.where(state.k >= maxiter, 1, status)  # type: ignore
        status = jnp.where(ls_results.failed, 5, status)

        converged = jnp.linalg.norm(g_kp1, ord=norm) < gtol

        # TODO(jakevdp): use a fixed-point procedure rather than type-casting?
        state = state._replace(
            converged=converged,
            failed=(status > 0) & (~converged),
            k=state.k + 1,
            nfev=state.nfev + ls_results.nfev,
            ngev=state.ngev + ls_results.ngev,
            x_k=x_kp1.astype(state.x_k.dtype),
            f_k=f_kp1.astype(state.f_k.dtype),
            g_k=g_kp1.astype(state.g_k.dtype),
            s_history=_update_history_vectors(history=state.s_history,
                                              new=s_k),
            y_history=_update_history_vectors(history=state.y_history,
                                              new=y_k),
            rho_history=_update_history_scalars(history=state.rho_history,
                                                new=rho_k),
            gamma=gamma,
            status=jnp.where(converged, 0, status),
            ls_status=ls_results.status,
        )

        return state
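
A standalone toy check of the curvature quantities used above, rho_k = 1/Re(y_k . s_k) and gamma = Re(y_k . s_k)/Re(y_k . y_k), the latter scaling the initial inverse-Hessian guess (toy vectors, not part of the solver):

import jax.numpy as jnp

s_k = jnp.array([0.10, -0.20])
y_k = jnp.array([0.05, -0.08])
rho_k_inv = jnp.real(jnp.vdot(y_k, s_k))
rho_k = jnp.reciprocal(rho_k_inv)
gamma = rho_k_inv / jnp.real(jnp.vdot(y_k, y_k))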
Example #5
    def testSvdWithRectangularInput(self, m, n, log_cond, full_matrices):
        """Tests SVD with rectangular input."""
        with jax.default_matmul_precision('float32'):
            a = np.random.uniform(low=0.3, high=0.9,
                                  size=(m, n)).astype(_SVD_TEST_DTYPE)
            u, s, v = osp_linalg.svd(a, full_matrices=False)
            cond = 10**log_cond
            s = jnp.linspace(cond, 1, min(m, n))
            a = (u * s) @ v
            a = a.astype(complex) * (1 + 1j)

            osp_linalg_fn = functools.partial(osp_linalg.svd,
                                              full_matrices=full_matrices)
            actual_u, actual_s, actual_v = svd.svd(a,
                                                   full_matrices=full_matrices)

            k = min(m, n)
            if m > n:
                unitary_u = jnp.real(actual_u.T.conj() @ actual_u)
                unitary_v = jnp.real(actual_v.T.conj() @ actual_v)
                unitary_u_size = m if full_matrices else k
                unitary_v_size = k
            else:
                unitary_u = jnp.real(actual_u @ actual_u.T.conj())
                unitary_v = jnp.real(actual_v @ actual_v.T.conj())
                unitary_u_size = k
                unitary_v_size = n if full_matrices else k

            _, expected_s, _ = osp_linalg_fn(a)

            svd_fn = lambda a: svd.svd(a, full_matrices=full_matrices)
            args_maker = lambda: [a]

            with self.subTest('Test JIT compatibility'):
                self._CompileAndCheck(svd_fn, args_maker)

            with self.subTest('Test unitary u.'):
                self.assertAllClose(np.eye(unitary_u_size),
                                    unitary_u,
                                    rtol=_SVD_RTOL,
                                    atol=2E-3)

            with self.subTest('Test unitary v.'):
                self.assertAllClose(np.eye(unitary_v_size),
                                    unitary_v,
                                    rtol=_SVD_RTOL,
                                    atol=2E-3)

            with self.subTest('Test s.'):
                self.assertAllClose(expected_s,
                                    jnp.real(actual_s),
                                    rtol=_SVD_RTOL,
                                    atol=1E-6)
Example #6
def pinv(model: SpectralSobolev1Fit):
    ns = model.exponents
    A = vander_builder(model.grid, ns)(model.mesh)
    B = vandergrad_builder(model.grid, ns)(model.mesh)
    I = np.ones((np.size(A, 0), 1))
    O = np.zeros((np.size(B, 0), 1))
    #
    if model.is_periodic:
        U = np.hstack((I, np.real(A), np.imag(A)))
        V = np.hstack((O, np.imag(B), np.real(B)))
    else:
        U = np.hstack((I, A))
        V = np.hstack((O, B))
    #
    return np.linalg.pinv(np.vstack((U, V)))
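
A hedged usage sketch: the pseudo-inverse maps stacked function samples and gradient samples on the mesh to fit coefficients; the names f_samples and df_samples are illustrative:

P = pinv(model)
coeffs = P @ np.concatenate([f_samples, df_samples])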
Example #7
def get_multi_probe_spect(fs, fname='test_spect.mat', ground_truth=True):

    fs = numpy.array(fs)
    if ground_truth:
        aa = sio.loadmat(fname)
        ideal_spect = np.real(aa['spect'])
        fs_ideal = numpy.linspace(0, 101, numpy.max(ideal_spect.shape))

        #     if probe_loc != 0:
        #         key_str = 'spect_'+str(probe_loc)
        #         ideal_spect = aa[key_str]
        #     else:
        #         key_str = 'spect'
        #         ideal_spect = aa[key_str][:, gabor_inds]

        target_PS = np.array(
            [numpy.interp(fs, fs_ideal, idl_ps) for idl_ps in ideal_spect.T]).T
    else:
        con_inds = numpy.arange(9)
        contrasts = numpy.array([0, 25, 50, 100, 100])
        target_PS = numpy.zeros((len(fs), len(con_inds)))
        ind = 0
        for cc in con_inds:
            if cc < len(contrasts):
                if cc == 0:
                    target_PS[:, cc] = con_spect(fs, contrasts[cc]) - 10
                else:
                    target_PS[:, cc] = con_spect(fs, contrasts[cc])
            else:
                target_PS[:, cc] = maun_spect(fs, cc - (len(contrasts) - 1))

    return target_PS
Example #8
def loss_spect_nonzero_contrasts(fs, spect, MULTI=False):
    '''
    MSE loss quantifying match of power spectra across
    contrasts [0, 25, 50, 100] with target spectra 
    '''
    epsilon = 0.0045  # a regularization term, this is the unnormalized value of the first target PS

    if MULTI:
        target_spect = np.array(
            get_multi_probe_spect(fs,
                                  probe_loc=0,
                                  gabor_inds=-1,
                                  norm=False,
                                  fname='test_spect.mat'))
    else:
        target_spect = np.array(
            get_target_spect(fs, ground_truth=True, norm=False))

    BS = target_spect[:, 0]
    target_spect = target_spect[:, 1:] - BS[:, None]
    target_spect = target_spect / np.mean(target_spect)

    BS = spect[:, 0]
    spect = np.real(spect[:, 1:] - BS[:, None])
    spect = spect / (np.mean(spect) + epsilon)

    spect_loss = np.mean((target_spect - spect)**2)  #MSE
    return spect_loss
Example #9
def stable_svd_jvp(primals, tangents):
    """Copied from the JAX source code and slightly tweaked for stability"""
    # Deformation parameter which yields regular SVD JVP rule when set to 0
    eps = 1e-10
    A, = primals
    dA, = tangents
    U, s, Vt = jnp.linalg.svd(A, full_matrices=False, compute_uv=True)

    _T = lambda x: jnp.swapaxes(x, -1, -2)
    _H = lambda x: jnp.conj(_T(x))
    k = s.shape[-1]
    Ut, V = _H(U), _H(Vt)
    s_dim = s[..., None, :]
    dS = jnp.matmul(jnp.matmul(Ut, dA), V)
    ds = jnp.real(jnp.diagonal(dS, 0, -2, -1))

    # Deformation by eps avoids getting NaN's when SV's are degenerate
    f = jnp.square(s_dim) - jnp.square(_T(s_dim)) + jnp.eye(k)
    f = f + eps / f  # eps controls stability
    F = 1 / f - jnp.eye(k) / (1 + eps)

    dSS = s_dim * dS
    SdS = _T(s_dim) * dS
    dU = jnp.matmul(U, F * (dSS + _T(dSS)))
    dV = jnp.matmul(V, F * (SdS + _T(SdS)))

    m, n = A.shape[-2], A.shape[-1]
    if m > n:
        dU = dU + jnp.matmul(
            jnp.eye(m) - jnp.matmul(U, Ut), jnp.matmul(dA, V)) / s_dim
    if n > m:
        dV = dV + jnp.matmul(
            jnp.eye(n) - jnp.matmul(V, Vt), jnp.matmul(_H(dA), U)) / s_dim
    return (U, s, Vt), (dU, ds, _T(dV))
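
A minimal sketch of wiring this rule into JAX's custom-derivative machinery, assuming the forward pass is a thin wrapper around jnp.linalg.svd (the wrapper name stable_svd is an assumption):

import jax
import jax.numpy as jnp

@jax.custom_jvp
def stable_svd(A):
    return jnp.linalg.svd(A, full_matrices=False, compute_uv=True)

stable_svd.defjvp(stable_svd_jvp)  # use the tweaked rule above instead of the default SVD JVP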
Example #10
    def solve(self, Eloc, gradients, p=None):

        # Get TDVP equation from MC data
        self.S, F, Fdata = self.get_tdvp_equation(Eloc, gradients, p)

        # Transform TDVP equation to eigenbasis
        self.transform_to_eigenbasis(self.S, F, Fdata)

        # Discard eigenvalues below numerical precision
        #self.invEv = jnp.where(jnp.abs(self.ev / self.ev[-1]) > self.svdTol, 1./self.ev, 0.)
        self.invEv = jnp.where(
            jnp.abs(self.ev / self.ev[-1]) > 1e-14, 1. / self.ev, 0.)
        regularizer = 1. / (1. +
                            (self.svdTol / jnp.abs(self.ev / self.ev[-1]))**6)

        if p is None:
            # Construct a soft cutoff based on the SNR
            regularizer *= 1. / (1. + (self.snrTol /
                                       (0.5 * (self.snr + self.snr[::-1])))**6)
        #else:
        #    regularizer = jnp.ones(len(self.invEv))

        update = jnp.real(
            jnp.dot(self.V, (self.invEv * regularizer * self.VtF)))

        return update, jnp.linalg.norm(self.S.dot(update) -
                                       F) / jnp.linalg.norm(F)
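
A hedged sketch of consuming the solver output, where the returned residual serves as a TDVP error estimate; solver, theta, and dt are illustrative names:

dtheta, tdvp_error = solver.solve(Eloc, gradients)
theta = theta + dt * dtheta   # e.g. an Euler step of the TDVP equation of motion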
Example #11
    def get_tdvp_equation(self, Eloc, gradients, p=None):

        self.ElocMean = mpi.global_mean(Eloc, p)
        self.ElocVar = jnp.real(mpi.global_variance(Eloc, p))
        Eloc = self.subtract_helper_Eloc(Eloc, self.ElocMean)
        gradientsMean = mpi.global_mean(gradients, p)
        gradients = self.subtract_helper_grad(gradients, gradientsMean)

        if p is None:

            EOdata = self.get_EO(self.rhsPrefactor, Eloc, gradients)

            self.F0 = mpi.global_mean(EOdata)

        else:

            EOdata = self.get_EO_p(self.rhsPrefactor, p, Eloc, gradients)

            self.F0 = mpi.global_sum(EOdata)

            #work with complex matrix
            #np = gradients.shape[1]//2
            #EOdata = EOdata[:,:np]
            #F = jnp.sum(EOdata, axis=0)
            #S = jnp.matmul(jnp.conj(jnp.transpose(gradients[:,:np])), jnp.matmul(jnp.diag(p), gradients[:,:np]) )

        F = self.makeReal(self.F0)

        self.S0 = mpi.global_covariance(gradients, p)
        S = self.makeReal(self.S0)

        if self.diagonalShift > 1e-10:
            S = S + jnp.diag(self.diagonalShift * jnp.diag(S))

        return S, F, EOdata
Example #12
    def fun_on_leaf(_z):
        if np.isnan(_z):
            if not ignore_nan:
                raise ValueError('NaN encountered')
            return np.real(_z)

        _z_re = np.real(_z)

        if not ignore_im_part:
            if not np.allclose(
                    _z_re, _z_re + np.imag(_z), rtol=rtol, atol=atol):
                raise ValueError(
                    'Significant imaginary part encountered where it was not expected'
                )

        return _z_re
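
A sketch of the intended per-leaf use: mapping the function over a pytree of (possibly complex) scalar leaves with jax.tree_util; the pytree name complex_tree is illustrative:

import jax

real_tree = jax.tree_util.tree_map(fun_on_leaf, complex_tree)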
Example #13
def isdm(mat):
    """Checks whether a given matrix is a valid density matrix.

    Args:
        mat (:obj:`jnp.ndarray`): Input matrix
    
    Returns:
        bool: ``True`` if input matrix is a valid density matrix; 
            ``False`` otherwise
    """
    isdensity = True

    if (
        isket(mat)
        or isbra(mat)
        or not isherm(mat)
        or not jnp.allclose(jnp.real(jnp.trace(mat)), 1, atol=1e-09)
    ):
        isdensity = False
    else:
        evals, _ = jnp.linalg.eig(mat)
        for eig in evals:
            # eigenvalues of a Hermitian matrix are real, so compare the real part
            if jnp.real(eig) < 0 and not jnp.allclose(eig, 0, atol=1e-06):
                isdensity = False
                break

    return isdensity
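
A quick usage sketch, assuming the isket/isbra/isherm helpers used above are in scope:

rho_valid = jnp.array([[0.5, 0.0], [0.0, 0.5]])     # maximally mixed qubit state
rho_invalid = jnp.array([[1.2, 0.0], [0.0, -0.2]])  # trace 1 but not positive semidefinite
print(isdm(rho_valid), isdm(rho_invalid))           # expected: True False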
Example #14
        def update_opt(_, grads, state):
            x, v = state
            inputs = jnp.reshape(grads, (-1, 1))

            v_next = betas * v - alphas * inputs
            x_next = x + jnp.real(jnp.sum(v_next, axis=1))
            return (x_next, v_next)
Example #15
def ft_phase_screen(r0, N, delta, L0, l0):
    # Set PSD
    del_f = 1/(N*delta)
    # frequency grid [1/m]
    fx = jnp.tile(jnp.arange(-N//2, N//2) * del_f, (N, 1))
    fy = jnp.transpose(fx)
    f, th = cart2pol(fx, fy)
    fm = 5.92/l0/(2*jnp.pi)  # inner-scale frequency [1/m]
    f0 = 1/L0  # outer-scale frequency [1/m]
    # modified von Karman atmospheric phase PSD
    PSD_phi = (0.023*jnp.power(r0, -5/3) * jnp.exp(-jnp.power(f/fm, 2)) /
               jnp.power(jnp.power(f, 2) + jnp.power(f0, 2), 11/6))
    PSD_phi = PSD_phi.at[N//2, N//2].set(0.0)  # zero the piston (DC) term
    # random draws of Fourier coefficients
    A = np.random.randn(N, N)
    B = np.random.randn(N, N)

    cn = (A + 1j*B) * jnp.sqrt(PSD_phi)*del_f
    phz = jnp.real(ift2(cn,1))
    return phz
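
A hedged call sketch with representative atmospheric values (Fried parameter r0, grid size N, grid spacing delta, outer scale L0, inner scale l0; the numbers are illustrative):

phz = ft_phase_screen(r0=0.1, N=512, delta=0.01, L0=100.0, l0=0.01)  # N x N phase screen [rad]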
Example #16
def qgt_norm(driver: TDVP, x: PyTree):
    """
    Computes the norm induced by the QGT :math:`S`, i.e, :math:`x^\\dagger S x`.
    """
    y = driver._last_qgt @ x  # pylint: disable=protected-access
    xc_dot_y = nk.jax.tree_dot(nk.jax.tree_conj(x), y)
    return jnp.sqrt(jnp.real(xc_dot_y))
Example #17
def solve_bwd(params, res, grad):
    x, z = res
    x_grad, _ = grad
    x_adj, _ = solve_impl(z, x_grad, adjoint=True, params=params)
    z_grad = tuple(np.real(np.conj(a) * b) for a, b in zip(x_adj, x))
    b_grad = tuple(np.conj(a) for a in x_adj)
    return z_grad, b_grad
Example #18
def cost(params, inputs, outputs):
    r"""Calculates the cost on the whole 
        training dataset.
    
    Args:
        params (obj:`jnp.ndarray`): parameter vectors 
            :math:`\vec{\theta}, \vec{\phi}, 
            \vec{\omega}`
        inputs (obj:`jnp.ndarray`): input kets
            :math:`|\psi_{i} \rangle` in the dataset
        outputs (obj:`jnp.ndarray`): output kets
            :math:`U(\vec{\theta}, \vec{\phi},
            \vec{\omega})|\psi_{i} \rangle`
            in the dataset
    
    Returns:
        float: cost (evaluated on the entire dataset)
            of parametrizing :math:`U(\vec{\theta}, 
            \vec{\phi}, \vec{\omega})` with `params`                  
    """
    loss = 0.0
    thetas, phis, omegas = params
    unitary = Unitary(N)(thetas, phis, omegas)
    for k in range(train_len):
        pred = jnp.dot(unitary, inputs[k])
        loss += jnp.absolute(jnp.real(jnp.dot(outputs[k].conjugate().T, pred)))

    loss = 1 - (1 / train_len) * loss
    return loss[0][0]
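
Because the cost is a pure function of params, a plain gradient step can be sketched with jax.grad; the learning rate and the assumption that params is a tuple of real arrays are illustrative:

import jax

grads = jax.grad(cost)(params, inputs, outputs)
params = tuple(p - 0.01 * g for p, g in zip(params, grads))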
Example #19
def test_fourier():
    import astropy.units as au

    def f(x):
        return jnp.exp(-np.pi * x.value**2)

    import pylab as plt
    x = jnp.linspace(-10., 10., 101) * au.km
    a = f(x)

    F = fourier(a, x)
    (s, ) = fft_freqs(x)
    _a = inv_fourier(F, x)

    plt.plot(s, f(s), label='A true')
    plt.plot(s, jnp.real(F), label='A numeric')
    plt.legend()
    plt.show()

    plt.plot(x, a, label='a')
    plt.plot(x, _a, label='a rec')
    plt.legend()
    # plt.ylim(-10,3)
    plt.show()

    fourier_kernel = FourierKernel(lambda k: jnp.abs(k)**(-11. / 3.), 0.1, 100)
Example #20
def pinv(model: SpectralGradientFit):
    A = vandergrad_builder(model.grid, model.exponents)(model.mesh)
    #
    if model.is_periodic:
        A = np.hstack((np.imag(A), np.real(A)))
    #
    return np.linalg.pinv(A)
Example #21
        def perform_mc_update(i, carry):

            # Generate update proposals
            newKeys = random.split(carry[2], carry[0].shape[0] + 1)
            carryKey = newKeys[-1]
            newStates = vmap(updateProposer,
                             in_axes=(0, 0, None))(newKeys[:len(carry[0])],
                                                   carry[0], updateProposerArg)
            #newStates = carry[0]

            # Compute acceptance probabilities
            newLogPsiSq = jax.vmap(lambda x, y: 2. * jnp.real(x(y)),
                                   in_axes=(None, 0))(net, newStates)
            P = jnp.exp(newLogPsiSq - carry[1])

            # Roll dice
            newKey, carryKey = random.split(carryKey)
            accepted = random.bernoulli(newKey, P).reshape((-1, ))

            # Bookkeeping
            numProposed = carry[3] + len(newStates)
            numAccepted = carry[4] + jnp.sum(accepted)

            # Perform accepted updates
            def update(acc, old, new):
                return jax.lax.cond(acc, lambda x: x[1], lambda x: x[0],
                                    (old, new))

            carryStates = vmap(update, in_axes=(0, 0, 0))(accepted, carry[0],
                                                          newStates)

            carryLogPsiSq = jnp.where(accepted, newLogPsiSq, carry[1])

            return (carryStates, carryLogPsiSq, carryKey, numProposed,
                    numAccepted)
Example #22
def loss_MaunCon_spect(fs,
                       spect,
                       con_inds=np.arange(9),
                       ground_truth=True,
                       diffPS=False,
                       epsilon=0.0015):
    '''
    Finds the mean absolute error between the observed spectra (spect) and the target spectra.
    Inputs:
    fs - frequencies over which to compute the loss
    spect - observed spectra
    con_inds - indices of the power spectra (PS) to compare (can be used to restrict to just the contrast effect, for instance)
    ground_truth - True uses PS found from the MATLAB SSN; False uses idealized by-hand PS from the Ray & Maunsell paper
    diffPS - True compares differences of the PS from the background spectrum (BS); False fits the un-subtracted spectra
        -- should help with peak fitting when set to True
    epsilon - normalizing factor; 0.0015 is 1/1000 of the mean of the ground_truth=True PS.
    '''

    if ground_truth:
        target_spect = np.array(
            get_multi_probe_spect(fs,
                                  fname='test_spect.mat',
                                  ground_truth=ground_truth))
    else:
        target_spect = np.array(
            get_multi_probe_spect(fs, ground_truth=ground_truth))

    if diffPS:
        BS = target_spect[:, 0]
        target_spect = target_spect[:, 1:] - BS[:, None]
        target_spect = target_spect / np.mean(target_spect)

        BS = spect[:, 0]
        spect = np.real(spect[:, 1:] - BS[:, None])
        spect = spect / (np.mean(spect) + epsilon)

    else:
        target_spect = np.real(target_spect[:, con_inds] /
                               np.mean(target_spect[:, con_inds]))

        spect = np.real(spect) / (np.mean(np.real(spect)) + epsilon)

    #spect_loss = np.mean((target_spect - spect) ** 2) #MSE
    spect_loss = np.mean(np.abs(target_spect -
                                spect))  #MAE - mean absolute error

    return spect_loss, target_spect
Example #23
 def body_fun1(j, carry):
     i = his_size - 1 - j
     _q, _a_his = carry
     a_i = state.rho_history[i] * jnp.real(
         _dot(jnp.conj(state.s_history[i]), _q))
     _a_his = _a_his.at[i].set(a_i)
     _q = _q - a_i * jnp.conj(state.y_history[i])
     return _q, _a_his
Example #24
def sample_random_signal(key, decay_vec):
    N = decay_vec.shape[0]
    raw = random.normal(key, [N, 2]) @ np.array([1, 1j])
    print("Raw vector ", random.normal(key, [N, 2]))

    signal_f = raw * decay_vec
    signal = np.real(np.fft.ifft(signal_f))
    return signal
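
Usage sketch: drawing a real signal whose Fourier coefficients decay as 1/(1+k); the decay profile and length are illustrative:

from jax import random
import numpy as np

key = random.PRNGKey(0)
decay_vec = 1.0 / (1.0 + np.arange(128))
signal = sample_random_signal(key, decay_vec)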
Example #25
 def body_fun1(j, carry):
     i = his_size - 1 - j
     _q, _a_his = carry
     a_i = state.rho_history[i] * jnp.real(
         _dot(jnp.conj(state.s_history[i]), _q))
     _a_his = ops.index_update(_a_his, ops.index[i], a_i)
     _q = _q - a_i * jnp.conj(state.y_history[i])
     return _q, _a_his
Example #26
 def fe(const, var):
     hansatz_matrix = hansatz(const, var)
     e, v = eigh(hansatz_matrix)
     hmf = h(const, var) - hansatz_matrix  # h-h0 without interaction
     energy = expectation_m(hmf, const.beta, e, v)
     energy += hint(const, var, e, v)
     energy += -1 / const.beta * jnp.sum(log1exp(-const.beta * e))
     return jnp.real(energy)
Example #27
def compute_normal_modes(simulation_parameters):
    """Returns the angular frequencies and eigenvectors for the normal modes."""
    m, k_wall, k_pair = (simulation_parameters["m"],
                         simulation_parameters["k_wall"],
                         simulation_parameters["k_pair"])
    num_trajectories = m.shape[0]

    # Construct coupling matrix.
    coupling_matrix = (-(k_wall + 2 * k_pair) * jnp.eye(num_trajectories) +
                       k_pair * jnp.ones((num_trajectories, num_trajectories)))
    coupling_matrix = jnp.diag(1 / m) @ coupling_matrix

    # Compute eigenvalues and eigenvectors.
    eigvals, eigvecs = jnp.linalg.eig(coupling_matrix)
    w = jnp.sqrt(-eigvals)
    w = jnp.real(w)
    eigvecs = jnp.real(eigvecs)
    return w, eigvecs
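
A hedged usage sketch for three coupled oscillators; the exact contents of simulation_parameters beyond the three keys read above are assumptions:

simulation_parameters = {
    "m": jnp.array([1.0, 1.0, 1.0]),  # masses, one per trajectory
    "k_wall": 1.0,                    # spring constant tying each mass to the wall
    "k_pair": 0.5,                    # pairwise coupling constant
}
w, eigvecs = compute_normal_modes(simulation_parameters)  # angular frequencies and mode shapes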
Example #28
def voigt(x, p):
    sigma = p[3] / (2 * np.sqrt(2 * np.log(2)))
    gamma = p[2] / 2
    z = ((x - p[4]) + 1j * gamma) / (sigma * np.sqrt(2))
    decay = (np.exp(-x / p[5]))
    num = np.real(wofz(z))
    dem = sigma * np.sqrt(2 * np.pi)
    V = p[0] + decay * p[1] * (num / dem)
    return V
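
An evaluation sketch; the parameter ordering is read off the function body (p = [offset, amplitude, Lorentzian FWHM, Gaussian FWHM, center, decay constant tau]), and the numbers are illustrative:

import numpy as np

x = np.linspace(0.0, 10.0, 500)
p = [0.0, 1.0, 0.3, 0.4, 5.0, 20.0]
y = voigt(x, p)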
Example #29
    def _apply(
        self,
        iter: jnp.ndarray,
        grad: jnp.ndarray,
        state: Tuple[jnp.ndarray],
        param: jnp.ndarray,
        precond: Union[None, jnp.ndarray],
        use_precond=False
    ) -> Tuple[jnp.ndarray, Tuple[jnp.ndarray]]:
        if use_precond:
            rgrad = self.manifold.egrad_to_rgrad(param, grad.conj(), precond)
        else:
            rgrad = self.manifold.egrad_to_rgrad(param, grad.conj())
        momentum = self.beta1 * state[0] + (1 - self.beta1) * rgrad
        if use_precond:
            v = self.beta2 * state[1] + (1 - self.beta2) * self.manifold.inner(
                param, rgrad, rgrad, precond
            )
        else:
            v = self.beta2 * state[1] + (1 - self.beta2) * self.manifold.inner(
                param, rgrad, rgrad
            )
        if self.ams:
            v_hat = jax.lax.complex(jnp.maximum(jnp.real(v), jnp.real(state[2])), jnp.imag(v))

        # Bias correction
        lr_corr = (
            self.learning_rate
            * jnp.sqrt(1 - self.beta2 ** (iter + 1))
            / (1 - self.beta1 ** (iter + 1))
        )

        if self.ams:
            search_dir = -lr_corr * momentum / (jnp.sqrt(v_hat) + self.eps)
            param, momentum = self.manifold.retraction_transport(
                param, momentum, search_dir
            )
            return param, (momentum, v, v_hat)
        else:
            search_dir = -lr_corr * momentum / (jnp.sqrt(v) + self.eps)
            param, momentum = self.manifold.retraction_transport(
                param, momentum, search_dir
            )
            return param, (momentum, v)
Example #30
    def test_pw(self, phim, phiw, f0m, f0w, const, rho, theta, phif0, phi, f0):
        print(self.id + ': test_pw is called')
        ph = np.moveaxis(self.phase_f0(theta, rho), 1, 0)
        bw = self.BW_f0(phim, phiw, f0m, f0w, phi, f0)
        _phif0 = dplex.dtomine(np.einsum('ijk,il->ljk', phif0, const))
        _phif0 = dplex.deinsum('ijk,i->ijk', _phif0, ph)
        _phif0 = dplex.deinsum('ijk,ij->jk', _phif0, bw)
        _phif0 = np.real(np.sum(dplex.dabs(_phif0), axis=1))

        return -np.sum(np.log(_phif0))