Example #1
def sigmasqr(cosmo,
             R,
             transfer_fn,
             kmin=0.0001,
             kmax=1000.0,
             ksteps=5,
             **kwargs):
    """ Computes the energy of the fluctuations within a sphere of R h^{-1} Mpc

  .. math::

     \\sigma^2(R)= \\frac{1}{2 \\pi^2} \\int_0^\\infty \\frac{dk}{k} k^3 P(k,z) W^2(kR)

  where

  .. math::

     W(kR) = \\frac{3j_1(kR)}{kR}
  """
    def int_sigma(logk):
        k = np.exp(logk)
        x = k * R
        w = 3.0 * (np.sin(x) - x * np.cos(x)) / (x * x * x)
        pk = transfer_fn(cosmo, k, **kwargs)**2 * primordial_matter_power(
            cosmo, k)
        return k * (k * w)**2 * pk

    # integration bounds in ln k, consistent with k = np.exp(logk) in the integrand
    y = romb(int_sigma, np.log(kmin), np.log(kmax), divmax=7)
    return 1.0 / (2.0 * np.pi**2.0) * y
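For reference, a self-contained sketch of the same sigma^2(R) integral with a toy power-law spectrum and a trapezoidal rule; everything below is illustrative and does not use the cosmology objects assumed by the snippet above.

import numpy as np

def sigma_sq_toy(R, ns=0.96, kmin=1e-4, kmax=1e3, nk=2048):
    """sigma^2(R) with a toy P(k) = k^ns, integrated in ln k."""
    lnk = np.linspace(np.log(kmin), np.log(kmax), nk)
    k = np.exp(lnk)
    x = k * R
    w = 3.0 * (np.sin(x) - x * np.cos(x)) / x**3   # top-hat window W(kR)
    pk = k**ns                                     # toy, unnormalized power spectrum
    integrand = k * (k * w)**2 * pk                # same integrand as int_sigma above
    return np.trapz(integrand, lnk) / (2.0 * np.pi**2)

print(sigma_sq_toy(8.0))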
Example #2
def dtauCIA(nus, Tarr, Parr, dParr, vmr1, vmr2, mmw, g, nucia, tcia, logac):
    """dtau of the CIA continuum.

    Args:
       nus: wavenumber matrix (cm-1)
       Tarr: temperature array (K)
       Parr: pressure array (bar)
       dParr: delta pressure array (bar)
       vmr1: volume mixing ratio (VMR) for molecules 1 [N_layer]
       vmr2: volume mixing ratio (VMR) for molecules 2 [N_layer]
       mmw: mean molecular weight of atmosphere
       g: gravity (cm/s2)
       nucia: wavenumber array for CIA
       tcia: temperature array for CIA
       logac: log10(absorption coefficient of CIA)

    Returns:
       optical depth matrix  [N_layer, N_nus]
    """
    narr = number_density(Parr, Tarr)
    lognarr1 = jnp.log10(vmr1 * narr)  # log number density
    lognarr2 = jnp.log10(vmr2 * narr)  # log number density
    logkb = np.log10(kB)
    logg = jnp.log10(g)
    ddParr = dParr / Parr
    dtauc = (10**(logacia(Tarr, nus, nucia, tcia, logac) + lognarr1[:, None] +
                  lognarr2[:, None] + logkb - logg - logm_ucgs) *
             Tarr[:, None] / mmw * ddParr[:, None])

    return dtauc
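As a rough illustration of the log-space arithmetic above, here is a self-contained one-layer version in plain numpy. All numbers are made up, the CIA coefficient is a placeholder rather than a database value, and logm_ucgs is taken to be log10 of the atomic mass unit in g.

import numpy as np

kB = 1.380649e-16        # Boltzmann constant (erg/K)
m_u = 1.66053906660e-24  # atomic mass unit (g)
T, P, dP = 1000.0, 1.0, 0.1      # temperature (K), pressure and layer width (bar)
mmw, g = 2.33, 1.0e5             # mean molecular weight, gravity (cm/s2)
n = P * 1e6 / (kB * T)           # total number density (cm-3); 1 bar = 1e6 dyn/cm2
n1, n2 = 0.9 * n, 0.1 * n        # toy volume mixing ratios of the two colliders
log_a_cia = -44.0                # placeholder log10 CIA absorption coefficient

# dtau = a_cia * n1 * n2 * kB * T / (mmw * m_u * g) * dP / P, assembled in log10
dtau = 10**(log_a_cia + np.log10(n1) + np.log10(n2)
            + np.log10(kB) - np.log10(g) - np.log10(m_u)) * T / mmw * (dP / P)
print(dtau)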
Example #3
    def sed(self,
            logt=None,
            logg=None,
            feh=None,
            afe=None,
            logl=None,
            av=0.0,
            rv=None,
            dist=None,
            logA=None,
            filters=None):
        """
        """

        if filters is None:
            filters = self.anns.keys()

        if rv is None:
            inpars = [10.0**logt, logg, feh, afe, av]
        else:
            inpars = [10.0**logt, logg, feh, afe, av, rv]

        BC = np.array([self.anns[f].eval(inpars) for f in filters])
        if (logl is not None) and (dist is not None):
            mu = 5 * np.log10(dist) - 5
            m = -2.5 * logl + 4.74 - BC + mu
        elif logA is not None:
            m = 5.0 * logA - 10.0 * (logt - np.log10(5770.0)) - 0.26 - BC
        else:
            raise IOError('cannot understand input pars into sed function')
        return m
Example #4
    def autonus(self, checknus, tag='ESLOG'):
        if not checknus:
            print('WARNING: the wavenumber grid does not look ' + tag)
            if self.autogridconv:
                print('the wavenumber grid is interpolated.')
                if tag == 'ESLOG':
                    return np.logspace(jnp.log10(self.nus[0]),
                                       jnp.log10(self.nus[-1]), len(self.nus))
                if tag == 'ESLIN':
                    return np.linspace(self.nus[0], self.nus[-1],
                                       len(self.nus))
        return self.nus
Example #5
def dtauHminus_mmwl(nus, Tarr, Parr, dParr, vmre, vmrh, mmw, g):
    """dtau of the H- continuum.
       (for the case where mmw is given for each atmospheric layer)

    Args:
       nus: wavenumber matrix (cm-1)
       Tarr: temperature array (K)
       Parr: pressure array (bar)
       dParr: delta pressure array (bar)
       vmre: volume mixing ratio (VMR) for e- [N_layer]
       vmrh: volume mixing ratio (VMR) for H atoms [N_layer]
       mmw: mean molecular weight of atmosphere [N_layer]
       g: gravity (cm/s2)

    Returns:
       optical depth matrix  [N_layer, N_nus]
    """
    narr = number_density(Parr, Tarr)
    #       number_density_e: number density for e- [N_layer]
    #       number_density_h: number density for H atoms [N_layer]
    number_density_e = vmre * narr
    number_density_h = vmrh * narr
    logkb = np.log10(kB)
    logg = jnp.log10(g)
    ddParr = dParr / Parr
    logabc = (log_hminus_continuum(nus, Tarr, number_density_e,
                                   number_density_h))
    dtauh = 10**(logabc + logkb - logg -
                 logm_ucgs) * Tarr[:, None] / mmw[:, None] * ddParr[:, None]

    return dtauh
Example #6
def log_hminus_continuum(nus, temperature, number_density_e, number_density_h):
    """John (1988) H- continuum opacity.

    Args:
       nus: wavenumber grid (cm-1) [Nnu]
       temperature: gas temperature array [K] [Nlayer]
       number_density_e: electron number density array [Nlayer]
       number_density_h: H atom number density array [Nlayer]

    Returns:
       log10(absorption coefficient) [Nlayer,Nnu]
    """
    # wavelength in units of microns
    wavelength_um = 1e4 / nus
    # first, compute the cross sections (in cm4/dyne)
    vkappa_bf = vmap(bound_free_absorption, (None, 0), 0)
    vkappa_ff = vmap(free_free_absorption, (None, 0), 0)
    mkappa_bf = vmap(vkappa_bf, (0, None), 0)
    mkappa_ff = vmap(vkappa_ff, (0, None), 0)
    kappa_bf = mkappa_bf(wavelength_um, temperature)
    kappa_ff = mkappa_ff(wavelength_um, temperature)
    #    kappa_bf = bound_free_absorption(wavelength_um, temperature)
    #    kappa_ff = free_free_absorption(wavelength_um, temperature)

    electron_pressure = number_density_e * kB * \
        temperature  # electron pressure in dyne/cm2
    hydrogen_density = number_density_h

    # and now finally the absorption_coeff (in cm-1)
    absorption_coeff = (kappa_bf + kappa_ff) * \
        electron_pressure * hydrogen_density

    return jnp.log10(absorption_coeff.T)
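The nested vmap pattern above, with in_axes (None, 0) and then (0, None), is what builds the [Nnu, Nlayer] matrix before the final transpose. A minimal, self-contained sketch of the same pattern with a toy function:

import jax.numpy as jnp
from jax import vmap

def f(wav_um, T):
    # toy scalar function standing in for the John (1988) cross sections
    return wav_um * jnp.sqrt(T)

v_over_T = vmap(f, in_axes=(None, 0))             # vectorize over temperature
m_over_wav_T = vmap(v_over_T, in_axes=(0, None))  # then over wavelength

wav = jnp.linspace(0.5, 2.0, 4)    # microns
T = jnp.array([800.0, 1200.0, 1600.0])
print(m_over_wav_T(wav, T).shape)  # (4, 3), i.e. [Nnu, Nlayer]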
Example #7
    def __init__(self,
                 nu_max: DistLike,
                 log_tau: DistLike,
                 phi: DistLike = None):
        super().__init__(nu_max, log_tau=log_tau, phi=phi)
        self.units = {
            "a_he": u.dimensionless_unscaled,
            "b_he": u.megasecond**2,
            "tau_he": u.megasecond,
            "phi_he": u.rad,
        }
        self.symbols = {
            "a_he": r"$a_\mathrm{He}$",
            "b_he": r"$b_\mathrm{He}$",
            "tau_he": r"$\tau_\mathrm{He}$",
            "phi_he": r"$\phi_\mathrm{He}$",
        }
        # log units
        for k in ["a_he", "b_he", "tau_he"]:
            log_k = f"log_{k}"
            self.units[log_k] = u.LogUnit(self.units[k])
            self.symbols[log_k] = r"$\log\," + self.symbols[k][1:]

        log_numax = jnp.log10(distribution(nu_max).mean)
        # Attempt rough guess of glitch params
        self.log_a: dist.Distribution = dist.Normal(-2.119 + 0.005 * log_numax,
                                                    0.378)
        self.log_b: dist.Distribution = dist.Normal(0.024 - 1.811 * log_numax,
                                                    0.138)
Example #8
    def __init__(self,
                 nu_max: DistLike,
                 log_tau: DistLike,
                 phi: DistLike = None):
        super().__init__(nu_max, log_tau=log_tau, phi=phi)

        self.units = {
            "a_cz": u.microhertz**3,
            "tau_cz": u.megasecond,
            "phi_cz": u.rad,
        }
        self.symbols = {
            "a_cz": r"$a_\mathrm{BCZ}$",
            "tau_cz": r"$\tau_\mathrm{BCZ}$",
            "phi_cz": r"$\phi_\mathrm{BCZ}$",
        }

        # log units
        for k in ["a_cz", "tau_cz"]:
            log_k = f"log_{k}"
            self.units[log_k] = u.LogUnit(self.units[k])
            self.symbols[log_k] = r"$\log\," + self.symbols[k][1:]

        log_numax = jnp.log10(distribution(nu_max).mean)
        # Rough guess of glitch params
        self.log_a: dist.Distribution = dist.Normal(-4.544 + 2.995 * log_numax,
                                                    0.52)
Example #9
    def __call__(self):
        assignment = numpyro.sample("assignment",
                                    dist.Categorical(self.weights))

        loc = self.loc[assignment]
        cov = self.cov[assignment]

        nu_max = numpyro.sample("nu_max", self.nu_max)
        log_nu_max = jnp.log10(nu_max)

        teff = numpyro.sample("teff", self.teff)

        loc0101 = loc[0:2]
        cov0101 = jnp.array([[cov[0, 0], cov[0, 1]], [cov[1, 0], cov[1, 1]]])

        L = jax.scipy.linalg.cho_factor(cov0101, lower=True)
        A = jax.scipy.linalg.cho_solve(L,
                                       jnp.array([log_nu_max, teff]) - loc0101)

        loc2323 = loc[2:]
        cov2323 = jnp.array([[cov[2, 2], cov[2, 3]], [cov[3, 2], cov[3, 3]]])

        cov0123 = jnp.array([[cov[0, 2], cov[1, 2]], [cov[0, 3], cov[1, 3]]])
        v = jax.scipy.linalg.cho_solve(L, cov0123.T)

        cond_loc = loc2323 + jnp.dot(cov0123, A)
        cond_cov = (
            cov2323 - jnp.dot(cov0123, v) +
            self.noise * jnp.eye(2)  # Add white noise
        )
        numpyro.sample("log_tau", dist.MultivariateNormal(cond_loc, cond_cov))
Example #10
    def genphot_scaled(self, pars, verbose=False):
        # define parameters from pars array
        Teff = pars[0]
        logg = pars[1]
        FeH = pars[2]
        aFe = pars[3]
        logA = pars[4]
        Av = pars[5]

        logTeff = np.log10(Teff)

        # create parameter dictionary
        photpars = {}
        photpars['logt'] = logTeff
        photpars['logg'] = logg
        photpars['feh'] = FeH
        photpars['afe'] = aFe
        photpars['logA'] = logA
        photpars['av'] = Av
        photpars['rv'] = 3.1

        # create filter list and arrange photometry to this list

        sed = self.fppsed.sed(**photpars)

        outdict = {ff_i: sed_i for sed_i, ff_i in zip(sed, self.filterarray)}

        return outdict
Example #11
def troe_falloff_correction(
    T: float, lPr: np.ndarray, troe_coeffs: np.ndarray, troe_indices: np.ndarray
) -> np.ndarray:
    """
    modify rate constants use TROE falloff parameters
    returns: np.ndarray of F(T,P) 
    """
    troe_coeffs = troe_coeffs[troe_indices]
    F_cent = (
        np.multiply(
            np.subtract(1, troe_coeffs[:, 0]), np.exp(np.divide(-T, troe_coeffs[:, 3]))
        )
        + np.multiply(troe_coeffs[:, 0], np.exp(np.divide(-T, troe_coeffs[:, 1])))
        + np.exp(np.divide(-troe_coeffs[:, 2], T))
    )
    lF_cent = np.log10(F_cent)
    C = np.subtract(-0.4, np.multiply(0.67, lF_cent))
    N = np.subtract(0.75, np.multiply(1.27, lF_cent))
    f1_numerator = lPr + C
    f1_denominator_1 = N
    f1_denominator_2 = np.multiply(0.14, f1_numerator)
    f1 = np.divide(f1_numerator, np.subtract(f1_denominator_1, f1_denominator_2))
    F = np.power(10.0, np.divide(lF_cent, (1.0 + np.square(f1))))
    # F = 10**(lF_cent / (1. + f1**2.))
    return F
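A scalar, step-by-step version of the same Troe broadening factor may make the vectorized code easier to follow. The parameter names (a, T1, T2, T3) simply mirror columns 0..3 of troe_coeffs as used above, and the numbers in the call are toy values.

import numpy as np

def troe_F(T, Pr, a, T1, T2, T3):
    """Scalar Troe factor; (a, T1, T2, T3) correspond to troe_coeffs columns 0..3."""
    F_cent = (1.0 - a) * np.exp(-T / T3) + a * np.exp(-T / T1) + np.exp(-T2 / T)
    lF_cent = np.log10(F_cent)
    C = -0.4 - 0.67 * lF_cent
    N = 0.75 - 1.27 * lF_cent
    f1 = (np.log10(Pr) + C) / (N - 0.14 * (np.log10(Pr) + C))
    return 10.0 ** (lF_cent / (1.0 + f1 ** 2))

print(troe_F(T=1000.0, Pr=0.5, a=0.6, T1=1500.0, T2=5000.0, T3=200.0))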
Example #12
def H2O_MF(T=298.15):
    """Marshall and Frank, J. Phys. Chem. Ref. Data 10, 295-304."""
    # Matches Clegg's model [2019-07-02]
    log10kH2O = (
        -4.098 - 3.2452e3 / T + 2.2362e5 / T**2 - 3.984e7 / T**3 +
        (1.3957e1 - 1.2623e3 / T + 8.5641e5 / T**2) * np.log10(rhow_K75(T)))
    lnkH2O = log10kH2O * ln10
    return lnkH2O
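A quick sanity check of the fit: at 298.15 K, using liquid water density of about 0.997 g/cm3 as a stand-in for rhow_K75(T), the expression gives log10(kH2O) close to -14, the familiar ion product of water.

import numpy as np

T = 298.15
rhow = 0.997  # g/cm3, stand-in for rhow_K75(T)
log10kH2O = (-4.098 - 3.2452e3 / T + 2.2362e5 / T**2 - 3.984e7 / T**3 +
             (1.3957e1 - 1.2623e3 / T + 8.5641e5 / T**2) * np.log10(rhow))
print(log10kH2O)  # about -14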
Example #13
def guess_ptargets(ptargets, totals):
    """Update ptargets with vaguely sensible first guesses for the stoichiometric
    solver.
    """
    assert isinstance(ptargets, OrderedDict)
    for s in ptargets:
        if s == "H":
            ptargets["H"] = 8.0
        elif s == "F":
            assert "F" in totals
            ptargets["F"] = -np.log10(totals["F"] / 2)
        elif s == "CO3":
            assert "CO2" in totals
            ptargets["CO3"] = -np.log10(totals["CO2"] / 10)
        elif s == "PO4":
            assert "PO4" in totals
            ptargets["PO4"] = -np.log10(totals["PO4"] / 2)
    return ptargets
Example #14
    def genphot(self,pars,rvfree=False,verbose=False):
        # define parameters from pars array
        Teff = pars[0]
        logg = pars[1]
        FeH  = pars[2]
        aFe  = pars[3]
        logR = pars[4]
        Dist = pars[5]
        Av   = pars[6]

        Rv = lax.cond(rvfree,lambda _:pars[7],lambda _ :3.1,None)

        logTeff = np.log10(Teff)

        logL = 2.0*logR + 4.0*(logTeff - np.log10(5770.0))

        # create parameter dictionary
        photpars = {}
        photpars['logt'] = logTeff
        photpars['logg'] = logg
        photpars['feh']  = FeH
        photpars['afe']  = aFe
        photpars['logl'] = logL
        photpars['dist'] = Dist
        photpars['av']   = Av
        photpars['rv']   = Rv

        # create filter list and arrange photometry to this list

        # sed = self.ppsed.sed(filters=filterlist,**photpars)
        sed = self.fppsed.sed(**photpars)

        outdict = {ff_i:sed_i for sed_i,ff_i in zip(sed,self.filterarray)}

        # # calculate absolute bolometric magnitude
        # Mbol = -2.5*(2.0*logR + 4.0*np.log10(Teff/5770.0)) + 4.74

        # # calculate BC for all photometry in obs_phot dict
        # outdict = {}
        # for ii,kk in enumerate(self.filterarray):
        #   BCdict_i = float(self.ANNdict[kk].eval([Teff,logg,FeH,Av]))
        #   outdict[kk] = (Mbol - BCdict_i) + 5.0*np.log10(Dist) - 5.0

        return outdict
Example #15
def ell_binning():
    # we put this here to make sure it's used consistently
    # plausible limits I guess
    ell_max = 2000
    n_ell = 100
    # choose ell bins from 100 .. ell_max, log spaced
    ell_edges = np.logspace(2, np.log10(ell_max), n_ell + 1)
    ell = 0.5 * (ell_edges[1:] + ell_edges[:-1])
    delta_ell = ell_edges[1:] - ell_edges[:-1]
    return ell, delta_ell
Example #16
File: vae.py  Project: byzhang/d3p
    def reconstruct_img(epoch, num_epochs, batchifier_state, svi_state, rng):
        """Reconstructs an image for the given epoch

        Obtains a sample from the testing data set and passes it through the
        VAE. Stores the result as image file 'epoch_{epoch}_recons.png' and
        the original input as 'epoch_{epoch}_original.png' in folder '.results'.

        :param epoch: Number of the current epoch
        :param num_epochs: Number of total epochs
        :param batchifier_state: State of the test set batchifier
        :param svi_state: Current state of the SVI optimizer
        :param rng: rng key
        """
        assert (num_epochs > 0)
        img = test_fetch_plain(0, batchifier_state)[0][0]
        plt.imsave(os.path.join(
            RESULTS_DIR, "epoch_{:0{}d}_original.png".format(
                epoch, (int(jnp.log10(num_epochs)) + 1))),
                   img,
                   cmap='gray')
        rng, rng_binarize = random.split(rng, 2)
        test_sample = binarize(rng_binarize, img)
        test_sample = jnp.reshape(test_sample, (1, *jnp.shape(test_sample)))
        params = svi.get_params(svi_state)

        samples = sample_multi_posterior_predictive(
            rng, 10, model,
            (1, args.z_dim, args.hidden_dim, np.prod(test_sample.shape[1:])),
            guide, (test_sample, args.z_dim, args.hidden_dim), params)

        img_loc = samples['obs'][0].reshape([28, 28])
        avg_img_loc = jnp.mean(samples['obs'], axis=0).reshape([28, 28])
        plt.imsave(os.path.join(
            RESULTS_DIR, "epoch_{:0{}d}_recons_single.png".format(
                epoch, (int(jnp.log10(num_epochs)) + 1))),
                   img_loc,
                   cmap='gray')
        plt.imsave(os.path.join(
            RESULTS_DIR, "epoch_{:0{}d}_recons_avg.png".format(
                epoch, (int(jnp.log10(num_epochs)) + 1))),
                   avg_img_loc,
                   cmap='gray')
Example #17
def _individual_halo_assembly_jax_kern(
    logt, dtarr, logmp, dmhdt_x0, dmhdt_k, dmhdt_early_index, dmhdt_late_index, indx_tmp
):
    """JAX kernel for the MAH of individual dark matter halos."""
    #  Use a sigmoid to model log10(dMh/dt) with arbitrary normalization
    slope = jax_sigmoid(logt, dmhdt_x0, dmhdt_k, dmhdt_early_index, dmhdt_late_index)
    _log_dmhdt_unnnormed = slope * (logt - logt[indx_tmp])

    # Integrate dMh/dt to calculate Mh(t) with arbitrary normalization
    _dmhdt_unnnormed = jax_np.power(10, _log_dmhdt_unnnormed)
    _dmah_unnnormed_integrand = _dmhdt_unnnormed * dtarr
    # in this section could use jax_np.logcumsumexp if it existed
    _mah_unnnormed = jax_np.cumsum(_dmah_unnnormed_integrand) * 1e9
    _logmah_unnnormed = jax_np.log10(_mah_unnnormed)

    # Normalize Mh(t) dMh/dt to integrate to logmp at logtmp
    _logmp_unnnormed = _logmah_unnnormed[indx_tmp]
    _rescaling_factor = logmp - _logmp_unnnormed
    logmah = _logmah_unnnormed + _rescaling_factor
    log_dmhdt = jax_np.log10(_dmhdt_unnnormed) + _rescaling_factor
    return logmah, log_dmhdt
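jax_sigmoid is not shown in this snippet; a common sigmoid with the call signature used above (an assumption here, not necessarily the original implementation) interpolates between the early- and late-time power-law indices:

import jax.numpy as jax_np

def jax_sigmoid(x, x0, k, ymin, ymax):
    # smoothly transitions from ymin (x << x0) to ymax (x >> x0) with steepness k
    return ymin + (ymax - ymin) / (1.0 + jax_np.exp(-k * (x - x0)))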
Example #18
    def sed(self,
            logt=None,
            logg=None,
            feh=None,
            afe=None,
            logl=None,
            av=0.0,
            rv=3.1,
            dist=None,
            logA=None,
            band_indices=slice(None)):
        """
        """

        # if type(rv) == type(None):
        #     inpars = [10.0**logt,logg,feh,afe,av]
        # else:
        inpars = [10.0**logt, logg, feh, afe, av, rv]

        def bcdefault(x):
            return self.anns.eval(inpars)

        def bchiav(x):
            BC0 = self.anns.eval([10.0**logt, logg, feh, afe, 0.0, 3.1])
            return self.HiAv.calc(BC0, av, rv)

        BC = lax.cond(av < 5.0, bcdefault, bchiav, None)

        if (logl is not None) and (dist is not None):
            mu = 5.0 * np.log10(dist) - 5.0
            m = -2.5 * logl + 4.74 - BC + mu
        elif logA is not None:
            m = 5.0 * logA - 10.0 * (logt - np.log10(5770.0)) - 0.26 - BC
        else:
            raise IOError('cannot understand input pars into sed function')

        try:
            return m[band_indices]
        except IndexError:
            return [m]
Example #19
def getjov_logg(Rp, Mp):
    """logg from radius and mass in the Jovian unit.

    Args:
       Rp: radius in the unit of Jovian radius
       Mp: mass in the unit of Jovian mass

    Returns:
       logg

    Note:
       Mpcgs=Mp*const.MJ, Rpcgs=Rp*const.RJ,
       then logg is given by log10(const.G*Mpcgs/Rpcgs**2)
    """
    return jnp.log10(2478.57730044555*Mp/Rp**2)
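The numerical constant 2478.577... follows from the Note in the docstring. A quick consistency check with CGS constants; the exact value depends on the adopted G, MJ, and RJ, so small differences are expected.

import numpy as np

G = 6.674e-8   # gravitational constant (cm3 g-1 s-2)
MJ = 1.898e30  # Jovian mass (g)
RJ = 7.1492e9  # Jovian equatorial radius (cm)
print(G * MJ / RJ**2)            # ~2478, matching the constant above
print(np.log10(G * MJ / RJ**2))  # logg for Rp = Mp = 1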
Example #20
    def cross_entropy_loss(self, params, X, y):
        """Compute the multi-class cross-entropy loss."""
        preds = self.forward_pass(params, X)
        preds = np.exp(preds)
        predsum = np.sum(preds, axis=1, keepdims=True)
        preds /= predsum
        # print(np.sum(preds,axis=1))
        res = 0
        y = y.squeeze()
        for i in range(preds.shape[0]):
            res -= np.log10(preds[i][int(y[i])])
        res = res / preds.shape[0]
        # print(res)
        return res
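A vectorized equivalent of the loop above, keeping the same arithmetic including the base-10 logarithm; a sketch using jax.numpy rather than whichever numpy alias the class assumes.

import jax.numpy as np

def cross_entropy_loss_vectorized(logits, y):
    preds = np.exp(logits)
    preds = preds / np.sum(preds, axis=1, keepdims=True)   # softmax
    picked = preds[np.arange(preds.shape[0]), y.squeeze().astype(int)]
    return -np.mean(np.log10(picked))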
Example #21
def get_log_dmhdt_scatter(logm0, time):
    """Scatter in mass accretion rate across time.

    Parameters
    ----------
    logm0 : ndarray, shape (n, )
        Base-10 log of present-day halo mass

    time : ndarray, shape (n, )
        Cosmic time in Gyr.

    Returns
    -------
    scatter : ndarray, shape (n, )
        Scatter in dMh/dt in dex

    """
    scatter = np.array(_log_dmhdt_scatter(logm0, jnp.log10(time)))
    return scatter
Example #22
    def j_score_init(stds, rng2):

        new_params = custom_init(stds, rng2)

        rand_input = jax.random.normal(rng2, [n, 4])
        rng2 += 1

        outputs = jax.vmap(
            partial(raw_lagrangian_eom,
                    learned_dynamics(new_params)))(rand_input)[:, 2:]

        #KL-divergence to mu=0, std=1:
        mu = jnp.average(outputs, axis=0)
        std = jnp.std(outputs, axis=0)

        KL = jnp.sum((mu**2 + std**2 - 1) / 2.0 - jnp.log(std))

        def total_output(p):
            return vmap(partial(raw_lagrangian_eom,
                                learned_dynamics(p)))(rand_input).sum()

        d_params = grad(total_output)(new_params)

        i = 0
        for l1 in d_params:
            if (len(l1)) == 0: continue
            new_l1 = []
            for l2 in l1:
                if len(l2.shape) == 1: continue

                mu = jnp.average(l2)
                std = jnp.std(l2)
                KL += (mu**2 + std**2 - 1) / 2.0 - jnp.log(std)

                #HACK
                desired_gaussian = jnp.sqrt(6) / jnp.sqrt(l2.shape[0] +
                                                          l2.shape[1])
                scaled_std = stds[i] / desired_gaussian
                #Avoid extremely large values
                KL += 0.1 * (scaled_std**2 / 2.0 - jnp.log(scaled_std))
                i += 1

        return jnp.log10(KL)
Example #23
def get_forward_rate_constants(
    T: float,
    R: float,
    C: np.ndarray,
    kinetics_coeffs: KineticsCoeffs,
    kinetics_data: KineticsData,
) -> np.ndarray:
    """
    Calculate forward rate constants with three body, falloff and troe falloff modifications 
    returns: np.ndarray of forward rate constants
    """
    # vectorize and jit rate constants calculation
    initial_k = vmap_arrhenius(T, R, kinetics_coeffs.arrhenius_coeffs)
    C_M = np.matmul(C, kinetics_coeffs.efficiency_coeffs)  # calculate C_M
    three_body_k = np.multiply(
        initial_k[kinetics_data.three_body_indices],
        C_M[kinetics_data.three_body_indices],
    )  # get kf with three body update
    k = jax.ops.index_update(
        initial_k, jax.ops.index[kinetics_data.three_body_indices], three_body_k
    )  # three body update
    total_falloff_indices = np.concatenate(
        [kinetics_data.falloff_indices, kinetics_data.troe_falloff_indices]
    ).sort()
    k0 = vmap_arrhenius(T, R, kinetics_coeffs.arrhenius0_coeffs)  # calculate k0
    Pr = np.divide(np.multiply(k0, C_M), k)  # calculate Pr as kinf is k
    log10Pr = np.log10(Pr)
    falloff_k = np.multiply(
        k[total_falloff_indices],
        (Pr[total_falloff_indices] / (1.0 + Pr[total_falloff_indices])),
    )  # update all type of falloff
    kf = jax.ops.index_update(k, jax.ops.index[total_falloff_indices], falloff_k)
    F = troe_falloff_correction(
        T,
        log10Pr[kinetics_data.troe_falloff_indices],
        kinetics_coeffs.troe_coeffs,
        kinetics_data.troe_falloff_indices,
    )  # calculate F(T, P)
    troe_k = np.multiply(kf[kinetics_data.troe_falloff_indices], F)
    final_k = jax.ops.index_update(
        kf, jax.ops.index[kinetics_data.troe_falloff_indices], troe_k
    )  # final update
    return final_k
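Note that jax.ops.index_update is no longer available in newer JAX releases; the equivalent functional update uses the .at[] syntax. A tiny self-contained illustration:

import jax.numpy as np

# equivalent of jax.ops.index_update(x, jax.ops.index[idx], vals) in current JAX
x = np.zeros(5)
idx = np.array([1, 3])
vals = np.array([10.0, 20.0])
print(x.at[idx].set(vals))  # [ 0. 10.  0. 20.  0.]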
Example #24
def predict_psnr_basic(kernel_fn,
                       train_fx,
                       test_fx,
                       train_x,
                       train_y,
                       test_x,
                       test_y,
                       t_final,
                       eta=None):
    g_dd = kernel_fn(train_x, train_x, 'ntk')
    g_td = kernel_fn(test_x, train_x, 'ntk')
    train_predict_fn = nt.predict.gradient_descent_mse(g_dd, train_y[...,
                                                                     None],
                                                       g_td)
    train_theory_y, test_theory_y = train_predict_fn(t_final, train_fx[...,
                                                                       None],
                                                     test_fx[..., None])

    calc_psnr = lambda f, g: -10. * np.log10(np.mean((f - g)**2))
    return calc_psnr(test_y,
                     test_theory_y[:, 0]), calc_psnr(train_y,
                                                     train_theory_y[:, 0])
Example #25
N = 1500
nus, wav, res = nugrid(22900, 22960, N, unit='AA')
# mdbM=moldb.MdbExomol('.database/CO/12C-16O/Li2015',nus)
# loading molecular database
# molmass=molinfo.molmass("CO") #molecular mass (CO)
mdbM = moldb.MdbExomol('.database/H2O/1H2-16O/POKAZATEL', nus,
                       crit=1.e-45)  # loading molecular database
molmassM = molinfo.molmass('H2O')  # molecular mass (H2O)

q = mdbM.qr_interp(1500.0)
S = SijT(1500.0, mdbM.logsij0, mdbM.nu_lines, mdbM.elower, q)
mask = S > 1.e-25
mdbM.masking(mask)

Tarr = jnp.logspace(jnp.log10(800), jnp.log10(1600), 100)
qt = vmap(mdbM.qr_interp)(Tarr)
SijM = jit(vmap(SijT,
                (0, None, None, None, 0)))(Tarr, mdbM.logsij0, mdbM.nu_lines,
                                           mdbM.elower, qt)

imax = jnp.argmax(SijM, axis=0)
Tmax = Tarr[imax]
print(jnp.min(Tmax))

pl = planck.piBarr(jnp.array([1100.0, 1000.0]), nus)
print(pl[1] / pl[0])

pl = planck.piBarr(jnp.array([1400.0, 1200.0]), nus)
print(pl[1] / pl[0])
Example #26
def eval_metric(pred, gt):
    pred = jnp.clip(pred, 0, 1)
    gt = jnp.clip(gt, 0, 1)
    mse = ((gt - pred) ** 2).mean([1, 2, 3])
    psnr = -10. * jnp.log10(mse)  # PSNR in dB
    return {'mse': mse, 'psnr': psnr}
Example #27
def _calc_halo_history(logt, logtmp, logmp, x0, k, early, late):
    log_mah = _rolling_plaw_log_mah(logt, logtmp, logmp, x0, k, early, late)
    d_log_mh_dt = _calc_d_log_mh_dt(10.0 ** logt, logtmp, logmp, x0, k, early, late)
    # chain rule: dMh/dt = ln(10) * Mh * d(log10 Mh)/dt, using 1/log10(e) = ln(10)
    dmhdt = d_log_mh_dt * (10.0 ** (log_mah - 9.0)) / jnp.log10(jnp.e)
    return dmhdt, log_mah
Example #28
def _rolling_plaw_log_mah_vs_time(t, logtmp, logmp, x0, k, early, late):
    logt = jnp.log10(t)
    return _rolling_plaw_log_mah(logt, logtmp, logmp, x0, k, early, late)
Example #29
def optimize_dome(S,  # the speaker array
                  ambisonic_order=3,
                  el_lim=-π/8,
                  tikhonov_lambda=1e-3,
                  sparseness_penalty=1,
                  do_report=False,
                  rE_goal='auto',
                  eval_order=None,
                  random_start=False
                  ):
    """Test optimizer with CCRMA Stage array."""
    #
    #
    order_h, order_v, sh_l, sh_m, id_string = pc.ambisonic_channels(ambisonic_order)
    order = max(order_h, order_v)  # FIXME
    is_3D = order_v > 0

    if eval_order is None:
        eval_order = ambisonic_order
        eval_order_given = False
    else:
        eval_order_given = True

    eval_order_h, eval_order_v, eval_sh_l, eval_sh_m, eval_id_string = \
        pc.ambisonic_channels(eval_order)

    mask_matrix = pc.mask_matrix(eval_sh_l, eval_sh_m, sh_l, sh_m)
    print(mask_matrix)

    # if True:
    #     S = esa.stage2017() + esa.nadir()#stage()
    #     spkr_array_name = S.name
    # else:
    #     # hack to enter Eric's array
    #     S = emb()
    #     spkr_array_name = 'EMB'

    spkr_array_name = S.name



    print('speaker array = ', spkr_array_name)

    S_u = np.array(sg.sph2cart(S.az, S.el, 1))

    gamma = shelf.gamma(sh_l, decoder_type='max_rE', decoder_3d=is_3D,
                        return_matrix=True)

    figs = []
    if not random_start:
        M_start = 'AllRAD'

        M_allrad = bd.allrad(sh_l, sh_m, S.az, S.el,
                             speaker_is_real=S.is_real)

        # remove imaginary speaker from S_u and Sr
        S_u = S_u[:, S.is_real]  # S.Real.values
        Sr = S[S.is_real]  # S.Real.values

        M_allrad_hf = M_allrad @ gamma

        # performance plots
        plot_title = "AllRAD, "
        if eval_order_given:
            plot_title += f"Design: {id_string}, Test: {eval_id_string}"
        else:
            plot_title += f"Signal set={id_string}"

        figs.append(
            lm.plot_performance(M_allrad_hf, S_u, sh_l, sh_m,
                                mask_matrix = mask_matrix,
                                title=plot_title))

        lm.plot_matrix(M_allrad_hf, title=plot_title)

        print(f"\n\n{plot_title}\nDiffuse field gain of each loudspeaker (dB)")
        for n, g in zip(Sr.ids,
                        10 * np.log10(np.sum(M_allrad ** 2, axis=1))):
            print(f"{n:3}:{g:8.2f} |{'=' * int(60 + g)}")

    else:
        M_start = 'Random'
        # let optimizer dream up a decoder on its own
        M_allrad = None
        # more mess from imaginary speakers
        S_u = S_u[:, S.is_real]
        Sr = S[S.is_real]

    # M_allrad = None

    # Objective for E
    T = sg.t_design5200()
    cap, *_ = sg.spherical_cap(T.u,
                               (0, 0, 1),  # apex
                               π/2 - el_lim)
    E0 = np.where(cap, 1.0, 0.1)  # inside, outside

    # np.array([0.1, 1.0])[cap.astype(np.int8)]

    # Objective for rE order+2 inside the cap, order-2 outside
    rE_goal = np.where(cap,
                       shelf.max_rE_3d(order+2), # inside the cap
                       shelf.max_rE_3d(max(order-2, 1)) # outside the cap
                       )

    #np.array([shelf.max_rE_3d(max(order-2, 1)),
    #                    shelf.max_rE_3d(order+2)])[cap.astype(np.int8)]

    M_opt, res = optimize(M_allrad, S_u, sh_l, sh_m, E_goal=E0,
                          iprint=50, tikhonov_lambda=tikhonov_lambda,
                          sparseness_penalty=sparseness_penalty,
                          rE_goal=rE_goal)

    plot_title = f"Optimized {M_start}, "
    if eval_order_given:
        plot_title += f"Design: {id_string}, Test: {eval_id_string}"
    else:
        plot_title += f"Signal set={id_string}"

    figs.append(
        lm.plot_performance(M_opt, S_u, sh_l, sh_m,
                            mask_matrix = mask_matrix,
                            title=plot_title
                            ))

    lm.plot_matrix(M_opt, title=plot_title)

    with io.StringIO() as f:
        print(f"ambisonic_order = {order}\n" +
              f"el_lim = {el_lim * 180 / π}\n" +
              f"tikhonov_lambda = {tikhonov_lambda}\n" +
              f"sparseness_penalty = {sparseness_penalty}\n",
              file=f)

        off = np.isclose(np.sum(M_opt ** 2, axis=1), 0, rtol=1e-6)  # 60dB down
        print("Using:\n", Sr.ids[~off.copy()], file=f)
        print("Turned off:\n", Sr.ids[off.copy()], file=f)

        print("\n\nDiffuse field gain of each loudspeaker (dB)", file=f)
        for n, g in zip(Sr.ids,
                        10 * np.log10(np.sum(M_opt ** 2, axis=1))):
            print(f"{n:3}:{g:8.2f} |{'=' * int(60 + g)}", file=f)
        report = f.getvalue()
        print(report)

    if do_report:
        reports.html_report(zip(*figs),
                            text=report,
                            directory=spkr_array_name,
                            name=f"{spkr_array_name}-{id_string}")

    return M_opt, dict(M_allrad=M_allrad, off=off, res=res)
Example #30
    def searchphase(y, x):
        y_t = jnp.outer(y, TPS)
        x_t = jnp.tile(x[:, None], (1, testing_phases))
        snr_t = 10. * jnp.log10(getpower(y_t) / getpower(y_t - x_t))
        return TPS[jnp.argmax(snr_t)]
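The helpers TPS, testing_phases, and getpower live outside this snippet. A self-contained toy version of the same phase search, with assumed shapes and helpers for illustration only:

import jax.numpy as jnp

TPS = jnp.exp(1j * jnp.linspace(0.0, 2.0 * jnp.pi, 64, endpoint=False))  # test phases

def getpower(z):
    return jnp.mean(jnp.abs(z) ** 2, axis=0)

def searchphase(y, x):
    y_t = jnp.outer(y, TPS)                         # y rotated by every test phase
    x_t = jnp.tile(x[:, None], (1, TPS.shape[0]))
    snr_t = 10.0 * jnp.log10(getpower(y_t) / getpower(y_t - x_t))
    return TPS[jnp.argmax(snr_t)]

x = jnp.exp(1j * jnp.linspace(0.0, 1.0, 100))
y = x * jnp.exp(1j * 0.7)                           # x with an unknown phase offset
print(jnp.angle(searchphase(y, x)))                 # close to -0.7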