Example #1
def test_conv1x1ortho_shape(n_channels, hw, n_samples, n_reflections):

    params_rng, data_rng = jax.random.split(KEY, 2)

    x = jax.random.normal(data_rng, shape=(n_samples, hw[0], hw[1], n_channels))

    # create layer
    init_func = Conv1x1Householder(n_channels=n_channels, n_reflections=n_reflections)

    # initialize parameters and the forward/inverse transform functions
    params, forward_f, inverse_f = init_func(rng=params_rng, n_features=n_channels)

    # forward transformation
    z, log_abs_det = forward_f(params, x)

    # print(z.shape, log_abs_det.shape)

    # checks
    chex.assert_equal_shape([z, x])
    chex.assert_shape(np.atleast_1d(log_abs_det), (n_samples,))

    # inverse transformation
    x_approx, log_abs_det = inverse_f(params, z)

    # checks
    chex.assert_equal_shape([x_approx, x])
    chex.assert_shape(np.atleast_1d(log_abs_det), (n_samples,))
Example #2
 def per_component_fun(j):
     log_prob_x_zj = jnp.sum(dist.Normal(mus[j], sigs[j]).log_prob(obs), axis=1).flatten()
     assert jnp.atleast_1d(log_prob_x_zj).shape == (N,)
     log_prob_zj = dist.Categorical(pis_prior).log_prob(j)
     log_prob = log_prob_x_zj + log_prob_zj
     assert jnp.atleast_1d(log_prob).shape == (N,)
     return log_prob
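The per-component log-probabilities above are typically combined with a log-sum-exp over components to get the marginal mixture log-likelihood. A minimal self-contained sketch of that pattern, using jax.scipy.stats.norm in place of the numpyro-style dist objects assumed above (mus, sigs, and log_pis are illustrative names):

import jax.numpy as jnp
from jax import vmap
from jax.scipy.special import logsumexp
from jax.scipy.stats import norm

def mixture_log_prob(obs, mus, sigs, log_pis):
    # obs: (N, D); mus, sigs: (K, D); log_pis: (K,)
    def per_component(mu_j, sig_j, log_pi_j):
        # sum per-dimension log-densities, then add the log mixture weight
        return jnp.sum(norm.logpdf(obs, mu_j, sig_j), axis=1) + log_pi_j

    log_probs = vmap(per_component)(mus, sigs, log_pis)  # (K, N)
    return logsumexp(log_probs, axis=0)                  # (N,)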
Example #3
    def get_celerite_matrices(self, x, diag, **kwargs):
        x = np.atleast_1d(x)
        diag = np.atleast_1d(diag)
        if len(x.shape) != 1:
            raise ValueError("'x' must be one-dimensional")
        if x.shape != diag.shape:
            raise ValueError("dimension mismatch")

        ar, cr, ac, bc, cc, dc = self.get_coefficients()

        a = diag + np.sum(ar) + np.sum(ac)

        arg = dc[None, :] * x[:, None]
        cos = np.cos(arg)
        sin = np.sin(arg)
        z = np.zeros_like(x)

        U = np.concatenate(
            (
                ar[None, :] + z[:, None],
                ac[None, :] * cos + bc[None, :] * sin,
                ac[None, :] * sin - bc[None, :] * cos,
            ),
            axis=1,
        )

        V = np.concatenate(
            (np.ones_like(ar)[None, :] + z[:, None], cos, sin),
            axis=1,
        )

        c = np.concatenate((cr, cc, cc))

        return c, a, U, V
Example #4
def linear_matter_power(cosmo, k, a=1.0, transfer_fn=tklib.Eisenstein_Hu, **kwargs):
    r""" Computes the linear matter power spectrum.

    Parameters
    ----------
    cosmo: Cosmology
        Cosmology parameter structure

    k: array_like
        Wave number in h Mpc^{-1}

    a: array_like, optional
        Scale factor (def: 1.0)

    transfer_fn: transfer_fn(cosmo, k, **kwargs)
        Transfer function

    Returns
    -------
    pk: array_like
        Linear matter power spectrum at the specified scale
        and scale factor.

    """
    k = np.atleast_1d(k)
    a = np.atleast_1d(a)
    g = bkgrd.growth_factor(cosmo, a)
    t = transfer_fn(cosmo, k, **kwargs)

    pknorm = cosmo.sigma8 ** 2 / sigmasqr(cosmo, 8.0, transfer_fn, **kwargs)

    pk = primordial_matter_power(cosmo, k) * t ** 2 * g ** 2

    # Apply normalisation
    pk = pk * pknorm
    return pk.squeeze()
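A minimal usage sketch, assuming the jax-cosmo package (which provides jc.Planck15 and exposes this function as jc.power.linear_matter_power); the wavenumber grid is illustrative:

import jax.numpy as np
import jax_cosmo as jc

cosmo = jc.Planck15()
k = np.logspace(-3, 1, 128)                  # wavenumbers in h Mpc^{-1}
pk = jc.power.linear_matter_power(cosmo, k)  # P(k) at a=1.0, shape (128,)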
Example #5
    def __init__(self, num_patients: int, num_tests_per_cycle: int,
                 max_group_size: int, prior_infection_rate: np.ndarray,
                 prior_specificity: np.ndarray, prior_sensitivity: np.ndarray):
        self.num_patients = num_patients
        self.num_tests_per_cycle = num_tests_per_cycle
        self.max_group_size = max_group_size

        self.prior_infection_rate = np.atleast_1d(prior_infection_rate)
        self.prior_specificity = np.atleast_1d(prior_specificity)
        self.prior_sensitivity = np.atleast_1d(prior_sensitivity)
        self.log_prior_specificity = np.log(self.prior_specificity)
        self.log_prior_1msensitivity = np.log(1 - self.prior_sensitivity)
        self.logit_prior_sensitivity = special.logit(self.prior_sensitivity)
        self.logit_prior_specificity = special.logit(self.prior_specificity)

        self.curr_cycle = 0
        self.past_groups = None
        self.past_test_results = None
        self.groups_to_test = None
        self.particle_weights = None
        self.particles = None
        self.to_clear_positives = None
        self.all_cleared = False
        self.marginals = {}
        self.reset()  # Initializes the attributes above.
Example #6
    def __init__(self, x, y, alpha=0., sigma=None, lamb=None, kernel_num=100):

        self.__x = transform_data(x)
        self.__y = transform_data(y)

        if self.__x.shape[1] != self.__y.shape[1]:
            raise ValueError("x and y must be same dimentions.")

        if sigma is None:
            sigma = np.logspace(-4, 9, 14)

        if lamb is None:
            lamb = np.logspace(-4, 9, 14)

        self.__x_num_row = self.__x.shape[0]
        self.__y_num_row = self.__y.shape[0]
        self.__kernel_num = min(
            kernel_num, self.__x_num_row
        )  # the number of kernels is capped by the number of rows in x
        self.__centers = np.array(
            rand.sample(list(self.__x), k=self.__kernel_num)
        )  # randomly sample RBF kernel centroids from x
        self.__n_minimum = min(self.__x_num_row, self.__y_num_row)
        self.__kernel = jit(partial(gauss_kernel, centers=self.__centers))

        self._RuLSIF(
            x=self.__x,
            y=self.__y,
            alpha=alpha,
            s_sigma=np.atleast_1d(sigma),
            s_lambda=np.atleast_1d(lamb),
        )
Example #7
 def __init__(self, name, mu, gamma, tracked=True):
     if not isinstance(mu, PriorTransform):
         mu = DeltaPrior('_{}_mu'.format(name), jnp.atleast_1d(mu), False)
     if not isinstance(gamma, PriorTransform):
         gamma = DeltaPrior('_{}_gamma'.format(name), jnp.atleast_1d(gamma),
                            False)
     U_dims = broadcast_shapes(get_shape(mu), get_shape(gamma))[0]
     super(MVNDiagPrior, self).__init__(name, U_dims, [mu, gamma], tracked)
Example #8
    def __init__(self, x, y, alpha=0., sigma=None, lamb=None, kernel_num=100):
        """[summary]

        Args:
            x (array-like of float): 
                Numerator samples array. x is generated from p(x).
            y (array-like of float): 
                Denumerator samples array. y is generated from q(x).
            alpha (float or array-like, optional): 
                The alpha is a parameter that can adjust the mixing ratio r(x) = p(x)/(alpha*p(x)+(1-alpha)q(x))
                , and is set in the range of 0-1. 
                Defaults to 0.
            sigma (float or array-like, optional): 
                Bandwidth of kernel. If a value is set for sigma, that value is used for kernel bandwidth
                , and if a numerical array is set for sigma, Densratio selects the optimum value by using CV.
                Defaults to array of 10e-4 to 10e+9 divided into 14 on the log scale.
            lamb (float or array-like, optional): 
                Regularization parameter. If a value is set for lamb, that value is used for hyperparameter
                , and if a numerical array is set for lamb, Densratio selects the optimum value by using CV.
                Defaults to array of 10e-4 to 10e+9 divided into 14 on the log scale.
            kernel_num (int, optional): The number of kernels in the linear model. Defaults to 100.

        Raises:
            ValueError: [description]
        """        

        self.__x = transform_data(x)
        self.__y = transform_data(y)

        if self.__x.shape[1] != self.__y.shape[1]:
            raise ValueError("x and y must be same dimentions.")

        if sigma is None:
            sigma = np.logspace(-3, 1, 9)

        if lamb is None:
            lamb = np.logspace(-3, 1, 9)

        self.__x_num_row = self.__x.shape[0]
        self.__y_num_row = self.__y.shape[0]
        self.__kernel_num = min(
            kernel_num, self.__x_num_row
        )  # the number of kernels is capped by the number of rows in x
        self.__centers = np.array(
            rand.sample(list(self.__x), k=self.__kernel_num)
        )  # randomly sample RBF kernel centroids from x
        self.__n_minimum = min(self.__x_num_row, self.__y_num_row)
        # self.__kernel  = jit(partial(gauss_kernel,centers=self.__centers))

        self._RuLSIF(
            x=self.__x,
            y=self.__y,
            alpha=alpha,
            s_sigma=np.atleast_1d(sigma),
            s_lambda=np.atleast_1d(lamb),
        )
Example #9
 def __add__(self,
             other: 'cdict') -> 'cdict':
     out_cdict = self.copy()
     if other is None:
         return out_cdict
     for key, attr in out_cdict.__dict__.items():
         if hasattr(other, key) and (isinstance(attr, jnp.ndarray) or isinstance(getattr(other, key), jnp.ndarray)):
             attr_atl = attr
             other_attr_atl = other.__dict__[key]
             out_cdict.__setattr__(key, jnp.append(jnp.atleast_1d(attr_atl),
                                                   jnp.atleast_1d(other_attr_atl), axis=0))
     if hasattr(self, 'time') and hasattr(other, 'time'):
         out_cdict.time = self.time + other.time
     return out_cdict
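The jnp.atleast_1d calls are what let the append succeed when an attribute is still a scalar (e.g. a cdict holding a single sample); a one-line illustration:

import jax.numpy as jnp

# A scalar and a vector cannot be appended along axis 0 directly, but
# their atleast_1d promotions can: the result has shape (4,).
acc = jnp.append(jnp.atleast_1d(0.0), jnp.atleast_1d(jnp.arange(3.0)), axis=0)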
Example #10
def gaussian_cl_covariance(ell, probes, cl_signal, cl_noise, f_sky=0.25):
    """
    Computes a Gaussian covariance for the angular cls of the provided probes

    return_cls: (returns covariance)
    """
    ell = np.atleast_1d(ell)
    n_ell = len(ell)

    # Adding noise to auto-spectra
    cl_obs = cl_signal + cl_noise
    n_cls = cl_obs.shape[0]

    # Normalization of covariance
    norm = (2 * ell + 1) * np.gradient(ell) * f_sky

    # Retrieve ordering for blocks of the covariance matrix
    cov_blocks = np.array(_get_cov_blocks_ordering(probes))

    def get_cov_block(inds):
        a, b, c, d = inds
        cov = (cl_obs[a] * cl_obs[b] + cl_obs[c] * cl_obs[d]) / norm
        return cov * np.eye(n_ell)

    cov_mat = lax.map(get_cov_block, cov_blocks)

    # Reshape covariance matrix into proper matrix
    cov_mat = cov_mat.reshape((n_cls, n_cls, n_ell, n_ell))
    cov_mat = cov_mat.transpose(axes=(0, 2, 1, 3)).reshape(
        (n_ell * n_cls, n_ell * n_cls))
    return cov_mat
Example #11
def ml_estimate(obs):
    N = jnp.atleast_1d(obs).shape[0]
    mu_loc = (1 / N) * jnp.sum(obs, axis=0)
    mu_var = 1 / (N + 1)
    mu_std = jnp.sqrt(mu_var)

    return mu_loc, mu_std
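A quick synthetic check of the estimator (values illustrative):

import jax
import jax.numpy as jnp

obs = 2.0 + jax.random.normal(jax.random.PRNGKey(0), (100,))
mu_loc, mu_std = ml_estimate(obs)
# mu_loc is the sample mean (close to 2.0); mu_std == sqrt(1 / 101)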
Example #12
def gaussian_cl_covariance_and_mean(
    cosmo,
    ell,
    probes,
    transfer_fn=tklib.Eisenstein_Hu,
    nonlinear_fn=power.halofit,
    f_sky=0.25,
    sparse=False,
):
    """
    Computes a Gaussian covariance for the angular cls of the provided probes

    Set sparse True to return a sparse matrix representation that uses a factor
    of n_ell less memory and is compatible with the linear algebra operations
    in :mod:`jax_cosmo.sparse`.

    return_cls: (returns signal + noise cl, covariance)
    """
    ell = np.atleast_1d(ell)
    n_ell = len(ell)

    # Compute signal vectors
    cl_signal = angular_cl(cosmo,
                           ell,
                           probes,
                           transfer_fn=transfer_fn,
                           nonlinear_fn=nonlinear_fn)
    cl_noise = noise_cl(ell, probes)

    # retrieve the covariance
    cov_mat = gaussian_cl_covariance(ell, probes, cl_signal, cl_noise, f_sky,
                                     sparse)

    return cl_signal.flatten(), cov_mat
Example #13
def get_initial_position(rng_key, model, num_chains, **kwargs):
    conditioning_vars = set(kwargs.keys())
    model_randvars = set(model.random_variables)
    to_sample_vars = model_randvars.difference(conditioning_vars)

    samples = sample_forward(rng_key, model, num_samples=num_chains, **kwargs)
    initial_positions = dict((var, samples[var]) for var in to_sample_vars)

    # A naive way to go about flattening the positions is to transform the
    # dictionary of arrays that contain the parameter value to a list of
    # dictionaries, one per position and then unravel the dictionaries.
    # However, this approach takes more time than getting the samples in the
    # first place.
    #
    # Luckily, JAX first sorts dictionaries by keys
    # (https://github.com/google/jax/blob/master/jaxlib/pytree.cc) when
    # raveling pytrees. We can thus ravel and stack parameter values in an
    # array, sorting by key; this gives our flattened positions. We then build
    # a single dictionary that contains the parameters value and use it to get
    # the unraveling function using `unravel_pytree`.
    positions = np.stack(
        [np.ravel(samples[s]) for s in sorted(initial_positions.keys())], axis=1
    )

    # np.atleast_1d is necessary to handle single chains
    sample_position_dict = {
        parameter: np.atleast_1d(values)[0]
        for parameter, values in initial_positions.items()
    }
    _, unravel_fn = jax_ravel_pytree(sample_position_dict)

    return positions, unravel_fn
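A minimal illustration of the key-ordering behaviour the comment relies on: jax.flatten_util.ravel_pytree flattens dict entries in sorted key order and returns the matching unraveling function:

import jax.numpy as jnp
from jax.flatten_util import ravel_pytree

flat, unravel_fn = ravel_pytree({"b": jnp.ones(2), "a": jnp.zeros(3)})
# flat == [0., 0., 0., 1., 1.]: "a" comes first despite insertion order
print(unravel_fn(flat))  # {'a': Array([0., 0., 0.], ...), 'b': Array([1., 1.], ...)}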
Example #14
 def get_psd(self, omega):
     omega = np.atleast_1d(omega)
     psd0 = self.term.get_psd(omega)
     arg = 0.5 * self.delta * omega
     # push near-zero entries slightly away from zero to avoid dividing by ~0
     arg += 1e-8 * (np.abs(arg) < 1e-8) * np.sign(arg)
     sinc = np.sin(arg) / arg
     return psd0 * sinc**2
Example #15
def weak_lensing_kernel(cosmo, pzs, z, ell):
    """
    Returns a weak lensing kernel
    """
    z = np.atleast_1d(z)
    zmax = max([pz.zmax for pz in pzs])
    # Retrieve comoving distance corresponding to z
    chi = bkgrd.radial_comoving_distance(cosmo, z2a(z))

    @vmap
    def integrand(z_prime):
        chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))
        # Stack the dndz of all redshift bins
        dndz = np.stack([pz(z_prime) for pz in pzs], axis=0)
        return dndz * np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)

    # Computes the radial weak lensing kernel
    radial_kernel = np.squeeze(
        simps(integrand, z, zmax, 256) * (1.0 + z) * chi)
    # Constant term
    constant_factor = 3.0 * const.H0**2 * cosmo.Omega_m / 2.0 / const.c
    # Ell dependent factor
    ell_factor = np.sqrt(
        (ell - 1) * (ell) * (ell + 1) * (ell + 2)) / (ell + 0.5)**2
    return constant_factor * ell_factor * radial_kernel
Example #16
def gaussian_cl_covariance_and_mean(
    cosmo,
    ell,
    probes,
    transfer_fn=tklib.Eisenstein_Hu,
    nonlinear_fn=power.halofit,
    f_sky=0.25,
):
    """
    Computes a Gaussian covariance for the angular cls of the provided probes

    return_cls: (returns signal + noise cl, covariance)
    """
    ell = np.atleast_1d(ell)
    n_ell = len(ell)

    # Compute signal vectors
    cl_signal = angular_cl(cosmo,
                           ell,
                           probes,
                           transfer_fn=transfer_fn,
                           nonlinear_fn=nonlinear_fn)
    cl_noise = noise_cl(ell, probes)

    # retrieve the covariance
    cov_mat = gaussian_cl_covariance(ell, probes, cl_signal, cl_noise, f_sky)

    return cl_signal.flatten(), cov_mat
Example #17
def one_density_kernel(self, cosmo, z, ell, s=slice(None)):
    z = jnp.atleast_1d(z)
    # Extract parameters
    pzs, bias = self.params
    # Retrieve density kernel
    kernel = jax_cosmo.probes.density_kernel(cosmo, pzs[s], bias, z, ell)
    return kernel
Example #18
def test_conv1x1_shape(n_channels, hw, n_samples):

    x = objax.random.normal((n_samples, hw[0], hw[1], n_channels), generator=generator)
    # print(x.shape)
    # create layer
    model = Conv1x1(n_channels=n_channels)

    # forward transformation
    z, log_abs_det = model(x)

    # print(z.shape, log_abs_det.shape)

    # checks
    chex.assert_equal_shape([z, x])
    chex.assert_shape(np.atleast_1d(log_abs_det), (n_samples,))

    # forward transformation only (no log-det)
    z = model.transform(x)

    # checks
    chex.assert_equal_shape([z, x])

    # inverse transformation
    x_approx = model.inverse(z)

    # checks
    chex.assert_equal_shape([x_approx, x])
Example #19
 def __init__(self, name, mu, b, tracked=True):
     if not isinstance(mu, PriorTransform):
         mu = DeltaPrior('_{}_mu'.format(name), jnp.atleast_1d(mu), False)
     if not isinstance(b, PriorTransform):
         b = DeltaPrior('_{}_b'.format(name), jnp.atleast_1d(b), False)
     U_dims = broadcast_shapes(get_shape(mu), get_shape(b))[0]
     super(LaplacePrior, self).__init__(name, U_dims, [mu, b], tracked)
Example #20
 def __init__(self, name, logits, tracked=True):
     if not isinstance(logits, PriorTransform):
         logits = DeltaPrior('_{}_logits'.format(name),
                             jnp.atleast_1d(logits), False)
     self._shape = get_shape(logits)
     U_dims = tuple_prod(self._shape)
     super(BernoulliPrior, self).__init__(name, U_dims, [logits], tracked)
Example #21
 def pz_fn(self, z):
     # Extract parameters
     zcat, weight = self.params[:2]
     w = np.atleast_1d(weight)
     q = np.sum(w)
     X = np.expand_dims(zcat, axis=-1)
     k = self._kernel(self.config["bw"], X, z)
     return np.dot(k.T, w) / q
Example #22
def flatten(pytree):
    vals, tree = jax.tree_flatten(pytree)
    # convert scalars to arrays so that they can be concatenated
    vals2 = [jnp.atleast_1d(val) for val in vals]
    v_flat = jnp.concatenate(vals2)
    idx = jnp.cumsum(jnp.array([val.size for val in vals2]))
    return v_flat, idx, tree
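A hedged companion sketch showing how the returned index and treedef round-trip the flattening; the unflatten helper is illustrative, not part of the source, and leaves that were scalars come back as 1-element arrays:

import jax
import jax.numpy as jnp

def unflatten(v_flat, idx, tree):
    # split the flat vector back into per-leaf chunks, then rebuild the pytree
    vals = jnp.split(v_flat, idx[:-1])
    return jax.tree_unflatten(tree, vals)

v_flat, idx, tree = flatten({"a": 1.0, "b": jnp.arange(3.0)})
restored = unflatten(v_flat, idx, tree)  # {'a': Array([1.]), 'b': Array([0., 1., 2.])}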
Example #23
 def from_stats(cls, stats, counts, total_count=1):
     concentration = stats[0] + 1
     num_classes = concentration.shape[-1] + 1
     last_concentration = np.atleast_1d(counts * total_count -
                                        concentration.sum(axis=-1) +
                                        num_classes)
     return cls(np.concatenate([concentration, last_concentration],
                               axis=-1))
Example #24
    def __call__(self, x):
        x = jnp.atleast_1d(x)  # no-op for arrays; promotes scalars to 1-d
        pars = self.param("jax",
                          lambda rng, shape: self.init_fun(rng, shape)[1],
                          x.shape)

        return self.apply_fun(pars, x)
Example #25
 def response_gp(theta: np.ndarray, _x: np.ndarray) -> np.ndarray:
     _x = np.atleast_1d(_x)
     if _x.ndim == 1:
         # (n,) <- (1, k) @ (k, n)
         return (basis_predict(_x) @ theta).squeeze()
     else:
         # (n_constr, n) <- (n_constr, n, k) @ (k, n)
         return np.einsum('ijk,kj->ij', basis_predict(_x), theta)
Example #26
def one_hot(z, K):
    z = np.atleast_1d(z).astype(int)
    assert np.all(z >= 0) and np.all(z < K)
    shp = z.shape
    N = z.size
    zoh = np.zeros((N, K))
    zoh[np.arange(N), np.ravel(z)] = 1  # z values are already valid column indices
    zoh = np.reshape(zoh, shp + (K, ))
    return zoh
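An illustrative round trip (plain NumPy, matching the in-place assignment above):

import numpy as np

z = np.array([0, 2, 1])
zoh = one_hot(z, K=3)
# zoh == [[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]]
assert np.all(zoh.argmax(axis=-1) == z)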
Example #27
    def get_value(self, tau0):
        dt = self.delta
        ar, cr, a, b, c, d = self.term.get_coefficients()

        # Format the lags correctly
        tau0 = np.abs(np.atleast_1d(tau0))
        tau = tau0[..., None]

        # Precompute some factors
        dpt = dt + tau
        dmt = dt - tau

        # Real parts:
        # tau > Delta
        crd = cr * dt
        cosh = np.cosh(crd)
        norm = 2 * ar / crd**2
        K_large = np.sum(norm * (cosh - 1) * np.exp(-cr * tau), axis=-1)

        # tau < Delta
        crdmt = cr * dmt
        K_small = K_large + np.sum(norm * (crdmt - np.sinh(crdmt)), axis=-1)

        # Complex part
        cd = c * dt
        dd = d * dt
        c2 = c**2
        d2 = d**2
        c2pd2 = c2 + d2
        C1 = a * (c2 - d2) + 2 * b * c * d
        C2 = b * (c2 - d2) - 2 * a * c * d
        norm = 1.0 / (dt * c2pd2)**2
        k0 = np.exp(-c * tau)
        cdt = np.cos(d * tau)
        sdt = np.sin(d * tau)

        # For tau > Delta
        cos_term = 2 * (np.cosh(cd) * np.cos(dd) - 1)
        sin_term = 2 * (np.sinh(cd) * np.sin(dd))
        factor = k0 * norm
        K_large += np.sum((C1 * cos_term - C2 * sin_term) * factor * cdt,
                          axis=-1)
        K_large += np.sum((C2 * cos_term + C1 * sin_term) * factor * sdt,
                          axis=-1)

        # tau < Delta
        edmt = np.exp(-c * dmt)
        edpt = np.exp(-c * dpt)
        cos_term = (edmt * np.cos(d * dmt) + edpt * np.cos(d * dpt) -
                    2 * k0 * cdt)
        sin_term = (edmt * np.sin(d * dmt) + edpt * np.sin(d * dpt) -
                    2 * k0 * sdt)
        K_small += np.sum(2 * (a * c + b * d) * c2pd2 * dmt * norm, axis=-1)
        K_small += np.sum((C1 * cos_term + C2 * sin_term) * norm, axis=-1)

        mask = tau0 >= dt
        return K_large * mask + K_small * (~mask)
Example #28
def misclassification_polytope(a, c, ls):
    """creates misclassification constraints"""
    assert a.ndim == 2
    assert a.shape[0] == 1  # only batch size 1 is supported
    n_classes = a.shape[1]

    u = a[:, ls] - a[:, c]

    c = np.atleast_1d(np.asarray([c]).squeeze())
    ls = np.atleast_1d(np.asarray([ls]).squeeze())

    Av = lambda Vv: Vv[:, c] - Vv[:, ls]  # noqa: E731
    vA = lambda v: (
        scatter(c, np.sum(np.atleast_2d(v), axis=-1, keepdims=True), n_classes)
        +  # noqa: E731
        scatter(ls, -np.atleast_2d(v), n_classes))

    return Av, vA, u
Example #29
def analytical_solution(obs):
    N = jnp.atleast_1d(obs).shape[0]
    x_var = .1
    x_var_inv = 1 / x_var
    mu_var = 1 / (x_var_inv * N + 1)
    mu_std = jnp.sqrt(mu_var)
    mu_loc = mu_var * jnp.sum(x_var_inv * obs, axis=0)

    return mu_loc, mu_std
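This is the standard conjugate-Gaussian posterior for the mean, with a standard-normal prior and a fixed likelihood variance of 0.1; a quick sanity check on synthetic data (values illustrative):

import jax
import jax.numpy as jnp

obs = 2.0 + jnp.sqrt(0.1) * jax.random.normal(jax.random.PRNGKey(1), (50,))
mu_loc, mu_std = analytical_solution(obs)
# With 50 observations the posterior mean sits close to 2.0,
# and mu_std == sqrt(1 / (10 * 50 + 1)).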
Example #30
 def __init__(self, name, mu, Gamma, ill_cond=False, tracked=True):
     self._ill_cond = ill_cond
     if not isinstance(mu, PriorTransform):
         mu = DeltaPrior('_{}_mu'.format(name), jnp.atleast_1d(mu), False)
     if not isinstance(Gamma, PriorTransform):
         Gamma = DeltaPrior('_{}_Gamma'.format(name), jnp.atleast_2d(Gamma),
                            False)
     U_dims = broadcast_shapes(get_shape(mu), get_shape(Gamma)[0:1])[0]
     super(MVNPrior, self).__init__(name, U_dims, [mu, Gamma], tracked)