Example #1
def log_tomographic_weight_function(gamma, x1, x2, p1, p2=None, S=25):
    parabolic = False
    if p2 is None:
        parabolic = True
        p2 = p1

    # Coefficients of |(x1 + s*p1) - (x2 + t*p2)|^2 - gamma, viewed as a
    # quadratic in s: A*s^2 + (B*t + D)*s + (C*t^2 + E*t + F)
    x12 = x1 - x2
    A = p1 @ p1
    C = p2 @ p2
    B = -2. * p1 @ p2
    D = 2. * x12 @ p1
    E = -2. * x12 @ p2
    F = x12 @ x12 - gamma

    # Discriminant of the quadratic in s, on a grid of t in [0, 1]
    t1 = jnp.linspace(0., 1., S)[:, None]
    H = (D**2 - 4. * A * F + (2. * B * D - 4. * A * E) * t1 +
         (B**2 - 4. * A * C) * t1**2)
    u = (-D - B * t1)
    # Roots in s, clipped to the unit interval
    lower = jnp.clip(0.5 * (u - jnp.sqrt(H)) / A, 0., 1.)
    upper = jnp.clip(0.5 * (u + jnp.sqrt(H)) / A, 0., 1.)
    # Fraction of the (s, t) unit square where the squared distance is below
    # gamma, i.e. the CDF over the gamma grid
    diff = (upper - lower) / (S - 1)
    if not parabolic:
        # only count t where real roots exist (H >= 0)
        reg_valid = H >= 0.
        cdf = jnp.sum(jnp.where(reg_valid, diff, 0.), axis=0)
    else:
        cdf = jnp.sum(diff, axis=0)
    # finite-difference the CDF into a density and return its log
    return jnp.log(jnp.diff(cdf)) - jnp.log(jnp.diff(gamma))
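A hypothetical usage sketch (inputs invented for illustration; assumes jax.numpy is installed): the function returns one log-weight per gamma bin, so the output is one entry shorter than gamma.

import jax.numpy as jnp

x1 = jnp.array([0., 0., 0.])
x2 = jnp.array([1., 0., 0.])
p1 = jnp.array([0., 0., 1.])
p2 = jnp.array([0., 1., 0.])
gamma = jnp.linspace(0.5, 2.0, 11)
log_w = log_tomographic_weight_function(gamma, x1, x2, p1, p2)
print(log_w.shape)  # (10,)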
Example #2
    def antiderivative(self, xs):
        """
        Computes the antiderivative of first order of this spline
        """
        # Retrieve parameters
        x, y, coefficients = self._x, self._y, self._coefficients

        # In case of quadratic, we redefine the knots
        if self.k == 2:
            knots = (x[1:] + x[:-1]) / 2.0
            # We add 2 artificial knots before and after
            knots = np.concatenate([
                np.array([x[0] - (x[1] - x[0]) / 2.0]),
                knots,
                np.array([x[-1] + (x[-1] - x[-2]) / 2.0]),
            ])
        else:
            knots = x

        # Determine the interval that x lies in
        ind = np.digitize(xs, knots) - 1
        # Include the right endpoint in spline piece C[m-1]
        ind = np.clip(ind, 0, len(knots) - 2)
        t = xs - knots[ind]

        if self.k == 1:
            a = y[:-1]
            b = coefficients
            h = np.diff(knots)
            cst = np.concatenate(
                [np.zeros(1), np.cumsum(a * h + b * h**2 / 2)])
            return cst[ind] + a[ind] * t + b[ind] * t**2 / 2

        if self.k == 2:
            h = np.diff(knots)
            dt = x - knots[:-1]
            b = coefficients[:-1]
            b1 = coefficients[1:]
            a = y - b * dt - (b1 - b) * dt**2 / (2 * h)
            c = (b1 - b) / (2 * h)
            cst = np.concatenate(
                [np.zeros(1),
                 np.cumsum(a * h + b * h**2 / 2 + c * h**3 / 3)])
            return cst[ind] + a[ind] * t + b[ind] * t**2 / 2 + c[ind] * t**3 / 3

        if self.k == 3:
            h = np.diff(knots)
            c = coefficients[:-1]
            c1 = coefficients[1:]
            a = y[:-1]
            a1 = y[1:]
            b = (a1 - a) / h - (2 * c + c1) * h / 3.0
            d = (c1 - c) / (3 * h)
            cst = np.concatenate([
                np.zeros(1),
                np.cumsum(a * h + b * h**2 / 2 + c * h**3 / 3 + d * h**4 / 4),
            ])
            return (cst[ind] + a[ind] * t + b[ind] * t**2 / 2 +
                    c[ind] * t**3 / 3 + d[ind] * t**4 / 4)
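A self-contained sketch of the k = 1 (piecewise-linear) branch, with invented data and assuming coefficients holds the per-interval slopes: the antiderivative at the knots must equal the cumulative trapezoid areas.

import numpy as np

x = np.array([0.0, 1.0, 3.0, 4.0])
y = np.array([0.0, 2.0, 2.0, 5.0])
b = np.diff(y) / np.diff(x)   # slopes, one per interval
a = y[:-1]
h = np.diff(x)
cst = np.concatenate([np.zeros(1), np.cumsum(a * h + b * h**2 / 2)])

xs = np.array([0.5, 1.0, 2.0, 3.5, 4.0])
ind = np.clip(np.digitize(xs, x) - 1, 0, len(x) - 2)
t = xs - x[ind]
antider = cst[ind] + a[ind] * t + b[ind] * t**2 / 2

# values at the knots x[1] and x[3] vs. cumulative trapezoid areas
print(np.allclose(antider[[1, 4]], np.cumsum(h * (y[:-1] + y[1:]) / 2)[[0, 2]]))  # True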
Example #3
def pois(cell: PVCell, pot: Potentials) -> Array:

    ave_dgrid = (cell.dgrid[:-1] + cell.dgrid[1:]) / 2.
    ave_eps = (cell.eps[1:] + cell.eps[:-1]) / 2.
    # Discrete -d/dx(eps * dphi/dx) minus the local charge, at interior nodes
    res = (ave_eps[:-1] * jnp.diff(pot.phi)[:-1] / cell.dgrid[:-1] -
           ave_eps[1:] * jnp.diff(pot.phi)[1:] /
           cell.dgrid[1:]) / ave_dgrid - physics.charge(cell, pot)[1:-1]
    return res
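The stencil above is the standard finite-volume discretization of -(eps * phi')' on a nonuniform grid; a standalone sketch with invented data (no PVCell needed): for phi = x^2 and unit permittivity it should return -2 everywhere.

import jax.numpy as jnp

grid = jnp.linspace(0.0, 1.0, 101)
dgrid = jnp.diff(grid)
phi = grid**2
eps = jnp.ones_like(grid)

ave_dgrid = (dgrid[:-1] + dgrid[1:]) / 2.
ave_eps = (eps[1:] + eps[:-1]) / 2.
lhs = (ave_eps[:-1] * jnp.diff(phi)[:-1] / dgrid[:-1] -
       ave_eps[1:] * jnp.diff(phi)[1:] / dgrid[1:]) / ave_dgrid
print(jnp.allclose(lhs, -2.0, atol=1e-4))  # True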
Example #4
def likelihood(par, dat):
    T, K = dat['T'], dat['K']

    # compute policy
    zb0 = calc_zbar(par, dat)
    pol = {'zb': zb0, 'zc': 0, 'kz': 0, 'vx': 0}

    # tabulate state
    st0 = zero_state(K)
    imp = dat['imp']

    # run simulation
    sim, _ = gen_path(par, pol, st0, imp, T, K)

    # extract simulation
    sim_c = sim['c']
    sim_d = sim['d']
    sim_a = sim['act']
    sim_o = sim['out']

    # extract data
    dat_c = dat['c']
    dat_d = dat['d']
    dat_a = dat['act']
    dat_o = dat['out']

    # get daily rates
    sim_c = np.diff(sim_c, axis=0)
    sim_d = np.diff(sim_d, axis=0)
    dat_c = np.diff(dat_c, axis=0)
    dat_d = np.diff(dat_d, axis=0)

    # actual standard deviations
    wgt0 = dat['wgt'][None, :]
    wgt1 = wgt0 / np.mean(wgt0)
    sig_0 = 1 / np.sqrt(wgt1)
    sig_a = sig_0 * par['σa']
    sig_o = sig_0 * par['σo']

    # epi match
    lik_c = poisson_err(dat_c, sim_c, wgt0)
    lik_d = poisson_err(dat_d, sim_d, wgt0)

    # econ match
    lik_a = gaussian_err(dat_a, sim_a, sig_a)
    lik_o = gaussian_err(dat_o, sim_o, sig_o)

    # sum it all up
    lik = 0.5 * lik_c + 10 * lik_d + lik_a + lik_o

    return lik
Example #5
 def test_sort(self):
     s = ops.softsort(self.x, axis=-1, threshold=1e-3, epsilon=1e-3)
     self.assertEqual(s.shape, self.x.shape)
     deltas = np.diff(s, axis=-1) > 0
     self.assertAllClose(deltas,
                         np.ones(deltas.shape, dtype=bool),
                         check_dtypes=True)
Example #6
 def prior_sample(self, num_samps, t=None):
     """
     Sample from the model prior f~N(0,K) multiple times using a nested loop.
     :param num_samps: the number of samples to draw [scalar]
     :param t: the input locations at which to sample (defaults to train+test set) [N_samp, 1]
     :return:
         f_sample: the prior samples [N_samp, func_dim, num_samps]
     """
     self.update_model(softplus_list(self.prior.hyp))
     if t is None:
         t = self.t_all
     else:
         x_ind = np.argsort(t[:, 0])
         t = t[x_ind]
     dt = np.concatenate([np.array([0.0]), np.diff(t[:, 0])])
     N = dt.shape[0]
     with loops.Scope() as s:
         s.f_sample = np.zeros([N, self.func_dim, num_samps])
         s.m = np.linalg.cholesky(self.Pinf) @ random.normal(random.PRNGKey(99), shape=[self.state_dim, 1])
         for i in s.range(num_samps):
             s.m = np.linalg.cholesky(self.Pinf) @ random.normal(random.PRNGKey(i), shape=[self.state_dim, 1])
             for k in s.range(N):
                 A = self.prior.state_transition(dt[k], self.prior.hyp)  # transition and noise process matrices
                 Q = self.Pinf - A @ self.Pinf @ A.T
                 C = np.linalg.cholesky(Q + 1e-6 * np.eye(self.state_dim))  # <--- can be a bit unstable
                 # we need to provide a different PRNG seed every time; the
                 # seed i * N + k is unique across the (i, k) loop nest,
                 # whereas i * k + k would collide between iterations:
                 s.m = A @ s.m + C @ random.normal(random.PRNGKey(i * N + k), shape=[self.state_dim, 1])
                 H = self.prior.measurement_model(t[k, 1:], softplus_list(self.prior.hyp))
                 f = (H @ s.m).T
                 s.f_sample = index_add(s.f_sample, index[k, ..., i], np.squeeze(f))
     return s.f_sample
Example #7
def make_eE_noiseCov(ssn, noise_pars, LFPrange):
    # setting up e_E and e_I: the projection/measurement vectors for
    # representing the "LFP" measurement (e_E good for LFP interpretation, but e_I ?)
    # eE = np.zeros(ssn.N)
    # # eE[LFPrange] =1/len(LFPrange)
    # index_update(eE, LFPrange, 1/len(LFPrange))
    # eI = np.zeros(ssn.N)
    # eI[ssn.Ne + LFPrange] =1/len(LFPrange)

    eE = np.hstack((np.array([i in LFPrange for i in range(ssn.Ne)],
                             dtype=np.float32), np.zeros(ssn.Ni)))

    # the script assumes independent noise to E and I, and spatially uniform magnitude of noise
    noiseCov = np.hstack((noise_pars.stdevE**2 * np.ones(ssn.Ne),
                          noise_pars.stdevI**2 * np.ones(ssn.Ni)))

    OriVec = ssn.topos_vec
    if noise_pars.corr_length > 0 and OriVec.size > 1:  # assumes one E and one I at every topos
        dOri = np.abs(OriVec)
        L = OriVec.size * np.diff(OriVec[:2])
        # distance on circle / periodic B.C.
        dOri[dOri > L / 2] = L - dOri[dOri > L / 2]
        SpatialFilt = toeplitz(
            np.exp(-(dOri**2) / (2 * noise_pars.corr_length**2)) /
            np.sqrt(2 * pi) / noise_pars.corr_length * L / ssn.Ne)
        # roughly the std of spatially and temporally filtered noise when the
        # white seed is randn(ssn.Nthetas, Nt)/sqrt(dt) and
        # corr_time = corr_length = 1 (ms or angle, respectively)
        sigTau1Sprd1 = 0.394
        SpatialFilt = (SpatialFilt * np.sqrt(noise_pars.corr_length / 2) /
                       sigTau1Sprd1)  # for the sake of output
        SpatialFilt = np.kron(np.eye(2), SpatialFilt)  # 2 for E/I
    else:
        SpatialFilt = np.array(1)

    return eE, noiseCov, SpatialFilt  # , eI
Example #8
def smooth_vsini_fft(wavelength,
                     spectrum,
                     outwave,
                     sigma_out,
                     inres=0.0,
                     **extras):
    # The kernel width for the convolution.
    sigma = np.sqrt(sigma_out**2 - inres**2)
    # if sigma <= 0:
    #     return np.interp(outwave, wavelength, spectrum)

    # make length of spectrum a power of 2 by resampling
    wave, spec = resample_wave(wavelength, spectrum)

    # get grid resolution (*not* the resolution of the input spectrum) and make
    # sure it's nearly constant.  It should be, by design (see resample_wave)
    invRgrid = np.diff(np.log(wave))
    # assert invRgrid.max() / invRgrid.min() < 1.05
    dv = ckms * np.median(invRgrid)

    # Do the convolution
    spec_conv = smooth_fft_vsini(dv, spec, sigma)
    # interpolate onto output grid
    # if outwave is not None:
    spec_conv = jinterp(outwave, wave, spec_conv, right=np.nan, left=np.nan)

    return spec_conv
Example #9
def tmrca_sf(t: np.ndarray, y: np.ndarray, n: int) -> np.ndarray:
    """The survival function of the TMRCA at each time point

    Args:
        t: time grid (including zero and infinity)
        y: effective population size in each epoch
        n: number of sampled haplotypes

    """
    # epoch durations
    s = np.diff(t)
    logu = -s / y
    logu = np.concatenate((np.array([0]), logu))
    # the A_2j are the product of this matrix
    # NOTE: using letter  "l" as a variable name to match text
    l = onp.arange(2, n + 1)[:, onp.newaxis]  # noqa: E741
    with onp.errstate(divide='ignore'):
        A2_terms = l * (l - 1) / (l * (l - 1) - l.T * (l.T - 1))
    onp.fill_diagonal(A2_terms, 1)
    A2 = np.prod(A2_terms, axis=0)

    binom_vec = l * (l - 1) / 2

    result = np.zeros(len(t))
    result = index_update(result, index[:-1],
                          np.squeeze(A2[np.newaxis, :]
                                     @ np.exp(np.cumsum(logu[np.newaxis, :-1],
                                                        axis=1)) ** binom_vec))

    assert np.all(np.isfinite(result))

    return result
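A quick sanity check with invented inputs (np is jax.numpy, as in the snippet): for n = 2 haplotypes and constant unit population size the TMRCA is Exponential(1), so the survival function must be exp(-t).

import jax.numpy as np

t = np.concatenate([np.linspace(0.0, 5.0, 50), np.array([np.inf])])
y = np.ones(len(t) - 1)
sf = tmrca_sf(t, y, 2)
print(np.allclose(sf[:-1], np.exp(-t[:-1])))  # True; sf[-1] = 0 at t = inf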
Example #10
def welfare_path(par, sim, wgt, disc, long_run, T, K):
    # params
    ψ = np.atleast_2d(par['ψ']) # optional county dependence
    wgt1 = wgt/np.sum(wgt) # normalize to a distribution

    # discounting
    ydelt = 1/year_days
    ytvec = np.arange(T)/year_days
    down = np.exp(-disc*ytvec)

    # input factors
    out = sim['out'][:T, :]
    irate = np.diff(sim['ka'][:T, :], axis=0)
    irate = np.concatenate([irate, irate[-1:, :]], axis=0)

    # immediate welfare
    util = out - ψ*irate
    eutil = np.sum(util*wgt1[None, :], axis=1)
    welf0 = (ydelt*down*eutil).sum()

    # total welfare
    if long_run:
        welf1 = (down[-1]*eutil[-1])/disc
        welf = disc*(welf0 + welf1)
    else:
        Ty = T/year_days
        welf = disc*welf0/(1-np.exp(-disc*Ty))

    return welf
Example #11
def M(n: int, t: np.ndarray, y: np.ndarray) -> np.ndarray:
    r"""The M matrix defined in the paper's appendix

    Args:
        n: the number of sampled haplotypes :math:`n`
        t: time grid, starting at zero and ending at np.inf
        y: population size in each epoch

    Returns:
        :math:`(n-1)\times m` matrix, where :math:`m` is the number of epochs
        (the length of the ``y`` argument)

    """
    # epoch durations
    s = np.diff(t)
    # we handle the final infinite epoch carefully to facilitate autograd
    u = np.exp(-s[:-1] / y[:-1])
    u = np.concatenate((np.array([1]), u, np.array([0])))

    n_range = np.arange(2, n + 1)
    binom_vec = n_range * (n_range - 1) / 2

    return np.exp(binom_vec[:, np.newaxis]
                  * np.cumsum(np.log(u[np.newaxis, :-1]), axis=1)
                  - np.log(binom_vec[:, np.newaxis])) \
        @ (np.eye(len(y), k=0) - np.eye(len(y), k=-1)) \
        @ np.diag(y)
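A toy shape check with invented inputs (np is jax.numpy):

import jax.numpy as np

t = np.array([0.0, 1.0, 2.0, np.inf])  # three epochs
y = np.array([1.0, 2.0, 0.5])
print(M(4, t, y).shape)  # (3, 3): n - 1 rows, one column per epoch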
Example #12
def generate_sample_grid(theta_mean, theta_std, n):
    """
    Create a meshgrid of n ** n_dim samples,
    tiling [theta_mean[i] - 5 * theta_std[i], theta_mean[i] + 5 * theta_std]
    into n portions.
    Also returns the volume element.

    Parameters
    ----------
    theta_mean, theta_std : ndarray (n_dim)

    Returns
    -------
    theta_samples : ndarray (nobj, n_dim)

    vol_element: scalar
        Volume element

    """
    n_components = theta_mean.size
    xs = [
        np.linspace(
            theta_mean[i] - 5 * theta_std[i],
            theta_mean[i] + 5 * theta_std[i],
            n,
        )
        for i in range(n_components)
    ]
    mxs = np.meshgrid(*xs)
    orshape = mxs[0].shape
    mxsf = np.vstack([i.ravel() for i in mxs]).T
    # grid spacing is constant along each dimension, so the first diff suffices
    dxs = np.vstack([np.diff(xs[i])[0] for i in range(n_components)])
    vol_element = np.prod(dxs)
    theta_samples = np.vstack(mxsf)
    return theta_samples, vol_element
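A hypothetical usage sketch with invented values:

import numpy as np

theta_mean = np.array([0.0, 1.0])
theta_std = np.array([1.0, 2.0])
samples, vol = generate_sample_grid(theta_mean, theta_std, 5)
print(samples.shape)  # (25, 2)
print(vol)            # 12.5 = (10 / 4) * (20 / 4)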
Example #13
 def get_bins(x1, x2, p1, p2):
     g_min, g_max = gamma_min_max(x1, p1, x2, p2)
     bins = jnp.linspace(g_min, g_max, S_gamma)
     gamma = 0.5 * (bins[:-1] + bins[1:])
     log_w = log_tomographic_weight_function(bins, x1, x2, p1, p2, S=S_marg)
     log_dgamma = jnp.log(jnp.diff(bins))
     return log_dgamma, gamma, log_w
Example #14
def interp_manygrids(grids, xs, axis=0, return_wnext=True, trim=False):
    # This routine interpolates xs on many grids, defined along `axis` of the
    # array grids (so for axis=0 the grids are grids[:, i, j, k] for all i, j, k).

    assert np.all(np.diff(grids, axis=axis) > 0)
    '''
    if trim: xs = np.clip(xs[:,None,None],
                          grids.min(axis=axis,keepdims=True),
                          grids.max(axis=axis,keepdims=True))
    '''

    # this requires everything to be sorted
    mat = grids[..., None] < xs[(None, ) * grids.ndim + (slice(None), )]
    ng = grids.shape[axis]
    j = np.clip(np.sum(mat, axis=axis)[None, ...] - 1, 0, ng - 2)
    j = np.swapaxes(j, -1, axis).squeeze(axis=-1)
    grid_j = np.take_along_axis(grids, j, axis=axis)
    grid_jp = np.take_along_axis(grids, j + 1, axis=axis)

    xs_r = xs.reshape((1, ) * (axis - 1) + (xs.size, ) + (1, ) *
                      (grids.ndim - 1 - axis))

    wnext = (xs_r - grid_j) / (grid_jp - grid_j)
    return j, (wnext if return_wnext else 1 - wnext)
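A minimal 1-D usage sketch (a single grid along axis=0, invented values):

import numpy as np

grids = np.linspace(0.0, 1.0, 6)   # one grid with spacing 0.2
xs = np.array([0.05, 0.5, 0.95])
j, wnext = interp_manygrids(grids, xs)
print(j)      # [0 2 4] -- left bracketing indices
print(wnext)  # [0.25 0.5 0.75] -- weight on the next grid point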
Example #15
 def test_sort(self):
     q = soft_quantilizer.SoftQuantilizer(self.x,
                                          threshold=1e-3,
                                          epsilon=1e-3)
     deltas = np.diff(q.softsort, axis=-1) > 0
     self.assertAllClose(deltas,
                         np.ones(deltas.shape, dtype=bool),
                         check_dtypes=True)
Example #16
 def test_sort_descending(self):
   x = self.x[0][0]
   s = ops.softsort(x, axis=-1, direction='DESCENDING',
                    threshold=1e-3, epsilon=1e-3)
   self.assertEqual(s.shape, x.shape)
   deltas = np.diff(s, axis=-1) < 0
   self.assertAllClose(
       deltas, np.ones(deltas.shape, dtype=bool), check_dtypes=True)
Example #17
 def inverse_fun(params, inputs, **kwargs):
     outputs = np.hstack((
         inputs[:, 0, None],  # redshift
         (inputs[:, ref_idx, None] - ref_mean) / ref_std,  # ref mag
         -np.diff(inputs[:, 1:]),  # colors
     ))
     log_det = -np.log(ref_std) * np.ones(inputs.shape[0])
     return outputs, log_det
Example #18
 def test_sort_batch(self, topk):
   x = jax.random.uniform(self.rng, (32, 20, 12, 8))
   axis = 1
   xs = soft_sort.sort(x, axis=axis, topk=topk)
   expected_shape = list(x.shape)
   expected_shape[axis] = topk if (0 < topk < x.shape[axis]) else x.shape[axis]
   self.assertEqual(xs.shape, tuple(expected_shape))
   self.assertTrue(jnp.alltrue(jnp.diff(xs, axis=axis) >= 0.0))
Example #19
    def dynamics(self,
                 T,
                 params,
                 x0,
                 num_frozen=0,
                 confirmed=None,
                 death=None,
                 suffix=""):
        '''Run SEIRD dynamics for T time steps'''

        (beta0, sigma, gamma, rw_scale, drift, det_prob0,
         confirmed_dispersion, death_dispersion, death_prob,
         death_rate, det_prob_d) = params

        rw = frozen_random_walk("rw" + suffix,
                                num_steps=T - 1,
                                num_frozen=num_frozen)

        beta = numpyro.deterministic("beta", beta0 * np.exp(rw_scale * rw))

        det_prob = numpyro.sample(
            "det_prob" + suffix,
            LogisticRandomWalk(loc=det_prob0,
                               scale=rw_scale,
                               drift=0,
                               num_steps=T - 1))

        # Run ODE
        x = SEIRDModel.run(T, x0, (beta, sigma, gamma, death_prob, death_rate))

        numpyro.deterministic("x" + suffix, x[1:])

        x_diff = np.diff(x, axis=0)

        # Noisy observations
        with numpyro.handlers.scale(scale=0.5):
            y = observe_nb2("dy" + suffix,
                            x_diff[:, 6],
                            det_prob,
                            confirmed_dispersion,
                            obs=confirmed)

        with numpyro.handlers.scale(scale=2.0):
            z = observe_nb2("dz" + suffix,
                            x_diff[:, 5],
                            det_prob_d,
                            death_dispersion,
                            obs=death)

        return beta, det_prob, x, y, z
Example #20
def ddn(cell: PVCell, pot: Potentials) -> Array:

    R = recomb.all_recomb(cell, pot)

    Jn = current.Jn(cell, pot)

    ave_dgrid = (cell.dgrid[:-1] + cell.dgrid[1:]) / 2.

    return -R[1:-1] + cell.G[1:-1] + jnp.diff(Jn) / ave_dgrid
Example #21
 def test_topk_one_array(self, k):
   n = 20
   x = jax.random.uniform(self.rng, (n,))
   axis = 0
   xs = soft_sort.sort(x, axis=axis, topk=k, epsilon=1e-3)
   outsize = k if 0 < k < n else n
   self.assertEqual(xs.shape, (outsize,))
   self.assertTrue(jnp.alltrue(jnp.diff(xs, axis=axis) >= 0.0))
   self.assertAllClose(xs, jnp.sort(x, axis=axis)[-outsize:], atol=0.01)
Example #22
 def test_sort_descending(self):
     x = self.x[0][0]
     s = ops.softsort(x,
                      axis=-1,
                      direction='DESCENDING',
                      threshold=1e-3,
                      epsilon=1e-3)
     self.assertEqual(s.shape, x.shape)
     deltas = jnp.diff(s, axis=-1) < 0
     np.testing.assert_allclose(deltas, jnp.ones(deltas.shape, dtype=bool))
Example #23
def get_cluster_distance(data, eps):
    if data.shape[0] == 0:
        return 0
    # 0-dimensional persistent homology: death times of connected components
    result = ripser(data, maxdim=0)
    deaths = result['dgms'][0][:, 1]
    # drop the essential component, which never dies (reported as ~inf)
    deaths = deaths[deaths < 1E308]
    # the largest gap between consecutive death times separates
    # within-cluster merges from between-cluster merges
    diffs = np.diff(deaths, axis=0)
    max_index = np.argmax(diffs)
    radius = deaths[max_index + 1] / 4
    if radius < eps:
        radius = 1E6
    return radius
Example #24
def create_interpolator(points, values):
    if not hasattr(values, "ndim"):
        # allow reasonable duck-typed values
        values = jnp.asarray(values)

    if len(points) > values.ndim:
        raise ValueError("There are %d point arrays, but values has %d "
                         "dimensions" % (len(points), values.ndim))

    if hasattr(values, "dtype") and hasattr(values, "astype"):
        if not jnp.issubdtype(values.dtype, jnp.inexact):
            values = values.astype(float)

    for i, p in enumerate(points):
        if not jnp.all(jnp.diff(p) > 0.0):
            raise ValueError(
                "The points in dimension %d must be strictly ascending" % i)
        if not jnp.asarray(p).ndim == 1:
            raise ValueError(
                "The points in dimension %d must be 1-dimensional" % i)
        if not values.shape[i] == len(p):
            raise ValueError("There are %d points and %d values in "
                             "dimension %d" % (len(p), values.shape[i], i))
    grid = tuple([jnp.asarray(p) for p in points])
    ndim = len(grid)

    def interpolator(xi, method="linear"):
        if method not in ["linear", "nearest"]:
            raise ValueError("Method '%s' is not defined" % method)

        xi = _ndim_coords_from_arrays(xi, ndim)
        if xi.shape[-1] != len(grid):
            raise ValueError("The requested sample points xi have dimension "
                             "%d, but this RegularGridInterpolator has "
                             "dimension %d" % (xi.shape[1], ndim))

        xi_shape = xi.shape
        xi = xi.reshape(-1, xi_shape[-1])

        for i, p in enumerate(xi.T):
            if not jnp.logical_and(jnp.all(grid[i][0] <= p),
                                   jnp.all(p <= grid[i][-1])):
                raise ValueError(
                    "One of the requested xi is out of bounds in dimension %d"
                    % i)

        indices, norm_distances = _find_indices(xi.T, grid)
        if method == "linear":
            result = _evaluate_linear(values, indices, norm_distances)
        elif method == "nearest":
            result = _evaluate_nearest(values, indices, norm_distances)

        return result.reshape(xi_shape[:-1] + values.shape[ndim:])

    return interpolator
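A hedged usage sketch, assuming the underscore-prefixed helpers behave like their counterparts inside scipy.interpolate.RegularGridInterpolator:

import jax.numpy as jnp

xg = jnp.linspace(0.0, 1.0, 5)
yg = jnp.linspace(0.0, 2.0, 9)
vals = xg[:, None] + yg[None, :]        # linear in (x, y)
interp = create_interpolator((xg, yg), vals)
print(interp(jnp.array([[0.5, 1.0]])))  # ~[1.5]; linear interpolation is exact here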
Example #25
def build_raised_cosine_matrix(nh, endpoints, b, dt):
    """
    Make basis of raised cosines with logarithmically stretched time axis.
    
    Ported from [matlab code](https://github.com/pillowlab/raisedCosineBasis)
    
    Parameters
    ==========
    nh : int
        number of basis vectors
    
    endpoints : array like, shape=(2, )
        absolute temporal positions of the centers of the first and last cosine basis vectors
        
    b : float
        offset for nonlinear stretching of x axis: y=log(x+b)
    
    dt : float
        time bin size of bins representing basis
        
    Return
    ======
    
    ttgrid : shape=(nt, )
        time lattice on which basis is defined
    
    basis : shape=(nt, nh)
        original cosine basis vectors
        
    """
    def nl(x):
        return np.log(x + 1e-20)

    def invnl(x):
        return np.exp(x) - 1e-20

    def raised_cosine_basis(x, c, dc):
        return 0.5 * (np.cos(
            np.maximum(-np.pi, np.minimum(np.pi,
                                          (x - c) * np.pi / dc / 2))) + 1)

    yendpoints = nl(endpoints + b)
    dctr = np.diff(yendpoints) / (nh - 1)
    ctrs = np.linspace(yendpoints[0], yendpoints[1], nh)
    maxt = invnl(yendpoints[1] + 2 * dctr) - b
    ttgrid = np.arange(0, maxt + dt, dt)
    nt = len(ttgrid)

    xx = np.tile(nl(ttgrid + b)[:, np.newaxis], (1, nh))
    cc = np.tile(ctrs, (nt, 1))

    basis = raised_cosine_basis(xx, cc, dctr)

    return ttgrid, basis
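A hypothetical usage sketch with invented values (np may be NumPy or jax.numpy):

import numpy as np

# 5 basis vectors with centers between 1 and 100 time units,
# log-stretching offset b = 1 and bin size dt = 1
ttgrid, basis = build_raised_cosine_matrix(5, np.array([1.0, 100.0]), 1.0, 1.0)
print(ttgrid.shape, basis.shape)  # (nt,) and (nt, 5)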
Example #26
def differentiable_cumsum_capper(x, threshold, width):
    """Starts out 1 and decays to 0 so that (result * x).cumsum() <= threshold."""
    cumsum = x.cumsum()
    capped_cumsum = threshold - width * nn.softplus(
        (threshold - cumsum) / width)
    capped_x = jnp.concatenate([x[:1], jnp.diff(capped_cumsum)])
    # The "double where" trick is needed to ensure non-nan gradients here. See
    # https://github.com/google/jax/issues/1052#issuecomment-514083352
    nonzero_x = jnp.where(x == 0.0, 1.0, x)
    capper = jnp.where(x == 0.0, 0.0, capped_x / nonzero_x)
    return capper
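A hypothetical usage sketch (assuming nn is jax.nn): with unit increments and threshold 5, the capped running sum rises linearly and then saturates just below the threshold.

import jax.numpy as jnp

x = jnp.ones(10)
capper = differentiable_cumsum_capper(x, threshold=5.0, width=0.1)
print(jnp.cumsum(capper * x))  # ~[1, 2, 3, 4, 4.9, 5, 5, ...]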
Example #27
def generation_lambda(design: PVDesign, phi_0: f64, alpha: Array) -> Array:

    # phi_0, alpha expected to be in SI units

    x = design.grid * scales.length * scales.cm  # m
    dx = jnp.diff(x)  # m

    phi = phi_0 * jnp.exp(-jnp.cumsum(
        jnp.concatenate([jnp.zeros(1), alpha[:-1] * dx])))  # 1 / (m^2 s)
    g = phi * alpha  # 1 / (m^3 s)

    return g
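The cumulative sum implements Beer-Lambert-style attenuation; a standalone sketch with invented values (no PVDesign needed): with constant alpha the flux decays as exp(-alpha * x).

import jax.numpy as jnp

x = jnp.linspace(0.0, 1e-6, 101)   # m
dx = jnp.diff(x)
alpha = 1e7 * jnp.ones_like(x)     # 1 / m
phi_0 = 1e21                       # 1 / (m^2 s)
phi = phi_0 * jnp.exp(-jnp.cumsum(jnp.concatenate([jnp.zeros(1), alpha[:-1] * dx])))
print(jnp.allclose(phi, phi_0 * jnp.exp(-alpha * x), rtol=1e-4))  # True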
Example #28
def collapse_particles(rng, particle_weights, particles):
    """Collapses identical particles and recomputes their weights."""
    n_particles, num_patients = particles.shape
    if n_particles < 2:
        return particle_weights, particles

    # Project each particle onto a random direction: identical particles get
    # identical projections, while distinct ones collide with probability zero.
    alpha = jax.random.normal(rng, shape=((1, num_patients)))
    random_projection = np.sum(particles * alpha, axis=-1)
    # Sort so that duplicates become adjacent.
    indices = np.argsort(random_projection)
    particles = particles[indices, :]
    particle_weights = particle_weights[indices]
    cumsum_particle_weights = np.cumsum(particle_weights)
    random_projection = random_projection[indices]
    # Index of the last member of each group of duplicates.
    indices_singles, = np.where(
        np.flip(np.diff(np.flip(random_projection)) != 0))
    indices_singles = np.append(indices_singles, n_particles - 1)
    # Differences of the cumulative weights at the group boundaries give the
    # summed weight of each group.
    new_weights = np.diff(cumsum_particle_weights[indices_singles])
    new_weights = np.append(
        np.array(cumsum_particle_weights[indices_singles[0]]), new_weights)
    new_particles = particles[indices_singles, :]
    return new_weights, new_particles
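A hypothetical sketch with three particles, two of them identical (np is jax.numpy, as in the snippet):

import jax
import jax.numpy as np

particles = np.array([[0, 1], [0, 1], [1, 1]])
weights = np.array([0.2, 0.3, 0.5])
w, p = collapse_particles(jax.random.PRNGKey(0), weights, particles)
print(p)  # two unique particles remain
print(w)  # merged weights, e.g. 0.5 (= 0.2 + 0.3) and 0.5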
Example #29
    def cond_fn(iteration, const, state):  # pylint: disable=unused-argument
        """Stopping criterion. Checking decrease of objective is needed here."""
        _, threshold = const
        errors, _, _ = state
        err = errors[iteration // inner_iterations - 1]

        return jnp.logical_or(
            iteration == 0,
            jnp.logical_and(
                jnp.logical_and(jnp.isfinite(err), err > threshold),
                jnp.all(
                    jnp.diff(errors) <= 0)))  # check decreasing obj, else stop
Example #30
def test_studentst(shape=(1000, ), loc=0.0, scale=5.0, dof=5.0):
    key = jr.PRNGKey(time.time_ns())
    key1, key2 = jr.split(key, 2)
    zs = jr.normal(key1, shape=shape)
    alpha, beta = dof / 2.0, 2.0 / dof
    taus = jr.gamma(key2, alpha, shape=shape) / beta
    data = loc + zs * scale / np.sqrt(taus)
    # true = dists.StudentT(dof, loc, scale)
    # data = true.sample(seed=key, sample_shape=shape)
    norm = dists.Normal.fit(data)
    stdt, lps = dists.StudentT.fit(data)
    assert np.all(np.diff(lps) > -1e-3)
    assert stdt.log_prob(data).mean() > norm.log_prob(data).mean()