Code Example #1
def jaxsvd_bwd(r, tangents):
    U, S, V = r
    du, ds, dv = tangents

    dU = jnp.conj(du)
    dS = jnp.conj(ds)
    dV = jnp.transpose(dv)

    ms = jnp.diag(S)
    ms1 = jnp.diag(_safe_reciprocal(S))
    dAs = U @ jnp.diag(dS) @ V

    F = S * S - (S * S)[:, None]
    F = _safe_reciprocal(F) - jnp.diag(jnp.diag(_safe_reciprocal(F)))

    J = F * (h(U) @ dU)
    dAu = U @ (J + h(J)) @ ms @ V

    K = F * (V @ dV)
    dAv = U @ ms @ (K + h(K)) @ V

    O = h(dU) @ U @ ms1
    dAc = -1 / 2.0 * U @ (jnp.diag(jnp.diag(O - jnp.conj(O)))) @ V

    dAv = dAv + U @ ms1 @ h(dV) @ (jnp.eye(jnp.size(V[1, :])) - h(V) @ V)
    dAu = dAu + (jnp.eye(jnp.size(U[:, 1])) - U @ h(U)) @ dU @ ms1 @ V
    grad_a = jnp.conj(dAv + dAu + dAs + dAc)
    return (grad_a, )
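
The backward pass above relies on two helpers the snippet does not show, h (Hermitian transpose) and _safe_reciprocal (a regularized 1/x). A minimal sketch consistent with how they are used:

import jax.numpy as jnp

def h(x):
    # Hermitian (conjugate) transpose of a matrix.
    return jnp.conj(jnp.transpose(x))

def _safe_reciprocal(x, epsilon=1E-20):
    # Reciprocal regularized near zero, so degenerate singular values
    # do not produce infs when building F.
    return x / (x * x + epsilon)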
Code Example #2
File: random.py Project: ziheng0924/google-research
  def get_groups(self, state):
    """Produces random design matrix with nmax 1s per line.

    Args:
     state: the current state.State of the system.

    Returns:
     A np.array<bool>[num_groups, patients].
    """
    if self.method == 'single':
      new_groups = sample_groups_of_size(
          (state.num_patients, state.extra_tests_needed), state.max_group_size)
    # if prior_infection_rate is a scalar, use the expected count
    # rate * num_patients; otherwise sum the per-patient rates
    if np.size(self.prior_infection_rate) == 1:
      max_infected = int(
          np.ceil(self.prior_infection_rate * state.num_patients))
    elif np.size(self.prior_infection_rate) == state.num_patients:
      max_infected = int(np.sum(self.prior_infection_rate))

    if self.method == 'disjoint':
      new_groups = sample_disjoint_matrix(state.num_patients,
                                          state.extra_tests_needed,
                                          state.max_group_size, max_infected,
                                          self.max_iter)
      if new_groups is None:
        raise ValueError('No satisfying matrix found after max iterations')
    if self.method == 'count':
      new_groups, _ = sample_maxeval_disjoint_matrix(
          state.num_patients, state.extra_tests_needed, state.max_group_size,
          max_infected, self.max_iter, self.count_fn)

    return np.array(new_groups)
Code Example #3
def get_sparsity(params: hk.Params):
    """Calculate the total sparsity and tensor-wise sparsity of params."""
    total_params = sum(jnp.size(x) for x in jax.tree_leaves(params))
    total_nnz = sum(jnp.sum(x != 0.) for x in jax.tree_leaves(params))
    leaf_sparsity = jax.tree_map(lambda x: jnp.sum(x == 0) / jnp.size(x),
                                 params)
    return total_params, total_nnz, leaf_sparsity
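
A short usage sketch, assuming the function above is in scope; hk.Params is a nested mapping, so a plain dict stands in for real Haiku parameters here:

import jax
import jax.numpy as jnp

params = {"linear": {"w": jnp.array([[0., 1.], [2., 0.]]), "b": jnp.zeros(2)}}
total, nnz, leaf_sparsity = get_sparsity(params)
# total == 6 parameters, nnz == 2 nonzeros,
# leaf_sparsity maps each leaf to its fraction of zeros (b: 1.0, w: 0.5).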
Code Example #4
 def test_selector_without_particles(self, selector):
     groups = selector.get_groups(self.rng, self.state)
     self.assertGreater(groups.shape[0], 0)
     self.assertEqual(groups.shape[1], self.num_patients)
     self.assertEqual(np.size(self.state.groups_to_test), 0)
     selector(self.rng, self.state)
     self.assertGreater(np.size(self.state.groups_to_test), 0)
Code Example #5
 def test_selector_with_particles(self, selector):
     sampler = sequential_monte_carlo.SmcSampler()
     rngs = jax.random.split(self.rng, 2)
     sampler.produce_sample(rngs[0], self.state)
     self.state.update_particles(sampler)
     self.assertEqual(np.size(self.state.groups_to_test), 0)
     selector(rngs[1], self.state)
     self.assertGreater(np.size(self.state.groups_to_test), 0)
Code Example #6
File: test_minibatch.py Project: DPBayes/d3p
    def test_split_batchify_init(self):
        data = jnp.arange(0, 100)
        init, fetch = split_batchify_data((data, ), 10)

        rng_key = jax.random.PRNGKey(0)
        num_batches, batchifier_state = init(rng_key)

        self.assertEqual(10, num_batches)
        self.assertEqual(jnp.size(data), jnp.size(batchifier_state))
        self.assertTrue(np.allclose(np.unique(batchifier_state), data))
Code Example #7
    def bias(self, value: np.ndarray):
        # - Replicate `bias` from a scalar value
        if np.size(value) == 1:
            value = np.repeat(value, self._size)

        assert (
            np.size(value) == self._size
        ), "`bias` must have {:d} elements or be a scalar".format(self._size)

        self._bias = np.reshape(value, self._size).astype("float")
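
To exercise the setter in isolation, a hypothetical minimal host class (the real class, which sets self._size elsewhere, comes from the surrounding library):

import numpy as np

class _Module:
    def __init__(self, size):
        self._size = size

    @property
    def bias(self):
        return self._bias

    @bias.setter
    def bias(self, value: np.ndarray):
        if np.size(value) == 1:
            value = np.repeat(value, self._size)  # replicate a scalar
        assert np.size(value) == self._size
        self._bias = np.reshape(value, self._size).astype("float")

m = _Module(3)
m.bias = 0.5
print(m.bias)  # [0.5 0.5 0.5]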
Code Example #8
 def test_add_groups_to_test(self):
     self.assertEqual(np.size(self.state.groups_to_test), 0)
     self.state.add_groups_to_test(self.groups)
     self.assertTrue(np.all(self.state.groups_to_test == self.groups))
     self.state.add_groups_to_test(self.groups)
     self.assertEqual(self.state.groups_to_test.shape,
                      (2 * self.num_groups, self.state.num_patients))
     self.assertGreater(self.state.num_groups_left_to_test, 0)
     self.state.reset()
     self.assertEqual(np.size(self.state.groups_to_test), 0)
     self.assertEqual(self.state.num_groups_left_to_test, 0)
Code Example #9
 def get_groups(self, rng, state):
   if np.size(state.past_groups) > 0 and np.size(state.to_clear_positives) > 0:
     to_split = state.past_groups[state.to_clear_positives, :]
     # we can only split groups that have more than 1 individual
     to_split = to_split[np.sum(to_split, axis=-1) > 1, :]
     if np.size(to_split) > 0:
       if np.ndim(to_split) == 1:
         to_split = onp.expand_dims(to_split, axis=0)
       # each group indexed by indices will be split in split_factor terms
       return self._split_groups(to_split)
     else:
       logging.info('only singletons')
Code Example #10
 def test_act(self):
   num_patients = 40
   num_tests_per_cycle = 4
   s = state.State(num_patients, num_tests_per_cycle,
                   max_group_size=5, prior_infection_rate=0.05,
                   prior_specificity=0.95, prior_sensitivity=0.80)
   self.assertEqual(np.size(s.groups_to_test), 0)
   self.assertEqual(self.policy.index, 0)
   self.policy.act(self.rng, s)
   self.assertGreater(np.size(s.groups_to_test), 0)
   self.assertEqual(s.groups_to_test.shape[1], num_patients)
   self.assertGreater(s.groups_to_test.shape[0], 0)
   self.assertEqual(self.policy.index, 1)
Code Example #11
def onnx_pad(x, pads, constant_value=0.0, mode='constant'):
    input_rank = x.ndim
    if input_rank * 2 != jnp.size(pads):
        raise ValueError(
            'The number of elements in pads should be 2 * data_rank')

    # re-order to the np.pad accepted order ((x1_begin, x1_end), (x2_begin, x2_end), ...)
    pad_width = ()
    for i in range(jnp.size(pads) // 2):
        pad_width += ((pads[i], pads[i + input_rank]),)

    if mode == 'constant':
        return jnp.pad(x, pad_width, mode, constant_values=constant_value)
    return jnp.pad(x, pad_width, mode)
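
A usage sketch, assuming onnx_pad above is in scope; ONNX flattens pads as [x1_begin, x2_begin, ..., x1_end, x2_end, ...]:

import jax.numpy as jnp

x = jnp.ones((2, 3))
pads = [1, 0, 1, 2]  # becomes pad_width ((1, 1), (0, 2))
print(onnx_pad(x, pads).shape)  # (4, 5)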
Code Example #12
 def add_test_results(self, test_results):
     """Update state with results from recently tested groups."""
     self.past_test_results = np.concatenate(
         (self.past_test_results, test_results), axis=0)
     missing_entries_in_to_clear = (np.size(self.past_test_results) -
                                    np.size(self.to_clear_positives))
     if missing_entries_in_to_clear > 0:
          # we should update the list of groups that have tested positive.
          # this information is used by some strategies, notably Dorfman-type ones.
         # if some entries are missing, they come by default from the latest wave
         # of tests carried out.
         self.to_clear_positives = np.concatenate(
             (self.to_clear_positives,
              test_results[-missing_entries_in_to_clear:]),
             axis=0)
Code Example #13
def pinv(model: SpectralSobolev1Fit):
    ns = model.exponents
    A = vander_builder(model.grid, ns)(model.mesh)
    B = vandergrad_builder(model.grid, ns)(model.mesh)
    I = np.ones((np.size(A, 0), 1))
    O = np.zeros((np.size(B, 0), 1))
    #
    if model.is_periodic:
        U = np.hstack((I, np.real(A), np.imag(A)))
        V = np.hstack((O, np.imag(B), np.real(B)))
    else:
        U = np.hstack((I, A))
        V = np.hstack((O, B))
    #
    return np.linalg.pinv(np.vstack((U, V)))
Code Example #14
    def vjp_all(g):

        vjp_y = g[-1, :]
        vjp_t0 = 0
        time_vjp_list = []
        vjp_args = np.zeros(np.size(flat_args))

        for i in range(T - 1, 0, -1):

            # Compute effect of moving measurement time.
            vjp_cur_t = np.dot(func(yt[i, :], t[i], *func_args), g[i, :])
            time_vjp_list.append(vjp_cur_t)
            vjp_t0 = vjp_t0 - vjp_cur_t

            # Run augmented system backwards to the previous observation.
            aug_y0 = np.hstack((yt[i, :], vjp_y, vjp_t0, vjp_args))
            aug_ans = odeint(augmented_dynamics, aug_y0,
                             np.stack([t[i], t[i - 1]]), (flat_args, ))
            _, vjp_y, vjp_t0, vjp_args = unpack(aug_ans[1])

            # Add gradient from current output.
            vjp_y = vjp_y + g[i - 1, :]

        time_vjp_list.append(vjp_t0)
        vjp_times = np.hstack(time_vjp_list)[::-1]

        return None, vjp_y, vjp_times, unravel(vjp_args)
Code Example #15
 def test_add_test_results(self):
     self.assertEqual(np.size(self.state.groups_to_test), 0)
     self.state.add_groups_to_test(self.groups)
     self.state.add_past_groups(self.groups)
     self.state.add_test_results(self.results)
     self.assertTrue(np.all(self.state.past_groups == self.groups))
     self.assertTrue(np.all(self.state.past_test_results == self.results))
Code Example #16
def select_from_sizes(values: np.ndarray, sizes: np.ndarray) -> np.ndarray:
    """Selects using indices group_sizes the relevant values for a parameter.

  Given a parameter vector (or possibly constant) that describes values
  for groups of size 1, 2, ..., k_max, selects values according to the
  vector sizes. When an item in sizes is larger than the size of the
  values vector, we revert to the last element of the vector by default.

  Note that the values array is 0-indexed, therefore the values corresponding
  to size 1 is values[0], to size 2 values[1] and more generally, the value for
  a group of size i is values[i-1].

  Args:
    values: an np.ndarray of size 1 or more, from which to select the
     values.
    sizes: np.array[int] representing the group sizes we want to extract the
     values of.

  Returns:
    vector of parameter values, chosen at the corresponding group sizes,
    of the same size as sizes.

  Raises:
   ValueError when the values array has more than one dimension.
  """
    values = np.asarray(values)
    dim = np.ndim(values)
    if dim > 1:
        raise ValueError(f"sizes argument has dimension {dim} > 1.")

    # The values are 0-indexed, but sizes are strictly positives.
    indices = np.minimum(sizes, np.size(values)) - 1
    return np.squeeze(values[indices])
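
A worked usage sketch, assuming select_from_sizes above is in scope:

import numpy as np

# per-size parameters for groups of size 1..3
values = np.array([0.9, 0.8, 0.7])
sizes = np.array([1, 3, 5])
# size 5 exceeds the table, so it falls back to the last entry
print(select_from_sizes(values, sizes))  # [0.9 0.7 0.7]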
Code Example #17
 def topk_mask_internal(value):
     assert value.ndim == 1
     indices = jnp.argsort(value)
     k = jnp.round(density_fraction * jnp.size(value)).astype(jnp.int32)
     mask = jnp.greater_equal(jnp.arange(value.size), value.size - k)
     mask = jnp.zeros_like(mask).at[indices].set(mask)
     return mask.astype(jnp.int32)
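
A usage sketch; density_fraction is a closure variable in the original, so it is set explicitly here for illustration:

import jax.numpy as jnp

density_fraction = 0.5
value = jnp.array([0.3, -1.2, 0.7, 0.1])
print(topk_mask_internal(value))  # [1 0 1 0]: marks the two largest entries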
Code Example #18
File: ops.py Project: HackerShohag/SuggestBot-bn
def _convert_to_tensor(value, dtype=None, dtype_hint=None, name=None):  # pylint: disable=unused-argument
    """Emulates tf.convert_to_tensor."""
    assert not tf.is_tensor(value), value
    if isinstance(value, np.ndarray):
        if dtype is not None:
            dtype = utils.numpy_dtype(dtype)
            # if np.result_type(value, dtype) != dtype:
            #   raise ValueError('Expected dtype {} but got {} with dtype {}.'.format(
            #       dtype, value, value.dtype))
            return value.astype(dtype)
        return value
    if isinstance(value, TensorShape):
        value = [int(d) for d in value.as_list()]
    if dtype is None and dtype_hint is not None:
        dtype_hint = utils.numpy_dtype(dtype_hint)
        value = np.array(value)
        if np.size(value):
            # Match TF behavior, which won't downcast e.g. float to int.
            if np.issubdtype(value.dtype, np.complexfloating):
                if not np.issubdtype(dtype_hint, np.complexfloating):
                    return value
            if np.issubdtype(value.dtype, np.floating):
                if not np.issubdtype(dtype_hint, np.floating):
                    return value
            if np.issubdtype(value.dtype, np.integer):
                if not np.issubdtype(dtype_hint, np.integer):
                    return value
        return value.astype(dtype_hint)
    return np.array(value, dtype=utils.numpy_dtype(dtype or dtype_hint))
Code Example #19
File: split.py Project: ziheng0924/google-research
  def _split_groups(self, groups):
    """Splits the groups."""
    # if split_factor is None, we do exhaustive split,
    # i.e. we test everyone as in Dorfman groups
    use_split_factor = self.split_factor
    # make sure this is a matrix
    if np.ndim(groups) == 1:
      groups = onp.expand_dims(groups, 0)
    n_groups, n_patients = groups.shape

    # we form new groups one by one now.
    new_groups = None
    for i in range(n_groups):
      group_i = groups[i, :]
      # test if there is one individual to test
      if np.sum(group_i) > 1:
        indices, = np.where(group_i)
        if self.split_factor is None:
          use_split_factor = np.size(indices)
        indices = onp.array_split(indices, use_split_factor)
        newg = onp.zeros((len(indices), n_patients))
        for j in range(len(indices)):
          newg[j, indices[j]] = 1
        # only accumulate when the group was actually split; otherwise a
        # singleton group would reuse (or crash on) the previous newg.
        if new_groups is None:
          new_groups = newg
        else:
          new_groups = onp.concatenate((new_groups, newg), axis=0)
    return np.array(new_groups, dtype=bool)
Code Example #20
def vandergrad_builder(grid, exponents):
    """
    Returns a closure over the grid and exponents to build a Vandermonde-like
    matrix for fitting the gradient of a Fourier or Chebyshev expansion.
    """
    ns = exponents
    #
    if grid.shape.size == 1:

        def flip_multiply(x, y):
            return x
    else:

        def flip_multiply(x, y):
            return x * np.fliplr(y)

    #
    if grid.is_periodic:

        def expand(x):
            z = np.exp(-1j * np.pi * ns * x)
            return flip_multiply(ns * z, z).T
    else:

        def expand(x):
            z = x**(np.maximum(ns - 1, 0))
            return flip_multiply(ns * z, x).T

    #
    return jit(lambda xs: vmap(expand)(xs).reshape(-1, np.size(ns, 0)))
Code Example #21
    def _setup_prototype(self, *args, **kwargs):
        super(AutoContinuous, self)._setup_prototype(*args, **kwargs)
        # FIXME: without block statement, get AssertionError: all sites must have unique names
        init_params, is_valid = block(find_valid_initial_params)(
            self._init_rng,
            self.model,
            *args,
            init_strategy=self.init_strategy,
            **kwargs)
        self._inv_transforms = {}
        self._has_transformed_dist = False
        unconstrained_sites = {}
        for name, site in self.prototype_trace.items():
            if site['type'] == 'sample' and not site['is_observed']:
                if site['intermediates']:
                    transform = biject_to(site['fn'].base_dist.support)
                    self._inv_transforms[name] = transform
                    unconstrained_sites[name] = transform.inv(
                        site['intermediates'][0][0])
                    self._has_transformed_dist = True
                else:
                    transform = biject_to(site['fn'].support)
                    self._inv_transforms[name] = transform
                    unconstrained_sites[name] = transform.inv(site['value'])

        self._init_latent, self.unpack_latent = ravel_pytree(init_params)
        self.latent_size = np.size(self._init_latent)
        if self.latent_size == 0:
            raise RuntimeError(
                '{} found no latent variables; Use an empty guide instead'.
                format(type(self).__name__))
Code Example #22
File: random.py Project: ziheng0924/google-research
  def get_groups(self, rng, state):
    """Produces random design matrix fixed number of 1s per line.

    Args:
     rng: np.ndarray<int>[2]: the random key.
     state: the current state.State of the system.

    Returns:
     A np.array<bool>[num_groups, patients].
    """
    if self.group_size is None:
      # if no size has been defined, we compute it adaptively
      # in the simple case where prior is uniform.
      if np.size(state.prior_infection_rate) == 1:
        group_size = np.ceil(
            (np.log(state.prior_sensitivity - .5) -
             np.log(state.prior_sensitivity + state.prior_specificity - 1)) /
            np.log(1 - state.prior_infection_rate))
        group_size = np.minimum(group_size, state.max_group_size)
      # if prior is not uniform, pick max size.
      else:
        group_size = self.max_group_size
    else:
      group_size = self.group_size
    group_size = int(np.squeeze(group_size))
    new_groups = np.empty((0, state.num_patients), dtype=bool)
    for _ in range(state.extra_tests_needed):
      rng, rng_shuffle = jax.random.split(rng, 2)
      vec = np.zeros((1, state.num_patients), dtype=bool)
      idx = jax.random.permutation(rng_shuffle, np.arange(state.num_patients))
      vec = jax.ops.index_update(vec, [0, idx[0:group_size]], True)
      new_groups = np.concatenate((new_groups, vec), axis=0)
    return new_groups
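
A worked evaluation of the adaptive-size formula in the uniform-prior branch above, with illustrative values:

import numpy as np

sensitivity, specificity, rate = 0.95, 0.95, 0.02
group_size = np.ceil(
    (np.log(sensitivity - .5) - np.log(sensitivity + specificity - 1)) /
    np.log(1 - rate))
print(group_size)  # 35.0, before clipping to max_group_size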
Code Example #23
def _ravel_list(lst):
  if not lst: return jnp.array([], jnp.float32), lambda _: []
  from_dtypes = [dtypes.dtype(l) for l in lst]
  to_dtype = dtypes.result_type(*from_dtypes)
  sizes, shapes = unzip2((jnp.size(x), jnp.shape(x)) for x in lst)
  indices = np.cumsum(sizes)

  if all(dt == to_dtype for dt in from_dtypes):
    # Skip any dtype conversion, resulting in a dtype-polymorphic `unravel`.
    # See https://github.com/google/jax/issues/7809.
    del from_dtypes, to_dtype
    def unravel(arr):
      chunks = jnp.split(arr, indices[:-1])
      return [chunk.reshape(shape) for chunk, shape in zip(chunks, shapes)]
    raveled = jnp.concatenate([jnp.ravel(e) for e in lst])
    return raveled, unravel

  # When there is more than one distinct input dtype, we perform type
  # conversions and produce a dtype-specific unravel function.
  def unravel(arr):
    arr_dtype = dtypes.dtype(arr)
    if arr_dtype != to_dtype:
      raise TypeError(f"unravel function given array of dtype {arr_dtype}, "
                      f"but expected dtype {to_dtype}")
    chunks = jnp.split(arr, indices[:-1])
    with warnings.catch_warnings():
      warnings.simplefilter("ignore")  # ignore complex-to-real cast warning
      return [lax.convert_element_type(chunk.reshape(shape), dtype)
              for chunk, shape, dtype in zip(chunks, shapes, from_dtypes)]

  ravel = lambda e: jnp.ravel(lax.convert_element_type(e, to_dtype))
  raveled = jnp.concatenate([ravel(e) for e in lst])
  return raveled, unravel
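
The same ravel/unravel machinery is exposed publicly as jax.flatten_util.ravel_pytree; a short usage sketch:

import jax.numpy as jnp
from jax.flatten_util import ravel_pytree

flat, unravel = ravel_pytree([jnp.zeros((2, 2)), jnp.arange(3.)])
print(flat.shape)                        # (7,)
print([x.shape for x in unravel(flat)])  # [(2, 2), (3,)]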
Code Example #24
File: fresnel.py Project: quesmax/morphine
    def pupil_coordinates(x, y, pixelscale):
        """Utility function to generate coordinates arrays for a pupil
        plane wavefront

        Parameters
        ------------
        x, y : array_like
            pixel indices
        pixelscale : float or 2-tuple of floats
            the pixel scale in meters/pixel, optionally different in
            X and Y

        Returns
        -------
        Y, X :  array_like
            Wavefront coordinates in either meters or arcseconds for pupil and image, respectively
        """
        # Override parent class method to provide one that's compatible with
        # FFT indexing conventions. Centered on one pixel, not on the middle
        # of the array.
        # This function is intentionally distinct from the regular Wavefront.coordinates(), and behaves
        # slightly differently. This is required for use in the angular spectrum propagation in the PTP and
        # Direct propagations.

        pixelscale_mpix = pixelscale
        if np.size(pixelscale_mpix) != 1:
            pixel_scale_x, pixel_scale_y = pixelscale_mpix
        else:
            pixel_scale_x, pixel_scale_y = pixelscale_mpix, pixelscale_mpix

        return pixel_scale_y * y, pixel_scale_x * x
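
A usage sketch with assumed values; a 2-tuple pixelscale gives different scales in X and Y, while a scalar applies to both axes:

import numpy as np

y, x = np.indices((4, 4)) - 2  # FFT-convention pixel indices
Y, X = pupil_coordinates(x, y, pixelscale=(1e-6, 2e-6))
# Y and X are the wavefront coordinates in meters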
Code Example #25
def _triangular_solve(matrix, rhs, lower=True, adjoint=False, name=None):  # pylint: disable=redefined-outer-name
    """Scipy solve does not broadcast, so we must do so explicitly."""
    del name
    if JAX_MODE:  # But JAX uses XLA, which can do a batched solve.
        matrix = matrix + np.zeros(rhs.shape[:-2] + (1, 1), dtype=matrix.dtype)
        rhs = rhs + np.zeros(matrix.shape[:-2] + (1, 1), dtype=rhs.dtype)
        return scipy_linalg.solve_triangular(matrix,
                                             rhs,
                                             lower=lower,
                                             trans='C' if adjoint else 'N')
    try:
        bcast = onp.broadcast(matrix[..., :1], rhs)
    except ValueError as e:
        raise ValueError(
            'Error with inputs shaped `matrix`={}, rhs={}:\n{}'.format(
                matrix.shape, rhs.shape, str(e)))
    dim = matrix.shape[-1]
    matrix = onp.broadcast_to(matrix, bcast.shape[:-1] + (dim, ))
    rhs = onp.broadcast_to(rhs, bcast.shape)
    nbatch = int(np.prod(matrix.shape[:-2]))
    flat_mat = matrix.reshape(nbatch, dim, dim)
    flat_rhs = rhs.reshape(nbatch, dim, rhs.shape[-1])
    result = np.empty(flat_rhs.shape)
    if np.size(result):
        # guard for empty inputs; LAPACK otherwise raises:
        # "On entry to STRTRS parameter number 7 had an illegal value."
        for i, (mat, rh) in enumerate(zip(flat_mat, flat_rhs)):
            result[i] = scipy_linalg.solve_triangular(
                mat, rh, lower=lower, trans='C' if adjoint else 'N')
    return result.reshape(*rhs.shape)
Code Example #26
    def produce_sample(self, rng, state):
        """Produces a particle approx to posterior distribution given tests.

    If no tests have been carried out so far, naively sample from
    prior distribution.

    Otherwise take into account previous tests to form posterior
    and sample from it using a SMC sampler.

    Args:
     rng: a random key
     state: the current state of what has been tested, etc.

    Returns:
     Nothing, but sets the particle_weights and particles members.
    """
        shape = (self._num_particles, state.num_patients)
        if np.size(state.past_test_results) == 0:
            self.particles = (jax.random.uniform(rng, shape=shape) <
                              state.prior_infection_rate)
            self.particle_weights = np.ones(
                (self._num_particles, )) / self._num_particles
        else:
            rngs = jax.random.split(rng, 2)
            if self._resample_at_each_iteration or state.particles is None:
                particles = jax.random.uniform(rngs[0], shape=shape) < 0.5
            else:
                particles = state.particles
            # sample from posterior
            self.particle_weights, self.particles = self.resample_move(
                rngs[1], particles, state)
Code Example #27
def test_adjoint_g_dynamics():
    # Check that the function that computes the product of the augmented
    # diffusion dynamics against a vector actually does the same thing as
    # computing the diffusion matrix explicitly.
    D, ts, y0, args, f, g = make_sde()

    flat_args, unravel = ravel_pytree(args)

    def flat_f(y, t, flat_args):
        return f(y, t, unravel(flat_args))

    def flat_g(y, t, flat_args):
        return g(y, t, unravel(flat_args))

    aug_y, unpack = ravel_pytree((y0, y0, np.zeros(np.size(flat_args))))
    f_aug, g_aug, aug_gdg = make_ito_adjoint_dynamics(flat_f, flat_g, unpack)

    # Test g_aug
    sigma = make_explicit_sigma(flat_g, unpack)
    explicit = sigma(aug_y, ts[0], flat_args)
    implicit = jacobian(g_aug, argnums=3)(aug_y, ts[0], flat_args,
                                          np.ones(y0.shape))
    assert np.allclose(explicit, implicit)

    # Test aug_gdg (Milstein correction factor)
    explicit_milstein = make_explicit_milstein(sigma, aug_y, ts[0], flat_args)
    implicit_milstein = jacobian(aug_gdg, argnums=3)(aug_y, ts[0], flat_args,
                                                     np.ones(y0.shape))
    print(explicit_milstein)
    print(implicit_milstein)
    assert np.allclose(explicit_milstein, implicit_milstein)
Code Example #28
    def _setup_prototype(self, *args, **kwargs):
        super(AutoContinuous, self)._setup_prototype(*args, **kwargs)
        rng_key = numpyro.sample("_{}_rng_key_init".format(self.prefix),
                                 dist.PRNGIdentity())
        init_params, _ = handlers.block(find_valid_initial_params)(
            rng_key,
            self.model,
            init_strategy=self.init_strategy,
            model_args=args,
            model_kwargs=kwargs)
        self._inv_transforms = {}
        self._has_transformed_dist = False
        unconstrained_sites = {}
        for name, site in self.prototype_trace.items():
            if site['type'] == 'sample' and not site['is_observed']:
                if site['intermediates']:
                    transform = biject_to(site['fn'].base_dist.support)
                    self._inv_transforms[name] = transform
                    unconstrained_sites[name] = transform.inv(
                        site['intermediates'][0][0])
                    self._has_transformed_dist = True
                else:
                    transform = biject_to(site['fn'].support)
                    self._inv_transforms[name] = transform
                    unconstrained_sites[name] = transform.inv(site['value'])

        self._init_latent, self._unpack_latent = ravel_pytree(init_params)
        self.latent_size = np.size(self._init_latent)
        if self.base_dist is None:
            self.base_dist = dist.Independent(
                dist.Normal(np.zeros(self.latent_size), 1.), 1)
        if self.latent_size == 0:
            raise RuntimeError(
                '{} found no latent variables; Use an empty guide instead'.
                format(type(self).__name__))
Code Example #29
def generate_latents(len_sc, N):

  M1 = onp.array([range(N)]) - onp.transpose(onp.array([range(N)]))
  # len_sc may be a vector of length scales or a single scalar
  if np.ndim(len_sc) > 0:
    K = [np.exp(-(np.square(M1)/(2*np.square(len_sc[i])))) for i in np.arange(np.size(len_sc))]
  else:
    K = np.exp(-(np.square(M1)/(2*np.square(len_sc))))

  n_latents = np.size(len_sc)
  # draw a rate with GP stats (one or many)

  if np.ndim(len_sc) > 0:
    latents = np.array([onp.random.multivariate_normal(onp.zeros(N), K[i]) for i in onp.arange(np.size(len_sc))])
  else:
    latents = np.array(onp.random.multivariate_normal(onp.zeros(N), K, n_latents))
  return latents
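
A usage sketch, assuming the snippet's aliases (np = jax.numpy, onp = numpy) and the function above in scope:

import numpy as onp

# two GP latents of length 100 with different length scales
latents = generate_latents(onp.array([5.0, 20.0]), 100)
print(latents.shape)  # (2, 100)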
Code Example #30
 def proposal_dist(z, g):
     g = -self._preconditioner.flatten(g)
     dim = jnp.size(g)
     rho2 = jnp.clip(jnp.dot(g, g), a_min=1.0)
     covar = (self._mu2 * jnp.eye(dim) + self._lam2_minus_mu2 *
              jnp.outer(g, g) / jnp.dot(g, g)) / rho2
     return dist.MultivariateNormal(loc=self._preconditioner.flatten(z),
                                    covariance_matrix=covar)