Example #1
0
def make_kernel_bias_prior_spike_and_slab(
        kernel_shape,
        bias_shape,
        kernel_initializer=None,  # pylint: disable=unused-argument
        bias_initializer=None,  # pylint: disable=unused-argument
        kernel_batch_ndims=0,  # pylint: disable=unused-argument
        bias_batch_ndims=0,  # pylint: disable=unused-argument
        dtype=tf.float32,
        kernel_name='prior_kernel',
        bias_name='prior_bias'):
    """Build a spike-and-slab prior over a variational layer's kernel and bias.

  Each weight is drawn from an equal-weight two-component Normal mixture: a
  narrow "spike" (scale 1) and a wide "slab" (scale 2000). Because a
  distribution's scale is inversely related to regularization strength
  (for a Normal, `scale = (2. * l2weight)**-0.5`, equivalently
  `l2weight = scale**-2. / 2.`), the mixture's overall standard deviation of
  roughly `1415` corresponds to a very small `l2weight` (about `25e-6`),
  i.e. an extremely weak prior.

  Args:
    kernel_shape: ...
    bias_shape: ...
    kernel_initializer: Ignored.
      Default value: `None` (i.e., `tf.initializers.glorot_uniform()`).
    bias_initializer: Ignored.
      Default value: `None` (i.e., `tf.zeros`).
    kernel_batch_ndims: ...
      Default value: `0`.
    bias_batch_ndims: ...
      Default value: `0`.
    dtype: ...
      Default value: `tf.float32`.
    kernel_name: ...
      Default value: `"prior_kernel"`.
    bias_name: ...
      Default value: `"prior_bias"`.

  Returns:
    kernel_and_bias_distribution: ...
  """
    # Two components with equal probability: a tight spike and a broad slab.
    component_scales = tf.constant([1., 2000.], dtype=dtype)
    spike_and_slab = MixtureSameFamily(
        mixture_distribution=Categorical(probs=[0.5, 0.5]),
        components_distribution=Normal(loc=0., scale=component_scales))
    # The same scalar mixture is replicated independently over each shape.
    kernel_prior = Sample(spike_and_slab, kernel_shape, name=kernel_name)
    bias_prior = Sample(spike_and_slab, bias_shape, name=bias_name)
    return JointDistributionSequential([kernel_prior, bias_prior])
def compute_log_prob_uncompressed(
    alignment: Alignment,
    transition_probs_tree: TensorflowUnrootedTree,
    frequencies,
):
    """Return the log-probability of the alignment's encoded sequences.

    The alignment is encoded against the tree's taxon set, and scored under
    a per-site leaf CTMC distribution replicated over all sites.
    """
    taxa = transition_probs_tree.taxon_set
    encoded_sequences = alignment.get_encoded_sequence_tensor(taxa)
    # One independent CTMC draw per alignment site.
    per_site_distribution = Sample(
        LeafCTMC(transition_probs_tree, frequencies),
        sample_shape=(alignment.site_count, ),
    )
    return per_site_distribution.log_prob(encoded_sequences)
Example #3
0
def make_kernel_bias_prior_spike_and_slab(kernel_shape,
                                          bias_shape,
                                          dtype=tf.float32,
                                          kernel_initializer=None,
                                          bias_initializer=None):
    """Create prior for Variational layers with kernel and bias."""
    # Initializers are accepted for interface compatibility but unused.
    del kernel_initializer, bias_initializer
    # Equal-weight mixture of a narrow spike (scale 1) and wide slab
    # (scale 2000) Normal, replicated independently over each shape.
    scales = tf.constant([1., 2000.], dtype=dtype)
    spike_slab = MixtureSameFamily(
        mixture_distribution=Categorical(probs=[0.5, 0.5]),
        components_distribution=Normal(loc=0., scale=scales))
    priors = [
        Sample(spike_slab, kernel_shape, name='prior_kernel'),
        Sample(spike_slab, bias_shape, name='prior_bias'),
    ]
    return JointDistributionSequential(priors)