Example 1
 def new(params,
         event_shape=(),
         activation=tf.identity,
         validate_args=False,
         name="ZIPoissonLayer"):
     """Create the distribution instance from a `params` vector."""
     params = tf.convert_to_tensor(value=params, name='params')
     event_shape = dist_util.expand_to_vector(
         tf.convert_to_tensor(value=event_shape,
                              name='event_shape',
                              dtype=tf.int32),
         tensor_name='event_shape',
     )
     output_shape = tf.concat(
         [tf.shape(input=params)[:-1], event_shape],
         axis=0,
     )
     (log_rate_params, logits_params) = tf.split(params, 2, axis=-1)
     return tfd.Independent(
         ZeroInflated(count_distribution=tfd.Poisson(
             log_rate=activation(tf.reshape(log_rate_params, output_shape)),
             validate_args=validate_args),
                      logits=tf.reshape(logits_params, output_shape),
                      validate_args=validate_args),
         reinterpreted_batch_ndims=tf.size(input=event_shape),
         name=name,
     )
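Assuming the `new` defined above and its imports (`tf`, `tfd`, `dist_util`, `ZeroInflated`) are in scope, a minimal usage sketch with hypothetical shapes shows how the last axis of `params` maps onto the two parameter blocks:

import tensorflow as tf

# Hypothetical shapes: a batch of 32 samples with event_shape=(5,), so the last
# axis of `params` must be 2 * 5 = 10 wide (log-rate block, then zero-inflation logits).
params = tf.random.normal([32, 10])
dist = new(params, event_shape=(5,))   # the `new` defined above
x = dist.sample()                      # shape (32, 5)
print(dist.log_prob(x).shape)          # (32,): event dimensions are summed out by Independent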
Example 2
    def new(params, event_shape=(), validate_args=False, name=None):
        """Create the distribution instance from a `params` vector."""
        from odin.bay.distributions import ZeroInflated

        with tf.compat.v1.name_scope(name, 'ZeroInflatedPoisson',
                                     [params, event_shape]):
            params = tf.convert_to_tensor(value=params, name='params')
            event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
                value=event_shape, name='event_shape', dtype=tf.int32),
                                                     tensor_name='event_shape')
            output_shape = tf.concat([
                tf.shape(input=params)[:-1],
                event_shape,
            ],
                                     axis=0)
            (log_rate_params, logits_params) = tf.split(params, 2, axis=-1)
            zip_poisson = ZeroInflated(
                count_distribution=tfd.Poisson(
                    log_rate=tf.reshape(log_rate_params, output_shape),
                    validate_args=validate_args),
                logits=tf.reshape(logits_params, output_shape),
                validate_args=validate_args)
            return tfd.Independent(
                zip_poisson,
                reinterpreted_batch_ndims=tf.size(input=event_shape),
                validate_args=validate_args)
Example 3
    def new(params,
            event_shape=(),
            given_log_count=True,
            validate_args=False,
            name=None):
        """Create the distribution instance from a `params` vector."""
        from odin.bay.distributions import ZeroInflated

        with tf.compat.v1.name_scope(name, 'ZeroInflatedNegativeBinomial',
                                     [params, event_shape]):
            params = tf.convert_to_tensor(value=params, name='params')
            event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
                value=event_shape, name='event_shape', dtype=tf.int32),
                                                     tensor_name='event_shape')
            output_shape = tf.concat([
                tf.shape(input=params)[:-1],
                event_shape,
            ],
                                     axis=0)
            (total_count_params, logits_params,
             rate_params) = tf.split(params, 3, axis=-1)
            if given_log_count:
                total_count_params = tf.exp(total_count_params,
                                            name='total_count')
            nb = tfd.NegativeBinomial(
                total_count=tf.reshape(total_count_params, output_shape),
                logits=tf.reshape(logits_params, output_shape),
                validate_args=validate_args)
            zinb = ZeroInflated(count_distribution=nb,
                                logits=tf.reshape(rate_params, output_shape),
                                validate_args=validate_args)
            return tfd.Independent(
                zinb,
                reinterpreted_batch_ndims=tf.size(input=event_shape),
                validate_args=validate_args)
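The split above expects three equally sized blocks along the last axis of `params` (total counts, NB logits, zero-inflation logits). A hypothetical size helper, not taken from the source, makes the arithmetic explicit:

import numpy as np

def zinb_params_size(event_shape=()):
    """Hypothetical helper: width of the last axis of `params` expected by `new` above."""
    event_size = int(np.prod(event_shape))
    return 3 * event_size  # total_count block + NB logits block + zero-inflation logits block

assert zinb_params_size((10,)) == 30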
Example 4
 def new(params,
         event_shape=(),
         given_logits=True,
         validate_args=False,
         name='ZIBernoulliLayer'):
   """Create the distribution instance from a `params` vector."""
   params = tf.convert_to_tensor(value=params, name='params')
   event_shape = dist_util.expand_to_vector(
       tf.convert_to_tensor(value=event_shape,
                            name='event_shape',
                            dtype=tf.int32),
       tensor_name='event_shape',
   )
   output_shape = tf.concat(
       [tf.shape(input=params)[:-1], event_shape],
       axis=0,
   )
   (bernoulli_params, rate_params) = tf.split(params, 2, axis=-1)
   bernoulli_params = tf.reshape(bernoulli_params, output_shape)
   bern = tfd.Bernoulli(logits=bernoulli_params if given_logits else None,
                        probs=bernoulli_params if not given_logits else None,
                        validate_args=validate_args)
   zibern = ZeroInflated(count_distribution=bern,
                         logits=tf.reshape(rate_params, output_shape),
                         validate_args=validate_args)
   return tfd.Independent(zibern,
                          reinterpreted_batch_ndims=tf.size(input=event_shape),
                          name=name)
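A brief sketch of the `given_logits` switch, assuming the `new` above and its dependencies are importable; when `given_logits=False` the Bernoulli block of `params` is read as probabilities and must already lie in [0, 1]:

import tensorflow as tf

event_size = 4
raw = tf.random.normal([8, 2 * event_size])   # 8 samples: Bernoulli block + zero-inflation block

# logits parameterization: unconstrained values are fine
d_logits = new(raw, event_shape=(event_size,), given_logits=True)

# probs parameterization: squash the Bernoulli block into [0, 1] first, e.g. via a sigmoid
bern_block = tf.sigmoid(raw[..., :event_size])
d_probs = new(tf.concat([bern_block, raw[..., event_size:]], axis=-1),
              event_shape=(event_size,), given_logits=False)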
Example 5
 def new(params,
         event_shape=(),
         mean_activation=tf.nn.softplus,
         disp_activation=softplus1,
         validate_args=False,
         name="ZINegativeBinomialDispLayer",
         disp=None,
         rate=None):
     r"""Create the distribution instance from a `params` vector."""
     params = tf.convert_to_tensor(value=params, name='params')
     event_shape = dist_util.expand_to_vector(
         tf.convert_to_tensor(value=event_shape,
                              name='event_shape',
                              dtype=tf.int32),
         tensor_name='event_shape',
     )
     output_shape = tf.concat((tf.shape(input=params)[:-1], event_shape),
                              axis=0)
     ### splitting the parameters
     if disp is None:  # full dispersion
         if rate is None:
             loc, disp, rate = tf.split(params, 3, axis=-1)
             rate = tf.reshape(rate, output_shape)
         else:
             loc, disp = tf.split(params, 2, axis=-1)
         disp = tf.reshape(disp, output_shape)
     else:  # share dispersion
         if rate is None:
             loc, rate = tf.split(params, 2, axis=-1)
             rate = tf.reshape(rate, output_shape)
         else:
             loc = params
      # map the mean and dispersion onto the positive count scale via their activations
     loc = tf.reshape(loc, output_shape)
     loc = mean_activation(loc)
     disp = disp_activation(disp)
     # create the distribution
     nb = NegativeBinomialDisp(loc=loc,
                               disp=disp,
                               validate_args=validate_args)
     zinb = ZeroInflated(count_distribution=nb,
                         logits=rate,
                         validate_args=validate_args)
     return tfd.Independent(
         zinb,
         reinterpreted_batch_ndims=tf.size(input=event_shape),
         name=name)
Example 6
 def new(params,
         event_shape=(),
         count_activation=tf.exp,
         validate_args=False,
         name="ZINegativeBinomialLayer",
         disp=None,
         rate=None):
     r"""Create the distribution instance from a `params` vector."""
     params = tf.convert_to_tensor(value=params, name='params')
     event_shape = dist_util.expand_to_vector(
         tf.convert_to_tensor(value=event_shape,
                              name='event_shape',
                              dtype=tf.int32),
         tensor_name='event_shape',
     )
     output_shape = tf.concat((tf.shape(input=params)[:-1], event_shape),
                              axis=0)
     if disp is None:  # full dispersion
         if rate is None:
             total_count, logits, rate = tf.split(params, 3, axis=-1)
             rate = tf.reshape(rate, output_shape)
         else:
             total_count, logits = tf.split(params, 2, axis=-1)
         logits = tf.reshape(logits, output_shape)
     else:  # share dispersion
         if rate is None:
             total_count, rate = tf.split(params, 2, axis=-1)
             rate = tf.reshape(rate, output_shape)
         else:
             total_count = params
         logits = disp
     total_count = tf.reshape(total_count, output_shape)
     total_count = count_activation(total_count)
     nb = tfd.NegativeBinomial(total_count=total_count,
                               logits=logits,
                               validate_args=validate_args)
     zinb = ZeroInflated(count_distribution=nb,
                         logits=rate,
                         validate_args=validate_args)
     return tfd.Independent(
         zinb,
         reinterpreted_batch_ndims=tf.size(input=event_shape),
         name=name)
Example 7
 def new(params,
         event_shape=(),
         mean_activation=tf.nn.softplus,
         disp_activation=softplus1,
         dispersion='full',
         validate_args=False,
         name="ZINegativeBinomialDispLayer"):
   """Create the distribution instance from a `params` vector."""
   params = tf.convert_to_tensor(value=params, name='params')
   event_shape = dist_util.expand_to_vector(
       tf.convert_to_tensor(value=event_shape,
                            name='event_shape',
                            dtype=tf.int32),
       tensor_name='event_shape',
   )
   output_shape = tf.concat(
       [tf.shape(input=params)[:-1], event_shape],
       axis=0,
   )
   # splitting the parameters
   (loc_params, disp_params, rate_params) = tf.split(params, 3, axis=-1)
   if dispersion == 'single':
     disp_params = tf.reduce_mean(disp_params)
   elif dispersion == 'share':
     disp_params = tf.reduce_mean(disp_params,
                                  axis=tf.range(0,
                                                output_shape.shape[0] - 1,
                                                dtype='int32'),
                                  keepdims=True)
    # map the mean and dispersion onto the positive count scale via their activations
   loc_params = mean_activation(loc_params)
   disp_params = disp_activation(disp_params)
   # create the distribution
   nb = NegativeBinomialDisp(loc=tf.reshape(loc_params, output_shape),
                             disp=tf.reshape(disp_params, output_shape)
                             if dispersion == 'full' else disp_params,
                             validate_args=validate_args)
   zinb = ZeroInflated(count_distribution=nb,
                       logits=tf.reshape(rate_params, output_shape),
                       validate_args=validate_args)
   return tfd.Independent(zinb,
                          reinterpreted_batch_ndims=tf.size(input=event_shape),
                          name=name)
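The three `dispersion` modes only differ in how much of the dispersion block survives the reduction: 'full' keeps a value per sample and feature, 'share' averages over the batch axes so one vector is shared across samples, and 'single' collapses everything to a scalar. A standalone sketch with hypothetical shapes:

import tensorflow as tf

disp = tf.random.normal([32, 10])                        # hypothetical (batch, features) dispersion block

full = disp                                              # 'full'  : (32, 10), per sample and per feature
share = tf.reduce_mean(disp, axis=[0], keepdims=True)    # 'share' : (1, 10), one vector broadcast over the batch
single = tf.reduce_mean(disp)                            # 'single': scalar shared by everything

print(full.shape, share.shape, single.shape)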
Example 8
 def new(params,
         event_shape=(),
         count_activation=tf.exp,
         dispersion='full',
         validate_args=False,
         name="ZINegativeBinomialLayer"):
   r"""Create the distribution instance from a `params` vector."""
   params = tf.convert_to_tensor(value=params, name='params')
   event_shape = dist_util.expand_to_vector(
       tf.convert_to_tensor(value=event_shape,
                            name='event_shape',
                            dtype=tf.int32),
       tensor_name='event_shape',
   )
   output_shape = tf.concat(
       [tf.shape(input=params)[:-1], event_shape],
       axis=0,
   )
   ndims = output_shape.shape[0]
   (total_count_params, logits_params, rate_params) = tf.split(params,
                                                               3,
                                                               axis=-1)
   if dispersion == 'single':
     logits_params = tf.reduce_mean(logits_params)
   elif dispersion == 'share':
     logits_params = tf.reduce_mean(logits_params,
                                    axis=tf.range(0, ndims - 1, dtype='int32'),
                                    keepdims=True)
   total_count_params = count_activation(total_count_params)
   nb = tfd.NegativeBinomial(total_count=tf.reshape(total_count_params,
                                                    output_shape),
                             logits=tf.reshape(logits_params, output_shape)
                             if dispersion == 'full' else logits_params,
                             validate_args=validate_args)
   zinb = ZeroInflated(count_distribution=nb,
                       logits=tf.reshape(rate_params, output_shape),
                       validate_args=validate_args)
   return tfd.Independent(zinb,
                          reinterpreted_batch_ndims=tf.size(input=event_shape),
                          name=name)
Example 9
 def new(params,
         event_shape=(),
         n_components=2,
         mean_activation=softplus1,
         disp_activation=tf.identity,
         alternative=False,
         zero_inflated=False,
         validate_args=False,
         logits=None,
         mean=None,
         disp=None,
         rate=None):
     r""" Create the distribution instance from a `params` vector. """
     n_components = tf.convert_to_tensor(value=n_components,
                                         name='n_components',
                                         dtype_hint=tf.int32)
     event_size = tf.convert_to_tensor(
         tf.reduce_prod(event_shape),
         dtype_hint=tf.int32,
         name='event_size',
     )
     ### prepare params
     params = tf.convert_to_tensor(value=params, name='params')
     event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
         value=event_shape, name='event_shape', dtype=tf.int32),
                                              tensor_name='event_shape')
     output_shape = tf.concat([
         tf.shape(input=params)[:-1],
         [n_components],
         event_shape,
     ],
                              axis=0)
     ### Create the mixture
     if logits is None:
         logits = params[..., :n_components]
         params = params[..., n_components:]
     mixture = tfp.distributions.Categorical(logits=logits,
                                             name="MixtureWeights")
     ### zero_inflated
     if zero_inflated:
         if mean is None:
             mean = params[..., :n_components * event_size]
             mean = tf.reshape(mean, output_shape)
             params = params[..., n_components * event_size:]
         disp, rate = _to_loc_scale(lambda: tf.split(params, 2, axis=-1),
                                    params,
                                    loc=disp,
                                    scale=rate,
                                    loc_shape=output_shape,
                                    scale_shape=output_shape)
     else:  # negative binomial
         rate = None
         mean, disp = _to_loc_scale(lambda: tf.split(params, 2, axis=-1),
                                    params,
                                    loc=mean,
                                    scale=disp,
                                    loc_shape=output_shape,
                                    scale_shape=output_shape)
     ### applying activation
     mean = mean_activation(mean)
     disp = disp_activation(disp)
     ### alternative parameterization
     if alternative:
         NBtype = NegativeBinomialDisp
         name = 'NegBinDisp'
     else:
         NBtype = tfp.distributions.NegativeBinomial
         name = 'NegBin'
     components = tfp.distributions.Independent(
         NBtype(mean, disp, validate_args=validate_args),
         reinterpreted_batch_ndims=tf.size(input=event_shape),
         validate_args=validate_args)
     ### zero-inflated
     if zero_inflated:
         name = 'ZI' + name
         components = ZeroInflated(count_distribution=components,
                                   logits=rate,
                                   validate_args=False)
     return tfp.distributions.MixtureSameFamily(mixture,
                                                components,
                                                validate_args=False,
                                                name='Mixture%s' % name)
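The mixture layout above places `n_components` mixture logits first, followed by per-component parameter blocks, so the expected width of the last axis is `n_components + k * n_components * event_size`, with k = 2 for the plain negative-binomial mixture (mean, dispersion) and k = 3 when `zero_inflated=True` (mean, dispersion, zero-inflation logits). A hypothetical size helper, not part of the source:

import numpy as np

def mixture_params_size(event_shape=(), n_components=2, zero_inflated=False):
    """Hypothetical helper: expected width of the last axis of `params` for the mixture `new` above."""
    event_size = int(np.prod(event_shape))
    blocks = 3 if zero_inflated else 2       # (mean, disp) or (mean, disp, rate) per component
    return n_components + blocks * n_components * event_size

assert mixture_params_size((10,), n_components=2) == 42
assert mixture_params_size((10,), n_components=2, zero_inflated=True) == 62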
Example 10
assert_consistent_statistics(Bernoulli(logits=logits),
                             Bernoulli(logits=logits))
assert_consistent_statistics(
    Independent(Bernoulli(probs=probs), reinterpreted_batch_ndims=1),
    Independent(Bernoulli(logits=logits), reinterpreted_batch_ndims=1))

assert_consistent_statistics(
    NegativeBinomial(total_count=count, logits=logits),
    NegativeBinomial(total_count=count, probs=probs))
assert_consistent_statistics(
    Independent(NegativeBinomial(total_count=count, logits=logits),
                reinterpreted_batch_ndims=1),
    Independent(NegativeBinomial(total_count=count, probs=probs),
                reinterpreted_batch_ndims=1))
assert_consistent_statistics(
    ZeroInflated(NegativeBinomial(total_count=count, logits=logits),
                 logits=logits),
    ZeroInflated(NegativeBinomial(total_count=count, probs=probs),
                 probs=probs))
assert_consistent_statistics(
    Independent(ZeroInflated(NegativeBinomial(total_count=count,
                                              logits=logits),
                             logits=logits),
                reinterpreted_batch_ndims=1),
    Independent(ZeroInflated(NegativeBinomial(total_count=count, probs=probs),
                             probs=probs),
                reinterpreted_batch_ndims=1))
assert_consistent_statistics(
    ZeroInflated(Independent(NegativeBinomial(total_count=count,
                                              logits=logits),
                             reinterpreted_batch_ndims=1),
                 logits=logits),
    ZeroInflated(Independent(NegativeBinomial(total_count=count, probs=probs),
                             reinterpreted_batch_ndims=1),
                 probs=probs))
Example 11
  def new(
      params,
      event_shape=(),
      n_components=2,
      mean_activation=softplus1,
      disp_activation=tf.identity,
      dispersion='full',
      alternative=False,
      zero_inflated=False,
      validate_args=False,
  ):
    """Create the distribution instance from a `params` vector."""
    params = tf.convert_to_tensor(value=params, name='params')
    n_components = tf.convert_to_tensor(value=n_components,
                                        name='n_components',
                                        dtype_hint=tf.int32)
    event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
        value=event_shape, name='event_shape', dtype=tf.int32),
                                             tensor_name='event_shape')
    output_shape = tf.concat([
        tf.shape(input=params)[:-1],
        [n_components],
        event_shape,
    ],
                             axis=0)
    mixture = tfp.distributions.Categorical(logits=params[..., :n_components])
    if zero_inflated:
      mean, disp, rate = tf.split(params[..., n_components:], 3, axis=-1)
      rate = tf.reshape(rate, output_shape)
    else:
      mean, disp = tf.split(params[..., n_components:], 2, axis=-1)
      rate = None
    mean = tf.reshape(mean, output_shape)
    disp = tf.reshape(disp, output_shape)

    if dispersion == 'single':
      disp = tf.reduce_mean(disp)
    elif dispersion == 'share':
      disp = tf.reduce_mean(disp,
                            axis=tf.range(0,
                                          output_shape.shape[0] - 1,
                                          dtype='int32'),
                            keepdims=True)
    mean = mean_activation(mean)
    disp = disp_activation(disp)

    if alternative:
      NBtype = NegativeBinomialDisp
      name = 'NegBinDisp'
    else:
      NBtype = tfp.distributions.NegativeBinomial
      name = 'NegBin'
    components = tfp.distributions.Independent(
        NBtype(mean, disp, validate_args=validate_args),
        reinterpreted_batch_ndims=tf.size(input=event_shape),
        validate_args=validate_args)
    if zero_inflated:
      name = 'ZI' + name
      components = ZeroInflated(count_distribution=components,
                                logits=rate,
                                validate_args=False)
    return tfp.distributions.MixtureSameFamily(mixture,
                                               components,
                                               validate_args=False,
                                               name='Mixture%s' % name)
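A quick shape check for the mixture returned above, assuming `new` and its dependencies (`tf`, `tfp`, `softplus1`, `NegativeBinomialDisp`, `ZeroInflated`) are in scope, with hypothetical sizes:

import tensorflow as tf

event_size, n_components = 10, 2
width = n_components + 2 * n_components * event_size    # mixture logits + (mean, disp) blocks
params = tf.random.normal([16, width])

dist = new(params, event_shape=(event_size,), n_components=n_components)
x = dist.sample()
print(x.shape)                  # (16, 10): the component axis is mixed out
print(dist.log_prob(x).shape)   # (16,)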
Example 12
try:
    # a single dispersion row broadcast against the full (batch, features) mean
    nb = NegativeBinomialDisp(loc=mean, disp=disp_row)
    llk1 = tf.reduce_sum(nb.log_prob(x), axis=1).numpy()
    llk2 = log_nb_positive(x=torch.Tensor(x),
                           mu=torch.Tensor(mean),
                           theta=torch.Tensor(disp_row)).numpy()
    print(np.all(np.isclose(llk1, llk2)))
except:
    print("NOT POSSIBLE TO BROADCAST the first dimension")

# all disp available
nb = NegativeBinomialDisp(loc=mean, disp=disp)
llk1 = tf.reduce_sum(nb.log_prob(x), axis=1).numpy()
llk2 = log_nb_positive(x=torch.Tensor(x),
                       mu=torch.Tensor(mean),
                       theta=torch.Tensor(disp)).numpy()
print(np.all(np.isclose(llk1, llk2)))

s1 = nb.sample().numpy()
s2 = torch_nb(mean, disp).numpy()
print(describe(s1))
print(describe(s2))

zinb = ZeroInflated(nb, probs=pi)
llk1 = tf.reduce_sum(zinb.log_prob(x), axis=1).numpy()
llk2 = log_zinb_positive(x=torch.Tensor(x),
                         mu=torch.Tensor(mean),
                         theta=torch.Tensor(disp),
                         pi=torch.Tensor(pi)).numpy()
print(llk1)
print(llk2)
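The comparison snippet above starts from pre-built toy inputs; a rough setup consistent with the shapes it uses (every name and value here is an assumption about the omitted preamble, not taken from the source):

import numpy as np

n, d = 128, 10
rng = np.random.RandomState(8)
x = rng.poisson(5.0, size=(n, d)).astype('float32')          # observed counts
mean = rng.gamma(2.0, 2.0, size=(n, d)).astype('float32')    # negative-binomial means
disp = rng.gamma(2.0, 2.0, size=(n, d)).astype('float32')    # per-entry dispersion
disp_row = disp[0]                                           # a single dispersion row, shape (d,)
pi = rng.uniform(0.1, 0.9, size=(n, d)).astype('float32')    # zero-inflation probabilities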