Example 1
def smape_loss(true, predicted, weights):
    """
    Differentiable SMAPE loss
    :param true: Truth values
    :param predicted: Predicted values
    :param weights: Weights mask to exclude some values
    :return:
    """
    epsilon = 0.1  # Smoothing factor, helps SMAPE to be well-behaved near zero
    true_o = tf.expm1(true)
    pred_o = tf.expm1(predicted)
    summ = tf.maximum(tf.abs(true_o) + tf.abs(pred_o) + epsilon, 0.5 + epsilon)
    smape = tf.abs(pred_o - true_o) / summ * 2.0
    return tf.losses.compute_weighted_loss(smape, weights, loss_collection=None)
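A minimal NumPy restatement of the same calculation (a sketch, not from the original source), assuming the series were log1p-transformed upstream; the function and toy inputs below are ad hoc names used only for illustration:

import numpy as np

def smape_loss_np(true, predicted, weights, epsilon=0.1):
    # Undo the log1p transform applied to the raw series upstream.
    true_o = np.expm1(true)
    pred_o = np.expm1(predicted)
    # Smoothed denominator keeps SMAPE well-behaved near zero.
    summ = np.maximum(np.abs(true_o) + np.abs(pred_o) + epsilon, 0.5 + epsilon)
    smape = np.abs(pred_o - true_o) / summ * 2.0
    # Weighted mean over the masked-in elements
    # (tf.losses.compute_weighted_loss applies its own reduction).
    return np.sum(smape * weights) / np.sum(weights)

true = np.log1p(np.array([10.0, 0.0, 3.0]))
pred = np.log1p(np.array([12.0, 1.0, 3.0]))
print(smape_loss_np(true, pred, weights=np.array([1.0, 1.0, 0.0])))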
Example 2
    def build_step(self, signals):
        J = signals.gather(self.J_data)
        voltage = signals.gather(self.voltage_data)
        refractory = signals.gather(self.refractory_data)

        delta_t = tf.clip_by_value(signals.dt - refractory, self.zero,
                                   signals.dt)

        dV = (voltage - J) * tf.expm1(-delta_t / self.tau_rc)
        voltage += dV

        spiked = voltage > self.one
        spikes = tf.cast(spiked, signals.dtype) * self.amplitude
        signals.scatter(self.output_data, spikes)

        partial_ref = -self.tau_rc * tf.log1p((self.one - voltage) /
                                              (J - self.one))
        # FastLIF version (linearly approximate spike time when calculating
        # remaining refractory period)
        # partial_ref = signals.dt * (voltage - self.one) / dV

        refractory = tf.where(spiked, self.tau_ref - partial_ref,
                              refractory - signals.dt)

        signals.mark_gather(self.J_data)
        signals.scatter(self.refractory_data, refractory)

        voltage = tf.where(spiked, self.zeros,
                           tf.maximum(voltage, self.min_voltage))
        signals.scatter(self.voltage_data, voltage)
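The expm1 update above is the exact integration of the LIF membrane equation dV/dt = (J - V) / tau_rc over the step delta_t. A short NumPy check (illustrative only, ad hoc values) that voltage + (voltage - J) * expm1(-dt / tau_rc) equals the analytic solution J + (voltage - J) * exp(-dt / tau_rc):

import numpy as np

tau_rc, dt = 0.02, 0.001
voltage = np.array([0.1, 0.4, 0.9])   # current membrane voltages
J = np.array([1.2, 0.3, 1.5])         # input currents

update = voltage + (voltage - J) * np.expm1(-dt / tau_rc)    # as in build_step
closed_form = J + (voltage - J) * np.exp(-dt / tau_rc)       # analytic solution

print(np.allclose(update, closed_form))  # True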
Example 3
def calc_smape_rounded(true, predicted, weights):
    """
    Calculates SMAPE on rounded submission values. Should be close to the official SMAPE score in the competition
    :param true:
    :param predicted:
    :param weights: Weights mask to exclude some values
    :return:
    """
    n_valid = tf.reduce_sum(weights)
    true_o = tf.round(tf.expm1(true))
    pred_o = tf.maximum(tf.round(tf.expm1(predicted)), 0.0)
    summ = tf.abs(true_o) + tf.abs(pred_o)
    zeros = summ < 0.01
    raw_smape = tf.abs(pred_o - true_o) / summ * 2.0
    smape = tf.where(zeros, tf.zeros_like(summ, dtype=tf.float32), raw_smape)
    return tf.reduce_sum(smape * weights) / n_valid
Example 4
 def _inverse(self, y):
   y = self._maybe_assert_valid_y(y)
   if self.power == 0.:
     return tf.log(y)
   # If large y accuracy is an issue, consider using:
   # (y**self.power - 1.) / self.power when y >> 1.
   return tf.expm1(tf.log(y) * self.power) / self.power
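The returned value is algebraically (y**power - 1) / power, the inverse of a Box-Cox-style power transform; writing it as expm1(log(y) * power) keeps it accurate when y**power is close to 1. A quick NumPy check (illustrative, ad hoc values):

import numpy as np

y = np.array([0.5, 1.0, 2.0, 10.0])
power = 0.3

stable = np.expm1(np.log(y) * power) / power   # as in _inverse
naive = (y ** power - 1.0) / power

print(np.allclose(stable, naive))  # True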
Example 5
def total_variation(logu, name=None):
  """The Total Variation Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Total-Variation Csiszar-function is:

  ```none
  f(u) = 0.5 |u - 1|
  ```

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    total_variation_of_u: `float`-like `Tensor` of the Csiszar-function
      evaluated at `u = exp(logu)`.
  """

  with tf.name_scope(name, "total_variation", [logu]):
    logu = tf.convert_to_tensor(logu, name="logu")
    return 0.5 * tf.abs(tf.expm1(logu))
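Since u = exp(logu), 0.5 * abs(expm1(logu)) is exactly the docstring's f(u) = 0.5 |u - 1|. A one-line NumPy sanity check (illustrative):

import numpy as np

logu = np.array([-2.0, -0.1, 0.0, 0.1, 2.0])
u = np.exp(logu)
print(np.allclose(0.5 * np.abs(np.expm1(logu)), 0.5 * np.abs(u - 1.0)))  # True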
Example 6
 def _inverse(self, y):
     y = self._maybe_assert_valid_y(y)
     if self.power == 0.:
         return tf.log(y)
     # If large y accuracy is an issue, consider using:
     # (y**self.power - 1.) / self.power when y >> 1.
     return tf.expm1(tf.log(y) * self.power) / self.power
Example 7
def _kl_gumbel_gumbel(a, b, name=None):
    """Calculate the batched KL divergence KL(a || b) with a and b Gumbel.

  Args:
    a: instance of a Gumbel distribution object.
    b: instance of a Gumbel distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_gumbel_gumbel".

  Returns:
    Batchwise KL(a || b)
  """
    with tf.name_scope(name, "kl_gumbel_gumbel",
                       [a.loc, b.loc, a.scale, b.scale]):
        # Consistent with
        # http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 64
        # The paper uses beta to refer to scale and mu to refer to loc.
        # There is actually an error in the solution as printed; this is based on
        # the second-to-last step of the derivation. The value as printed would be
        # off by (a.loc - b.loc) / b.scale.
        return (tf.log(b.scale) - tf.log(a.scale) + np.euler_gamma *
                (a.scale / b.scale - 1.) +
                tf.expm1((b.loc - a.loc) / b.scale +
                         tf.lgamma(a.scale / b.scale + 1.)) +
                (a.loc - b.loc) / b.scale)
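A Monte Carlo cross-check of this closed form (a sketch, not from the source; the parameter values are arbitrary): sampling from Gumbel a and averaging log p_a(x) - log p_b(x) should approach the analytic KL.

import numpy as np
from math import lgamma

loc_a, scale_a = 0.5, 1.0
loc_b, scale_b = 0.0, 2.0

# Closed form, mirroring _kl_gumbel_gumbel.
kl_exact = (np.log(scale_b) - np.log(scale_a)
            + np.euler_gamma * (scale_a / scale_b - 1.0)
            + np.expm1((loc_b - loc_a) / scale_b + lgamma(scale_a / scale_b + 1.0))
            + (loc_a - loc_b) / scale_b)

# Monte Carlo estimate of E_a[log p_a(X) - log p_b(X)].
def gumbel_logpdf(x, loc, scale):
    z = (x - loc) / scale
    return -(z + np.exp(-z)) - np.log(scale)

x = np.random.default_rng(0).gumbel(loc_a, scale_a, size=1_000_000)
kl_mc = np.mean(gumbel_logpdf(x, loc_a, scale_a) - gumbel_logpdf(x, loc_b, scale_b))

print(kl_exact, kl_mc)  # should agree to roughly three decimal places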
Example 8
def calc_smape_rounded(true, predicted, weights):
    """
    Calculates SMAPE on rounded submission values. Should be close to the official SMAPE score in the competition
    :param true:
    :param predicted:
    :param weights: Weights mask to exclude some values
    :return:
    """
    n_valid = tf.reduce_sum(weights)
    true_o = tf.round(tf.expm1(true))
    pred_o = tf.maximum(tf.round(tf.expm1(predicted)), 0.0)
    summ = tf.abs(true_o) + tf.abs(pred_o)
    zeros = summ < 0.01
    raw_smape = tf.abs(pred_o - true_o) / summ * 2.0
    smape = tf.where(zeros, tf.zeros_like(summ, dtype=tf.float32), raw_smape)
    return tf.reduce_sum(smape * weights) / n_valid
Example 9
def chi_square(logu, name=None):
  """The chi-Square Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Chi-square Csiszar-function is:

  ```none
  f(u) = u**2 - 1
  ```

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    chi_square_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

  with tf.name_scope(name, "chi_square", [logu]):
    logu = tf.convert_to_tensor(logu, name="logu")
    return tf.expm1(2. * logu)
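Here expm1(2 * logu) is exp(logu)**2 - 1 = u**2 - 1, the f(u) in the docstring. A short NumPy check (illustrative):

import numpy as np

logu = np.linspace(-1.5, 1.5, 7)
u = np.exp(logu)
print(np.allclose(np.expm1(2.0 * logu), u ** 2 - 1.0))  # True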
Example 10
def total_variation(logu, name=None):
  """The Total Variation Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Total-Variation Csiszar-function is:

  ```none
  f(u) = 0.5 |u - 1|
  ```

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    total_variation_of_u: `float`-like `Tensor` of the Csiszar-function
      evaluated at `u = exp(logu)`.
  """

  with tf.name_scope(name, "total_variation", [logu]):
    logu = tf.convert_to_tensor(logu, name="logu")
    return 0.5 * tf.abs(tf.expm1(logu))
Example 11
def smape_loss(true, predicted, weights):
    """
    Differentiable SMAPE loss
    :param true: Truth values
    :param predicted: Predicted values
    :param weights: Weights mask to exclude some values
    :return:
    """
    epsilon = 0.1  # Smoothing factor, helps SMAPE to be well-behaved near zero
    true_o = tf.expm1(true)
    pred_o = tf.expm1(predicted)
    summ = tf.maximum(tf.abs(true_o) + tf.abs(pred_o) + epsilon, 0.5 + epsilon)
    smape = tf.abs(pred_o - true_o) / summ * 2.0
    return tf.losses.compute_weighted_loss(smape,
                                           weights,
                                           loss_collection=None)
Example 12
def chi_square(logu, name=None):
  """The chi-Square Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Chi-square Csiszar-function is:

  ```none
  f(u) = u**2 - 1
  ```

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    chi_square_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

  with tf.name_scope(name, "chi_square", [logu]):
    logu = tf.convert_to_tensor(logu, name="logu")
    return tf.expm1(2. * logu)
Example 13
    def _step(self, J, voltage, refractory, dt):
        delta_t = tf.clip_by_value(dt - refractory, self.zero, dt)

        dV = (voltage - J) * tf.expm1(-delta_t / self.tau_rc)
        voltage += dV

        spiked = voltage > self.one
        spikes = tf.cast(spiked, J.dtype) * self.alpha

        partial_ref = -self.tau_rc * tf.log1p((self.one - voltage) /
                                              (J - self.one))
        # FastLIF version (linearly approximate spike time when calculating
        # remaining refractory period)
        # partial_ref = signals.dt * (voltage - self.one) / dV

        refractory = tf.where(spiked, self.tau_ref - partial_ref,
                              refractory - dt)

        voltage = tf.where(spiked, self.zeros,
                           tf.maximum(voltage, self.min_voltage))

        # we use stop_gradient to avoid propagating any nans (those get
        # propagated through the cond even if the spiking version isn't
        # being used at all)
        return (tf.stop_gradient(spikes), tf.stop_gradient(voltage),
                tf.stop_gradient(refractory))
Example 14
 def model_fn(features, labels, mode, params, config):
     is_training = True if mode == estimator.ModeKeys.TRAIN else False
     if mode != estimator.ModeKeys.PREDICT:
         features = self._parse_sequence_weight(features)
         features = self.sparse2dense(features,
                                      self._dataset.varlen_list)
     features = self._dense2sparse(features, self._dataset.varlen_list)
     network = self._Network(self._flags, self._dataset, 'network')
     dense, embeddings = network.build_features(features)
     network_out = network(dense, embeddings, is_training)
      # Keep the regression output non-negative
     predictions = tf.maximum(
         tf.keras.layers.Dense(1, activation=None,
                               name='output')(network_out), 0.)
     if mode == estimator.ModeKeys.PREDICT:
         outputs = {
             "predictions": predictions,
             self._flags.label_key: tf.expm1(predictions)
         }
         self._output_cols = list(outputs.keys())
         return estimator.EstimatorSpec(mode, predictions=outputs)
     loss = self._build_regression_loss(labels, predictions)
     metrics = self._build_regression_metrics(loss, labels, predictions,
                                              self._flags.label_key)
     self._build_summary(loss, metrics)
     if mode == estimator.ModeKeys.EVAL:
         return estimator.EstimatorSpec(mode,
                                        loss=loss,
                                        eval_metric_ops=metrics)
     assert mode == estimator.ModeKeys.TRAIN
     train_op = self._build_train_op(loss)
     return estimator.EstimatorSpec(mode=mode,
                                    loss=loss,
                                    train_op=train_op)
Example 15
def denorm(logmagnitude):
    '''
    Exp(logmagnitude) - 1
    :param logmagnitude: Log-normalized magnitude spectrogram
    :return: Unnormalized magnitude spectrogram
    '''
    return tf.expm1(logmagnitude)
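denorm simply inverts a log1p normalization: expm1(log1p(m)) recovers m. A NumPy round-trip check (illustrative; the fake spectrogram is arbitrary):

import numpy as np

magnitude = np.abs(np.random.default_rng(0).normal(size=(4, 8)))  # fake magnitude spectrogram
logmagnitude = np.log1p(magnitude)                                # normalization step
print(np.allclose(np.expm1(logmagnitude), magnitude))             # True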
Example 16
def calc_differentiable_mape_loss(true_y, predictions):
    """
    Calculate the differentiable MAPE loss.
    """

    # calculate loss
    mask = tf.logical_not(tf.math.equal(true_y, tf.zeros_like(true_y)))
    # Fill NaNs by zeros (can use any value)
    # Assign zero weight to zeros, will not calculate loss for those true_y.
    weights = tf.to_float(mask)
    # mape_loss
    epsilon = 0.1  # Smoothing factor, helps SMAPE to be well-behaved near zero
    true_o = tf.expm1(true_y)
    pred_o = tf.expm1(predictions)
    mape_loss_origin = tf.abs(pred_o - true_o) / (tf.abs(true_o) + epsilon)
    mape_loss = tf.losses.compute_weighted_loss(mape_loss_origin, weights, loss_collection=None)
    return mape_loss
Example 17
def log1mexp(input_a):
    # input_a: positive
    # return the same shape as input
    result = slicing_where(condition=tf.less_equal(input_a, tf.log(2.0)),
                           full_input=-input_a,
                           true_branch=lambda x: tf.log(-tf.expm1(x)),
                           false_branch=lambda x: tf.log1p(-tf.exp(x)))
    return result
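This is the standard numerically stable evaluation of log(1 - exp(-a)) for a > 0: the expm1 branch is accurate when a is small (exp(-a) close to 1), the log1p branch when a is large (exp(-a) close to 0), with the switch at a = log 2. A NumPy sketch of the same branching (illustrative; unlike slicing_where, np.where evaluates both branches, which is harmless here since both are finite for a > 0):

import numpy as np

def log1mexp_np(a):
    # a > 0; computes log(1 - exp(-a)) without catastrophic cancellation.
    a = np.asarray(a, dtype=float)
    return np.where(a <= np.log(2.0),
                    np.log(-np.expm1(-a)),   # accurate when exp(-a) is near 1
                    np.log1p(-np.exp(-a)))   # accurate when exp(-a) is near 0

print(log1mexp_np([1e-8, 0.5, 50.0]))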
Example 18
def smape_loss(true, predicted, weights):
    """
    Differentiable SMAPE loss
    :param true: Truth values
    :param predicted: Predicted values
    :param weights: Weights mask to exclude some values
    :return:
    """
    epsilon = 0.1  # Smoothing factor, helps SMAPE to be well-behaved near zero
    # expm1: natural exponential minus one, i.e. e^x - 1
    true_o = tf.expm1(true)
    pred_o = tf.expm1(predicted)
    summ = tf.maximum(tf.abs(true_o) + tf.abs(pred_o) + epsilon, 0.5 + epsilon)
    smape = tf.abs(pred_o - true_o) / summ * 2.0
    # Why this loss? SMAPE (Symmetric Mean Absolute Percentage Error): https://blog.csdn.net/guolindonggld/article/details/87856780
    # tf.losses.compute_weighted_loss returns a weighted loss tensor of the same type as the losses:
    # with reduction NONE its shape matches the losses, otherwise it is a scalar.
    return tf.losses.compute_weighted_loss(smape,
                                           weights,
                                           loss_collection=None)
Example 19
 def _cdf(self, x):
     if self.validate_args:
         x = distribution_util.embed_check_nonnegative_integer_form(x)
     else:
         # Whether or not x is integer-form, the following is well-defined.
         # However, scipy takes the floor, so we do too.
         x = tf.floor(x)
     x *= tf.ones_like(self.probs)
     return tf.where(x < 0., tf.zeros_like(x), -tf.expm1(
         (1. + x) * tf.log1p(-self.probs)))
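The expression -expm1((1 + x) * log1p(-p)) is 1 - (1 - p)**(x + 1), i.e. the geometric CDF P(X <= x) when X counts failures before the first success. A NumPy check (illustrative):

import numpy as np

p = 0.3
x = np.floor(np.array([0.0, 1.7, 4.0, 9.2]))   # floor, as in _cdf

stable = -np.expm1((1.0 + x) * np.log1p(-p))
direct = 1.0 - (1.0 - p) ** (x + 1.0)

print(np.allclose(stable, direct))  # True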
Example 20
def sonify(spectrogram, samples, transform_op_fn, logscaled=True):
    graph = tf.Graph()
    with graph.as_default():

        noise = tf.Variable(tf.random_normal([samples], stddev=1e-6))

        x = transform_op_fn(noise)
        y = spectrogram

        if logscaled:
            x = tf.expm1(x)
            y = tf.expm1(y)

        # tf.nn.normalize arguments changed between versions...
        def normalize(a):
            return a / tf.sqrt(tf.maximum(tf.reduce_sum(a**2, axis=0), 1E-12))

        x = normalize(x)
        y = normalize(y)
        tf.losses.mean_squared_error(x, y[-tf.shape(x)[0]:])

        optimizer = tf.contrib.opt.ScipyOptimizerInterface(
            loss=tf.losses.get_total_loss(),
            var_list=[noise],
            tol=1e-16,
            method='L-BFGS-B',
            options={
                'maxiter': sonify_steps,
                'disp': True
            })

    # THIS REALLY SHOULDN'T RUN ON GPU BUT SEEMS TO?
    config = tf.ConfigProto(device_count={
        'CPU': 1,
        'GPU': 0
    },
                            allow_soft_placement=True,
                            log_device_placement=False)
    with tf.Session(config=config, graph=graph) as session:
        session.run(tf.global_variables_initializer())
        optimizer.minimize(session)
        waveform = session.run(noise)
    return waveform
Example 21
 def _cdf(self, x):
   if self.validate_args:
     x = distribution_util.embed_check_nonnegative_integer_form(x)
   else:
     # Whether or not x is integer-form, the following is well-defined.
     # However, scipy takes the floor, so we do too.
     x = tf.floor(x)
   x *= tf.ones_like(self.probs)
   return tf.where(x < 0., tf.zeros_like(x), -tf.expm1(
       (1. + x) * tf.log1p(-self.probs)))
Example 22
def t_power(logu, t, self_normalized=False, name=None):
  """The T-Power Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True` the T-Power Csiszar-function is:

  ```none
  f(u) = s [ u**t - 1 - t(u - 1) ]
  s = { -1   0 < t < 1
      { +1   otherwise
  ```

  When `self_normalized = False` the `- t(u - 1)` term is omitted.

  This is similar to the `amari_alpha` Csiszar-function, with the associated
  divergence being the same up to factors depending only on `t`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    t:  `Tensor` of same `dtype` as `logu` and broadcastable shape.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    t_power_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """
  with tf.name_scope(name, "t_power", [logu, t]):
    logu = tf.convert_to_tensor(logu, name="logu")
    t = tf.convert_to_tensor(t, dtype=logu.dtype.base_dtype, name="t")
    fu = tf.expm1(t * logu)
    if self_normalized:
      fu -= t * tf.expm1(logu)
    fu *= tf.where(tf.logical_and(0. < t, t < 1.),
                   -tf.ones_like(t),
                   tf.ones_like(t))
    return fu
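Both expm1 calls rewrite the docstring formula exactly: expm1(t * logu) = u**t - 1 and expm1(logu) = u - 1, so with self_normalized=True the result is s * [u**t - 1 - t * (u - 1)]. A NumPy spot check for one t in (0, 1), where the sign s is -1 (illustrative):

import numpy as np

t = 0.5
logu = np.array([-1.0, 0.0, 0.5, 2.0])
u = np.exp(logu)

fu = np.expm1(t * logu) - t * np.expm1(logu)   # self_normalized branch
fu *= -1.0                                     # s = -1 because 0 < t < 1

print(np.allclose(fu, -1.0 * (u ** t - 1.0 - t * (u - 1.0))))  # True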
Example 23
def t_power(logu, t, self_normalized=False, name=None):
  """The T-Power Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True` the T-Power Csiszar-function is:

  ```none
  f(u) = s [ u**t - 1 - t(u - 1) ]
  s = { -1   0 < t < 1
      { +1   otherwise
  ```

  When `self_normalized = False` the `- t(u - 1)` term is omitted.

  This is similar to the `amari_alpha` Csiszar-function, with the associated
  divergence being the same up to factors depending only on `t`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    t:  `Tensor` of same `dtype` as `logu` and broadcastable shape.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    t_power_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """
  with tf.name_scope(name, "t_power", [logu, t]):
    logu = tf.convert_to_tensor(logu, name="logu")
    t = tf.convert_to_tensor(t, dtype=logu.dtype.base_dtype, name="t")
    fu = tf.expm1(t * logu)
    if self_normalized:
      fu -= t * tf.expm1(logu)
    fu *= tf.where(tf.logical_and(0. < t, t < 1.),
                   -tf.ones_like(t),
                   tf.ones_like(t))
    return fu
Example 24
def to_exp_signal(s_signal):
    '''
    magnitude m -> exp(m)-1
    '''
    half_fft_sz = hparams.FFT_SIZE // 2
    ndim = len(s_signal.get_shape().as_list())
    s_signal_abs = tf.sqrt(tf.add(
        *tf.split(tf.square(s_signal), [half_fft_sz]*2, axis=ndim-1)) + hparams.EPS)
    s_signal_scale = tf.tile(
        tf.expm1(s_signal_abs) / s_signal_abs, [1] * (ndim-1) + [2])
    return s_signal_scale * s_signal
Example 25
 def rush_larsen(self, g, g_inf, g_tau, dt, name=None):
     """
          rush_larsen is a helper function implementing the Rush-Larsen
         direct integration of the gating variables
     """
     #return tf.clip_by_value(g_inf - (g_inf - g) * tf.exp(-dt/g_tau), 0.0,
     #                        1.0, name=name)
     return tf.clip_by_value(g + (g - g_inf) * tf.expm1(-dt / g_tau),
                             0.00001,
                             0.99999,
                             name=name)
Example 26
def log1mexp(input_a):
    # input_a: positive
    # return the same shape as input
    input_a = tf.maximum(1e-10, input_a)
    temp1 = tf.log(-tf.expm1(-input_a))
    temp2 = tf.log1p(-tf.exp(-input_a))
    result = tf.where(tf.less_equal(input_a, tf.log(2.0)), temp1, temp2)
    # result = slicing_where(condition = tf.less_equal(input_a, tf.log(2.0)),
    #   full_input = -input_a,
    #   true_branch = lambda x: tf.log(-tf.expm1(x)),
    #   false_branch = lambda x: tf.log1p(-tf.exp(x)))
    return result
Example 27
 def _inverse_log_det_jacobian(self, y):
     # Could also do:
     #   ildj = tf.reduce_sum(y - distribution_util.softplus_inverse(y),
     #                              axis=event_dims)
     # but the following is more numerically stable. Ie,
     # Y = Log[1 + exp{X}] ==> X = Log[exp{Y} - 1]
     # ==> dX/dY = exp{Y} / (exp{Y} - 1)
     #           = 1 / (1 - exp{-Y}),
     # which is the most stable for large Y > 0. For small Y, we use
     # 1 - exp{-Y} approx Y.
     if self.hinge_softness is not None:
         y /= tf.cast(self.hinge_softness, y.dtype)
     return -tf.log(-tf.expm1(-y))
Example 28
 def _log_likelihood(self, ref, pre_recon):
     recon_dim = ref.get_shape().as_list()[1]
     self.mu = tf.identity(nn.dense(
         pre_recon,
         recon_dim,
         deviation_regularizer=self.deviation_regularizer,
         scope="mu_dense"),
                           name="mu")
     self.log_var = tf.identity(nn.dense(
         pre_recon,
         recon_dim,
         deviation_regularizer=self.deviation_regularizer,
         scope="log_var_dense"),
                                name="log_var")
     self.recon = tf.expm1(self.mu)
     return self._log_ln_positive(tf.log1p(ref), self.mu, self.log_var)
Example 29
 def bottleneck(self, x):
   hparams = self.hparams
   z_size = hparams.bottleneck_bits
   x_shape = common_layers.shape_list(x)
   with tf.variable_scope("vae"):
     mu = tf.layers.dense(x, z_size, name="mu")
     if hparams.mode != tf.estimator.ModeKeys.TRAIN:
       return mu, 0.0  # No sampling or kl loss on eval.
     log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
     epsilon = tf.random_normal(x_shape[:-1] + [z_size])
     z = mu + tf.exp(log_sigma / 2) * epsilon
     kl = 0.5 * tf.reduce_mean(
         tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
     free_bits = z_size // 4
     kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
   return z, kl_loss * hparams.kl_beta
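With log_sigma read as the log-variance (which the sampling step z = mu + exp(log_sigma / 2) * epsilon suggests), expm1(log_sigma) + mu**2 - log_sigma is the usual KL(N(mu, sigma^2) || N(0, 1)) term sigma^2 + mu^2 - 1 - log sigma^2, with the -1 folded into expm1. A NumPy check of that equivalence (illustrative):

import numpy as np

mu = np.array([0.0, 0.5, -1.0])
log_sigma = np.array([-0.2, 0.0, 0.3])   # log-variance

via_expm1 = 0.5 * (np.expm1(log_sigma) + np.square(mu) - log_sigma)
textbook = 0.5 * (np.exp(log_sigma) + np.square(mu) - 1.0 - log_sigma)

print(np.allclose(via_expm1, textbook))  # True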
Example 30
 def loglikelihood(y_true, y_pred):
     sigma = K.get_value(param["sigma"])
     log_sigma = np.log(sigma)
     wp_true = tf.unstack(y_true, num=2, axis=1)[0]
     bp = tf.unstack(y_true, num=2, axis=1)[1]
     wp_pred = tf.unstack(y_pred, num=2, axis=1)[0]
     a = tf.exp(-(bp - wp_pred) / sigma)
     bp_loglik1 = tf.log(-tf.expm1(-a))
     bp_loglik2 = tf.log1p(-tf.exp(-a))
     bp_loglik = tf.where(a < 0.693, bp_loglik1, bp_loglik2)
     is_not_win = tf.is_nan(wp_true)
     is_win = tf.logical_not(is_not_win)
     wp_idx = tf.to_int32(tf.where(is_win))
     wp_true = tf.boolean_mask(wp_true, is_win)
     wp_pred = tf.boolean_mask(wp_pred, is_win)
     z = (wp_true - wp_pred) / sigma
     wp_loglik = -(z + tf.exp(-z)) - log_sigma
     wp_loglik = tf.scatter_nd(wp_idx, wp_loglik, tf.shape(is_win))
     return tf.where(is_win, wp_loglik, bp_loglik)
Example 31
    def _step(self, J, voltage, refractory, dt):
        tau_ref = discretize_tau_ref(self.tau_ref, dt)
        tau_rc = discretize_tau_rc(self.tau_rc, dt)

        delta_t = tf.clip_by_value(dt - refractory, self.zero, dt)
        voltage -= (J - voltage) * tf.expm1(-delta_t / tau_rc)

        spiked = voltage > self.one
        spikes = tf.cast(spiked, J.dtype) * self.alpha

        refractory = tf.where(spiked, tau_ref + self.zeros, refractory - dt)
        voltage = tf.where(spiked, self.zeros,
                           tf.maximum(voltage, self.min_voltage))

        # we use stop_gradient to avoid propagating any nans (those get
        # propagated through the cond even if the spiking version isn't
        # being used at all)
        return (tf.stop_gradient(spikes), tf.stop_gradient(voltage),
                tf.stop_gradient(refractory))
Example 32
def modified_gan(logu, self_normalized=False, name=None):
  """The Modified-GAN Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True` the modified-GAN (Generative/Adversarial
  Network) Csiszar-function is:

  ```none
  f(u) = log(1 + u) - log(u) + 0.5 (u - 1)
  ```

  When `self_normalized = False` the `0.5 (u - 1)` is omitted.

  The unmodified GAN Csiszar-function is identical to Jensen-Shannon (with
  `self_normalized = False`).

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    chi_square_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

  with tf.name_scope(name, "chi_square", [logu]):
    logu = tf.convert_to_tensor(logu, name="logu")
    y = tf.nn.softplus(logu) - logu
    if self_normalized:
      y += 0.5 * tf.expm1(logu)
    return y
Example 33
def log1p_abs(logu, name=None):
    """The log1p-abs Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Log1p-Abs Csiszar-function is:

  ```none
  f(u) = u**(sign(u-1)) - 1
  ```

  This function is so-named because it was invented from the following recipe.
  Choose a convex function g such that g(0)=0 and solve for f:

  ```none
  log(1 + f(u)) = g(log(u)).
    <=>
  f(u) = exp(g(log(u))) - 1
  ```

  That is, the graph is identically `g` when y-axis is `log1p`-domain and x-axis
  is `log`-domain.

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    log1p_abs_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

    with tf.name_scope(name, "log1p_abs", [logu]):
        logu = tf.convert_to_tensor(logu, name="logu")
        return tf.expm1(tf.abs(logu))
Example 34
def log1p_abs(logu, name=None):
  """The log1p-abs Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Log1p-Abs Csiszar-function is:

  ```none
  f(u) = u**(sign(u-1)) - 1
  ```

  This function is so-named because it was invented from the following recipe.
  Choose a convex function g such that g(0)=0 and solve for f:

  ```none
  log(1 + f(u)) = g(log(u)).
    <=>
  f(u) = exp(g(log(u))) - 1
  ```

  That is, the graph is identically `g` when y-axis is `log1p`-domain and x-axis
  is `log`-domain.

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    log1p_abs_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

  with tf.name_scope(name, "log1p_abs", [logu]):
    logu = tf.convert_to_tensor(logu, name="logu")
    return tf.expm1(tf.abs(logu))
Example 35
def modified_gan(logu, self_normalized=False, name=None):
    """The Modified-GAN Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True` the modified-GAN (Generative/Adversarial
  Network) Csiszar-function is:

  ```none
  f(u) = log(1 + u) - log(u) + 0.5 (u - 1)
  ```

  When `self_normalized = False` the `0.5 (u - 1)` is omitted.

  The unmodified GAN Csiszar-function is identical to Jensen-Shannon (with
  `self_normalized = False`).

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    chi_square_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

    with tf.name_scope(name, "chi_square", [logu]):
        logu = tf.convert_to_tensor(logu, name="logu")
        y = tf.nn.softplus(logu) - logu
        if self_normalized:
            y += 0.5 * tf.expm1(logu)
        return y
Example 36
 def _log_likelihood(
         self, ref: tf.Tensor, pre_recon: typing.List[tf.Tensor]
 ) -> tf.Tensor:
     recon_dim = ref.get_shape().as_list()[1]
     self.mu = tf.identity(nn.dense(
         pre_recon, recon_dim,
         deviation_regularizer=self.deviation_regularizer,
         scope="mu_dense"
     ), name="mu")
     self.log_var = tf.identity(nn.dense(
         pre_recon, recon_dim,
         deviation_regularizer=self.deviation_regularizer,
         scope="log_var_dense"
     ), name="log_var")
     self.pi = tf.identity(nn.dense(
         pre_recon, recon_dim,
         deviation_regularizer=self.deviation_regularizer,
         scope="dropout_logit_dense"
     ), name="dropout_logit")
     self.recon = tf.expm1(self.mu)
     self.dropout_rate = tf.sigmoid(self.pi)
     return self._log_ziln_positive(
         tf.log1p(ref), self.mu, self.log_var, self.pi)
Example 37
def jeffreys(logu, name=None):
  """The Jeffreys Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Jeffreys Csiszar-function is:

  ```none
  f(u) = 0.5 ( u log(u) - log(u) )
       = 0.5 kl_forward + 0.5 kl_reverse
       = symmetrized_csiszar_function(kl_reverse)
       = symmetrized_csiszar_function(kl_forward)
  ```

  This Csiszar-function induces a symmetric f-Divergence, i.e.,
  `D_f[p, q] = D_f[q, p]`.

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    jeffreys_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

  with tf.name_scope(name, "jeffreys", [logu]):
    logu = tf.convert_to_tensor(logu, name="logu")
    return 0.5 * tf.expm1(logu) * logu
Example 38
def jeffreys(logu, name=None):
    """The Jeffreys Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Jeffreys Csiszar-function is:

  ```none
  f(u) = 0.5 ( u log(u) - log(u) )
       = 0.5 kl_forward + 0.5 kl_reverse
       = symmetrized_csiszar_function(kl_reverse)
       = symmetrized_csiszar_function(kl_forward)
  ```

  This Csiszar-function induces a symmetric f-Divergence, i.e.,
  `D_f[p, q] = D_f[q, p]`.

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    jeffreys_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

    with tf.name_scope(name, "jeffreys", [logu]):
        logu = tf.convert_to_tensor(logu, name="logu")
        return 0.5 * tf.expm1(logu) * logu
Example 39
def _softplus_inverse(x):
    """Helper which computes the function inverse of `tf.nn.softplus`."""
    return tf.log(tf.expm1(x))
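log(expm1(x)) inverts softplus(x) = log1p(exp(x)): since exp(softplus(x)) = 1 + exp(x), expm1 recovers exp(x) exactly. A NumPy round-trip check (illustrative):

import numpy as np

def softplus(x):
    return np.log1p(np.exp(x))

def softplus_inverse(x):
    return np.log(np.expm1(x))

x = np.array([-4.0, -0.5, 0.0, 0.5, 4.0])
print(np.allclose(softplus_inverse(softplus(x)), x))  # True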
Example 40
 def backward_tensor(self, y):
     ys = tf.maximum(y - self._lower, tf.as_dtype(settings.float_type).min)
     return ys + tf.log(-tf.expm1(-ys))
Example 41
 def _cdf(self, x):
   return self._extend_support(
       x, lambda x: -tf.expm1(self.concentration * tf.log(self.scale / x)),
       alt=0.)
Example 42
 def _variance(self):
   variance = self.distribution.variance()
   return (tf.expm1(variance) *
           tf.exp(2. * self.distribution.mean() + variance))
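This is the log-normal variance identity Var = (exp(sigma^2) - 1) * exp(2*mu + sigma^2), with mu and sigma^2 the mean and variance of the underlying (pre-exp) distribution. A Monte Carlo sanity check assuming that base distribution is normal (illustrative, arbitrary parameters):

import numpy as np

mu, var = 0.3, 0.5 ** 2   # mean / variance of the underlying normal

analytic = np.expm1(var) * np.exp(2.0 * mu + var)

samples = np.exp(np.random.default_rng(0).normal(mu, np.sqrt(var), size=2_000_000))
print(analytic, samples.var())   # should agree to two or three significant figures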
Example 43
def amari_alpha(logu, alpha=1., self_normalized=False, name=None):
  """The Amari-alpha Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True`, the Amari-alpha Csiszar-function is:

  ```none
  f(u) = { -log(u) + (u - 1),     alpha = 0
         { u log(u) - (u - 1),    alpha = 1
         { [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)),    otherwise
  ```

  When `self_normalized = False` the `(u - 1)` terms are omitted.

  Warning: when `alpha != 0` and/or `self_normalized = True` this function makes
  non-log-space calculations and may therefore be numerically unstable for
  `|logu| >> 0`.

  For more information, see:
    A. Cichocki and S. Amari. "Families of Alpha-Beta-and GammaDivergences:
    Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp.
    1532-1568, 2010.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    alpha: `float`-like Python scalar. (See Mathematical Details for meaning.)
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    amari_alpha_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.

  Raises:
    TypeError: if `alpha` is `None` or a `Tensor`.
    TypeError: if `self_normalized` is `None` or a `Tensor`.
  """
  with tf.name_scope(name, "amari_alpha", [logu]):
    if alpha is None or tf.contrib.framework.is_tensor(alpha):
      raise TypeError("`alpha` cannot be `None` or `Tensor` type.")
    if (self_normalized is None or
        tf.contrib.framework.is_tensor(self_normalized)):
      raise TypeError("`self_normalized` cannot be `None` or `Tensor` type.")

    logu = tf.convert_to_tensor(logu, name="logu")

    if alpha == 0.:
      f = -logu
    elif alpha == 1.:
      f = tf.exp(logu) * logu
    else:
      f = tf.expm1(alpha * logu) / (alpha * (alpha - 1.))

    if not self_normalized:
      return f

    if alpha == 0.:
      return f + tf.expm1(logu)
    elif alpha == 1.:
      return f - tf.expm1(logu)
    else:
      return f - tf.expm1(logu) / (alpha - 1.)
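For the generic-alpha branch, expm1(alpha * logu) / (alpha * (alpha - 1)) is (u**alpha - 1) / (alpha * (alpha - 1)), and the self-normalizing correction subtracts (u - 1) / (alpha - 1), recovering the docstring formula [(u**alpha - 1) - alpha * (u - 1)] / (alpha * (alpha - 1)). A NumPy check for one alpha (illustrative):

import numpy as np

alpha = 0.5
logu = np.array([-1.0, -0.2, 0.0, 0.7, 2.0])
u = np.exp(logu)

f = np.expm1(alpha * logu) / (alpha * (alpha - 1.0))
f_self_norm = f - np.expm1(logu) / (alpha - 1.0)

expected = ((u ** alpha - 1.0) - alpha * (u - 1.0)) / (alpha * (alpha - 1.0))
print(np.allclose(f_self_norm, expected))  # True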
Example 44
def _softplus_inverse(x):
  """Helper which computes the function inverse of `tf.nn.softplus`."""
  return tf.log(tf.expm1(x))
Example 45
 def quantile(self, rho):
     return tf.log1p(rho * tf.expm1(self.beta)) / self.beta
Example 46
 def _forward(self, x):
   x = self._maybe_assert_valid_x(x)
   return -tf.expm1(-((x / self.scale)**self.concentration))
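-expm1(-(x / scale)**concentration) is the Weibull CDF 1 - exp(-(x / scale)**k), written with expm1 so that very small probabilities near x = 0 are not rounded to zero. A NumPy check (illustrative):

import numpy as np

scale, concentration = 2.0, 1.5
x = np.array([1e-8, 0.5, 2.0, 5.0])

stable = -np.expm1(-((x / scale) ** concentration))
direct = 1.0 - np.exp(-((x / scale) ** concentration))

print(stable)                        # keeps precision for tiny x
print(np.allclose(stable, direct))   # True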
Example 47
 def _variance(self):
   return tf.expm1(self.distribution.scale**2.) * tf.exp(
       2. * self.distribution.loc + self.distribution.scale**2.)