Example #1
def _complexity_cost(weights, mean, sigma, prior_density):
    # Elementwise log-density of the Gaussian variational posterior:
    # log N(w; mean, sigma) = -0.5*log(2*pi) - log(sigma) - 0.5*((w - mean)/sigma)^2
    k = -0.5 * math.log(2 * math.pi)
    cost = k - gen_math_ops.log(sigma) - 0.5 * gen_math_ops.square(
        (weights - mean) / sigma)
    # Subtract the log prior, then sum: total cost = sum(log q(w) - log p(w)).
    cost -= gen_math_ops.log(prior_density(weights))
    cost = math_ops.reduce_sum(cost)
    return cost
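For a quick sanity check of the closed form above: the first two terms are exactly the elementwise log-density of a Gaussian, so the summed cost has the log q(w) - log p(w) form of the "complexity cost" used in Bayes-by-Backprop-style variational inference. A minimal NumPy/SciPy verification, not part of the original snippet (`scipy.stats.norm` is used only for comparison):

```python
import numpy as np
from scipy.stats import norm

w, mean, sigma = 0.3, 0.0, 1.5
# Same closed form as in the snippet above.
log_q = (-0.5 * np.log(2 * np.pi) - np.log(sigma)
         - 0.5 * ((w - mean) / sigma) ** 2)
assert np.isclose(log_q, norm.logpdf(w, loc=mean, scale=sigma))
```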
Example #2
    def GraphFn(self, x1, x2):
        x = x1
        q = math_ops.abs(x)
        q = q + 1.0
        q = gen_math_ops.exp(q)
        q = gen_math_ops.log(q)
        q = array_ops.squeeze(q, axis=-2)
        q = math_ops.abs(q)
        q = q + 2.2
        q = gen_math_ops.sqrt(q)
        q = gen_math_ops.rsqrt(q)
        q = math_ops.negative(q)
        q = array_ops.squeeze(q, axis=3)
        q = math_ops.abs(q)
        q = q + 3.0
        a = gen_math_ops.reciprocal(q)

        # this chain of operations has a batch size of 5, which is different from
        # the batch size for the other operations.
        x = constant_op.constant(np.random.randn(5, 8, 12), dtype=x.dtype)
        q = math_ops.abs(x)
        q = q + 2.0
        q = gen_math_ops.exp(q)
        q = gen_math_ops.log(q)
        q = math_ops.abs(q)
        q = q + 2.1
        q = gen_math_ops.sqrt(q)
        q = gen_math_ops.rsqrt(q)
        q = math_ops.negative(q)
        q = math_ops.abs(q)
        q = q + 4.0
        b = gen_math_ops.reciprocal(q)

        # TODO(jie): this one will break, broadcasting on batch.
        x = x2
        q = math_ops.abs(x)
        q = q + 5.0
        q = gen_math_ops.exp(q)
        q = array_ops.squeeze(q, axis=[-1, -2, 3])
        q = gen_math_ops.log(q)
        q = math_ops.abs(q)
        q = q + 5.1
        q = gen_array_ops.reshape(q, [12, 5, 1, 1, 8, 1, 12])
        q = array_ops.squeeze(q, axis=[5, 2, 3])
        q = gen_math_ops.sqrt(q)
        q = math_ops.abs(q)
        q = q + 5.2
        q = gen_math_ops.rsqrt(q)
        q = math_ops.negative(q)
        q = math_ops.abs(q)
        q = q + 5.3
        c = gen_math_ops.reciprocal(q)

        q = a * b
        q = q / c
        return array_ops.squeeze(q, name="output_0")
Example #4
def _dkwm_cdf_envelope(n, error_rate, name=None):
  """Computes the CDF envelope that the DKWM inequality licenses.

  The [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
  gives a stochastic bound on the distance between the true cumulative
  distribution function (CDF) of any distribution and its empirical
  CDF.  To wit, for `n` iid samples from any distribution with CDF F,

  ```none
  P(sup_x |F_n(x) - F(x)| > eps) < 2exp(-2n eps^2)
  ```

  This function computes the envelope size `eps` as a function of the
  number of samples `n` and the desired limit on the left-hand
  probability above.

  Args:
    n: Tensor of numbers of samples drawn.
    error_rate: Floating-point tensor of admissible rates of mistakes.
    name: A name for this operation (optional).

  Returns:
    eps: Tensor of maximum distances the true CDF can be from the
      empirical CDF.  This scales as `O(sqrt(-log(error_rate)))` and
      as `O(1 / sqrt(n))`.  The shape is the broadcast of `n` and
      `error_rate`.
  """
  with ops.name_scope(name, "dkwm_cdf_envelope", [n, error_rate]):
    n = math_ops.cast(n, dtype=error_rate.dtype)
    return math_ops.sqrt(-gen_math_ops.log(error_rate / 2.) / (2. * n))
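The envelope formula is easy to sanity-check outside TensorFlow. A minimal NumPy sketch of the same computation, with arbitrary illustrative values for `n` and `error_rate`:

```python
import numpy as np

def dkwm_cdf_envelope(n, error_rate):
    # Solve 2 * exp(-2 * n * eps**2) = error_rate for eps.
    return np.sqrt(-np.log(error_rate / 2.) / (2. * np.asarray(n, float)))

# With 10,000 samples and a 1e-6 error rate, the empirical CDF is
# within ~0.027 of the true CDF everywhere (with high probability).
print(dkwm_cdf_envelope(10000, 1e-6))  # ~0.0269
```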
Example #5
def fucking_deep_gaze_logsumexp(input_tensor, axis=None, keepdims=False,
        name=None):
    """
    Adaptd from
    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/math_ops.py.
    It is the same as the classic logsumexp instead you substact log(N) where N
    in the number of tensor over which compute the logsumexp (if you have 10
    readout nets, N=10). I don't know why they do this.
    """
    keepdims = False if keepdims is None else keepdims
    input_tensor = ops.convert_to_tensor(input_tensor)
    with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
        raw_max = tf.reduce_max(input_tensor, axis=axis, keep_dims=True)
        my_max = array_ops.stop_gradient(array_ops.where(
            gen_math_ops.is_finite(raw_max), raw_max,
            array_ops.zeros_like(raw_max)))
        result = gen_math_ops.log(
                # reduce_sum here would give the normal logsumexp
                tf.reduce_mean(  # deep_gaze modification, for the output only
                    gen_math_ops.exp(tf.subtract(input_tensor, my_max)),
                    axis, keep_dims=keepdims))
        if not keepdims:
            my_max = array_ops.reshape(my_max, array_ops.shape(result))
        result = gen_math_ops.add(result, my_max)
        return result
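The docstring's claim can be verified numerically: replacing `reduce_sum` with `reduce_mean` inside the log turns `logsumexp(x)` into `logsumexp(x) - log(N)`. A small NumPy check (`scipy.special.logsumexp` is used only for comparison):

```python
import numpy as np
from scipy.special import logsumexp

x = np.random.randn(10)      # e.g. outputs of N = 10 readout nets
m = x.max()                  # stabilizer, mirroring my_max above
log_mean_exp = np.log(np.mean(np.exp(x - m))) + m
assert np.isclose(log_mean_exp, logsumexp(x) - np.log(x.size))
```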
Example #6
def signal_to_noise(y_true, y_pred, mode='snr', data_format=None, epsilon=1e-8):
    '''Signal-to-noise ratio. (metric)
    Calculate the signal-to-noise ratio. It supports different modes.
    Arguments:
        mode:        (1)  snr: mean [ sum( y_true^2 ) / sum( (y_pred - y_true)^2 ) ]
                     (2) psnr: mean [ max( y_true^2 ) / sum( (y_pred - y_true)^2 ) ]
        data_format: 'channels_first' or 'channels_last'. The default setting is generally
                     'channels_last' like other tf.keras APIs.
        epsilon:     used to avoid division by zero.
    Input:
        y_true: label, tensor in any shape.
        y_pred: prediction, tensor in any shape.
    Output:
        scalar, the mean SNR in dB.
    '''
    get_reduced_axes = get_channels(y_true, data_format)
    if mode.casefold() == 'psnr':
        signal = math_ops.reduce_max(gen_math_ops.square(y_true), axis=get_reduced_axes)
    else:
        signal = math_ops.reduce_sum(gen_math_ops.square(y_true), axis=get_reduced_axes)
    noise = math_ops.reduce_sum(gen_math_ops.square(y_true - y_pred), axis=get_reduced_axes) + epsilon
    coeff = (10.0/2.3025851) # 10 / ln(10), converts the natural log to dB
    return coeff*math_ops.reduce_mean(gen_math_ops.log(math_ops.divide(signal, noise)))
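About that constant: because the reduction uses the natural log, multiplying by 10/ln(10) ≈ 4.3429 yields decibels, i.e. `coeff * ln(s/n) == 10 * log10(s/n)`. A quick NumPy check with arbitrary values:

```python
import numpy as np

signal, noise = 4.0, 0.25
coeff = 10.0 / np.log(10.0)   # ~4.3429, i.e. the 10.0/2.3025851 above
db = coeff * np.log(signal / noise)
assert np.isclose(db, 10.0 * np.log10(signal / noise))  # ~12.04 dB
```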
Example #7

  def GetParams(self):
    """Test for unary operations in TF-TRT."""
    dtype = dtypes.float32
    input_name = "input"
    input_dims = [12, 5, 8, 1, 1, 12]
    output_name = "output"
    input2_name = "input_2"
    input2_dims = [12, 5, 8, 1, 12, 1, 1]
    g = ops.Graph()
    with g.as_default():
      x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
      q = math_ops.abs(x)
      q = q + 1.0
      q = gen_math_ops.exp(q)
      q = gen_math_ops.log(q)
      q = array_ops.squeeze(q, axis=-2)
      q = math_ops.abs(q)
      q = q + 2.2
      q = gen_math_ops.sqrt(q)
      q = gen_math_ops.rsqrt(q)
      q = math_ops.negative(q)
      q = array_ops.squeeze(q, axis=3)
      q = math_ops.abs(q)
      q = q + 3.0
      a = gen_math_ops.reciprocal(q)

      x = constant_op.constant(np.random.randn(5, 8, 12), dtype=dtype)
      q = math_ops.abs(x)
      q = q + 2.0
      q = gen_math_ops.exp(q)
      q = gen_math_ops.log(q)
      q = math_ops.abs(q)
      q = q + 2.1
      q = gen_math_ops.sqrt(q)
      q = gen_math_ops.rsqrt(q)
      q = math_ops.negative(q)
      q = math_ops.abs(q)
      q = q + 4.0
      b = gen_math_ops.reciprocal(q)

      # TODO(jie): this one will break, broadcasting on batch.
      x = array_ops.placeholder(
          dtype=dtype, shape=input2_dims, name=input2_name)
      q = math_ops.abs(x)
      q = q + 5.0
      q = gen_math_ops.exp(q)
      q = array_ops.squeeze(q, axis=[-1, -2, 3])
      q = gen_math_ops.log(q)
      q = math_ops.abs(q)
      q = q + 5.1
      q = gen_array_ops.reshape(q, [12, 5, 1, 1, 8, 1, 12])
      q = array_ops.squeeze(q, axis=[5, 2, 3])
      q = gen_math_ops.sqrt(q)
      q = math_ops.abs(q)
      q = q + 5.2
      q = gen_math_ops.rsqrt(q)
      q = math_ops.negative(q)
      q = math_ops.abs(q)
      q = q + 5.3
      c = gen_math_ops.reciprocal(q)

      q = a * b
      q = q / c
      array_ops.squeeze(q, name=output_name)
    return trt_test.TfTrtIntegrationTestParams(
        gdef=g.as_graph_def(),
        input_names=[input_name, input2_name],
        input_dims=[input_dims, input2_dims],
        output_names=[output_name],
        expected_output_dims=[(12, 5, 8, 12)])
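The `expected_output_dims` follow from the two squeezes on the first chain: `[12, 5, 8, 1, 1, 12]` loses axis `-2` and then axis `3`, and the elementwise unary ops preserve shape. A NumPy sanity check of that shape arithmetic:

```python
import numpy as np

x = np.zeros([12, 5, 8, 1, 1, 12])
x = np.squeeze(x, axis=-2)    # -> (12, 5, 8, 1, 12)
x = np.squeeze(x, axis=3)     # -> (12, 5, 8, 12)
assert x.shape == (12, 5, 8, 12)
```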
Example #8
  def GetParams(self):
    """Test for unary operations in TF-TRT."""
    dtype = dtypes.float32
    input_name = "input"
    input_dims = [12, 5, 8, 1, 1, 12]
    input2_name = "input_2"
    input2_dims = [12, 5, 8, 1, 12, 1, 1]
    g = ops.Graph()
    with g.as_default():
      x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
      q = math_ops.abs(x)
      q = q + 1.0
      q = gen_math_ops.exp(q)
      q = gen_math_ops.log(q)
      q = array_ops.squeeze(q, axis=-2)
      q = math_ops.abs(q)
      q = q + 2.2
      q = gen_math_ops.sqrt(q)
      q = gen_math_ops.rsqrt(q)
      q = math_ops.negative(q)
      q = array_ops.squeeze(q, axis=3)
      q = math_ops.abs(q)
      q = q + 3.0
      a = gen_math_ops.reciprocal(q)

      x = constant_op.constant(np.random.randn(5, 8, 12), dtype=dtype)
      q = math_ops.abs(x)
      q = q + 2.0
      q = gen_math_ops.exp(q)
      q = gen_math_ops.log(q)
      q = math_ops.abs(q)
      q = q + 2.1
      q = gen_math_ops.sqrt(q)
      q = gen_math_ops.rsqrt(q)
      q = math_ops.negative(q)
      q = math_ops.abs(q)
      q = q + 4.0
      b = gen_math_ops.reciprocal(q)

      # TODO(jie): this one will break, broadcasting on batch.
      x = array_ops.placeholder(
          dtype=dtype, shape=input2_dims, name=input2_name)
      q = math_ops.abs(x)
      q = q + 5.0
      q = gen_math_ops.exp(q)
      q = array_ops.squeeze(q, axis=[-1, -2, 3])
      q = gen_math_ops.log(q)
      q = math_ops.abs(q)
      q = q + 5.1
      q = gen_array_ops.reshape(q, [12, 5, 1, 1, 8, 1, 12])
      q = array_ops.squeeze(q, axis=[5, 2, 3])
      q = gen_math_ops.sqrt(q)
      q = math_ops.abs(q)
      q = q + 5.2
      q = gen_math_ops.rsqrt(q)
      q = math_ops.negative(q)
      q = math_ops.abs(q)
      q = q + 5.3
      c = gen_math_ops.reciprocal(q)

      q = a * b
      q = q / c
      array_ops.squeeze(q, name=self.output_name)
    return trt_test.TfTrtIntegrationTestParams(
        gdef=g.as_graph_def(),
        input_names=[input_name, input2_name],
        input_dims=[input_dims, input2_dims],
        num_expected_engines=5,
        expected_output_dims=(12, 5, 8, 12),
        allclose_atol=1.e-03,
        allclose_rtol=1.e-03)