Example #1
    def _inverse_log_det_jacobian(self, y):
        # x = sinh(arcsinh(y) / tailweight - skewness)
        # Using sinh' = cosh, arcsinh'(y) = 1 / sqrt(y**2 + 1),
        # dx/dy
        # = cosh(arcsinh(y) / tailweight - skewness)
        #     / (tailweight * sqrt(y**2 + 1))

        # This is computed inside the log to avoid catastrophic cancellations
        # from cosh((arcsinh(y) / tailweight) - skewness) and sqrt(y**2 + 1).
        return (math_ops.log(
            math_ops.cosh(math_ops.asinh(y) / self.tailweight - self.skewness)
            # TODO(srvasude): Consider using cosh(arcsinh(x)) in cases
            # where (arcsinh(x) / tailweight) - skewness ~= arcsinh(x).
            / _sqrtx2p1(y)) - math_ops.log(self.tailweight))
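As a sanity check, the log|dx/dy| computed above can be compared against a finite-difference estimate of dx/dy. The sketch below is plain NumPy with illustrative parameter values (inverse_log_det_jacobian here is a standalone helper, not the bijector method):

import numpy as np

def inverse_log_det_jacobian(y, skewness=0.5, tailweight=2.0):
    # log|dx/dy| for x = sinh(arcsinh(y) / tailweight - skewness),
    # computed in log space as in the snippet above.
    return (np.log(np.cosh(np.arcsinh(y) / tailweight - skewness))
            - 0.5 * np.log1p(y**2) - np.log(tailweight))

y, eps = 1.3, 1e-6
x = np.sinh(np.arcsinh(y) / 2.0 - 0.5)
dx_dy = (np.sinh(np.arcsinh(y + eps) / 2.0 - 0.5) - x) / eps   # finite difference
print(np.allclose(inverse_log_det_jacobian(y), np.log(abs(dx_dy)), atol=1e-5))  # True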
Example #2
    def _forward_log_det_jacobian(self, x):
        # y = sinh((arcsinh(x) + skewness) * tailweight)
        # Using sinh' = cosh, arcsinh'(x) = 1 / sqrt(x**2 + 1),
        # dy/dx
        # = cosh((arcsinh(x) + skewness) * tailweight) * tailweight / sqrt(x**2 + 1)

        # This is computed inside the log to avoid catastrophic cancellations
        # from cosh((arcsinh(x) + skewness) * tailweight) and sqrt(x**2 + 1).
        return (math_ops.log(
            math_ops.cosh(
                (math_ops.asinh(x) + self.skewness) * self.tailweight)
            # TODO(srvasude): Consider using cosh(arcsinh(x)) in cases
            # where (arcsinh(x) + skewness) * tailweight ~= arcsinh(x).
            / _sqrtx2p1(x)) + math_ops.log(self.tailweight))
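For a bijector, the inverse log-det-Jacobian at y = forward(x) equals the negative of the forward log-det-Jacobian at x. A minimal NumPy consistency check of the two formulas above, with illustrative skewness and tailweight values:

import numpy as np

skewness, tailweight = 0.5, 2.0
x = np.array([-1.0, 0.0, 2.5])
y = np.sinh((np.arcsinh(x) + skewness) * tailweight)            # forward transform
fldj = (np.log(np.cosh((np.arcsinh(x) + skewness) * tailweight))
        - 0.5 * np.log1p(x**2) + np.log(tailweight))            # log|dy/dx|
ildj = (np.log(np.cosh(np.arcsinh(y) / tailweight - skewness))
        - 0.5 * np.log1p(y**2) - np.log(tailweight))            # log|dx/dy|
print(np.allclose(fldj, -ildj))                                 # True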
Example #3
  def _forward_log_det_jacobian(self, x):
    # y = sinh((arcsinh(x) + skewness) * tailweight)
    # Using sinh' = cosh, arcsinh'(x) = 1 / sqrt(x**2 + 1),
    # dy/dx
    # = cosh((arcsinh(x) + skewness) * tailweight) * tailweight / sqrt(x**2 + 1)

    # This is computed inside the log to avoid catastrophic cancellations
    # from cosh((arcsinh(x) + skewness) * tailweight) and sqrt(x**2 + 1).
    return (
        math_ops.log(math_ops.cosh(
            (math_ops.asinh(x) + self.skewness) * self.tailweight)
                     # TODO(srvasude): Consider using cosh(arcsinh(x)) in cases
                     # where (arcsinh(x) + skewness) * tailweight ~= arcsinh(x).
                     / _sqrtx2p1(x))
        + math_ops.log(self.tailweight))
Example #4
 def _forward_log_det_jacobian(self, x):
     # y = sinh((arcsinh(x) + skewness) * tailweight)
     # Using sinh' = cosh, arcsinh'(x) = 1 / sqrt(x**2 + 1),
     # dy/dx
     # = cosh((arcsinh(x) + skewness) * tailweight) * tailweight / sqrt(x**2 + 1)
     event_dims = self._event_dims_tensor(x)
     return math_ops.reduce_sum(
         # This is computed inside the log to avoid catastrophic cancellations
         # from cosh((arcsinh(x) + skewness) * tailweight) and sqrt(x**2 + 1).
         math_ops.log(
             math_ops.cosh(
                 (math_ops.asinh(x) + self.skewness) * self.tailweight)
              # TODO(srvasude): Consider using cosh(arcsinh(x)) in cases
             # where (arcsinh(x) + skewness) * tailweight ~= arcsinh(x).
             / _sqrtx2p1(x)) + math_ops.log(self.tailweight),
         axis=event_dims)
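This variant additionally sums the per-element log|dy/dx| terms over the event dimensions, which is valid because the transform acts elementwise, so its Jacobian is diagonal. A minimal NumPy analogue, assuming the last axis is the event axis (the array shapes are illustrative):

import numpy as np

skewness, tailweight = 0.25, 1.5
x = np.random.randn(4, 3)                      # batch of 4 events, each of size 3
per_element = (np.log(np.cosh((np.arcsinh(x) + skewness) * tailweight))
               - 0.5 * np.log1p(x**2) + np.log(tailweight))
fldj = per_element.sum(axis=-1)                # one log-det per batch member
print(fldj.shape)                              # (4,)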
Example #5
  def _inverse_log_det_jacobian(self, y):
    # x = sinh(arcsinh(y) / tailweight - skewness)
    # Using sinh' = cosh, arcsinh'(y) = 1 / sqrt(y**2 + 1),
    # dx/dy
    # = cosh(arcsinh(y) / tailweight - skewness)
    #     / (tailweight * sqrt(y**2 + 1))

    # This is computed inside the log to avoid catastrophic cancellations
    # from cosh((arcsinh(y) / tailweight) - skewness) and sqrt(y**2 + 1).
    return (
        math_ops.log(math_ops.cosh(
            math_ops.asinh(y) / self.tailweight - self.skewness)
                     # TODO(srvasude): Consider using cosh(arcsinh(x)) in cases
                     # where (arcsinh(x) / tailweight) - skewness ~= arcsinh(x).
                     / _sqrtx2p1(y))
        - math_ops.log(self.tailweight))
Example #6
 def _forward_log_det_jacobian(self, x):
   # y = sinh((arcsinh(x) + skewness) * tailweight)
   # Using sinh' = cosh, arcsinh'(x) = 1 / sqrt(x**2 + 1),
   # dy/dx
   # = cosh((arcsinh(x) + skewness) * tailweight) * tailweight / sqrt(x**2 + 1)
   # Note that this could potentially return a NaN due to the log1p(x**2)
   # term since, for instance, this will only be valid for float32 til ~1.7e19.
   # This is in contrast with the forward/inverse passes since an arcsinh
   # transformation is done first, which is valid until the maximum float
   # value.
   # TODO(srvasude): It might be possible to extend the range of validity to
   # match that of forward/inverse by approximating log1p(y**2) by 2 * log(y).
   event_dims = self._event_dims_tensor(x)
   return math_ops.reduce_sum(
       math_ops.log(math_ops.cosh(
           (math_ops.asinh(x) + self.skewness) * self.tailweight)) +
       math_ops.log(self.tailweight) - 0.5 * math_ops.log1p(x**2),
       axis=event_dims)
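The validity note refers to x**2 overflowing inside log1p once |x| exceeds roughly the square root of the float32 maximum (~1.8e19). For such large |x|, 0.5 * log1p(x**2) is essentially log|x|, which is the rewrite the TODO suggests (log1p(y**2) ~= 2 * log(y)). A small NumPy illustration, not TensorFlow code:

import numpy as np

x32 = np.float32(1e20)                          # beyond the float32-safe range
print(0.5 * np.log1p(x32 * x32))                # inf: x**2 overflows float32
x64 = np.float64(1e20)
print(0.5 * np.log1p(x64 * x64), np.log(x64))   # ~46.0517 either way
# For |x| >> 1, 0.5 * log1p(x**2) ~= log|x|, so the term can be computed
# without ever forming x**2.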
Example #7
        def scaled_add_op(x, scale, y):
            cwd = os.getcwd()
            outputs = {
                "output_types": [dtypes.float32],
                "output_shapes": [tensor_shape.TensorShape([SIZE])],
            }
            base_dir = os.path.join(cwd, "tensorflow/python/ipu")
            gp_path = os.path.join(base_dir,
                                   "tests/add_scaled_vector_add_codelet.cc")
            lib_path = os.path.join(base_dir,
                                    "libadd_partial_gradients_custom.so")

            return ipu.custom_ops.precompiled_user_op(
                [x, scale, y, math_ops.cos(x),
                 math_ops.cosh(y)],
                lib_path,
                gp_path,
                outs=outputs,
                inputs_with_gradients=[0, 2])
Example #8
def _AsinhGrad(op, grad):
  """Returns grad * 1/cosh(y)."""
  y = op.outputs[0]
  with ops.control_dependencies([grad]):
    y = math_ops.conj(y)
    return grad / math_ops.cosh(y)
Example #9
def _SinhGrad(op, grad):
  """Returns grad * cosh(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    return grad * math_ops.cosh(x)
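Both registrations follow from d/dx asinh(x) = 1 / sqrt(x**2 + 1) = 1 / cosh(asinh(x)) and d/dx sinh(x) = cosh(x). A quick NumPy finite-difference check of the two identities (illustrative, outside TensorFlow):

import numpy as np

x, eps = 0.8, 1e-6
d_asinh = (np.arcsinh(x + eps) - np.arcsinh(x)) / eps
d_sinh = (np.sinh(x + eps) - np.sinh(x)) / eps
print(np.allclose(d_asinh, 1.0 / np.cosh(np.arcsinh(x)), atol=1e-5))  # True
print(np.allclose(d_sinh, np.cosh(x), atol=1e-5))                     # True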
Example #10
def _AsinhGrad(op, grad):
    """Returns grad * 1/cosh(y)."""
    y = op.outputs[0]
    with ops.control_dependencies([grad]):
        y = math_ops.conj(y)
        return grad / math_ops.cosh(y)
Example #11
def _SinhGrad(op, grad):
    """Returns grad * cosh(x)."""
    x = op.inputs[0]
    with ops.control_dependencies([grad]):
        x = math_ops.conj(x)
        return grad * math_ops.cosh(x)
Example #12
 def exp_map(self, v, x):
     # Exponential map, eq. (9): v is the gradient, treated as a tangent vector at the point x.
     vnorm = math_ops.sqrt(self.lorentz_scalar_product(v, v))
     return math_ops.cosh(vnorm) * x + math_ops.sinh(vnorm) * v / vnorm
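Here lorentz_scalar_product is presumably the Minkowski inner product <u, v> = -u[0]*v[0] + sum_i u[i]*v[i], which makes this the exponential map on the hyperboloid model; note the division by vnorm, so v ~= 0 needs special handling. A standalone NumPy sketch under those assumptions (lorentz_inner and the eps guard are illustrative, not from the original class):

import numpy as np

def lorentz_inner(u, v):
    # Minkowski inner product: -u[0]*v[0] plus the dot product of the rest.
    return -u[0] * v[0] + np.dot(u[1:], v[1:])

def exp_map(v, x, eps=1e-12):
    # Exponential map on the hyperboloid; v is a tangent vector at x.
    # The max() guard avoids dividing by zero when v is (numerically) zero.
    vnorm = np.sqrt(max(lorentz_inner(v, v), eps))
    return np.cosh(vnorm) * x + np.sinh(vnorm) * v / vnorm

x = np.array([1.0, 0.0, 0.0])                 # base point: <x, x> = -1
v = np.array([0.0, 0.3, -0.4])                # tangent vector at x: <x, v> = 0
y = exp_map(v, x)
print(np.isclose(lorentz_inner(y, y), -1.0))  # True: y stays on the hyperboloid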