Code example #1
File: ordered.py Project: Jackiefan/tensorflow
 def _inverse_event_shape_tensor(self, output_shape):
   if self.validate_args:
     is_greater_one = check_ops.assert_greater(
         output_shape[-1], 1, message="Need last dimension greater than 1.")
     output_shape = control_flow_ops.with_dependencies(
         [is_greater_one], output_shape)
   return (output_shape[-1])[..., array_ops.newaxis]
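Note: every example on this page follows the same pattern. `check_ops.assert_greater(x, y)` returns an assertion op that raises `InvalidArgumentError` at run time if `x > y` does not hold elementwise, and the op has no effect until something forces it to run, typically by attaching it via `control_flow_ops.with_dependencies` or an `ops.control_dependencies` block. A minimal self-contained sketch of the pattern, assuming the public TF 1.x API (`tf.assert_greater` is the public alias of `check_ops.assert_greater`; the tensor values are illustrative):

import tensorflow as tf  # TF 1.x graph mode assumed

x = tf.constant([3, 4], name="x")
y = tf.constant([1, 2], name="y")

# The assertion op does nothing on its own; attaching it as a control
# dependency makes it run whenever `out` is evaluated.
assert_op = tf.assert_greater(x, y, message="x must exceed y")
with tf.control_dependencies([assert_op]):
  out = tf.identity(x)

with tf.Session() as sess:
  print(sess.run(out))  # [3 4]; raises InvalidArgumentError if x <= y anywhere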
Code example #2
 def test_doesnt_raise_when_greater_and_broadcastable_shapes(self):
   with self.test_session():
     small = constant_op.constant([1], name="small")
     big = constant_op.constant([3, 2], name="big")
     with ops.control_dependencies([check_ops.assert_greater(big, small)]):
       out = array_ops.identity(small)
     out.eval()
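As this test shows, `x` and `y` only need broadcast-compatible shapes: the shape-`[1]` tensor `small` broadcasts against the shape-`[2]` tensor `big` before the elementwise comparison. A tiny hedged sketch of the same behaviour with the public API (values illustrative):

import tensorflow as tf  # TF 1.x assumed

small = tf.constant([1])   # shape [1]
big = tf.constant([3, 2])  # shape [2]
# `small` broadcasts to [1, 1], so the check compares 3 > 1 and 2 > 1.
with tf.control_dependencies([tf.assert_greater(big, small)]):
  out = tf.identity(small)

with tf.Session() as sess:
  sess.run(out)  # passes; non-broadcastable shapes raise instead (see examples #9 and #12)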
Code example #3
 def test_raises_when_less(self):
   small = constant_op.constant([1, 2], name="small")
   big = constant_op.constant([3, 4], name="big")
   with self.assertRaisesOpError("x > y did not hold"):
     with ops.control_dependencies([check_ops.assert_greater(small, big)]):
       out = array_ops.identity(big)
     self.evaluate(out)
Code example #4
 def test_doesnt_raise_when_both_empty(self):
   with self.test_session():
     larry = constant_op.constant([])
     curly = constant_op.constant([])
     with ops.control_dependencies([check_ops.assert_greater(larry, curly)]):
       out = array_ops.identity(larry)
     out.eval()
Code example #5
 def _inverse_event_shape_tensor(self, output_shape):
   if self.validate_args:
      # A negative shape is not possible, so we need only check <= 1.
     is_greater_one = check_ops.assert_greater(
         output_shape[-1], 1, message="Need last dimension greater than 1.")
     output_shape = control_flow_ops.with_dependencies(
         [is_greater_one], output_shape)
   return (output_shape[-1] - 1)[..., array_ops.newaxis]
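Note the difference from examples #1 and #11: this revision returns `output_shape[-1] - 1` rather than `output_shape[-1]`, so the inverse event shape drops one dimension. The snippets come from different revisions of the code.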
Code example #6
 def test_raises_when_less(self):
   with self.test_session():
     small = constant_op.constant([1, 2], name="small")
     big = constant_op.constant([3, 4], name="big")
     with ops.control_dependencies([check_ops.assert_greater(small, big)]):
       out = array_ops.identity(big)
     with self.assertRaisesOpError("small.*big"):
       out.eval()
Code example #7
 def test_raises_when_equal(self):
   with self.test_session():
     small = constant_op.constant([1, 2], name="small")
     with ops.control_dependencies(
         [check_ops.assert_greater(small, small, message="fail")]):
       out = array_ops.identity(small)
     with self.assertRaisesOpError("fail.*small.*small"):
       out.eval()
Code example #8
 def test_raises_when_equal(self):
   small = constant_op.constant([1, 2], name="small")
   with self.assertRaisesOpError("fail"):
     with ops.control_dependencies(
         [check_ops.assert_greater(
             small, small, message="fail")]):
       out = array_ops.identity(small)
     self.evaluate(out)
Code example #9
 def test_raises_when_greater_but_non_broadcastable_shapes(self):
   with self.test_session():
     small = constant_op.constant([1, 1, 1], name="small")
     big = constant_op.constant([3, 2], name="big")
     with self.assertRaisesRegexp(ValueError, "must be"):
       with ops.control_dependencies([check_ops.assert_greater(big, small)]):
         out = array_ops.identity(small)
       out.eval()
Code example #10
 def check(t):
   target = array_ops.shape(tensor)[1:]
   result = array_ops.broadcast_dynamic_shape(target, array_ops.shape(t))
   # This rank check ensures that I don't get a wrong answer from the
   # _shapes_ broadcasting against each other.
   gt = check_ops.assert_greater(array_ops.rank(target), array_ops.rank(t))
   eq = check_ops.assert_equal(target, result)
   return gt, eq
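This helper checks that broadcasting the trailing shape of `tensor` against the shape of each `t` gives back that trailing shape unchanged, i.e. that the samples' shape dominates the parameter's shape. A hedged sketch of the key primitive, `broadcast_dynamic_shape`, which broadcasts two shape vectors (shapes illustrative):

import tensorflow as tf  # TF 1.x assumed

target = tf.constant([3, 4])  # trailing shape of the sample tensor
t_shape = tf.constant([4])    # shape of a parameter tensor

# broadcast_dynamic_shape broadcasts two *shape vectors*; if the result
# equals `target`, then `target` dominates `t_shape`.
result = tf.broadcast_dynamic_shape(target, t_shape)

with tf.Session() as sess:
  print(sess.run(result))  # [3 4] == target, so the dominance check passes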
Code example #11
File: ordered.py Project: Puschel2020/tensorflow
 def _inverse_event_shape_tensor(self, output_shape):
   if self.validate_args:
     is_greater_one = check_ops.assert_greater(
         output_shape[-1], 1, message="Need last dimension greater than 1.")
     output_shape = control_flow_ops.with_dependencies(
         [is_greater_one], output_shape)
   return (output_shape[-1])[..., array_ops.newaxis]
Code example #12
 def test_raises_when_greater_but_non_broadcastable_shapes(self):
   small = constant_op.constant([1, 1, 1], name="small")
   big = constant_op.constant([3, 2], name="big")
   # The exception in eager and non-eager mode is different because
   # eager mode relies on shape check done as part of the C++ op, while
   # graph mode does shape checks when creating the `Operation` instance.
   with self.assertRaisesRegexp(
       (errors.InvalidArgumentError, ValueError),
       (r"Incompatible shapes: \[2\] vs. \[3\]|"
        r"Dimensions must be equal, but are 2 and 3")):
     with ops.control_dependencies([check_ops.assert_greater(big, small)]):
       out = array_ops.identity(small)
     self.evaluate(out)
Code example #13
File: softsign.py Project: Jackiefan/tensorflow
  def _maybe_assert_valid_y(self, y):
    if not self.validate_args:
      return y
    is_valid = [
        check_ops.assert_greater(
            y, math_ops.cast(-1., dtype=y.dtype.base_dtype),
            message="Inverse transformation input must be greater than -1."),
        check_ops.assert_less(
            y, math_ops.cast(1., dtype=y.dtype.base_dtype),
            message="Inverse transformation input must be less than 1.")
    ]

    return control_flow_ops.with_dependencies(is_valid, y)
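The two checks pin the inverse input to the open interval (-1, 1), which is the range of the softsign forward map x / (1 + |x|). A hedged plain-Python sketch of why that is the right domain (the helper names are illustrative, not part of the library):

# Softsign maps R onto (-1, 1); its inverse y / (1 - |y|) is only defined
# for |y| < 1, which is exactly what the two assertions above enforce.
def softsign(x):
  return x / (1.0 + abs(x))

def softsign_inverse(y):
  assert -1.0 < y < 1.0, "inverse input must lie in (-1, 1)"
  return y / (1.0 - abs(y))

for x in (-5.0, 0.0, 2.5):
  y = softsign(x)
  assert abs(softsign_inverse(y) - x) < 1e-12  # round-trips within tolerance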
Code example #14
def assert_true_mean_equal_by_dkwm(
    samples, low, high, expected, false_fail_rate=1e-6, name=None):
  """Asserts the mean of the given distribution is as expected.

  More precisely, fails if there is enough evidence (using the
  [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
  that the true mean of some distribution from which the given samples are
  drawn is _not_ the given expected mean with statistical significance
  `false_fail_rate` or stronger, otherwise passes.  If you also want to
  check that you are gathering enough evidence that a pass is not
  spurious, see `min_num_samples_for_dkwm_mean_test` and
  `min_discrepancy_of_true_means_detectable_by_dkwm`.

  Note that `false_fail_rate` is a total false failure rate for all
  the assertions in the batch.  As such, if the batch is nontrivial,
  the assertion will insist on stronger evidence to fail any one member.

  Args:
    samples: Floating-point tensor of samples from the distribution(s)
      of interest.  Entries are assumed IID across the 0th dimension.
      The other dimensions must broadcast with `low` and `high`.
    low: Floating-point tensor of lower bounds on the distributions'
      supports.
    high: Floating-point tensor of upper bounds on the distributions'
      supports.
    expected: Floating-point tensor of expected true means.
    false_fail_rate: *Scalar* admissible total rate of mistakes.
    name: A name for this operation (optional).

  Returns:
    check: Op that raises `InvalidArgumentError` if any expected mean is
      outside the corresponding confidence interval.
  """
  with ops.name_scope(
      name, "assert_true_mean_equal_by_dkwm",
      [samples, low, high, expected, false_fail_rate]):
    samples = ops.convert_to_tensor(samples, name="samples")
    low = ops.convert_to_tensor(low, name="low")
    high = ops.convert_to_tensor(high, name="high")
    expected = ops.convert_to_tensor(expected, name="expected")
    false_fail_rate = ops.convert_to_tensor(
        false_fail_rate, name="false_fail_rate")
    samples = _check_shape_dominates(samples, [low, high, expected])
    min_mean, max_mean = true_mean_confidence_interval_by_dkwm(
        samples, low, high, error_rate=false_fail_rate)
    less_op = check_ops.assert_less(
        min_mean, expected, message="Mean confidence interval too high")
    with ops.control_dependencies([less_op]):
      return check_ops.assert_greater(
          max_mean, expected, message="Mean confidence interval too low")
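A hedged usage sketch for the function above: draw IID uniform samples on [0, 1] and check that their true mean is 0.5. The sample count and seed are illustrative; a real test would size the sample via `min_num_samples_for_dkwm_mean_test`, as the docstring suggests.

import tensorflow as tf  # TF 1.x assumed; uses the function defined above

samples = tf.random_uniform([4000], minval=0., maxval=1., seed=17)
check = assert_true_mean_equal_by_dkwm(
    samples, low=0., high=1., expected=0.5, false_fail_rate=1e-6)

with tf.Session() as sess:
  sess.run(check)  # passes unless there is strong evidence the mean is not 0.5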
Code example #15
File: distribution_util.py Project: LUTAN/tensorflow
 def _maybe_attach_assertion(x):
   if not validate_args:
     return x
   if assert_positive:
     return control_flow_ops.with_dependencies([
         check_ops.assert_positive(
             x, message="diagonal part must be positive"),
     ], x)
   # TODO(b/35157376): Use `assert_none_equal` once it exists.
   return control_flow_ops.with_dependencies([
       check_ops.assert_greater(
           math_ops.abs(x),
           array_ops.zeros([], x.dtype),
           message="diagonal part must be non-zero"),
   ], x)
Code example #16
def validate_n_classes(n_classes):
  """Validates n_classes argument.

  Required arguments: n_classes.

  Args:
    n_classes: The number of classes.

  Raises:
    ValueError: If n_classes is <= 2 and n_classes is a Python integer.
  Returns:
    n_classes in its original type.
  """
  if isinstance(n_classes, int) and (n_classes <= 2):
    raise ValueError('n_classes must be > 2: %s.' % n_classes)

  n_classes_as_tensor = ops.convert_to_tensor(n_classes)
  assert_n_classes = check_ops.assert_greater(
      n_classes_as_tensor, 2, message='n_classes must be greater than 2')
  with ops.control_dependencies([assert_n_classes]):
    control_flow_ops.no_op()
  # Return n_classes in its original type, so that any code
  # using the accessor logits_dimension() has the original type.
  return n_classes
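A short usage note: with a plain Python `int` the check runs eagerly at call time, while tensor-like inputs are only guarded by the graph-mode assertion. Illustrative calls:

n = validate_n_classes(3)  # returns 3 unchanged, original type preserved
validate_n_classes(2)      # raises ValueError: n_classes must be > 2: 2.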
Code example #17
  def __init__(self,
               batch_size,
               total_num_examples,
               max_learning_rate=1.0,
               preconditioner_decay_rate=0.95,
               burnin=25,
               burnin_max_learning_rate=1e-6,
               use_single_learning_rate=False,
               name=None,
               variable_scope=None):
    default_name = 'VariationalSGDOptimizer'
    with ops.name_scope(name, default_name, [
        max_learning_rate, preconditioner_decay_rate, batch_size, burnin,
        burnin_max_learning_rate
    ]):
      if variable_scope is None:
        var_scope_name = ops.get_default_graph().unique_name(
            name or default_name)
        with varscope_ops.variable_scope(var_scope_name) as scope:
          self._variable_scope = scope
      else:
        self._variable_scope = variable_scope

      self._preconditioner_decay_rate = ops.convert_to_tensor(
          preconditioner_decay_rate, name='preconditioner_decay_rate')
      self._batch_size = ops.convert_to_tensor(batch_size, name='batch_size')
      self._total_num_examples = ops.convert_to_tensor(
          total_num_examples, name='total_num_examples')
      self._burnin = ops.convert_to_tensor(burnin, name='burnin')
      self._burnin_max_learning_rate = ops.convert_to_tensor(
          burnin_max_learning_rate, name='burnin_max_learning_rate')
      self._max_learning_rate = ops.convert_to_tensor(
          max_learning_rate, name='max_learning_rate')
      self._use_single_learning_rate = use_single_learning_rate

      with varscope_ops.variable_scope(self._variable_scope):
        self._counter = varscope_ops.get_variable(
            'counter', initializer=0, trainable=False)

      self._preconditioner_decay_rate = control_flow_ops.with_dependencies([
          check_ops.assert_non_negative(
              self._preconditioner_decay_rate,
              message='`preconditioner_decay_rate` must be non-negative'),
          check_ops.assert_less_equal(
              self._preconditioner_decay_rate,
              1.,
              message='`preconditioner_decay_rate` must be at most 1.'),
      ], self._preconditioner_decay_rate)

      self._batch_size = control_flow_ops.with_dependencies([
          check_ops.assert_greater(
              self._batch_size,
              0,
              message='`batch_size` must be greater than zero')
      ], self._batch_size)

      self._total_num_examples = control_flow_ops.with_dependencies([
          check_ops.assert_greater(
              self._total_num_examples,
              0,
              message='`total_num_examples` must be greater than zero')
      ], self._total_num_examples)

      self._burnin = control_flow_ops.with_dependencies([
          check_ops.assert_non_negative(
              self._burnin, message='`burnin` must be non-negative'),
          check_ops.assert_integer(
              self._burnin, message='`burnin` must be an integer')
      ], self._burnin)

      self._burnin_max_learning_rate = control_flow_ops.with_dependencies([
          check_ops.assert_non_negative(
              self._burnin_max_learning_rate,
              message='`burnin_max_learning_rate` must be non-negative')
      ], self._burnin_max_learning_rate)

      self._max_learning_rate = control_flow_ops.with_dependencies([
          check_ops.assert_non_negative(
              self._max_learning_rate,
              message='`max_learning_rate` must be non-negative')
      ], self._max_learning_rate)

      super(VariationalSGDOptimizer, self).__init__(
          use_locking=False, name=name or default_name)
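The constructor validates each hyperparameter by rebinding it through `with_dependencies`, so any later read of, say, `self._batch_size` re-runs its check. The same pattern in isolation, as a hedged sketch (the placeholder name is illustrative):

import tensorflow as tf  # TF 1.x assumed
from tensorflow.python.ops import control_flow_ops

batch_size = tf.placeholder(tf.int32, shape=[], name='batch_size')
batch_size_checked = control_flow_ops.with_dependencies([
    tf.assert_greater(
        batch_size, 0, message='`batch_size` must be greater than zero')
], batch_size)

with tf.Session() as sess:
  sess.run(batch_size_checked, feed_dict={batch_size: 32})  # ok
  # feeding 0 here would raise InvalidArgumentError from the assertion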
Code example #18
 def test_doesnt_raise_when_greater(self):
   small = constant_op.constant([3, 1], name="small")
   big = constant_op.constant([4, 2], name="big")
   with ops.control_dependencies([check_ops.assert_greater(big, small)]):
     out = array_ops.identity(small)
   self.evaluate(out)
Code example #19
  def __init__(self,
               learning_rate,
               preconditioner_decay_rate=0.95,
               num_pseudo_batches=1,
               burnin=25,
               diagonal_bias=1e-8,
               name=None,
               variable_scope=None):
    default_name = 'SGLDOptimizer'
    with ops.name_scope(name, default_name, [
        learning_rate, preconditioner_decay_rate, num_pseudo_batches, burnin,
        diagonal_bias
    ]):
      if variable_scope is None:
        var_scope_name = ops.get_default_graph().unique_name(
            name or default_name)
        with varscope_ops.variable_scope(var_scope_name) as scope:
          self._variable_scope = scope
      else:
        self._variable_scope = variable_scope

      self._preconditioner_decay_rate = ops.convert_to_tensor(
          preconditioner_decay_rate, name='preconditioner_decay_rate')
      self._num_pseudo_batches = ops.convert_to_tensor(
          num_pseudo_batches, name='num_pseudo_batches')
      self._burnin = ops.convert_to_tensor(burnin, name='burnin')
      self._diagonal_bias = ops.convert_to_tensor(
          diagonal_bias, name='diagonal_bias')
      self._learning_rate = ops.convert_to_tensor(
          learning_rate, name='learning_rate')

      with varscope_ops.variable_scope(self._variable_scope):
        self._counter = varscope_ops.get_variable(
            'counter', initializer=0, trainable=False)

      self._preconditioner_decay_rate = control_flow_ops.with_dependencies([
          check_ops.assert_non_negative(
              self._preconditioner_decay_rate,
              message='`preconditioner_decay_rate` must be non-negative'),
          check_ops.assert_less_equal(
              self._preconditioner_decay_rate,
              1.,
              message='`preconditioner_decay_rate` must be at most 1.'),
      ], self._preconditioner_decay_rate)

      self._num_pseudo_batches = control_flow_ops.with_dependencies([
          check_ops.assert_greater(
              self._num_pseudo_batches,
              0,
              message='`num_pseudo_batches` must be greater than zero')
      ], self._num_pseudo_batches)

      self._burnin = control_flow_ops.with_dependencies([
          check_ops.assert_non_negative(
              self._burnin, message='`burnin` must be non-negative'),
          check_ops.assert_integer(
              self._burnin, message='`burnin` must be an integer')
      ], self._burnin)

      self._diagonal_bias = control_flow_ops.with_dependencies([
          check_ops.assert_non_negative(
              self._diagonal_bias,
              message='`diagonal_bias` must be non-negative')
      ], self._diagonal_bias)

      super(SGLDOptimizer, self).__init__(use_locking=False,
                                          name=name or default_name)
Code example #20
File: mvn_tril.py Project: Immexxx/tensorflow
  def __init__(self,
               loc=None,
               scale_tril=None,
               validate_args=False,
               allow_nan_stats=True,
               name="MultivariateNormalTriL"):
    """Construct Multivariate Normal distribution on `R^k`.

    The `batch_shape` is the broadcast shape between `loc` and `scale`
    arguments.

    The `event_shape` is given by the last dimension of `loc` or the last
    dimension of the matrix implied by `scale`.

    Recall that `covariance = scale @ scale.T`. A (non-batch) `scale` matrix is:

    ```none
    scale = scale_tril
    ```

    where `scale_tril` is lower-triangular `k x k` matrix with non-zero
    diagonal, i.e., `tf.diag_part(scale_tril) != 0`.

    Additional leading dimensions (if any) will index batches.

    Args:
      loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
        implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
        `b >= 0` and `k` is the event size.
      scale_tril: Floating-point, lower-triangular `Tensor` with non-zero
        diagonal elements. `scale_tril` has shape `[B1, ..., Bb, k, k]` where
        `b >= 0` and `k` is the event size.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      ValueError: if neither `loc` nor `scale_tril` are specified.
    """
    parameters = locals()
    def _convert_to_tensor(x, name):
      return None if x is None else ops.convert_to_tensor(x, name=name)
    if loc is None and scale_tril is None:
      raise ValueError("Must specify one or both of `loc`, `scale_tril`.")
    with ops.name_scope(name):
      with ops.name_scope("init", values=[loc, scale_tril]):
        loc = _convert_to_tensor(loc, name="loc")
        scale_tril = _convert_to_tensor(scale_tril, name="scale_tril")
        if scale_tril is None:
          scale = linalg.LinearOperatorIdentity(
              num_rows=distribution_util.dimension_size(loc, -1),
              dtype=loc.dtype,
              is_self_adjoint=True,
              is_positive_definite=True,
              assert_proper_shapes=validate_args)
        else:
          if validate_args:
            scale_tril = control_flow_ops.with_dependencies([
                # TODO(b/35157376): Use `assert_none_equal` once it exists.
                check_ops.assert_greater(
                    math_ops.abs(array_ops.matrix_diag_part(scale_tril)),
                    array_ops.zeros([], scale_tril.dtype),
                    message="`scale_tril` must have non-zero diagonal"),
            ], scale_tril)
          scale = linalg.LinearOperatorTriL(
              scale_tril,
              is_non_singular=True,
              is_self_adjoint=False,
              is_positive_definite=False)
    super(MultivariateNormalTriL, self).__init__(
        loc=loc,
        scale=scale,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        name=name)
    self._parameters = parameters
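A hedged construction sketch for the class above, assuming the TF 1.x contrib location of this distribution (the `tfd` alias and the example values are illustrative). With `validate_args=True`, the `assert_greater` check above rejects any `scale_tril` whose diagonal contains a zero:

import numpy as np
import tensorflow as tf
tfd = tf.contrib.distributions  # assumed TF 1.x contrib location

loc = np.array([1., -1.], dtype=np.float32)
scale_tril = np.array([[1., 0.],
                       [0.5, 2.]], dtype=np.float32)  # non-zero diagonal

mvn = tfd.MultivariateNormalTriL(
    loc=loc, scale_tril=scale_tril, validate_args=True)

with tf.Session() as sess:
  print(sess.run(mvn.sample(3)))     # three draws from N(loc, L @ L.T)
  print(sess.run(mvn.covariance()))  # equals scale_tril @ scale_tril.T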