Code Example #1
def op(name,
       images,
       max_outputs=3,
       display_name=None,
       description=None,
       collections=None):
    """Create a legacy image summary op for use in a TensorFlow graph.

    Arguments:
      name: A unique name for the generated summary node.
      images: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
        where `k` is the number of images, `h` and `w` are the height and
        width of the images, and `c` is the number of channels, which
        should be 1, 3, or 4. Any of the dimensions may be statically
        unknown (i.e., `None`).
      max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
        many images will be emitted at each step. When more than
        `max_outputs` many images are provided, the first `max_outputs`
        many images will be used and the rest silently discarded.
      display_name: Optional name for this summary in TensorBoard, as a
        constant `str`. Defaults to `name`.
      description: Optional long-form description for this summary, as a
        constant `str`. Markdown is supported. Defaults to empty.
      collections: Optional list of graph collection keys. The new
        summary op is added to these collections. Defaults to
        `[GraphKeys.SUMMARIES]`.

    Returns:
      A TensorFlow summary op.
    """
    # TODO(nickfelt): remove on-demand imports once dep situation is fixed.
    import tensorflow.compat.v1 as tf

    if display_name is None:
        display_name = name
    summary_metadata = metadata.create_summary_metadata(
        display_name=display_name, description=description)
    with tf.name_scope(name), \
         tf.control_dependencies([tf.assert_rank(images, 4),
                                  tf.assert_type(images, tf.uint8),
                                  tf.assert_non_negative(max_outputs)]):
        limited_images = images[:max_outputs]
        encoded_images = tf.map_fn(tf.image.encode_png,
                                   limited_images,
                                   dtype=tf.string,
                                   name='encode_each_image')
        image_shape = tf.shape(images)
        dimensions = tf.stack([
            tf.as_string(image_shape[2], name='width'),
            tf.as_string(image_shape[1], name='height')
        ],
                              name='dimensions')
        tensor = tf.concat([dimensions, encoded_images], axis=0)
        return tf.summary.tensor_summary(name='image_summary',
                                         tensor=tensor,
                                         collections=collections,
                                         summary_metadata=summary_metadata)
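
A minimal usage sketch for the op above (not part of the original source): it assumes the surrounding module provides the TensorBoard image-plugin `metadata` helper that `op` references, and that it runs in TF1 graph mode.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Hypothetical pixel data: a batch of 32x32 RGB images as uint8.
images = tf.placeholder(tf.uint8, shape=[None, 32, 32, 3])
summary_op = op('input_images', images, max_outputs=2,
                description='First two input images per step.')
# The resulting op can be fetched in a session and written out with a
# tf.summary.FileWriter.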
Code Example #2
    def _prepare(self, var_list):
        # We need to put the conversion and check here because a user will likely
        # want to decay the learning rate dynamically.
        self._learning_rate_tensor = distribution_util.with_dependencies(
            [
                tf1.assert_non_negative(
                    self._learning_rate,
                    message='`learning_rate` must be non-negative')
            ],
            tf.convert_to_tensor(value=self._learning_rate,
                                 name='learning_rate_tensor'))
        self._decay_tensor = tf.convert_to_tensor(
            value=self._preconditioner_decay_rate,
            name='preconditioner_decay_rate')

        super(StochasticGradientLangevinDynamics, self)._prepare(var_list)
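
For context, the `distribution_util.with_dependencies` helper used above behaves roughly like the sketch below (an assumption inferred from its usage here, not copied from the TFP source): it attaches the assertion ops as control dependencies and returns the value through an identity, so the assertions run whenever the returned tensor is evaluated.

import tensorflow.compat.v1 as tf

def with_dependencies(dependencies, output_tensor):
    # Force `dependencies` (e.g. assert ops) to execute before
    # `output_tensor` can be consumed, by routing it through tf.identity.
    with tf.control_dependencies(dependencies):
        return tf.identity(output_tensor)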
Code Example #3
File: rcnn_target.py  Project: dshea89/luminoth
def disable_some_bgs():
    # Mutatis mutandis, all comments from disable_some_fgs apply.
    shuffled_inds = tf.random_shuffle(bg_inds, seed=self._seed)
    disable_place = (tf.shape(bg_inds)[0] - max_bg)
    integrity_assertion = tf.assert_non_negative(
        disable_place,
        message="disable_place in disable_some_bgs is negative.")
    with tf.control_dependencies([integrity_assertion]):
        disable_inds = shuffled_inds[:disable_place]
    is_disabled = tf.sparse_to_dense(sparse_indices=disable_inds,
                                     sparse_values=True,
                                     default_value=False,
                                     output_shape=tf.cast(
                                         proposals_label_shape,
                                         tf.int64),
                                     validate_indices=False)
    return tf.where(condition=is_disabled,
                    x=tf.fill(dims=proposals_label_shape, value=-1.),
                    y=proposals_label)
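
The assert-then-slice pattern above can be reproduced in isolation. A minimal sketch with made-up shapes (all names below are illustrative, not from luminoth):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

bg_inds = tf.constant([4, 1, 7, 2])
max_bg = 3
shuffled_inds = tf.random_shuffle(bg_inds)
disable_place = tf.shape(bg_inds)[0] - max_bg
assertion = tf.assert_non_negative(
    disable_place, message='fewer background indices than requested')
with tf.control_dependencies([assertion]):
    # The slice only runs once the assertion has passed.
    disable_inds = shuffled_inds[:disable_place]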
Code Example #4
    def __init__(self,
                 learning_rate,
                 preconditioner_decay_rate=0.95,
                 data_size=1,
                 burnin=25,
                 diagonal_bias=1e-8,
                 name=None,
                 parallel_iterations=10):
        default_name = 'StochasticGradientLangevinDynamics'
        with tf1.name_scope(name, default_name, [
                learning_rate, preconditioner_decay_rate, data_size, burnin,
                diagonal_bias
        ]):
            if tf.executing_eagerly():
                raise NotImplementedError(
                    'Eager execution currently not supported for '
                    'SGLD optimizer.')

            self._preconditioner_decay_rate = tf.convert_to_tensor(
                value=preconditioner_decay_rate,
                name='preconditioner_decay_rate')
            self._data_size = tf.convert_to_tensor(value=data_size,
                                                   name='data_size')
            self._burnin = tf.convert_to_tensor(value=burnin,
                                                name='burnin',
                                                dtype=dtype_util.common_dtype(
                                                    [burnin],
                                                    dtype_hint=tf.int64))
            self._diagonal_bias = tf.convert_to_tensor(value=diagonal_bias,
                                                       name='diagonal_bias')
            # TODO(b/124800185): Consider migrating `learning_rate` to be a
            # hyperparameter handled by the base Optimizer class. This would allow
            # users to plug in a `tf.keras.optimizers.schedules.LearningRateSchedule`
            # object in addition to Tensors.
            self._learning_rate = tf.convert_to_tensor(value=learning_rate,
                                                       name='learning_rate')
            self._parallel_iterations = parallel_iterations

            self._preconditioner_decay_rate = distribution_util.with_dependencies(
                [
                    tf1.assert_non_negative(
                        self._preconditioner_decay_rate,
                        message=
                        '`preconditioner_decay_rate` must be non-negative'),
                    tf1.assert_less_equal(
                        self._preconditioner_decay_rate,
                        1.,
                        message='`preconditioner_decay_rate` must be at most 1.'
                    ),
                ], self._preconditioner_decay_rate)

            self._data_size = distribution_util.with_dependencies([
                tf1.assert_greater(
                    self._data_size,
                    0,
                    message='`data_size` must be greater than zero')
            ], self._data_size)

            self._burnin = distribution_util.with_dependencies([
                tf1.assert_non_negative(
                    self._burnin, message='`burnin` must be non-negative'),
                tf1.assert_integer(self._burnin,
                                   message='`burnin` must be an integer')
            ], self._burnin)

            self._diagonal_bias = distribution_util.with_dependencies([
                tf1.assert_non_negative(
                    self._diagonal_bias,
                    message='`diagonal_bias` must be non-negative')
            ], self._diagonal_bias)

            super(StochasticGradientLangevinDynamics,
                  self).__init__(name=name or default_name)
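
A hedged construction sketch for the class above: `tfp.optimizer.StochasticGradientLangevinDynamics` is the usual public path in TensorFlow Probability, and the numeric values are illustrative.

import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp

tf.disable_eager_execution()  # the constructor rejects eager mode
opt = tfp.optimizer.StochasticGradientLangevinDynamics(
    learning_rate=0.01,
    preconditioner_decay_rate=0.95,
    data_size=50000,  # illustrative training-set size
    burnin=100)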
Code Example #5
    def __init__(self,
                 batch_size,
                 total_num_examples,
                 max_learning_rate=1.,
                 preconditioner_decay_rate=0.95,
                 burnin=25,
                 burnin_max_learning_rate=1e-6,
                 use_single_learning_rate=False,
                 name=None):
        default_name = 'VariationalSGD'
        with tf1.name_scope(name, default_name, [
                max_learning_rate, preconditioner_decay_rate, batch_size,
                burnin, burnin_max_learning_rate
        ]):
            self._preconditioner_decay_rate = tf.convert_to_tensor(
                value=preconditioner_decay_rate,
                name='preconditioner_decay_rate')
            self._batch_size = tf.convert_to_tensor(value=batch_size,
                                                    name='batch_size')
            self._total_num_examples = tf.convert_to_tensor(
                value=total_num_examples, name='total_num_examples')

            self._burnin = tf.convert_to_tensor(value=burnin,
                                                name='burnin',
                                                dtype=dtype_util.common_dtype(
                                                    [burnin],
                                                    dtype_hint=tf.int64))
            self._burnin_max_learning_rate = tf.convert_to_tensor(
                value=burnin_max_learning_rate,
                name='burnin_max_learning_rate')
            self._max_learning_rate = tf.convert_to_tensor(
                value=max_learning_rate, name='max_learning_rate')
            self._use_single_learning_rate = use_single_learning_rate

            self._preconditioner_decay_rate = distribution_util.with_dependencies(
                [
                    tf1.assert_non_negative(
                        self._preconditioner_decay_rate,
                        message=
                        '`preconditioner_decay_rate` must be non-negative'),
                    tf1.assert_less_equal(
                        self._preconditioner_decay_rate,
                        1.,
                        message='`preconditioner_decay_rate` must be at most 1.'
                    ),
                ], self._preconditioner_decay_rate)

            self._batch_size = distribution_util.with_dependencies([
                tf1.assert_greater(
                    self._batch_size,
                    0,
                    message='`batch_size` must be greater than zero')
            ], self._batch_size)

            self._total_num_examples = distribution_util.with_dependencies([
                tf1.assert_greater(
                    self._total_num_examples,
                    0,
                    message='`total_num_examples` must be greater than zero')
            ], self._total_num_examples)

            self._burnin = distribution_util.with_dependencies([
                tf1.assert_non_negative(
                    self._burnin, message='`burnin` must be non-negative'),
                tf1.assert_integer(self._burnin,
                                   message='`burnin` must be an integer')
            ], self._burnin)

            self._burnin_max_learning_rate = distribution_util.with_dependencies(
                [
                    tf1.assert_non_negative(
                        self._burnin_max_learning_rate,
                        message=
                        '`burnin_max_learning_rate` must be non-negative')
                ], self._burnin_max_learning_rate)

            self._max_learning_rate = distribution_util.with_dependencies([
                tf1.assert_non_negative(
                    self._max_learning_rate,
                    message='`max_learning_rate` must be non-negative')
            ], self._max_learning_rate)

            super(VariationalSGD, self).__init__(name=name or default_name)
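
A similar sketch for VariationalSGD (same caveats: `tfp.optimizer.VariationalSGD` is the assumed public path, and the values are illustrative). Note that `batch_size` and `total_num_examples` are required arguments, matching the assertions above that both must be positive.

import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp

tf.disable_eager_execution()
opt = tfp.optimizer.VariationalSGD(
    batch_size=32,
    total_num_examples=60000,  # e.g. an MNIST-sized training set
    max_learning_rate=1.0,
    burnin=25)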