def _fuzzy_constraints_(weights_tf):
    # stack_indeces has the form [[a1, b1], [a2, b2], ...]; the list ends when an == 0 or bn == 0.
    if len(weights_tf.shape) != 2:
        # These are the Markowitz-model weights, i.e. the contribution of each
        # investment to the portfolio.
        weights_tf = weights_tf[:, np.newaxis]

    # Account for the soft inequalities as additional constraints.
    # The index pairs in stack_indeces are one-based, hence the "- 1" below.
    fuzzy_functional = -tf.reduce_min(
        [weights_tf[iter_1 - 1] - weights_tf[iter_2 - 1]
         for iter_1, iter_2 in stack_indeces])
    markovitz_functionals = tf.matmul(
        tf.transpose(weights_tf), tf.matmul(cov_matrix, weights_tf)
    )[0][0]  # [0][0] extracts the single element of the resulting [1, 1] matrix.
    # In multi-objective optimization, two kinds of scalarizing functionals can be used here.
    lambda_weight = tf.random.uniform(shape=(2,), minval=1., maxval=2.)
    linear_functional = (markovitz_functionals * lambda_weight[0].numpy() +
                         fuzzy_functional * lambda_weight[1].numpy())
    # ---- Chebyshev functional ----
    chebyshev_flag = True
    if chebyshev_flag:
        epsilon = 1.  # It is unclear how this value should be chosen.
        lambda_weight = tf.random.uniform(shape=(2,), minval=1., maxval=2.)
        chebyshev_f = (tf.reduce_min([lambda_weight[0].numpy() * fuzzy_functional,
                                      lambda_weight[1].numpy() * markovitz_functionals]) +
                       epsilon * tf.reduce_sum([fuzzy_functional, markovitz_functionals]))
    return chebyshev_f
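A minimal usage sketch follows; the covariance matrix, the one-based `stack_indeces` pairs, and the starting weights are hypothetical, and the function is assumed to read `cov_matrix` and `stack_indeces` from the enclosing scope, as it does above:

import numpy as np
import tensorflow as tf

# Hypothetical 3-asset covariance matrix and soft ordering constraints w[a] >= w[b],
# given as one-based index pairs.
cov_matrix = tf.constant([[0.04, 0.01, 0.00],
                          [0.01, 0.09, 0.02],
                          [0.00, 0.02, 0.16]])
stack_indeces = [[1, 2], [2, 3]]

weights = tf.Variable([0.5, 0.3, 0.2])
with tf.GradientTape() as tape:
    objective = _fuzzy_constraints_(weights)  # scalar Chebyshev functional
grad = tape.gradient(objective, weights)      # gradient w.r.t. the portfolio weights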
Example #2
    def call(self, inputs, count_weights=None):
        if isinstance(inputs, (list, np.ndarray)):
            inputs = tf.convert_to_tensor(inputs)
        if inputs.shape.rank == 1:
            inputs = tf.compat.v1.expand_dims(inputs, 1)

        if count_weights is not None and self.output_mode != COUNT:
            raise ValueError(
                "`count_weights` is not used when `output_mode='binary'`. "
                "Received `count_weights={}`.".format(count_weights))

        out_depth = self.num_tokens
        binary_output = (self.output_mode == BINARY)
        if isinstance(inputs, tf.SparseTensor):
            max_value = tf.reduce_max(inputs.values)
            min_value = tf.reduce_min(inputs.values)
        else:
            max_value = tf.reduce_max(inputs)
            min_value = tf.reduce_min(inputs)
        condition = tf.logical_and(
            tf.greater(tf.cast(out_depth, max_value.dtype), max_value),
            tf.greater_equal(min_value, tf.cast(0, min_value.dtype)))
        tf.Assert(condition, [
            "Input values must be in the range 0 <= values < num_tokens"
            " with num_tokens={}".format(out_depth)
        ])
        if self.sparse:
            return sparse_bincount(inputs, out_depth, binary_output,
                                   count_weights)
        else:
            return dense_bincount(inputs, out_depth, binary_output,
                                  count_weights)
Example #3
    def call(self, inputs, count_weights=None):
        inputs = utils.ensure_tensor(inputs)

        if count_weights is not None:
            if self.output_mode != COUNT:
                raise ValueError(
                    "`count_weights` is not used when `output_mode` is not `'count'`. "
                    "Received `count_weights={}`.".format(count_weights))
            count_weights = utils.ensure_tensor(count_weights,
                                                self.compute_dtype)

        depth = self.num_tokens
        if isinstance(inputs, tf.SparseTensor):
            max_value = tf.reduce_max(inputs.values)
            min_value = tf.reduce_min(inputs.values)
        else:
            max_value = tf.reduce_max(inputs)
            min_value = tf.reduce_min(inputs)
        condition = tf.logical_and(
            tf.greater(tf.cast(depth, max_value.dtype), max_value),
            tf.greater_equal(min_value, tf.cast(0, min_value.dtype)))
        assertion = tf.Assert(condition, [
            "Input values must be in the range 0 <= values < num_tokens"
            " with num_tokens={}".format(depth)
        ])
        with tf.control_dependencies([assertion]):
            return utils.encode_categorical_inputs(
                inputs,
                output_mode=self.output_mode,
                depth=depth,
                dtype=self.compute_dtype,
                sparse=self.sparse,
                count_weights=count_weights)
Example #4
 def coverage_box(bboxes):
   y_min, x_min, y_max, x_max = tf.split(
       value=bboxes, num_or_size_splits=4, axis=1)
   y_min_coverage = tf.reduce_min(y_min, axis=0)
   x_min_coverage = tf.reduce_min(x_min, axis=0)
   y_max_coverage = tf.reduce_max(y_max, axis=0)
   x_max_coverage = tf.reduce_max(x_max, axis=0)
   return tf.stack(
       [y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage],
       axis=1)
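A small hedged usage sketch, with hypothetical boxes in the `[y_min, x_min, y_max, x_max]` layout the function expects:

import tensorflow as tf

bboxes = tf.constant([[0.1, 0.2, 0.5, 0.6],
                      [0.0, 0.3, 0.4, 0.9]])
print(coverage_box(bboxes).numpy())  # [[0.  0.2 0.5 0.9]] -- the box covering both inputs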
Example #5
    def do_run_run_run():
      """Do a run, return RunHMCResults."""
      states, trace = tfp.mcmc.sample_chain(
          num_results,
          current_state=tf.identity(target_mvn.sample(
              seed=test_util.test_seed())),
          kernel=hmc_kernel,
          num_burnin_steps=num_adaptation_steps,
          seed=test_util.test_seed(),
          trace_fn=trace_fn)

      # If we had some number of chain dimensions, we would change sample_axis.
      sample_axis = 0

      sample_cov = tfp.stats.covariance(states, sample_axis=sample_axis)
      max_variance = tf.reduce_max(tf.linalg.diag_part(sample_cov))
      max_stddev = tf.sqrt(max_variance)
      min_ess = tf.reduce_min(tfp.mcmc.effective_sample_size(states))
      mean_accept_prob = tf.reduce_mean(trace['accept_prob'])

      # Asymptotic step size given that P[accept] = mean_accept_prob.
      asymptotic_step_size = self._calculate_asymptotic_step_size(
          scales=internal_scales,
          prob_accept=mean_accept_prob,
      )

      return RunHMCResults(
          draws=states,
          step_size=trace['step_size'],
          final_step_size=trace['step_size'][-1],
          asymptotic_step_size=asymptotic_step_size,
          accept_prob=trace['accept_prob'],
          mean_accept_prob=mean_accept_prob,
          min_ess=tf.reduce_min(tfp.mcmc.effective_sample_size(states)),
          sample_mean=tf.reduce_mean(states, axis=sample_axis),
          sample_cov=sample_cov,
          sample_var=tf.linalg.diag_part(sample_cov),

          # Standard error in variance estimation is related to standard
          # deviation of variance estimates. For a Normal, this is just Sqrt(2)
          # times variance divided by sqrt sample size (or so my old notes say).
          # So a relative tolerance is useful.
          # Add in a factor of 5 as a buffer.
          var_rtol=5 * tf.sqrt(2.) / tf.sqrt(min_ess),

          # For covariance matrix estimates, there can be terms that have
          # expectation = 0 (e.g. off diagonal entries). So the above doesn't
          # hold. So use an atol.
          cov_atol=5 * max_variance / tf.sqrt(min_ess),

          # Standard error in mean estimation is stddev divided by sqrt
          # sample size. This is an absolute tolerance.
          # Add in a factor of 5 as a buffer.
          mean_atol=5 * max_stddev / tf.sqrt(min_ess),
      )
Example #6
    def call(self, inputs, count_weights=None):
        if isinstance(inputs, (list, np.ndarray)):
            inputs = tf.convert_to_tensor(inputs)

        def expand_dims(inputs, axis):
            if tf_utils.is_sparse(inputs):
                return tf.sparse.expand_dims(inputs, axis)
            else:
                return tf.compat.v1.expand_dims(inputs, axis)

        original_shape = inputs.shape
        # In all cases, we should uprank scalar input to a single sample.
        if inputs.shape.rank == 0:
            inputs = expand_dims(inputs, -1)
        # One hot will uprank only if the final output dimension is not already 1.
        if self.output_mode == ONE_HOT:
            if inputs.shape[-1] != 1:
                inputs = expand_dims(inputs, -1)

        # TODO(b/190445202): remove output rank restriction.
        if inputs.shape.rank > 2:
            raise ValueError(
                "Received input shape {}, which would result in output rank {}. "
                "Currently only outputs up to rank 2 are supported.".format(
                    original_shape, inputs.shape.rank))

        if count_weights is not None and self.output_mode != COUNT:
            raise ValueError(
                "`count_weights` is not used when `output_mode` is not `'count'`. "
                "Received `count_weights={}`.".format(count_weights))

        out_depth = self.num_tokens
        binary_output = self.output_mode in (MULTI_HOT, ONE_HOT)
        if isinstance(inputs, tf.SparseTensor):
            max_value = tf.reduce_max(inputs.values)
            min_value = tf.reduce_min(inputs.values)
        else:
            max_value = tf.reduce_max(inputs)
            min_value = tf.reduce_min(inputs)
        condition = tf.logical_and(
            tf.greater(tf.cast(out_depth, max_value.dtype), max_value),
            tf.greater_equal(min_value, tf.cast(0, min_value.dtype)))
        assertion = tf.Assert(condition, [
            "Input values must be in the range 0 <= values < num_tokens"
            " with num_tokens={}".format(out_depth)
        ])
        with tf.control_dependencies([assertion]):
            if self.sparse:
                return sparse_bincount(inputs, out_depth, binary_output,
                                       count_weights)
            else:
                return dense_bincount(inputs, out_depth, binary_output,
                                      count_weights)
Example #7
    def test_multi_state_part(self, use_default):
        mvn = tfd.JointDistributionSequential([
            tfd.Normal(1., 0.1),
            tfd.Normal(2., 1.),
            tfd.Independent(tfd.Normal(3 * tf.ones([2, 3, 4]), 10.), 3)
        ])

        if use_default:
            momentum_distribution = None
            step_size = 0.1
        else:
            reshape_to_scalar = tfp.bijectors.Reshape(event_shape_out=[])
            reshape_to_234 = tfp.bijectors.Reshape(event_shape_out=[2, 3, 4])
            momentum_distribution = _CompositeJointDistributionSequential([
                reshape_to_scalar(
                    _CompositeMultivariateNormalPrecisionFactorLinearOperator(
                        precision_factor=tf.linalg.LinearOperatorDiag([0.1]))),
                reshape_to_scalar(
                    _CompositeMultivariateNormalPrecisionFactorLinearOperator(
                        precision_factor=tf.linalg.LinearOperatorDiag([1.]))),
                reshape_to_234(
                    _CompositeMultivariateNormalPrecisionFactorLinearOperator(
                        precision_factor=tf.linalg.LinearOperatorDiag(
                            tf.fill([24], 10.))))
            ])
            step_size = 0.3
        nuts_kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
            target_log_prob_fn=mvn.log_prob,
            momentum_distribution=momentum_distribution,
            step_size=step_size,
            max_tree_depth=4)

        draws = tfp.mcmc.sample_chain(100,
                                      [0., 0., tf.zeros((2, 3, 4))],
                                      kernel=nuts_kernel,
                                      seed=test_util.test_seed(),
                                      trace_fn=None)
        ess = tfp.mcmc.effective_sample_size(
            draws, filter_threshold=0, filter_beyond_positive_pairs=False)
        if not use_default:
            self.assertGreaterEqual(
                self.evaluate(
                    tf.reduce_min(tf.nest.map_structure(tf.reduce_min, ess))),
                40.)
        else:
            self.assertLess(
                self.evaluate(
                    tf.reduce_min(tf.nest.map_structure(tf.reduce_min, ess))),
                50.)
Example #8
def pad_tensors(tensors, dtype=None, name=None):
    """Pads the innermost dimension of `Tensor`s to a common shape.

  Given a list of `Tensor`s of the same `dtype` and with shapes
  `batch_shape_i + [n_i]`, pads the innermost dimension of each tensor to
  `batch_shape_i + [max(n_i)]`. For each tensor `t`, the padding is done with
  values `t[..., -1]`.

  ### Example
  ```python
  x = [[1, 2, 3, 9], [2, 3, 5, 2]]
  y = [4, 5, 8]
  pad_tensors([x, y])
  # Expected: [array([[1, 2, 3, 9], [2, 3, 5, 2]], array([4, 5, 8, 8])]
  ```

  Args:
    tensors: A list of tensors of the same `dtype` and shapes
      `batch_shape_i + [n_i]`.
    dtype: The default dtype to use when converting values to `Tensor`s.
      Default value: `None` which means that default dtypes inferred by
        TensorFlow are used.
    name: Python string. The name to give to the ops created by this class.
      Default value: `None` which maps to the default name `pad_tensors`.
  Returns:
    A list of `Tensor`s of shape `batch_shape_i + [max(n_i)]`.

  Raises:
    ValueError: If input is not an instance of a list or a tuple.
  """
    if not isinstance(tensors, (tuple, list)):
        raise ValueError(
            f"`tensors` should be a list or a tuple but have type {type(tensors)}"
        )
    if not tensors:
        return []
    name = name or "pad_tensors"
    with tf.name_scope(name):
        t0 = tf.convert_to_tensor(tensors[0], dtype=dtype)
        dtype = dtype or t0.dtype
        tensors = [t0] + [
            tf.convert_to_tensor(t, dtype=dtype) for t in tensors[1:]
        ]
        max_size = tf.reduce_max([tf.shape(t)[-1] for t in tensors])
        padded_tensors = []

        for t in tensors:
            paddings = ((t.shape.rank - 1) * [[0, 0]] +
                        [[0, max_size - tf.shape(t)[-1]]])
            # Padded value has to be a constant
            constant_values = tf.reduce_min(t) - 1
            pad_t = tf.pad(t,
                           paddings,
                           mode="CONSTANT",
                           constant_values=constant_values)
            # Correct padded value
            pad_t = tf.where(pad_t > constant_values, pad_t,
                             tf.expand_dims(t[..., -1], axis=-1))
            padded_tensors.append(pad_t)
    return padded_tensors
Example #9
    def testNegativeBinomialSample(self):
        probs = [.3, .9]
        total_count = [4., 11.]
        n = int(100e3)
        negbinom = tfd.NegativeBinomial(total_count=total_count,
                                        probs=probs,
                                        validate_args=True)

        samples = negbinom.sample(n, seed=test_util.test_seed())
        self.assertEqual([n, 2], samples.shape)

        sample_mean = tf.reduce_mean(samples, axis=0)
        sample_var = tf.reduce_mean(
            (samples - sample_mean[tf.newaxis, ...])**2., axis=0)
        sample_min = tf.reduce_min(samples)
        [sample_mean_, sample_var_,
         sample_min_] = self.evaluate([sample_mean, sample_var, sample_min])
        self.assertAllEqual(np.ones(sample_min_.shape, dtype=np.bool_),
                            sample_min_ >= 0.0)
        for i in range(2):
            self.assertAllClose(sample_mean_[i],
                                stats.nbinom.mean(total_count[i],
                                                  1 - probs[i]),
                                atol=0.,
                                rtol=.02)
            self.assertAllClose(sample_var_[i],
                                stats.nbinom.var(total_count[i], 1 - probs[i]),
                                atol=0.,
                                rtol=.02)
Example #10
 def call(self, multi_objectives: tf.Tensor) -> tf.Tensor:
   _validate_scalarization_parameter_shape(multi_objectives, {
       'weights': self._weights,
       'reference_point': self._reference_point
   })
   return tf.reduce_min(
       (multi_objectives - self._reference_point) * self._weights, axis=1)
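The expression is easy to sanity-check outside the class; here is a hedged standalone computation with made-up weights and reference point:

import tensorflow as tf

multi_objectives = tf.constant([[3.0, 5.0]])  # one candidate, two objectives
reference_point = tf.constant([1.0, 1.0])
weights = tf.constant([0.5, 2.0])
scalarized = tf.reduce_min((multi_objectives - reference_point) * weights, axis=1)
print(scalarized.numpy())  # [1.] == min(0.5 * (3 - 1), 2.0 * (5 - 1))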
Example #11
def random_square_crop(image_size, min_scale):
  """Generates a random square crop within an image.

  Args:
    image_size: a [height, width] tensor.
    min_scale: how much the minimum dimension can be scaled down when taking a
      crop. (e.g. if the image is 480 x 640, a min_scale of 0.8 means the output
      crop can have a height and width between 480 and 384, which is 480 * 0.8.)

  Returns:
    output_begin, output_size and image_size.
    output_begin and output_size are three element tensors specifying the shape
    to crop using crop_sequence below. image_size is a two element
    [height, width] tensor from the input.
  """
  min_dim = tf.reduce_min(image_size[0:2])
  sampled_size = tf.to_int32(
      tf.to_float(min_dim) * tf.random_uniform([], min_scale, 1.0))
  output_size = tf.stack([sampled_size, sampled_size, -1])
  height_offset = tf.random_uniform([],
                                    0,
                                    image_size[0] - sampled_size + 1,
                                    dtype=tf.int32)
  width_offset = tf.random_uniform([],
                                   0,
                                   image_size[1] - sampled_size + 1,
                                   dtype=tf.int32)
  output_begin = tf.stack([height_offset, width_offset, 0])
  return output_begin, output_size, image_size
Example #12
  def test_batched_state(self):
    mvn = tfd.MultivariateNormalDiag(
        loc=[1., 2., 3.], scale_diag=[0.1, 1., 10.])
    batch_shape = [2, 4]
    if self.use_default_momentum_distribution:
      momentum_distribution = None
      step_size = 0.1
    else:
      momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
          tf.zeros((2, 4, 3)), precision_factor=mvn.scale)
      step_size = 0.3

    hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
        target_log_prob_fn=mvn.log_prob,
        momentum_distribution=momentum_distribution,
        step_size=step_size,
        num_leapfrog_steps=10)

    draws = tfp.mcmc.sample_chain(
        110,
        tf.zeros(batch_shape + [3]),
        kernel=hmc_kernel,
        seed=test_util.test_seed(),
        trace_fn=None)
    ess = tfp.mcmc.effective_sample_size(draws[10:], cross_chain_dims=[1, 2],
                                         filter_threshold=0,
                                         filter_beyond_positive_pairs=False)
    if not self.use_default_momentum_distribution:
      self.assertAllClose(self.evaluate(ess), 100 * 2. * 4. * tf.ones(3))
    else:
      self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
Example #13
    def test_batched_state(self, use_default):
        mvn = tfd.MultivariateNormalDiag(loc=[1., 2., 3.],
                                         scale_diag=[0.1, 1., 10.])
        batch_shape = [2, 4]
        if use_default:
            step_size = 0.1
            momentum_distribution = None
        else:
            step_size = 1.0
            momentum_distribution = _CompositeMultivariateNormalPrecisionFactorLinearOperator(
                tf.zeros((2, 4, 3)), precision_factor=mvn.scale)

        nuts_kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
            target_log_prob_fn=mvn.log_prob,
            momentum_distribution=momentum_distribution,
            step_size=step_size,
            max_tree_depth=5)

        draws = tfp.mcmc.sample_chain(110,
                                      tf.zeros(batch_shape + [3]),
                                      kernel=nuts_kernel,
                                      seed=test_util.test_seed(),
                                      trace_fn=None)
        ess = tfp.mcmc.effective_sample_size(
            draws[10:],
            cross_chain_dims=[1, 2],
            filter_threshold=0,
            filter_beyond_positive_pairs=False)
        if not use_default:
            self.assertAllClose(self.evaluate(ess), 100 * 2. * 4. * tf.ones(3))
        else:
            self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
Example #14
  def test_transform(self):
    mvn = tfd.MultivariateNormalDiag(loc=[1., 2., 3.], scale_diag=[1., 1., 1.])
    diag_variance = tf.constant([0.1, 1., 10.])

    if self.use_default_momentum_distribution:
      momentum_distribution = None
    else:
      momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
          precision_factor=tf.linalg.LinearOperatorDiag(
              tf.math.sqrt(diag_variance)))
    hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
        target_log_prob_fn=mvn.log_prob,
        momentum_distribution=momentum_distribution,
        step_size=0.3,
        num_leapfrog_steps=10)

    transformed_kernel = tfp.mcmc.TransformedTransitionKernel(
        hmc_kernel, bijector=tfb.Scale(tf.math.rsqrt(diag_variance)))

    draws = tfp.mcmc.sample_chain(
        110,
        tf.zeros(3),
        kernel=transformed_kernel,
        seed=test_util.test_seed(),
        trace_fn=None)
    ess = tfp.mcmc.effective_sample_size(draws[-100:],
                                         filter_threshold=0,
                                         filter_beyond_positive_pairs=False)

    if not self.use_default_momentum_distribution:
      self.assertAllClose(ess, tf.fill([3], 100.))
    else:
      self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
Example #15
def _psd_mask(x):
    """Computes whether each square matrix in the input is positive semi-definite.

  Args:
    x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.

  Returns:
    mask: A floating-point `Tensor` of shape `[B1, ... Bn]`.  Each
      scalar is 1 if the corresponding matrix was PSD, otherwise 0.
  """
    # Allegedly
    # https://scicomp.stackexchange.com/questions/12979/testing-if-a-matrix-is-positive-semi-definite
    # it is more efficient to test for positive semi-definiteness by
    # trying to compute the Cholesky decomposition -- the matrix is PSD
    # if you succeed and not PSD if you fail.  However, TensorFlow's
    # Cholesky raises an exception if _any_ of the input matrices are
    # not PSD, from which I don't know how to extract _which ones_, so I
    # proceed by explicitly computing all the eigenvalues and checking
    # whether they are all positive or not.
    #
    # Also, as was discussed in the answer, it is somewhat dangerous to
    # treat SPD-ness as binary in floating-point arithmetic. Cholesky
    # factorization can complete and 'look' like everything is fine
    # (e.g., O(1) entries and a diagonal of all ones) but the matrix can
    # have an exponential condition number.
    eigenvalues, _ = tf.linalg.eigh(x)
    return tf.cast(tf.reduce_min(eigenvalues, axis=-1) >= 0, dtype=x.dtype)
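A hedged illustration with ad-hoc matrices: the identity is PSD, while the second matrix has eigenvalues 3 and -1:

import tensorflow as tf

psd = tf.eye(2)
not_psd = tf.constant([[1., 2.], [2., 1.]])
print(_psd_mask(tf.stack([psd, not_psd])).numpy())  # [1. 0.]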
Example #16
    def test_tril(self, use_default):
        if tf.executing_eagerly():
            self.skipTest(
                'b/169882656 Too many warnings are issued in eager logs')
        cov = 0.9 * tf.ones([3, 3]) + 0.1 * tf.eye(3)
        scale = tf.linalg.cholesky(cov)
        mv_tril = tfd.MultivariateNormalTriL(loc=[1., 2., 3.],
                                             scale_tril=scale)

        if use_default:
            momentum_distribution = None
        else:
            momentum_distribution = tfd_e.MultivariateNormalInverseScaleLinearOperator(
                tf.zeros(3),
                inverse_scale=tf.linalg.LinearOperatorFullMatrix(cov))
        hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
            target_log_prob_fn=mv_tril.log_prob,
            momentum_distribution=momentum_distribution,
            step_size=0.2,
            num_leapfrog_steps=10)
        draws = tfp.mcmc.sample_chain(120,
                                      tf.zeros(3),
                                      kernel=hmc_kernel,
                                      seed=test_util.test_seed(),
                                      trace_fn=None)
        ess = tfp.mcmc.effective_sample_size(
            draws[-100:],
            filter_threshold=0,
            filter_beyond_positive_pairs=False)

        if not use_default:
            self.assertAllClose(ess, tf.fill([3], 100.))
        else:
            self.assertLess(self.evaluate(tf.reduce_min(ess)), 60.)
Example #17
    def get_all_steps(self,
                      num_steps: Optional[int] = None,
                      limit: Optional[int] = None) -> EnvStep:
        if self.num_steps <= 0:
            raise ValueError('No steps in the dataset.')

        num_steps_ = 1
        if num_steps is not None:
            num_steps_ = num_steps

        max_range = self._last_valid_steps_id + 1
        if limit is not None:
            max_range = tf.minimum(max_range, tf.cast(limit, tf.int64))
        all_valid_steps = self._valid_steps_table.read(tf.range(max_range))

        # Can't collect trajectories that trail off end of dataset.
        if tf.reduce_min(
                all_valid_steps) + num_steps_ > self._last_step_id + 1:
            raise ValueError('Not enough steps in the dataset.')
        all_valid_steps = tf.gather(
            all_valid_steps,
            tf.where(
                all_valid_steps + num_steps_ <= self._last_step_id + 1)[:, 0])

        rows_to_get = (all_valid_steps[:, None] +
                       tf.range(num_steps_, dtype=tf.int64)[None, :])
        rows_to_get = tf.math.mod(rows_to_get, self._last_step_id + 1)
        steps = self._data_table.read(rows_to_get)
        self._last_rows_read = rows_to_get

        if num_steps is None:
            steps = tf.nest.map_structure(lambda t: tf.squeeze(t, 1), steps)
            self._last_rows_read = tf.squeeze(self._last_rows_read, 1)

        return steps
Example #18
 def select_actor_action(self, env_output, agent_output):
     oracle_next_action = env_output.observation[
         constants.ORACLE_NEXT_ACTION]
     oracle_next_action_indices = tf.where(
         tf.equal(env_output.observation[constants.CONN_IDS],
                  oracle_next_action))
     oracle_next_action_idx = tf.reduce_min(oracle_next_action_indices)
     assert self._mode, 'mode must be set.'
     if self._mode == 'train':
         if self._loss_type == common.CE_LOSS:
             # This is teacher-forcing mode, so choose action same as oracle action.
             action_idx = oracle_next_action_idx
         elif self._loss_type == common.AC_LOSS:
             # Choose next pano from probability distribution over next panos
             action_idx = tfp.distributions.Categorical(
                 logits=agent_output.policy_logits).sample()
         else:
             raise ValueError('Unsupported loss type {}'.format(
                 self._loss_type))
     else:
         # In non-train modes, choose greedily.
         action_idx = tf.argmax(agent_output.policy_logits, axis=-1)
     action_val = env_output.observation[constants.CONN_IDS][action_idx]
     return common.ActorAction(chosen_action_idx=int(action_idx.numpy()),
                               oracle_next_action_idx=int(
                                   oracle_next_action_idx.numpy())), int(
                                       action_val.numpy())
Example #19
  def select_actor_action(self, env_output, unused_agent_output):
    """Returns the next ground truth action pano id."""
    time_step = env_output.observation[constants.TIME_STEP]
    current_pano_id = env_output.observation[constants.PANO_ID]
    golden_path = env_output.observation[constants.GOLDEN_PATH]
    golden_path_len = sum(
        [1 for pid in golden_path if pid != constants.INVALID_NODE_ID])

    # Sanity check: ensure pano id is on the golden path.
    if current_pano_id != golden_path[time_step]:
      raise ValueError(
          'Current pano id does not match that in golden path: {} vs. {}'
          .format(current_pano_id, golden_path[time_step]))

    if ((current_pano_id == env_output.observation[constants.GOAL_PANO_ID] and
         time_step == golden_path_len - 1) or
        current_pano_id == constants.STOP_NODE_ID):
      next_golden_pano_id = constants.STOP_NODE_ID
    else:
      next_golden_pano_id = golden_path[time_step + 1]

    try:
      unused_action_idx = tf.where(
          tf.equal(env_output.observation[constants.CONN_IDS],
                   next_golden_pano_id))
    except ValueError:
      # Current and next panos are not connected, use idx for invalid node.
      unused_action_idx = tf.where(
          tf.equal(env_output.observation[constants.CONN_IDS],
                   constants.INVALID_NODE_ID))
    unused_action_idx = tf.cast(tf.reduce_min(unused_action_idx), tf.int32)
    return common.ActorAction(
        chosen_action_idx=unused_action_idx.numpy(),
        oracle_next_action_idx=unused_action_idx.numpy()), int(
            next_golden_pano_id)
Example #20
    def test_batches(self, use_default):
        mvn = tfd.JointDistributionSequential(
            [tfd.Normal(1., 0.1),
             tfd.Normal(2., 1.),
             tfd.Normal(3., 10.)])
        n_chains = 10
        if use_default:
            momentum_distribution = None
            step_size = 0.1
        else:
            reshape_to_scalar = tfp.bijectors.Reshape(event_shape_out=[])
            momentum_distribution = _CompositeJointDistributionSequential([
                reshape_to_scalar(
                    _CompositeMultivariateNormalPrecisionFactorLinearOperator(
                        precision_factor=tf.linalg.LinearOperatorDiag(
                            tf.fill([n_chains, 1], 0.1)))),
                reshape_to_scalar(
                    _CompositeMultivariateNormalPrecisionFactorLinearOperator(
                        precision_factor=tf.linalg.LinearOperatorDiag(
                            tf.fill([n_chains, 1], 1.)))),
                reshape_to_scalar(
                    _CompositeMultivariateNormalPrecisionFactorLinearOperator(
                        precision_factor=tf.linalg.LinearOperatorDiag(
                            tf.fill([n_chains, 1], 10.)))),
            ])
            step_size = 1.1

        nuts_kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
            target_log_prob_fn=mvn.log_prob,
            momentum_distribution=momentum_distribution,
            step_size=step_size,
            max_tree_depth=4)

        draws = tfp.mcmc.sample_chain(100,
                                      [tf.zeros([n_chains]) for _ in range(3)],
                                      kernel=nuts_kernel,
                                      seed=test_util.test_seed(),
                                      trace_fn=None)
        ess = tfp.mcmc.effective_sample_size(
            draws,
            cross_chain_dims=[1 for _ in draws],
            filter_threshold=0,
            filter_beyond_positive_pairs=False)
        if not use_default:
            self.assertGreaterEqual(self.evaluate(tf.reduce_min(ess)), 40.)
        else:
            self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
Example #21
    def testDefaultArgs(self):
        bs = 32
        val_size = eval_size = 1000
        datasets = data.get_datasets(shuffle_size=1)
        (dataset_train, dataset_test, subset_val, subset_test,
         subset_val2) = datasets
        x, y = next(dataset_train.__iter__())
        self.assertEqual(x.shape, [bs, 32, 32, 3])
        self.assertEqual(y.shape, [bs])
        self.assertLessEqual(tf.reduce_max(x), 1.0)
        self.assertGreaterEqual(tf.reduce_min(x), -1.)
        x, y = next(dataset_test.__iter__())
        self.assertLessEqual(tf.reduce_max(x), 1.0)
        self.assertGreaterEqual(tf.reduce_min(x), -1.)
        self.assertEqual(x.shape, [bs, 32, 32, 3])
        self.assertEqual(y.shape, [bs])

        c_iterator = subset_val.__iter__()
        x, y = next(c_iterator)
        self.assertLessEqual(tf.reduce_max(x), 1.0)
        self.assertGreaterEqual(tf.reduce_min(x), -1.0)
        self.assertEqual(x.shape, [val_size, 32, 32, 3])
        self.assertEqual(y.shape, [val_size])
        # Since chunk_size=None, it should only have one batch.
        with self.assertRaises(StopIteration):
            next(c_iterator)

        c_iterator = subset_val2.__iter__()
        x2, y2 = next(c_iterator)
        self.assertLessEqual(tf.reduce_max(x2), 1.0)
        self.assertGreaterEqual(tf.reduce_min(x2), -1.)
        self.assertEqual(x2.shape, [eval_size, 32, 32, 3])
        self.assertEqual(y2.shape, [eval_size])
        # Check that the subset's are disjoint.
        self.assertNotAllClose(x, x2)
        # Since chunk_size=None, it should only have one batch.
        with self.assertRaises(StopIteration):
            next(c_iterator)

        c_iterator = subset_test.__iter__()
        x, y = next(c_iterator)
        self.assertLessEqual(tf.reduce_max(x), 1.0)
        self.assertGreaterEqual(tf.reduce_min(x), -1.)
        self.assertEqual(x.shape, [eval_size, 32, 32, 3])
        self.assertEqual(y.shape, [eval_size])
        with self.assertRaises(StopIteration):
            next(c_iterator)
Example #22
 def _scalarize(self, transformed_multi_objectives: tf.Tensor) -> tf.Tensor:
   _validate_scalarization_parameter_shape(transformed_multi_objectives, {
       'weights': self._weights,
       'reference_point': self._reference_point
   })
   return tf.reduce_min(
       (transformed_multi_objectives - self._reference_point) * self._weights,
       axis=-1)
Example #23
 def testSyuvIsScaledYuv(self):
   """Tests that rgb_to_syuv is proportional to tf.image.rgb_to_yuv()."""
   rgb = np.float32(np.random.uniform(size=(32, 32, 3)))
   syuv = util.rgb_to_syuv(rgb)
   yuv = tf.image.rgb_to_yuv(rgb)
   # Check that the ratio between `syuv` and `yuv` is nearly constant.
   ratio = syuv / yuv
   self.assertAllClose(tf.reduce_min(ratio), tf.reduce_max(ratio))
Example #24
 def _scalarize(self, transformed_multi_objectives: tf.Tensor) -> tf.Tensor:
   transformed_multi_objectives = tf.maximum(transformed_multi_objectives, 0)
   nonzero_mask = tf.broadcast_to(
       tf.cast(tf.abs(self._direction) >= self.ALMOST_ZERO, dtype=tf.bool),
       tf.shape(transformed_multi_objectives))
   return tf.reduce_min(
       tf.where(nonzero_mask, transformed_multi_objectives / self._direction,
                transformed_multi_objectives.dtype.max),
       axis=1)
Example #25
 def call(self, multi_objectives: tf.Tensor) -> tf.Tensor:
     transformed_objectives = tf.maximum(
         multi_objectives * self._slopes + self._offsets, 0)
     nonzero_mask = tf.broadcast_to(
         tf.cast(tf.abs(self._direction) >= self.ALMOST_ZERO,
                 dtype=tf.bool), multi_objectives.shape)
     return tf.reduce_min(tf.where(nonzero_mask,
                                   transformed_objectives / self._direction,
                                   multi_objectives.dtype.max),
                          axis=1)
Example #26
    def test_tril(self, use_default):
        if tf.executing_eagerly():
            self.skipTest(
                'b/169882656 Too many warnings are issued in eager logs')
        cov = 0.9 * tf.ones([3, 3]) + 0.1 * tf.eye(3)
        scale = tf.linalg.cholesky(cov)
        mv_tril = tfd.MultivariateNormalTriL(loc=[1., 2., 3.],
                                             scale_tril=scale)

        if use_default:
            momentum_distribution = None
            step_size = 0.3
        else:
            momentum_distribution = _CompositeMultivariateNormalPrecisionFactorLinearOperator(
                # TODO(b/170015229) Don't use the covariance as inverse scale,
                # it is the wrong preconditioner.
                precision_factor=tf.linalg.LinearOperatorFullMatrix(cov), )
            step_size = 1.1
        nuts_kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
            target_log_prob_fn=mv_tril.log_prob,
            momentum_distribution=momentum_distribution,
            step_size=step_size,
            max_tree_depth=4)
        draws = tfp.mcmc.sample_chain(120,
                                      tf.zeros(3),
                                      kernel=nuts_kernel,
                                      seed=test_util.test_seed(),
                                      trace_fn=None)
        ess = tfp.mcmc.effective_sample_size(
            draws[-100:],
            filter_threshold=0,
            filter_beyond_positive_pairs=False)

        # TODO(b/170015229): These and other tests like it, which assert ess is
        # greater than some number, were all passing, even though the preconditioner
        # was the wrong one. Why is that? A guess is that since there are *many*
        # ways to have larger ess, these tests don't really test correctness.
        # Perhaps remove all tests like these.
        if not use_default:
            self.assertGreaterEqual(self.evaluate(tf.reduce_min(ess)), 40.)
        else:
            self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
Example #27
def minGamma(inputs, gamma=1):
    """Continuous relaxation of min, as defined in the D3TW paper."""
    if gamma == 0:
        minG = tf.reduce_min(inputs)
    else:
        # Log-sum-exp stabilization trick.
        zi = -inputs / gamma
        max_zi = tf.reduce_max(zi)
        log_sum_G = max_zi + tf.math.log(tf.reduce_sum(tf.math.exp(zi - max_zi)))
        minG = -gamma * log_sum_G
    return minG
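A quick, hedged check of the soft-min behaviour on hypothetical values:

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
print(tf.reduce_min(x).numpy())         # 1.0, the hard minimum
print(minGamma(x, gamma=0.01).numpy())  # ~1.0; the soft min approaches the hard min as gamma -> 0
print(minGamma(x, gamma=1.0).numpy())   # ~0.59; with larger gamma the soft min is a looser lower bound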
Example #28
 def _example_parser(range_val: int) -> Dict[str, tf.Tensor]:
     """Parses a single range integer into stateless image Tensors."""
     image = tf.random.stateless_normal(
         self._image_shape,
         [self._split_seed[split], self._split_seed[split] + range_val],
         dtype=tf.float32)
     image_min = tf.reduce_min(image)
     image_max = tf.reduce_max(image)
     # Normalize the values of the image to be in [-1, 1].
     image = 2.0 * (image - image_min) / (image_max - image_min) - 1.0
     label = tf.zeros([], tf.int32)
     return {"features": image, "labels": label}
Example #29
    def test_batches(self, use_default):
        mvn = tfd.JointDistributionSequential(
            [tfd.Normal(1., 0.1),
             tfd.Normal(2., 1.),
             tfd.Normal(3., 10.)])
        n_chains = 10
        if use_default:
            momentum_distribution = None
            step_size = 0.1
        else:
            reshape_to_scalar = tfp.bijectors.Reshape(event_shape_out=[])
            momentum_distribution = tfd.JointDistributionSequential([
                reshape_to_scalar(
                    tfd_e.MultivariateNormalInverseScaleLinearOperator(
                        0.,
                        tf.linalg.LinearOperatorDiag(
                            tf.fill([n_chains, 1], 0.1)))),
                reshape_to_scalar(
                    tfd_e.MultivariateNormalInverseScaleLinearOperator(
                        0.,
                        tf.linalg.LinearOperatorDiag(tf.fill([n_chains, 1],
                                                             1.)))),
                reshape_to_scalar(
                    tfd_e.MultivariateNormalInverseScaleLinearOperator(
                        0.,
                        tf.linalg.LinearOperatorDiag(
                            tf.fill([n_chains, 1], 10.)))),
            ])
            step_size = 0.3

        hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
            target_log_prob_fn=mvn.log_prob,
            momentum_distribution=momentum_distribution,
            step_size=step_size,
            num_leapfrog_steps=10)

        draws = tfp.mcmc.sample_chain(100,
                                      [tf.zeros([n_chains]) for _ in range(3)],
                                      kernel=hmc_kernel,
                                      seed=test_util.test_seed(),
                                      trace_fn=None)
        ess = tfp.mcmc.effective_sample_size(
            draws,
            cross_chain_dims=[1 for _ in draws],
            filter_threshold=0,
            filter_beyond_positive_pairs=False)
        if not use_default:
            self.assertAllClose(self.evaluate(ess),
                                100 * n_chains * tf.ones(3))
        else:
            self.assertLess(self.evaluate(tf.reduce_min(ess)), 50.)
Example #30
  def test_multi_state_part(self):
    mvn = tfd.JointDistributionSequential([
        tfd.Normal(1., 0.1),
        tfd.Normal(2., 1.),
        tfd.Independent(tfd.Normal(3 * tf.ones([2, 3, 4]), 10.), 3)
    ])

    if self.use_default_momentum_distribution:
      momentum_distribution = None
      step_size = 0.1
    else:
      reshape_to_scalar = tfp.bijectors.Reshape(event_shape_out=[])
      reshape_to_234 = tfp.bijectors.Reshape(event_shape_out=[2, 3, 4])
      momentum_distribution = tfd.JointDistributionSequential([
          reshape_to_scalar(
              tfde.MultivariateNormalPrecisionFactorLinearOperator(
                  precision_factor=tf.linalg.LinearOperatorDiag([0.1]))),
          reshape_to_scalar(
              tfde.MultivariateNormalPrecisionFactorLinearOperator(
                  precision_factor=tf.linalg.LinearOperatorDiag([1.]))),
          reshape_to_234(
              tfde.MultivariateNormalPrecisionFactorLinearOperator(
                  precision_factor=tf.linalg.LinearOperatorDiag(
                      tf.fill([24], 10.))))
      ])
      step_size = 0.3
    hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
        target_log_prob_fn=mvn.log_prob,
        momentum_distribution=momentum_distribution,
        step_size=step_size,
        num_leapfrog_steps=10)

    draws = tfp.mcmc.sample_chain(
        100, [0., 0., tf.zeros((2, 3, 4))],
        kernel=hmc_kernel,
        seed=test_util.test_seed(),
        trace_fn=None)
    ess = tfp.mcmc.effective_sample_size(draws,
                                         filter_threshold=0,
                                         filter_beyond_positive_pairs=False)
    if not self.use_default_momentum_distribution:
      self.assertAllClose(
          self.evaluate(ess),
          [tf.constant(100.),
           tf.constant(100.), 100. * tf.ones((2, 3, 4))])
    else:
      self.assertLess(
          self.evaluate(
              tf.reduce_min(tf.nest.map_structure(tf.reduce_min, ess))),
          50.)