def test_bad_reshape_size(self):
    dims = 2
    new_batch_shape = [2, 3]
    old_batch_shape = [2]   # 2 != 2*3

    new_batch_shape_ph = (
        constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
        else array_ops.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))

    scale = np.ones(old_batch_shape + [dims], self.dtype)
    scale_ph = array_ops.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)

    if self.is_static_shape:
      with self.assertRaisesRegexp(
          ValueError, (r"`batch_shape` size \(6\) must match "
                       r"`distribution\.batch_shape` size \(2\)")):
        batch_reshape_lib.BatchReshape(
            distribution=mvn,
            batch_shape=new_batch_shape_ph,
            validate_args=True)

    else:
      with self.test_session():
        with self.assertRaisesOpError(r"Shape sizes do not match."):
          batch_reshape_lib.BatchReshape(
              distribution=mvn,
              batch_shape=new_batch_shape_ph,
              validate_args=True).sample().eval()
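
# A minimal sketch (not part of the tests above) of the static/dynamic shape
# toggle these tests rely on: passing `shape=None` to `placeholder_with_default`
# erases the static shape, while passing the real shape keeps it.
# Assumes a TF1-style graph via tf.compat.v1.
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
value = np.ones([2, 3], np.float32)
static_ph = tf.placeholder_with_default(value, shape=value.shape)
dynamic_ph = tf.placeholder_with_default(value, shape=None)
print(static_ph.shape)   # (2, 3): fully known statically
print(dynamic_ph.shape)  # <unknown>: only determined at run time
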
  def test_broadcasting_explicitly_unsupported(self):
    old_batch_shape = [4]
    new_batch_shape = [1, 4, 1]
    rate_ = self.dtype([1, 10, 2, 20])

    rate = array_ops.placeholder_with_default(
        rate_,
        shape=old_batch_shape if self.is_static_shape else None)
    poisson_4 = poisson_lib.Poisson(rate)
    new_batch_shape_ph = (
        constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
        else array_ops.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))
    poisson_141_reshaped = batch_reshape_lib.BatchReshape(
        poisson_4, new_batch_shape_ph, validate_args=True)

    x_4 = self.dtype([2, 12, 3, 23])
    x_114 = self.dtype([2, 12, 3, 23]).reshape(1, 1, 4)

    if self.is_static_shape:
      with self.assertRaisesRegexp(NotImplementedError,
                                   "too few batch and event dims"):
        poisson_141_reshaped.log_prob(x_4)
      with self.assertRaisesRegexp(NotImplementedError,
                                   "unexpected batch and event shape"):
        poisson_141_reshaped.log_prob(x_114)
      return

    with self.assertRaisesOpError("too few batch and event dims"):
      with self.test_session():
        poisson_141_reshaped.log_prob(x_4).eval()

    with self.assertRaisesOpError("unexpected batch and event shape"):
      with self.test_session():
        poisson_141_reshaped.log_prob(x_114).eval()
  def testError(self,
                descr,
                mode,
                data,
                repeats,
                axis,
                exception=ValueError,
                error=None):
    # Make sure that this is also an error case for numpy.
    with self.assertRaises(exception):
      np.repeat(data, repeats, axis)

    if mode == 'constant':
      data = constant_op.constant(data)
      repeats = constant_op.constant(repeats)
    elif mode == 'dynamic':
      data = constant_op.constant(data)
      repeats = constant_op.constant(repeats)
      data = array_ops.placeholder_with_default(data, data.shape)
      repeats = array_ops.placeholder_with_default(repeats, repeats.shape)
    elif mode == 'unknown_shape':
      data = array_ops.placeholder_with_default(data, None)
      repeats = array_ops.placeholder_with_default(repeats, None)

    with self.assertRaisesRegexp(exception, error):
      ragged_util.repeat(data, repeats, axis)
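
# For reference, a quick NumPy illustration (a sketch, not from the test suite)
# of the repeat semantics mirrored above: `repeats` must broadcast against the
# length of `data` along `axis`, otherwise NumPy raises.
import numpy as np

print(np.repeat([[1, 2], [3, 4]], repeats=[2, 1], axis=0).tolist())
# [[1, 2], [1, 2], [3, 4]]
# np.repeat([[1, 2], [3, 4]], repeats=[2, 1, 1], axis=0)  # ValueError
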
  def testBatchFunctionOpWithCapturedInput(self):
    """Tests that batch_function op works with captured input."""
    with self.test_session() as sess:
      captured_inp0 = array_ops.placeholder_with_default(2, shape=[])
      captured_inp1 = array_ops.placeholder_with_default(1, shape=[])
      inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])

      @function.Defun(dtypes.int32)
      def computation(inp):
        return inp + captured_inp0 - captured_inp1

      result = gen_batch_ops.batch_function(
          num_batch_threads=1,
          max_batch_size=10,
          batch_timeout_micros=100000,  # 100ms
          allowed_batch_sizes=[3, 10],
          batching_queue="",
          f=computation,
          in_tensors=[inp],
          captured_tensors=computation.captured_inputs,
          Tout=[o.type for o in computation.definition.signature.output_arg])

      thread_results = []

      def worker():
        thread_results.extend(sess.run([result], feed_dict={inp: [1]}))

      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([result], feed_dict={inp: [2]})
      worker_thread.join()
      self.assertEqual(thread_results[0], [2])
      self.assertEqual(main_results[0], [3])
  def testBatchDecoratedWithCapturedInput(self):
    """Tests that the batch_function decorator works."""
    if context.executing_eagerly():
      return
    with self.cached_session() as sess:
      captured_inp0 = array_ops.placeholder_with_default(2, shape=[])
      captured_inp1 = array_ops.placeholder_with_default(1, shape=[])

      @batch_ops.batch_function(1, 10, 100000)
      def computation(in_t):
        return in_t + captured_inp0 - captured_inp1

      inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      result = computation(inp)
      thread_results = []

      def worker():
        thread_results.extend(sess.run([result], feed_dict={inp: [1]}))

      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([result], feed_dict={inp: [2]})
      worker_thread.join()
      self.assertEqual(thread_results[0], [2])
      self.assertEqual(main_results[0], [3])
  def test_non_vector_shape(self):
    dims = 2
    new_batch_shape = 2
    old_batch_shape = [2]

    new_batch_shape_ph = (
        constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
        else array_ops.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))

    scale = np.ones(old_batch_shape + [dims], self.dtype)
    scale_ph = array_ops.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)

    if self.is_static_shape:
      with self.assertRaisesRegexp(ValueError, r".*must be a vector.*"):
        batch_reshape_lib.BatchReshape(
            distribution=mvn,
            batch_shape=new_batch_shape_ph,
            validate_args=True)

    else:
      with self.test_session():
        with self.assertRaisesOpError(r".*must be a vector.*"):
          batch_reshape_lib.BatchReshape(
              distribution=mvn,
              batch_shape=new_batch_shape_ph,
              validate_args=True).sample().eval()
  def test_non_positive_shape(self):
    dims = 2
    old_batch_shape = [4]
    if self.is_static_shape:
      # Unknown first dimension does not trigger size check. Note that
      # any dimension < 0 is treated statically as unknown.
      new_batch_shape = [-1, 0]
    else:
      new_batch_shape = [-2, -2]  # -2 * -2 = 4, same size as the old shape.

    new_batch_shape_ph = (
        constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
        else array_ops.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))

    scale = np.ones(old_batch_shape + [dims], self.dtype)
    scale_ph = array_ops.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)

    if self.is_static_shape:
      with self.assertRaisesRegexp(ValueError, r".*must be >=-1.*"):
        batch_reshape_lib.BatchReshape(
            distribution=mvn,
            batch_shape=new_batch_shape_ph,
            validate_args=True)

    else:
      with self.test_session():
        with self.assertRaisesOpError(r".*must be >=-1.*"):
          batch_reshape_lib.BatchReshape(
              distribution=mvn,
              batch_shape=new_batch_shape_ph,
              validate_args=True).sample().eval()
  def testCDFWithDynamicEventShapeUnknownNdims(
      self, events, histograms, expected_cdf):
    """Test that dynamically-sized events with unknown shape work."""
    event_ph = array_ops.placeholder_with_default(events, shape=None)
    histograms_ph = array_ops.placeholder_with_default(histograms, shape=None)
    dist = categorical.Categorical(probs=histograms_ph)
    cdf_op = dist.cdf(event_ph)

    actual_cdf = self.evaluate(cdf_op)
    self.assertAllClose(actual_cdf, expected_cdf)
 def testRaggedTensorSplitsMismatchErrorAtRuntime(self):
   splits1 = array_ops.placeholder_with_default(
       constant_op.constant([0, 3, 3, 5], dtypes.int64), None)
   splits2 = array_ops.placeholder_with_default(
       constant_op.constant([0, 1, 3, 5], dtypes.int64), None)
   x = ragged_tensor.RaggedTensor.from_row_splits([3, 1, 4, 1, 5], splits1)
   y = ragged_tensor.RaggedTensor.from_row_splits([1, 2, 3, 4, 5], splits2)
   with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                r'.*Inputs must have identical ragged splits'):
     self.evaluate(ragged_functional_ops.map_flat_values(math_ops.add, x, y))
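
# A small sketch of the "identical ragged splits" requirement exercised above.
# Assumes TF2 eager mode and uses the public tf.ragged API rather than the
# internal modules imported by the test.
import tensorflow as tf

x = tf.ragged.constant([[3, 1, 4], [], [1, 5]])
y = tf.ragged.constant([[1, 2, 3], [], [4, 5]])
# Same row splits, so the flat values are added elementwise.
print(tf.ragged.map_flat_values(tf.add, x, y))  # [[4, 3, 7], [], [5, 10]]
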
 def setUp(self):
   super(BatchSequencesWithStatesTest, self).setUp()
   self.value_length = 4
   ind1 = np.array([
       [0, 0],
       [1, 0], [1, 3], [1, 4],
       [3, 2], [3, 3]])
   val1 = np.array([0, 10, 13, 14, 32, 33])
   shape1 = np.array([self.value_length, 6])
   sp_tensor1 = sparse_tensor.SparseTensor(
       array_ops.constant(ind1, dtypes.int64),
       array_ops.constant(val1, dtypes.int64),
       array_ops.placeholder_with_default(shape1, shape=[2]))
   ind2 = np.array([
       [0, 0, 1],
       [0, 1, 0],
       [0, 1, 2],
       [1, 0, 3],
       [1, 1, 0],
       [1, 1, 1],
       [1, 1, 2],
       [1, 2, 2]])
   val2 = np.array([1, 10, 12, 103, 150, 149, 150, 122])
   shape2 = np.array([self.value_length, 3, 4])
   sp_tensor2 = sparse_tensor.SparseTensor(
       array_ops.constant(ind2, dtypes.int64),
       array_ops.constant(val2, dtypes.int64),
       array_ops.placeholder_with_default(shape2, shape=[3]))
   sp_tensor3 = sparse_tensor.SparseTensor(
       array_ops.constant([[1, 9], [2, 2], [2, 10]], dtypes.int64),
       array_ops.constant([7, 15, 2], dtypes.int64),
       array_ops.constant([5, 12], dtypes.int64)
   )
   self.sp_tensor3_expected = sparse_tensor.SparseTensorValue(
       [[0, 1, 9], [0, 2, 2], [0, 2, 10], [1, 1, 9], [1, 2, 2], [1, 2, 10]],
       [7, 15, 2, 7, 15, 2],
       [2, 5, 12]
   )
   self.batch_size = 2
   self.key = string_ops.string_join([
       "key_", string_ops.as_string(
           math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
   ])
   self.sequences = {
       "seq1": np.random.rand(self.value_length, 5),
       "seq2": np.random.rand(self.value_length, 4, 2),
       "seq3": sp_tensor1,
       "seq4": sp_tensor2}
   self.context = {
       "context1": [3, 4],
       "sp_context": sp_tensor3}
   self.initial_states = {
       "state1": np.random.rand(6, 7),
       "state2": np.random.rand(8)
   }
 def testDynamicShapes(self):
   for dtype in [dtypes.float32, dtypes.float64]:
     default_x1 = constant_op.constant(0.1, dtype=dtype)
     default_x2 = constant_op.constant(3.1, dtype=dtype)
     x1 = array_ops.placeholder_with_default(default_x1, shape=None)
     x2 = array_ops.placeholder_with_default(default_x2, shape=None)
     dx1, dx2 = self._nextafter_gradient(x1, x2)
     expected_dx1 = constant_op.constant(1, dtype=dtype)
     expected_dx2 = constant_op.constant(0, dtype=dtype)
     self.assertAllClose(expected_dx1, dx1)
     self.assertAllClose(expected_dx2, dx2)
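
# For intuition (a sketch, not part of the test): nextafter(x1, x2) moves x1 by
# one ULP toward x2, so its derivative is 1 w.r.t. x1 and 0 w.r.t. x2 almost
# everywhere, which is what the expected gradients above encode.
import numpy as np

print(np.nextafter(0.1, 3.1) - 0.1)  # one float64 ULP, roughly 1.4e-17
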
 def testSampleProbConsistent(self):
   with self.test_session() as sess:
     pln = poisson_lognormal.PoissonLogNormalQuadratureCompound(
         loc=array_ops.placeholder_with_default(
             -2.,
             shape=[] if self.static_shape else None),
         scale=array_ops.placeholder_with_default(
             1.1,
             shape=[] if self.static_shape else None),
         quadrature_size=10,
         validate_args=True)
     self.run_test_sample_consistent_log_prob(
         sess.run, pln, batch_size=1, rtol=0.1)
 def testRaggedTensorSplitsMismatchErrorAtRuntime(self):
   splits1 = array_ops.placeholder_with_default(
       constant_op.constant([0, 3, 3, 5], dtypes.int64), None)
   splits2 = array_ops.placeholder_with_default(
       constant_op.constant([0, 1, 3, 5], dtypes.int64), None)
   x = ragged.from_row_splits([3, 1, 4, 1, 5], splits1)
   y = ragged.from_row_splits([1, 2, 3, 4, 5], splits2)
   result = ragged.map_inner_values(math_ops.add, x, y)
   with self.test_session():
     self.assertRaisesRegexp(
         errors.InvalidArgumentError,
         r'\[Inputs must have identical ragged splits\] '
         r'\[Condition x == y did not hold element-wise:\].*', result.eval)
 def testMeanVarianceBroadcastScalar(self):
   with self.test_session() as sess:
     pln = poisson_lognormal.PoissonLogNormalQuadratureCompound(
         loc=array_ops.placeholder_with_default(
             [0., -0.5],
             shape=[2] if self.static_shape else None),
         scale=array_ops.placeholder_with_default(
             1.,
             shape=[] if self.static_shape else None),
         quadrature_size=10,
         validate_args=True)
     self.run_test_sample_consistent_mean_variance(
         sess.run, pln, rtol=0.1, atol=0.01)
 def testSampleProbConsistentBroadcastBoth(self):
   with self.test_session() as sess:
     pln = poisson_lognormal.PoissonLogNormalQuadratureCompound(
         loc=array_ops.placeholder_with_default(
             [[0.], [-0.5]],
             shape=[2, 1] if self.static_shape else None),
         scale=array_ops.placeholder_with_default(
             [[1., 0.9]],
             shape=[1, 2] if self.static_shape else None),
         quadrature_size=10,
         validate_args=True)
     self.run_test_sample_consistent_log_prob(
         sess.run, pln, batch_size=4, rtol=0.1, atol=0.08)
  def make_mvn(self, dims, new_batch_shape, old_batch_shape):
    new_batch_shape_ph = (
        constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
        else array_ops.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))

    scale = np.ones(old_batch_shape + [dims], self.dtype)
    scale_ph = array_ops.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
    reshape_mvn = batch_reshape_lib.BatchReshape(
        distribution=mvn,
        batch_shape=new_batch_shape_ph,
        validate_args=True)
    return mvn, reshape_mvn
  def operator_and_matrix(
      self, build_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):
    shape = list(build_info.shape)
    row = np.random.uniform(low=1., high=5., size=shape[:-1])
    col = np.random.uniform(low=1., high=5., size=shape[:-1])

    # Make sure first entry is the same
    row[..., 0] = col[..., 0]

    if ensure_self_adjoint_and_pd:
      # Note that a Toeplitz matrix generated from a linearly decreasing
      # non-negative sequence is positive definite. See
      # https://www.math.cinvestav.mx/~grudsky/Papers/118_29062012_Albrecht.pdf
      # for details.
      row = np.linspace(start=10., stop=1., num=shape[-1])

      # The entries for the first row and column should be the same to
      # guarantee symmetry.
      col = row

    lin_op_row = math_ops.cast(row, dtype=dtype)
    lin_op_col = math_ops.cast(col, dtype=dtype)

    if use_placeholder:
      lin_op_row = array_ops.placeholder_with_default(
          lin_op_row, shape=None)
      lin_op_col = array_ops.placeholder_with_default(
          lin_op_col, shape=None)

    operator = linear_operator_toeplitz.LinearOperatorToeplitz(
        row=lin_op_row,
        col=lin_op_col,
        is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
        is_positive_definite=True if ensure_self_adjoint_and_pd else None)

    flattened_row = np.reshape(row, (-1, shape[-1]))
    flattened_col = np.reshape(col, (-1, shape[-1]))
    flattened_toeplitz = np.zeros(
        [flattened_row.shape[0], shape[-1], shape[-1]])
    for i in range(flattened_row.shape[0]):
      flattened_toeplitz[i] = scipy.linalg.toeplitz(
          flattened_col[i],
          flattened_row[i])
    matrix = np.reshape(flattened_toeplitz, shape)
    matrix = math_ops.cast(matrix, dtype=dtype)

    return operator, matrix
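
# A quick illustration (sketch only) of how scipy builds the dense Toeplitz
# reference used above: the first column and the first row define every
# diagonal of the matrix.
import scipy.linalg

print(scipy.linalg.toeplitz([1., 2., 3.], [1., 10., 20.]))
# [[ 1. 10. 20.]
#  [ 2.  1. 10.]
#  [ 3.  2.  1.]]
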
 def test_placeholder_with_default_fed(self):
   with self.test_session() as sess, self.test_scope():
     v = resource_variable_ops.ResourceVariable(4.0)
     ph = array_ops.placeholder_with_default(v, shape=[])
     out = ph * 2
     sess.run(variables.variables_initializer([v]))
     self.assertEqual(2.0, sess.run(out, {ph: 1.0}))
 def _serving_input_receiver_fn():
   """A receiver function to be passed to export_savedmodel."""
   placeholders = {}
   placeholders[feature_keys.TrainEvalFeatures.TIMES] = (
       array_ops.placeholder(
           name=feature_keys.TrainEvalFeatures.TIMES,
           dtype=dtypes.int64,
           shape=[default_batch_size, default_series_length]))
   # Values are only necessary when filtering. For prediction the default
   # value will be ignored.
   placeholders[feature_keys.TrainEvalFeatures.VALUES] = (
       array_ops.placeholder_with_default(
           name=feature_keys.TrainEvalFeatures.VALUES,
           input=array_ops.zeros(
               shape=[
                   default_batch_size
                   if default_batch_size else 0, default_series_length
                   if default_series_length else 0, self._model.num_features
               ],
               dtype=self._model.dtype),
           shape=(default_batch_size, default_series_length,
                  self._model.num_features)))
   if self._model.exogenous_feature_columns:
     with ops.Graph().as_default():
       # Default placeholders have only an unknown batch dimension. Make them
       # in a separate graph, then splice in the series length to the shapes
       # and re-create them in the outer graph.
       parsed_features = (
           feature_column.make_parse_example_spec(
               self._model.exogenous_feature_columns))
       placeholder_features = parsing_ops.parse_example(
           serialized=array_ops.placeholder(
               shape=[None], dtype=dtypes.string),
           features=parsed_features)
       exogenous_feature_shapes = {
           key: (value.get_shape(), value.dtype) for key, value
           in placeholder_features.items()}
     for feature_key, (batch_only_feature_shape, value_dtype) in (
         exogenous_feature_shapes.items()):
       batch_only_feature_shape = (
           batch_only_feature_shape.with_rank_at_least(1).as_list())
       feature_shape = ([default_batch_size, default_series_length]
                        + batch_only_feature_shape[1:])
       placeholders[feature_key] = array_ops.placeholder(
           dtype=value_dtype, name=feature_key, shape=feature_shape)
   # Models may not know the shape of their state without creating some
   # variables/ops. Avoid polluting the default graph by making a new one. We
   # use only static metadata from the returned Tensors.
   with ops.Graph().as_default():
     self._model.initialize_graph()
     model_start_state = self._model.get_start_state()
   for prefixed_state_name, state_tensor in ts_head_lib.state_to_dictionary(
       model_start_state).items():
     state_shape_with_batch = tensor_shape.TensorShape(
         (default_batch_size,)).concatenate(state_tensor.get_shape())
     placeholders[prefixed_state_name] = array_ops.placeholder(
         name=prefixed_state_name,
         shape=state_shape_with_batch,
         dtype=state_tensor.dtype)
   return export_lib.ServingInputReceiver(placeholders, placeholders)
 def testDimTooLarge(self):
   with self.test_session():
     # Use placeholder to make sure we get runtime error instead of shape
     # inference error.
     dim = array_ops.placeholder_with_default(100, shape=[])
     with self.assertRaises(errors_impl.InvalidArgumentError):
       nn_ops.softmax([1., 2., 3., 4.], axis=dim).eval()
  def testRaggedTile(self,
                     descr,
                     rt_input,
                     multiples,
                     expected,
                     ragged_rank=None):
    rt = ragged_factory_ops.constant(rt_input, ragged_rank)

    expected_shape = [
        None if dim is None else dim * multiple
        for (dim, multiple) in zip(rt.shape.as_list(), multiples)
    ]

    # Test with both const & non-const multiples: ragged_tile has a few code
    # paths that optimize the case where multiples[d] is known to be 1.
    const_multiples = constant_op.constant(multiples, dtypes.int64)
    non_const_multiples = array_ops.placeholder_with_default(
        const_multiples, shape=[len(multiples)])

    for multiples_tensor in (const_multiples, non_const_multiples):
      tiled = ragged_array_ops.tile(rt, multiples_tensor)
      self.assertEqual(tiled.ragged_rank, rt.ragged_rank)
      self.assertEqual(tiled.shape.ndims, rt.shape.ndims)
      if multiples_tensor is const_multiples:
        self.assertEqual(tiled.shape.as_list(), expected_shape)
      with self.test_session():
        self.assertEqual(tiled.eval().tolist(), expected)
 def _run_test(self, x_, use_deferred_shape=False, **kwargs):
   x_ = np.asarray(x_)
   with self.cached_session() as sess:
     static_shape = None if use_deferred_shape else x_.shape
     x_pl = array_ops.placeholder_with_default(x_, shape=static_shape)
     # Add `zeros_like(x)` such that x's value and gradient are identical. We
     # do this so we can ensure each gradient value is mapped to the right
      # gradient location.  (Not doing this means the gradient wrt `x` is simply
     # `ones_like(x)`.)
     # Note:
     #   zeros_like_x_pl == zeros_like(x_pl)
     #   gradient(zeros_like_x_pl, x_pl) == x_pl - 1
     zeros_like_x_pl = (x_pl * array_ops.stop_gradient(x_pl - 1.)
                        - array_ops.stop_gradient(x_pl * (x_pl - 1.)))
     x = x_pl + zeros_like_x_pl
     actual = du.fill_triangular(x, **kwargs)
     grad_actual = gradients_impl.gradients(actual, x_pl)[0]
     [actual_, grad_actual_] = sess.run([actual, grad_actual],
                                        feed_dict={x_pl: x_})
   expected = self._fill_triangular(x_, **kwargs)
   if use_deferred_shape:
     self.assertEqual(None, actual.shape)
   else:
     self.assertAllEqual(expected.shape, actual.shape)
   self.assertAllClose(expected, actual_, rtol=1e-8, atol=1e-9)
   self.assertAllClose(x_, grad_actual_, rtol=1e-8, atol=1e-9)
  def _operator_and_matrix(
      self, build_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):
    shape = build_info.shape
    # For this test class, we are creating real spectrums.
    # We also want the spectrum to have eigenvalues bounded away from zero.
    #
    # spectrum is bounded away from zero.
    spectrum = linear_operator_test_util.random_sign_uniform(
        shape=self._shape_to_spectrum_shape(shape), minval=1., maxval=2.)
    if ensure_self_adjoint_and_pd:
      spectrum = math_ops.abs(spectrum)
    # If dtype is complex, cast spectrum to complex.  The imaginary part will be
    # zero, so the operator will still be self-adjoint.
    spectrum = math_ops.cast(spectrum, dtype)

    lin_op_spectrum = spectrum

    if use_placeholder:
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)

    operator = linalg.LinearOperatorCirculant(
        lin_op_spectrum,
        is_self_adjoint=True,
        is_positive_definite=True if ensure_self_adjoint_and_pd else None,
        input_output_dtype=dtype)

    mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)

    return operator, mat
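
# Sketch (plain NumPy, not the TF implementation) of the circulant/spectrum
# correspondence these tests rely on: a circulant matrix is diagonalized by the
# DFT, so the IFFT of its spectrum recovers the first column. The result is
# real here because this small spectrum is conjugate-symmetric.
import numpy as np

spectrum = np.array([6., 2., 2.])
first_col = np.fft.ifft(spectrum)
print(np.real(first_col))  # first column of the corresponding circulant matrix
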
 def _model_start_state_placeholders(
     self, batch_size_tensor, static_batch_size=None):
   """Creates placeholders with zeroed start state for the current model."""
   gathered_state = {}
   # Models may not know the shape of their state without creating some
   # variables/ops. Avoid polluting the default graph by making a new one. We
   # use only static metadata from the returned Tensors.
   with ops.Graph().as_default():
     self._model.initialize_graph()
     # Evaluate the initial state as same-dtype "zero" values. These zero
     # constants aren't used, but are necessary for feeding to
     # placeholder_with_default for the "cold start" case where state is not
     # fed to the model.
     def _zeros_like_constant(tensor):
       return tensor_util.constant_value(array_ops.zeros_like(tensor))
     start_state = nest.map_structure(
         _zeros_like_constant, self._model.get_start_state())
   for prefixed_state_name, state in ts_head_lib.state_to_dictionary(
       start_state).items():
     state_shape_with_batch = tensor_shape.TensorShape(
         (static_batch_size,)).concatenate(state.shape)
     default_state_broadcast = array_ops.tile(
         state[None, ...],
         multiples=array_ops.concat(
             [batch_size_tensor[None],
              array_ops.ones(len(state.shape), dtype=dtypes.int32)],
             axis=0))
     gathered_state[prefixed_state_name] = array_ops.placeholder_with_default(
         input=default_state_broadcast,
         name=prefixed_state_name,
         shape=state_shape_with_batch)
   return gathered_state
  def testConcurrentReaders(self):
    count_placeholder = array_ops.placeholder_with_default(
        constant_op.constant(5, dtypes.int64), shape=[])
    dataset = dataset_ops.Dataset.range(count_placeholder).cache()
    d1 = dataset.map(lambda x: x + 1)
    d2 = dataset.map(lambda x: x + 6)

    i1 = d1.make_initializable_iterator()
    i2 = d2.make_initializable_iterator()

    with self.cached_session() as sess:
      sess.run(i1.initializer)

      self.assertEqual(1, sess.run(i1.get_next()))
      self.assertEqual(2, sess.run(i1.get_next()))
      self.assertEqual(3, sess.run(i1.get_next()))

      sess.run(i2.initializer, feed_dict={count_placeholder: 3})

      self.assertEqual(6, sess.run(i2.get_next()))
      self.assertEqual(7, sess.run(i2.get_next()))
      self.assertEqual(4, sess.run(i1.get_next()))  # interleave execution
      self.assertEqual([8, 5], sess.run([i2.get_next(), i1.get_next()]))

      with self.assertRaises(errors.OutOfRangeError):
        sess.run(i1.get_next())
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(i2.get_next())
  def _operator_and_matrix(self, build_info, dtype, use_placeholder):
    shape = list(build_info.shape)

    # Either 1 or 2 matrices, chosen at random.
    num_operators = rng.randint(low=1, high=3)
    matrices = [
        linear_operator_test_util.random_positive_definite_matrix(
            shape, dtype, force_well_conditioned=True)
        for _ in range(num_operators)
    ]

    lin_op_matrices = matrices

    if use_placeholder:
      lin_op_matrices = [
          array_ops.placeholder_with_default(
              matrix, shape=None) for matrix in matrices]

    operator = linalg.LinearOperatorComposition(
        [linalg.LinearOperatorFullMatrix(l) for l in lin_op_matrices],
        is_square=True)

    matmul_order_list = list(reversed(matrices))
    mat = matmul_order_list[0]
    for other_mat in matmul_order_list[1:]:
      mat = math_ops.matmul(other_mat, mat)

    return operator, mat
  def _operator_and_matrix(self, build_info, dtype, use_placeholder):
    shape = list(build_info.shape)
    assert shape[-1] == shape[-2]

    batch_shape = shape[:-2]
    num_rows = shape[-1]

    # Uniform values at least distance 1 from the origin, which keeps the
    # operator well conditioned.
    # Shape: batch_shape
    multiplier = linear_operator_test_util.random_sign_uniform(
        shape=batch_shape, minval=1., maxval=2., dtype=dtype)


    # Nothing to feed since LinearOperatorScaledIdentity takes no Tensor args.
    lin_op_multiplier = multiplier

    if use_placeholder:
      lin_op_multiplier = array_ops.placeholder_with_default(
          multiplier, shape=None)

    operator = linalg_lib.LinearOperatorScaledIdentity(
        num_rows, lin_op_multiplier)

    multiplier_matrix = array_ops.expand_dims(
        array_ops.expand_dims(multiplier, -1), -1)
    matrix = multiplier_matrix * linalg_ops.eye(
        num_rows, batch_shape=batch_shape, dtype=dtype)

    return operator, matrix
  def _operator_and_matrix(self, build_info, dtype, use_placeholder):
    shape = list(build_info.shape)
    expected_factors = build_info.__dict__["factors"]
    matrices = [
        linear_operator_test_util.random_positive_definite_matrix(
            block_shape, dtype, force_well_conditioned=True)
        for block_shape in expected_factors
    ]

    lin_op_matrices = matrices

    if use_placeholder:
      lin_op_matrices = [
          array_ops.placeholder_with_default(m, shape=None) for m in matrices]

    operator = kronecker.LinearOperatorKronecker(
        [linalg.LinearOperatorFullMatrix(
            l, is_square=True) for l in lin_op_matrices])

    matrices = linear_operator_util.broadcast_matrix_batch_dims(matrices)

    kronecker_dense = _kronecker_dense(matrices)

    if not use_placeholder:
      kronecker_dense.set_shape(shape)

    return operator, kronecker_dense
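
# Sketch of the Kronecker product the dense reference above reproduces, in
# plain NumPy: the kron of a 2x2 block and a 3x3 block is a 6x6 matrix.
import numpy as np

a = np.array([[1., 2.], [3., 4.]])
b = np.eye(3)
print(np.kron(a, b).shape)  # (6, 6)
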
  def make_normal(self, new_batch_shape, old_batch_shape):
    new_batch_shape_ph = (
        constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
        else array_ops.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))

    scale = self.dtype(0.5 + np.arange(
        np.prod(old_batch_shape)).reshape(old_batch_shape))
    scale_ph = array_ops.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    normal = normal_lib.Normal(loc=self.dtype(0), scale=scale_ph)
    reshape_normal = batch_reshape_lib.BatchReshape(
        distribution=normal,
        batch_shape=new_batch_shape_ph,
        validate_args=True)
    return normal, reshape_normal
  def testBasicUnbatchDecorated(self):
    """Tests that the batch_function decorator works."""
    if context.executing_eagerly():
      return
    with self.cached_session() as sess:
      # TODO(apassos): Removing this line causes test flakiness! Ideally should
      # be investigated.
      default_inp = array_ops.placeholder_with_default(2, shape=[])  # pylint: disable=unused-variable

      @batch_ops.batch_function(1, 10, 100000)
      def computation(in_t):
        self.assertTrue(in_t.shape is not None)
        return in_t + 1

      inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      result = computation(inp)
      thread_results = []

      def worker():
        thread_results.extend(sess.run([result], feed_dict={inp: [1]}))

      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([result], feed_dict={inp: [2]})
      worker_thread.join()
      self.assertEqual(thread_results[0], [2])
      self.assertEqual(main_results[0], [3])
def _unused_handle():
    """Returns a placeholder as a handle that is not supposed to be accessed."""
    error_message = (
        "Trying to access a placeholder that is not supposed to be "
        "executed. This means you are executing a graph generated "
        "from the cross-replica context in an in-replica context.")
    save_error_message = (
        "It seems that you are trying to save a "
        "tf.types.experimental.ConcreteFunction that involves a distributed "
        "model, and the model contains parts that are loaded form a SavedModel. "
        "It's not supported to save such tf.types.experimental.ConcreteFunction. "
        "Try saving a tf.function with input_signature instead, and file a bug if"
        " there are still issues.")

    assert_op = control_flow_ops.Assert(
        array_ops.placeholder_with_default(False, shape=()), [error_message])
    if (not context.executing_eagerly()
        ) and ops.get_default_graph().building_function:
        ops.get_default_graph().mark_as_unsaveable(save_error_message)

    with ops.control_dependencies([assert_op]):
        return array_ops.placeholder(dtype=dtypes.resource)
    def testCreateZerosSlotFromDynamicShapedVariable(self):
        # slot_creator is used only in optimizer V1.
        with ops.Graph().as_default(), self.cached_session():
            dyn_shape = constant_op.constant([2], dtype=dtypes.int32)
            dyn_shape = array_ops.placeholder_with_default(dyn_shape,
                                                           shape=[None])
            v = variable_scope.get_variable(
                "var",
                initializer=random_ops.random_uniform(dyn_shape,
                                                      dtype=dtypes.float64),
                validate_shape=False)
            with ops.control_dependencies(None):
                slot = slot_creator.create_zeros_slot(v,
                                                      name="slot",
                                                      dtype=dtypes.float64)

            self.evaluate(variables.global_variables_initializer())

            self.assertEqual("var/slot", slot.op.name)
            self.assertEqual([2], array_ops.shape(slot).eval())
            self.assertEqual(dtypes.float64, slot.dtype.base_dtype)
            self.assertAllEqual([0.0, 0.0], self.evaluate(slot))
  def _operator_and_matrix(
      self, build_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):
    shape = build_info.shape
    # For this test class, we are creating Hermitian spectrums.
    # We also want the spectrum to have eigenvalues bounded away from zero.
    #
    # pre_spectrum is bounded away from zero.
    pre_spectrum = linear_operator_test_util.random_uniform(
        shape=self._shape_to_spectrum_shape(shape), minval=1., maxval=2.)
    pre_spectrum_c = _to_complex(pre_spectrum)

    # Real{IFFT[pre_spectrum]}
    #  = IFFT[EvenPartOf[pre_spectrum]]
    # is the IFFT of something that is also bounded away from zero.
    # Therefore, FFT[pre_h] would be a well-conditioned spectrum.
    pre_h = fft_ops.ifft2d(pre_spectrum_c)

    # A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
    # So we will make spectrum = FFT[h], for real valued h.
    h = math_ops.real(pre_h)
    h_c = _to_complex(h)

    spectrum = fft_ops.fft2d(h_c)

    lin_op_spectrum = spectrum

    if use_placeholder:
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)

    operator = linalg.LinearOperatorCirculant2D(
        lin_op_spectrum,
        is_positive_definite=True if ensure_self_adjoint_and_pd else None,
        is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
        input_output_dtype=dtype)

    mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)

    return operator, mat
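
# Quick NumPy check (a sketch) of the property the construction above uses:
# the DFT of a real kernel has Hermitian symmetry, spectrum[k] == conj(spectrum[-k]).
import numpy as np

h = np.random.rand(8)
spectrum = np.fft.fft(h)
idx = (-np.arange(8)) % 8
print(np.allclose(spectrum, np.conj(spectrum[idx])))  # True
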
  def operator_and_matrix(
      self, build_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):

    shape = list(build_info.shape)
    assert shape[-1] == shape[-2]

    batch_shape = shape[:-2]
    num_rows = shape[-1]

    # Uniform values at least distance 1 from the origin, which keeps the
    # operator well conditioned.
    # Shape: batch_shape
    multiplier = linear_operator_test_util.random_sign_uniform(
        shape=batch_shape, minval=1., maxval=2., dtype=dtype)

    if ensure_self_adjoint_and_pd:
      # Abs on complex64 will result in a float32, so we cast back up.
      multiplier = math_ops.cast(math_ops.abs(multiplier), dtype=dtype)

    # Nothing to feed since LinearOperatorScaledIdentity takes no Tensor args.
    lin_op_multiplier = multiplier

    if use_placeholder:
      lin_op_multiplier = array_ops.placeholder_with_default(
          multiplier, shape=None)

    operator = linalg_lib.LinearOperatorScaledIdentity(
        num_rows,
        lin_op_multiplier,
        is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
        is_positive_definite=True if ensure_self_adjoint_and_pd else None)

    multiplier_matrix = array_ops.expand_dims(
        array_ops.expand_dims(multiplier, -1), -1)
    matrix = multiplier_matrix * linalg_ops.eye(
        num_rows, batch_shape=batch_shape, dtype=dtype)

    return operator, matrix
    def _operator_and_matrix(self, build_info, dtype, use_placeholder):
        shape = list(build_info.shape)
        expected_blocks = (build_info.__dict__["blocks"]
                           if "blocks" in build_info.__dict__ else [shape])
        matrices = [
            linear_operator_test_util.random_positive_definite_matrix(
                block_shape, dtype, force_well_conditioned=True)
            for block_shape in expected_blocks
        ]

        lin_op_matrices = matrices

        if use_placeholder:
            lin_op_matrices = [
                array_ops.placeholder_with_default(matrix, shape=None)
                for matrix in matrices
            ]

        operator = block_diag.LinearOperatorBlockDiag([
            linalg.LinearOperatorFullMatrix(l, is_square=True)
            for l in lin_op_matrices
        ])

        # Should be auto-set.
        self.assertTrue(operator.is_square)

        # Broadcast the shapes.
        expected_shape = list(build_info.shape)

        matrices = linear_operator_util.broadcast_matrix_batch_dims(matrices)

        block_diag_dense = _block_diag_dense(expected_shape, matrices)

        if not use_placeholder:
            block_diag_dense.set_shape(
                expected_shape[:-2] + [expected_shape[-1], expected_shape[-1]])

        return operator, block_diag_dense
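
# The dense reference built above is an ordinary block-diagonal matrix; a
# scipy sketch of the same idea:
import scipy.linalg

print(scipy.linalg.block_diag([[1., 2.], [3., 4.]], [[5.]]))
# [[1. 2. 0.]
#  [3. 4. 0.]
#  [0. 0. 5.]]
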
    def check_results_versus_brute_force(self, x, axis, max_lags, center,
                                         normalize):
        """Compute auto-correlation by brute force, then compare to tf result."""
        # Brute-force auto-corr -- avoiding fft and transpositions.
        axis_len = x.shape[axis]
        if max_lags is None:
            max_lags = axis_len - 1
        else:
            max_lags = min(axis_len - 1, max_lags)
        auto_corr_at_lag = []
        if center:
            x -= x.mean(axis=axis, keepdims=True)
        for m in range(max_lags + 1):
            auto_corr_at_lag.append(
                (np.take(x, indices=range(0, axis_len - m), axis=axis) *
                 np.conj(np.take(x, indices=range(m, axis_len),
                                 axis=axis))).mean(axis=axis, keepdims=True))
        rxx = np.concatenate(auto_corr_at_lag, axis=axis)
        if normalize:
            rxx /= np.take(rxx, [0], axis=axis)

        x_ph = array_ops.placeholder_with_default(
            x, shape=x.shape if self.use_static_shape else None)
        with spectral_ops_test_util.fft_kernel_label_map():
            with self.test_session():
                auto_corr = sample_stats.auto_correlation(x_ph,
                                                          axis=axis,
                                                          max_lags=max_lags,
                                                          center=center,
                                                          normalize=normalize)
                if self.use_static_shape:
                    output_shape = list(x.shape)
                    output_shape[axis] = max_lags + 1
                    self.assertAllEqual(output_shape, auto_corr.shape)
                self.assertAllClose(rxx,
                                    auto_corr.eval(),
                                    rtol=1e-5,
                                    atol=1e-5)
  def operator_and_matrix(self,
                          shape_info,
                          dtype,
                          use_placeholder,
                          ensure_self_adjoint_and_pd=False):
    del ensure_self_adjoint_and_pd
    shape = shape_info.shape
    # Will be well conditioned enough to get accurate solves.
    spectrum = linear_operator_test_util.random_sign_uniform(
        shape=self._shape_to_spectrum_shape(shape),
        dtype=dtype,
        minval=1.,
        maxval=2.)

    lin_op_spectrum = spectrum

    if use_placeholder:
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)

    operator = linalg.LinearOperatorCirculant2D(
        lin_op_spectrum, input_output_dtype=dtype)

    self.assertEqual(
        operator.parameters,
        {
            "input_output_dtype": dtype,
            "is_non_singular": None,
            "is_positive_definite": None,
            "is_self_adjoint": None,
            "is_square": True,
            "name": "LinearOperatorCirculant2D",
            "spectrum": lin_op_spectrum,
        }
    )

    mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)

    return operator, mat
    def operator_and_matrix(self, build_info, dtype, use_placeholder):
        sess = ops.get_default_session()
        shape = list(build_info.shape)

        # Test only the case of 2 matrices.
        # The Square test uses either 1 or 2, so we have tested the case of 1 matrix
        # sufficiently.
        num_operators = 2

        # Create 2 matrices/operators, A1, A2, which becomes A = A1 A2.
        # Use inner dimension of 2.
        k = 2
        batch_shape = shape[:-2]
        shape_1 = batch_shape + [shape[-2], k]
        shape_2 = batch_shape + [k, shape[-1]]

        matrices = [
            linear_operator_test_util.random_normal(shape_1, dtype=dtype),
            linear_operator_test_util.random_normal(shape_2, dtype=dtype)
        ]

        lin_op_matrices = matrices

        if use_placeholder:
            lin_op_matrices = [
                array_ops.placeholder_with_default(matrix, shape=None)
                for matrix in matrices
            ]

        operator = linalg.LinearOperatorComposition(
            [linalg.LinearOperatorFullMatrix(l) for l in lin_op_matrices])

        matmul_order_list = list(reversed(matrices))
        mat = matmul_order_list[0]
        for other_mat in matmul_order_list[1:]:
            mat = math_ops.matmul(other_mat, mat)

        return operator, mat
  def operator_and_matrix(self, build_info, dtype, use_placeholder,
                          ensure_self_adjoint_and_pd=False):
    shape = list(build_info.shape)

    # Either 1 or 2 matrices, chosen at random.
    num_operators = rng.randint(low=1, high=3)
    if ensure_self_adjoint_and_pd:
      # The random PD matrices are also symmetric. Here we are computing
      # A @ A ... @ A. Since A is symmetric and PD, so are any powers of it.
      matrices = [
          linear_operator_test_util.random_positive_definite_matrix(
              shape, dtype, force_well_conditioned=True)] * num_operators
    else:
      matrices = [
          linear_operator_test_util.random_positive_definite_matrix(
              shape, dtype, force_well_conditioned=True)
          for _ in range(num_operators)
      ]

    lin_op_matrices = matrices

    if use_placeholder:
      lin_op_matrices = [
          array_ops.placeholder_with_default(
              matrix, shape=None) for matrix in matrices]

    operator = linalg.LinearOperatorComposition(
        [linalg.LinearOperatorFullMatrix(l) for l in lin_op_matrices],
        is_positive_definite=True if ensure_self_adjoint_and_pd else None,
        is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
        is_square=True)

    matmul_order_list = list(reversed(matrices))
    mat = matmul_order_list[0]
    for other_mat in matmul_order_list[1:]:
      mat = math_ops.matmul(other_mat, mat)

    return operator, mat
 def test_normalization(self):
     l = 10000
     x = 3 * rng.randn(l).astype(self.dtype)
     x_ph = array_ops.placeholder_with_default(
         x, shape=(l, ) if self.use_static_shape else None)
     with spectral_ops_test_util.fft_kernel_label_map():
         with self.test_session():
             rxx = sample_stats.auto_correlation(x_ph,
                                                 max_lags=l // 2,
                                                 center=True,
                                                 normalize=True)
             if self.use_static_shape:
                 self.assertAllEqual((l // 2 + 1, ), rxx.shape)
             rxx_ = rxx.eval()
             # Note that RXX[0] = 1, despite the fact that E[X^2] = 9, and this is
             # due to normalize=True.
              # The OSS CPU FFT has some accuracy issues, so this tolerance is
              # a bit loose.
             self.assertAllClose(1., rxx_[0], rtol=0.05)
             # The maximal error in the rest of the sequence is not great.
             self.assertAllClose(np.zeros(l // 2), rxx_[1:], atol=0.1)
             # The mean error in the rest is ok, actually 0.008 when I tested it.
             self.assertLess(np.abs(rxx_[1:]).mean(), 0.02)
def get_output_sample_weight_and_mode(skip_target_weighing_indices,
                                      sample_weight_mode, output_name,
                                      output_index):
    """Returns the sample weight and weight mode for a single output."""
    if output_index in skip_target_weighing_indices:
        return None, None

    if sample_weight_mode == 'temporal':
        default_value = [[1.]]
        shape = [None, None]
        mode = 'temporal'
    else:
        default_value = [1.]
        shape = [None]
        mode = None
    if context.executing_eagerly():
        weight = None
    else:
        weight = array_ops.placeholder_with_default(
            constant_op.constant(default_value, dtype=K.floatx()),
            shape=shape,
            name=output_name + '_sample_weights')
    return weight, mode
  def test_gather(self):
    x = random_ops.random_uniform([3, 3, 3])
    x2 = array_ops.placeholder_with_default(x, shape=None)  # Has dynamic shape.

    def loop_fn(i):
      outputs = []
      x_i = array_ops.gather(x, i)
      for y in [x, x2, x_i]:
        axes = [0] if y is x_i else [0, 2, -1]
        for axis in axes:
          outputs.append(array_ops.gather(y, 2, axis=axis))
          outputs.append(
              array_ops.gather(y, math_ops.cast(2, dtypes.int64), axis=axis))
          outputs.append(
              array_ops.gather(y, 2, axis=math_ops.cast(axis, dtypes.int64)))
          outputs.append(
              array_ops.gather(y, math_ops.cast(i, dtypes.int64), axis=axis))
          outputs.append(array_ops.gather(y, [i], axis=axis))
          outputs.append(array_ops.gather(y, [i, 2], axis=axis))
          outputs.append(array_ops.gather(y, [[2, i], [i, 1]], axis=axis))
      return outputs

    self._test_loop_fn(loop_fn, 3)
  def operator_and_matrix(
      self, shape_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):
    del ensure_self_adjoint_and_pd
    shape = shape_info.shape
    # Will be well conditioned enough to get accurate solves.
    spectrum = linear_operator_test_util.random_sign_uniform(
        shape=self._shape_to_spectrum_shape(shape),
        dtype=dtype,
        minval=1.,
        maxval=2.)

    lin_op_spectrum = spectrum

    if use_placeholder:
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)

    operator = linalg.LinearOperatorCirculant2D(
        lin_op_spectrum, input_output_dtype=dtype)

    mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)

    return operator, mat
 def test_step_function_sequence(self):
     # x jumps to new random value every 10 steps.  So correlation length = 10.
     x = (rng.randint(-10, 10, size=(1000, 1)) * np.ones(
         (1, 10))).ravel().astype(self.dtype)
     x_ph = array_ops.placeholder_with_default(
         x, shape=(1000 * 10, ) if self.use_static_shape else None)
     with spectral_ops_test_util.fft_kernel_label_map():
         with self.test_session():
             rxx = sample_stats.auto_correlation(x_ph,
                                                 max_lags=1000 * 10 // 2,
                                                 center=True,
                                                 normalize=False)
             if self.use_static_shape:
                 self.assertAllEqual((1000 * 10 // 2 + 1, ), rxx.shape)
             rxx_ = rxx.eval()
             rxx_ /= rxx_[0]
              # Expect positive correlation for the first 10 lags, then a
              # significantly smaller negative correlation.
             self.assertGreater(rxx_[:10].min(), 0)
             self.assertGreater(rxx_[9], 5 * rxx_[10:20].mean())
             # RXX should be decreasing for the first 10 lags.
             diff = np.diff(rxx_)
             self.assertLess(diff[:10].max(), 0)
    def _check_versus_expected_effective_sample_size(self,
                                                     x_,
                                                     expected_ess,
                                                     sess,
                                                     atol=1e-2,
                                                     rtol=1e-2,
                                                     filter_threshold=None,
                                                     filter_beyond_lag=None):
        x = array_ops.placeholder_with_default(
            input=x_, shape=x_.shape if self.use_static_shape else None)
        ess = mcmc_diagnostics.effective_sample_size(
            x,
            filter_threshold=filter_threshold,
            filter_beyond_lag=filter_beyond_lag)
        if self.use_static_shape:
            self.assertAllEqual(x.shape[1:], ess.shape)

        ess_ = sess.run(ess)

        self.assertAllClose(np.ones_like(ess_) * expected_ess,
                            ess_,
                            atol=atol,
                            rtol=rtol)
  def testInvalid(self):
    r = np.random.RandomState(0)
    cases = [
        # incorrect rank.
        ('ij,jk->ik', r.randn(1, 2, 3), r.randn(3, 4)),
        ('...ij,jk->ik', r.randn(3), r.randn(3, 4)),
        # inconsistent dimensions.
        ('ij,jk->ik', r.randn(2, 3), r.randn(4, 4)),
        # broadcasting is invalid
        ('...ij,...jk->...ik', r.randn(5, 2, 3), r.randn(7, 3, 4)),
        # output should have ellipsis when broadcasting shape is
        # non-empty.
        ('...ij,...jk->ik', r.randn(2, 2, 3), r.randn(3, 4)),
    ]
    for args in cases:
      with self.assertRaises((ValueError, errors.InvalidArgumentError)):
        _ = self.evaluate(gen_linalg_ops.einsum(args[1:], args[0]))

      placeholders = [
          array_ops.placeholder_with_default(x, shape=None) for x in args[1:]
      ]
      with self.assertRaises((ValueError, errors.InvalidArgumentError)):
        _ = self.evaluate(gen_linalg_ops.einsum(placeholders, args[0]))
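
# For contrast with the invalid cases above, a valid contraction (NumPy
# sketch): matching inner dimensions contract cleanly.
import numpy as np

a = np.random.rand(2, 3)
b = np.random.rand(3, 4)
print(np.einsum('ij,jk->ik', a, b).shape)  # (2, 4)
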
    def _model_start_state_placeholders(self,
                                        batch_size_tensor,
                                        static_batch_size=None):
        """Creates placeholders with zeroed start state for the current model."""
        gathered_state = {}
        # Models may not know the shape of their state without creating some
        # variables/ops. Avoid polluting the default graph by making a new one. We
        # use only static metadata from the returned Tensors.
        with ops.Graph().as_default():
            self._model.initialize_graph()

            # Evaluate the initial state as same-dtype "zero" values. These zero
            # constants aren't used, but are necessary for feeding to
            # placeholder_with_default for the "cold start" case where state is not
            # fed to the model.
            def _zeros_like_constant(tensor):
                return tensor_util.constant_value(array_ops.zeros_like(tensor))

            start_state = nest.map_structure(_zeros_like_constant,
                                             self._model.get_start_state())
        for prefixed_state_name, state in ts_head_lib.state_to_dictionary(
                start_state).items():
            state_shape_with_batch = tensor_shape.TensorShape(
                (static_batch_size, )).concatenate(state.shape)
            default_state_broadcast = array_ops.tile(
                state[None, ...],
                multiples=array_ops.concat([
                    batch_size_tensor[None],
                    array_ops.ones(len(state.shape), dtype=dtypes.int32)
                ],
                                           axis=0))
            gathered_state[
                prefixed_state_name] = array_ops.placeholder_with_default(
                    input=default_state_broadcast,
                    name=prefixed_state_name,
                    shape=state_shape_with_batch)
        return gathered_state
    def test_invalid_equation(self):
        r = np.random.RandomState(0)
        cases = [
            # invalid equation format.
            ('a0->a', r.randn(5, 3)),
            ('a->a,a', r.randn(5)),
            ('a->a->a', r.randn(5)),
            ('ijk ijk', r.randn(1, 2, 3), r.randn(1, 2, 3)),
            ('ij.jk->ik', r.randn(2, 3), r.randn(3, 4)),
            # output label not present in input.
            ('a->b', r.randn(5)),
            ('ij,jk->im', r.randn(2, 3), r.randn(3, 4)),
            # wrong shape.
            ('ij,jk->ik', r.randn(1, 2, 3), r.randn(3, 4)),
            # inconsistent dimensions.
            ('ij,jk->ik', r.randn(2, 3), r.randn(4, 4)),
            # output has repeated subscripts.
            ('ij,jk->iik', r.randn(2, 3), r.randn(3, 4)),
            # too many ellipses
            ('...ij...,jk...->ik...', r.randn(2, 3), r.randn(3, 4)),
            ('...ij,jk...->...ik...', r.randn(2, 3), r.randn(3, 4)),
            # invalid broadcast dimensions.
            ('...ij,...jk->...ik', r.randn(5, 2, 3), r.randn(7, 3, 4)),
            # output should have ellipsis when broadcasting shape is non-empty.
            ('...ij,...jk->ik', r.randn(2, 2, 3), r.randn(3, 4)),
        ]
        for args in cases:
            with self.assertRaises((ValueError, errors.InvalidArgumentError)):
                _ = special_math_ops.einsum(*args)

            placeholders = [
                array_ops.placeholder_with_default(x, shape=None)
                for x in args[1:]
            ]
            with self.assertRaises((ValueError, errors.InvalidArgumentError)):
                _ = self.evaluate(
                    special_math_ops.einsum(args[0], *placeholders))
    def operator_and_matrix(self,
                            build_info,
                            dtype,
                            use_placeholder,
                            ensure_self_adjoint_and_pd=False):
        del ensure_self_adjoint_and_pd
        shape = list(build_info.shape)

        # Create 2 matrices/operators, A1, A2, which becomes A = A1 A2.
        # Use inner dimension of 2.
        k = 2
        batch_shape = shape[:-2]
        shape_1 = batch_shape + [shape[-2], k]
        shape_2 = batch_shape + [k, shape[-1]]

        matrices = [
            linear_operator_test_util.random_normal(shape_1, dtype=dtype),
            linear_operator_test_util.random_normal(shape_2, dtype=dtype)
        ]

        lin_op_matrices = matrices

        if use_placeholder:
            lin_op_matrices = [
                array_ops.placeholder_with_default(matrix, shape=None)
                for matrix in matrices
            ]

        operator = linalg.LinearOperatorComposition(
            [linalg.LinearOperatorFullMatrix(l) for l in lin_op_matrices])

        matmul_order_list = list(reversed(matrices))
        mat = matmul_order_list[0]
        for other_mat in matmul_order_list[1:]:
            mat = math_ops.matmul(other_mat, mat)

        return operator, mat
    def test_move_dimension_dynamic_shape(self):

        x_ = random_ops.random_normal(shape=[200, 30, 4, 1, 6])
        x = array_ops.placeholder_with_default(input=x_, shape=None)

        x_perm = distribution_util.move_dimension(x, 1, 1)
        self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),
                            [200, 30, 4, 1, 6])

        x_perm = distribution_util.move_dimension(x, 0, 3)
        self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),
                            [30, 4, 1, 200, 6])

        x_perm = distribution_util.move_dimension(x, 0, -2)
        self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),
                            [30, 4, 1, 200, 6])

        x_perm = distribution_util.move_dimension(x, 4, 2)
        self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),
                            [200, 30, 6, 4, 1])

        x_perm = distribution_util.move_dimension(x, -1, 2)
        self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),
                            [200, 30, 6, 4, 1])
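
# A rough NumPy analogue (an assumption about the semantics, not the library's
# implementation) of distribution_util.move_dimension for the cases exercised
# above: a single axis is moved from `source` to `dest`, as with np.moveaxis.
import numpy as np

x = np.zeros([200, 30, 4, 1, 6])
assert np.moveaxis(x, 0, 3).shape == (30, 4, 1, 200, 6)   # cf. move_dimension(x, 0, 3)
assert np.moveaxis(x, 4, 2).shape == (200, 30, 6, 4, 1)   # cf. move_dimension(x, 4, 2)
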
    def testEmptyCacheReading(self):
        components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
                      np.array([9.0, 10.0, 11.0, 12.0]))
        count_placeholder = array_ops.placeholder_with_default(
            constant_op.constant(5, dtypes.int64), shape=[])

        repeat_dataset = (dataset_ops.Dataset.from_tensor_slices(
            components).repeat(count_placeholder))

        cache_dataset = repeat_dataset.cache()

        # Create the initialization op for the iterator that reads from the
        # cache.
        iterator = cache_dataset.make_initializable_iterator()
        init_cache_op = iterator.initializer

        get_next = iterator.get_next()

        with self.test_session() as sess:
            # Initialize with an empty upstream dataset (should throw
            # errors.OutOfRangeError immediately).
            sess.run(init_cache_op, feed_dict={count_placeholder: 0})
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)
Example no. 52
  def operator_and_matrix(
      self, build_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):
    shape = list(build_info.shape)
    diag = linear_operator_test_util.random_sign_uniform(
        shape[:-1], minval=1., maxval=2., dtype=dtype)

    if ensure_self_adjoint_and_pd:
      # Abs on complex64 will result in a float32, so we cast back up.
      diag = math_ops.cast(math_ops.abs(diag), dtype=dtype)

    lin_op_diag = diag

    if use_placeholder:
      lin_op_diag = array_ops.placeholder_with_default(diag, shape=None)

    operator = linalg.LinearOperatorDiag(
        lin_op_diag,
        is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
        is_positive_definite=True if ensure_self_adjoint_and_pd else None)

    matrix = array_ops.matrix_diag(diag)

    return operator, matrix
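
# A minimal NumPy sketch (illustration only) of the dense reference returned
# above: LinearOperatorDiag's matrix form is diag(d), so applying it to a
# vector reduces to elementwise scaling.
import numpy as np

d = np.array([1.5, -2.0, 0.75])
v = np.random.randn(3)
np.testing.assert_allclose(np.diag(d) @ v, d * v)
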
Example no. 53
  def operator_and_matrix(
      self, build_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):
    shape = list(build_info.shape)
    reflection_axis = linear_operator_test_util.random_sign_uniform(
        shape[:-1], minval=1., maxval=2., dtype=dtype)
    # Normalize the reflection axis to unit norm.
    reflection_axis = reflection_axis / linalg_ops.norm(
        reflection_axis, axis=-1, keepdims=True)

    lin_op_reflection_axis = reflection_axis

    if use_placeholder:
      lin_op_reflection_axis = array_ops.placeholder_with_default(
          reflection_axis, shape=None)

    operator = householder.LinearOperatorHouseholder(lin_op_reflection_axis)

    mat = reflection_axis[..., array_ops.newaxis]
    matrix = -2 * math_ops.matmul(mat, mat, adjoint_b=True)
    matrix = array_ops.matrix_set_diag(
        matrix, 1. + array_ops.matrix_diag_part(matrix))

    return operator, matrix
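
# A minimal NumPy check (illustration only, not the test's code) of the dense
# Householder matrix assembled above: for a unit vector v, H = I - 2 v v^T is
# symmetric and orthogonal, i.e. H @ H recovers the identity.
import numpy as np

v = np.random.randn(4)
v = v / np.linalg.norm(v)
h = np.eye(4) - 2.0 * np.outer(v, v)
np.testing.assert_allclose(h, h.T)
np.testing.assert_allclose(h @ h, np.eye(4), atol=1e-12)
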
Example no. 54
    def operator_and_matrix(self,
                            build_info,
                            dtype,
                            use_placeholder,
                            ensure_self_adjoint_and_pd=False):
        shape = list(build_info.shape)

        matrix = linear_operator_test_util.random_positive_definite_matrix(
            shape, dtype)

        lin_op_matrix = matrix

        if use_placeholder:
            lin_op_matrix = array_ops.placeholder_with_default(matrix,
                                                               shape=None)

        # Set the hints to none to test non-symmetric PD code paths.
        operator = linalg.LinearOperatorFullMatrix(
            lin_op_matrix,
            is_square=True,
            is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
            is_positive_definite=True if ensure_self_adjoint_and_pd else None)

        return operator, matrix
  def _operator_and_matrix(
      self, build_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):
    # Kronecker products constructed below will be from symmetric
    # positive-definite matrices.
    del ensure_self_adjoint_and_pd
    shape = list(build_info.shape)
    expected_factors = build_info.__dict__["factors"]
    matrices = [
        linear_operator_test_util.random_positive_definite_matrix(
            block_shape, dtype, force_well_conditioned=True)
        for block_shape in expected_factors
    ]

    lin_op_matrices = matrices

    if use_placeholder:
      lin_op_matrices = [
          array_ops.placeholder_with_default(m, shape=None) for m in matrices]

    operator = kronecker.LinearOperatorKronecker(
        [linalg.LinearOperatorFullMatrix(
            l,
            is_square=True,
            is_self_adjoint=True,
            is_positive_definite=True)
         for l in lin_op_matrices])

    matrices = linear_operator_util.broadcast_matrix_batch_dims(matrices)

    kronecker_dense = _kronecker_dense(matrices)

    if not use_placeholder:
      kronecker_dense.set_shape(shape)

    return operator, kronecker_dense
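
# A minimal NumPy sketch (using np.kron; `_kronecker_dense` above is the
# library's own helper) of the dense matrix the operator represents: the
# Kronecker product of the factors, where block (i, j) equals a[i, j] * b.
import numpy as np

a = np.random.randn(2, 2)
b = np.random.randn(3, 3)
dense = np.kron(a, b)
assert dense.shape == (6, 6)
np.testing.assert_allclose(dense[:3, :3], a[0, 0] * b)
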
Example no. 56
  def testBasicUnbatchDecorated(self):
    """Tests that the batch_function decorator works."""
    with self.cached_session() as sess:
      # TODO(apassos): Removing this line causes test flakiness! Ideally should
      # be investigated.
      default_inp = array_ops.placeholder_with_default(2, shape=[])  # pylint: disable=unused-variable

      @batch_ops.batch_function(1, 10, 100000)
      def computation(in_t):
        return in_t + 1

      inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      result = computation(inp)
      thread_results = []

      def worker():
        thread_results.extend(sess.run([result], feed_dict={inp: [1]}))

      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([result], feed_dict={inp: [2]})
      worker_thread.join()
      self.assertEqual(thread_results[0], [2])
      self.assertEqual(main_results[0], [3])
Example no. 57
  def _operator_and_matrix(
      self, build_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):

    # Matrix is always symmetric and positive definite in this class.
    del ensure_self_adjoint_and_pd

    shape = list(build_info.shape)

    matrix = linear_operator_test_util.random_positive_definite_matrix(
        shape, dtype, force_well_conditioned=True)

    lin_op_matrix = matrix

    if use_placeholder:
      lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)

    operator = linalg.LinearOperatorFullMatrix(
        lin_op_matrix,
        is_square=True,
        is_self_adjoint=True,
        is_positive_definite=True)

    return operator, matrix
Example no. 58
    def testToSingleElement(self):
        skip_value = array_ops.placeholder(dtypes.int64, shape=[])
        take_value = array_ops.placeholder_with_default(
            constant_op.constant(1, dtype=dtypes.int64), shape=[])

        dataset = (dataset_ops.Dataset.range(100).skip(skip_value).map(
            lambda x: x * x).take(take_value))

        element = dataset_ops.get_single_element(dataset)

        with self.test_session() as sess:
            self.assertEqual(0, sess.run(element, feed_dict={skip_value: 0}))
            self.assertEqual(25, sess.run(element, feed_dict={skip_value: 5}))
            self.assertEqual(
                100, sess.run(element, feed_dict={skip_value: 10}))

            with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                         "Dataset was empty."):
                sess.run(element, feed_dict={skip_value: 100})

            with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                         "Dataset had more than one element."):
                sess.run(element, feed_dict={skip_value: 0, take_value: 2})
    def _testDynamicDecodeRNN(self, time_major, has_attention):
        encoder_sequence_length = np.array([3, 2, 3, 1, 1])
        decoder_sequence_length = np.array([2, 0, 1, 2, 3])
        batch_size = 5
        decoder_max_time = 4
        input_depth = 7
        cell_depth = 9
        attention_depth = 6
        vocab_size = 20
        end_token = vocab_size - 1
        start_token = 0
        embedding_dim = 50
        max_out = max(decoder_sequence_length)
        output_layer = layers_core.Dense(vocab_size,
                                         use_bias=True,
                                         activation=None)
        beam_width = 3

        with self.test_session() as sess:
            batch_size_tensor = constant_op.constant(batch_size)
            embedding = np.random.randn(vocab_size,
                                        embedding_dim).astype(np.float32)
            cell = rnn_cell.LSTMCell(cell_depth)
            initial_state = cell.zero_state(batch_size, dtypes.float32)
            if has_attention:
                inputs = array_ops.placeholder_with_default(
                    np.random.randn(batch_size, decoder_max_time,
                                    input_depth).astype(np.float32),
                    shape=(None, None, input_depth))
                tiled_inputs = beam_search_decoder.tile_batch(
                    inputs, multiplier=beam_width)
                tiled_sequence_length = beam_search_decoder.tile_batch(
                    encoder_sequence_length, multiplier=beam_width)
                attention_mechanism = attention_wrapper.BahdanauAttention(
                    num_units=attention_depth,
                    memory=tiled_inputs,
                    memory_sequence_length=tiled_sequence_length)
                initial_state = beam_search_decoder.tile_batch(
                    initial_state, multiplier=beam_width)
                cell = attention_wrapper.AttentionWrapper(
                    cell=cell,
                    attention_mechanism=attention_mechanism,
                    attention_layer_size=attention_depth,
                    alignment_history=False)
            cell_state = cell.zero_state(dtype=dtypes.float32,
                                         batch_size=batch_size_tensor *
                                         beam_width)
            if has_attention:
                cell_state = cell_state.clone(cell_state=initial_state)
            bsd = beam_search_decoder.BeamSearchDecoder(
                cell=cell,
                embedding=embedding,
                start_tokens=array_ops.fill([batch_size_tensor], start_token),
                end_token=end_token,
                initial_state=cell_state,
                beam_width=beam_width,
                output_layer=output_layer,
                length_penalty_weight=0.0)

            final_outputs, final_state, final_sequence_lengths = (
                decoder.dynamic_decode(bsd,
                                       output_time_major=time_major,
                                       maximum_iterations=max_out))

            def _t(shape):
                if time_major:
                    return (shape[1], shape[0]) + shape[2:]
                return shape

            self.assertTrue(
                isinstance(final_outputs,
                           beam_search_decoder.FinalBeamSearchDecoderOutput))
            self.assertTrue(
                isinstance(final_state,
                           beam_search_decoder.BeamSearchDecoderState))

            beam_search_decoder_output = final_outputs.beam_search_decoder_output
            self.assertEqual(
                _t((batch_size, None, beam_width)),
                tuple(beam_search_decoder_output.scores.get_shape().as_list()))
            self.assertEqual(
                _t((batch_size, None, beam_width)),
                tuple(final_outputs.predicted_ids.get_shape().as_list()))

            sess.run(variables.global_variables_initializer())
            sess_results = sess.run({
                'final_outputs': final_outputs,
                'final_state': final_state,
                'final_sequence_lengths': final_sequence_lengths
            })

            max_sequence_length = np.max(
                sess_results['final_sequence_lengths'])

            # A smoke test
            self.assertEqual(
                _t((batch_size, max_sequence_length, beam_width)),
                sess_results['final_outputs'].beam_search_decoder_output.
                scores.shape)
            self.assertEqual(
                _t((batch_size, max_sequence_length, beam_width)),
                sess_results['final_outputs'].beam_search_decoder_output.
                predicted_ids.shape)
Example no. 60
  def testGradient(self):
    with self.test_session():
      x = array_ops.placeholder(dtypes_lib.float32, [5, 7])
      y = array_ops.placeholder_with_default(x, None)
      err = gradient_checker.compute_gradient_error(x, [5, 7], y, [5, 7])
      self.assertLess(err, 1e-3)
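
# A minimal NumPy sketch (hypothetical helper, not gradient_checker itself) of
# what the check above verifies: placeholder_with_default passes its input
# through unchanged, so the Jacobian of y with respect to x should be the
# identity matrix up to finite-difference error.
import numpy as np

def numeric_jacobian(f, x, eps=1e-6):
  x = np.asarray(x, dtype=np.float64)
  flat = x.ravel()
  jac = np.zeros((f(x).size, flat.size))
  for i in range(flat.size):
    bumped = flat.copy()
    bumped[i] += eps
    jac[:, i] = (f(bumped.reshape(x.shape)) - f(x)).ravel() / eps
  return jac

x0 = np.random.randn(5, 7)
err = np.abs(numeric_jacobian(lambda t: t, x0) - np.eye(35)).max()
assert err < 1e-3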