Example #1
  def testPlaceholder(self):
    with self.test_session(use_gpu=False):
      foo = array_ops.sparse_placeholder(dtypes.float32, shape=(10, 47))
      self.assertAllEqual([10, 47], foo.get_shape())

      foo = array_ops.sparse_placeholder(dtypes.float32, shape=(None, 47))
      self.assertAllEqual(None, foo.get_shape())
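For readers trying these snippets outside the TensorFlow test harness, here is a minimal, self-contained sketch of the same idea via the public `tensorflow.compat.v1` API, where `array_ops.sparse_placeholder` is exposed as `tf.sparse_placeholder` (the shapes and values below are illustrative only):

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # sparse_placeholder only works in graph mode

# A 3x4 sparse placeholder, densified so the fed value can be inspected.
sp = tf.sparse_placeholder(tf.float32, shape=(3, 4))
dense = tf.sparse.to_dense(sp)

with tf.Session() as sess:
    feed = tf.SparseTensorValue(
        indices=np.array([[0, 1], [2, 3]], dtype=np.int64),
        values=np.array([1.0, 2.0], dtype=np.float32),
        dense_shape=np.array([3, 4], dtype=np.int64))
    print(sess.run(dense, feed_dict={sp: feed}))  # zeros except (0, 1) and (2, 3)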
Example #3
 def _create_dummy_inputs(self):
   return {
       "sc_int": array_ops.sparse_placeholder(dtypes.int32),
       "sc_hash": array_ops.sparse_placeholder(dtypes.string),
       "sc_keys": array_ops.sparse_placeholder(dtypes.string),
       "sc_vocab": array_ops.sparse_placeholder(dtypes.string),
       "real": array_ops.placeholder(dtypes.float32)
   }
Example #4
  def testSparseConcatStaticShape(self):
    if context.executing_eagerly():
      self.skipTest('sparse_placeholder is only available in graph context.')
    input_a = array_ops.sparse_placeholder(dtypes.float32, shape=(2, 1))
    input_b = array_ops.sparse_placeholder(dtypes.float32, shape=(2, 2))

    result = sparse_ops.sparse_concat_v2(axis=1, sp_inputs=[input_a, input_b])
    self.assertEqual(result.shape, [2, 3])
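The static shape arithmetic above can be reproduced with the public API; a small sketch (TF1 graph mode assumed): concatenating a (2, 1) and a (2, 2) sparse placeholder along axis 1 yields a statically known (2, 3) result without feeding any values.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

a = tf.sparse_placeholder(tf.float32, shape=(2, 1))
b = tf.sparse_placeholder(tf.float32, shape=(2, 2))
c = tf.sparse.concat(axis=1, sp_inputs=[a, b])
print(c.shape)  # (2, 3), inferred from the static input shapes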
    def test_build_all_signature_defs_with_single_alternatives(self):
        # Force the test to run in graph mode.
        # This tests a deprecated v1 API that depends on graph-only functions such
        # as build_tensor_info.
        with ops.Graph().as_default():
            receiver_tensor = array_ops.placeholder(dtypes.string)
            receiver_tensors_alternative_1 = array_ops.placeholder(
                dtypes.int64)
            receiver_tensors_alternative_2 = array_ops.sparse_placeholder(
                dtypes.float32)
            # Note we are passing single Tensors as values of
            # receiver_tensors_alternatives, where normally that is a dict.
            # In this case a dict will be created using the default receiver tensor
            # name "input".
            receiver_tensors_alternatives = {
                "other1": receiver_tensors_alternative_1,
                "other2": receiver_tensors_alternative_2
            }
            output_1 = constant_op.constant([1.])
            output_2 = constant_op.constant(["2"])
            output_3 = constant_op.constant(["3"])
            export_outputs = {
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                export_output.RegressionOutput(value=output_1),
                "head-2":
                export_output.ClassificationOutput(classes=output_2),
                "head-3":
                export_output.PredictOutput(
                    outputs={"some_output_3": output_3}),
            }

            signature_defs = export_utils.build_all_signature_defs(
                receiver_tensor, export_outputs, receiver_tensors_alternatives)

            expected_signature_defs = {
                "serving_default":
                signature_def_utils.regression_signature_def(
                    receiver_tensor, output_1),
                "head-2":
                signature_def_utils.classification_signature_def(
                    receiver_tensor, output_2, None),
                "head-3":
                signature_def_utils.predict_signature_def(
                    {"input": receiver_tensor}, {"some_output_3": output_3}),
                "other1:head-3":
                signature_def_utils.predict_signature_def(
                    {"input": receiver_tensors_alternative_1},
                    {"some_output_3": output_3}),
                "other2:head-3":
                signature_def_utils.predict_signature_def(
                    {"input": receiver_tensors_alternative_2},
                    {"some_output_3": output_3})

                # Note that the alternatives 'other:serving_default' and
                # 'other:head-2' are invalid, because regression and classification
                # signatures must take a single string input.  Here we verify that
                # these invalid signatures are not included in the export_utils.
            }

            self.assertDictEqual(expected_signature_defs, signature_defs)
Example #7
    def __init__(self,
                 n_units,
                 batch_size=None,
                 dtype=dtypes.float32,
                 name="sparse_input"):
        """

        Args:
            n_units (int): the number of output units for this layer
            batch_size (int): batch_size for the input, helps to define the shape for this sparse layer
            dtype: the output type for the values in the ``SparseTensor`` that this layer outputs
            name: name for the layer
        """
        shape = [batch_size, n_units]
        super().__init__(None, n_units, shape, dtype, name)

        with ops.name_scope(name):
            self.placeholder = array_ops.sparse_placeholder(
                dtype, self.shape, name)
            n_indices = array_ops.shape(self.placeholder.indices)[0]
            n_values = array_ops.shape(self.placeholder.values)[0]

            valid_values = check_ops.assert_equal(
                n_indices, n_values, message="Invalid number of values")
            with ops.control_dependencies([valid_values]):
                values = array_ops.identity(self.placeholder.values)

            self.tensor = SparseTensor(self.placeholder.indices, values,
                                       self.placeholder.dense_shape)
Example #8
 def testFeedSparsePlaceholder(self):
   with session.Session() as s:
     indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
     values = np.array([1.0, 2.0]).astype(np.float32)
     shape = np.array([7, 9, 2]).astype(np.int64)
     sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
     sp_indices = array_ops.identity(sp.indices)
     sp_values = array_ops.identity(sp.values)
     sp_shape = array_ops.identity(sp.shape)
     sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
     # Feed with tuple
     indices_out, values_out, shape_out = s.run(
         [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
     self.assertAllEqual(indices_out, indices)
     self.assertAllEqual(values_out, values)
     self.assertAllEqual(shape_out, shape)
     # Feed with SparseTensorValue
     indices_out, values_out, shape_out = s.run(
         [sp_indices, sp_values, sp_shape],
         {sp: ops.SparseTensorValue(indices, values, shape)})
     self.assertAllEqual(indices_out, indices)
     self.assertAllEqual(values_out, values)
     self.assertAllEqual(shape_out, shape)
     # Feed with SparseTensorValue, fetch SparseTensorValue
     sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
     self.assertAllEqual(sp2_out.indices, indices)
     self.assertAllEqual(sp2_out.values, values)
     self.assertAllEqual(sp2_out.shape, shape)
    def test_invalid_axes(self):
        sp_a = sparse_tensor.SparseTensor(indices=[[0, 0], [1, 1]],
                                          values=[1, 2],
                                          dense_shape=[2, 2])
        b = constant_op.constant([[1, 2], [3, 4]])
        # Invalid static axes.
        for axes_value in -1, 0, [1], [[1]], [[1], [0, 1]]:
            with self.assertRaises(ValueError):
                sparse_ops.sparse_tensor_dense_tensordot(sp_a, b, axes_value)

        with self.assertRaises(IndexError):
            sparse_ops.sparse_tensor_dense_tensordot(sp_a, b, [[0], [7]])

        # Invalid dynamic axes.
        a_ph = array_ops.sparse_placeholder(dtypes.float32)
        b_ph = array_ops.placeholder(dtypes.float32)
        axes_ph = array_ops.placeholder(dtypes.int32)
        output = sparse_ops.sparse_tensor_dense_tensordot(a_ph, b_ph, axes_ph)
        # Note: We don't support scalar Tensor values for axes.
        for axes_value in 1, [1], [0, 1], [[1]], [[0, 1]], [[0], [7]]:
            with self.test_session() as sess:
                with self.assertRaises(errors_impl.InvalidArgumentError):
                    _ = sess.run([output],
                                 feed_dict={
                                     a_ph: sp_a,
                                     b_ph: b,
                                     axes_ph: axes_value
                                 })
 def test_invalid_shape(self):
     sp_a = sparse_tensor.SparseTensor(indices=[[0, 0], [1, 1]],
                                       values=[1, 2],
                                       dense_shape=[2, 2])
     b = [[1, 2], [3, 4], [5, 6]]
     a_axes = [1]
     b_axes = [0]
     # Invalid static shapes.
     with self.assertRaises(ValueError):
         sparse_ops.sparse_tensor_dense_tensordot(sp_a, b, (a_axes, b_axes))
     # Invalid dynamic shapes.
     with self.test_session() as sess:
         with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                      "Matrix size-incompatible"):
             a_ph = array_ops.sparse_placeholder(dtypes.float32)
             b_ph = array_ops.placeholder(dtypes.float32)
             axes_ph = array_ops.placeholder(dtypes.int32)
             output = sparse_ops.sparse_tensor_dense_tensordot(
                 a_ph, b_ph, axes_ph)
             _ = sess.run([output],
                          feed_dict={
                              a_ph: sp_a,
                              b_ph: b,
                              axes_ph: (a_axes, b_axes)
                          })
 def test_tensordot(self):
     num_trials = min(30, num_dims_ * num_dims_)
     if dtype_ == np.float16:
         tol = 0.05
     elif dtype_ == np.float32 or dtype_ == np.complex64:
         tol = 1e-5
     else:
         tol = 1e-12
     for _ in range(num_trials):
          a_np, b_np, a_dims_np, b_dims_np, sp_a_np = (
              _generate_random_tensors_and_dims())
         np_ans = np.tensordot(a_np, b_np, axes=(a_dims_np, b_dims_np))
         with self.test_session(use_gpu=True) as sess:
             if dynamic_shape_:
                 sp_a = array_ops.sparse_placeholder(dtype_)
                 b = array_ops.placeholder(dtype_)
                 axes = array_ops.placeholder(dtypes.int32)
                 c = sparse_ops.sparse_tensor_dense_tensordot(sp_a, b, axes)
                 tf_ans = sess.run(c,
                                   feed_dict={
                                       sp_a: sp_a_np,
                                       b: b_np,
                                       axes: (a_dims_np, b_dims_np)
                                   })
             else:
                 sp_a = sparse_tensor.SparseTensor(indices=sp_a_np[0],
                                                   values=sp_a_np[1],
                                                   dense_shape=sp_a_np[2])
                 tf_ans = sparse_ops.sparse_tensor_dense_tensordot(
                     sp_a, b_np, (a_dims_np, b_dims_np)).eval()
         self.assertAllClose(tf_ans, np_ans, rtol=tol, atol=tol)
         self.assertAllEqual(tf_ans.shape, np_ans.shape)
Example #13
    def setUp(self):
        self._tmp_dir = tempfile.mktemp()

        self.v = variables.Variable(10.0, name="v")
        self.w = variables.Variable(21.0, name="w")
        self.delta = constant_op.constant(1.0, name="delta")
        self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")

        self.w_int = control_flow_ops.with_dependencies(
            [self.inc_v],
            math_ops.cast(self.w, dtypes.int32, name="w_int_inner"),
            name="w_int_outer")

        self.ph = array_ops.placeholder(dtypes.float32, name="ph")
        self.xph = array_ops.transpose(self.ph, name="xph")
        self.m = constant_op.constant([[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]],
                                      dtype=dtypes.float32,
                                      name="m")
        self.y = math_ops.matmul(self.m, self.xph, name="y")

        self.sparse_ph = array_ops.sparse_placeholder(
            dtypes.float32, shape=([5, 5]), name="sparse_placeholder")
        self.sparse_add = sparse_ops.sparse_add(self.sparse_ph, self.sparse_ph)

        self.sess = session.Session()

        # Initialize variable.
        self.sess.run(variables.global_variables_initializer())
Example #14
def make_place_holder_tensors_for_base_features(feature_columns):
  """Returns placeholder tensors for inference.

  Args:
    feature_columns: An iterable containing all the feature columns. All items
      should be instances of classes derived from _FeatureColumn.
  Returns:
    A dict mapping feature keys to SparseTensors (sparse columns) or
    placeholder Tensors (dense columns).
  """
  # Get dict mapping features to FixedLenFeature or VarLenFeature values.
  dict_for_parse_example = create_feature_spec_for_parsing(feature_columns)
  placeholders = {}
  for column_name, column_type in dict_for_parse_example.items():
    if isinstance(column_type, parsing_ops.VarLenFeature):
      # Sparse placeholder for sparse tensors.
      placeholders[column_name] = array_ops.sparse_placeholder(
          column_type.dtype,
          name="Placeholder_" + column_name)
    else:
      # Simple placeholder for dense tensors.
      placeholders[column_name] = array_ops.placeholder(
          column_type.dtype,
          shape=(None, column_type.shape[0]),
          name="Placeholder_" + column_name)
  return placeholders
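The VarLenFeature/FixedLenFeature dispatch in the function above can be sketched without the feature-column machinery. A hedged, self-contained version using the public parsing specs (the feature names here are hypothetical):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

feature_spec = {
    "tags": tf.io.VarLenFeature(tf.string),                     # sparse column
    "age": tf.io.FixedLenFeature(shape=[1], dtype=tf.float32),  # dense column
}

placeholders = {}
for name, spec in feature_spec.items():
    if isinstance(spec, tf.io.VarLenFeature):
        # Variable-length features get a sparse placeholder.
        placeholders[name] = tf.sparse_placeholder(
            spec.dtype, name="Placeholder_" + name)
    else:
        # Fixed-length features get a dense placeholder with a batch dimension.
        placeholders[name] = tf.placeholder(
            spec.dtype, shape=(None,) + tuple(spec.shape),
            name="Placeholder_" + name)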
  def setUp(self):
    self._tmp_dir = tempfile.mktemp()

    self.v = variables.VariableV1(10.0, name="v")
    self.w = variables.VariableV1(21.0, name="w")
    self.delta = constant_op.constant(1.0, name="delta")
    self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")

    self.w_int = control_flow_ops.with_dependencies(
        [self.inc_v],
        math_ops.cast(self.w, dtypes.int32, name="w_int_inner"),
        name="w_int_outer")

    self.ph = array_ops.placeholder(dtypes.float32, name="ph")
    self.xph = array_ops.transpose(self.ph, name="xph")
    self.m = constant_op.constant(
        [[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
    self.y = math_ops.matmul(self.m, self.xph, name="y")

    self.sparse_ph = array_ops.sparse_placeholder(
        dtypes.float32, shape=([5, 5]), name="sparse_placeholder")
    self.sparse_add = sparse_ops.sparse_add(self.sparse_ph, self.sparse_ph)

    rewriter_config = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
        dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
    graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
    config_proto = config_pb2.ConfigProto(graph_options=graph_options)
    self.sess = session.Session(config=config_proto)

    # Initialize variable.
    self.sess.run(variables.global_variables_initializer())
  def setUp(self):
    self._tmp_dir = tempfile.mktemp()

    self.v = variables.Variable(10.0, name="v")
    self.w = variables.Variable(21.0, name="w")
    self.delta = constant_op.constant(1.0, name="delta")
    self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")

    self.w_int = control_flow_ops.with_dependencies(
        [self.inc_v],
        math_ops.cast(self.w, dtypes.int32, name="w_int_inner"),
        name="w_int_outer")

    self.ph = array_ops.placeholder(dtypes.float32, name="ph")
    self.xph = array_ops.transpose(self.ph, name="xph")
    self.m = constant_op.constant(
        [[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
    self.y = math_ops.matmul(self.m, self.xph, name="y")

    self.sparse_ph = array_ops.sparse_placeholder(
        dtypes.float32, shape=([5, 5]), name="sparse_placeholder")
    self.sparse_add = sparse_ops.sparse_add(self.sparse_ph, self.sparse_ph)

    rewriter_config = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
        dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
    graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
    config_proto = config_pb2.ConfigProto(graph_options=graph_options)
    self.sess = session.Session(config=config_proto)

    # Initialize variable.
    self.sess.run(variables.global_variables_initializer())
Example #19
    def testFromSparseTensorSlices(self, slices):
        """Test a dataset based on slices of a `tf.sparse.SparseTensor`."""
        st = array_ops.sparse_placeholder(dtypes.float64)
        iterator = dataset_ops.make_initializable_iterator(
            dataset_ops.Dataset.from_sparse_tensor_slices(st))
        init_op = iterator.initializer
        get_next = sparse_tensor.SparseTensor(*iterator.get_next())

        with self.cached_session() as sess:
            # Test with sparse tensor in the appropriate order.
            # pylint: disable=g-complex-comprehension
            indices = np.array([[i, j] for i in range(len(slices))
                                for j in range(len(slices[i]))])
            values = np.array([val for s in slices for val in s])
            # pylint: enable=g-complex-comprehension
            dense_shape = np.array(
                [len(slices), max(len(s) for s in slices) + 1])
            sparse_feed = sparse_tensor.SparseTensorValue(
                indices, values, dense_shape)
            sess.run(init_op, feed_dict={st: sparse_feed})
            for i, s in enumerate(slices):
                results = sess.run(get_next)
                self.assertAllEqual(s, results.values)
                expected_indices = np.array([[j] for j in range(len(slices[i]))
                                             ]).reshape([-1, 1])
                self.assertAllEqual(expected_indices, results.indices)
                self.assertAllEqual(dense_shape[1:], results.dense_shape)
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)
Example #20
    def test_get_sparse_tensors_dynamic_zero_length(self):
        """Tests _get_sparse_tensors with a dynamic sequence length."""
        with ops.Graph().as_default():
            inputs = sparse_tensor.SparseTensorValue(
                indices=np.zeros((0, 2)), values=[], dense_shape=(2, 0))
            expected = sparse_tensor.SparseTensorValue(
                indices=np.zeros((0, 3)),
                values=np.array((), dtype=np.int64),
                dense_shape=(2, 0, 1))
            column = sfc.sequence_categorical_column_with_vocabulary_file(
                key='aaa',
                vocabulary_file=self._wire_vocabulary_file_name,
                vocabulary_size=self._wire_vocabulary_size)
            input_placeholder_shape = list(inputs.dense_shape)
            # Make second dimension (sequence length) dynamic.
            input_placeholder_shape[1] = None
            input_placeholder = array_ops.sparse_placeholder(
                dtypes.string, shape=input_placeholder_shape)
            id_weight_pair = _get_sparse_tensors(column,
                                                 {'aaa': input_placeholder})

            self.assertIsNone(id_weight_pair.weight_tensor)
            with _initialized_session() as sess:
                result = id_weight_pair.id_tensor.eval(
                    session=sess, feed_dict={input_placeholder: inputs})
                _assert_sparse_tensor_value(self, expected, result)
Example #21
    def test_with_1d_unknown_shape_sparse_tensor(self):
        embedding_values = (
            (1., 2.),  # id 0
            (6., 7.),  # id 1
            (11., 12.)  # id 2
        )

        def _initializer(shape, dtype, partition_info=None):
            del shape, dtype, partition_info
            return embedding_values

        # price has 1 dimension in dense_features
        price = fc.numeric_column('price')

        # one_hot_body_style has 3 dims in dense_features.
        body_style = fc.categorical_column_with_vocabulary_list(
            'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
        one_hot_body_style = fc.indicator_column(body_style)

        # embedded_country has 2 dims in dense_features.
        country = fc.categorical_column_with_vocabulary_list(
            'country', vocabulary_list=['US', 'JP', 'CA'])
        embedded_country = fc.embedding_column(country,
                                               dimension=2,
                                               initializer=_initializer)

        # Provide inputs with unknown shapes: dense tensors for price and
        # country, and a sparse tensor for body-style.
        features = {
            'price': array_ops.placeholder(dtypes.float32),
            'body-style': array_ops.sparse_placeholder(dtypes.string),
            # A dense tensor is used for this categorical column.
            'country': array_ops.placeholder(dtypes.string),
        }
        self.assertIsNone(features['price'].shape.ndims)
        self.assertIsNone(features['body-style'].get_shape().ndims)
        self.assertIsNone(features['country'].shape.ndims)

        price_data = np.array([11., 12.])
        body_style_data = sparse_tensor.SparseTensorValue(
            indices=((0,), (1,)),
            values=('sedan', 'hardtop'),
            dense_shape=(2,))
        country_data = np.array([['US'], ['CA']])

        net = df.DenseFeatures([price, one_hot_body_style,
                                embedded_country])(features)
        self.assertEqual(1 + 3 + 2, net.shape[1])
        with _initialized_session() as sess:

            # Each row is formed by concatenating `one_hot_body_style`,
            # `embedded_country`, and `price` in order.
            self.assertAllEqual(
                [[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]],
                sess.run(net,
                         feed_dict={
                             features['price']: price_data,
                             features['body-style']: body_style_data,
                             features['country']: country_data
                         }))
Example #22
  def testInvalidDimensionSizeInputUnavailableInGraphConstruction(self):
    sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
    with self.test_session(use_gpu=False) as sess:
      new_shape = np.array([3, 7, 5], dtype=np.int64)
      out = sparse_ops.sparse_reset_shape(sp_input, new_shape)

      with self.assertRaisesOpError("x <= y did not hold element-wise"):
        sess.run(out, feed_dict={sp_input: self._SparseTensorValue_2x5x6()})
Example #23
 def testGetTensorFromInfoSparse(self):
   expected = array_ops.sparse_placeholder(dtypes.float32, name="x")
   tensor_info = utils.build_tensor_info(expected)
   actual = utils.get_tensor_from_tensor_info(tensor_info)
   self.assertIsInstance(actual, sparse_tensor.SparseTensor)
   self.assertEqual(expected.values.name, actual.values.name)
   self.assertEqual(expected.indices.name, actual.indices.name)
   self.assertEqual(expected.dense_shape.name, actual.dense_shape.name)
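The same round trip can be written against the public saved_model utils; a short sketch (TF1 graph mode assumed):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

sp = tf.sparse_placeholder(tf.float32, name="x")
info = tf.saved_model.utils.build_tensor_info(sp)  # encoded via the coo_sparse fields
roundtrip = tf.saved_model.utils.get_tensor_from_tensor_info(info)
assert isinstance(roundtrip, tf.SparseTensor)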
Example #26
  def __init__(self,
               input_shape=None,
               batch_size=None,
               dtype=dtypes.float32,
               input_tensor=None,
               sparse=False,
               name=None):
    super(InputLayer, self).__init__(dtype=dtype, name=name)
    self.built = True
    self.sparse = sparse
    self.batch_size = batch_size

    if isinstance(input_shape, tensor_shape.TensorShape):
      input_shape = tuple(input_shape.as_list())

    if input_tensor is None:
      if input_shape is not None:
        batch_input_shape = (batch_size,) + tuple(input_shape)
      else:
        batch_input_shape = None

      if context.in_eager_mode():
        # In eager mode, create a temporary placeholder to call the layer on.
        input_tensor = base._DeferredTensor(  # pylint: disable=protected-access
            shape=batch_input_shape,
            dtype=dtype,
            name=self.name)
      else:
        # In graph mode, create a graph placeholder to call the layer on.
        if sparse:
          input_tensor = array_ops.sparse_placeholder(
              shape=batch_input_shape,
              dtype=dtype,
              name=self.name)
        else:
          input_tensor = array_ops.placeholder(
              shape=batch_input_shape,
              dtype=dtype,
              name=self.name)

      # For compatibility with Keras API.
      self.is_placeholder = True
      self._batch_input_shape = batch_input_shape
    else:
      # For compatibility with Keras API.
      self.is_placeholder = False
      self._batch_input_shape = tuple(input_tensor.get_shape().as_list())

    # Create an input node to add to self.outbound_node
    # and set output_tensors' _keras_history.
    input_tensor._keras_history = (self, 0, 0)  # pylint: disable=protected-access
    base.Node(
        self,
        inbound_layers=[],
        node_indices=[],
        tensor_indices=[],
        input_tensors=[input_tensor],
        output_tensors=[input_tensor])
Example #28
  def _to_placeholder(self):
    spec = self.type_spec

    # nest.map_structure loses dense shape information for sparse tensors.
    # So, we special-case sparse placeholder creation.
    # This only preserves shape information for top-level sparse tensors;
    # not for sparse tensors that are nested inside another composite
    # tensor.
    return array_ops.sparse_placeholder(dtype=spec.dtype, shape=spec.shape)
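A small sketch of the shape-preservation point made in the comment above (TF1 graph mode assumed): creating the placeholder with `sparse_placeholder` keeps the static dense shape, whereas assembling a `SparseTensor` from per-component placeholders keeps only the rank.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

sp = tf.sparse_placeholder(tf.float32, shape=(5, 128))
print(sp.get_shape())  # (5, 128): static dense shape preserved

indices = tf.placeholder(tf.int64, shape=[None, 2])
values = tf.placeholder(tf.float32, shape=[None])
dense_shape = tf.placeholder(tf.int64, shape=[2])
sp2 = tf.SparseTensor(indices, values, dense_shape)
print(sp2.get_shape())  # (?, ?): only the rank survives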
Example #29
  def test_build_all_signature_defs_with_single_alternatives(self):
    receiver_tensor = array_ops.placeholder(dtypes.string)
    receiver_tensors_alternative_1 = array_ops.placeholder(dtypes.int64)
    receiver_tensors_alternative_2 = array_ops.sparse_placeholder(
        dtypes.float32)
    # Note we are passing single Tensors as values of
    # receiver_tensors_alternatives, where normally that is a dict.
    # In this case a dict will be created using the default receiver tensor
    # name "input".
    receiver_tensors_alternatives = {"other1": receiver_tensors_alternative_1,
                                     "other2": receiver_tensors_alternative_2}
    output_1 = constant_op.constant([1.])
    output_2 = constant_op.constant(["2"])
    output_3 = constant_op.constant(["3"])
    export_outputs = {
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            export_output.RegressionOutput(value=output_1),
        "head-2": export_output.ClassificationOutput(classes=output_2),
        "head-3": export_output.PredictOutput(outputs={
            "some_output_3": output_3
        }),
    }

    signature_defs = export.build_all_signature_defs(
        receiver_tensor, export_outputs, receiver_tensors_alternatives)

    expected_signature_defs = {
        "serving_default":
            signature_def_utils.regression_signature_def(
                receiver_tensor,
                output_1),
        "head-2":
            signature_def_utils.classification_signature_def(
                receiver_tensor,
                output_2, None),
        "head-3":
            signature_def_utils.predict_signature_def(
                {"input": receiver_tensor},
                {"some_output_3": output_3}),
        "other1:head-3":
            signature_def_utils.predict_signature_def(
                {"input": receiver_tensors_alternative_1},
                {"some_output_3": output_3}),
        "other2:head-3":
            signature_def_utils.predict_signature_def(
                {"input": receiver_tensors_alternative_2},
                {"some_output_3": output_3})

        # Note that the alternatives 'other:serving_default' and 'other:head-2'
        # are invalid, because regression and classification signatures must
        # take a single string input.  Here we verify that these invalid
        # signatures are not included in the export.
    }

    self.assertDictEqual(expected_signature_defs, signature_defs)
Example #30
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
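    """Create a dense or sparse graph placeholder (Keras-backend-style helper)."""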
    if dtype is None:
        dtype = dtypes.float32
    if not shape:
        if ndim:
            shape = (None, ) * ndim
    if sparse:
        x = array_ops.sparse_placeholder(dtype, shape=shape, name=name)
    else:
        x = array_ops.placeholder(dtype, shape=shape, name=name)
    return x
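A brief usage sketch for the helper above (names refer to that definition; a graph context is assumed):

x_dense = placeholder(shape=(None, 32))                 # -> tf.placeholder
x_sparse = placeholder(shape=(None, 32), sparse=True)   # -> tf.sparse_placeholder
x_inferred = placeholder(ndim=3)                        # shape becomes (None, None, None)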
Example #31
    def testInputUnavailableInGraphConstructionOk(self):
        with self.test_session(use_gpu=False) as sess:
            sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
            new_shape = np.array([3, 6, 7], dtype=np.int64)
            sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)

            output = sess.run(
                sp_output, feed_dict={sp_input: self._SparseTensorValue_2x5x6()})

            self.assertAllEqual(output.indices,
                                [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                 [1, 1, 4], [1, 3, 2], [1, 3, 3]])
            self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
            self.assertAllEqual(output.shape, [3, 6, 7])
    def benchmarkBatchSparse(self):
        non_zeros_per_row_values = [0, 1, 5, 10, 100]
        batch_size_values = [1, 32, 64, 128, 1024]

        sparse_placeholder = array_ops.sparse_placeholder(dtype=dtypes.int64)
        batch_size_placeholder = array_ops.placeholder(dtype=dtypes.int64,
                                                       shape=[])

        dataset = dataset_ops.Dataset.from_tensors(
            sparse_placeholder).repeat().batch(batch_size_placeholder)
        iterator = dataset.make_initializable_iterator()
        next_element = iterator.get_next()

        for non_zeros_per_row in non_zeros_per_row_values:

            sparse_value = sparse_tensor.SparseTensorValue(
                indices=np.arange(non_zeros_per_row,
                                  dtype=np.int64)[:, np.newaxis],
                values=np.arange(non_zeros_per_row, dtype=np.int64),
                dense_shape=[1000])

            for batch_size in batch_size_values:

                with session.Session() as sess:
                    sess.run(iterator.initializer,
                             feed_dict={
                                 sparse_placeholder: sparse_value,
                                 batch_size_placeholder: batch_size
                             })
                    # Run five steps to warm up the session caches before taking the
                    # first measurement.
                    for _ in range(5):
                        sess.run(next_element.indices.op)
                    deltas = []
                    for _ in range(100):
                        start = time.time()
                        for _ in range(100):
                            sess.run(next_element.indices.op)
                        end = time.time()
                        deltas.append(end - start)

                median_wall_time = np.median(deltas) / 100.0

                print(
                    'Batch sparse dataset non-zeros per row: %d batch_size: %d '
                    'wall time: %f' %
                    (non_zeros_per_row, batch_size, median_wall_time))
                self.report_benchmark(
                    iters=10000,
                    wall_time=median_wall_time,
                    name='benchmark_batch_sparse_dataset_nnz_%d_batch_size_%d'
                    % (non_zeros_per_row, batch_size))
Example #33
 def testBuildTensorInfoSparse(self):
     x = array_ops.sparse_placeholder(dtypes.float32, [42, 69], name="x")
     x_tensor_info = utils.build_tensor_info(x)
     self.assertEqual(x.values.name,
                      x_tensor_info.coo_sparse.values_tensor_name)
     self.assertEqual(x.indices.name,
                      x_tensor_info.coo_sparse.indices_tensor_name)
     self.assertEqual(x.dense_shape.name,
                      x_tensor_info.coo_sparse.dense_shape_tensor_name)
     self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info.dtype)
     self.assertEqual(2, len(x_tensor_info.tensor_shape.dim))
     self.assertEqual(42, x_tensor_info.tensor_shape.dim[0].size)
     self.assertEqual(69, x_tensor_info.tensor_shape.dim[1].size)
  def testFeedInputUnavailableInGraphConstructionOk(self):
    with self.session(use_gpu=False) as sess:
      sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
      new_shape = np.array([3, 6, 7], dtype=np.int64)
      sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)

      output = sess.run(sp_output,
                        feed_dict={sp_input: self._SparseTensorValue_2x5x6()})

      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [3, 6, 7])
Example #36
def keras_tensor_to_placeholder(x):
    """Construct a graph placeholder to represent a KerasTensor when tracing."""
    if hasattr(x, '_user_registered_symbolic_object'):
        return x._user_registered_symbolic_object  # pylint: disable=protected-access

    if isinstance(x, KerasTensor):
        spec = x.type_spec

        if x._inferred_value is not None:  # pylint: disable=protected-access
            # If we suspect this KerasTensor might be representing a shape tensor,
            # and we were able to extract value information with TensorFlow's shape
            # handling when making the KerasTensor, we construct the placeholder by
            # re-injecting the inferred value information into the graph.
            # Even though keras layers each trace in their own scratch
            # graph, this shape value info injection allows us to capture
            # a sizable and useful subset of the C++ shape value inference TF can do
            # if all tf ops appear in the same graph when using shape ops.
            #
            # Examples of things this cannot infer concrete dimensions for
            # that the full single-graph C++ shape inference sometimes can are:
            # * cases where the shape tensor is cast out of int32 before being
            #   manipulated w/ floating point numbers then converted back
            # * cases where int32 tensors w/ rank > 2 are manipulated before being
            #   used as a shape tensor
            inferred_value = array_ops.shape(
                array_ops.placeholder(shape=x._inferred_value,
                                      dtype=dtypes.int32))  # pylint: disable=protected-access
            if spec.shape.rank == 0:
                # `tf.shape` always returns a rank-1 tensor, so we may need to
                # turn it back into a scalar.
                inferred_value = inferred_value[0]
            return inferred_value  # pylint: disable=protected-access

        if isinstance(spec, sparse_tensor.SparseTensorSpec):
            # nest.map_structure loses dense shape information for sparse tensors.
            # So, we special-case sparse placeholder creation.
            # This only preserves shape information for top-level sparse tensors;
            # not for sparse tensors that are nested inside another composite
            # tensor.
            return array_ops.sparse_placeholder(dtype=spec.dtype,
                                                shape=spec.shape)

        def component_to_placeholder(component):
            return array_ops.placeholder(component.dtype, component.shape)

        ph = nest.map_structure(component_to_placeholder,
                                spec,
                                expand_composites=True)
        return ph
    else:
        return x
    def testFromSparseTensorSlices(self):
        """Test a dataset based on slices of a `tf.sparse.SparseTensor`."""
        st = array_ops.sparse_placeholder(dtypes.float64)
        iterator = dataset_ops.make_initializable_iterator(
            dataset_ops.Dataset.from_sparse_tensor_slices(st))
        init_op = iterator.initializer
        get_next = sparse_tensor.SparseTensor(*iterator.get_next())

        with self.cached_session() as sess:
            slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [],
                      []]

            # Test with sparse tensor in the appropriate order.
            # pylint: disable=g-complex-comprehension
            indices = np.array([[i, j] for i in range(len(slices))
                                for j in range(len(slices[i]))])
            values = np.array([val for s in slices for val in s])
            # pylint: enable=g-complex-comprehension
            dense_shape = np.array(
                [len(slices), max(len(s) for s in slices) + 1])
            sparse_feed = sparse_tensor.SparseTensorValue(
                indices, values, dense_shape)
            sess.run(init_op, feed_dict={st: sparse_feed})
            for i, s in enumerate(slices):
                results = sess.run(get_next)
                self.assertAllEqual(s, results.values)
                expected_indices = np.array([[j] for j in range(len(slices[i]))
                                             ]).reshape([-1, 1])
                self.assertAllEqual(expected_indices, results.indices)
                self.assertAllEqual(dense_shape[1:], results.dense_shape)
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)

            # Test with sparse tensor in the reverse order, which is not
            # currently supported.
            reverse_order_indices = indices[::-1, :]
            reverse_order_values = values[::-1]
            sparse_feed = sparse_tensor.SparseTensorValue(
                reverse_order_indices, reverse_order_values, dense_shape)
            with self.assertRaises(errors.UnimplementedError):
                sess.run(init_op, feed_dict={st: sparse_feed})

            # Test with an empty sparse tensor.
            empty_indices = np.empty((0, 4), dtype=np.int64)
            empty_values = np.empty((0, ), dtype=np.float64)
            empty_dense_shape = [0, 4, 37, 9]
            sparse_feed = sparse_tensor.SparseTensorValue(
                empty_indices, empty_values, empty_dense_shape)
            sess.run(init_op, feed_dict={st: sparse_feed})
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)
 def test_partial_shape_inference(self):
     a = array_ops.sparse_placeholder(dtypes.float32)
     b = array_ops.placeholder(dtypes.float32)
     axes = ([1], [0])
     output = sparse_ops.sparse_tensor_dense_tensordot(a, b, axes)
     self.assertEqual(output.get_shape().ndims, None)
     a = array_ops.sparse_placeholder(dtypes.float32, shape=[None, 2])
     b.set_shape([2, 3])
     output = sparse_ops.sparse_tensor_dense_tensordot(a, b, axes)
     output_shape = output.get_shape()
     self.assertEqual(output_shape.ndims, 2)
     output_shape = output_shape.as_list()
     self.assertEqual(output_shape[0], None)
     self.assertEqual(output_shape[1], 3)
     a = array_ops.sparse_placeholder(dtypes.float32, shape=[2, 2])
     b = array_ops.placeholder(dtypes.float32)
     b.set_shape([2, None])
     output = sparse_ops.sparse_tensor_dense_tensordot(a, b, axes)
     output_shape = output.get_shape()
     self.assertEqual(output_shape.ndims, 2)
     output_shape = output_shape.as_list()
     self.assertEqual(output_shape[0], 2)
     self.assertEqual(output_shape[1], None)
Example #39
 def testGetNonFullySpecifiedShapes(self):
   outputs = {
       "output-1": array_ops.placeholder(dtypes.float32, [None, 10, None]),
       "output-2": array_ops.sparse_placeholder(dtypes.float32),
   }
   signature_def = _make_signature({}, outputs)
   shapes = signature_def_utils_impl.get_signature_def_output_shapes(
       signature_def)
   self.assertEqual(len(shapes), 2)
   # Must compare shapes with as_list() since 2 equivalent non-fully defined
   # shapes are not equal to each other.
   self.assertEqual(shapes["output-1"].as_list(), [None, 10, None])
    # Must compare `dims` since it's an unknown shape.
   self.assertEqual(shapes["output-2"].dims, None)
Example #41
 def testFeedSparsePlaceholderConstantShape(self):
     with session.Session() as s:
         indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
         values = np.array([1.0, 2.0]).astype(np.float32)
         shape = np.array([7, 9, 2]).astype(np.int64)
         sp = array_ops.sparse_placeholder(dtype=np.float32,
                                           shape=shape,
                                           name='placeholder1')
         self.assertAllEqual(sp.shape.eval(session=s), shape)
         self.assertAllEqual(tensor_util.constant_value(sp.shape), shape)
         sp_indices = array_ops.identity(sp.indices)
         sp_values = array_ops.identity(sp.values)
         sp_shape = array_ops.identity(sp.shape)
         # Feed with tuple
         indices_out, values_out, shape_out = s.run(
             [sp_indices, sp_values, sp_shape], {sp: (indices, values)})
         self.assertAllEqual(indices_out, indices)
         self.assertAllEqual(values_out, values)
         self.assertAllEqual(shape_out, shape)
Example #42
  def benchmarkBatchSparse(self):
    non_zeros_per_row_values = [0, 1, 5, 10, 100]
    batch_size_values = [1, 32, 64, 128, 1024]

    sparse_placeholder = array_ops.sparse_placeholder(dtype=dtypes.int64)
    batch_size_placeholder = array_ops.placeholder(dtype=dtypes.int64, shape=[])

    dataset = dataset_ops.Dataset.from_tensors(sparse_placeholder).repeat(
        ).batch(batch_size_placeholder)
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    dataset = dataset.with_options(options)
    iterator = dataset_ops.make_initializable_iterator(dataset)
    next_element = iterator.get_next()

    for non_zeros_per_row in non_zeros_per_row_values:

      sparse_value = sparse_tensor.SparseTensorValue(
          indices=np.arange(non_zeros_per_row, dtype=np.int64)[:, np.newaxis],
          values=np.arange(non_zeros_per_row, dtype=np.int64),
          dense_shape=[1000])

      for batch_size in batch_size_values:

        with session.Session() as sess:
          sess.run(iterator.initializer, feed_dict={
              sparse_placeholder: sparse_value,
              batch_size_placeholder: batch_size})
          # Run five steps to warm up the session caches before taking the
          # first measurement.
          for _ in range(5):
            sess.run(next_element.indices.op)
          deltas = []
          for _ in range(100):
            start = time.time()
            for _ in range(100):
              sess.run(next_element.indices.op)
            end = time.time()
            deltas.append(end - start)

        median_wall_time = np.median(deltas) / 100.0

        self.report_benchmark(
            iters=10000,
            wall_time=median_wall_time,
            name="sparse_num_elements_%d_batch_size_%d" %
            (non_zeros_per_row, batch_size))
Example #43
  def test_build_all_signature_defs_with_dict_alternatives(self):
    with context.graph_mode():
      receiver_tensor = array_ops.placeholder(dtypes.string)
      receiver_tensors_alternative_1 = {
          "foo": array_ops.placeholder(dtypes.int64),
          "bar": array_ops.sparse_placeholder(dtypes.float32)}
      receiver_tensors_alternatives = {"other": receiver_tensors_alternative_1}
      output_1 = constant_op.constant([1.])
      output_2 = constant_op.constant(["2"])
      output_3 = constant_op.constant(["3"])
      export_outputs = {
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              export_output.RegressionOutput(value=output_1),
          "head-2": export_output.ClassificationOutput(classes=output_2),
          "head-3": export_output.PredictOutput(outputs={
              "some_output_3": output_3
          }),
      }

      signature_defs = export_utils.build_all_signature_defs(
          receiver_tensor, export_outputs, receiver_tensors_alternatives)

      expected_signature_defs = {
          "serving_default":
              signature_def_utils.regression_signature_def(
                  receiver_tensor,
                  output_1),
          "head-2":
              signature_def_utils.classification_signature_def(
                  receiver_tensor,
                  output_2, None),
          "head-3":
              signature_def_utils.predict_signature_def(
                  {"input": receiver_tensor},
                  {"some_output_3": output_3}),
          "other:head-3":
              signature_def_utils.predict_signature_def(
                  receiver_tensors_alternative_1,
                  {"some_output_3": output_3})

          # Note that the alternatives 'other:serving_default' and
          # 'other:head-2' are invalid, because regression and classification
          # signatures must take a single string input.  Here we verify that
          # these invalid signatures are not included in the export_utils.
      }

      self.assertDictEqual(expected_signature_defs, signature_defs)
  def testFromSparseTensorSlices(self):
    """Test a dataset based on slices of a `tf.SparseTensor`."""
    st = array_ops.sparse_placeholder(dtypes.float64)
    iterator = (dataset_ops.Dataset.from_sparse_tensor_slices(st)
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = sparse_tensor.SparseTensor(*iterator.get_next())

    with self.test_session() as sess:
      slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]

      # Test with sparse tensor in the appropriate order.
      indices = np.array(
          [[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
      values = np.array([val for s in slices for val in s])
      dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
      sparse_feed = sparse_tensor.SparseTensorValue(indices, values,
                                                    dense_shape)
      sess.run(init_op, feed_dict={st: sparse_feed})
      for i, s in enumerate(slices):
        results = sess.run(get_next)
        self.assertAllEqual(s, results.values)
        expected_indices = np.array(
            [[j] for j in range(len(slices[i]))]).reshape([-1, 1])
        self.assertAllEqual(expected_indices, results.indices)
        self.assertAllEqual(dense_shape[1:], results.dense_shape)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Test with sparse tensor in the reverse order, which is not
      # currently supported.
      reverse_order_indices = indices[::-1, :]
      reverse_order_values = values[::-1]
      sparse_feed = sparse_tensor.SparseTensorValue(
          reverse_order_indices, reverse_order_values, dense_shape)
      with self.assertRaises(errors.UnimplementedError):
        sess.run(init_op, feed_dict={st: sparse_feed})

      # Test with an empty sparse tensor.
      empty_indices = np.empty((0, 4), dtype=np.int64)
      empty_values = np.empty((0,), dtype=np.float64)
      empty_dense_shape = [0, 4, 37, 9]
      sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
                                                    empty_dense_shape)
      sess.run(init_op, feed_dict={st: sparse_feed})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
Example #45
    def testEmptySparseTensorSlicesInvalid(self):
        """Test a dataset based on invalid `tf.sparse.SparseTensor`."""
        st = array_ops.sparse_placeholder(dtypes.float64)
        iterator = dataset_ops.make_initializable_iterator(
            dataset_ops.Dataset.from_sparse_tensor_slices(st))
        init_op = iterator.initializer

        with self.cached_session() as sess:
            # Test with an empty sparse tensor but with non-empty values.
            empty_indices = np.empty((0, 4), dtype=np.int64)
            non_empty_values = [1, 2, 3, 4]
            empty_dense_shape = [0, 4, 37, 9]
            sparse_feed = sparse_tensor.SparseTensorValue(
                empty_indices, non_empty_values, empty_dense_shape)
            # Here, we expect the test to fail when running the feed.
            with self.assertRaises(errors.InvalidArgumentError):
                sess.run(init_op, feed_dict={st: sparse_feed})
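
The failure above follows from a basic SparseTensorValue invariant: there must be exactly one index row per value. A tiny illustrative check (not from the test file, numpy only):

import numpy as np

indices = np.empty((0, 4), dtype=np.int64)          # declares zero entries
values = np.array([1, 2, 3, 4], dtype=np.float64)   # but supplies four values

# A well-formed sparse feed needs indices.shape[0] == values.shape[0]; this
# pair violates that, which is what triggers InvalidArgumentError above.
print(indices.shape[0] == values.shape[0])  # False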
Example #46
  def test_build_all_signature_defs_with_dict_alternatives(self):
    receiver_tensor = array_ops.placeholder(dtypes.string)
    receiver_tensors_alternative_1 = {
        "foo": array_ops.placeholder(dtypes.int64),
        "bar": array_ops.sparse_placeholder(dtypes.float32)}
    receiver_tensors_alternatives = {"other": receiver_tensors_alternative_1}
    output_1 = constant_op.constant([1.])
    output_2 = constant_op.constant(["2"])
    output_3 = constant_op.constant(["3"])
    export_outputs = {
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            export_output.RegressionOutput(value=output_1),
        "head-2": export_output.ClassificationOutput(classes=output_2),
        "head-3": export_output.PredictOutput(outputs={
            "some_output_3": output_3
        }),
    }

    signature_defs = export_utils.build_all_signature_defs(
        receiver_tensor, export_outputs, receiver_tensors_alternatives)

    expected_signature_defs = {
        "serving_default":
            signature_def_utils.regression_signature_def(
                receiver_tensor,
                output_1),
        "head-2":
            signature_def_utils.classification_signature_def(
                receiver_tensor,
                output_2, None),
        "head-3":
            signature_def_utils.predict_signature_def(
                {"input": receiver_tensor},
                {"some_output_3": output_3}),
        "other:head-3":
            signature_def_utils.predict_signature_def(
                receiver_tensors_alternative_1,
                {"some_output_3": output_3})

        # Note that the alternatives 'other:serving_default' and
        # 'other:head-2' are invalid, because regression and classification
        # signatures must take a single string input.  Here we verify that
        # these invalid signatures are excluded from the signature defs
        # built by export_utils.
    }

    self.assertDictEqual(expected_signature_defs, signature_defs)
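
A hedged sketch of the constraint the comment above refers to, using the public tf.compat.v1.saved_model API (illustrative, not part of the test): regression and classification signatures require a single string input tensor, while predict signatures accept arbitrary dicts, which is why only the predict head survives under the dict-valued alternative.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
utils = tf.saved_model.signature_def_utils

string_input = tf.placeholder(tf.string)
int_input = tf.placeholder(tf.int64)
output = tf.constant([1.0])

utils.regression_signature_def(string_input, output)           # accepted
utils.predict_signature_def({"x": int_input}, {"y": output})   # accepted
try:
  utils.regression_signature_def(int_input, output)            # not a string input
except ValueError as e:
  print("rejected:", e)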
Example #47
    def testEmptySparseTensorSlices(self):
        """Test a dataset based on slices of an empty `tf.sparse.SparseTensor`."""
        st = array_ops.sparse_placeholder(dtypes.float64)
        iterator = dataset_ops.make_initializable_iterator(
            dataset_ops.Dataset.from_sparse_tensor_slices(st))
        init_op = iterator.initializer
        get_next = sparse_tensor.SparseTensor(*iterator.get_next())

        with self.cached_session() as sess:
            # Test with an empty sparse tensor.
            empty_indices = np.empty((0, 4), dtype=np.int64)
            empty_values = np.empty((0, ), dtype=np.float64)
            empty_dense_shape = [0, 4, 37, 9]
            sparse_feed = sparse_tensor.SparseTensorValue(
                empty_indices, empty_values, empty_dense_shape)
            sess.run(init_op, feed_dict={st: sparse_feed})
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)
Example #48
  def testNoShapePlaceholder(self):
    foo = array_ops.sparse_placeholder(dtypes.float32, shape=None)
    self.assertAllEqual(None, foo.get_shape())
    self.assertAllEqual([None, None], foo.indices.get_shape().as_list())
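
An illustrative summary (not from the test file) of the shape bookkeeping checked here and in Example #56 below: with shape=None nothing is known, so the indices have shape [None, None]; once the rank is known, the second indices dimension is pinned to that rank.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

unknown = tf.sparse_placeholder(tf.float32, shape=None)
print(unknown.indices.get_shape().as_list())   # [None, None]

ranked = tf.sparse_placeholder(tf.float32, shape=(10, 47))
print(ranked.indices.get_shape().as_list())    # [None, 2]: rank 2 is known
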
  def _run_test_als_transposed(self, use_factors_weights_cache):
    with self.test_session():
      col_init = np.random.rand(7, 3)
      als_model = factorization_ops.WALSModel(
          5,
          7,
          3,
          col_init=col_init,
          row_weights=None,
          col_weights=None,
          use_factors_weights_cache=use_factors_weights_cache)

      als_model.initialize_op.run()
      als_model.worker_init.run()

      wals_model = factorization_ops.WALSModel(
          5,
          7,
          3,
          col_init=col_init,
          row_weights=[0] * 5,
          col_weights=[0] * 7,
          use_factors_weights_cache=use_factors_weights_cache)
      wals_model.initialize_op.run()
      wals_model.worker_init.run()
      sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
      # Here we test a partial row update with identical inputs, but with the
      # input transposed for ALS.
      sp_r_t = np_matrix_to_tf_sparse(
          INPUT_MATRIX, [3, 1], transpose=True).eval()
      sp_r = np_matrix_to_tf_sparse(INPUT_MATRIX, [3, 1]).eval()

      feed_dict = {sp_feeder: sp_r_t}
      als_model.row_update_prep_gramian_op.run()
      als_model.initialize_row_update_op.run()
      process_input_op = als_model.update_row_factors(
          sp_input=sp_feeder, transpose_input=True)[1]
      process_input_op.run(feed_dict=feed_dict)
      # Only updated row 1 and row 3, so only compare these rows since others
      # have randomly initialized values.
      row_factors1 = [
          als_model.row_factors[0].eval()[1], als_model.row_factors[0].eval()[3]
      ]
      # Testing row projection. The projection weight doesn't matter in this
      # case since the model is the ALS special case. Note that the ordering
      # of the returned results matches the ordering of the input feature
      # vectors.
      als_projected_row_factors1 = als_model.project_row_factors(
          sp_input=sp_feeder, transpose_input=True).eval(feed_dict=feed_dict)

      feed_dict = {sp_feeder: sp_r}
      wals_model.row_update_prep_gramian_op.run()
      wals_model.initialize_row_update_op.run()
      process_input_op = wals_model.update_row_factors(sp_input=sp_feeder)[1]
      process_input_op.run(feed_dict=feed_dict)
      # Only updated row 1 and row 3, so only compare these rows since others
      # have randomly initialized values.
      row_factors2 = [
          wals_model.row_factors[0].eval()[1],
          wals_model.row_factors[0].eval()[3]
      ]
      for r1, r2 in zip(row_factors1, row_factors2):
        self.assertAllClose(r1, r2, atol=1e-3)
      # Note that the ordering of the returned projection results matches the
      # ordering of the input feature vectors.
      self.assertAllClose(
          als_projected_row_factors1, [row_factors2[1], row_factors2[0]],
          atol=1e-3)
Example #50
  def get_placeholder(self):
    if self.is_sparse:
      return array_ops.sparse_placeholder(dtype=self.dtype)
    return array_ops.placeholder(dtype=self.dtype,
                                 shape=[None] + list(self.shape[1:]))
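
A standalone sketch of the same dispatch, with make_placeholder as a hypothetical free-function stand-in for the method above: sparse inputs get a shapeless sparse placeholder, dense inputs a regular placeholder with the batch dimension left unknown.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

def make_placeholder(is_sparse, dtype, shape):
  # Mirrors get_placeholder: drop the batch dimension and leave it unknown.
  if is_sparse:
    return tf.sparse_placeholder(dtype=dtype)
  return tf.placeholder(dtype=dtype, shape=[None] + list(shape[1:]))

dense = make_placeholder(False, tf.float32, (32, 8))
print(dense.get_shape().as_list())   # [None, 8]
sparse = make_placeholder(True, tf.float32, (32, 8))
print(type(sparse).__name__)         # SparseTensor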
Example #51
  def __init__(self,
               input_shape=None,
               batch_size=None,
               dtype=None,
               input_tensor=None,
               sparse=False,
               name=None,
               **kwargs):
    if 'batch_input_shape' in kwargs:
      batch_input_shape = kwargs.pop('batch_input_shape')
      if input_shape and batch_input_shape:
        raise ValueError('Only provide the input_shape OR '
                         'batch_input_shape argument to '
                         'InputLayer, not both at the same time.')
      batch_size = batch_input_shape[0]
      input_shape = batch_input_shape[1:]
    if kwargs:
      raise ValueError('Unrecognized keyword arguments:', kwargs.keys())

    if not name:
      prefix = 'input'
      name = prefix + '_' + str(K.get_uid(prefix))

    if not dtype:
      if input_tensor is None:
        dtype = K.floatx()
      else:
        dtype = K.dtype(input_tensor)
    super(InputLayer, self).__init__(dtype=dtype, name=name)
    self.built = True
    self.sparse = sparse
    self.batch_size = batch_size

    if isinstance(input_shape, tensor_shape.TensorShape):
      input_shape = tuple(input_shape.as_list())

    if input_tensor is None:
      if input_shape is not None:
        batch_input_shape = (batch_size,) + tuple(input_shape)
      else:
        batch_input_shape = None

      if context.executing_eagerly():
        # In eager mode, create a temporary placeholder to call the layer on.
        input_tensor = base_layer.DeferredTensor(  # pylint: disable=protected-access
            shape=batch_input_shape,
            dtype=dtype,
            name=self.name)
      else:
        # In graph mode, create a graph placeholder to call the layer on.
        if sparse:
          input_tensor = array_ops.sparse_placeholder(
              shape=batch_input_shape,
              dtype=dtype,
              name=self.name)
        else:
          input_tensor = array_ops.placeholder(
              shape=batch_input_shape,
              dtype=dtype,
              name=self.name)

      # For compatibility with Keras API.
      self.is_placeholder = True
      self._batch_input_shape = batch_input_shape
    else:
      # For compatibility with Keras API.
      self.is_placeholder = False
      self._batch_input_shape = tuple(input_tensor.get_shape().as_list())

      if context.executing_eagerly():
        raise ValueError('You should not pass an input tensor when executing '
                         'in eager mode. For example, instead of creating an '
                         'InputLayer, you should instantiate your model and '
                         'directly call it on your input.')

    # Create an input node to add to self.outbound_node
    # and set output_tensors' _keras_history.
    input_tensor._keras_history = (self, 0, 0)  # pylint: disable=protected-access
    base_layer.Node(
        self,
        inbound_layers=[],
        node_indices=[],
        tensor_indices=[],
        input_tensors=[input_tensor],
        output_tensors=[input_tensor])
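
In the public Keras API this code path is reached through Input(..., sparse=True). A minimal hedged usage sketch (illustrative only, assuming TF1 graph mode and a build where tf.keras is available):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# sparse=True routes InputLayer to array_ops.sparse_placeholder above.
x = tf.keras.Input(shape=(10,), sparse=True)
print(x.dtype)  # <dtype: 'float32'>, Keras' default floatx
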
  def _run_test_als(self, use_factors_weights_cache):
    with self.test_session():
      col_init = np.random.rand(7, 3)
      als_model = factorization_ops.WALSModel(
          5,
          7,
          3,
          col_init=col_init,
          row_weights=None,
          col_weights=None,
          use_factors_weights_cache=use_factors_weights_cache)

      als_model.initialize_op.run()
      als_model.worker_init.run()
      als_model.row_update_prep_gramian_op.run()
      als_model.initialize_row_update_op.run()
      process_input_op = als_model.update_row_factors(self._wals_inputs)[1]
      process_input_op.run()
      row_factors1 = [x.eval() for x in als_model.row_factors]
      # Testing row projection. The projection weight doesn't matter in this
      # case since the model is the ALS special case.
      als_projected_row_factors1 = als_model.project_row_factors(
          self._wals_inputs).eval()

      wals_model = factorization_ops.WALSModel(
          5,
          7,
          3,
          col_init=col_init,
          row_weights=0,
          col_weights=0,
          use_factors_weights_cache=use_factors_weights_cache)
      wals_model.initialize_op.run()
      wals_model.worker_init.run()
      wals_model.row_update_prep_gramian_op.run()
      wals_model.initialize_row_update_op.run()
      process_input_op = wals_model.update_row_factors(self._wals_inputs)[1]
      process_input_op.run()
      row_factors2 = [x.eval() for x in wals_model.row_factors]

      for r1, r2 in zip(row_factors1, row_factors2):
        self.assertAllClose(r1, r2, atol=1e-3)
      self.assertAllClose(
          als_projected_row_factors1,
          [row for shard in row_factors2 for row in shard],
          atol=1e-3)

      # Here we test partial column updates.
      sp_c = np_matrix_to_tf_sparse(
          INPUT_MATRIX, col_slices=[2, 0], shuffle=True).eval()

      sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
      feed_dict = {sp_feeder: sp_c}
      als_model.col_update_prep_gramian_op.run()
      als_model.initialize_col_update_op.run()
      process_input_op = als_model.update_col_factors(sp_input=sp_feeder)[1]
      process_input_op.run(feed_dict=feed_dict)
      col_factors1 = [x.eval() for x in als_model.col_factors]
      # Testing column projection. The projection weight doesn't matter in
      # this case since the model is the ALS special case.
      als_projected_col_factors1 = als_model.project_col_factors(
          np_matrix_to_tf_sparse(
              INPUT_MATRIX, col_slices=[2, 0], shuffle=False)).eval()

      feed_dict = {sp_feeder: sp_c}
      wals_model.col_update_prep_gramian_op.run()
      wals_model.initialize_col_update_op.run()
      process_input_op = wals_model.update_col_factors(sp_input=sp_feeder)[1]
      process_input_op.run(feed_dict=feed_dict)
      col_factors2 = [x.eval() for x in wals_model.col_factors]

      for c1, c2 in zip(col_factors1, col_factors2):
        self.assertAllClose(c1, c2, rtol=5e-3, atol=1e-2)
      self.assertAllClose(
          als_projected_col_factors1,
          [col_factors2[0][2], col_factors2[0][0]],
          # TODO(yifanchen): Investigate the root cause for
          # the accuracy change from 1e-3 to 1e-2.
          atol=1e-2)

  def _run_test_process_input_transposed(self, use_factors_weights_cache):
    with self.test_session():
      sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
      wals_model = factorization_ops.WALSModel(
          5,
          7,
          3,
          num_row_shards=2,
          num_col_shards=3,
          regularization=0.01,
          unobserved_weight=0.1,
          col_init=self.col_init,
          row_weights=self.row_wts,
          col_weights=self.col_wts,
          use_factors_weights_cache=use_factors_weights_cache)

      wals_model.initialize_op.run()
      wals_model.worker_init.run()

      # Split input into multiple SparseTensors with scattered rows.
      # Here the inputs are transposed. But the same constraints as described in
      # the previous non-transposed test case apply to these inputs (before they
      # are transposed).
      sp_r0_t = np_matrix_to_tf_sparse(
          INPUT_MATRIX, [0, 3], transpose=True).eval()
      sp_r1_t = np_matrix_to_tf_sparse(
          INPUT_MATRIX, [4, 1], shuffle=True, transpose=True).eval()
      sp_r2_t = np_matrix_to_tf_sparse(INPUT_MATRIX, [2], transpose=True).eval()
      sp_r3_t = sp_r1_t
      input_scattered_rows = [sp_r0_t, sp_r1_t, sp_r2_t, sp_r3_t]

      # Test updating row factors.
      # Here we feed in scattered rows of the input.
      # Note that the required placeholder name suffixes follow the
      # lexicographical order of the test case names, and then the line
      # order in which the placeholders appear.
      wals_model.row_update_prep_gramian_op.run()
      wals_model.initialize_row_update_op.run()
      process_input_op = wals_model.update_row_factors(
          sp_input=sp_feeder, transpose_input=True)[1]
      for inp in input_scattered_rows:
        feed_dict = {sp_feeder: inp}
        process_input_op.run(feed_dict=feed_dict)
      row_factors = [x.eval() for x in wals_model.row_factors]

      self.assertAllClose(row_factors[0], self._row_factors_0, atol=1e-3)
      self.assertAllClose(row_factors[1], self._row_factors_1, atol=1e-3)

      # Test row projection.
      # Using the specified projection weights for the 2 row feature vectors.
      # This is expected to reproduce the same row factors in the model, as
      # the weights and feature vectors are identical to those used in model
      # training.
      projected_rows = wals_model.project_row_factors(
          sp_input=sp_feeder,
          transpose_input=True,
          projection_weights=[0.5, 0.2])
      # Don't specify the projection weight, so 1.0 will be used. The feature
      # weights will be those specified in the model.
      projected_rows_no_weights = wals_model.project_row_factors(
          sp_input=sp_feeder, transpose_input=True)
      feed_dict = {
          sp_feeder:
              np_matrix_to_tf_sparse(
                  INPUT_MATRIX, [4, 1], shuffle=False, transpose=True).eval()
      }
      self.assertAllClose(
          projected_rows.eval(feed_dict=feed_dict),
          [self._row_factors_1[1], self._row_factors_0[1]],
          atol=1e-3)
      self.assertAllClose(
          projected_rows_no_weights.eval(feed_dict=feed_dict),
          [[1.915879, 1.992677, 1.109057], [0.569082, 0.715088, 0.31777]],
          atol=1e-3)

      # Split input into multiple SparseTensors with scattered columns.
      # Here the inputs are transposed. But the same constraints as described in
      # the previous non-transposed test case apply to these inputs (before they
      # are transposed).
      sp_c0_t = np_matrix_to_tf_sparse(
          INPUT_MATRIX, col_slices=[0, 1], transpose=True).eval()
      sp_c1_t = np_matrix_to_tf_sparse(
          INPUT_MATRIX, col_slices=[4, 2], transpose=True).eval()
      sp_c2_t = np_matrix_to_tf_sparse(
          INPUT_MATRIX, col_slices=[5], transpose=True, shuffle=True).eval()
      sp_c3_t = np_matrix_to_tf_sparse(
          INPUT_MATRIX, col_slices=[3, 6], transpose=True).eval()

      sp_c4_t = sp_c2_t
      input_scattered_cols = [sp_c0_t, sp_c1_t, sp_c2_t, sp_c3_t, sp_c4_t]

      # Test updating column factors.
      # Here we feed in scattered columns of the input.
      wals_model.col_update_prep_gramian_op.run()
      wals_model.initialize_col_update_op.run()
      process_input_op = wals_model.update_col_factors(
          sp_input=sp_feeder, transpose_input=True)[1]
      for inp in input_scattered_cols:
        feed_dict = {sp_feeder: inp}
        process_input_op.run(feed_dict=feed_dict)
      col_factors = [x.eval() for x in wals_model.col_factors]

      self.assertAllClose(col_factors[0], self._col_factors_0, atol=1e-3)
      self.assertAllClose(col_factors[1], self._col_factors_1, atol=1e-3)
      self.assertAllClose(col_factors[2], self._col_factors_2, atol=1e-3)

      # Test column projection.
      # Using the specified projection weights for the 2 column feature vectors.
      # This is expected to reproduce the same column factors in the model,
      # as the weights and feature vectors are identical to those used in
      # model training.
      projected_cols = wals_model.project_col_factors(
          sp_input=sp_feeder,
          transpose_input=True,
          projection_weights=[0.4, 0.7])
      # Don't specify the projection weight, so 1.0 will be used. The feature
      # weights will be those specified in the model.
      projected_cols_no_weights = wals_model.project_col_factors(
          sp_input=sp_feeder, transpose_input=True)
      feed_dict = {sp_feeder: sp_c3_t}
      self.assertAllClose(
          projected_cols.eval(feed_dict=feed_dict),
          [self._col_factors_1[0], self._col_factors_2[1]],
          atol=1e-3)
      self.assertAllClose(
          projected_cols_no_weights.eval(feed_dict=feed_dict),
          [[3.585139, -0.487476, -3.852232],
           [0.557937, 1.813907, 1.331171]],
          atol=1e-3)
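
A small numpy-only illustration of what the transpose=True feeds above look like; the exact convention is an assumption inferred from the helper calls, not taken from np_matrix_to_tf_sparse itself: the same entries are emitted with their (row, column) index pairs swapped and the dense shape reversed.

import numpy as np

indices = np.array([[1, 0], [1, 2], [3, 1]], dtype=np.int64)  # rows 1 and 3
dense_shape = np.array([5, 7], dtype=np.int64)

transposed_indices = indices[:, ::-1]       # swap (row, col) -> (col, row)
transposed_shape = dense_shape[::-1]
print(transposed_indices.tolist())          # [[0, 1], [2, 1], [1, 3]]
print(transposed_shape.tolist())            # [7, 5]
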
  def _run_test_process_input(self, use_factors_weights_cache):
    with self.test_session():
      sp_feeder = array_ops.sparse_placeholder(dtypes.float32)
      wals_model = factorization_ops.WALSModel(
          5,
          7,
          3,
          num_row_shards=2,
          num_col_shards=3,
          regularization=0.01,
          unobserved_weight=0.1,
          col_init=self.col_init,
          row_weights=self.row_wts,
          col_weights=self.col_wts,
          use_factors_weights_cache=use_factors_weights_cache)

      wals_model.initialize_op.run()
      wals_model.worker_init.run()

      # Split input into multiple sparse tensors with scattered rows. Note that
      # this split can be different than the factor sharding and the inputs can
      # consist of non-consecutive rows. Each row needs to include all non-zero
      # elements in that row.
      sp_r0 = np_matrix_to_tf_sparse(INPUT_MATRIX, [0, 2]).eval()
      sp_r1 = np_matrix_to_tf_sparse(INPUT_MATRIX, [1, 4], shuffle=True).eval()
      sp_r2 = np_matrix_to_tf_sparse(INPUT_MATRIX, [3], shuffle=True).eval()
      input_scattered_rows = [sp_r0, sp_r1, sp_r2]

      # Test updating row factors.
      # Here we feed in scattered rows of the input.
      wals_model.row_update_prep_gramian_op.run()
      wals_model.initialize_row_update_op.run()
      process_input_op = wals_model.update_row_factors(
          sp_input=sp_feeder, transpose_input=False)[1]
      for inp in input_scattered_rows:
        feed_dict = {sp_feeder: inp}
        process_input_op.run(feed_dict=feed_dict)
      row_factors = [x.eval() for x in wals_model.row_factors]

      self.assertAllClose(row_factors[0], self._row_factors_0, atol=1e-3)
      self.assertAllClose(row_factors[1], self._row_factors_1, atol=1e-3)

      # Test row projection.
      # Using the specified projection weights for the 2 row feature vectors.
      # This is expected to reproduce the same row factors in the model, as
      # the weights and feature vectors are identical to those used in model
      # training.
      projected_rows = wals_model.project_row_factors(
          sp_input=sp_feeder,
          transpose_input=False,
          projection_weights=[0.2, 0.5])
      # Don't specify the projection weight, so 1.0 will be used. The feature
      # weights will be those specified in the model.
      projected_rows_no_weights = wals_model.project_row_factors(
          sp_input=sp_feeder, transpose_input=False)
      feed_dict = {
          sp_feeder:
              np_matrix_to_tf_sparse(
                  INPUT_MATRIX, [1, 4], shuffle=False).eval()
      }
      self.assertAllClose(
          projected_rows.eval(feed_dict=feed_dict),
          [self._row_factors_0[1], self._row_factors_1[1]],
          atol=1e-3)
      self.assertAllClose(
          projected_rows_no_weights.eval(feed_dict=feed_dict),
          [[0.569082, 0.715088, 0.31777], [1.915879, 1.992677, 1.109057]],
          atol=1e-3)

      # Split input into multiple sparse tensors with scattered columns. Note
      # that here the elements in the sparse tensors are not ordered and also
      # do not need to consist of consecutive columns. However, each column
      # needs to include all non-zero elements in that column.
      sp_c0 = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[2, 0]).eval()
      sp_c1 = np_matrix_to_tf_sparse(
          INPUT_MATRIX, col_slices=[5, 3, 1], shuffle=True).eval()
      sp_c2 = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[4, 6]).eval()
      sp_c3 = np_matrix_to_tf_sparse(
          INPUT_MATRIX, col_slices=[3, 6], shuffle=True).eval()

      input_scattered_cols = [sp_c0, sp_c1, sp_c2, sp_c3]

      # Test updating column factors.
      # Here we feed in scattered columns of the input.
      wals_model.col_update_prep_gramian_op.run()
      wals_model.initialize_col_update_op.run()
      process_input_op = wals_model.update_col_factors(
          sp_input=sp_feeder, transpose_input=False)[1]
      for inp in input_scattered_cols:
        feed_dict = {sp_feeder: inp}
        process_input_op.run(feed_dict=feed_dict)
      col_factors = [x.eval() for x in wals_model.col_factors]

      self.assertAllClose(col_factors[0], self._col_factors_0, atol=1e-3)
      self.assertAllClose(col_factors[1], self._col_factors_1, atol=1e-3)
      self.assertAllClose(col_factors[2], self._col_factors_2, atol=1e-3)

      # Test column projection.
      # Using the specified projection weights for the 3 column feature vectors.
      # This is expected to reproduce the same column factors in the model,
      # as the weights and feature vectors are identical to those used in
      # model training.
      projected_cols = wals_model.project_col_factors(
          sp_input=sp_feeder,
          transpose_input=False,
          projection_weights=[0.6, 0.4, 0.2])
      # Don't specify the projection weight, so 1.0 will be used. The feature
      # weights will be those specified in the model.
      projected_cols_no_weights = wals_model.project_col_factors(
          sp_input=sp_feeder, transpose_input=False)
      feed_dict = {
          sp_feeder:
              np_matrix_to_tf_sparse(
                  INPUT_MATRIX, col_slices=[5, 3, 1], shuffle=False).eval()
      }
      self.assertAllClose(
          projected_cols.eval(feed_dict=feed_dict), [
              self._col_factors_2[0], self._col_factors_1[0],
              self._col_factors_0[1]
          ],
          atol=1e-3)
      self.assertAllClose(
          projected_cols_no_weights.eval(feed_dict=feed_dict),
          [[3.471045, -1.250835, -3.598917],
           [3.585139, -0.487476, -3.852232],
           [0.346433, 1.360644, 1.677121]],
          atol=1e-3)
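
np_matrix_to_tf_sparse is a helper defined elsewhere in this test file; below is a hedged sketch of what its row-slicing mode plausibly does (the name rows_to_sparse and the behaviour are assumptions, not the actual helper): select whole rows of a dense matrix and emit every non-zero entry of each selected row, satisfying the constraint spelled out in the comments above.

import numpy as np
import tensorflow.compat.v1 as tf

def rows_to_sparse(matrix, row_ids):
  # Keep *all* non-zero entries of each selected row, as the update ops
  # above require; unselected rows are simply absent from the SparseTensor.
  indices, values = [], []
  for i in row_ids:
    for j, v in enumerate(matrix[i]):
      if v != 0:
        indices.append([i, j])
        values.append(v)
  return tf.SparseTensorValue(
      np.array(indices, dtype=np.int64),
      np.array(values, dtype=np.float32),
      np.array(matrix.shape, dtype=np.int64))

m = np.array([[0., 2., 0.], [1., 0., 3.]])
print(rows_to_sparse(m, [1]).indices.tolist())  # [[1, 0], [1, 2]]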
Example #55
  def __init__(self,
               input_shape=None,
               batch_size=None,
               dtype=None,
               input_tensor=None,
               sparse=False,
               name=None,
               **kwargs):
    if 'batch_input_shape' in kwargs:
      batch_input_shape = kwargs.pop('batch_input_shape')
      if input_shape and batch_input_shape:
        raise ValueError('Only provide the input_shape OR '
                         'batch_input_shape argument to '
                         'InputLayer, not both at the same time.')
      batch_size = batch_input_shape[0]
      input_shape = batch_input_shape[1:]
    if kwargs:
      raise ValueError('Unrecognized keyword arguments:', kwargs.keys())

    if not name:
      prefix = 'input'
      name = prefix + '_' + str(backend.get_uid(prefix))

    if not dtype:
      if input_tensor is None:
        dtype = backend.floatx()
      else:
        dtype = backend.dtype(input_tensor)
    super(InputLayer, self).__init__(dtype=dtype, name=name)
    self.built = True
    self.sparse = sparse
    self.batch_size = batch_size
    self.supports_masking = True

    if isinstance(input_shape, tensor_shape.TensorShape):
      input_shape = tuple(input_shape.as_list())

    if input_tensor is None:
      if input_shape is not None:
        batch_input_shape = (batch_size,) + tuple(input_shape)
      else:
        batch_input_shape = None
      graph = backend.get_graph()
      with context.graph_mode():
        with graph.as_default():
          # In graph mode, create a graph placeholder to call the layer on.
          if sparse:
            input_tensor = array_ops.sparse_placeholder(
                shape=batch_input_shape,
                dtype=dtype,
                name=self.name)
          else:
            input_tensor = array_ops.placeholder(
                shape=batch_input_shape,
                dtype=dtype,
                name=self.name)

      self.is_placeholder = True
      self._batch_input_shape = batch_input_shape
    else:
      if not tf_utils.is_symbolic_tensor(input_tensor):
        raise ValueError('You should not pass an EagerTensor to `Input`. '
                         'For example, instead of creating an '
                         'InputLayer, you should instantiate your model and '
                         'directly call it on your input.')
      self.is_placeholder = False
      self._batch_input_shape = tuple(input_tensor.get_shape().as_list())

    # Create an input node to add to self.outbound_node
    # and set output_tensors' _keras_history.
    input_tensor._keras_history = (self, 0, 0)  # pylint: disable=protected-access
    base_layer.Node(
        self,
        inbound_layers=[],
        node_indices=[],
        tensor_indices=[],
        input_tensors=[input_tensor],
        output_tensors=[input_tensor])
Example #56
  def testPlaceholder(self):
    foo = array_ops.sparse_placeholder(dtypes.float32, shape=(10, 47))
    self.assertAllEqual([10, 47], foo.get_shape())
    self.assertAllEqual([None, 2], foo.indices.get_shape().as_list())