Example #1
    def test_enqueue_with_outside_compilation_non_direct_input(self):
        strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
        mid_level_api.build([
            TensorShape((self.batch_size, 2)),
            TensorShape((self.batch_size, 2)),
            TensorShape((self.batch_size, 3))
        ])
        dataset = self._create_sparse_dataset(strategy)
        dataset_iter = iter(
            strategy.experimental_distribute_dataset(
                dataset,
                options=distribute_lib.InputOptions(
                    experimental_fetch_to_device=False)))

        @def_function.function
        def enqueue_with_outside_compilation():
            def get_activations(features):
                # This inserts a mul operation on the TPU so the features are no
                # longer direct inputs, which triggers the error below.
                features = (features[0] * 2, features[1] * 2, features[2] * 2)
                mid_level_api.enqueue(features, training=False)
                return mid_level_api.dequeue()

            return strategy.run(get_activations, args=(next(dataset_iter), ))

        with self.assertRaisesRegex(
                ValueError,
                'which does not have the `_tpu_input_identity` attr'):
            enqueue_with_outside_compilation()
Example #2
  def test_enqueue_incorrect_shape_feature(self):
    strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')

    sparse = self._create_high_dimensional_sparse_dataset(strategy)
    sparse_iter = iter(
        strategy.experimental_distribute_dataset(
            sparse,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False)))

    mid_level_api._output_shapes = [TensorShape((1, 1)) for _ in range(3)]
    # The output shape passed to the build method is consistent with the output
    # shapes set above.
    mid_level_api.build([TensorShape([1, 1, 1]) for _ in range(3)])

    @def_function.function
    def test_fn():

      def step():
        return mid_level_api.dequeue()

      mid_level_api.enqueue(next(sparse_iter), training=False)
      return strategy.run(step)

    # The enqueued tensor's shape is inconsistent with the output shape setting.
    with self.assertRaisesRegex(ValueError,
                                'Inconsistent shape founded for input feature'):
      test_fn()
Example #3
 def test_output_shapes_priority_over_feature_config_and_build(self):
   _, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
   # The output shapes setting from the feature config takes priority over the
   # shapes passed to build.
   mid_level_api._output_shapes = [TensorShape((2, 4)) for _ in range(3)]
   mid_level_api.build([TensorShape((2, None, None)) for _ in range(3)])
   self.assertEqual(mid_level_api._output_shapes,
                    [TensorShape((2, 4)) for _ in range(3)])
Example #4
 def output_size(self):
     """Return the output size of the cell as a tuple of TensorShapes."""
     return (TensorShape(self.state_dims[0]),
             TensorShape(self.state_dims[0]),
             TensorShape(self.state_dims[0] * 2))
Example #5
    def test_pass_none_to_apply_gradients(self):
        strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
        mid_level_api.build([
            TensorShape((self.batch_size, 2)),
            TensorShape((self.batch_size, 2)),
            TensorShape((self.batch_size, 3))
        ])
        dataset = self._create_sparse_dataset(strategy)
        data = next(
            iter(
                strategy.experimental_distribute_dataset(
                    dataset,
                    options=distribute_lib.InputOptions(
                        experimental_fetch_to_device=False))))

        @def_function.function
        def embedding_and_set_gradients(data):
            mid_level_api.enqueue(data)

            def tpu_fn():
                results = mid_level_api.dequeue()
                mid_level_api.apply_gradients(
                    (None, None, array_ops.ones_like(results[2])))
                return results

            return strategy.run(tpu_fn)

        @def_function.function
        def embedding_only(data):
            mid_level_api.enqueue(data, training=False)

            def tpu_fn():
                return mid_level_api.dequeue()

            return strategy.run(tpu_fn)

        first = self._get_replica_numpy(embedding_and_set_gradients(data),
                                        strategy, 0)
        second = self._get_replica_numpy(embedding_only(data), strategy, 0)

        # The first two features should be unchanged since a None gradient was
        # applied. The third feature had a gradient of 1 passed in from each core.
        # Each core received the same ids and returned the following batch:
        # [ row 3, row 0 + row 1 + row 2 ]
        # so gradient update was (learning rate = 0.1):
        #   row 0: -1/3*0.1
        #   row 1: -1/3*0.1
        #   row 2: -1/3*0.1
        #   row 3: -1*0.1
        # There is a factor of num_replicas because each replica gave an update.

        num_replicas = strategy.num_replicas_in_sync
        update = ([[0.0]], [[0.0]], [[0.1 * num_replicas],
                                     [0.1 / 3 * num_replicas]])
        golden = tuple(
            [feature - np.array(up) for feature, up in zip(first, update)])

        self.assertAllClose(golden, second)
Example #6
  def test_build_incorrect_output_shapes(self):
    _, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    # Output shapes are set on the mid_level_api, but build is called with
    # inconsistent output shapes.
    mid_level_api._output_shapes = [TensorShape((2, 4)) for _ in range(3)]

    with self.assertRaisesRegex(ValueError,
                                'Inconsistent shape founded for input feature'):
      mid_level_api.build([TensorShape([1, 1, 1]) for _ in range(3)])
Example #7
    def test_forward(self):
        normalizing_flow = NormalizingFlow(self.K, self.dim)
        z, log_q = normalizing_flow.forward(self.z, self.log_q)

        self.assertIsInstance(z, Tensor)
        self.assertEqual(z.shape, TensorShape([100, self.dim]))

        self.assertIsInstance(log_q, Tensor)
        self.assertEqual(log_q.shape, TensorShape([100]))
Example #8
    def test_enqueue_with_weights(self, ragged):
        strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
        weight = 0.5
        if ragged:
            dataset = self._create_ragged_dataset(strategy,
                                                  include_weights=True,
                                                  weight=weight)
        else:
            dataset = self._create_sparse_dataset(strategy,
                                                  include_weights=True,
                                                  weight=weight)
            mid_level_api.build([
                TensorShape((self.batch_size, 2)),
                TensorShape((self.batch_size, 2)),
                TensorShape((self.batch_size, 3))
            ])

        dataset_iter = iter(
            strategy.experimental_distribute_dataset(
                dataset,
                options=distribute_lib.InputOptions(
                    experimental_fetch_to_device=False)))

        @def_function.function
        def enqueue_and_get(features, weights):
            def get_activations():
                return mid_level_api.dequeue()

            mid_level_api.enqueue(features, weights=weights, training=False)
            return strategy.run(get_activations)

        features, weights = next(dataset_iter)
        # Replace the weight for the second feature with None to test that case.
        weights = (weights[0], None, weights[2])

        no_weights_activations = enqueue_and_get(features, weights=None)
        weights_activations = enqueue_and_get(features, weights=weights)

        # Extract per-core numpy arrays.
        no_weights0 = self._get_replica_numpy(no_weights_activations, strategy,
                                              0)
        weights0 = self._get_replica_numpy(weights_activations, strategy, 0)
        # The videos table has a sum combiner and the users table has a mean
        # combiner, i.e. users table lookups aren't affected by the weights since
        # all the weights are the same.
        # Tuple entries 0 and 1 are the watched and favorited features from the
        # videos table and entry 2 is the friends feature from the users table.
        # Note that None was passed as the weight for entry 1, so the weight should
        # have no effect there.
        weight = (0.5, 1.0, 1.0)
        golden = tuple(
            [no_weight * w for no_weight, w in zip(no_weights0, weight)])

        self.assertAllClose(golden, weights0)
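As a side note on the combiner reasoning in the comments above, the effect of a uniform weight on sum versus mean combiners can be reproduced with plain numpy. This is a standalone sketch with made-up row values, not tied to the embedding tables used by the test:

import numpy as np

# Two looked-up embedding rows and a uniform per-id weight.
rows = np.array([[1.0, 2.0], [3.0, 4.0]])
w = 0.5

# Sum combiner: sum(w * r_i) scales linearly with the weight.
weighted_sum = np.sum(w * rows, axis=0)
# Mean combiner: sum(w * r_i) / sum(w_i), so a uniform weight cancels out.
weighted_mean = np.sum(w * rows, axis=0) / (w * len(rows))

print(weighted_sum)                                   # [2. 3.] == 0.5 * plain sum
print(np.allclose(weighted_mean, rows.mean(axis=0)))  # True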
Example #9
    def test_traj_converter_tensor_with_casting_int(self):

        data = [1]
        data_spec = tf.constant(0, dtype=tf.int32)

        data_tensor, spec = convert_data_to_tensor(data, data_spec)

        expected_tensor = tf.constant([1], dtype=tf.int32)
        expected_spec = tf.TensorSpec(TensorShape([]), tf.int32)
        bad_spec = tf.TensorSpec(TensorShape([]), tf.float32)

        self.assertAllEqual(data_tensor, expected_tensor)
        self.assertAllEqual(spec, expected_spec)
        self.assertNotEqual(spec, bad_spec)
Example #10
  def test_not_fully_defined_output_shapes_in_feature_config(self):
    _, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')

    # The feature config sets undefined output shapes.
    mid_level_api._output_shapes = [TensorShape(None) for _ in range(3)]
    with self.assertRaisesRegex(ValueError, 'Input Feature'):
      mid_level_api.build()
Example #11
    def build(self, input_shape):
        with tf.variable_scope(self.name):
            input_shape = TensorShape(input_shape)
            if self.data_format == 'channels_first':
                channel_axis = 1
            else:
                channel_axis = -1
            if input_shape[channel_axis].value is None:
                raise ValueError('The channel dimension of the inputs '
                                 'should be defined. Found `None`.')
            input_dim = int(input_shape[channel_axis])
            kernel_shape = self.kernel_size + (input_dim, self.filters)

            print("input_shape: {}".format(input_shape))
            self.kernel = sn_kernel(shape=kernel_shape, scope='kernel')
            self.bias = tf.get_variable(
                name='bias',
                shape=[self.filters],
                initializer=tf.initializers.zeros(dtype=self.dtype))
            self._convolution_op = Convolution(
                input_shape,
                filter_shape=self.kernel.get_shape(),
                dilation_rate=self.dilation_rate,
                strides=self.strides,
                padding=self.padding.upper(),
                data_format=conv_utils.convert_data_format(
                    self.data_format, self.rank + 2))
            self.built = True
Example #12
    def test_multi_input_lstm(self):
        self.LOGGER.info(self._testMethodName)

        cell = MultiInputLSTMCell(num_units=128)  # state_size = 128
        # self.LOGGER.info(cell.state_size)  # 128
        cell.build(inputs_shape=TensorShape([32, 100]))

        self.LOGGER.info(tf.contrib.framework.get_trainable_variables())
Example #13
    def test_enqueue_with_outside_compilation(self, use_mlir):
        if use_mlir:
            config.enable_mlir_bridge()

        strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
        mid_level_api.build([
            TensorShape((self.batch_size, 2)),
            TensorShape((self.batch_size, 2)),
            TensorShape((self.batch_size, 3))
        ])
        dataset = self._create_sparse_dataset(strategy)
        dataset_iter = iter(
            strategy.experimental_distribute_dataset(
                dataset,
                options=distribute_lib.InputOptions(
                    experimental_fetch_to_device=False)))

        @def_function.function
        def enqueue_with_outside_compilation(data):
            def get_activations(features):
                mid_level_api.enqueue(features, training=False)
                return mid_level_api.dequeue()

            return strategy.run(get_activations, args=(data, ))

        @def_function.function
        def enqueue_without_outside_compilation(data):
            def get_activations():
                return mid_level_api.dequeue()

            mid_level_api.enqueue(data, training=False)
            return strategy.run(get_activations)

        features = next(dataset_iter)

        activations_oc = enqueue_with_outside_compilation(features)
        activations = enqueue_without_outside_compilation(features)

        # Extract per-core numpy arrays.
        activations_oc0 = self._get_replica_numpy(activations_oc, strategy, 0)
        activations0 = self._get_replica_numpy(activations, strategy, 0)

        self.assertAllClose(activations_oc0, activations0)
Example #14
  def test_enqueue_dense_sparse_ragged(self):
    strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')

    dataset = self._create_high_dimensional_dense_dataset(strategy)
    dense_iter = iter(
        strategy.experimental_distribute_dataset(
            dataset,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False)))

    sparse = self._create_high_dimensional_sparse_dataset(strategy)
    sparse_iter = iter(
        strategy.experimental_distribute_dataset(
            sparse,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False)))

    ragged = self._create_high_dimensional_ragged_dataset(strategy)
    ragged_iter = iter(
        strategy.experimental_distribute_dataset(
            ragged,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False)))

    mid_level_api.build([
        TensorShape([self.batch_size, self.data_batch_size, 1]),
        TensorShape([self.batch_size, self.data_batch_size, 2]),
        TensorShape([self.batch_size, self.data_batch_size, 3])
    ])

    @def_function.function
    def test_fn():

      def step():
        return mid_level_api.dequeue()

      features = (next(dense_iter)[0], next(sparse_iter)[1],
                  next(ragged_iter)[2])
      mid_level_api.enqueue(features, training=False)
      return strategy.run(step)

    test_fn()
Example #15
  def __init__(self,
               table: TableConfig,
               max_sequence_length: int = 0,
               validate_weights_and_indices: bool = True,
               output_shape: Optional[Union[List[int], TensorShape]] = None,
               name: Optional[Text] = None):
    """Feature configuration.

    Args:
      table: An instance of `tf.tpu.experimental.embedding.TableConfig`,
        describing the table in which this feature should be looked up.
      max_sequence_length: If positive, the feature is a sequence feature with
        the corresponding maximum sequence length. If the sequence is longer
        than this, it will be truncated. If 0, the feature is not a sequence
        feature.
      validate_weights_and_indices: If true, uses safe_embedding_lookup during
        serving which ensures there are no empty rows and all weights and ids
        are positive at the expense of extra compute cost.
      output_shape: Optional argument to configure the output shape of the
        feature activation. If provided, the feature fed to `embedding.enqueue`
        has to match this shape (for ragged tensors, the input shape and output
        shape may mismatch). If not provided, the shape can either be provided
        to `embedding.build` or auto-detected at runtime.
      name: An optional name for the feature, useful for debugging.

    Returns:
      `FeatureConfig`.

    Raises:
      ValueError: if `table` is not an instance of
        `tf.tpu.experimental.embedding.TableConfig`.
      ValueError: if `max_sequence_length` is not an integer or is negative.
    """
    if not isinstance(table, TableConfig):
      raise ValueError(f"Argument `table` has invalid type {type(table)}. "
                       "Expected `tf.tpu.experimental.embedding.TableConfig`.")

    if not isinstance(max_sequence_length, int) or max_sequence_length < 0:
      raise ValueError(
          f"Argument `max_sequence_length` must be an int and must be >= 0. "
          f"Received: {max_sequence_length}")

    self.table = table
    self.max_sequence_length = max_sequence_length
    self.name = name
    self.output_shape = TensorShape(output_shape)

    if not isinstance(
        validate_weights_and_indices, bool):
      raise ValueError(
          f"Argument `validate_weights_and_indices` must be a boolean. "
          f"Received: {validate_weights_and_indices}")

    self.validate_weights_and_indices = validate_weights_and_indices
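For reference, here is a minimal construction sketch for the class above, using the public `tf.tpu.experimental.embedding` aliases named in the docstring. The table name, sizes, and output shape are illustrative assumptions, and the sketch assumes a TF version that exposes the `output_shape` argument shown in the signature above:

import tensorflow as tf

# A small table and a feature that looks it up. `output_shape` is optional;
# when given, the feature passed to `embedding.enqueue` must match it.
video_table = tf.tpu.experimental.embedding.TableConfig(
    vocabulary_size=1024, dim=8, name='video')
watched = tf.tpu.experimental.embedding.FeatureConfig(
    table=video_table, output_shape=[16, 4], name='watched')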
Example #16
    def test_traj_converter_dict_to_tensor(self):

        data = {"k": [1]}
        data_spec = {"k": tf.constant(0, dtype=tf.float32)}

        data_tensor, spec = convert_data_to_tensor(data, data_spec)

        expected_tensor = {"k": tf.constant([1.0], dtype=tf.float32)}
        expected_spec = {"k": tf.TensorSpec(TensorShape([]))}

        self.assertAllEqual(data_tensor, expected_tensor)
        self.assertAllEqual(spec, expected_spec)
Example #17
  def test_different_input_shapes(self):
    strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')

    sparse = self._create_high_dimensional_sparse_dataset(strategy)
    sparse_iter = iter(
        strategy.experimental_distribute_dataset(
            sparse,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False)))
    # Create a feature with shape (1, 3, 1)
    dense_feature = constant_op.constant(
        np.zeros(3), shape=(1, 3, 1), dtype=dtypes.int32)
    dense_dataset = dataset_ops.DatasetV2.from_tensors(
        dense_feature).unbatch().repeat().batch(
            1 * strategy.num_replicas_in_sync, drop_remainder=True)
    dense_iter = iter(
        strategy.experimental_distribute_dataset(
            dense_dataset,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False)))

    @def_function.function
    def test_fn():

      def step():
        return mid_level_api.dequeue()

      features = (next(dense_iter), next(sparse_iter)[1], next(sparse_iter)[2])
      mid_level_api.enqueue(features, training=False)
      return strategy.run(step)

    test_fn()

    self.assertEqual(mid_level_api._output_shapes, [
        TensorShape((1, 3)),
        TensorShape((self.batch_size, self.data_batch_size)),
        TensorShape((self.batch_size, self.data_batch_size))
    ])
Example #18
 def tpu_embedding_config():
   feature_configs = []
   for dim, vocab, name in table_data:
     feature_configs.append(tpu_embedding_v2_utils.FeatureConfig(
         table=tpu_embedding_v2_utils.TableConfig(
             vocabulary_size=int(vocab), dim=int(dim),
             initializer=init_ops_v2.Zeros(), name=name)))
   optimizer = tpu_embedding_v2_utils.Adagrad(
       learning_rate=0.1)
   with strategy.scope():
     mid_level_api = tpu_embedding_v2.TPUEmbedding(
         feature_config=feature_configs,
         optimizer=optimizer)
   mid_level_api._output_shapes = [TensorShape(128)] * len(feature_configs)
   return mid_level_api._create_config_proto()
Example #19
    def test_sequence_feature_with_build(self, is_updated_shape):
        seq_length = 3
        # Set the max_sequence_length in the feature config.
        for feature in self.feature_config:
            feature.max_sequence_length = seq_length
        strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
        dataset = self._create_sparse_dataset(strategy)
        feature_iter = iter(
            strategy.experimental_distribute_dataset(
                dataset,
                options=distribute_lib.InputOptions(
                    experimental_fetch_to_device=False)))
        if is_updated_shape:
            mid_level_api.build([
                TensorShape([self.batch_size, seq_length, 2]),
                TensorShape([self.batch_size, seq_length, 2]),
                TensorShape([self.batch_size, seq_length, 3])
            ])
        else:
            mid_level_api.build([
                TensorShape([self.batch_size, 2]),
                TensorShape([self.batch_size, 2]),
                TensorShape([self.batch_size, 3])
            ])

        @def_function.function
        def test_fn():
            def step():
                return mid_level_api.dequeue()

            mid_level_api.enqueue(next(feature_iter), training=False)
            return strategy.run(step)

        output = test_fn()
        self.assertEqual(
            self._get_replica_numpy(output[0], strategy, 0).shape, (2, 3, 4))
        self.assertEqual(
            self._get_replica_numpy(output[1], strategy, 0).shape, (2, 3, 4))
        self.assertEqual(
            self._get_replica_numpy(output[2], strategy, 0).shape, (2, 3, 2))
Example #20
    def resize(self, new_output_nodes=None,
               output_nodes_to_prune=None,
               input_nodes_to_prune=None,
               split_output_nodes=None,
               split_input_nodes=None,
               data_set_train=None,
               data_set_validation=None,
               no_splitting_or_pruning=False,
               split_nodes_noise_std=.1):
        """Resize this layer by changing the number of output nodes. Will also resize any downstream layers

        Args:
            data_set_validation (DataSet):Data set used for validating this network
            data_set_train (DataSet): Data set used for training this network
            no_splitting_or_pruning (bool): If set to true then noise is just added randomly rather than splitting nodes
            new_output_nodes (int | tuple of ints): If passed we change the number of output nodes of this layer to be new_output_nodes
            output_nodes_to_prune ([int]): list of indexes of the output nodes we want pruned e.g. [1, 3] would remove
                the 1st and 3rd output node from this layer
            input_nodes_to_prune ([int]): list of indexes of the input nodes we want pruned e.g. [1, 3] would remove the
                1st and 3rd input node from this layer
            split_output_nodes ([int]): list of indexes of nodes to split. This is for growing the layer.
            split_input_nodes ([int]): list of indexes of nodes that were split in the previous layer.
            split_nodes_noise_std (float): standard deviation of noise to add when splitting a node
        """
        if isinstance(new_output_nodes, tuple):
            new_output_nodes = new_output_nodes[self.get_resizable_dimension()]
        elif new_output_nodes is not None and not isinstance(new_output_nodes, int):
            raise ValueError("new_output_nodes must be an int or a tuple of ints, got %s" % (new_output_nodes,))

        if not no_splitting_or_pruning:
            # choose nodes to split or prune
            if new_output_nodes is not None:
                if output_nodes_to_prune is None and split_output_nodes is None:
                    if new_output_nodes < self.get_resizable_dimension_size():
                        output_nodes_to_prune = self._choose_nodes_to_prune(new_output_nodes, data_set_train,
                                                                            data_set_validation)
                    elif new_output_nodes > self.get_resizable_dimension_size():
                        split_output_nodes = self._choose_nodes_to_split(new_output_nodes, data_set_train,
                                                                         data_set_validation)
            elif self.has_resizable_dimension():
                new_output_nodes = self.get_resizable_dimension_size()
                if output_nodes_to_prune:
                    new_output_nodes -= len(output_nodes_to_prune)
                if split_output_nodes:
                    new_output_nodes += len(split_output_nodes)

        new_input_nodes = self.input_layer.output_nodes
        input_nodes_changed = new_input_nodes != self._input_nodes

        if self.has_resizable_dimension() and new_output_nodes is not None:
            output_nodes_changed = new_output_nodes != self.get_resizable_dimension_size()
            temp_output_nodes = list(self._output_nodes)
            temp_output_nodes[self.get_resizable_dimension()] = new_output_nodes

            self._output_nodes = tuple(temp_output_nodes)
        else:
            output_nodes_changed = False

        self._input_nodes = new_input_nodes

        for name, bound_variable in self._bound_variables.iteritems():
            if input_nodes_changed and self._bound_dimensions_contains_input(bound_variable.dimensions) or \
                            output_nodes_changed and self._bound_dimensions_contains_output(bound_variable.dimensions):

                self._forget_assign_op(name)

                int_dims = self._bound_dimensions_to_ints(bound_variable.dimensions)

                if isinstance(bound_variable.variable, tf.Variable):
                    old_values = self._session.run(bound_variable.variable)
                    if output_nodes_to_prune or split_output_nodes:
                        output_bound_axis = self._get_axis(bound_variable.dimensions,
                                                           self.OUTPUT_BOUND_VALUE, self.OUTPUT_DIM_3_BOUND_VALUE)
                        if output_nodes_to_prune:
                            old_values = np.delete(old_values, output_nodes_to_prune, output_bound_axis)
                        else:  # split
                            old_values = array_extend(old_values, {output_bound_axis: split_output_nodes},
                                                      noise_std=split_nodes_noise_std)
                    if input_nodes_to_prune or split_input_nodes:
                        input_bound_axis = self._get_axis(bound_variable.dimensions,
                                                          self.INPUT_BOUND_VALUE, self.INPUT_DIM_3_BOUND_VALUE)
                        if input_nodes_to_prune:
                            old_values = np.delete(old_values, input_nodes_to_prune, input_bound_axis)
                        else:  # split
                            old_values = array_extend(old_values, {input_bound_axis: split_input_nodes},
                                                      halve_extended_vectors=True)
                    if no_splitting_or_pruning:
                        new_values = self._weight_extender_func(old_values, int_dims)
                    else:
                        new_values = old_values

                    tf_resize(self._session, bound_variable.variable, int_dims,
                              new_values, self._get_assign_function(name))
                else:
                    # this is a tensor, not a variable so has no weights
                    tf_resize(self._session, bound_variable.variable, int_dims)

        if input_nodes_changed and self._batch_normalize_input:
            if self._batch_norm_mean_train is not None:
                tf_resize(self._session, self._batch_norm_mean_train, self._input_nodes)
                tf_resize(self._session, self._batch_norm_var_train, self._input_nodes)
            if self._batch_norm_mean_predict is not None:
                tf_resize(self._session, self._batch_norm_mean_predict, self._input_nodes)
                tf_resize(self._session, self._batch_norm_var_predict, self._input_nodes)
            if self._normalized_train is not None:
                tf_resize(self._session, self._normalized_train, (None,) + self._input_nodes)
            if self._normalized_predict is not None:
                tf_resize(self._session, self._normalized_predict, (None,) + self._input_nodes)
                tf_resize(self._session, self._normalized_predict.op.inputs[0].op.inputs[1].op.inputs[1], self._input_nodes)
            # THIS needs fixing -> self._normalized_predict.op.inputs[0].op.inputs[1].op.inputs[1]
            # self._normalized_predict.op.inputs[0].op.inputs[1].op.inputs[1].op.inputs[1] returns [2] and should be [4]

            # This line fixed the issue, this is all very hacky...
            # self._mat_mul.op.inputs[0]._shape = TensorShape((None,) + self._input_nodes)
            from tensorflow.python.framework.tensor_shape import TensorShape

            if '_mat_mul_is_train_equal_' + str(True) in self.__dict__:
                self.__dict__['_mat_mul_is_train_equal_' + str(True)].op.inputs[0]._shape = TensorShape(
                    (None,) + self._input_nodes)
                self.__dict__['_mat_mul_is_train_equal_' + str(True)].op.inputs[0].op.inputs[0]._shape = TensorShape(
                    (None,) + self._input_nodes)
                self.__dict__['_mat_mul_is_train_equal_' + str(True)].op.inputs[0].op.inputs[0].op.inputs[
                    0]._shape = TensorShape((None,) + self._input_nodes)
                self.__dict__['_mat_mul_is_train_equal_' + str(True)].op.inputs[0].op.inputs[0].op.inputs[0].op.inputs[
                    0]._shape = TensorShape((None,) + self._input_nodes)
                # tf_resize(self._session, self.__dict__['_mat_mul_is_train_equal_' + str(True)], (None,) + self._input_nodes)
            if '_mat_mul_is_train_equal_' + str(False) in self.__dict__:
                self.__dict__['_mat_mul_is_train_equal_' + str(False)].op.inputs[0]._shape = TensorShape(
                    (None,) + self._input_nodes)
                self.__dict__['_mat_mul_is_train_equal_' + str(False)].op.inputs[0].op.inputs[0]._shape = TensorShape(
                    (None,) + self._input_nodes)
                self.__dict__['_mat_mul_is_train_equal_' + str(False)].op.inputs[0].op.inputs[0].op.inputs[
                    0]._shape = TensorShape((None,) + self._input_nodes)
                self.__dict__['_mat_mul_is_train_equal_' + str(False)].op.inputs[0].op.inputs[0].op.inputs[0].op.inputs[
                    0]._shape = TensorShape((None,) + self._input_nodes)
                # tf_resize(self._session, self.__dict__['_mat_mul_is_train_equal_' + str(False)], (None,) + self._input_nodes)

        if output_nodes_changed:
            if has_lazyprop(self, 'activation_predict'):
                tf_resize(self._session, self.activation_predict, (None,) + self._output_nodes)
            if has_lazyprop(self, 'activation_train'):
                tf_resize(self._session, self.activation_train, (None,) + self._output_nodes)

        if input_nodes_changed and self.bactivate:
            if has_lazyprop(self, 'bactivation_train'):
                tf_resize(self._session, self.bactivation_train, (None,) + self._input_nodes)
            if has_lazyprop(self, 'bactivation_predict'):
                tf_resize(self._session, self.bactivation_predict, (None,) + self._input_nodes)

        if self._next_layer and self._next_layer._resize_needed():
            self._next_layer.resize(input_nodes_to_prune=output_nodes_to_prune, split_input_nodes=split_output_nodes,
                                    no_splitting_or_pruning=no_splitting_or_pruning)
Example #21
 def _batch_shape(self):
     return TensorShape(self._dim + 1)
Example #22
 def output_size(self):
     return Decoder_Output(
         linear=TensorShape([hp.Sound.Mel_Dim]),
         stop=TensorShape([1])  # Currently this is hard-coded.
         )
Example #23
    def __init__(self,
                 shape,
                 local_replica_id,
                 initializer=None,
                 trainable=True,
                 use_hashtable=True,
                 name="EmbeddingVariable",
                 dtype=None,
                 key_dtype=None,
                 *args,
                 **kwargs):
        if (not isinstance(shape, list)) or (len(shape) != 2):
            raise ValueError("shape_per_gpu must be a list which represents: "+\
                             "[vocabulary_size_per_gpu, embedding_vector_size].")
        self.m_shape_per_gpu = TensorShape(shape)
        self.m_local_replica_id = local_replica_id
        self.m_initializer = initializer or InPlaceInitializer(
            name="random_uniform")
        if not isinstance(self.m_initializer, InPlaceInitializer):
            self.m_initializer = tf_initializers.get(self.m_initializer)
        self.m_trainable = trainable
        self.m_use_hashtable = use_hashtable
        self.m_embedding_layer = None
        self.m_dtype = dtype or dtypes.float32
        self.m_key_dtype = key_dtype or dtypes.int64
        # Produce the initial value.
        if isinstance(self.m_initializer, InPlaceInitializer):
            # TODO: serialize it
            self.m_initial_value = self.m_initializer.name
        else:
            self.m_initial_value = self.m_initializer(
                shape=self.m_shape_per_gpu, dtype=self.m_dtype)

        collections = [ops.GraphKeys.GLOBAL_VARIABLES]
        if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
            collections = list(collections) + [
                ops.GraphKeys.TRAINABLE_VARIABLES
            ]

        with ops.init_scope():
            self._in_graph_mode = not context.executing_eagerly()
            with ops.name_scope(name) as var_name_scope:
                # TODO: use a regular expression
                while var_name_scope[-1] == r"/":
                    var_name_scope = var_name_scope[:-1]
                var_name = var_name_scope
                self.m_var_name = var_name
                self.m_unique_id = "%s_%d" % (var_name, ops.uid())

                # attr = resource_variable_ops.attr_value_pb2.AttrValue(
                #     list=resource_variable_ops.attr_value_pb2.AttrValue.ListValue(
                #         s=[resource_variable_ops.compat.as_bytes("loc:@%s" % self.m_var_name)]))

                # with ops.get_default_graph()._attr_scope({"_class": attr}):
                with ops.NullContextmanager():
                    # m_handle is the handle to EmbeddingVariable, tf_handle is the handle to TF Var.
                    self.m_handle, self.tf_handle = kit_lib.create_var(
                        var_name=var_name,
                        dtype=self.m_dtype,
                        shape=self.m_shape_per_gpu)

                    if self._in_graph_mode:
                        with ops.name_scope("IsInitialized"):
                            self._is_initialized_op = ops.convert_to_tensor(
                                True)  # TODO: should not be hard-coded.

                            if (isinstance(self.m_initial_value, ops.Tensor)
                                    and not self.m_initial_value.shape.
                                    is_compatible_with(self.m_shape_per_gpu)):
                                raise ValueError(
                                    "The initial value's shape (%s) is not compatible with "
                                    "the explicitly supplied `shape` argument (%s)."
                                    % (self.m_initial_value.shape,
                                       self.m_shape_per_gpu))

                            _init_op = kit_lib.assign_embedding_variable(
                                emb_var_handle=self.m_handle,
                                tf_var_handle=self.tf_handle,
                                var_name=var_name,
                                initial_value=self.m_initial_value,
                                local_replica_id=self.m_local_replica_id,
                                trainable=self.m_trainable,
                                shape=self.m_shape_per_gpu,
                                use_hashtable=self.m_use_hashtable,
                                dtype=self.m_dtype,
                                key_dtype=self.m_key_dtype)
                            self._initializer_op = control_flow_ops.group(
                                (_init_op))
                    else:
                        raise RuntimeError(
                            "Currently, EmbeddingVariable does not support Eager mode."
                        )

                    if not context.executing_eagerly():
                        ops.add_to_collections(collections, self)

            super(EmbeddingVariable, self).__init__(
                trainable=self.m_trainable,
                shape=self.m_shape_per_gpu,
                dtype=self.m_dtype,
                handle=self.m_handle,
                handle_name=var_name,
                distribute_strategy=get_strategy() if has_strategy() else None,
                synchronization=VariableSynchronization.NONE,
                aggregation=VariableAggregation.ONLY_FIRST_REPLICA,
                unique_id=self.m_unique_id,
                initializer_op=self._initializer_op,
                is_initialized_op=self._is_initialized_op,
                *args,
                **kwargs)
            handle_data = resource_variable_ops.cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData(
            )
            handle_data.is_set = True
            handle_data.shape_and_type.append(
                resource_variable_ops.cpp_shape_inference_pb2.
                CppShapeInferenceResult.HandleShapeAndType(
                    shape=self.shape.as_proto(),
                    dtype=self.dtype.as_datatype_enum))
            resource_variable_ops._set_handle_shapes_and_types(
                self.m_handle,
                handle_data,
                graph_mode=False if context.executing_eagerly() else True)
            resource_variable_ops._set_handle_shapes_and_types(
                self.tf_handle,
                handle_data,
                graph_mode=False if context.executing_eagerly() else True)
Example #24
 def test_calc_prob_tf(self):
     p = TargetDistribution1.calc_prob_tf(self.z)
     self.assertIsInstance(p, Tensor)
     self.assertEqual(p.shape, TensorShape([100]))
Example #25
    def __init__(self,
                 shape,
                 local_replica_id,
                 initializer=None,
                 trainable=True,
                 use_hashtable=True,
                 name="EmbeddingVariable",
                 dtype=None,
                 key_dtype=None,
                 *args,
                 **kwargs):
        if (not isinstance(shape, list)) or (len(shape) != 2):
            raise ValueError("shape_per_gpu must be a list which represents: "+\
                             "[vocabulary_size_per_gpu, embedding_vector_size].")
        self.m_shape_per_gpu = TensorShape(shape)
        self.m_local_replica_id = local_replica_id
        self.m_initializer = initializer or InPlaceInitializer(name="random_uniform")
        if not isinstance(self.m_initializer, InPlaceInitializer):
            self.m_initializer = tf_initializers.get(self.m_initializer)
        self.m_trainable = trainable
        self.m_use_hashtable = use_hashtable
        self.m_embedding_layer = None
        self.m_dtype = dtype or dtypes.float32
        self.m_key_dtype = key_dtype or dtypes.int64
        # Produce the initial value.
        if isinstance(self.m_initializer, InPlaceInitializer):
            # TODO: serialize it
            self.m_initial_value = self.m_initializer.name
        else:
            self.m_initial_value = self.m_initializer(shape=self.m_shape_per_gpu, dtype=self.m_dtype)

        with ops.init_scope():
            with ops.name_scope(name):
                self.m_var_name = self._gen_unique_name(name)
                self.m_unique_id = "%s_%d" %(self.m_var_name, ops.uid())

                # m_handle is the handle to EmbeddingVariable, tf_handle is the handle to TF Var.
                self.m_handle, self.tf_handle = kit_lib.create_var(
                                            var_name=self.m_var_name,
                                            dtype=self.m_dtype,
                                            shape=self.m_shape_per_gpu)

                with ops.name_scope("IsInitialized"):
                    self._is_initialized_op = ops.convert_to_tensor(True)

                    if (isinstance(self.m_initial_value, ops.Tensor) and
                        not self.m_initial_value.shape.is_compatible_with(self.m_shape_per_gpu)):
                        raise ValueError("The initial value's shape (%s) is not compatible with "
                                         "the explicitly supplied `shape` argument (%s)." %
                                         (self.m_initial_value.shape, self.m_shape_per_gpu))

                    _init_op = kit_lib.assign_embedding_variable(emb_var_handle=self.m_handle,
                                                            tf_var_handle=self.tf_handle,
                                                            var_name=self.m_var_name,
                                                            initial_value=self.m_initial_value,
                                                            local_replica_id=self.m_local_replica_id,
                                                            trainable=self.m_trainable,
                                                            shape=self.m_shape_per_gpu,
                                                            use_hashtable=self.m_use_hashtable,
                                                            dtype=self.m_dtype,
                                                            key_dtype=self.m_key_dtype)
                    self._initializer_op = control_flow_ops.group((_init_op))

            super(EmbeddingVariable, self).__init__(trainable=self.m_trainable,
                                                    shape=self.m_shape_per_gpu,
                                                    dtype=self.m_dtype,
                                                    handle=self.m_handle,
                                                    handle_name=self.m_var_name,
                                                    distribute_strategy=get_strategy() if has_strategy() else None,
                                                    synchronization=VariableSynchronization.NONE,
                                                    aggregation=VariableAggregation.ONLY_FIRST_REPLICA,
                                                    unique_id=self.m_unique_id,
                                                    *args, **kwargs)

            handle_data = resource_variable_ops.cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData()
            handle_data.is_set = True
            handle_data.shape_and_type.append(
                resource_variable_ops.cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(
                    shape=self.shape.as_proto(), dtype=self.dtype.as_datatype_enum))
            resource_variable_ops._set_handle_shapes_and_types(self.m_handle, handle_data, 
                graph_mode=False if context.executing_eagerly() else True)
            resource_variable_ops._set_handle_shapes_and_types(self.tf_handle, handle_data, 
                graph_mode=False if context.executing_eagerly() else True)
Example #26
    def test_embedding(self, optimizer_name, training, sparse,
                       is_high_dimensional):
        strategy, mid_level_api, optimizer = (
            self._create_strategy_and_mid_level(optimizer_name))

        if sparse:
            if is_high_dimensional:
                dataset = self._create_high_dimensional_sparse_dataset(
                    strategy)
            else:
                dataset = self._create_sparse_dataset(strategy)
        else:
            if is_high_dimensional:
                dataset = self._create_high_dimensional_sparse_dataset(
                    strategy)
            else:
                dataset = self._create_ragged_dataset(strategy)

        if is_high_dimensional:
            if sparse:
                mid_level_api.build([
                    TensorShape([self.batch_size, self.data_batch_size, 2]),
                    TensorShape([self.batch_size, self.data_batch_size, 2]),
                    TensorShape([self.batch_size, self.data_batch_size, 3]),
                ])
            else:
                mid_level_api.build([
                    TensorShape([self.batch_size, self.data_batch_size, None]),
                    TensorShape([self.batch_size, self.data_batch_size, None]),
                    TensorShape([self.batch_size, self.data_batch_size, None]),
                ])

        dist = strategy.experimental_distribute_dataset(
            dataset,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False))
        dist_iter = iter(dist)

        @def_function.function
        def test_fn():
            def step():
                """Create and run computation that returns the embedding activations."""
                if not training:
                    activations = mid_level_api.dequeue()
                    total_loss = _get_total_loss_tensor(activations)
                    ret_val = [total_loss] + list(activations)
                    return ret_val
                else:
                    with backprop.GradientTape() as tape:
                        activations = mid_level_api.dequeue()
                        tape.watch(activations)
                        total_loss = _get_total_loss_tensor(activations)
                        loss_per_replica = total_loss / strategy.num_replicas_in_sync
                    gradients = tape.gradient(loss_per_replica, activations)
                    mid_level_api.apply_gradients(gradients)
                ret_val = [total_loss] + list(activations)
                return ret_val

            mid_level_api.enqueue(next(dist_iter), training=training)
            result = strategy.run(step)
            return result

        # Run model.
        shard_out_val = test_fn()

        # Retrieve TPU weights to CPU.
        mid_level_api._retrieve_variables()

        # Compute sparse tensors for global batch.
        if is_high_dimensional:
            input_data = next(
                iter(self._create_high_dimensional_sparse_dataset(strategy)))
        else:
            input_data = next(iter(self._create_sparse_dataset(strategy)))

        # Check results.
        self._check_results(strategy, shard_out_val, training, input_data,
                            mid_level_api._variables, optimizer,
                            is_high_dimensional)
Example #27
 def test_calc_loss(self):
     normalizing_flow = NormalizingFlow(self.K, self.dim)
     loss = normalizing_flow.calc_loss(self.z, self.log_q,
                                       TargetDistribution1)
     self.assertIsInstance(loss, Tensor)
     self.assertEqual(loss.shape, TensorShape([]))
Example #28
    def test_enqueue_with_outside_compilation_auto_mode(self):
        strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
        mid_level_api.build([
            TensorShape((self.batch_size, 2)),
            TensorShape((self.batch_size, 2)),
            TensorShape((self.batch_size, 3))
        ])
        dataset = self._create_sparse_dataset(strategy)
        dataset_iter = iter(
            strategy.experimental_distribute_dataset(
                dataset,
                options=distribute_lib.InputOptions(
                    experimental_fetch_to_device=False)))

        @def_function.function
        def enqueue_with_no_gradient_apply(data):
            def get_activations(features):
                # Note that training=False is not set, so training defaults to True
                # here even though we don't call apply_gradients.
                # The correct mode is detected based on which ops exist that share
                # the same 'name'.
                mid_level_api.enqueue(features, name='call1')
                return mid_level_api.dequeue(name='call1')

            return strategy.run(get_activations, args=(data, ))

        @def_function.function
        def enqueue_with_gradient_apply(data):
            def get_activations(features):
                mid_level_api.enqueue(features, name='call2')
                activations = mid_level_api.dequeue(name='call2')
                # Apply an all ones gradient
                gradients = nest.map_structure(array_ops.ones_like,
                                               activations)
                mid_level_api.apply_gradients(gradients, name='call2')
                return activations

            return strategy.run(get_activations, args=(data, ))

        data = next(dataset_iter)
        before_gradient_apply = enqueue_with_gradient_apply(data)
        after_gradient_apply = enqueue_with_no_gradient_apply(data)
        before_gradient_apply0 = self._get_replica_numpy(
            before_gradient_apply, strategy, 0)
        after_gradient_apply0 = self._get_replica_numpy(
            after_gradient_apply, strategy, 0)

        num_replicas = strategy.num_replicas_in_sync
        # We are passing a gradient of 1 for all lookups, optimizer is SGD with a
        # learning rate of 0.1. Feature 0 and 1 are looked up with a sum combiner
        # with the following ids:
        # Feature 0: [0, 0, 1], [0, 1, 1], ... repeated over num_replicas
        # Feature 1: [0, 1, 1], [0, 0, 1], ... repeated over num_replicas
        # i.e. rows 0 and 1 were looked up 3*num_replicas times over all cores, and
        # as the gradient is 1, the accumulated gradient is 3*num_replicas for each
        # position in rows 0 and 1 of the table.
        #
        # See comments in test_pass_none_to_apply_gradients for the update to
        # Feature 2 and its table.
        # The *2 factors below are because those rows have 2 lookups vs
        # the 1 lookup in the other row.
        update = ([[0.3 * num_replicas],
                   [0.3 * num_replicas * 2]], [[0.3 * num_replicas * 2],
                                               [0.3 * num_replicas]],
                  [[0.1 * num_replicas], [0.1 / 3 * num_replicas]])
        golden = tuple([
            before - np.array(up)
            for before, up in zip(before_gradient_apply0, update)
        ])

        self.assertAllClose(golden, after_gradient_apply0)
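The SGD deltas quoted in the comment above all follow from one formula, sketched here as standalone Python; `num_replicas` is an assumed value for illustration, whereas the test reads it from the strategy:

# With a per-lookup gradient g, a table row looked up `lookups` times on each
# of `num_replicas` replicas accumulates a gradient of lookups * num_replicas * g,
# so plain SGD moves it by learning_rate times that amount.
def sgd_row_delta(learning_rate, lookups, num_replicas, g=1.0):
    return learning_rate * lookups * num_replicas * g

num_replicas = 2  # assumed for illustration

# Rows 0 and 1 of the videos table: 3 lookups per replica, gradient 1 each.
print(sgd_row_delta(0.1, 3, num_replicas))         # 0.6 == 0.3 * num_replicas
# A users-table row under the mean combiner over 3 ids: gradient 1/3 per lookup.
print(sgd_row_delta(0.1, 1, num_replicas, 1 / 3))  # ~0.0667 == 0.1 / 3 * num_replicas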
Example #29
 def _shape(batch_size, from_shape):
     if (not isinstance(from_shape, TensorShape) or from_shape.ndims == 0):
         return TensorShape(None)
     else:
         batch_size = tensor_util.constant_value(tf.convert_to_tensor(batch_size, name="batch_size"))
     return TensorShape([batch_size]).concatenate(from_shape)
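A short usage sketch of the helper above, assuming the TensorFlow names its body relies on (`tf`, `TensorShape`, and `tensor_util`) are imported as in the original module:

import tensorflow as tf
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework.tensor_shape import TensorShape

# A known, non-scalar shape gets a static batch dimension prepended...
print(_shape(8, TensorShape([5, 3])))  # (8, 5, 3)
# ...while a scalar shape (or a non-TensorShape) yields an unknown shape.
print(_shape(8, TensorShape([])))      # <unknown>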
Example #30
  def test_not_fully_defined_output_shapes_for_build(self):
    _, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')

    # Build with undefined output shape
    with self.assertRaisesRegex(ValueError, 'Input Feature'):
      mid_level_api.build([TensorShape([1, None, None]) for _ in range(3)])