Example 1
def _unique_layer_name(name):
  """Makes a layer name (or arbitrary string) unique within a TensorFlow graph.

  Arguments:
    name: String name to make unique.

  Returns:
    Unique string name.

  Example:

  ```
    >>> _unique_layer_name('dense')
    dense_1
    >>> _unique_layer_name('dense')
    dense_2
  ```
  """
  layer_name_uids_collection = ops.get_collection('LAYER_NAME_UIDS')
  if not layer_name_uids_collection:
    layer_name_uids = {}
    ops.add_to_collection('LAYER_NAME_UIDS', layer_name_uids)
  else:
    layer_name_uids = layer_name_uids_collection[0]
  if name not in layer_name_uids:
    layer_name_uids[name] = 1
  else:
    layer_name_uids[name] += 1
  return name + '_' + str(layer_name_uids[name])
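
A minimal usage sketch of the same pattern against the public tf.compat.v1 collection API (this re-implementation is an assumption for illustration, not the library's internal code):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

def unique_layer_name(name):
  # Keep a single dict of per-name counters inside a graph collection, so the
  # counters live and die with the graph.
  uids_list = tf.get_collection('LAYER_NAME_UIDS')
  if not uids_list:
    uids = {}
    tf.add_to_collection('LAYER_NAME_UIDS', uids)
  else:
    uids = uids_list[0]
  uids[name] = uids.get(name, 0) + 1
  return '%s_%d' % (name, uids[name])

print(unique_layer_name('dense'))  # dense_1
print(unique_layer_name('dense'))  # dense_2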
Example 2
def _get_saver():
  """Lazy init and return saver."""
  saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
  if saver is None and variables.global_variables():
    saver = tf_saver.Saver()
    ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
  return saver
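
The same get-or-create idiom, sketched with the public tf.compat.v1 API as a simplified stand-in for the private helpers above:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

def get_or_create_saver():
  # Reuse a Saver already registered in the SAVERS collection, if any.
  savers = tf.get_collection(tf.GraphKeys.SAVERS)
  if savers:
    return savers[0]
  # Only build a Saver when there are variables to save.
  if not tf.global_variables():
    return None
  saver = tf.train.Saver()
  tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
  return saver

v = tf.Variable(0.0, name='v')
assert get_or_create_saver() is get_or_create_saver()  # second call is cached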
Example 3
  def _maybe_add_main_op(self, main_op):
    """Adds main op to the SavedModel.

    Args:
      main_op: Main op to run as part of graph initialization. If None, no
        main op will be added to the graph.

    Raises:
      TypeError: if main op is provided but is not of type `Operation`.
      ValueError: if the Graph already contains an init op.
    """
    if main_op is None:
      return

    if not isinstance(main_op, ops.Operation):
      raise TypeError("main_op needs to be an Operation: %r" % main_op)

    # Validate that no other init ops have been added to this graph already.
    # We check main_op and legacy_init_op for thoroughness and explicitness.
    for init_op_key in (constants.MAIN_OP_KEY, constants.LEGACY_INIT_OP_KEY):
      if ops.get_collection(init_op_key):
        raise ValueError(
            "Graph already contains one or more main ops under the "
            "collection {}.".format(init_op_key))

    ops.add_to_collection(constants.MAIN_OP_KEY, main_op)
Example 4
def fertile_stats_variable(params, stats_config, name, container=None):
  r"""Creates a stats object and returns a handle to it.

  Args:
    params: A TensorForestParams object.
    stats_config: A `Tensor` of type `string`. Serialized proto of the stats.
    name: A name for the variable.
    container: An optional `string`. Defaults to `""`.

  Returns:
    A `Tensor` of type mutable `string`. The handle to the stats.
  """
  with ops.name_scope(name, "FertileStatsVariable") as name:
    fertile_stats_var = FertileStatsVariable(params, stats_config, name,
                                             container)
    resource_handle = fertile_stats_var.resource_handle
    create_op = fertile_stats_var.initializer
    is_initialized_op = fertile_stats_var.is_initialized()
    # Adds the variable to the savable list.
    saveable = (
        fertile_stats_var._gather_saveables_for_checkpoint()[  # pylint: disable=protected-access
            "fertile_stats_variable"](name=resource_handle.name))
    ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
    resources.register_resource(resource_handle, create_op, is_initialized_op)
    return resource_handle
Example 5
def add_loss(loss):
    """Adds a externally defined loss to collection of losses.

  Args:
    loss: A loss `Tensor`.
  """
    ops.add_to_collection(ops.GraphKeys.LOSSES, loss)
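
A short usage sketch (TF 1.x graph mode, illustrative values): losses registered this way can later be summed straight out of the LOSSES collection:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

tf.add_to_collection(tf.GraphKeys.LOSSES, tf.constant(1.5))
tf.add_to_collection(tf.GraphKeys.LOSSES, tf.constant(0.5))
total_loss = tf.add_n(tf.get_collection(tf.GraphKeys.LOSSES))

with tf.Session() as sess:
  print(sess.run(total_loss))  # 2.0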
Example 6
def _get_or_create_global_step_read(graph=None):
  """Gets or creates global step read tensor in graph.

  Args:
    graph: The graph in which to create the global step read tensor. If missing,
      use default graph.

  Returns:
    Global step read tensor if there is global_step_tensor else return None.
  """
  graph = graph or ops.get_default_graph()
  global_step_read_tensor = _get_global_step_read(graph)
  if global_step_read_tensor is not None:
    return global_step_read_tensor
  global_step_tensor = get_global_step(graph)
  if global_step_tensor is None:
    return None
  # add 'zero' so that it will create a copy of variable as Tensor.
  with graph.as_default() as g, g.name_scope(None):
    # Use initialized_value to ensure that global_step is initialized before
    # this runs. This is needed because, for example, Estimator builds every
    # model_fn under a dependency on global_step_read_tensor.
    global_step_value = global_step_tensor.initialized_value() if isinstance(
        global_step_tensor, variables.Variable) else global_step_tensor
    global_step_read_tensor = global_step_value + 0
    ops.add_to_collection(GLOBAL_STEP_READ_KEY, global_step_read_tensor)
  return _get_global_step_read(graph)
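
The '+ 0' trick above snapshots the variable as a plain read Tensor. A minimal sketch with the public API (the collection key below is hypothetical; GLOBAL_STEP_READ_KEY is internal):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

global_step = tf.train.get_or_create_global_step()
# Adding zero forces a copy of the variable's current value as a Tensor.
global_step_read = global_step + 0
tf.add_to_collection('global_step_read', global_step_read)  # hypothetical key

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(tf.get_collection('global_step_read')[0]))  # 0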
Example 7
def summary_writer_function(name, tensor, function, family=None):
  """Helper function to write summaries.

  Args:
    name: name of the summary
    tensor: main tensor to form the summary
    function: function taking a tag and a scope which writes the summary
    family: optional, the summary's family

  Returns:
    The result of writing the summary.
  """
  def record():
    with summary_op_util.summary_scope(
        name, family, values=[tensor]) as (tag, scope):
      with ops.control_dependencies([function(tag, scope)]):
        return constant_op.constant(True)

  if context.context().summary_writer_resource is None:
    return control_flow_ops.no_op()
  with ops.device("cpu:0"):
    op = utils.smart_cond(
        should_record_summaries(), record, _nothing, name="")
    ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)  # pylint: disable=protected-access
  return op
Example 8
  def initialize(self, table):
    """Initializes the given `table` with `keys` and `values` tensors.

    Args:
      table: The table to initialize.

    Returns:
      The operation that initializes the table.

    Raises:
      TypeError: when the keys and values data types do not match the table
      key and value data types.
    """
    _check_table_dtypes(table, self._keys.dtype, self._values.dtype)
    with ops.name_scope(
        self._name, values=(table.resource_handle, self._keys,
                            self._values)) as scope:
      if context.executing_eagerly():
        # Ensure a unique name when eager execution is enabled to avoid spurious
        # sharing issues.
        scope += str(ops.uid())
      if fwd_compat.forward_compatible(2018, 9, 19):
        init_op = gen_lookup_ops.lookup_table_import_v2(
            table.resource_handle, self._keys, self._values, name=scope)
      else:
        # To maintain forward compatibility, use the old implementation.
        init_op = gen_lookup_ops.initialize_table_v2(
            table.resource_handle, self._keys, self._values, name=scope)
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    return init_op
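
For context, a hedged sketch of how such initializers are consumed: tf.compat.v1.tables_initializer runs everything registered under GraphKeys.TABLE_INITIALIZERS:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

keys = tf.constant(['a', 'b'])
values = tf.constant([0, 1], dtype=tf.int64)
table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(keys, values), default_value=-1)

with tf.Session() as sess:
  # Runs the init ops collected under GraphKeys.TABLE_INITIALIZERS.
  sess.run(tf.tables_initializer())
  print(sess.run(table.lookup(tf.constant(['b', 'c']))))  # [ 1 -1]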
Example 9
  def _train_model(self, input_fn, hooks):
    all_hooks = []
    with ops.Graph().as_default() as g, g.device(self._device_fn):
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step_tensor = training.create_global_step(g)
      with ops.device('/cpu:0'):
        features, labels = input_fn()
      estimator_spec = self._call_model_fn(features, labels,
                                           model_fn_lib.ModeKeys.TRAIN)
      ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)
      all_hooks.extend([
          training.NanTensorHook(estimator_spec.loss),
          training.LoggingTensorHook(
              {
                  'loss': estimator_spec.loss,
                  'step': global_step_tensor
              },
              every_n_iter=100)
      ])
      all_hooks.extend(hooks)
      all_hooks.extend(estimator_spec.training_hooks)

      if not (estimator_spec.scaffold.saver or
              ops.get_collection(ops.GraphKeys.SAVERS)):
        ops.add_to_collection(ops.GraphKeys.SAVERS,
                              training.Saver(
                                  sharded=True,
                                  max_to_keep=self._config.keep_checkpoint_max,
                                  defer_build=True))

      chief_hooks = []
      if (self._config.save_checkpoints_secs or
          self._config.save_checkpoints_steps):
        saver_hook_exists = any([
            isinstance(h, training.CheckpointSaverHook)
            for h in (all_hooks + chief_hooks +
                      estimator_spec.training_chief_hooks)
        ])
        if not saver_hook_exists:
          chief_hooks = [
              training.CheckpointSaverHook(
                  self._model_dir,
                  save_secs=self._config.save_checkpoints_secs,
                  save_steps=self._config.save_checkpoints_steps,
                  scaffold=estimator_spec.scaffold)
          ]
      with training.MonitoredTrainingSession(
          master=self._config.master,
          is_chief=self._config.is_chief,
          checkpoint_dir=self._model_dir,
          scaffold=estimator_spec.scaffold,
          hooks=all_hooks,
          chief_only_hooks=chief_hooks + estimator_spec.training_chief_hooks,
          save_checkpoint_secs=0,  # Saving is handled by a hook.
          save_summaries_steps=self._config.save_summary_steps,
          config=config_pb2.ConfigProto(allow_soft_placement=True)) as mon_sess:
        loss = None
        while not mon_sess.should_stop():
          _, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
      return loss
Example 10
  def testTrainOpAfterVariables(self):
    export_dir = self._get_export_dir("test_train_op_after_variables")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    with self.test_session(graph=ops.Graph()) as sess:
      # Add `v1` and `v2` variables to the graph.
      v1 = variables.Variable(1, name="v1")
      ops.add_to_collection("v", v1)
      v2 = variables.Variable(2, name="v2")
      ops.add_to_collection("v", v2)

      sess.run(variables.global_variables_initializer())
      builder.add_meta_graph_and_variables(sess, ["pre_foo"])

      train_op = state_ops.assign_add(v1, v2)
      sess.run(train_op)
      # TODO(karmel): remove explicit call when in the public method.
      builder._add_train_op(train_op)
      builder.add_meta_graph(["foo"])

    # Save the SavedModel to disk.
    builder.save()

    with self.test_session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo"], export_dir)
      self.assertIsInstance(
          ops.get_collection(constants.TRAIN_OP_KEY)[0], ops.Tensor)

    with self.test_session(graph=ops.Graph()) as sess:
      loader.load(sess, ["pre_foo"], export_dir)
      self.assertFalse(ops.get_collection(constants.TRAIN_OP_KEY))
Example 11
  def initialize(self, table):
    """Initializes the given `table` with `keys` and `values` tensors.

    Args:
      table: The table to initialize.

    Returns:
      The operation that initializes the table.

    Raises:
      TypeError: when the keys and values data types do not match the table
      key and value data types.
    """
    _check_table_dtypes(table, self._keys.dtype, self._values.dtype)
    with ops.name_scope(
        self._name, values=(table.table_ref, self._keys,
                            self._values)) as scope:
      if context.executing_eagerly():
        # Ensure a unique name when eager execution is enabled to avoid spurious
        # sharing issues.
        scope += str(ops.uid())
      init_op = gen_lookup_ops.initialize_table_v2(
          table.table_ref, self._keys, self._values, name=scope)
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    return init_op
Example 12
def _CreateParamsSavable(params,
                         model,
                         base_variable_scope=None,
                         name="params_canonical"):
  """Create a RNNParamsSaveable for the weight and bias parameters.

  Args:
    params: a Variable for weight and bias parameters.
    model: a CudnnRNN model.
    base_variable_scope: a string, prefix of names of saved variables.
    name: a string, name of the RNNParamsSaveable object.
  Returns:
    a RNNParamsSaveable object.
  """
  if model._rnn_mode == CUDNN_LSTM:
    fn = cudnn_rnn_ops.CudnnLSTMSaveable
  elif model._rnn_mode == CUDNN_GRU:
    fn = cudnn_rnn_ops.CudnnGRUSaveable
  elif model._rnn_mode == CUDNN_RNN_TANH:
    fn = cudnn_rnn_ops.CudnnRNNTanhSaveable
  elif model._rnn_mode == CUDNN_RNN_RELU:
    fn = cudnn_rnn_ops.CudnnRNNReluSaveable
  params_saveable = fn(
      params,
      model.num_layers,
      model.num_units,
      model.input_size,
      model.input_mode,
      model.direction,
      scope=base_variable_scope,
      name=name)
  ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, params_saveable)
  return params_saveable
Example 13
 def _add_iterator_ops_to_collection(self, init_op, get_next):
   ops.add_to_collection("iterator_ops", init_op)
   # `get_next` may be a tuple e.g. in TensorSliceDataset. Since Collections
   # do not support tuples we flatten the tensors and restore the shape in
   # `_get_iterator_ops_from_collection`.
   for el in nest.flatten(get_next):
     ops.add_to_collection("iterator_ops", el)
Example 14
def tree_variable(params, tree_config, stats_handle, name, container=None):
  r"""Creates a tree model and returns a handle to it.

  Args:
    params: A TensorForestParams object.
    tree_config: A `Tensor` of type `string`. Serialized proto of the tree.
    stats_handle: Resource handle to the stats object.
    name: A name for the variable.
    container: An optional `string`. Defaults to `""`.

  Returns:
    A `Tensor` of type mutable `string`. The handle to the tree.
  """
  with ops.name_scope(name, "TreeVariable") as name:
    resource_handle = gen_model_ops.decision_tree_resource_handle_op(
        container, shared_name=name, name=name)

    create_op = gen_model_ops.create_tree_variable(
        resource_handle,
        tree_config,
        params=params.serialized_params_proto)
    is_initialized_op = gen_model_ops.tree_is_initialized_op(resource_handle)
    # Adds the variable to the savable list.
    saveable = TreeVariableSavable(params, resource_handle, stats_handle,
                                   create_op,
                                   resource_handle.name)
    ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
    resources.register_resource(resource_handle, create_op, is_initialized_op)
    return resource_handle
Example 15
def _compute_weighted_loss(losses, weight):
    """Computes the weighted loss.

  Args:
    losses: A tensor of size [batch_size, d1, ... dN].
    weight: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.

  Returns:
    A scalar `Tensor` that returns the weighted loss.

  Raises:
    ValueError: If the weight shape is not compatible with the losses shape or
      if the number of dimensions (rank) of either losses or weight is missing.
  """
    losses = math_ops.to_float(losses)
    weight = math_ops.to_float(ops.convert_to_tensor(weight))

    if losses.get_shape().ndims is None:
        raise ValueError("losses.get_shape().ndims cannot be None")
    if weight.get_shape().ndims is None:
        raise ValueError("weight.get_shape().ndims cannot be None")

    total_loss = _scale_losses(losses, weight)
    num_present = _num_present(losses, weight)
    mean_loss = _safe_mean(total_loss, num_present)
    ops.add_to_collection(ops.GraphKeys.LOSSES, mean_loss)
    return mean_loss
Example 16
def add_scalar_summary(tensor, name=None, prefix=None, print_summary=False):
  """Adds a scalar summary for the given tensor.

  Args:
    tensor: a variable or op tensor.
    name: the optional name for the summary.
    prefix: An optional prefix for the summary names.
    print_summary: If `True`, the summary is printed to stdout when the summary
      is computed.

  Returns:
    A scalar `Tensor` of type `string` whose contents are the serialized
    `Summary` protocol buffer.
  """
  collections = [] if print_summary else None
  summary_name = _get_summary_name(tensor, name, prefix)

  # If print_summary, then we need to make sure that this call doesn't add the
  # non-printing op to the collection. We'll add it to the collection later.
  op = summary.scalar(
      name=summary_name, tensor=tensor, collections=collections)
  if print_summary:
    op = logging_ops.Print(op, [tensor], summary_name)
    ops.add_to_collection(ops.GraphKeys.SUMMARIES, op)
  return op
Example 17
def apply_regularization(regularizer, weights_list=None):
  """Returns the summed penalty by applying `regularizer` to the `weights_list`.

  Adding a regularization penalty over the layer weights and embedding weights
  can help prevent overfitting the training data. Regularization over layer
  biases is less common/useful, but assuming proper data preprocessing/mean
  subtraction, it usually shouldn't hurt much either.

  Args:
    regularizer: A function that takes a single `Tensor` argument and returns
      a scalar `Tensor` output.
    weights_list: List of weights `Tensors` or `Variables` to apply
      `regularizer` over. Defaults to the `GraphKeys.WEIGHTS` collection if
      `None`.

  Returns:
    A scalar representing the overall regularization penalty.

  Raises:
    ValueError: If `regularizer` does not return a scalar output.
  """
  if not weights_list:
    weights_list = ops.get_collection(ops.GraphKeys.WEIGHTS)
  with ops.op_scope(weights_list, 'get_regularization_penalty') as scope:
    penalties = [regularizer(w) for w in weights_list]
    for p in penalties:
      if p.get_shape().ndims != 0:
        raise ValueError('regularizer must return a scalar Tensor instead of a '
                         'Tensor with rank %d.' % p.get_shape().ndims)

    summed_penalty = math_ops.add_n(penalties, name=scope)
    ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, summed_penalty)
    return summed_penalty
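
A hand-rolled sketch of the same flow; the L2 regularizer and the 0.1 scale are arbitrary assumptions for illustration:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

w = tf.Variable([1.0, 2.0], name='w')
tf.add_to_collection(tf.GraphKeys.WEIGHTS, w)

def l2(weight):
  return 0.1 * tf.nn.l2_loss(weight)  # returns a scalar Tensor

penalty = tf.add_n([l2(v) for v in tf.get_collection(tf.GraphKeys.WEIGHTS)])
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, penalty)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(penalty))  # 0.25 == 0.1 * (1**2 + 2**2) / 2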
Example 18
  def testTrainOpGroup(self):
    export_dir = self._get_export_dir("test_train_op_group")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    with self.test_session(graph=ops.Graph()) as sess:
      # Add `v1` and `v2` variables to the graph.
      v1 = variables.Variable(1, name="v1")
      ops.add_to_collection("v", v1)
      v2 = variables.Variable(2, name="v2")
      ops.add_to_collection("v", v2)

      sess.run(variables.global_variables_initializer())
      train_op = control_flow_ops.group()

      sess.run(train_op)
      # TODO(karmel): remove explicit call when in the public method.
      builder._add_train_op(train_op)
      builder.add_meta_graph_and_variables(sess, ["foo"])

    # Save the SavedModel to disk.
    builder.save()

    with self.test_session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo"], export_dir)
      self.assertEqual(1, ops.get_collection("v")[0].eval())
      self.assertEqual(2, ops.get_collection("v")[1].eval())
      self.assertIsInstance(
          ops.get_collection(constants.TRAIN_OP_KEY)[0], ops.Operation)
Example 19
  def initialize(self, table):
    """Initializes the table from a text file.

    Args:
      table: The table to be initialized.

    Returns:
      The operation that initializes the table.

    Raises:
      TypeError: when the keys and values data types do not match the table
      key and value data types.
    """
    _check_table_dtypes(table, self.key_dtype, self.value_dtype)
    with ops.name_scope(self._name, "text_file_init",
                        (table.table_ref,)) as scope:
      filename = ops.convert_to_tensor(
          self._filename, dtypes.string, name="asset_filepath")
      # pylint: disable=protected-access
      init_op = gen_lookup_ops._initialize_table_from_text_file_v2(
          table.table_ref,
          filename,
          self._key_index,
          self._value_index,
          -1 if self._vocab_size is None else self._vocab_size,
          self._delimiter,
          name=scope)
      # pylint: enable=protected-access
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    # If the filename tensor is anything other than a string constant (e.g., if
    # it is a placeholder) then it does not make sense to track it as an asset.
    if context.in_graph_mode() and constant_op.is_constant(filename):
      ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename)
    return init_op
Example 20
  def testCustomSaveable(self):
    export_dir = self._get_export_dir("custom_saveable")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    with session.Session(
        graph=ops.Graph(),
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
      # CheckpointedOp is a key-value table that can be saved across sessions.
      # The table registers itself in the SAVEABLE_OBJECTS collection.
      v1 = saver_test_utils.CheckpointedOp(name="v1")
      variables.global_variables_initializer().run()
      v1.insert("k1", 3.0).run()
      # Once the table is restored, we can access it through this reference.
      ops.add_to_collection("table_ref", v1.table_ref)
      builder.add_meta_graph_and_variables(sess, ["foo"])

    # Save the SavedModel to disk.
    builder.save()

    with session.Session(
        graph=ops.Graph(),
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
      loader.load(sess, ["foo"], export_dir)
      # Instantiate a wrapper object from the checkpointed reference.
      v1 = saver_test_utils.CheckpointedOp(
          name="v1", table_ref=ops.get_collection("table_ref")[0])
      self.assertEqual(b"k1", v1.keys().eval())
      self.assertEqual(3.0, v1.values().eval())
Example 21
  def testKeepNodes(self):
    g = ops.Graph()
    with g.as_default():
      a1 = variables.VariableV1(
          1.0)  # Must be preserved since it's in the collection 'variables'.
      a2 = constant_op.constant(0, shape=[50, 50], name='keep')
      ops.add_to_collection('a2', a2)  # Explicitly add to collection.
      with g._attr_scope(
          {'_grappler_do_not_remove': attr_value_pb2.AttrValue(b=True)}):
        a3 = constant_op.constant(0, name='keep2')
      b = constant_op.constant(1, shape=[100, 10])
      c = constant_op.constant(0, shape=[10, 30])
      d = math_ops.matmul(b, c)
      ops.add_to_collection('train_op', d)  # d is the fetch node.

    # Optimize the graph.
    mg = meta_graph.create_meta_graph_def(graph=g)
    config = config_pb2.ConfigProto()
    rewriter_config = config.graph_options.rewrite_options
    rewriter_config.min_graph_nodes = -1
    optimized_graph = tf_optimizer.OptimizeGraph(config, mg)

    # Check that the nodes referenced in various collections have been preserved
    optimized_graph_nodes = [node.name for node in optimized_graph.node]
    expected_nodes = [
        d.op.name, a1.op.name, a2.op.name, a3.op.name, 'Variable/initial_value',
        'Variable/Assign'
    ]
    self.assertEqual(len(optimized_graph_nodes), len(expected_nodes))
    self.assertAllInSet(optimized_graph_nodes, expected_nodes)
Example 22
    def initialize_from(self, keys, values, name=None):
        """Initialize the table with the provided keys and values tensors.

    Construct an initializer object from keys and value tensors.

    Args:
      keys: The tensor for the keys.
      values: The tensor for the values.
      name: Optional name for the op.

    Returns:
      The operation that initializes the table.

    Raises:
      TypeError: when the keys and values data types do not match the table
      key and value data types.
    """
        if name is None:
            name = "%s_initialize_table" % self.name
        with ops.op_scope([keys, values], None, name):
            keys = ops.convert_to_tensor(keys, dtype=self.key_dtype, name="keys")
            values = ops.convert_to_tensor(values, dtype=self.value_dtype, name="values")

        init_op = gen_data_flow_ops._initialize_table(self.table_ref, keys, values, name=name)
        ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
        return init_op
Example 23
  def __init__(self, iterator_resource, initializer, output_types,
               output_shapes, output_classes):
    """Creates a new iterator from the given iterator resource.

    Note: Most users will not call this initializer directly, and will
    instead use `Dataset.make_initializable_iterator()` or
    `Dataset.make_one_shot_iterator()`.

    Args:
      iterator_resource: A `tf.resource` scalar `tf.Tensor` representing the
        iterator.
      initializer: A `tf.Operation` that should be run to initialize this
        iterator.
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this iterator.
      output_shapes: A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this iterator.
      output_classes: A nested structure of Python `type` objects corresponding
        to each component of an element of this iterator.
    """
    self._iterator_resource = iterator_resource
    self._initializer = initializer
    self._output_classes = output_classes
    self._output_types = output_types
    self._output_shapes = output_shapes
    self._string_handle = gen_dataset_ops.iterator_to_string_handle(
        self._iterator_resource)
    self._get_next_call_count = 0
    ops.add_to_collection(GLOBAL_ITERATORS, self._iterator_resource)
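
A usage sketch (TF 1.x graph mode): building an initializable iterator through the public tf.data API eventually reaches this constructor, which registers the iterator resource under GLOBAL_ITERATORS:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

dataset = tf.data.Dataset.range(3)
iterator = tf.data.make_initializable_iterator(dataset)
next_element = iterator.get_next()

with tf.Session() as sess:
  sess.run(iterator.initializer)
  print(sess.run(next_element))  # 0
  print(sess.run(next_element))  # 1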
Example 24
  def __init__(self, iterator_resource, initializer, output_types,
               output_shapes, output_classes):
    """Creates a new iterator from the given iterator resource.

    Note: Most users will not call this initializer directly, and will
    instead use `Dataset.make_initializable_iterator()` or
    `Dataset.make_one_shot_iterator()`.

    Args:
      iterator_resource: A `tf.resource` scalar `tf.Tensor` representing the
        iterator.
      initializer: A `tf.Operation` that should be run to initialize this
        iterator.
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this iterator.
      output_shapes: A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this iterator.
      output_classes: A nested structure of Python `type` objects corresponding
        to each component of an element of this iterator.
    """
    self._iterator_resource = iterator_resource
    self._initializer = initializer

    if (output_types is None or output_shapes is None
        or output_classes is None):
      raise ValueError("If `structure` is not specified, all of "
                       "`output_types`, `output_shapes`, and `output_classes`"
                       " must be specified.")
    self._structure = structure_lib.convert_legacy_structure(
        output_types, output_shapes, output_classes)

    self._string_handle = gen_dataset_ops.iterator_to_string_handle(
        self._iterator_resource)
    self._get_next_call_count = 0
    ops.add_to_collection(GLOBAL_ITERATORS, self._iterator_resource)
Example 25
def tree_ensemble_variable(stamp_token,
                           tree_ensemble_config,
                           name,
                           container=None):
  r"""Creates a tree ensemble model and returns a handle to it.

  Args:
    stamp_token: The initial stamp token value for the ensemble resource.
    tree_ensemble_config: A `Tensor` of type `string`.
      Serialized proto of the tree ensemble.
    name: A name for the ensemble variable.
    container: An optional `string`. Defaults to `""`.

  Returns:
    A `Tensor` of type mutable `string`. The handle to the tree ensemble.
  """
  with ops.name_scope(name, "TreeEnsembleVariable") as name:
    resource_handle = gen_model_ops.decision_tree_ensemble_resource_handle_op(
        container, shared_name=name, name=name)
    create_op = gen_model_ops.create_tree_ensemble_variable(
        resource_handle, stamp_token, tree_ensemble_config)
    is_initialized_op = gen_model_ops.tree_ensemble_is_initialized_op(
        resource_handle)
    # Adds the variable to the savable list.
    saveable = TreeEnsembleVariableSavable(resource_handle, create_op,
                                           resource_handle.name)
    ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
    resources.register_resource(resource_handle, create_op, is_initialized_op)
    return resource_handle
Example 26
def _get_default_variable_store():
  store = ops.get_collection(_VARSTORE_KEY)
  if store:
    return store[0]
  store = _VariableStore()
  ops.add_to_collection(_VARSTORE_KEY, store)
  return store
Example 27
 def test_train_worker_monitor(self):
   # We need to explicitly set device due to check on non-chief workers
   # requiring all variables to have a device assigned.
   with tf.Graph().as_default() as g, g.device('/cpu:0'):
     global_step = tf.contrib.framework.create_global_step(g)
     train_op = tf.assign_add(global_step, 1)
     loss_op = tf.constant(2.0)
     tf.scalar_summary('loss', loss_op)
     # Add explicit "local" init op to initialize all variables
     # as there's no chief to init here.
     init_op = variables.initialize_all_variables()
     ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, init_op)
     # Create worker monitors where one should be active on the worker
     # and the other chief exclusive.
     chief_exclusive_monitor = _BaseMonitorWrapper(False)
     all_workers_monitor = _BaseMonitorWrapper(True)
     with self.test_session(g):
       loss = learn.graph_actions.train(
           g, output_dir=self._output_dir,
           global_step_tensor=global_step,
           train_op=train_op, loss_op=loss_op,
           supervisor_is_chief=False, steps=1,
           monitors=[chief_exclusive_monitor, all_workers_monitor])
     self.assertEqual(2.0, loss)
     self.assertTrue(not chief_exclusive_monitor.is_active and
                     all_workers_monitor.is_active,
                     'Only non-chief runnable monitor must have been active.')
     self.assertTrue(not chief_exclusive_monitor.has_step and
                     all_workers_monitor.has_step,
                     'Only non-chief runnable monitor must have a step.')
Example 28
  def initialize(self, table):
    """Initializes the table from a text file.

    Args:
      table: The table to be initialized.

    Returns:
      The operation that initializes the table.

    Raises:
      TypeError: when the keys and values data types do not match the table
      key and value data types.
    """
    # pylint: disable=protected-access
    table._check_table_dtypes(self.key_dtype, self.value_dtype)
    with ops.name_scope(self._name, "text_file_init", [table]) as scope:
      filename = ops.convert_to_tensor(self._filename,
                                       dtypes.string,
                                       name="asset_filepath")
      init_op = gen_data_flow_ops._initialize_table_from_text_file(
          table.table_ref,
          filename,
          self._key_index,
          self._value_index,
          -1 if self._vocab_size is None else self._vocab_size,
          self._delimiter,
          name=scope)
    # pylint: enable=protected-access
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename)
    return init_op
Example 29
  def _create_saveable(self):
    """Create custom saveable for the Cudnn layer.

    Called during layer building process to make sharing checkpoints between
    Cudnn and Cudnn-compatible RNNs easy.
    Returns:
      a `CudnnOpaqueParamsSaveable` object.
    Raises:
      RuntimeError: if any custom saveable is already created for this layer.
    """
    if self._saveable is not None:
      raise RuntimeError("Cudnn saveable already created.")
    self._saveable = self._saveable_cls(  # pylint:disable=not-callable
        opaque_params=self.trainable_variables[0],
        num_layers=self.num_layers,
        num_units=self.num_units,
        input_size=self.input_size,
        input_mode=self.input_mode,
        direction=self.direction,
        scope=vs.get_variable_scope(),
        name="%s_saveable" % self.trainable_variables[0].name.split(":")[0])
    self._saveable._add_trackable_dependencies(  # pylint: disable=protected-access
        trackable=self,
        dtype=self._plain_dtype)
    ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable)
Example 30
  def _init_ready_op(self,
                     ready_op=USE_DEFAULT,
                     ready_for_local_init_op=USE_DEFAULT):
    """Initializes ready_op.

    Args:
      ready_op: `Tensor` to check if the model is initialized.
        If it's set to USE_DEFAULT, creates an op that checks all
        the variables are initialized.
      ready_for_local_init_op: `Tensor` to check if the model is ready to run
        local_init_op.
        If it's set to USE_DEFAULT, creates an op that checks all
        the global variables are initialized.
    """
    if ready_op is Supervisor.USE_DEFAULT:
      ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)
      if ready_op is None:
        ready_op = variables.report_uninitialized_variables()
        ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
    self._ready_op = ready_op

    # ready_for_local_init_op defaults to None for backward compatibility
    if ready_for_local_init_op is Supervisor.USE_DEFAULT:
      ready_for_local_init_op = self._get_first_op_from_collection(
          ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
    self._ready_for_local_init_op = ready_for_local_init_op
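
A sketch of the default ready_op behavior: report_uninitialized_variables returns the names of variables that still need initialization, and an empty result means the model is ready:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

v = tf.Variable(1.0, name='v')
ready_op = tf.report_uninitialized_variables()

with tf.Session() as sess:
  print(sess.run(ready_op))  # [b'v']  -> not ready yet
  sess.run(tf.global_variables_initializer())
  print(sess.run(ready_op))  # []      -> ready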
Example 31
  def test_unused_update_ops(self, create_gan_model_fn, provide_update_ops):
    model = create_gan_model_fn()
    loss = train.gan_loss(model)

    # Add generator and discriminator update ops.
    with variable_scope.variable_scope(model.generator_scope):
      gen_update_count = variable_scope.get_variable('gen_count', initializer=0)
      gen_update_op = gen_update_count.assign_add(1)
      ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, gen_update_op)
    with variable_scope.variable_scope(model.discriminator_scope):
      dis_update_count = variable_scope.get_variable('dis_count', initializer=0)
      dis_update_op = dis_update_count.assign_add(1)
      ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, dis_update_op)

    # Add an update op outside the generator and discriminator scopes.
    if provide_update_ops:
      kwargs = {
          'update_ops': [
              constant_op.constant(1.0), gen_update_op, dis_update_op
          ]
      }
    else:
      ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, constant_op.constant(1.0))
      kwargs = {}

    g_opt = gradient_descent.GradientDescentOptimizer(1.0)
    d_opt = gradient_descent.GradientDescentOptimizer(1.0)

    with self.assertRaisesRegexp(ValueError, 'There are unused update ops:'):
      train.gan_train_ops(
          model, loss, g_opt, d_opt, check_for_unused_update_ops=True, **kwargs)
    train_ops = train.gan_train_ops(
        model, loss, g_opt, d_opt, check_for_unused_update_ops=False, **kwargs)

    with self.test_session(use_gpu=True) as sess:
      sess.run(variables.global_variables_initializer())
      self.assertEqual(0, gen_update_count.eval())
      self.assertEqual(0, dis_update_count.eval())

      train_ops.generator_train_op.eval()
      self.assertEqual(1, gen_update_count.eval())
      self.assertEqual(0, dis_update_count.eval())

      train_ops.discriminator_train_op.eval()
      self.assertEqual(1, gen_update_count.eval())
      self.assertEqual(1, dis_update_count.eval())
Example 32
def apply_mask(x, scope, task_id=1):
    """Apply mask to a given weight tensor.

  Args:
    x: Input weight tensor.
    scope: The current variable scope. Defaults to "".
    task_id: Integer id of the current task. Defaults to 1.

  Returns:
    Tensor representing the masked weights.

  """
    mask = _weight_mask_variable(x, scope)
    threshold = _weight_threshold_variable(x, scope)

    conditional_op = control_flow_ops.cond(
        have_encountered_new_task(mask,
                                  task_id), lambda: control_flow_ops.no_op(),
        lambda: initialize_mask_and_weight(mask, x, task_id))

    with tf.control_dependencies([conditional_op]):
        # Use all previous tasks' weights and the current task's weights for inference.
        if FLAGS.share_only_task_1:
            boolean_mask = tf.cast(tf.logical_or(
                tf.equal(tf.identity(mask), 1),
                tf.equal(tf.identity(mask), task_id)),
                                   dtype=tf.float32)
        else:
            boolean_mask = tf.cast(tf.logical_and(
                tf.greater_equal(tf.identity(mask), 1),
                tf.less_equal(tf.identity(mask), task_id)),
                                   dtype=tf.float32)

        masked_weights = math_ops.multiply(boolean_mask, x, 'masked_weight')

        # Make sure the mask for a given variable is not added to the
        # collection multiple times. This is particularly important when
        # applying a mask to an RNN's weight variables.
        if mask not in ops.get_collection_ref(_MASK_COLLECTION):
            ops.add_to_collection(_THRESHOLD_COLLECTION, threshold)
            ops.add_to_collection(_MASK_COLLECTION, mask)
            ops.add_to_collection(_WEIGHT_COLLECTION, x)
        # ops.add_to_collection(_MASKED_WEIGHT_COLLECTION, masked_weights)

    return masked_weights
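
The core of the example, isolated: element-wise multiplication by a 0/1 mask zeroes out pruned weights (the tensors here are arbitrary stand-ins):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
mask = tf.constant([[1.0, 0.0], [0.0, 1.0]])
masked = tf.multiply(mask, x, name='masked_weight')

with tf.Session() as sess:
  print(sess.run(masked))  # [[1. 0.] [0. 4.]]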
Example 33
  def testGetRegularizationLoss(self):
    # Empty regularization collection should evaluate to 0.0.
    with self.cached_session():
      self.assertEqual(0.0, util.get_regularization_loss().eval())

    # Loss should sum.
    ops.add_to_collection(
        ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(2.0))
    ops.add_to_collection(
        ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(3.0))
    with self.cached_session():
      self.assertEqual(5.0, util.get_regularization_loss().eval())

    # Check scope capture mechanism.
    with ops.name_scope('scope1'):
      ops.add_to_collection(
          ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(-1.0))
    with self.cached_session():
      self.assertEqual(-1.0, util.get_regularization_loss('scope1').eval())
Example 34
    def testGradientOfDeserializedCond(self):
        with ops.Graph().as_default():
            pred = array_ops.placeholder(dtypes.bool, name="pred")
            x = constant_op.constant(3.0, name="x")
            ops.add_to_collection("x", x)

            def true_fn():
                return math_ops.pow(x, 3)

            def false_fn():
                return x

            ops.add_to_collection("pred", pred)
            cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
            for c in cond:
                ops.add_to_collection("cond", c)
            meta_graph = saver.export_meta_graph()

        with ops.Graph().as_default() as g:
            with self.test_session(graph=g) as sess:
                saver.import_meta_graph(meta_graph)
                x = ops.get_collection("x")[0]
                pred = ops.get_collection("pred")[0]
                cond = ops.get_collection("cond")
                cond_grad = gradients_impl.gradients(cond, [x],
                                                     name="cond_grad")
                cond_grad_grad = gradients_impl.gradients(
                    cond_grad, [x], name="cond_grad_grad")
                # d[x^3]/dx = 3x^2
                true_val = sess.run(cond_grad, {pred: True})
                self.assertEqual(true_val, [27.0])
                # d[x]/dx = 1
                false_val = sess.run(cond_grad, {pred: False})
                self.assertEqual(false_val, [1.0])

                true_val = sess.run(cond_grad_grad, {pred: True})
                # d2[x^3]/dx2 = 6x
                self.assertEqual(true_val, [18.0])
                false_val = sess.run(cond_grad_grad, {pred: False})
                # d2[x]/dx2 = 0
                self.assertEqual(false_val, [0.0])
Example 35
 def _build_graph(self,
                  range_limit=10,
                  num_repeats=5,
                  buffer_size=5,
                  seed=None,
                  reshuffle_each_iteration=None,
                  build_saveable=True):
     iterator = dataset_ops.Dataset.range(range_limit).shuffle(
         buffer_size,
         seed=seed,
         reshuffle_each_iteration=reshuffle_each_iteration).repeat(
             num_repeats).make_initializable_iterator()
     if build_saveable:
         saveable = contrib_iterator_ops.make_saveable_from_iterator(
             iterator)
         ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
     init_op = iterator.initializer
     get_next = iterator.get_next()
     ops.add_to_collection("iterator_ops", init_op)
     ops.add_to_collection("iterator_ops", get_next)
     saver = saver_lib.Saver(allow_empty=True)
     return init_op, get_next, saver
Example 36
    def testLegacyInitOp(self):
        export_dir = os.path.join(test.get_temp_dir(), "test_legacy_init_op")
        builder = saved_model_builder.SavedModelBuilder(export_dir)

        with self.test_session(graph=ops.Graph()) as sess:
            # Add `v1` and `v2` variables to the graph.
            v1 = variables.Variable(1, name="v1")
            ops.add_to_collection("v", v1)
            v2 = variables.Variable(2, name="v2")
            ops.add_to_collection("v", v2)

            # Initialize another variable `v3` to 42.
            v3 = variables.Variable(42,
                                    name="v3",
                                    trainable=False,
                                    collections=[])
            ops.add_to_collection("v", v3)

            # Set up an assignment op to be run as part of the legacy_init_op.
            assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
            legacy_init_op = control_flow_ops.group(assign_v3,
                                                    name="legacy_init_op")

            sess.run(variables.global_variables_initializer())
            builder.add_meta_graph_and_variables(sess, ["foo"],
                                                 legacy_init_op=legacy_init_op)

        # Save the SavedModel to disk.
        builder.save()

        with self.test_session(graph=ops.Graph()) as sess:
            loader.load(sess, ["foo"], export_dir)
            self.assertEqual(1, ops.get_collection("v")[0].eval())
            self.assertEqual(2, ops.get_collection("v")[1].eval())
            # v3 evaluates to the sum of the first two variables, assigned as
            # part of the legacy_init_op following the restore.
            self.assertEqual(3, ops.get_collection("v")[2].eval())
Example 37
 def add_summary_ops(self, name, value):
     with ops.name_scope(self.name):
         summary_op = summary.scalar(name=name, tensor=value)
         fops.add_to_collection(fops.GraphKeys.SUMMARIES, summary_op)
Example 38
 def fit(self,
         x=None,
         y=None,
         val_x=None,
         val_y=None,
         batch_size=None,
         shuffle=True,
         epochs=1,
         steps_per_epoch=None,
         validation_steps=None):
     if batch_size is None and steps_per_epoch is None:
         batch_size = 32
     if x is None and y is None and steps_per_epoch is None:
         raise ValueError("When fitting from data tensors,"
                          " `steps_per_epoch` must be specified")
     # prepare global_step
     step = self._load_global_step()
     if training_util.get_global_step() is None:
         fops.add_to_collection(
             name=fops.GraphKeys.GLOBAL_STEP,
             value=variables.Variable(
                 step, name='global_step', trainable=False))
     # build train function
     x, y = self._standardize_data(x, y)
     data = x + y
     if self.uses_learning_phase:  # [1.] flag for training
         data += [1.]
     num_train_samples = utils.check_num_samples(
         data, batch_size=batch_size,
         steps=steps_per_epoch)
     train_steps = steps_per_epoch or (
             (num_train_samples + batch_size - 1) // batch_size)
     initial_epoch = step // train_steps
     if epochs is not None:
         if epochs <= initial_epoch:
             logging.info("=>Skipping training since max epoch has already arrived")
             exit(0)
     self._prepare_train_hooks(epochs=epochs,
                               steps_per_epoch=train_steps,
                               initial_epoch=initial_epoch)
     self._make_train_function()
     # build val function
     validation = False
     num_val_samples = None
     if val_x is not None and val_y is not None:
         validation = True
         val_x, val_y = self._standardize_data(val_x, val_y)
         val_data = val_x + val_y
     elif validation_steps:
         validation = True
         val_data = []
     else:
         val_data = []
     if validation:
         if self.uses_learning_phase:  # [0.] flag for evaluation
             val_data += [0.]
         num_val_samples = utils.check_num_samples(
             val_data, batch_size=batch_size,
             steps=validation_steps)
         val_steps = validation_steps or (
                 (num_val_samples + batch_size - 1) // batch_size)
         self._prepare_val_hooks(epochs=epochs,
                                 steps_per_epoch=val_steps,
                                 initial_epoch=initial_epoch)
         self._make_eval_function()
     self.fit_loop(data=data,
                   val_data=val_data,
                   batch_size=batch_size,
                   shuffle=shuffle,
                   epochs=epochs,
                   initial_epoch=initial_epoch,
                   steps_per_epoch=steps_per_epoch,
                   validation_steps=validation_steps,
                   num_train_samples=num_train_samples,
                   num_val_samples=num_val_samples)
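
The global-step bootstrap at the top of fit(), isolated as a sketch with the public tf.compat.v1 API:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

if tf.train.get_global_step() is None:
  tf.add_to_collection(
      tf.GraphKeys.GLOBAL_STEP,
      tf.Variable(0, name='global_step', trainable=False))
assert tf.train.get_global_step() is not None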
Example 39
  def apply(self, var_list=None):
    """Maintains moving averages of variables.

    `var_list` must be a list of `Variable` objects.  This method
    creates shadow variables (holding the moving averages)
    for all elements of `var_list`, and
    updates the moving averages using the current `var_list` values. Shadow
    variables for `Variable` objects are initialized to the variable's initial
    value.

    Shadow variables are created with `trainable=False`. To access them you
    can use the EMA object's `average` method. Note that `EMA` objects are
    not trackable by checkpoints, so if you want to checkpoint or restore the
    moving variables you will need to manually grab the shadow
    variables via `average()` and assign them as `tf.Module` properties or
    directly pass them to your `tf.train.Checkpoint`.

    Note that `apply()` can be called multiple times. When eager execution is
    enabled each call to apply will update the variables once, so this needs to
    be called in a loop.

    In legacy TF 1.x graphs, this method returns an op that updates all
    shadow variables from the current value of their associated variables. In
    TF 1.x graphs without automatic control dependencies, this op needs to be
    run manually.

    Args:
      var_list: A list of Variable objects. The variables
        must be of types bfloat16, float16, float32, or float64.
        (In legacy TF 1.x graphs these may be tensors, but this is unsupported
        when eager execution is enabled.)

    Returns:
      An Operation that updates the moving averages.

    Raises:
      TypeError: If the arguments are not an allowed type.
    """
    # TODO(touts): op_scope
    if var_list is None:
      var_list = variables.trainable_variables()
    for v in var_list:
      if (isinstance(v, ops.Tensor)
          and ops.executing_eagerly_outside_functions()):
        raise TypeError(
            "tf.train.ExponentialMovingAverage does not support non-Variable"
            " tensors when eager execution is enabled.")
    zero_debias_true = set()  # set of vars to set `zero_debias=True`
    for var in var_list:
      if var.dtype.base_dtype not in [
          dtypes.bfloat16, dtypes.float16, dtypes.float32, dtypes.float64
      ]:
        raise TypeError("The variables must be half, float, or double: %s" %
                        var.name)

      if var.ref() not in self._averages:
        # For variables: to lower communication bandwidth across devices we keep
        # the moving averages on the same device as the variables. For other
        # tensors, we rely on the existing device allocation mechanism.
        with ops.init_scope():
          if isinstance(var, variables.Variable):
            with ops.device(var.device):
              initialized_value = var.initialized_value()
            avg = slot_creator.create_slot(
                var,
                initialized_value,
                self.name,
                colocate_with_primary=True,
                copy_xla_sharding=True)
            # NOTE(mrry): We only add `tf.Variable` objects to the
            # `MOVING_AVERAGE_VARIABLES` collection.
            ops.add_to_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, var)
          else:
            avg = slot_creator.create_zeros_slot(
                var,
                self.name,
                colocate_with_primary=(var.op.type in [
                    "Variable", "VariableV2", "VarHandleOp"
                ]),
                copy_xla_sharding=True)
            if self._zero_debias:
              zero_debias_true.add(avg.ref())
        self._averages[var.ref()] = avg

    with ops.name_scope(self.name) as scope:
      decay = ops.convert_to_tensor(
          self._decay, dtype=dtypes.float32, name="decay")
      if self._num_updates is not None:
        num_updates = math_ops.cast(
            self._num_updates, dtypes.float32, name="num_updates")
        decay = math_ops.minimum(decay,
                                 (1.0 + num_updates) / (10.0 + num_updates))
      updates = []
      for var in var_list:
        avg = self._averages[var.ref()]
        zero_debias = avg.ref() in zero_debias_true
        updates.append(assign_moving_average(avg, var, decay, zero_debias))
      return control_flow_ops.group(*updates, name=scope)
Example 40
    def input_layer_with_layer_annotations(features,
                                           feature_columns,
                                           weight_collections=None,
                                           trainable=True,
                                           cols_to_vars=None,
                                           scope=None,
                                           cols_to_output_tensors=None,
                                           from_template=False):
        """Returns a dense `Tensor` as input layer based on given `feature_columns`.

    Generally a single example in training data is described with
    FeatureColumns. At the first layer of the model, this column-oriented
    data should be converted to a single `Tensor`.

    This is like tf.feature_column.input_layer, except with added
    Integrated-Gradient annotations.

    Args:
      features: A mapping from key to tensors. `_FeatureColumn`s look up via
        these keys. For example `numeric_column('price')` will look at 'price'
        key in this dict. Values can be a `SparseTensor` or a `Tensor` depends
        on corresponding `_FeatureColumn`.
      feature_columns: An iterable containing the FeatureColumns to use as
        inputs to your model. All items should be instances of classes derived
        from `_DenseColumn` such as `numeric_column`, `embedding_column`,
        `bucketized_column`, `indicator_column`. If you have categorical
        features, you can wrap them with an `embedding_column` or
        `indicator_column`.
      weight_collections: A list of collection names to which the Variable will
        be added. Note that variables will also be added to collections
        `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
      trainable: If `True` also add the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      cols_to_vars: If not `None`, must be a dictionary that will be filled with
        a mapping from `_FeatureColumn` to list of `Variable`s.  For example,
        after the call, we might have cols_to_vars = {_EmbeddingColumn(
        categorical_column=_HashedCategoricalColumn( key='sparse_feature',
        hash_bucket_size=5, dtype=tf.string), dimension=10): [<tf.Variable
        'some_variable:0' shape=(5, 10), <tf.Variable 'some_variable:1'
          shape=(5, 10)]} If a column creates no variables, its value will be an
          empty list.
      scope: A name or variable scope to use
      cols_to_output_tensors: If not `None`, must be a dictionary that will be
        filled with a mapping from '_FeatureColumn' to the associated output
        `Tensor`s.
      from_template: True if the method is being instantiated from a
        `make_template`.

    Returns:
      A `Tensor` which represents input layer of a model. Its shape
      is (batch_size, first_layer_dimension) and its dtype is `float32`.
      first_layer_dimension is determined based on given `feature_columns`.

    Raises:
      ValueError: features and feature_columns have different lengths.
    """

        local_cols_to_output_tensors = {}
        input_layer = original_input_layer(
            features=features,
            feature_columns=feature_columns,
            weight_collections=weight_collections,
            trainable=trainable,
            cols_to_vars=cols_to_vars,
            scope=scope,
            cols_to_output_tensors=local_cols_to_output_tensors,
            from_template=from_template)

        if cols_to_output_tensors is not None:
            cols_to_output_tensors = local_cols_to_output_tensors

        # Annotate features.
        # These are the parsed Tensors, before embedding.

        # Only annotate features used by FeatureColumns.
        # We figure which ones are used by FeatureColumns by creating a parsing
        # spec and looking at the keys.
        spec = feature_column_lib.make_parse_example_spec(feature_columns)
        for key in spec.keys():
            tensor = ops.convert_to_tensor_or_indexed_slices(features[key])
            ops.add_to_collection(
                LayerAnnotationsCollectionNames.keys(
                    LayerAnnotationsCollectionNames.UNPROCESSED_FEATURES), key)
            ops.add_to_collection(
                LayerAnnotationsCollectionNames.values(
                    LayerAnnotationsCollectionNames.UNPROCESSED_FEATURES),
                _to_any_wrapped_tensor_info(tensor))

        # Annotate feature columns.
        for column in feature_columns:
            # TODO(cyfoo): Find a better way to serialize and deserialize
            # _FeatureColumn.
            ops.add_to_collection(
                LayerAnnotationsCollectionNames.FEATURE_COLUMNS,
                serialize_feature_column(column))

        for column, tensor in local_cols_to_output_tensors.items():
            ops.add_to_collection(
                LayerAnnotationsCollectionNames.keys(
                    LayerAnnotationsCollectionNames.PROCESSED_FEATURES),
                column.name)
            ops.add_to_collection(
                LayerAnnotationsCollectionNames.values(
                    LayerAnnotationsCollectionNames.PROCESSED_FEATURES),
                _to_any_wrapped_tensor_info(tensor))

        return input_layer
Example 41
    def _testBasics(self, num_unroll, length, pad, expected_seq1_batch1,
                    expected_seq2_batch1, expected_seq1_batch2,
                    expected_seq2_batch2, expected_seq3_batch1,
                    expected_seq3_batch2, expected_seq4_batch1,
                    expected_seq4_batch2):

        with self.test_session() as sess:
            next_batch = sqss.batch_sequences_with_states(
                input_key=self.key,
                input_sequences=self.sequences,
                input_context=self.context,
                input_length=length,
                initial_states=self.initial_states,
                num_unroll=num_unroll,
                batch_size=self.batch_size,
                num_threads=3,
                # to enforce that we only move on to the next examples after finishing
                # all segments of the first ones.
                capacity=2,
                pad=pad)

            state1 = next_batch.state("state1")
            state2 = next_batch.state("state2")
            state1_update = next_batch.save_state("state1", state1 + 1)
            state2_update = next_batch.save_state("state2", state2 - 1)

            # Make sure queue runner with SQSS is added properly to meta graph def.
            # Saver requires at least one variable.
            v0 = variables.Variable(10.0, name="v0")
            ops.add_to_collection("variable_collection", v0)
            variables.global_variables_initializer()
            save = saver.Saver([v0])
            test_dir = os.path.join(test.get_temp_dir(), "sqss_test")
            filename = os.path.join(test_dir, "metafile")
            meta_graph_def = save.export_meta_graph(filename)
            qr_saved = meta_graph_def.collection_def[
                ops.GraphKeys.QUEUE_RUNNERS]
            self.assertTrue(qr_saved.bytes_list.value is not None)

            coord = coordinator.Coordinator()
            threads = queue_runner_impl.start_queue_runners(coord=coord)

            # Step 1
            (key_value, next_key_value, seq1_value, seq2_value, seq3_value,
             seq4_value, context1_value, state1_value, state2_value,
             length_value, _, _) = sess.run(
                 (next_batch.key, next_batch.next_key,
                  next_batch.sequences["seq1"], next_batch.sequences["seq2"],
                  next_batch.sequences["seq3"], next_batch.sequences["seq4"],
                  next_batch.context["context1"], state1, state2,
                  next_batch.length, state1_update, state2_update))
            expected_first_keys = set([b"00000_of_00002"])
            expected_second_keys = set([b"00001_of_00002"])
            expected_final_keys = set([b"STOP"])

            self.assertEqual(expected_first_keys, self._prefix(key_value))
            self.assertEqual(expected_second_keys,
                             self._prefix(next_key_value))
            self.assertAllEqual(
                np.tile(self.context["context1"], (self.batch_size, 1)),
                context1_value)
            self.assertAllEqual(expected_seq1_batch1, seq1_value)
            self.assertAllEqual(expected_seq2_batch1, seq2_value)
            self.assertAllEqual(expected_seq3_batch1.indices,
                                seq3_value.indices)
            self.assertAllEqual(expected_seq3_batch1.values, seq3_value.values)
            self.assertAllEqual(expected_seq3_batch1.dense_shape,
                                seq3_value.dense_shape)
            self.assertAllEqual(expected_seq4_batch1.indices,
                                seq4_value.indices)
            self.assertAllEqual(expected_seq4_batch1.values, seq4_value.values)
            self.assertAllEqual(expected_seq4_batch1.dense_shape,
                                seq4_value.dense_shape)
            self.assertAllEqual(
                np.tile(self.initial_states["state1"],
                        (self.batch_size, 1, 1)), state1_value)
            self.assertAllEqual(
                np.tile(self.initial_states["state2"], (self.batch_size, 1)),
                state2_value)
            self.assertAllEqual(length_value, [num_unroll, num_unroll])

            # Step 2
            (key_value, next_key_value, seq1_value, seq2_value, seq3_value,
             seq4_value, context1_value, state1_value, state2_value,
             length_value, _, _) = sess.run(
                 (next_batch.key, next_batch.next_key,
                  next_batch.sequences["seq1"], next_batch.sequences["seq2"],
                  next_batch.sequences["seq3"], next_batch.sequences["seq4"],
                  next_batch.context["context1"], state1, state2,
                  next_batch.length, state1_update, state2_update))

            self.assertEqual(expected_second_keys, self._prefix(key_value))
            self.assertEqual(expected_final_keys, self._prefix(next_key_value))
            self.assertAllEqual(
                np.tile(self.context["context1"], (self.batch_size, 1)),
                context1_value)
            self.assertAllEqual(expected_seq1_batch2, seq1_value)
            self.assertAllEqual(expected_seq2_batch2, seq2_value)
            self.assertAllEqual(expected_seq3_batch2.indices,
                                seq3_value.indices)
            self.assertAllEqual(expected_seq3_batch2.values, seq3_value.values)
            self.assertAllEqual(expected_seq3_batch2.dense_shape,
                                seq3_value.dense_shape)
            self.assertAllEqual(expected_seq4_batch2.indices,
                                seq4_value.indices)
            self.assertAllEqual(expected_seq4_batch2.values, seq4_value.values)
            self.assertAllEqual(expected_seq4_batch2.dense_shape,
                                seq4_value.dense_shape)
            self.assertAllEqual(
                1 + np.tile(self.initial_states["state1"],
                            (self.batch_size, 1, 1)), state1_value)
            self.assertAllEqual(
                -1 + np.tile(self.initial_states["state2"],
                             (self.batch_size, 1)), state2_value)
            self.assertAllEqual([1, 1], length_value)

            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=2)
Example no. 42
0
def _Collect(val, collections, default_collections):
    if collections is None:
        collections = default_collections
    for key in collections:
        ops.add_to_collection(key, val)
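The helper above is the routing step shared by summary ops: when the caller passes `collections=None`, the value lands in the supplied defaults. A minimal usage sketch (the tensor value and collection key are illustrative):

from tensorflow.python.framework import ops

# Illustrative: with no explicit collections, the value is routed into
# each of the default collection keys.
val = ops.convert_to_tensor(1.0)
_Collect(val, collections=None,
         default_collections=[ops.GraphKeys.SUMMARIES])
assert val in ops.get_collection(ops.GraphKeys.SUMMARIES)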
Example no. 43
0
 def __init__(self):
   # Add self to this graph's Stochastic Tensor collection for
   # purposes of later performing correct surrogate loss calculation.
   ops.add_to_collection(STOCHASTIC_TENSOR_COLLECTION, self)
Example no. 44
0
    def _train_model(self,
                     input_fn,
                     steps,
                     feed_fn=None,
                     init_op=None,
                     init_feed_fn=None,
                     init_fn=None,
                     device_fn=None,
                     monitors=None,
                     log_every_steps=100,
                     fail_on_nan_loss=True,
                     max_steps=None):
        # TODO(wicke): Remove this once Model and associated code are gone.
        if hasattr(self._config, 'execution_mode'):
            if self._config.execution_mode not in ('all', 'train'):
                return

            # Stagger startup of worker sessions based on task id.
            sleep_secs = min(
                self._config.training_worker_max_startup_secs,
                self._config.task *
                self._config.training_worker_session_startup_stagger_secs)
            if sleep_secs:
                logging.info('Waiting %d secs before starting task %d.',
                             sleep_secs, self._config.task)
                time.sleep(sleep_secs)

        # Device allocation
        device_fn = device_fn or self._device_fn

        self._graph = ops.Graph()
        with self._graph.as_default() as g, g.device(device_fn):
            random_seed.set_random_seed(self._config.tf_random_seed)
            global_step = contrib_framework.create_global_step(g)
            features, targets = input_fn()
            self._check_inputs(features, targets)
            train_op, loss_op = self._get_train_ops(features, targets)

            # Add default monitors.
            if monitors is None:
                monitors = []

            hooks = [
                m for m in monitors
                if isinstance(m, session_run_hook.SessionRunHook)
            ]

            deprecated_monitors = [
                m for m in monitors
                if not isinstance(m, session_run_hook.SessionRunHook)
            ]

            supervisor_is_chief = self._config.is_chief
            if not supervisor_is_chief:
                # Prune list of monitor to the ones runnable on all workers.
                deprecated_monitors = [
                    m for m in deprecated_monitors if m.run_on_all_workers
                ]

            # Setup monitors.
            for monitor in deprecated_monitors:
                monitor.set_estimator(self)

            if deprecated_monitors:
                hooks.append(
                    monitor_lib.RunHookAdapterForMonitors(deprecated_monitors))

            ops.add_to_collection(ops.GraphKeys.LOSSES, loss_op)
            return graph_actions._monitored_train(  # pylint: disable=protected-access
                graph=g,
                output_dir=self._model_dir,
                train_op=train_op,
                loss_op=loss_op,
                global_step_tensor=global_step,
                init_op=init_op,
                init_feed_dict=init_feed_fn()
                if init_feed_fn is not None else None,
                init_fn=init_fn,
                log_every_steps=log_every_steps,
                supervisor_is_chief=supervisor_is_chief,
                supervisor_master=self._config.master,
                supervisor_save_model_secs=self._config.save_checkpoints_secs,
                supervisor_save_model_steps=(
                    self._config.save_checkpoints_steps),
                supervisor_save_summaries_steps=(
                    self._config.save_summary_steps),
                keep_checkpoint_max=self._config.keep_checkpoint_max,
                feed_fn=feed_fn,
                steps=steps,
                fail_on_nan_loss=fail_on_nan_loss,
                hooks=hooks,
                max_steps=max_steps)
Example no. 45
0
    def __init__(
            self,  # pylint: disable=super-init-not-called
            initial_value=None,
            trainable=None,
            caching_device=None,
            name=None,
            dtype=None,
            constraint=None,
            add_initializers_to=None,
            lifted_initializer_graph=None,
            synchronization=None,
            aggregation=None,
            **unused_kwargs):
        """Creates a variable.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
         to a shape before being used here.)
      trainable: If `True`, GradientTapes automatically watch uses of this
        Variable.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
        a Tensor) or float32 will be used (if it is a Python object
        convertible to a Tensor).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
      add_initializers_to: if not None and not in legacy graph mode, the
        initializer tensor will be added to this map in addition to adding the
        assignment to the function.
      lifted_initializer_graph: FuncGraph to try to lift initializers to.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
      RuntimeError: If called outside of a function definition.
    """
        if not ops.inside_function():
            # If we've been init_scope()d out of the function definition,
            # there's nothing to do here; we can't really do the capturing
            # or conditional logic.
            resource_variable_ops.ResourceVariable.__init__(
                self,
                initial_value=initial_value,
                trainable=trainable,
                caching_device=caching_device,
                name=name,
                dtype=dtype,
                constraint=constraint)
            return
        with ops.init_scope():
            self._in_graph_mode = not context.executing_eagerly()
        if initial_value is None:
            raise ValueError("initial_value must be specified.")
        init_from_fn = callable(initial_value)

        if constraint is not None and not callable(constraint):
            raise ValueError("The `constraint` argument must be a callable.")

        if isinstance(initial_value, trackable.CheckpointInitialValue):
            self._maybe_initialize_trackable()
            self._update_uid = initial_value.checkpoint_position.restore_uid
            initial_value = initial_value.wrapped_value

        synchronization, aggregation, trainable = (
            variables.validate_synchronization_aggregation_trainable(
                synchronization, aggregation, trainable, name))
        self._trainable = trainable
        self._synchronization = synchronization
        self._aggregation = aggregation
        self._save_slice_info = None
        self._initial_value = None
        self._initializer_op = None
        self._is_initialized_op = None
        self._graph_element = None
        self._cached_value = None
        # Store the graph key so optimizers know how to only retrieve variables from
        # this graph. Guaranteed to be the same as the eager graph_key.
        self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
        with ops.name_scope(name, "Variable",
                            [] if init_from_fn else [initial_value]) as name:
            # pylint: disable=protected-access
            with ops.init_scope():
                handle_name = ops.name_from_scope_name(name)
                unique_id = "%s_%d" % (handle_name, ops.uid())
                shared_name = context.shared_name(unique_id)
            with ops.name_scope("Initializer"), ops.device(None):
                initial_value = ops.convert_to_tensor(
                    initial_value() if init_from_fn else initial_value,
                    name="initial_value",
                    dtype=dtype)
            with ops.init_scope():
                self._handle = resource_variable_ops.eager_safe_variable_handle(
                    initial_value=initial_value,
                    shared_name=shared_name,
                    name=name,
                    graph_mode=self._in_graph_mode)
            self._shape = initial_value.shape
            self._unique_id = unique_id
            self._handle_name = handle_name + ":0"
            self._dtype = initial_value.dtype.base_dtype
            self._constraint = constraint
            assert initial_value is not None
            if self._in_graph_mode:
                with ops.init_scope():
                    outer_graph = ops.get_default_graph()
                func_graph = ops.get_default_graph()
                function_placeholders = (func_graph.inputs +
                                         func_graph.internal_captures)
                placeholder_ops = set(
                    [tensor.op for tensor in function_placeholders])
                lifted_initializer = lift_to_graph.lift_to_graph(
                    [initial_value],
                    outer_graph,
                    disallowed_placeholders=placeholder_ops)[initial_value]
                with ops.init_scope():
                    self._initial_value = lifted_initializer
                    with ops.name_scope("IsInitialized"):
                        self._is_initialized_op = (
                            resource_variable_ops.var_is_initialized_op(
                                self._handle))
                    if initial_value is not None:
                        with ops.name_scope("Assign") as n, ops.colocate_with(
                                self._handle):
                            self._initializer_op = resource_variable_ops.assign_variable_op(
                                self._handle, lifted_initializer, name=n)
                    with ops.name_scope("Read"), ops.colocate_with(
                            self._handle):
                        # Manually assign reads to the handle's device to avoid log
                        # messages.
                        with ops.device(self._handle.device):
                            value = self._read_variable_op()
                        self._graph_element = value
                    ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, self)
            else:
                if add_initializers_to is not None:
                    add_initializers_to[self] = initial_value

                def assign_fn():
                    with ops.name_scope("Assign") as n, ops.colocate_with(
                            self._handle):
                        resource_variable_ops.assign_variable_op(self._handle,
                                                                 initial_value,
                                                                 name=n)
                        # Returning values to keep tf.cond happy.
                    return ops.convert_to_tensor(1)

                def not_assign_fn():
                    return ops.convert_to_tensor(0)

                # Note: this cond is always guaranteed to run because we're inside a
                # defun which will insert automatic control dependencies.
                control_flow_ops.cond(
                    resource_variable_ops.var_is_initialized_op(self._handle),
                    not_assign_fn, assign_fn)

        # After the handle has been created, set up a way to clean it up when
        # executing eagerly. We'll hold the only reference to the deleter, so that
        # when this object is garbage collected the deleter will be too. This
        # means ResourceVariables can be part of reference cycles without those
        # cycles being uncollectable.
        if not self._in_graph_mode:
            self._handle_deleter = resource_variable_ops.EagerResourceDeleter(
                handle=self._handle, handle_device=self._handle.device)
        self._cached_shape_as_list = None
Example no. 46
0
 def fn():
     x_const = constant_op.constant(ops.get_collection('x')[0])
     y_const = constant_op.constant(ops.get_collection('y')[0])
     z = math_ops.add(x_const, y_const)
     ops.add_to_collection('z', 7)
     return z
Example no. 47
0
def write(tag, tensor, step=None, metadata=None, name=None):
    """Writes a generic summary to the default SummaryWriter if one exists.

  This exists primarily to support the definition of type-specific summary ops
  like scalar() and image(), and is not intended for direct use unless defining
  a new type-specific summary op.

  Args:
    tag: string tag used to identify the summary (e.g. in TensorBoard), usually
      generated with `tf.summary.summary_scope`
    tensor: the Tensor holding the summary data to write or a callable that
      returns this Tensor. If a callable is passed, it will only be called when
      a default SummaryWriter exists and the recording condition specified by
      `record_if()` is met.
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.
    metadata: Optional SummaryMetadata, as a proto or serialized bytes
    name: Optional string name for this op.

  Returns:
    True on success, or false if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
    with ops.name_scope(name, "write_summary") as scope:
        if _summary_state.writer is None:
            return constant_op.constant(False)
        if step is None:
            step = get_step()
            if step is None:
                raise ValueError("No step set via 'step' argument or "
                                 "tf.summary.experimental.set_step()")
        if metadata is None:
            serialized_metadata = b""
        elif hasattr(metadata, "SerializeToString"):
            serialized_metadata = metadata.SerializeToString()
        else:
            serialized_metadata = metadata

        def record():
            """Record the actual summary and return True."""
            # Note the identity to move the tensor to the CPU.
            with ops.device("cpu:0"):
                summary_tensor = tensor() if callable(
                    tensor) else array_ops.identity(tensor)
                write_summary_op = gen_summary_ops.write_summary(
                    _summary_state.writer._resource,  # pylint: disable=protected-access
                    step,
                    summary_tensor,
                    tag,
                    serialized_metadata,
                    name=scope)
                with ops.control_dependencies([write_summary_op]):
                    return constant_op.constant(True)

        op = smart_cond.smart_cond(_should_record_summaries_v2(),
                                   record,
                                   _nothing,
                                   name="summary_cond")
        if not context.executing_eagerly():
            ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)  # pylint: disable=protected-access
        return op
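For reference, a hedged sketch of driving this op through the public TF 2.x summary API; the logdir and tag below are illustrative, and a default writer plus a step must be set for the write to succeed:

import tensorflow as tf

writer = tf.summary.create_file_writer("/tmp/tb_logs")  # illustrative path
with writer.as_default():
    tf.summary.experimental.set_step(0)
    # Returns a boolean tensor: True when a default writer exists and the
    # recording condition is met, False otherwise.
    wrote = tf.summary.write("my_tag", tf.constant(1.0))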
Example no. 48
0
 def true_fn():
     z = math_ops.add(x, y)
     ops.add_to_collection("z", 7)
     return math_ops.mul(x, z)
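Branch functions like this are traced once while the graph is constructed, so the `add_to_collection` side effect fires at graph-build time regardless of which branch later executes. A small sketch of that behavior, assuming TF1-style graph mode via the compat API:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
x, y = tf.constant(1.0), tf.constant(2.0)

def true_fn():
    z = tf.math.add(x, y)
    tf.add_to_collection("z", 7)
    return tf.math.multiply(x, z)

out = tf.cond(tf.constant(True), true_fn, lambda: x)
# Both branches were traced during construction, so the collection
# already holds 7 before any session ever runs.
assert tf.get_collection("z") == [7]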
Example no. 49
0
    def __init__(self,
                 initial_value,
                 trainable=True,
                 collections=None,
                 validate_shape=True,
                 name=None):
        """Creates a new variable with value `initial_value`.

    The new variable is added to the graph collections listed in `collections`,
    which defaults to `[GraphKeys.VARIABLES]`.

    If `trainable` is `True` the variable is also added to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES`.

    This constructor creates both a `variable` Op and an `assign` Op to set the
    variable to its initial value.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`.
        The initial value for the Variable. Must have a shape specified unless
        `validate_shape` is set to False.
      trainable: If `True`, the default, also adds the variable to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
        the default list of variables to use by the `Optimizer` classes.
      collections: List of graph collections keys. The new variable is added to
        these collections. Defaults to `[GraphKeys.VARIABLES]`.
      validate_shape: If `False`, allows the variable to be initialized with a
        value of unknown shape. If `True`, the default, the shape of
        `initial_value` must be known.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.

    Returns:
      A Variable.

    Raises:
      ValueError: If the initial value does not have a shape and
        `validate_shape` is `True`.
    """
        if collections is None:
            collections = [ops.GraphKeys.VARIABLES]
        if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
            # pylint: disable=g-no-augmented-assignment
            #
            # Pylint wants us to write collections += [...TRAINABLE_VARIABLES] which
            # is not the same (it modifies the list in place.)  Here, we only want to
            # modify the value of the variable, not the list.
            collections = collections + [ops.GraphKeys.TRAINABLE_VARIABLES]
            # pylint: enable=g-no-augmented-assignment
        with ops.op_scope([initial_value], name, "Variable") as name:
            self._initial_value = ops.convert_to_tensor(initial_value,
                                                        name="initial_value")
            if not self._initial_value.get_shape().is_fully_defined():
                if validate_shape:
                    raise ValueError(
                        "initial_value must have a shape specified: %s" %
                        self._initial_value)
                self._variable = state_ops.variable_op(
                    [],
                    self._initial_value.dtype.base_dtype,
                    set_shape=False,
                    name=name)
                with ops.device(self._variable.device):
                    self._initializer_op = state_ops.assign(
                        self._variable,
                        self._initial_value,
                        validate_shape=False).op
                    self._snapshot = array_ops.identity(self._variable,
                                                        name="read")
            else:
                self._variable = state_ops.variable_op(
                    self._initial_value.get_shape(),
                    self._initial_value.dtype.base_dtype,
                    name=name)
                with ops.device(self._variable.device):
                    self._initializer_op = state_ops.assign(
                        self._variable, self._initial_value).op
                    self._snapshot = array_ops.identity(self._variable,
                                                        name="read")
        for key in collections:
            ops.add_to_collection(key, self)
        self._save_slice_info = None
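A short sketch of the collection behavior this constructor implements, assuming the TF1-style API where the docstring's `GraphKeys.VARIABLES` corresponds to today's `GraphKeys.GLOBAL_VARIABLES`:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
v = tf.Variable(3.0, name="v")                    # trainable by default
w = tf.Variable(4.0, trainable=False, name="w")

# trainable=True adds the variable to TRAINABLE_VARIABLES on top of the
# global variables collection; trainable=False skips the former.
assert v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
assert w not in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
assert w in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)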
Example no. 50
0
def _get_ready_op():
  ready_op = _get_first_op_from_collection(ops.GraphKeys.READY_OP)
  if ready_op is None:
    ready_op = variables.report_uninitialized_variables()
    ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
  return ready_op
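The ready op produced here is `report_uninitialized_variables()`, which returns a 1-D string tensor of variable names; an empty result means the model is ready. A minimal sketch, assuming TF1-style graph mode:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
v = tf.Variable(1.0, name="v")
ready_op = tf.report_uninitialized_variables()
with tf.Session() as sess:
    assert b"v" in sess.run(ready_op)        # not yet initialized
    sess.run(tf.global_variables_initializer())
    assert sess.run(ready_op).size == 0      # ready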
Example no. 51
0
  def _add_iterator_ops_to_collection(self,
                                      init_op,
                                      get_next,
                                      ds_fn,
                                      sparse_tensors=False):
    ops.add_to_collection("iterator_ops", init_op)
    # `get_next` may be a tuple e.g. in TensorSliceDataset. Since Collections
    # do not support tuples we flatten the tensors and restore the shape in
    # `_get_iterator_ops_from_collection`.
    if sparse_tensors:  # specific for deprecated `from_sparse_tensor_slices`.
      ops.add_to_collection("iterator_ops", get_next.indices)
      ops.add_to_collection("iterator_ops", get_next.values)
      ops.add_to_collection("iterator_ops", get_next.dense_shape)
      return

    get_next_list = nest.flatten(get_next)
    for i, output_class in enumerate(
        nest.flatten(self._get_output_classes(ds_fn))):
      if output_class is sparse_tensor.SparseTensor:
        ops.add_to_collection("iterator_ops", get_next_list[i].indices)
        ops.add_to_collection("iterator_ops", get_next_list[i].values)
        ops.add_to_collection("iterator_ops", get_next_list[i].dense_shape)
      else:
        ops.add_to_collection("iterator_ops", get_next_list[i])
Example no. 52
0
    def apply(self, var_list=None):
        """Maintains moving averages of variables.

    `var_list` must be a list of `Variable` or `Tensor` objects.  This method
    creates shadow variables for all elements of `var_list`.  Shadow variables
    for `Variable` objects are initialized to the variable's initial value.
    They will be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.
    For `Tensor` objects, the shadow variables are initialized to 0.

    Shadow variables are created with `trainable=False` and added to the
    `GraphKeys.ALL_VARIABLES` collection.  They will be returned by calls to
    `tf.all_variables()`.

    Returns an op that updates all shadow variables as described above.

    Note that `apply()` can be called multiple times with different lists of
    variables.

    Args:
      var_list: A list of Variable or Tensor objects. The variables
        and Tensors must be of types float32 or float64.

    Returns:
      An Operation that updates the moving averages.

    Raises:
      TypeError: If the arguments are not all float32 or float64.
      ValueError: If the moving average of one of the variables is already
        being computed.
    """
        # TODO(touts): op_scope
        if var_list is None:
            var_list = variables.trainable_variables()
        for var in var_list:
            if var.dtype.base_dtype not in [dtypes.float32, dtypes.float64]:
                raise TypeError("The variables must be float or double: %s" %
                                var.name)
            if var in self._averages:
                raise ValueError("Moving average already computed for: %s" %
                                 var.name)

            # For variables: to lower communication bandwidth across devices we keep
            # the moving averages on the same device as the variables. For other
            # tensors, we rely on the existing device allocation mechanism.
            with ops.control_dependencies(None):
                if isinstance(var, variables.Variable):
                    avg = slot_creator.create_slot(var,
                                                   var.initialized_value(),
                                                   self._name,
                                                   colocate_with_primary=True)
                    # NOTE(mrry): We only add `tf.Variable` objects to the
                    # `MOVING_AVERAGE_VARIABLES` collection.
                    ops.add_to_collection(
                        ops.GraphKeys.MOVING_AVERAGE_VARIABLES, var)
                else:
                    avg = slot_creator.create_zeros_slot(
                        var,
                        self._name,
                        colocate_with_primary=(var.op.type == "Variable"))
            self._averages[var] = avg

        with ops.name_scope(self._name) as scope:
            decay = ops.convert_to_tensor(self._decay, name="decay")
            if self._num_updates is not None:
                num_updates = math_ops.cast(self._num_updates,
                                            dtypes.float32,
                                            name="num_updates")
                decay = math_ops.minimum(decay, (1.0 + num_updates) /
                                         (10.0 + num_updates))
            updates = []
            for var in var_list:
                updates.append(
                    assign_moving_average(self._averages[var], var, decay))
            return control_flow_ops.group(*updates, name=scope)
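A compact usage sketch of the public wrapper around this method (the decay value and variable are illustrative):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
var = tf.Variable(0.0)
ema = tf.train.ExponentialMovingAverage(decay=0.9)
maintain_op = ema.apply([var])   # creates the shadow variable
shadow = ema.average(var)        # reads the moving average

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(var.assign(1.0))
    sess.run(maintain_op)        # shadow := 0.9 * shadow + 0.1 * var
    print(sess.run(shadow))      # ~0.1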
Example no. 53
0
    def _train_model(self, input_fn, hooks):
        all_hooks = []
        with ops.Graph().as_default() as g, g.device(self._device_fn):
            random_seed.set_random_seed(self._config.tf_random_seed)
            global_step = training.get_or_create_global_step(g)
            features, labels = self._get_features_and_labels_from_input_fn(
                input_fn, Modes.TRAIN)
            estimator_spec = self._call_model_fn(features, labels, Modes.TRAIN)
            ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)
            all_hooks.extend([
                plx_hooks.NanTensorHook(estimator_spec.loss),
                plx_hooks.StepLoggingTensorHook(
                    {
                        'loss': estimator_spec.loss,
                        'step': global_step
                    },
                    every_n_iter=100)
            ])
            all_hooks.extend(hooks)
            all_hooks.extend(estimator_spec.training_hooks)

            scaffold = estimator_spec.scaffold
            if not (scaffold.saver
                    or ops.get_collection(ops.GraphKeys.SAVERS)):
                ops.add_to_collection(
                    ops.GraphKeys.SAVERS,  # TODO: remove non-restorable vars
                    saver.Saver(
                        sharded=True,
                        max_to_keep=self._config.keep_checkpoint_max,
                        keep_checkpoint_every_n_hours=(
                            self._config.keep_checkpoint_every_n_hours),
                        defer_build=True,
                        save_relative_paths=True))

            chief_hooks = []
            if self._config.save_checkpoints_secs or self._config.save_checkpoints_steps:
                saver_hook_exists = any([
                    isinstance(h, plx_hooks.StepCheckpointSaverHook)
                    for h in (all_hooks + chief_hooks +
                              list(estimator_spec.training_chief_hooks))
                ])
                if not saver_hook_exists:
                    chief_hooks += [
                        plx_hooks.StepCheckpointSaverHook(
                            self._model_dir,
                            save_secs=self._config.save_checkpoints_secs,
                            save_steps=self._config.save_checkpoints_steps,
                            scaffold=scaffold)
                    ]
            if self._config.save_summary_steps:
                saver_hook_exists = any([
                    isinstance(h, plx_hooks.StepSummarySaverHook)
                    for h in (all_hooks + chief_hooks +
                              list(estimator_spec.training_chief_hooks))
                ])
                if not saver_hook_exists:
                    chief_hooks += [
                        plx_hooks.StepSummarySaverHook(
                            scaffold=scaffold,
                            save_steps=self._config.save_summary_steps,
                            output_dir=self._model_dir,
                        )
                    ]

            with monitored_session.MonitoredTrainingSession(
                    master=self._config.master,
                    is_chief=self._config.is_chief,
                    checkpoint_dir=self._model_dir,
                    scaffold=scaffold,
                    hooks=all_hooks,
                    chief_only_hooks=chief_hooks +
                    list(estimator_spec.training_chief_hooks),
                    save_checkpoint_secs=0,  # Checkpoint saving is handled by a hook.
                    save_summaries_steps=0,  # Summary saving is handled by a hook.
                    config=self._session_config) as mon_sess:
                loss = None
                while not mon_sess.should_stop():
                    _, loss = mon_sess.run(
                        [estimator_spec.train_op, estimator_spec.loss])
            return loss
Example no. 54
0
    def _train_model(self, input_fn, hooks):
        all_hooks = []
        self._graph = ops.Graph()
        with self._graph.as_default() as g, g.device(self._device_fn):
            random_seed.set_random_seed(self._config.tf_random_seed)
            global_step = training.get_or_create_global_step(g)
            features, labels = input_fn()
            estimator_spec = self._call_model_fn(features, labels,
                                                 ModeKeys.TRAIN)
            all_hooks.extend([
                plx_hooks.NanTensorHook(estimator_spec.loss),
                plx_hooks.LoggingTensorHook(
                    {
                        'loss': estimator_spec.loss,
                        'step': global_step
                    },
                    every_n_iter=100)
            ])
            all_hooks.extend(hooks)
            all_hooks.extend(estimator_spec.training_hooks)

            scaffold = estimator_spec.scaffold or monitored_session.Scaffold()
            if not (scaffold.saver
                    or ops.get_collection(ops.GraphKeys.SAVERS)):
                ops.add_to_collection(
                    ops.GraphKeys.SAVERS,  # TODO: remove non-restorable vars
                    saver.Saver(
                        sharded=True,  # TODO `var_list`
                        max_to_keep=self._config.keep_checkpoint_max,
                        defer_build=True))

            chief_hooks = []
            if self._config.save_checkpoints_secs or self._config.save_checkpoints_steps:
                saver_hook_exists = any([
                    isinstance(h, plx_hooks.CheckpointSaverHook)
                    for h in (all_hooks + estimator_spec.training_hooks +
                              chief_hooks +
                              estimator_spec.training_chief_hooks)
                ])
                if not saver_hook_exists:
                    chief_hooks = [
                        plx_hooks.CheckpointSaverHook(
                            self._model_dir,
                            save_secs=self._config.save_checkpoints_secs,
                            save_steps=self._config.save_checkpoints_steps,
                            scaffold=scaffold)
                    ]
            with monitored_session.MonitoredTrainingSession(
                    master=self._config.master,
                    is_chief=self._config.is_chief,
                    checkpoint_dir=self._model_dir,
                    scaffold=scaffold,
                    hooks=all_hooks + estimator_spec.training_hooks,
                    chief_only_hooks=chief_hooks +
                    estimator_spec.training_chief_hooks,
                    save_checkpoint_secs=0,  # Saving is handled by a hook.
                    save_summaries_steps=self._config.save_summary_steps,
                    config=self._session_config) as mon_sess:
                loss = None
                while not mon_sess.should_stop():
                    _, loss = mon_sess.run(
                        [estimator_spec.train_op, estimator_spec.loss])
            summary_io.SummaryWriterCache.clear()
            return loss
Example no. 55
0
    def __init__(self,
                 key_dtype,
                 value_dtype,
                 default_value,
                 shared_name=None,
                 name="MutableHashTable",
                 checkpoint=True):
        """Creates an empty `MutableHashTable` object.

    Creates a table, the type of its keys and values are specified by key_dtype
    and value_dtype, respectively.

    Args:
      key_dtype: the type of the key tensors.
      value_dtype: the type of the value tensors.
      default_value: The value to use if a key is missing in the table.
      shared_name: If non-empty, this table will be shared under
        the given name across multiple sessions.
      name: A name for the operation (optional).
      checkpoint: if True, the contents of the table are saved to and restored
        from checkpoints. If `shared_name` is empty for a checkpointed table, it
        is shared using the table node name.

    Returns:
      A `MutableHashTable` object.

    Raises:
      ValueError: If checkpoint is True and no name was specified.
    """
        self._default_value = ops.convert_to_tensor(default_value,
                                                    dtype=value_dtype)
        self._value_shape = self._default_value.get_shape()

        # The table must be shared if checkpointing is requested for multi-worker
        # training to work correctly. Use the node name if no shared_name has been
        # explicitly specified.
        use_node_name_sharing = checkpoint and shared_name is None
        # pylint: disable=protected-access
        if self._default_value.get_shape().ndims == 0:
            self._table_ref = gen_lookup_ops._mutable_hash_table_v2(
                shared_name=shared_name,
                use_node_name_sharing=use_node_name_sharing,
                key_dtype=key_dtype,
                value_dtype=value_dtype,
                name=name)
        else:
            self._table_ref = gen_lookup_ops._mutable_hash_table_of_tensors_v2(
                shared_name=shared_name,
                use_node_name_sharing=use_node_name_sharing,
                key_dtype=key_dtype,
                value_dtype=value_dtype,
                value_shape=self._default_value.get_shape(),
                name=name)
        # pylint: enable=protected-access
        super(MutableHashTable,
              self).__init__(key_dtype, value_dtype,
                             self._table_ref.op.name.split("/")[-1])

        if checkpoint:
            saveable = MutableHashTable._Saveable(self, name)
            ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
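The modern public counterpart of this class lives at `tf.lookup.experimental.MutableHashTable`; a hedged sketch of equivalent usage in TF 2.x eager mode:

import tensorflow as tf

table = tf.lookup.experimental.MutableHashTable(
    key_dtype=tf.string, value_dtype=tf.int64, default_value=-1)
table.insert(tf.constant(["a", "b"]), tf.constant([1, 2], tf.int64))
# Missing keys fall back to default_value.
print(table.lookup(tf.constant(["a", "missing"])).numpy())  # [ 1 -1]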
Example no. 56
0
    def _testScopedExport(self, test_dir, exported_filenames):
        graph = ops.Graph()
        with graph.as_default():
            # Creates an inference graph.
            # Hidden 1
            colocate_constraint = constant_op.constant(1.2, name="constraint")
            images = constant_op.constant(1.2,
                                          dtypes.float32,
                                          shape=[100, 28],
                                          name="images")
            with ops.name_scope("hidden1"):
                with graph.colocate_with(colocate_constraint.op):
                    weights1 = variables.Variable(random_ops.truncated_normal(
                        [28, 128], stddev=1.0 / math.sqrt(float(28))),
                                                  name="weights")
                # The use of control_flow_ops.cond here is purely to add test
                # coverage for the save and restore of control flow context
                # (which doesn't make any sense here from a machine learning
                # perspective). The typical biases term is a simple Variable
                # without the conditions.
                biases1 = variables.Variable(control_flow_ops.cond(
                    math_ops.less(random.random(),
                                  0.5), lambda: array_ops.ones([128]),
                    lambda: array_ops.zeros([128])),
                                             name="biases")
                hidden1 = nn_ops.relu(
                    math_ops.matmul(images, weights1) + biases1)

            # Hidden 2
            with ops.name_scope("hidden2"):
                weights2 = variables.Variable(random_ops.truncated_normal(
                    [128, 32], stddev=1.0 / math.sqrt(float(128))),
                                              name="weights")

                # The use of control_flow_ops.while_loop here is purely to add
                # test coverage for the save and restore of control flow
                # context (which doesn't make any sense here from a machine
                # learning perspective). The typical biases term is a simple
                # Variable without the conditions.
                def loop_cond(it, _):
                    return it < 2

                def loop_body(it, biases2):
                    biases2 += constant_op.constant(0.1, shape=[32])
                    return it + 1, biases2

                _, biases2 = control_flow_ops.while_loop(
                    loop_cond, loop_body, [
                        constant_op.constant(0),
                        variables.Variable(array_ops.zeros([32]),
                                           name="biases")
                    ])
                hidden2 = nn_ops.relu(
                    math_ops.matmul(hidden1, weights2) + biases2)
            # Linear
            with ops.name_scope("softmax_linear"):
                weights3 = variables.Variable(random_ops.truncated_normal(
                    [32, 10], stddev=1.0 / math.sqrt(float(32))),
                                              name="weights")
                biases3 = variables.Variable(array_ops.zeros([10]),
                                             name="biases")
                logits = math_ops.matmul(hidden2, weights3) + biases3
                ops.add_to_collection("logits", logits)

            # Exports each sub-graph.
            # Exports the first one with unbound_inputs_col_name set to default.
            orig_meta_graph1, var_list = meta_graph.export_scoped_meta_graph(
                filename=os.path.join(test_dir, exported_filenames[0]),
                graph=ops.get_default_graph(),
                export_scope="hidden1")
            self.assertEqual(["biases:0", "weights:0"],
                             sorted(var_list.keys()))
            var_names = [v.name for _, v in var_list.items()]
            self.assertEqual(["hidden1/biases:0", "hidden1/weights:0"],
                             sorted(var_names))

            # Exports the rest with no unbound_inputs_col_name.
            orig_meta_graph2, _ = meta_graph.export_scoped_meta_graph(
                filename=os.path.join(test_dir, exported_filenames[1]),
                graph=ops.get_default_graph(),
                export_scope="hidden2",
                unbound_inputs_col_name=None)
            orig_meta_graph3, _ = meta_graph.export_scoped_meta_graph(
                filename=os.path.join(test_dir, exported_filenames[2]),
                graph=ops.get_default_graph(),
                export_scope="softmax_linear",
                unbound_inputs_col_name=None)

        return [orig_meta_graph1, orig_meta_graph2, orig_meta_graph3]
Example no. 57
0
  def _train_model(self, input_fn, hooks, saving_listeners):
    worker_hooks = []
    with ops.Graph().as_default() as g, g.device(self._device_fn):
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step_tensor = self._create_and_assert_global_step(g)
      global_step_read_tensor = training_util._get_or_create_global_step_read()  # pylint: disable=protected-access
      features, labels = self._get_features_and_labels_from_input_fn(
          input_fn, model_fn_lib.ModeKeys.TRAIN)
      with ops.control_dependencies([global_step_read_tensor]):
        estimator_spec = self._call_model_fn(
            features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
      # Check if the user created a loss summary, and add one if they didn't.
      # We assume here that the summary is called 'loss'. If it is not, we will
      # make another one with the name 'loss' to ensure it shows up in the right
      # graph in TensorBoard.
      if not any([x.op.name == 'loss'
                  for x in ops.get_collection(ops.GraphKeys.SUMMARIES)]):
        summary.scalar('loss', estimator_spec.loss)
      ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)
      worker_hooks.extend(hooks)
      worker_hooks.extend([
          training.NanTensorHook(estimator_spec.loss),
          training.LoggingTensorHook(
              {
                  'loss': estimator_spec.loss,
                  'step': global_step_tensor
              },
              every_n_iter=100)
      ])
      worker_hooks.extend(estimator_spec.training_hooks)

      if not (estimator_spec.scaffold.saver or
              ops.get_collection(ops.GraphKeys.SAVERS)):
        ops.add_to_collection(
            ops.GraphKeys.SAVERS,
            training.Saver(
                sharded=True,
                max_to_keep=self._config.keep_checkpoint_max,
                keep_checkpoint_every_n_hours=(
                    self._config.keep_checkpoint_every_n_hours),
                defer_build=True,
                save_relative_paths=True))

      chief_hooks = []
      all_hooks = worker_hooks + list(estimator_spec.training_chief_hooks)
      saver_hooks = [
          h for h in all_hooks if isinstance(h, training.CheckpointSaverHook)]
      if (self._config.save_checkpoints_secs or
          self._config.save_checkpoints_steps):
        if not saver_hooks:
          chief_hooks = [
              training.CheckpointSaverHook(
                  self._model_dir,
                  save_secs=self._config.save_checkpoints_secs,
                  save_steps=self._config.save_checkpoints_steps,
                  scaffold=estimator_spec.scaffold)
          ]
          saver_hooks = [chief_hooks[0]]
      if saving_listeners:
        if not saver_hooks:
          raise ValueError(
              'There should be a CheckpointSaverHook to use saving_listeners. '
              'Please set one of the RunConfig.save_checkpoints_steps or '
              'RunConfig.save_checkpoints_secs.')
        else:
          # It is expected to have one CheckpointSaverHook. If multiple, we pick
          # up the first one to add listener.
          saver_hooks[0]._listeners.extend(saving_listeners)  # pylint: disable=protected-access
      with training.MonitoredTrainingSession(
          master=self._config.master,
          is_chief=self._config.is_chief,
          checkpoint_dir=self._model_dir,
          scaffold=estimator_spec.scaffold,
          hooks=worker_hooks,
          chief_only_hooks=(
              tuple(chief_hooks) + tuple(estimator_spec.training_chief_hooks)),
          save_checkpoint_secs=0,  # Saving is handled by a hook.
          save_summaries_steps=self._config.save_summary_steps,
          config=self._session_config,
          log_step_count_steps=self._config.log_step_count_steps) as mon_sess:
        loss = None
        while not mon_sess.should_stop():
          _, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
      return loss
Example no. 58
0
    def __init__(self,
                 key_dtype,
                 value_dtype,
                 default_value,
                 empty_key,
                 deleted_key,
                 initial_num_buckets=None,
                 shared_name=None,
                 name="MutableDenseHashTable",
                 checkpoint=True):
        """Creates an empty `_MutableDenseHashTable` object.

    Creates a table, the type of its keys and values are specified by key_dtype
    and value_dtype, respectively.

    Args:
      key_dtype: the type of the key tensors.
      value_dtype: the type of the value tensors.
      default_value: The value to use if a key is missing in the table.
      empty_key: the key to use to represent empty buckets internally. Must not
        be used in insert, remove or lookup operations.
      deleted_key: the key to use to represent deleted buckets internally. Must
        not be used in insert, remove or lookup operations and be different from
        the empty_key.
      initial_num_buckets: the initial number of buckets.
      shared_name: If non-empty, this table will be shared under
        the given name across multiple sessions.
      name: A name for the operation (optional).
      checkpoint: if True, the contents of the table are saved to and restored
        from checkpoints. If `shared_name` is empty for a checkpointed table, it
        is shared using the table node name.

    Returns:
      A `_MutableDenseHashTable` object.

    Raises:
      ValueError: If checkpoint is True and no name was specified.
    """
        self._default_value = ops.convert_to_tensor(default_value,
                                                    dtype=value_dtype,
                                                    name="default_value")
        self._key_dtype = key_dtype
        self._value_dtype = value_dtype
        self._initial_num_buckets = initial_num_buckets
        self._value_shape = self._default_value.get_shape()
        self._checkpoint = checkpoint
        self._name = name

        self._empty_key = ops.convert_to_tensor(empty_key,
                                                dtype=key_dtype,
                                                name="empty_key")
        self._deleted_key = ops.convert_to_tensor(deleted_key,
                                                  dtype=key_dtype,
                                                  name="deleted_key")
        if context.executing_eagerly() and shared_name is None:
            # TODO(allenl): This will leak memory due to kernel caching by the
            # shared_name attribute value (but is better than the alternative of
            # sharing everything by default when executing eagerly; hopefully creating
            # tables in a loop is uncommon).
            shared_name = "table_%d" % (ops.uid(), )
        self._shared_name = shared_name
        super(_MutableDenseHashTable, self).__init__(key_dtype, value_dtype)

        self._resource_handle = self.create_resource()
        if checkpoint:
            saveable = _MutableDenseHashTable._Saveable(self, name)
            if not context.executing_eagerly():
                ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
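A hedged TF 2.x sketch using the public `tf.lookup.experimental.DenseHashTable`; note that `empty_key` and `deleted_key` must be distinct sentinels that never appear as real keys:

import tensorflow as tf

table = tf.lookup.experimental.DenseHashTable(
    key_dtype=tf.int64, value_dtype=tf.int64, default_value=-1,
    empty_key=-2, deleted_key=-3)
table.insert(tf.constant([1, 2], tf.int64), tf.constant([10, 20], tf.int64))
print(table.lookup(tf.constant([1, 3], tf.int64)).numpy())  # [10 -1]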
Example no. 59
0
    def _train_model(self, input_fn, hooks):
        all_hooks = []
        with ops.Graph().as_default() as g, g.device(self._device_fn):
            random_seed.set_random_seed(self._config.tf_random_seed)
            global_step_tensor = training.create_global_step(g)
            with ops.device('/cpu:0'):
                features, labels = input_fn()
            estimator_spec = self._call_model_fn(features, labels,
                                                 model_fn_lib.ModeKeys.TRAIN)
            ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)
            all_hooks.extend([
                training.NanTensorHook(estimator_spec.loss),
                training.LoggingTensorHook(
                    {
                        'loss': estimator_spec.loss,
                        'step': global_step_tensor
                    },
                    every_n_iter=100)
            ])
            all_hooks.extend(hooks)
            all_hooks.extend(estimator_spec.training_hooks)

            if not (estimator_spec.scaffold.saver
                    or ops.get_collection(ops.GraphKeys.SAVERS)):
                ops.add_to_collection(
                    ops.GraphKeys.SAVERS,
                    training.Saver(
                        sharded=True,
                        max_to_keep=self._config.keep_checkpoint_max,
                        defer_build=True,
                        save_relative_paths=True))

            chief_hooks = []
            if (self._config.save_checkpoints_secs
                    or self._config.save_checkpoints_steps):
                saver_hook_exists = any([
                    isinstance(h, training.CheckpointSaverHook)
                    for h in (all_hooks + chief_hooks +
                              list(estimator_spec.training_chief_hooks))
                ])
                if not saver_hook_exists:
                    chief_hooks = [
                        training.CheckpointSaverHook(
                            self._model_dir,
                            save_secs=self._config.save_checkpoints_secs,
                            save_steps=self._config.save_checkpoints_steps,
                            scaffold=estimator_spec.scaffold)
                    ]
            with training.MonitoredTrainingSession(
                    master=self._config.master,
                    is_chief=self._config.is_chief,
                    checkpoint_dir=self._model_dir,
                    scaffold=estimator_spec.scaffold,
                    hooks=all_hooks,
                    chief_only_hooks=(
                        tuple(chief_hooks) +
                        tuple(estimator_spec.training_chief_hooks)),
                    save_checkpoint_secs=0,  # Saving is handled by a hook.
                    save_summaries_steps=self._config.save_summary_steps,
                    config=self._session_config) as mon_sess:
                loss = None
                while not mon_sess.should_stop():
                    _, loss = mon_sess.run(
                        [estimator_spec.train_op, estimator_spec.loss])
            return loss
Example no. 60
0
 def _discriminator_fn(self, inputs, _):
     ops.add_to_collection('fake_update_ops', constant_op.constant(1.0))
     return variable_scope.get_variable('dummy_d', initializer=2.0) * inputs