Example 1
	def test_simple(self):
		with self.session():
			TADDR_VALID = 'zrpull://127.0.0.1:5555'
			output = zmq_conn_handle(TADDR_VALID, ZMQ_HWM, 0)
			resources.initialize_resources(resources.local_resources()).run()
			# assertDTypeEqual does not work for the resource type: it converts
			# tf.DType to np.dtype, and resource has no NumPy equivalent.
			#self.assertDtypeEqual(output, dtypes.resource.as_numpy_type)
			self.assertEqual(type(output.dtype), type(dtypes.resource))
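As an aside, the final assertion compares `type(output.dtype)` with `type(dtypes.resource)`; both are the `DType` class, so that check passes for any dtype. A stricter standalone check is sketched below, assuming TF 2.x, where a variable's `handle` is a DT_RESOURCE tensor and stands in for the custom `zmq_conn_handle` output:

import tensorflow as tf
from tensorflow.python.framework import dtypes

# A resource variable's handle is a DT_RESOURCE tensor, so it serves as a
# stand-in for the handle returned by a custom op.
v = tf.Variable(1.0)
assert v.handle.dtype == dtypes.resource  # DType supports `==` directly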
Example 2
def run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None):
    """Run `output_dict` tensors with each input in `feed_dicts`.

  If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
  init all variables.

  Args:
    output_dict: A `dict` mapping string names to `Output` objects to run.
      Tensors must all be from the same graph.
    feed_dicts: Iterable of `dict` objects of input values to feed.
    restore_checkpoint_path: A string containing the path to a checkpoint to
      restore.

  Yields:
    A sequence of dicts of values read from `output_dict` tensors, one item
    yielded for each item in `feed_dicts`. Keys are the same as `output_dict`,
    values are the results read from the corresponding `Output` in
    `output_dict`.

  Raises:
    ValueError: if `output_dict` or `feed_dicts` is None or empty.
  """
    if not output_dict:
        raise ValueError('output_dict is invalid: %s.' % output_dict)
    if not feed_dicts:
        raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)

    graph = contrib_ops.get_graph_from_inputs(output_dict.values())
    with graph.as_default() as g:
        with tf_session.Session('') as session:
            session.run(
                resources.initialize_resources(resources.shared_resources() +
                                               resources.local_resources()))
            if restore_checkpoint_path:
                _restore_from_checkpoint(session, g, restore_checkpoint_path)
            else:
                session.run(variables.global_variables_initializer())
            session.run(variables.local_variables_initializer())
            session.run(data_flow_ops.initialize_all_tables())
            coord = coordinator.Coordinator()
            threads = None
            try:
                threads = queue_runner.start_queue_runners(session,
                                                           coord=coord)
                for f in feed_dicts:
                    yield session.run(output_dict, f)
            finally:
                coord.request_stop()
                if threads:
                    coord.join(threads, stop_grace_period_secs=120)
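A minimal usage sketch for the function above, assuming it is importable and that a TF 1.x-style graph is being built (the placeholder `x` and the key `'doubled'` are illustrative names, not part of the original API):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=[], name='x')
doubled = 2.0 * x

# One result dict is yielded per feed dict, in order.
for result in run_feeds_iter({'doubled': doubled}, [{x: 1.0}, {x: 2.0}]):
    print(result['doubled'])  # 2.0, then 4.0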
Example 3
def run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None):
  """Run `output_dict` tensors with each input in `feed_dicts`.

  If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
  init all variables.

  Args:
    output_dict: A `dict` mapping string names to `Tensor` objects to run.
      Tensors must all be from the same graph.
    feed_dicts: Iterable of `dict` objects of input values to feed.
    restore_checkpoint_path: A string containing the path to a checkpoint to
      restore.

  Yields:
    A sequence of dicts of values read from `output_dict` tensors, one item
    yielded for each item in `feed_dicts`. Keys are the same as `output_dict`,
    values are the results read from the corresponding `Tensor` in
    `output_dict`.

  Raises:
    ValueError: if `output_dict` or `feed_dicts` is None or empty.
  """
  if not output_dict:
    raise ValueError('output_dict is invalid: %s.' % output_dict)
  if not feed_dicts:
    raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)

  graph = contrib_ops.get_graph_from_inputs(output_dict.values())
  with graph.as_default() as g:
    with tf_session.Session('') as session:
      session.run(
          resources.initialize_resources(resources.shared_resources() +
                                         resources.local_resources()))
      if restore_checkpoint_path:
        _restore_from_checkpoint(session, g, restore_checkpoint_path)
      else:
        session.run(variables.global_variables_initializer())
      session.run(variables.local_variables_initializer())
      session.run(data_flow_ops.initialize_all_tables())
      coord = coordinator.Coordinator()
      threads = None
      try:
        threads = queue_runner.start_queue_runners(session, coord=coord)
        for f in feed_dicts:
          yield session.run(output_dict, f)
      finally:
        coord.request_stop()
        if threads:
          coord.join(threads, stop_grace_period_secs=120)
Example 4
  def default_local_init_op():
    """Returns an op that groups the default local init ops.

    This op is used during session initialization when a Scaffold is
    initialized without specifying the local_init_op arg. It includes
    `tf.local_variables_initializer`, `tf.tables_initializer`, and also
    initializes local session resources.

    Returns:
      The default Scaffold local init op.
    """
    return control_flow_ops.group(
        variables.local_variables_initializer(),
        lookup_ops.tables_initializer(),
        resources.initialize_resources(resources.local_resources()))
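Judging by the docstring, this is the default builder used by `Scaffold`; the sketch below shows how an equivalent op can be supplied explicitly, assuming the TF 1.x `Scaffold`/`MonitoredSession` API (the variable names are illustrative):

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import control_flow_ops, lookup_ops, resources, variables

tf.disable_eager_execution()

# Build the same grouped op by hand and pass it where the default would be used.
local_init_op = control_flow_ops.group(
    variables.local_variables_initializer(),
    lookup_ops.tables_initializer(),
    resources.initialize_resources(resources.local_resources()))

scaffold = tf.train.Scaffold(local_init_op=local_init_op)
with tf.train.MonitoredSession(
        session_creator=tf.train.ChiefSessionCreator(scaffold=scaffold)):
    pass  # local variables, tables, and local resources are initialized here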
Example 5
def _default_local_init_op():
  return control_flow_ops.group(
      variables.local_variables_initializer(),
      lookup_ops.tables_initializer(),
      resources.initialize_resources(resources.local_resources()))
Example 6
def _default_local_init_op():
  return control_flow_ops.group(
      variables.local_variables_initializer(),
      lookup_ops.tables_initializer(),
      resources.initialize_resources(resources.local_resources()))
Example 7
# Assumed imports for this snippet (TF 1.x-style API):
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import resources

def default_local_init_op():
    return tf.group(
        tf.local_variables_initializer(), tf.tables_initializer(),
        resources.initialize_resources(resources.local_resources()))
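And a short sketch of running the op above, assuming the function and imports from the previous snippet:

tf.disable_eager_execution()

with tf.Session() as sess:
    # A no-op if the graph holds no local variables, tables, or local resources.
    sess.run(default_local_init_op())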