Esempio n. 1
0
# Utility op.  Open Source. TODO(touts): move to nn?
from tensorflow.python.training.learning_rate_decay import *

# Distributed computing support
from tensorflow.core.protobuf.tensorflow_server_pb2 import ClusterDef
from tensorflow.core.protobuf.tensorflow_server_pb2 import JobDef
from tensorflow.core.protobuf.tensorflow_server_pb2 import ServerDef
from tensorflow.python.training.server_lib import ClusterSpec
from tensorflow.python.training.server_lib import Server

from tensorflow.python.util.all_util import make_all

# Build this module's public export list.  Extra modules are included for
# docstrings because:
# * Input methods in tf.train are documented in io_ops.
# * Saver methods in tf.train are documented in state_ops.
# NOTE(review): relies on `sys`, `io_ops` and `state_ops` being imported
# earlier in this file (not visible in this chunk) — confirm.
__all__ = make_all(__name__, [sys.modules[__name__], io_ops, state_ops])

# Symbols whitelisted for export without documentation.
# TODO(cwhipkey): review these and move to contrib or expose through
# documentation.
__all__.extend([
    "BytesList",
    "Example",
    "Feature",
    "FeatureList",
    "FeatureLists",
    "Features",
    "FloatList",
    "Int64List",
    "LooperThread",
    "SaverDef",
Esempio n. 2
0
  """
  logits, labels = _compute_sampled_logits(
      weights, biases, inputs, labels, num_sampled, num_classes,
      num_true=num_true,
      sampled_values=sampled_values,
      subtract_log_q=True,
      remove_accidental_hits=remove_accidental_hits,
      partition_strategy=partition_strategy,
      name=name)
  sampled_losses = nn_ops.softmax_cross_entropy_with_logits(logits, labels)
  # sampled_losses is a [batch_size] tensor.
  return sampled_losses


# TODO(cwhipkey): sigmoid and tanh should not be exposed from tf.nn.
# Start from the auto-generated export list, then whitelist "zero_fraction"
# (documented in training.py) and the submodules referenced through tf.nn.
# TODO(cwhipkey): migrate callers to use the submodules directly.
__all__ = make_all(__name__) + [
    "zero_fraction",
    "nn_ops",
    "rnn_cell",
    "seq2seq",
]

# Symbols whitelisted for export without documentation.
# TODO(cwhipkey): review these and move to contrib or expose through
# documentation.
__all__.extend([
    "all_candidate_sampler",
    "batch_norm_with_global_normalization",
    "batch_normalization",
    "bidirectional_rnn",
    "conv2d_backprop_filter",
Esempio n. 3
0
  size_value = tensor_util.constant_value(op.inputs[1])
  if size_value is not None:
    height = size_value[0]
    width = size_value[1]
  else:
    height = None
    width = None
  return [tensor_shape.TensorShape(
      [input_shape[0], height, width, input_shape[3]])]


@ops.RegisterShape('CropAndResize')
def _crop_and_resize_shape(op):
  """Shape function for the CropAndResize op.

  The result shape is [num_boxes, crop_height, crop_width, depth]; the crop
  dimensions are known only when the crop_size input (inputs[3]) is a
  compile-time constant, and are left unknown (None) otherwise.
  """
  img = op.inputs[0].get_shape().with_rank(4)
  boxes = op.inputs[1].get_shape().with_rank(2)
  size = tensor_util.constant_value(op.inputs[3])
  if size is None:
    out_h = out_w = None
  else:
    out_h, out_w = size[0], size[1]
  return [tensor_shape.TensorShape([boxes[0], out_h, out_w, img[3]])]


# Export everything documented in this module, plus ResizeMethod, which has
# no standalone documentation but is described in the functions that use it.
__all__ = make_all(__name__) + ['ResizeMethod']
Esempio n. 4
0
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops


# Don't export modules except for the few we really want.
_whitelist = {
    app, compat, contrib, errors, flags, gfile, image, logging, nn,
    python_io, resource_loader, sysconfig, test, train, user_ops,
}

# Export all symbols directly accessible from 'tf.' by drawing on the doc
# strings of other modules.
__all__ = make_all(__name__, [
    framework_lib, array_ops, client_lib, constant_op, control_flow_ops,
    functional_ops, histogram_ops, io_ops, math_ops, nn, script_ops,
    sparse_ops, state_ops, train,
])

# Symbols whitelisted for export without documentation.
# TODO(cwhipkey): review these and move to contrib, expose through
# documentation, or remove.
__all__.extend([
    'AttrValue',
    'ClusterDef',
    'ClusterSpec',
    'ConfigProto',
    'Event',
    'GPUOptions',
    'GRAPH_DEF_VERSION',
    'GRAPH_DEF_VERSION_MIN_CONSUMER',
    'GRAPH_DEF_VERSION_MIN_PRODUCER',
Esempio n. 5
0

# Distributed computing support
from tensorflow.core.protobuf.tensorflow_server_pb2 import ClusterDef
from tensorflow.core.protobuf.tensorflow_server_pb2 import JobDef
from tensorflow.core.protobuf.tensorflow_server_pb2 import ServerDef
from tensorflow.python.training.server_lib import ClusterSpec
from tensorflow.python.training.server_lib import Server


from tensorflow.python.util.all_util import make_all

# Include extra modules for docstrings because:
# * Input methods in tf.train are documented in io_ops.
# * Saver methods in tf.train are documented in state_ops.
__all__ = make_all(__name__, [sys.modules[__name__], io_ops, state_ops])

# Symbols whitelisted for export without documentation.
# TODO(cwhipkey): review these and move to contrib or expose through
# documentation.
__all__.extend(
    [
        "BytesList",
        "Example",
        "Feature",
        "FeatureList",
        "FeatureLists",
        "Features",
        "FloatList",
        "Int64List",
        "LooperThread",
Esempio n. 6
0
# Training data protos.
from tensorflow.core.example.example_pb2 import *
from tensorflow.core.example.feature_pb2 import *
from tensorflow.core.protobuf.saver_pb2 import *

# Utility op.  Open Source. TODO(touts): move to nn?
from tensorflow.python.training.learning_rate_decay import exponential_decay

from tensorflow.python.util.all_util import make_all

# Build the export list from several make_all calls and de-duplicate it.
# Extra make_all calls are needed because:
# * Input methods in tf.train are documented in io_ops.
# * Saver methods in tf.train are documented in state_ops.
# NOTE: list(set(...)) makes the resulting order arbitrary; only membership
# in __all__ matters here.
__all__ = list(set(
    make_all(__name__) +
    make_all(__name__, io_ops.__name__) +
    make_all(__name__, state_ops.__name__)))

# Symbols whitelisted for export without documentation.
# TODO(cwhipkey): review these and move to contrib or expose through
# documentation.
__all__.extend([
    "BytesList",
    "Example",
    "Feature",
    "FeatureList",
    "FeatureLists",
    "Features",
    "FloatList",
    "InferenceExample",
Esempio n. 7
0
@@summarize_activations

## Utilities

@@assert_same_float_dtype
@@assert_scalar_int
@@is_numeric_tensor

"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys

# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.layers.python.framework.tensor_util import *
from tensorflow.contrib.layers.python.layers import *
from tensorflow.contrib.layers.python.ops import *
from tensorflow.contrib.layers.python.ops import loss_ops
from tensorflow.python.util.all_util import make_all


# Include loss_ops so its symbols appear in __all__, even though they are not
# documented in the main docs yet.
# TODO(cwhipkey): get the loss_ops documented in the main documentation and do
# this in a better way.
__all__ = make_all(__name__, [sys.modules[__name__], loss_ops])
Esempio n. 8
0
of `summarize_collection` to `VARIABLES`, `WEIGHTS` and `BIASES`, respectively.

@@summarize_activations

## Utilities

@@assert_same_float_dtype
@@assert_scalar_int
@@is_numeric_tensor

"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys

# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.layers.python.framework.tensor_util import *
from tensorflow.contrib.layers.python.layers import *
from tensorflow.contrib.layers.python.ops import *
from tensorflow.contrib.layers.python.ops import loss_ops
from tensorflow.python.util.all_util import make_all

# Include loss_ops so its symbols appear in __all__, even though they are not
# documented in the main docs yet.
# TODO(cwhipkey): get the loss_ops documented in the main documentation and do
# this in a better way.
__all__ = make_all(__name__, [sys.modules[__name__], loss_ops])
Esempio n. 9
0
from tensorflow.python.ops import session_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops

# Don't export modules except for the few we really want.
_whitelist = {
    app, compat, contrib, errors, flags, gfile, image, logging, nn,
    python_io, resource_loader, sysconfig, test, train, user_ops,
}

# Export all symbols directly accessible from 'tf.' by drawing on the doc
# strings of other modules.
__all__ = make_all(__name__, [
    framework_lib, array_ops, client_lib, check_ops, constant_op,
    control_flow_ops, functional_ops, histogram_ops, io_ops, math_ops, nn,
    script_ops, session_ops, sparse_ops, state_ops, string_ops, summary,
    tensor_array_ops, train,
])

# Symbols whitelisted for export without documentation.
# TODO(cwhipkey): review these and move to contrib, expose through
# documentation, or remove.
__all__.extend([
    'AttrValue',
    'ConfigProto',
    'DeviceSpec',
    'Event',
    'GPUOptions',
    'GRAPH_DEF_VERSION',
    'GRAPH_DEF_VERSION_MIN_CONSUMER',
    'GRAPH_DEF_VERSION_MIN_PRODUCER',
Esempio n. 10
0
of `summarize_collection` to `VARIABLES`, `WEIGHTS` and `BIASES`, respectively.

@@summarize_activations

## Utilities

@@assert_same_float_dtype
@@assert_scalar_int
@@is_numeric_tensor

"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.layers.python.framework.tensor_util import *
from tensorflow.contrib.layers.python.layers import *
from tensorflow.contrib.layers.python.ops import *
from tensorflow.contrib.layers.python.ops import loss_ops
from tensorflow.python.util.all_util import make_all


# Include loss_ops so its symbols get exported even though they are not yet
# documented in the main docs.
# TODO(cwhipkey): get the loss_ops documented in the main documentation and do
# this in a better way.
__all__ = make_all(__name__) + make_all(__name__, loss_ops.__name__)
Esempio n. 11
0
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops

# Don't export modules except for the few we really want.
_whitelist = {app, compat, contrib, errors, flags, gfile, image, logging,
              nn, python_io, resource_loader, sysconfig, test, train,
              user_ops}

# Export all symbols directly accessible from 'tf.' by drawing on the doc
# strings of other modules.
__all__ = make_all(
    __name__,
    [framework_lib, array_ops, client_lib, check_ops, constant_op,
     control_flow_ops, functional_ops, histogram_ops, io_ops, math_ops, nn,
     script_ops, session_ops, sparse_ops, state_ops, string_ops, summary,
     tensor_array_ops, train])

# Symbols whitelisted for export without documentation.
# TODO(cwhipkey): review these and move to contrib, expose through
# documentation, or remove.
__all__.extend([
    'AttrValue',
    'ConfigProto',
    'DeviceSpec',
    'Event',
    'GPUOptions',
    'GRAPH_DEF_VERSION',
    'GRAPH_DEF_VERSION_MIN_CONSUMER',
    'GRAPH_DEF_VERSION_MIN_PRODUCER',
Esempio n. 12
0
# Training data protos.
from tensorflow.core.example.example_pb2 import *
from tensorflow.core.example.feature_pb2 import *
from tensorflow.core.protobuf.saver_pb2 import *

# Utility op.  Open Source. TODO(touts): move to nn?
from tensorflow.python.training.learning_rate_decay import exponential_decay

from tensorflow.python.util.all_util import make_all

# Build the export list from several make_all calls and de-duplicate it.
# Extra make_all calls are needed because:
# * Input methods in tf.train are documented in io_ops.
# * Saver methods in tf.train are documented in state_ops.
# NOTE: list(set(...)) makes the resulting order arbitrary; only membership
# in __all__ matters here.
__all__ = list(
    set(
        make_all(__name__) + make_all(__name__, io_ops.__name__) +
        make_all(__name__, state_ops.__name__)))

# Symbols whitelisted for export without documentation.
# TODO(cwhipkey): review these and move to contrib or expose through
# documentation.
__all__.extend([
    "BytesList",
    "Example",
    "Feature",
    "FeatureList",
    "FeatureLists",
    "Features",
    "FloatList",
    "InferenceExample",
    "Int64List",