Example #1
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.util import compat


def _MakeFunc(v, arg_name):
  """Ensure v is a func."""
  if isinstance(v, attr_value_pb2.NameAttrList):
    return v
  if isinstance(v, compat.bytes_or_text_types):
    fn_attr = attr_value_pb2.NameAttrList(name=v)
  elif hasattr(v, "add_to_graph"):
    v.add_to_graph(ops.get_default_graph())
    if hasattr(v, "_as_name_attr_list"):
      fn_attr = v._as_name_attr_list  # pylint: disable=protected-access
    else:
      fn_attr = attr_value_pb2.NameAttrList(name=v.name)
  else:
    raise TypeError("Don't know how to convert {} to a func for "
                    "argument {}".format(v, arg_name))
  return fn_attr
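Taken on its own, `_MakeFunc` accepts three kinds of input: a ready-made `NameAttrList`, a function name as a string, or a defined-function object. A minimal sketch of the string case (the call below is illustrative, not from the source):

fn = _MakeFunc("MyFunc", "f")  # a plain string becomes a NameAttrList
assert isinstance(fn, attr_value_pb2.NameAttrList)
assert fn.name == "MyFunc"
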
def partitioned_call(args, f, tout=None, executing_eagerly=None):
  """Executes a function while respecting device annotations.

  Currently, only those functions that execute within the same address space
  can be executed.

  Args:
    args: The arguments of the function, including captured inputs.
    f: The function to execute; an instance of `_DefinedFunction` or
      `_EagerDefinedFunction`.
    tout: A list containing the output dtype enums; if `None`, inferred from
      the signature of `f`.
    executing_eagerly: (Optional) A boolean indicating whether the context is
      executing eagerly. If `None`, fetched from the global context.

  Returns:
    The list of `Tensor`s returned by invoking `f(args)`. If the function does
    not return anything, then returns `None` if eager execution is enabled, or
    the `Operation` if not.
  """

  if tout is None:
    tout = tuple(x.type for x in f.definition.signature.output_arg)

  if executing_eagerly is None:
    executing_eagerly = context.executing_eagerly()

  if executing_eagerly or len(tout):
    if f.stateful_ops:
      outputs = gen_functional_ops.stateful_partitioned_call(
          args=args, Tout=tout, f=f)
    else:
      outputs = gen_functional_ops.partitioned_call(args=args, Tout=tout, f=f)
    return outputs if outputs else None

  # The generated binding returns an empty list for functions that don't
  # return any Tensors, hence the need to use `create_op` directly.
  args = [ops.internal_convert_to_tensor(x) for x in args]
  tin_attr = attr_value_pb2.AttrValue(
      list=attr_value_pb2.AttrValue.ListValue(
          type=[x.dtype.as_datatype_enum for x in args]))
  tout_attr = attr_value_pb2.AttrValue(
      list=attr_value_pb2.AttrValue.ListValue(type=tout))
  func_attr = attr_value_pb2.AttrValue(
      func=attr_value_pb2.NameAttrList(name=f.name))

  graph = ops.get_default_graph()
  f.add_to_graph(graph)
  op_name = "StatefulPartitionedCall" if f.stateful_ops else "PartitionedCall"
  op = graph.create_op(
      op_name,
      args,
      tout,
      compute_shapes=False,
      name="PartitionedFunctionCall",
      attrs={"Tin": tin_attr, "Tout": tout_attr, "f": func_attr})
  outputs = op.outputs
  return outputs if outputs else op
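
The graph-mode branch above assembles the op attrs by hand. For reference, here is the same proto construction in isolation, using only the protobuf API; the dtype enum comes from `types_pb2`, and the function name "MyFunc" is illustrative:

from tensorflow.core.framework import types_pb2

tin_attr = attr_value_pb2.AttrValue(
    list=attr_value_pb2.AttrValue.ListValue(type=[types_pb2.DT_FLOAT]))
tout_attr = attr_value_pb2.AttrValue(
    list=attr_value_pb2.AttrValue.ListValue(type=[types_pb2.DT_FLOAT]))
func_attr = attr_value_pb2.AttrValue(
    func=attr_value_pb2.NameAttrList(name="MyFunc"))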
Example #3
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.ops import functional_ops


def _SymGrad(op, out_grads):
    """Backprop through a function call node op given its outputs' gradients."""
    f_in = [x for x in op.inputs] + out_grads
    f_types = [x.dtype for x in op.inputs]
    f = attr_value_pb2.NameAttrList()
    f.name = op.type
    for k in op.node_def.attr:
        f.attr[k].CopyFrom(op.node_def.attr[k])
    in_grads = functional_ops.symbolic_gradient(input=f_in, Tout=f_types, f=f)
    return in_grads
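
The `CopyFrom` loop forwards every attr of the call node to the `NameAttrList` that names the function to differentiate. The same pattern in isolation, with a hypothetical "T" attr (`types_pb2` supplies the dtype enum):

from tensorflow.core.framework import types_pb2

src = attr_value_pb2.NameAttrList(name="MyFunc")
src.attr["T"].type = types_pb2.DT_FLOAT  # hypothetical attr
dst = attr_value_pb2.NameAttrList(name=src.name)
for k in src.attr:
    dst.attr[k].CopyFrom(src.attr[k])
assert dst.attr["T"].type == types_pb2.DT_FLOAT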
Example #4
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.ops import default_gradient
from tensorflow.python.ops import functional_ops


def _SymGrad(op, out_grads):
    """Backprop through a function call node op given its outputs' gradients."""
    f_in = [x for x in op.inputs] + out_grads
    f_types = [default_gradient.get_zeros_dtype(x) for x in op.inputs]
    f = attr_value_pb2.NameAttrList()
    if _IsPartitionedCall(op):
        f.name = op.get_attr("f").name
    else:
        f.name = op.type
    for k in op.node_def.attr:
        f.attr[k].CopyFrom(op.node_def.attr[k])
    in_grads = functional_ops.symbolic_gradient(input=f_in, Tout=f_types, f=f)
    return in_grads
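
`_IsPartitionedCall` is not part of the snippet; in the TensorFlow source it amounts to a check of the op type, roughly:

def _IsPartitionedCall(op):
    return op.type in ("PartitionedCall", "StatefulPartitionedCall")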
Example #5
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.util import compat


def _MakeFunc(v, arg_name):
    """Ensure v is a func."""
    if isinstance(v, attr_value_pb2.NameAttrList):
        return v
    fn_attr = attr_value_pb2.NameAttrList()
    if isinstance(v, compat.bytes_or_text_types):
        fn_attr.name = v
    elif hasattr(v, "add_to_graph"):
        v.add_to_graph(ops.get_default_graph())
        fn_attr.name = v.name
    else:
        raise TypeError("Don't know how to convert {} to a func for "
                        "argument {}".format(v, arg_name))
    return fn_attr


def _SymGrad(op, out_grads):
    """Backprop through a function call node op given its outputs' gradients."""
    f_in = [x for x in op.inputs] + out_grads
    f_types = [x.dtype for x in op.inputs]
    f = attr_value_pb2.NameAttrList()
    f.name = op.type
    for k in op.node_def.attr:
        f.attr[k].CopyFrom(op.node_def.attr[k])
    # TODO(apassos) use a better dtype here
    in_grads = functional_ops.symbolic_gradient(
        input=f_in,
        Tout=[x if x != dtypes.resource else dtypes.float32 for x in f_types],
        f=f)
    return in_grads
Example #7
from google.protobuf import text_format

from tensorflow.core.framework import attr_value_pb2


def serialize_transformation(op_name, attributes):
  proto = attr_value_pb2.NameAttrList(name=op_name)
  if attributes is None or isinstance(attributes, set):
    attributes = dict()
  for (name, value) in attributes.items():
    if isinstance(value, bool):
      proto.attr[name].b = value
    elif isinstance(value, int):
      proto.attr[name].i = value
    elif isinstance(value, str):
      proto.attr[name].s = value.encode()
    else:
      raise ValueError(
          f"attribute value type ({type(value)}) must be bool, int, or str")
  return text_format.MessageToString(proto)
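
A hypothetical call showing the text round-trip; the transformation and attribute names are made up, and `text_format.Parse` is the standard protobuf inverse of `MessageToString`:

s = serialize_transformation("map_and_batch_fusion", {"parallel_copy": True})
print(s)  # name: "map_and_batch_fusion" plus one bool attr
restored = text_format.Parse(s, attr_value_pb2.NameAttrList())
assert restored.name == "map_and_batch_fusion"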
Example #8
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.util import compat
from tensorflow.python.util import function_utils


def partitioned_call(args,
                     f,
                     tout=None,
                     executing_eagerly=None,
                     config=None,
                     executor_type=None):
    """Executes a function while respecting device annotations.

  Currently, only those functions that execute within the same address space
  can be executed.

  Args:
    args: The arguments of the function, including captured inputs.
    f: The function to execute; an instance of `_DefinedFunction` or
      `_EagerDefinedFunction`.
    tout: a list containing the output dtypes enums; if `None`, inferred from
      the signature of `f`.
    executing_eagerly: (Optional) A boolean indicating whether the context is
      executing eagerly. If `None`, fetched from the global context.
    config: (Optional) A `tensorflow::ConfigProto` proto, serialized. If `None`,
      all optimizations are disabled. Currently only handled for eager defined
      functions.
    executor_type: (Optional) A string for the name of the executor to be used
      in the function call. If not set, or set to an empty string, the default
      tensorflow executor will be used.

  Returns:
    The list of `Tensor`s returned by invoking `f(args)`. If the function does
    not return anything, then returns `None` if eager execution is enabled, or
    the `Operation` if not.
  """

    if tout is None:
        tout = tuple(x.type for x in f.definition.signature.output_arg)

    if executing_eagerly is None:
        executing_eagerly = context.executing_eagerly()

    if config is None:
        config = function_utils.get_disabled_rewriter_config()

    if executor_type is None:
        executor_type = ""

    if executing_eagerly:
        if f.stateful_ops:
            outputs = gen_functional_ops.stateful_partitioned_call(
                args=args,
                Tout=tout,
                f=f,
                config_proto=config,
                executor_type=executor_type)
        else:
            outputs = gen_functional_ops.partitioned_call(
                args=args,
                Tout=tout,
                f=f,
                config_proto=config,
                executor_type=executor_type)
        return outputs if outputs else None

    # The generated binding returns an empty list for functions that don't
    # return any Tensors, hence the need to use `create_op` directly.
    args = [ops.convert_to_tensor(x) for x in args]
    tin_attr = attr_value_pb2.AttrValue(
        list=attr_value_pb2.AttrValue.ListValue(
            type=[x.dtype.as_datatype_enum for x in args]))
    tout_attr = attr_value_pb2.AttrValue(
        list=attr_value_pb2.AttrValue.ListValue(type=tout))
    func_attr = attr_value_pb2.AttrValue(func=attr_value_pb2.NameAttrList(
        name=f.name))
    executor_type_attr = attr_value_pb2.AttrValue(
        s=compat.as_bytes(executor_type))

    # When running in graph mode, the graph and function graphs are optimized
    # (i.e. run through grappler) per the session options, so we can disable any
    # eager-specific rewriting.
    config_proto = attr_value_pb2.AttrValue(s=config)

    graph = ops.get_default_graph()
    f.add_to_graph(graph)
    op_name = "StatefulPartitionedCall" if f.stateful_ops else "PartitionedCall"
    op = graph.create_op(op_name,
                         args,
                         tout,
                         name=op_name,
                         attrs={
                             "Tin": tin_attr,
                             "Tout": tout_attr,
                             "f": func_attr,
                             "config_proto": config_proto,
                             "executor_type": executor_type_attr,
                         })
    outputs = op.outputs
    return outputs if outputs else op
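
The `config` argument is a serialized `ConfigProto`. A minimal sketch of producing one by hand, using only the standard proto API; disabling the meta optimizer is one way to approximate the "all optimizations disabled" default mentioned in the docstring:

from tensorflow.core.protobuf import config_pb2

cfg = config_pb2.ConfigProto()
cfg.graph_options.rewrite_options.disable_meta_optimizer = True  # skip grappler
serialized = cfg.SerializeToString()  # pass as `config=serialized`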
Example #9
from tensorflow.core.framework import attr_value_pb2

# An AttrValue carrying a single integer.
av = attr_value_pb2.AttrValue(i=1)
print(type(av))
print(av)
print(av.SerializeToString())

# A NameAttrList pairs a function name with a map of AttrValues.
nal = attr_value_pb2.NameAttrList(name="nal", attr={"av": av})

print(type(nal))
print(nal)
print(nal.SerializeToString())
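
The serialized bytes parse back into an equal message, which makes the round-trip easy to verify:

nal2 = attr_value_pb2.NameAttrList()
nal2.ParseFromString(nal.SerializeToString())
assert nal2 == nal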