Example #1
def _SymGrad(op, out_grads):
    """Backprop through a function call node op given its outputs' gradients."""
    f_in = [x for x in op.inputs] + out_grads
    f_types = [x.dtype for x in op.inputs]
    f = attr_value_pb2.NameAttrList()
    f.name = op.type
    for k in op.node_def.attr:
        f.attr[k].CopyFrom(op.node_def.attr[k])
    in_grads = functional_ops.symbolic_gradient(input=f_in, Tout=f_types, f=f)
    return in_grads
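These _SymGrad variants come from TensorFlow's internals and are shown without their imports. A minimal set of imports that the snippets on this page appear to rely on (module paths as they exist in the TensorFlow source tree; dtypes is used in Examples #5 and #6, default_gradient only in Example #3):

from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import default_gradient
from tensorflow.python.ops import functional_ops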
Example #2
def _SymGrad(op, out_grads):
  """Backprop through a function call node op given its outputs' gradients."""
  f_in = [x for x in op.inputs] + out_grads
  f_types = [x.dtype for x in op.inputs]
  f = attr_value_pb2.NameAttrList()
  f.name = op.type
  for k in op.node_def.attr:
    f.attr[k].CopyFrom(op.node_def.attr[k])
  # pylint: disable=protected-access
  in_grads = functional_ops.symbolic_gradient(input=f_in, Tout=f_types, f=f)
  # pylint: enable=protected-access
  return in_grads
Example #3
def _SymGrad(op, out_grads):
    """Backprop through a function call node op given its outputs' gradients."""
    f_in = [x for x in op.inputs] + out_grads
    f_types = [default_gradient.get_zeros_dtype(x) for x in op.inputs]
    f = attr_value_pb2.NameAttrList()
    if _IsPartitionedCall(op):
        f.name = op.get_attr("f").name
    else:
        f.name = op.type
    for k in op.node_def.attr:
        f.attr[k].CopyFrom(op.node_def.attr[k])
    in_grads = functional_ops.symbolic_gradient(input=f_in, Tout=f_types, f=f)
    return in_grads
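Example #3 also calls a private helper, _IsPartitionedCall(op), which is not shown on this page. A minimal sketch, assuming the helper only needs to recognize the two partitioned-call op types whose callee is stored in the "f" attr rather than in op.type:

def _IsPartitionedCall(op):
    # Assumption: a call node is "partitioned" exactly when its op type is one
    # of these two; for such nodes the called function's name lives in the "f"
    # attribute, which Example #3 reads via op.get_attr("f").
    return op.type in ("PartitionedCall", "StatefulPartitionedCall")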
Example #4
def testSymGradShape(self):
    g = tf.Graph()
    with g.as_default():
        x = tf.placeholder(tf.float32, [25, 4])
        y = tf.placeholder(tf.float32, [200, 100])
        dz = tf.placeholder(tf.float32, [1])
        # We assume Foo is a function of (x, y) -> (z). Then Foo's
        # gradient function is (x, y, dz) -> (dx, dy). dx's shape
        # should be the same as x's, and dy's shape should be the same
        # as y's.
        dx, dy = symbolic_gradient(input=[x, y, dz], Tout=[tf.float32] * 2, f="Foo")
        self.assertEqual(x.get_shape(), dx.get_shape())
        self.assertEqual(y.get_shape(), dy.get_shape())
Example #5
def _SymGrad(op, out_grads):
    """Backprop through a function call node op given its outputs' gradients."""
    f_in = [x for x in op.inputs] + out_grads
    f_types = [x.dtype for x in op.inputs]
    f = attr_value_pb2.NameAttrList()
    f.name = op.type
    for k in op.node_def.attr:
        f.attr[k].CopyFrom(op.node_def.attr[k])
    # TODO(apassos) use a better dtype here
    in_grads = functional_ops.symbolic_gradient(
        input=f_in,
        Tout=[x if x != dtypes.resource else dtypes.float32 for x in f_types],
        f=f)
    return in_grads
Example #6
def _SymGrad(op, out_grads):
  """Backprop through a function call node op given its outputs' gradients."""
  f_in = [x for x in op.inputs] + out_grads
  f_types = [x.dtype for x in op.inputs]
  f = attr_value_pb2.NameAttrList()
  f.name = op.type
  for k in op.node_def.attr:
    f.attr[k].CopyFrom(op.node_def.attr[k])
  # TODO(apassos) use a better dtype here
  in_grads = functional_ops.symbolic_gradient(
      input=f_in,
      Tout=[x if x != dtypes.resource else dtypes.float32 for x in f_types],
      f=f)
  return in_grads
Example #7
def testSymGradShape(self):
    g = tf.Graph()
    with g.as_default():
        x = tf.placeholder(tf.float32, [25, 4])
        y = tf.placeholder(tf.float32, [200, 100])
        dz = tf.placeholder(tf.float32, [1])
        # We assume Foo is a function of (x, y) -> (z). Then Foo's
        # gradient function is (x, y, dz) -> (dx, dy). dx's shape
        # should be the same as x's, and dy's shape should be the same
        # as y's.
        dx, dy = symbolic_gradient(input=[x, y, dz],
                                   Tout=[tf.float32] * 2,
                                   f="Foo")
        self.assertEqual(x.get_shape(), dx.get_shape())
        self.assertEqual(y.get_shape(), dy.get_shape())
Example #8
def XSquarePlusOneGrad(x, dy):
    dx = symbolic_gradient(input=[x, dy],
                           Tout=[tf.float32],
                           f="XSquarePlusOne",
                           name="dx")
    return dx
Example #9
def XSquarePlusOneGrad(x, dy):
    dx = symbolic_gradient(input=[x, dy], Tout=[tf.float32], f="XSquarePlusOne", name="dx")
    return dx
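Examples #8 and #9 refer to the function "XSquarePlusOne" by name, so a graph function with that name must already be registered before symbolic_gradient can backprop through it. A minimal sketch of how the pair might be wired together, assuming a TF 1.x graph and the legacy function.Defun decorator; the XSquarePlusOne definition and the use of grad_func below are illustrative, not part of the original snippets:

import tensorflow as tf
from tensorflow.python.framework import function
from tensorflow.python.ops import functional_ops

@function.Defun(tf.float32, tf.float32)
def XSquarePlusOneGrad(x, dy):
    # Backprop through the registered "XSquarePlusOne" graph function,
    # given the incoming gradient dy.
    dx = functional_ops.symbolic_gradient(
        input=[x, dy], Tout=[tf.float32], f="XSquarePlusOne", name="dx")
    return dx

@function.Defun(tf.float32, grad_func=XSquarePlusOneGrad)
def XSquarePlusOne(x):
    return x * x + 1.0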