Example #1
File: util.py Project: mysqlsc/probability
def _value_and_gradients(fn, fn_arg_list, result=None, grads=None, name=None):
  """Helper to `maybe_call_fn_and_grads`."""
  with tf.name_scope(name, 'value_and_gradients', [fn_arg_list, result, grads]):
    def _convert_to_tensor(x, name):
      ctt = lambda x_: x_ if x_ is None else tf.convert_to_tensor(x_, name=name)
      return [ctt(x_) for x_ in x] if is_list_like(x) else ctt(x)

    fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
                   else [fn_arg_list])
    fn_arg_list = _convert_to_tensor(fn_arg_list, 'fn_arg')

    if result is None:
      result = fn(*fn_arg_list)
      if grads is None and tfe.executing_eagerly():
        # Ensure we disable bijector caching in eager mode.
        # TODO(b/72831017): Remove this once bijector caching is fixed for
        # eager mode.
        fn_arg_list = [0 + x for x in fn_arg_list]

    result = _convert_to_tensor(result, 'fn_result')

    if grads is not None:
      grads = _convert_to_tensor(grads, 'fn_grad')
      return result, grads

    if tfe.executing_eagerly():
      if is_list_like(result) and len(result) == len(fn_arg_list):
        # Compute the block diagonal of Jacobian.
        # TODO(b/79158574): Guard this calculation by an arg which explicitly
        # requests block diagonal Jacobian calculation.
        def make_fn_slice(i):
          """Needed to prevent `cell-var-from-loop` pylint warning."""
          return lambda *args: fn(*args)[i]
        grads = [
            tfe.gradients_function(make_fn_slice(i))(*fn_arg_list)[i]
            for i in range(len(result))
        ]
      else:
        grads = tfe.gradients_function(fn)(*fn_arg_list)
    else:
      if is_list_like(result) and len(result) == len(fn_arg_list):
        # Compute the block diagonal of Jacobian.
        # TODO(b/79158574): Guard this calculation by an arg which explicitly
        # requests block diagonal Jacobian calculation.
        grads = [tf.gradients(result[i], fn_arg_list[i])[0]
                 for i in range(len(result))]
      else:
        grads = tf.gradients(result, fn_arg_list)

    return result, grads
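A minimal, hypothetical sketch of the block-diagonal Jacobian pattern used above (assuming TF 1.x with `tensorflow.contrib.eager`; the function `fn` and its arguments here are illustrative, not taken from the project): each output slice is differentiated only with respect to its matching argument.

import tensorflow as tf
import tensorflow.contrib.eager as tfe

tfe.enable_eager_execution()

def fn(x, y):
    # Two outputs; output i depends only on argument i.
    return [tf.reduce_sum(x * x), tf.reduce_sum(y * y * y)]

args = [tf.constant([1., 2.]), tf.constant([3.])]

def make_fn_slice(i):
    return lambda *a: fn(*a)[i]

# grads[i] is d fn(*args)[i] / d args[i], i.e. one block of the Jacobian diagonal.
grads = [tfe.gradients_function(make_fn_slice(i))(*args)[i] for i in range(2)]
print(grads)  # gradients: [2., 4.] and [27.]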
Example #2
    def test_qnode_gradient_agrees(self, qubit_device_2_wires, tol):
        """Tests that simple gradient example is consistent."""
        @qml.qnode(qubit_device_2_wires, interface='autograd')
        def circuit(phi, theta):
            qml.RX(phi[0], wires=0)
            qml.RY(phi[1], wires=1)
            qml.CNOT(wires=[0, 1])
            qml.PhaseShift(theta[0], wires=0)
            return qml.expval(qml.PauliZ(0))

        @qml.qnode(qubit_device_2_wires, interface='tfe')
        def circuit_tfe(phi, theta):
            qml.RX(phi[0], wires=0)
            qml.RY(phi[1], wires=1)
            qml.CNOT(wires=[0, 1])
            qml.PhaseShift(theta[0], wires=0)
            return qml.expval(qml.PauliZ(0))

        phi = [0.5, 0.1]
        theta = [0.2]

        phi_t = tfe.Variable(phi)
        theta_t = tfe.Variable(theta)

        dcircuit = qml.grad(circuit, [0, 1])
        autograd_grad = dcircuit(phi, theta)

        dcircuit = tfe.gradients_function(circuit_tfe)
        tfe_grad = dcircuit(phi_t, theta_t)

        assert np.allclose(autograd_grad[0], tfe_grad[0], atol=tol, rtol=0)
        assert np.allclose(autograd_grad[1], tfe_grad[1], atol=tol, rtol=0)
Example #3
    def _grad_potential(self, position, check_numerics=True):
        """Get gradient of potential function at current position."""
        if tf.executing_eagerly():
            grad = tfe.gradients_function(self.potential)(position)[0]
        else:
            grad = tf.gradients(self.potential(position), position)[0]

        return grad
Example #4
  def grad_potential(self, position, check_numerics=True):
    """Get gradient of potential function at current location."""

    if tf.executing_eagerly():
      grad = tfe.gradients_function(self.potential)(position)[0]
    else:
      grad = tf.gradients(self.potential(position), position)[0]

    return grad
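A hedged, self-contained sketch of the two branches above with a simple quadratic potential (the `potential` below is an illustrative assumption, not the project's):

import tensorflow as tf
import tensorflow.contrib.eager as tfe

def potential(q):
    return 0.5 * tf.reduce_sum(q * q)

position = tf.constant([1.0, 2.0])
if tf.executing_eagerly():
    grad = tfe.gradients_function(potential)(position)[0]
else:
    grad = tf.gradients(potential(position), position)[0]
# For this potential the gradient equals `position` itself: [1.0, 2.0].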
Example #5
    def testJacobianDiagonal3DListInput(self):
        """Tests that the diagonal of the Jacobian matrix computes correctly."""

        dtype = np.float32
        true_mean = dtype([0, 0, 0])
        true_cov = dtype([[1, 0.25, 0.25], [0.25, 2, 0.25], [0.25, 0.25, 3]])
        chol = tf.linalg.cholesky(true_cov)
        target = tfd.MultivariateNormalTriL(loc=true_mean, scale_tril=chol)

        # Assume that the state is passed as a list of tensors `x` and `y`.
        # Then the target function is defined as follows:
        def target_fn(x, y):
            # Stack the input tensors together
            z = tf.concat([x, y], axis=-1) - true_mean
            return target.log_prob(z)

        sample_shape = [3, 5]
        state = [
            tf.ones(sample_shape + [2], dtype=dtype),
            tf.ones(sample_shape + [1], dtype=dtype)
        ]
        fn_val = target_fn(*state)
        grad_fn = tfe.gradients_function(target_fn)
        if tfe.executing_eagerly():
            grads = grad_fn(*state)
        else:
            grads = tf.gradients(fn_val, state)

        _, diag_jacobian_shape_passed = tfp.math.diag_jacobian(
            xs=state, ys=grads, fn=grad_fn, sample_shape=tf.shape(fn_val))
        _, diag_jacobian_shape_none = tfp.math.diag_jacobian(xs=state,
                                                             ys=grads,
                                                             fn=grad_fn)

        true_diag_jacobian_1 = np.zeros(sample_shape + [2])
        true_diag_jacobian_1[..., 0] = -1.05
        true_diag_jacobian_1[..., 1] = -0.52

        true_diag_jacobian_2 = -0.34 * np.ones(sample_shape + [1])

        self.assertAllClose(self.evaluate(diag_jacobian_shape_passed[0]),
                            true_diag_jacobian_1,
                            atol=0.01,
                            rtol=0.01)
        self.assertAllClose(self.evaluate(diag_jacobian_shape_none[0]),
                            true_diag_jacobian_1,
                            atol=0.01,
                            rtol=0.01)

        self.assertAllClose(self.evaluate(diag_jacobian_shape_passed[1]),
                            true_diag_jacobian_2,
                            atol=0.01,
                            rtol=0.01)
        self.assertAllClose(self.evaluate(diag_jacobian_shape_none[1]),
                            true_diag_jacobian_2,
                            atol=0.01,
                            rtol=0.01)
Example #6
File: jacobian.py Project: saforem2/l2hmc
 def _grads(fxi, x):
     x = np.array(x)
     if tf.executing_eagerly():
         grad_fn = tfe.gradients_function(fxi)
         grad = grad_fn(x[0])[0]
     else:
         # In graph mode, `tf.gradients` returns a list of tensors rather than a
         # callable, so build the graph for `fxi` at x[0] and differentiate it.
         x0 = tf.convert_to_tensor(x[0])
         grad = tf.gradients(fxi(x0), x0)[0]
     return grad
Example #7
def _value_and_gradients(fn, fn_arg_list, result=None, grads=None, name=None):
  """Helper to `maybe_call_fn_and_grads`."""
  with tf.name_scope(name, 'value_and_gradients', [fn_arg_list, result, grads]):
    def _convert_to_tensor(x, name):
      ctt = lambda x_: x_ if x_ is None else tf.convert_to_tensor(x_, name=name)
      return [ctt(x_) for x_ in x] if is_list_like(x) else ctt(x)

    fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
                   else [fn_arg_list])
    fn_arg_list = _convert_to_tensor(fn_arg_list, 'fn_arg')

    if result is None:
      result = fn(*fn_arg_list)
    result = _convert_to_tensor(result, 'fn_result')

    if grads is not None:
      grads = _convert_to_tensor(grads, 'fn_grad')
      return result, grads

    if tfe.executing_eagerly():
      if is_list_like(result) and len(result) == len(fn_arg_list):
        # Compute the block diagonal of Jacobian.
        # TODO(b/79158574): Guard this calculation by an arg which explicitly
        # requests block diagonal Jacobian calculation.
        def make_fn_slice(i):
          """Needed to prevent `cell-var-from-loop` pylint warning."""
          return lambda *args: fn(*args)[i]
        grads = [
            tfe.gradients_function(make_fn_slice(i))(*fn_arg_list)[i]
            for i in range(len(result))
        ]
      else:
        grads = tfe.gradients_function(fn)(*fn_arg_list)
    else:
      if is_list_like(result) and len(result) == len(fn_arg_list):
        # Compute the block diagonal of Jacobian.
        # TODO(b/79158574): Guard this calculation by an arg which explicitly
        # requests block diagonal Jacobian calculation.
        grads = [tf.gradients(result[i], fn_arg_list[i])[0]
                 for i in range(len(result))]
      else:
        grads = tf.gradients(result, fn_arg_list)

    return result, grads
Example #8
 def loop_body(j):
   """Loop function to compute gradients of the each direction."""
   res = tfe.gradients_function(fn_slice(i, j))(*xs)[i]  # pylint: disable=cell-var-from-loop
   if res is None:
     res = tf.zeros(tf.concat([sample_shape, [1]], -1),
                    dtype=x.dtype)  # pylint: disable=cell-var-from-loop
   else:
     res = tf.reshape(res, tf.concat([sample_shape, [-1]], -1))
     res = res[..., j]
   return res
Example #9
def tfeTest():
    tfe.enable_eager_execution()

    def square(x):
        return tf.multiply(x, x)

    grad = tfe.gradients_function(square)

    print(square(3.))  # [9.]
    print(grad(3.))  # [6.]
Example #10
 def loop_body(j):
     """Loop function to compute gradients of the each direction."""
     res = tfe.gradients_function(fn_slice(i, j))(*xs)[i]  # pylint: disable=cell-var-from-loop
     if res is None:
         res = tf.zeros(tf.concat([sample_shape, [1]], -1),
                        dtype=x.dtype)  # pylint: disable=cell-var-from-loop
     else:
         res = tf.reshape(res,
                          tf.concat([sample_shape, [-1]], -1))
         res = res[..., j]
     return res
Example #11
    def grad_potential(self, position, check_numerics=True):
        """Get gradient of potential function at current location."""

        if not tf.executing_eagerly():
            # TODO(lxuechen): Change this to tfe.gradients_function when it works
            grad = tf.gradients(self.potential(position), position)[0]
        else:
            grad = tfe.gradients_function(self.potential)(position)[0]

        if check_numerics:
            return tf.check_numerics(grad, message="gradient of potential")

        return grad
Example #12
  def grad_potential(self, position, check_numerics=True):
    """Get gradient of potential function at current location."""

    if not tf.executing_eagerly():
      # TODO(lxuechen): Change this to tfe.gradients_function when it works
      grad = tf.gradients(self.potential(position), position)[0]
    else:
      grad = tfe.gradients_function(self.potential)(position)[0]

    if check_numerics:
      return tf.check_numerics(grad, message="gradient of potential")

    return grad
Example #13
    def testJacobianDiagonal4D(self):
        """Tests that the diagonal of the Jacobian matrix computes correctly."""

        dtype = np.float32
        true_mean = dtype([0, 0, 0, 0])
        true_cov = dtype([[1, 0.25, 0.25, 0.25], [0.25, 2, 0.25, 0.25],
                          [0.25, 0.25, 3, 0.25], [0.25, 0.25, 0.25, 4]])
        chol = tf.linalg.cholesky(true_cov)
        target = tfd.MultivariateNormalTriL(loc=true_mean, scale_tril=chol)

        # Assume that the state is passed as a 2x2 matrix of sample_shape = [5, 3]:
        sample_shape = [5, 3]

        def target_fn(*x):
            z = tf.reshape(x, sample_shape + [4])
            return target.log_prob(z)

        state = [tf.ones(sample_shape + [2, 2], dtype=dtype)]
        fn_val = target_fn(*state)
        grad_fn = tfe.gradients_function(target_fn)
        if tfe.executing_eagerly():
            grads = grad_fn(*state)
        else:
            grads = tf.gradients(fn_val, state)

        _, diag_jacobian_shape_passed = tfp.math.diag_jacobian(
            xs=state, ys=grads, fn=grad_fn, sample_shape=tf.shape(fn_val))
        _, diag_jacobian_shape_none = tfp.math.diag_jacobian(xs=state,
                                                             ys=grads,
                                                             fn=grad_fn)

        true_diag_jacobian = np.zeros(sample_shape + [2, 2])
        true_diag_jacobian[..., 0, 0] = -1.06
        true_diag_jacobian[..., 0, 1] = -0.52
        true_diag_jacobian[..., 1, 0] = -0.34
        true_diag_jacobian[..., 1, 1] = -0.26

        self.assertAllClose(self.evaluate(diag_jacobian_shape_passed[0]),
                            true_diag_jacobian,
                            atol=0.01,
                            rtol=0.01)
        self.assertAllClose(self.evaluate(diag_jacobian_shape_none[0]),
                            true_diag_jacobian,
                            atol=0.01,
                            rtol=0.01)
Example #14
    def test_qnode_gradient_agrees(self):
        "Tests that simple gradient example is consistent."
        self.logTestName()

        dev = qml.device('default.qubit', wires=2)

        @qml.qnode(dev, interface='autograd')
        def circuit(phi, theta):
            qml.RX(phi[0], wires=0)
            qml.RY(phi[1], wires=1)
            qml.CNOT(wires=[0, 1])
            qml.PhaseShift(theta[0], wires=0)
            return qml.expval.PauliZ(0)

        @qml.qnode(dev, interface='tfe')
        def circuit_tfe(phi, theta):
            qml.RX(phi[0], wires=0)
            qml.RY(phi[1], wires=1)
            qml.CNOT(wires=[0, 1])
            qml.PhaseShift(theta[0], wires=0)
            return qml.expval.PauliZ(0)

        phi = [0.5, 0.1]
        theta = [0.2]

        phi_t = tfe.Variable(phi)
        theta_t = tfe.Variable(theta)

        dcircuit = qml.grad(circuit, [0, 1])
        autograd_grad = dcircuit(phi, theta)

        dcircuit = tfe.gradients_function(circuit_tfe)
        tfe_grad = dcircuit(phi_t, theta_t)

        self.assertAllAlmostEqual(autograd_grad[0],
                                  tfe_grad[0],
                                  delta=self.tol)
        self.assertAllAlmostEqual(autograd_grad[1],
                                  tfe_grad[1],
                                  delta=self.tol)
Example #15
def log1pexp(x):
    return tf.log(1 + tf.exp(x))

grad_log1pexp = tfe.gradients_function(log1pexp)
Example #16
def grad(f):
    return lambda x: tfe.gradients_function(f)(x)[0]
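A small usage sketch for the two-line helper above (restated here so the snippet is self-contained; the choice of tf.sin is an illustrative assumption):

import tensorflow as tf
import tensorflow.contrib.eager as tfe

tfe.enable_eager_execution()

def grad(f):
    return lambda x: tfe.gradients_function(f)(x)[0]

x = tf.constant(0.0)
print(grad(tf.sin)(x))        # ~1.0, since d/dx sin(x) = cos(x)
print(grad(grad(tf.sin))(x))  # ~0.0, the second derivative -sin(x)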
Example #17
#coding=utf-8

import tensorflow as tf
import tensorflow.contrib.eager as tfe
import numpy as np
tfe.enable_eager_execution()

grad = tfe.gradients_function(lambda x: x * x + 4.0)
print(grad(10.))  # [20.0]
print(grad(5.))   # [10.0]
Example #18
#coding=utf-8

import tensorflow as tf
import tensorflow.contrib.eager as tfe
import numpy as np
tfe.enable_eager_execution()


def leaky_relu(x):
    if x < 0:
        return x * 0.1
    else:
        return x


grad = tfe.gradients_function(leaky_relu)
print(grad(4.0))
print(grad(-3.0))
Example #19
from math import pi

import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.contrib.eager as tfe

tf.enable_eager_execution()


# Derivatives of a function
def f(x):
    return tf.square(tf.sin(x))


def grad(f):
    return lambda x: tfe.gradients_function(f)(x)[0]


assert f(pi / 2).numpy() == 1.0
grad_f = tfe.gradients_function(f)
assert tf.abs(grad_f(pi / 2)[0]).numpy() < 1e-7
print("continue.....")

x = tf.lin_space(-2 * pi, 2 * pi, 100)
plt.plot(x, f(x), label="f")
plt.plot(x, grad(f)(x), label="first derivative")
plt.plot(x, grad(grad(f))(x), label="second derivative")
plt.plot(x, grad(grad(grad(f)))(x), label="third derivative")
plt.legend()
plt.show()

# GradientTape
x = tf.ones((2, 2))
with tf.GradientTape(persistent=True) as t:
    t.watch(x)
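    # Hedged completion of the truncated snippet above: the concrete ops recorded
    # on the tape here are an illustrative assumption, not necessarily the original's.
    y = tf.reduce_sum(x)   # 4.0 for the 2x2 tensor of ones above
    z = tf.multiply(y, y)  # z = y**2

# Because the tape is persistent, it can be queried more than once.
dz_dx = t.gradient(z, x)  # dz/dx = 2*y = 8.0 at every entry
dz_dy = t.gradient(z, y)  # 8.0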
Example #20
    tape.watch(init_x)
    value = fn(init_x)
  grad, = tape.gradient(value, [init_x])
  grad_norm = tf.reduce_sum(grad * grad)
  init_value = value
  while value > init_value - rate * grad_norm:
    x = init_x - rate * grad
    value = fn(x)
    rate /= 2.0
  return x, value


# 4.Additional functions to compute gradients
def square(x):
  return tf.multiply(x, x)
grad = tfe.gradients_function(square)
print("square(3.):", square(3.))
print("grad(3.):", grad(3.))

# The second-order derivative of square:
gradgrad = tfe.gradients_function(lambda x: grad(x)[0])
print("gradgrad(3.):", gradgrad(3.))

# The third-order derivative is None:
gradgradgrad = tfe.gradients_function(lambda x: gradgrad(x)[0])
print("gradgradgrad(3.) :", gradgradgrad(3.) )

# With flow control:
def abs(x):
  return x if x > 0. else -x
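A short usage sketch continuing the flow-control example above; the expected values follow from d|x|/dx = sign(x):

grad = tfe.gradients_function(abs)
print(grad(3.))   # [1.0]
print(grad(-3.))  # [-1.0]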
Example #21
# n = tf.matmul(x, x)
print(m)
# print(n)


a = tf.constant(12)
counter = 0
while not tf.equal(a, 1):
    if tf.equal(a % 2, 0):
        a = a / 2
    else:
        a = 3 * a + 1
    print(a)

print("===========================")


def square(x):
    return tf.multiply(x, x)


# gradients_function computes the partial derivative of square() with respect to its input.
grad = tfe.gradients_function(square)
print(square(3.))  # [9.]
print(grad(3.))  # [6.]
print(grad(2.))  # [4.]

# The same gradients_function call can be used to compute the second-order derivative of square().
gradgrad = tfe.gradients_function(lambda x: grad(x)[0])
print(gradgrad(3.))  # [2.]
Example #22
# coding=utf-8
import time
import tensorflow as tf
import tensorflow.contrib.eager as tfe

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def f(x):
    return tf.multiply(x, x)  # Or x * x


assert 9 == f(3.).numpy()

df = tfe.gradients_function(f)
assert 6 == df(3.)[0].numpy()

# Second order derivative.
d2f = tfe.gradients_function(lambda x: df(x)[0])
assert 2 == d2f(3.)[0].numpy()

# Third order derivative.
d3f = tfe.gradients_function(lambda x: d2f(x)[0])
assert 0 == d3f(3.)[0].numpy()


def prediction(input, weight, bias):
    return input * weight + bias

Example #23
import os

import tensorflow.contrib.eager as tfe
tfe.enable_eager_execution()
import tensorflow as tf


def f(x):
    # f(x) = x^2 + 3
    return tf.multiply(x, x) + 3


print("f(4) = %.2f" % f(4.))

# First order derivative
df = tfe.gradients_function(f)  # tfe == eager mode
print("df(4) = %.2f" % df(4.)[0])

# Second order derivative
'''TODO: fill in the expression for the second order derivative using Eager mode gradients'''
d2f = tfe.gradients_function(lambda x: df(x)[0])
print("d2f(4) = %.2f" % d2f(4.)[0])

a = tf.constant(12)
counter = 0
while not tf.equal(a, 1):
    if tf.equal(a % 2, 0):
        a = a / 2
    else:
        a = 3 * a + 1
    print(a)
Example #24
    target = tfd.MultivariateNormalTriL(loc=true_mean, scale_tril=chol)


    # Assume that the state is passed as a list of tensors `x` and `y`.
    # Then the target function is defined as follows:
    def target_fn(x, y):
        # Stack the input tensors together
        z = tf.concat([x, y], axis=-1) - true_mean
        return target.log_prob(z)


    sample_shape = [3, 5]
    state = [tf.ones(sample_shape + [2], dtype=dtype),
             tf.ones(sample_shape + [1], dtype=dtype)]
    fn_val = target_fn(*state)
    grad_fn = tfe.gradients_function(target_fn)
    if tfe.executing_eagerly():
        grads = grad_fn(*state)
    else:
        grads = tf.gradients(fn_val, state)

    # We can either pass the `sample_shape` of the `state` or not, which impacts
    # computational speed of `diag_jacobian`
    _, diag_jacobian_shape_passed = diag_jacobian(
        xs=state, ys=grads, sample_shape=tf.shape(fn_val))
    _, diag_jacobian_shape_none = diag_jacobian(
        xs=state, ys=grads)

    diag_jacobian_shape_passed_ = sess.run(diag_jacobian_shape_passed)
    diag_jacobian_shape_none_ = sess.run(diag_jacobian_shape_none)
Example #25
        print(x[i, j])

x = tf.constant([1.0, 2.0, 3.0])

assert type(x.numpy()) == np.ndarray
squared = np.square(x)

for i in x:
    print(i)


def square(x):
    return x**2


grad = tfe.gradients_function(square)

print(square(3.))
print(grad(3.))

x = tfe.Variable(2.0)


def loss(y):
    return (y - x**2)**2


grad = tfe.implicit_gradients(loss)

print(loss(7.))
print(grad(7.))
Example #26
for i in x:
    print(i)

## Gradients - Automatic differentiation is built into eager execution
# Under the hood ...
#    - Operations are recorded on a tape
#    - The tape is played back to compute gradients
#         - This is reverse-mode differentiation (backpropagation)


# Eg: 01
def square(x):
    return x**2


grad = tfe.gradients_function(square)  # differentiation w.r.t input of square

print(square(3.))
print(grad(3.))

# Eg: 02
x = tfe.Variable(2.0)  # use when eager execution is enabled


def loss(y):
    return (y - x**2)**2


grad = tfe.implicit_gradients(
    loss)  # Differentiation w.r.t variables used to compute loss
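A hedged usage sketch continuing the fragment above: `tfe.implicit_gradients` returns a function that yields (gradient, variable) pairs for the variables touched by `loss`.

print(loss(7.))  # (7 - 2**2)**2 = 9.0
for g, v in grad(7.):
    print(g, v)  # d loss / d x = 2*(y - x**2)*(-2*x) = -24.0 at y = 7, x = 2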