def test_cv_gradients_multiple_gate_parameters(self, gaussian_dev, tol):
    "Tests that gates with multiple free parameters yield correct gradients."
    par = [0.4, -0.3, -0.7, 0.2]

    def qf(r0, phi0, r1, phi1):
        qml.Squeezing(r0, phi0, wires=[0])
        qml.Squeezing(r1, phi1, wires=[0])
        return qml.expval(qml.NumberOperator(0))

    q = qml.QNode(qf, gaussian_dev)
    grad_F = q.jacobian(par, method='F')
    grad_A = q.jacobian(par, method='A')
    grad_A2 = q.jacobian(par, method='A', force_order2=True)

    # analytic method works for every parameter
    assert q.grad_method_for_par == {i: 'A' for i in range(4)}
    # the different methods agree
    assert grad_A == pytest.approx(grad_F, abs=tol)
    assert grad_A2 == pytest.approx(grad_F, abs=tol)

    # check against the known analytic formula
    r0, phi0, r1, phi1 = par
    dn = np.zeros([4])
    dn[0] = np.cosh(2 * r1) * np.sinh(2 * r0) + np.cos(phi0 - phi1) * np.cosh(2 * r0) * np.sinh(2 * r1)
    dn[1] = -0.5 * np.sin(phi0 - phi1) * np.sinh(2 * r0) * np.sinh(2 * r1)
    dn[2] = np.cos(phi0 - phi1) * np.cosh(2 * r1) * np.sinh(2 * r0) + np.cosh(2 * r0) * np.sinh(2 * r1)
    dn[3] = 0.5 * np.sin(phi0 - phi1) * np.sinh(2 * r0) * np.sinh(2 * r1)
    assert dn[np.newaxis, :] == pytest.approx(grad_F, abs=tol)
def test_cv_gradients_multiple_gate_parameters(self):
    "Tests that gates with multiple free parameters yield correct gradients."
    self.logTestName()
    par = [0.4, -0.3, -0.7, 0.2]

    def qf(r0, phi0, r1, phi1):
        qml.Squeezing(r0, phi0, wires=[0])
        qml.Squeezing(r1, phi1, wires=[0])
        return qml.expval(qml.NumberOperator(0))

    q = qml.QNode(qf, self.gaussian_dev)
    grad_F = q.jacobian(par, method='F')
    grad_A = q.jacobian(par, method='A')
    grad_A2 = q.jacobian(par, method='A', force_order2=True)

    # analytic method works for every parameter
    self.assertTrue(q.grad_method_for_par == {i: 'A' for i in range(4)})
    # the different methods agree
    self.assertAllAlmostEqual(grad_A, grad_F, delta=self.tol)
    self.assertAllAlmostEqual(grad_A2, grad_F, delta=self.tol)

    # check against the known analytic formula
    r0, phi0, r1, phi1 = par
    dn = np.zeros([4])
    dn[0] = np.cosh(2 * r1) * np.sinh(2 * r0) + np.cos(phi0 - phi1) * np.cosh(2 * r0) * np.sinh(2 * r1)
    dn[1] = -0.5 * np.sin(phi0 - phi1) * np.sinh(2 * r0) * np.sinh(2 * r1)
    dn[2] = np.cos(phi0 - phi1) * np.cosh(2 * r1) * np.sinh(2 * r0) + np.cosh(2 * r0) * np.sinh(2 * r1)
    dn[3] = 0.5 * np.sin(phi0 - phi1) * np.sinh(2 * r0) * np.sinh(2 * r1)
    self.assertAllAlmostEqual(grad_A, dn, delta=self.tol)
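# Sketch (not part of either test suite above): a finite-difference check of the
# analytic gradient dn, assuming the closed-form mean photon number for two
# composed squeezers S(r1, phi1) S(r0, phi0) acting on vacuum,
#   <n> = sinh(r0)^2 cosh(r1)^2 + cosh(r0)^2 sinh(r1)^2
#         + 0.5 cos(phi0 - phi1) sinh(2 r0) sinh(2 r1).
import numpy as np

def mean_n(r0, phi0, r1, phi1):
    return (np.sinh(r0)**2 * np.cosh(r1)**2 + np.cosh(r0)**2 * np.sinh(r1)**2
            + 0.5 * np.cos(phi0 - phi1) * np.sinh(2 * r0) * np.sinh(2 * r1))

par = np.array([0.4, -0.3, -0.7, 0.2])
h = 1e-6
for k in range(4):
    shift = np.zeros(4)
    shift[k] = h
    fd = (mean_n(*(par + shift)) - mean_n(*(par - shift))) / (2 * h)
    print(k, fd)  # should reproduce dn[k] from the tests above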
def test_function_overloading():
    a = pe.pseudo_Obs(17, 2.9, 'e1')
    b = pe.pseudo_Obs(4, 0.8, 'e1')

    fs = [lambda x: x[0] + x[1], lambda x: x[1] + x[0],
          lambda x: x[0] - x[1], lambda x: x[1] - x[0],
          lambda x: x[0] * x[1], lambda x: x[1] * x[0],
          lambda x: x[0] / x[1], lambda x: x[1] / x[0],
          lambda x: np.exp(x[0]),
          lambda x: np.sin(x[0]), lambda x: np.cos(x[0]), lambda x: np.tan(x[0]),
          lambda x: np.log(x[0]), lambda x: np.sqrt(np.abs(x[0])),
          lambda x: np.sinh(x[0]), lambda x: np.cosh(x[0]), lambda x: np.tanh(x[0])]

    for i, f in enumerate(fs):
        t1 = f([a, b])
        t2 = pe.derived_observable(f, [a, b])
        c = t2 - t1
        assert c.is_zero()

    assert np.log(np.exp(b)) == b
    assert np.exp(np.log(b)) == b
    assert np.sqrt(b ** 2) == b
    assert np.sqrt(b) ** 2 == b

    np.arcsin(1 / b)
    np.arccos(1 / b)
    np.arctan(1 / b)
    np.arctanh(1 / b)
    np.sinc(1 / b)
def simulator_1(gl, color):
    # Morris-Lecar style membrane model parameters
    V1 = -1.2
    V2 = 18
    V3 = 2
    V4 = 30
    Iap = 100
    gca = 4.4
    Vca = 120
    gk = 8
    VK = -84
    gl = gl  # 2
    Vl = -60
    dt = 0.01
    Vot_Threshold = 0

    init_V, init_W, time, substep = -26, 0.1135, 300, 0.2
    rV = init_V
    rW = init_W
    rV_1 = 0
    rW_1 = 0
    i = 0
    total = []

    for step in range(1, int(time / substep)):
        dv = (1 / 20) * (Iap - gca * ((1 + np.tanh((rV - V1) / V2)) / 2) * (rV - Vca)
                         - gl * (rV - Vl) - rW * (gk * (rV - VK)))
        dw = 0.04 * ((0.5 * (1 + np.tanh((rV - V3) / V4)) - rW)
                     / (1 / np.cosh((rV - V3) / (2 * V4))))
        rV_1 = rV
        rW_1 = rW
        rV = rV + dv
        rW = rW + dw
        total.append(rV)

        # linearly interpolate the time of each upward threshold crossing
        if rV_1 < Vot_Threshold and rV > Vot_Threshold:
            delt_t = 2 * substep * (0. - rV_1) / (rV - rV_1)
            i += 1
            if i == 2:
                start = step * substep + delt_t
            if i == 7:
                final = step * substep + delt_t

    plt.plot(np.arange(len(total)), total, c=color, alpha=0.7, label=str(gl))
    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
               ncol=3, mode="expand", borderaxespad=0.)
    return final - start
def lico2_entropic_change_Moura2016(sto, c_p_max):
    """
    Lithium Cobalt Oxide (LiCoO2) entropic change in open-circuit potential (OCP)
    at a temperature of 298.15 K as a function of the stoichiometry. The fit is
    taken from Scott Moura's FastDFN code [1].

    References
    ----------
    .. [1] https://github.com/scott-moura/fastDFN

    Parameters
    ----------
    sto : double
        Stoichiometry of material (li-fraction)
    c_p_max : double
        Maximum lithium concentration of the material
    """
    # Since the equation for LiCo2 from this ref. has the stretch factor,
    # should this too? If not, the "bumps" in the OCV don't line up.
    stretch = 1.062
    sto = stretch * sto

    du_dT = (
        0.07645 * (-54.4806 / c_p_max) * ((1.0 / np.cosh(30.834 - 54.4806 * sto)) ** 2)
        + 2.1581 * (-50.294 / c_p_max) * ((np.cosh(52.294 - 50.294 * sto)) ** (-2))
        + 0.14169 * (19.854 / c_p_max) * ((np.cosh(11.0923 - 19.8543 * sto)) ** (-2))
        - 0.2051 * (5.4888 / c_p_max) * ((np.cosh(1.4684 - 5.4888 * sto)) ** (-2))
        - (0.2531 / 0.1316 / c_p_max) * ((np.cosh((-sto + 0.56478) / 0.1316)) ** (-2))
        - (0.02167 / 0.006 / c_p_max) * ((np.cosh((sto - 0.525) / 0.006)) ** (-2))
    )

    return du_dT
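# Sketch usage (values are illustrative, not taken from the original source):
# evaluate the entropic change over a range of stoichiometries with an assumed
# maximum concentration c_p_max.
import numpy as np

c_p_max = 51217.9  # mol/m^3, illustrative value for the positive electrode
sto = np.linspace(0.4, 0.99, 5)
print(lico2_entropic_change_Moura2016(sto, c_p_max))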
def test_number_state_gradient(self, gaussian_dev, tol):
    "Tests that the automatic gradient of a squeezed state with number state expectation is correct."

    @qml.qnode(gaussian_dev)
    def circuit(y):
        qml.Squeezing(y, 0., wires=[0])
        return qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))

    grad_fn = autograd.grad(circuit, 0)

    # (d/dr) |<2|S(r)>|^2 = 0.5 tanh(r)^3 (2 csch(r)^2 - 1) sech(r)
    for r in sqz_vals[1:]:  # formula above is not valid for r=0
        autograd_val = grad_fn(r)
        manualgrad_val = 0.5 * np.tanh(r)**3 * (2 / (np.sinh(r)**2) - 1) / np.cosh(r)
        assert autograd_val == pytest.approx(manualgrad_val, abs=tol)
def test_number_state_gradient(self):
    "Tests that the automatic gradient of a squeezed state with number state expectation is correct."
    self.logTestName()

    @qml.qnode(self.gaussian_dev)
    def circuit(y):
        qml.Squeezing(y, 0., wires=[0])
        return qml.expval(qml.NumberState(np.array([2, 0]), wires=[0, 1]))

    grad_fn = autograd.grad(circuit, 0)

    # (d/dr) |<2|S(r)>|^2 = 0.5 tanh(r)^3 (2 csch(r)^2 - 1) sech(r)
    for r in sqz_vals[1:]:  # formula above is not valid for r=0
        autograd_val = grad_fn(r)
        manualgrad_val = 0.5 * np.tanh(r)**3 * (2 / (np.sinh(r)**2) - 1) / np.cosh(r)
        self.assertAlmostEqual(autograd_val, manualgrad_val, delta=self.tol)
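# Sketch (independent of the tests above): a finite-difference check of the
# manual gradient formula, assuming the squeezed-vacuum two-photon probability
#   |<2|S(r)|0>|^2 = tanh(r)^2 / (2 cosh(r)).
import numpy as np

def prob_two_photons(r):
    return np.tanh(r)**2 / (2 * np.cosh(r))

h = 1e-6
for r in [0.1, 0.5, 1.0]:
    fd = (prob_two_photons(r + h) - prob_two_photons(r - h)) / (2 * h)
    manual = 0.5 * np.tanh(r)**3 * (2 / np.sinh(r)**2 - 1) / np.cosh(r)
    print(r, fd, manual)  # the two values should agree to ~1e-9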
def test_man_grad():
    a = pe.pseudo_Obs(17, 2.9, 'e1')
    b = pe.pseudo_Obs(4, 0.8, 'e1')

    fs = [lambda x: x[0] + x[1], lambda x: x[1] + x[0],
          lambda x: x[0] - x[1], lambda x: x[1] - x[0],
          lambda x: x[0] * x[1], lambda x: x[1] * x[0],
          lambda x: x[0] / x[1], lambda x: x[1] / x[0],
          lambda x: np.exp(x[0]),
          lambda x: np.sin(x[0]), lambda x: np.cos(x[0]), lambda x: np.tan(x[0]),
          lambda x: np.log(x[0]), lambda x: np.sqrt(x[0]),
          lambda x: np.sinh(x[0]), lambda x: np.cosh(x[0]), lambda x: np.tanh(x[0])]

    for i, f in enumerate(fs):
        t1 = f([a, b])
        t2 = pe.derived_observable(f, [a, b])
        c = t2 - t1
        assert c.value == 0.0, str(i)
        assert np.all(np.abs(c.deltas['e1']) < 1e-14), str(i)
def simulator(gl):
    # Morris-Lecar style membrane model parameters
    V1 = -1.2
    V2 = 18
    V3 = 2
    V4 = 30
    Iap = 100
    gca = 4.4
    Vca = 120
    gk = 8
    VK = -84
    gl = gl  # 2
    Vl = -60
    dt = 0.01
    Vot_Threshold = 0

    init_V, init_W, time, substep = -26, 0.1135, 300, 0.2
    rV = init_V
    rW = init_W
    rV_1 = 0
    rW_1 = 0
    i = 0

    for step in range(1, int(time / substep)):
        dv = (1 / 20) * (Iap - gca * ((1 + np.tanh((rV - V1) / V2)) / 2) * (rV - Vca)
                         - gl * (rV - Vl) - rW * (gk * (rV - VK)))
        dw = 0.04 * ((0.5 * (1 + np.tanh((rV - V3) / V4)) - rW)
                     / (1 / np.cosh((rV - V3) / (2 * V4))))
        rV_1 = rV
        rW_1 = rW
        rV = rV + dv
        rW = rW + dw

        # linearly interpolate the time of each upward threshold crossing
        if rV_1 < Vot_Threshold and rV > Vot_Threshold:
            delt_t = 2 * substep * (0. - rV_1) / (rV - rV_1)
            i += 1
            if i == 2:
                start = step * substep + delt_t
            if i == 7:
                final = step * substep + delt_t

    return final - start
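# Sketch usage (not from the original source): the return value is the elapsed
# time between the 2nd and 7th upward threshold crossings, i.e. five spike
# intervals, so dividing by 5 gives an average spike period for a given leak
# conductance gl (the gl values below are illustrative).
import numpy as np

for gl in [2.0, 2.5, 3.0]:
    print(gl, simulator(gl) / 5.0)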
def complicated_fun(a, b, c, d, e, f=1.1, g=9.0):
    return a + np.sin(b) + np.cosh(c) + np.cos(d) + np.tan(e) + f + g
def test_cosh():
    fun = lambda x: 3.0 * np.cosh(x)
    check_grads(fun)(npr.randn())
anp.absolute.defjvp(lambda g, ans, gvs, vs, x: anp.real(g * anp.conj(x)) / ans)
anp.reciprocal.defjvp(lambda g, ans, gvs, vs, x: -g / x**2)
anp.exp.defjvp(lambda g, ans, gvs, vs, x: ans * g)
anp.exp2.defjvp(lambda g, ans, gvs, vs, x: ans * anp.log(2) * g)
anp.expm1.defjvp(lambda g, ans, gvs, vs, x: (ans + 1) * g)
anp.log.defjvp(lambda g, ans, gvs, vs, x: g / x)
anp.log2.defjvp(lambda g, ans, gvs, vs, x: g / x / anp.log(2))
anp.log10.defjvp(lambda g, ans, gvs, vs, x: g / x / anp.log(10))
anp.log1p.defjvp(lambda g, ans, gvs, vs, x: g / (x + 1))
anp.sin.defjvp(lambda g, ans, gvs, vs, x: g * anp.cos(x))
anp.cos.defjvp(lambda g, ans, gvs, vs, x: -g * anp.sin(x))
anp.tan.defjvp(lambda g, ans, gvs, vs, x: g / anp.cos(x)**2)
anp.arcsin.defjvp(lambda g, ans, gvs, vs, x: g / anp.sqrt(1 - x**2))
anp.arccos.defjvp(lambda g, ans, gvs, vs, x: -g / anp.sqrt(1 - x**2))
anp.arctan.defjvp(lambda g, ans, gvs, vs, x: g / (1 + x**2))
anp.sinh.defjvp(lambda g, ans, gvs, vs, x: g * anp.cosh(x))
anp.cosh.defjvp(lambda g, ans, gvs, vs, x: g * anp.sinh(x))
anp.tanh.defjvp(lambda g, ans, gvs, vs, x: g / anp.cosh(x)**2)
anp.arcsinh.defjvp(lambda g, ans, gvs, vs, x: g / anp.sqrt(x**2 + 1))
anp.arccosh.defjvp(lambda g, ans, gvs, vs, x: g / anp.sqrt(x**2 - 1))
anp.arctanh.defjvp(lambda g, ans, gvs, vs, x: g / (1 - x**2))
anp.rad2deg.defjvp(lambda g, ans, gvs, vs, x: g / anp.pi * 180.0)
anp.degrees.defjvp(lambda g, ans, gvs, vs, x: g / anp.pi * 180.0)
anp.deg2rad.defjvp(lambda g, ans, gvs, vs, x: g * anp.pi / 180.0)
anp.radians.defjvp(lambda g, ans, gvs, vs, x: g * anp.pi / 180.0)
anp.square.defjvp(lambda g, ans, gvs, vs, x: g * 2 * x)
anp.sqrt.defjvp(lambda g, ans, gvs, vs, x: g * 0.5 * x**-0.5)
anp.sinc.defjvp(lambda g, ans, gvs, vs, x: g * (anp.cos(anp.pi * x) * anp.pi * x - anp.sin(anp.pi * x)) / (anp.pi * x**2))
anp.reshape.defjvp(lambda g, ans, gvs, vs, x, shape, order=None: anp.reshape(g, vs.shape, order=order))
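# Sketch (plain NumPy, outside the autograd machinery above): a quick numerical
# check that two of the tangent rules match finite differences,
# d/dx cosh(x) = sinh(x) and d/dx tanh(x) = 1 / cosh(x)^2.
import numpy as np

x, h = 0.7, 1e-6
print((np.cosh(x + h) - np.cosh(x - h)) / (2 * h), np.sinh(x))
print((np.tanh(x + h) - np.tanh(x - h)) / (2 * h), 1 / np.cosh(x)**2)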
def complicated_fun(a, b, c, d, e, f=1.1, g=9.0):
    return a + np.sin(b) + np.cosh(c) + np.cos(d) + np.tan(e) + f + g
def simple_fun(a, b):
    return a + np.sin(a) + np.cosh(b)
def _rnn_grad(x, W, b, Wout, bout, label, n):
    h1__1_stack, h1__1 = [], None
    h1__0_stack, h1__0 = [], None
    out_stack, out = [], None
    h1_stack = []
    h1 = x
    _for1 = list(range(n))

    # Forward pass: unrolled RNN followed by a logistic loss
    for i in _for1:
        h1__1_stack.append(h1__1)
        h1__1 = np.dot(h1, W)
        h1__0_stack.append(h1__0)
        h1__0 = h1__1 + b
        h1_stack.append(h1)
        h1 = np.tanh(h1__0)
    out__0 = np.dot(h1, Wout)
    out = out__0 + bout
    loss__2 = label * out
    loss__7 = -out
    loss__6 = np.exp(loss__7)
    loss__5 = 1 + loss__6
    loss__4 = np.log(loss__5)
    loss__3 = out + loss__4
    loss__1 = loss__2 - loss__3

    # Begin Backward Pass
    g_loss = 1
    g_h1__0 = 0
    g_h1__1 = 0
    g_b = 0
    g_W = 0

    # Reverse of: loss = -loss__0
    g_loss__0 = -g_loss
    # Reverse of: loss__0 = np.sum(loss__1)
    g_loss__1 = repeat_to_match_shape(g_loss__0, loss__1)
    # Reverse of: loss__1 = loss__2 - loss__3
    g_loss__2 = sum_to_match_shape(g_loss__1, loss__2)
    g_loss__3 = sum_to_match_shape(-g_loss__1, loss__3)
    # Reverse of: loss__3 = out + loss__4
    g_out = sum_to_match_shape(g_loss__3, out)
    g_loss__4 = sum_to_match_shape(g_loss__3, loss__4)
    # Reverse of: loss__4 = np.log(loss__5)
    g_loss__5 = g_loss__4 / loss__5
    # Reverse of: loss__5 = 1 + loss__6
    g_loss__6 = sum_to_match_shape(g_loss__5, loss__6)
    # Reverse of: loss__6 = np.exp(loss__7)
    g_loss__7 = g_loss__6 * np.exp(loss__7)
    # Reverse of: loss__7 = -out
    g_out += -g_loss__7
    g_out += sum_to_match_shape(g_loss__2 * label, out)
    # Reverse of: out = out__0 + bout
    g_out__0 = sum_to_match_shape(g_out, out__0)
    g_bout = sum_to_match_shape(g_out, bout)
    # Reverse of: out__0 = np.dot(h1, Wout)
    g_h1 = grad_dot_A(g_out__0, h1, Wout)
    g_Wout = grad_dot_B(g_out__0, h1, Wout)

    _for1 = reversed(_for1)
    for i in _for1:
        h1 = h1_stack.pop()
        tmp_g0 = g_h1 / np.cosh(h1__0)**2.0
        g_h1 = 0
        g_h1__0 += tmp_g0
        h1__0 = h1__0_stack.pop()
        tmp_g1 = sum_to_match_shape(g_h1__0, h1__1)
        tmp_g2 = sum_to_match_shape(g_h1__0, b)
        g_h1__0 = 0
        g_h1__1 += tmp_g1
        g_b += tmp_g2
        h1__1 = h1__1_stack.pop()
        tmp_g3 = grad_dot_A(g_h1__1, h1, W)
        tmp_g4 = grad_dot_B(g_h1__1, h1, W)
        g_h1__1 = 0
        g_h1 += tmp_g3
        g_W += tmp_g4

    return g_W, g_b, g_Wout, g_bout
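# The helper functions used by _rnn_grad are not defined in this snippet. Below
# is one plausible, minimal set of definitions reconstructed from how they are
# called above; the originating project may implement them differently.
import numpy as np

def repeat_to_match_shape(g, target):
    # Broadcast a scalar upstream gradient to the shape of `target` (reverse of np.sum).
    return np.full(np.shape(target), g)

def sum_to_match_shape(g, target):
    # Sum the upstream gradient down to the shape of `target` (reverse of broadcasting).
    g = np.asarray(g, dtype=float)
    while g.ndim > np.ndim(target):
        g = g.sum(axis=0)
    for axis, size in enumerate(np.shape(target)):
        if size == 1:
            g = g.sum(axis=axis, keepdims=True)
    return g

def grad_dot_A(g, A, B):
    # Gradient of np.dot(A, B) with respect to A (assumes 2-D operands).
    return np.dot(g, B.T)

def grad_dot_B(g, A, B):
    # Gradient of np.dot(A, B) with respect to B (assumes 2-D operands).
    return np.dot(A.T, g)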
def f(x):
    return (x[0] * x[1] + np.sin(x[2]) * np.exp(x[3] / x[1] / x[0])
            - np.sqrt(2) / np.cosh(x[4] / x[0]))
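# Sketch (not from the original source): a central-difference gradient of f at
# an illustrative point, useful as a reference when checking any automatic
# differentiation of this function.
import numpy as np

x0 = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
h = 1e-6
grad_fd = np.array([(f(x0 + h * e) - f(x0 - h * e)) / (2 * h) for e in np.eye(len(x0))])
print(grad_fd)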
def test_cosh():
    fun = lambda x: 3.0 * np.cosh(x)
    d_fun = grad(fun)
    check_grads(fun, npr.randn())
    check_grads(d_fun, npr.randn())
def logcosh(y_true, y_pred):
    return np.mean(np.log(np.cosh(y_pred - y_true)), axis=-1)
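# Sketch usage (illustrative values): the log-cosh loss behaves like a squared
# error for small residuals and like an absolute error for large ones.
import numpy as np

y_true = np.array([0.0, 1.0, 2.0])
y_pred = np.array([0.1, 0.9, 4.0])
print(logcosh(y_true, y_pred))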
def test_cosh():
    fun = lambda x: 3.0 * np.cosh(x)
    check_grads(fun)(npr.randn())