def test_against_sympy_jtheta(self):
    """Compare genus-1 RiemannTheta against sympy/mpmath's jtheta(3, w, q).

    Uses the classical identity theta(z | tau) = jtheta(3, pi*z, q) with
    nome q = exp(pi*i*tau), evaluated at random complex z for two values
    of tau.  Checks that the mean relative error stays below 1e-14.
    """
    N = 64
    sigma = 2
    z = sigma * randn(N) + 1.j * sigma * randn(N)
    z = z.reshape((N, 1))
    tau = [[1.0j]]

    # jtheta inputs
    w = numpy.pi * z[:, 0]
    q = numpy.exp(numpy.pi * 1.0j * tau[0][0])

    values1 = RiemannTheta(z, tau, epsilon=1e-16)
    # BUGFIX: numpy.complex was deprecated in NumPy 1.20 and removed in
    # 1.24; numpy.complex128 is the equivalent concrete dtype.
    values2 = numpy.array([jtheta(3, wi, q) for wi in w],
                          dtype=numpy.complex128)

    rel_error = abs((values1 - values2) / values1)
    rel_error_avg = numpy.mean(rel_error)
    self.assertLess(rel_error_avg, 1e-14)

    # repeat for different tau
    tau = [[1.0 + 2.5j]]
    q = numpy.exp(numpy.pi * 1.0j * tau[0][0])
    values1 = RiemannTheta(z, tau, epsilon=1e-16)
    values2 = numpy.array([jtheta(3, wi, q) for wi in w],
                          dtype=numpy.complex128)
    rel_error = abs((values1 - values2) / values1)
    rel_error_avg = numpy.mean(rel_error)
    self.assertLess(rel_error_avg, 1e-14)
def test_value(self):
    """Check RiemannTheta at two points against reference values from Maple."""
    Omega = self.Omega3

    # theta at the origin
    z = [0, 0, 0]
    computed = RiemannTheta(z, Omega, epsilon=1e-14)
    expected = 1.2362529854204190 - 0.52099320642367818e-10j
    self.assertLess(abs(computed - expected), 1e-8)

    # theta at a generic complex point
    w = [0.2 + 0.5j, 0.3 - 0.1j, -0.1 + 0.2j]
    computed = RiemannTheta(w, Omega, epsilon=1e-14)
    expected = 1.2544694041047501 - 0.77493173321770725j
    self.assertLess(abs(computed - expected), 1e-8)
def test_issue159(self):
    """Regression test for issue #159: theta(5j | 10j) should equal 2."""
    Omega = [[10j]]
    z = [5j]
    expected = 2
    computed = RiemannTheta(z, Omega)
    self.assertLess(abs(computed - expected), 1e-8)
def test_against_naive_implementation_genus1(self):
    """Compare the genus 1 Riemann theta function against the naive
    implementation written above (directly using the summation formula).
    """
    def relative_errors(z, tau):
        # relative error of RiemannTheta w.r.t. the naive reference thetag1
        fast = RiemannTheta(z, tau, epsilon=1e-16)
        naive = thetag1(z, tau[0][0])[:, 0]
        return abs((fast - naive) / fast)

    N = 64

    # first test the relative error using values close to the origin,
    # avoiding the double-exponential growth: both max and mean must be tiny
    sigma = 0.1
    z = (sigma * randn(N) + 1.j * sigma * randn(N)).reshape((N, 1))
    errors = relative_errors(z, [[1.0j]])
    self.assertLess(numpy.max(errors), 1e-14)
    self.assertLess(numpy.mean(errors), 1e-14)

    # next, test the relative error using larger magnitude values. we don't
    # test the max error due to possible numerical roundoff issues
    sigma = 3
    z = (sigma * randn(N) + 1.j * sigma * randn(N)).reshape((N, 1))
    errors = relative_errors(z, [[1.0j]])
    self.assertLess(numpy.mean(errors), 1e-14)

    # repeat for different tau
    errors = relative_errors(z, [[1.0 + 2.5j]])
    self.assertLess(numpy.mean(errors), 1e-14)
def test_issue84_value(self):
    """Regression test for issue #84: the value at a fixed point must be
    stable across many repeated evaluations."""
    z = [0.5 - 1.10093687j, -0.11723434j]
    Omega = [[0.5 + 2j, 0.5 + 1j],
             [0.5 + 1j, 1 + 1.5j]]
    expected = 0.963179246467 - 6.2286820685j
    tolerance = 1e-5
    for _ in range(1000):
        theta = RiemannTheta(z, Omega)
        error = abs(theta - expected)
        self.assertLess(
            error, tolerance,
            '%s not less than %s'
            '\ntheta: %s\nactual: %s' % (error, tolerance, theta, expected))
def test_gradient(self):
    """Check RiemannTheta.gradient against manually assembled per-component
    first derivatives, for genus 3 and genus 4 period matrices."""
    # genus 3
    Omega = self.Omega3
    # generate random test z-values
    N = 32
    u = numpy.random.rand(N, 3)
    v = numpy.random.rand(N, 3)
    W = u + 1.0j * v
    # manually compute gradients
    dz0 = RiemannTheta(W, Omega, derivs=[[1, 0, 0]])
    dz1 = RiemannTheta(W, Omega, derivs=[[0, 1, 0]])
    dz2 = RiemannTheta(W, Omega, derivs=[[0, 0, 1]])
    # BUGFIX: numpy.complex was deprecated in NumPy 1.20 and removed in
    # 1.24; numpy.complex128 is the equivalent concrete dtype.
    grad1 = numpy.zeros_like(W, dtype=numpy.complex128)
    grad1[:, 0] = dz0
    grad1[:, 1] = dz1
    grad1[:, 2] = dz2
    # compute using "gradient"
    grad2 = RiemannTheta.gradient(W, Omega)
    self.assertLess(numpy.linalg.norm(grad1 - grad2), 1e-14)

    # genus 4
    Omega = self.Omega4
    # generate random test z-values
    N = 32
    u = numpy.random.rand(N, 4)
    v = numpy.random.rand(N, 4)
    W = u + 1.0j * v
    # manually compute gradients
    dz0 = RiemannTheta(W, Omega, derivs=[[1, 0, 0, 0]])
    dz1 = RiemannTheta(W, Omega, derivs=[[0, 1, 0, 0]])
    dz2 = RiemannTheta(W, Omega, derivs=[[0, 0, 1, 0]])
    dz3 = RiemannTheta(W, Omega, derivs=[[0, 0, 0, 1]])
    grad1 = numpy.zeros_like(W, dtype=numpy.complex128)
    grad1[:, 0] = dz0
    grad1[:, 1] = dz1
    grad1[:, 2] = dz2
    grad1[:, 3] = dz3
    # compute using "gradient"
    grad2 = RiemannTheta.gradient(W, Omega)
    self.assertLess(numpy.linalg.norm(grad1 - grad2), 1e-14)
def test_first_derivatives(self):
    """Check first-order partial derivatives against reference values
    computed in Maple, for genus 3 and genus 4 period matrices."""
    # genus 3 at a generic point
    w = [0.2 + 0.5j, 0.3 - 0.1j, -0.1 + 0.2j]
    Omega = self.Omega3
    maple = [
        -5.7295900733729553 - 0.89199375315523345j,
        -0.16300987772384356 - 0.65079269102999180j,
        1.0115406077003542 + 0.030528533907836019j,
    ]
    for k, expected in enumerate(maple):
        direction = [[0] * 3]
        direction[0][k] = 1
        value = RiemannTheta(w, Omega, epsilon=1e-14, derivs=direction)
        self.assertLess(abs(value - expected), 1e-8)

    # genus 4 at the origin: every first derivative vanishes
    Omega = self.Omega4
    w = [0, 0, 0, 0]
    for k in range(4):
        direction = [[0] * 4]
        direction[0][k] = 1
        value = RiemannTheta(w, Omega, epsilon=1e-14, derivs=direction)
        self.assertLess(abs(value - 0.0), 1e-8)

    # different value of w
    w = [
        -0.37704918 - 0.18456279j, 0.63934426 + 0.42591413j,
        0.54918033 + 0.09937996j, -0.21721311 - 0.07808426j
    ]
    maple = [
        3.3644150756 + 2.5018071784j,
        -2.9431860155 + 5.6802762853j,
        8.0319838396 + 3.5491434873j,
        -6.0837267311 - 2.4867680289j,
    ]
    for k, expected in enumerate(maple):
        direction = [[0] * 4]
        direction[0][k] = 1
        value = RiemannTheta(w, Omega, epsilon=1e-14, derivs=direction)
        self.assertLess(abs(value - expected), 1e-8)