def test_iterator_learning_rate(self):
    """Test setting the learning rate as iterator."""

    def learning_rate():
        # Power-law schedule: step n gets a rate of 0.1 * n**0.6.
        exponent = 0.6
        prefactor = 0.1

        def _powerlaw():
            step = 0
            while True:
                yield prefactor * step**exponent
                step += 1

        return _powerlaw()

    # Objective: squared distance of ||x|| from 1 (minimum on the unit sphere).
    def objective(x):
        return (np.linalg.norm(x) - 1) ** 2

    def grad(x):
        norm = np.linalg.norm(x)
        return 2 * (norm - 1) * x / norm

    optimizer = GradientDescent(maxiter=20, learning_rate=learning_rate)
    result = optimizer.minimize(objective, np.array([1, 0.5, -2]), grad)

    self.assertLess(result.fun, 1e-5)
def test_pauli_two_design(self):
    """Test standard gradient descent on the Pauli two-design example."""
    circuit = PauliTwoDesign(3, reps=3, seed=2)
    parameters = list(circuit.parameters)
    # Expectation value <psi(x)| Z Z I |psi(x)> as an opflow expression.
    obs = Z ^ Z ^ I
    expr = ~StateFn(obs) @ StateFn(circuit)

    initial_point = np.array(
        [
            0.1822308,
            -0.27254251,
            0.83684425,
            0.86153976,
            -0.7111668,
            0.82766631,
            0.97867993,
            0.46136964,
            2.27079901,
            0.13382699,
            0.29589915,
            0.64883193,
        ]
    )

    def objective(x):
        bound = expr.bind_parameters(dict(zip(parameters, x)))
        return bound.eval().real

    optimizer = GradientDescent(maxiter=100, learning_rate=0.1, perturbation=0.1)
    result = optimizer.optimize(circuit.num_parameters, objective, initial_point=initial_point)

    self.assertLess(result[1], -0.95)  # final loss
    self.assertEqual(result[2], 100)  # function evaluations
def test_gradient_descent(self):
    """Test GradientDescent converges when an analytic gradient is supplied."""
    optimizer = GradientDescent(maxiter=100000, tol=1e-06, learning_rate=1e-3)
    res = self._optimize(optimizer, grad=True)
    # res[2] is presumably the function-evaluation count — it must not exceed maxiter.
    self.assertLessEqual(res[2], 100000)
def test_gradient_descent(self):
    """Test GradientDescent is serializable."""
    optimizer = GradientDescent(maxiter=10, learning_rate=0.01)
    settings = optimizer.settings

    # The settings dict must round-trip the constructor arguments.
    expected = {"maxiter": 10, "learning_rate": 0.01}
    for key, value in expected.items():
        self.assertEqual(settings[key], value)
def test_callback(self):
    """Test the callback."""
    history = []

    def callback(*args):
        history.append(args)

    optimizer = GradientDescent(maxiter=1, callback=callback)

    def objective(x):
        return np.linalg.norm(x)

    _ = optimizer.minimize(objective, np.array([1, -1]))

    # One iteration -> exactly one callback invocation, carrying
    # (nfevs, parameters, function value, gradient norm).
    self.assertEqual(len(history), 1)
    expected_types = (int, np.ndarray, float, float)
    for entry, expected_type in zip(history[0], expected_types):
        self.assertIsInstance(entry, expected_type)
def test_pvqd(self, hamiltonian_type, expectation_cls, gradient, backend_type, num_timesteps):
    """Test a simple evolution."""
    time = 0.02

    # Select the Hamiltonian representation under test.
    if hamiltonian_type == "ising":
        hamiltonian = self.hamiltonian
    elif hamiltonian_type == "ising_matrix":
        hamiltonian = self.hamiltonian.to_matrix_op()
    else:  # hamiltonian_type == "pauli":
        hamiltonian = X ^ X

    # parse input arguments
    if gradient:
        optimizer = GradientDescent(maxiter=1)
    else:
        optimizer = L_BFGS_B(maxiter=1)

    backend = self.sv_backend if backend_type == "sv" else self.qasm_backend
    expectation = expectation_cls()

    # run pVQD keeping track of the energy and the magnetization
    pvqd = PVQD(
        self.ansatz,
        self.initial_parameters,
        num_timesteps=num_timesteps,
        optimizer=optimizer,
        quantum_instance=backend,
        expectation=expectation,
    )
    problem = EvolutionProblem(hamiltonian, time, aux_operators=[hamiltonian, self.observable])
    result = pvqd.evolve(problem)

    # NOTE(review): the hard-coded expectations below (3 recorded points at
    # times [0.0, 0.01, 0.02]) assume the evolution is resolved into exactly
    # two timesteps of 0.01 regardless of num_timesteps — confirm against the
    # parametrization of this test.
    self.assertTrue(len(result.fidelities) == 3)
    self.assertTrue(np.all(result.times == [0.0, 0.01, 0.02]))
    # Two aux_operators tracked at each of the 3 recorded times.
    self.assertTrue(np.asarray(result.observables).shape == (3, 2))
    num_parameters = self.ansatz.num_parameters
    self.assertTrue(
        len(result.parameters) == 3
        and np.all([len(params) == num_parameters for params in result.parameters])
    )
def test_gradient_descent(self):
    """Test GradientDescent converges when an analytic gradient is supplied."""
    optimizer = GradientDescent(maxiter=100000, tol=1e-06, learning_rate=1e-3)
    # run_optimizer checks convergence and that the function-evaluation
    # count stays within max_nfev.
    self.run_optimizer(optimizer, grad=True, max_nfev=100000)