Example #1
    def test_optimize_grad(self):
        """Test that the gradient of ExpvalCost is accessible and correct when using observable
        optimization and the autograd interface."""
        if not qml.tape_mode_active():
            pytest.skip("This test is only intended for tape mode")

        dev = qml.device("default.qubit", wires=4)
        hamiltonian = big_hamiltonian

        cost = qml.ExpvalCost(qml.templates.StronglyEntanglingLayers,
                              hamiltonian,
                              dev,
                              optimize=True)
        cost2 = qml.ExpvalCost(qml.templates.StronglyEntanglingLayers,
                               hamiltonian,
                               dev,
                               optimize=False)

        w = qml.init.strong_ent_layers_uniform(2, 4, seed=1967)

        dc = qml.grad(cost)(w)
        exec_opt = dev.num_executions
        dev._num_executions = 0

        dc2 = qml.grad(cost2)(w)
        exec_no_opt = dev.num_executions

        assert exec_no_opt > exec_opt
        assert np.allclose(dc, big_hamiltonian_grad)
        assert np.allclose(dc2, big_hamiltonian_grad)
Example #2
    def test_optimize_grad(self):
        """Test that the gradient of ExpvalCost is accessible and correct when using observable
        optimization and the autograd interface."""
        dev = qml.device("default.qubit", wires=4)
        hamiltonian = big_hamiltonian

        cost = qml.ExpvalCost(
            qml.templates.StronglyEntanglingLayers, hamiltonian, dev, optimize=True, diff_method="parameter-shift"
        )
        cost2 = qml.ExpvalCost(
            qml.templates.StronglyEntanglingLayers, hamiltonian, dev, optimize=False, diff_method="parameter-shift"
        )

        w = qml.init.strong_ent_layers_uniform(2, 4, seed=1967)

        dc = qml.grad(cost)(w)
        exec_opt = dev.num_executions
        dev._num_executions = 0

        dc2 = qml.grad(cost2)(w)
        exec_no_opt = dev.num_executions

        assert exec_no_opt > exec_opt
        assert np.allclose(dc, big_hamiltonian_grad)
        assert np.allclose(dc2, big_hamiltonian_grad)
Example #3
    def test_optimize_grad(self):
        """Test that the gradient of ExpvalCost is accessible and correct when using observable
        optimization and the autograd interface."""
        dev = qml.device("default.qubit", wires=4)
        hamiltonian = big_hamiltonian

        cost = qml.ExpvalCost(
            qml.templates.StronglyEntanglingLayers,
            hamiltonian,
            dev,
            optimize=True,
            diff_method="parameter-shift",
        )
        cost2 = qml.ExpvalCost(
            qml.templates.StronglyEntanglingLayers,
            hamiltonian,
            dev,
            optimize=False,
            diff_method="parameter-shift",
        )

        np.random.seed(1967)
        shape = qml.templates.StronglyEntanglingLayers.shape(n_layers=2, n_wires=4)
        w = np.random.uniform(low=0, high=2 * np.pi, size=shape)

        dc = qml.grad(cost)(w)
        exec_opt = dev.num_executions
        dev._num_executions = 0

        dc2 = qml.grad(cost2)(w)
        exec_no_opt = dev.num_executions

        assert exec_no_opt > exec_opt
        assert np.allclose(dc, big_hamiltonian_grad)
        assert np.allclose(dc2, big_hamiltonian_grad)
Example #4
    def test_optimize_multiple_terms(self, interface, tf_support,
                                     torch_support):
        """Test that an ExpvalCost with observable optimization gives the same
        result as another ExpvalCost without observable optimization even when there
        are non-unique Hamiltonian terms."""
        if interface == "tf" and not tf_support:
            pytest.skip("This test requires TensorFlow")
        if interface == "torch" and not torch_support:
            pytest.skip("This test requires Torch")

        dev = qml.device("default.qubit", wires=5)
        obs = [
            qml.PauliZ(wires=[2])
            @ qml.PauliZ(wires=[4]),  # <---- These two terms
            qml.PauliZ(wires=[4]) @ qml.PauliZ(wires=[2]),  # <---- are equal
            qml.PauliZ(wires=[1]),
            qml.PauliZ(wires=[2]),
            qml.PauliZ(wires=[1]) @ qml.PauliZ(wires=[2]),
            qml.PauliZ(wires=[2]) @ qml.PauliZ(wires=[0]),
            qml.PauliZ(wires=[3]) @ qml.PauliZ(wires=[1]),
            qml.PauliZ(wires=[4]) @ qml.PauliZ(wires=[3]),
        ]

        coefs = (np.random.rand(len(obs)) - 0.5) * 2
        hamiltonian = qml.Hamiltonian(coefs, obs)

        cost = qml.ExpvalCost(
            qml.templates.StronglyEntanglingLayers,
            hamiltonian,
            dev,
            optimize=True,
            interface=interface,
            diff_method="parameter-shift",
        )
        cost2 = qml.ExpvalCost(
            qml.templates.StronglyEntanglingLayers,
            hamiltonian,
            dev,
            optimize=False,
            interface=interface,
            diff_method="parameter-shift",
        )

        np.random.seed(1967)
        shape = qml.templates.StronglyEntanglingLayers.shape(n_layers=2,
                                                             n_wires=5)
        w = np.random.random(shape)

        c1 = cost(w)
        exec_opt = dev.num_executions
        dev._num_executions = 0

        c2 = cost2(w)
        exec_no_opt = dev.num_executions

        assert exec_opt == 1  # Number of groups in the Hamiltonian
        assert exec_no_opt == 8

        assert np.allclose(c1, c2)
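The exec_opt == 1 assertion follows from observable grouping: with optimize=True, ExpvalCost measures qubit-wise commuting terms together, and every term in obs above is a product of PauliZ operators, so they all land in a single group. A quick way to confirm this directly (a sketch assuming the qml.grouping module available in this PennyLane version, reusing the obs list from the example):

    # all Z-type terms are mutually qubit-wise commuting, so one group suffices
    groups = qml.grouping.group_observables(obs)
    assert len(groups) == 1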
Example #5
    def test_module_example(self, tol):
        """Test the example in the QAOA module docstring"""

        # Defines the wires and the graph on which MaxCut is being performed
        wires = range(3)
        graph = Graph([(0, 1), (1, 2), (2, 0)])

        # Defines the QAOA cost and mixer Hamiltonians
        cost_h, mixer_h = qaoa.maxcut(graph)

        # Defines a layer of the QAOA ansatz from the cost and mixer Hamiltonians
        def qaoa_layer(gamma, alpha):
            qaoa.cost_layer(gamma, cost_h)
            qaoa.mixer_layer(alpha, mixer_h)

        # Repeatedly applies layers of the QAOA ansatz
        def circuit(params, **kwargs):
            for w in wires:
                qml.Hadamard(wires=w)

            qml.layer(qaoa_layer, 2, params[0], params[1])

        # Defines the device and the QAOA cost function
        dev = qml.device("default.qubit", wires=len(wires))
        cost_function = qml.ExpvalCost(circuit, cost_h, dev)

        res = cost_function([[1, 1], [1, 1]])
        expected = -1.8260274380964299

        assert np.allclose(res, expected, atol=tol, rtol=0)
Example #6
    def test_single_shots(self, mocker, monkeypatch):
        """Test that, if the shot budget for a single term is 1,
        the number of dimensions of the returned Jacobian is expanded"""
        coeffs = [0.2, 0.1, 0.1]
        dev = qml.device("default.qubit", wires=2, shots=100)
        H = qml.Hamiltonian(
            coeffs,
            [qml.PauliZ(0),
             qml.PauliX(1),
             qml.PauliZ(0) @ qml.PauliZ(1)])

        expval_cost = qml.ExpvalCost(qml.templates.StronglyEntanglingLayers, H,
                                     dev)
        weights = qml.init.strong_ent_layers_normal(n_layers=3, n_wires=2)

        opt = qml.ShotAdaptiveOptimizer(min_shots=10)

        spy = mocker.spy(qml, "jacobian")
        spy_dims = mocker.spy(np, "expand_dims")
        mocker.patch("scipy.stats._multivariate.multinomial_gen.rvs",
                     return_value=np.array([[4, 1, 5]]))
        grads = opt.weighted_random_sampling(expval_cost.qnodes, coeffs, 10,
                                             [0], weights)

        spy_dims.assert_called_once()
        assert len(spy.call_args_list) == 3
        assert len(grads) == 1
        assert grads[0].shape == (10, *weights.shape)
Example #7
    def test_learning_error(self):
        """Test that an exception is raised if the learning rate is beyond the
        Lipschitz bound"""
        coeffs = [0.3, 0.1]
        H = qml.Hamiltonian(coeffs, [qml.PauliX(0), qml.PauliZ(0)])
        dev = qml.device("default.qubit", wires=1, shots=100)
        expval_cost = qml.ExpvalCost(lambda x, **kwargs: qml.RX(x, wires=0), H,
                                     dev)

        opt = qml.ShotAdaptiveOptimizer(min_shots=10, stepsize=100.0)

        # Lipschitz constant is given by sum(|coeffs|)
        lipschitz = np.sum(np.abs(coeffs))

        assert opt._stepsize > 2 / lipschitz

        with pytest.raises(
                ValueError,
                match=f"The learning rate must be less than {2 / lipschitz}"):
            opt.step(expval_cost, 0.5)

        # for a single QNode, the Lipschitz constant is simply 1
        opt = qml.ShotAdaptiveOptimizer(min_shots=10, stepsize=100.0)
        with pytest.raises(
                ValueError,
                match=f"The learning rate must be less than {2 / 1}"):
            opt.step(expval_cost.qnodes[0], 0.5)
Example #8
    def test_multiple_devices(self, mocker):
        """Test that passing multiple devices to ExpvalCost works correctly"""

        dev = [qml.device("default.qubit", wires=2), qml.device("default.mixed", wires=2)]
        spy = mocker.spy(DefaultQubit, "apply")
        spy2 = mocker.spy(DefaultMixed, "apply")

        obs = [qml.PauliZ(0), qml.PauliZ(1)]
        h = qml.Hamiltonian([1, 1], obs)

        qnodes = qml.ExpvalCost(qml.templates.BasicEntanglerLayers, h, dev)
        np.random.seed(1967)
        w = np.random.random(qml.templates.BasicEntanglerLayers.shape(n_layers=3, n_wires=2))

        res = qnodes(w)

        spy.assert_called_once()
        spy2.assert_called_once()

        mapped = qml.map(qml.templates.BasicEntanglerLayers, obs, dev)
        exp = sum(mapped(w))

        assert np.allclose(res, exp)

        with pytest.warns(UserWarning, match="ExpvalCost was instantiated with multiple devices."):
            qml.metric_tensor(qnodes, approx="block-diag")(w)
Example #9
    def test_metric_tensor_tape_mode(self):
        """Test that the metric tensor can be calculated in tape mode, and that it is equal to a
        metric tensor calculated in non-tape mode."""
        if not qml.tape_mode_active():
            pytest.skip("This test is only intended for tape mode")

        dev = qml.device("default.qubit", wires=2)
        p = np.array([1., 1., 1.])

        def ansatz(params, **kwargs):
            qml.RX(params[0], wires=0)
            qml.RY(params[1], wires=0)
            qml.CNOT(wires=[0, 1])
            qml.PhaseShift(params[2], wires=1)

        h = qml.Hamiltonian([1, 1], [qml.PauliZ(0), qml.PauliZ(1)])
        qnodes = qml.ExpvalCost(ansatz, h, dev)
        mt = qml.metric_tensor(qnodes)(p)
        assert qml.tape_mode_active()  # Check that tape mode is still active

        qml.disable_tape()

        @qml.qnode(dev)
        def circuit(params):
            qml.RX(params[0], wires=0)
            qml.RY(params[1], wires=0)
            qml.CNOT(wires=[0, 1])
            qml.PhaseShift(params[2], wires=1)
            return qml.expval(qml.PauliZ(0))

        mt2 = circuit.metric_tensor([p])
        assert np.allclose(mt, mt2)
Example #10
    def run_vqe(H, ansatz, params=None):
        from pennylane.optimize import NesterovMomentumOptimizer, AdamOptimizer
        num_qubits = len(H.wires)
        num_layers = 4

        if params is None:
            params = qml.init.strong_ent_layers_uniform(num_layers, num_qubits, 3)

        cost_fn = qml.ExpvalCost(ansatz, H, dev)

        stepsize = 0.1
        opt = NesterovMomentumOptimizer(stepsize)
        max_iterations = 300
        conv_tol = 1e-8

        energy = 0

        for n in range(max_iterations):
            params, prev_energy = opt.step_and_cost(cost_fn, params)
            energy = cost_fn(params)
            conv = np.abs(energy - prev_energy)

            stepsize *= 0.99
            opt.update_stepsize(stepsize)

            if DEBUG and n % 20 == 0:
                print('Iteration = {:},  Energy = {:.8f} Ha'.format(n, energy))

            if conv <= conv_tol:
                break

        return energy, params
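The run_vqe helper above relies on a module-level device dev and a DEBUG flag that are not shown in the snippet. A minimal sketch of that assumed setup, together with a call that uses StronglyEntanglingLayers as the ansatz (the Hamiltonian here is a placeholder chosen purely for illustration):

    import pennylane as qml
    from pennylane import numpy as np

    DEBUG = False
    H = qml.Hamiltonian([1.0, 0.5], [qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliZ(1)])
    dev = qml.device("default.qubit", wires=len(H.wires))

    energy, opt_params = run_vqe(H, qml.templates.StronglyEntanglingLayers)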
Example #11
def test_integration_observable_to_vqe_cost(monkeypatch, mol_name, terms_ref,
                                            expected_cost, custom_wires, tol):
    r"""Test if `convert_observable()` in qchem integrates with `ExpvalCost()` in pennylane"""

    qOp = QubitOperator()
    if terms_ref is not None:
        monkeypatch.setattr(qOp, "terms", terms_ref)
    vqe_observable = qchem.convert_observable(qOp, custom_wires)

    num_qubits = len(vqe_observable.wires)
    assert vqe_observable.terms.__repr__()  # just to satisfy codecov

    if custom_wires is None:
        wires = num_qubits
    elif isinstance(custom_wires, dict):
        wires = qchem.structure._process_wires(custom_wires)
    else:
        wires = custom_wires[:num_qubits]
    dev = qml.device("default.qubit", wires=wires)

    # can replace the ansatz with more suitable ones later.
    def dummy_ansatz(phis, wires):
        for phi, w in zip(phis, wires):
            qml.RX(phi, wires=w)

    dummy_cost = qml.ExpvalCost(dummy_ansatz, vqe_observable, dev)
    params = [0.1 * i for i in range(num_qubits)]
    res = dummy_cost(params)

    assert np.allclose(res, expected_cost, **tol)
Example #12
    def test_vqe_optimization(self):
        """Test that a simple VQE circuit can be optimized"""
        dev = qml.device("default.qubit", wires=2, shots=100)
        coeffs = [0.1, 0.2]
        obs = [qml.PauliZ(0), qml.PauliX(0)]
        H = qml.Hamiltonian(coeffs, obs)

        def ansatz(x, **kwargs):
            qml.Rot(*x[0], wires=0)
            qml.Rot(*x[1], wires=1)
            qml.CNOT(wires=[0, 1])
            qml.Rot(*x[2], wires=0)
            qml.Rot(*x[3], wires=1)
            qml.CNOT(wires=[0, 1])

        cost = qml.ExpvalCost(ansatz, H, dev)
        params = np.random.random((4, 3), requires_grad=True)
        initial_loss = cost(params)

        min_shots = 10
        loss = initial_loss
        opt = qml.ShotAdaptiveOptimizer(min_shots=min_shots)

        for i in range(100):
            params = opt.step(cost, params)
            loss = cost(params)

        assert loss < initial_loss
        assert np.allclose(loss, -1 / (2 * np.sqrt(5)), atol=0.1, rtol=0.2)
        assert opt.shots_used > min_shots
Example #13
    def test_optimize_grad_tf(self, tf_support):
        """Test that the gradient of ExpvalCost is accessible and correct when using observable
        optimization and the TensorFlow interface."""
        if not qml.tape_mode_active():
            pytest.skip("This test is only intended for tape mode")
        if not tf_support:
            pytest.skip("This test requires TensorFlow")

        dev = qml.device("default.qubit", wires=4)
        hamiltonian = big_hamiltonian

        cost = qml.ExpvalCost(qml.templates.StronglyEntanglingLayers,
                              hamiltonian,
                              dev,
                              optimize=True,
                              interface="tf")

        w = tf.Variable(qml.init.strong_ent_layers_uniform(2, 4, seed=1967))

        with tf.GradientTape() as tape:
            res = cost(w)

        dc = tape.gradient(res, w).numpy()

        assert np.allclose(dc, big_hamiltonian_grad)
Example #14
    def test_optimize_grad_tf(self, tf_support):
        """Test that the gradient of ExpvalCost is accessible and correct when using observable
        optimization and the TensorFlow interface."""
        if not tf_support:
            pytest.skip("This test requires TensorFlow")

        dev = qml.device("default.qubit", wires=4)
        hamiltonian = big_hamiltonian

        cost = qml.ExpvalCost(qml.templates.StronglyEntanglingLayers,
                              hamiltonian,
                              dev,
                              optimize=True,
                              interface="tf")

        np.random.seed(1967)
        shape = qml.templates.StronglyEntanglingLayers.shape(n_layers=2,
                                                             n_wires=4)
        w = np.random.uniform(low=0, high=2 * np.pi, size=shape)
        w = tf.Variable(w)

        with tf.GradientTape() as tape:
            res = cost(w)

        dc = tape.gradient(res, w).numpy()

        assert np.allclose(dc, big_hamiltonian_grad)
Example #15
    def test_gradient(self, tol):
        """Test differentiation works"""
        dev = qml.device("default.qubit", wires=1)

        def ansatz(params, **kwargs):
            qml.RX(params[0], wires=0)
            qml.RY(params[1], wires=0)

        coeffs = [0.2, 0.5]
        observables = [qml.PauliX(0), qml.PauliY(0)]

        H = qml.vqe.Hamiltonian(coeffs, observables)
        a, b = 0.54, 0.123
        params = torch.tensor([a, b], requires_grad=True)

        cost = qml.ExpvalCost(ansatz, H, dev, interface="torch")
        loss = cost(params)
        loss.backward()

        res = params.grad.numpy()

        expected = [
            -coeffs[0] * np.sin(a) * np.sin(b) - coeffs[1] * np.cos(a),
            coeffs[0] * np.cos(a) * np.cos(b),
        ]

        assert np.allclose(res, expected, atol=tol, rtol=0)
Example #16
    def test_optimize_grad_torch(self, torch_support):
        """Test that the gradient of ExpvalCost is accessible and correct when using observable
        optimization and the Torch interface."""
        if not torch_support:
            pytest.skip("This test requires Torch")

        dev = qml.device("default.qubit", wires=4)
        hamiltonian = big_hamiltonian

        cost = qml.ExpvalCost(
            qml.templates.StronglyEntanglingLayers,
            hamiltonian,
            dev,
            optimize=True,
            interface="torch",
        )

        np.random.seed(1967)
        shape = qml.templates.StronglyEntanglingLayers.shape(n_layers=2,
                                                             n_wires=4)
        w = np.random.uniform(low=0, high=2 * np.pi, size=shape)
        w = torch.tensor(w, requires_grad=True)

        res = cost(w)
        res.backward()
        dc = w.grad.detach().numpy()

        assert np.allclose(dc, big_hamiltonian_grad)
Example #17
    def test_gradient(self, tol):
        """Test differentiation works"""
        dev = qml.device("default.qubit", wires=1)

        def ansatz(params, **kwargs):
            qml.RX(params[0], wires=0)
            qml.RY(params[1], wires=0)

        coeffs = [0.2, 0.5]
        observables = [qml.PauliX(0), qml.PauliY(0)]

        H = qml.vqe.Hamiltonian(coeffs, observables)
        a, b = 0.54, 0.123
        params = Variable([a, b], dtype=tf.float64)
        cost = qml.ExpvalCost(ansatz, H, dev, interface="tf")

        with tf.GradientTape() as tape:
            loss = cost(params)
            res = np.array(tape.gradient(loss, params))

        expected = [
            -coeffs[0] * np.sin(a) * np.sin(b) - coeffs[1] * np.cos(a),
            coeffs[0] * np.cos(a) * np.cos(b),
        ]

        assert np.allclose(res, expected, atol=tol, rtol=0)
Example #18
    def test_cost_evaluate(self, params, ansatz, coeffs, observables):
        """Tests that the cost function evaluates properly"""
        hamiltonian = qml.vqe.Hamiltonian(coeffs, observables)
        dev = qml.device("default.qubit", wires=3)
        expval = qml.ExpvalCost(ansatz, hamiltonian, dev)
        assert type(expval(params)) == np.float64
        assert np.shape(expval(params)) == ()  # expval should be scalar
Example #19
    def test_optimize_grad_torch(self, torch_support):
        """Test that the gradient of ExpvalCost is accessible and correct when using observable
        optimization and the Torch interface."""
        if not qml.tape_mode_active():
            pytest.skip("This test is only intended for tape mode")
        if not torch_support:
            pytest.skip("This test requires Torch")

        dev = qml.device("default.qubit", wires=4)
        hamiltonian = big_hamiltonian

        cost = qml.ExpvalCost(
            qml.templates.StronglyEntanglingLayers,
            hamiltonian,
            dev,
            optimize=True,
            interface="torch",
        )

        w = torch.tensor(qml.init.strong_ent_layers_uniform(2, 4, seed=1967),
                         requires_grad=True)

        res = cost(w)
        res.backward()
        dc = w.grad.detach().numpy()

        assert np.allclose(dc, big_hamiltonian_grad)
Example #20
    def test_gradient(self, tol, interface):
        """Test differentiation works"""
        dev = qml.device("default.qubit", wires=1)

        def ansatz(params, **kwargs):
            qml.RX(params[0], wires=0)
            qml.RY(params[1], wires=0)

        coeffs = [0.2, 0.5]
        observables = [qml.PauliX(0), qml.PauliY(0)]

        H = qml.vqe.Hamiltonian(coeffs, observables)
        a, b = 0.54, 0.123
        params = np.array([a, b])

        cost = qml.ExpvalCost(ansatz, H, dev, interface=interface)
        dcost = qml.grad(cost, argnum=[0])
        res = dcost(params)

        expected = [
            -coeffs[0] * np.sin(a) * np.sin(b) - coeffs[1] * np.cos(a),
            coeffs[0] * np.cos(a) * np.cos(b),
        ]

        assert np.allclose(res, expected, atol=tol, rtol=0)
Example #21
    def test_multiple_devices(self, mocker):
        """Test that passing multiple devices to ExpvalCost works correctly"""

        dev = [
            qml.device("default.qubit", wires=2),
            qml.device("default.mixed", wires=2)
        ]
        spy = mocker.spy(DefaultQubit, "apply")
        spy2 = mocker.spy(DefaultMixed, "apply")

        obs = [qml.PauliZ(0), qml.PauliZ(1)]
        h = qml.Hamiltonian([1, 1], obs)

        qnodes = qml.ExpvalCost(qml.templates.BasicEntanglerLayers, h, dev)
        w = qml.init.basic_entangler_layers_uniform(3, 2, seed=1967)

        res = qnodes(w)

        spy.assert_called_once()
        spy2.assert_called_once()

        mapped = qml.map(qml.templates.BasicEntanglerLayers, obs, dev)
        exp = sum(mapped(w))

        assert np.allclose(res, exp)

        with pytest.warns(
                UserWarning,
                match="ExpvalCost was instantiated with multiple devices."):
            qnodes.metric_tensor([w])
Example #22
    def test_variance_error(self):
        """Test that an error is raised if attempting to use ExpvalCost to measure
        variances"""
        dev = qml.device("default.qubit", wires=4)
        hamiltonian = big_hamiltonian

        with pytest.raises(ValueError, match="sums of expectation values"):
            qml.ExpvalCost(qml.templates.StronglyEntanglingLayers, hamiltonian, dev, measure="var")
Example #23
    def test_multiple_devices_opt_true(self):
        """Test if a ValueError is raised when multiple devices are passed when optimize=True."""
        dev = [qml.device("default.qubit", wires=2), qml.device("default.qubit", wires=2)]

        h = qml.Hamiltonian([1, 1], [qml.PauliZ(0), qml.PauliZ(1)])

        with pytest.raises(ValueError, match="Using multiple devices is not supported when"):
            qml.ExpvalCost(qml.templates.StronglyEntanglingLayers, h, dev, optimize=True)
Example #24
def run_vqe(H):
    """Runs the variational quantum eigensolver on the problem Hamiltonian using the
    variational ansatz specified above.

    Fill in the missing parts between the # QHACK # markers below to run the VQE.

    Args:
        H (qml.Hamiltonian): The input Hamiltonian

    Returns:
        The ground state energy of the Hamiltonian.
    """
    energy = 0

    # QHACK #

    # Initialize the quantum device
    num_qubits = len(H.wires)
    dev = qml.device('default.qubit', wires=num_qubits)
    #print(H)

    # Randomly choose initial parameters (how many do you need?)
    params = np.random.uniform(low=-np.pi / 2, high=np.pi / 2,
                               size=(num_qubits - 1))

    # Set up a cost function
    cost_fn = qml.ExpvalCost(variational_ansatz, H, dev)

    # Set up an optimizer
    opt = qml.GradientDescentOptimizer(stepsize=0.01)

    # Run the VQE by iterating over many steps of the optimizer
    max_iterations = 500
    conv_tol = 1e-06

    for n in range(max_iterations):
        params, prev_energy = opt.step_and_cost(cost_fn, params)
        energy = cost_fn(params)
        conv = np.abs(energy - prev_energy)

        # # DEBUG PRINT
        # if n % 20 == 0:
        #     print('Iteration = {:},  Energy = {:.8f} Ha'.format(n, energy))
        #     print(params)

        if conv <= conv_tol:
            break

    # @qml.qnode(dev)
    # def circuit(params):
    #     variational_ansatz(params, H.wires)
    #     return qml.probs(H.wires)
    # print(circuit(params))

    # QHACK #

    # Return the ground state energy
    return energy
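The run_vqe examples here and below call a variational_ansatz that is defined outside the snippet. The parameter shape (num_qubits - 1,) used above suggests one angle per adjacent qubit pair; a purely illustrative stand-in compatible with that shape (not the original QHACK ansatz) could be:

    def variational_ansatz(params, wires, **kwargs):
        # one parametrized rotation plus an entangling CNOT per adjacent qubit pair
        for i, phi in enumerate(params):
            qml.RY(phi, wires=wires[i])
            qml.CNOT(wires=[wires[i], wires[i + 1]])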
Example #25
def run_vqe(H):
    """Runs the variational quantum eigensolver on the problem Hamiltonian using the
    variational ansatz specified above.

    Fill in the missing parts between the # QHACK # markers below to run the VQE.

    Args:
        H (qml.Hamiltonian): The input Hamiltonian

    Returns:
        The ground state energy of the Hamiltonian.
    """
    # Initialize parameters
    num_qubits = len(H.wires)
    num_param_sets = (2**num_qubits) - 1
    params = np.random.uniform(low=-np.pi / 2,
                               high=np.pi / 2,
                               size=(num_param_sets, 3))

    energy = 0

    # QHACK #

    # Create a quantum device, set up a cost function and optimizer, and run the VQE.
    # (We recommend ~500 iterations to ensure convergence for this problem,
    # or you can design your own convergence criteria)

    # Device Creation
    dev = qml.device('default.qubit', wires=H.wires)

    # Cost function: ExpvalCost is convenient for Hamiltonians
    cost_fn = qml.ExpvalCost(variational_ansatz, H, dev)

    # Optimizer
    opt = qml.GradientDescentOptimizer(stepsize=0.1)

    # Optimization loop

    max_iterations = 800
    conv_tol = 1e-05

    for n in range(max_iterations):
        params, prev_energy = opt.step_and_cost(cost_fn, params)
        energy = cost_fn(params)
        conv = np.abs(energy - prev_energy)

        # if n % 20 == 0:
        #     print('Iteration = {:},  Energy = {:.8f} Ha'.format(n, energy))

        if conv <= conv_tol:
            break

    #

    # QHACK #

    # Return the ground state energy
    return energy
Example #26
    def costs_ses(params):
        energy_cost = qml.ExpvalCost(variational_ansatz, H, dev)(params)
        state = dev.state
        gs_cost = abs(sum(a * np.conj(b)
                          for a, b in zip(state, ground_state)))**2
        fes_cost = abs(
            sum(a * np.conj(b) for a, b in zip(state, first_excited_state)))**2

        return energy_cost, 3 * gs_cost, 9 * fes_cost
Example #27
    def test_optimize(self, interface, tf_support, torch_support, shots):
        """Test that an ExpvalCost with observable optimization gives the same result as another
        ExpvalCost without observable optimization."""
        if interface == "tf" and not tf_support:
            pytest.skip("This test requires TensorFlow")
        if interface == "torch" and not torch_support:
            pytest.skip("This test requires Torch")

        dev = qml.device("default.qubit", wires=4, shots=shots)
        hamiltonian = big_hamiltonian

        cost = qml.ExpvalCost(
            qml.templates.StronglyEntanglingLayers,
            hamiltonian,
            dev,
            optimize=True,
            interface=interface,
            diff_method="parameter-shift",
        )
        cost2 = qml.ExpvalCost(
            qml.templates.StronglyEntanglingLayers,
            hamiltonian,
            dev,
            optimize=False,
            interface=interface,
            diff_method="parameter-shift",
        )

        np.random.seed(1967)
        shape = qml.templates.StronglyEntanglingLayers.shape(n_layers=2,
                                                             n_wires=4)
        w = np.random.random(shape)

        c1 = cost(w)
        exec_opt = dev.num_executions
        dev._num_executions = 0

        c2 = cost2(w)
        exec_no_opt = dev.num_executions

        assert exec_opt == 5  # Number of groups in the Hamiltonian
        assert exec_no_opt == 15

        assert np.allclose(c1, c2, atol=1e-1)
Example #28
def run_vqe(H):
    """Runs the variational quantum eigensolver on the problem Hamiltonian using the
    variational ansatz specified above.

    Fill in the missing parts between the # QHACK # markers below to run the VQE.

    Args:
        H (qml.Hamiltonian): The input Hamiltonian

    Returns:
        The ground state energy of the Hamiltonian.
    """
    # Initialize parameters
    num_qubits = len(H.wires)
    num_param_sets = (2**num_qubits) - 1
    #np.random.seed(0)
    params = np.random.uniform(low=-np.pi / 2,
                               high=np.pi / 2,
                               size=(num_param_sets, 3))
    #params = np.random.uniform(0, np.pi, size=(num_param_sets, 3))

    energy = 0

    # QHACK #

    # Create a quantum device, set up a cost function and optimizer, and run the VQE.
    # (We recommend ~500 iterations to ensure convergence for this problem,
    # or you can design your own convergence criteria)

    # QHACK #
    dev = qml.device('default.qubit', wires=num_qubits)

    # Minimize the circuit
    #opt = qml.GradientDescentOptimizer(stepsize=0.4)
    opt = qml.AdamOptimizer(stepsize=0.2, beta1=0.9, beta2=0.99, eps=1e-08)
    #opt = qml.MomentumOptimizer(stepsize=0.01, momentum=0.99)

    steps = 600

    cost_fn = qml.ExpvalCost(variational_ansatz, H, dev)

    for i in range(steps):

        params, prev_energy = opt.step_and_cost(cost_fn, params)
        energy = cost_fn(params)
        conv = np.abs(energy - prev_energy)

        #if i % 4 == 0:
        #print("Iteration = {:},  E = {:.8f} Ha".format(i, energy))

        if conv <= 1e-06:
            break

    # Return the ground state energy
    return energy
Example #29
    def test_all_interfaces_gradient_agree(self, tol):
        """Test the gradient agrees across all interfaces"""
        dev = qml.device("default.qubit", wires=2)

        coeffs = [0.2, 0.5]
        observables = [qml.PauliX(0) @ qml.PauliZ(1), qml.PauliY(0)]

        H = qml.Hamiltonian(coeffs, observables)

        np.random.seed(1)
        shape = qml.templates.StronglyEntanglingLayers.shape(3, 2)
        params = np.random.uniform(low=0, high=2 * np.pi, size=shape)

        # TensorFlow interface
        w = Variable(params)
        ansatz = qml.templates.layers.StronglyEntanglingLayers

        cost = qml.ExpvalCost(ansatz, H, dev, interface="tf")

        with tf.GradientTape() as tape:
            loss = cost(w)
            res_tf = np.array(tape.gradient(loss, w))

        # Torch interface
        w = torch.tensor(params, requires_grad=True)
        ansatz = qml.templates.layers.StronglyEntanglingLayers

        cost = qml.ExpvalCost(ansatz, H, dev, interface="torch")
        loss = cost(w)
        loss.backward()
        res_torch = w.grad.numpy()

        # NumPy interface
        w = params
        ansatz = qml.templates.layers.StronglyEntanglingLayers
        cost = qml.ExpvalCost(ansatz, H, dev, interface="autograd")
        dcost = qml.grad(cost, argnum=[0])
        res = dcost(w)

        assert np.allclose(res, res_tf, atol=tol, rtol=0)
        assert np.allclose(res, res_torch, atol=tol, rtol=0)
Example #30
    def test_all_interfaces_gradient_agree(self, tol):
        """Test the gradient agrees across all interfaces"""
        dev = qml.device("default.qubit", wires=2)

        coeffs = [0.2, 0.5]
        observables = [qml.PauliX(0) @ qml.PauliZ(1), qml.PauliY(0)]

        H = qml.vqe.Hamiltonian(coeffs, observables)

        # TensorFlow interface
        params = Variable(
            qml.init.strong_ent_layers_normal(n_layers=3, n_wires=2, seed=1))
        ansatz = qml.templates.layers.StronglyEntanglingLayers

        cost = qml.ExpvalCost(ansatz, H, dev, interface="tf")

        with tf.GradientTape() as tape:
            loss = cost(params)
            res_tf = np.array(tape.gradient(loss, params))

        # Torch interface
        params = torch.tensor(
            qml.init.strong_ent_layers_normal(n_layers=3, n_wires=2, seed=1),
            requires_grad=True)
        ansatz = qml.templates.layers.StronglyEntanglingLayers

        cost = qml.ExpvalCost(ansatz, H, dev, interface="torch")
        loss = cost(params)
        loss.backward()
        res_torch = params.grad.numpy()

        # NumPy interface
        params = qml.init.strong_ent_layers_normal(n_layers=3,
                                                   n_wires=2,
                                                   seed=1)
        ansatz = qml.templates.layers.StronglyEntanglingLayers
        cost = qml.ExpvalCost(ansatz, H, dev, interface="autograd")
        dcost = qml.grad(cost, argnum=[0])
        res = dcost(params)

        assert np.allclose(res, res_tf, atol=tol, rtol=0)
        assert np.allclose(res, res_torch, atol=tol, rtol=0)
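All of the examples above share the same basic pattern: build a qml.Hamiltonian, wrap an ansatz and a device in qml.ExpvalCost, then evaluate or differentiate the resulting cost function. A minimal self-contained sketch of that pattern (device size, coefficients, and parameters chosen arbitrarily for illustration):

    import pennylane as qml
    from pennylane import numpy as np

    dev = qml.device("default.qubit", wires=2)
    H = qml.Hamiltonian([0.2, 0.5], [qml.PauliX(0), qml.PauliZ(0) @ qml.PauliZ(1)])

    def ansatz(params, **kwargs):
        qml.RX(params[0], wires=0)
        qml.RY(params[1], wires=1)
        qml.CNOT(wires=[0, 1])

    cost = qml.ExpvalCost(ansatz, H, dev)
    params = np.array([0.1, 0.2], requires_grad=True)

    print(cost(params))            # expectation value of H at params
    print(qml.grad(cost)(params))  # its gradient with respect to params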