Example #1
import numpy as np
from qibo import gates, hamiltonians, models, K


def main(nqubits, layers, maxsteps, T_max):
    circuit = models.Circuit(nqubits)
    for l in range(layers):
        circuit.add((gates.RY(q, theta=0) for q in range(nqubits)))
        circuit.add((gates.CZ(q, q + 1) for q in range(0, nqubits - 1, 2)))
        circuit.add((gates.RY(q, theta=0) for q in range(nqubits)))
        circuit.add((gates.CZ(q, q + 1) for q in range(1, nqubits - 2, 2)))
        circuit.add(gates.CZ(0, nqubits - 1))
    circuit.add((gates.RY(q, theta=0) for q in range(nqubits)))
    problem_hamiltonian = hamiltonians.XXZ(nqubits)
    easy_hamiltonian = hamiltonians.X(nqubits)
    s = lambda t: t
    aavqe = models.variational.AAVQE(circuit, easy_hamiltonian, problem_hamiltonian, s, nsteps=maxsteps, t_max=T_max)

    initial_parameters = np.random.uniform(0, 2 * np.pi * 0.1,
                                           2 * nqubits * layers + nqubits)
    best, params = aavqe.minimize(initial_parameters)

    print('Final parameters: ', params)
    print('Final energy: ', best)

    # Compare with the exact ground-state energy to check performance
    eigenvalue = problem_hamiltonian.eigenvalues()
    print('Difference from exact value: ', best - K.real(eigenvalue[0]))
    print('Log difference: ', -np.log10(best - K.real(eigenvalue[0])))
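A minimal driver for the example above; the argument values are illustrative, not taken from the original:

if __name__ == "__main__":
    main(nqubits=4, layers=2, maxsteps=5000, T_max=5)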
Example #2
    def state_vector_call(self, state):
        if self.evolution is None:
            raise_error(
                ValueError, "Gap callback can only be used in "
                "adiabatic evolution models.")
        hamiltonian = self.evolution.hamiltonian()
        # Call the eigenvectors so that they are cached for the ``exp`` call
        hamiltonian.eigenvectors()
        eigvals = hamiltonian.eigenvalues()
        if isinstance(self.mode, int):
            return K.real(eigvals[self.mode])

        # case: self.mode == "gap"
        excited = 1
        gap = K.real(eigvals[excited] - eigvals[0])
        if not self.check_degenerate:
            return gap

        # Skip (near-)degenerate excited levels until a finite gap is found.
        # Incrementing before recomputing avoids a redundant first iteration
        # and keeps ``excited`` equal to the eigenvalue index actually used.
        while K.less(gap, EIGVAL_CUTOFF):
            excited += 1
            gap = K.real(eigvals[excited] - eigvals[0])
        if excited > 1:
            log.warning("The Hamiltonian is degenerate. Using eigenvalue {} "
                        "to calculate gap.".format(excited))
        return gap
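For context, a sketch of how this callback is typically attached to an adiabatic evolution (compare Example #4 below); the Hamiltonians and schedule are illustrative:

from qibo import callbacks, hamiltonians, models

h0 = hamiltonians.X(4)
h1 = hamiltonians.TFIM(4, h=0)
gap = callbacks.Gap(check_degenerate=True)
evolution = models.AdiabaticEvolution(h0, h1, lambda t: t, dt=1e-1,
                                      callbacks=[gap])
final_state = evolution(final_time=1.0)
print(gap[:])  # gap value at every time step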
Example #3
def expectation(self, hamiltonian, normalize=False):
    from qibo.abstractions.hamiltonians import TrotterHamiltonian
    if isinstance(hamiltonian, TrotterHamiltonian):
        # use the dense form of Trotter Hamiltonians because their
        # multiplication with rank-2 tensors is not implemented
        hamiltonian = hamiltonian.dense
    ev = K.real(K.trace(hamiltonian @ self.tensor))
    if normalize:
        norm = K.real(K.trace(self.tensor))
        ev = ev / norm
    return ev
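A usage sketch for this density-matrix expectation; the circuit and Hamiltonian are illustrative, assuming qibo's density_matrix circuit mode dispatches to the method above:

from qibo import gates, hamiltonians, models

c = models.Circuit(2, density_matrix=True)
c.add(gates.H(0))
ham = hamiltonians.Z(2)
print(c().expectation(ham, normalize=True))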
Example #4
import numpy as np
from qibo import K, callbacks, hamiltonians
from qibo.models import AdiabaticEvolution


# backend, dense and check_degenerate are provided by pytest parametrization
def test_gap(backend, dense, check_degenerate):
    h0 = hamiltonians.X(4, dense=dense)
    if check_degenerate:
        # use h=0 to make this Hamiltonian degenerate
        h1 = hamiltonians.TFIM(4, h=0, dense=dense)
    else:
        h1 = hamiltonians.TFIM(4, h=1, dense=dense)

    ham = lambda t: (1 - t) * h0.matrix + t * h1.matrix
    targets = {"ground": [], "excited": [], "gap": []}
    for t in np.linspace(0, 1, 11):
        eigvals = K.real(K.eigvalsh(ham(t)))
        targets["ground"].append(eigvals[0])
        targets["excited"].append(eigvals[1])
        targets["gap"].append(eigvals[1] - eigvals[0])
    if check_degenerate:
        # With h=0 the TFIM ground state at t=1 is doubly degenerate, so the
        # target gap is measured to a higher eigenvalue instead of eigvals[1]
        targets["gap"][-1] = eigvals[3] - eigvals[0]

    gap = callbacks.Gap(check_degenerate=check_degenerate)
    ground = callbacks.Gap(0)
    excited = callbacks.Gap(1)
    evolution = AdiabaticEvolution(h0,
                                   h1,
                                   lambda t: t,
                                   dt=1e-1,
                                   callbacks=[gap, ground, excited])
    final_state = evolution(final_time=1.0)
    targets = {k: K.stack(v) for k, v in targets.items()}
    K.assert_allclose(ground[:], targets["ground"])
    K.assert_allclose(excited[:], targets["excited"])
    K.assert_allclose(gap[:], targets["gap"])
Example #5
import numpy as np
import pytest
import qibo
from qibo import K, gates
from qibo.models import Circuit


def test_variable_backpropagation(backend):
    if backend == "custom":
        pytest.skip("Custom gates do not support automatic differentiation.")
    original_backend = qibo.get_backend()
    qibo.set_backend(backend)
    if K.name != "tensorflow":
        qibo.set_backend(original_backend)
        pytest.skip("Backpropagation is not supported by {}.".format(K.name))

    theta = K.optimization.Variable(0.1234, dtype=K.dtypes('DTYPE'))
    # TODO: Fix parametrized gates so that `Circuit` can be defined outside
    # of the gradient tape
    with K.optimization.GradientTape() as tape:
        c = Circuit(1)
        c.add(gates.X(0))
        c.add(gates.RZ(0, theta))
        loss = K.real(c()[-1])
    grad = tape.gradient(loss, theta)

    # After X, RZ only adds a phase exp(±1j * theta / 2) to |1>, so the real
    # part of the final amplitude is cos(theta / 2)
    target_loss = np.cos(theta / 2.0)
    np.testing.assert_allclose(loss, target_loss)

    target_grad = -np.sin(theta / 2.0) / 2.0
    np.testing.assert_allclose(grad, target_grad)
    qibo.set_backend(original_backend)
Example #6
    def cost_function(params, count):
        """Evaluates the cost function to be minimized.

        Args:
            params (array or list): values of the variational parameters.
            count (list): single-element counter used to report progress.

        Returns:
            Value of the cost function.
        """
        circuit = models.Circuit(nqubits)
        for l in range(layers):
            for q in range(nqubits):
                circuit.add(gates.RY(q, theta=0))
            for q in range(0, nqubits - 1, 2):
                circuit.add(gates.CZ(q, q + 1))
            for q in range(nqubits):
                circuit.add(gates.RY(q, theta=0))
            for q in range(1, nqubits - 2, 2):
                circuit.add(gates.CZ(q, q + 1))
            circuit.add(gates.CZ(0, nqubits - 1))
        for q in range(nqubits):
            circuit.add(gates.RY(q, theta=0))

        cost = 0
        # this changes all RY thetas in the circuit to the appropriate values
        circuit.set_parameters(params)
        for i in range(len(ising_groundstates)):
            final_state = circuit(np.copy(ising_groundstates[i]))
            cost += K.real(encoder.expectation(final_state))

        if count[0] % 50 == 0:
            print(count[0], cost / len(ising_groundstates))
        count[0] += 1

        return K.to_numpy(cost) / len(ising_groundstates)
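A hypothetical driver for this cost function, assuming it is defined in a scope where nqubits, layers, encoder and ising_groundstates exist as in the original; the optimizer choice is illustrative:

import numpy as np
from scipy.optimize import minimize

count = [0]
initial = np.random.uniform(0, 2 * np.pi, 2 * nqubits * layers + nqubits)
result = minimize(cost_function, initial, args=(count,), method="Powell")
print(result.fun)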
Example #7
import numpy as np
import pytest
import qibo
from qibo import K, gates
from qibo.models import Circuit


def test_two_variables_backpropagation(backend):
    if backend == "custom":
        pytest.skip("Custom gates do not support automatic differentiation.")
    original_backend = qibo.get_backend()
    qibo.set_backend(backend)
    if "numpy" in K.name:
        qibo.set_backend(original_backend)
        pytest.skip("Backpropagation is not supported by {}.".format(K.name))

    theta = K.optimization.Variable([0.1234, 0.4321], dtype=K.dtypes('DTYPE'))
    # TODO: Fix parametrized gates so that `Circuit` can be defined outside
    # of the gradient tape
    with K.optimization.GradientTape() as tape:
        c = Circuit(2)
        c.add(gates.RX(0, theta[0]))
        c.add(gates.RY(1, theta[1]))
        loss = K.real(c()[0])
    grad = tape.gradient(loss, theta)

    t = np.array([0.1234, 0.4321]) / 2.0
    target_loss = np.cos(t[0]) * np.cos(t[1])
    np.testing.assert_allclose(loss, target_loss)

    target_grad1 = -np.sin(t[0]) * np.cos(t[1])
    target_grad2 = -np.cos(t[0]) * np.sin(t[1])
    target_grad = np.array([target_grad1, target_grad2]) / 2.0
    np.testing.assert_allclose(grad, target_grad)
    qibo.set_backend(original_backend)
Example #8
def expectation(self, hamiltonian, normalize=False):
    statec = K.conj(self.tensor)
    hstate = hamiltonian @ self.tensor
    ev = K.real(K.sum(statec * hstate))
    if normalize:
        norm = K.sum(K.square(K.abs(self.tensor)))
        ev = ev / norm
    return ev
Example #9
def entropy(self, rho):
    """Calculates entropy of a density matrix via exact diagonalization."""
    # Diagonalize
    eigvals = K.real(K.eigvalsh(rho))
    # Drop zero and negative eigenvalues to avoid log(0)
    drop_condition = eigvals > EIGVAL_CUTOFF
    masked_eigvals = K.gather(eigvals, condition=drop_condition)
    spectrum = -1 * K.log(masked_eigvals)
    if self.compute_spectrum:
        self.spectrum.append(spectrum)
    entropy = K.sum(masked_eigvals * spectrum)
    # dividing by log(2) converts the entropy from nats to bits
    return entropy / self._log2
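This method backs qibo's EntanglementEntropy callback; a sketch of typical usage on a Bell state (the circuit is illustrative):

from qibo import callbacks, gates, models

entropy = callbacks.EntanglementEntropy([0])
c = models.Circuit(2)
c.add(gates.H(0))
c.add(gates.CNOT(0, 1))
c.add(gates.CallbackGate(entropy))
c()
print(entropy[:])  # ~1 bit for a maximally entangled state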
Example #10
import numpy as np
import pytest
from qibo import K, gates
from qibo.models import Circuit


def test_variable_backpropagation(backend):
    if not K.supports_gradients:
        pytest.skip("Backpropagation is not supported by {}.".format(K.name))

    theta = K.optimization.Variable(0.1234, dtype=K.dtypes('DTYPE'))
    # TODO: Fix parametrized gates so that `Circuit` can be defined outside
    # of the gradient tape
    with K.optimization.GradientTape() as tape:
        c = Circuit(1)
        c.add(gates.X(0))
        c.add(gates.RZ(0, theta))
        loss = K.real(c()[-1])
    grad = tape.gradient(loss, theta)

    target_loss = np.cos(theta / 2.0)
    K.assert_allclose(loss, target_loss)

    target_grad = -np.sin(theta / 2.0) / 2.0
    K.assert_allclose(grad, target_grad)
Example #11
import numpy as np
import pytest
from qibo import K, gates
from qibo.models import Circuit


def test_two_variables_backpropagation(backend):
    if not K.supports_gradients:
        pytest.skip("Backpropagation is not supported by {}.".format(K.name))

    theta = K.optimization.Variable([0.1234, 0.4321], dtype=K.dtypes('DTYPE'))
    # TODO: Fix parametrized gates so that `Circuit` can be defined outside
    # of the gradient tape
    with K.optimization.GradientTape() as tape:
        c = Circuit(2)
        c.add(gates.RX(0, theta[0]))
        c.add(gates.RY(1, theta[1]))
        loss = K.real(c()[0])
    grad = tape.gradient(loss, theta)

    t = np.array([0.1234, 0.4321]) / 2.0
    target_loss = np.cos(t[0]) * np.cos(t[1])
    K.assert_allclose(loss, target_loss)

    target_grad1 = -np.sin(t[0]) * np.cos(t[1])
    target_grad2 = -np.cos(t[0]) * np.sin(t[1])
    target_grad = np.array([target_grad1, target_grad2]) / 2.0
    K.assert_allclose(grad, target_grad)
Example #12
def expectation(self, hamiltonian, normalize=False):
    ev = K.real(K.trace(hamiltonian @ self.tensor))
    if normalize:
        norm = K.real(K.trace(self.tensor))
        ev = ev / norm
    return ev