Example #1
    def tensor(self):
        """Returns the full state vector as a tensor of shape ``(2 ** nqubits,)``.

        This is done by merging the state pieces into a single tensor.
        Using this method will double memory usage.
        """
        if self.qubits.list == list(range(self.nglobal)):
            with K.on_cpu():
                tensor = K.concatenate([x[K.newaxis] for x in self.pieces],
                                       axis=0)
                tensor = K.reshape(tensor, self.shapes["full"])
        elif self.qubits.list == list(range(self.nlocal, self.nqubits)):
            with K.on_cpu():
                tensor = K.concatenate([x[:, K.newaxis] for x in self.pieces],
                                       axis=1)
                tensor = K.reshape(tensor, self.shapes["full"])
        else:  # fall back to the transpose op
            with K.on_cpu():
                tensor = K.zeros(self.shapes["full"])
                tensor = K.transpose_state(self.pieces, tensor, self.nqubits,
                                           self.qubits.reverse_transpose_order)
        return tensor
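A hedged usage sketch of how the method above is typically reached: the circuit is executed with an accelerators dictionary so that the state is kept in pieces, and the full vector is only merged on demand. The accelerators configuration and the property-style ``state.tensor`` access below are assumptions based on the other excerpts on this page.

from qibo import gates, models

accelerators = {"/GPU:0": 2}              # assumption: two virtual device slots
dist_c = models.Circuit(4, accelerators)  # distributed circuit, state stored in pieces
dist_c.add(gates.H(i) for i in range(4))
state = dist_c()                          # DistributedState
full_vector = state.tensor                # merges the pieces into a (2 ** 4,) tensor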
Example #2
    def probabilities(self, qubits=None, measurement_gate=None):
        order = (tuple(sorted(qubits)) +
                 tuple(i for i in range(self.nqubits) if i not in qubits))
        order = order + tuple(i + self.nqubits for i in order)
        shape = 2 * (2**len(qubits), 2**(self.nqubits - len(qubits)))

        state = K.reshape(self.tensor, 2 * self.nqubits * (2, ))
        state = K.reshape(K.transpose(state, order), shape)
        state = K.einsum("abab->a", state)

        return K.reshape(K.cast(state, dtype='DTYPE'), len(qubits) * (2, ))
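For intuition, here is a plain-NumPy sketch (an assumption for illustration, not the qibo backend API) of the same marginal-probability computation: the density-matrix indices are reordered so the measured qubits come first, then the diagonal is taken over the measured subsystem while the rest is traced out.

import numpy as np

nqubits, measured = 2, (1,)
psi = np.random.random(4) + 1j * np.random.random(4)
psi /= np.linalg.norm(psi)
rho = np.outer(psi, psi.conj())            # density matrix of a pure state

k = len(measured)
rest = tuple(i for i in range(nqubits) if i not in measured)
order = measured + rest
order = order + tuple(i + nqubits for i in order)

state = rho.reshape(2 * nqubits * (2,)).transpose(order)
state = state.reshape(2 * (2 ** k, 2 ** (nqubits - k)))
probs = np.einsum("abab->a", state).real   # diagonal over measured, trace over rest

# agrees with summing |amplitude| ** 2 over the unmeasured qubit
np.testing.assert_allclose(probs, (np.abs(psi.reshape(2, 2)) ** 2).sum(axis=0))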
Example #3
def test_measurementresult_frequencies(backend):
    import collections
    result = measurements.MeasurementResult((0, 1, 2))
    result.decimal = K.cast([0, 6, 5, 3, 5, 5, 6, 1, 1, 2, 4],
                            dtype='DTYPEINT')
    dfreqs = {0: 1, 1: 2, 2: 1, 3: 1, 4: 1, 5: 3, 6: 2}
    bfreqs = {
        "000": 1,
        "001": 2,
        "010": 1,
        "011": 1,
        "100": 1,
        "101": 3,
        "110": 2
    }
    assert result.frequencies(binary=True) == bfreqs
    assert result.frequencies(binary=False) == dfreqs
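The mapping between the two dictionaries above can be reproduced with a few lines of standalone Python (a sketch of the bookkeeping only, not of qibo internals): each decimal sample is counted directly and again after formatting it as a 3-bit string.

import collections

samples = [0, 6, 5, 3, 5, 5, 6, 1, 1, 2, 4]
decimal_freqs = collections.Counter(samples)
binary_freqs = collections.Counter(format(s, "03b") for s in samples)

assert dict(decimal_freqs) == {0: 1, 1: 2, 2: 1, 3: 1, 4: 1, 5: 3, 6: 2}
assert dict(binary_freqs) == {"000": 1, "001": 2, "010": 1, "011": 1,
                              "100": 1, "101": 3, "110": 2}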
Example #4
def _control_unitary(unitary):
    shape = tuple(unitary.shape)
    if not isinstance(unitary, K.Tensor):
        unitary = K.cast(unitary)
    if shape != (2, 2):
        raise_error(
            ValueError, "Cannot use ``_control_unitary`` method for "
            "input matrix of shape {}.".format(shape))
    zeros = K.zeros((2, 2), dtype='DTYPECPX')
    part1 = K.concatenate([K.eye(2, dtype='DTYPECPX'), zeros], axis=0)
    part2 = K.concatenate([zeros, unitary], axis=0)
    return K.concatenate([part1, part2], axis=1)
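The concatenations above assemble the block matrix [[I, 0], [0, U]], i.e. the controlled version of the 2x2 unitary. A plain-NumPy check of that structure (an illustration, not the backend calls used above):

import numpy as np

u = np.array([[0, 1], [1, 0]], dtype=complex)                        # X gate as an example
zeros = np.zeros((2, 2), dtype=complex)
part1 = np.concatenate([np.eye(2, dtype=complex), zeros], axis=0)    # left 4x2 block
part2 = np.concatenate([zeros, u], axis=0)                           # right 4x2 block
controlled = np.concatenate([part1, part2], axis=1)                  # full 4x4 matrix

expected = np.block([[np.eye(2), np.zeros((2, 2))], [np.zeros((2, 2)), u]])
np.testing.assert_allclose(controlled, expected)                     # equals CNOT when u is X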
Example #5
def test_distributed_state_getitem(backend, accelerators):
    from qibo import gates
    theta = np.random.random(4)
    dist_c = Circuit(4, accelerators)
    dist_c.add((gates.RX(i, theta=theta[i]) for i in range(4)))
    state = dist_c()
    c = Circuit(4)
    c.add((gates.RX(i, theta=theta[i]) for i in range(4)))
    target_state = K.to_numpy(c())

    # Check indexing
    state_vector = np.array([state[i] for i in range(2 ** 4)])
    K.assert_allclose(state_vector, target_state)
    # Check slicing
    K.assert_allclose(state[:], target_state)
    K.assert_allclose(state[2:5], target_state[2:5])
    # Check list indexing
    ids = [2, 4, 6]
    target_state = [target_state[i] for i in ids]
    K.assert_allclose(state[ids], target_state)
    # Check error
    with pytest.raises(TypeError):
        state["a"]
Example #6
    def minimize(self, initial_state, method='Powell', options=None, compile=True):
        """Search for parameters which minimizes the hamiltonian expectation.

        Args:
            initial_state (array): an initial guess for the parameters of the
                variational circuit.
            method (str): the desired minimization method.
                One of ``"cma"`` (genetic optimizer), ``"sgd"`` (gradient descent) or
                any of the methods supported by `scipy.optimize.minimize <https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html>`_.
            options (dict): a dictionary with options for the different optimizers.

        Returns:
            The final expectation value.
            The corresponding best parameters.
        """
        def loss(params):
            self.circuit.set_parameters(params)
            final_state = self.circuit()
            return self.hamiltonian.expectation(final_state)

        if compile:
            if not self.circuit.using_tfgates:
                raise_error(RuntimeError, "Cannot compile VQE that uses custom operators. "
                                          "Set the compile flag to False.")
            from qibo import K
            loss = K.function(loss)

        if method == 'sgd':
            # check if gates are using the MatmulEinsum backend
            from qibo.tensorflow.gates import TensorflowGate
            for gate in self.circuit.queue:
                if not isinstance(gate, TensorflowGate):
                    raise_error(RuntimeError, 'SGD VQE requires native Tensorflow '
                                              'gates because gradients are not '
                                              'supported in the custom kernels.')

            result, parameters = self.optimizers.optimize(loss, initial_state,
                                                          "sgd", options,
                                                          compile)
        else:
            result, parameters = self.optimizers.optimize(
                lambda p: loss(p).numpy(), initial_state, method, options)

        self.circuit.set_parameters(parameters)
        return result, parameters
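A hypothetical usage sketch for the method above. The ``models.VQE`` constructor signature is an assumption (it is not shown on this page), while the circuit, gate and Hamiltonian calls mirror other excerpts here.

import numpy as np
from qibo import gates, models, hamiltonians

circuit = models.Circuit(4)
circuit.add(gates.RY(i, theta=0.0) for i in range(4))
hamiltonian = hamiltonians.TFIM(4, h=1.0)

vqe = models.VQE(circuit, hamiltonian)                  # assumed constructor
initial_parameters = np.random.uniform(0, 2 * np.pi, 4)
best_energy, best_parameters = vqe.minimize(initial_parameters,
                                            method="Powell", compile=False)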
Example #7
def test_pauli_error(backend, density_matrix, nshots):

    pauli = PauliError(0, 0.2, 0.3)
    noise = NoiseModel()
    noise.add(pauli, gates.X, 1)
    noise.add(pauli, gates.CNOT)
    noise.add(pauli, gates.Z, (0,1))

    circuit = Circuit(3, density_matrix=density_matrix)
    circuit.add(gates.CNOT(0,1))
    circuit.add(gates.Z(1))
    circuit.add(gates.X(1))
    circuit.add(gates.X(2))
    circuit.add(gates.Z(2))
    circuit.add(gates.M(0, 1, 2))

    target_circuit = Circuit(3, density_matrix=density_matrix)
    target_circuit.add(gates.CNOT(0,1))
    target_circuit.add(gates.PauliNoiseChannel(0, 0, 0.2, 0.3))
    target_circuit.add(gates.PauliNoiseChannel(1, 0, 0.2, 0.3))
    target_circuit.add(gates.Z(1))
    target_circuit.add(gates.PauliNoiseChannel(1, 0, 0.2, 0.3))
    target_circuit.add(gates.X(1))
    target_circuit.add(gates.PauliNoiseChannel(1, 0, 0.2, 0.3))
    target_circuit.add(gates.X(2))
    target_circuit.add(gates.Z(2))
    target_circuit.add(gates.M(0, 1, 2))

    initial_psi = random_density_matrix(3) if density_matrix else random_state(3)
    np.random.seed(123)
    K.set_seed(123)
    final_state = noise.apply(circuit)(initial_state=np.copy(initial_psi), nshots=nshots)
    final_state_samples = final_state.samples() if nshots else None
    np.random.seed(123)
    K.set_seed(123)
    target_final_state = target_circuit(initial_state=np.copy(initial_psi), nshots=nshots)
    target_final_state_samples = target_final_state.samples() if nshots else None

    if nshots is None:
        K.assert_allclose(final_state, target_final_state)
    else:
        K.assert_allclose(final_state_samples, target_final_state_samples)
Example #8
def test_set_precision(backend, precision):
    original_backend = backends.get_backend()
    original_precision = backends.get_precision()
    backends.set_backend(backend)
    backends.set_precision(precision)
    if precision == "single":
        expected_dtype = K.backend.complex64
    else:
        expected_dtype = K.backend.complex128
    assert backends.get_precision() == precision
    assert K.dtypes('DTYPECPX') == expected_dtype
    # Test that circuits use proper precision
    circuit = models.Circuit(2)
    circuit.add([gates.H(0), gates.H(1)])
    final_state = circuit()
    assert final_state.dtype == expected_dtype
    backends.set_precision(original_precision)
    backends.set_backend(original_backend)
Example #9
def test_qgan_custom_discriminator():
    if not K.check_availability("tensorflow"):  # pragma: no cover
        pytest.skip("Skipping StyleQGAN test because tensorflow backend is not available.")

    from tensorflow.keras.models import Sequential  # pylint: disable=E0611,E0401
    from tensorflow.keras.layers import Dense  # pylint: disable=E0611,E0401
    original_backend = qibo.get_backend()
    qibo.set_backend("tensorflow")
    reference_distribution = generate_distribution(10)
    # use wrong number of qubits so that we capture the error
    nqubits = reference_distribution.shape[1] + 1
    discriminator = Sequential()
    discriminator.add(Dense(200, use_bias=False, input_dim=nqubits))
    discriminator.add(Dense(1, activation='sigmoid'))
    qgan = models.StyleQGAN(latent_dim=2, layers=1, discriminator=discriminator)
    with pytest.raises(ValueError):
        qgan.fit(reference_distribution, n_epochs=1, save=False)
    qibo.set_backend(original_backend)
Example #10
def test_entropy_numerical(backend):
    """Check that entropy calculation does not fail for tiny eigenvalues."""
    original_backend = qibo.get_backend()
    qibo.set_backend(backend)
    from qibo import K
    eigvals = np.array([
        -1e-10, -1e-15, -2e-17, -1e-18, -5e-60, 1e-48, 4e-32, 5e-14, 1e-14,
        9.9e-13, 9e-13, 5e-13, 1e-13, 1e-12, 1e-11, 1e-10, 1e-9, 1e-7, 1, 4, 10
    ])
    rho = K.cast(np.diag(eigvals))
    callback = callbacks.EntanglementEntropy()
    result = callback.entropy(rho)

    mask = eigvals > 0
    target = -(eigvals[mask] * np.log2(eigvals[mask])).sum()

    np.testing.assert_allclose(result, target)
    qibo.set_backend(original_backend)
Example #11
    def _repeated_execute(self, nreps, initial_state=None):
        results = []
        for _ in range(nreps):
            state = self._device_execute(initial_state)
            if self.measurement_gate is None:
                results.append(state.tensor)
            else:
                state.measure(self.measurement_gate, nshots=1)
                results.append(state.measurements[0])
                del state

        results = K.stack(results, axis=0)
        if self.measurement_gate is None:
            return results
        state = self.state_cls(self.nqubits)
        state.set_measurements(self.measurement_gate.qubits, results,
                               self.measurement_tuples)
        return state
Example #12
    def _special_gate_execute(self, state, gate: "BackendGate"):
        """Executes special gates on ``memory_device``.

        Currently special gates are ``Flatten`` or ``CallbackGate``.
        This method calculates the full state vector because special gates
        are not implemented for state pieces.
        """
        with K.device(self.memory_device):
            # Reverse all global SWAPs that happened so far
            self._revert_swaps(state, reversed(gate.swap_reset))
            full_state = state.tensor
            if isinstance(gate, gates.CallbackGate):
                gate(full_state)
            else:
                full_state = gate(full_state)
                state.assign_pieces(full_state)
            # Redo all global SWAPs that happened so far
            self._revert_swaps(state, gate.swap_reset)
Example #13
def test_vector_state_state_deepcopy(deep):
    """Check if deep copy is really deep."""
    # use the numpy backend because tensorflow tensors are immutable and
    # their values cannot be modified in place for this test
    import qibo
    original_backend = qibo.get_backend()
    qibo.set_backend("numpy")
    vector = np.random.random(32) + 1j * np.random.random(32)
    vector = vector / np.sqrt((np.abs(vector) ** 2).sum())
    state = states.VectorState.from_tensor(vector)
    cstate = state.copy(deep)
    current_value = state.tensor[0]
    state.tensor[0] = 0
    if deep:
        K.assert_allclose(state.tensor[0], 0)
        K.assert_allclose(cstate.tensor[0], current_value)
        K.assert_allclose(cstate.tensor[1:], state.tensor[1:])
    else:
        K.assert_allclose(cstate.tensor, state.tensor)
    qibo.set_backend(original_backend)
Example #14
def apply_gates(gatelist, nqubits=None, initial_state=None):
    from qibo import K
    if initial_state is None:
        state = K.qnp.zeros(2 ** nqubits)
        state[0] = 1
    elif isinstance(initial_state, np.ndarray):
        state = np.copy(initial_state)
        if nqubits is None:
            nqubits = int(np.log2(len(state)))
        else: # pragma: no cover
            assert nqubits == int(np.log2(len(state)))
    else: # pragma: no cover
        raise_error(TypeError, "Invalid initial state type {}."
                               "".format(type(initial_state)))

    state = K.cast(state)
    for gate in gatelist:
        state = gate(state)
    return state
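For example, the helper above can prepare a Bell state (assuming a backend where gates are applied directly to state tensors, as in the other tests on this page):

from qibo import gates

# (|00> + |11>) / sqrt(2), built by applying the gates sequentially
bell_state = apply_gates([gates.H(0), gates.CNOT(0, 1)], nqubits=2)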
Example #15
def test_reset_channel_repeated(backend):
    initial_state = random_state(5)
    c = Circuit(5)
    c.add(gates.ResetChannel(2, p0=0.3, p1=0.3, seed=123))
    final_state = c(K.cast(np.copy(initial_state)), nshots=30)

    np.random.seed(123)
    target_state = []
    collapse = gates.M(2, collapse=True)
    collapse.nqubits = 5
    xgate = gates.X(2)
    for _ in range(30):
        state = K.cast(np.copy(initial_state))
        if np.random.random() < 0.3:
            state = K.state_vector_collapse(collapse, state, [0])
        if np.random.random() < 0.3:
            state = K.state_vector_collapse(collapse, state, [0])
            state = xgate(state)
        target_state.append(K.copy(state))
    target_state = K.stack(target_state)
    K.assert_allclose(final_state, target_state)
Example #16
def test_adiabatic_evolution_execute_rk(backend, solver, dense, dt):
    """Test adiabatic evolution with Runge-Kutta solver."""
    h0 = hamiltonians.X(3, dense=dense)
    h1 = hamiltonians.TFIM(3, dense=dense)

    target_psi = [np.ones(8) / np.sqrt(8)]
    ham = lambda t: h0 * (1 - t) + h1 * t
    for n in range(int(1 / dt)):
        prop = K.to_numpy(ham(n * dt).exp(dt))
        target_psi.append(prop.dot(target_psi[-1]))

    checker = TimeStepChecker(target_psi, atol=dt)
    adev = models.AdiabaticEvolution(h0,
                                     h1,
                                     lambda t: t,
                                     dt,
                                     solver="rk4",
                                     callbacks=[checker])
    final_psi = adev(final_time=1, initial_state=np.copy(target_psi[0]))
Example #17
def test_general_channel(backend):
    a1 = np.sqrt(0.4) * np.array([[0, 1], [1, 0]])
    a2 = np.sqrt(0.6) * np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1],
                                  [0, 0, 1, 0]])
    a1, a2 = K.cast(a1), K.cast(a2)
    initial_rho = random_density_matrix(2)
    gate = gates.KrausChannel([((1, ), a1), ((0, 1), a2)])
    assert gate.target_qubits == (0, 1)
    final_rho = gate(np.copy(initial_rho))
    m1 = np.kron(np.eye(2), K.to_numpy(a1))
    m2 = K.to_numpy(a2)
    target_rho = (m1.dot(initial_rho).dot(m1.conj().T) +
                  m2.dot(initial_rho).dot(m2.conj().T))
    K.assert_allclose(final_rho, target_rho)
Example #18
    def _normalize(self, state):
        """Normalizes state by summing the norms of each state piece.

        To be used after ``Collapse`` gates because normalization should be
        applied collectively and not in each piece seperately.
        The full calculation happens on CPU. (may not be efficient)
        """
        total_norm = 0
        with K.device(self.memory_device):
            for piece in state.pieces:
                total_norm += K.sum(K.square(K.abs(piece)))
            total_norm = K.cast(K.sqrt(total_norm), dtype=state.dtype)
            for piece in state.pieces:
                piece.assign(piece / total_norm)
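The arithmetic of this collective normalization can be illustrated with plain NumPy (hypothetical pieces, not the backend calls used above): the squared norms of all pieces are accumulated first, and only then is the square root taken and each piece divided.

import numpy as np

pieces = [np.array([3.0, 0.0]), np.array([0.0, 4.0])]   # hypothetical state pieces
total_norm = np.sqrt(sum(np.sum(np.abs(p) ** 2) for p in pieces))
pieces = [p / total_norm for p in pieces]

# the merged state is now a unit vector
np.testing.assert_allclose(np.linalg.norm(np.concatenate(pieces)), 1.0)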
Example #19
    def __mul__(self, o):
        if isinstance(o, K.tensor_types):
            o = complex(o)
        elif not isinstance(o, K.numeric_types):
            raise_error(
                NotImplementedError, "Hamiltonian multiplication to {} "
                "not implemented.".format(type(o)))
        new_matrix = self.matrix * o
        r = self.__class__(self.nqubits, new_matrix)
        if self._eigenvalues is not None:
            if K.qnp.cast(o).real >= 0:
                r._eigenvalues = o * self._eigenvalues
            elif not K.issparse(self.matrix):
                r._eigenvalues = o * self._eigenvalues[::-1]
        if self._eigenvectors is not None:
            if K.qnp.cast(o).real > 0:
                r._eigenvectors = self._eigenvectors
            elif o == 0:
                r._eigenvectors = self.eye(int(self._eigenvectors.shape[0]))
        return r
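The eigenvalue branches above rely on a simple fact that is easy to check with NumPy (an illustration with made-up eigenvalues): scaling by a non-negative number keeps an ascending spectrum ascending, while scaling by a negative number reverses the order, hence the ``[::-1]``.

import numpy as np

eigenvalues = np.array([-2.0, 0.5, 3.0])                             # ascending order
np.testing.assert_allclose(np.sort(2.5 * eigenvalues), 2.5 * eigenvalues)
np.testing.assert_allclose(np.sort(-1.0 * eigenvalues), -1.0 * eigenvalues[::-1])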
Example #20
def test_set_backend(backend):
    """Check ``set_backend`` for switching gate backends."""
    original_backend = backends.get_backend()
    backends.set_backend(backend)
    if backend == "defaulteinsum":
        target_name = "tensorflow_defaulteinsum"
    elif backend == "matmuleinsum":
        target_name = "tensorflow_matmuleinsum"
    else:
        target_name = backend
    assert K.name == target_name
    assert str(K) == target_name
    assert repr(K) == target_name
    assert K.executing_eagerly()
    h = gates.H(0)
    if backend == "custom":
        assert K.custom_einsum is None
        assert h.gate_op
    else:
        assert h.gate_op is None
    backends.set_backend(original_backend)
Example #21
def test_measurement_result_parameters_multiple_qubits(backend):
    initial_state = random_state(4)
    K.set_seed(123)
    c = models.Circuit(4)
    output = c.add(gates.M(0, 1, 2, collapse=True))
    c.add(gates.RY(1, theta=np.pi * output[0] / 5))
    c.add(gates.RX(3, theta=np.pi * output[2] / 3))
    result = c(initial_state=np.copy(initial_state))

    K.set_seed(123)
    collapse = gates.M(0, 1, 2, collapse=True)
    target_state = collapse(K.cast(np.copy(initial_state)))
    # not including in coverage because outcomes are probabilistic and may
    # not occur for the CI run
    if int(collapse.result.outcome(0)):  # pragma: no cover
        target_state = gates.RY(1, theta=np.pi / 5)(target_state)
    if int(collapse.result.outcome(2)):  # pragma: no cover
        target_state = gates.RX(3, theta=np.pi / 3)(target_state)
    K.assert_allclose(result, target_state)
Example #22
def test_trotterized_adiabatic_evolution(backend, accelerators, nqubits, dt):
    """Test adiabatic evolution using Trotterization."""
    dense_h0 = hamiltonians.X(nqubits)
    dense_h1 = hamiltonians.TFIM(nqubits)

    target_psi = [np.ones(2**nqubits) / np.sqrt(2**nqubits)]
    ham = lambda t: dense_h0 * (1 - t) + dense_h1 * t
    for n in range(int(1 / dt)):
        prop = K.to_numpy(ham(n * dt).exp(dt))
        target_psi.append(prop.dot(target_psi[-1]))

    local_h0 = hamiltonians.X(nqubits, dense=False)
    local_h1 = hamiltonians.TFIM(nqubits, dense=False)
    checker = TimeStepChecker(target_psi, atol=dt)
    adev = models.AdiabaticEvolution(local_h0,
                                     local_h1,
                                     lambda t: t,
                                     dt,
                                     callbacks=[checker],
                                     accelerators=accelerators)
    final_psi = adev(final_time=1)
Example #23
    def _custom_density_matrix_call(self, state):
        state = K._density_matrix_half_call(self, state)
        matrix = K.conj(K.matrices.Y)
        shape = state.shape
        state = K.reshape(state, (K.np.prod(shape), ))
        original_targets = tuple(self.target_qubits)
        self._target_qubits = self.cache.target_qubits_dm
        self._nqubits *= 2
        self.gate_op = K.op.apply_gate
        self._custom_op_matrix = K.conj(K.matrices.Y)
        state = K.state_vector_matrix_call(self, state)
        self._custom_op_matrix = K.matrices.Y
        self.gate_op = K.op.apply_y
        self._nqubits //= 2
        self._target_qubits = original_targets
        return K.reshape(state, shape)
Example #24
    def _custom_density_matrix_call(self, state):
        state = K._density_matrix_half_call(self, state)
        matrix = K.conj(K.matrices.Y)
        shape = state.shape
        state = K.reshape(state, (K.np.prod(shape), ))
        original_targets = tuple(self.target_qubits)
        self._target_qubits = self.cache.target_qubits_dm
        self._nqubits *= 2
        self.name = "Unitary"  # change name temporarily so that ``apply_gate`` op is used
        self._custom_op_matrix = K.conj(K.matrices.Y)
        state = K.state_vector_matrix_call(self, state)
        self._custom_op_matrix = K.matrices.Y
        self.name = "y"
        self._nqubits //= 2
        self._target_qubits = original_targets
        return K.reshape(state, shape)
Example #25
def test_measurementregistersresult_frequencies(backend):
    probs = np.random.random(16)
    probs = K.cast(probs / np.sum(probs), dtype='DTYPE')
    result = measurements.MeasurementResult((0, 1, 2, 3),
                                            probs,
                                            nshots=1000000)
    frequencies = result.frequencies()
    qubits = {"a": (0, 1), "b": (2, 3)}
    result = measurements.MeasurementRegistersResult(qubits, result)
    register_frequencies = result.frequencies(registers=True)
    assert register_frequencies.keys() == qubits.keys()
    rkeys = ["00", "01", "10", "11"]
    target_frequencies_a = {
        k: sum(frequencies[f"{k}{l}"] for l in rkeys)
        for k in rkeys
    }
    target_frequencies_b = {
        k: sum(frequencies[f"{l}{k}"] for l in rkeys)
        for k in rkeys
    }
    assert register_frequencies["a"] == target_frequencies_a
    assert register_frequencies["b"] == target_frequencies_b
Example #26
def test_backend_sparse_eigh(tested_backend, target_backend, sparse_type):
    if tested_backend == "tensorflow":
        pytest.skip("Temporary skip.")
    tested_backend = K.construct_backend(tested_backend)
    target_backend = K.construct_backend(target_backend)
    from scipy import sparse
    from qibo import hamiltonians
    ham = hamiltonians.TFIM(6, h=1.0)
    m = getattr(sparse, f"{sparse_type}_matrix")(K.to_numpy(ham.matrix))
    eigvals1, eigvecs1 = tested_backend.eigh(tested_backend.cast(m))
    eigvals2, eigvecs2 = target_backend.eigh(target_backend.cast(m))
    eigvals1 = sorted(K.to_numpy(eigvals1))
    eigvals2 = sorted(K.to_numpy(eigvals2))
    tested_backend.assert_allclose(eigvals1, eigvals2)

    eigvals1 = tested_backend.eigvalsh(tested_backend.cast(m))
    eigvals2 = target_backend.eigvalsh(target_backend.cast(m))
    eigvals1 = sorted(K.to_numpy(eigvals1))
    eigvals2 = sorted(K.to_numpy(eigvals2))
    tested_backend.assert_allclose(eigvals1, eigvals2)
Example #27
def test_trotter_hamiltonian_matmul(nqubits, normalize):
    """Test Trotter Hamiltonian expectation value."""
    local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False)
    dense_ham = hamiltonians.TFIM(nqubits, h=1.0)

    state = K.cast(random_complex((2 ** nqubits,)))
    trotter_ev = local_ham.expectation(state, normalize)
    target_ev = dense_ham.expectation(state, normalize)
    K.assert_allclose(trotter_ev, target_ev)

    state = random_complex((2 ** nqubits,))
    trotter_ev = local_ham.expectation(state, normalize)
    target_ev = dense_ham.expectation(state, normalize)
    K.assert_allclose(trotter_ev, target_ev)

    from qibo.core.states import VectorState
    state = VectorState.from_tensor(state)
    trotter_matmul = local_ham @ state
    target_matmul = dense_ham @ state
    K.assert_allclose(trotter_matmul, target_matmul)
Example #28
    def minimize(self,
                 initial_parameters,
                 method="BFGS",
                 options=None,
                 messages=False):
        """Optimize the free parameters of the scheduling function.

        Args:
            initial_parameters (np.ndarray): Initial guess for the variational
                parameters that are optimized.
                The last element of the given array should correspond to the
                guess for the total evolution time T.
            method (str): The desired minimization method.
                One of ``"cma"`` (genetic optimizer), ``"sgd"`` (gradient descent) or
                any of the methods supported by
                `scipy.optimize.minimize <https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html>`_.
            options (dict): a dictionary with options for the different optimizers.
            messages (bool): If ``True`` the loss evolution is shown during
                optimization.
        """
        self.opt_messages = messages
        if method == "sgd":
            loss = self._loss
        else:
            loss = lambda p, ae, h1, msg, hist: K.to_numpy(
                self._loss(p, ae, h1, msg, hist))

        args = (self, self.hamiltonian.h1, self.opt_messages, self.opt_history)
        result, parameters, extra = optimizers.optimize(loss,
                                                        initial_parameters,
                                                        args=args,
                                                        method=method,
                                                        options=options)
        if isinstance(parameters, K.tensor_types) and not len(
                parameters.shape):  # pragma: no cover
            # some optimizers like ``Powell`` return number instead of list
            parameters = [parameters]
        self.set_parameters(parameters)
        return result, parameters, extra
Example #29
def test_hamiltonian_eigenvalues(dtype, dense):
    """Testing hamiltonian eigenvalues scaling."""
    H1 = hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense)

    H1_eigen = H1.eigenvalues()
    hH1_eigen = np.linalg.eigvalsh(H1.matrix)
    K.assert_allclose(H1_eigen, hH1_eigen)

    c1 = dtype(2.5)
    H2 = c1 * H1
    hH2_eigen = np.linalg.eigvalsh(c1 * H1.matrix)
    K.assert_allclose(H2._eigenvalues, hH2_eigen)

    c2 = dtype(-11.1)
    H3 = H1 * c2
    hH3_eigen = np.linalg.eigvalsh(H1.matrix * c2)
    K.assert_allclose(H3._eigenvalues, hH3_eigen)
Example #30
    def state_vector_call(self, state):
        state = K.reshape(state, self.tensor_shape)
        if self.is_controlled_by:
            ncontrol = len(self.control_qubits)
            nactive = self.nqubits - ncontrol
            state = K.transpose(state, self.control_cache.order(False))
            # Apply `einsum` only to the part of the state where all controls
            # are active. This should be `state[-1]`
            state = K.reshape(state, (2**ncontrol, ) + nactive * (2, ))
            updates = self.einsum(self.calculation_cache.vector, state[-1],
                                  self.matrix)
            # Concatenate the updated part of the state `updates` with the
            # part of the state that remained unaffected `state[:-1]`.
            state = K.concatenate([state[:-1], updates[K.newaxis]], axis=0)
            state = K.reshape(state, self.nqubits * (2, ))
            # Put qubit indices back to their proper places
            state = K.transpose(state, self.control_cache.reverse(False))
        else:
            einsum_str = self.calculation_cache.vector
            state = self.einsum(einsum_str, state, self.matrix)
        return K.reshape(state, self.flat_shape)
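The controlled-by branch above can be mimicked in plain NumPy (an illustrative sketch, not the backend einsum machinery): the state is viewed as (controls, targets), the gate matrix is applied only to the slice where all controls are active, and the result is stitched back, which reproduces the full controlled matrix.

import numpy as np

x = np.array([[0, 1], [1, 0]], dtype=complex)      # target matrix (X gate)
state = np.arange(4, dtype=complex).reshape(2, 2)  # axes: (control qubit, target qubit)

# apply the matrix only to the slice where the control is 1, then stitch back
updates = np.einsum("ab,b->a", x, state[-1])
controlled = np.concatenate([state[:-1], updates[None]], axis=0)

# compare against applying the full 4x4 CNOT matrix
cnot = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]], dtype=complex)
np.testing.assert_allclose(controlled.ravel(), cnot @ state.ravel())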