def model_cost(params, E_A, E_B, E_C, E_D):
    """Compute the model cost for relative parameters and given function data.
    Args:
        params (array[float]): Relative parameters at which to evaluate the model.
        E_A (float): Coefficient E^(A) in the model.
        E_B (array[float]): Coefficients E^(B) in the model.
        E_C (array[float]): Coefficients E^(C) in the model.
        E_D (array[float]): Coefficients E^(D) in the model.
            The lower triangular part and the diagonal must be 0.
    Returns:
        cost (float): The model cost at the relative parameters.
    """
    A = np.prod(np.cos(0.5 * params)**2)
    # For the other terms we only compute the prefactor relative to A
    B_over_A = 2 * np.tan(0.5 * params)
    C_over_A = B_over_A**2 / 2
    D_over_A = np.outer(B_over_A, B_over_A)
    all_terms_over_A = [
        E_A,
        np.dot(E_B, B_over_A),
        np.dot(E_C, C_over_A),
        np.sum(E_D * D_over_A),  # equals B_over_A @ E_D @ B_over_A
    ]
    cost = A * np.sum(all_terms_over_A)

    return cost
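A hypothetical invocation of `model_cost` (a sketch: the coefficient values below are made-up placeholders, with `E_D` strictly upper triangular as the docstring requires):

import numpy as np

n = 3
rng = np.random.default_rng(0)
E_A = 0.5                                    # scalar coefficient
E_B = rng.normal(size=n)                     # linear coefficients
E_C = rng.normal(size=n)                     # quadratic (diagonal) coefficients
E_D = np.triu(rng.normal(size=(n, n)), k=1)  # zero diagonal and lower triangle
params = np.full(n, 0.1)
print(model_cost(params, E_A, E_B, E_C, E_D))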
Example #2
def test_adjoint_cv_ops(self, op, size, tol):
    op_d = op.adjoint()
    op_heis = op._heisenberg_rep(op.parameters)
    op_d_heis = op_d._heisenberg_rep(op_d.parameters)
    res1 = np.dot(op_heis, op_d_heis)
    res2 = np.dot(op_d_heis, op_heis)
    np_testing.assert_allclose(res1, np.eye(size), atol=tol)
    np_testing.assert_allclose(res2, np.eye(size), atol=tol)
    assert op.wires == op_d.wires
Example #3
    def test_dot_product_qnodes_qnodes(self, qnodes, interface, tf_support,
                                       torch_support):
        """Test that the dot product of qnodes.qnodes can be applied using all interfaces"""
        if interface == "torch" and not torch_support:
            pytest.skip("Skipped, no torch support")

        if interface == "tf" and not tf_support:
            pytest.skip("Skipped, no tf support")

        qnode1, qnode2 = qnodes
        qc1 = qml.QNodeCollection([qnode1, qnode2])
        qc2 = qml.QNodeCollection([qnode1, qnode2])

        # test the dot product of qnodes, qnodes
        cost = qml.dot(qc1, qc2)

        params = [0.5643, -0.45]
        res = cost(params)

        qc1val = qc1(params)
        qc2val = qc2(params)

        if interface in ("tf", "torch"):
            res = res.numpy()
            qc1val = qc1val.numpy()
            qc2val = qc2val.numpy()

        expected = np.dot(qc1val, qc2val)
        assert np.all(res == expected)
Example #4
def fubini_calculate():
    for i in range(6):
        for j in range(6):
            params[i] += np.pi / 2
            params[j] += np.pi / 2
            plusplus = np.abs(np.dot(base, fubini_qnode(params)))**2
            params[j] -= np.pi
            plusminus = np.abs(np.dot(base, fubini_qnode(params)))**2
            params[i] -= np.pi
            minusminus = np.abs(np.dot(base, fubini_qnode(params)))**2
            params[j] += np.pi
            minusplus = np.abs(np.dot(base, fubini_qnode(params)))**2
            fubini[i, j] = (-plusplus - minusminus + plusminus +
                            minusplus) / 8
            params[i] += np.pi / 2
            params[j] -= np.pi / 2
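The nested loop above implements the second-order parameter-shift rule for the Fubini-Study metric tensor. Writing e_i for the i-th unit parameter vector, the entry being assembled is (a transcription of this snippet's own sign convention, not taken from an external source):

F_{ij} = -\frac{1}{8}\Big[
    \big|\langle\psi(\theta)|\psi(\theta + \tfrac{\pi}{2}(e_i + e_j))\rangle\big|^2
  - \big|\langle\psi(\theta)|\psi(\theta + \tfrac{\pi}{2}(e_i - e_j))\rangle\big|^2
  - \big|\langle\psi(\theta)|\psi(\theta - \tfrac{\pi}{2}(e_i - e_j))\rangle\big|^2
  + \big|\langle\psi(\theta)|\psi(\theta - \tfrac{\pi}{2}(e_i + e_j))\rangle\big|^2
\Big]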
Example #5
    def test_dot_product_qnodes_tensor(self, qnodes, interface, tf_support,
                                       torch_support):
        """Test that the dot product of qnodes.tensor can be applied using all interfaces"""
        if interface == "torch" and not torch_support:
            pytest.skip("Skipped, no torch support")

        if interface == "tf" and not tf_support:
            pytest.skip("Skipped, no tf support")

        qnode1, _ = qnodes
        qc = qml.QNodeCollection([qnode1])
        coeffs = [0.5, -0.1]

        if interface == "torch":
            coeffs = torch.tensor(coeffs, dtype=torch.float64)

        if interface == "tf":
            coeffs = tf.cast(coeffs, dtype=tf.float64)

        # test the dot product of qnodes, tensor
        cost = qml.dot(qc, coeffs)

        params = [0.5643, -0.45]
        res = cost(params)
        qcval = qc(params)

        if interface in ("tf", "torch"):
            res = res.numpy()
            qcval = qcval.numpy()
            coeffs = coeffs.numpy()

        expected = np.dot(qcval, coeffs)
        assert np.all(res == expected)
Example #6
def square_loss(labels, predictions):
    term = 0
    for l, p in zip(labels, predictions):
        lnorm = l / np.linalg.norm(l)
        pnorm = p / np.linalg.norm(p)

        term = term + np.abs(np.dot(lnorm, pnorm.T))**2

    return 1 - term / len(labels)
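A toy call of `square_loss` (made-up label/prediction rows; each row is normalized inside the function, and a return value of 0 would mean perfectly aligned pairs):

import numpy as np

labels = np.array([[1.0, 0.0], [0.0, 1.0]])
predictions = np.array([[0.9, 0.1], [0.2, 0.8]])
print(square_loss(labels, predictions))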
Example #7
    def dot(x, y):
        x, y = AutogradBox.unbox_list([x, y])

        if x.ndim == 0 and y.ndim == 0:
            return x * y

        if x.ndim == 2 and y.ndim == 2:
            return x @ y

        return np.dot(x, y)
Example #8
def get_F(qnode, params):
    for i in range(6):
        for j in range(6):
            params[i] += s
            params[j] += s
            pp_state = qnode(params)
            pp = np.abs(np.dot(bra_state, pp_state))**2
            params[j] -= 2 * s
            pm_state = qnode(params)
            pm = np.abs(np.dot(bra_state, pm_state))**2
            params[i] -= 2 * s
            params[j] += 2 * s
            mp_state = qnode(params)
            mp = np.abs(np.dot(bra_state, mp_state))**2
            params[j] -= 2 * s
            mm_state = qnode(params)
            mm = np.abs(np.dot(bra_state, mm_state))**2
            F[i, j] = (-pp + pm + mp - mm) / 8
            params[i] += s
            params[j] += s
Example #9
def fidelity(state1, state2):
    """
    Calculates the fidelity between two state vectors

    Args:
        state1 (array[float]): State vector representation
        state2 (array[float]): State vector representation

    Returns:
        float: fidelity between `state1` and `state2`
    """
    return np.abs(np.dot(np.conj(state1), state2))
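For example, with two single-qubit states (note this helper returns |<state1|state2>|, i.e. the square root of the usual pure-state fidelity):

import numpy as np

ket_plus = np.array([1.0, 1.0]) / np.sqrt(2)
ket_zero = np.array([1.0, 0.0])
print(fidelity(ket_plus, ket_zero))  # 1/sqrt(2), approximately 0.7071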
Example #10
    def wn_cost_fcn(params, circuit, ang_array, actual):
        '''
        use MAE to start
        '''
        w = params[:, -1]

        theta = params[:, :-1]
        #print(w.shape,w,theta.shape,theta)
        predictions = np.asarray([
            2. * (1.0 / (1.0 + exp(np.dot(-w, circuit(theta, features=x))))) -
            1. for x in ang_array
        ])
        return mse(actual, predictions)
Example #11
    def fubu_mat(params, orig, i, j):
        shifted = params.copy()
        u_i = np.zeros(len(params))
        u_i[i] = 1
        u_j = np.zeros(len(params))
        u_j[j] = 1

        pp_1 = prob(shifted + np.pi / 2 * (u_i + u_j))
        p_1 = np.dot(orig, pp_1.conjugate())
        p_1 = p_1.real**2 + p_1.imag**2

        pp_2 = prob(shifted + np.pi / 2 * (u_i - u_j))
        p_2 = np.dot(orig, pp_2.conjugate())
        p_2 = p_2.real**2 + p_2.imag**2

        pp_3 = prob(shifted + np.pi / 2 * (-u_i + u_j))
        p_3 = np.dot(orig, pp_3.conjugate())
        p_3 = p_3.real**2 + p_3.imag**2

        pp_4 = prob(shifted - np.pi / 2 * (u_i + u_j))
        p_4 = np.dot(orig, pp_4.conjugate())
        p_4 = p_4.real**2 + p_4.imag**2

        return 0.125 * (-p_1 + p_2 + p_3 - p_4)
Example #12
        def orbital(x, y, z):
            r"""Evaluate a basis function at a given position.

            Args:
                x (float): x component of the position
                y (float): y component of the position
                z (float): z component of the position

            Returns:
                array[float]: value of a basis function
            """
            c = ((x - r[0])**lx) * ((y - r[1])**ly) * ((z - r[2])**lz)
            e = [
                np.exp(-a * ((x - r[0])**2 + (y - r[1])**2 + (z - r[2])**2))
                for a in alpha
            ]
            return c * np.dot(coeff, e)
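The names `r`, `lx`, `ly`, `lz`, `alpha`, and `coeff` are closed over from the enclosing scope. As a self-contained sketch (lifting the body to module level), the standard STO-3G parameters for a hydrogen 1s orbital could stand in for them:

import numpy as np

r = np.array([0.0, 0.0, 0.0])      # nuclear position
lx = ly = lz = 0                   # s-type orbital: no polynomial prefactor
alpha = np.array([3.42525091, 0.62391373, 0.16885540])  # STO-3G exponents (H 1s)
coeff = np.array([0.15432897, 0.53532814, 0.44463454])  # contraction coefficients

print(orbital(0.0, 0.0, 0.0))      # value at the nucleus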
Example #13
def make_predictions(circuit, pre_trained_vals, X, Y, **kwargs):
    """

    Args:
      circuit:
      pre_trained_vals:
      X:
      Y:
      **kwargs:

    Returns:

    """

    if kwargs['readout_layer'] == 'one_hot':
        var = pre_trained_vals

    elif kwargs['readout_layer'] == "weighted_neuron":
        var = pre_trained_vals

    # make final predictions
    if kwargs['readout_layer'] == 'one_hot':
        final_predictions = np.stack([circuit(var, x) for x in X])
        acc = ohe_accuracy(Y, final_predictions)

    elif kwargs['readout_layer'] == 'weighted_neuron':
        from autograd.numpy import exp
        n = kwargs.get('nqubits')
        w = var[:, -1]
        theta = var[:, :-1].numpy()
        final_predictions = [
            int(
                np.round(
                    2. *
                    (1.0 /
                     (1.0 + exp(np.dot(-w, circuit(theta, features=x))))) - 1.,
                    1)) for x in X
        ]
        acc = wn_accuracy(Y, final_predictions)

    return final_predictions, acc
Example #14
    def wn_cost_fcn(params, circuit, ang_array, actual):
        """use MAE to start

        Args:
          params:
          circuit:
          ang_array:
          actual:

        Returns:

        """
        w = params[:, -1]

        theta = params[:, :-1]
        print(w.shape, w, theta.shape, theta)
        predictions = np.asarray([
            2. * (1.0 / (1.0 + exp(np.dot(-w, circuit(theta, x))))) - 1.
            for x in ang_array
        ])
        return mse(actual, predictions)
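The comprehension above is a sigmoid readout rescaled to the label range [-1, 1]. With q(\theta, x) the vector of circuit expectation values and \sigma the logistic function, each prediction is

\hat{y}(x) = 2\,\sigma\big(w \cdot q(\theta, x)\big) - 1,
\qquad \sigma(z) = \frac{1}{1 + e^{-z}},

which is what the `exp(np.dot(-w, ...))` expression computes.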
Example #15
def natural_gradient(params):
    """Calculate the natural gradient of the qnode() cost function.

    The code you write for this challenge should be completely contained within this function
    between the # QHACK # comment markers.

    You should evaluate the metric tensor and the gradient of the QNode, and then combine these
    together using the natural gradient definition. The natural gradient should be returned as a
    NumPy array.

    The metric tensor should be evaluated using the equation provided in the problem text. Hint:
    you will need to define a new QNode that returns the quantum state before measurement.

    Args:
        params (np.ndarray): Input parameters, of dimension 6

    Returns:
        np.ndarray: The natural gradient evaluated at the input parameters, of dimension 6
    """

    def fubu_mat(params, orig, i, j):
        shifted = params.copy()
        u_i = np.zeros(len(params))
        u_i[i] = 1
        u_j = np.zeros(len(params))
        u_j[j] = 1

        pp_1 = prob(shifted + np.pi / 2 * (u_i + u_j))
        p_1 = np.dot(orig, pp_1.conjugate())
        p_1 = p_1.real**2 + p_1.imag**2

        pp_2 = prob(shifted + np.pi / 2 * (u_i - u_j))
        p_2 = np.dot(orig, pp_2.conjugate())
        p_2 = p_2.real**2 + p_2.imag**2

        pp_3 = prob(shifted + np.pi / 2 * (-u_i + u_j))
        p_3 = np.dot(orig, pp_3.conjugate())
        p_3 = p_3.real**2 + p_3.imag**2

        pp_4 = prob(shifted - np.pi / 2 * (u_i + u_j))
        p_4 = np.dot(orig, pp_4.conjugate())
        p_4 = p_4.real**2 + p_4.imag**2

        return 0.125 * (-p_1 + p_2 + p_3 - p_4)

    natural_grad = np.zeros(6)
    # gradient = np.zeros([6], dtype=np.float64)

    # for i in range(len(params)):
    #     f, b = parameter_shift_term(params, i)
    #     gradient[i] = 0.5 * (f - b)

    gradient = qml.grad(qnode, argnum=0)(params)
    # print(qml.metric_tensor(qnode, diag_approx=True)(params))
    f_mat = np.zeros([6, 6], dtype=np.float64)
    orig = prob(params)
    for i in range(len(params)):
        for j in range(len(params)):
            f_mat[i, j] = fubu_mat(params, orig, i, j)

    f_inv = np.linalg.pinv(f_mat)
    for i in range(len(params)):
        natural_grad[i] = np.dot(f_inv[i], gradient)

    # QHACK #

    # QHACK #

    return natural_grad
Example #16
    ax1.set_title("Original data", pad=20)
    if len(A[0]) == 2:
        ax1.scatter(A[:, 0], A[:, 1], c='r')
        ax1.scatter(B[:, 0], B[:, 1], c='b')
        ax1.set_ylim((-2, 2))
    elif len(A[0]) == 1:
        ax1.scatter(A[:, 0], np.zeros(len(A)), c='r')
        ax1.scatter(B[:, 0], np.zeros(len(B)), c='b')
        ax1.set_ylim((-0.1, 0.1))

    ax1.set_xlim((-2, 2))
    # Plotting 2: gram matrix in original space
    ax5 = fig.add_subplot(2, 3, 4)
    ax5.grid(False)
    X_normal = [x / len(x) for x in X]
    gram_original = [[np.dot(x1, x2) for x1 in X_normal] for x2 in X_normal]
    ax5.matshow(gram_original, cmap='Greys')
    # Plotting 3: untrained gram matrix in Hilbert space
    ax6 = fig.add_subplot(2, 3, 5)
    ax6.set_title("Step 0", pad=20)
    ax6.grid(False)
    gram_before = [[circuit(init_pars, x1=x1, x2=x2) for x1 in X] for x2 in X]
    cax1 = ax6.matshow(gram_before, vmin=0, vmax=1)
    # Plotting 4: trained gram matrix in Hilbert space
    ax7 = fig.add_subplot(2, 3, 6)
    ax7.set_title("Step " + str(n_steps), pad=20)
    ax7.grid(False)
    gram_after = [[circuit(pars, x1=x1, x2=x2) for x1 in X] for x2 in X]
    cax2 = ax7.matshow(gram_after, vmin=0, vmax=1)
    # Plotting 5: cost
    if len(cst_history) > 0:
Example #17
A_2 = np.kron(np.kron(X, Id), Id)

A_num = c[0] * A_0 + c[1] * A_1 + c[2] * A_2
b = np.ones(8) / np.sqrt(8)

##############################################################################
# We can print the explicit values of :math:`A` and :math:`b`:

print("A = \n", A_num)
print("b = \n", b)

##############################################################################
# The solution can be computed via a matrix inversion:

A_inv = np.linalg.inv(A_num)
x = np.dot(A_inv, b)

##############################################################################
# Finally, in order to compare :math:`x` with the quantum state :math:`|x\rangle`,
# we normalize and square its elements.
c_probs = (x / np.linalg.norm(x))**2

##############################################################################
# Preparation of the quantum solution
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

##############################################################################
# Given the variational weights ``w`` that we have previously optimized,
# we can generate the quantum state :math:`|x\rangle`. By measuring :math:`|x\rangle`
# in the computational basis we can estimate the probability of each basis state.
#
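# A minimal sketch of that estimation step (assuming the ``variational_block``
# ansatz and the optimized weights ``w`` from earlier in the tutorial):

n_shots = 10 ** 6
dev_x = qml.device("default.qubit", wires=3, shots=n_shots)

@qml.qnode(dev_x)
def prepare_and_sample(weights):
    variational_block(weights)  # prepares |x> from the trained weights
    return qml.sample()         # computational-basis samples, shape (n_shots, 3)

samples = prepare_and_sample(w).astype(int)
# Histogram the sampled bit strings to estimate each basis-state probability.
q_probs = np.bincount(samples @ np.array([4, 2, 1]), minlength=8) / n_shots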
Example #18
def train_best(circuit, pre_trained_vals, X_train, Y_train, batch_size,
               learning_rate, **kwargs):
    """train a circuit classifier

    Args:
      circuit(qml.QNode): A circuit that you want to train
      parameter_shape: A tuple describing the shape of the parameters. The first entry is the number of qubits,
      parameter_shape: A tuple describing the shape of the parameters. The first entry is the number of qubits,
    the second one is the number of layers in the circuit architecture.
      X_train(np.ndarray): An array of floats of size (M, n) to be used as training data.
      Y_train(np.ndarray): An array of size (M,) which are the categorical labels
    associated to the training data.
      batch_size(int): Batch size for the circuit training.
      learning_rate(float): The learning rate/step size of the optimizer.
      kwargs: Hyperparameters for the training (passed as keyword arguments). There are the following hyperparameters:
    nsteps (int) : Number of training steps.
    optim (pennylane.optimize instance): Optimizer used during the training of the circuit.
    Pass as qml.OptimizerName.
    Tmax (list): Maximum point T as defined in https://arxiv.org/abs/2010.08512. (Definition 8)
    The first element is the maximum number of parameters among all architectures,
    the second is the maximum inference time among all architectures in terms of computing time,
    the third one is the maximum inference time among all architectures in terms of the number of CNOTS
    in the circuit
    rate_type (string): Determines the type of error rate in the W-coefficient.
    If rate_type == 'accuracy', the inference time of the circuit
    is equal to the time it takes to evaluate the accuracy of the trained circuit with
    respect to a validation batch three times the size of the training batch size and
    the error rate is equal to 1-accuracy (w.r.t. to a validation batch).
    If rate_type == 'accuracy', the inference time of the circuit is equal to the time
    it takes to train the circuit (for nsteps training steps) and compute the cost at
    each step and the error rate is equal to the cost after nsteps training steps.
      pre_trained_vals:
      **kwargs:

    Returns:
      Yprime: final predictions, final accuracy

    """
    from autograd.numpy import exp

    def ohe_cost_fcn(params, circuit, ang_array, actual):
        """use MAE to start

        Args:
          params:
          circuit:
          ang_array:
          actual:

        Returns:

        """
        predictions = (np.stack([circuit(params, x)
                                 for x in ang_array]) + 1) * 0.5
        return mse(actual, predictions)

    def wn_cost_fcn(params, circuit, ang_array, actual):
        """use MAE to start

        Args:
          params:
          circuit:
          ang_array:
          actual:

        Returns:

        """
        w = params[:, -1]

        theta = params[:, :-1]
        print(w.shape, w, theta.shape, theta)
        predictions = np.asarray([
            2. * (1.0 / (1.0 + exp(np.dot(-w, circuit(theta, x))))) - 1.
            for x in ang_array
        ])
        return mse(actual, predictions)

    if kwargs['readout_layer'] == 'one_hot':
        var = pre_trained_vals
    elif kwargs['readout_layer'] == "weighted_neuron":
        var = pre_trained_vals
    rate_type = kwargs['rate_type']
    optim = kwargs['optim']
    num_train = len(Y_train)
    validation_size = int(0.1 * num_train)
    opt = optim(
        stepsize=learning_rate
    )  #all optimizers in autograd module take in argument stepsize, so this works for all

    for step in range(kwargs['nsteps']):
        batch_index = np.random.randint(0, num_train, (batch_size, ))
        X_train_batch = np.asarray(X_train[batch_index])
        Y_train_batch = np.asarray(Y_train[batch_index])

        if kwargs['readout_layer'] == 'one_hot':
            var, cost = opt.step_and_cost(
                lambda v: ohe_cost_fcn(v, circuit, X_train_batch, Y_train_batch
                                       ), var)
        elif kwargs['readout_layer'] == 'weighted_neuron':
            print(var)
            var, cost = opt.step_and_cost(
                lambda v: wn_cost_fcn(v, circuit, X_train_batch, Y_train_batch
                                      ), var)
        print(step, cost)
        # check for early stopping
        if step % 5 == 0:
            validation_batch = np.random.randint(0, num_train,
                                                 (validation_size, ))
            X_validation_batch = np.asarray(X_train[validation_batch])
            Y_validation_batch = np.asarray(Y_train[validation_batch])
            if kwargs['rate_type'] == 'accuracy':
                if kwargs['readout_layer'] == 'one_hot':
                    predictions = np.stack(
                        [circuit(var, x) for x in X_validation_batch])
                    acc = ohe_accuracy(Y_validation_batch, predictions)
                elif kwargs['readout_layer'] == 'weighted_neuron':
                    n = kwargs.get('nqubits')
                    w = var[:, -1]
                    theta = var[:, :-1].numpy()
                    predictions = [
                        int(
                            np.round(
                                2. *
                                (1.0 /
                                 (1.0 + exp(np.dot(-w, circuit(theta, x))))) -
                                1., 1)) for x in X_validation_batch
                    ]
                    acc = wn_accuracy(Y_validation_batch, predictions)
                if acc > 0.95:
                    break

            elif kwargs['rate_type'] == 'batch_cost':
                if cost < 0.001:
                    break
    # make final predictions
    if kwargs['readout_layer'] == 'one_hot':
        final_predictions = np.stack([circuit(var, x) for x in X_train])
    elif kwargs['readout_layer'] == 'weighted_neuron':
        n = kwargs.get('nqubits')
        w = var[:, -1]
        theta = var[:, :-1]
        final_predictions = [
            int(
                np.round(
                    2. * (1.0 / (1.0 + exp(np.dot(-w, circuit(theta, x))))) -
                    1., 1)) for x in X_train
        ]
    return var, final_predictions
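A hypothetical call showing the keyword arguments this function reads (all names and values here are placeholders; `circuit`, `pre_trained_vals`, the training arrays, and the accuracy/cost helpers must already be in scope):

import pennylane as qml

hyperparams = {
    "readout_layer": "one_hot",   # or "weighted_neuron"
    "rate_type": "accuracy",      # early stopping on validation accuracy
    "optim": qml.AdamOptimizer,   # passed as a class; instantiated inside
    "nsteps": 50,
    "nqubits": 3,
}
var, preds = train_best(circuit, pre_trained_vals, X_train, Y_train,
                        batch_size=16, learning_rate=0.05, **hyperparams)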
Example #19
def grover_operator(indices):

    # DO NOT MODIFY anything in this code block

    return np.dot(diffusion_matrix(), oracle_matrix(indices))
Example #20
def natural_gradient(params):
    """Calculate the natural gradient of the qnode() cost function.

    The code you write for this challenge should be completely contained within this function
    between the # QHACK # comment markers.

    You should evaluate the metric tensor and the gradient of the QNode, and then combine these
    together using the natural gradient definition. The natural gradient should be returned as a
    NumPy array.

    The metric tensor should be evaluated using the equation provided in the problem text. Hint:
    you will need to define a new QNode that returns the quantum state before measurement.

    Args:
        params (np.ndarray): Input parameters, of dimension 6

    Returns:
        np.ndarray: The natural gradient evaluated at the input parameters, of dimension 6
    """

    natural_grad = np.zeros(6)

    # QHACK #
    import warnings
    warnings.filterwarnings('ignore')

    fsm = np.zeros((6, 6))
    qnode(params)
    default = np.array(dev.state).conj().T
    weight_copy = np.copy(params)
    for i in range(6):
        for j in range(6):
            weight_copy[i] += np.pi / 2
            weight_copy[j] += np.pi / 2
            qnode(weight_copy)
            plus = dev.state
            weight_copy[j] -= np.pi
            qnode(weight_copy)
            minus_1 = dev.state
            weight_copy[i] -= np.pi
            weight_copy[j] += np.pi
            qnode(weight_copy)
            minus_2 = dev.state
            weight_copy[j] -= np.pi
            qnode(weight_copy)
            minus_3 = dev.state
            fsm[i][j] = (-np.abs(np.dot(default, plus))**2
                         + np.abs(np.dot(default, minus_1))**2
                         + np.abs(np.dot(default, minus_2))**2
                         - np.abs(np.dot(default, minus_3))**2) / 8
            weight_copy[i] = params[i]
            weight_copy[j] = params[j]

    weights = params
    s = np.pi / 2
    gradient = np.zeros(6)
    weight_copy = np.copy(weights)
    for i in range(len(weights)):
        weight_copy[i] += s
        plus = qnode(weight_copy)
        weight_copy[i] -= (2 * s)
        minus = qnode(weight_copy)
        gradient[i] = (plus - minus) / (2 * np.sin(s))
        weight_copy[i] = weights[i]

    f_minus = np.linalg.inv(fsm)
    natural_grad = f_minus @ gradient

    # QHACK #

    return natural_grad
Example #21
def HNode(params):  # Evaluate the QNodeCollection
    return np.dot(coeffs, qnodes(params, size=SIZE, layers=LAYERS))
Example #22
def Hamiltonian(params):
    return np.dot(coeffs, qnodes(params, size=SIZE, layers=LAYERS))
Example #23
def natural_gradient(params):
    """Calculate the natural gradient of the qnode() cost function.

    The code you write for this challenge should be completely contained within this function
    between the # QHACK # comment markers.

    You should evaluate the metric tensor and the gradient of the QNode, and then combine these
    together using the natural gradient definition. The natural gradient should be returned as a
    NumPy array.

    The metric tensor should be evaluated using the equation provided in the problem text. Hint:
    you will need to define a new QNode that returns the quantum state before measurement.

    Args:
        params (np.ndarray): Input parameters, of dimension 6

    Returns:
        np.ndarray: The natural gradient evaluated at the input parameters, of dimension 6
    """

    natural_grad = np.zeros(6)

    # QHACK #
    grad_func = qml.grad(qnode)
    gradient = grad_func(params)[0]
    block_diag_mt = qml.metric_tensor(qnode)(params)
    approx_nat_gradient = np.dot(np.linalg.pinv(block_diag_mt), gradient)

    # print(gradient)
    # print(np.round(block_diag_mt, 8))
    # print(approx_nat_gradient)

    def second_order_parameter_shift(params, i, j, shift_i, shift_j):
        shifted = params.copy()
        shifted[np.unravel_index(i, shifted.shape)] += shift_i
        shifted[np.unravel_index(j, shifted.shape)] += shift_j
        return shifted

    @qml.qnode(dev)
    def overlap(params, shifted_params):
        variational_circuit(shifted_params)
        qml.inv(qml.template(variational_circuit)(params))
        #obs = qml.expval(qml.PauliZ(0) @ qml.PauliZ(1) @ qml.PauliZ(2))
        return qml.probs([0, 1, 2])

    # print('\n', overlap(params, params)[0])

    f_matrix = np.zeros([6, 6], dtype=np.float64)
    shift = 0.5 * np.pi

    # print('\n', overlap(params, second_order_parameter_shift(params, 1, 1, shift, shift))[0])

    for i in range(len(gradient)):
        for j in range(i, len(gradient)):
            pp = overlap(
                params, second_order_parameter_shift(params, i, j, shift,
                                                     shift))[0]
            mp = overlap(
                params, second_order_parameter_shift(params, i, j, -shift,
                                                     shift))[0]
            pm = overlap(
                params, second_order_parameter_shift(params, i, j, shift,
                                                     -shift))[0]
            mm = overlap(
                params,
                second_order_parameter_shift(params, i, j, -shift, -shift))[0]
            fij = (-pp + mp + pm - mm) / 8.0
            f_matrix[i, j] = fij
            f_matrix[j, i] = fij

    # for i in range(len(gradient)):
    #     shifted = params.copy()
    #     shifted[np.unravel_index(i, shifted.shape)] += shift
    #     fii_prob = 1 - 0.5 * (overlap(params, shifted) + 1.0)
    #     f_matrix[i, i] = fii_prob - 1

    # print(np.round(f_matrix, 8))
    natural_grad = np.dot(np.linalg.pinv(f_matrix), gradient)

    # QHACK #

    return natural_grad
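The `overlap` QNode above uses the compute-uncompute (inversion test) trick: running the circuit at the shifted parameters followed by the inverse circuit at the original parameters, the probability of the all-zeros outcome is

p_0 = \big|\langle 0 \,|\, U(\theta)^{\dagger} U(\theta') \,|\, 0 \rangle\big|^2
    = \big|\langle \psi(\theta) \,|\, \psi(\theta') \rangle\big|^2,

which is exactly the squared overlap needed for the metric tensor.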
Example #24
def train_circuit(circuit, parameter_shape, X_train, Y_train, batch_size,
                  learning_rate, **kwargs):
    """
    train a circuit classifier
    Args:
        circuit (qml.QNode): A circuit that you want to train
        parameter_shape: A tuple describing the shape of the parameters. The first entry is the number of qubits,
        the second one is the number of layers in the circuit architecture.
        X_train (np.ndarray): An array of floats of size (M, n) to be used as training data.
        Y_train (np.ndarray): An array of size (M,) which are the categorical labels
            associated to the training data.

        batch_size (int): Batch size for the circuit training.

        learning_rate (float): The learning rate/step size of the optimizer.

        kwargs: Hyperparameters for the training (passed as keyword arguments). There are the following hyperparameters:

            nsteps (int) : Number of training steps.

            optim (pennylane.optimize instance): Optimizer used during the training of the circuit.
                Pass as qml.OptimizerName.

            Tmax (list): Maximum point T as defined in https://arxiv.org/abs/2010.08512. (Definition 8)
                    The first element is the maximum number of parameters among all architectures,
                    the second is the maximum inference time among all architectures in terms of computing time,
                    the third one is the maximum inference time among all architectures in terms of the number of CNOTS
                    in the circuit

            rate_type (string): Determines the type of error rate in the W-coefficient.
                    If rate_type == 'accuracy', the inference time of the circuit
                    is equal to the time it takes to evaluate the accuracy of the trained circuit with
                    respect to a validation batch three times the size of the training batch size and
                    the error rate is equal to 1-accuracy (w.r.t. a validation batch).

                    If rate_type == 'batch_cost', the inference time of the circuit is equal to the time
                    it takes to train the circuit (for nsteps training steps) and compute the cost at
                    each step and the error rate is equal to the cost after nsteps training steps.

            inf_time (string): Determines how the inference time is measured: 'timeit' times the
                    evaluation on a validation batch, while 'numcnots' uses the number of
                    entangling gates in the circuit (kwargs['numcnots']).

            readout_layer (string): Either 'one_hot' or 'weighted_neuron', selecting the readout
                    and the corresponding cost function.

    Returns:
        (W_, var): W-coefficient, trained weights
    """

    #print('batch_size',batch_size)
    # fix the seed while debugging
    #np.random.seed(1337)
    def ohe_cost_fcn(params, circuit, ang_array, actual):
        '''
        use MAE to start
        '''
        predictions = (np.stack([circuit(params, x)
                                 for x in ang_array]) + 1) * 0.5
        return mse(actual, predictions)

    def wn_cost_fcn(params, circuit, ang_array, actual):
        '''
        use MAE to start
        '''
        w = params[:, -1]

        theta = params[:, :-1]
        #print(w.shape,w,theta.shape,theta)
        predictions = np.asarray([
            2. * (1.0 / (1.0 + exp(np.dot(-w, circuit(theta, features=x))))) -
            1. for x in ang_array
        ])
        return mse(actual, predictions)

    if kwargs['readout_layer'] == 'one_hot':
        var = np.zeros(parameter_shape)
    elif kwargs['readout_layer'] == "weighted_neuron":
        var = np.hstack(
            (np.zeros(parameter_shape), np.random.random(
                (kwargs['nqubits'], 1)) - 0.5))
    rate_type = kwargs['rate_type']
    inf_time = kwargs['inf_time']
    optim = kwargs['optim']
    numcnots = kwargs['numcnots']

    Tmax = kwargs[
        'Tmax']  #Tmax[0] is maximum parameter size, Tmax[1] maximum inftime (timeit),Tmax[2] maximum number of entangling gates
    num_train = len(Y_train)
    validation_size = int(0.1 * num_train)
    opt = optim(
        stepsize=learning_rate
    )  #all optimizers in autograd module take in argument stepsize, so this works for all
    start = time.time()
    for _ in range(kwargs['nsteps']):
        batch_index = np.random.randint(0, num_train, (batch_size, ))
        X_train_batch = np.asarray(X_train[batch_index])
        Y_train_batch = np.asarray(Y_train[batch_index])
        if kwargs['readout_layer'] == 'one_hot':
            var, cost = opt.step_and_cost(
                lambda v: ohe_cost_fcn(v, circuit, X_train_batch, Y_train_batch
                                       ), var)
        elif kwargs['readout_layer'] == 'weighted_neuron':
            var, cost = opt.step_and_cost(
                lambda v: wn_cost_fcn(v, circuit, X_train_batch, Y_train_batch
                                      ), var)
    end = time.time()
    cost_time = (end - start)

    if kwargs['rate_type'] == 'accuracy':
        validation_batch = np.random.randint(0, num_train, (validation_size, ))
        X_validation_batch = np.asarray(X_train[validation_batch])
        Y_validation_batch = np.asarray(Y_train[validation_batch])
        start = time.time()  # add in timeit function from Wbranch
        if kwargs['readout_layer'] == 'one_hot':
            predictions = np.stack(
                [circuit(var, x) for x in X_validation_batch])
        elif kwargs['readout_layer'] == 'weighted_neuron':
            n = kwargs.get('nqubits')
            w = var[:, -1]
            theta = var[:, :-1]
            predictions = [
                int(
                    np.round(
                        2. *
                        (1.0 /
                         (1.0 + exp(np.dot(-w, circuit(theta, features=x))))) -
                        1., 1)) for x in X_validation_batch
            ]
        end = time.time()
        inftime = (end - start) / len(X_validation_batch)
        if kwargs['readout_layer'] == 'one_hot':
            err_rate = (
                1.0 - ohe_accuracy(Y_validation_batch, predictions)
            ) + 10**-7  #add small epsilon to prevent divide by 0 errors
            #print('error rate:',err_rate)
            #print('weights: ',var)
        elif kwargs['readout_layer'] == 'weighted_neuron':
            err_rate = (
                1.0 - wn_accuracy(Y_validation_batch, predictions)
            ) + 10**-7  #add small epsilon to prevent divide by 0 errors
            #print('error rate:',err_rate)
            #print('weights: ',var)
    elif kwargs['rate_type'] == 'batch_cost':
        err_rate = (
            cost) + 10**-7  #add small epsilon to prevent divide by 0 errors
        #print('error rate:',err_rate)
        #print('weights: ',var)
        inftime = cost_time
    # QHACK #

    if kwargs['inf_time'] == 'timeit':

        W_ = np.abs((Tmax[0] - len(var)) / (Tmax[0])) * np.abs(
            (Tmax[1] - inftime) / (Tmax[1])) * (1. / err_rate)

    elif kwargs['inf_time'] == 'numcnots':
        nc_ = numcnots
        W_ = np.abs((Tmax[0] - len(var)) / (Tmax[0])) * np.abs(
            (Tmax[2] - nc_) / (Tmax[2])) * (1. / err_rate)

    return W_, var
Example #25
def HNode(params):
    return np.dot(coeffs, qnodes(params, size=SIZE, layers=LAYERS))
Example #26
##############################################################################
# We're not quite there yet; we have only calculated the probabilities of the variational circuit
# rotated into the shared eigenbasis---the :math:`|\langle \phi_n |\psi\rangle|^2`. To recover the
# *expectation values* of the two QWC observables from the probabilities, recall that we need one
# final piece of information: their eigenvalues :math:`\lambda_{A, n}` and :math:`\lambda_{B, n}`.
#
# We know that the single-qubit Pauli operators each have eigenvalues :math:`(1, -1)`, while the identity
# operator has eigenvalues :math:`(1, 1)`. We can make use of ``np.kron`` to quickly
# generate the eigenvalues of the full Pauli terms, making sure that the order
# of the eigenvalues in the Kronecker product corresponds to the tensor product.

eigenvalues_XYI = np.kron(np.kron([1, -1], [1, -1]), [1, 1])
eigenvalues_XIZ = np.kron(np.kron([1, -1], [1, 1]), [1, -1])

# Taking the linear combination of the eigenvalues and the probabilities
print("Expectation value of XYI = ", np.dot(eigenvalues_XYI, rotated_probs))
print("Expectation value of XIZ = ", np.dot(eigenvalues_XIZ, rotated_probs))

##############################################################################
# Compare this to the result when we used two circuit evaluations. We have successfully used a
# single circuit evaluation to recover both expectation values!
#
# Luckily, PennyLane automatically performs this QWC grouping under the hood. We simply
# return the two QWC Pauli terms from the QNode:


@qml.qnode(dev)
def circuit(weights):
    qml.templates.StronglyEntanglingLayers(weights, wires=range(3))
    return [
        qml.expval(qml.PauliX(0) @ qml.PauliY(1)),
        qml.expval(qml.PauliX(0) @ qml.PauliZ(2)),
    ]
Example #27
def natural_gradient(params):
    """Calculate the natural gradient of the qnode() cost function.

    The code you write for this challenge should be completely contained within this function
    between the # QHACK # comment markers.

    You should evaluate the metric tensor and the gradient of the QNode, and then combine these
    together using the natural gradient definition. The natural gradient should be returned as a
    NumPy array.

    The metric tensor should be evaluated using the equation provided in the problem text. Hint:
    you will need to define a new QNode that returns the quantum state before measurement.

    Args:
        params (np.ndarray): Input parameters, of dimension 6

    Returns:
        np.ndarray: The natural gradient evaluated at the input parameters, of dimension 6
    """

    natural_grad = np.zeros(6)

    gradient = np.zeros([natural_grad.shape[0]])
    fim = np.zeros([natural_grad.shape[0], natural_grad.shape[0]])

    eps = np.pi / 2

    for k in range(gradient.shape[0]):
        eps_plus = params.copy()
        eps_plus[k] += eps
        exp_value_plus = qnode(eps_plus)

        eps_minus = params.copy()
        eps_minus[k] -= eps
        exp_value_minus = qnode(eps_minus)

        gradient[k] = (exp_value_plus - exp_value_minus) / (2 * np.sin(eps))

    eps = np.pi / 2

    qnode(params)
    state = dev.state
    for k in range(natural_grad.shape[0]):
        for l in range(gradient.shape[0]):
            if l <= k:
                eps_pp = params.copy()
                eps_pp[k] += eps
                eps_pp[l] += eps

                eps_pm = params.copy()
                eps_pm[k] += eps
                eps_pm[l] -= eps

                eps_mp = params.copy()
                eps_mp[k] -= eps
                eps_mp[l] += eps

                eps_mm = params.copy()
                eps_mm[k] -= eps
                eps_mm[l] -= eps

                qnode(eps_pp)
                state_pp = dev.state
                measure_pp = np.abs(np.conjugate(state) @ state_pp)

                qnode(eps_pm)
                state_pm = dev.state
                measure_pm = np.abs(np.conjugate(state) @ state_pm)

                qnode(eps_mp)
                state_mp = dev.state
                measure_mp = np.abs(np.conjugate(state) @ state_mp)

                qnode(eps_mm)
                state_mm = dev.state
                measure_mm = np.abs(np.conjugate(state) @ state_mm)

                fim[k, l] = (-measure_pp**2 - measure_mm**2 + measure_mp**2 +
                             measure_pm**2) / 8

    for k in range(natural_grad.shape[0]):
        for l in range(natural_grad.shape[0]):
            if l > k:
                fim[k, l] = fim[l, k]

    natural_grad = np.dot(np.linalg.inv(fim), gradient)

    return natural_grad
Example #28
def natural_gradient(params):
    """Calculate the natural gradient of the qnode() cost function.

    The code you write for this challenge should be completely contained within this function
    between the # QHACK # comment markers.

    You should evaluate the metric tensor and the gradient of the QNode, and then combine these
    together using the natural gradient definition. The natural gradient should be returned as a
    NumPy array.

    The metric tensor should be evaluated using the equation provided in the problem text. Hint:
    you will need to define a new QNode that returns the quantum state before measurement.

    Args:
        params (np.ndarray): Input parameters, of dimension 6

    Returns:
        np.ndarray: The natural gradient evaluated at the input parameters, of dimension 6
    """

    # QHACK #
    def get_state(params):
        """ Get the state before a measurement """
        qnode(params)
        return dev.state

    # Calculate the unshifted state (its conjugate transpose)
    state_unshifted = np.conjugate(get_state(params)).T

    def shift_vector(i):
        vector = np.zeros(6)
        vector[i] = 1
        return vector

    metric_tensor = np.zeros((6, 6))

    for i in range(6):
        for j in range(i + 1):

            state_shifted_1 = get_state(params +
                                        (shift_vector(i) + shift_vector(j)) *
                                        np.pi / 2)
            state_shifted_2 = get_state(params +
                                        (shift_vector(i) - shift_vector(j)) *
                                        np.pi / 2)
            state_shifted_3 = get_state(params +
                                        (-shift_vector(i) + shift_vector(j)) *
                                        np.pi / 2)
            state_shifted_4 = get_state(params -
                                        (shift_vector(i) + shift_vector(j)) *
                                        np.pi / 2)

            metric_tensor[
                i,
                j] = (-np.abs(np.dot(state_unshifted, state_shifted_1))**2 +
                      np.abs(np.dot(state_unshifted, state_shifted_2))**2 +
                      np.abs(np.dot(state_unshifted, state_shifted_3))**2 -
                      np.abs(np.dot(state_unshifted, state_shifted_4))**2) / 8

            if i != j:
                metric_tensor[j, i] = metric_tensor[i, j]

    grad = qml.grad(qnode)
    gradient = grad(params)[0]

    metric_tensor_inv = np.linalg.inv(metric_tensor)

    natural_grad = np.dot(metric_tensor_inv, gradient)

    # QHACK #

    return natural_grad
Example #29
def natural_gradient(params):
    """Calculate the natural gradient of the qnode() cost function.

    The code you write for this challenge should be completely contained within this function
    between the # QHACK # comment markers.

    You should evaluate the metric tensor and the gradient of the QNode, and then combine these
    together using the natural gradient definition. The natural gradient should be returned as a
    NumPy array.

    The metric tensor should be evaluated using the equation provided in the problem text. Hint:
    you will need to define a new QNode that returns the quantum state before measurement.

    Args:
        params (np.ndarray): Input parameters, of dimension 6

    Returns:
        np.ndarray: The natural gradient evaluated at the input parameters, of dimension 6
    """

    natural_grad = np.zeros(6)

    # QHACK #

    ##GRADIENT CALCULATION
    ##
    gradient = np.zeros([6], dtype=np.float64)

    def parameter_shift_terms(qnode, params, pos):
        shifted = params.copy()

        shifted[pos] += np.pi / 2
        forward = qnode(shifted)

        shifted[pos] -= np.pi
        backward = qnode(shifted)

        return 0.5 * (forward - backward)

    for i in range(6):
        gradient[i] = parameter_shift_terms(qnode, params, i)

    ##
    ##GRADIENT CALCULATION

    ##BUILDING DAGGER OF VARIATIONAL CIRCUIT
    ##

    def non_parametrized_layer_dagger():
        qml.Hadamard(wires=0)
        qml.RZ(-b, wires=1)
        qml.CNOT(wires=[0, 1])
        qml.Hadamard(wires=1)
        qml.RZ(-a, wires=0)
        qml.CNOT(wires=[1, 2])
        qml.CNOT(wires=[0, 1])
        qml.RX(-a, wires=1)
        qml.RX(-b, wires=1)
        qml.RX(-a, wires=0)

    def variational_circuit_dagger(params):
        qml.RZ(-params[5], wires=2)
        qml.RY(-params[4], wires=1)
        qml.RX(-params[3], wires=0)
        non_parametrized_layer_dagger()
        qml.RZ(-params[2], wires=2)
        qml.RY(-params[1], wires=1)
        qml.RX(-params[0], wires=0)
        non_parametrized_layer_dagger()

    @qml.qnode(dev)
    def combined_circuits(shiftedParams, origParams):

        #Creates Psi_{\theta + \frac{\pi}{2}(e_i + e_j)}
        variational_circuit(shiftedParams)

        #Creates Psi_{\theta}
        variational_circuit_dagger(origParams)

        return qml.probs(wires=[0, 1, 2])

    ##
    ##BUILDING DAGGER OF VARIATIONAL CIRCUIT

    ##CALCULATING FUBINI MATRIX
    ##
    def fubini_metric(posI, posJ):
        shifted = params.copy()

        shifted[posI] += np.pi / 2
        shifted[posJ] += np.pi / 2
        first = combined_circuits(shifted, params)[0]

        shifted[posJ] -= np.pi
        second = combined_circuits(shifted, params)[0]

        shifted[posI] -= np.pi
        fourth = combined_circuits(shifted, params)[0]

        shifted[posJ] += np.pi
        third = combined_circuits(shifted, params)[0]

        return 0.125 * (-first + second + third - fourth)

    fubini_matrix = np.zeros((6, 6))

    for i in range(6):
        for j in range(6):
            fubini_matrix[i][j] = fubini_metric(i, j)

    fubini_inverse = np.linalg.inv(fubini_matrix)

    ##
    ##CALCULATING FUBINI MATRIX

    natural_grad = np.dot(fubini_inverse, gradient)

    # QHACK #

    return natural_grad
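For reference, `parameter_shift_terms` above evaluates the standard two-term parameter-shift rule (exact for Pauli-rotation gates, whose generators have eigenvalues ±1/2):

\frac{\partial f}{\partial \theta_i}
  = \frac{1}{2} \Big[ f\big(\theta + \tfrac{\pi}{2} e_i\big)
                    - f\big(\theta - \tfrac{\pi}{2} e_i\big) \Big]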
Example #30
def hybrid_model(x, w_quantum, w_classical):
    transformed_x = pnp.dot(w_classical, x)
    return quantum_model(transformed_x, w_quantum)
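A runnable sketch around this pattern; the two-qubit `quantum_model` below is an invented stand-in for whatever ansatz the original code defined:

import pennylane as qml
from pennylane import numpy as pnp

dev = qml.device("default.qubit", wires=2)

@qml.qnode(dev)
def quantum_model(x, w):
    # Encode the classically transformed features, then apply trainable rotations.
    qml.RX(x[0], wires=0)
    qml.RX(x[1], wires=1)
    qml.CNOT(wires=[0, 1])
    qml.RY(w[0], wires=0)
    qml.RY(w[1], wires=1)
    return qml.expval(qml.PauliZ(0))

x = pnp.array([0.3, -0.2, 0.5])
w_classical = 0.1 * pnp.ones((2, 3))  # maps 3 input features down to 2 angles
w_quantum = pnp.array([0.4, 0.7])
print(hybrid_model(x, w_quantum, w_classical))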