Example #1
    def dc_amplitude_encoding(self, input_vector, entangle):
        """Load `input_vector` into the state amplitudes using the
        divide-and-conquer scheme on len(input_vector) - 1 qubits."""
        self.num_qubits = len(input_vector) - 1
        self.quantum_data = range(self.num_qubits)
        newx = np.copy(input_vector)  # pylint: disable=no-member
        betas = []
        Encoding._recursive_compute_beta(newx, betas)
        self._dc_generate_circuit(betas, self.quantum_data, entangle)
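# For context, a sketch of what a `_recursive_compute_beta` helper typically
# computes in divide-and-conquer amplitude encoding: each tree level pairs
# adjacent amplitudes, records the RY angle that splits the pair's norm, and
# recurses on the vector of pair norms. This illustrates the standard
# decomposition for non-negative real amplitudes; it is not the class's
# actual implementation.
import numpy as np

def recursive_compute_beta_sketch(x, betas):
    """Collect RY angles level by level for a length-2^k amplitude vector."""
    if len(x) > 1:
        parents, angles = [], []
        for k in range(0, len(x), 2):
            norm = np.sqrt(x[k] ** 2 + x[k + 1] ** 2)
            parents.append(norm)
            # RY(angle) maps the parent amplitude onto the pair (x[k], x[k+1])
            angles.append(2 * np.arcsin(x[k + 1] / norm) if norm > 0 else 0.0)
        recursive_compute_beta_sketch(parents, betas)
        betas.append(angles)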
Example #2
def parameter_shift(weights):
    """Compute the gradient of the variational circuit given by the
    ansatz function using the parameter-shift rule.

    Write your code below between the # QHACK # markers—create a device with
    the correct number of qubits, create a QNode that applies the above ansatz,
    and compute the gradient of the provided ansatz using the parameter-shift rule.

    Args:
        weights (array): An array of floating-point numbers with size (2, 3).

    Returns:
        array: The gradient of the variational circuit. The shape should match
        the input weights array.
    """
    dev = qml.device("default.qubit", wires=3)

    @qml.qnode(dev)
    def circuit(weights):
        for i in range(len(weights)):
            qml.RX(weights[i, 0], wires=0)
            qml.RY(weights[i, 1], wires=1)
            qml.RZ(weights[i, 2], wires=2)

            qml.CNOT(wires=[0, 1])
            qml.CNOT(wires=[1, 2])
            qml.CNOT(wires=[2, 0])

        return qml.expval(qml.PauliY(0) @ qml.PauliZ(2))

    gradient = np.zeros_like(weights)

    # QHACK #

    # Shift each weight by +-pi/2 and apply the two-term parameter-shift rule
    weight_copy = np.copy(weights)
    for i in range(len(weights)):
        for j in range(len(weights[i])):
            weight_copy[i][j] += np.pi/2
            plus = circuit(weight_copy)
            weight_copy[i][j] -= np.pi
            minus = circuit(weight_copy)
            gradient[i][j] = 1/2 * (plus - minus)
            weight_copy[i][j] = weights[i][j]

    # QHACK #

    return gradient
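# Hypothetical smoke test, assuming the usual preamble of these challenge
# files (`import pennylane as qml`, `from pennylane import numpy as np`):
test_weights = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], requires_grad=True)
print(parameter_shift(test_weights))  # gradient with the same (2, 3) shape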
Example #3
def optimize_steps(circuit, init_params, iterations=100, opt=None):
    """Generic optimization of a parameterized circuit using initial parameters.

    Args:
        circuit: a PennyLane circuit that accepts `params` as its argument.
            The output of this circuit should be an expectation value that we
            wish to _maximize_.
        init_params (np.ndarray): The set of parameters to start optimization from.
        iterations: Number of optimization iterations to perform.
        opt: PennyLane optimizer. Defaults to `qml.AdamOptimizer(stepsize=0.01)`.

    Returns:
        cost_tape (np.ndarray): Shape (iterations,) tape of cost evaluations
        param_tape (np.ndarray): Shape (iterations, len(init_params)) tape of
            parameter values during optimization

    """

    if opt is None:
        opt = qml.AdamOptimizer(stepsize=0.01)

    # Convert to minimization problem
    cost = lambda x: -1 * circuit(x)

    cost_tape = np.zeros(iterations)
    param_tape = np.zeros((iterations, len(init_params)))

    # Optimize
    params = np.copy(init_params)
    for step in range(iterations):
        params = opt.step(cost, params)
        cost_eval = cost(params)
        cost_tape[step] = cost_eval
        param_tape[step, :] = params

    return cost_tape, param_tape
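# Hedged usage sketch; the two-qubit ansatz and starting point below are
# illustrative assumptions, not from the original notebook:
dev = qml.device("default.qubit", wires=2)

@qml.qnode(dev)
def toy_circuit(params):
    qml.RY(params[0], wires=0)
    qml.RY(params[1], wires=1)
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))

init = np.array([0.1, -0.4], requires_grad=True)
cost_tape, param_tape = optimize_steps(toy_circuit, init, iterations=50)
# cost_tape[-1] approaches -1.0 as <Z0 Z1> is driven toward its maximum of +1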
Example #4
                       [[ 4.59893525, -0.01877453,  4.86909045,  1.61046237,  4.3342154 ],
                        [ 6.54969706,  0.76974914,  6.13216135,  3.19770538,  0.35820405]],
                       [[-0.06825097,  5.46138114, -0.38685812,  2.62531926,  5.94363286],
                        [ 3.84330489,  7.62532526,  3.31992264,  4.53318486,  2.90021471]],
                       [[ 3.27271762,  6.284331  , -0.0095848 ,  1.71022713,  1.72119449],
                        [ 5.26413732, -0.5363315 ,  0.02694912,  1.85543017,  0.09469438]],
                       [[ 1.61977233,  2.12403094,  1.52887576,  1.87843468,  5.10722657],
                        [ 1.83547388,  0.10519713, -0.14516422,  2.34971729, -0.15396484]],
                       [[ 1.15227788,  4.42815449,  4.77992685,  2.00495827,  4.68944624],
                        [ 1.90477385, -0.22817579,  6.21664772,  0.34922366,  6.44687527]],
                       [[ 4.47834114,  5.80827321,  4.8221783 ,  2.07389821,  0.40258912],
                        [ 6.07380714,  6.33676481,  6.17787822,  1.86149763,  6.59189267]],
                       [[ 5.56242829,  4.49153866,  3.66496649,  4.76465886,  0.80552847],
                        [ 3.36765317,  3.41585518,  1.40441779,  1.24372229,  5.85030332]]], requires_grad=True)
if use_trained_params:
    param = np.copy(opt_param)
    print(f"Using trained parameters with average magnitude {np.mean(np.abs(param))}")
else:
    np.random.seed(43)
    param = np.random.random(size=opt_param.shape) * 4 * np.pi - 2 * np.pi
    print(f"Using untrained parameters with average magnitude {np.mean(np.abs(param))}")


# -

# # Rigetti Circuit

# + tags=[]
def rigetti_layer(x, params, wires, i0=0, inc=1):
    i = i0
    N = len(wires)
Example #5
if actually_reuse_mitigated_matrices:
    try:
        df = pd.read_pickle(filename[:-5] + '_mitigated.dill')
    except FileNotFoundError:
        # Cannot reuse mitigated matrices if the file is not found...
        actually_reuse_mitigated_matrices = False

if not actually_reuse_mitigated_matrices:
    df = pd.DataFrame()
    exact_matrix = kernel_matrices[(0., 0)]
    target = np.outer(y_train, y_train)
    times_per_fun = {fun: 0 for fun in regularizations + mitigations}
    fun_evals = {fun: 0 for fun in regularizations + mitigations}

    for pipeline_name, pipeline in tqdm(filtered_pipelines.items(),
                                        total=len(filtered_pipelines)):
        for key, mat in kernel_matrices.items():
            K = np.copy(mat)
            for fun in pipeline:
                try:
                    fun_start = time.process_time()
                    K = fun(K)
                    times_per_fun[fun] += time.process_time() - fun_start
                    fun_evals[fun] += 1
                    # Treat numerical overflow as a failed post-processing step
                    if np.any(np.isinf(K)):
                        raise ValueError
                except Exception:
                    # A failing step invalidates this matrix for this pipeline
                    K = None
                    align = np.nan
                    target_align = np.nan
                    break
            else:
                # All pipeline steps succeeded; evaluate alignment quantities
                normK = np.linalg.norm(K, 'fro')
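# For illustration, one concrete post-processing function such a pipeline
# could contain: a standard spectrum-clipping regularizer. The name and its
# presence in `regularizations` are assumptions, not taken from this snippet.
def threshold_regularize(K):
    """Project a kernel matrix onto the PSD cone by clipping negative eigenvalues."""
    vals, vecs = np.linalg.eigh(K)
    return vecs @ np.diag(np.clip(vals, 0, None)) @ vecs.T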
Example #6
def natural_gradient(params):
    """Calculate the natural gradient of the qnode() cost function.

    The code you write for this challenge should be completely contained within this function
    between the # QHACK # comment markers.

    You should evaluate the metric tensor and the gradient of the QNode, and then combine these
    together using the natural gradient definition. The natural gradient should be returned as a
    NumPy array.

    The metric tensor should be evaluated using the equation provided in the problem text. Hint:
    you will need to define a new QNode that returns the quantum state before measurement.

    Args:
        params (np.ndarray): Input parameters, of dimension 6

    Returns:
        np.ndarray: The natural gradient evaluated at the input parameters, of dimension 6
    """

    natural_grad = np.zeros(6)

    # QHACK #
    import warnings
    warnings.filterwarnings('ignore')  # silence benign warnings from repeated state access

    # Fubini-Study metric tensor from squared state overlaps, entry by entry
    fsm = np.zeros((6, 6))
    qnode(params)
    default = np.array(dev.state).conj().T  # <psi(params)|, pre-conjugated
    weight_copy = np.copy(params)
    for i in range(6):
        for j in range(6):
            # Visit the four double-shifted points (i +- pi/2, j +- pi/2)
            weight_copy[i] += np.pi / 2
            weight_copy[j] += np.pi / 2
            qnode(weight_copy)
            plus = dev.state      # |psi(i+, j+)>
            weight_copy[j] -= np.pi
            qnode(weight_copy)
            minus_1 = dev.state   # |psi(i+, j-)>
            weight_copy[i] -= np.pi
            weight_copy[j] += np.pi
            qnode(weight_copy)
            minus_2 = dev.state   # |psi(i-, j+)>
            weight_copy[j] -= np.pi
            qnode(weight_copy)
            minus_3 = dev.state   # |psi(i-, j-)>
            # F_ij = (1/8) * (-|<psi|i+,j+>|^2 + |<psi|i+,j->|^2
            #                 + |<psi|i-,j+>|^2 - |<psi|i-,j->|^2)
            fsm[i][j] = 1 / 8 * (
                -np.abs(np.dot(default, plus)) ** 2
                + np.abs(np.dot(default, minus_1)) ** 2
                + np.abs(np.dot(default, minus_2)) ** 2
                - np.abs(np.dot(default, minus_3)) ** 2
            )
            weight_copy[i] = params[i]
            weight_copy[j] = params[j]

    # Vanilla gradient via the two-term parameter-shift rule
    weights = params
    s = np.pi / 2
    gradient = np.zeros(6)
    weight_copy = np.copy(weights)
    for i in range(len(weights)):
        weight_copy[i] += s
        plus = qnode(weight_copy)
        weight_copy[i] -= (2 * s)
        minus = qnode(weight_copy)
        gradient[i] = (plus - minus) / (2 * np.sin(s))
        weight_copy[i] = weights[i]

    # Natural gradient: inverse metric tensor applied to the vanilla gradient
    f_minus = np.linalg.inv(fsm)
    natural_grad = f_minus @ gradient

    # QHACK #

    return natural_grad
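# A hedged cross-check, not part of the challenge solution: assuming the same
# `qnode` and `params` as above, PennyLane's built-in transform computes the
# block-diagonal approximation of the metric tensor directly (it may differ
# from the full tensor computed in `natural_gradient` for deeper ansatze),
# and `np.linalg.solve` avoids the explicit matrix inverse.
mt = qml.metric_tensor(qnode, approx="block-diag")(params)
natural_grad_check = np.linalg.solve(mt, qml.grad(qnode)(params))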