def _preprocess(weights):
    """Validate and pre-process inputs as follows:

    * Check that the weights tensor is 2-dimensional.

    Args:
        weights (tensor_like): trainable parameters of the template

    Returns:
        int: number of times that the ansatz is repeated
    """
    if qml.tape_mode_active():
        # Tape mode: use qml.math so the check works for any tensor framework.
        shape = qml.math.shape(weights)
        if len(shape) != 2:
            raise ValueError(f"Weights tensor must be 2-dimensional; got shape {shape}")
        return shape[0]

    # Legacy (non-tape) mode: fall back to the classic shape-checking helpers.
    repeat = check_number_of_layers([weights])
    n_rots = get_shape(weights)[1]
    expected_shape = (repeat, n_rots)
    check_shape(
        weights,
        expected_shape,
        msg="'weights' must be of shape {}; got {}".format(expected_shape, get_shape(weights)),
    )
    return repeat
def _preprocess(weights, initial_layer_weights, wires):
    """Validate and pre-process inputs as follows:

    * Check the shapes of the two weights tensors.

    Args:
        weights (tensor_like): trainable parameters of the template
        initial_layer_weights (tensor_like): weight tensor for the initial rotation block, shape ``(M,)``
        wires (Wires): wires that template acts on

    Returns:
        int: number of times that the ansatz is repeated
    """
    if qml.tape_mode_active():
        shape = qml.math.shape(weights)
        repeat = shape[0]

        if len(shape) > 1:
            if shape[1] != len(wires) - 1:
                raise ValueError(
                    f"Weights tensor must have second dimension of length {len(wires) - 1}; got {shape[1]}"
                )

            # bug fix: check the rank before indexing shape[2]; a 2-dimensional
            # weights tensor previously raised an opaque IndexError here instead
            # of the intended ValueError
            if len(shape) < 3:
                raise ValueError(
                    f"Weights tensor must have third dimension of length 2; got shape {shape}"
                )
            if shape[2] != 2:
                raise ValueError(
                    f"Weights tensor must have third dimension of length 2; got {shape[2]}"
                )

        shape2 = qml.math.shape(initial_layer_weights)
        if shape2 != (len(wires),):
            raise ValueError(
                f"Initial layer weights must be of shape {(len(wires),)}; got {shape2}"
            )
    else:
        repeat = check_number_of_layers([weights])

        expected_shape_initial = (len(wires),)
        check_shape(
            initial_layer_weights,
            expected_shape_initial,
            msg="Initial layer weights must be of shape {}; got {}"
            "".format(expected_shape_initial, get_shape(initial_layer_weights)),
        )

        # with zero or one wire there are no pairwise rotation blocks
        if len(wires) in [0, 1]:
            expected_shape_weights = (0,)
        else:
            expected_shape_weights = (repeat, len(wires) - 1, 2)

        check_shape(
            weights,
            expected_shape_weights,
            msg="Weights tensor must be of shape {}; got {}"
            "".format(expected_shape_weights, get_shape(weights)),
        )
    return repeat
def CVNeuralNetLayersHomeMade(
    theta_1, phi_1, varphi_1, r, phi_r, theta_2, phi_2, varphi_2, a, phi_a, k, wires
):
    """Apply a stack of continuous-variable neural-network layers.

    Validates the shapes of all eleven weight tensors against the number of
    modes, then applies ``cv_neural_net_layer`` once per layer.

    Args:
        theta_1, phi_1 (array[float]): shape ``(L, K)`` first-interferometer angles
        varphi_1 (array[float]): shape ``(L, M)`` rotation angles after the first interferometer
        r, phi_r (array[float]): shape ``(L, M)`` squeezing amounts and angles
        theta_2, phi_2 (array[float]): shape ``(L, K)`` second-interferometer angles
        varphi_2 (array[float]): shape ``(L, M)`` rotation angles after the second interferometer
        a, phi_a (array[float]): shape ``(L, M)`` displacement magnitudes and angles
        k (array[float]): shape ``(L, M)`` Kerr parameters
        wires (Sequence[int]): modes the template acts on

    Raises:
        ValueError: if inputs do not have the correct format
    """
    #############
    # Input checks

    wires = check_wires(wires)
    n_wires = len(wires)
    # K = M(M-1)/2 beamsplitters per interferometer
    n_if = n_wires * (n_wires - 1) // 2

    weights_list = [theta_1, phi_1, varphi_1, r, phi_r, theta_2, phi_2, varphi_2, a, phi_a, k]
    repeat = check_number_of_layers(weights_list)

    # second dimension of each weight tensor, in argument order
    second_dims = [
        n_if, n_if, n_wires, n_wires, n_wires, n_if, n_if, n_wires, n_wires, n_wires, n_wires
    ]
    expected_shapes = [(repeat, dim) for dim in second_dims]
    check_shapes(weights_list, expected_shapes, msg="wrong shape of weight input(s) detected")

    ############### Do for all layers

    for layer in range(repeat):
        cv_neural_net_layer(
            theta_1=theta_1[layer],
            phi_1=phi_1[layer],
            varphi_1=varphi_1[layer],
            r=r[layer],
            phi_r=phi_r[layer],
            theta_2=theta_2[layer],
            phi_2=phi_2[layer],
            varphi_2=varphi_2[layer],
            a=a[layer],
            phi_a=phi_a[layer],
            k=k[layer],
            wires=wires,
        )
def _preprocess(weights, wires, ranges):
    """Validate and pre-process inputs as follows:

    * Check the shape of the weights tensor.
    * If ranges is None, define a default.

    Args:
        weights (tensor_like): trainable parameters of the template
        wires (Wires): wires that template acts on
        ranges (Sequence[int]): range for each subsequent layer

    Returns:
        int, list[int]: number of times that the ansatz is repeated and preprocessed ranges
    """
    if qml.tape_mode_active():
        shape = qml.math.shape(weights)

        # bug fix: validate the rank before indexing into the shape, so a
        # 0-dimensional weights tensor raises the intended ValueError rather
        # than an IndexError at shape[0]
        if len(shape) != 3:
            raise ValueError(f"Weights tensor must be 3-dimensional; got shape {shape}")
        repeat = shape[0]

        if shape[1] != len(wires):
            raise ValueError(
                f"Weights tensor must have second dimension of length {len(wires)}; got {shape[1]}"
            )

        if shape[2] != 3:
            raise ValueError(
                f"Weights tensor must have third dimension of length 3; got {shape[2]}"
            )
    else:
        repeat = check_number_of_layers([weights])

        expected_shape = (repeat, len(wires), 3)
        check_shape(
            weights,
            expected_shape,
            msg="Weights tensor must be of shape {}; got {}"
            "".format(expected_shape, get_shape(weights)),
        )

    if len(wires) > 1:
        if ranges is None:
            # tile ranges with iterations of range(1, n_wires)
            ranges = [(l % (len(wires) - 1)) + 1 for l in range(repeat)]
    else:
        # a single wire admits no entangling range
        ranges = [0] * repeat

    return repeat, ranges
def _preprocess(weights, wires):
    """Validate and pre-process inputs as follows:

    * Check the shape of the weights tensor, making sure that the second dimension
      has length :math:`n`, where :math:`n` is the number of qubits.

    Args:
        weights (tensor_like): trainable parameters of the template
        wires (Wires): wires that template acts on

    Returns:
        int: number of times that the ansatz is repeated
    """
    if qml.tape_mode_active():
        shape = qml.math.shape(weights)

        # validate the rank before indexing, so 0-d input raises the intended error
        if len(shape) != 2:
            raise ValueError(f"Weights tensor must be 2-dimensional; got shape {shape}")
        repeat = shape[0]

        if shape[1] != len(wires):
            raise ValueError(
                f"Weights tensor must have second dimension of length {len(wires)}; got {shape[1]}"
            )
    else:
        repeat = check_number_of_layers([weights])

        expected_shape = (repeat, len(wires))
        # bug fix: the old message indexed get_shape(weights)[1], which raised
        # IndexError for 1-dimensional weights and reported only the second
        # dimension even when the rank or first dimension was wrong; report the
        # full expected vs. actual shape instead, matching the tape-mode style
        check_shape(
            weights,
            expected_shape,
            msg="Weights tensor must be of shape {}; got {}"
            "".format(expected_shape, get_shape(weights)),
        )

    return repeat
def _preprocess(theta_1, phi_1, varphi_1, r, phi_r, theta_2, phi_2, varphi_2, a, phi_a, k, wires):
    """Validate and pre-process inputs as follows:

    * Check that the first dimensions of all weight tensors match
    * Check that the other dimensions of all weight tensors are correct for the
      number of qubits.

    Args:
        theta_1 (tensor_like): shape :math:`(L, K)` tensor of transmittivity angles for first interferometer
        phi_1 (tensor_like): shape :math:`(L, K)` tensor of phase angles for first interferometer
        varphi_1 (tensor_like): shape :math:`(L, M)` tensor of rotation angles to apply after first interferometer
        r (tensor_like): shape :math:`(L, M)` tensor of squeezing amounts for :class:`~pennylane.ops.Squeezing` operations
        phi_r (tensor_like): shape :math:`(L, M)` tensor of squeezing angles for :class:`~pennylane.ops.Squeezing` operations
        theta_2 (tensor_like): shape :math:`(L, K)` tensor of transmittivity angles for second interferometer
        phi_2 (tensor_like): shape :math:`(L, K)` tensor of phase angles for second interferometer
        varphi_2 (tensor_like): shape :math:`(L, M)` tensor of rotation angles to apply after second interferometer
        a (tensor_like): shape :math:`(L, M)` tensor of displacement magnitudes for :class:`~pennylane.ops.Displacement` operations
        phi_a (tensor_like): shape :math:`(L, M)` tensor of displacement angles for :class:`~pennylane.ops.Displacement` operations
        k (tensor_like): shape :math:`(L, M)` tensor of kerr parameters for :class:`~pennylane.ops.Kerr` operations
        wires (Wires): wires that template acts on

    Returns:
        int: number of times that the ansatz is repeated
    """
    n_wires = len(wires)
    # K = M(M-1)/2 beamsplitters per interferometer
    n_if = n_wires * (n_wires - 1) // 2

    weights_list = [theta_1, phi_1, varphi_1, r, phi_r, theta_2, phi_2, varphi_2, a, phi_a, k]

    if qml.tape_mode_active():

        # check that first dimension is the same
        shapes = [qml.math.shape(w) for w in weights_list]
        first_dims = [s[0] for s in shapes]
        if len(set(first_dims)) > 1:
            # bug fix: this message literal was broken across a line break
            raise ValueError(
                f"The first dimension of all parameters needs to be the same, got {first_dims}"
            )
        repeat = shapes[0][0]

        # bug fix: guard against rank-1 inputs before indexing the second dimension
        if any(len(s) < 2 for s in shapes):
            raise ValueError("Got unexpected shape for one or more parameters.")

        second_dims = [s[1] for s in shapes]
        expected = [
            n_if,
            n_if,
            n_wires,
            n_wires,
            n_wires,
            n_if,
            n_if,
            n_wires,
            n_wires,
            n_wires,
            n_wires,
        ]
        if not all(e == d for e, d in zip(expected, second_dims)):
            raise ValueError("Got unexpected shape for one or more parameters.")

    else:
        repeat = check_number_of_layers(weights_list)

        expected_shapes = [
            (repeat, n_if),
            (repeat, n_if),
            (repeat, n_wires),
            (repeat, n_wires),
            (repeat, n_wires),
            (repeat, n_if),
            (repeat, n_if),
            (repeat, n_wires),
            (repeat, n_wires),
            (repeat, n_wires),
            (repeat, n_wires),
        ]
        check_shapes(
            weights_list, expected_shapes, msg="Got unexpected shape for one or more parameters"
        )

    return repeat
def RandomLayers(weights, wires, ratio_imprim=0.3, imprimitive=CNOT, rotations=None, seed=42):
    r"""Layers of randomly chosen single qubit rotations and 2-qubit entangling gates, acting
    on randomly chosen qubits.

    .. warning::
        This template uses random number generation inside qnodes. Find more
        details about how to invoke the desired random behaviour in the "Usage Details" section below.

    The argument ``weights`` contains the weights for each layer. The number of layers :math:`L`
    is therefore derived from the first dimension of ``weights``.

    The two-qubit gates of type ``imprimitive`` and the rotations are distributed randomly in the circuit.
    The number of random rotations is derived from the second dimension of ``weights``. The number of
    two-qubit gates is determined by ``ratio_imprim``. For example, a ratio of ``0.3`` with ``30`` rotations
    will lead to the use of ``10`` two-qubit gates.

    .. note::
        If applied to one qubit only, this template will use no imprimitive gates.

    This is an example of two 4-qubit random layers with four Pauli-Y/Pauli-Z rotations :math:`R_y, R_z`,
    controlled-Z gates as imprimitives, as well as ``ratio_imprim=0.3``:

    .. figure:: ../../_static/layer_rnd.png
        :align: center
        :width: 60%
        :target: javascript:void(0);

    Args:
        weights (array[float]): array of weights of shape ``(L, k)``,
        wires (Iterable or Wires): Wires that the template acts on. Accepts an iterable of numbers or strings, or
            a Wires object.
        ratio_imprim (float): value between 0 and 1 that determines the ratio of imprimitive to rotation gates
        imprimitive (pennylane.ops.Operation): two-qubit gate to use, defaults to :class:`~pennylane.ops.CNOT`
        rotations (list[pennylane.ops.Operation]): List of Pauli-X, Pauli-Y and/or Pauli-Z gates. The frequency
            determines how often a particular rotation type is used. Defaults to the use of all three
            rotations with equal frequency.
        seed (int): seed to generate random architecture, defaults to 42

    Raises:
        ValueError: if inputs do not have the correct format

    .. UsageDetails::

        **Default seed**

        ``RandomLayers`` always uses a seed to initialize the construction of a random circuit. This means
        that the template creates the same circuit every time it is called. If no seed is provided, the default
        seed of ``42`` is used.

        .. code-block:: python

            import pennylane as qml
            import numpy as np
            from pennylane.templates.layers import RandomLayers

            dev = qml.device("default.qubit", wires=2)
            weights = [[0.1, -2.1, 1.4]]

            @qml.qnode(dev)
            def circuit1(weights):
                RandomLayers(weights=weights, wires=range(2))
                return qml.expval(qml.PauliZ(0))

            @qml.qnode(dev)
            def circuit2(weights):
                RandomLayers(weights=weights, wires=range(2))
                return qml.expval(qml.PauliZ(0))

        >>> np.allclose(circuit1(weights), circuit2(weights))
        >>> True

        You can verify this by drawing the circuits.

        >>> print(circuit1.draw())
        >>> 0: ──RX(0.1)──RX(-2.1)──╭X──╭X───────────┤ ⟨Z⟩
        ... 1: ─────────────────────╰C──╰C──RZ(1.4)──┤

        >>> print(circuit2.draw())
        >>> 0: ──RX(0.1)──RX(-2.1)──╭X──╭X───────────┤ ⟨Z⟩
        ... 1: ─────────────────────╰C──╰C──RZ(1.4)──┤

        **Changing the seed**

        To change the randomly generated circuit architecture, you have to change the seed passed to the template.
        For example, these two calls of ``RandomLayers`` *do not* create the same circuit:

        .. code-block:: python

            @qml.qnode(dev)
            def circuit_9(weights):
                RandomLayers(weights=weights, wires=range(2), seed=9)
                return qml.expval(qml.PauliZ(0))

            @qml.qnode(dev)
            def circuit_12(weights):
                RandomLayers(weights=weights, wires=range(2), seed=12)
                return qml.expval(qml.PauliZ(0))

        >>> np.allclose(circuit_9(weights), circuit_12(weights))
        >>> False

        >>> print(circuit_9.draw())
        >>> 0: ──╭X──RY(-2.1)──RX(1.4)──┤ ⟨Z⟩
        ... 1: ──╰C──RX(0.1)────────────┤

        >>> print(circuit_12.draw())
        >>> 0: ──╭X──RX(-2.1)──╭C──╭X──RZ(1.4)──┤ ⟨Z⟩
        ... 1: ──╰C──RZ(0.1)───╰X──╰C───────────┤

        **Automatically creating random circuits**

        To automate the process of creating different circuits with ``RandomLayers``,
        you can set ``seed=None`` to avoid specifying a seed. However, in this case care needs
        to be taken. In the default setting, a quantum node is **mutable**, which means that the quantum function is
        re-evaluated every time it is called. This means that the circuit is re-constructed from scratch each time
        you call the qnode:

        .. code-block:: python

            @qml.qnode(dev)
            def circuit_rnd(weights):
                RandomLayers(weights=weights, wires=range(2), seed=None)
                return qml.expval(qml.PauliZ(0))

            first_call = circuit_rnd(weights)
            second_call = circuit_rnd(weights)

        >>> np.allclose(first_call, second_call)
        >>> False

        This can be rectified by making the quantum node **immutable**.

        .. code-block:: python

            @qml.qnode(dev, mutable=False)
            def circuit_rnd(weights):
                RandomLayers(weights=weights, wires=range(2), seed=None)
                return qml.expval(qml.PauliZ(0))

            first_call = circuit_rnd(weights)
            second_call = circuit_rnd(weights)

        >>> np.allclose(first_call, second_call)
        >>> True
    """
    # Seed NumPy's global RNG so the random architecture is reproducible.
    if seed is not None:
        np.random.seed(seed)

    if rotations is None:
        rotations = [RX, RY, RZ]

    #############
    # Input checks

    wires = Wires(wires)

    # hyperparameters must not carry gradients
    check_no_variable(ratio_imprim, msg="'ratio_imprim' cannot be differentiable")
    check_no_variable(imprimitive, msg="'imprimitive' cannot be differentiable")
    check_no_variable(rotations, msg="'rotations' cannot be differentiable")
    check_no_variable(seed, msg="'seed' cannot be differentiable")

    repeat = check_number_of_layers([weights])

    # number of rotations per layer is taken from the weights' second dimension
    n_rots = get_shape(weights)[1]
    expected_shape = (repeat, n_rots)
    check_shape(
        weights,
        expected_shape,
        msg="'weights' must be of shape {}; got {}"
        "".format(expected_shape, get_shape(weights)),
    )

    check_type(
        ratio_imprim,
        [float, type(None)],
        msg="'ratio_imprim' must be a float; got {}".format(ratio_imprim),
    )
    check_type(n_rots, [int, type(None)], msg="'n_rots' must be an integer; got {}".format(n_rots))
    # TODO: Check that 'rotations' contains operations
    check_type(
        rotations,
        [list, type(None)],
        msg="'rotations' must be a list of PennyLane operations; got {}"
        "".format(rotations),
    )
    check_type(seed, [int, type(None)], msg="'seed' must be an integer; got {}.".format(seed))

    ###############

    # apply one randomly-constructed layer per row of weights
    for l in range(repeat):
        random_layer(
            weights=weights[l],
            wires=wires,
            ratio_imprim=ratio_imprim,
            imprimitive=imprimitive,
            rotations=rotations,
            seed=seed,
        )
def StronglyEntanglingLayers(weights, wires, ranges=None, imprimitive=CNOT):
    r"""Layers consisting of single qubit rotations and entanglers, inspired by the
    circuit-centric classifier design `arXiv:1804.00633 <https://arxiv.org/abs/1804.00633>`_.

    The argument ``weights`` contains the weights for each layer. The number of layers :math:`L`
    is therefore derived from the first dimension of ``weights``.

    The 2-qubit gates, whose type is specified by the ``imprimitive`` argument,
    act chronologically on the :math:`M` wires, :math:`i = 1,...,M`. The second qubit of each gate is given by
    :math:`(i+r)\mod M`, where :math:`r` is a hyperparameter called the *range*, and :math:`0 < r < M`.

    If applied to one qubit only, this template will use no imprimitive gates.

    This is an example of two 4-qubit strongly entangling layers (ranges :math:`r=1` and :math:`r=2`,
    respectively) with rotations :math:`R` and CNOTs as imprimitives:

    .. figure:: ../../_static/layer_sec.png
        :align: center
        :width: 60%
        :target: javascript:void(0);

    Args:
        weights (array[float]): array of weights of shape ``(L, M, 3)``
        wires (Iterable or Wires): Wires that the template acts on. Accepts an iterable of numbers or strings, or
            a Wires object.
        ranges (Sequence[int]): sequence determining the range hyperparameter for each subsequent layer; if None
            using :math:`r=l \mod M` for the :math:`l`th layer and :math:`M` wires.
        imprimitive (pennylane.ops.Operation): two-qubit gate to use, defaults to :class:`~pennylane.ops.CNOT`

    Raises:
        ValueError: if inputs do not have the correct format
    """
    #############
    # Input checks

    wires = Wires(wires)

    check_no_variable(ranges, msg="'ranges' cannot be differentiable")
    check_no_variable(imprimitive, msg="'imprimitive' cannot be differentiable")

    repeat = check_number_of_layers([weights])

    expected_shape = (repeat, len(wires), 3)
    check_shape(
        weights,
        expected_shape,
        msg="'weights' must be of shape {}; got {}"
        "".format(expected_shape, get_shape(weights)),
    )

    if len(wires) > 1:
        if ranges is None:
            # tile ranges with iterations of range(1, n_wires)
            ranges = [(l % (len(wires) - 1)) + 1 for l in range(repeat)]

        expected_shape = (repeat,)
        # bug fix: the message previously formatted get_shape(weights) here,
        # reporting the wrong tensor when the 'ranges' shape check failed
        check_shape(
            ranges,
            expected_shape,
            msg="'ranges' must be of shape {}; got {}"
            "".format(expected_shape, get_shape(ranges)),
        )

        check_type(ranges, [list], msg="'ranges' must be a list; got {}"
                   "".format(ranges))
        for r in ranges:
            check_type(r, [int], msg="'ranges' must be a list of integers; got {}"
                       "".format(ranges))
        # each range must satisfy 0 < r < M so the imprimitive connects distinct wires
        if any((r >= len(wires) or r == 0) for r in ranges):
            raise ValueError(
                "the range for all layers needs to be smaller than the number of "
                "qubits; got ranges {}.".format(ranges))
    else:
        # a single wire admits no entangling range
        ranges = [0] * repeat

    ###############

    for l in range(repeat):
        strongly_entangling_layer(
            weights=weights[l], wires=wires, r=ranges[l], imprimitive=imprimitive
        )
def QAOAEmbedding(features, weights, wires, local_field="Y"):
    r"""
    Encodes :math:`N` features into :math:`n>N` qubits, using a layered, trainable quantum
    circuit that is inspired by the QAOA ansatz.

    A single layer applies two circuits or "Hamiltonians": The first encodes the features, and the second is
    a variational ansatz inspired by a 1-dimensional Ising model. The feature-encoding circuit associates features with
    the angles of :class:`RX` rotations. The Ising ansatz consists of trainable two-qubit ZZ interactions
    :math:`e^{-i \frac{\alpha}{2} \sigma_z \otimes \sigma_z}` (in PennyLane represented by the :class:`~.MultiRZ` gate),
    and trainable local fields :math:`e^{-i \frac{\beta}{2} \sigma_{\mu}}`, where :math:`\sigma_{\mu}`
    can be chosen to be :math:`\sigma_{x}`, :math:`\sigma_{y}` or :math:`\sigma_{z}`
    (default choice is :math:`\sigma_{y}` or the ``RY`` gate), and :math:`\alpha, \beta` are adjustable gate parameters.

    The number of features has to be smaller or equal to the number of qubits. If there are fewer features than
    qubits, the feature-encoding rotation is replaced by a Hadamard gate.

    The argument ``weights`` contains an array of the :math:`\alpha, \beta` parameters for each layer.
    The number of layers :math:`L` is derived from the first dimension of ``weights``, which has the following
    shape:

    * :math:`(L, 1)`, if the embedding acts on a single wire,
    * :math:`(L, 3)`, if the embedding acts on two wires,
    * :math:`(L, 2n)` else.

    After the :math:`L` th layer, another set of feature-encoding :class:`RX` gates is applied.

    This is an example for the full embedding circuit using 2 layers, 3 features, 4 wires, and ``RY`` local fields:

    |

    .. figure:: ../../_static/qaoa_layers.png
        :align: center
        :width: 60%
        :target: javascript:void(0);

    |

    .. note::
        ``QAOAEmbedding`` supports gradient computations with respect to both the ``features`` and the ``weights``
        arguments. Note that trainable parameters need to be passed to the quantum node as positional arguments.

    Args:
        features (array): array of features to encode
        weights (array): array of weights
        wires (Iterable or Wires): Wires that the template acts on. Accepts an iterable of numbers or strings, or
            a Wires object.
        local_field (str): type of local field used, one of ``'X'``, ``'Y'``, or ``'Z'``

    Raises:
        ValueError: if inputs do not have the correct format

    .. UsageDetails::

        The QAOA embedding encodes an :math:`n`-dimensional feature vector into at most :math:`n` qubits. The
        embedding applies layers of a circuit, and each layer is defined by a set of weight parameters.

        .. code-block:: python

            import pennylane as qml
            from pennylane.templates import QAOAEmbedding

            dev = qml.device('default.qubit', wires=2)

            @qml.qnode(dev)
            def circuit(weights, f=None):
                QAOAEmbedding(features=f, weights=weights, wires=range(2))
                return qml.expval(qml.PauliZ(0))

            features = [1., 2.]
            layer1 = [0.1, -0.3, 1.5]
            layer2 = [3.1, 0.2, -2.8]
            weights = [layer1, layer2]

            print(circuit(weights, f=features))

        **Using parameter initialization functions**

        The initial weight parameters can alternatively be generated by utility functions from the
        ``pennylane.init`` module, for example using the function :func:`~.qaoa_embedding_normal`:

        .. code-block:: python

            from pennylane.init import qaoa_embedding_normal
            weights = qaoa_embedding_normal(n_layers=2, n_wires=2, mean=0, std=0.2)

        **Training the embedding**

        The embedding is typically trained with respect to a given cost. For example, one can train it to
        minimize the PauliZ expectation of the first qubit:

        .. code-block:: python

            o = GradientDescentOptimizer()
            for i in range(10):
                weights = o.step(lambda w : circuit(w, f=features), weights)
                print("Step ", i, " weights = ", weights)

        **Training the features**

        In principle, also the features are trainable, which means that gradients with respect to feature values
        can be computed. To train both weights and features, they need to be passed to the qnode as
        positional arguments. If the built-in optimizer is used, they have to be merged to one input:

        .. code-block:: python

            @qml.qnode(dev)
            def circuit2(pars):
                weights = pars[0]
                features = pars[1]
                QAOAEmbedding(features=features, weights=weights, wires=range(2))
                return qml.expval(qml.PauliZ(0))

            features = [1., 2.]
            weights = [[0.1, -0.3, 1.5], [3.1, 0.2, -2.8]]
            pars = [weights, features]

            o = GradientDescentOptimizer()
            for i in range(10):
                pars = o.step(circuit2, pars)
                print("Step ", i, " weights = ", pars[0], " features = ", pars[1])

        **Local Fields**

        While by default, ``RY`` gates are used as local fields, one may also choose ``local_field='Z'`` or
        ``local_field='X'`` as hyperparameters of the embedding.

        .. code-block:: python

            @qml.qnode(dev)
            def circuit(weights, f=None):
                QAOAEmbedding(features=f, weights=weights, wires=range(2), local_field='Z')
                return qml.expval(qml.PauliZ(0))

        Choosing ``'Z'`` fields implements a QAOAEmbedding where the second Hamiltonian is a
        1-dimensional Ising model.
    """
    #############
    # Input checks

    wires = Wires(wires)

    expected_shape = (len(wires),)
    check_shape(
        features,
        expected_shape,
        bound="max",
        msg="'features' must be of shape {} or smaller; got {}"
        "".format((len(wires),), get_shape(features)),
    )

    check_is_in_options(
        local_field,
        ["X", "Y", "Z"],
        msg="did not recognize option {} for 'local_field'"
        "".format(local_field),
    )

    repeat = check_number_of_layers([weights])

    # the expected second dimension of the weights depends on the wire count
    if len(wires) == 1:
        expected_shape = (repeat, 1)
    elif len(wires) == 2:
        expected_shape = (repeat, 3)
    else:
        expected_shape = (repeat, 2 * len(wires))

    # bug fix: all three branches previously formatted get_shape(features) into
    # this message, reporting the wrong tensor's shape when the weights check failed
    check_shape(
        weights,
        expected_shape,
        msg="'weights' must be of shape {}; got {}"
        "".format(expected_shape, get_shape(weights)),
    )

    #####################

    if local_field == "Z":
        local_fields = RZ
    elif local_field == "X":
        local_fields = RX
    else:
        local_fields = RY

    for l in range(repeat):
        # apply alternating Hamiltonians
        qaoa_feature_encoding_hamiltonian(features, wires)
        qaoa_ising_hamiltonian(weights[l], wires, local_fields)

    # repeat the feature encoding once more at the end
    qaoa_feature_encoding_hamiltonian(features, wires)
def BasicEntanglerLayers(weights, wires, rotation=None): r"""Layers consisting of one-parameter single-qubit rotations on each qubit, followed by a closed chain or *ring* of CNOT gates. The ring of CNOT gates connects every qubit with its neighbour, with the last qubit being considered as a neighbour to the first qubit. .. figure:: ../../_static/templates/layers/basic_entangler.png :align: center :width: 40% :target: javascript:void(0); The number of layers :math:`L` is determined by the first dimension of the argument ``weights``. When using a single wire, the template only applies the single qubit gates in each layer. .. note:: This template follows the convention of dropping the entanglement between the last and the first qubit when using only two wires, so the entangler is not repeated on the same wires. In this case, only one CNOT gate is applied in each layer: .. figure:: ../../_static/templates/layers/basic_entangler_2wires.png :align: center :width: 30% :target: javascript:void(0); Args: weights (array[float]): array of weights with shape ``(L, len(wires))``, each weight is used as a parameter for the rotation wires (Iterable or Wires): Wires that the template acts on. Accepts an iterable of numbers or strings, or a Wires object. rotation (pennylane.ops.Operation): one-parameter single-qubit gate to use, if ``None``, :class:`~pennylane.ops.RX` is used as default Raises: ValueError: if inputs do not have the correct format .. UsageDetails:: The template is used inside a qnode: .. code-block:: python import pennylane as qml from pennylane.templates import BasicEntanglerLayers from math import pi n_wires = 3 dev = qml.device('default.qubit', wires=n_wires) @qml.qnode(dev) def circuit(weights): BasicEntanglerLayers(weights=weights, wires=range(n_wires)) return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_wires)] >>> circuit([[pi, pi, pi]]) [1., 1., -1.] 
**Parameter initialization function** The :mod:`~pennylane.init` module has two parameter initialization functions, ``basic_entangler_layers_normal`` and ``basic_entangler_layers_uniform``. .. code-block:: python from pennylane.init import basic_entangler_layers_normal n_layers = 4 weights = basic_entangler_layers_normal(n_layers=n_layers, n_wires=n_wires) circuit(weights) **No periodic boundary for two wires** When using two wires, the convention is to drop the periodic boundary condition. This means that the connection from the second to the first wire is omitted. .. code-block:: python n_wires = 2 dev = qml.device('default.qubit', wires=n_wires) @qml.qnode(dev) def circuit(weights): BasicEntanglerLayers(weights=weights, wires=range(n_wires)) return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_wires)] >>> circuit([[pi, pi]]) [-1, 1] **Changing the rotation gate** Any single-qubit gate can be used as a rotation gate, as long as it only takes a single parameter. The default is the ``RX`` gate. .. code-block:: python @qml.qnode(dev) def circuit(weights): BasicEntanglerLayers(weights=weights, wires=range(n_wires), rotation=qml.RZ) return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_wires)] Accidentally using a gate that expects more parameters throws a ``ValueError: Wrong number of parameters``. """ ############# # Input checks if rotation is None: rotation = RX wires = Wires(wires) repeat = check_number_of_layers([weights]) expected_shape = (repeat, len(wires)) check_shape( weights, expected_shape, msg="'weights' must be of shape {}; got {}" "".format(expected_shape, get_shape(weights)), ) ############### for layer in range(repeat): broadcast(unitary=rotation, pattern="single", wires=wires, parameters=weights[layer]) broadcast(unitary=CNOT, pattern="ring", wires=wires)
def CVNeuralNetLayers(theta_1, phi_1, varphi_1, r, phi_r, theta_2, phi_2, varphi_2, a, phi_a, k, wires):
    r"""A sequence of layers of a continuous-variable quantum neural network,
    as specified in `arXiv:1806.06871 <https://arxiv.org/abs/1806.06871>`_.

    The layer consists of interferometers, displacement and squeezing gates mimicking the linear transformation of
    a neural network in the x-basis of the quantum system, and uses a Kerr gate
    to introduce a 'quantum' nonlinearity.

    The layers act on the :math:`M` modes given in ``wires``,
    and include interferometers of :math:`K=M(M-1)/2` beamsplitters. The different weight parameters
    contain the weights for each layer. The number of layers :math:`L` is therefore derived
    from the first dimension of ``weights``.

    This example shows a 4-mode CVNeuralNet layer with squeezing gates :math:`S`, displacement gates :math:`D` and
    Kerr gates :math:`K`. The two big blocks are interferometers of type
    :mod:`pennylane.templates.layers.Interferometer`:

    .. figure:: ../../_static/layer_cvqnn.png
        :align: center
        :width: 60%
        :target: javascript:void(0);

    .. note::
        The CV neural network architecture includes :class:`~pennylane.ops.Kerr` operations.
        Make sure to use a suitable device, such as the :code:`strawberryfields.fock` device of the
        `PennyLane-SF <https://github.com/XanaduAI/pennylane-sf>`_ plugin.

    Args:
        theta_1 (array[float]): length :math:`(L, K)` array of transmittivity angles for first interferometer
        phi_1 (array[float]): length :math:`(L, K)` array of phase angles for first interferometer
        varphi_1 (array[float]): length :math:`(L, M)` array of rotation angles to apply after first interferometer
        r (array[float]): length :math:`(L, M)` array of squeezing amounts for :class:`~pennylane.ops.Squeezing` operations
        phi_r (array[float]): length :math:`(L, M)` array of squeezing angles for :class:`~pennylane.ops.Squeezing` operations
        theta_2 (array[float]): length :math:`(L, K)` array of transmittivity angles for second interferometer
        phi_2 (array[float]): length :math:`(L, K)` array of phase angles for second interferometer
        varphi_2 (array[float]): length :math:`(L, M)` array of rotation angles to apply after second interferometer
        a (array[float]): length :math:`(L, M)` array of displacement magnitudes for :class:`~pennylane.ops.Displacement` operations
        phi_a (array[float]): length :math:`(L, M)` array of displacement angles for :class:`~pennylane.ops.Displacement` operations
        k (array[float]): length :math:`(L, M)` array of kerr parameters for :class:`~pennylane.ops.Kerr` operations
        wires (Sequence[int]): sequence of mode indices that the template acts on

    Raises:
        ValueError: if inputs do not have the correct format
    """
    #############
    # Input checks

    wires = check_wires(wires)

    n_wires = len(wires)
    # K = M(M-1)/2 beamsplitters per interferometer
    n_if = n_wires * (n_wires - 1) // 2

    weights_list = [
        theta_1, phi_1, varphi_1, r, phi_r, theta_2, phi_2, varphi_2, a, phi_a, k
    ]
    repeat = check_number_of_layers(weights_list)

    # expected shape of each weight tensor, in argument order
    expected_shapes = [
        (repeat, n_if),
        (repeat, n_if),
        (repeat, n_wires),
        (repeat, n_wires),
        (repeat, n_wires),
        (repeat, n_if),
        (repeat, n_if),
        (repeat, n_wires),
        (repeat, n_wires),
        (repeat, n_wires),
        (repeat, n_wires),
    ]
    check_shapes(weights_list, expected_shapes, msg="wrong shape of weight input(s) detected")

    ###############

    # apply one CV neural-network layer per row of the weight tensors
    for l in range(repeat):
        cv_neural_net_layer(
            theta_1=theta_1[l],
            phi_1=phi_1[l],
            varphi_1=varphi_1[l],
            r=r[l],
            phi_r=phi_r[l],
            theta_2=theta_2[l],
            phi_2=phi_2[l],
            varphi_2=varphi_2[l],
            a=a[l],
            phi_a=phi_a[l],
            k=k[l],
            wires=wires,
        )
def test_check_num_layers_exception(self, inpt, repeat):
    """Tests that layer check throws exception if number of layers not consistent."""
    # `repeat` comes from the shared parametrization but is unused here:
    # the call is expected to raise before a layer count is returned.
    with pytest.raises(ValueError, match="The first dimension of all parameters"):
        check_number_of_layers(inpt)
def test_check_num_layers(self, inpt, repeat):
    """Tests that layer check returns correct number of layers."""
    # the layer count inferred from the inputs must match the parametrized value
    assert check_number_of_layers(inpt) == repeat
def _preprocess(features, wires, weights):
    """Validate and pre-process inputs as follows:

    * Check that the features tensor is one-dimensional.
    * Check that the first dimension of the features tensor has length
      :math:`n` or less, where :math:`n` is the number of qubits.
    * Check that the shape of the weights tensor is correct for the number of qubits.

    Args:
        features (tensor_like): input features to pre-process
        wires (Wires): wires that template acts on
        weights (tensor_like): weights of the embedding

    Returns:
        int: number of times that embedding is repeated
    """
    if qml.tape_mode_active():

        shape = qml.math.shape(features)

        if len(shape) != 1:
            raise ValueError(f"Features must be a one-dimensional tensor; got shape {shape}.")

        n_features = shape[0]
        if n_features > len(wires):
            raise ValueError(
                f"Features must be of length {len(wires)} or less; got length {n_features}."
            )

        shape = qml.math.shape(weights)
        repeat = shape[0]

        # The per-layer weight count depends on the register size; the
        # special cases for one and two wires mirror the non-tape branch below.
        if len(wires) == 1:
            expected_shape = (repeat, 1)
        elif len(wires) == 2:
            expected_shape = (repeat, 3)
        else:
            expected_shape = (repeat, 2 * len(wires))

        if shape != expected_shape:
            raise ValueError(f"Weights tensor must be of shape {expected_shape}; got {shape}")

    else:
        expected_shape = (len(wires),)
        check_shape(
            features,
            expected_shape,
            bound="max",
            msg="Features must be of shape {} or smaller; got {}"
            "".format((len(wires),), get_shape(features)),
        )

        repeat = check_number_of_layers([weights])

        if len(wires) == 1:
            expected_shape = (repeat, 1)
        elif len(wires) == 2:
            expected_shape = (repeat, 3)
        else:
            expected_shape = (repeat, 2 * len(wires))

        # Bug fix: the error message previously reported get_shape(features)
        # instead of the offending weights tensor's shape.
        check_shape(
            weights,
            expected_shape,
            msg="Weights tensor must be of shape {}; got {}"
            "".format(expected_shape, get_shape(weights)),
        )

    return repeat
def SimplifiedTwoDesign(initial_layer_weights, weights, wires):
    r"""Layers consisting of a simplified 2-design architecture of Pauli-Y rotations
    and controlled-Z entanglers proposed in `Cerezo et al. (2020)
    <https://arxiv.org/abs/2001.00550>`_.

    A 2-design is an ensemble of unitaries whose statistical properties match
    Haar-random unitaries up to the first 2 moments. This template is not a
    strict 2-design (its building blocks are not universal 2-qubit gates), but
    it exhibits the properties used to study "barren plateaus" in quantum
    optimization landscapes.

    The circuit starts with an initial layer of single-qubit Pauli-Y rotations,
    followed by :math:`L` main layers. Each main layer applies controlled-Z
    entanglers, each followed by a pair of Pauli-Y rotations (one per wire of
    the pair): an "even" part whose entanglers start on the first qubit, then
    an "odd" part starting on the second qubit. With :math:`M` wires, a layer
    uses :math:`\lfloor M/2 \rfloor + \lfloor (M-1)/2 \rfloor = M-1` pairs of
    angles; the number of layers :math:`L` is inferred from the first dimension
    of ``weights``.

    Args:
        initial_layer_weights (array[float]): array of weights for the initial
            rotation block, shape ``(M,)``
        weights (array[float]): array of rotation angles for the layers,
            shape ``(L, M-1, 2)``
        wires (Sequence[int] or int): qubit indices that the template acts on

    Raises:
        ValueError: if inputs do not have the correct format

    .. UsageDetails::

        The template can be used inside a :class:`~.QNode`:

        .. code-block:: python

            import pennylane as qml
            from pennylane.templates import SimplifiedTwoDesign
            from math import pi

            n_wires = 3
            dev = qml.device('default.qubit', wires=n_wires)

            @qml.qnode(dev)
            def circuit(init_weights, weights):
                SimplifiedTwoDesign(initial_layer_weights=init_weights, weights=weights, wires=range(n_wires))
                return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_wires)]

            init_weights = [pi, pi, pi]
            weights = [[[0., pi], [0., pi]], [[pi, 0.], [pi, 0.]]]

        **Parameter initialization function**

        The :mod:`~pennylane.init` module provides
        ``simplified_two_design_initial_layer_normal``,
        ``simplified_two_design_initial_layer_uniform``,
        ``simplified_two_design_weights_normal`` and
        ``simplified_two_design_weights_uniform`` to generate suitable
        parameter arrays.
    """

    #############
    # Input checks

    wires = check_wires(wires)

    n_layers = check_number_of_layers([weights])

    check_type(
        initial_layer_weights,
        [list, np.ndarray],
        msg="'initial_layer_weights' must be of type list or np.ndarray; got type {}".format(
            type(initial_layer_weights)
        ),
    )
    check_type(
        weights,
        [list, np.ndarray],
        msg="'weights' must be of type list or np.ndarray; got type {}".format(type(weights)),
    )

    shape_initial = (len(wires),)
    check_shape(
        initial_layer_weights,
        shape_initial,
        msg="'initial_layer_weights' must be of shape {}; got {}"
        "".format(shape_initial, get_shape(initial_layer_weights)),
    )

    # fewer than two wires admits no entangler pairs at all
    shape_main = (0,) if len(wires) in [0, 1] else (n_layers, len(wires) - 1, 2)
    check_shape(
        weights,
        shape_main,
        msg="'weights' must be of shape {}; got {}"
        "".format(shape_main, get_shape(weights)),
    )

    ###############

    # initial single-qubit rotation layer
    broadcast(unitary=RY, pattern="single", wires=wires, parameters=initial_layer_weights)

    split = len(wires) // 2
    for idx in range(n_layers):
        layer_params = weights[idx]

        # even part: entanglers starting on the first qubit
        broadcast(unitary=entangler, pattern="double", wires=wires, parameters=layer_params[:split])

        # odd part: entanglers starting on the second qubit
        broadcast(unitary=entangler, pattern="double_odd", wires=wires, parameters=layer_params[split:])