Example #1
    def test_marginal_probability(self, tol):
        """Test that a coherent state marginal probability is correct"""
        cutoff = 10
        dev = qml.device("strawberryfields.fock", wires=2, cutoff_dim=cutoff)

        @qml.qnode(dev)
        def circuit(a, phi):
            qml.Displacement(a, phi, wires=1)
            return qml.probs(wires=1)

        a = 0.4
        phi = -0.12

        alpha = a * np.exp(1j * phi)
        n = np.arange(cutoff)
        ref_probs = np.abs(
            np.exp(-0.5 * np.abs(alpha)**2) * alpha**n / np.sqrt(fac(n)))**2
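        # i.e. a Poisson distribution in the photon number n with mean |alpha|**2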

        res = circuit(a, phi)
        assert np.allclose(res, ref_probs, atol=tol, rtol=0)
Example #2
    def test_multi_mode_probability(self, tol):
        """Test that a product of coherent states returns the correct probability"""
        dev = qml.device("strawberryfields.gaussian", wires=2)

        @qml.qnode(dev)
        def circuit(a, phi):
            qml.Displacement(a, phi, wires=0)
            qml.Displacement(a, phi, wires=1)
            return qml.probs(wires=[0, 1])

        a = 0.4
        phi = -0.12
        cutoff = 10

        alpha = a * np.exp(1j * phi)
        n = np.arange(cutoff)
        ref_probs = np.abs(np.exp(-0.5 * np.abs(alpha) ** 2) * alpha**n / np.sqrt(fac(n))) ** 2
        ref_probs = np.kron(ref_probs, ref_probs)

        res = circuit(a, phi)
        assert np.allclose(res, ref_probs, atol=tol, rtol=0)
Example #3
def test__tape_qchem(tol):
    """The circit Ansatz with a QChem Hamiltonian produces correct results"""

    H, qubits = qml.qchem.molecular_hamiltonian(
        ["H", "H"], np.array([0.0, 0.1, 0.0, 0.0, -0.1, 0.0]))

    def circuit(params):
        circuit_ansatz(params, wires=range(4))
        return qml.expval(H)

    params = np.arange(30) * 0.111

    dev_lq = qml.device("lightning.qubit", wires=4)
    dev_dq = qml.device("default.qubit", wires=4)

    circuit_lq = qml.QNode(circuit, dev_lq, diff_method="adjoint")
    circuit_dq = qml.QNode(circuit, dev_dq, diff_method="parameter-shift")

    assert np.allclose(
        qml.grad(circuit_lq)(params),
        qml.grad(circuit_dq)(params), tol)
Example #4
    def test_finite_diff_coherent(self, tol):
        """Test that the jacobian of the probability for a coherent states is
        approximated well with finite differences"""
        cutoff = 10

        dev = qml.device("strawberryfields.fock", wires=1, cutoff_dim=cutoff)

        @qml.qnode(dev)
        def circuit(a, phi):
            qml.Displacement(a, phi, wires=0)
            return qml.probs(wires=[0])

        a = 0.4
        phi = -0.12

        n = np.arange(cutoff)

        # construct tape
        circuit.construct([a, phi], {})

        # differentiate with respect to parameter a
        circuit.qtape.trainable_params = {0}

        tapes, fn = qml.gradients.finite_diff(circuit.qtape)
        res_F = fn(dev.batch_execute(tapes)).flatten()
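        # The Fock probabilities of the displaced state are P_n(a) = exp(-a**2) * a**(2n) / n!,
        # so dP_n/da = 2 * exp(-a**2) * a**(2n - 1) * (n - a**2) / n!, which is the reference below.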
        expected_gradient = 2 * np.exp(-(a**2)) * a ** (2 * n - 1) * (n - a**2) / fac(n)
        assert np.allclose(res_F, expected_gradient, atol=tol, rtol=0)

        # re-construct tape to reset trainable_params
        circuit.construct([a, phi], {})

        # differentiate with respect to parameter phi
        circuit.qtape.trainable_params = {1}
        tapes, fn = qml.gradients.finite_diff(circuit.qtape)
        res_F = fn(dev.batch_execute(tapes)).flatten()
        expected_gradient = 0
        assert np.allclose(res_F, expected_gradient, atol=tol, rtol=0)
Example #5
def get_cells(vert, horz, iterations=None):
    """Given boolean boundary matrices, obtain cells via spreading iterations
    Args:
        vert (ndarray<bool>, shape=(m,n-1)): Vertical boundaries
        horz (ndarray<bool>, shape=(m-1,n)): Horizontal boundaries
        iterations=None (int): Number of spreading iterations. If None, defaults to max(m, n)
    
    Returns:
        mat (ndarray<int>, shape=(m,n)): Cells, given as labeling of matrix elements. The labels are contiguous.
    """
    num_rows = vert.shape[0]  # This is m in the docstring.
    num_cols = horz.shape[1]  # This is n in the docstring.
    if iterations is None:
        iterations = max(num_rows, num_cols)
    mat = np.arange(num_rows * num_cols, dtype=int).reshape(
        (num_rows, num_cols))
    for _ in range(iterations):
        for i in range(num_rows):
            for j in range(num_cols):
                nghbhood = [(i, j)]
                if j > 0 and not vert[i, j - 1]:
                    nghbhood.append((i, j - 1))
                if j < num_cols - 1 and not vert[i, j]:
                    nghbhood.append((i, j + 1))
                if i > 0 and not horz[i - 1, j]:
                    nghbhood.append((i - 1, j))
                if i < num_rows - 1 and not horz[i, j]:
                    nghbhood.append((i + 1, j))
                nghb_min = np.min([mat[_i, _j] for _i, _j in nghbhood])
                for _i, _j in nghbhood:
                    mat[_i, _j] = nghb_min

    _map = {val: count for count, val in enumerate(np.unique(mat))}
    for i in range(num_rows):
        for j in range(num_cols):
            mat[i, j] = _map[mat[i, j]]

    return mat
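
# Hypothetical usage sketch (not part of the original source); assumes numpy is
# imported as np, as get_cells itself does. The only closed wall of this 2x3 grid
# is the vertical boundary between columns 0 and 1 of the first row (True marks a
# wall), and every cell can still be reached around it, so a single label 0 results.
if __name__ == "__main__":
    example_vert = np.array([[True, False], [False, False]])  # shape (m, n-1) = (2, 2)
    example_horz = np.array([[False, False, False]])          # shape (m-1, n) = (1, 3)
    print(get_cells(example_vert, example_horz))               # prints [[0 0 0] [0 0 0]]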
Example #6
    def test_device_wire_expansion(self, tol):
        """Test that the transformation works correctly
        for the case where the transformation applies to more wires
        than the observable."""

        # create a 3-mode symmetric transformation
        wires = qml.wires.Wires([0, "a", 2])
        ndim = 1 + 2 * len(wires)

        Z = np.arange(ndim**2).reshape(ndim, ndim)
        Z = Z.T + Z

        obs = qml.NumberOperator(0)
        res = _transform_observable(obs, Z, device_wires=wires)

        # The Heisenberg representation of the number operator
        # is (X^2 + P^2) / (2*hbar) - 1/2; with PennyLane's default hbar = 2
        # the prefactor is 0.25. The quadrature ordering is
        # I, X0, P0, Xa, Pa, X2, P2.
        A = np.diag([-0.5, 0.25, 0.25, 0, 0, 0, 0])
        expected = A @ Z + Z @ A

        assert isinstance(res, qml.PolyXP)
        assert res.wires == wires
        assert np.allclose(res.data[0], expected, atol=tol, rtol=0)
Example #7
X_grid = [np.array([x, y]) for x, y in zip(xx.flatten(), yy.flatten())]

# preprocess grid points like data inputs above
padding = 0.3 * np.ones((len(X_grid), 1))
X_grid = np.c_[np.c_[X_grid, padding], np.zeros((len(X_grid), 1))]  # pad each input
normalization = np.sqrt(np.sum(X_grid ** 2, -1))
X_grid = (X_grid.T / normalization).T  # normalize each input
features_grid = np.array(
    [get_angles(x) for x in X_grid]
)  # angles for state preparation are new features
predictions_grid = [variational_classifier(var, f) for f in features_grid]
Z = np.reshape(predictions_grid, xx.shape)

# plot decision regions
cnt = plt.contourf(
    xx, yy, Z, levels=np.arange(-1, 1.1, 0.1), cmap=cm, alpha=0.8, extend="both"
)
plt.contour(
    xx, yy, Z, levels=[0.0], colors=("black",), linestyles=("--",), linewidths=(0.8,)
)
plt.colorbar(cnt, ticks=[-1, 0, 1])

# plot data
plt.scatter(
    X_train[:, 0][Y_train == 1],
    X_train[:, 1][Y_train == 1],
    c="b",
    marker="o",
    edgecolors="k",
    label="class 1 train",
)
Example #8
        train_acc.append(acc_train)
        test_acc.append(acc_test)

    return costs, train_acc, test_acc


# We now run our training algorithm and plot the results. Note that
# the matplotlib library is required for plotting.

features, Y = load_and_process_data()
costs, train_acc, test_acc = training(features, Y)

import matplotlib.pyplot as plt

fig, ax1 = plt.subplots()
iters = np.arange(0, total_iterations, 1)
colors = ["tab:red", "tab:blue"]
ax1.set_xlabel("Iteration", fontsize=17)
ax1.set_ylabel("Cost", fontsize=17, color=colors[0])
ax1.plot(iters, costs, color=colors[0], linewidth=4)
ax1.tick_params(axis="y", labelsize=14, labelcolor=colors[0])

ax2 = ax1.twinx()
ax2.set_ylabel("Test Acc.", fontsize=17, color=colors[1])
ax2.plot(iters, test_acc, color=colors[1], linewidth=4)

ax2.tick_params(axis="x", labelsize=14)
ax2.tick_params(axis="y", labelsize=14, labelcolor=colors[1])

plt.grid(False)
plt.tight_layout()
Example #9
##############################################################################
# During doubly stochastic gradient descent, we are sampling from terms of the
# analytic cost function, so it is not entirely instructive to plot the cost
# versus optimization step---partial sums of the terms in the Hamiltonian
# may have minimum energy below the ground state energy of the total Hamiltonian.
# Nevertheless, we can keep track of the cost value moving average during doubly
# stochastic gradient descent as an indicator of convergence.


def moving_average(data, n=3):
    ret = np.cumsum(data, dtype=np.float64)
    ret[n:] = ret[n:] - ret[:-n]
    return ret[n - 1:] / n
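
# Quick sanity check (illustrative, not from the original script): a window of n=3
# over [1, 2, 3, 4, 5] yields the running means [2., 3., 4.]:
# moving_average(np.array([1, 2, 3, 4, 5]), n=3) -> array([2., 3., 4.])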


average = np.vstack([np.arange(25, 200), moving_average(cost, n=50)[:-26]])

plt.plot(cost_GD, label="Vanilla gradient descent")
plt.plot(cost, ".", label="Doubly QSGD")
plt.plot(average[0], average[1], "--", label="Doubly QSGD (moving average)")
plt.hlines(min_energy, 0, 200, linestyles=":", label="Ground state energy")

plt.ylabel("Cost function value")
plt.xlabel("Optimization steps")
plt.xlim(-2, 200)
plt.legend()
plt.show()

##############################################################################
# Finally, verifying that the doubly stochastic gradient descent optimization
# correctly provides the ground state energy when evaluated for a larger
Example #10
    def test_fft_subpackage(self):
        """Test that the fft subpackage is correctly wrapped"""
        x = np.fft.fft(np.arange(8))
        assert isinstance(x, np.tensor)
Example #11
# This comparison is fair since the number of circuit
# evaluations involved in a cycle of Rotosolve is similar to those required to calculate
# the gradient of the circuit and step in this direction. Evidently, the Rotosolve algorithm
# converges on the minimum after the first cycle for this simple circuit.

params_gd = init_params.copy()
opt = qml.GradientDescentOptimizer(stepsize=0.5)
costs_gd = []
for i in range(n_steps):
    costs_gd.append(cost(params_gd))
    params_gd = opt.step(cost, params_gd)

# plot cost function optimization using the 2 techniques
import matplotlib.pyplot as plt

steps = np.arange(0, n_steps)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(7, 3))
plt.subplot(1, 2, 1)
plt.plot(steps, costs_gd, "o-")
plt.title("grad. desc.")
plt.xlabel("steps")
plt.ylabel("cost")
plt.subplot(1, 2, 2)
plt.plot(steps, costs_rotosolve, "o-")
plt.title("rotosolve")
plt.xlabel("cycles")
plt.ylabel("cost")
plt.tight_layout()
plt.show()

##############################################################################
Example #12
def visualize_trained(var, X_train, X_val, Y_train, Y_val):
    plt.figure()
    cm = plt.cm.RdBu

    # make data for decision regions
    xx, yy = np.meshgrid(np.linspace(0.0, 1.5, 20), np.linspace(0.0, 1.5, 20))
    X_grid = [np.array([x, y]) for x, y in zip(xx.flatten(), yy.flatten())]

    # preprocess grid points like data inputs above
    padding = 0.3 * np.ones((len(X_grid), 1))
    X_grid = np.c_[np.c_[X_grid, padding],
                   np.zeros((len(X_grid), 1))]  # pad each input
    normalization = np.sqrt(np.sum(X_grid**2, -1))
    X_grid = (X_grid.T / normalization).T  # normalize each input
    features_grid = np.array(
        [get_angles(x)
         for x in X_grid])  # angles for state preparation are new features
    predictions_grid = [
        variational_classifier(var, angles=f) for f in features_grid
    ]
    Z = np.reshape(predictions_grid, xx.shape)

    # plot decision regions
    cnt = plt.contourf(xx,
                       yy,
                       Z,
                       levels=np.arange(-1, 1.1, 0.1),
                       cmap=cm,
                       alpha=.8,
                       extend='both')
    plt.contour(xx,
                yy,
                Z,
                levels=[0.0],
                colors=('black', ),
                linestyles=('--', ),
                linewidths=(0.8, ))
    plt.colorbar(cnt, ticks=[-1, 0, 1])

    # plot data
    plt.scatter(X_train[:, 0][Y_train == 1],
                X_train[:, 1][Y_train == 1],
                c='b',
                marker='o',
                edgecolors='k',
                label="class 1 train")
    plt.scatter(X_val[:, 0][Y_val == 1],
                X_val[:, 1][Y_val == 1],
                c='b',
                marker='^',
                edgecolors='k',
                label="class 1 validation")
    plt.scatter(X_train[:, 0][Y_train == -1],
                X_train[:, 1][Y_train == -1],
                c='r',
                marker='o',
                edgecolors='k',
                label="class -1 train")
    plt.scatter(X_val[:, 0][Y_val == -1],
                X_val[:, 1][Y_val == -1],
                c='r',
                marker='^',
                edgecolors='k',
                label="class -1 validation")

    plt.legend()
    plt.show()
Example #13
for _ in range(steps):
    thetart = opt.step(circuit, thetart)
    Rotosolve_Cost.append(circuit(thetart))

#%% ADAM OPTIMIZER
Adam_Cost = [circuit(init_params)]

opt = qml.AdamOptimizer(0.01)

thetaadam = init_params
for _ in range(steps):
    thetaadam = opt.step(circuit, thetaadam)
    Adam_Cost.append(circuit(thetaadam))

#%% Plotting
plt.plot(np.arange(1, 402) * 2,
         GradientDescentCost,
         label='Standard gradient descent')
plt.plot(np.arange(1, 201) * 4,
         Quantum_natural_GD_Cost[:200],
         label='Quantum Natural Gradient')
plt.plot(np.arange(1, 101) * 7,
         Rotosolve_Cost[:100],
         label='Rotosolve analytic minimum')
plt.plot(np.arange(1, 402) * 2, Adam_Cost, label="Adam optimiser")
plt.legend()
plt.xlabel("Number of circuit evaluations")
plt.ylabel("Cost function")
plt.title("Optimiser performance considering circuit evaluation requirements")
plt.savefig("Images/descentcomparefair.png")
Example #14
    # Return the accuracy and an adversarial example
    return final_acc, adv_examples

accuracies = []
examples = []

# Run test for each epsilon
for eps in epsilons:
    acc, ex = test(model, device, test_loader, eps)
    accuracies.append(acc)
    examples.append(ex)

plt.figure(figsize=(5,5))
plt.plot(epsilons, accuracies, "*-")
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xticks(np.arange(0, 1.0, step=0.1))
plt.title("Accuracy vs Epsilon")
plt.xlabel("Epsilon")
plt.ylabel("Accuracy")
plt.show()

cnt = 0
plt.figure(figsize=(8,10))
for i in range(len(epsilons)):
    for j in range(len(examples[i])):
        cnt += 1
        plt.subplot(len(epsilons),len(examples[0]),cnt)
        plt.xticks([], [])
        plt.yticks([], [])
        if j == 0:
Example #15
##############################################################################
# In the case where we set ``n_layers=2``, we recover the optimal
# objective function :math:`C=4`

##############################################################################
# Plotting the results
# --------------------
# We can plot the distribution of measurements obtained from the optimized circuits. As
# expected for this graph, the partitions 0101 and 1010 are measured with the highest frequencies,
# and in the case where we set ``n_layers=2`` we obtain one of the optimal partitions with 100% certainty.

import matplotlib.pyplot as plt

xticks = range(0, 16)
xtick_labels = list(map(lambda x: format(x, "04b"), xticks))
bins = np.arange(0, 17) - 0.5

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
plt.subplot(1, 2, 1)
plt.title("n_layers=1")
plt.xlabel("bitstrings")
plt.ylabel("freq.")
plt.xticks(xticks, xtick_labels, rotation="vertical")
plt.hist(bitstrings1, bins=bins)
plt.subplot(1, 2, 2)
plt.title("n_layers=2")
plt.xlabel("bitstrings")
plt.ylabel("freq.")
plt.xticks(xticks, xtick_labels, rotation="vertical")
plt.hist(bitstrings2, bins=bins)
plt.tight_layout()
Example #16
    def test_caching_param_shift_hessian(self, num_params, tol):
        """Test that, when using parameter-shift transform,
        caching reduces the number of evaluations to their optimum
        when computing Hessians."""
        dev = qml.device("default.qubit", wires=2)
        params = np.arange(1, num_params + 1) / 10

        N = len(params)

        def cost(x, cache):
            with qml.tape.JacobianTape() as tape:
                qml.RX(x[0], wires=[0])
                qml.RY(x[1], wires=[1])

                for i in range(2, num_params):
                    qml.RZ(x[i], wires=[i % 2])

                qml.CNOT(wires=[0, 1])
                qml.var(qml.PauliZ(0) @ qml.PauliX(1))

            return execute([tape],
                           dev,
                           gradient_fn=param_shift,
                           cache=cache,
                           max_diff=2)[0]

        # No caching: number of executions is not ideal
        hess1 = qml.jacobian(qml.grad(cost))(params, cache=False)

        if num_params == 2:
            # compare to theoretical result
            x, y, *_ = params
            expected = np.array([
                [
                    2 * np.cos(2 * x) * np.sin(y)**2,
                    np.sin(2 * x) * np.sin(2 * y)
                ],
                [
                    np.sin(2 * x) * np.sin(2 * y),
                    -2 * np.cos(x)**2 * np.cos(2 * y)
                ],
            ])
            assert np.allclose(expected, hess1, atol=tol, rtol=0)

        expected_runs = 1  # forward pass
        expected_runs += 2 * N  # Jacobian
        expected_runs += 4 * N + 1  # Hessian diagonal
        expected_runs += 4 * N**2  # Hessian off-diagonal
        assert dev.num_executions == expected_runs

        # Use caching: number of executions is ideal
        dev._num_executions = 0
        hess2 = qml.jacobian(qml.grad(cost))(params, cache=True)
        assert np.allclose(hess1, hess2, atol=tol, rtol=0)

        expected_runs_ideal = 1  # forward pass
        expected_runs_ideal += 2 * N  # Jacobian
        expected_runs_ideal += 2 * N + 1  # Hessian diagonal
        expected_runs_ideal += 4 * N * (N - 1) // 2  # Hessian off-diagonal
        assert dev.num_executions == expected_runs_ideal
        assert expected_runs_ideal < expected_runs
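
        # For num_params = 2 the bookkeeping above gives 1 + 4 + 9 + 16 = 30
        # executions without caching versus 1 + 4 + 5 + 4 = 14 with caching.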
Example #17
        
    relative_p = base_p * (np.abs(angle) / (2 * np.pi))
    noise_ops = [Depolarize(relative_p, wires=wire) for wire in wires]
    return noise_ops


# # Noise mitigation computations
#
# ## Compute noisy kernel matrix

# +
# Parallelization of the previous cell
rigetti_ansatz_mapped = lambda x, params: rigetti_ansatz(x, params, range(num_wires))

shot_numbers = [10, 30, 100, 300, 1000, 3000, 0]
noise_probabilities = np.arange(0.0, 0.1, 0.002)
    
start = time.time()

def run(shots):
    sub_start = time.process_time()
    sub_kernel_matrices = {}
    for noise_p in noise_probabilities:
        analytic_device = (shots == 0)
        shots_device = None if shots == 0 else shots  # shots=0 raises an error...

        dev = qml.device("cirq.mixedsimulator", wires=num_wires, shots=shots_device, analytic=analytic_device)
        k = khf.noisy_kernel(
            rigetti_ansatz_mapped,
            dev,
            noise_channel=noise_channel,
Example #18
##############################################################################
# We now have everything at hand to model the quantum part of our experiment
# as a QNode. We will return the output probabilities necessary to compute the
# Classical Fisher Information Matrix.
@qml.qnode(dev)
def experiment(weights, phi, gamma=0.0):
    ansatz(weights[:NUM_ANSATZ_PARAMETERS])
    encoding(phi, gamma)
    measurement(weights[NUM_ANSATZ_PARAMETERS:])

    return qml.probs(wires=[0, 1, 2])


# Make a dry run to be able to draw
experiment(
    np.arange(NUM_ANSATZ_PARAMETERS + NUM_MEASUREMENT_PARAMETERS),
    np.zeros(3),
    gamma=0.2,
)
print(experiment.draw())


##############################################################################
# Evaluating the cost function
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Now, let's turn to the cost function itself. The most important ingredient
# is the Classical Fisher Information Matrix, which we compute using a separate
# function that uses the explicit `parameter-shift rule <https://pennylane.ai/qml/glossary/parameter_shift.html>`_
# to enable differentiation.
def CFIM(weights, phi, gamma):
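
# Hedged illustration (the original CFIM implementation is truncated above): for
# output probabilities p(phi), the Classical Fisher Information Matrix reads
#   F_ij = sum_k (1 / p_k) * (dp_k / dphi_i) * (dp_k / dphi_j).
# A minimal sketch, assuming hypothetical helpers `prob_fn` (probability vector)
# and `prob_jac` (its Jacobian with respect to phi, e.g. via qml.jacobian):
def classical_fisher_sketch(prob_fn, prob_jac, phi, eps=1e-12):
    p = prob_fn(phi)    # shape (K,)
    dp = prob_jac(phi)  # shape (K, len(phi))
    return (dp / (p[:, None] + eps)).T @ dp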
Example #19
Y_neg = []
Y_pos = []
for i in range(len(Y)):
    if Y[i] < 0:
        X_neg.append(X[i])
        Y_neg.append(Y[i])
    else:
        X_pos.append(X[i])
        Y_pos.append(Y[i])
X_neg = np.asarray(X_neg)
X_pos = np.asarray(X_pos)
Y_neg = np.asarray(Y_neg)
Y_pos = np.asarray(Y_pos)

# shuffle our data, positive and negative samples separately
randomize_neg = np.arange(len(X_neg))
np.random.shuffle(randomize_neg)
X_neg = X_neg[randomize_neg]
Y_neg = Y_neg[randomize_neg]
randomize_pos = np.arange(len(X_pos))
np.random.shuffle(randomize_pos)
X_pos = X_pos[randomize_pos]
Y_pos = Y_pos[randomize_pos]

# first the stitching and reshuffling of the train data
X_train = np.vstack((X_neg[0:cut_off], X_pos[0:cut_off]))
Y_train = np.hstack((Y_neg[0:cut_off], Y_pos[0:cut_off]))
randomize_all = np.arange(len(X_train))
np.random.shuffle(randomize_all)
X_train = X_train[randomize_all]
Y_train = Y_train[randomize_all]
Example #20
X_grid = np.c_[np.c_[X_grid, padding],
               np.zeros((len(X_grid), 1))]  # pad each input
normalization = np.sqrt(np.sum(X_grid**2, -1))
X_grid = (X_grid.T / normalization).T  # normalize each input
features_grid = np.array([get_angles(x) for x in X_grid
                          ])  # angles for state preparation are new features
predictions_grid = [
    variational_classifier(var, angles=f) for f in features_grid
]
Z = np.reshape(predictions_grid, xx.shape)

# plot decision regions
cnt = plt.contourf(xx,
                   yy,
                   Z,
                   levels=np.arange(-1, 1.1, 0.1),
                   cmap=cm,
                   alpha=0.8,
                   extend="both")
plt.contour(xx,
            yy,
            Z,
            levels=[0.0],
            colors=("black", ),
            linestyles=("--", ),
            linewidths=(0.8, ))
plt.colorbar(cnt, ticks=[-1, 0, 1])

# plot data
plt.scatter(
    X_train[:, 0][Y_train == 1],
Example #21
# select number of epochs
n_epochs = 5

# draw random quantum node weights
theta = strong_ent_layers_uniform(n_layers, n_qubits, seed=15)

# train the variational classifier

# start of main learning loop
# build the optimizer object
pennylane_opt = NesterovMomentumOptimizer()

log = []
# split training data into batches
X_batches = np.array_split(np.arange(len(X_train)), batches)
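# chain(*(n_epochs * [X_batches])) repeats the list of index batches once per epoch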
for it, batch_index in enumerate(chain(*(n_epochs * [X_batches]))):
    # Update the weights by one optimizer step
    batch_cost = \
        lambda t: cost(t, X_train[batch_index], e_train[batch_index])
    theta = pennylane_opt.step(batch_cost, theta)
    log.append({"theta": theta})
# end of learning loop

# convert scores to classes
scores = np.array([circuit(theta, x=x) for x in X_test])
y_pred = sgn(scores)

print(metrics.accuracy_score(y_test, y_pred))
print(metrics.confusion_matrix(y_test, y_pred))
Example #22
def circuit(params, n=None):
    StronglyEntanglingLayers(weights=params, wires=[0, 1])
    idx = np.random.choice(np.arange(5), size=n, replace=False)
    A = np.sum(terms[idx], axis=0)
    return expval(qml.Hermitian(A, wires=[0, 1]))
Example #23
def run_tree_architecture_search(config: dict, dev_type: str):
    """The main workhorse for running the algorithm

    Args:
      config: Dictionary with configuration parameters for the algorithm. Possible keys are:
    - nqubits: Integer. The number of qubits in the circuit
    - min_tree_depth: Integer. Minimum circuit depth before we start pruning
    - max_tree_depth: Integer. Maximum circuit depth
    - prune_rate: Integer. Percentage of nodes that we throw away when we prune
    - prune_step: Integer. How often do we prune
    - plot_trees: Boolean. Do we want to plot the tree at every depth?
    - data_set: String. Which dataset are we learning? Can be 'moons' or 'circles'
    - nsteps: Integer. The number of steps for training.
    - opt: qml.Optimizer. Pennylane optimizer
    - batch_size: Integer. Batch size for training.
    - n_samples: Integer. Number of samples that we want to take from the data set.
    - learning_rate: Float. Optimizer learning rate.
    - save_frequency: Integer. How often do we want to save the tree? Set to 0 for no saving.
    - save_path: String. Location to store the data.
    - embedding: String. Embedding layer; must be a key of string_to_embedding_mapping.
    - readout_layer: String. Readout strategy, either 'one_hot' or 'weighted_neuron'.
    - circuit_type: String (optional). Either 'schuld' or 'hardware'; selects the allowed layer set.
    - save_timing: Boolean (optional). Whether to record timing information.
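    (A hedged example configuration dictionary is sketched after this function definition.)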

    Returns:

    """
    # build in:  circuit type
    # if circuit_type=='schuld' use controlled rotation gates and cycle layout for entangling layers
    # if circuit_type=='hardware' use minimal gate set and path layout for entangling layers
    # Parse configuration parameters.
    NQUBITS = config['nqubits']
    NSAMPLES = config['n_samples']
    PATH = config['save_path']
    if dev_type == "local":
        dev = qml.device("default.qubit.autograd", wires=NQUBITS)
    elif dev_type == "remote":
        my_bucket = "amazon-braket-0fc49b964f85"  # the name of the bucket
        my_prefix = PATH.split('/')[1]  # name of the folder in the bucket is the same as experiment name
        s3_folder = (my_bucket, my_prefix)
        device_arn = "arn:aws:braket:::device/quantum-simulator/amazon/sv1"
        dev = qml.device("braket.aws.qubit", device_arn=device_arn, wires=NQUBITS, s3_destination_folder=s3_folder,
                         parallel=True, max_parallel=10, poll_timeout_seconds=30)

    MIN_TREE_DEPTH = config['min_tree_depth']
    MAX_TREE_DEPTH = config['max_tree_depth']
    SAVE_FREQUENCY = config['save_frequency']

    PRUNE_DEPTH_STEP = config['prune_step']  # EVERY ith step is a prune step
    PRUNE_RATE = config['prune_rate']  # Percentage of nodes to throw away at each layer
    PLOT_INTERMEDIATE_TREES = config['plot_trees']

    assert MIN_TREE_DEPTH < MAX_TREE_DEPTH, 'MIN_TREE_DEPTH must be smaller than MAX_TREE_DEPTH'
    assert 0.0 < PRUNE_RATE < 1.0, f'The PRUNE_RATE must be between 0 and 1, found {PRUNE_RATE}'

    if config['data_set'] == 'circles':
        X_train, y_train = datasets.make_circles(n_samples=NSAMPLES, factor=.5, noise=.05)
    elif config['data_set'] == 'moons':
        X_train, y_train = datasets.make_moons(n_samples=NSAMPLES, noise=.05)
    # rescale data to -1 1
    X_train = np.multiply(1.0, np.subtract(np.multiply(np.divide(np.subtract(X_train, X_train.min()),
                                                                 (X_train.max() - X_train.min())), 2.0), 1.0))
    if config['readout_layer'] == 'one_hot':
        # one hot encode labels
        y_train_ohe = np.zeros((y_train.size, y_train.max() + 1))
        y_train_ohe[np.arange(y_train.size), y_train] = 1
    elif config['readout_layer'] == 'weighted_neuron':
        y_train_ohe = y_train
    # automatically determine the number of classes
    NCLASSES = len(np.unique(y_train))
    assert NQUBITS >= NCLASSES, 'The number of qubits must be equal or larger than the number of classes'
    save_timing = config.get('save_timing', False)
    if save_timing:
        print('saving timing info')
        import time
    # Create a directed graph.
    G = nx.DiGraph()
    # Add the root
    G.add_node("ROOT")
    G.nodes['ROOT']["W"] = 0.0
    # nx.set_node_attributes(G, {'ROOT': 0.0}, 'W')
    # Define allowed layers
    ct_ = config.get('circuit_type', None)
    if ct_ == 'schuld':
        possible_layers = ['ZZ', 'X', 'Y', 'Z']
        config['parameterized_gates'] = ['ZZ', 'X', 'Y', 'Z']
    if ct_ == 'hardware':
        possible_layers = ['hw_CNOT', 'X', 'Y', 'Z']
        config['parameterized_gates'] = ['X', 'Y', 'Z']
    possible_embeddings = [config['embedding'], ]
    assert all([l in string_to_layer_mapping.keys() for l in
                possible_layers]), 'No valid mapping from string to function found'
    assert all([l in string_to_embedding_mapping.keys() for l in
                possible_embeddings]), 'No valid mapping from string to function found'
    leaves_at_depth_d = dict(zip(range(MAX_TREE_DEPTH), [[] for _ in range(MAX_TREE_DEPTH)]))
    leaves_at_depth_d[0].append('ROOT')
    # Iteratively construct tree, pruning at set rate

    ### PICKLE ALL STUFF FIRST
    pickled_data_for_MPI = [NQUBITS, NCLASSES, dev, config, X_train, y_train_ohe]
    with open(config['save_path'] + '/MPI_data.pickle', 'wb') as pdata:
        pickle.dump(pickled_data_for_MPI, pdata)

    for d in range(1, MAX_TREE_DEPTH):
        print(f"Depth = {d}")
        # Save trees
        if SAVE_FREQUENCY > 0 and not d % SAVE_FREQUENCY:
            nx.write_gpickle(G, config['save_path'] + f'/tree_depth_{d}.pickle')
        # Plot trees
        if PLOT_INTERMEDIATE_TREES:
            plot_tree(G)
        # If we have not yet passed MIN_TREE_DEPTH, don't prune
        if d < MIN_TREE_DEPTH:
            # First depth connects to root
            if d == 1:
                tree_grow_root(G, leaves_at_depth_d, possible_embeddings)
                # At the embedding level we don't need to train because there are no params.
                for v in leaves_at_depth_d[d]:
                    G.nodes[v]['W'] = 1.0
                print('current graph: ', list(G.nodes(data=True)))
                # nx.set_node_attributes(G, {v: 1.0}, 'W')
            else:
                tree_grow(G, leaves_at_depth_d, d, possible_layers)
                best_arch = max(nx.get_node_attributes(G, 'W').items(), key=operator.itemgetter(1))[0]
                print('Current best architecture: ', best_arch)
                print('max W:', G.nodes[best_arch]['W'])
                # For every leaf, create a circuit and run the optimization.
                train_all_leaves_parallel(G, leaves_at_depth_d, d, config)

        else:
            # Check that we are at the correct prune depth step.
            if not (d - MIN_TREE_DEPTH) % PRUNE_DEPTH_STEP:
                print('Prune Tree')
                best_arch = max(nx.get_node_attributes(G, 'W').items(), key=operator.itemgetter(1))[0]
                print('Current best architecture: ', best_arch)
                print('max W:', G.nodes[best_arch]['W'])
                # print(nx.get_node_attributes(G,'W'))
                tree_prune(G, leaves_at_depth_d, d, PRUNE_RATE)
                print('Grow Pruned Tree')
                tree_grow(G, leaves_at_depth_d, d, possible_layers)
                # For every leaf, create a circuit and run the optimization.
                train_all_leaves_parallel(G, leaves_at_depth_d, d, config)
            else:
                print('Grow Tree')
                best_arch = max(nx.get_node_attributes(G, 'W').items(), key=operator.itemgetter(1))[0]
                print('Current best architecture: ', best_arch)
                print('max W:', G.nodes[best_arch]['W'])
                tree_grow(G, leaves_at_depth_d, d, possible_layers)
                train_all_leaves_parallel(G, leaves_at_depth_d, d, config)

    best_arch = max(nx.get_node_attributes(G, 'W').items(), key=operator.itemgetter(1))[0]
    print('architecture with max W: ', best_arch)
    print('max W:', G.nodes[best_arch]['W'])
    print('weights: ', G.nodes[best_arch]['weights'])
    import pandas as pd
    pd.DataFrame.from_dict(nx.get_node_attributes(G, 'W'), orient='index').to_csv('tree_weights.csv')
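

# Hedged usage sketch (not from the original source): a configuration dictionary with
# the keys read by run_tree_architecture_search. All values are illustrative; the
# embedding string must be a valid key of string_to_embedding_mapping.
example_config = {
    "nqubits": 4,
    "min_tree_depth": 2,
    "max_tree_depth": 6,
    "prune_rate": 0.3,
    "prune_step": 2,
    "plot_trees": False,
    "data_set": "moons",
    "nsteps": 30,
    "opt": qml.AdamOptimizer,
    "batch_size": 20,
    "n_samples": 200,
    "learning_rate": 0.1,
    "save_frequency": 0,
    "save_path": "results/moons_run",  # hypothetical path; must exist for the MPI pickle
    "embedding": "angle",              # hypothetical; must map to string_to_embedding_mapping
    "readout_layer": "one_hot",
    "circuit_type": "schuld",
}
# run_tree_architecture_search(example_config, dev_type="local")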
Example #24
# Comparison
# ^^^^^^^^^^
#
# Let us print the classical result.
print("x_n^2 =\n", c_probs)

##############################################################################
# The previous probabilities should match the following quantum state probabilities.
print("|<x|n>|^2=\n", q_probs)

##############################################################################
# Let us graphically visualize both distributions.

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(7, 4))

ax1.bar(np.arange(0, 2**n_qubits), c_probs, color="blue")
ax1.set_xlim(-0.5, 2**n_qubits - 0.5)
ax1.set_xlabel("Vector space basis")
ax1.set_title("Classical probabilities")

ax2.bar(np.arange(0, 2**n_qubits), q_probs, color="green")
ax2.set_xlim(-0.5, 2**n_qubits - 0.5)
ax2.set_xlabel("Hilbert space basis")
ax2.set_title("Quantum probabilities")

plt.show()

##############################################################################
# References
# ----------
#
Example #25
xx, yy = np.meshgrid(np.linspace(0.0, 1.5, 20), np.linspace(0.0, 1.5, 20))
X_grid = [np.array([x, y]) for x, y in zip(xx.flatten(), yy.flatten())]

# preprocess grid points like data inputs above
padding = 0.3 * np.ones((len(X_grid), 1))
X_grid = np.c_[np.c_[X_grid, padding], np.zeros((len(X_grid), 1))]  # pad each input
normalization = np.sqrt(np.sum(X_grid ** 2, -1))
X_grid = (X_grid.T / normalization).T  # normalize each input
features_grid = np.array(
    [get_angles(x) for x in X_grid]
)  # angles for state preparation are new features
predictions_grid = [variational_classifier(var, angles=f) for f in features_grid]
Z = np.reshape(predictions_grid, xx.shape)

# plot decision regions
cnt = plt.contourf(xx, yy, Z, levels=np.arange(-1, 1.1, 0.1), cmap=cm, alpha=0.8, extend="both")
plt.contour(xx, yy, Z, levels=[0.0], colors=("black",), linestyles=("--",), linewidths=(0.8,))
plt.colorbar(cnt, ticks=[-1, 0, 1])

# plot data
plt.scatter(
    X_train[:, 0][Y_train == 1],
    X_train[:, 1][Y_train == 1],
    c="b",
    marker="o",
    edgecolors="k",
    label="class 1 train",
)
plt.scatter(
    X_val[:, 0][Y_val == 1],
    X_val[:, 1][Y_val == 1],