Example #1
import numpy as np

# `_kernel` is assumed to be imported from the module under test.


def test_kernel(binary_matrix, result):
    r"""Test that _kernel returns the correct result."""

    # get the kernel from the Gaussian elimination.
    pivots = (binary_matrix.T != 0).argmax(axis=0)
    nonpivots = np.setdiff1d(range(len(binary_matrix[0])), pivots)

    kernel = []
    for col in nonpivots:
        col_vector = binary_matrix[:, col]
        null_vector = np.zeros(binary_matrix.shape[1], dtype=int)
        null_vector[col] = 1
        for i in pivots:
            first_entry = np.where(binary_matrix[:, i] == 1)[0][0]
            if col_vector[first_entry] == 1:
                null_vector[i] = 1
        kernel.append(null_vector.tolist())

    # get the nullspace from the _kernel function.
    nullspace = _kernel(binary_matrix)

    for nullvec in kernel:
        assert nullvec in nullspace.tolist()

    assert (nullspace == result).all()
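
# A minimal, self-contained illustration of the null-space construction used in
# the test above (hypothetical data), assuming the input matrix is already in
# reduced row echelon form over GF(2):
import numpy as np

rre = np.array([[1, 0, 1],
                [0, 1, 1]])                            # pivots in columns 0, 1
pivots = (rre.T != 0).argmax(axis=0)                   # array([0, 1])
nonpivots = np.setdiff1d(range(rre.shape[1]), pivots)  # array([2])
# the single free column yields the null vector [1, 1, 1]:
print(rre @ np.array([1, 1, 1]) % 2)                   # [0 0]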
Example #2
def optimal_sector(qubit_op, generators, active_electrons):
    r"""Get the optimal sector which contains the ground state.

    To obtain the optimal sector, we need to choose the right eigenvalues for the symmetry generators :math:`\bm{\tau}`.
    We can do so by using the following relation between the Pauli-Z qubit operator and the occupation number under a
    Jordan-Wigner transform.

    .. math::

        \sigma_{i}^{z} = I - 2a_{i}^{\dagger}a_{i}

    According to this relation, the occupied and unoccupied fermionic modes correspond to the -1 and +1 eigenvalues of
    the Pauli-Z operator, respectively. Since all of the generators :math:`\bm{\tau}` consist only of :math:`I` and
    Pauli-Z operators, the correct eigenvalue for each :math:`\tau` operator can simply be obtained by applying it to
    the reference Hartree-Fock (HF) state and checking the parity of the overlap between the wires on which the
    Pauli-Z operators act and the wires that correspond to occupied orbitals in the HF state.

    Args:
        qubit_op (Hamiltonian): Hamiltonian for which symmetries are being generated to perform tapering
        generators (list[Hamiltonian]): list of symmetry generators for the Hamiltonian
        active_electrons (int): The number of active electrons in the system for generating the Hartree-Fock bitstring

    Returns:
        list[int]: eigenvalues corresponding to the optimal sector which contains the ground state

    **Example**

    >>> symbols = ['H', 'H']
    >>> geometry = np.array([[0., 0., -0.66140414], [0., 0., 0.66140414]])
    >>> mol = qml.hf.Molecule(symbols, geometry)
    >>> H = qml.hf.generate_hamiltonian(mol)(geometry)
    >>> generators, paulix_ops = qml.hf.generate_symmetries(H, len(H.wires))
    >>> qml.hf.optimal_sector(H, generators, 2)
    [1, -1, -1]
    """

    if active_electrons < 1:
        raise ValueError(
            f"The number of active electrons must be greater than zero; "
            f"got 'electrons'={active_electrons}."
        )

    num_orbitals = len(qubit_op.wires)

    if active_electrons > num_orbitals:
        raise ValueError(
            f"Number of active orbitals cannot be smaller than number of active electrons;"
            f" got 'orbitals'={num_orbitals} < 'electrons'={active_electrons}."
        )

    hf_str = np.where(np.arange(num_orbitals) < active_electrons, 1, 0)

    perm = []
    for tau in generators:
        symmstr = np.array(
            [1 if wire in tau.ops[0].wires else 0 for wire in qubit_op.wires])
        coeff = -1 if numpy.logical_xor.reduce(
            numpy.logical_and(symmstr, hf_str)) else 1
        perm.append(coeff)

    return perm
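
# Hypothetical, self-contained sketch (not part of the library API) of how the
# eigenvalue of a Z-type symmetry generator on the Hartree-Fock state reduces
# to the parity check performed in the loop above:
import numpy as np

hf_str = np.array([1, 1, 0, 0])   # HF occupation: 2 electrons in 4 spin-orbitals
symmstr = np.array([1, 0, 1, 0])  # generator applies Pauli-Z on wires 0 and 2
parity = np.logical_xor.reduce(np.logical_and(symmstr, hf_str))
print(-1 if parity else 1)        # -1: an odd number of occupied wires overlap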
Example #3
def get_cell_centers(cells):
    """Get average coordinates per cell
    Args:
        cells (ndarray<int>): Cells as computed by `get_cells`
    Returns:
        centers (dict): Map from cell labels to mean coordinates of cells
    """
    centers = {}
    for _id in np.unique(cells):
        wheres = np.where(cells == _id)
        centers[_id] = np.array(
            [np.mean(where.astype(float)) for where in wheres])
    return centers
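
# Usage sketch with toy labels (hypothetical data, no `get_cells` call needed):
import numpy as np

cells = np.array([[0, 0],
                  [1, 1]])
print(get_cell_centers(cells))  # {0: array([0. , 0.5]), 1: array([1. , 0.5])}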
Example #4
    def predict(self, features):
        """Predicts certain obervations.

        Args:
            features (array):observations to be predicted

        Returns:
            preds: float or int
                prediction of the model
        """
        model_output = np.array(
            [self.neural_network(self.var, features=x_) for x_ in features])

        if self.type_problem == "classification":
            return np.where(model_output > 0., 1, 0)
        elif self.type_problem == "multiclassification":
            soft_outputs = np.exp(model_output) / np.sum(
                np.exp(model_output), axis=1)[:, None]
            return np.argmax(soft_outputs, axis=1)
        return model_output
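
# Aside (not part of the class API): a numerically safer variant of the softmax
# used above; subtracting the row-wise maximum before exponentiating avoids
# overflow for large model outputs while leaving the probabilities unchanged.
import numpy as np

def stable_softmax(model_output):
    shifted = model_output - np.max(model_output, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=1, keepdims=True)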
Example #5
def get_cell_label_pos(cells):
    """Get proper label position per cell
    Args:
        cells (ndarray<int>): Cells as computed by `get_cells`
    Returns:
        label_pos (dict): Map from cell labels to label coordinates for cells
    """
    label_pos = get_cell_centers(cells)
    ids = label_pos.keys()
    for _id in ids:
        center = label_pos[_id]
        x, y = map(int, np.round(center))
        if cells[x, y] != _id:
            where = np.where(cells == _id)
            dists = [(coord, np.linalg.norm(center - coord, 2))
                     for coord in zip(where[0], where[1])]
            label_pos[_id] = min(dists, key=lambda d: d[1])[0]

    return label_pos
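
# Usage sketch (hypothetical labels): the mean coordinate of the L-shaped cell 0
# rounds to (1, 1), which belongs to cell 1, so the label for cell 0 snaps to
# its nearest in-cell pixel (0, 1) instead.
import numpy as np

cells = np.array([[0, 0, 0],
                  [0, 1, 1],
                  [0, 1, 1]])
print(get_cell_label_pos(cells))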
Example #6
def _group_operations(tape):
    """Divide all operations of a tape into trainable operations and blocks
    of untrainable operations after each trainable one."""

    # Extract tape operations list
    ops = tape.operations
    # Find the indices of trainable operations in the tape operations list
    trainables = np.where([qml.operation.is_trainable(op) for op in ops])[0]
    # Add the indices incremented by one to the trainable indices
    split_ids = list(chain.from_iterable([idx, idx + 1] for idx in trainables))

    # Split at trainable and incremented indices to get groups after trainable
    # operations and single trainable operations (in alternating order)
    all_groups = np.split(ops, split_ids)

    # Collect trainable operations and groups after trainable operations
    # the first set of non-trainable ops are the ops "after the -1st" trainable op
    group_after_trainable_op = dict(enumerate(all_groups[::2], start=-1))
    trainable_operations = list(chain.from_iterable(all_groups[1::2]))

    return trainable_operations, group_after_trainable_op
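
# Toy illustration (hypothetical stand-in ops, no quantum tape required) of the
# alternating split pattern produced above:
import numpy as np
from itertools import chain

ops = ["a", "T1", "b", "T2", "c"]  # "T1", "T2" stand in for trainable operations
split_ids = list(chain.from_iterable([idx, idx + 1] for idx in [1, 3]))  # [1, 2, 3, 4]
all_groups = np.split(np.array(ops), split_ids)
print(dict(enumerate([g.tolist() for g in all_groups[::2]], start=-1)))
# {-1: ['a'], 0: ['b'], 1: ['c']}
print([str(op) for op in chain.from_iterable(all_groups[1::2])])
# ['T1', 'T2']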
Example #7
def where(condition, x, y):
    """Entrywise `np.where` on the tensors wrapped by `x` and `y`."""
    return np.where(condition, *AutogradBox.unbox_list([x, y]))
Example #8
for j in range(10):
    ax = plt.subplot(gs[0, j])
    plt.imshow(X_test[j], cmap=plt.get_cmap('gray'))
    ax.axis("off")

    ax = plt.subplot(gs[1, j])
    plt.imshow(X_test[5 + j], cmap=plt.get_cmap('gray'))
    ax.axis("off")
plt.show()
# +
# Create a single set with coordinates for zero images
## perform once to populate the list
## number of pixels to sample from each image
samples_per_image = 3
## threshold greyscale on [0,1]
x_0, x_1 = np.where(train_X0[0] >= 0.95)
## sample $size datapoints from this image
sample_indices = np.random.randint(low=0,
                                   high=len(x_0),
                                   size=samples_per_image)
## add the coordinates to our dataset
zero_samples = [x_0[sample_indices], x_1[sample_indices]]
## fill the remainder of the list with this loop
for i in range(1, 500):
    ## threshold
    x_0, x_1 = np.where(train_X0[i] >= 0.95)
    ## sample $size datapoints from this image
    sample_indices = np.random.randint(low=0,
                                       high=len(x_0),
                                       size=samples_per_image)
    ## add the coordinates to our dataset
    zero_samples[0] = np.concatenate((zero_samples[0], x_0[sample_indices]))
    zero_samples[1] = np.concatenate((zero_samples[1], x_1[sample_indices]))
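
# Toy check of the thresholding step above (hypothetical 3x3 "image"):
import numpy as np

img = np.array([[0.10, 0.99, 0.20],
                [0.97, 0.30, 0.40],
                [0.50, 0.60, 0.96]])
rows, cols = np.where(img >= 0.95)
print(rows, cols)  # [0 1 2] [1 0 2]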
Example #9
# + tags=[]
num_wires = 5
use_trained_params = False

sub_filename = f'data/noisy_sim/sub_kernel_matrices_Checkerboard_{"" if use_trained_params else "un"}trained.dill'
filename = f'data/noisy_sim/kernel_matrices_Checkerboard_{"" if use_trained_params else "un"}trained.dill'
# -

# # Checkerboard dataset

# + tags=[]
np.random.seed(43)
X_train, y_train, X_test, y_test = checkerboard(30, 30, 4, 4)

print("The training data is as follows:")
plt.scatter(X_train[np.where(y_train == 1)[0],0], X_train[np.where(y_train == 1)[0],1], color="b", marker=".", label="train, 1")
plt.scatter(X_train[np.where(y_train == -1)[0],0], X_train[np.where(y_train == -1)[0],1], color="r", marker=".", label="train, -1")
print("The test data is as follows:")
plt.scatter(X_test[np.where(y_test == 1)[0],0], X_test[np.where(y_test == 1)[0],1], color="b", marker="x", label="test, 1")
plt.scatter(X_test[np.where(y_test == -1)[0],0], X_test[np.where(y_test == -1)[0],1], color="r", marker="x", label="test, -1")
plt.ylim([0, 1])
plt.xlim([0, 1])
plt.legend()

X = X_train

opt_param = np.tensor([[[ 6.22961793,  6.1909463 ,  6.24821366,  1.88800397,  1.6515437 ],
                        [-3.50578116,  2.87429701, -0.55558014, -2.97461847,  4.3646466 ]],
                       [[ 4.59893525, -0.01877453,  4.86909045,  1.61046237,  4.3342154 ],
                        [ 6.54969706,  0.76974914,  6.13216135,  3.19770538,  0.35820405]],
                       [[-0.06825097,  5.46138114, -0.38685812,  2.62531926,  5.94363286],
Example #10
def plot(X, y, log, name="", density=23):
    import matplotlib.pyplot as plt
    from matplotlib.backends.backend_pdf import PdfPages

    # data = [(i, step["batch_cost"]) for i, step in enumerate(log)]
    # data = list(zip(*data))
    # plt.figure(figsize=(11, 11))
    # plt.plot(*data)
    # plt.title(f"Learning curve")
    # plt.xlabel("Learning step")
    # plt.ylabel("Batch loss")
    # plt.savefig("learning_curve.pdf")
    # plt.close()
    # exit(-1)

    with PdfPages(f"{name}.pdf") as pdf:
        for i, step in enumerate(log):
            theta = step["theta"]

            plt.figure(figsize=(8, 8))

            # data-driven extent, immediately overridden by the fixed [0, pi] window
            extent = X[:, 0].min(), X[:, 0].max(), X[:, 1].min(), X[:, 1].max()
            extent = 0, np.pi, 0, np.pi

            xx = np.linspace(*extent[0:2], density)
            yy = np.linspace(*extent[2:4], density)
            xx, yy = np.meshgrid(xx, yy)
            Xfull = np.c_[xx.ravel(), yy.ravel()]

            # Xfull = np.random.rand(density**2,2)*np.pi

            # evaluate the classification scores over the full grid
            scores_full = np.array([circuit(theta, x=x) for x in Xfull])

            vmin, vmax = -1, 1

            scores = np.array([circuit(theta, x=x) for x in X])
            y_pred = sgn(scores)

            print(metrics.confusion_matrix(y, y_pred))

            accuracy = metrics.accuracy_score(y, y_pred)
            plt.title(f"Classification score, accuracy={accuracy:1.2f} ")
            plt.xlabel("feature 1")
            plt.ylabel("feature 2")

            imshow_handle = plt.contourf(xx,
                                         yy,
                                         scores_full.reshape(
                                             (density, density)),
                                         vmin=vmin,
                                         vmax=vmax,
                                         cmap='seismic')

            plt.xticks(np.linspace(0, np.pi, 5))
            plt.yticks(np.linspace(0, np.pi, 5))

            for cls_val, cls_col in {0: 'b', 1: 'r'}.items():
                # get row indexes for samples with this class
                row_ix = np.where(y == cls_val)
                # create scatter of these samples
                plt.scatter(X[row_ix, 0],
                            X[row_ix, 1],
                            c=cls_col,
                            lw=1,
                            edgecolor='k')

            ax = plt.axes([0.91, 0.1, 0.02, 0.8])
            plt.colorbar(imshow_handle, cax=ax, orientation='vertical')
            plt.clim(-1, 1)

            pdf.savefig()
            plt.close()
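
# Hypothetical usage, assuming `circuit`, `sgn`, and sklearn's `metrics` are
# defined in the enclosing module:
# plot(X_train, y_train, log=training_log, name="decision_boundaries")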
Example #11
                              key=pipeline_sorting_key)

for pipeline in unfilt_all_pipelines:
    this_pipe_df = same_pipeline(na_is_neg_df, pipeline)

    # Filter out group 1
    if all(this_pipe_df.q >= zero):
        groups[1].append(pipeline)
        continue

    # Check in which group a pipeline belongs per number of shots.
    group_features = []
    for shots in all_shots:
        this_df = same_shots(this_pipe_df, shots)
        # Check whether the negative entries form one contiguous block
        # starting at the first base noise rate
        neg_ids = np.where(this_df.q < zero)[0]
        neg_is_contiguous = np.array_equal(neg_ids, np.arange(len(neg_ids)))
        # Check whether the non-negative entries form one contiguous block
        # starting at the first base noise rate
        nonneg_ids = np.where(this_df.q >= zero)[0]
        nonneg_is_contiguous = np.array_equal(nonneg_ids,
                                              np.arange(len(nonneg_ids)))

        # Sort into group, based on data for this shots number
        if nonneg_is_contiguous and len(nonneg_ids) > 0:
            group_features.append(2)
        elif neg_is_contiguous:
            group_features.append(3)
        else:
            group_features.append(4)

    # Conclude the group for the pipeline based on all group features per number of shots