Example no. 1
    def test_wrapped_function_nontrainable_list_input(self):
        """Test that a wrapped function with signature of the form
        func([arr1, arr2, ...]) acting on non-trainable input returns non-trainable output"""
        arr1 = np.array([0, 1], requires_grad=False)
        arr2 = np.array([2, 3], requires_grad=False)
        arr3 = np.array([4, 5], requires_grad=False)

        res = np.vstack([arr1, arr2, arr3])
        assert not res.requires_grad

        # If one of the inputs is trainable, the output always is.
        arr1.requires_grad = True
        res = np.vstack([arr1, arr2, arr3])
        assert res.requires_grad
Example no. 2
    def GenProbDist(self, params):
        """
        Generate a probability distribution corresponding to a product mixed state.
        The i-th distribution is accessed as Dist[i].
        """
        return np.vstack([sigmoid(params), 1 - sigmoid(params)]).T
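A minimal standalone sketch of the same construction, assuming a standard logistic `sigmoid` helper (not shown in the original snippet) and plain NumPy; each row of the result is a two-outcome distribution that sums to 1:

import numpy as np

def sigmoid(z):
    # assumed helper: standard logistic function
    return 1 / (1 + np.exp(-z))

params = np.array([-1.0, 0.0, 2.0])
dist = np.vstack([sigmoid(params), 1 - sigmoid(params)]).T
print(dist.shape)        # (3, 2): one two-outcome distribution per parameter
print(dist.sum(axis=1))  # each row sums to 1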
Example no. 3
def next_batch(X, y, batchSize):
    rem = len(X) // batchSize  # number of full batches
    out = len(X) % batchSize   # size of the final partial batch
    # loop over our dataset `X` in mini-batches of size `batchSize`
    for batch_id, i in enumerate(np.arange(0, X.shape[0], batchSize)):
        if batch_id == rem and out != 0:
            # final partial batch: pad it with randomly chosen samples
            temp = batchSize - out
            d = X[i:i + batchSize]
            lbl = y[i:i + batchSize]
            # sample padding indices from the dataset (the original hard-coded 622 here)
            batch = np.random.choice(len(X), temp, replace=False)

            dd, ll = X[batch], y[batch]

            # yield (not return) so the padded batch is also delivered to the caller
            yield (np.vstack((d, dd)),
                   np.vstack((lbl.reshape(-1, 1), ll.reshape(-1, 1))))
            return
        # yield a tuple of the current batched data and labels
        yield (X[i:i + batchSize], y[i:i + batchSize])
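A quick usage sketch with toy arrays (shapes are illustrative only); the final padded batch stacks the leftover samples with randomly drawn ones:

import numpy as np

X = np.arange(30).reshape(10, 3)  # toy data: 10 samples, 3 features
y = np.arange(10)

for batch_X, batch_y in next_batch(X, y, batchSize=4):
    print(batch_X.shape, batch_y.shape)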
Example no. 4
def predict(Xnew, var, X, Y):
    newinput = Xnew
    x = np.delete(X, 0, 0)
    y = np.delete(Y, 0, 0)
    newinput = np.tile(newinput, (len(x), 1))  # create copies of new input
    Xdata = np.vstack((newinput, x))
    Ydata = np.tile(y, (2, 1))
    result1, result2 = circuit(var, Xdata, Ydata)
    return result2 * result1
Example no. 5
    def test_eval_tf(self, qnodes, skip_if_no_tf_support):
        """Test correct evaluation of the QNodeCollection using
        the tf interface"""
        qnode1, qnode2 = qnodes
        qc = qml.QNodeCollection([qnode1, qnode2])
        params = [0.5643, -0.45]

        res = qc(params).numpy()
        expected = np.vstack([qnode1(params), qnode2(params)])
        assert np.all(res == expected)
Example no. 6
    def test_eval_autograd(self, qnodes):
        """Test correct evaluation of the QNodeCollection using
        the Autograd interface"""
        qnode1, qnode2 = qnodes
        qc = qml.QNodeCollection([qnode1, qnode2])
        params = [0.5643, -0.45]

        res = qc(params)
        expected = np.vstack([qnode1(params), qnode2(params)])
        assert np.all(res == expected)
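For context, a minimal sketch of the kind of QNodes the `qnodes` fixture could supply in these tests (hypothetical; the actual fixture is defined elsewhere in the test suite):

import pennylane as qml

dev = qml.device("default.qubit", wires=2)

@qml.qnode(dev)
def qnode1(params):
    qml.RX(params[0], wires=0)
    qml.RY(params[1], wires=1)
    return qml.expval(qml.PauliZ(0))

@qml.qnode(dev)
def qnode2(params):
    qml.RY(params[0], wires=0)
    qml.RX(params[1], wires=1)
    return qml.expval(qml.PauliZ(1))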
Example no. 7
def test(Xtest, Ytest, X, Y, var):
    count = 0
    x = np.delete(X, 0, 0)
    y = np.delete(Y, 0, 0)
    Ydata = np.tile(y, (2, 1))
    for idx, newinput in enumerate(Xtest):
        newinput = np.tile(newinput, (len(x), 1))
        Xdata = np.vstack((newinput, x))
        result1, result2 = circuit(var, X=Xdata, Y=Ydata)
        if np.round(result1 * result2) == int(Ytest[idx]):
            count += 1
    accuracy = count / len(Xtest)
    return accuracy
Example no. 8
    def __init__(self, num_sectors):
        self.num_sectors = num_sectors

        x1, y1, labels1 = self._make_circular_data()
        x2, y2, labels2 = self._make_circular_data()

        # x and y coordinates of the datapoints
        self.x = np.hstack([x1, .5 * x2])
        self.y = np.hstack([y1, .5 * y2])

        # Canonical form of dataset
        self.X = np.vstack([self.x, self.y]).T

        self.labels = np.hstack([labels1, -1 * labels2])

        # Canonical form of labels
        self.Y = self.labels.astype(int)
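The `np.vstack([...]).T` pattern used above for the canonical form is a common way to combine separate coordinate arrays into an (N, 2) design matrix; a tiny standalone illustration:

import numpy as np

x = np.array([0.0, 1.0, 2.0])
y = np.array([3.0, 4.0, 5.0])
X = np.vstack([x, y]).T
print(X.shape)  # (3, 2): one (x, y) row per data point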
Example no. 9
def cost(var, X, Y):  # specifically for a dataset of 5 points: 4 data points and 1 test input
    MSE = 0
    for idx, newinput in enumerate(X):
        x = np.delete(X, idx, 0)  # remove chosen data point (which is the "new input")
        if idx == len(x) - 1:
            break
        newinput = np.tile(newinput, (len(x), 1))  # create copies of new input
        Xdatacost = np.vstack((newinput, x))  # stack 4 data points and copies of new input for the circuit
        ytrue = int(Y[idx])  # select the true y label of the new input
        y = np.delete(Y, idx, 0)  # remove chosen data point's label
        Ydatacost = np.tile(y, (2, 1))  # create copy of labels
        result1, result2 = circuit(var, X=Xdatacost, Y=Ydatacost)
        ypred = result1 * result2
        loss = (ypred - ytrue) ** 2
        MSE = MSE + loss
    return MSE / len(x)
Example no. 10
def cost(var, Xdata, Y):
    MSE = 0
    for idx, newinput in enumerate(Xdata):
        # remove chosen data point (which is the "new input") so that only
        # the other 4 data points go into the cost
        x = np.delete(Xdata, idx, 0)
        if idx == len(x) - 1:
            break
        newinput = np.tile(newinput, (len(x), 1))  # create copies of new input
        # stack 4 data points and copies of new input for the circuit
        # (stored under a new name so the Xdata argument is not overwritten)
        Xstacked = np.vstack((newinput, x))
        ytrue = int(Y[idx])  # select the true y label of the new input
        y = np.delete(Y, idx, 0)  # remove chosen data point's label
        Ydata = np.tile(y, (2, 1))  # create copy of labels
        result1, result2 = variational_circ(var=var, Xdata=Xstacked, Y=Ydata)
        ypred = result1 * result2
        loss = (ypred - ytrue) ** 2
        MSE = MSE + loss
    print(MSE)
    return MSE / len(x)
Example no. 11
##############################################################################
# During doubly stochastic gradient descent, we are sampling from terms of the
# analytic cost function, so it is not entirely instructive to plot the cost
# versus optimization step---partial sums of the terms in the Hamiltonian
# may have minimum energy below the ground state energy of the total Hamiltonian.
# Nevertheless, we can keep track of the cost value moving average during doubly
# stochastic gradient descent as an indicator of convergence.


def moving_average(data, n=3):
    ret = np.cumsum(data, dtype=np.float64)
    ret[n:] = ret[n:] - ret[:-n]
    return ret[n - 1:] / n
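A quick sanity check of `moving_average` on a toy array (illustrative only); the output has `len(data) - n + 1` entries, which is why the x-axis values and the averaged values are offset and sliced to matching lengths below:

data = np.arange(10, dtype=float)
print(moving_average(data, n=3))       # [1. 2. 3. 4. 5. 6. 7. 8.]
print(len(moving_average(data, n=3)))  # len(data) - n + 1 == 8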


average = np.vstack([np.arange(25, 200), moving_average(cost, n=50)[:-26]])

plt.plot(cost_GD, label="Vanilla gradient descent")
plt.plot(cost, ".", label="Doubly QSGD")
plt.plot(average[0], average[1], "--", label="Doubly QSGD (moving average)")
plt.hlines(min_energy, 0, 200, linestyles=":", label="Ground state energy")

plt.ylabel("Cost function value")
plt.xlabel("Optimization steps")
plt.xlim(-2, 200)
plt.legend()
plt.show()

##############################################################################
# Finally, verifying that the doubly stochastic gradient descent optimization
# correctly provides the ground state energy when evaluated for a larger
Example no. 12
# +
# load the dataset and split
(train_X, train_y), (test_X, test_y) = mnist.load_data()

sample_size = 1000  # keep this high here; if you need fewer samples, reduce it in the next step

train_idx0 = np.argwhere(
    train_y == 0)[:sample_size]  # get the images labelled 0
train_X0 = train_X[train_idx0].squeeze() * np.pi / 255  # scale pixel values to [0, pi]

train_idx1 = np.argwhere(
    train_y == 1)[:sample_size]  # get the images labelled 1
train_X1 = train_X[train_idx1].squeeze() * np.pi / 255  # scale pixel values to [0, pi]

X_train = np.vstack([train_X0[:sample_size], train_X1[:sample_size]])  # stack
y_train = np.hstack([[-1] * sample_size, [1] * sample_size])  # generate labels

test_idx0 = np.argwhere(test_y == 0)[:sample_size]  # same for test
test_X0 = test_X[test_idx0].squeeze() * np.pi / 255

test_idx1 = np.argwhere(test_y == 1)[:sample_size]
test_X1 = test_X[test_idx1].squeeze() * np.pi / 255

X_test = np.vstack([test_X0[:sample_size], test_X1[:sample_size]])
# build labels from the actual per-class counts, in case a class has fewer
# than `sample_size` test images
y_test = np.hstack([[-1] * len(test_X0), [1] * len(test_X1)])
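A quick shape check for the stacked arrays (a minimal sketch; the exact test-set counts depend on how many images of each digit the split contains):

assert X_train.shape[0] == y_train.shape[0]
assert X_test.shape[0] == y_test.shape[0]
print(X_train.shape, y_train.shape)  # (2000, 28, 28) (2000,) with the full sample_size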

# +
# visual check
gs = mpl.gridspec.GridSpec(2, 10)
fig = plt.figure(figsize=(8, 2))
Example no. 13
X_pos = np.asarray(X_pos)
Y_neg = np.asarray(Y_neg)
Y_pos = np.asarray(Y_pos)

# shuffle our data, positive and negative samples separately
randomize_neg = np.arange(len(X_neg))
np.random.shuffle(randomize_neg)
X_neg = X_neg[randomize_neg]
Y_neg = Y_neg[randomize_neg]
randomize_pos = np.arange(len(X_pos))
np.random.shuffle(randomize_pos)
X_pos = X_pos[randomize_pos]
Y_pos = Y_pos[randomize_pos]

# first the stitching and reshuffling of the training data
X_train = np.vstack((X_neg[0:cut_off], X_pos[0:cut_off]))
Y_train = np.hstack((Y_neg[0:cut_off], Y_pos[0:cut_off]))
randomize_all = np.arange(len(X_train))
np.random.shuffle(randomize_all)
X_train = X_train[randomize_all]
Y_train = Y_train[randomize_all]
# then the stitching and reshuffling of the validation data
X_val = np.vstack((X_neg[cut_off:2 * cut_off], X_pos[cut_off:2 * cut_off]))
Y_val = np.hstack((Y_neg[cut_off:2 * cut_off], Y_pos[cut_off:2 * cut_off]))
randomize_val = np.arange(len(X_val))
np.random.shuffle(randomize_val)
X_val = X_val[randomize_val]
Y_val = Y_val[randomize_val]

if printing:
    print("X_train", X_train)