def test_hermitian(self, theta, phi, varphi, monkeypatch, tol):
    """Test that a tensor product involving qml.Hermitian works correctly"""
    dev = qml.device("default.tensor", wires=3)
    dev.reset()
    dev.apply("RX", wires=[0], par=[theta])
    dev.apply("RX", wires=[1], par=[phi])
    dev.apply("RX", wires=[2], par=[varphi])
    dev.apply("CNOT", wires=[0, 1], par=[])
    dev.apply("CNOT", wires=[1, 2], par=[])

    A = np.array(
        [
            [-6, 2 + 1j, -3, -5 + 2j],
            [2 - 1j, 0, 2 - 1j, -5 + 4j],
            [-3, 2 + 1j, 0, -4 + 3j],
            [-5 - 2j, -5 - 4j, -4 - 3j, -6],
        ]
    )

    with monkeypatch.context() as m:
        m.setattr("numpy.random.choice", lambda x, y, p: (x, p))
        s1, p = dev.sample(["PauliZ", "Hermitian"], [[0], [1, 2]], [[], [A]])

    # s1 should only contain the eigenvalues of the tensor product Z ⊗ A
    Z = np.diag([1, -1])
    eigvals = np.linalg.eigvalsh(np.kron(Z, A))
    assert set(np.round(s1, 8)).issubset(set(np.round(eigvals, 8)))

    mean = s1 @ p
    expected = 0.5 * (
        -6 * np.cos(theta) * (np.cos(varphi) + 1)
        - 2 * np.sin(varphi) * (np.cos(theta) + np.sin(phi) - 2 * np.cos(phi))
        + 3 * np.cos(varphi) * np.sin(phi)
        + np.sin(phi)
    )
    assert np.allclose(mean, expected, atol=tol, rtol=0)

    var = (s1 ** 2) @ p - (s1 @ p).real ** 2
    expected = (
        1057
        - np.cos(2 * phi)
        + 12 * (27 + np.cos(2 * phi)) * np.cos(varphi)
        - 2 * np.cos(2 * varphi) * np.sin(phi) * (16 * np.cos(phi) + 21 * np.sin(phi))
        + 16 * np.sin(2 * phi)
        - 8 * (-17 + np.cos(2 * phi) + 2 * np.sin(2 * phi)) * np.sin(varphi)
        - 8 * np.cos(2 * theta) * (3 + 3 * np.cos(varphi) + np.sin(varphi)) ** 2
        - 24 * np.cos(phi) * (np.cos(phi) + 2 * np.sin(phi)) * np.sin(2 * varphi)
        - 8
        * np.cos(theta)
        * (
            4 * np.cos(phi) * (4 + 8 * np.cos(varphi) + np.cos(2 * varphi)
                               - (1 + 6 * np.cos(varphi)) * np.sin(varphi))
            + np.sin(phi) * (15 + 8 * np.cos(varphi) - 11 * np.cos(2 * varphi)
                             + 42 * np.sin(varphi) + 3 * np.sin(2 * varphi))
        )
    ) / 16
    assert np.allclose(var, expected, atol=tol, rtol=0)
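
# For context (a self-contained sketch, not part of the test suite): the
# closed-form ``expected`` value above can be cross-checked on ``default.qubit``
# by preparing the same state and measuring the same tensor observable directly.
# ``mean_check`` is a hypothetical helper introduced only for this illustration.
import pennylane as qml
from pennylane import numpy as np

dev_check = qml.device("default.qubit", wires=3)

@qml.qnode(dev_check)
def mean_check(theta, phi, varphi, A):
    qml.RX(theta, wires=0)
    qml.RX(phi, wires=1)
    qml.RX(varphi, wires=2)
    qml.CNOT(wires=[0, 1])
    qml.CNOT(wires=[1, 2])
    # same observable as the sampled one: Z on wire 0, Hermitian A on wires 1-2
    return qml.expval(qml.PauliZ(0) @ qml.Hermitian(A, wires=[1, 2]))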
def test(Xtest, Ytest, X, Y, var):
    """Compute the classification accuracy of the trained circuit on a test set."""
    count = 0
    x = np.delete(X, 0, 0)
    y = np.delete(Y, 0, 0)
    Ydata = np.tile(y, (2, 1))
    for idx, newinput in enumerate(Xtest):
        # stack copies of the test point on top of the (reduced) training inputs
        newinput = np.tile(newinput, (len(x), 1))
        Xdata = np.vstack((newinput, x))
        result1, result2 = circuit(var, X=Xdata, Y=Ydata)
        if np.round(result1 * result2) == int(Ytest[idx]):
            count += 1
    accuracy = count / len(Xtest)
    return accuracy
def get_cell_label_pos(cells):
    """Get proper label position per cell

    Args:
        cells (ndarray<int>): Cells as computed by `get_cells`

    Returns:
        label_pos (dict): Map from cell labels to label coordinates for cells
    """
    label_pos = get_cell_centers(cells)
    ids = label_pos.keys()
    for _id in ids:
        center = label_pos[_id]
        x, y = map(int, np.round(center))
        if cells[x, y] != _id:
            # the centroid lies outside the (non-convex) cell; fall back to the
            # cell coordinate closest to the centroid
            where = np.where(cells == _id)
            dists = [
                (coord, np.linalg.norm(center - coord, 2))
                for coord in zip(where[0], where[1])
            ]
            label_pos[_id] = min(dists, key=lambda d: d[1])[0]
    return label_pos
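
# A minimal usage sketch for ``get_cell_label_pos`` (hypothetical data;
# ``get_cell_centers`` is defined elsewhere in this codebase, so a small
# stand-in is provided here just to make the example run on its own).
import numpy as np

def get_cell_centers(cells):
    # stand-in: centroid of each label's pixel coordinates
    return {i: np.argwhere(cells == i).mean(axis=0) for i in np.unique(cells)}

cells_demo = np.array([
    [0, 0, 1],
    [0, 1, 1],
    [1, 1, 1],
])
# maps each label to a coordinate that lies inside its own cell
print(get_cell_label_pos(cells_demo))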
def make_predictions(circuit, pre_trained_vals, X, Y, **kwargs):
    """Make final predictions with a trained circuit classifier.

    Args:
        circuit (qml.QNode): The trained circuit.
        pre_trained_vals (np.ndarray): Trained weights for the circuit.
        X (np.ndarray): An array of floats of size (M, n) to classify.
        Y (np.ndarray): An array of size (M,) of categorical labels.
        **kwargs: Hyperparameters; `readout_layer` selects between 'one_hot'
            and 'weighted_neuron', and `nqubits` gives the number of qubits.

    Returns:
        (final_predictions, acc): the predictions and the resulting accuracy.
    """
    # both readout layers use the pre-trained weights as-is
    if kwargs['readout_layer'] == 'one_hot':
        var = pre_trained_vals
    elif kwargs['readout_layer'] == "weighted_neuron":
        var = pre_trained_vals

    # make final predictions
    if kwargs['readout_layer'] == 'one_hot':
        final_predictions = np.stack([circuit(var, x) for x in X])
        acc = ohe_accuracy(Y, final_predictions)
    elif kwargs['readout_layer'] == 'weighted_neuron':
        from autograd.numpy import exp
        n = kwargs.get('nqubits')
        w = var[:, -1]
        theta = var[:, :-1].numpy()
        final_predictions = [
            int(np.round(
                2. * (1.0 / (1.0 + exp(np.dot(-w, circuit(theta, features=x))))) - 1.,
                1))
            for x in X
        ]
        acc = wn_accuracy(Y, final_predictions)
    return final_predictions, acc
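
# A numeric sketch of the weighted-neuron readout used above (hypothetical
# values; ``w_demo`` and ``expvals_demo`` stand in for ``var[:, -1]`` and
# ``circuit(theta, features=x)``): a sigmoid of the weighted qubit
# expectations, rescaled from (0, 1) to (-1, 1).
import numpy as np

w_demo = np.array([0.3, -0.2])        # per-qubit readout weights
expvals_demo = np.array([0.9, -0.4])  # hypothetical circuit expectation values
pred_demo = 2.0 * (1.0 / (1.0 + np.exp(np.dot(-w_demo, expvals_demo)))) - 1.0
print(np.round(pred_demo, 1))  # 0.2 -> a readout value in (-1, 1)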
        return qml.expval(qml.PauliY(0) @ qml.PauliZ(2))

    gradient = np.zeros_like(weights)
    one_hot = np.zeros_like(weights)  # one-hot mask selecting the shifted parameter
    s = 1.0
    denom = 2 * np.sin(s)

    # QHACK #
    for i in range(len(weights)):
        for j in range(3):
            one_hot[i, j] = s
            # two-term parameter-shift rule:
            # (f(w + s*e_ij) - f(w - s*e_ij)) / (2 sin s)
            gradient[i, j] = (circuit(weights + one_hot) - circuit(weights - one_hot)) / denom
            one_hot[i, j] = 0
    # QHACK #

    return gradient


if __name__ == "__main__":
    # DO NOT MODIFY anything in this code block
    weights = sys.stdin.read()
    weights = np.array(
        [row.split(",") for row in weights.split("S") if row], dtype=np.float64
    )

    gradient = np.round(parameter_shift(weights), 10)

    output_array = gradient.flatten()
    print(",".join([str(val) for val in output_array]))
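
# An aside (a self-contained sketch, separate from the graded template above):
# for a single Pauli rotation the shift rule with spacing ``s`` is exact for
# any s with sin(s) != 0. Here f(x) = <Z> = cos(x) after an RX gate, so the
# rule recovers f'(x) = -sin(x) exactly, not just approximately.
import pennylane as qml
from pennylane import numpy as np

_dev = qml.device("default.qubit", wires=1)

@qml.qnode(_dev)
def _f(x):
    qml.RX(x, wires=0)
    return qml.expval(qml.PauliZ(0))

x0, s0 = 0.4, 1.0
shift_grad = (_f(x0 + s0) - _f(x0 - s0)) / (2 * np.sin(s0))
print(shift_grad, -np.sin(x0))  # both ≈ -0.38942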
def train_circuit(circuit, parameter_shape, X_train, Y_train, batch_size,
                  learning_rate, **kwargs):
    """Train a circuit classifier.

    Args:
        circuit (qml.QNode): A circuit that you want to train.
        parameter_shape: A tuple describing the shape of the parameters. The
            first entry is the number of qubits, the second one is the number
            of layers in the circuit architecture.
        X_train (np.ndarray): An array of floats of size (M, n) to be used as
            training data.
        Y_train (np.ndarray): An array of size (M,) which are the categorical
            labels associated to the training data.
        batch_size (int): Batch size for the circuit training.
        learning_rate (float): The learning rate/step size of the optimizer.
        kwargs: Hyperparameters for the training (passed as keyword arguments).
            The following hyperparameters are supported:

            nsteps (int): Number of training steps.
            optim (pennylane.optimize instance): Optimizer used during the
                training of the circuit. Pass as qml.OptimizerName.
            Tmax (list): Maximum point T as defined in
                https://arxiv.org/abs/2010.08512 (Definition 8). The first
                element is the maximum number of parameters among all
                architectures; the second is the maximum inference time among
                all architectures in terms of computing time; the third one is
                the maximum inference time among all architectures in terms of
                the number of CNOTs in the circuit.
            rate_type (string): Determines the type of error rate in the
                W-coefficient. If rate_type == 'accuracy', the inference time
                of the circuit is equal to the time it takes to evaluate the
                accuracy of the trained circuit with respect to a validation
                batch three times the size of the training batch size, and the
                error rate is equal to 1 - accuracy (w.r.t. a validation
                batch). If rate_type == 'batch_cost', the inference time of the
                circuit is equal to the time it takes to train the circuit (for
                nsteps training steps) and compute the cost at each step, and
                the error rate is equal to the cost after nsteps training
                steps.

    Returns:
        (W_, weights): W-coefficient, trained weights
    """
    # fix the seed while debugging
    # np.random.seed(1337)

    def ohe_cost_fcn(params, circuit, ang_array, actual):
        """MSE cost for the one-hot readout."""
        predictions = (np.stack([circuit(params, x) for x in ang_array]) + 1) * 0.5
        return mse(actual, predictions)

    def wn_cost_fcn(params, circuit, ang_array, actual):
        """MSE cost for the weighted-neuron readout."""
        w = params[:, -1]
        theta = params[:, :-1]
        predictions = np.asarray([
            2. * (1.0 / (1.0 + exp(np.dot(-w, circuit(theta, features=x))))) - 1.
            for x in ang_array
        ])
        return mse(actual, predictions)

    if kwargs['readout_layer'] == 'one_hot':
        var = np.zeros(parameter_shape)
    elif kwargs['readout_layer'] == "weighted_neuron":
        # append one readout weight per qubit, initialised in [-0.5, 0.5)
        var = np.hstack((np.zeros(parameter_shape),
                         np.random.random((kwargs['nqubits'], 1)) - 0.5))

    rate_type = kwargs['rate_type']
    inf_time = kwargs['inf_time']
    optim = kwargs['optim']
    numcnots = kwargs['numcnots']
    # Tmax[0] is the maximum parameter size, Tmax[1] the maximum inference time
    # (timeit), Tmax[2] the maximum number of entangling gates
    Tmax = kwargs['Tmax']

    num_train = len(Y_train)
    validation_size = int(0.1 * num_train)
    # all optimizers in the autograd module take the argument stepsize,
    # so this works for all of them
    opt = optim(stepsize=learning_rate)

    start = time.time()
    for _ in range(kwargs['nsteps']):
        batch_index = np.random.randint(0, num_train, (batch_size,))
        X_train_batch = np.asarray(X_train[batch_index])
        Y_train_batch = np.asarray(Y_train[batch_index])
        if kwargs['readout_layer'] == 'one_hot':
            var, cost = opt.step_and_cost(
                lambda v: ohe_cost_fcn(v, circuit, X_train_batch, Y_train_batch),
                var)
        elif kwargs['readout_layer'] == 'weighted_neuron':
            var, cost = opt.step_and_cost(
                lambda v: wn_cost_fcn(v, circuit, X_train_batch, Y_train_batch),
                var)
    end = time.time()
    cost_time = end - start

    if kwargs['rate_type'] == 'accuracy':
        validation_batch = np.random.randint(0, num_train, (validation_size,))
        X_validation_batch = np.asarray(X_train[validation_batch])
        Y_validation_batch = np.asarray(Y_train[validation_batch])
        start = time.time()  # add in timeit function from Wbranch
        if kwargs['readout_layer'] == 'one_hot':
            predictions = np.stack([circuit(var, x) for x in X_validation_batch])
        elif kwargs['readout_layer'] == 'weighted_neuron':
            n = kwargs.get('nqubits')
            w = var[:, -1]
            theta = var[:, :-1]
            predictions = [
                int(np.round(
                    2. * (1.0 / (1.0 + exp(np.dot(-w, circuit(theta, features=x))))) - 1.,
                    1))
                for x in X_validation_batch
            ]
        end = time.time()
        inftime = (end - start) / len(X_validation_batch)
        # add a small epsilon to the error rate to prevent divide-by-zero errors
        if kwargs['readout_layer'] == 'one_hot':
            err_rate = (1.0 - ohe_accuracy(Y_validation_batch, predictions)) + 10**-7
        elif kwargs['readout_layer'] == 'weighted_neuron':
            err_rate = (1.0 - wn_accuracy(Y_validation_batch, predictions)) + 10**-7
    elif kwargs['rate_type'] == 'batch_cost':
        err_rate = cost + 10**-7  # small epsilon to prevent divide-by-zero errors
        inftime = cost_time

    # QHACK #
    if kwargs['inf_time'] == 'timeit':
        W_ = (np.abs((Tmax[0] - len(var)) / Tmax[0])
              * np.abs((Tmax[1] - inftime) / Tmax[1]) * (1. / err_rate))
    elif kwargs['inf_time'] == 'numcnots':
        nc_ = numcnots
        W_ = (np.abs((Tmax[0] - len(var)) / Tmax[0])
              * np.abs((Tmax[2] - nc_) / Tmax[2]) * (1. / err_rate))

    return W_, var
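
# A numeric sketch of the W-coefficient computed above (illustrative values,
# not from a real training run): each factor rescales the "headroom" relative
# to the maximum point T, and smaller error rates inflate W.
import numpy as np

Tmax_demo = [100, 2.0, 50]  # [max #parameters, max inference time (s), max #CNOTs]
n_params_demo, inftime_demo, err_demo = 24, 0.3, 0.12

W_demo = (np.abs((Tmax_demo[0] - n_params_demo) / Tmax_demo[0])
          * np.abs((Tmax_demo[1] - inftime_demo) / Tmax_demo[1])
          * (1. / err_demo))
print(W_demo)  # ≈ 5.38: larger W favours smaller, faster, more accurate circuits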
init_params

K_init

# ## Train an SVM

# +
# train alpha and beta
svm = SVC(
    kernel=lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, init_kernel)).fit(
        X_train, Y_train)

# check the performance
Y_pred = svm.predict(X_train)
random_on_train = acc(Y_pred, Y_train)
random_on_train_l1 = np.round(mean_absolute_error(Y_pred, Y_train), 3)
random_on_train_l2 = np.round(mean_squared_error(Y_pred, Y_train), 3)

Y_pred = svm.predict(X_val)
random_on_val = acc(Y_pred, Y_val)
random_on_val_l1 = np.round(mean_absolute_error(Y_pred, Y_val), 3)
random_on_val_l2 = np.round(mean_squared_error(Y_pred, Y_val), 3)

if printing:
    print(f"Random parameter accuracy on train {random_on_train:.3f}")
    print(f"Random parameter accuracy on validate {random_on_val:.3f}")

#init_plot_data = plot_decision_boundaries(svm, plt.gca())
# -

predictions = svm.predict(X_train)

svm.support_vectors_

svm.__dict__
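
# +
# A self-contained sketch of the custom-kernel contract used above (with a
# classical stand-in Gram matrix so it runs without a quantum device): ``SVC``
# calls ``kernel(X1, X2)`` and expects the full Gram matrix back, which is the
# role ``qml.kernels.kernel_matrix(X1, X2, init_kernel)`` plays in the cell above.
import numpy as np
from sklearn.svm import SVC

rng = np.random.default_rng(0)
X_demo = rng.normal(size=(20, 2))
y_demo = rng.integers(0, 2, size=20)

def gram(X1, X2):
    # RBF Gram matrix as a placeholder for the quantum kernel
    return np.exp(-np.linalg.norm(X1[:, None] - X2[None], axis=-1) ** 2)

svm_demo = SVC(kernel=gram).fit(X_demo, y_demo)
svm_demo.score(X_demo, y_demo)
# -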
normK = np.linalg.norm(K, 'fro')
if np.isclose(normK, 0.):
    align = np.nan
    target_align = np.nan
else:
    align = float(
        qml.math.frobenius_inner_product(K, exact_matrix, normalize=True))
    target_align = float(
        qml.math.frobenius_inner_product(K, target, normalize=True))

df = df.append(
    pd.Series({
        'base_noise_rate': np.round(key[0], 3),
        'shots': key[1],
        'pipeline': pipeline_name,
        'alignment': align,
        'target_alignment': target_align,
        # We will use shots=1e10 for the analytic case to obtain
        # correctly sorted results
        'shots_sort': key[1] if key[1] > 0 else int(1e10),
    }),
    ignore_index=True,
)

try:
    print("Average execution times of postprocessing functions:")
    for key, num in fun_evals.items():
        if num > 0:
            print(f"{key} - {times_per_fun[key]/num}")
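
# A quick sketch of what ``normalize=True`` does in the alignment computation
# above: the Frobenius inner product is divided by the product of the two
# Frobenius norms, giving a cosine-style similarity that equals 1.0 for
# proportional matrices (values here are illustrative only).
import numpy as np
import pennylane as qml

A_demo = np.eye(2)
B_demo = 2.0 * np.eye(2)
print(qml.math.frobenius_inner_product(A_demo, B_demo, normalize=True))  # 1.0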
# Then, construct a classical shadow consisting of 1000 snapshots.

num_snapshots = 1000
params = []

shadow = calculate_classical_shadow(
    bell_state_circuit, params, num_snapshots, num_qubits
)
print(shadow[0])
print(shadow[1])

##############################################################################
# To reconstruct the Bell state we use ``shadow_state_reconstruction``.

shadow_state = shadow_state_reconstruction(shadow)
print(np.round(shadow_state, decimals=6))

##############################################################################
# Note the resemblance to the exact Bell state density matrix.

bell_state = np.array([[0.5, 0, 0, 0.5], [0, 0, 0, 0], [0, 0, 0, 0], [0.5, 0, 0, 0.5]])

##############################################################################
# To measure the closeness we can use the operator norm.


def operator_2_norm(R):
    """
    Calculate the operator 2-norm.

    Args:
        R (array): The operator whose norm we want to calculate.

    Returns:
        Scalar corresponding to the norm.
    """
    # Hilbert-Schmidt (Frobenius) norm of R
    return np.sqrt(np.trace(R.conjugate().transpose() @ R))
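
##############################################################################
# As a quick sanity check (a sketch; the exact value depends on the random
# snapshots generated above), the distance between the reconstruction and the
# exact state should shrink as ``num_snapshots`` grows.

print(operator_2_norm(bell_state - shadow_state))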
def train_best(circuit, pre_trained_vals, X_train, Y_train, batch_size,
               learning_rate, **kwargs):
    """Retrain a circuit classifier starting from pre-trained weights.

    Args:
        circuit (qml.QNode): A circuit that you want to train.
        pre_trained_vals (np.ndarray): Pre-trained weights used to initialise
            the training.
        X_train (np.ndarray): An array of floats of size (M, n) to be used as
            training data.
        Y_train (np.ndarray): An array of size (M,) which are the categorical
            labels associated to the training data.
        batch_size (int): Batch size for the circuit training.
        learning_rate (float): The learning rate/step size of the optimizer.
        kwargs: Hyperparameters for the training (passed as keyword arguments).
            The following hyperparameters are supported:

            nsteps (int): Number of training steps.
            optim (pennylane.optimize instance): Optimizer used during the
                training of the circuit. Pass as qml.OptimizerName.
            Tmax (list): Maximum point T as defined in
                https://arxiv.org/abs/2010.08512 (Definition 8). The first
                element is the maximum number of parameters among all
                architectures; the second is the maximum inference time among
                all architectures in terms of computing time; the third one is
                the maximum inference time among all architectures in terms of
                the number of CNOTs in the circuit.
            rate_type (string): Determines the type of error rate in the
                W-coefficient. If rate_type == 'accuracy', the inference time
                of the circuit is equal to the time it takes to evaluate the
                accuracy of the trained circuit with respect to a validation
                batch three times the size of the training batch size, and the
                error rate is equal to 1 - accuracy (w.r.t. a validation
                batch). If rate_type == 'batch_cost', the inference time of the
                circuit is equal to the time it takes to train the circuit (for
                nsteps training steps) and compute the cost at each step, and
                the error rate is equal to the cost after nsteps training
                steps.

    Returns:
        (var, final_predictions): trained weights and final predictions.
    """
    from autograd.numpy import exp

    def ohe_cost_fcn(params, circuit, ang_array, actual):
        """MSE cost for the one-hot readout."""
        predictions = (np.stack([circuit(params, x) for x in ang_array]) + 1) * 0.5
        return mse(actual, predictions)

    def wn_cost_fcn(params, circuit, ang_array, actual):
        """MSE cost for the weighted-neuron readout."""
        w = params[:, -1]
        theta = params[:, :-1]
        # print(w.shape, w, theta.shape, theta)  # debug
        predictions = np.asarray([
            2. * (1.0 / (1.0 + exp(np.dot(-w, circuit(theta, x))))) - 1.
            for x in ang_array
        ])
        return mse(actual, predictions)

    # both readout layers use the pre-trained weights as-is
    if kwargs['readout_layer'] == 'one_hot':
        var = pre_trained_vals
    elif kwargs['readout_layer'] == "weighted_neuron":
        var = pre_trained_vals

    rate_type = kwargs['rate_type']
    optim = kwargs['optim']
    num_train = len(Y_train)
    validation_size = int(0.1 * num_train)
    # all optimizers in the autograd module take the argument stepsize,
    # so this works for all of them
    opt = optim(stepsize=learning_rate)

    for step in range(kwargs['nsteps']):
        batch_index = np.random.randint(0, num_train, (batch_size,))
        X_train_batch = np.asarray(X_train[batch_index])
        Y_train_batch = np.asarray(Y_train[batch_index])
        if kwargs['readout_layer'] == 'one_hot':
            var, cost = opt.step_and_cost(
                lambda v: ohe_cost_fcn(v, circuit, X_train_batch, Y_train_batch),
                var)
        elif kwargs['readout_layer'] == 'weighted_neuron':
            # print(var)  # debug
            var, cost = opt.step_and_cost(
                lambda v: wn_cost_fcn(v, circuit, X_train_batch, Y_train_batch),
                var)
        print(step, cost)

        # check for early stopping every five steps
        if step % 5 == 0:
            validation_batch = np.random.randint(0, num_train, (validation_size,))
            X_validation_batch = np.asarray(X_train[validation_batch])
            Y_validation_batch = np.asarray(Y_train[validation_batch])
            if kwargs['rate_type'] == 'accuracy':
                if kwargs['readout_layer'] == 'one_hot':
                    predictions = np.stack(
                        [circuit(var, x) for x in X_validation_batch])
                    acc = ohe_accuracy(Y_validation_batch, predictions)
                elif kwargs['readout_layer'] == 'weighted_neuron':
                    n = kwargs.get('nqubits')
                    w = var[:, -1]
                    theta = var[:, :-1].numpy()
                    predictions = [
                        int(np.round(
                            2. * (1.0 / (1.0 + exp(np.dot(-w, circuit(theta, x))))) - 1.,
                            1))
                        for x in X_validation_batch
                    ]
                    acc = wn_accuracy(Y_validation_batch, predictions)
                if acc > 0.95:
                    break
            elif kwargs['rate_type'] == 'batch_cost':
                if cost < 0.001:
                    break

    # make final predictions
    if kwargs['readout_layer'] == 'one_hot':
        final_predictions = np.stack([circuit(var, x) for x in X_train])
    elif kwargs['readout_layer'] == 'weighted_neuron':
        n = kwargs.get('nqubits')
        w = var[:, -1]
        theta = var[:, :-1]
        final_predictions = [
            int(np.round(
                2. * (1.0 / (1.0 + exp(np.dot(-w, circuit(theta, x))))) - 1.,
                1))
            for x in X_train
        ]

    return var, final_predictions