def test_usage_in_vqc(self):
    """Test that using the circuit in a single VQC iteration works."""
    # specify quantum instance and random seed
    random_seed = 12345
    quantum_instance = QuantumInstance(
        Aer.get_backend('statevector_simulator'),
        seed_simulator=random_seed,
        seed_transpiler=random_seed)
    np.random.seed(random_seed)

    # construct data
    num_samples = 10
    num_inputs = 4
    X = np.random.rand(num_samples, num_inputs)  # pylint: disable=invalid-name
    y = 1.0 * (np.sum(X, axis=1) <= 2)
    while len(np.unique(y, axis=0)) == 1:
        # regenerate the features, otherwise the labels can never change
        X = np.random.rand(num_samples, num_inputs)
        y = 1.0 * (np.sum(X, axis=1) <= 2)
    y = np.array([y, 1 - y]).transpose()

    feature_map = RawFeatureVector(feature_dimension=num_inputs)
    vqc = VQC(feature_map=feature_map,
              ansatz=RealAmplitudes(feature_map.num_qubits, reps=1),
              optimizer=COBYLA(maxiter=10),
              quantum_instance=quantum_instance)
    vqc.fit(X, y)

    score = vqc.score(X, y)
    self.assertGreater(score, 0.5)
def test_warm_start(self, config):
    """Test VQC with warm_start=True."""
    opt, q_i = config

    if q_i == "statevector":
        quantum_instance = self.sv_quantum_instance
    elif q_i == "qasm":
        quantum_instance = self.qasm_quantum_instance
    else:
        quantum_instance = None

    if opt == "bfgs":
        optimizer = L_BFGS_B(maxiter=5)
    elif opt == "cobyla":
        optimizer = COBYLA(maxiter=25)
    else:
        optimizer = None

    num_inputs = 2
    feature_map = ZZFeatureMap(num_inputs)
    ansatz = RealAmplitudes(num_inputs, reps=1)

    # Construct the data.
    num_samples = 10
    # pylint: disable=invalid-name
    X = algorithm_globals.random.random((num_samples, num_inputs))
    y = 1.0 * (np.sum(X, axis=1) <= 1)
    while len(np.unique(y)) == 1:
        X = algorithm_globals.random.random((num_samples, num_inputs))
        y = 1.0 * (np.sum(X, axis=1) <= 1)
    y = np.array([y, 1 - y]).transpose()  # VQC requires one-hot encoded input.

    # Initialize the VQC.
    classifier = VQC(
        feature_map=feature_map,
        ansatz=ansatz,
        optimizer=optimizer,
        warm_start=True,
        quantum_instance=quantum_instance,
    )

    # Fit the VQC to the first half of the data.
    num_start = num_samples // 2
    classifier.fit(X[:num_start, :], y[:num_start])
    first_fit_final_point = classifier._fit_result.x

    # Fit the VQC to the second half of the data with a warm start.
    classifier.fit(X[num_start:, :], y[num_start:])
    second_fit_initial_point = classifier._initial_point

    # Check the final optimization point from the first fit was used to start the second fit.
    np.testing.assert_allclose(first_fit_final_point, second_fit_initial_point)

    # Check score.
    score = classifier.score(X, y)
    self.assertGreater(score, 0.5)
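# The tests in this section reference `self.sv_quantum_instance` and `self.qasm_quantum_instance`
# without showing where they come from. Below is a minimal sketch of the test-case `setUp` that
# could provide them; the seed value, the shot count, and the exact backend names are assumptions,
# not taken from the snippets above.
def setUp(self):
    super().setUp()
    algorithm_globals.random_seed = 12345
    # Statevector-based quantum instance (exact, shot-free simulation).
    self.sv_quantum_instance = QuantumInstance(
        Aer.get_backend("aer_simulator_statevector"),
        seed_simulator=algorithm_globals.random_seed,
        seed_transpiler=algorithm_globals.random_seed,
    )
    # Shot-based quantum instance (sampled measurement outcomes).
    self.qasm_quantum_instance = QuantumInstance(
        Aer.get_backend("aer_simulator"),
        shots=100,
        seed_simulator=algorithm_globals.random_seed,
        seed_transpiler=algorithm_globals.random_seed,
    )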
def test_vqc(self, config):
    """Test VQC."""
    opt, q_i = config

    if q_i == "statevector":
        quantum_instance = self.sv_quantum_instance
    elif q_i == "qasm":
        quantum_instance = self.qasm_quantum_instance
    else:
        quantum_instance = None

    if opt == "bfgs":
        optimizer = L_BFGS_B(maxiter=5)
    elif opt == "cobyla":
        optimizer = COBYLA(maxiter=25)
    else:
        optimizer = None

    num_inputs = 2
    feature_map = ZZFeatureMap(num_inputs)
    ansatz = RealAmplitudes(num_inputs, reps=1)

    # fix the initial point
    initial_point = np.array([0.5] * ansatz.num_parameters)

    # construct classifier - note: CrossEntropy requires eval_probabilities=True!
    classifier = VQC(
        feature_map=feature_map,
        ansatz=ansatz,
        optimizer=optimizer,
        quantum_instance=quantum_instance,
        initial_point=initial_point,
    )

    # construct data
    num_samples = 5
    # pylint: disable=invalid-name
    X = algorithm_globals.random.random((num_samples, num_inputs))
    y = 1.0 * (np.sum(X, axis=1) <= 1)
    while len(np.unique(y)) == 1:
        X = algorithm_globals.random.random((num_samples, num_inputs))
        y = 1.0 * (np.sum(X, axis=1) <= 1)
    y = np.array([y, 1 - y]).transpose()  # VQC requires one-hot encoded input

    # fit to data
    classifier.fit(X, y)

    # score
    score = classifier.score(X, y)
    self.assertGreater(score, 0.5)
def test_readme_sample(self):
    """ readme sample test """
    # pylint: disable=import-outside-toplevel,redefined-builtin

    def print(*args):
        """ overloads print to log values """
        if args:
            self.log.debug(args[0], *args[1:])

    # --- Exact copy of sample code ----------------------------------------
    from qiskit import BasicAer
    from qiskit.utils import QuantumInstance, algorithm_globals
    from qiskit.algorithms.optimizers import COBYLA
    from qiskit.circuit.library import TwoLocal
    from qiskit_machine_learning.algorithms import VQC
    from qiskit_machine_learning.datasets import wine
    from qiskit_machine_learning.circuit.library import RawFeatureVector

    seed = 1376
    algorithm_globals.random_seed = seed

    # Use Wine data set for training and test data
    feature_dim = 4  # dimension of each data point
    training_size = 12
    test_size = 4

    # training features, training labels, test features, test labels as np.array,
    # one hot encoding for labels
    training_features, training_labels, test_features, test_labels = \
        wine(training_size=training_size, test_size=test_size, n=feature_dim)

    feature_map = RawFeatureVector(feature_dimension=feature_dim)
    ansatz = TwoLocal(feature_map.num_qubits, ['ry', 'rz'], 'cz', reps=3)
    vqc = VQC(feature_map=feature_map,
              ansatz=ansatz,
              optimizer=COBYLA(maxiter=100),
              quantum_instance=QuantumInstance(
                  BasicAer.get_backend('statevector_simulator'),
                  shots=1024,
                  seed_simulator=seed,
                  seed_transpiler=seed))
    vqc.fit(training_features, training_labels)

    score = vqc.score(test_features, test_labels)
    print('Testing accuracy: {:0.2f}'.format(score))
    # ----------------------------------------------------------------------

    self.assertGreater(score, 0.8)
def _test_sparse_arrays(self, quantum_instance: QuantumInstance, loss: str):
    """Tests VQC on sparse features and one-hot labels with the given loss and quantum instance."""
    classifier = VQC(num_qubits=2, loss=loss, quantum_instance=quantum_instance)

    features = scipy.sparse.csr_matrix([[0, 0], [1, 1]])
    labels = scipy.sparse.csr_matrix([[1, 0], [0, 1]])

    # fit to data
    classifier.fit(features, labels)

    # score
    score = classifier.score(features, labels)
    self.assertGreater(score, 0.5)
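# `_test_sparse_arrays` is a helper and is not collected by the test runner on its own. A
# hypothetical driver that exercises it over both quantum instances is sketched below; the
# ddt-style `@data` decorator and the particular loss strings are assumptions, not taken from
# the snippets in this section.
@data("squared_error", "cross_entropy")
def test_sparse_arrays(self, loss):
    """Tests VQC on sparse features and labels for both quantum instances."""
    for quantum_instance in [self.sv_quantum_instance, self.qasm_quantum_instance]:
        self._test_sparse_arrays(quantum_instance, loss)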
def test_vqc(self, config):
    """Test VQC."""
    opt, q_i = config

    if q_i == 'statevector':
        quantum_instance = self.sv_quantum_instance
    else:
        quantum_instance = self.qasm_quantum_instance

    if opt == 'bfgs':
        optimizer = L_BFGS_B(maxiter=5)
    else:
        optimizer = COBYLA(maxiter=25)

    num_inputs = 2
    feature_map = ZZFeatureMap(num_inputs)
    ansatz = RealAmplitudes(num_inputs, reps=1)

    # construct classifier - note: CrossEntropy requires eval_probabilities=True!
    classifier = VQC(feature_map=feature_map,
                     ansatz=ansatz,
                     optimizer=optimizer,
                     quantum_instance=quantum_instance)

    # construct data
    num_samples = 5
    X = np.random.rand(num_samples, num_inputs)  # pylint: disable=invalid-name
    y = 1.0 * (np.sum(X, axis=1) <= 1)
    while len(np.unique(y)) == 1:
        X = np.random.rand(num_samples, num_inputs)  # pylint: disable=invalid-name
        y = 1.0 * (np.sum(X, axis=1) <= 1)
    y = np.array([y, 1 - y]).transpose()  # VQC requires one-hot encoded input

    # fit to data
    classifier.fit(X, y)

    # score
    score = classifier.score(X, y)
    self.assertGreater(score, 0.5)
def test_default_parameters(self, config):
    """Test VQC instantiation with default parameters."""
    provide_num_qubits, provide_feature_map, provide_ansatz = config

    num_inputs = 2
    num_qubits, feature_map, ansatz = None, None, None

    if provide_num_qubits:
        num_qubits = num_inputs
    if provide_feature_map:
        feature_map = ZZFeatureMap(num_inputs)
    if provide_ansatz:
        ansatz = RealAmplitudes(num_inputs, reps=1)

    classifier = VQC(
        num_qubits=num_qubits,
        feature_map=feature_map,
        ansatz=ansatz,
        quantum_instance=self.qasm_quantum_instance,
    )

    # construct data
    num_samples = 5
    # pylint: disable=invalid-name
    X = algorithm_globals.random.random((num_samples, num_inputs))
    y = 1.0 * (np.sum(X, axis=1) <= 1)
    while len(np.unique(y)) == 1:
        X = algorithm_globals.random.random((num_samples, num_inputs))
        y = 1.0 * (np.sum(X, axis=1) <= 1)
    y = np.array([y, 1 - y]).transpose()  # VQC requires one-hot encoded input

    # fit to data
    classifier.fit(X, y)

    # score
    score = classifier.score(X, y)
    self.assertGreater(score, 0.5)
def test_usage_in_vqc(self):
    """Test that using the circuit in a single VQC iteration works."""
    # specify quantum instance and random seed
    algorithm_globals.random_seed = 12345
    quantum_instance = QuantumInstance(
        Aer.get_backend("aer_simulator_statevector"),
        seed_simulator=algorithm_globals.random_seed,
        seed_transpiler=algorithm_globals.random_seed,
    )

    # construct data
    num_samples = 10
    num_inputs = 4
    # pylint: disable=invalid-name
    X = algorithm_globals.random.random((num_samples, num_inputs))
    y = 1.0 * (np.sum(X, axis=1) <= 2)
    while len(np.unique(y, axis=0)) == 1:
        # regenerate the features, otherwise the labels can never change
        X = algorithm_globals.random.random((num_samples, num_inputs))
        y = 1.0 * (np.sum(X, axis=1) <= 2)
    y = np.array([y, 1 - y]).transpose()

    feature_map = RawFeatureVector(feature_dimension=num_inputs)
    ansatz = RealAmplitudes(feature_map.num_qubits, reps=1)

    # classification may fail sometimes, so let's fix the initial point
    initial_point = np.array([0.5] * ansatz.num_parameters)

    vqc = VQC(
        feature_map=feature_map,
        ansatz=ansatz,
        optimizer=COBYLA(maxiter=10),
        quantum_instance=quantum_instance,
        initial_point=initial_point,
    )
    vqc.fit(X, y)

    score = vqc.score(X, y)
    self.assertGreater(score, 0.5)
def test_batches_with_incomplete_labels(self, config):
    """Test VQC when some batches do not include all possible labels."""
    opt, q_i = config

    if q_i == "statevector":
        quantum_instance = self.sv_quantum_instance
    elif q_i == "qasm":
        quantum_instance = self.qasm_quantum_instance
    else:
        quantum_instance = None

    if opt == "bfgs":
        optimizer = L_BFGS_B(maxiter=5)
    elif opt == "cobyla":
        optimizer = COBYLA(maxiter=25)
    else:
        optimizer = None

    num_inputs = 2
    feature_map = ZZFeatureMap(num_inputs)
    ansatz = RealAmplitudes(num_inputs, reps=1)

    # Construct the data.
    features = algorithm_globals.random.random((15, num_inputs))
    target = np.asarray([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2])
    num_classes = len(np.unique(target))

    # One-hot encode the target.
    target_onehot = np.zeros((target.size, int(target.max() + 1)))
    target_onehot[np.arange(target.size), target.astype(int)] = 1

    # Initialize the VQC.
    classifier = VQC(
        feature_map=feature_map,
        ansatz=ansatz,
        optimizer=optimizer,
        warm_start=True,
        quantum_instance=quantum_instance,
    )
    classifier._get_interpret = self.get_num_classes(classifier._get_interpret)

    # Fit the VQC to the first third of the data.
    classifier.fit(features[:5, :], target_onehot[:5])

    # Fit the VQC to the second third of the data with a warm start.
    classifier.fit(features[5:10, :], target_onehot[5:10])

    # Fit the VQC to the final third of the data with a warm start.
    classifier.fit(features[10:, :], target_onehot[10:])

    # Check that all batches assume the correct number of classes.
    self.assertTrue((np.asarray(self.num_classes_by_batch) == num_classes).all())
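# The test above wraps `classifier._get_interpret` with `self.get_num_classes`, which is not shown
# in this section. One plausible sketch, assuming `_get_interpret` takes the number of classes as
# its only argument and that `self.num_classes_by_batch` is reset to an empty list in `setUp`, is
# a recording decorator like the following:
def get_num_classes(self, func):
    """Wrapper that records the number of classes assumed for each fitted batch."""
    def wrapper(num_classes):
        # Remember the class count used for this batch, then delegate to the original method.
        self.num_classes_by_batch.append(num_classes)
        return func(num_classes)
    return wrapper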
from qiskit_machine_learning.datasets import ad_hoc_data
from qiskit_machine_learning.algorithms import VQC
from qiskit.circuit.library import ZZFeatureMap, RealAmplitudes
from qiskit.algorithms.optimizers import L_BFGS_B
from qiskit.providers.aer import QasmSimulator

X_train, y_train, X_test, y_test = ad_hoc_data(20, 10, 2, 0.1)

num_qubits = 2
vqc = VQC(feature_map=ZZFeatureMap(num_qubits),
          ansatz=RealAmplitudes(num_qubits, reps=1),
          loss='cross_entropy',
          optimizer=L_BFGS_B(),
          quantum_instance=QasmSimulator())

vqc.fit(X_train, y_train)
vqc.score(X_test, y_test)
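# A hedged follow-up to the snippet above: once fitted, the classifier can also be used for
# prediction. `predict` returns labels in the same (here one-hot) format the model was trained on;
# printing a few rows next to `y_test` is purely for illustration and not part of the original
# snippet.
predictions = vqc.predict(X_test)
print(predictions[:3])
print(y_test[:3])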
def test_multiclass(self, config):
    """Test multiclass VQC."""
    opt, q_i = config

    if q_i == "statevector":
        quantum_instance = self.sv_quantum_instance
    elif q_i == "qasm":
        quantum_instance = self.qasm_quantum_instance
    else:
        quantum_instance = None

    if opt == "bfgs":
        optimizer = L_BFGS_B(maxiter=5)
    elif opt == "cobyla":
        optimizer = COBYLA(maxiter=25)
    else:
        optimizer = None

    num_inputs = 2
    feature_map = ZZFeatureMap(num_inputs)
    ansatz = RealAmplitudes(num_inputs, reps=1)

    # fix the initial point
    initial_point = np.array([0.5] * ansatz.num_parameters)

    # construct classifier - note: CrossEntropy requires eval_probabilities=True!
    classifier = VQC(
        feature_map=feature_map,
        ansatz=ansatz,
        optimizer=optimizer,
        quantum_instance=quantum_instance,
        initial_point=initial_point,
    )

    # construct data
    num_samples = 5
    num_classes = 5
    # pylint: disable=invalid-name
    # We create a dataset that is random but has some training signal, as follows:
    # First, we create a random feature matrix X and sort it by the row-wise sum in ascending
    # order.
    X = algorithm_globals.random.random((num_samples, num_inputs))
    X = X[X.sum(1).argsort()]

    # Next, we create an array containing all class labels, repeated if num_samples >
    # num_classes, and in ascending order (e.g. [0, 0, 1, 1, 2]). So now we have a dataset where
    # the row-sum of X is correlated with the class label (i.e. a smaller row-sum is more likely
    # to belong to class 0, and a larger row-sum is more likely to belong to a class > 0).
    y_indices = (
        np.digitize(np.arange(0, 1, 1 / num_samples), np.arange(0, 1, 1 / num_classes)) - 1
    )

    # Third, we randomly shuffle both X and y_indices.
    permutation = np.random.permutation(np.arange(num_samples))
    X = X[permutation]
    y_indices = y_indices[permutation]

    # Lastly, we create a one-hot label matrix y.
    y = np.zeros((num_samples, num_classes))
    for e, index in enumerate(y_indices):
        y[e, index] = 1

    # fit to data
    classifier.fit(X, y)

    # score
    score = classifier.score(X, y)
    self.assertGreater(score, 1 / num_classes)