def test_average_data_list_observable(self):
    """Check average_data with list-form observables on a 3-qubit GHZ state."""
    qubits = qiskit.QuantumRegister(3)
    clbits = qiskit.ClassicalRegister(3)
    circuit = qiskit.QuantumCircuit(qubits, clbits, name="qc")
    # Prepare GHZ: H on qubit 0, then fan out with CNOTs.
    circuit.h(qubits[0])
    circuit.cx(qubits[0], qubits[1])
    circuit.cx(qubits[0], qubits[2])
    for idx in range(3):
        circuit.measure(qubits[idx], clbits[idx])

    shots = 10000
    backend = BasicAer.get_backend("qasm_simulator")
    counts = qiskit.execute(circuit, backend, shots=shots).result().get_counts(circuit)

    # Each observable is a length-8 list of eigenvalues, indexed by bitstring.
    observables = {
        "zzz": [1, -1, -1, 1, -1, 1, 1, -1],
        "zii": [1, 1, 1, 1, -1, -1, -1, -1],
        "izi": [1, 1, -1, -1, 1, 1, -1, -1],
        "zzi": [1, 1, -1, -1, -1, -1, 1, 1],
    }
    means = {label: average_data(counts, obs) for label, obs in observables.items()}

    # For a GHZ state: single/odd-parity Z expectations vanish, <ZZI> = 1.
    self.assertAlmostEqual(means["zzz"], 0, places=1)
    self.assertAlmostEqual(means["zii"], 0, places=1)
    self.assertAlmostEqual(means["izi"], 0, places=1)
    self.assertAlmostEqual(means["zzi"], 1, places=1)
def _evaluate_expectation(self):
    """
    Utility function for evaluating expectation value of two-qubit
    Pauli measurements.

    Returns:
        A dict where keys are pairs of Pauli operators, e.g. ('X', 'Z')
        or ('I', 'X'), and values are the expectation values.
    """
    expectations = {}
    for first, second in product(['I', 'X', 'Y', 'Z'], repeat=2):
        # ('I', 'I') carries no measurement information — skip it.
        if first == 'I' and second == 'I':
            continue
        if first == 'I':
            # Identity on the first qubit: measured data lives under a
            # 'Z' placeholder; average only the second qubit's outcome.
            expectations[(first, second)] = average_data(
                self._data[('Z', second)], _OBSERVABLE_SECOND)
        elif second == 'I':
            expectations[(first, second)] = average_data(
                self._data[(first, 'Z')], _OBSERVABLE_FIRST)
        else:
            # Both non-identity: correlated two-qubit observable.
            expectations[(first, second)] = average_data(
                self._data[(first, second)], _OBSERVABLE_CORRELATED)
    return expectations
def test_average_data_dict_observable(self):
    """Check average_data with dict-form observables on a 2-qubit Bell state."""
    qubits = qiskit.QuantumRegister(2)
    clbits = qiskit.ClassicalRegister(2)
    circuit = qiskit.QuantumCircuit(qubits, clbits, name="qc")
    # Prepare a Bell pair.
    circuit.h(qubits[0])
    circuit.cx(qubits[0], qubits[1])
    for idx in range(2):
        circuit.measure(qubits[idx], clbits[idx])

    shots = 10000
    backend = BasicAer.get_backend("qasm_simulator")
    counts = qiskit.execute(circuit, backend, shots=shots).result().get_counts(circuit)

    # Observables map each bitstring to its eigenvalue.
    mean_zz = average_data(
        counts=counts,
        observable={"00": 1, "11": 1, "01": -1, "10": -1})
    mean_zi = average_data(
        counts, {"00": 1, "11": -1, "01": 1, "10": -1})
    mean_iz = average_data(
        counts, {"00": 1, "11": -1, "01": -1, "10": 1})

    # Bell state: perfectly correlated in Z, single-qubit Z averages to 0.
    self.assertAlmostEqual(mean_zz, 1, places=1)
    self.assertAlmostEqual(mean_zi, 0, places=1)
    self.assertAlmostEqual(mean_iz, 0, places=1)
def calc_data(self):
    """
    Measure the purity calculation into an internal variable _raw_data
    which is a 3-dimensional list, where item (i,j,k) is the purity
    of the set of qubits in pattern "i"
    for seed no. j and vector length self._cliff_lengths[i][k].

    Additional information:
        Assumes that 'result' was executed is
        the output of circuits generated by randomized_benchmarking_seq,
    """
    # Merged counts and total shot numbers, keyed by canonical circuit name
    # 'rb_purity_<pur>_length_<len>_seed_<seed>'.
    circ_counts = {}
    circ_shots = {}
    # Index of the next entry in self.rbfit_pur.results to read a name from.
    result_count = 0

    # Calculating the result output
    for _, seed in enumerate(self.rbfit_pur.seeds):
        for pur in range(self._npurity):
            # Circuit-name prefix taken from the header of the first
            # experiment in the current result object (text before "_length").
            self._circ_name_type = self.rbfit_pur.results[
                result_count].results[0].header.name.split("_length")[0]
            result_count += 1
            for circ, _ in enumerate(self._cliff_lengths[0]):
                circ_name = self._circ_name_type + '_length_%d_seed_%d' \
                    % (circ, seed)
                count_list = []
                # Gather this circuit's counts from every result that has it;
                # results without it are skipped (best-effort collection).
                for result in self.rbfit_pur.results:
                    try:
                        count_list.append(result.get_counts(circ_name))
                    except (QiskitError, KeyError):
                        pass
                # Re-key the merged counts under the canonical purity name.
                circ_name = 'rb_purity_' + str(pur) + \
                    '_length_%d_seed_%d' % (circ, seed)
                circ_counts[circ_name] = build_counts_dict_from_list(
                    count_list)
                circ_shots[circ_name] = sum(
                    circ_counts[circ_name].values())

    # Calculating raw_data
    self.rbfit_pur.raw_data = []
    # Start index of the current pattern's qubit window into the counts.
    startind = 0
    # for each pattern
    for patt_ind, _ in enumerate(self._rb_pattern):
        endind = startind + len(self._rb_pattern[patt_ind])
        self.rbfit_pur.raw_data.append([])
        # for each seed
        for seedidx, seed in enumerate(self.rbfit_pur.seeds):
            self.rbfit_pur.raw_data[-1].append([])
            # for each length
            for k, _ in enumerate(self._cliff_lengths[0]):
                # vector of the 4^n correlators and counts
                corr_vec = [0] * (4**self._nq)
                count_vec = [0] * (4**self._nq)
                # corr_list = [[] for e in range(4 ** self._nq)]
                for pur in range(self._npurity):
                    circ_name = 'rb_purity_' + str(pur) + \
                        '_length_%d_seed_%d' % (k, seed)
                    # marginal counts for the pattern
                    counts_subspace = marginal_counts(
                        circ_counts[circ_name],
                        np.arange(startind, endind))
                    # calculating the vector of 4^n correlators
                    for indcorr in range(2**self._nq):
                        zcorr = average_data(
                            counts_subspace,
                            self._zdict_ops[indcorr])
                        # F234 maps (Z-correlator index, purity-circuit index)
                        # to the corresponding 4^n correlator slot.
                        zind = self.F234(self._nq, indcorr, pur)
                        corr_vec[zind] += zcorr
                        count_vec[zind] += 1
                # calculating the purity: mean correlators squared,
                # normalized by the Hilbert-space dimension 2^n.
                purity = 0
                for idx, _ in enumerate(corr_vec):
                    purity += (corr_vec[idx] / count_vec[idx])**2
                purity = purity / (2**self._nq)
                self.rbfit_pur.raw_data[-1][seedidx].append(purity)
        # Advance the qubit window to the next pattern.
        startind = endind