Example #1
def run_with_measure(qiskit_schedule, backend_name, meas_level=1):
    try:
        from qiskit import providers, assemble
        from qiskit.pulse import DriveChannel, SetFrequency
        from qiskit.pulse.macros import measure
        from qiskit.result.result import Result

        if qiskit_schedule.duration == 0:
            return Result(backend_name, None, None, None, None, None)
        if backend_name == 'Armonk':
            backend = get_qiskit_backend(backend_name)
            pulse_sim = providers.aer.PulseSimulator.from_backend(backend)
            measure_qubits = []
            for channel in qiskit_schedule.channels:
                if isinstance(channel, DriveChannel):
                    measure_qubits.append(channel.index)
            frequency = None
            for start_time, instruction in qiskit_schedule.instructions:
                if isinstance(instruction, SetFrequency):
                    frequency = {instruction.channel: instruction.frequency}
                    break

            def strip_frequencies(instruction):
                # filter predicate over (time, instruction) tuples: drop
                # SetFrequency instructions, keep everything else
                return not isinstance(instruction[1], SetFrequency)

            # Setting frequencies isn't supported on simulators, so instead we use `schedule_los` to set a single
            # frequency and subsequently strip any SetFrequency instructions from the schedule.
            qiskit_schedule = qiskit_schedule.filter(strip_frequencies)
            qiskit_schedule += measure(measure_qubits,
                                       pulse_sim) << qiskit_schedule.duration
            pulse_qobj = assemble(qiskit_schedule,
                                  backend=pulse_sim,
                                  meas_level=meas_level,
                                  schedule_los=frequency)
            job = pulse_sim.run(pulse_qobj)
            return job.result()
        else:
            print(
                "Only FakeArmonk is supported for simulation currently because other backends are too slow"
            )
            return Result(backend_name, None, None, None, None, None)
    except ImportError:
        # qiskit (with pulse-simulator support) is an optional dependency;
        # without it the function silently returns None
        pass
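A usage sketch (hypothetical pulse schedule; assumes qiskit-aer with pulse-simulator support is installed and that the get_qiskit_backend helper used above is available):

from qiskit.pulse import DriveChannel, Gaussian, Play, Schedule, SetFrequency

sched = Schedule(name='drive_qubit_0')
sched += SetFrequency(4.97e9, DriveChannel(0))
sched += Play(Gaussian(duration=128, amp=0.2, sigma=32), DriveChannel(0))

# meas_level=2 requests discriminated (classified) results
result = run_with_measure(sched, 'Armonk', meas_level=2)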
Example #2
import json

from qiskit.result import Result


def load_results_from_json(json_path: str):
    """
    Loads run results from a JSON file.

    Args:
        json_path: the path of the JSON file to load the results from

    Returns:
        list: the results saved in the JSON file (a list of qiskit Results)
    """
    with open(json_path, "r") as results_file:
        results_json = json.load(results_file)
    return [Result.from_dict(result) for result in results_json]
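A complementary save helper (a sketch, not part of the source; real Result payloads may need a custom JSON encoder for numpy and complex values) shows the layout load_results_from_json expects: a JSON list of Result.to_dict() payloads.

def save_results_to_json(results, json_path: str):
    # inverse of load_results_from_json: serialize each qiskit Result with
    # to_dict() and store the list as JSON (assumed layout, mirroring the loader)
    with open(json_path, "w") as results_file:
        json.dump([result.to_dict() for result in results], results_file)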
Example #3
    def test_accred_fitter(self):
        """Test the fitter with some saved result data."""
        # ideal results
        with open(
                os.path.join(os.path.dirname(__file__),
                             'accred_ideal_results.json'), "r") as saved_file:
            ideal_results = json.load(saved_file)
        all_results = [
            Result.from_dict(result) for result in ideal_results['all_results']
        ]
        all_postp_list = ideal_results['all_postp_list']
        all_v_zero = ideal_results['all_v_zero']
        test_1 = accred.AccreditationFitter()
        for a, b, c in zip(all_results, all_postp_list, all_v_zero):
            test_1.single_protocol_run(a, b, c)
            self.assertEqual(test_1.flag, 'accepted',
                             "Error: Ideal outcomes not passing accred")
        # noisy results
        with open(
                os.path.join(os.path.dirname(__file__),
                             'accred_noisy_results.json'), "r") as saved_file:
            noisy_results = json.load(saved_file)
        all_results = [
            Result.from_dict(result) for result in noisy_results['all_results']
        ]
        all_postp_list = noisy_results['all_postp_list']
        all_v_zero = noisy_results['all_v_zero']
        all_acc = noisy_results['all_acc']
        test_1 = accred.AccreditationFitter()
        for a, b, c, d in zip(all_results, all_postp_list, all_v_zero,
                              all_acc):
            test_1.single_protocol_run(a, b, c)
            self.assertEqual(test_1.flag, d,
                             "Error: Noisy outcomes not correct accred")
        test_1.bound_variation_distance(noisy_results['theta'])
        bound = test_1.bound
        self.assertEqual(bound, noisy_results['bound'],
                         "Error: Incorrect bound for noisy outcomes")
Example #4
    def test_accred_fitter(self):
        """ Test the fitter with some saved result data"""
        # ideal results
        with open(
                os.path.join(os.path.dirname(__file__),
                             'accred_ideal_results.json'), "r") as saved_file:
            ideal_results = json.load(saved_file)
        all_results = [
            Result.from_dict(result) for result in ideal_results['all_results']
        ]
        all_postp_list = ideal_results['all_postp_list']
        all_v_zero = ideal_results['all_v_zero']
        test_1 = accred.AccreditationFitter()
        for a, b, c in zip(all_results, all_postp_list, all_v_zero):
            test_1.AppendResults(a, b, c)
        (_, bnd, conf) = test_1.FullAccreditation(0.95)
        self.assertEqual(test_1._Nruns, test_1._Nacc,
                         "Error: Ideal outcomes not passing accred")

        # two-sided Hoeffding deviation at the requested confidence level:
        # 2 * exp(-2 * n * theta**2) = 1 - confidence, solved for theta
        theta = np.sqrt(np.log(2 / (1 - conf)) / (2 * len(all_postp_list)))
        bound = 1.7 / len(all_postp_list[0])
        bound = bound / (1.0 - theta)
        self.assertAlmostEqual(
            bound, bnd, msg="Error: Ideal outcomes not giving correct bound")
        # noisy results
        with open(
                os.path.join(os.path.dirname(__file__),
                             'accred_noisy_results.json'), "r") as saved_file:
            noisy_results = json.load(saved_file)
        all_strings = noisy_results['all_strings']
        all_postp_list = noisy_results['all_postp_list']
        all_v_zero = noisy_results['all_v_zero']
        confidence = noisy_results['confidence']
        accred_full = noisy_results['accred_full']
        accred_mean = noisy_results['accred_mean']

        test_1 = accred.AccreditationFitter()
        for a, b, c in zip(all_strings, all_postp_list, all_v_zero):
            test_1.AppendStrings(a, b, c)

        accred_full_test = test_1.FullAccreditation(confidence)
        accred_mean_test = test_1.MeanAccreditation(confidence)
        self.assertEqual(accred_full_test[1], accred_full[1],
                         "Error: Noisy outcomes fail full accred")

        self.assertEqual(accred_mean[1], accred_mean_test[1],
                         "Error: Noisy outcomes fail mean accred")
Example #5
    def test_tensored_meas_fitter_with_noise(self):
        """Test the TensoredFitter with noise."""

        # pre-generated results with noise
        # load from json file
        with open(
                os.path.join(os.path.dirname(__file__),
                             'test_tensored_meas_results.json'),
                "r") as saved_file:
            saved_info = json.load(saved_file)
        saved_info['cal_results'] = Result.from_dict(saved_info['cal_results'])
        saved_info['results'] = Result.from_dict(saved_info['results'])

        meas_cal = TensoredMeasFitter(saved_info['cal_results'],
                                      mit_pattern=saved_info['mit_pattern'])

        # Calculate the fidelity
        fidelity = meas_cal.readout_fidelity(0) * meas_cal.readout_fidelity(1)
        # Compare with expected fidelity and expected results
        self.assertAlmostEqual(fidelity, saved_info['fidelity'], places=0)

        meas_filter = meas_cal.filter

        # Calculate the results after mitigation
        output_results_pseudo_inverse = meas_filter.apply(
            saved_info['results'].get_counts(0), method='pseudo_inverse')
        output_results_least_square = meas_filter.apply(saved_info['results'],
                                                        method='least_squares')

        self.assertAlmostEqual(output_results_pseudo_inverse['000'],
                               saved_info['results_pseudo_inverse']['000'],
                               places=0)

        self.assertAlmostEqual(
            output_results_least_square.get_counts(0)['000'],
            saved_info['results_least_square']['000'],
            places=0)

        self.assertAlmostEqual(output_results_pseudo_inverse['111'],
                               saved_info['results_pseudo_inverse']['111'],
                               places=0)

        self.assertAlmostEqual(
            output_results_least_square.get_counts(0)['111'],
            saved_info['results_least_square']['111'],
            places=0)

        substates_list = []
        for qubit_list in saved_info['mit_pattern']:
            substates_list.append(count_keys(len(qubit_list))[::-1])

        fitter_other_order = TensoredMeasFitter(
            saved_info['cal_results'],
            substate_labels_list=substates_list,
            mit_pattern=saved_info['mit_pattern'])

        fidelity = (fitter_other_order.readout_fidelity(0) *
                    meas_cal.readout_fidelity(1))

        self.assertAlmostEqual(fidelity, saved_info['fidelity'], places=0)

        meas_filter = fitter_other_order.filter

        # Calculate the results after mitigation
        output_results_pseudo_inverse = meas_filter.apply(
            saved_info['results'].get_counts(0), method='pseudo_inverse')
        output_results_least_square = meas_filter.apply(saved_info['results'],
                                                        method='least_squares')

        self.assertAlmostEqual(output_results_pseudo_inverse['000'],
                               saved_info['results_pseudo_inverse']['000'],
                               places=0)

        self.assertAlmostEqual(
            output_results_least_square.get_counts(0)['000'],
            saved_info['results_least_square']['000'],
            places=0)

        self.assertAlmostEqual(output_results_pseudo_inverse['111'],
                               saved_info['results_pseudo_inverse']['111'],
                               places=0)

        self.assertAlmostEqual(
            output_results_least_square.get_counts(0)['111'],
            saved_info['results_least_square']['111'],
            places=0)
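count_keys is imported from qiskit-ignis in these tests; a minimal sketch of the behavior relied on above (all computational-basis bitstrings of a given width, in ascending binary order, before the [::-1] reversal):

def count_keys(num_qubits: int):
    # '000', '001', ..., '111' for num_qubits == 3
    return [bin(i)[2:].zfill(num_qubits) for i in range(2 ** num_qubits)]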
Example #6
    def _process_job_result(self, job_result: Result, batch: Batch) -> Dict[str, Result]:
        """Post-process the job result corresponding to a batch, recreating the
        individual results by adding up the shots of the multiple executions.

        Args:
            job_result (Result): the result returned for the batch job
            batch (Batch): the batch corresponding to job_result

        Returns:
            Dict[str, Result]: maps the keys of the initial QuantumExecutionJobs
                to their Results
        """
        results = {}
        exp_number = 0
        # get the Result as a dict; its "results" entry is rebuilt for each
        # experiment below
        result_dict = job_result.to_dict()

        index = batch.batch_number
        backend_name = batch.backend_name
        try:
            previous_key = self._previous_key[backend_name]
            previous_memory = self._previous_memory[backend_name]
            previous_counts = self._previous_counts[backend_name]
        except KeyError:
            previous_key = None
            previous_memory = None
            previous_counts = None
        
        self._log.info(f"Process result of job {index}")

        for exp in batch.experiments:
            key = exp["key"]
            circ = exp["circuit"]
            reps = exp["reps"]
            shots = exp["shots"]
            total_shots = exp["total_shots"]
            memory = []
            counts = {}
            result_data = None

            if previous_memory:
                # there are left-over shots from the previous job; they must
                # belong to the same circuit key
                assert previous_key == key
                memory.extend(previous_memory)
                counts.update(previous_counts)
                shots += len(previous_memory)
                total_shots += len(previous_memory)
                previous_memory = None
                previous_counts = None
                previous_key = None
            
            # get the ExperimentResult as a dict
            job_exp_result_dict = job_result._get_experiment(exp_number).to_dict()

            if not (shots == total_shots and reps == 1 and len(memory) == 0):
                # this block is skipped only when there is exactly one
                # experiment (shots == total_shots), a single repetition and no
                # left-over data from a previous job
                for exp_index in range(exp_number, exp_number+reps):
                    mem = job_result.data(exp_index)['memory']
                    memory.extend(mem)
                    cnts = job_result.data(exp_index)['counts']
                    
                    if exp_index == exp_number+reps-1 and shots == total_shots:
                        # last experiment for this circuit
                        if len(memory) > total_shots:
                            # trim memory and counts w.r.t. number of shots
                            too_much = len(memory) - total_shots
                            memory = memory[:total_shots]
                            mem = mem[:-too_much]
                            cnts = dict(Counter(mem))

                    counts = self._add_dicts(counts, cnts)
                
                if shots < total_shots:
                    previous_memory = copy.deepcopy(memory)
                    previous_counts = copy.deepcopy(counts)
                    previous_key = key
                    continue
                if self._memory:
                    result_data = ExperimentResultData(counts=counts, memory=memory).to_dict()
                else:
                    result_data = ExperimentResultData(counts=counts).to_dict()

                # overwrite the data and the shots
                job_exp_result_dict["data"] = result_data
                job_exp_result_dict["shots"] = total_shots

            else:
                if not self._memory:
                    counts = job_result.data(exp_number)['counts']
                    result_data = ExperimentResultData(counts=counts).to_dict()
                    job_exp_result_dict["data"] = result_data

            # overwrite the results with the computed result
            result_dict["results"] = [job_exp_result_dict]
            results[key] = Result.from_dict(result_dict)
            exp_number += reps
        self._previous_key[backend_name] = previous_key
        self._previous_memory[backend_name] = previous_memory
        self._previous_counts[backend_name] = previous_counts
        return results
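The _add_dicts helper used above is not shown in this excerpt; a minimal sketch of the assumed behavior (written as a free function here), summing the counts of shared bitstring keys:

from collections import Counter
from typing import Dict

def _add_dicts(a: Dict[str, int], b: Dict[str, int]) -> Dict[str, int]:
    # merge two counts dictionaries: shared keys are summed, keys present
    # in only one dict are kept as-is
    return dict(Counter(a) + Counter(b))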
Example #7
    def test_tensored_meas_fitter_with_noise(self):
        """Test the TensoredFitter with noise."""
        cal_results, mit_pattern, circuit_results, meas_layout = tensored_calib_circ_execution(
            1000, SEED
        )

        meas_cal = TensoredMeasFitter(cal_results, mit_pattern=mit_pattern)
        meas_filter = meas_cal.filter

        # Calculate the results after mitigation
        results_pseudo_inverse = meas_filter.apply(
            circuit_results.get_counts(), method="pseudo_inverse", meas_layout=meas_layout
        )
        results_least_square = meas_filter.apply(
            circuit_results.get_counts(), method="least_squares", meas_layout=meas_layout
        )
        saved_info = {
            "cal_results": cal_results.to_dict(),
            "results": circuit_results.to_dict(),
            "mit_pattern": mit_pattern,
            "meas_layout": meas_layout,
            "fidelity": meas_cal.readout_fidelity(),
            "results_pseudo_inverse": results_pseudo_inverse,
            "results_least_square": results_least_square,
        }

        saved_info["cal_results"] = Result.from_dict(saved_info["cal_results"])
        saved_info["results"] = Result.from_dict(saved_info["results"])

        meas_cal = TensoredMeasFitter(
            saved_info["cal_results"], mit_pattern=saved_info["mit_pattern"]
        )

        # Calculate the fidelity
        fidelity = meas_cal.readout_fidelity(0) * meas_cal.readout_fidelity(1)
        # Compare with expected fidelity and expected results
        self.assertAlmostEqual(fidelity, saved_info["fidelity"], places=0)

        meas_filter = meas_cal.filter

        # Calculate the results after mitigation
        output_results_pseudo_inverse = meas_filter.apply(
            saved_info["results"].get_counts(0),
            method="pseudo_inverse",
            meas_layout=saved_info["meas_layout"],
        )
        output_results_least_square = meas_filter.apply(
            saved_info["results"], method="least_squares", meas_layout=saved_info["meas_layout"]
        )

        self.assertAlmostEqual(
            output_results_pseudo_inverse["000"],
            saved_info["results_pseudo_inverse"]["000"],
            places=0,
        )

        self.assertAlmostEqual(
            output_results_least_square.get_counts(0)["000"],
            saved_info["results_least_square"]["000"],
            places=0,
        )

        self.assertAlmostEqual(
            output_results_pseudo_inverse["111"],
            saved_info["results_pseudo_inverse"]["111"],
            places=0,
        )

        self.assertAlmostEqual(
            output_results_least_square.get_counts(0)["111"],
            saved_info["results_least_square"]["111"],
            places=0,
        )

        substates_list = []
        for qubit_list in saved_info["mit_pattern"]:
            substates_list.append(count_keys(len(qubit_list))[::-1])

        fitter_other_order = TensoredMeasFitter(
            saved_info["cal_results"],
            substate_labels_list=substates_list,
            mit_pattern=saved_info["mit_pattern"],
        )

        fidelity = fitter_other_order.readout_fidelity(0) * meas_cal.readout_fidelity(1)

        self.assertAlmostEqual(fidelity, saved_info["fidelity"], places=0)

        meas_filter = fitter_other_order.filter

        # Calculate the results after mitigation
        output_results_pseudo_inverse = meas_filter.apply(
            saved_info["results"].get_counts(0),
            method="pseudo_inverse",
            meas_layout=saved_info["meas_layout"],
        )
        output_results_least_square = meas_filter.apply(
            saved_info["results"], method="least_squares", meas_layout=saved_info["meas_layout"]
        )

        self.assertAlmostEqual(
            output_results_pseudo_inverse["000"],
            saved_info["results_pseudo_inverse"]["000"],
            places=0,
        )

        self.assertAlmostEqual(
            output_results_least_square.get_counts(0)["000"],
            saved_info["results_least_square"]["000"],
            places=0,
        )

        self.assertAlmostEqual(
            output_results_pseudo_inverse["111"],
            saved_info["results_pseudo_inverse"]["111"],
            places=0,
        )

        self.assertAlmostEqual(
            output_results_least_square.get_counts(0)["111"],
            saved_info["results_least_square"]["111"],
            places=0,
        )