Example #1
def calculate_results(delay_duration, job_id_path, save_data_path, backend,
                      simulator, nseed):
    exp_data = pickle_load(job_id_path)
    job_sim = simulator.retrieve_job(exp_data["simulator"]["job_id"])
    job_delay_before = [
        backend.retrieve_job(job_id)
        for job_id in exp_data["delay_before"]["job_id"]
    ]
    job_delay_after = [
        backend.retrieve_job(job_id)
        for job_id in exp_data["delay_after"]["job_id"]
    ]
    delay_duration_list = exp_data["delay_duration_list"]

    eval_delay = EvaluateDelay(job_sim,
                               job_delay_before,
                               job_delay_after,
                               delay_duration_list,
                               nseed=nseed,
                               initial_layout=[0])
    counts_before_list_list, counts_after_list_list = eval_delay.evaluate(
    )  # return [[seed1 counts_dict], [seed2 counts_dict], ... ]

    # calculate mean and sem (standard error of the mean) of counts
    counts_before_list_mean, counts_before_list_sem = eval_delay.stat(
        counts_before_list_list)
    counts_after_list_mean, counts_after_list_sem = eval_delay.stat(
        counts_after_list_list)

    # calculate jsd
    before_jsd_all, before_jsd_mean, before_jsd_sem = eval_delay.js_divergence(
        counts_before_list_list)
    after_jsd_all, after_jsd_mean, after_jsd_sem = eval_delay.js_divergence(
        counts_after_list_list)

    save_data = {
        "before_op": {
            "counts_list_all": counts_before_list_list,
            "counts_list_mean": counts_before_list_mean,
            "counts_list_sem": counts_before_list_sem,
            "jsd_all": before_jsd_all,
            "jsd_mean": before_jsd_mean,
            "jsd_sem": before_jsd_sem,
        },
        "after_op": {
            "counts_list_all": counts_after_list_list,
            "counts_list_mean": counts_after_list_mean,
            "counts_list_sem": counts_after_list_sem,
            "jsd_all": after_jsd_all,
            "jsd_mean": after_jsd_mean,
            "jsd_sem": after_jsd_sem,
        },
        "delay_duration": delay_dutation,
        "nseed": nseed
    }
    pickle_dump(save_data, save_data_path)
    return save_data
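# Note: EvaluateDelay.js_divergence is not shown in these examples. A minimal
# sketch of how a Jensen-Shannon divergence between two Qiskit-style counts
# dicts could be computed (an illustration using scipy, not the project's
# actual implementation; counts_jsd and its arguments are hypothetical names):
from scipy.spatial.distance import jensenshannon


def counts_jsd(counts_p, counts_q):
    """JS divergence (base 2) between two counts dictionaries."""
    keys = sorted(set(counts_p) | set(counts_q))
    total_p = sum(counts_p.values())
    total_q = sum(counts_q.values())
    p = [counts_p.get(k, 0) / total_p for k in keys]
    q = [counts_q.get(k, 0) / total_q for k in keys]
    # scipy returns the JS distance, i.e. the square root of the divergence
    return jensenshannon(p, q, base=2) ** 2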
Example #2
def result(dir_path, backend_name, save_path=None):
    # get path to this file and parent dir
    job_files = glob.glob(dir_path + '/*.pickle')
    # job_files = job_dir

    # open job files
    job_id_set = []
    bench_name_list = []
    for job_file in job_files:
        job_data = pickle_load(job_file)
        job_id_set.append(job_data["job"])
        bench_name_list.append(job_data["bench_names"])

    # load ibmq backend
    backend = get_IBM_backend(backend_name)
    simulator = get_IBM_backend('ibmq_qasm_simulator')

    # retrieve jobs and results
    # simulator
    counts_sim_set = _retrieve_result(job_id_set,
                                      bench_name_list,
                                      device=simulator,
                                      type='simulator')

    # Dense layout
    counts_dense_set = _retrieve_result(job_id_set,
                                        bench_name_list,
                                        device=backend,
                                        type='dense')

    # noise-adaptive layout
    counts_noise_set = _retrieve_result(job_id_set,
                                        bench_name_list,
                                        device=backend,
                                        type='noise')

    # sabre layout
    counts_sabre_set = _retrieve_result(job_id_set,
                                        bench_name_list,
                                        device=backend,
                                        type='sabre')

    # xtalk adaptive transpiler
    counts_xtalk_set = _retrieve_result(job_id_set,
                                        bench_name_list,
                                        device=backend,
                                        type='xtalk')

    if save_path:
        dir_path = os.path.dirname(save_path)
        if not os.path.exists(dir_path):
            print('make directory: ', dir_path)
            os.mkdir(dir_path)
        _save_results(counts_sim_set, counts_dense_set, counts_noise_set,
                      counts_sabre_set, counts_xtalk_set, save_path)
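# Note: pickle_load / pickle_dump are used throughout these examples but are
# defined elsewhere in the project. A minimal sketch of what such helpers
# presumably look like (an assumption, not the project's actual code):
import pickle


def pickle_load(path):
    """Load a Python object from a pickle file."""
    with open(path, "rb") as f:
        return pickle.load(f)


def pickle_dump(obj, path):
    """Write a Python object to a pickle file."""
    with open(path, "wb") as f:
        pickle.dump(obj, f)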
Example #3
def plot_sched(data_path, save_dir):

    data = pickle_load(data_path)

    data_nonsched = data['nonsched']
    data_alap = data['alap']

    for i, (_nonsched, _alap) in enumerate(zip(data_nonsched, data_alap)):
        save_path = save_dir + '/' + str(i) + '.png'
        _plot(_nonsched, _alap, save_path)
Example #4
def result_sched(dir_path, backend_name, save_path=None):
    # get path to this file and parent dir
    job_files = glob.glob(dir_path + '/*.pickle')
    # job_files = job_dir

    # open job files
    job_id_set = []
    bench_name_list = []
    for job_file in job_files:
        job_data = pickle_load(job_file)
        job_id_set.append(job_data["job"])
        bench_name_list.append(job_data["bench_names"])
        

    # load ibmq backend
    backend = get_IBM_backend(backend_name)
    simulator = get_IBM_backend('ibmq_qasm_simulator')

    # retrieve jobs and load results

    # simulator
    counts_sim_set = _retrieve_load_result(job_id_set, bench_name_list, device=simulator, type='simulator')
    # pprint(counts_sim_set)

    # nonsched layout
    counts_set_nonsched = _retrieve_load_result(job_id_set, bench_name_list, device=backend, type='nonsched')
    # pprint(counts_set_nonsched)
    jsd_nonsched = _analyze_results(counts_sim_set, counts_set_nonsched)
    pprint(jsd_nonsched)

    # alap adaptive transpiler
    counts_set_alap = _retrieve_load_result(job_id_set, bench_name_list, device=backend, type='alap')
    # print(counts_set_alap)
    jsd_alap = _analyze_results(counts_sim_set, counts_set_alap)
    pprint(jsd_alap)

    eval_dict = {
        'nonsched': jsd_nonsched,
        'alap': jsd_alap
    }
    if save_path:
        dir_path = os.path.dirname(save_path)
        if not os.path.exists(dir_path):
            print('make directory: ', dir_path)
            os.mkdir(dir_path)
        pickle_dump(eval_dict, save_path)
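# Note: the save-path handling above (also used in Example #2) creates only
# the final directory level; os.mkdir raises FileNotFoundError when an
# intermediate directory is missing. A sketch of a more robust alternative
# (ensure_parent_dir is a hypothetical helper, not part of the project):
import os


def ensure_parent_dir(save_path):
    """Create the parent directory of save_path, including intermediates."""
    dir_path = os.path.dirname(save_path)
    if dir_path and not os.path.exists(dir_path):
        print('make directory: ', dir_path)
        os.makedirs(dir_path, exist_ok=True)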
Example #5
def plot_xtalk(data_path, save_dir):

    data = pickle_load(data_path)
    print(data)

    data_dense = data['dense']
    data_noise = data['noise']
    data_sabre = data['sabre']
    data_xtalk = data['xtalk']

    for i, (_dense, _noise, _sabre, _xtalk) in enumerate(
            zip(data_dense, data_noise, data_sabre, data_xtalk)):
        try:
            save_path = save_dir + '_nonlabel/' + str(i) + '.png'
            _plot(_dense, _noise, _sabre, _xtalk, save_path)
        except Exception as err:
            print("failed: ", i, err)
Example #6
def test_transpile_on_regulated_hw():
    multi_circuit_components = {"QFT_2": 1, "Toffoli": 3}
    jobfile_dir = "/Users/Yasuhiro/Documents/aqua/gp/experiments/test/test_jobfile/"
    benchmark = CompilerBenchmark(backend_name="ibmq_toronto",
                                  reservations=False)

    xtalk_raw_value = pickle_load(
        "/Users/Yasuhiro/Documents/aqua/gp/errors_information/toronto_from20201030/xtalk_data_daily/epc/2020-10-30.pickle"
    )
    xtalk_prop = value_to_ratio(xtalk_raw_value, threshold=1.25)
    pprint(xtalk_prop)

    multi_circ = benchmark.run(multi_circuit_components, jobfile_dir,
                               xtalk_prop)

    for circ in multi_circ:
        print(
            "#################################################################"
        )
        print(circ)
Example #7
def execute_set4():
    # prepare benchmark qc
    name_list_set = load_gspread(worksheet_name='set_4', num_set=11, num_qc=4)
    qc_path = '/Users/Yasuhiro/Documents/aqua/gp/experiments/xtalk_compiler/benchmark_qc/qasmbench.pickle'
    
    for label, name_list in enumerate(name_list_set):
        qasmbench = PrepQASMBench(name_list, qc_path)
        qc_list = qasmbench.qc_list()

        # prepare execution environment
        backend = get_IBM_backend('ibmq_toronto')
        simulator = get_IBM_backend('ibmq_qasm_simulator')
        shots_single = 8192
        shots_multi = 8192
        xtalk_prop = pickle_load('/Users/Yasuhiro/Documents/aqua/gp/errors_information/toronto_from20201224/xtalk_data_daily/ratio/2021-01-12.pickle')
        save_path = '/Users/Yasuhiro/Documents/aqua/gp/experiments/xtalk_compiler/ibmq_toronto/job_id/2021-01-12_set4/'+  str(label) +'.pickle'

        data = execute_xtalk(name_list, qc_list, backend, simulator, shots_single, shots_multi, xtalk_prop, save_path)

        pprint(data)
Example #8
def test_evaluate_delay():
    backend = get_IBM_backend("ibmq_rome")
    simulator = get_IBM_backend("ibmq_qasm_simulator")

    # load experiments data
    data_path = "/Users/Yasuhiro/Documents/aqua/gp/experiments/waiting_duration/job_id/2020-11-10_qc_name_is_ibmq_rome.pickle"
    exp_data = pickle_load(data_path)

    job_sim = simulator.retrieve_job(exp_data["simulator"]["job_id"])
    job_delay_op = [backend.retrieve_job(job_id) for job_id in exp_data["delay_op"]["job_id"]]
    job_delay_meas = [backend.retrieve_job(job_id) for job_id in exp_data["delay_meas"]["job_id"]]
    delay_duration_list = exp_data["delay_duration_list"]

    initial_layout = [0, 1, 2]

    eval_delay = EvaluateDelay(job_sim, job_delay_op, job_delay_meas, delay_duration_list, nseed=1, initial_layout=initial_layout)
    op_list, meas_list = eval_delay.evaluate()

    jsd_list_list, mean_list, sem_list = eval_delay.js_divergence(op_list)
    print(jsd_list_list, mean_list, sem_list)
Example #9
def execute_set6(today):
    experiments_path = '/Users/rum/Documents/aqua/gp/experiments'
    errorinfo_path = '/Users/rum/Documents/aqua/gp/errors_information'
    # prepare benchmark qc
    name_list_set = load_gspread(worksheet_name='set_6',
                                 num_set=7,
                                 num_qc=6,
                                 shift=6)
    qc_path = experiments_path + '/xtalk_compiler/benchmark_qc/qasmbench.pickle'

    #################
    pointer = 0

    #################

    for label, name_list in enumerate(name_list_set):
        try:
            qasmbench = PrepQASMBench(name_list, qc_path)
            qc_list = qasmbench.qc_list()

            # prepare execution environment
            backend = get_IBM_backend('ibmq_toronto')
            simulator = get_IBM_backend('ibmq_qasm_simulator')
            shots_single = 8192
            shots_multi = 8192
            xtalk_prop = pickle_load(
                errorinfo_path +
                '/toronto_from20201224/xtalk_data_daily/ratio/' + str(today) +
                '.pickle')
            save_path = experiments_path + '/xtalk_compiler/ibmq_toronto/job_id/' + str(
                today) + '_set6/' + str(label + pointer) + '.pickle'

            data = execute_xtalk(qc_list, backend, simulator, shots_single,
                                 shots_multi, xtalk_prop, save_path)

            pprint(data)
        except Exception as err:
            print('Failed: set6 label:', label, ' ', name_list, err)
Example #10
    def evaluate(self, experiments_data_path_list: list):
        obj_list = []
        for i, experiments_data_path in enumerate(experiments_data_path_list):

            exp_data = pickle_load(experiments_data_path)
            job_sim = self.simulator.retrieve_job(exp_data["simulator"]["job_id"])
            job_delay_before = [self.backend.retrieve_job(job_id) for job_id in exp_data["delay_before"]["job_id"]]
            job_delay_after = [self.backend.retrieve_job(job_id) for job_id in exp_data["delay_after"]["job_id"]]
            delay_duration_list = exp_data["delay_duration_list"]

            eval_delay = EvaluateDelay(job_sim, job_delay_before, job_delay_after, delay_duration_list, nseed=self.nseed, initial_layout=self.initial_layout_list)
            counts_before_list_list, counts_after_list_list = eval_delay.evaluate()  # return [[seed1 counts_dict], [seed2 counts_dict], ... ]

            before_jsd_all, before_jsd_mean, before_jsd_sem = eval_delay.js_divergence(counts_before_list_list)
            after_jsd_all, after_jsd_mean, after_jsd_sem = eval_delay.js_divergence(counts_after_list_list)

            obj = {
                "before_op": {
                    "jsd_all": before_jsd_all,
                    "jsd_mean": before_jsd_mean,
                    "jsd_sem": before_jsd_sem,
                },
                "after_op": {
                    "jsd_all": after_jsd_all,
                    "jsd_mean": after_jsd_mean,
                    "jsd_sem": after_jsd_sem,
                },
                "nseed": self.nseed
            }
            obj_list.append(obj)

            # save as pickle file
            path_to_dir = "/Users/Yasuhiro/Documents/aqua/gp/experiments/waiting_duration/data/jsd/plus_state_decay/"
            file_name = self.backend_name + "_" + str(date.today()) + "_qubit-" + str(i) + ".pickle"
            save_path = path_to_dir + file_name
            pickle_dump(obj, save_path)
        
        return obj_list
Example #11
savedata_path_e5 = save_data_path(date, "e5", initial_state, initial_layout)

# +
# e5 = calculate_results(delay_duration_e5, jobid_path_e5, savedata_path_e5, backend, simulator, nseed)
# pprint(e5)
# -

savedata_path_e4 = save_data_path(date, "e4", initial_state, initial_layout)

# +
# e4 = calculate_results(delay_duration_e4, jobid_path_e4, savedata_path_e4, backend, simulator, nseed)
# pprint(e4)
# -

# ## Plot results

from experiments.waiting_duration import plot_decay, save_plot_path

save_plot_path_e6 = save_plot_path(date, "e6", initial_state, initial_layout)
e6 = pickle_load(savedata_path_e6)
plot_decay(e6, delay_duration_e6, save_plot_path_e6, ymin=0, ymax=0.8)

save_plot_path_e5 = save_plot_path(date, "e5", initial_state, initial_layout)
e5 = pickle_load(savedata_path_e5)
plot_decay(e5, delay_duration_e5, save_plot_path_e5, ymin=0, ymax=0.5)

save_plot_path_e4 = save_plot_path(date, "e4", initial_state, initial_layout)
e4 = pickle_load(savedata_path_e4)
plot_decay(e4, delay_duration_e4, save_plot_path_e4, ymin=0, ymax=0.25)
Example #12
    backend=backend,
    simulator=simulator,
    initial_layout=initial_layout,
    delay_duration_list=delay_duration_e4,
    nseed=nseed,
)

# ## calculate results

# +
path_ = "/Users/Yasuhiro/Documents/aqua/gp/experiments/waiting_duration/job_id/" + backend_name + "/" + str(
    date.today()) + "_pulse state_e6_" + backend_name + "_["
_path = "].pickle"
experiments_data_path = path_ + str(0) + _path

exp_data = pickle_load(experiments_data_path)
job_sim = simulator.retrieve_job(exp_data["simulator"]["job_id"])
job_delay_before = [
    backend.retrieve_job(job_id)
    for job_id in exp_data["delay_before"]["job_id"]
]
job_delay_after = [
    backend.retrieve_job(job_id)
    for job_id in exp_data["delay_after"]["job_id"]
]
delay_duration_list = exp_data["delay_duration_list"]

eval_delay = EvaluateDelay(job_sim,
                           job_delay_before,
                           job_delay_after,
                           delay_duration_list,
Example #13
def plot_pst(data_path, save_path):

    data = pickle_load(data_path)
    df = _process_data(data)
    _plot(df, save_path)
Example #14
def _run_experiments(
    multi_circuit_components,
    backend=None,
    crosstalk_prop=None,
    crosstalk_info_filepath=None,
    returnCircuit=False,
    onlyCircuit=False,
    multi_opt=False,
    basis_gates=None,
    coupling_map=None,  # circuit transpile options
    backend_properties=None,
    initial_layout=None,
    seed_transpiler=None,
    optimization_level=None,
    pass_manager=None,
    qobj_id=None,
    qobj_header=None,
    shots=1024,  # common run options
    memory=False,
    max_credits=10,
    seed_simulator=None,
    default_qubit_los=None,
    default_meas_los=None,  # schedule run options
    schedule_los=None,
    meas_level=MeasLevel.CLASSIFIED,
    meas_return=MeasReturnType.AVERAGE,
    memory_slots=None,
    memory_slot_size=100,
    rep_time=None,
    rep_delay=None,
    parameter_binds=None,
    schedule_circuit=False,
    inst_map=None,
    meas_map=None,
    scheduling_method=None,
    init_qubits=None,
    **run_config
):
    circuit_list = make_benckmarks(multi_circuit_components)

    if crosstalk_prop is None and isinstance(crosstalk_info_filepath, str):
        cprop = pickle_load(crosstalk_info_filepath)
    else:
        cprop = crosstalk_prop

    experiments = multitasking_transpile(
        multi_circuits=circuit_list,
        backend=backend,
        backend_properties=backend_properties,
        multi_opt=multi_opt,
        crosstalk_prop=cprop,
        optimization_level=optimization_level,
    )

    if not onlyCircuit:
        qobj = assemble(
            experiments,
            qobj_id=qobj_id,
            qobj_header=qobj_header,
            shots=shots,
            memory=memory,
            max_credits=max_credits,
            seed_simulator=seed_simulator,
            default_qubit_los=default_qubit_los,
            default_meas_los=default_meas_los,
            schedule_los=schedule_los,
            meas_level=meas_level,
            meas_return=meas_return,
            memory_slots=memory_slots,
            memory_slot_size=memory_slot_size,
            rep_time=rep_time,
            rep_delay=rep_delay,
            parameter_binds=parameter_binds,
            backend=backend,
            init_qubits=init_qubits,
            **run_config
        )

        # executing the circuits on the backend and returning the job
        start_time = time()
        job = backend.run(qobj, **run_config)
        end_time = time()
        _log_submission_time(start_time, end_time)

        if returnCircuit:
            return job, experiments
        return job
    return experiments
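# Note: a hypothetical usage sketch of _run_experiments, following the
# multi_circuit_components shape from Example #6; the backend name, pickle
# path, and option values below are assumptions, not the project's actual
# configuration. With onlyCircuit=True the transpiled experiments are
# returned without submitting a job:
#
#   backend = get_IBM_backend("ibmq_toronto")
#   experiments = _run_experiments(
#       {"QFT_2": 1, "Toffoli": 3},
#       backend=backend,
#       crosstalk_info_filepath="<path to xtalk ratio pickle>",
#       multi_opt=True,
#       onlyCircuit=True,
#       shots=8192,
#   )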