Example #1
def _save_experiments( 
        qc_list, multi_sim, job_sim, 
        single_nonsched, multi_nonsched, job_nonsched, 
        single_alap, multi_alap, job_alap, 
        backend, shots_single, shots_multi, save_path):

    job_sim_s, job_sim_m = job_sim
    job_non_s, job_non_m = job_nonsched
    job_alap_s, job_alap_m = job_alap

    name_list = [qc.name for qc in qc_list]
    # compose experimental data
    experiments_data = {
        "job": {
            "simulator": {
                "qc": {
                    'single': qc_list, 
                    'multi': multi_sim
                },
                "job_id": {
                    'single': job_sim_s.job_id(),
                    'multi': job_sim_m.job_id(),
                },  
            }, 
            "nonsched": {
                "qc": {
                    'single': single_nonsched, 
                    'multi': multi_nonsched
                },
                "job_id": {
                    'single': job_non_s.job_id(),
                    'multi': job_non_m.job_id()
                },  
            }, 
            "alap": {
                "qc": {
                    'single': single_alap, 
                    'multi': multi_alap
                },
                "job_id": {
                    'single': job_alap_s.job_id(),
                    'multi': job_alap_m.job_id()
                },  
            }, 
        },
        "bench_names": name_list,
        "backend": backend.name(), 
        'shots': {
            'single': shots_single, 
            'multi': shots_multi
        }
    }

    dir_path = os.path.dirname(save_path)
    if not os.path.exists(dir_path): 
        print('make directory: ', dir_path)
        os.mkdir(dir_path)
    pickle_dump(experiments_data, save_path)

    return experiments_data
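
All of these examples rely on the repo-local helpers pickle_dump and pickle_load, whose definitions are not shown on this page. A minimal sketch of what they presumably do (an assumption, not the repo's actual code):

import pickle

def pickle_dump(obj, path):
    # presumed helper: serialize obj to a pickle file at path
    with open(path, "wb") as f:
        pickle.dump(obj, f)

def pickle_load(path):
    # presumed helper: load a pickled object back from path
    with open(path, "rb") as f:
        return pickle.load(f)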
Example #2
def analyze_pst(result_path, save_path=None): 

    result_dict = pickle_load(result_path)
    counts_sim_set = result_dict['sim']
    counts_dense_set = result_dict['dense']
    counts_noise_set = result_dict['noise']
    counts_sabre_set = result_dict['sabre']
    counts_xtalk_set = result_dict['xtalk']
    
    # Dense layout
    pst_dense = _analyze_pst(counts_sim_set, counts_dense_set)

    # noise-adaptive layout
    pst_noise = _analyze_pst(counts_sim_set, counts_noise_set)

    # sabre layout
    pst_sabre = _analyze_pst(counts_sim_set, counts_sabre_set)

    # crosstalk-adaptive transpiler
    pst_xtalk = _analyze_pst(counts_sim_set, counts_xtalk_set)

    eval_dict = {
        'dense': pst_dense,
        'noise': pst_noise,
        'sabre': pst_sabre,
        'xtalk': pst_xtalk
    }
    if save_path:
        dir_path = os.path.dirname(save_path)
        if not os.path.exists(dir_path):
            print('make directory: ', dir_path)
            os.mkdir(dir_path)
        pickle_dump(eval_dict, save_path)

    return eval_dict
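
_analyze_pst is repo code that is not shown here. PST (probability of successful trial) is conventionally the fraction of device shots that return the ideal outcome, taking the simulator's most frequent bitstring as ideal; a minimal per-circuit sketch under that assumption (the helper name is hypothetical):

def pst_single(counts_sim, counts_device):
    # take the simulator's most frequent bitstring as the ideal outcome
    target = max(counts_sim, key=counts_sim.get)
    shots = sum(counts_device.values())
    # fraction of device shots that produced the ideal outcome
    return counts_device.get(target, 0) / shots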
Example #3
def evaluate_alap(data_dir, save_eval_path=None): 
    """
    result_data = {
        "simulator": {
            "qc": qc_sim,
            "result": result_sim,
            "count": count_sim,
        }, 
        "non_scheduling": {
            "qc": qc,
            "result": result_list,
            "count": count_list,
        }, 
        "alap": {
            "qc": qc_alap,
            "result": result_alap_list,
            "count": count_alap_list, 
        }, 
        "qc_names": qc_names,
        "date": date, 
    }
    """
    result_list = [pickle_load(data_dir + "/" + f) for f in os.listdir(data_dir)]
    jsd_dict = convert_to_jsd(result_list)

    if save_eval_path: 
        pickle_dump(jsd_dict, save_eval_path)
    
    return jsd_dict
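
convert_to_jsd (and _analyze_results in the examples below) reduce pairs of count dictionaries to Jensen-Shannon divergences. A per-circuit sketch assuming SciPy, with both empirical distributions aligned on the union of observed bitstrings (the helper name is hypothetical):

import numpy as np
from scipy.spatial.distance import jensenshannon

def counts_jsd(counts_ideal, counts_device):
    # align both count dicts on the union of observed bitstrings
    keys = sorted(set(counts_ideal) | set(counts_device))
    p = np.array([counts_ideal.get(k, 0) for k in keys], dtype=float)
    q = np.array([counts_device.get(k, 0) for k in keys], dtype=float)
    p /= p.sum()
    q /= q.sum()
    # SciPy returns the JS distance; square it to get the divergence
    return jensenshannon(p, q, base=2) ** 2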
Example #4
def result_xtalk(dir_path, backend_name, save_path=None): 
    # collect the job-info pickle files under dir_path
    job_files = glob.glob(dir_path + '/*.pickle')

    # open job files
    job_id_set = []
    bench_name_list = []
    for job_file in job_files:
        job_data = pickle_load(job_file)
        job_id_set.append(job_data["job"])
        bench_name_list.append(job_data["bench_names"])
        

    # load ibmq backend
    backend = get_IBM_backend(backend_name)
    simulator = get_IBM_backend('ibmq_qasm_simulator')

    # retrieve jobs and load their results
    
    # simulator
    counts_sim_set = _retrieve_load_result(job_id_set, bench_name_list, device=simulator, type='simulator')

    # Dense layout
    counts_set_dense = _retrieve_load_result(job_id_set, bench_name_list, device=backend, type='dense')
    # print(counts_set_dense)
    jsd_dense = _analyze_results(counts_sim_set, counts_set_dense)
    pprint(jsd_dense)

    # noise-adaptive layout
    counts_set_noise = _retrieve_load_result(job_id_set, bench_name_list, device=backend, type='noise')
    # print(counts_set_noise)
    jsd_noise = _analyze_results(counts_sim_set, counts_set_noise)
    pprint(jsd_noise)

    # sabre layout
    counts_set_sabre = _retrieve_load_result(job_id_set, bench_name_list, device=backend, type='sabre')
    # print(counts_set_sabre)
    jsd_sabre = _analyze_results(counts_sim_set, counts_set_sabre)
    pprint(jsd_sabre)

    # crosstalk-adaptive transpiler
    counts_set_xtalk = _retrieve_load_result(job_id_set, bench_name_list, device=backend, type='xtalk')
    # print(counts_set_xtalk)
    jsd_xtalk = _analyze_results(counts_sim_set, counts_set_xtalk)
    pprint(jsd_xtalk)

    eval_dict = {
        'dense': jsd_dense,
        'noise': jsd_noise,
        'sabre': jsd_sabre,
        'xtalk': jsd_xtalk
    }
    if save_path:
        save_dir = os.path.dirname(save_path)
        if not os.path.exists(save_dir):
            print('make directory: ', save_dir)
            os.mkdir(save_dir)
        pickle_dump(eval_dict, save_path)

    return eval_dict
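
With the added return value, a call might look like this (directory, backend name, and save path are hypothetical):

eval_dict = result_xtalk(
    dir_path="./job_id/ibmq_toronto",
    backend_name="ibmq_toronto",
    save_path="./data/eval/ibmq_toronto_xtalk.pickle",
)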
Example #5
def calculate_results(delay_duration, job_id_path, save_data_path, backend,
                      simulator, nseed):
    exp_data = pickle_load(job_id_path)
    job_sim = simulator.retrieve_job(exp_data["simulator"]["job_id"])
    job_delay_before = [
        backend.retrieve_job(job_id)
        for job_id in exp_data["delay_before"]["job_id"]
    ]
    job_delay_after = [
        backend.retrieve_job(job_id)
        for job_id in exp_data["delay_after"]["job_id"]
    ]
    delay_duration_list = exp_data["delay_duration_list"]

    eval_delay = EvaluateDelay(job_sim,
                               job_delay_before,
                               job_delay_after,
                               delay_duration_list,
                               nseed=nseed,
                               initial_layout=[0])
    counts_before_list_list, counts_after_list_list = eval_delay.evaluate(
    )  # return [[seed1 counts_dict], [seed2 counts_dict], ... ]

    # calculate mean and sem (standard error mean) of counts
    counts_before_list_mean, counts_before_list_sem = eval_delay.stat(
        counts_before_list_list)
    counts_after_list_mean, counts_after_list_sem = eval_delay.stat(
        counts_after_list_list)

    # calculate jsd
    before_jsd_all, before_jsd_mean, before_jsd_sem = eval_delay.js_divergence(
        counts_before_list_list)
    after_jsd_all, after_jsd_mean, after_jsd_sem = eval_delay.js_divergence(
        counts_after_list_list)

    save_data = {
        "before_op": {
            "counts_list_all": counts_before_list_list,
            "counts_list_mean": counts_before_list_mean,
            "counts_list_sem": counts_before_list_sem,
            "jsd_all": before_jsd_all,
            "jsd_mean": before_jsd_mean,
            "jsd_sem": before_jsd_sem,
        },
        "after_op": {
            "counts_list_all": counts_after_list_list,
            "counts_list_mean": counts_after_list_mean,
            "counts_list_sem": counts_after_list_sem,
            "jsd_all": after_jsd_all,
            "jsd_mean": after_jsd_mean,
            "jsd_sem": after_jsd_sem,
        },
        "delay_duration": delay_dutation,
        "nseed": nseed
    }
    pickle_dump(save_data, save_data_path)
    return save_data
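
EvaluateDelay.stat is repo code that is not shown. For numeric per-seed values, the mean/SEM aggregation it performs presumably reduces to something like this NumPy/SciPy sketch (the repo's actual method operates on counts dictionaries):

import numpy as np
from scipy import stats

def mean_sem_over_seeds(values_per_seed):
    # values_per_seed: nseed rows of per-circuit values
    arr = np.asarray(values_per_seed, dtype=float)
    return arr.mean(axis=0), stats.sem(arr, axis=0)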
Example #6
def _save_results(counts_sim_set, counts_dense_set, counts_noise_set,
                  counts_sabre_set, counts_xtalk_set, save_path):
    result_dict = {
        'sim': counts_sim_set,
        'dense': counts_dense_set,
        'noise': counts_noise_set,
        'sabre': counts_sabre_set,
        'xtalk': counts_xtalk_set,
    }
    pickle_dump(result_dict, save_path)
Example #7
def result_sched(dir_path, backend_name, save_path=None): 
    # collect the job-info pickle files under dir_path
    job_files = glob.glob(dir_path + '/*.pickle')

    # open job files
    job_id_set = []
    bench_name_list = []
    for job_file in job_files:
        job_data = pickle_load(job_file)
        job_id_set.append(job_data["job"])
        bench_name_list.append(job_data["bench_names"])
        

    # load ibmq backend
    backend = get_IBM_backend(backend_name)
    simulator = get_IBM_backend('ibmq_qasm_simulator')

    # retrieve jobs and load their results
    
    # simulator
    counts_sim_set = _retrieve_load_result(job_id_set, bench_name_list, device=simulator, type='simulator')
    # pprint(counts_sim_set)

    # nonsched layout
    counts_set_nonsched = _retrieve_load_result(job_id_set, bench_name_list, device=backend, type='nonsched')
    # pprint(counts_set_nonsched)
    jsd_nonsched = _analyze_results(counts_sim_set, counts_set_nonsched)
    pprint(jsd_nonsched)

    # ALAP scheduling
    counts_set_alap = _retrieve_load_result(job_id_set, bench_name_list, device=backend, type='alap')
    # print(counts_set_alap)
    jsd_alap = _analyze_results(counts_sim_set, counts_set_alap)
    pprint(jsd_alap)

    eval_dict = {
        'nonsched': jsd_nonsched,
        'alap': jsd_alap
    }
    if save_path:
        save_dir = os.path.dirname(save_path)
        if not os.path.exists(save_dir):
            print('make directory: ', save_dir)
            os.mkdir(save_dir)
        pickle_dump(eval_dict, save_path)

    return eval_dict
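
The directory-creation preamble recurs across these examples, and os.mkdir fails when more than one path component is missing. A hypothetical helper using os.makedirs would make the pattern robust:

import os

def ensure_parent_dir(save_path):
    # create the parent directory of save_path, including any missing
    # intermediate directories
    dir_path = os.path.dirname(save_path)
    if dir_path and not os.path.exists(dir_path):
        print('make directory: ', dir_path)
        os.makedirs(dir_path, exist_ok=True)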
Example #8
    def evaluate(self, experiments_data_path_list: list):
        obj_list = []
        for i, experiments_data_path in enumerate(experiments_data_path_list):
            
            exp_data = pickle_load(experiments_data_path)
            job_sim = self.simulator.retrieve_job(exp_data["simulator"]["job_id"])
            job_delay_before = [self.backend.retrieve_job(job_id) for job_id in exp_data["delay_before"]["job_id"]]
            job_delay_after = [self.backend.retrieve_job(job_id) for job_id in exp_data["delay_after"]["job_id"]]
            delay_duration_list = exp_data["delay_duration_list"]
            
            eval_delay = EvaluateDelay(job_sim, job_delay_before, job_delay_after, delay_duration_list, nseed=self.nseed, initial_layout=self.initial_layout_list)
            counts_before_list_list, counts_after_list_list = eval_delay.evaluate() # return [[seed1 counts_dict], [seed2 counts_dict], ... ]

            before_jsd_all, before_jsd_mean, before_jsd_sem = eval_delay.js_divergence(counts_before_list_list)
            after_jsd_all, after_jsd_mean, after_jsd_sem = eval_delay.js_divergence(counts_after_list_list)

            obj = {
                "before_op": {
                    "jsd_add": before_jsd_all, 
                    "jsd_mean": before_jsd_mean, 
                    "jsd_sem": before_jsd_sem, 
                },  
                "after_op": {
                    "jsd_add": after_jsd_all, 
                    "jsd_mean": after_jsd_mean, 
                    "jsd_sem": after_jsd_sem,
                }, 
                "nseed": self.nseed
            }
            obj_list.append(obj)

            # save as pickle file
            path_to_dir = "/Users/Yasuhiro/Documents/aqua/gp/experiments/waiting_duration/data/jsd/plus_state_decay/"
            file_name = self.backend_name + "_" + str(date.today()) + "_qubit-" + str(i) + ".pickle"
            save_path = path_to_dir + file_name
            pickle_dump(obj, save_path)
        
        return obj_list
Example #9
def _save_experiments(qc_sim, qc, qc_alap, job_id_sim, job_id, job_id_alap,
                      names, nseed, save_path):

    # compose experimental data
    experiments_data = {
        "simulator": {
            "qc": qc_sim,
            "job_id": job_id_sim,
        },
        "non_scheduling": {
            "qc": qc,
            "job_id": job_id,
            "nseed": nseed,
        },
        "alap": {
            "qc": qc_alap,
            "job_id": job_id_alap,
            "nseed": nseed,
        },
        "qc_names": names,
    }

    pickle_dump(experiments_data, save_path)
Example #10
def save_results(qc_sim, qc, qc_alap, result_sim, result_list,
                 result_alap_list, count_sim, count_list, count_alap_list,
                 qc_names, date, save_path):
    data = {
        "simulator": {
            "qc": qc_sim,
            "result": result_sim,
            "count": count_sim,
        },
        "non_scheduling": {
            "qc": qc,
            "result": result_list,
            "count": count_list,
        },
        "alap": {
            "qc": qc_alap,
            "result": result_alap_list,
            "count": count_alap_list,
        },
        "qc_names": qc_names,
        "date": date,
    }
    pickle_dump(data, save_path)
Example #11
def save_experiments(qc, backend, job_sim, job_delay_before_list,
                     job_delay_after_list, nseed, delay_duration_list,
                     initial_layout, save_path):
    # # define path to save
    # path_to_dir = "/Users/Yasuhiro/Documents/aqua/gp/experiments/waiting_duration/job_id/"+ backend.name() +"/"
    # file_name = str(date.today()) + "_" + qc.name + "_" + backend.name()
    # if initial_layout is not None:
    #     file_name += "_" + str(initial_layout)
    # save_path = path_to_dir +  file_name + ".pickle"

    # get job_id
    job_id_sim = job_sim.job_id()
    job_id_delay_before_list = [
        job_delay_before.job_id() for job_delay_before in job_delay_before_list
    ]
    job_id_delay_after_list = [
        job_delay_after.job_id() for job_delay_after in job_delay_after_list
    ]

    # compose experimental data
    experiments_data = {
        "simulator": {
            "job_id": job_id_sim,
        },
        "delay_before": {
            "job_id": job_id_delay_before_list,
            "nseed": nseed,
        },
        "delay_after": {
            "job_id": job_id_delay_after_list,
            "nseed": nseed,
        },
        "qc_name": qc.name,
        "delay_duration_list": delay_duration_list,
    }

    pickle_dump(experiments_data, save_path)
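
The commented-out block at the top of save_experiments shows how save_path used to be derived. A portable sketch of the same construction with os.path.join (the author's absolute prefix is omitted; treat this as illustrative, not repo code):

import os
from datetime import date

path_to_dir = os.path.join("job_id", backend.name())
file_name = str(date.today()) + "_" + qc.name + "_" + backend.name()
if initial_layout is not None:
    file_name += "_" + str(initial_layout)
save_path = os.path.join(path_to_dir, file_name + ".pickle")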
Example #12
def _save_experiments( 
        qc_list, multi_sim, job_sim, 
        single_dense, multi_dense, job_dense, layouts_dense,
        single_na, multi_na, job_na, layouts_na,
        single_sabre, multi_sabre, job_sabre, layouts_sabre,
        single_xtalk, multi_xtalk, job_xtalk, layouts_xtalk,
        backend, shots_single, shots_multi, save_path):

    job_sim_s, job_sim_m = job_sim
    job_dense_s, job_dense_m = job_dense
    job_na_s, job_na_m = job_na
    job_sabre_s, job_sabre_m = job_sabre
    job_xtalk_s, job_xtalk_m = job_xtalk

    name_list = [qc.name for qc in qc_list]
    # compose experimental data
    experiments_data = {
        "job": {
            "simulator": {
                "qc": {
                    'single': qc_list, 
                    'multi': multi_sim
                },
                "job_id": {
                    'single': job_sim_s.job_id(),
                    'multi': job_sim_m.job_id(),
                },  
            }, 
            "dense": {
                "qc": {
                    'single': single_dense, 
                    'multi': multi_dense
                },
                "job_id": {
                    'single': job_dense_s.job_id(),
                    'multi': job_dense_m.job_id()
                },  
                "layout": layouts_dense,
            }, 
            "noise": {
                "qc": {
                    'single': single_na, 
                    'multi': multi_na
                },
                "job_id": {
                    'single': job_na_s.job_id(),
                    'multi': job_na_m.job_id()
                },  
                "layout": layouts_na,
            }, 
            "sabre": {
                "qc": {
                    'single': single_sabre, 
                    'multi': multi_sabre
                },
                "job_id": {
                    'single': job_sabre_s.job_id(),
                    'multi': job_sabre_m.job_id()
                },  
                "layout": layouts_sabre,
            }, 
            "xtalk": {
                "qc": {
                    'single': single_xtalk, 
                    'multi': multi_xtalk
                },
                "job_id": {
                    'single': job_xtalk_s.job_id(),
                    'multi': job_xtalk_m.job_id()
                },  
                "layout": layouts_xtalk,
            }, 
        },
        "bench_names": name_list,
        "backend": backend.name(), 
        'shots': {
            'single': shots_single, 
            'multi': shots_multi
        }
    }

    dir_path = os.path.dirname(save_path)
    if not os.path.exists(dir_path): 
        print('make directory: ', dir_path)
        os.mkdir(dir_path)
    pickle_dump(experiments_data, save_path)

    return experiments_data
Example #13
        "jsd_sem": before_jsd_sem,
    },
    "after_op": {
        "jsd_all": after_jsd_all,
        "jsd_mean": after_jsd_mean,
        "jsd_sem": after_jsd_sem,
    },
    "delay_duration": delay_duration_e6,
    "nseed": nseed
}
# -

pprint(e6)
save_path = "/Users/Yasuhiro/Documents/aqua/gp/experiments/waiting_duration/data/jsd/pulse_state_decay/" + backend_name + "/" + str(
    date.today()) + "_pulse state_e6_[0].pickle"
pickle_dump(e6, save_path)

# +
path_ = "/Users/Yasuhiro/Documents/aqua/gp/experiments/waiting_duration/job_id/" + backend_name + "/" + str(
    date.today()) + "_pulse state_e5_" + backend_name + "_["
_path = "].pickle"
experiments_data_path = path_ + str(0) + _path

exp_data = pickle_load(experiments_data_path)
job_sim = simulator.retrieve_job(exp_data["simulator"]["job_id"])
job_delay_before = [
    backend.retrieve_job(job_id)
    for job_id in exp_data["delay_before"]["job_id"]
]
job_delay_after = [
    backend.retrieve_job(job_id)
    for job_id in exp_data["delay_after"]["job_id"]
]
Example #14
def run_experiments(
    jobfile_dir,
    multi_circuit_components,
    backend,
    simulator,
    shots=1024,
    crosstalk_info_filepath=None,
    crosstalk_prop=None,
    fake_device=None,
):
    """FIXME!
    以下の実験パート
    量子ビット使用数が増加すると、measurement error mitigation ができなくなるので、
    一時的にコメントアウトしている。
    削除するかどうか、要検討。
    """

    # edit backend property
    if fake_device:
        _backend = fake_device
        backend = simulator
    else:
        _backend = backend

    selected_bprop = xtalk_scoped_bprop(_backend)
    onlyCircuit = False

    # qiskit
    # job, transpiled_circuit = _run_experiments(
    #     multi_circuit_components,
    #     backend,
    #     shots=shots,
    #     onlyCircuit=onlyCircuit,
    #     returnCircuit=True,
    #     optimization_level=3,
    # )

    # multi-tasking
    job_multi, circuit_multi = _run_experiments(
        multi_circuit_components,
        backend,
        shots=shots,
        onlyCircuit=onlyCircuit,
        returnCircuit=True,
        backend_properties=selected_bprop,
        multi_opt=True,
    )
    # job_multi_cal, state_labels_multi = run_meas_mitigation(
    #     circuit_multi, backend)

    # multi-tasking with xtalk noise
    job_xtalk, circuit_xtalk = _run_experiments(
        multi_circuit_components,
        backend,
        shots=shots,
        onlyCircuit=onlyCircuit,
        returnCircuit=True,
        backend_properties=selected_bprop,
        multi_opt=True,
        crosstalk_info_filepath=crosstalk_info_filepath,
        crosstalk_prop=crosstalk_prop,
    )
    # job_xtalk_cal, state_labels_xtalk = run_meas_mitigation(
    #     circuit_xtalk, backend)

    # run on simulator
    job_sim, original_circuit = _run_experiments(
        multi_circuit_components,
        backend=simulator,
        onlyCircuit=onlyCircuit,
        optimization_level=3,
        shots=shots,
        returnCircuit=True,
    )

    # get the job id
    # job_id = job.job_id()
    # job_id_cal = job_cal.job_id()

    job_id_multi = job_multi.job_id()
    # job_id_multi_cal = job_multi_cal.job_id()

    job_id_xtalk = job_xtalk.job_id()
    # job_id_xtalk_cal = job_xtalk_cal.job_id()

    job_id_sim = job_sim.job_id()

    return_dict = {
        # "qiskit": {
        #     "job": job_id,
        #     "circuit": tranpiled_circuit,
        #     # 'job_cal': job_id_cal,
        #     # 'state_labels': state_labels,
        # },
        "multi_opt": {
            "job": job_id_multi,
            "circuit": circuit_multi,
            # 'job_cal': job_id_multi_cal,
            # 'state_labels': state_labels_multi,
        },
        "xtalk_aware": {
            "job": job_id_xtalk,
            "circuit": circuit_xtalk,
            # 'job_cal': job_id_xtalk_cal,
            # 'state_labels': state_labels_xtalk,
        },
        "simulator": {"job": job_id_sim, "circuit": original_circuit},
    }

    execution_datetime = datetime.datetime.now().isoformat(timespec="seconds")

    benchmarking_circuits = ""
    for circ, num in multi_circuit_components.items():
        benchmarking_circuits = benchmarking_circuits + "_" + str(circ) + "-" + str(num)
    jobfile_path = jobfile_dir + execution_datetime + benchmarking_circuits + ".pickle"
    pickle_dump(return_dict, jobfile_path)
    print("############### successfully saved! ###############")
    # url = "https://hooks.slack.com/services/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"
    # send_slack('Experiments was done.', url)

    return [
        # transpiled_circuit,
        circuit_multi,
        circuit_xtalk,
        original_circuit,
    ]
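
Judging from the filename construction at the end, multi_circuit_components maps benchmark circuit names to repetition counts. A hypothetical invocation (backend names and the benchmark mix are placeholders; get_IBM_backend is the repo helper used in the earlier examples):

backend = get_IBM_backend("ibmq_toronto")
simulator = get_IBM_backend("ibmq_qasm_simulator")

circuit_multi, circuit_xtalk, original_circuit = run_experiments(
    jobfile_dir="./job_id/",
    multi_circuit_components={"Toffoli": 2, "QFT_4": 1},  # hypothetical mix
    backend=backend,
    simulator=simulator,
    shots=8192,
)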