import numpy as np
import qsharp

# QuTiP and QInfer are optional dependencies; the tomography routine below
# checks for them at call time, hence the guarded imports.
try:
    import qutip as qt
except ImportError:
    qt = None

try:
    import qinfer as qi
except ImportError:
    qi = None


def single_qubit_process_tomography(operation, n_measurements=2000, n_particles=4000):
    """
    Perform single-qubit process tomography on a Q# operation, using QInfer's
    sequential Monte Carlo tomography support.

    :param operation: A Q# operation of type ((Pauli, Pauli) => Result) whose
        inputs are named `prep` and `meas` (respectively), representing a state
        preparation, evolution, and measurement.
    """
    if qt is None:
        raise ImportError("This function requires QuTiP.")
    if qi is None:
        raise ImportError("This function requires QInfer.")

    print("Preparing tomography model...")
    state_basis = qi.tomography.pauli_basis(1)
    prior = qi.tomography.BCSZChoiDistribution(state_basis)
    model = qi.tomography.TomographyModel(prior.basis)
    updater = qi.SMCUpdater(model, n_particles, prior)

    print("Performing tomography...")
    for idx_experiment in range(n_measurements):
        prep = qsharp.Pauli.sample()
        meas = qsharp.Pauli.sample()

        # Convert into a QuTiP object by using the standard transformation
        # between state and process tomography.
        qobj = 2.0 * qt.tensor(
            projector(prep.as_qobj()).trans(),
            projector(meas.as_qobj())
        )
        expparams = np.array(
            [(model.basis.state_to_modelparams(qobj), )],
            dtype=model.expparams_dtype
        )

        datum = 1 - operation.simulate(prep=prep, meas=meas)
        updater.update(datum, expparams)

    return {
        # We multiply by 2 to turn the estimate into a Choi–Jamiołkowski
        # operator instead of a Choi–Jamiołkowski state.
        'est_channel': 2.0 * model.basis.modelparams_to_state(updater.est_mean()),
        # Returning the updater allows for exploring properties not extracted
        # elsewhere.
        'posterior': updater
    }
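
# Both tomography routines in this excerpt call a module-level `projector`
# helper that is not shown here. The following is a minimal sketch of a
# compatible helper, assuming `Pauli.as_qobj()` returns the 2x2 Pauli matrix
# and that the intended projector is onto its +1 eigenstate; the actual helper
# in the original module may differ.
def projector(pauli_qobj):
    """Project onto the +1 eigenspace of a single-qubit Pauli operator."""
    # (I + P) / 2 is the projector onto the +1 eigenstate of Pauli P.
    return (qt.qeye(2) + pauli_qobj) / 2.0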
def run_qle(param_list, op_list, n_particles, n_experiments,
            resample_thresh, resample_a):
    # Set the prior mean and covariance based on the true parameter list.
    mean = np.zeros((len(param_list[0])))
    cov = np.zeros((len(param_list[0]), len(param_list[0])))
    for a in range(len(param_list[0])):
        mean[a] = param_list[0, a] + 0.08
        cov[a, a] = 0.2
        # mean[a] = param_list[0, a] + 0.15
        # cov[a, a] = param_list[0, a] + 0.11

    prior = qi.MultivariateNormalDistribution(mean, cov)
    model = simulated_QLE(op_list=op_list, param_list=param_list)
    param_names = model.modelparam_names
    updater = qi.SMCUpdater(
        model, n_particles, prior,
        resample_thresh=resample_thresh, resample_a=resample_a)
    heuristic = tHeurist(updater, t_field='ts')

    track_parameters = np.zeros((len(param_list[0]), n_experiments))
    track_locations = np.zeros(
        [n_particles, len(param_list[0]), n_experiments])
    track_weights = np.empty([n_particles, n_experiments])
    track_loss = np.empty(n_experiments)
    track_time = np.empty(n_experiments)
    resample_points = list()
    resample_points.append(0)
    probe_counter = 0

    for idx in range(n_experiments):
        probe_counter += 1
        experiment = heuristic()
        datum = model.simulate_experiment(param_list, experiment)
        updater.update(datum, experiment)

        new_eval = updater.est_mean()
        for param in range(len(param_list[0])):
            track_parameters[param, idx] = new_eval[param]

        new_loss = eval_loss(model, new_eval, param_list)
        track_loss[idx] = new_loss[0]

        track_locations[:, :, idx] = updater.particle_locations
        track_weights[:, idx] = updater.particle_weights
        track_time[idx] = experiment[0][0]  # time given by heuristic

        if updater.just_resampled:
            resample_points.append(idx)

    return (track_parameters, track_loss, track_locations, track_weights,
            track_time, param_names, resample_points)
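
# Illustrative driver (not part of the original script): a single-qubit example
# with one Pauli-Z term. It assumes the module-level simulated_QLE model,
# tHeurist heuristic and eval_loss function used inside run_qle are available;
# the operator and parameter values below are arbitrary choices for the sketch.
if __name__ == "__main__":
    sigma_z = np.array([[1., 0.], [0., -1.]], dtype=complex)
    true_params = np.array([[0.5]])      # shape (1, n_params): one "true" parameter
    (est_params, loss, locations, weights, times,
     names, resample_points) = run_qle(
        param_list=true_params,
        op_list=[sigma_z],
        n_particles=2000,
        n_experiments=200,
        resample_thresh=0.5,
        resample_a=0.98,
    )
    print("Final estimate for", names, ":", est_params[:, -1])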
def single_qubit_process_tomography(simulator, channel,
                                    n_measurements=2000, n_particles=4000):
    """
    Variant of the routine above that drives the Q# operation
    SingleQubitProcessTomographyMeasurement on an explicit simulator instance
    (older Python interop), rather than simulating a compiled operation directly.
    """
    from Microsoft.Quantum.Canon import SingleQubitProcessTomographyMeasurement

    print("Preparing tomography model...")
    state_basis = qi.tomography.pauli_basis(1)
    prior = qi.tomography.BCSZChoiDistribution(state_basis)
    model = qi.tomography.TomographyModel(prior.basis)
    updater = qi.SMCUpdater(model, n_particles, prior)

    print("Performing tomography...")
    for idx_experiment in range(n_measurements):
        prep = qsharp.Pauli.random()
        meas = qsharp.Pauli.random()

        # Convert into a QuTiP object by using the standard transformation
        # between state and process tomography.
        qobj = 2.0 * qt.tensor(
            projector(prep.as_qobj()).trans(),
            projector(meas.as_qobj())
        )
        expparams = np.array(
            [(model.basis.state_to_modelparams(qobj), )],
            dtype=model.expparams_dtype
        )

        datum = 1 - simulator.run(
            SingleQubitProcessTomographyMeasurement,
            prep, meas, channel
        ).Result
        updater.update(datum, expparams)

    return {
        # We multiply by 2 to turn the estimate into a Choi–Jamiołkowski
        # operator instead of a Choi–Jamiołkowski state.
        'est_channel': 2.0 * model.basis.modelparams_to_state(updater.est_mean()),
        # Returning the updater allows for exploring properties not extracted
        # elsewhere.
        'posterior': updater
    }
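
# Follow-up sanity check (illustrative, not from the original sample): with the
# Choi-operator normalisation returned above, a trace-preserving channel on a
# d-dimensional input space satisfies Tr(J) = d. The helper name and tolerance
# below are assumptions for this sketch.
def check_choi_trace(est_channel, dim=2, tol=0.1):
    """Warn if an estimated Choi operator's trace is far from the input dimension."""
    trace = abs(est_channel.tr())        # QuTiP Qobj trace
    if abs(trace - dim) > tol:
        print("Warning: Choi operator trace {:.3f} differs from expected {}".format(
            trace, dim))
    return trace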
def __init__(
    self,
    model_id,
    qid,
    opponent,
    qmla_core_info_database=None,
    learned_model_info=None,
    host_name='localhost',
    port_number=6379,
    log_file='QMD_log.log',
):
    self.log_file = log_file
    self.qmla_id = qid
    self.model_id = model_id
    self.opponent = int(opponent)

    # Get essential data
    if qmla_core_info_database is None:
        redis_databases = qmla.redis_settings.get_redis_databases_by_qmla_id(
            host_name, port_number, qid)
        qmla_core_info_database = redis_databases['qmla_core_info_database']
        qmla_core_info_dict = pickle.loads(
            qmla_core_info_database.get('qmla_settings'))
        self.probes_system = pickle.loads(
            qmla_core_info_database['probes_system'])
        self.probes_simulator = pickle.loads(
            qmla_core_info_database['probes_simulator'])
    else:
        qmla_core_info_dict = qmla_core_info_database.get('qmla_settings')
        self.probes_system = qmla_core_info_database['probes_system']
        self.probes_simulator = qmla_core_info_database['probes_simulator']

    self.plot_probes = pickle.load(
        open(qmla_core_info_dict['probes_plot_file'], 'rb'))
    self.plots_directory = qmla_core_info_dict['plots_directory']
    self.debug_mode = qmla_core_info_dict['debug_mode']
    self.plot_level = qmla_core_info_dict['plot_level']
    self.figure_format = qmla_core_info_dict['figure_format']

    # Assign attributes based on core data
    self.num_experiments = qmla_core_info_dict['num_experiments']
    self.num_particles = qmla_core_info_dict['num_particles']
    self.probe_number = qmla_core_info_dict['num_probes']
    self.true_model_constituent_operators = qmla_core_info_dict['true_oplist']
    self.true_model_params = qmla_core_info_dict['true_model_terms_params']
    self.true_model_name = qmla_core_info_dict['true_name']
    self.true_param_dict = qmla_core_info_dict['true_param_dict']
    self.experimental_measurements = qmla_core_info_dict[
        'experimental_measurements']
    self.experimental_measurement_times = qmla_core_info_dict[
        'experimental_measurement_times']
    self.results_directory = qmla_core_info_dict['results_directory']

    if learned_model_info is None:
        # Get data specific to this model, learned elsewhere and stored on
        # the redis database.
        try:
            redis_databases = qmla.redis_settings.get_redis_databases_by_qmla_id(
                host_name, port_number, qid)
            learned_models_info_db = redis_databases['learned_models_info_db']
        except BaseException:
            print("Unable to retrieve redis database.")
            raise

        model_id_str = str(float(model_id))
        try:
            learned_model_info = pickle.loads(
                learned_models_info_db.get(model_id_str),
                encoding='latin1')
        except BaseException:
            try:
                learned_model_info = pickle.loads(
                    learned_models_info_db.get(model_id_str))
            except BaseException:
                self.log_print(
                    ["Failed to unload model data for comparison"])

    # Assign parameters from model learned info, retrieved from database
    self.model_name = learned_model_info['name']
    self.times_learned_over = learned_model_info['times_learned_over']
    self.final_learned_params = learned_model_info['final_learned_params']
    self.exploration_strategy_of_this_model = learned_model_info[
        'exploration_strategy_of_this_model']
    self.posterior_marginal = learned_model_info['posterior_marginal']
    self.model_normalization_record = learned_model_info[
        'model_normalization_record']
    self.log_total_likelihood = learned_model_info['log_total_likelihood']
    self.estimated_mean_params = learned_model_info['estimated_mean_params']
    self.qhl_final_param_estimates = learned_model_info[
        'qhl_final_param_estimates']
    self.qhl_final_param_uncertainties = learned_model_info[
        'qhl_final_param_uncertainties']
    self.covariance_mtx_final = learned_model_info['covariance_mtx_final']
    self.expectation_values = learned_model_info['expectation_values']
    self.learned_hamiltonian = learned_model_info['learned_hamiltonian']
    self.track_experiment_parameters = learned_model_info[
        'track_experiment_parameters']
    self.log_print(
        ["Track exp params eg:", self.track_experiment_parameters[0]])

    # Process data from learned info
    if self.model_name == self.true_model_name:
        self.is_true_model = True
        self.log_print(["This is the true model for comparison."])
    else:
        self.is_true_model = False
    op = qmla.construct_models.Operator(self.model_name)
    self.model_terms_matrices = op.constituents_operators
    self.model_terms_parameters_final = np.array(self.final_learned_params)
    self.exploration_class = qmla.get_exploration_strategy.get_exploration_class(
        exploration_rules=self.exploration_strategy_of_this_model,
        log_file=self.log_file,
        qmla_id=self.qmla_id,
    )
    self.model_name_latex = self.exploration_class.latex_name(
        self.model_name)

    # New instances of model and updater used by QInfer
    self.log_print(["Getting QInfer model"])
    self.qinfer_model = self.exploration_class.get_qinfer_model(
        model_name=self.model_name,
        modelparams=self.model_terms_parameters_final,
        oplist=self.model_terms_matrices,
        true_oplist=self.true_model_constituent_operators,
        truename=self.true_model_name,
        trueparams=self.true_model_params,
        true_param_dict=self.true_param_dict,
        num_probes=self.probe_number,
        probe_dict=self.probes_system,
        sim_probe_dict=self.probes_simulator,
        exploration_rules=self.exploration_strategy_of_this_model,
        experimental_measurements=self.experimental_measurements,
        experimental_measurement_times=self.experimental_measurement_times,
        qmla_id=self.qmla_id,
        log_file=self.log_file,
        debug_mode=self.debug_mode,
    )

    # Reconstruct the updater from the results of learning
    self.reconstruct_updater = True  # optionally just load it instead
    if self.reconstruct_updater:
        try:
            # TODO this can cause problems - some models have a singular
            # covariance matrix - WHY?
            posterior_distribution = qi.MultivariateNormalDistribution(
                self.estimated_mean_params,
                self.covariance_mtx_final)
        except BaseException:
            self.log_print([
                "cov mtx is singular in trying to reconstruct SMC updater.\n",
                self.covariance_mtx_final
            ])
            raise

        num_particles_for_bf = max(
            5,
            int(self.exploration_class.fraction_particles_for_bf
                * self.num_particles)
        )  # the exploration strategy may use fewer particles for the comparison stage
        self.qinfer_updater = qi.SMCUpdater(
            model=self.qinfer_model,
            n_particles=num_particles_for_bf,
            prior=posterior_distribution,
            resample_thresh=self.exploration_class.qinfer_resampler_threshold,
            resampler=qi.LiuWestResampler(
                a=self.exploration_class.qinfer_resampler_a),
        )
        self.qinfer_updater._normalization_record = self.model_normalization_record
    else:
        # Optionally unpickle the entire updater
        # (first include the updater in ModelInstanceForLearning.learned_info_dict())
        self.qinfer_updater = pickle.loads(learned_model_info['updater'])

    # Fresh experiment design heuristic
    self.experiment_design_heuristic = self.exploration_class.get_heuristic(
        model_id=self.model_id,
        updater=self.qinfer_updater,
        oplist=self.model_terms_matrices,
        num_experiments=self.num_experiments,
        num_probes=self.probe_number,
        log_file=self.log_file,
        inv_field=[
            item[0] for item in self.qinfer_model.expparams_dtype[1:]
        ],
        max_time_to_enforce=self.exploration_class.max_time_to_consider,
        figure_format=self.figure_format,
    )

    # Delete extra data now that everything useful is extracted
    del qmla_core_info_dict, learned_model_info
    self.log_print(["Instantiated."])
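
# Illustrative sketch (assumed interface, not QMLA's actual comparison code):
# once two of these comparison-model instances are built, their reconstructed
# SMC updaters can be driven with identical experiments and the log Bayes
# factor read off from the difference of their log total likelihoods, which
# QInfer accumulates in each updater's normalization record. The helper name
# and the `get_datum` callable are hypothetical.
def pairwise_log_bayes_factor(model_a, model_b, experiments, get_datum):
    """Hypothetical helper: update both reconstructed updaters on the same data
    and return log( Pr(data | A) / Pr(data | B) )."""
    for experiment in experiments:
        datum = get_datum(experiment)    # e.g. a measurement on the true system
        model_a.qinfer_updater.update(datum, experiment)
        model_b.qinfer_updater.update(datum, experiment)
    return (model_a.qinfer_updater.log_total_likelihood
            - model_b.qinfer_updater.log_total_likelihood)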