def verify(self, circuits, num_nodes, num_threads, qubit_limit, eval_mode):
    '''
    Compare each circuit's reconstructed probability against the uncut circuit.

    Loads the cutter output (cut_solution, summation_terms) and the build
    output (reconstructed_prob) from disk, then delegates the error metric to
    the module-level verify() function.

    NOTE(review): this method shadows the module-level verify() it calls;
    inside the body the bare name still resolves to the global function,
    but renaming one of the two would be clearer.

    Args:
        circuits: dict of circuit settings, keyed by circuit name.
        num_nodes: unused here; kept for signature compatibility with callers.
        num_threads, qubit_limit, eval_mode: select which build folder to read.

    Returns:
        dict mapping (circuit_name, eval_mode) -> squared error.
    '''
    if self.verbose:
        print('*'*20, 'Verify', '*'*20)
    self.circuits = circuits
    errors = {}
    row_format = '{:<20} {:<10} {:<30}'
    if self.verbose:
        print(row_format.format('Circuit Name', 'QPU', 'Error'))
    for circuit_name in self.circuits:
        max_subcircuit_qubit = self.circuits[circuit_name]['max_subcircuit_qubit']
        source_folder = get_dirname(circuit_name=circuit_name, max_subcircuit_qubit=max_subcircuit_qubit,
                                    eval_mode=None, num_threads=None, qubit_limit=None, field='cutter')
        cut_solution = read_dict(filename='%s/cut_solution.pckl'%source_folder)
        circuit = cut_solution['circuit']
        complete_path_map = cut_solution['complete_path_map']
        subcircuits = cut_solution['subcircuits']
        # smart_order is the subcircuit ordering used by the first summation term
        summation_terms = pickle.load(open('%s/summation_terms.pckl'%source_folder, 'rb'))
        smart_order = [x[0] for x in summation_terms[0]]
        dest_folder = get_dirname(circuit_name=circuit_name, max_subcircuit_qubit=max_subcircuit_qubit,
                                  num_threads=num_threads, eval_mode=eval_mode, qubit_limit=qubit_limit, field='build')
        build_output = read_dict(filename='%s/build_output.pckl'%dest_folder)
        reconstructed_prob = build_output['reconstructed_prob']
        # (removed two unused reads of num_summation_terms_sampled / num_summation_terms)
        squared_error = verify(full_circuit=circuit, unordered=reconstructed_prob,
                               complete_path_map=complete_path_map, subcircuits=subcircuits,
                               smart_order=smart_order)
        key = (circuit_name, eval_mode)
        errors[key] = squared_error
        if self.verbose:
            print(row_format.format(*key, '%.1e'%squared_error))
    if self.verbose:
        print()
    return errors
def generate_meta_data(recursion_layer, counter, recursion_qubit, dest_folder, verbose):
    '''
    Prepare the dynamic-definition (DD) meta data for one recursion layer.

    Layer 0 starts a fresh schedule. Later layers pick the most promising
    previous layer to zoom into, advance its zoom pointer on disk, and append
    the next schedule to the meta data.

    Returns:
        dict with 'counter' and 'dynamic_definition_schedule' for layer 0;
        the updated meta_data dict for later layers;
        None when no layer remains to zoom into (recursion is finished).
    '''
    if recursion_layer == 0:
        # First layer: no prior state on disk, build a fresh schedule.
        dynamic_definition_schedule = initialize_dynamic_definition_schedule(
            counter=counter,
            recursion_qubit=recursion_qubit,
            verbose=verbose)
        return {
            'counter': counter,
            'dynamic_definition_schedule': dynamic_definition_schedule
        }
    else:
        # Resume from the meta data persisted by the previous recursion layer.
        meta_data = read_dict(filename='%s/meta_data.pckl' % (dest_folder))
        max_recursion_layer = find_max_recursion_layer(
            curr_recursion_layer=recursion_layer,
            dest_folder=dest_folder,
            meta_data=meta_data)
        if max_recursion_layer == -1:
            # No previous layer has a bin worth zooming into.
            print('-' * 50, 'DD recursions DONE', '-' * 50, flush=True)
            return None
        dynamic_definition_folder = '%s/dynamic_definition_%d' % (
            dest_folder, max_recursion_layer)
        build_output = read_dict(filename='%s/build_output.pckl' %
                                 (dynamic_definition_folder))
        zoomed_ctr = build_output['zoomed_ctr']      # index of next bin to zoom into
        max_states = build_output['max_states']      # bins ordered for zooming
        reconstructed_prob = build_output['reconstructed_prob']
        schedule = meta_data['dynamic_definition_schedule'][
            max_recursion_layer]
        print('Zoom in for results of recursion_layer %d' %
              max_recursion_layer, schedule, flush=True)
        print('state_idx = %d, p = %e' %
              (max_states[zoomed_ctr],
               reconstructed_prob[max_states[zoomed_ctr]]),
              flush=True)
        # deepcopy: next_dynamic_definition_schedule must not mutate the stored schedule
        next_schedule = next_dynamic_definition_schedule(
            recursion_layer=max_recursion_layer,
            schedule=copy.deepcopy(schedule),
            state_idx=max_states[zoomed_ctr],
            recursion_qubit=recursion_qubit,
            verbose=verbose)
        # Advance the zoom pointer and persist it so the next call resumes
        # at the following bin of this layer.
        build_output['zoomed_ctr'] += 1
        pickle.dump(
            build_output,
            open('%s/build_output.pckl' % (dynamic_definition_folder), 'wb'))
        meta_data['dynamic_definition_schedule'][
            recursion_layer] = next_schedule
        return meta_data
def find_max_recursion_layer(curr_recursion_layer, dest_folder):
    '''
    Scan all previous recursion layers and return the index of the layer whose
    next zoom bin has the largest probability (above 1e-16), or -1 if none.

    This variant reads the schedule from each layer's build_output
    ('dd_schedule') and marks merged qubits with -2.

    NOTE(review): this file also defines a 3-argument
    find_max_recursion_layer(..., meta_data) with the same name; whichever
    definition appears later shadows the other — confirm which variant is
    the intended one.
    '''
    max_subgroup_prob = 0
    max_recursion_layer = -1
    for recursion_layer in range(curr_recursion_layer):
        dynamic_definition_folder = '%s/dynamic_definition_%d' % (
            dest_folder, recursion_layer)
        build_output = read_dict(filename='%s/build_output.pckl' %
                                 (dynamic_definition_folder))
        zoomed_ctr = build_output['zoomed_ctr']
        max_states = build_output['max_states']
        reconstructed_prob = build_output['reconstructed_prob']
        schedule = build_output['dd_schedule']
        # print('layer %d schedule'%recursion_layer,schedule)
        # Count qubits still merged (-2) across all subcircuits of this layer.
        num_merged = 0
        for subcircuit_idx in schedule['subcircuit_state']:
            num_merged += schedule['subcircuit_state'][subcircuit_idx].count(
                -2)
        if num_merged == 0 or zoomed_ctr == len(max_states):
            '''
            num_merged==0 : all qubits have been computed for this DD Layer
            zoomed_ctr==len(max_states) : all bins have been computed for this DD layer
            '''
            continue
        print(
            'Examine recursion_layer %d, zoomed_ctr = %d, max_state = %d, p = %e'
            % (recursion_layer, zoomed_ctr, max_states[zoomed_ctr],
               reconstructed_prob[max_states[zoomed_ctr]]))
        # Keep the layer whose next bin holds the most probability mass.
        if reconstructed_prob[max_states[
                zoomed_ctr]] > max_subgroup_prob and reconstructed_prob[
                    max_states[zoomed_ctr]] > 1e-16:
            max_subgroup_prob = reconstructed_prob[max_states[zoomed_ctr]]
            max_recursion_layer = recursion_layer
    return max_recursion_layer
def write_subcircuit(key, eval_folder, counter, subcircuit_inst_prob, eval_mode):
    '''
    Write the probability vector of one subcircuit instance to disk, once per
    equivalent measurement basis.

    Args:
        key: (subcircuit_idx, inits, meas) identifying the instance.
        eval_folder: folder holding all_indexed_combinations.pckl and the
            raw_*.txt output files.
        counter: per-subcircuit dict providing 'd' and 'effective' qubit counts.
        subcircuit_inst_prob: probability vector (np.ndarray) or a scalar.
        eval_mode: 'runtime' writes only one representative file per subcircuit.

    Fixes vs. original: file handles now use `with` (previously leaked on
    write errors), isinstance instead of type()==, and the loop variable no
    longer shadows the unpacked `meas`.
    '''
    all_indexed_combinations = read_dict('%s/all_indexed_combinations.pckl' %
                                         (eval_folder))
    subcircuit_idx, inits, meas = key
    mutated_meas = mutate_measurement_basis(meas)
    for variant_meas in mutated_meas:
        index = all_indexed_combinations[subcircuit_idx][(tuple(inits),
                                                          tuple(variant_meas))]
        if eval_mode == 'runtime':
            # runtime mode measures a single representative instance (index 0)
            eval_file_name = '%s/raw_%d_0.txt' % (eval_folder, subcircuit_idx)
        else:
            eval_file_name = '%s/raw_%d_%d.txt' % (eval_folder, subcircuit_idx,
                                                   index)
        # print('writing', eval_file_name)
        with open(eval_file_name, 'w') as eval_file:
            eval_file.write('d=%d effective=%d\n' %
                            (counter[subcircuit_idx]['d'],
                             counter[subcircuit_idx]['effective']))
            for x in inits:
                eval_file.write('%s ' % x)
            eval_file.write('\n')
            for x in variant_meas:
                eval_file.write('%s ' % x)
            eval_file.write('\n')
            if isinstance(subcircuit_inst_prob, np.ndarray):
                for x in subcircuit_inst_prob:
                    eval_file.write('%e ' % x)
            else:
                eval_file.write('%e ' % subcircuit_inst_prob)
        if eval_mode == 'runtime':
            break
def find_max_recursion_layer(curr_recursion_layer, dest_folder, meta_data):
    '''
    Return the index of the previous recursion layer whose next zoom bin has
    the largest probability (strictly above 1e-16), or -1 when every layer is
    either fully computed or exhausted.

    This variant reads each layer's schedule from
    meta_data['dynamic_definition_schedule'] and identifies merged qubits by
    the 'merged' marker.
    '''
    best_prob = 0
    best_layer = -1
    for layer in range(curr_recursion_layer):
        layer_folder = '%s/dynamic_definition_%d' % (dest_folder, layer)
        build_output = read_dict(filename='%s/build_output.pckl' % (layer_folder))
        zoomed_ctr = build_output['zoomed_ctr']
        max_states = build_output['max_states']
        probs = build_output['reconstructed_prob']
        schedule = meta_data['dynamic_definition_schedule'][layer]
        # Total number of still-merged qubits across every subcircuit.
        merged_total = sum(
            states.count('merged')
            for states in schedule['subcircuit_state'].values())
        # Skip layers that are fully resolved or have no bins left to zoom.
        if merged_total == 0 or zoomed_ctr == len(max_states):
            continue
        candidate = probs[max_states[zoomed_ctr]]
        if candidate > best_prob and candidate > 1e-16:
            best_prob = candidate
            best_layer = layer
    return best_layer
def _attribute_shots(self, subcircuit_results, eval_mode, all_subcircuit_entries_sampled):
    '''
    Attribute the shots into respective subcircuit entries.

    Each measured subcircuit-instance probability is distributed, weighted by
    its attribution coefficients, into the sampled subcircuit entries, then
    each accumulated entry vector is written to '<eval_folder>/<idx>_<entry>.txt'.

    Fixes vs. original: output files now use `with` (previously open/close
    with a side-effect list comprehension, leaking the handle on error).
    NOTE(review): the two read_dict() calls reload the same pickles for every
    result key — caching per circuit_name would avoid repeated disk reads.
    '''
    row_format = '{:<15} {:<15} {:<25} {:<30}'
    if self.verbose:
        print('--> Attribute shots')
        print(row_format.format('circuit_name', 'subcircuit_idx', 'subcircuit_instance_idx', 'coefficient, subcircuit_entry_idx'))
    ctr = 0
    subcircuit_entry_probs = {}
    for key in subcircuit_results:
        ctr += 1
        circuit_name, subcircuit_idx, init, meas = key
        subcircuit_entries_sampled = all_subcircuit_entries_sampled[circuit_name]
        subcircuit_instance_prob = subcircuit_results[key]
        max_subcircuit_qubit = self.circuits[circuit_name]['max_subcircuit_qubit']
        source_folder = get_dirname(circuit_name=circuit_name, max_subcircuit_qubit=max_subcircuit_qubit,
                                    eval_mode=None, num_threads=None, qubit_limit=None, field='cutter')
        subcircuit_instances_idx = read_dict(filename='%s/subcircuit_instances_idx.pckl'%source_folder)
        subcircuit_instance_idx = subcircuit_instances_idx[subcircuit_idx][(init, meas)]
        subcircuit_instance_attribution = read_dict(filename='%s/subcircuit_instance_attribution.pckl'%source_folder)
        attributions = subcircuit_instance_attribution[subcircuit_idx][subcircuit_instance_idx]
        if self.verbose and ctr <= 10:
            print(row_format.format(circuit_name, subcircuit_idx, subcircuit_instance_idx, str(attributions)[:30]))
        eval_folder = get_dirname(circuit_name=circuit_name, max_subcircuit_qubit=max_subcircuit_qubit,
                                  eval_mode=eval_mode, num_threads=None, qubit_limit=None, field='evaluator')
        for coefficient, subcircuit_entry_idx in attributions:
            # Only accumulate entries that were actually sampled.
            if (subcircuit_idx, subcircuit_entry_idx) not in subcircuit_entries_sampled:
                continue
            subcircuit_entry_prob_key = (eval_folder, subcircuit_idx, subcircuit_entry_idx)
            if subcircuit_entry_prob_key in subcircuit_entry_probs:
                subcircuit_entry_probs[subcircuit_entry_prob_key] += coefficient*subcircuit_instance_prob
            else:
                subcircuit_entry_probs[subcircuit_entry_prob_key] = coefficient*subcircuit_instance_prob
    # Flush each accumulated subcircuit entry vector to its own text file.
    for subcircuit_entry_prob_key in subcircuit_entry_probs:
        subcircuit_entry_prob = subcircuit_entry_probs[subcircuit_entry_prob_key]
        eval_folder, subcircuit_idx, subcircuit_entry_idx = subcircuit_entry_prob_key
        with open('%s/%d_%d.txt'%(eval_folder, subcircuit_idx, subcircuit_entry_idx), 'w') as subcircuit_entry_file:
            for x in subcircuit_entry_prob:
                subcircuit_entry_file.write('%e '%x)
    if self.verbose:
        print('... Total %d subcircuit results attributed\n'%ctr)
def get_device_info(token, hub, group, project, device_name, fields, datetime):
    '''
    Load cached IBMQ device information for `device_name`, downloading and
    caching info for all qasm backends if today's cache is missing.

    The cache lives at ./devices/<date>/<device_name>.pckl. When the cache is
    empty, the devices directory for that date is wiped and rebuilt from the
    provider.

    Args:
        token, hub, group, project: IBMQ account credentials.
        device_name: backend whose info is returned.
        fields: which keys of the cached dict to return; 'device' forces a
            live provider lookup instead of a cached value.
        datetime: snapshot time; also determines the cache directory name.
            NOTE(review): this parameter shadows any imported `datetime`
            module/class within this function.

    Returns:
        dict mapping each requested field to its value.
    '''
    dirname = './devices/%s' % datetime.date()
    filename = '%s/%s.pckl' % (dirname, device_name)
    _device_info = read_dict(filename=filename)
    if len(_device_info) == 0:
        # Cache miss: reset the date folder and re-download all backends.
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        else:
            subprocess.run(['rm', '-r', dirname])
            os.makedirs(dirname)
        provider = load_IBMQ(token=token, hub=hub, group=group, project=project)
        for x in provider.backends():
            # Skip simulators; only 'qasm'-named backends are excluded here.
            if 'qasm' not in str(x):
                device = provider.get_backend(str(x))
                properties = device.properties(datetime=datetime)
                num_qubits = len(properties.qubits)
                print('Download device_info for %d-qubit %s' % (num_qubits, x))
                coupling_map = CouplingMap(device.configuration().coupling_map)
                noise_model = NoiseModel.from_backend(properties)
                basis_gates = noise_model.basis_gates
                _device_info = {
                    'properties': properties,
                    'coupling_map': coupling_map,
                    'noise_model': noise_model,
                    'basis_gates': basis_gates
                }
                pickle.dump(_device_info,
                            open('%s/%s.pckl' % (dirname, str(x)), 'wb'))
            print('-' * 50)
        # Re-read the cache entry for the requested device.
        _device_info = read_dict(filename=filename)
    device_info = {}
    for field in fields:
        if field == 'device':
            # Live backend objects are not cached; fetch from the provider.
            provider = load_IBMQ(token=token, hub=hub, group=group, project=project)
            device = provider.get_backend(device_name)
            device_info[field] = device
        else:
            device_info[field] = _device_info[field]
    return device_info
def _measure(self, eval_mode, num_nodes, num_threads):
    '''
    Compile the C measurement binary and run it over all raw subcircuit
    instance files, distributing the work across `num_threads` processes.

    For each circuit case, reads the cut solution, globs the raw_*.txt files
    per subcircuit in the evaluator folder, and launches
    ./cutqc/measure once per rank on that rank's share of the files.

    Args:
        eval_mode: evaluation mode string forwarded to the binary.
        num_nodes: unused here; kept for signature compatibility.
        num_threads: number of parallel measure processes per subcircuit.
    '''
    # Rebuild the measure binary from source (requires icc on PATH).
    subprocess.run(['rm', './cutqc/measure'])
    subprocess.run(
        ['icc', './cutqc/measure.c', '-o', './cutqc/measure', '-lm'])
    for circuit_case in self.circuit_cases:
        # circuit_case is '<name>|<max_subcircuit_qubit>'
        circuit_name = circuit_case.split('|')[0]
        max_subcircuit_qubit = int(circuit_case.split('|')[1])
        source_folder = get_dirname(
            circuit_name=circuit_name,
            max_subcircuit_qubit=max_subcircuit_qubit,
            early_termination=None,
            eval_mode=None,
            num_threads=None,
            qubit_limit=None,
            field='cutter')
        cut_solution = read_dict('%s/subcircuits.pckl' % (source_folder))
        if len(cut_solution) == 0:
            # No cut was found for this case; nothing to measure.
            continue
        assert (
            max_subcircuit_qubit == cut_solution['max_subcircuit_qubit'])
        full_circuit = cut_solution['circuit']
        subcircuits = cut_solution['subcircuits']
        eval_folder = get_dirname(
            circuit_name=circuit_name,
            max_subcircuit_qubit=max_subcircuit_qubit,
            early_termination=None,
            num_threads=None,
            eval_mode=eval_mode,
            qubit_limit=None,
            field='evaluator')
        for subcircuit_idx in range(len(subcircuits)):
            eval_files = glob.glob('%s/raw_%d_*.txt' %
                                   (eval_folder, subcircuit_idx))
            child_processes = []
            for rank in range(num_threads):
                # Partition the file indices among ranks.
                process_eval_files = find_process_jobs(
                    jobs=range(len(eval_files)),
                    rank=rank,
                    num_workers=num_threads)
                process_eval_files = [str(x) for x in process_eval_files]
                if rank == 0 and self.verbose:
                    print(
                        '%s subcircuit %d : rank %d/%d needs to measure %d/%d instances'
                        % (circuit_case, subcircuit_idx, rank, num_threads,
                           len(process_eval_files), len(eval_files)),
                        flush=True)
                p = subprocess.Popen(args=[
                    './cutqc/measure', '%d' % rank, eval_folder, eval_mode,
                    '%d' % full_circuit.num_qubits, '%d' % subcircuit_idx,
                    '%d' % len(process_eval_files), *process_eval_files
                ])
                child_processes.append(p)
            # Wait for all ranks of this subcircuit before moving on.
            [cp.wait() for cp in child_processes]
def _gather_subcircuits(self, eval_mode):
    '''
    Sample the summation terms of every circuit and collect the subcircuit
    instances that must be executed.

    For each circuit: resets its evaluator folder, samples summation terms
    (dummy_sample), derives the sampled subcircuit entries and instances, and
    groups the instances by their parent instance so that all measurement
    bases of one parent share a single circuit execution.

    Returns:
        (circ_dict, all_subcircuit_entries_sampled) where circ_dict maps
        (circuit_name, subcircuit_idx, parent_instance_idx) to
        {'circuit', 'shots', 'init', 'meas': [...]}.
    '''
    circ_dict = {}
    all_subcircuit_entries_sampled = {}
    for circuit_name in self.circuits:
        circuit = self.circuits[circuit_name]['circuit']
        max_subcircuit_qubit = self.circuits[circuit_name]['max_subcircuit_qubit']
        max_cuts = self.circuits[circuit_name]['max_cuts']
        num_subcircuits = self.circuits[circuit_name]['num_subcircuits']
        source_folder = get_dirname(circuit_name=circuit_name, max_subcircuit_qubit=max_subcircuit_qubit,
                                    eval_mode=None, num_threads=None, qubit_limit=None, field='cutter')
        eval_folder = get_dirname(circuit_name=circuit_name, max_subcircuit_qubit=max_subcircuit_qubit,
                                  num_threads=None, eval_mode=eval_mode, qubit_limit=None, field='evaluator')
        # Start the evaluator folder from scratch for this run.
        if os.path.exists(eval_folder):
            subprocess.run(['rm', '-r', eval_folder])
        os.makedirs(eval_folder)
        subcircuit_instances = read_dict(filename='%s/subcircuit_instances.pckl'%source_folder)
        summation_terms = pickle.load(open('%s/summation_terms.pckl'%source_folder, 'rb'))
        subcircuit_entries = read_dict(filename='%s/subcircuit_entries.pckl'%source_folder)
        summation_terms_sampled = dummy_sample(summation_terms=summation_terms)
        subcircuit_entries_sampled = get_subcircuit_entries_sampled(summation_terms=summation_terms_sampled)
        all_subcircuit_entries_sampled[circuit_name] = subcircuit_entries_sampled
        subcircuit_instances_sampled = get_subcircuit_instances_sampled(subcircuit_entries=subcircuit_entries, subcircuit_entry_samples=subcircuit_entries_sampled)
        for subcircuit_instance in subcircuit_instances_sampled:
            subcircuit_idx, subcircuit_instance_idx = subcircuit_instance
            # The parent instance owns the executable circuit and shot budget;
            # children only differ in (init, meas) labelling.
            parent_subcircuit_instance_idx = subcircuit_instances[subcircuit_idx][subcircuit_instance_idx]['parent']
            circuit = subcircuit_instances[subcircuit_idx][parent_subcircuit_instance_idx]['circuit']
            shots = subcircuit_instances[subcircuit_idx][parent_subcircuit_instance_idx]['shots']
            init = subcircuit_instances[subcircuit_idx][subcircuit_instance_idx]['init']
            meas = subcircuit_instances[subcircuit_idx][subcircuit_instance_idx]['meas']
            circ_dict_key = (circuit_name, subcircuit_idx, parent_subcircuit_instance_idx)
            if circ_dict_key in circ_dict:
                # Same parent: init must agree; accumulate the extra basis.
                assert circ_dict[circ_dict_key]['init'] == init
                circ_dict[circ_dict_key]['meas'].append(meas)
            else:
                circ_dict[circ_dict_key] = {
                    'circuit': circuit,
                    'shots': shots,
                    'init': init,
                    'meas': [meas]}
        pickle.dump(summation_terms_sampled, open('%s/summation_terms_sampled.pckl'%(eval_folder), 'wb'))
    return circ_dict, all_subcircuit_entries_sampled
def distribute(circuit_name, max_subcircuit_qubit, eval_mode, early_termination,
               num_threads, qubit_limit, recursion_layer, recursion_qubit,
               verbose):
    '''
    Prepare one dynamic-definition recursion layer for the C build step.

    Loads the cut solution, generates/advances the DD meta data, rebuilds the
    summation terms from the complete path map, resets the layer's folder, and
    writes the per-rank input files via write_files().

    Returns early (None) when no cut solution exists for this case.
    '''
    source_folder = get_dirname(circuit_name=circuit_name,
                                max_subcircuit_qubit=max_subcircuit_qubit,
                                early_termination=None,
                                eval_mode=None,
                                num_threads=None,
                                qubit_limit=None,
                                field='cutter')
    eval_folder = get_dirname(circuit_name=circuit_name,
                              max_subcircuit_qubit=max_subcircuit_qubit,
                              early_termination=None,
                              eval_mode=eval_mode,
                              num_threads=None,
                              qubit_limit=None,
                              field='evaluator')
    vertical_collapse_folder = get_dirname(
        circuit_name=circuit_name,
        max_subcircuit_qubit=max_subcircuit_qubit,
        early_termination=early_termination,
        num_threads=None,
        eval_mode=eval_mode,
        qubit_limit=None,
        field='vertical_collapse')
    dest_folder = get_dirname(circuit_name=circuit_name,
                              max_subcircuit_qubit=max_subcircuit_qubit,
                              early_termination=early_termination,
                              num_threads=num_threads,
                              eval_mode=eval_mode,
                              qubit_limit=qubit_limit,
                              field='build')
    case_dict = read_dict(filename='%s/subcircuits.pckl' % source_folder)
    all_indexed_combinations = read_dict(
        filename='%s/all_indexed_combinations.pckl' % (eval_folder))
    if len(case_dict) == 0:
        # No cut found for this case; nothing to distribute.
        return
    full_circuit = case_dict['circuit']
    subcircuits = case_dict['subcircuits']
    complete_path_map = case_dict['complete_path_map']
    counter = case_dict['counter']
    num_subcircuits = len(subcircuits)
    # meta_data is None when the DD recursion has finished; it is still
    # persisted so later calls observe the terminal state.
    meta_data = generate_meta_data(recursion_layer=recursion_layer,
                                   counter=counter,
                                   recursion_qubit=recursion_qubit,
                                   dest_folder=dest_folder,
                                   verbose=verbose)
    pickle.dump(meta_data, open('%s/meta_data.pckl' % (dest_folder), 'wb'))
    O_rho_pairs, combinations = get_combinations(
        complete_path_map=complete_path_map)
    num_cuts = len(O_rho_pairs)
    _, summation_terms = build(
        full_circuit=full_circuit,
        combinations=combinations,
        O_rho_pairs=O_rho_pairs,
        subcircuits=subcircuits,
        all_indexed_combinations=all_indexed_combinations)
    # Reset this recursion layer's folder before writing fresh inputs.
    dynamic_definition_folder = '%s/dynamic_definition_%d' % (dest_folder,
                                                              recursion_layer)
    if os.path.exists(dynamic_definition_folder):
        subprocess.run(['rm', '-r', dynamic_definition_folder])
    os.makedirs(dynamic_definition_folder)
    write_files(
        recursion_layer=recursion_layer,
        num_threads=num_threads,
        counter=counter,
        vertical_collapse_folder=vertical_collapse_folder,
        dynamic_definition_folder=dynamic_definition_folder,
        dynamic_definition_schedule=meta_data['dynamic_definition_schedule'],
        summation_terms=summation_terms,
        num_cuts=num_cuts,
        num_subcircuits=num_subcircuits,
        verbose=verbose)
def _run_subcircuits(self, eval_mode, num_nodes, num_threads, ibmq):
    '''
    Execute all subcircuit instances for every circuit case.

    Three modes:
      'sv'      : statevector-simulate every instance with a process pool.
      'runtime' : simulate only one instance per subcircuit (timing runs).
      '*ibmq*'  : submit to a real IBMQ device via Scheduler, then write the
                  retrieved probabilities with a process pool.

    Args:
        eval_mode: one of the modes above; also names the IBMQ device.
        num_nodes: unused here; kept for signature compatibility.
        num_threads: size of the multiprocessing pool.
        ibmq: dict of IBMQ credentials ('token', 'hub', 'group', 'project').

    Raises:
        NotImplementedError: for any other eval_mode.
    '''
    for circuit_case in self.circuit_cases:
        # circuit_case is '<name>|<max_subcircuit_qubit>'
        circuit_name = circuit_case.split('|')[0]
        max_subcircuit_qubit = int(circuit_case.split('|')[1])
        source_folder = get_dirname(
            circuit_name=circuit_name,
            max_subcircuit_qubit=max_subcircuit_qubit,
            early_termination=None,
            eval_mode=None,
            num_threads=None,
            qubit_limit=None,
            field='cutter')
        cut_solution = read_dict('%s/subcircuits.pckl' % (source_folder))
        if len(cut_solution) == 0:
            # No cut found; skip this case.
            continue
        assert (
            max_subcircuit_qubit == cut_solution['max_subcircuit_qubit'])
        subcircuits = cut_solution['subcircuits']
        complete_path_map = cut_solution['complete_path_map']
        counter = cut_solution['counter']
        eval_folder = get_dirname(
            circuit_name=circuit_name,
            max_subcircuit_qubit=max_subcircuit_qubit,
            early_termination=None,
            num_threads=None,
            eval_mode=eval_mode,
            qubit_limit=None,
            field='evaluator')
        # Reset the evaluator folder for this run.
        if os.path.exists(eval_folder):
            subprocess.run(['rm', '-r', eval_folder])
        os.makedirs(eval_folder)
        circ_dict, all_indexed_combinations = generate_subcircuit_instances(
            subcircuits=subcircuits, complete_path_map=complete_path_map)
        pickle.dump(
            all_indexed_combinations,
            open('%s/all_indexed_combinations.pckl' % (eval_folder), 'wb'))
        if eval_mode == 'sv':
            data = []
            for key in circ_dict:
                data.append([
                    key, circ_dict[key]['circuit'], eval_mode, eval_folder,
                    counter
                ])
            random.shuffle(
                data)  # Ensure a somewhat fair distribution of workloads
            chunksize = max(len(data) // num_threads // 10, 1)
            pool = mp.Pool(processes=num_threads)
            pool.starmap(simulate_subcircuit, data, chunksize=chunksize)
            pool.close()
        elif eval_mode == 'runtime':
            data = []
            # NOTE: only compute one instance per subcircuit
            subcircuit_idx_written = []
            for key in circ_dict:
                subcircuit_idx, _, _ = key
                if subcircuit_idx not in subcircuit_idx_written:
                    subcircuit_idx_written.append(subcircuit_idx)
                    data.append([
                        key, circ_dict[key]['circuit'], eval_mode,
                        eval_folder, counter
                    ])
            random.shuffle(
                data)  # Ensure a somewhat fair distribution of workloads
            chunksize = max(len(data) // num_threads // 10, 1)
            pool = mp.Pool(processes=num_threads)
            pool.starmap(simulate_subcircuit, data, chunksize=chunksize)
            pool.close()
        elif 'ibmq' in eval_mode:
            # NOTE: control whether to use real device
            scheduler = Scheduler(circ_dict=circ_dict,
                                  token=ibmq['token'],
                                  hub=ibmq['hub'],
                                  group=ibmq['group'],
                                  project=ibmq['project'],
                                  device_name=eval_mode,
                                  datetime=datetime.now())
            scheduler.submit_jobs(real_device=True,
                                  transpilation=True,
                                  verbose=True)
            scheduler.retrieve_jobs(force_prob=True,
                                    save_memory=False,
                                    save_directory=None,
                                    verbose=True)
            data = []
            for key in scheduler.circ_dict:
                data.append([
                    key, eval_folder, counter,
                    scheduler.circ_dict[key]['prob'], eval_mode
                ])
            random.shuffle(
                data)  # Ensure a somewhat fair distribution of workloads
            chunksize = max(len(data) // num_threads // 10, 1)
            pool = mp.Pool(processes=num_threads)
            pool.starmap(write_subcircuit, data, chunksize=chunksize)
            pool.close()
        else:
            raise NotImplementedError
def _build(self, eval_mode, qubit_limit, num_nodes, num_threads):
    '''
    Run the compiled C build step for every circuit and merge the per-rank
    partial results into one reconstructed probability vector per circuit.

    For each circuit: writes per-rank command files describing its share of
    the sampled summation terms, launches ./cutqc/build once per rank, then
    sums the single-line per-rank outputs and persists the result to
    build_output.pckl.

    Fixes vs. original: `astype(np.float)` replaced with `astype(float)` —
    the np.float alias was removed in NumPy 1.24 and raised AttributeError;
    file handles now use `with` (the per-rank summary log was never closed).

    Args:
        num_nodes: unused here; kept for signature compatibility.

    Returns:
        dict mapping circuit_name -> reconstructed probability np.ndarray.
    '''
    if self.verbose:
        print('--> Build')
        row_format = '{:<15} {:<20} {:<30}'
        print(row_format.format('circuit_name', 'summation_term_idx', 'summation_term'))
    reconstructed_probs = {}
    for circuit_name in self.circuits:
        circuit = self.circuits[circuit_name]['circuit']
        max_subcircuit_qubit = self.circuits[circuit_name]['max_subcircuit_qubit']
        max_cuts = self.circuits[circuit_name]['max_cuts']
        num_subcircuits = self.circuits[circuit_name]['num_subcircuits']
        source_folder = get_dirname(circuit_name=circuit_name, max_subcircuit_qubit=max_subcircuit_qubit,
                                    eval_mode=None, num_threads=None, qubit_limit=None, field='cutter')
        summation_terms = pickle.load(open('%s/summation_terms.pckl'%source_folder, 'rb'))
        eval_folder = get_dirname(circuit_name=circuit_name, max_subcircuit_qubit=max_subcircuit_qubit,
                                  eval_mode=eval_mode, num_threads=None, qubit_limit=None, field='evaluator')
        summation_terms_sampled = pickle.load(open('%s/summation_terms_sampled.pckl'%eval_folder, 'rb'))
        if self.verbose:
            for x in summation_terms_sampled[:10]:
                print(row_format.format(circuit_name, x['summation_term_idx'], str(x['summation_term'])[:30]))
            print('... Total %d summation terms sampled\n'%len(summation_terms_sampled))
        cut_solution = read_dict(filename='%s/cut_solution.pckl'%source_folder)
        full_circuit = cut_solution['circuit']
        subcircuits = cut_solution['subcircuits']
        complete_path_map = cut_solution['complete_path_map']
        counter = cut_solution['counter']
        dest_folder = get_dirname(circuit_name=circuit_name, max_subcircuit_qubit=max_subcircuit_qubit,
                                  eval_mode=eval_mode, num_threads=num_threads, qubit_limit=qubit_limit, field='build')
        # Start the build folder from scratch.
        if os.path.exists(dest_folder):
            subprocess.run(['rm', '-r', dest_folder])
        os.makedirs(dest_folder)
        '''
        TODO: handle the new summation_terms_sampled format
        1. Get rid of repeated summation term computations
        '''
        num_samples = 1
        child_processes = []
        for rank in range(num_threads):
            rank_summation_terms = find_process_jobs(jobs=summation_terms_sampled, rank=rank, num_workers=num_threads)
            build_command = './cutqc/build %d %s %s %d %d %d %d %d'%(
                rank, eval_folder, dest_folder, int(2**full_circuit.num_qubits),
                len(cut_solution['positions']), len(rank_summation_terms),
                len(subcircuits), num_samples)
            # Each rank reads its summation terms from a flat command file.
            with open('%s/build_command_%d.txt'%(dest_folder, rank), 'w') as build_command_file:
                for rank_summation_term in rank_summation_terms:
                    build_command_file.write('%e '%rank_summation_term['sampling_prob'])
                    build_command_file.write('%d '%rank_summation_term['frequency'])
                    for subcircuit_idx, subcircuit_entry_idx in rank_summation_term['summation_term']:
                        build_command_file.write('%d %d %d '%(
                            subcircuit_idx, subcircuit_entry_idx,
                            int(2**counter[subcircuit_idx]['effective'])))
            p = subprocess.Popen(args=build_command.split(' '))
            child_processes.append(p)
        for cp in child_processes:
            cp.wait()
        # Give the C processes a moment to flush their output files.
        time.sleep(1)
        elapsed = []
        reconstructed_prob = None
        for rank in range(num_threads):
            with open('%s/rank_%d_summary.txt'%(dest_folder, rank), 'r') as rank_logs:
                lines = rank_logs.readlines()
            # Sanity-check that the rank finished cleanly before trusting its output.
            assert lines[-2].split(' = ')[0] == 'Total build time' and lines[-1] == 'DONE\n'
            elapsed.append(float(lines[-2].split(' = ')[1]))
            rank_reconstructed_prob = None
            with open('%s/build_%d.txt'%(dest_folder, rank), 'r') as fp:
                for i, line in enumerate(fp):
                    if i > 0:
                        raise Exception('C build_output should not have more than 1 line')
                    # Trailing space leaves an empty final token; drop it.
                    rank_reconstructed_prob = np.array(line.split(' ')[:-1]).astype(float)
            subprocess.run(['rm', '%s/build_%d.txt'%(dest_folder, rank)])
            if isinstance(reconstructed_prob, np.ndarray):
                reconstructed_prob += rank_reconstructed_prob
            else:
                reconstructed_prob = rank_reconstructed_prob
        elapsed = np.array(elapsed)
        reconstructed_probs[circuit_name] = reconstructed_prob
        if self.verbose:
            print('%s _build took %.3e seconds'%(circuit_name, np.mean(elapsed)), flush=True)
            print('Sampled %d/%d summation terms'%(len(summation_terms_sampled), len(summation_terms)))
        pickle.dump({'reconstructed_prob': reconstructed_prob,
                     'num_summation_terms_sampled': len(summation_terms_sampled),
                     'num_summation_terms': len(summation_terms)},
                    open('%s/build_output.pckl'%(dest_folder), 'wb'))
    return reconstructed_probs
def _generate_subcircuits(self, circuit_name):
    '''
    Generate subcircuit variations and the summation terms.

    Reads the cut solution for `circuit_name`, generates the subcircuit
    instances and the summation-term decomposition, optionally prints
    previews of each structure, and pickles everything to the cutter folder.
    '''
    circuit = self.circuits[circuit_name]['circuit']
    max_subcircuit_qubit = self.circuits[circuit_name]['max_subcircuit_qubit']
    max_cuts = self.circuits[circuit_name]['max_cuts']
    num_subcircuits = self.circuits[circuit_name]['num_subcircuits']
    source_folder = get_dirname(circuit_name=circuit_name, max_subcircuit_qubit=max_subcircuit_qubit,
                                eval_mode=None, num_threads=None, qubit_limit=None, field='cutter')
    cut_solution = read_dict('%s/cut_solution.pckl'%(source_folder))
    assert(max_subcircuit_qubit == cut_solution['max_subcircuit_qubit'])
    full_circuit = cut_solution['circuit']
    subcircuits = cut_solution['subcircuits']
    complete_path_map = cut_solution['complete_path_map']
    counter = cut_solution['counter']
    subcircuit_instances, subcircuit_instances_idx = generate_subcircuit_instances(subcircuits=subcircuits, complete_path_map=complete_path_map)
    summation_terms, subcircuit_entries, subcircuit_instance_attribution = generate_summation_terms(full_circuit=full_circuit, subcircuits=subcircuits, complete_path_map=complete_path_map, subcircuit_instances_idx=subcircuit_instances_idx, counter=counter)
    if self.verbose:
        # Preview every generated structure (instances, entries, attribution,
        # summation terms), truncating long reprs to keep the tables readable.
        print('--> %s subcircuit_instances:'%circuit_name)
        row_format = '{:<30} {:<10} {:<30} {:<30}'
        for subcircuit_idx in subcircuit_instances:
            print(row_format.format('subcircuit_%d_instance_idx'%subcircuit_idx, '#shots', 'init', 'meas'))
            for subcircuit_instance_idx in subcircuit_instances[subcircuit_idx]:
                circuit = subcircuit_instances[subcircuit_idx][subcircuit_instance_idx]['circuit']
                shots = subcircuit_instances[subcircuit_idx][subcircuit_instance_idx]['shots']
                init = subcircuit_instances[subcircuit_idx][subcircuit_instance_idx]['init']
                meas = subcircuit_instances[subcircuit_idx][subcircuit_instance_idx]['meas']
                print(row_format.format(subcircuit_instance_idx, shots, str(init)[:30], str(meas)[:30]))
            # NOTE(review): prints the circuit bound in the last loop iteration.
            print(circuit)
        print('--> %s subcircuit_entries:'%circuit_name)
        row_format = '{:<30} {:<30}'
        for subcircuit_idx in subcircuit_entries:
            print(row_format.format('subcircuit_%d_entry_idx'%subcircuit_idx, 'kronecker term (coeff, instance)'))
            ctr = 0
            for subcircuit_entry_idx in subcircuit_entries[subcircuit_idx]:
                # Only integer keys are entries; show at most the first 10.
                if type(subcircuit_entry_idx) is int:
                    ctr += 1
                    if ctr <= 10:
                        print(row_format.format(subcircuit_entry_idx, str(subcircuit_entries[subcircuit_idx][subcircuit_entry_idx])[:30]))
            print('... Total %d subcircuit entries\n'%ctr)
        print('--> %s subcircuit_instance_attribution:'%circuit_name)
        row_format = '{:<30} {:<50}'
        for subcircuit_idx in subcircuit_instance_attribution:
            print(row_format.format('subcircuit_%d_instance_idx'%subcircuit_idx, 'coefficient, subcircuit_entry_idx'))
            ctr = 0
            for subcircuit_instance_idx in subcircuit_instance_attribution[subcircuit_idx]:
                ctr += 1
                if ctr > 10:
                    break
                print(row_format.format(subcircuit_instance_idx, str(subcircuit_instance_attribution[subcircuit_idx][subcircuit_instance_idx])[:50]))
            print('... Total %d subcircuit instances to attribute\n'%len(subcircuit_instance_attribution[subcircuit_idx]))
        print('--> %s summation_terms:'%circuit_name)
        row_format = '{:<10}'*len(subcircuits)
        for summation_term in summation_terms[:10]:
            row = []
            for subcircuit_entry in summation_term:
                subcircuit_idx, subcircuit_entry_idx = subcircuit_entry
                row.append('%d,%d'%(subcircuit_idx, subcircuit_entry_idx))
            print(row_format.format(*row))
        print('... Total %d summations\n'%len(summation_terms))
    # Persist every structure for the downstream evaluator/build stages.
    pickle.dump(subcircuit_instances, open('%s/subcircuit_instances.pckl'%(source_folder), 'wb'))
    pickle.dump(subcircuit_instances_idx, open('%s/subcircuit_instances_idx.pckl'%(source_folder), 'wb'))
    pickle.dump(subcircuit_instance_attribution, open('%s/subcircuit_instance_attribution.pckl'%(source_folder), 'wb'))
    pickle.dump(summation_terms, open('%s/summation_terms.pckl'%(source_folder), 'wb'))
    pickle.dump(subcircuit_entries, open('%s/subcircuit_entries.pckl'%(source_folder), 'wb'))
# Script-level verification driver: loads the cut solution selected by the
# command-line `args`, simulates the full (uncut) circuit, and prepares the
# x-axis for plotting. Exits silently when no cut solution exists.
source_folder = get_dirname(circuit_name=args.circuit_name,
                            max_subcircuit_qubit=args.max_subcircuit_qubit,
                            early_termination=None,
                            eval_mode=None,
                            num_threads=None,
                            qubit_limit=None,
                            field='cutter')
dest_folder = get_dirname(circuit_name=args.circuit_name,
                          max_subcircuit_qubit=args.max_subcircuit_qubit,
                          early_termination=args.early_termination,
                          num_threads=args.num_threads,
                          eval_mode=args.eval_mode,
                          qubit_limit=args.qubit_limit,
                          field='build')
case_dict = read_dict(filename='%s/subcircuits.pckl' % source_folder)
if len(case_dict) == 0:
    # No cut was found for this case; nothing to verify.
    exit(0)
full_circuit = case_dict['circuit']
subcircuits = case_dict['subcircuits']
complete_path_map = case_dict['complete_path_map']
# [print(x,complete_path_map[x]) for x in complete_path_map]
print('--> Verifying %s <--' % (args.circuit_name))
# Ground truth: statevector simulation of the uncut circuit.
sv = evaluate_circ(circuit=full_circuit, backend='statevector_simulator')
# for state, p in enumerate(sv):
#     if p>1e-5:
#         print(bin(state)[2:].zfill(full_circ_size),p)
x_ticks_to_plot = np.arange(2**full_circuit.num_qubits)
def post_process(self, circuit_cases, eval_mode, num_nodes, num_threads,
                 early_termination, qubit_limit, recursion_depth):
    '''
    Drive the full post-processing pipeline for every circuit case.

    Recompiles the C merge/build binaries (requires icc), then for each case
    runs up to `recursion_depth` dynamic-definition layers of
    distribute -> merge -> build, stopping early when merge reports
    termination.
    '''
    if self.verbose:
        print('-' * 20, 'Postprocess, mode = %s' % eval_mode, '-' * 20)
    self.circuit_cases = circuit_cases
    # Rebuild the C binaries from source.
    subprocess.run(['rm', './cutqc/merge'])
    subprocess.run(
        ['icc', '-mkl', './cutqc/merge.c', '-o', './cutqc/merge', '-lm'])
    subprocess.run(['rm', './cutqc/build'])
    subprocess.run([
        'icc', '-fopenmp', '-mkl', '-lpthread', '-march=native',
        './cutqc/build.c', '-o', './cutqc/build', '-lm'
    ])
    for circuit_case in self.circuit_cases:
        # circuit_case is '<name>|<max_subcircuit_qubit>'
        circuit_name = circuit_case.split('|')[0]
        max_subcircuit_qubit = int(circuit_case.split('|')[1])
        source_folder = get_dirname(
            circuit_name=circuit_name,
            max_subcircuit_qubit=max_subcircuit_qubit,
            early_termination=None,
            eval_mode=None,
            num_threads=None,
            qubit_limit=None,
            field='cutter')
        cut_solution = read_dict('%s/subcircuits.pckl' % (source_folder))
        if len(cut_solution) == 0:
            # No cut found; skip this case.
            continue
        assert (
            max_subcircuit_qubit == cut_solution['max_subcircuit_qubit'])
        full_circuit = cut_solution['circuit']
        subcircuits = cut_solution['subcircuits']
        complete_path_map = cut_solution['complete_path_map']
        counter = cut_solution['counter']
        dest_folder = get_dirname(
            circuit_name=circuit_name,
            max_subcircuit_qubit=max_subcircuit_qubit,
            early_termination=early_termination,
            num_threads=num_threads,
            eval_mode=eval_mode,
            qubit_limit=qubit_limit,
            field='build')
        # Start the build folder from scratch for this case.
        if os.path.exists('%s' % dest_folder):
            subprocess.run(['rm', '-r', dest_folder])
        os.makedirs(dest_folder)
        vertical_collapse_folder = get_dirname(
            circuit_name=circuit_name,
            max_subcircuit_qubit=max_subcircuit_qubit,
            early_termination=early_termination,
            num_threads=None,
            eval_mode=eval_mode,
            qubit_limit=None,
            field='vertical_collapse')
        for recursion_layer in range(recursion_depth):
            if self.verbose:
                print('*' * 20,
                      '%s Recursion Layer %d' %
                      (circuit_case, recursion_layer), '*' * 20,
                      flush=True)
            recursion_qubit = qubit_limit
            if self.verbose:
                print('__Distribute__', flush=True)
            distribute(circuit_name=circuit_name,
                       max_subcircuit_qubit=max_subcircuit_qubit,
                       eval_mode=eval_mode,
                       early_termination=early_termination,
                       num_threads=num_threads,
                       qubit_limit=qubit_limit,
                       recursion_layer=recursion_layer,
                       recursion_qubit=recursion_qubit,
                       verbose=self.verbose)
            if self.verbose:
                print('__Merge__', flush=True)
            terminated = self._merge(
                circuit_case=circuit_case,
                vertical_collapse_folder=vertical_collapse_folder,
                dest_folder=dest_folder,
                recursion_layer=recursion_layer,
                eval_mode=eval_mode)
            if terminated:
                # Merge signalled that the DD recursion is complete.
                break
            if self.verbose:
                print('__Build__', flush=True)
            reconstructed_prob = self._build(
                circuit_case=circuit_case,
                dest_folder=dest_folder,
                recursion_layer=recursion_layer,
                eval_mode=eval_mode)
def _vertical_collapse(self, early_termination, eval_mode):
    '''
    Compile and run the C vertical-collapse backend for every circuit case.

    One worker process is spawned per rank file produced by _organize; all
    workers are awaited before moving on. With early_termination == 1 the
    intermediate measured/rank files are deleted afterwards.

    Args:
        early_termination: 1 to delete intermediate evaluator files once
            the collapse finishes; also forwarded to the C binary.
        eval_mode: evaluation backend mode tag used to locate folders.

    Raises:
        Exception: when no subcircuit_kron_terms_*.txt rank files exist.
    '''
    # Recompile the vertical_collapse binary from scratch.
    subprocess.run(['rm', './cutqc/vertical_collapse'])
    subprocess.run([
        'icc', '-mkl', './cutqc/vertical_collapse.c', '-o',
        './cutqc/vertical_collapse', '-lm'
    ])
    for circuit_case in self.circuit_cases:
        circuit_name = circuit_case.split('|')[0]
        max_subcircuit_qubit = int(circuit_case.split('|')[1])
        source_folder = get_dirname(
            circuit_name=circuit_name,
            max_subcircuit_qubit=max_subcircuit_qubit,
            early_termination=None,
            eval_mode=None,
            num_threads=None,
            qubit_limit=None,
            field='cutter')
        cut_solution = read_dict('%s/subcircuits.pckl' % (source_folder))
        if len(cut_solution) == 0:
            # No cut solution for this case; nothing to collapse.
            continue
        assert (max_subcircuit_qubit == cut_solution['max_subcircuit_qubit'])
        # Only the full circuit's qubit count is needed by the C backend.
        full_circuit = cut_solution['circuit']
        eval_folder = get_dirname(
            circuit_name=circuit_name,
            max_subcircuit_qubit=max_subcircuit_qubit,
            early_termination=None,
            num_threads=None,
            eval_mode=eval_mode,
            qubit_limit=None,
            field='evaluator')
        vertical_collapse_folder = get_dirname(
            circuit_name=circuit_name,
            max_subcircuit_qubit=max_subcircuit_qubit,
            early_termination=early_termination,
            num_threads=None,
            eval_mode=eval_mode,
            qubit_limit=None,
            field='vertical_collapse')
        rank_files = glob.glob('%s/subcircuit_kron_terms_*.txt' % eval_folder)
        if len(rank_files) == 0:
            raise Exception(
                'There are no rank_files for _vertical_collapse')
        # Start from a clean output folder.
        if os.path.exists(vertical_collapse_folder):
            subprocess.run(['rm', '-r', vertical_collapse_folder])
        os.makedirs(vertical_collapse_folder)
        # Launch one worker per rank file, then wait for all of them.
        child_processes = []
        for rank in range(len(rank_files)):
            subcircuit_kron_terms_file = '%s/subcircuit_kron_terms_%d.txt' % (
                eval_folder, rank)
            p = subprocess.Popen(args=[
                './cutqc/vertical_collapse',
                '%d' % full_circuit.num_qubits,
                '%s' % subcircuit_kron_terms_file,
                '%s' % eval_folder,
                '%s' % vertical_collapse_folder,
                '%d' % early_termination,
                '%d' % rank,
                '%s' % eval_mode
            ])
            child_processes.append(p)
        for cp in child_processes:
            cp.wait()
        if early_termination == 1:
            # Intermediate evaluator files are no longer needed; delete them.
            for measured_file in glob.glob('%s/measured*.txt' % eval_folder):
                subprocess.run(['rm', measured_file])
            for rank in range(len(rank_files)):
                subprocess.run([
                    'rm',
                    '%s/subcircuit_kron_terms_%d.txt' % (eval_folder, rank)
                ])
def _organize(self, eval_mode, num_threads):
    '''
    Organize parallel processing for the subsequent vertical collapse procedure.

    For each circuit case, builds the Kronecker-term jobs and writes one
    subcircuit_kron_terms_<rank>.txt job file per worker thread into the
    evaluator folder, splitting the jobs across ranks (or writing a single
    representative term per subcircuit in 'runtime' mode).

    Args:
        eval_mode: evaluation backend mode tag; 'runtime' triggers the
            single-representative-term shortcut.
        num_threads: number of worker ranks to split jobs across.
    '''
    for circuit_case in self.circuit_cases:
        circuit_name = circuit_case.split('|')[0]
        max_subcircuit_qubit = int(circuit_case.split('|')[1])
        source_folder = get_dirname(
            circuit_name=circuit_name,
            max_subcircuit_qubit=max_subcircuit_qubit,
            early_termination=None,
            eval_mode=None,
            num_threads=None,
            qubit_limit=None,
            field='cutter')
        cut_solution = read_dict('%s/subcircuits.pckl' % (source_folder))
        if len(cut_solution) == 0:
            # No cut solution for this case; nothing to organize.
            continue
        assert (max_subcircuit_qubit == cut_solution['max_subcircuit_qubit'])
        full_circuit = cut_solution['circuit']
        subcircuits = cut_solution['subcircuits']
        complete_path_map = cut_solution['complete_path_map']
        counter = cut_solution['counter']
        eval_folder = get_dirname(
            circuit_name=circuit_name,
            max_subcircuit_qubit=max_subcircuit_qubit,
            early_termination=None,
            num_threads=None,
            eval_mode=eval_mode,
            qubit_limit=None,
            field='evaluator')
        all_indexed_combinations = read_dict(
            filename='%s/all_indexed_combinations.pckl' % (eval_folder))
        O_rho_pairs, combinations = get_combinations(
            complete_path_map=complete_path_map)
        kronecker_terms, _ = build(
            full_circuit=full_circuit,
            combinations=combinations,
            O_rho_pairs=O_rho_pairs,
            subcircuits=subcircuits,
            all_indexed_combinations=all_indexed_combinations)
        for rank in range(num_threads):
            # Context manager guarantees the job file is closed even if a
            # write fails partway through.
            with open('%s/subcircuit_kron_terms_%d.txt' % (eval_folder, rank),
                      'w') as subcircuit_kron_terms_file:
                subcircuit_kron_terms_file.write('%d subcircuits\n' %
                                                 len(kronecker_terms))
                for subcircuit_idx in kronecker_terms:
                    if eval_mode == 'runtime':
                        # Runtime mode only measures timing, so one
                        # representative term per subcircuit suffices.
                        rank_subcircuit_kron_terms = [
                            list(kronecker_terms[subcircuit_idx].keys())[0]
                        ]
                    else:
                        rank_subcircuit_kron_terms = find_process_jobs(
                            jobs=list(kronecker_terms[subcircuit_idx].keys()),
                            rank=rank,
                            num_workers=num_threads)
                    subcircuit_kron_terms_file.write(
                        'subcircuit %d kron_terms %d num_effective %d\n' %
                        (subcircuit_idx, len(rank_subcircuit_kron_terms),
                         counter[subcircuit_idx]['effective']))
                    for subcircuit_kron_term in rank_subcircuit_kron_terms:
                        subcircuit_kron_terms_file.write(
                            'subcircuit_kron_index=%d kron_term_len=%d\n' %
                            (kronecker_terms[subcircuit_idx]
                             [subcircuit_kron_term],
                             len(subcircuit_kron_term)))
                        # Plain loops (not side-effect comprehensions) emit
                        # the per-term entries.
                        if eval_mode == 'runtime':
                            for x in subcircuit_kron_term:
                                subcircuit_kron_terms_file.write('%d,0 ' %
                                                                 (x[0]))
                        else:
                            for x in subcircuit_kron_term:
                                subcircuit_kron_terms_file.write(
                                    '%d,%d ' % (x[0], x[1]))
                        subcircuit_kron_terms_file.write('\n')
                    if rank == 0 and self.verbose:
                        print(
                            '%s subcircuit %d : rank %d/%d needs to vertical collapse %d/%d instances'
                            % (circuit_case, subcircuit_idx, rank, num_threads,
                               len(rank_subcircuit_kron_terms),
                               len(kronecker_terms[subcircuit_idx])),
                            flush=True)