def process_init(state): """Do all necessary process specific tasks before running grape. Args: state :: ProcessState - encapulates the task of one process Returns: nothing """ log_file = state.file_name + ".log" log_file_path = os.path.join(DATA_PATH, log_file) with open(log_file_path, "w") as log: # Redirect everything to a log file. sys.stdout = sys.stderr = log # Display pid, time, slice id, angle, circuit. print("PID={}\nWALL_TIME={}\nSLICE_ID={}\nANGLE={}\n{}" "".format(os.getpid(), time.time(), state.slice_index, state.angle, state.uccsdslice.circuit)) # Gather necessary grape parameters. U = state.uccsdslice.unitary() pulse_time = state.pulse_time steps = int(pulse_time * SPN) convergence = CONVERGENCE convergence.update({ "rate": state.lr, "learning_rate_decay": state.decay, }) # Run grape. print("GRAPE_START_TIME={}".format(time.time())) grape_sess = Grape(U=U, total_time=pulse_time, steps=steps, convergence=convergence, **GRAPE_CONFIG) print("GRAPE_END_TIME={}".format(time.time()))
def binary_search_for_shortest_pulse_time(min_time, max_time, tolerance=1):
    """Search between [min_time, max_time] up to 1ns tolerance.
    Assumes 20 steps per ns."""
    min_steps, max_steps = min_time * 20, max_time * 20
    mid_steps = min_steps  # guard in case the window is already within tolerance
    while min_steps + 20 * tolerance < max_steps:  # just estimate to +- 1ns
        mid_steps = int((min_steps + max_steps) / 2)
        total_time = mid_steps / 20.0
        print('\n\ntrying total_time: %s for unitary of size %s'
              % (str(total_time), str(U.shape)))
        SS = Grape(H0, Hops, Hnames, U, total_time, mid_steps,
                   states_concerned_list, convergence, reg_coeffs=reg_coeffs,
                   use_gpu=False, sparse_H=False, method='Adam', maxA=maxA,
                   show_plots=False, file_name=file_name, data_path=data_path)
        if SS.l < SS.conv.conv_target:  # if converged, search lower half
            max_steps = mid_steps
        else:
            min_steps = mid_steps
    return mid_steps / 20
def binary_search_for_shortest_pulse_time(state, min_steps, max_steps):
    """Search between [min_steps, max_steps] (inclusive).
    Args:
    state :: ProcessState - the state encapsulating the slice
                            to binary search on
    min_steps :: int - the minimum number of steps to consider
    max_steps :: int - the maximum number of steps to consider
    """
    # Get grape arguments.
    U = state.unitary
    # Copy the module-level dict so updates do not mutate the shared default.
    convergence = dict(GRAPE_CONVERGENCE)
    convergence.update({
        'rate': state.lr,
        'learning_rate_decay': state.decay,
    })

    # mid_steps is the number of steps we try for the pulse on each
    # iteration of binary search. It is in the "middle" of max_steps
    # and min_steps.
    # The most recent mid_steps that achieves convergence is the best.
    # If no mid_steps converges, display -1.
    prev_converged_mid_steps = -1
    while min_steps + BSG < max_steps:
        print("\n")
        mid_steps = int((min_steps + max_steps) / 2)
        pulse_time = mid_steps / SPN
        print("MAX_STEPS={}\nMIN_STEPS={}\nMID_STEPS={}\nTRIAL_PULSE_TIME={}"
              "\nGRAPE_START_TIME={}"
              "".format(max_steps, min_steps, mid_steps, pulse_time,
                        time.time()))
        sess = Grape(U=U, total_time=pulse_time, steps=mid_steps,
                     convergence=convergence, data_path=state.data_path,
                     file_name=state.file_name, **state.grape_config)
        print("GRAPE_END_TIME={}".format(time.time()))
        converged = sess.l < sess.conv.conv_target
        print("CONVERGED={}".format(converged))
        # If the trial converged, lower the ceiling.
        # If the trial did not converge, raise the floor.
        if converged:
            max_steps = mid_steps
            prev_converged_mid_steps = mid_steps
        else:
            min_steps = mid_steps
    # ENDWHILE

    # Display results.
    best_steps = prev_converged_mid_steps
    best_time = prev_converged_mid_steps / SPN
    print("BEST_STEPS={}, BEST_TIME={}"
          "".format(best_steps, best_time))

    # Log results.
    result_entry = {"time": best_time, "steps": best_steps,
                    "lr": state.lr, "decay": state.decay}
    result_file = "{}.json".format(state.file_name)
    result_file_path = os.path.join(state.data_path, result_file)
    with open(result_file_path, "w") as f:
        f.write("{}\n".format(json.dumps(result_entry,
                                         cls=CustomJSONEncoder)))
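# The invariant driving both versions of the search above is worth isolating:
# max_steps always marks a step count assumed to converge, min_steps one known
# to fail, and the window shrinks until it is narrower than the granularity
# (BSG steps here, 20 * tolerance steps in the earlier version). Below is a
# minimal, self-contained sketch of that loop; `converges(steps)` is a
# hypothetical stand-in for running Grape at a given step count and comparing
# sess.l against sess.conv.conv_target.
def shortest_converging_steps(min_steps, max_steps, converges, granularity=20):
    """Return the smallest tried step count for which converges(steps) is
    True, to within `granularity` steps, or -1 if no trial converged."""
    best = -1
    while min_steps + granularity < max_steps:
        mid_steps = (min_steps + max_steps) // 2
        if converges(mid_steps):
            # Converged: this step count works, so look for a shorter one.
            max_steps = mid_steps
            best = mid_steps
        else:
            # Did not converge: the pulse needs more steps.
            min_steps = mid_steps
    return best

# E.g., with a toy predicate where any pulse of >= 750 steps converges, the
# result lands within one granularity above the true threshold:
assert shortest_converging_steps(0, 2000, lambda s: s >= 750) in range(750, 770)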
def objective(state, config, reporter):
    """This function takes hyperparameters and reports their loss.
    Args:
    state :: ProcessState - contains information about the slice
    config :: dict - contains the hyperparameters to evaluate and
                     other information specified by ray or by us
    reporter :: ray.tune.function_runner.StatusReporter - report the loss
                                                          to this object

    Returns: nothing
    """
    # Unpack config. Log parameters.
    lr = config["lr"]
    decay = config["decay"]
    print("LEARNING_RATE={}\nDECAY={}"
          "".format(lr, decay))

    # Build necessary grape arguments using parameters.
    U = state.unitary
    convergence = {
        'rate': lr,
        'max_iterations': GRAPE_MAX_ITERATIONS,
        'learning_rate_decay': decay,
    }
    pulse_time = state.pulse_time
    steps = int(pulse_time * SPN)

    # Run grape.
    grape_start_time = time.time()
    print("GRAPE_START_TIME={}".format(grape_start_time))
    grape_sess = Grape(U=U, total_time=pulse_time, steps=steps,
                       convergence=convergence, file_name=state.file_name,
                       data_path=state.data_path, **state.grape_config)
    grape_end_time = time.time()
    print("GRAPE_END_TIME={}".format(grape_end_time))

    # Log results.
    loss = grape_sess.l
    print("LOSS={}".format(loss))
    trial = {
        'lr': lr,
        'decay': decay,
        'loss': loss,
        'wall_run_time': grape_end_time - grape_start_time,
    }
    trial_entry = "{}\n".format(json.dumps(trial, cls=CustomJSONEncoder))
    trial_file = "{}.json".format(state.file_name)
    trial_file_path = os.path.join(state.data_path, trial_file)
    # Hold an exclusive lock while appending so concurrent trials do not
    # interleave writes.
    with open(trial_file_path, "a+") as f:
        fcntl.flock(f, fcntl.LOCK_EX)
        f.write(trial_entry)
        fcntl.flock(f, fcntl.LOCK_UN)

    # Report results.
    reporter(neg_loss=-loss, done=True)
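# A minimal sketch of handing this objective to Ray Tune, assuming a Ray
# version whose function trainables receive (config, reporter), as the
# StatusReporter type in the docstring above implies. The grid values and
# the `state` binding are hypothetical placeholders.
from ray import tune

def run_tuning(state):
    # Hypothetical search space; the lr/decay grids are placeholders.
    search_config = {
        "lr": tune.grid_search([1e-3, 1e-2, 1e-1]),
        "decay": tune.grid_search([100, 1000]),
    }

    # Bind the process state so tune sees a (config, reporter) callable.
    def trainable(config, reporter):
        objective(state, config, reporter)

    tune.run(trainable, config=search_config)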
def evol_pulse(pulse, U=None, save=True, out_file=file_name,
               out_path=data_path):
    """Evolve `pulse` under its Hamiltonian via grape's EVOLVE method.
    If no target unitary is given, the pulse's own is used.
    """
    if U is None:
        U = pulse.U
    SS = Grape(pulse.H0, pulse.Hops, pulse.Hnames, U, pulse.total_time,
               pulse.steps, pulse.states_concerned_list, {},
               initial_guess=pulse.uks, reg_coeffs={}, use_gpu=False,
               sparse_H=False, method='EVOLVE', maxA=pulse.maxA,
               show_plots=False, save=save, file_name=out_file,
               data_path=out_path)
    return SS
def evol_pulse_from_file(filename, U=None, save=True, out_file=file_name,
                         out_path=data_path):
    """Load a pulse from `filename` and evolve it as in evol_pulse."""
    # N, d, and qubits are recovered from the file.
    N = None
    d = None
    qubits = None
    pulse = Pulse(N, d, qubits, fname=filename)
    if U is None:
        U = pulse.U
    res = Grape(pulse.H0, pulse.Hops, pulse.Hnames, U, pulse.total_time,
                pulse.steps, pulse.states_concerned_list, {},
                initial_guess=pulse.uks, reg_coeffs={}, use_gpu=False,
                sparse_H=False, method='EVOLVE', maxA=pulse.maxA,
                show_plots=False, save=save, file_name=out_file,
                data_path=out_path)
    return res
def process_init(state):
    """Carry out a computation specified by state.
    Args:
    state :: ProcessState - encapsulates the computation to perform

    Returns: nothing
    """
    # Redirect everything to a log file.
    with open(state.log_file_path, "w+") as log:
        sys.stdout = sys.stderr = log
        print("PID={}\nWALL_TIME={}\nSLICE_INDEX={}\nPULSE_TIME={}\nANGLE={}"
              "\nLR={}\nDECAY={}\n{}"
              "".format(os.getpid(), time.time(), state.slice_index,
                        state.pulse_time, state.angle, state.lr, state.decay,
                        state.uccsdslice.circuit))

        # Build necessary grape arguments using parameters.
        U = state.uccsdslice.unitary()
        convergence = {
            'rate': state.lr,
            'max_iterations': GRAPE_MAX_ITERATIONS,
            'learning_rate_decay': state.decay,
        }
        pulse_time = state.pulse_time
        steps = int(pulse_time * SPN)

        # Run grape.
        print("GRAPE_START_TIME={}".format(time.time()))
        grape_sess = Grape(U=U, total_time=pulse_time, steps=steps,
                           convergence=convergence,
                           file_name=state.log_file_name,
                           data_path=state.data_path, **GRAPE_CONFIG)
        print("GRAPE_END_TIME={}".format(time.time()))

        # Log results.
        loss = grape_sess.l
        print("LOSS={}".format(loss))
        trial_entry = {
            "loss": loss,
            "lr": state.lr,
            "decay": state.decay,
        }
        with open(state.trial_file_path, "a+") as f:
            fcntl.flock(f, fcntl.LOCK_EX)
            f.write(json.dumps(trial_entry, cls=CustomJSONEncoder) + "\n")
            fcntl.flock(f, fcntl.LOCK_UN)
def get_opt_pulses(seeds, convergence, reg_coeffs={}, method='ADAM'):
    """Optimize a list of seed pulses with grape.
    TODO: multi-threading for slices of pulses
    Args:
    seeds :: list of initial pulses
    """
    opt_pulses = []
    for s in seeds:  # TODO: parallelize for loop
        res = Grape(s.H0, s.Hops, s.Hnames, s.U, s.total_time, s.steps,
                    s.states_concerned_list, convergence,
                    initial_guess=s.uks, reg_coeffs=reg_coeffs,
                    use_gpu=False, sparse_H=False, method=method,
                    maxA=s.maxA, show_plots=False, save=False)
        opt_pulse = Pulse(s.N, s.d, s.qubits, uks=res.uks,
                          total_time=s.total_time, steps=s.steps,
                          H0=s.H0, Hops=s.Hops, Hnames=s.Hnames, U=s.U,
                          error=s.error,
                          states_concerned_list=s.states_concerned_list,
                          maxA=s.maxA)
        opt_pulses.append(opt_pulse)
    return opt_pulses
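# A sketch of driving this helper, assuming seed pulses saved earlier are
# reloaded via the Pulse(..., fname=...) constructor used elsewhere in this
# repo. The file names are hypothetical; the convergence keys mirror the
# dicts used in the scripts below.
seed_files = ["seed_pulse_0.h5", "seed_pulse_1.h5"]  # hypothetical paths
seeds = [Pulse(None, None, None, fname=f) for f in seed_files]
convergence = {
    'rate': 0.01,
    'update_step': 1,
    'max_iterations': 1000,
    'conv_target': 1e-4,
    'learning_rate_decay': 500,
}
opt_pulses = get_opt_pulses(seeds, convergence, method='ADAM')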
def concat_and_evol(N, d, pulses, U, file_name=file_name,
                    data_path=data_path):
    """Concatenate pulses and evolve under the combined schedule.
    Assume each pulse has the same dt interval, same complete Hops!
    """
    total_time = 0.0
    steps = 0
    uks = []
    maxA = []
    # Hops, Hnames = hamiltonian.get_Hops_and_Hnames(N, d)
    for i, p in enumerate(pulses):
        p = _extend_uks(p, N, d)
        if i == 0:
            H0 = p.H0
            Hops = p.Hops
            Hnames = p.Hnames
            states_concerned_list = p.states_concerned_list
            uks = p.uks
            maxA = p.maxA
        else:
            assert len(uks) == len(p.uks)
            assert len(maxA) == len(p.maxA)
            assert _ops_all_equal(Hops, p.Hops)
            # assert Hnames == p.Hnames
            uks = [np.concatenate((uks[ll], p.uks[ll]), axis=0)
                   for ll in range(len(p.uks))]
            maxA = [max(maxA[ll], p.maxA[ll]) for ll in range(len(p.maxA))]
        total_time += p.total_time
        steps += p.steps
    res = Grape(H0, Hops, Hnames, U, total_time, steps,
                states_concerned_list, {}, initial_guess=uks, reg_coeffs={},
                use_gpu=False, sparse_H=False, method='EVOLVE', maxA=maxA,
                show_plots=False, file_name=file_name, data_path=data_path)
    return res
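# For instance, stitching two previously saved slice pulses into one schedule
# and checking the combined evolution against a target unitary might look
# like the following. The file names and system dimensions are hypothetical,
# and the target is formed assuming the slices compose in order (p0 first),
# so the combined unitary is the matrix product p1.U @ p0.U.
import numpy as np

N, d = 4, 2  # hypothetical: qubit count and levels per transmon
p0 = Pulse(None, None, None, fname="slice_0_pulse.h5")  # hypothetical path
p1 = Pulse(None, None, None, fname="slice_1_pulse.h5")  # hypothetical path
U_target = np.dot(p1.U, p0.U)  # assumed composition order
res = concat_and_evol(N, d, [p0, p1], U_target)
print(res.l)  # final error of the concatenated pulse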
U = transmon_gate(U, d)
print("Target U initialized.", flush=True)

max_iterations = 8  # proof of concept for now. Raise this to see convergence.
decay = max_iterations / 2
convergence = {
    'rate': 0.01,
    'update_step': 1,
    'max_iterations': max_iterations,
    'conv_target': 1e-4,
    'learning_rate_decay': decay,
}
reg_coeffs = {'speed_up': 0.001}

uks, U_f = Grape(H0, Hops, Hnames, U, total_time, steps,
                 states_concerned_list, convergence, reg_coeffs=reg_coeffs,
                 use_gpu=False, sparse_H=False, method='L-BFGS-B',
                 maxA=[2 * np.pi * 0.3] * len(Hops), show_plots=False,
                 file_name='uccsd4', data_path=data_path)
def process_init(uccsdslice, slice_index, angles, file_names):
    """Do all necessary process-specific tasks before running grape.
    Args:
    uccsdslice :: UCCSDSlice - the slice to optimize
    slice_index :: int - the index of the slice
    angles :: list - the angles to parameterize the slice with
    file_names :: list - an output file name for each angle

    Returns: nothing
    """
    # Redirect output to a log file.
    log_file = "s{}.log".format(slice_index)
    log_file_path = os.path.join(DATA_PATH, log_file)
    with open(log_file_path, "w") as log:
        # sys.stdout = sys.stderr = log

        # Display pid, time, slice id, and circuit.
        print("PID={}\nTIME={}\nSLICE_ID={}"
              "".format(os.getpid(), time.time(), slice_index))
        print(uccsdslice.circuit)

        # Define search space.
        # time_upper_bound is the pulse time for a trivial
        # gate lookup that we should always beat.
        time_upper_bound = get_max_pulse_time(uccsdslice.circuit)
        print("TIME_UPPER_BOUND={}".format(time_upper_bound))

        # min_steps and max_steps are the min/max steps for
        # the search on the current angle. mid_steps is the steps
        # we will try for the current search.
        min_steps = 0
        max_steps = time_upper_bound * spn
        mid_steps = int((min_steps + max_steps) / 2)
        prev_converged_min_steps = None
        prev_converged_max_steps = None
        prev_converged_mid_steps = None
        prev_converged_sess = None
        initial_guess = None  # We begin with no initial guess.
        grape_sess = None

        for i, angle in enumerate(angles):
            # Get and display necessary information, update slice angles.
            print("\nANGLE={}".format(angle))
            file_name = file_names[i]
            uccsdslice.update_angles([angle] * len(uccsdslice.angles))
            U = uccsdslice.unitary()
            search_converged = False
            # We run the first trial for the same pulse time that the
            # last angle converged to.
            if prev_converged_mid_steps is not None:
                min_steps = prev_converged_min_steps
                max_steps = prev_converged_max_steps
                mid_steps = prev_converged_mid_steps
                initial_guess = prev_converged_sess.uks

            # Binary search for the minimum pulse time on the current angle.
            while not search_converged:
                # Search in the search space until we have a convergence
                # window of BNS_GRANULARITY.
                while min_steps + BNS_GRANULARITY < max_steps:
                    if initial_guess is not None:
                        initial_guess = resize_uks(initial_guess, mid_steps)
                    total_time = mid_steps * nps
                    print("\nMAX_STEPS={}\nMIN_STEPS={}\nMID_STEPS={}\nTIME={}"
                          "\nGRAPE_START_TIME={}"
                          "".format(max_steps, min_steps, mid_steps,
                                    total_time, time.time()))
                    grape_sess = Grape(H0, Hops, Hnames, U, total_time,
                                       mid_steps, convergence=convergence,
                                       reg_coeffs=reg_coeffs, use_gpu=use_gpu,
                                       sparse_H=sparse_H, method=method,
                                       maxA=maxA,
                                       states_concerned_list=states_concerned_list,
                                       show_plots=show_plots,
                                       file_name=file_name,
                                       data_path=DATA_PATH,
                                       initial_guess=initial_guess)
                    print("GRAPE_END_TIME={}".format(time.time()))
                    # If the trial converged, lower the upper bound.
                    # If the trial did not converge, raise the lower bound.
                    trial_converged = grape_sess.l <= grape_sess.conv.conv_target
                    print("TRIAL_CONVERGED={}".format(trial_converged))
                    if trial_converged:
                        search_converged = True
                        prev_converged_mid_steps = mid_steps
                        prev_converged_max_steps = max_steps
                        prev_converged_min_steps = min_steps
                        prev_converged_sess = grape_sess
                        max_steps = mid_steps
                    else:
                        min_steps = mid_steps
                    # Update mid_steps to run for the next trial.
                    mid_steps = int((max_steps + min_steps) / 2)
                # ENDWHILE
                # If binary search did not converge, then the pulse time is
                # too short and should be backed off.
                print("SEARCH_CONVERGED={}".format(search_converged))
                if not search_converged:
                    max_steps *= BACKOFF
                    mid_steps = int((max_steps + min_steps) / 2)
            # ENDWHILE
        print("CONVERGED_STEPS={}\nCONVERGED_TIME={}"
              "".format(prev_converged_mid_steps,
                        prev_converged_mid_steps * nps))
## forbid + pulse reg
reg_coeffs = {
    'amplitude': 0.01,
    'dwdt': 0.00007,
    'd2wdt2': 0.0,
    'forbidden_coeff_list': [10] * len(states_forbidden_list),
    'states_forbidden_list': states_forbidden_list,
    'forbid_dressed': False,
}

uks, U_f = Grape(H0, Hops, Hnames, U, total_time, steps, psi0,
                 convergence=convergence, method='L-BFGS-B',
                 draw=[states_draw_list, states_draw_names],
                 maxA=ops_max_amp, use_gpu=False, sparse_H=False,
                 reg_coeffs=reg_coeffs, unitary_error=1e-08,
                 show_plots=False, dressed_info=dressed_info,
                 file_name='transmon_transmon_CNOT', Taylor_terms=[20, 0],
                 data_path=data_path)
def objective(state, params):
    """This is the function to minimize.
    Args:
    state :: ProcessState - the state that encapsulates the optimization
    params :: dict - the new parameters to run the objective for

    Returns:
    results :: dict - a results dictionary interpretable by hyperopt
    """
    # Grab and log parameters.
    lr = params['lr']
    decay = params['decay']
    print("\nITERATION={}\nLEARNING_RATE={}\nDECAY={}"
          "".format(state.iteration_count, lr, decay))

    # Build necessary grape arguments using parameters.
    U = state.uccsdslice.unitary()
    convergence = {
        'rate': lr,
        'max_iterations': MAX_GRAPE_ITERATIONS,
        'learning_rate_decay': decay,
    }
    pulse_time = state.pulse_time
    steps = int(pulse_time * SPN)

    # Run grape.
    grape_start_time = time.time()
    print("GRAPE_START_TIME={}".format(grape_start_time))
    grape_sess = Grape(H0, Hops, Hnames, U, pulse_time, steps,
                       STATES_CONCERNED_LIST, convergence=convergence,
                       reg_coeffs=REG_COEFFS, method=METHOD,
                       maxA=MAX_AMPLITUDE, use_gpu=USE_GPU,
                       sparse_H=SPARSE_H, show_plots=SHOW_PLOTS,
                       file_name=state.file_name, data_path=DATA_PATH)
    grape_end_time = time.time()
    print("GRAPE_END_TIME={}".format(grape_end_time))

    # Log results.
    print("LOSS={}".format(grape_sess.l))
    trial = {
        'iter': state.iteration_count,
        'lr': lr,
        'decay': decay,
        'loss': grape_sess.l,
        'wall_run_time': grape_end_time - grape_start_time,
    }
    trial_file = state.file_name + ".json"
    trial_file_path = os.path.join(DATA_PATH, trial_file)
    with open(trial_file_path, "a+") as f:
        f.write(json.dumps(trial, cls=CustomJSONEncoder) + "\n")

    # Update state.
    state.trials.append(trial)
    state.iteration_count += 1

    return {
        'loss': grape_sess.l,
        'status': STATUS_OK,
    }
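# A minimal sketch of driving this objective with hyperopt's fmin. The search
# space bounds and evaluation budget are placeholders, and functools.partial
# binds the process state so fmin sees a one-argument function of params.
import functools

import numpy as np
from hyperopt import fmin, hp, tpe

# Hypothetical search space; bounds are placeholders.
space = {
    'lr': hp.loguniform('lr', np.log(1e-4), np.log(1e-1)),
    'decay': hp.uniform('decay', 1, 1000),
}

best = fmin(fn=functools.partial(objective, state),
            space=space, algo=tpe.suggest, max_evals=50)
print(best)  # best hyperparameters found, e.g. {'lr': ..., 'decay': ...}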