def _setup(self, config):
    self.qoc_config = copy(GRAPE_CONFIG)
    self.log_config = {}
    learning_rate = config["lr"]
    evolution_time = config["time"]
    optimizer = Adam(learning_rate=learning_rate)
    system_eval_count = control_eval_count = int(evolution_time)
    costs = get_costs(control_eval_count, evolution_time)
    self.qoc_config.update({
        "control_eval_count": control_eval_count,
        "costs": costs,
        "evolution_time": evolution_time,
        "optimizer": optimizer,
        "system_eval_count": system_eval_count,
    })
    self.log_config.update({
        "lr": learning_rate,
        "time": evolution_time,
    })
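# Hedged illustration of the hyperparameter `config` dict consumed by _setup
# above (placeholder values, not tuned settings from this repo):
#     config = {"lr": 1e-3, "time": 100}
# With these values, _setup builds Adam(learning_rate=1e-3), sets
# control_eval_count = system_eval_count = int(100) = 100, derives the costs
# from get_costs(100, 100), and records "lr" and "time" in self.log_config.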
# + np.divide(CHI_PRIME, 2) * np.kron(A_DAGGER_2_A_2_C, N_T))
H_CONTROL_0_0 = np.kron(A_C, I_T)
H_CONTROL_0_1 = np.kron(A_DAGGER_C, I_T)
H_CONTROL_1_0 = np.kron(I_C, A_T)
H_CONTROL_1_1 = np.kron(I_C, A_DAGGER_T)
hamiltonian = (lambda params, t:
               H_SYSTEM_0
               + params[0] * H_CONTROL_0_0
               + anp.conjugate(params[0]) * H_CONTROL_0_1
               + params[1] * H_CONTROL_1_0
               + anp.conjugate(params[1]) * H_CONTROL_1_1)

# Define the optimization.
PARAM_COUNT = 2
MAX_PARAM_NORMS = (
    MAX_AMP_C,
    MAX_AMP_T,
)
OPTIMIZER = Adam()
PULSE_TIME = 500
PULSE_STEP_COUNT = 500
ITERATION_COUNT = 5000

# Define the problem.
INITIAL_STATE_0 = np.kron(CAVITY_ZERO, TRANSMON_G)
TARGET_STATE_0 = np.kron(CAVITY_ONE, TRANSMON_G)
INITIAL_STATES = np.stack((INITIAL_STATE_0,))
TARGET_STATES = np.stack((TARGET_STATE_0,))
COSTS = (TargetInfidelity(TARGET_STATES),
         ParamVariation(MAX_PARAM_NORMS, PARAM_COUNT, PULSE_STEP_COUNT))

# Define the output.
LOG_ITERATION_STEP = 1
SAVE_ITERATION_STEP = 1
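# Sanity-check sketch (toy operators, not the CAVITY/TRANSMON matrices defined
# elsewhere in this file): because every control term above is paired with its
# conjugate, the Hamiltonian stays Hermitian for any complex drive amplitudes.
import numpy as np

_a = np.array([[0., 1.], [0., 0.]])                    # 2-level lowering operator
_i2 = np.eye(2)
_h_sys = np.zeros((4, 4))                              # stand-in for H_SYSTEM_0
_h00, _h01 = np.kron(_a, _i2), np.kron(_a.conj().T, _i2)
_h10, _h11 = np.kron(_i2, _a), np.kron(_i2, _a.conj().T)
_params = np.array([0.3 + 0.1j, -0.2 + 0.4j])          # arbitrary complex controls
_h = (_h_sys + _params[0] * _h00 + np.conj(_params[0]) * _h01
             + _params[1] * _h10 + np.conj(_params[1]) * _h11)
assert np.allclose(_h, _h.conj().T)                    # Hermitian, as expected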
def grape_schroedinger_discrete(
        control_count,
        control_eval_count,
        costs,
        evolution_time,
        hamiltonian,
        initial_states,
        system_eval_count,
        complex_controls=False,
        cost_eval_step=1,
        impose_control_conditions=None,
        initial_controls=None,
        interpolation_policy=InterpolationPolicy.LINEAR,
        iteration_count=1000,
        log_iteration_step=10,
        magnus_policy=MagnusPolicy.M2,
        max_control_norms=None,
        min_error=0,
        optimizer=Adam(),
        save_file_path=None,
        save_intermediate_states=False,
        save_iteration_step=0,
):
    """
    This method optimizes the evolution of a set of states under the
    schroedinger equation for time-discrete control parameters.

    Args:
    control_count :: int - This is the number of control parameters
        that qoc should optimize over. I.e. it is the length of the
        `controls` array passed to the hamiltonian.
    control_eval_count :: int >= 2 - This value determines where
        definite values of the control parameters are evaluated. This
        value is used as:
        `control_eval_times` = numpy.linspace(0, `evolution_time`, `control_eval_count`).
    costs :: iterable(qoc.models.cost.Cost) - This list specifies all
        the cost functions that the optimizer should evaluate. This list
        defines the criteria for an "optimal" control set.
    evolution_time :: float - This value specifies the duration of the
        system's evolution.
    hamiltonian :: (controls :: ndarray (control_count), time :: float)
        -> hamiltonian_matrix :: ndarray (hilbert_size x hilbert_size)
        - This function provides the system's hamiltonian given a set
        of control parameters and a time value.
    initial_states :: ndarray (state_count x hilbert_size x 1)
        - This array specifies the states that should be evolved under
        the specified system. These are the states at the beginning of
        the evolution.
    system_eval_count :: int >= 2 - This value determines how many
        times during the evolution the system is evaluated, including
        the initial value of the system. For the schroedinger evolution,
        this value determines the time step of integration. This value
        is used as:
        `system_eval_times` = numpy.linspace(0, `evolution_time`, `system_eval_count`).
    complex_controls :: bool - This value determines if the control
        parameters are complex-valued. If some controls are real only
        or imaginary only while others are complex, real only and
        imaginary only controls can be simulated by taking the real or
        imaginary part of a complex control.
    cost_eval_step :: int >= 1 - This value determines how often
        step-costs are evaluated. The units of this value are in
        system_eval steps. E.g. if this value is 2, step-costs will be
        computed every 2 system_eval steps.
    impose_control_conditions :: (controls :: (control_eval_count x control_count))
        -> (controls :: (control_eval_count x control_count))
        - This function is called after every optimization update.
        Example uses include setting boundary conditions on the control
        parameters.
    initial_controls :: ndarray (control_eval_count x control_count)
        - This array specifies the control parameters at each control
        eval point. These values will be used to determine the
        `controls` argument passed to the `hamiltonian` function at
        each time step for the first iteration of optimization.
    interpolation_policy :: qoc.models.interpolationpolicy.InterpolationPolicy
        - This value specifies how control parameters should be
        interpreted at points where they are not defined.
    iteration_count :: int - This value determines how many total
        system evolutions the optimizer will perform to determine the
        optimal control set.
    log_iteration_step :: int - This value determines how often qoc
        logs progress to stdout.
        This value is specified in units of optimization iterations.
        Set this value to 0 to disable logging.
    magnus_policy :: qoc.models.magnuspolicy.MagnusPolicy - This value
        specifies what method should be used to perform the magnus
        expansion of the system matrix for ode integration. Choosing a
        higher order magnus expansion will yield more accuracy, but it
        will result in a longer compute time.
    max_control_norms :: ndarray (control_count) - This array specifies
        the element-wise maximum norm that each control is allowed to
        achieve. If, in optimization, the value of a control exceeds its
        maximum norm, the control will be rescaled to its maximum norm.
        Note that for non-complex values, this feature acts exactly as
        absolute value clipping.
    min_error :: float - This value is the threshold below which
        optimization will terminate.
    optimizer :: class instance - This optimizer object defines the
        gradient-based procedure for minimizing the total contribution
        of all cost functions with respect to the control parameters.
    save_file_path :: str - This is the full path to the file where
        information about program execution will be stored.
        E.g. "./out/foo.h5"
    save_intermediate_states :: bool - If this value is set to True,
        qoc will write the states to the save file after every
        system_eval step.
    save_iteration_step :: int - This value determines how often qoc
        saves progress to the save file specified by `save_file_path`.
        This value is specified in units of optimization iterations.
        Set this value to 0 to disable saving.

    Returns:
    result :: qoc.models.schroedingermodels.GrapeSchroedingerResult
    """
    # Initialize the controls.
    initial_controls, max_control_norms = initialize_controls(
        complex_controls, control_count, control_eval_count,
        evolution_time, initial_controls, max_control_norms)

    # Construct the program state.
    pstate = GrapeSchroedingerDiscreteState(
        complex_controls, control_count, control_eval_count,
        cost_eval_step, costs, evolution_time, hamiltonian,
        impose_control_conditions, initial_controls, initial_states,
        interpolation_policy, iteration_count, log_iteration_step,
        max_control_norms, magnus_policy, min_error, optimizer,
        save_file_path, save_intermediate_states, save_iteration_step,
        system_eval_count,
    )
    pstate.log_and_save_initial()

    # Autograd does not allow multiple return values from
    # a differentiable function.
    # Scipy's minimization algorithms require us to provide
    # functions that they evaluate on their own schedule.
    # The best solution to track mutable objects, that I can think of,
    # is to use a reporter object.
    reporter = Dummy()
    reporter.iteration = 0
    result = GrapeSchroedingerResult()

    # Convert the controls from cost function format to optimizer format.
    initial_controls = strip_controls(pstate.complex_controls,
                                      pstate.initial_controls)

    # Run the optimization.
    pstate.optimizer.run(_esd_wrap, pstate.iteration_count,
                         initial_controls, _esdj_wrap,
                         args=(pstate, reporter, result))

    return result
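# Hedged usage sketch for the entry point above: a toy single-qubit state
# transfer. The operators, times, and counts are illustrative only, and the
# cost class TargetInfidelity is the one used elsewhere in this repo; its
# compatibility with the current API is assumed, not confirmed by this excerpt.
import numpy as np

_SIGMA_X = np.array([[0., 1.], [1., 0.]])
_H_DRIFT = np.zeros((2, 2))                            # no drift in this toy frame
_hamiltonian = lambda controls, time: _H_DRIFT + controls[0] * _SIGMA_X

_INITIAL_STATES = np.array([[[1.], [0.]]])             # |0>, shape (1, 2, 1)
_TARGET_STATES = np.array([[[0.], [1.]]])              # |1>
_COSTS = (TargetInfidelity(_TARGET_STATES),)

_result = grape_schroedinger_discrete(
    1,                                                 # control_count
    100,                                               # control_eval_count
    _COSTS,
    10.,                                               # evolution_time (arbitrary units)
    _hamiltonian,
    _INITIAL_STATES,
    100,                                               # system_eval_count
    iteration_count=1000,
    log_iteration_step=10,
)
# `_result` is the GrapeSchroedingerResult described in the docstring above.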
    #ENDWITH
    INITIAL_CONTROLS = controls
else:
    tarr = np.linspace(0, 2 * np.pi, CONTROL_EVAL_COUNT)
    cos_arr = 0.25 * np.cos(6 * tarr)
    sin_arr = 0.25 * np.sin(6 * tarr)
    INITIAL_CONTROLS = np.stack(
        (max_amp_qubit * sin_arr, max_amp_qubit * sin_arr,
         max_amp_mode_a * sin_arr, max_amp_mode_a * sin_arr,
         max_amp_mode_b * sin_arr, max_amp_mode_b * sin_arr),
        axis=1)
# NOTE: this assignment overrides the initial controls constructed above.
INITIAL_CONTROLS = None

# ============ #

OPTIMIZER = Adam(learning_rate=LEARNING_RATE)
SAVE_INTERMEDIATE_STATES = True


def impose_control_conditions(controls):
    # Impose zero at boundaries.
    controls[0, :] = controls[-1, :] = 0
    return controls


GRAPE_CONFIG = {
    "control_count": CONTROL_COUNT,
    "control_eval_count": CONTROL_EVAL_COUNT,
    "costs": COSTS,
    "evolution_time": EVOLUTION_TIME,
def grape_lindblad_discrete(
        control_count,
        control_step_count,
        costs,
        evolution_time,
        initial_densities,
        complex_controls=False,
        hamiltonian=None,
        initial_controls=None,
        interpolation_policy=InterpolationPolicy.LINEAR,
        iteration_count=1000,
        lindblad_data=None,
        log_iteration_step=10,
        max_control_norms=None,
        minimum_error=0,
        operation_policy=OperationPolicy.CPU,
        optimizer=Adam(),
        save_file_path=None,
        save_iteration_step=0,
        system_step_multiplier=1,
):
    """
    This method optimizes the evolution of a set of density matrices
    under the lindblad equation for time-discrete control parameters.

    Args:
    complex_controls
    control_count
    control_step_count
    costs
    evolution_time
    hamiltonian
    initial_controls
    initial_densities
    interpolation_policy
    iteration_count
    lindblad_data
    log_iteration_step
    max_control_norms
    minimum_error
    operation_policy
    optimizer
    save_file_path
    save_iteration_step
    system_step_multiplier

    Returns:
    result
    """
    # Initialize the controls.
    initial_controls, max_control_norms = initialize_controls(
        complex_controls, control_count, control_step_count,
        evolution_time, initial_controls, max_control_norms,
    )

    # Construct the program state.
    pstate = GrapeLindbladDiscreteState(
        complex_controls, control_count, control_step_count, costs,
        evolution_time, hamiltonian, initial_controls, initial_densities,
        interpolation_policy, iteration_count, lindblad_data,
        log_iteration_step, max_control_norms, minimum_error,
        operation_policy, optimizer, save_file_path, save_iteration_step,
        system_step_multiplier,
    )
    pstate.log_and_save_initial()

    # Autograd does not allow multiple return values from
    # a differentiable function.
    # Scipy's minimization algorithms require us to provide
    # functions that they evaluate on their own schedule.
    # The best solution to track mutable objects, that I can think of,
    # is to use a reporter object.
    reporter = Dummy()
    reporter.iteration = 0
    result = GrapeLindbladResult()

    # Convert the controls from cost function format to optimizer format.
    initial_controls = strip_controls(pstate.complex_controls,
                                      pstate.initial_controls)

    # Run the optimization.
    pstate.optimizer.run(_eld_wrap, pstate.iteration_count,
                         initial_controls, _eldj_wrap,
                         args=(pstate, reporter, result))

    return result
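# Hedged note on input shapes for the Lindblad entry point above: where the
# schroedinger variant takes kets of shape (state_count, hilbert_size, 1),
# this variant presumably takes density matrices of shape
# (density_count, hilbert_size, hilbert_size). A toy construction:
import numpy as np

_ket_0 = np.array([[1.], [0.]])                        # |0>
_rho_0 = _ket_0 @ _ket_0.conj().T                      # |0><0|, shape (2, 2)
_initial_densities = np.stack((_rho_0,))               # shape (1, 2, 2)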
def grape_schroedinger_discrete(
        control_count,
        control_step_count,
        costs,
        evolution_time,
        hamiltonian,
        initial_states,
        complex_controls=False,
        initial_controls=None,
        interpolation_policy=InterpolationPolicy.LINEAR,
        iteration_count=1000,
        log_iteration_step=10,
        magnus_policy=MagnusPolicy.M2,
        max_control_norms=None,
        minimum_error=0,
        operation_policy=OperationPolicy.CPU,
        optimizer=Adam(),
        performance_policy=PerformancePolicy.TIME,
        save_file_path=None,
        save_iteration_step=0,
        system_step_multiplier=1,
):
    """
    This method optimizes the evolution of a set of states under the
    schroedinger equation for time-discrete control parameters.

    Args:
    complex_controls
    control_count
    control_step_count
    costs
    evolution_time
    hamiltonian
    initial_controls
    initial_states
    interpolation_policy
    iteration_count
    log_iteration_step
    magnus_policy
    max_control_norms
    minimum_error
    operation_policy
    optimizer
    performance_policy
    save_file_path
    save_iteration_step
    system_step_multiplier

    Returns:
    result
    """
    # Initialize the controls.
    initial_controls, max_control_norms = initialize_controls(
        complex_controls, control_count, control_step_count,
        evolution_time, initial_controls, max_control_norms,
    )

    # Construct the program state.
    pstate = GrapeSchroedingerDiscreteState(
        complex_controls, control_count, control_step_count, costs,
        evolution_time, hamiltonian, initial_controls, initial_states,
        interpolation_policy, iteration_count, log_iteration_step,
        magnus_policy, max_control_norms, minimum_error, operation_policy,
        optimizer, performance_policy, save_file_path, save_iteration_step,
        system_step_multiplier,
    )
    pstate.log_and_save_initial()

    # Switch on the GRAPE implementation method.
    if performance_policy == PerformancePolicy.TIME:
        result = _grape_schroedinger_discrete_time(pstate)
    else:
        result = _grape_schroedinger_discrete_memory(pstate)

    return result