def prepare(self, objective, initial_values=None, variables=None, gradient=None):
    active_angles, passive_angles, variables = self.initialize_variables(objective, initial_values, variables)
    comp = self.compile_objective(objective=objective)
    compile_gradient = True

    dE = None
    if isinstance(gradient, str):
        if gradient.lower() == 'qng':
            compile_gradient = False

            combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                    samples=self.samples, noise=self.noise,
                                    backend_options=self.backend_options)
            dE = QNGVector(combos)
        else:
            gradient = {"method": gradient, "stepsize": 1.e-4}

    if compile_gradient:
        grad_obj, comp_grad_obj = self.compile_gradient(objective=objective, variables=variables, gradient=gradient)
        dE = CallableVector([comp_grad_obj[k] for k in comp_grad_obj.keys()])

    ostring = id(comp)
    if not self.silent:
        print(self)
        print("{:15} : {} expectationvalues".format("Objective", objective.count_expectationvalues()))
        if compile_gradient:
            counts = [x.count_expectationvalues() for x in comp_grad_obj.values()]
            print("{:15} : {} expectationvalues".format("Gradient", sum(counts)))
            print("{:15} : {}".format("gradient instr", gradient))
        print("{:15} : {}".format("active variables", len(active_angles)))

    vec_len = len(active_angles)
    first = numpy.zeros(vec_len)
    second = numpy.zeros(vec_len)
    self.gradient_lookup[ostring] = dE
    self.active_key_lookup[ostring] = active_angles.keys()
    self.moments_lookup[ostring] = (first, second)
    self.moments_trajectory[ostring] = [(first, second)]
    self.step_lookup[ostring] = 0
    return comp
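# Hedged usage sketch (comment only, not part of the class): how `prepare` is
# typically paired with a stepping loop. The optimizer construction shown here is
# an assumption, and a `step` method is assumed from the later `prepare` docstring
# ("MUST be called before step is used"); only `prepare` itself is from the source.
#
#   import tequila as tq
#   U = tq.gates.Ry(angle="a", target=0)
#   H = tq.paulis.X(0)
#   objective = tq.ExpectationValue(U=U, H=H)
#   optimizer = OptimizerGD(maxiter=100)            # hypothetical construction
#   comp = optimizer.prepare(objective, initial_values={"a": 0.5})
#   for _ in range(100):
#       values = optimizer.step(comp, values)       # `step` assumed; see its docstring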
def __call__(self, objective: Objective,
             initial_values: typing.Dict[Variable, numbers.Real],
             variables: typing.List[Variable],
             gradient: typing.Dict[Variable, Objective] = None,
             qng: bool = False,
             hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
             samples: int = None,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             reset_history: bool = True, *args, **kwargs) -> SciPyReturnType:
    """
    Optimizes with scipy and gives back the optimized angles
    Get the optimized energies over the history
    :param objective: The tequila Objective to minimize
    :param initial_values: initial values for the objective
    :param reset_history: reset the history before optimization starts (has no effect if self.save_history is False)
    :return: tuple of optimized energy, optimized angles and scipy output
    """
    infostring = "Starting {method} optimization\n".format(method=self.method)
    infostring += "Objective: {} expectationvalues\n".format(objective.count_expectationvalues())

    if self.save_history and reset_history:
        self.reset_history()

    active_angles = {}
    for v in variables:
        active_angles[v] = initial_values[v]

    passive_angles = {}
    for k, v in initial_values.items():
        if k not in active_angles.keys():
            passive_angles[k] = v

    # Transform the initial value directory into (ordered) arrays
    param_keys, param_values = zip(*active_angles.items())
    param_values = numpy.array(param_values)

    bounds = None
    if self.method_bounds is not None:
        bounds = {k: None for k in active_angles}
        for k, v in self.method_bounds.items():
            if k in bounds:
                bounds[k] = v
        infostring += "bounds : {}\n".format(self.method_bounds)
        names, bounds = zip(*bounds.items())
        assert names == param_keys  # make sure the bounds are not shuffled

    # do the compilation here to avoid costly recompilation during the optimization
    compiled_objective = compile(objective=objective, variables=initial_values, backend=backend,
                                 noise=noise, samples=samples, *args, **kwargs)

    E = _EvalContainer(objective=compiled_objective,
                       param_keys=param_keys,
                       samples=samples,
                       passive_angles=passive_angles,
                       save_history=self.save_history,
                       backend_options=backend_options,
                       silent=self.silent)

    # compile gradients
    if self.method in self.gradient_based_methods + self.hessian_based_methods and not isinstance(gradient, str):
        compiled_grad_objectives = dict()
        if gradient is None:
            gradient = {assign_variable(k): grad(objective=objective, variable=k) for k in active_angles.keys()}
        else:
            gradient = {assign_variable(k): v for k, v in gradient.items()}

        grad_exval = []
        for k in active_angles.keys():
            if k not in gradient:
                raise Exception("No gradient for variable {}".format(k))
            grad_exval.append(gradient[k].count_expectationvalues())
            compiled_grad_objectives[k] = compile(objective=gradient[k], variables=initial_values,
                                                  samples=samples, noise=noise, backend=backend, *args, **kwargs)

        if qng:
            combos = get_qng_combos(objective, samples=samples, backend=backend,
                                    noise=noise, initial_values=initial_values)
            dE = _QngContainer(combos=combos,
                               param_keys=param_keys,
                               samples=samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               silent=self.silent,
                               backend_options=backend_options)
        else:
            dE = _GradContainer(objective=compiled_grad_objectives,
                                param_keys=param_keys,
                                samples=samples,
                                passive_angles=passive_angles,
                                save_history=self.save_history,
                                silent=self.silent,
                                backend_options=backend_options)

        infostring += "Gradients: {} expectationvalues (min={}, max={})\n".format(sum(grad_exval),
                                                                                  min(grad_exval),
                                                                                  max(grad_exval))
    else:
        # use numerical gradient
        dE = gradient
        infostring += "Gradients: {}\n".format(gradient)

    # compile hessian
    if self.method in self.hessian_based_methods and not isinstance(hessian, str):
        if isinstance(gradient, str):
            raise TequilaScipyException("Can not use numerical gradients for Hessian based methods")
        if qng is True:
            raise TequilaScipyException('Quantum Natural Hessian not yet well-defined, sorry!')
        compiled_hess_objectives = dict()
        hess_exval = []
        for i, k in enumerate(active_angles.keys()):
            for j, l in enumerate(active_angles.keys()):
                if j > i:
                    continue
                hess = grad(gradient[k], l)
                compiled_hess = compile(objective=hess, variables=initial_values, samples=samples,
                                        noise=noise, backend=backend, *args, **kwargs)
                compiled_hess_objectives[(k, l)] = compiled_hess
                compiled_hess_objectives[(l, k)] = compiled_hess
                hess_exval.append(compiled_hess.count_expectationvalues())

        ddE = _HessContainer(objective=compiled_hess_objectives,
                             param_keys=param_keys,
                             samples=samples,
                             passive_angles=passive_angles,
                             save_history=self.save_history,
                             silent=self.silent)

        infostring += "Hessian: {} expectationvalues (min={}, max={})\n".format(sum(hess_exval),
                                                                                min(hess_exval),
                                                                                max(hess_exval))
    else:
        infostring += "Hessian: {}\n".format(hessian)
        if self.method != "TRUST-CONSTR" and hessian is not None:
            raise TequilaScipyException("numerical hessians only for trust-constr method")
        ddE = hessian

    if not self.silent:
        print("ObjectiveType is {}".format(type(compiled_objective)))
        print(infostring)
        print("backend: {}".format(compiled_objective.backend))
        print("samples: {}".format(samples))
        print("{} active variables".format(len(active_angles)))

    # get the number of real scipy iterations for better histories
    real_iterations = []

    Es = []
    callback = lambda x, *args: real_iterations.append(len(E.history) - 1)
    res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                  args=(Es,),
                                  method=self.method, tol=self.tol,
                                  bounds=bounds,
                                  constraints=self.method_constraints,
                                  options=self.method_options,
                                  callback=callback)

    # failsafe since callback is not implemented everywhere
    if len(real_iterations) == 0:
        real_iterations = range(len(E.history))
    else:
        real_iterations = [0] + real_iterations

    if self.save_history:
        self.history.energies = [E.history[i] for i in real_iterations]
        self.history.energy_evaluations = E.history
        self.history.angles = [E.history_angles[i] for i in real_iterations]
        self.history.angles_evaluations = E.history_angles
        if dE is not None and not isinstance(dE, str):
            # can currently only save gradients if explicitly evaluated
            # and will fail for hessian based approaches
            # need better callback functions
            try:
                if self.method not in self.hessian_based_methods:
                    self.history.gradients = [dE.history[i] for i in real_iterations]
            except Exception:
                print("WARNING: History could not assign the stored gradients")
            self.history.gradients_evaluations = dE.history
        if ddE is not None and not isinstance(ddE, str):
            # hessians are not evaluated in the same frequencies as energies
            # therefore we can not store the "real" iterations currently
            self.history.hessians_evaluations = ddE.history

    E_final = res.fun
    angles_final = dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
    angles_final = {**angles_final, **passive_angles}

    return SciPyReturnType(energy=E_final, angles=format_variable_dictionary(angles_final),
                           history=self.history, scipy_output=res)
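# Hedged usage sketch: minimizing a one-parameter expectation value through the
# __call__ interface above. The optimizer construction is an assumption; only the
# call signature and return fields are taken from the source.
#
#   import tequila as tq
#   U = tq.gates.Ry(angle="a", target=0)
#   H = tq.paulis.Z(0)
#   objective = tq.ExpectationValue(U=U, H=H)
#   optimizer = OptimizerSciPy(method="BFGS")       # hypothetical construction
#   result = optimizer(objective=objective,
#                      initial_values={"a": 0.1},
#                      variables=["a"])
#   print(result.energy, result.angles)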
def __call__(self, objective: Objective,
             variables: typing.List[Variable] = None,
             initial_values: typing.Dict[Variable, numbers.Real] = None,
             gradient: typing.Dict[Variable, Objective] = None,
             hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
             reset_history: bool = True,
             *args, **kwargs) -> SciPyResults:
    """
    Perform optimization using scipy optimizers.

    Parameters
    ----------
    objective: Objective:
        the objective to optimize.
    variables: list, optional:
        the variables of objective to optimize. If None: optimize all.
    initial_values: dict, optional:
        a starting point from which to begin optimization. Will be generated if None.
    gradient: optional:
        Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
    hessian: optional:
        Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
    reset_history: bool: Default = True:
        whether or not to reset all history before optimizing.
    args
    kwargs

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    objective = objective.contract()
    infostring = "{:15} : {}\n".format("Method", self.method)
    infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())

    if gradient is not None:
        infostring += "{:15} : {}\n".format("grad instr", gradient)
    if hessian is not None:
        infostring += "{:15} : {}\n".format("hess_instr", hessian)

    if self.save_history and reset_history:
        self.reset_history()

    active_angles, passive_angles, variables = self.initialize_variables(objective, initial_values, variables)

    # Transform the initial value directory into (ordered) arrays
    param_keys, param_values = zip(*active_angles.items())
    param_values = numpy.array(param_values)

    # process and initialize scipy bounds
    bounds = None
    if self.method_bounds is not None:
        bounds = {k: None for k in active_angles}
        for k, v in self.method_bounds.items():
            if k in bounds:
                bounds[k] = v
        infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
        names, bounds = zip(*bounds.items())
        assert names == param_keys  # make sure the bounds are not shuffled

    # do the compilation here to avoid costly recompilation during the optimization
    compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)

    E = _EvalContainer(objective=compiled_objective,
                       param_keys=param_keys,
                       samples=self.samples,
                       passive_angles=passive_angles,
                       save_history=self.save_history,
                       print_level=self.print_level)

    compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
    compile_hessian = self.method in self.hessian_based_methods

    dE = None
    ddE = None

    # detect if numerical gradients shall be used
    # switch off compiling if so
    if isinstance(gradient, str):
        if gradient.lower() == 'qng':
            compile_gradient = False
            if compile_hessian:
                raise TequilaException('Sorry, QNG and hessian not yet tested together.')

            combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                    samples=self.samples, noise=self.noise)
            dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
            infostring += "{:15} : QNG {}\n".format("gradient", dE)
        else:
            dE = gradient
            compile_gradient = False
            if compile_hessian:
                compile_hessian = False
                if hessian is None:
                    hessian = gradient
            infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
            infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)

    if isinstance(gradient, dict):
        if gradient['method'] == 'qng':
            func = gradient['function']
            compile_gradient = False
            if compile_hessian:
                raise TequilaException('Sorry, QNG and hessian not yet tested together.')

            combos = get_qng_combos(objective, func=func, initial_values=initial_values, backend=self.backend,
                                    samples=self.samples, noise=self.noise)
            dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
            infostring += "{:15} : QNG {}\n".format("gradient", dE)

    if isinstance(hessian, str):
        ddE = hessian
        compile_hessian = False

    if compile_gradient:
        grad_obj, comp_grad_obj = self.compile_gradient(objective=objective, variables=variables,
                                                        gradient=gradient, *args, **kwargs)
        expvals = sum([o.count_expectationvalues() for o in comp_grad_obj.values()])
        infostring += "{:15} : {} expectationvalues\n".format("gradient", expvals)
        dE = _GradContainer(objective=comp_grad_obj,
                            param_keys=param_keys,
                            samples=self.samples,
                            passive_angles=passive_angles,
                            save_history=self.save_history,
                            print_level=self.print_level)

    if compile_hessian:
        hess_obj, comp_hess_obj = self.compile_hessian(variables=variables,
                                                       hessian=hessian,
                                                       grad_obj=grad_obj,
                                                       comp_grad_obj=comp_grad_obj,
                                                       *args, **kwargs)
        expvals = sum([o.count_expectationvalues() for o in comp_hess_obj.values()])
        infostring += "{:15} : {} expectationvalues\n".format("hessian", expvals)
        ddE = _HessContainer(objective=comp_hess_obj,
                             param_keys=param_keys,
                             samples=self.samples,
                             passive_angles=passive_angles,
                             save_history=self.save_history,
                             print_level=self.print_level)

    if self.print_level > 0:
        print(self)
        print(infostring)
        print("{:15} : {}\n".format("active variables", len(active_angles)))

    Es = []

    optimizer_instance = self

    class SciPyCallback:
        energies = []
        gradients = []
        hessians = []
        angles = []
        real_iterations = 0

        def __call__(self, *args, **kwargs):
            self.energies.append(E.history[-1])
            self.angles.append(E.history_angles[-1])
            if dE is not None and not isinstance(dE, str):
                self.gradients.append(dE.history[-1])
            if ddE is not None and not isinstance(ddE, str):
                self.hessians.append(ddE.history[-1])
            self.real_iterations += 1
            if 'callback' in optimizer_instance.kwargs:
                optimizer_instance.kwargs['callback'](E.history_angles[-1])

    callback = SciPyCallback()
    res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                  args=(Es,),
                                  method=self.method, tol=self.tol,
                                  bounds=bounds,
                                  constraints=self.method_constraints,
                                  options=self.method_options,
                                  callback=callback)

    # failsafe since callback is not implemented everywhere
    if callback.real_iterations == 0:
        real_iterations = range(len(E.history))

    if self.save_history:
        self.history.energies = callback.energies
        self.history.energy_evaluations = E.history
        self.history.angles = callback.angles
        self.history.angles_evaluations = E.history_angles
        self.history.gradients = callback.gradients
        self.history.hessians = callback.hessians
        if dE is not None and not isinstance(dE, str):
            self.history.gradients_evaluations = dE.history
        if ddE is not None and not isinstance(ddE, str):
            self.history.hessians_evaluations = ddE.history

        # some methods like "cobyla" do not support callback functions
        if len(self.history.energies) == 0:
            self.history.energies = E.history
            self.history.angles = E.history_angles

    # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
    ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
    E_final = ea[0][0]
    angles_final = ea[0][1]
    angles_final = {**angles_final, **passive_angles}

    return SciPyResults(energy=E_final, history=self.history,
                        variables=format_variable_dictionary(angles_final),
                        scipy_result=res)
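# Hedged sketch of the `gradient` forms dispatched by the isinstance branches
# above. The objective and optimizer construction are assumptions; `my_func` is a
# hypothetical metric function for the dict form.
#
#   result = optimizer(objective)                      # None: compiled analytical gradient
#   result = optimizer(objective, gradient="qng")      # 'qng': quantum natural gradient
#   result = optimizer(objective, gradient="2-point")  # other strings: passed to scipy as numerical jac
#   result = optimizer(objective,
#                      gradient={"method": "qng", "function": my_func})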
def prepare(self, objective: Objective, initial_values: dict = None,
            variables: list = None, gradient=None):
    """
    perform all initialization for an objective, register it with lookup tables, and return it compiled.
    MUST be called before step is used.

    Parameters
    ----------
    objective: Objective:
        the objective to ready for optimization.
    initial_values: dict, optional:
        the initial values with which to prepare the optimizer. Default: choose randomly.
    variables: list, optional:
        which variables to optimize over, and hence prepare gradients for.
        Default: optimize over all variables in objective.
    gradient: optional:
        extra keyword; information used to compile alternate gradients.
        Default: prepare the standard, analytical gradient.

    Returns
    -------
    Objective:
        compiled version of objective.
    """
    active_angles, passive_angles, variables = self.initialize_variables(objective, initial_values, variables)
    comp = self.compile_objective(objective=objective)
    for arg in comp.args:
        if hasattr(arg, 'U'):
            if arg.U.device is not None:
                # don't retrieve computer 100 times; pyquil errors out if this happens!
                self.device = arg.U.device
                break

    compile_gradient = True
    dE = None

    if isinstance(gradient, str):
        if gradient.lower() == 'qng':
            compile_gradient = False

            combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                    device=self.device,
                                    samples=self.samples, noise=self.noise)
            dE = QNGVector(combos)
        else:
            gradient = {"method": gradient, "stepsize": 1.e-4}

    elif isinstance(gradient, dict):
        if gradient['method'] == 'qng':
            func = gradient['function']
            compile_gradient = False
            combos = get_qng_combos(objective, func=func, initial_values=initial_values, backend=self.backend,
                                    device=self.device,
                                    samples=self.samples, noise=self.noise)
            dE = QNGVector(combos)

    if compile_gradient:
        grad_obj, comp_grad_obj = self.compile_gradient(objective=objective, variables=variables, gradient=gradient)
        dE = CallableVector([comp_grad_obj[k] for k in comp_grad_obj.keys()])

    ostring = id(comp)
    if not self.silent:
        print(self)
        print("{:15} : {} expectationvalues".format("Objective", objective.count_expectationvalues()))
        if compile_gradient:
            counts = [x.count_expectationvalues() for x in comp_grad_obj.values()]
            print("{:15} : {} expectationvalues".format("Gradient", sum(counts)))
            print("{:15} : {}".format("gradient instr", gradient))
        print("{:15} : {}".format("active variables", len(active_angles)))

    vec_len = len(active_angles)
    first = numpy.zeros(vec_len)
    second = numpy.zeros(vec_len)
    self.gradient_lookup[ostring] = dE
    self.active_key_lookup[ostring] = active_angles.keys()
    self.moments_lookup[ostring] = (first, second)
    self.moments_trajectory[ostring] = [(first, second)]
    self.step_lookup[ostring] = 0
    return comp
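# Hedged sketch of the `gradient` keyword as handled above: a string other than
# 'qng' is rewritten into a numerical-gradient instruction dict with a default
# stepsize, so these two calls are equivalent. Objective construction is assumed.
#
#   comp = optimizer.prepare(objective, gradient="2-point")
#   comp = optimizer.prepare(objective, gradient={"method": "2-point", "stepsize": 1.e-4})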
def __call__(self, objective: Objective,
             variables: typing.List[Variable] = None,
             initial_values: typing.Dict[Variable, numbers.Real] = None,
             gradient: typing.Dict[Variable, Objective] = None,
             hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
             reset_history: bool = True,
             *args, **kwargs) -> SciPyReturnType:
    """
    Optimizes with scipy and gives back the optimized angles
    Get the optimized energies over the history
    :param objective: The tequila Objective to minimize
    :param initial_values: initial values for the objective
    :param reset_history: reset the history before optimization starts (has no effect if self.save_history is False)
    :return: tuple of optimized energy, optimized angles and scipy output
    """
    infostring = "{:15} : {}\n".format("Method", self.method)
    infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())

    if gradient is not None:
        infostring += "{:15} : {}\n".format("grad instr", gradient)
    if hessian is not None:
        infostring += "{:15} : {}\n".format("hess_instr", hessian)

    if self.save_history and reset_history:
        self.reset_history()

    active_angles, passive_angles, variables = self.initialize_variables(objective, initial_values, variables)

    # Transform the initial value directory into (ordered) arrays
    param_keys, param_values = zip(*active_angles.items())
    param_values = numpy.array(param_values)

    # process and initialize scipy bounds
    bounds = None
    if self.method_bounds is not None:
        bounds = {k: None for k in active_angles}
        for k, v in self.method_bounds.items():
            if k in bounds:
                bounds[k] = v
        infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
        names, bounds = zip(*bounds.items())
        assert names == param_keys  # make sure the bounds are not shuffled

    # do the compilation here to avoid costly recompilation during the optimization
    compiled_objective = self.compile_objective(objective=objective)

    E = _EvalContainer(objective=compiled_objective,
                       param_keys=param_keys,
                       samples=self.samples,
                       passive_angles=passive_angles,
                       save_history=self.save_history,
                       backend_options=self.backend_options,
                       print_level=self.print_level)

    compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
    compile_hessian = self.method in self.hessian_based_methods

    dE = None
    ddE = None

    # detect if numerical gradients shall be used
    # switch off compiling if so
    if isinstance(gradient, str):
        if gradient.lower() == 'qng':
            compile_gradient = False
            if compile_hessian:
                raise TequilaException('Sorry, QNG and hessian not yet tested together.')

            combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                    samples=self.samples, noise=self.noise,
                                    backend_options=self.backend_options)
            dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
            infostring += "{:15} : QNG {}\n".format("gradient", dE)
        else:
            dE = gradient
            compile_gradient = False
            if compile_hessian:
                compile_hessian = False
                if hessian is None:
                    hessian = gradient
            infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
            infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)

    if isinstance(hessian, str):
        ddE = hessian
        compile_hessian = False

    if compile_gradient:
        grad_obj, comp_grad_obj = self.compile_gradient(objective=objective, variables=variables,
                                                        gradient=gradient)
        expvals = sum([o.count_expectationvalues() for o in comp_grad_obj.values()])
        infostring += "{:15} : {} expectationvalues\n".format("gradient", expvals)
        dE = _GradContainer(objective=comp_grad_obj,
                            param_keys=param_keys,
                            samples=self.samples,
                            passive_angles=passive_angles,
                            save_history=self.save_history,
                            print_level=self.print_level,
                            backend_options=self.backend_options)

    if compile_hessian:
        hess_obj, comp_hess_obj = self.compile_hessian(variables=variables,
                                                       hessian=hessian,
                                                       grad_obj=grad_obj,
                                                       comp_grad_obj=comp_grad_obj)
        expvals = sum([o.count_expectationvalues() for o in comp_hess_obj.values()])
        infostring += "{:15} : {} expectationvalues\n".format("hessian", expvals)
        ddE = _HessContainer(objective=comp_hess_obj,
                             param_keys=param_keys,
                             samples=self.samples,
                             passive_angles=passive_angles,
                             save_history=self.save_history,
                             print_level=self.print_level,
                             backend_options=self.backend_options)

    if self.print_level > 0:
        print(self)
        print(infostring)
        print("{:15} : {}\n".format("active variables", len(active_angles)))

    Es = []

    class SciPyCallback:
        energies = []
        gradients = []
        hessians = []
        angles = []
        real_iterations = 0

        def __call__(self, *args, **kwargs):
            self.energies.append(E.history[-1])
            self.angles.append(E.history_angles[-1])
            if dE is not None and not isinstance(dE, str):
                self.gradients.append(dE.history[-1])
            if ddE is not None and not isinstance(ddE, str):
                self.hessians.append(ddE.history[-1])
            self.real_iterations += 1

    callback = SciPyCallback()
    res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                  args=(Es,),
                                  method=self.method, tol=self.tol,
                                  bounds=bounds,
                                  constraints=self.method_constraints,
                                  options=self.method_options,
                                  callback=callback)

    # failsafe since callback is not implemented everywhere
    if callback.real_iterations == 0:
        real_iterations = range(len(E.history))

    if self.save_history:
        self.history.energies = callback.energies
        self.history.energy_evaluations = E.history
        self.history.angles = callback.angles
        self.history.angles_evaluations = E.history_angles
        self.history.gradients = callback.gradients
        self.history.hessians = callback.hessians
        if dE is not None and not isinstance(dE, str):
            self.history.gradients_evaluations = dE.history
        if ddE is not None and not isinstance(ddE, str):
            self.history.hessians_evaluations = ddE.history

        # some methods like "cobyla" do not support callback functions
        if len(self.history.energies) == 0:
            self.history.energies = E.history
            self.history.angles = E.history_angles

    E_final = res.fun
    angles_final = dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
    angles_final = {**angles_final, **passive_angles}

    return SciPyReturnType(energy=E_final, angles=format_variable_dictionary(angles_final),
                           history=self.history, scipy_output=res)
def __call__(self, objective: Objective,
             maxiter,
             lr: float = .01,
             method: str = 'sgd',
             qng: bool = False,
             stop_count: int = None,
             initial_values: typing.Dict[Variable, numbers.Real] = None,
             variables: typing.List[Variable] = None,
             samples: int = None,
             backend: str = None,
             noise: NoiseModel = None,
             reset_history: bool = True, *args, **kwargs) -> GDReturnType:
    """
    Optimizes with a variation of gradient descent and gives back the optimized angles
    Get the optimized energies over the history
    :param objective: The tequila Objective to minimize
    :param maxiter: how many iterations to run, at maximum.
    :param method: what method to optimize via.
    :param qng: whether or not to use the QNG to calculate gradients.
    :param stop_count: how many steps after which to abort if no improvement occurs.
    :param initial_values: initial values for the objective
    :param variables: which variables to optimize over. Default None: all the variables of the objective.
    :param samples: the number of samples to use. Default None: wavefunction simulation used instead.
    :param backend: which simulation backend to use. Default None: let Tequila pick!
    :param noise: the NoiseModel to apply to sampling. Default None. Affects chosen simulator.
    :param reset_history: reset the history before optimization starts (has no effect if self.save_history is False)
    :return: optimized energy, optimized angles, the history, and the moments trajectory
    """
    if self.save_history and reset_history:
        self.reset_history()

    active_angles = {}
    for v in variables:
        active_angles[v] = initial_values[v]

    passive_angles = {}
    for k, v in initial_values.items():
        if k not in active_angles.keys():
            passive_angles[k] = v

    # Transform the initial value directory into (ordered) arrays
    comp = compile(objective=objective, variables=initial_values, backend=backend,
                   noise=noise, samples=samples)

    if not qng:
        g_list = []
        for k in active_angles.keys():
            g = grad(objective, k)
            g_comp = compile(objective=g, variables=initial_values, backend=backend,
                             noise=noise, samples=samples)
            g_list.append(g_comp)

        gradients = CallableVector(g_list)
    else:
        if method.lower() == 'adagrad':
            print('Warning! you have chosen to use QNG with adagrad; convergence is not likely.')
        gradients = QNGVector(get_qng_combos(objective=objective, initial_values=initial_values,
                                             backend=backend, noise=noise, samples=samples))

    if not self.silent:
        print("backend: {}".format(comp.backend))
        print("samples: {}".format(samples))
        print("{} active variables".format(len(active_angles)))
        print("qng: {}".format(str(qng)))

    ### prefactor. Early stopping, initialization, etc. handled here
    if maxiter is None:
        maxiter = self.maxiter
    if stop_count is None:
        stop_count = maxiter

    ### the actual algorithm acts here:
    f = self.method_dict[method.lower()]
    v = initial_values
    vec_len = len(active_angles)
    best = None
    best_angles = None
    first = numpy.zeros(vec_len)
    second = numpy.zeros(vec_len)
    moments = [first, second]
    all_moments = [moments]
    tally = 0
    for step in range(maxiter):
        e = comp(v, samples=samples)
        self.history.energies.append(e)
        self.history.angles.append(v)

        ### saving best performance and counting the stop tally.
        if step == 0:
            best = e
            best_angles = v
            tally = 0
        else:
            if e < best:
                best = e
                best_angles = v
                tally = 0
            else:
                tally += 1

        if not self.silent:
            string = "Iteration: {}, Energy: {}, angles: {}".format(str(step), str(e), v)
            print(string)

        ### check if it's time to stop!
        if tally == stop_count:
            if not self.silent:
                print('no improvement after {} epochs. Stopping optimization.'.format(str(stop_count)))
            break

        new, moments, grads = f(lr=lr, step=step, gradients=gradients, v=v, moments=moments,
                                active_angles=active_angles, samples=samples, **kwargs)
        save_grad = {}
        if passive_angles is not None:
            v = {**new, **passive_angles}
        else:
            v = new
        for i, k in enumerate(active_angles.keys()):
            save_grad[k] = grads[i]
        self.history.gradients.append(save_grad)
        all_moments.append(moments)

    E_final, angles_final = best, best_angles
    angles_final = {**angles_final, **passive_angles}
    return GDReturnType(energy=E_final, angles=format_variable_dictionary(angles_final),
                        history=self.history, moments=all_moments)
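# Hedged usage sketch for the gradient-descent __call__ above. Keyword names come
# from the signature; the objective is assumed, and the 'adam' key is assumed to be
# registered in self.method_dict (only 'sgd' and 'adagrad' are named in the source).
#
#   result = optimizer(objective=objective,
#                      maxiter=200, lr=0.05, method="adam",
#                      stop_count=20,
#                      initial_values={"a": 0.1}, variables=["a"])
#   print(result.energy, result.moments[-1])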