Example #1
    def __call__(self, variables, samples: int = None, *args, **kwargs):

        variables = format_variable_dictionary(variables=variables)
        if self._variables is not None and len(self._variables) > 0:
            if variables is None or (
                    not set(self._variables) <= set(variables.keys())):
                raise TequilaException(
                    "BackendExpectationValue received not all variables. Circuit depends on variables {}, you gave {}"
                    .format(self._variables, variables))

        if samples is None:
            data = self.simulate(variables=variables, *args, **kwargs)
        else:
            data = self.sample(variables=variables,
                               samples=samples,
                               *args,
                               **kwargs)

        if self._shape is None and self._contraction is None:
            # this is the default
            return numpy.sum(data)

        if self._shape is not None:
            data = data.reshape(self._shape)
        if self._contraction is None:
            return data
        else:
            return self._contraction(data)
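
A minimal usage sketch of the dispatch above (hedged: the one-qubit objective is illustrative, and the calls follow tequila's public API; treat exact behavior as version-dependent):

import tequila as tq

# toy objective: <Z> after a parametrized Ry rotation
U = tq.gates.Ry(angle="a", target=0)
E = tq.ExpectationValue(H=tq.paulis.Z(0), U=U)

f = tq.compile(E)                    # wraps a BackendExpectationValue
exact = f({"a": 0.5})                # samples=None -> simulate(): exact value
shots = f({"a": 0.5}, samples=1000)  # samples given -> sample(): shot noise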
Example #2
    def initialize_variables(self, objective, initial_values, variables):
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = objective.extract_variables()
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = numpy.random.uniform(0, 2 * numpy.pi)
                    detected = True
            if detected and not self.silent:
                print("WARNING: initial_variables given but not complete: Autocomplete with random number")

        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]

        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
Example #3
def preamble(objective: Objective,
             compile_args: dict = None,
             input_vars: list = None):
    """
    Helper function for interfaces to ml backends.
    Parameters
    ----------
    objective: Objective:
        the objective to manipulate and compile.
    compile_args: dict, optional:
        a dictionary of args that can be passed as kwargs to tq.compile
    input_vars: list, optional:
        a list of variables of the objective to specify as input, rather than internal weights.

    Returns
    -------
    tuple
        the compiled objective, its compile arguments, its weight variables, dicts for the weight and input gradients,
        and a dictionary that links positions in an array to each variable (parses parameters).
    """
    def var_sorter(e):
        return hash(e.name)

    all_vars = objective.extract_variables()
    all_vars.sort(key=var_sorter)
    compile_args = check_compiler_args(compile_args)

    weight_vars = []
    if input_vars is None:
        input_vars = []
        weight_vars = all_vars
    else:
        input_vars = [assign_variable(v) for v in input_vars]
        for var in all_vars:
            if var not in input_vars:
                weight_vars.append(assign_variable(var))

    init_vals = compile_args['initial_values']
    if init_vals is not None:
        for k in init_vals.keys():
            if assign_variable(k) in input_vars:
                raise TequilaMLException(
                    'initial_values contained key {}, '
                    'which is meant to be an input variable.'.format(k))
        compile_args['initial_values'] = format_variable_dictionary(init_vals)

    comped = compile(objective, **compile_args)
    gradients = get_gradients(objective, compile_args)
    w_grad, i_grad = separate_gradients(gradients,
                                        weight_vars=weight_vars,
                                        input_vars=input_vars)
    first, second = get_variable_orders(weight_vars, input_vars)
    return comped, compile_args, input_vars, weight_vars, i_grad, w_grad, first, second
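
A standalone sketch of the input/weight split performed above (hypothetical helper: assign_variable and extract_variables are the real tequila functions, the rest is illustrative):

from tequila import assign_variable

def split_variables(objective, input_vars=None):
    # sort deterministically, as var_sorter does above
    all_vars = sorted(objective.extract_variables(), key=lambda v: hash(v.name))
    if input_vars is None:
        return [], all_vars  # no inputs declared: every variable is a weight
    input_vars = [assign_variable(v) for v in input_vars]
    weight_vars = [v for v in all_vars if v not in input_vars]
    return input_vars, weight_vars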
Example #4
 def __call__(self,
              variables: typing.Dict[Variable, numbers.Real] = None,
              samples: int = None,
              *args,
              **kwargs):
     variables = format_variable_dictionary(variables=variables)
     if self._variables is not None and len(self._variables) > 0:
         if variables is None or set(self._variables) != set(variables.keys()):
             raise TequilaException("BackendCircuit received not all variables. Circuit depends on variables {}, you gave {}".format(self._variables, variables))
     if samples is None:
         return self.simulate(variables=variables, noise_model=self.noise_model, *args, **kwargs)
     else:
         return self.sample(variables=variables, samples=samples, noise_model=self.noise_model, *args, **kwargs)
Example #5
    def initialize_variables(self, objective, initial_values, variables):
        """
        Convenience function to format the variables of some objective received in calls to optimizers.

        Parameters
        ----------
        objective: Objective:
            the objective being optimized.
        initial_values: dict:
            initial values for the variables of objective, as a dictionary.
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = objective.extract_variables()
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = numpy.random.uniform(0, 2 * numpy.pi)
                    detected = True
            if detected and not self.silent:
                print("WARNING: initial_variables given but not complete: Autocomplete with random number")

        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]

        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
Example #6
    def __call__(self,
                 variables: typing.Dict[Variable, numbers.Real] = None,
                 samples: int = None,
                 *args,
                 **kwargs):
        """
        Simulate or sample the backend circuit.

        Parameters
        ----------
        variables: dict:
            dictionary assigning values to the variables of the circuit.
        samples: int, optional:
            how many shots to sample with. If None, perform full wavefunction simulation.
        args
        kwargs

        Returns
        -------
        Float:
            the result of simulating or sampling the circuit.
        """

        variables = format_variable_dictionary(variables=variables)
        if self._variables is not None and len(self._variables) > 0:
            if variables is None or set(self._variables) != set(
                    variables.keys()):
                raise TequilaException(
                    "BackendCircuit received not all variables. Circuit depends on variables {}, you gave {}"
                    .format(self._variables, variables))
        if samples is None:
            return self.simulate(variables=variables,
                                 noise=self.noise,
                                 *args,
                                 **kwargs)
        else:
            return self.sample(variables=variables,
                               samples=samples,
                               noise=self.noise,
                               *args,
                               **kwargs)
Example #7
def minimize(objective: Objective,
             gradient: typing.Union[str, typing.Dict[Variable,
                                                     Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable,
                                                                 Variable],
                                                    Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """

    Parameters
    ----------
    objective: Objective :
        The tequila objective to optimize
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation (does not work in combination with all optimizers),
        dictionary (keys: tuple of variables, values: tequila objective) to define your own hessian,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If None, missing values are initialized randomly
    variables: typing.List[typing.Hashable], optional:
         List of Variables to optimize
    samples: int, optional:
         samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
         maximum number of iterations to perform.
    backend: str, optional:
         Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
         Additional options for the backend
         Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
         a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
         Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
         Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
         Dictionary of options
         (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
         (see scipy documentation)
    silent: bool :
         No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """

    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v
                       for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults

    optimizer = OptimizerSciPy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {
            assign_variable(k): v
            for k, v in initial_values.items()
        }
    return optimizer(objective=objective,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables,
                     *args,
                     **kwargs)
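
A hedged usage sketch for this entry point (the objective is illustrative; keyword support can vary slightly between tequila versions):

import tequila as tq

U = tq.gates.Ry(angle="a", target=0)
E = tq.ExpectationValue(H=tq.paulis.Z(0), U=U)

# analytic gradients (gradient=None) with scipy's BFGS:
result = tq.minimize(objective=E, method="BFGS", initial_values={"a": 0.5})
print(result.energy, result.variables)

# numerical 2-point gradients instead, e.g. for sampling-based runs:
# result = tq.minimize(objective=E, method="BFGS", gradient="2-point", samples=1000)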
Example #8
    def __call__(self,
                 objective: Objective,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable],
                                      Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        objective: Objective:
            the objective to optimize.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """

        objective = objective.contract()
        infostring = "{:15} : {}\n".format("Method", self.method)
        infostring += "{:15} : {} expectationvalues\n".format(
            "Objective", objective.count_expectationvalues())

        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)

        if self.save_history and reset_history:
            self.reset_history()

        active_angles, passive_angles, variables = self.initialize_variables(
            objective, initial_values, variables)

        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)

        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert names == param_keys  # make sure the bounds are not shuffled

        # do the compilation here to avoid costly recompilation during the optimization
        compiled_objective = self.compile_objective(objective=objective,
                                                    *args,
                                                    **kwargs)
        E = _EvalContainer(objective=compiled_objective,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)

        compile_gradient = self.method in (self.gradient_based_methods +
                                           self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods

        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException(
                        'Sorry, QNG and hessian not yet tested together.')

                combos = get_qng_combos(objective,
                                        initial_values=initial_values,
                                        backend=self.backend,
                                        samples=self.samples,
                                        noise=self.noise)
                dE = _QngContainer(combos=combos,
                                   param_keys=param_keys,
                                   passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format(
                    "gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format(
                    "hessian", ddE)

        if isinstance(gradient, dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException(
                        'Sorry, QNG and hessian not yet tested together.')

                combos = get_qng_combos(objective,
                                        func=func,
                                        initial_values=initial_values,
                                        backend=self.backend,
                                        samples=self.samples,
                                        noise=self.noise)
                dE = _QngContainer(combos=combos,
                                   param_keys=param_keys,
                                   passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)

        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False

        if compile_gradient:
            grad_obj, comp_grad_obj = self.compile_gradient(
                objective=objective,
                variables=variables,
                gradient=gradient,
                *args,
                **kwargs)
            expvals = sum(
                [o.count_expectationvalues() for o in comp_grad_obj.values()])
            infostring += "{:15} : {} expectationvalues\n".format(
                "gradient", expvals)
            dE = _GradContainer(objective=comp_grad_obj,
                                param_keys=param_keys,
                                samples=self.samples,
                                passive_angles=passive_angles,
                                save_history=self.save_history,
                                print_level=self.print_level)
        if compile_hessian:
            hess_obj, comp_hess_obj = self.compile_hessian(
                variables=variables,
                hessian=hessian,
                grad_obj=grad_obj,
                comp_grad_obj=comp_grad_obj,
                *args,
                **kwargs)
            expvals = sum(
                [o.count_expectationvalues() for o in comp_hess_obj.values()])
            infostring += "{:15} : {} expectationvalues\n".format(
                "hessian", expvals)
            ddE = _HessContainer(objective=comp_hess_obj,
                                 param_keys=param_keys,
                                 samples=self.samples,
                                 passive_angles=passive_angles,
                                 save_history=self.save_history,
                                 print_level=self.print_level)
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables",
                                        len(active_angles)))

        Es = []

        optimizer_instance = self

        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E,
                                      x0=param_values,
                                      jac=dE,
                                      hess=ddE,
                                      args=(Es, ),
                                      method=self.method,
                                      tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)

        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            callback.real_iterations = len(E.history)

        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history

            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles

        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = {**ea[0][1], **passive_angles}

        return SciPyResults(energy=E_final,
                            history=self.history,
                            variables=format_variable_dictionary(angles_final),
                            scipy_result=res)
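
The SciPyCallback pattern above, reduced to plain scipy (self-contained illustration, not tequila code; note the caveat from the source that some methods, e.g. COBYLA, never invoke the callback):

import numpy
import scipy.optimize

def rosen(x):
    # simple smooth test function
    return (1 - x[0])**2 + 100 * (x[1] - x[0]**2)**2

class History:
    def __init__(self):
        self.xs, self.fs = [], []

    def __call__(self, xk):
        # scipy passes the current iterate; record it like SciPyCallback does
        self.xs.append(numpy.array(xk))
        self.fs.append(rosen(xk))

hist = History()
res = scipy.optimize.minimize(rosen, x0=[0.0, 0.0], method="BFGS", callback=hist)
print(res.fun, len(hist.xs))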
Example #9
    def initialize_variables(self, objective, initial_values, variables):
        """
        Convenience function to format the variables of some objective received in calls to optimizers.

        Parameters
        ----------
        objective: Objective:
            the objective being optimized.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = objective.extract_variables()
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {
                k: numpy.random.uniform(0, 2 * numpy.pi)
                for k in all_variables
            }
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k: 0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {
                    k: numpy.random.uniform(0, 2 * numpy.pi)
                    for k in all_variables
                }
            else:
                raise TequilaOptimizerException(
                    "unknown initialization instruction: {}".format(
                        initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = numpy.random.uniform(0, 2 * numpy.pi)
                    detected = True
            if detected and not self.silent:
                warnings.warn(
                    "initial_values given but not complete: autocompleted missing entries with random numbers",
                    TequilaWarning)

        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]

        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
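
A plain-dict illustration of the active/passive split this function returns (tequila Variable objects replaced by strings for brevity):

initial_values = {"a": 0.1, "b": 0.2, "c": 0.3}
variables = ["a", "b"]  # only these are optimized

active_angles = {v: initial_values[v] for v in variables}
passive_angles = {k: v for k, v in initial_values.items() if k not in active_angles}
assert active_angles == {"a": 0.1, "b": 0.2}
assert passive_angles == {"c": 0.3}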
Example #10
    def __call__(self, objective: Objective,
                 maxiter: int = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 variables: typing.List[Variable] = None,
                 reset_history: bool = True,
                 method_options: dict = None,
                 gradient=None,
                 *args, **kwargs) -> GDResults:

        """
        perform a gradient descent optimization of an objective.

        Parameters
        ----------
        objective: Objective:
            the objective to optimize.
        maxiter: int, optional:
            Overrides the optimizer to specify maximum number of iterations to perform.
            Default value: use the maxiter supplied to __init__.
        initial_values: dict, optional:
            initial point at which to begin optimization.
            Default None: will be chosen randomly.
        variables: list, optional:
            which variables to optimize. Note that all variables not to be optimized must be specified in initial_values
            Default: optimize all variables of objective.
        reset_history: bool: Default = True:
            whether or not to wipe the self.history object.
        method_options: dict, optional:
            dummy keyword to play well with tq.minimize. Does nothing.
        gradient: optional:
            how to calculate gradients. if str '2-point', will use 2-point numerical gradients;
            if str 'qng' will use the default qng optimizer. Other more complex options possible.
        args
        kwargs

        Returns
        -------
        GDResults
            all the results of optimization.
        """


        if self.save_history and reset_history:
            self.reset_history()

        active_angles, passive_angles, variables = self.initialize_variables(objective, initial_values, variables)
        v = {**active_angles, **passive_angles}

        comp = self.prepare(objective=objective, initial_values=v, variables=variables, gradient=gradient)

        ### prefactor. Early stopping, initialization, etc. handled here

        if maxiter is None:
            maxiter = self.maxiter

        ### the actual algorithm acts here:
        e = comp(v, samples=self.samples)
        self.history.energies.append(e)
        self.history.angles.append(v)
        best = e
        best_angles = v
        v = self.step(comp, v)
        last = e
        for step in range(1, maxiter):
            e = comp(v, samples=self.samples)
            self.history.energies.append(e)
            self.history.angles.append(v)
            ### saving best performance
            if e < best:
                best = e
                best_angles = v

            if not self.silent:
                if self.print_level > 2:
                    string = "Iteration: {} , Energy: {:+2.8f}, angles: {}".format(str(step), e, v)
                else:
                    string = "Iteration: {} , Energy: {:+2.8f}".format(str(step), e)
                print(string)

            if self.tol is not None:
                if numpy.abs(e - last) <= self.tol:
                    if not self.silent:
                        print('delta f smaller than tolerance {}. Stopping optimization.'.format(str(self.tol)))
                    break

            ### get new parameters with self.step!
            v = self.step(comp, v)
            last = e
        E_final, angles_final = best, best_angles
        return GDResults(energy=E_final, variables=format_variable_dictionary(angles_final), history=self.history,
                            moments=self.moments_trajectory[id(comp)])
Example #11
    def __call__(self, objective: Objective,
                 maxiter: int = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 variables: typing.List[Variable] = None,
                 reset_history: bool = True,
                 method_options: dict = None,
                 gradient=None,
                 *args, **kwargs) -> GDResults:

        """
        perform a gradient descent optimization of an objective.

        Parameters
        ----------
        objective: Objective:
            the objective to optimize.
        maxiter: int, optional:
            Overrides the optimizer to specify maximum number of iterations to perform.
            Default value: use the maxiter supplied to __init__.
        initial_values: dict, optional:
            initial point at which to begin optimization.
            Default None: will be chosen randomly.
        variables: list, optional:
            which variables to optimize. Note that all variables not to be optimized must be specified in initial_values
            Default: optimize all variables of objective.
        reset_history: bool: Default = True:
            whether or not to wipe the self.history object.
        method_options: dict, optional:
            dummy keyword to play well with tq.minimize. Does nothing.
        gradient: optional:
            how to calculate gradients. if str '2-point', will use 2-point numerical gradients;
            if str 'qng' will use the default qng optimizer. Other more complex options possible.
        args
        kwargs

        Returns
        -------
        GDResults
            all the results of optimization.
        """


        if self.save_history and reset_history:
            self.reset_history()

        active_angles, passive_angles, variables = self.initialize_variables(objective, initial_values, variables)
        v = {**active_angles, **passive_angles}

        comp = self.prepare(objective=objective, initial_values=v, variables=variables, gradient=gradient)

        ### prefactor. Early stopping, initialization, etc. handled here

        if maxiter is None:
            maxiter = self.maxiter

        ### the actual algorithm acts here:
        e = comp(v, samples=self.samples)
        self.history.energies.append(e)
        self.history.angles.append(v)
        best = e
        best_angles = v
        v = self.step(comp, v)
        last = e

        if not self.silent:
            print("iter.        <O>          Δ<O>      max(d<O>)   rms(d<O>)")   

        for step in range(1, maxiter):
            comment = ""
            e = comp(v, samples=self.samples)
            self.history.energies.append(e)
            self.history.angles.append(v)
            ### saving best performance
            if e < best:
                best = e
                best_angles = v

            if self.tol is not None:
                if numpy.abs(e - last) <= self.tol:
                    if not self.silent:
                        print('delta f smaller than tolerance {}. Stopping optimization.'.format(str(self.tol)))
                    break

            ### get new parameters with self.step!
            vn = self.step(comp, v)

            # From http://vergil.chemistry.gatech.edu/notes/diis/node3.html
            if self.__diis:
                self.__diis.push(
                    numpy.array([vn[k] for k in active_angles]),
                    numpy.array([vn[k]-v[k] for k in active_angles]))

                new = self.__diis.update()
                if new is not None:
                    self.reset_momenta()
                    comment = "DIIS"
                    for i,k in enumerate(active_angles):
                        vn[k] = new[i]


            if not self.silent:
                print("%3i   %+15.8f   %+7.2e   %7.3e   %7.3e    %s"
                      % (step,
                         e,
                         e-last,
                         numpy.max(abs(self.__dx)),
                         numpy.sqrt(numpy.average(self.__dx**2)),
                         comment))


            last = e
            v = vn
        E_final, angles_final = best, best_angles
        return GDResults(energy=E_final, variables=format_variable_dictionary(angles_final), history=self.history,
                            moments=self.moments_trajectory[id(comp)])
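
A minimal numpy sketch of the DIIS extrapolation referenced above (standard Pulay scheme from the linked notes; the private self.__diis object is not shown here, so treat push/update as computing something equivalent to this):

import numpy

def diis_extrapolate(params, errors):
    # params: list of parameter vectors; errors: matching list of error vectors.
    # Solve the augmented system B c = rhs enforcing sum(c) = 1, then return
    # the extrapolated parameter vector sum_i c_i * params_i.
    n = len(params)
    B = numpy.zeros((n + 1, n + 1))
    for i, ei in enumerate(errors):
        for j, ej in enumerate(errors):
            B[i, j] = numpy.dot(ei, ej)
    B[n, :n] = B[:n, n] = -1.0
    rhs = numpy.zeros(n + 1)
    rhs[n] = -1.0
    c = numpy.linalg.solve(B, rhs)[:n]
    return sum(ci * pi for ci, pi in zip(c, params))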
Example #12
    def __call__(self,
                 objective: Objective,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable],
                                      Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyReturnType:
        """
        Optimizes with scipy and gives back the optimized angles.
        Optimized energies over the run are available via the history.
        :param objective: The tequila Objective to minimize
        :param initial_values: initial values for the objective
        :param reset_history: reset the history before optimization starts (has no effect if self.save_history is False)
        :return: tuple of optimized energy, optimized angles and scipy output
        """

        infostring = "{:15} : {}\n".format("Method", self.method)
        infostring += "{:15} : {} expectationvalues\n".format(
            "Objective", objective.count_expectationvalues())

        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)

        if self.save_history and reset_history:
            self.reset_history()

        active_angles, passive_angles, variables = self.initialize_variables(
            objective, initial_values, variables)

        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)

        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert names == param_keys  # make sure the bounds are not shuffled

        # do the compilation here to avoid costly recompilation during the optimization
        compiled_objective = self.compile_objective(objective=objective)
        E = _EvalContainer(objective=compiled_objective,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           backend_options=self.backend_options,
                           print_level=self.print_level)

        compile_gradient = self.method in (self.gradient_based_methods +
                                           self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods

        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException(
                        'Sorry, QNG and hessian not yet tested together.')

                combos = get_qng_combos(objective,
                                        initial_values=initial_values,
                                        backend=self.backend,
                                        samples=self.samples,
                                        noise=self.noise,
                                        backend_options=self.backend_options)
                dE = _QngContainer(combos=combos,
                                   param_keys=param_keys,
                                   passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format(
                    "gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format(
                    "hessian", ddE)

        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False

        if compile_gradient:
            grad_obj, comp_grad_obj = self.compile_gradient(
                objective=objective, variables=variables, gradient=gradient)
            expvals = sum(
                [o.count_expectationvalues() for o in comp_grad_obj.values()])
            infostring += "{:15} : {} expectationvalues\n".format(
                "gradient", expvals)
            dE = _GradContainer(objective=comp_grad_obj,
                                param_keys=param_keys,
                                samples=self.samples,
                                passive_angles=passive_angles,
                                save_history=self.save_history,
                                print_level=self.print_level,
                                backend_options=self.backend_options)

        if compile_hessian:
            hess_obj, comp_hess_obj = self.compile_hessian(
                variables=variables,
                hessian=hessian,
                grad_obj=grad_obj,
                comp_grad_obj=comp_grad_obj)
            expvals = sum(
                [o.count_expectationvalues() for o in comp_hess_obj.values()])
            infostring += "{:15} : {} expectationvalues\n".format(
                "hessian", expvals)
            ddE = _HessContainer(objective=comp_hess_obj,
                                 param_keys=param_keys,
                                 samples=self.samples,
                                 passive_angles=passive_angles,
                                 save_history=self.save_history,
                                 print_level=self.print_level,
                                 backend_options=self.backend_options)

        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables",
                                        len(active_angles)))

        Es = []

        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E,
                                      x0=param_values,
                                      jac=dE,
                                      hess=ddE,
                                      args=(Es, ),
                                      method=self.method,
                                      tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)

        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            callback.real_iterations = len(E.history)

        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history

            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles

        E_final = res.fun
        angles_final = dict(
            (param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}

        return SciPyReturnType(energy=E_final,
                               angles=format_variable_dictionary(angles_final),
                               history=self.history,
                               scipy_output=res)
Пример #13
0
    def __call__(self,
                 objective: Objective,
                 maxiter,
                 lr: float = .01,
                 method: str = 'sgd',
                 qng: bool = False,
                 stop_count: int = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 variables: typing.List[Variable] = None,
                 samples: int = None,
                 backend: str = None,
                 noise: NoiseModel = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> GDReturnType:
        """
        Optimizes with a variation of gradient descent and gives back the optimized angles.
        Optimized energies over the run are available via the history.
        :param objective: The tequila Objective to minimize
        :param maxiter: how many iterations to run, at maximum.
        :param method: what method to optimize via.
        :param qng: whether or not to use the QNG to calculate gradients.
        :param stop_count: how many steps after which to abort if no improvement occurs.
        :param initial_values: initial values for the objective
        :param variables: which variables to optimize over. Default None: all the variables of the objective.
        :param samples: the number of samples to use. Default None: Wavefunction simulation used instead.
        :param backend: which simulation backend to use. Default None: let Tequila Pick!
        :param noise: the NoiseModel to apply to sampling. Default None. Affects chosen simulator.
        :param reset_history: reset the history before optimization starts (has no effect if self.save_history is False)
        :return: tuple of optimized energy, optimized angles and optimization history
        """

        if self.save_history and reset_history:
            self.reset_history()

        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]

        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v

        # Transform the initial value directory into (ordered) arrays

        comp = compile(objective=objective,
                       variables=initial_values,
                       backend=backend,
                       noise=noise,
                       samples=samples)

        if not qng:
            g_list = []
            for k in active_angles.keys():
                g = grad(objective, k)
                g_comp = compile(objective=g,
                                 variables=initial_values,
                                 backend=backend,
                                 noise=noise,
                                 samples=samples)
                g_list.append(g_comp)

            gradients = CallableVector(g_list)
        else:
            if method.lower() == 'adagrad':
                print(
                    'Warning! You have chosen to use QNG with {}; convergence is not likely.'
                    .format(method))
            gradients = QNGVector(
                get_qng_combos(objective=objective,
                               initial_values=initial_values,
                               backend=backend,
                               noise=noise,
                               samples=samples))

        if not self.silent:
            print("backend: {}".format(comp.backend))
            print("samples: {}".format(samples))
            print("{} active variables".format(len(active_angles)))
            print("qng: {}".format(str(qng)))

        ### prefactor. Early stopping, initialization, etc. handled here

        if maxiter is None:
            maxiter = self.maxiter
        if stop_count is None:
            stop_count = maxiter

        ### the actual algorithm acts here:

        f = self.method_dict[method.lower()]
        v = initial_values
        vec_len = len(active_angles)
        best = None
        best_angles = None
        first = numpy.zeros(vec_len)
        second = numpy.zeros(vec_len)
        moments = [first, second]
        all_moments = [moments]
        tally = 0
        for step in range(maxiter):
            e = comp(v, samples=samples)
            self.history.energies.append(e)
            self.history.angles.append(v)

            ### saving best performance and counting the stop tally.
            if step == 0:
                best = e
                best_angles = v
                tally = 0
            else:
                if e < best:
                    best = e
                    best_angles = v
                    tally = 0
                else:
                    tally += 1

            if not self.silent:
                string = "Iteration: {} , Energy: {}, angles: {}".format(
                    str(step), str(e), v)
                print(string)

            ### check if its time to stop!
            if tally == stop_count:
                if not self.silent:
                    print(
                        'no improvement after {} epochs. Stopping optimization.'
                        .format(str(stop_count)))
                break

            new, moments, grads = f(lr=lr,
                                    step=step,
                                    gradients=gradients,
                                    v=v,
                                    moments=moments,
                                    active_angles=active_angles,
                                    samples=samples,
                                    **kwargs)
            save_grad = {}
            if passive_angles is not None:
                v = {**new, **passive_angles}
            else:
                v = new
            for i, k in enumerate(active_angles.keys()):
                save_grad[k] = grads[i]
            self.history.gradients.append(save_grad)
            all_moments.append(moments)
        E_final, angles_final = best, best_angles
        angles_final = {**angles_final, **passive_angles}
        return GDReturnType(energy=E_final,
                            angles=format_variable_dictionary(angles_final),
                            history=self.history,
                            moments=all_moments)
Example #14
def minimize(objective: Objective,
             lr=0.01,
             method='sgd',
             qng: bool = False,
             stop_count=None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             noise: NoiseModel = None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> GDReturnType:
    """

    Parameters
    ----------
    objective: Objective :
        The tequila objective to optimize
    lr: float > 0:
        the learning rate. Default 0.01.
    method: string:
        which variation on Gradient Descent to use. Options include 'sgd', 'adam', 'nesterov', 'adagrad', 'rmsprop'.
    qng: bool:
        whether or not the gradient calculated should be the quantum natural gradient or not. defaults to False.
    stop_count: int:
        how many steps after which to cease training if no improvement occurs. Default None results in going till maxiter is complete
    initial_values: typing.Dict[typing.Hashable, numbers.Real]: (Default value = None):
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable] :
         (Default value = None)
         List of Variables to optimize
    samples: int :
         (Default value = None)
         samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int :
         (Default value = 100)
    backend: str :
         (Default value = None)
         Simulator backend, will be automatically chosen if set to None
    noise: NoiseModel:
         (Default value = None)
         a NoiseModel to apply to all expectation values in the objective.
    silent: bool :
         (Default value = False)
         No printout if True
    save_history: bool:
        (Default value = True)
        Save the history throughout the optimization


    Optional kwargs may include beta, beta2, and rho, parameters which affect (but need not be altered for) the
    various method algorithms.

    Returns
    -------
    GDReturnType:
        the results of optimization.
    """

    # bring into right format
    variables = format_variable_list(variables)
    initial_values = format_variable_dictionary(initial_values)

    # set defaults
    all_variables = objective.extract_variables()
    if variables is None:
        variables = all_variables
    if initial_values is None:
        initial_values = {
            k: numpy.random.uniform(0, 2 * numpy.pi)
            for k in all_variables
        }
    else:
        # autocomplete initial values, warn if you did
        detected = False
        for k in all_variables:
            if k not in initial_values:
                initial_values[k] = numpy.random.uniform(0, 2 * numpy.pi)
                detected = True
        if detected and not silent:
            print(
                "WARNING: initial_values given but incomplete: autocompleting missing entries with random numbers"
            )

    optimizer = OptimizerGD(save_history=save_history,
                            maxiter=maxiter,
                            silent=silent)
    if initial_values is not None:
        initial_values = {
            assign_variable(k): v
            for k, v in initial_values.items()
        }
    return optimizer(objective=objective,
                     maxiter=maxiter,
                     lr=lr,
                     method=method,
                     qng=qng,
                     stop_count=stop_count,
                     backend=backend,
                     initial_values=initial_values,
                     variables=variables,
                     noise=noise,
                     samples=samples,
                     *args,
                     **kwargs)
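
A hedged usage sketch for the gradient-descent entry point (method names follow the docstring above; exact keyword support is version-dependent):

import tequila as tq

U = tq.gates.Ry(angle="a", target=0)
E = tq.ExpectationValue(H=tq.paulis.Z(0), U=U)

result = tq.minimize(objective=E, method="adam", lr=0.05, maxiter=50,
                     initial_values={"a": 0.5}, silent=True)
print(result.energy)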
Example #15
    def __call__(self, objective: Objective,
                 maxiter,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 variables: typing.List[Variable] = None,
                 reset_history: bool = True,
                 method_options: dict = None,
                 gradient: str = None,
                 *args, **kwargs) -> GDReturnType:
        """
        Optimizes with a variation of gradient descent and gives back the optimized angles.
        Optimized energies over the run are available via the history.
        :param objective: The tequila Objective to minimize
        :param maxiter: how many iterations to run, at maximum.
        :param gradient: how to calculate gradients; e.g. '2-point' for numerical gradients or 'qng' for the quantum natural gradient.
        :param initial_values: initial values for the objective
        :param variables: which variables to optimize over. Default None: all the variables of the objective.
        :param reset_history: reset the history before optimization starts (has no effect if self.save_history is False)
        :return: tuple of optimized energy ,optimized angles and scipy output
        """

        if self.save_history and reset_history:
            self.reset_history()

        active_angles, passive_angles, variables = self.initialize_variables(objective, initial_values, variables)
        v = {**active_angles, **passive_angles}

        comp = self.prepare(objective=objective, initial_values=v, variables=variables, gradient=gradient)

        ### preamble: early stopping, initialization, etc. handled here

        if maxiter is None:
            maxiter = self.maxiter

        ### the actual algorithm acts here:
        e = comp(v, samples=self.samples)
        self.history.energies.append(e)
        self.history.angles.append(v)
        best = e
        best_angles = v
        v = self.step(comp, v)
        last = e
        for step in range(1, maxiter):
            e = comp(v, samples=self.samples)
            self.history.energies.append(e)
            self.history.angles.append(v)
            ### saving best performance and counting the stop tally.
            if e < best:
                best = e
                best_angles = v

            if not self.silent:
                if self.print_level > 2:
                    string = "Iteration: {} , Energy: {:+2.8f}, angles: {}".format(str(step), e, v)
                else:
                    string = "Iteration: {} , Energy: {:+2.8f}".format(str(step), e)
                print(string)

            if self.tol is not None:
                if numpy.abs(e - last) <= self.tol:
                    if not self.silent:
                        print('delta f smaller than tolerance {}. Stopping optimization.'.format(str(self.tol)))
                    break

            ### get new parameters with self.step!
            v = self.step(comp, v)
            last = e
        E_final, angles_final = best, best_angles
        return GDReturnType(energy=E_final, angles=format_variable_dictionary(angles_final), history=self.history,
                            moments=self.moments_trajectory[id(comp)])
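
The self.step call above is not shown in this example. As a rough sketch of what a plain 'sgd' step amounts to (an assumption based on standard gradient descent, not the library's actual implementation):

# Hypothetical sketch, not library code: one plain gradient-descent step.
def sgd_step(gradients, v, lr, samples=None):
    # gradients: dict mapping each active variable to a compiled gradient objective
    # v: dict of current angle values; lr: learning rate
    new_v = dict(v)
    for var, dE in gradients.items():
        # move each angle against its gradient, scaled by the learning rate
        new_v[var] = v[var] - lr * dE(v, samples=samples)
    return new_v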
Example #16
def minimize(objective: Objective,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             qng: bool = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyReturnType:
    """

    Parameters
    ----------
    objective: Objective :
        The tequila objective to optimize
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None) :
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
    hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective], None] : (Default value = None) :
        '2-point', 'cs' or '3-point' for numerical hessian evaluation (does not work in combination with all optimizers),
        dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
        None for automatic construction (default)
    qng: bool : (Default value = None) :
        whether or not, in the event that a gradient-based method is to be used, the qng, rather than the standard gradient,
        should be employed. NOTE: throws an error for anything but a single expectationvalue with no passive angles.
    initial_values: typing.Dict[typing.Hashable, numbers.Real]: (Default value = None):
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None, they will be set to random values in [0, 2*pi)
    variables: typing.List[typing.Hashable] :
         (Default value = None)
         List of Variables to optimize
    samples: int :
         (Default value = None)
         samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int :
         (Default value = 100)
    backend: str :
         (Default value = None)
         Simulator backend, will be automatically chosen if set to None
    backend_options: dict:
         (Default value = None)
         Additional options for the backend
         Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel:
         (Default value = None)
         a NoiseModel to apply to all expectation values in the objective.
    method: str :
         (Default value = "BFGS")
         Optimization method (see scipy documentation, or 'available methods')
    tol: float :
         (Default value = 1.e-3)
         Convergence tolerance for optimization (see scipy documentation)
    method_options: dict :
         (Default value = None)
         Dictionary of options
         (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]]:
        (Default value = None)
        bounds for the variables (see scipy documentation)
    method_constraints :
         (Default value = None)
         (see scipy documentation)
    silent: bool :
         (Default value = False)
         No printout if True
    save_history: bool:
        (Default value = True)
        Save the history throughout the optimization

    Returns
    -------
    SciPyReturnType:
        the results of the optimization

    """

    # bring into right format
    variables = format_variable_list(variables)
    initial_values = format_variable_dictionary(initial_values)
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    all_variables = objective.extract_variables()
    if variables is None:
        variables = all_variables
    if initial_values is None:
        initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
    else:
        # autocomplete initial values, warn if you did
        detected = False
        for k in all_variables:
            if k not in initial_values:
                initial_values[k] = numpy.random.uniform(0, 2 * numpy.pi)
                detected = True
        if detected and not silent:
            print("WARNING: initial_variables given but not complete: Autocomplete with random number")

    optimizer = OptimizerSciPy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               tol=tol)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(objective=objective, qng=qng,
                     backend=backend, backend_options=backend_options, gradient=gradient, hessian=hessian, initial_values=initial_values,
                     variables=variables, noise=noise,
                     samples=samples, *args, **kwargs)
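
A minimal usage sketch of this entry point; the one-parameter objective below is an illustrative assumption, and BFGS with an explicit starting point is an ordinary scipy choice.

# Hedged usage sketch: <X> of a single Ry rotation, optimized with BFGS.
import tequila as tq

a = tq.Variable("a")
E = tq.ExpectationValue(H=tq.paulis.X(0), U=tq.gates.Ry(angle=a, target=0))
result = minimize(objective=E,
                  method="BFGS",
                  initial_values={"a": 0.5},
                  tol=1.e-4,
                  silent=True)
print(result.energy, result.angles)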
Example #17
    def __call__(self, objective: Objective,
                 initial_values: typing.Dict[Variable, numbers.Real],
                 variables: typing.List[Variable],
                 gradient: typing.Dict[Variable, Objective] = None,
                 qng: bool = False,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 samples: int = None,
                 backend: str = None,
                 backend_options: dict = None,
                 noise: NoiseModel = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyReturnType:
        """
        Optimizes with scipy and gives back the optimized angles;
        the energies encountered along the way are stored in the history.
        :param objective: The tequila Objective to minimize
        :param initial_values: initial values for the objective
        :param reset_history: reset the history before optimization starts (has no effect if self.save_history is False)
        :return: SciPyReturnType carrying the optimized energy, the optimized angles, the history, and the scipy output
        """

        infostring = "Starting {method} optimization\n".format(method=self.method)
        infostring += "Objective: {} expectationvalues\n".format(objective.count_expectationvalues())

        if self.save_history and reset_history:
            self.reset_history()

        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]

        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v

        # Transform the initial value dictionary into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)

        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "bounds : {}\n".format(self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled

        # do the compilation here to avoid costly recompilation during the optimization
        compiled_objective = compile(objective=objective, variables=initial_values, backend=backend, noise=noise,
                                     samples=samples, *args, **kwargs)

        E = _EvalContainer(objective=compiled_objective,
                           param_keys=param_keys,
                           samples=samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           backend_options=backend_options,
                           silent=self.silent)

        # compile gradients
        if self.method in self.gradient_based_methods + self.hessian_based_methods and not isinstance(gradient, str):
            compiled_grad_objectives = dict()
            if gradient is None:
                gradient = {assign_variable(k): grad(objective=objective, variable=k) for k in active_angles.keys()}
            else:
                gradient = {assign_variable(k): v for k, v in gradient.items()}

            grad_exval = []
            for k in active_angles.keys():
                if k not in gradient:
                    raise Exception("No gradient for variable {}".format(k))
                grad_exval.append(gradient[k].count_expectationvalues())
                compiled_grad_objectives[k] = compile(objective=gradient[k], variables=initial_values,
                                                      samples=samples, noise=noise, backend=backend, *args, **kwargs)

            if qng:
                combos = get_qng_combos(objective, samples=samples, backend=backend,
                                        noise=noise, initial_values=initial_values)

                dE = _QngContainer(combos=combos,
                                   param_keys=param_keys,
                                   samples=samples,
                                   passive_angles=passive_angles,
                                   save_history=self.save_history,
                                   silent=self.silent,
                                   backend_options=backend_options)
            else:

                dE = _GradContainer(objective=compiled_grad_objectives,
                                    param_keys=param_keys,
                                    samples=samples,
                                    passive_angles=passive_angles,
                                    save_history=self.save_history,
                                    silent=self.silent,
                                    backend_options=backend_options)

                infostring += "Gradients: {} expectationvalues (min={}, max={})\n".format(sum(grad_exval),
                                                                                          min(grad_exval),
                                                                                          max(grad_exval))
        else:
            # use numerical gradient
            dE = gradient
            infostring += "Gradients: {}\n".format(gradient)

        # compile hessian

        if self.method in self.hessian_based_methods and not isinstance(hessian, str):

            if isinstance(gradient, str):
                raise TequilaScipyException("Can not use numerical gradients for Hessian based methods")
            if qng is True:
                raise TequilaScipyException('Quantum Natural Hessian not yet well-defined, sorry!')
            compiled_hess_objectives = dict()
            hess_exval = []
            for i, k in enumerate(active_angles.keys()):
                for j, l in enumerate(active_angles.keys()):
                    if j > i: continue
                    hess = grad(gradient[k], l)
                    compiled_hess = compile(objective=hess, variables=initial_values, samples=samples,
                                            noise=noise,
                                            backend=backend, *args, **kwargs)
                    compiled_hess_objectives[(k, l)] = compiled_hess
                    compiled_hess_objectives[(l, k)] = compiled_hess
                    hess_exval.append(compiled_hess.count_expectationvalues())

            ddE = _HessContainer(objective=compiled_hess_objectives,
                                 param_keys=param_keys,
                                 samples=samples,
                                 passive_angles=passive_angles,
                                 save_history=self.save_history,
                                 silent=self.silent)

            infostring += "Hessian: {} expectationvalues (min={}, max={})\n".format(sum(hess_exval), min(hess_exval),
                                                                                    max(hess_exval))

        else:
            infostring += "Hessian: {}\n".format(hessian)
            if self.method != "TRUST-CONSTR" and hessian is not None:
                raise TequilaScipyException("numerical hessians only for trust-constr method")
            ddE = hessian

        if not self.silent:
            print("ObjectiveType is {}".format(type(compiled_objective)))
            print(infostring)
            print("backend: {}".format(compiled_objective.backend))
            print("samples: {}".format(samples))
            print("{} active variables".format(len(active_angles)))

        # get the number of real scipy iterations for better histories
        real_iterations = []

        Es = []
        callback = lambda x, *args: real_iterations.append(len(E.history) - 1)
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)

        # failsafe since callback is not implemented everywhere
        if len(real_iterations) == 0:
            real_iterations = range(len(E.history))
        else:
            real_iterations = [0] + real_iterations
        if self.save_history:
            self.history.energies = [E.history[i] for i in real_iterations]
            self.history.energy_evaluations = E.history
            self.history.angles = [E.history_angles[i] for i in real_iterations]
            self.history.angles_evaluations = E.history_angles
            if dE is not None and not isinstance(dE, str):
                # can currently only save gradients if explicitly evaluated
                # and will fail for hessian based approaches
                # need better callback functions
                try:
                    if self.method not in self.hessian_based_methods:
                        self.history.gradients = [dE.history[i] for i in real_iterations]
                except Exception:
                    print("WARNING: History could not assign the stored gradients")
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                # hessians are not evaluated in the same frequencies as energies
                # therefore we can not store the "real" iterations currently
                self.history.hessians_evaluations = ddE.history

        E_final = res.fun
        angles_final = dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}

        return SciPyReturnType(energy=E_final, angles=format_variable_dictionary(angles_final), history=self.history,
                               scipy_output=res)
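
_EvalContainer itself is not shown in this example. Conceptually (a sketch assuming it mirrors the calling code above, not the actual class), it adapts the compiled objective to scipy's flat-array interface:

# Hypothetical sketch of an evaluation container: scipy hands over a flat
# parameter array; we map it back to named variables, merge in the passive
# angles, evaluate the compiled objective, and record history for bookkeeping.
import numpy

class EvalSketch:
    def __init__(self, objective, param_keys, passive_angles=None, samples=None):
        self.objective = objective
        self.param_keys = param_keys
        self.passive_angles = passive_angles if passive_angles is not None else {}
        self.samples = samples
        self.history = []
        self.history_angles = []

    def __call__(self, x, *args):
        variables = dict(zip(self.param_keys, x))
        variables.update(self.passive_angles)
        e = self.objective(variables=variables, samples=self.samples)
        self.history.append(e)
        self.history_angles.append(variables)
        return numpy.float64(e)  # scipy expects a plain scalar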
Example #18
def minimize(objective: Objective,
             lr=0.1,
             method='sgd',
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             gradient: str = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             noise: NoiseModel = None,
             device: str = None,
             tol: float = None,
             silent: bool = False,
             save_history: bool = True,
             beta: float = 0.9,
             rho: float = 0.999,
             epsilon: float = 1.e-7,
             *args,
             **kwargs) -> GDResults:

    """ Initialize and call the GD optimizer.
    Parameters
    ----------
    objective: Objective :
        The tequila objective to optimize
    lr: float >0:
        the learning rate. Default 0.1.
    beta: float >0:
        scaling factor for first moments. default 0.9
    rho: float >0:
        scaling factor for second moments. default 0.999
    epsilon: float>0:
        small float for stability of division. default 10^-7

    method: str: Default = 'sgd':
        which variation on gradient descent to use. Options include 'sgd', 'adam', 'nesterov', 'adagrad', 'rmsprop', etc.
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None,
        they will be set to random values in [0, 2*pi)
    variables: typing.List[typing.Hashable], optional:
         List of Variables to optimize
    gradient: optional:
        the gradient to use. If None, gradients are calculated in the usual way. If the string 'qng' is passed,
        the quantum natural gradient is calculated. If a dictionary of objectives is given, those objectives are used.
        If another dictionary is given, an attempt will be made to interpret it as a specification of, e.g., numerical gradients.
    samples: int, optional:
         samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : Default = 100:
         the maximum number of iterations to run.
    backend: str, optional:
         Simulation backend which will be automatically chosen if set to None
    noise: NoiseModel, optional:
         a NoiseModel to apply to all expectation values in the objective.
    device: optional:
        the device on which (or, in the case of simulation, for which) to sample all quantum circuits employed in optimization.
    tol: float : Default = None:
         Convergence tolerance for optimization; if abs(delta f) smaller than tol, stop.
    silent: bool : Default = False:
         No printout if True
    save_history: bool: Default = True:
        Save the history throughout the optimization

    Note
    ----

    optional kwargs may include beta, beta2, and rho, parameters which affect the various
    method algorithms but do not usually need to be altered.

    Returns
    -------
    GDResults:
        the results of an optimization.

    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    optimizer = OptimizerGD(save_history=save_history,
                            method=method,
                            lr=lr,
                            beta=beta,
                            rho=rho,
                            tol=tol,
                            epsilon=epsilon,
                            samples=samples, backend=backend,
                            device=device,
                            noise=noise,
                            maxiter=maxiter,
                            silent=silent)
    return optimizer(objective=objective,
                     maxiter=maxiter,
                     gradient=gradient,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
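
As a closing usage sketch (the objective is again an illustrative assumption), the moment parameters documented above come into play once a momentum-based method such as 'adam' is selected:

# Hedged usage sketch: Adam with the default moment-scaling parameters.
import tequila as tq

a = tq.Variable("a")
E = tq.ExpectationValue(H=tq.paulis.Z(0), U=tq.gates.Rx(angle=a, target=0))
result = minimize(objective=E,
                  method='adam',
                  lr=0.05,
                  beta=0.9,    # first-moment scaling
                  rho=0.999,   # second-moment scaling
                  maxiter=200,
                  tol=1.e-4,
                  silent=True)
print(result.energy)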