Example #1
# imports assumed by this snippet (all available from the tequila top level)
from tequila import QCircuit, QubitHamiltonian, ExpectationValue, TequilaException


class ObjectiveFactoryBase:
    """
    Default factory that creates the objective for the Adapt solver.
    It builds the single expectation value <H>_{Upre + U + Upost},
    where U is the circuit that is adaptively constructed.
    """

    Upre: QCircuit = QCircuit()
    Upost: QCircuit = QCircuit()
    H: QubitHamiltonian = None

    def __init__(self, H=None, Upre=None, Upost=None, *args, **kwargs):
        # the Hamiltonian is mandatory; pre- and post-circuits are optional and default to empty circuits
        if H is None:
            raise TequilaException("No Hamiltonian was given to Adapt!")

        self.H = H
        if Upre is not None:
            self.Upre = Upre
        else:
            self.Upre = QCircuit()
        if Upost is not None:
            self.Upost = Upost
        else:
            self.Upost = QCircuit()

    def __call__(self, U, screening=False, *args, **kwargs):
        # the full circuit is the fixed pre-circuit, the adaptively grown U, and the fixed post-circuit
        return ExpectationValue(H=self.H, U=self.Upre + U + self.Upost, *args, **kwargs)

    def grad_objective(self, *args, **kwargs):
        # by default the objective used for gradient screening is the same as the energy objective
        return self(*args, **kwargs)

    def __str__(self):
        return "{}".format(type(self).__name__)
Example #2
    def __init__(self, H=None, Upre=None, Upost=None, *args, **kwargs):
        if H is None:
            raise TequilaException("No Hamiltonian was given to Adapt!")

        self.H = H
        if Upre is not None:
            self.Upre = Upre
        else:
            self.Upre = QCircuit()
        if Upost is not None:
            self.Upost = Upost
        else:
            self.Upost = QCircuit()
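
Building on the constructor above, a minimal sketch of how the default factory could be specialized; the subclass name ShiftedObjectiveFactory and the constant energy shift are purely illustrative assumptions:

from tequila import ExpectationValue

class ShiftedObjectiveFactory(ObjectiveFactoryBase):
    # illustrative subclass: reuses the base __init__ and only changes the returned objective
    def __init__(self, shift=0.0, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.shift = shift

    def __call__(self, U, screening=False, *args, **kwargs):
        # same expectation value as the base class, plus a constant energy shift
        return ExpectationValue(H=self.H, U=self.Upre + U + self.Upost) + self.shift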
Example #3
    def make_unitary(self, k, label):
        # all generators grouped under pool index k share one (combined) variable,
        # so every excitation gate created here depends on the same single parameter
        U = QCircuit()
        combined_variable = self.generators[k][0]
        for idx in self.generators[k]:
            U += self.molecule.make_excitation_gate(indices=idx,
                                                    angle=(combined_variable, label))
        return U
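
To make the shared-variable pattern concrete without requiring a Molecule object, here is a small self-contained sketch; the pool indices, the angle name and the use of tq.gates.QubitExcitation in place of molecule.make_excitation_gate are assumptions for illustration only:

import tequila as tq

# hypothetical pool entry: two qubit-excitation index pairs that should share one angle
indices = [(0, 2), (1, 3)]
angle = ("pool_3", (0, 0))   # illustrative (combined_variable, label) name

U = tq.QCircuit()
for idx in indices:
    # QubitExcitation serves as a molecule-free stand-in for molecule.make_excitation_gate
    U += tq.gates.QubitExcitation(angle=angle, target=list(idx))

print(U.extract_variables())  # one shared variable parametrizes both gates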
Example #4
    # assumed module-level imports for this snippet: numpy, warnings, dataclasses and
    # tequila's QCircuit, simulate, minimize, grad, TequilaWarning
    def __call__(self, static_variables=None, mp_pool=None, label=None, variables=None, *args, **kwargs):

        print("Starting Adaptive Solver")
        print(self)

        # count resources
        screening_cycles = 0
        objective_expval_evaluations = 0
        gradient_expval_evaluations = 0
        histories = []

        if static_variables is None:
            static_variables = {}

        # static variables are part of the objective but are kept fixed during optimization
        if variables is None:
            variables = {**static_variables}
        else:
            variables = {**variables, **static_variables}

        U = QCircuit()

        initial_objective = self.make_objective(U, variables=variables)
        # make sure every variable of the initial objective has a value before the first simulation
        for k in initial_objective.extract_variables():
            if k not in variables:
                warnings.warn("variable {} of initial objective not given, setting it to 0.0 and activating optimization".format(k), TequilaWarning)
                variables[k] = 0.0

        energy = simulate(initial_objective, variables=variables)
        for iter in range(self.parameters.maxiter):
            current_label = (iter,0)
            if label is not None:
                current_label = (iter, label)

            # screen_gradients is expected to return the pool gradients sorted by magnitude,
            # largest first; the batch selection and degeneracy check below rely on this order
            gradients = self.screen_gradients(U=U, variables=variables, mp_pool=mp_pool)

            grad_values = numpy.asarray(list(gradients.values()))
            # gradient with the largest magnitude (sign kept for the printout below)
            max_grad = grad_values[numpy.argmax(numpy.abs(grad_values))]
            grad_norm = numpy.linalg.norm(grad_values)

            if grad_norm < self.parameters.gradient_convergence:
                print("pool gradient norm is {:+2.8f}, convergence criterion met".format(grad_norm))
                break
            if numpy.abs(max_grad) < self.parameters.max_gradient_convergence:
                print("max pool gradient is {:+2.8f}, convergence criterion |max(grad)|<{} met".format(max_grad, self.parameters.max_gradient_convergence))
                break

            batch_size = self.parameters.batch_size

            # detect degeneracies: if gradients beyond the cut-off are numerically equal to the
            # last one inside the batch, enlarge the batch so degenerate operators enter together
            degeneracies = [k for k in range(batch_size, len(grad_values))
                            if numpy.isclose(grad_values[batch_size-1], grad_values[k], rtol=self.parameters.degeneracy_threshold)]

            if len(degeneracies) > 0:
                batch_size += len(degeneracies)
                print("detected degeneracies: increasing batch size temporarily from {} to {}".format(self.parameters.batch_size, batch_size))

            # add the batch_size pool operators with the largest gradients to the circuit
            count = 0
            for k, v in gradients.items():
                Ux = self.operator_pool.make_unitary(k, label=current_label)
                U += Ux
                count += 1
                if count >= batch_size:
                    break

            # new circuit variables start at 0.0; static variables are excluded from the optimization
            variables = {**variables, **{k: 0.0 for k in U.extract_variables() if k not in variables}}
            active_variables = [k for k in variables if k not in static_variables]

            objective = self.make_objective(U, variables=variables)
            result = minimize(objective=objective,
                              variables=active_variables,
                              initial_values=variables,
                              **self.parameters.compile_args, **self.parameters.optimizer_args)

            diff = energy - result.energy
            energy = result.energy
            variables = result.variables

            print("-------------------------------------")
            print("Finished iteration {}".format(iter))
            print("current energy : {:+2.8f}".format(energy))
            print("difference     : {:+2.8f}".format(diff))
            print("grad_norm      : {:+2.8f}".format(grad_norm))
            print("max_grad       : {:+2.8f}".format(max_grad))
            print("circuit size   : {}".format(len(U.gates)))

            # bookkeeping: estimate how many expectation values were evaluated in this macro-iteration
            screening_cycles += 1
            mini_iter = len(result.history.extract_energies())
            gradient_expval = sum([v.count_expectationvalues() for k, v in grad(objective).items()])
            objective_expval_evaluations += mini_iter * objective.count_expectationvalues()
            gradient_expval_evaluations += mini_iter * gradient_expval
            histories.append(result.history)

            if self.parameters.energy_convergence is not None and numpy.abs(diff) < self.parameters.energy_convergence:
                print("energy difference is {:+2.8f}, convergence criterion met".format(diff))
                break

            if iter == self.parameters.maxiter - 1:
                print("reached maximum number of iterations")
                break

        @dataclasses.dataclass
        class AdaptReturn:
            U: QCircuit = None
            objective_factory: ObjectiveFactoryBase = None
            variables: dict = None
            energy: float = None
            histories: list = None
            screening_cycles: int = None
            objective_expval_evaluations: int = None
            gradient_expval_evaluations: int = None

        return AdaptReturn(U=U,
                           variables=variables,
                           objective_factory=self.objective_factory,
                           energy=energy,
                           histories=histories,
                           screening_cycles = screening_cycles,
                           objective_expval_evaluations=objective_expval_evaluations,
                           gradient_expval_evaluations=gradient_expval_evaluations)
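
Finally, a sketch of how the returned AdaptReturn bundle might be consumed; the solver instance is a placeholder (its construction is not part of these excerpts) and the label is illustrative:

import tequila as tq

# 'solver' stands for an already-constructed adaptive solver instance (not shown here)
result = solver(label="demo")

# the dataclass bundles the grown circuit, the optimized variables and resource statistics
print("final energy         :", result.energy)
print("circuit size         :", len(result.U.gates))
print("screening cycles     :", result.screening_cycles)
print("objective evaluations:", result.objective_expval_evaluations)

# the energy can be re-checked by simulating the final objective with the final variables
E = tq.simulate(result.objective_factory(result.U), variables=result.variables)
print("re-simulated energy  :", E)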