Example No. 1
import numpy
import pytest
import tequila as tq


# the parametrization values below are assumptions; the test body only needs
# assume_real as a bool and trafo naming a fermion-to-qubit transformation
@pytest.mark.parametrize("assume_real", [True, False])
@pytest.mark.parametrize("trafo", ["JordanWigner", "BravyiKitaev"])
def test_fermionic_gates(assume_real, trafo):
    mol = tq.chemistry.Molecule(geometry="H 0.0 0.0 0.7\nLi 0.0 0.0 0.0",
                                basis_set="sto-3g",
                                transformation=trafo)  # pass trafo through so the parameter is used
    U1 = mol.prepare_reference()  # circuit with explicit excitation gates
    U2 = mol.prepare_reference()  # same circuit via Trotterized generators
    variable_count = {}
    # 0 appears twice on purpose so variables may recur; the gradient
    # assertions below account for the multiplicity
    for i in [0, 1, 0]:
        for a in numpy.random.randint(2, 5, 3):
            # paired spin-up/spin-down single excitation i -> a
            idx = [(2 * i, 2 * a), (2 * i + 1, 2 * a + 1)]
            U1 += mol.make_excitation_gate(indices=idx,
                                           angle=(i, a),
                                           assume_real=assume_real)
            g = mol.make_excitation_generator(indices=idx)
            U2 += tq.gates.Trotterized(generators=[g],
                                       angles=[(i, a)],
                                       steps=1)
            variable_count[(i, a)] = variable_count.get((i, a), 0) + 1

    # pick one variable at random for the gradient consistency checks
    a = numpy.random.choice(U1.extract_variables(), 1)[0]

    H = mol.make_hamiltonian()
    E = tq.ExpectationValue(H=H, U=U1)
    dE = tq.grad(E, a)
    # the fermionic shift rule costs 4 shifted expectation values per gate
    # occurrence in general, and only 2 when the gate is assumed real
    if not assume_real:
        assert dE.count_expectationvalues() == 4 * variable_count[a.name]
    else:
        assert dE.count_expectationvalues() == 2 * variable_count[a.name]

    E2 = tq.ExpectationValue(H=H, U=U2)
    dE2 = tq.grad(E2, a)

    # random angles in [0, 2*pi); both representations must agree in value
    # and in gradient
    variables = {
        k: numpy.random.uniform(0.0, 2.0 * numpy.pi, 1)[0]
        for k in E.extract_variables()
    }
    test1 = tq.simulate(E, variables=variables)
    test1x = tq.simulate(E2, variables=variables)
    test2 = tq.simulate(dE, variables=variables)
    test2x = tq.simulate(dE2, variables=variables)

    assert numpy.isclose(test1, test1x, atol=1.e-6)
    assert numpy.isclose(test2, test2x, atol=1.e-6)
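
The test above checks that an explicit fermionic excitation gate and a single Trotter step of its generator give the same energies and gradients. As a minimal standalone sketch of the same API (geometry, indices, and the angle value are illustrative assumptions; a chemistry backend such as psi4 or pyscf must be installed):

import numpy
import tequila as tq

# H2 in a minimal basis; the gate promotes spatial orbital 0 to 1 on both
# spin channels, i.e. spin-orbitals 0 -> 2 and 1 -> 3
mol = tq.chemistry.Molecule(geometry="H 0.0 0.0 0.0\nH 0.0 0.0 0.7",
                            basis_set="sto-3g")
U = mol.prepare_reference()
U += mol.make_excitation_gate(indices=[(0, 2), (1, 3)], angle="a")
H = mol.make_hamiltonian()
E = tq.ExpectationValue(H=H, U=U)
print(tq.simulate(E, variables={"a": 0.25}))  # energy at an arbitrary angle
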
Example No. 2
    def do_screening(self, arg):
        # relies on numpy and tequila's grad/simulate at module scope
        Ux = self.operator_pool.make_unitary(k=arg["k"], label="tmp")
        Utmp = arg["U"] + Ux
        variables = {**arg["variables"]}
        objective = self.make_objective(Utmp, screening=True, variables=variables)

        # gradients of the candidate's variables are taken at angle 0.0,
        # i.e. without changing the current state of the circuit
        dEs = []
        for k in Ux.extract_variables():
            variables[k] = 0.0
            dEs.append(grad(objective, k))

        gradients = [numpy.abs(simulate(objective=dE, variables=variables,
                                        **self.parameters.compile_args))
                     for dE in dEs]

        return arg["k"], sum(gradients)
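
do_screening scores one candidate pool operator by the absolute gradient of the objective at angle zero. The same idea, stripped of the class machinery, as a self-contained sketch (the molecule and the candidate excitation are assumptions):

import numpy
import tequila as tq

mol = tq.chemistry.Molecule(geometry="H 0.0 0.0 0.0\nH 0.0 0.0 0.7",
                            basis_set="sto-3g")
H = mol.make_hamiltonian()
U = mol.prepare_reference()
# candidate gate standing in for one element of an operator pool
Ux = mol.make_excitation_gate(indices=[(0, 2), (1, 3)], angle="a")
E = tq.ExpectationValue(H=H, U=U + Ux)
dE = tq.grad(E, "a")
# |dE/da| at a=0: a large value marks a promising candidate
score = numpy.abs(tq.simulate(dE, variables={"a": 0.0}))
print("screening score:", score)
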
Example No. 3
    def __call__(self, static_variables=None, mp_pool=None, label=None, variables=None, *args, **kwargs):
        # relies on module-scope imports: numpy, warnings, dataclasses, and
        # tequila's QCircuit, grad, simulate, minimize, TequilaWarning

        print("Starting Adaptive Solver")
        print(self)

        # count resources
        screening_cycles = 0
        objective_expval_evaluations = 0
        gradient_expval_evaluations = 0
        histories = []

        if static_variables is None:
            static_variables = {}

        if variables is None:
            variables = {**static_variables}
        else:
            variables = {**variables, **static_variables}

        U = QCircuit()

        initial_objective = self.make_objective(U, variables=variables)
        for k in initial_objective.extract_variables():
            if k not in variables:
                warnings.warn("variable {} of initial objective not given, setting it to 0.0 and activating optimization".format(k), TequilaWarning)
                variables[k] = 0.0

        energy = simulate(initial_objective, variables=variables)
        for iteration in range(self.parameters.maxiter):
            current_label = (iteration, 0)
            if label is not None:
                current_label = (iteration, label)

            gradients = self.screen_gradients(U=U, variables=variables, mp_pool=mp_pool)

            # screen_gradients is expected to return candidates sorted by
            # decreasing gradient magnitude; the batching below relies on it
            grad_values = numpy.asarray(list(gradients.values()))
            max_grad = max(grad_values)
            grad_norm = numpy.linalg.norm(grad_values)

            if grad_norm < self.parameters.gradient_convergence:
                print("pool gradient norm is {:+2.8f}, convergence criterion met".format(grad_norm))
                break
            if numpy.abs(max_grad) < self.parameters.max_gradient_convergence:
                print("max pool gradient is {:+2.8f}, convergence criterion |max(grad)|<{} met".format(max_grad, self.parameters.max_gradient_convergence))
                break

            batch_size = self.parameters.batch_size

            # detect degeneracies
            degeneracies = [k for k in range(batch_size, len(grad_values))
                            if numpy.isclose(grad_values[batch_size - 1], grad_values[k],
                                             rtol=self.parameters.degeneracy_threshold)]

            if len(degeneracies) > 0:
                batch_size += len(degeneracies)
                print("detected degeneracies: increasing batch size temporarily from {} to {}".format(self.parameters.batch_size, batch_size))

            # grow the circuit by the batch_size best-scoring pool operators
            count = 0
            for k in gradients:
                Ux = self.operator_pool.make_unitary(k, label=current_label)
                U += Ux
                count += 1
                if count >= batch_size:
                    break

            # initialize the new variables at zero; static ones stay frozen
            variables = {**variables, **{k: 0.0 for k in U.extract_variables() if k not in variables}}
            active_variables = [k for k in variables if k not in static_variables]

            objective = self.make_objective(U, variables=variables)
            result = minimize(objective=objective,
                              variables=active_variables,
                              initial_values=variables,
                              **self.parameters.compile_args, **self.parameters.optimizer_args)

            diff = energy - result.energy
            energy = result.energy
            variables = result.variables

            print("-------------------------------------")
            print("Finished iteration {}".format(iter))
            print("current energy : {:+2.8f}".format(energy))
            print("difference     : {:+2.8f}".format(diff))
            print("grad_norm      : {:+2.8f}".format(grad_norm))
            print("max_grad       : {:+2.8f}".format(max_grad))
            print("circuit size   : {}".format(len(U.gates)))

            screening_cycles += 1
            mini_iter = len(result.history.extract_energies())
            # expectation values needed for one full gradient evaluation
            gradient_expval = sum(v.count_expectationvalues() for v in grad(objective).values())
            objective_expval_evaluations += mini_iter * objective.count_expectationvalues()
            gradient_expval_evaluations += mini_iter * gradient_expval
            histories.append(result.history)

            if self.parameters.energy_convergence is not None and numpy.abs(diff) < self.parameters.energy_convergence:
                print("energy difference is {:+2.8f}, convergence criterion met".format(diff))
                break

            if iteration == self.parameters.maxiter - 1:
                print("reached maximum number of iterations")
                break

        @dataclasses.dataclass
        class AdaptReturn:
            U: QCircuit = None
            objective_factory: ObjectiveFactoryBase = None
            variables: dict = None
            energy: float = None
            histories: list = None
            screening_cycles: int = None
            objective_expval_evaluations: int = None
            gradient_expval_evaluations: int = None

        return AdaptReturn(U=U,
                           variables=variables,
                           objective_factory=self.objective_factory,
                           energy=energy,
                           histories=histories,
                           screening_cycles=screening_cycles,
                           objective_expval_evaluations=objective_expval_evaluations,
                           gradient_expval_evaluations=gradient_expval_evaluations)
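
The adaptive loop above returns an AdaptReturn record bundling the grown circuit, the optimized variables, the final energy, and resource counters. A hedged usage sketch, where solver stands for an instance of the (not shown) adaptive solver class:

# "solver" is a hypothetical instance of the adaptive solver class that
# owns the __call__ method above; construction details are assumed
result = solver(label="lih", variables=None)
print("final energy    :", result.energy)
print("gates           :", len(result.U.gates))
print("screening cycles:", result.screening_cycles)
print("objective evals :", result.objective_expval_evaluations)
print("gradient evals  :", result.gradient_expval_evaluations)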