Example #1
def test_gradient_UX_HY_wfnsim(simulator, angle, controlled, silent=True):
    # same as before just with wavefunction simulation

    # case Rx with H = Y
    # U = cos(angle/2) + i*sin(-angle/2)*X
    # O = <0|Ud Y U|0> = cos(angle/2)*sin(-angle/2)*i*<0|(YX - XY)|0>
    #   = 0.5*sin(-angle)*i*<0|[Y,X]|0> = 0.5*sin(-angle)*i*(-2i)*<0|Z|0>
    #   = -sin(angle)

    angle_value = angle
    angle = Variable(name="angle")
    variables = {angle: angle_value}

    qubit = 0
    H = paulis.Y(qubit=qubit)
    if controlled:
        control = 1
        U = gates.X(target=control) + gates.Rx(
            target=qubit, control=control, angle=angle)
    else:
        U = gates.Rx(target=qubit, angle=angle)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable=angle)
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, -numpy.sin(angle(variables)), atol=0.0001))
    assert (numpy.isclose(dE, -numpy.cos(angle(variables)), atol=0.0001))
    if not silent:
        print("E         =", E)
        print("-sin(angle)=", -numpy.sin(angle(variables)))
        print("dE        =", dE)
        print("-cos(angle)=", -numpy.cos(angle(variables)))
Example #2
def test_gradient_UY_HX(simulator, angle_value, controlled, silent=True):
    # case Ry with H = X
    # U = cos(angle/2) + i*sin(-angle/2)*Y
    # <0|Ud X U|0> = cos^2(angle/2)*<0|X|0>
    # + sin^2(-angle/2)*<0|YXY|0>
    # + cos(angle/2)*sin(-angle/2)*i*<0|XY|0>
    # + sin(-angle/2)*cos(angle/2)*(-i)*<0|YX|0>
    # = cos^2*0 + sin^2*0 + cos(angle/2)*sin(-angle/2)*i*<0|(XY - YX)|0>
    # = 0.5*sin(-angle)*i*<0|[X,Y]|0> = 0.5*sin(-angle)*i*(2i)*<0|Z|0>
    # = sin(angle)

    angle = Variable(name="angle")
    variables = {angle: angle_value}

    qubit = 0
    H = paulis.X(qubit=qubit)
    if controlled:
        control = 1
        U = gates.X(target=control) + gates.Ry(
            target=qubit, control=control, angle=angle)
    else:
        U = gates.X(target=qubit) + gates.X(target=qubit) + gates.Ry(
            target=qubit, angle=angle)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    print("O={type}".format(type=type(O)))
    dO = grad(objective=O, variable=angle)
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, numpy.sin(angle(variables)), atol=1.e-4))
    assert (numpy.isclose(dE, numpy.cos(angle(variables)), atol=1.e-4))
    if not silent:
        print("E         =", E)
        print("sin(angle)=", numpy.sin(angle()))
        print("dE        =", dE)
        print("cos(angle)=", numpy.cos(angle()))
Example #3
def test_gradient_PHASE_HY(simulator, angle_value, controlled, silent=False):
    angle = Variable(name="angle")
    variables = {angle: angle_value}

    qubit = 0
    H = paulis.Y(qubit=qubit)
    if controlled:
        control = 1
        U = gates.X(target=control) + gates.H(target=qubit) + gates.Phase(
            target=qubit, control=control, phi=angle) + gates.H(target=qubit)
    else:
        U = gates.H(target=qubit) + gates.Phase(
            target=qubit, phi=angle) + gates.H(target=qubit)

    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable='angle')
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, -numpy.sin(angle(variables)), atol=1.e-4))
    assert (numpy.isclose(dE, -numpy.cos(angle(variables)), atol=1.e-4))
    if not silent:
        print("E         =", E)
        print("-sin(angle)=", -numpy.sin(angle(variables)))
        print("dE        =", dE)
        print("-cos(angle)=", -numpy.cos(angle(variables)))
Example #4
def test_gradient():
    a = Variable(name='a')
    variables = {a: 3.0}
    b = a + 2 - 2
    c = (b * 5) / 5
    d = -(-c)

    assert grad(d, a)(variables) == 1.0
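
A small companion sketch (pure Variable arithmetic, no circuits) showing grad on a non-trivial derivative rather than the identity chain tested above; the function and evaluation point are arbitrary:

import numpy as np
import tequila as tq

a = tq.Variable("a")
f = a * a + 3 * a            # f(a) = a^2 + 3a
df = tq.grad(f, a)           # df/da = 2a + 3
assert np.isclose(df({"a": 2.0}), 7.0)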
Example #5
    def compile_gradient(self,
                         objective: Objective,
                         variables: typing.List[Variable],
                         gradient=None,
                         *args,
                         **kwargs) -> typing.Tuple[typing.Dict, typing.Dict]:
        """
        convenience function to compile gradient objects and relevant types. For use by inheritors.

        Parameters
        ----------
        objective: Objective:
            the objective whose gradient is to be calculated.
        variables: list:
            the variables to take gradients with respect to.
        gradient, optional:
            special argument to change what structure is used to calculate the gradient, e.g. numerical or QNG.
            Default: use regular, analytic gradients.
        args
        kwargs

        Returns
        -------
        tuple:
            both the uncompiled and compiled gradients of objective, w.r.t variables.
        """
        if gradient is None:
            dO = {
                k: grad(objective=objective, variable=k, *args, **kwargs)
                for k in variables
            }
            compiled_grad = {
                k: self.compile_objective(objective=dO[k], *args, **kwargs)
                for k in variables
            }

        elif isinstance(gradient, dict):
            if all([isinstance(x, Objective) for x in gradient.values()]):
                dO = gradient
                compiled_grad = {
                    k: self.compile_objective(objective=dO[k], *args, **kwargs)
                    for k in variables
                }
            else:
                dO = None
                compiled = self.compile_objective(objective=objective)
                compiled_grad = {
                    k: _NumGrad(objective=compiled, variable=k, **gradient)
                    for k in variables
                }
        else:
            raise TequilaOptimizerException(
                "unknown gradient instruction of type {} : {}".format(
                    type(gradient), gradient))

        return dO, compiled_grad
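
For orientation, a hedged usage sketch of how the gradient argument typically arrives here from the optimizer front end; the dict keys "method" and "stepsize" are an assumption about what _NumGrad accepts as **gradient, not a quote from its signature:

import tequila as tq

E = tq.ExpectationValue(U=tq.gates.Ry(angle="a", target=0), H=tq.paulis.Z(0))

# gradient=None (default)  -> analytic gradients (first branch above)
# dict of Objectives       -> used as the gradient directly (second branch)
# any other dict           -> forwarded to _NumGrad, e.g. a numerical 2-point rule
result = tq.minimize(E, initial_values={"a": 0.5}, silent=True,
                     gradient={"method": "2-point", "stepsize": 1.e-4})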
Example #6
def test_qubit_excitations():

    H = paulis.Projector("1.0*|100>")
    U1 = gates.X(0) + gates.QubitExcitation(
        target=[0, 1], angle="a", assume_real=True)
    U2 = gates.X(0) + gates.Trotterized(
        generators=[U1.gates[1].make_generator()], angles=["a"], steps=1)
    E1 = ExpectationValue(H=H, U=U1)
    E2 = ExpectationValue(H=H, U=U2)
    dE1 = grad(E1, "a")
    dE2 = grad(E2, "a")

    for a in numpy.random.uniform(-numpy.pi, numpy.pi, 5):
        a = float(a)
        variables = {"a": a}
        wfn1 = simulate(U1, variables=variables)
        wfn2 = simulate(U2, variables=variables)
        F = numpy.abs(wfn1.inner(wfn2))**2
        assert numpy.isclose(F, 1.0, 1.e-4)
        eval1 = simulate(dE1, variables=variables)
        eval2 = simulate(dE2, variables=variables)
        assert numpy.isclose(eval1, eval2, 1.e-4)

    H = paulis.Projector("1.0*|0110>")
    U1 = gates.X([1, 2]) + gates.QubitExcitation(
        target=[0, 1, 3, 2], angle="a", assume_real=True)
    U2 = gates.X([1, 2]) + gates.Trotterized(
        generators=[U1.gates[2].make_generator()], angles=["a"], steps=1)
    E1 = ExpectationValue(H=H, U=U1)
    E2 = ExpectationValue(H=H, U=U2)
    dE1 = grad(E1, "a")
    dE2 = grad(E2, "a")

    for a in numpy.random.uniform(-numpy.pi, numpy.pi, 5):
        a = float(a)
        variables = {"a": a}
        wfn1 = simulate(U1, variables=variables)
        wfn2 = simulate(U2, variables=variables)
        F = numpy.abs(wfn1.inner(wfn2))**2
        assert numpy.isclose(F, 1.0, 1.e-4)
        eval1 = simulate(dE1, variables=variables)
        eval2 = simulate(dE2, variables=variables)
        assert numpy.isclose(eval1, eval2, 1.e-4)
Example #7
def test_really_awfull_thing(
    simulator,
    value1=(numpy.random.randint(0, 1000) / 1000.0 * (numpy.pi / 2.0)),
    value2=(numpy.random.randint(0, 1000) / 1000.0 * (numpy.pi / 2.0))):
    angle1 = Variable(name="angle1")
    angle2 = Variable(name="angle2")
    variables = {angle1: value1, angle2: value2}

    prod = angle1 * angle2
    qubit = 0
    control = None
    H = paulis.Y(qubit=qubit)
    U = gates.Rx(target=qubit, control=control, angle=prod)
    Up = gates.Rx(target=qubit, control=control, angle=prod + np.pi / 2)
    Down = gates.Rx(target=qubit, control=control, angle=prod - np.pi / 2)
    e1 = ExpectationValue(U=U, H=H)
    en1 = simulate(e1, variables=variables, backend=simulator)
    uen = simulate(0.5 * ExpectationValue(Up, H),
                   variables=variables,
                   backend=simulator)
    den = simulate(-0.5 * ExpectationValue(Down, H),
                   variables=variables,
                   backend=simulator)
    an1 = -np.sin(prod(variables=variables))
    anval = prod(variables=variables)
    an2 = angle2(variables=variables)
    added = angle1 * e1
    raised = added.wrap(np.sin)
    dO = grad(raised, 'angle1')
    dE = grad(e1, 'angle1')
    dA = grad(added, 'angle1')
    val = simulate(added, variables=variables, backend=simulator)
    dave = simulate(dA, variables=variables, backend=simulator)
    deval = simulate(dE, variables=variables, backend=simulator)
    doval = simulate(dO, variables=variables, backend=simulator)
    dtrue = np.cos(val) * dave
    assert np.isclose(en1, an1, atol=1.e-4)
    assert np.isclose(deval, an2 * (uen + den), atol=1.e-4)
    assert np.isclose(doval, dtrue, atol=1.e-4)
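
A smaller, self-contained sketch of the same wrap/chain-rule mechanics on plain variables, relying only on the .wrap and grad behavior exercised above:

import numpy as np
import tequila as tq

a = tq.Variable("a")
f = (a * a).wrap(np.sin)     # f(a) = sin(a^2)
df = tq.grad(f, a)           # chain rule: df/da = cos(a^2) * 2a
x = 0.7
assert np.isclose(df({"a": x}), np.cos(x * x) * 2 * x)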
Example #8
def test_heterogeneous_gradient_r_div(simulator):
    ### float power is not tested here because it keeps coming up NaN when the argument is too small
    angle1 = Variable(name="angle1")
    value = (numpy.random.randint(100, 1000) / 1000.0 * (numpy.pi / 2.0))
    variables = {angle1: value}
    qubit = 0
    control = 1
    H1 = paulis.Y(qubit=qubit)
    U1 = gates.X(target=control) + gates.Rx(target=qubit, control=control, angle=angle1)
    e1 = ExpectationValue(U=U1, H=H1)
    added = Objective(args=[e1.args[0], angle1], transformation=np.true_divide)
    val = simulate(added, variables=variables, backend=simulator)
    en1 = simulate(e1, variables=variables, backend=simulator)
    an1 = -np.sin(angle1(variables=variables))
    anval = angle1(variables=variables)
    dO = grad(added, 'angle1')
    dE = grad(e1, 'angle1')
    deval = simulate(dE, variables=variables, backend=simulator)
    doval = simulate(dO, variables=variables, backend=simulator)
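    # quotient rule: d/da (E(a)/a) = E'(a)/a - E(a)/a**2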
    dtrue = deval / anval - en1 / (anval ** 2)
    assert np.isclose(float(val), float(np.true_divide(en1, anval)))
    assert np.isclose(en1, an1, atol=1.e-4)
    assert np.isclose(doval, dtrue, atol=1.e-4)
Example #9
def test_total_type_jumble(
    simulator,
    value1=(numpy.random.randint(0, 1000) / 1000.0 * (numpy.pi / 2.0)),
    value2=(numpy.random.randint(0, 1000) / 1000.0 * (numpy.pi / 2.0))):
    a = Variable('a')
    b = Variable('b')
    values = {a: value1, b: value2}
    H1 = tq.paulis.X(0)
    H2 = tq.paulis.Y(0)
    U1 = tq.gates.Ry(angle=a, target=0)
    U2 = tq.gates.Rx(angle=b, target=0)
    e1 = ExpectationValue(U1, H1)
    e2 = ExpectationValue(U2, H2)
    stacked = tq.objective.vectorize([e1, e2])
    stacked = stacked * a * e2
    out = simulate(stacked, variables=values, backend=simulator)
    v1 = out[0]
    v2 = out[1]
    appendage = a(values) * -np.sin(b(values))
    an1 = np.sin(a(values)) * appendage
    an2 = -np.sin(b(values)) * appendage
    assert np.isclose(v1 + v2, an1 + an2)
    # not gonna contract, lets make gradient do some real work
    ga = grad(stacked, a)
    gb = grad(stacked, b)
    la = [tq.simulate(x, variables=values) for x in ga]
    print(la)
    lb = [tq.simulate(x, variables=values) for x in gb]
    print(lb)
    tota = np.sum(np.array(la))
    totb = np.sum(np.array(lb))
    gan1 = np.cos(a(values)) * appendage + (
        np.sin(a(values)) * -np.sin(b(values))) - (np.sin(b(values)) *
                                                   -np.sin(b(values)))
    gan2 = np.sin(a(values)) * a(values) * -np.cos(b(values)) + 2 * (
        -np.cos(b(values)) * appendage)
    assert np.isclose(tota + totb, gan1 + gan2)
Example #10
def test_exotic_gradients(gradvar):
    # cases a and b will fail with autograd but not with jax
    a = Variable('a')
    b = Variable('b')
    c = Variable('c')
    d = Variable('d')
    e = Variable('e')
    f = Variable('f')
    variables = {a: 2.0, b: 3.0, c: 4.0, d: 5.0, e: 6.0, f: 7.0}

    t = c * a**b + b / c - Objective(
        args=[c], transformation=np.cos) + f / (d * e) + a * Objective(
            args=[d], transformation=np.exp) / (f + b) + Objective(
                args=[e], transformation=np.tanh) + Objective(
                    args=[f], transformation=np.sinc)
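    # derivative identities used in the asserts below:
    #   d/dx tanh(x) = sech(x)**2 = 2/(1 + cosh(2x))
    #   np.sinc(x) = sin(pi*x)/(pi*x), so d/dx sinc(x) = cos(pi*x)/x - sin(pi*x)/(pi*x**2)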
    g = grad(t, gradvar)
    if gradvar == 'a':
        assert np.isclose(
            g(variables),
            c(variables) * b(variables) * (a(variables)**(b(variables) - 1.)) +
            np.exp(d(variables)) / (f(variables) + b(variables)))
    if gradvar == 'b':
        assert np.isclose(
            g(variables),
            (c(variables) * a(variables)**b(variables)) * np.log(a(variables))
            + 1. / c(variables) - a(variables) * np.exp(d(variables)) /
            (f(variables) + b(variables))**2.0)
    if gradvar == 'c':
        assert np.isclose(
            g(variables),
            a(variables)**b(variables) - b(variables) / c(variables)**2. +
            np.sin(c(variables)))
    if gradvar == 'd':
        assert np.isclose(
            g(variables),
            -f(variables) / (np.square(d(variables)) * e(variables)) +
            a(variables) * np.exp(d(variables)) /
            (f(variables) + b(variables)))
    if gradvar == 'e':
        assert np.isclose(
            g(variables), 2. / (1. + np.cosh(2 * e(variables))) -
            f(variables) / (d(variables) * e(variables)**2.))
    if gradvar == 'f':
        assert np.isclose(
            g(variables), 1. / (d(variables) * e(variables)) -
            a(variables) * np.exp(d(variables)) /
            (f(variables) + b(variables))**2. +
            np.cos(np.pi * f(variables)) / f(variables) -
            np.sin(np.pi * f(variables)) / (np.pi * f(variables)**2.))
Example #11
def test_gradient_deep_controlled_Y(simulator, power, controls):
    if controls > 2 and simulator == "qiskit":
        # does not work yet
        return
    qubit = 0
    control = [i for i in range(1, controls + 1)]
    angle = Variable(name="angle")
    U = gates.X(target=control) + gates.Y(target=qubit, power=angle, control=control)
    angle = Variable(name="angle")
    variables = {angle: power}
    H = paulis.X(qubit=qubit)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable=angle)
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, numpy.sin(angle(variables) * (numpy.pi)), atol=1.e-4))
    assert (numpy.isclose(dE, numpy.pi * numpy.cos(angle(variables) * (numpy.pi)), atol=1.e-4))
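
A short sketch of why sin(angle*pi) shows up in the asserts: up to a global phase, Y**t acts like the rotation Ry(pi*t), so the two circuits below have unit fidelity (an interpretation consistent with the test above, not a quote from the library docs):

import numpy as np
import tequila as tq

t = 0.3
wfn_pow = tq.simulate(tq.gates.Y(target=0, power=t))
wfn_rot = tq.simulate(tq.gates.Ry(target=0, angle=np.pi * t))
assert np.isclose(abs(wfn_pow.inner(wfn_rot)) ** 2, 1.0, atol=1e-6)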
Example #12
def test_gradient_X(simulator, power, controlled):
    qubit = 0
    control = 1
    angle = Variable(name="angle")
    if controlled:
        U = gates.X(target=control) + gates.X(target=qubit, power=angle, control=control)
    else:
        U = gates.X(target=qubit, power=angle)
    angle = Variable(name="angle")
    variables = {angle: power}
    H = paulis.Y(qubit=qubit)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable=angle)
    dE = simulate(dO, variables=variables, backend=simulator)
    assert (numpy.isclose(E, -numpy.sin(angle(variables) * (numpy.pi)), atol=1.e-4))
    assert (numpy.isclose(dE, -numpy.pi * numpy.cos(angle(variables) * (numpy.pi)), atol=1.e-4))
Example #13
    def compile_hessian(self,
                        variables: typing.List[Variable],
                        grad_obj: typing.Dict[Variable, Objective],
                        comp_grad_obj: typing.Dict[Variable, Objective],
                        hessian: dict = None,
                        *args,
                        **kwargs) -> tuple:

        dO = grad_obj
        cdO = comp_grad_obj

        if hessian is None:
            if dO is None:
                raise TequilaOptimizerException("Can not combine analytical Hessian with numerical Gradient\n"
                                                "hessian instruction was: {}".format(hessian))

            compiled_hessian = {}
            ddO = {}
            for k in variables:
                dOk = dO[k]
                for l in variables:
                    ddO[(k, l)] = grad(objective=dOk, variable=l)
                    compiled_hessian[(k, l)] = self.compile_objective(ddO[(k, l)])
                    ddO[(l, k)] = ddO[(k, l)]
                    compiled_hessian[(l, k)] = compiled_hessian[(k, l)]

        elif isinstance(hessian, dict):
            if all([isinstance(x, Objective) for x in hessian.values()]):
                ddO = hessian
                compiled_hessian = {k: self.compile_objective(objective=ddO[k], *args, **kwargs) for k in
                                    hessian.keys()}
            else:
                ddO = None
                compiled_hessian = {}
                for k in variables:
                    for l in variables:
                        compiled_hessian[(k, l)] = _NumGrad(objective=cdO[k], variable=l, **hessian)
                        compiled_hessian[(l, k)] = _NumGrad(objective=cdO[l], variable=k, **hessian)
        else:
            raise TequilaOptimizerException("unknown hessian instruction: {}".format(hessian))

        return ddO, compiled_hessian
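
The compiled Hessian is a plain dict keyed symmetrically by variable pairs. A hypothetical helper (assuming only that compiled objectives are callable on a variables dict, as elsewhere in this code) showing how a caller could assemble a dense matrix from it:

import numpy as np

def hessian_matrix(compiled_hessian, variables, variable_values):
    # compiled_hessian: {(k, l): callable}, symmetric in (k, l)
    n = len(variables)
    H = np.zeros((n, n))
    for i, k in enumerate(variables):
        for j, l in enumerate(variables):
            H[i, j] = compiled_hessian[(k, l)](variable_values)
    return H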
Example #14
def test_gradient_UY_HX_wfnsim(simulator,
                               angle_value,
                               controlled,
                               silent=True):
    # same as before just with wavefunction simulation

    # case Ry with H = X
    # U = cos(angle/2) + i*sin(-angle/2)*Y
    # <0|Ud X U|0> = cos^2(angle/2)*<0|X|0>
    # + sin^2(-angle/2)*<0|YXY|0>
    # + cos(angle/2)*sin(-angle/2)*i*<0|XY|0>
    # + sin(-angle/2)*cos(angle/2)*(-i)*<0|YX|0>
    # = cos^2*0 + sin^2*0 + cos(angle/2)*sin(-angle/2)*i*<0|(XY - YX)|0>
    # = 0.5*sin(-angle)*i*<0|[X,Y]|0> = 0.5*sin(-angle)*i*(2i)*<0|Z|0>
    # = sin(angle)

    angle = Variable(name="angle")
    variables = {angle: angle_value}

    qubit = 0
    H = paulis.X(qubit=qubit)
    if controlled:
        control = 1
        U = gates.X(target=control) + gates.Ry(
            target=qubit, control=control, angle=angle)
    else:
        U = gates.Ry(target=qubit, angle=angle)
    O = ExpectationValue(U=U, H=H)
    E = simulate(O, variables=variables, backend=simulator)
    dO = grad(objective=O, variable='angle')
    dE = simulate(dO, variables=variables, backend=simulator)
    E = float(E)  # cast to builtin float for isclose (numpy.float was removed in NumPy 1.24)
    dE = float(dE)
    assert (numpy.isclose(E, numpy.sin(angle(variables)), atol=0.0001))
    assert (numpy.isclose(dE, numpy.cos(angle(variables)), atol=0.0001))
    if not silent:
        print("E         =", E)
        print("sin(angle)=", numpy.sin(angle(variables)))
        print("dE        =", dE)
        print("cos(angle)=", numpy.cos(angle(variables)))
Example #15
    def compile_gradient(self, objective: Objective,
                         variables: typing.List[Variable],
                         gradient=None,
                         *args, **kwargs) -> typing.Tuple[
        typing.Dict, typing.Dict]:

        if gradient is None:
            dO = {k: grad(objective=objective, variable=k, *args, **kwargs) for k in variables}
            compiled_grad = {k: self.compile_objective(objective=dO[k], *args, **kwargs) for k in variables}

        elif isinstance(gradient, dict):
            if all([isinstance(x, Objective) for x in gradient.values()]):
                dO = gradient
                compiled_grad = {k: self.compile_objective(objective=dO[k], *args, **kwargs) for k in variables}
            else:
                dO = None
                compiled = self.compile_objective(objective=objective)
                compiled_grad = {k: _NumGrad(objective=compiled, variable=k, **gradient) for k in variables}
        else:
            raise TequilaOptimizerException(
                "unknown gradient instruction of type {} : {}".format(type(gradient), gradient))

        return dO, compiled_grad
Example #16
    def __call__(self,
                 objective: Objective,
                 maxiter,
                 lr: float = .01,
                 method: str = 'sgd',
                 qng: bool = False,
                 stop_count: int = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 variables: typing.List[Variable] = None,
                 samples: int = None,
                 backend: str = None,
                 noise: NoiseModel = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> GDReturnType:
        """
        Optimizes with a variation of gradient descent and gives back the optimized angles
        Get the optimized energies over the history
        :param objective: The tequila Objective to minimize
        :param maxiter: how many iterations to run, at maximum.
        :param method: what method to optimize via.
        :param qng: whether or not to use the QNG to calculate gradients.
        :param stop_count: how many steps after which to abort if no improvement occurs.
        :param initial_values: initial values for the objective
        :param variables: which variables to optimize over. Default None: all the variables of the objective.
        :param samples: the number of samples to use. Default None: Wavefunction simulation used instead.
        :param backend: which simulation backend to use. Default None: let Tequila Pick!
        :param noise: the NoiseModel to apply to sampling. Default None. Affects chosen simulator.
        :param reset_history: reset the history before optimization starts (has no effect if self.save_history is False)
        :return: tuple of optimized energy ,optimized angles and scipy output
        """

        if self.save_history and reset_history:
            self.reset_history()

        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]

        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v

        # Transform the initial value dictionary into (ordered) arrays

        comp = compile(objective=objective,
                       variables=initial_values,
                       backend=backend,
                       noise=noise,
                       samples=samples)

        if not qng:
            g_list = []
            for k in active_angles.keys():
                g = grad(objective, k)
                g_comp = compile(objective=g,
                                 variables=initial_values,
                                 backend=backend,
                                 noise=noise,
                                 samples=samples)
                g_list.append(g_comp)

            gradients = CallableVector(g_list)
        else:
            if method.lower() == 'adagrad':
                print('Warning! You have chosen to use QNG with adagrad; '
                      'convergence is not likely.')
            gradients = QNGVector(
                get_qng_combos(objective=objective,
                               initial_values=initial_values,
                               backend=backend,
                               noise=noise,
                               samples=samples))

        if not self.silent:
            print("backend: {}".format(comp.backend))
            print("samples: {}".format(samples))
            print("{} active variables".format(len(active_angles)))
            print("qng: {}".format(str(qng)))

        ### preface: early stopping, initialization, etc. are handled here

        if maxiter is None:
            maxiter = self.maxiter
        if stop_count is None:
            stop_count = maxiter

        ### the actual algorithm acts here:

        f = self.method_dict[method.lower()]
        v = initial_values
        vec_len = len(active_angles)
        best = None
        best_angles = None
        first = numpy.zeros(vec_len)
        second = numpy.zeros(vec_len)
        moments = [first, second]
        all_moments = [moments]
        tally = 0
        for step in range(maxiter):
            e = comp(v, samples=samples)
            self.history.energies.append(e)
            self.history.angles.append(v)

            ### saving best performance and counting the stop tally.
            if step == 0:
                best = e
                best_angles = v
                tally = 0
            else:
                if e < best:
                    best = e
                    best_angles = v
                    tally = 0
                else:
                    tally += 1

            if not self.silent:
                string = "Iteration: {} , Energy: {}, angles: {}".format(
                    str(step), str(e), v)
                print(string)

            ### check if it's time to stop!
            if tally == stop_count:
                if not self.silent:
                    print(
                        'no improvement after {} epochs. Stopping optimization.'
                        .format(str(stop_count)))
                break

            new, moments, grads = f(lr=lr,
                                    step=step,
                                    gradients=gradients,
                                    v=v,
                                    moments=moments,
                                    active_angles=active_angles,
                                    samples=samples,
                                    **kwargs)
            save_grad = {}
            if passive_angles is not None:
                v = {**new, **passive_angles}
            else:
                v = new
            for i, k in enumerate(active_angles.keys()):
                save_grad[k] = grads[i]
            self.history.gradients.append(save_grad)
            all_moments.append(moments)
        E_final, angles_final = best, best_angles
        angles_final = {**angles_final, **passive_angles}
        return GDReturnType(energy=E_final,
                            angles=format_variable_dictionary(angles_final),
                            history=self.history,
                            moments=all_moments)
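
A hedged usage sketch for this driver: lr, method, maxiter and stop_count are taken from the signature above, while reaching this optimizer through tq.minimize with a gradient-descent method string is an assumption:

import tequila as tq

a = tq.Variable("a")
E = tq.ExpectationValue(U=tq.gates.Ry(angle=a, target=0), H=tq.paulis.Z(0))
result = tq.minimize(objective=E, method="adam", lr=0.1, maxiter=50,
                     initial_values={"a": 0.5}, silent=True)
print(result.energy, result.angles)  # fields of the GDReturnType returned above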
Example #17
    def __call__(self, objective: Objective,
                 initial_values: typing.Dict[Variable, numbers.Real],
                 variables: typing.List[Variable],
                 gradient: typing.Dict[Variable, Objective] = None,
                 qng: bool = False,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 samples: int = None,
                 backend: str = None,
                 backend_options: dict = None,
                 noise: NoiseModel = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyReturnType:
        """
        Optimizes with scipy and gives back the optimized angles
        Get the optimized energies over the history
        :param objective: The tequila Objective to minimize
        :param initial_valuesxx: initial values for the objective
        :param return_scipy_output: chose if the full scipy output shall be returned
        :param reset_history: reset the history before optimization starts (has no effect if self.save_history is False)
        :return: tuple of optimized energy ,optimized angles and scipy output
        """

        infostring = "Starting {method} optimization\n".format(method=self.method)
        infostring += "Objective: {} expectationvalues\n".format(objective.count_expectationvalues())

        if self.save_history and reset_history:
            self.reset_history()

        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]

        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v

        # Transform the initial value dictionary into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)

        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "bounds : {}\n".format(self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled

        # do the compilation here to avoid costly recompilation during the optimization
        compiled_objective = compile(objective=objective, variables=initial_values, backend=backend, noise=noise,
                                     samples=samples, *args, **kwargs)

        E = _EvalContainer(objective=compiled_objective,
                           param_keys=param_keys,
                           samples=samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           backend_options=backend_options,
                           silent=self.silent)

        # compile gradients
        if self.method in self.gradient_based_methods + self.hessian_based_methods and not isinstance(gradient, str):
            compiled_grad_objectives = dict()
            if gradient is None:
                gradient = {assign_variable(k): grad(objective=objective, variable=k) for k in active_angles.keys()}
            else:
                gradient = {assign_variable(k): v for k, v in gradient.items()}

            grad_exval = []
            for k in active_angles.keys():
                if k not in gradient:
                    raise Exception("No gradient for variable {}".format(k))
                grad_exval.append(gradient[k].count_expectationvalues())
                compiled_grad_objectives[k] = compile(objective=gradient[k], variables=initial_values,
                                                      samples=samples, noise=noise, backend=backend, *args, **kwargs)

            if qng:
                combos = get_qng_combos(objective, samples=samples, backend=backend,
                                        noise=noise, initial_values=initial_values)

                dE = _QngContainer(combos=combos,
                                   param_keys=param_keys,
                                   samples=samples,
                                   passive_angles=passive_angles,
                                   save_history=self.save_history,
                                   silent=self.silent,
                                   backend_options=backend_options)
            else:

                dE = _GradContainer(objective=compiled_grad_objectives,
                                    param_keys=param_keys,
                                    samples=samples,
                                    passive_angles=passive_angles,
                                    save_history=self.save_history,
                                    silent=self.silent,
                                    backend_options=backend_options)

                infostring += "Gradients: {} expectationvalues (min={}, max={})\n".format(sum(grad_exval),
                                                                                          min(grad_exval),
                                                                                          max(grad_exval))
        else:
            # use numerical gradient
            dE = gradient
            infostring += "Gradients: {}\n".format(gradient)

        # compile hessian

        if self.method in self.hessian_based_methods and not isinstance(hessian, str):

            if isinstance(gradient, str):
                raise TequilaScipyException("Can not use numerical gradients for Hessian based methods")
            if qng is True:
                raise TequilaScipyException('Quantum Natural Hessian not yet well-defined, sorry!')
            compiled_hess_objectives = dict()
            hess_exval = []
            for i, k in enumerate(active_angles.keys()):
                for j, l in enumerate(active_angles.keys()):
                    if j > i: continue
                    hess = grad(gradient[k], l)
                    compiled_hess = compile(objective=hess, variables=initial_values, samples=samples,
                                            noise=noise,
                                            backend=backend, *args, **kwargs)
                    compiled_hess_objectives[(k, l)] = compiled_hess
                    compiled_hess_objectives[(l, k)] = compiled_hess
                    hess_exval.append(compiled_hess.count_expectationvalues())

            ddE = _HessContainer(objective=compiled_hess_objectives,
                                 param_keys=param_keys,
                                 samples=samples,
                                 passive_angles=passive_angles,
                                 save_history=self.save_history,
                                 silent=self.silent)

            infostring += "Hessian: {} expectationvalues (min={}, max={})\n".format(sum(hess_exval), min(hess_exval),
                                                                                    max(hess_exval))

        else:
            infostring += "Hessian: {}\n".format(hessian)
            if self.method != "TRUST-CONSTR" and hessian is not None:
                raise TequilaScipyException("numerical hessians only for trust-constr method")
            ddE = hessian

        if not self.silent:
            print("ObjectiveType is {}".format(type(compiled_objective)))
            print(infostring)
            print("backend: {}".format(compiled_objective.backend))
            print("samples: {}".format(samples))
            print("{} active variables".format(len(active_angles)))

        # get the number of real scipy iterations for better histories
        real_iterations = []

        Es = []
        callback = lambda x, *args: real_iterations.append(len(E.history) - 1)
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)

        # failsafe since callback is not implemented everywhere
        if len(real_iterations) == 0:
            real_iterations = range(len(E.history))
        else:
            real_iterations = [0] + real_iterations
        if self.save_history:
            self.history.energies = [E.history[i] for i in real_iterations]
            self.history.energy_evaluations = E.history
            self.history.angles = [E.history_angles[i] for i in real_iterations]
            self.history.angles_evaluations = E.history_angles
            if dE is not None and not isinstance(dE, str):
                # can currently only save gradients if explicitly evaluated
                # and will fail for hessian based approaches
                # need better callback functions
                try:
                    if self.method not in self.hessian_based_methods:
                        self.history.gradients = [dE.history[i] for i in real_iterations]
                except Exception:
                    print("WARNING: History could not assign the stored gradients")
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                # hessians are not evaluated in the same frequencies as energies
                # therefore we can not store the "real" iterations currently
                self.history.hessians_evaluations = ddE.history

        E_final = res.fun
        angles_final = dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}

        return SciPyReturnType(energy=E_final, angles=format_variable_dictionary(angles_final), history=self.history,
                               scipy_output=res)
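
A matching sketch for the SciPy path; the method string is handed to scipy.optimize.minimize, so standard scipy names such as "BFGS" apply (again assuming the tq.minimize entry point rather than calling __call__ directly):

import tequila as tq

a = tq.Variable("a")
E = tq.ExpectationValue(U=tq.gates.Ry(angle=a, target=0), H=tq.paulis.Z(0))
result = tq.minimize(objective=E, method="BFGS",
                     initial_values={"a": 0.5}, silent=True)
print(result.energy, result.angles)  # fields of the SciPyReturnType returned above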
Example #18
    def compile_hessian(self,
                        variables: typing.List[Variable],
                        grad_obj: typing.Dict[Variable, Objective],
                        comp_grad_obj: typing.Dict[Variable, Objective],
                        hessian: dict = None,
                        *args,
                        **kwargs) -> tuple:
        """
        convenience function to compile hessians for optimizers which require it.
        Parameters
        ----------
        variables:
            the variables of the hessian.
        grad_obj:
            the gradient object, to be differentiated once more
        comp_grad_obj:
            the compiled gradient object, used for further compilation of the hessian.
        hessian: optional:
            extra information to modulate compilation of the hessian.
        args
        kwargs

        Returns
        -------
        tuple:
            uncompiled and compiled hessian objects, in that order
        """
        dO = grad_obj
        cdO = comp_grad_obj

        if hessian is None:
            if dO is None:
                raise TequilaOptimizerException(
                    "Can not combine analytical Hessian with numerical Gradient\n"
                    "hessian instruction was: {}".format(hessian))

            compiled_hessian = {}
            ddO = {}
            for k in variables:
                dOk = dO[k]
                for l in variables:
                    ddO[(k, l)] = grad(objective=dOk, variable=l)
                    compiled_hessian[(k, l)] = self.compile_objective(ddO[(k,
                                                                           l)])
                    ddO[(l, k)] = ddO[(k, l)]
                    compiled_hessian[(l, k)] = compiled_hessian[(k, l)]

        elif isinstance(hessian, dict):
            if all([isinstance(x, Objective) for x in hessian.values()]):
                ddO = hessian
                compiled_hessian = {
                    k: self.compile_objective(objective=ddO[k],
                                              *args,
                                              **kwargs)
                    for k in hessian.keys()
                }
            else:
                ddO = None
                compiled_hessian = {}
                for k in variables:
                    for l in variables:
                        compiled_hessian[(k, l)] = _NumGrad(objective=cdO[k],
                                                            variable=l,
                                                            **hessian)
                        compiled_hessian[(l, k)] = _NumGrad(objective=cdO[l],
                                                            variable=k,
                                                            **hessian)
        else:
            raise TequilaOptimizerException(
                "unknown hessian instruction: {}".format(hessian))

        return ddO, compiled_hessian