Example #1
def qng_grad_gaussian(unitary, g, i, hamiltonian):
    '''
    QNG function for getting the gradients of gaussian gates.
    THIS variant of the function does not seek out underlying gate parameters; it treats each variable 'as is'.
    This treatment is necessary for the QNG but is incorrect elsewhere.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective whose evaluation yields the gradient of the expectation value w.r.t. the internal parameter of g
    '''

    if not hasattr(g, "shift"):
        raise TequilaException("No shift found for gate {}".format(g))

    # neo_a and neo_b are the shifted versions of gate g needed to evaluate its gradient
    shift_a = g._parameter + numpy.pi / (4 * g.shift)
    shift_b = g._parameter - numpy.pi / (4 * g.shift)
    neo_a = copy.deepcopy(g)
    neo_a._parameter = shift_a
    neo_b = copy.deepcopy(g)
    neo_b._parameter = shift_b

    U1 = unitary.replace_gates(positions=[i], circuits=[neo_a])
    w1 = g.shift

    U2 = unitary.replace_gates(positions=[i], circuits=[neo_b])
    w2 = -g.shift

    Oplus = ExpectationValueImpl(U=U1, H=hamiltonian)
    Ominus = ExpectationValueImpl(U=U2, H=hamiltonian)
    dOinc = w1 * Objective(args=[Oplus]) + w2 * Objective(args=[Ominus])
    return dOinc
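As a sanity check on the rule implemented above, here is a minimal numpy sketch (independent of tequila; all names are illustrative): for E(theta) = <0| Rx(theta)^dag Z Rx(theta) |0> = cos(theta) and shift s = 1/2, the combination s * [E(theta + pi/(4s)) - E(theta - pi/(4s))] reproduces dE/dtheta = -sin(theta).

import numpy as np

def expectation(theta):
    # E(theta) = <0| Rx(theta)^dag Z Rx(theta) |0> = cos(theta)
    rx = np.array([[np.cos(theta / 2), -1j * np.sin(theta / 2)],
                   [-1j * np.sin(theta / 2), np.cos(theta / 2)]])
    state = rx @ np.array([1.0, 0.0])
    z = np.diag([1.0, -1.0])
    return float(np.real(state.conj() @ (z @ state)))

theta, shift = 0.7, 0.5   # 0.5 = eigenvalue magnitude of the Rx generator X/2
lhs = shift * (expectation(theta + np.pi / (4 * shift))
               - expectation(theta - np.pi / (4 * shift)))
assert np.isclose(lhs, -np.sin(theta))   # matches the analytic derivative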
Example #2
def __grad_gaussian(unitary, g, i, variable, hamiltonian):
    '''
    function for getting the gradients of gaussian gates. NOTE: the circuit must be compiled first.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective whose evaluation yields the gradient of the expectation value w.r.t variable
    '''

    if not hasattr(g, "shift"):
        raise TequilaException("No shift found for gate {}".format(g))

    # neo_a and neo_b are the shifted versions of gate g needed to evaluate its gradient
    shift_a = g._parameter + np.pi / (4 * g.shift)
    shift_b = g._parameter - np.pi / (4 * g.shift)
    neo_a = copy.deepcopy(g)
    neo_a._parameter = shift_a
    neo_b = copy.deepcopy(g)
    neo_b._parameter = shift_b

    U1 = unitary.replace_gates(positions=[i], circuits=[neo_a])
    w1 = g.shift * __grad_inner(g.parameter, variable)

    U2 = unitary.replace_gates(positions=[i], circuits=[neo_b])
    w2 = -g.shift * __grad_inner(g.parameter, variable)

    Oplus = ExpectationValueImpl(U=U1, H=hamiltonian)
    Ominus = ExpectationValueImpl(U=U2, H=hamiltonian)
    dOinc = w1 * Objective(args=[Oplus]) + w2 * Objective(args=[Ominus])
    return dOinc
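Example #2 differs from Example #1 only by the __grad_inner factor, which supplies the inner derivative of the chain rule: if the gate angle is theta = f(a), then dE/da = f'(a) * dE/dtheta. A small sketch continuing the cosine example above (plain numpy, illustrative names):

import numpy as np

# E(theta) = cos(theta) with theta = f(a) = 2*a, hence dE/da = -2*sin(2*a)
f = lambda a: 2.0 * a    # the gate parameter as a function of the variable
df = lambda a: 2.0       # inner derivative, the role played by __grad_inner
a, shift = 0.3, 0.5
outer = shift * (np.cos(f(a) + np.pi / (4 * shift))
                 - np.cos(f(a) - np.pi / (4 * shift)))
assert np.isclose(df(a) * outer, -2.0 * np.sin(2.0 * a))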
Example #3
def __grad_vector_objective(objective: typing.Union[Objective,
                                                    VectorObjective],
                            variable: Variable):
    argsets = objective.argsets
    transformations = objective._transformations
    outputs = []
    for pos in range(len(objective)):
        args = argsets[pos]
        transformation = transformations[pos]
        dO = None

        processed_expectationvalues = {}
        for i, arg in enumerate(args):
            if __AUTOGRAD__BACKEND__ == "jax":
                df = jax.grad(transformation, argnums=i)
            elif __AUTOGRAD__BACKEND__ == "autograd":
                df = jax.grad(transformation, argnum=i)
            else:
                raise TequilaException(
                    "Can't differentiate without autograd or jax")

            # We can detect one simple case where the outer derivative is const=1
            if transformation is None or transformation == identity:
                outer = 1.0
            else:
                outer = Objective(args=args, transformation=df)

            if hasattr(arg, "U"):
                # save redundancies
                if arg in processed_expectationvalues:
                    inner = processed_expectationvalues[arg]
                else:
                    inner = __grad_inner(arg=arg, variable=variable)
                    processed_expectationvalues[arg] = inner
            else:
                # this means this inner derivative is purely variable dependent
                inner = __grad_inner(arg=arg, variable=variable)

            if inner == 0.0:
                # don't pile up zero expectationvalues
                continue

            if dO is None:
                dO = outer * inner
            else:
                dO = dO + outer * inner

        if dO is None:
            dO = Objective()
        outputs.append(dO)
    if len(outputs) == 1:
        return outputs[0]
    return outputs
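The loop above is the chain rule over the arguments of a transformed objective: for O = T(E_1, ..., E_n), dO/dv = sum_i (dT/dE_i) * (dE_i/dv), where the outer factor comes from autodifferentiating the classical transformation and the inner factor from the shift rule. A stripped-down sketch with plain jax (the numbers are stand-ins, not real expectation values):

import jax

def transformation(e0, e1):
    # classical post-processing of two expectation values
    return e0 * jax.numpy.sin(e1)

e_vals = (0.4, 1.2)     # stand-ins for evaluated expectation values E_i
inner = (0.9, -0.1)     # stand-ins for dE_i/dv from the shift rule
dO = sum(jax.grad(transformation, argnums=i)(*e_vals) * inner[i]
         for i in range(2))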
Example #4
def test_exotic_gradients(gradvar):
    # the 'a' and 'b' cases fail with autograd but pass with jax
    a = Variable('a')
    b = Variable('b')
    c = Variable('c')
    d = Variable('d')
    e = Variable('e')
    f = Variable('f')
    variables = {a: 2.0, b: 3.0, c: 4.0, d: 5.0, e: 6.0, f: 7.0}

    t = c * a**b + b / c - Objective(
        args=[c], transformation=np.cos) + f / (d * e) + a * Objective(
            args=[d], transformation=np.exp) / (f + b) + Objective(
                args=[e], transformation=np.tanh) + Objective(
                    args=[f], transformation=np.sinc)
    g = grad(t, gradvar)
    if gradvar == 'a':
        assert np.isclose(
            g(variables),
            c(variables) * b(variables) * (a(variables)**(b(variables) - 1.)) +
            np.exp(d(variables)) / (f(variables) + b(variables)))
    if gradvar == 'b':
        assert np.isclose(
            g(variables),
            (c(variables) * a(variables)**b(variables)) * np.log(a(variables))
            + 1. / c(variables) - a(variables) * np.exp(d(variables)) /
            (f(variables) + b(variables))**2.0)
    if gradvar == 'c':
        assert np.isclose(
            g(variables),
            a(variables)**b(variables) - b(variables) / c(variables)**2. +
            np.sin(c(variables)))
    if gradvar == 'd':
        assert np.isclose(
            g(variables),
            -f(variables) / (np.square(d(variables)) * e(variables)) +
            a(variables) * np.exp(d(variables)) /
            (f(variables) + b(variables)))
    if gradvar == 'e':
        assert np.isclose(
            g(variables), 2. / (1. + np.cosh(2 * e(variables))) -
            f(variables) / (d(variables) * e(variables)**2.))
    if gradvar == 'f':
        assert np.isclose(
            g(variables), 1. / (d(variables) * e(variables)) -
            a(variables) * np.exp(d(variables)) /
            (f(variables) + b(variables))**2. +
            np.cos(np.pi * f(variables)) / f(variables) -
            np.sin(np.pi * f(variables)) / (np.pi * f(variables)**2.))
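One detail worth noting in the 'f' case: the pi factors come from numpy's normalized sinc, np.sinc(x) = sin(pi*x)/(pi*x), whose derivative is cos(pi*x)/x - sin(pi*x)/(pi*x**2). A quick finite-difference check of that expression:

import numpy as np

x, h = 7.0, 1e-6
analytic = np.cos(np.pi * x) / x - np.sin(np.pi * x) / (np.pi * x ** 2)
numeric = (np.sinc(x + h) - np.sinc(x - h)) / (2 * h)
assert np.isclose(analytic, numeric)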
Example #5
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    function for getting the gradients of directly differentiable gates. Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''

    # possibility for override in custom gate construction
    if hasattr(g, "shifted_gates"):
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        for x in shifted:
            w, g = x
            Ux = unitary.replace_gates(positions=[i], circuits=[g])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc

    if not hasattr(g, "eigenvalues_magnitude"):
        raise TequilaException(
            "No shift-rule found for gate {}. Neither shifted_gates nor eigenvalues_magnitude is defined"
            .format(g))

    # neo_a and neo_b are the shifted versions of gate g needed to evaluate its gradient
    shift_a = g._parameter + pi / (4 * g.eigenvalues_magnitude)
    shift_b = g._parameter - pi / (4 * g.eigenvalues_magnitude)
    neo_a = copy.deepcopy(g)
    neo_a._parameter = shift_a
    neo_b = copy.deepcopy(g)
    neo_b._parameter = shift_b

    U1 = unitary.replace_gates(positions=[i], circuits=[neo_a])
    w1 = g.eigenvalues_magnitude * __grad_inner(g.parameter, variable)

    U2 = unitary.replace_gates(positions=[i], circuits=[neo_b])
    w2 = -g.eigenvalues_magnitude * __grad_inner(g.parameter, variable)

    Oplus = ExpectationValueImpl(U=U1, H=hamiltonian)
    Ominus = ExpectationValueImpl(U=U2, H=hamiltonian)
    dOinc = w1 * Objective(args=[Oplus]) + w2 * Objective(args=[Ominus])
    return dOinc
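These private helpers are normally reached through tequila's public grad entry point rather than called directly. A typical usage sketch (assuming tequila and one of its simulator backends are installed; the circuit and variable name are arbitrary):

import numpy as np
import tequila as tq

U = tq.gates.Ry(angle="a", target=0)
H = tq.paulis.Z(0)
E = tq.ExpectationValue(U=U, H=H)
dE = tq.grad(E, "a")                    # dispatches to the shift rule above
value = tq.simulate(dE, variables={"a": 0.5})
assert np.isclose(value, -np.sin(0.5))  # E(a) = cos(a) for this circuit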
Example #6
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    implements the analytic partial derivative of a unitary as it would appear in an expectation value. See the paper.
    :param E: ExpectationValueImpl: the expectation value whose gradient should be obtained
    :param variable: Variable: the variable with respect to which differentiation should be performed
    :return: an Objective (or 0.0 if E does not depend on variable): the partial derivative of E w.r.t variable
    '''
    hamiltonian = E.H
    unitary = E.U
    assert (unitary.verify())

    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0

    param_gates = unitary._parameter_map[variable]

    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        # failsafe
        if g.is_controlled():
            raise TequilaException(
                "controlled gate in gradient: Compiler was not called. Gate is {}"
                .format(g))
        if not hasattr(g, "eigenvalues_magnitude"):
            raise TequilaException('No shift found for gate {}'.format(g))

        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)

        dO += dOinc

    assert dO is not None
    return dO
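Since the loop sums one shift-rule term per gate that depends on the variable, a variable reused across gates automatically gets the product-rule treatment. A hedged sketch (assuming tequila; circuits compose with +):

import tequila as tq

# 'a' appears in two gates, so grad(E, 'a') sums two shift-rule contributions
U = tq.gates.Ry(angle="a", target=0) + tq.gates.Rz(angle="a", target=0)
H = tq.paulis.X(0)
E = tq.ExpectationValue(U=U, H=H)
dE = tq.grad(E, "a")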
Example #7
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    function for getting the gradients of directly differentiable gates. Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''

    # possibility for override in custom gate construction
    if hasattr(g, "shifted_gates"):
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        for x in shifted:
            w, g = x
            Ux = unitary.replace_gates(positions=[i], circuits=[g])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc
    else:
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
Example #8
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    implements the analytic partial derivative of a unitary as it would appear in an expectation value. See the paper.
    :param E: ExpectationValueImpl: the expectation value whose gradient should be obtained
    :param variable: Variable: the variable with respect to which differentiation should be performed
    :return: an Objective (or 0.0 if E does not depend on variable): the partial derivative of E w.r.t variable
    '''

    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))

    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0

    param_gates = unitary._parameter_map[variable]

    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc

    assert dO is not None
    return dO
Example #9
def qng_grad_gaussian(unitary, g, i, hamiltonian) -> Objective:
    """
    get the gradient of an expectationvalue of a unitary and a hamiltonian with respect to gaussian gate g.
    THIS variant of the function does not seek out underlying gate parameters; it treats each variable 'as is'.
    This treatment is necessary for the QNG but is incorrect elsewhere.

    Parameters
    ----------
    unitary: QCircuit:
        the QCircuit object containing the gate to be differentiated
    g: parametrized gate:
        the gate being differentiated
    i: int:
        the position in unitary at which g appears.
    hamiltonian: QubitHamiltonian:
        the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    Returns
    -------
    Objective:
        the analytical gradient of <U,H> w.r.t. g = g(theta_g)
    """

    ### unlike grad_gaussian, this doesn't dig below, into a gate's underlying parametrization.
    ### In other words, if a gate is Rx(y), y=f(x), this gives you back d Rx / dy.

    if not hasattr(g, "shift"):
        raise TequilaException("No shift found for gate {}".format(g))

    # neo_a and neo_b are the shifted versions of gate g needed to evaluate its gradient
    shift_a = g._parameter + numpy.pi / (4 * g.shift)
    shift_b = g._parameter - numpy.pi / (4 * g.shift)
    neo_a = copy.deepcopy(g)
    neo_a._parameter = shift_a
    neo_b = copy.deepcopy(g)
    neo_b._parameter = shift_b

    U1 = unitary.replace_gates(positions=[i], circuits=[neo_a])
    w1 = g.shift

    U2 = unitary.replace_gates(positions=[i], circuits=[neo_b])
    w2 = -g.shift

    Oplus = ExpectationValueImpl(U=U1, H=hamiltonian)
    Ominus = ExpectationValueImpl(U=U2, H=hamiltonian)
    dOinc = w1 * Objective(args=[Oplus]) + w2 * Objective(args=[Ominus])
    return dOinc
Example #10
def __grad_objective(objective: Objective, variable: Variable):
    args = objective.args
    transformation = objective.transformation
    dO = None

    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException(
                "Can't differentiate without autograd or jax")

        # We can detect one simple case where the outer derivative is const=1
        if objective.transformation is None:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)

        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue

        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner

    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
Example #11
def get_qng_combos(objective,
                   func=stokes_block,
                   initial_values=None,
                   samples=None,
                   backend=None,
                   device=None,
                   noise=None) -> typing.List[typing.Dict]:
    """
    get all the objects needed to evaluate the qng for some objective; return them in a list of dictionaries.

    Parameters
    ----------
    objective: Objective:
        the Objective whose qng is sought.
    func: callable: (Default = stokes_block):
        the function used to obtain the blocks of the qgt. Default uses stokes_block, defined above.
    initial_values: dict, optional:
        a dictionary indicating the initial parameters with which to compile all objectives appearing in the qng.
    samples: int, optional:
        the number of samples with which to compile all objectives appearing in the qng. Default: none.
    backend: str, optional:
        the backend with which to compile all objectives appearing in the qng. Default: chosen automatically.
    device: optional:
        the device with which to compile all objectives appearing in the qng. Default: no device use or emulation.
    noise: str or NoiseModel, optional:
        the noise model with which to compile all objectives appearing in the qng. Default: no noise.

    Returns
    -------
    list of dicts:
        a list of dictionaries, each entry corresponding to the qng for one argument of the objective, in the
        order of said arguments.

    """

    combos = []
    vars = objective.extract_variables()
    compiled = compile_multitarget(gate=objective)
    compiled = compile_trotterized_gate(gate=compiled)
    compiled = compile_h_power(gate=compiled)
    compiled = compile_power_gate(gate=compiled)
    compiled = compile_controlled_phase(gate=compiled)
    compiled = compile_controlled_rotation(gate=compiled)
    for i, arg in enumerate(compiled.args):
        if not isinstance(arg, ExpectationValueImpl):
            ### this is a variable, no QNG involved
            mat = QngMatrix([[[1]]])
            vec = CallableVector([__grad_inner(arg, arg)])
            mapping = {0: {v: __grad_inner(arg, v) for v in vars}}
        else:
            ### if the arg is an expectationvalue, we need to build some qngs and mappings!
            blocks = func(arg,
                          initial_values=initial_values,
                          samples=samples,
                          device=device,
                          backend=backend,
                          noise=noise)
            mat = QngMatrix(blocks)

            vec = subvector_procedure(arg,
                                      initial_values=initial_values,
                                      samples=samples,
                                      device=device,
                                      backend=backend,
                                      noise=noise)

            mapping = {}
            self_pars = get_self_pars(arg.U)
            for j, p in enumerate(self_pars):
                indict = {}
                for v in p.extract_variables():
                    gi = __grad_inner(p, v)
                    if isinstance(gi, Objective):
                        g = compile_objective(gi,
                                              variables=initial_values,
                                              samples=samples,
                                              device=device,
                                              backend=backend,
                                              noise=noise)
                    else:
                        g = gi
                    indict[v] = g
                mapping[j] = indict

        posarg = jax.grad(compiled.transformation, argnums=i)

        p = Objective(compiled.args, transformation=posarg)

        pos = compile_objective(p,
                                variables=initial_values,
                                samples=samples,
                                device=device,
                                backend=backend,
                                noise=noise)
        combos.append(qng_dict(arg, mat, vec, mapping, pos))
    return combos
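The dictionaries assembled here feed a natural-gradient update: instead of the vanilla step theta <- theta - eta * grad, the gradient is preconditioned with the (pseudo-)inverse of the metric built from the QngMatrix blocks. A minimal numpy sketch of that final update (illustrative numbers; the real metric and gradient come from the combos):

import numpy as np

metric = np.array([[0.25, 0.0],
                   [0.0, 0.15]])   # block-diagonal approximation to the QGT
grad = np.array([0.3, -0.8])       # ordinary gradient of the objective
theta = np.array([0.1, 0.2])
eta = 0.05

natural_grad = np.linalg.pinv(metric) @ grad  # precondition the gradient
theta = theta - eta * natural_grad            # one QNG step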
Example #12
def get_qng_combos(objective,
                   initial_values=None,
                   samples=None,
                   backend=None,
                   backend_options=None,
                   noise=None):
    combos = []
    vars = objective.extract_variables()
    compiled = compile_multitarget(gate=objective)
    compiled = compile_trotterized_gate(gate=compiled)
    compiled = compile_h_power(gate=compiled)
    compiled = compile_power_gate(gate=compiled)
    compiled = compile_controlled_phase(gate=compiled)
    compiled = compile_controlled_rotation(gate=compiled)
    for i, arg in enumerate(compiled.args):
        if not isinstance(arg, ExpectationValueImpl):
            ### this is a variable, no QNG involved
            mat = QngMatrix([[[1]]])
            vec = CallableVector([__grad_inner(arg, arg)])
            mapping = {0: {v: __grad_inner(arg, v) for v in vars}}
        else:
            ### if the arg is an expectationvalue, we need to build some qngs and mappings!
            blocks = qng_metric_tensor_blocks(arg,
                                              initial_values=initial_values,
                                              samples=samples,
                                              backend=backend,
                                              noise=noise,
                                              backend_options=backend_options)
            mat = QngMatrix(blocks)

            vec = subvector_procedure(arg,
                                      initial_values=initial_values,
                                      samples=samples,
                                      backend=backend,
                                      noise=noise,
                                      backend_options=backend_options)

            mapping = {}
            self_pars = get_self_pars(arg.U)
            for j, p in enumerate(self_pars):
                indict = {}
                for v in p.extract_variables():
                    gi = __grad_inner(p, v)
                    if isinstance(gi, Objective):
                        g = compile_objective(gi,
                                              variables=initial_values,
                                              samples=samples,
                                              backend=backend,
                                              noise=noise,
                                              backend_options=backend_options)
                    else:
                        g = gi
                    indict[v] = g
                mapping[j] = indict

        posarg = jax.grad(compiled.transformation, argnums=i)
        p = Objective(compiled.args, transformation=posarg)

        pos = compile_objective(p,
                                variables=initial_values,
                                samples=samples,
                                backend=backend,
                                noise=noise,
                                backend_options=backend_options)
        combos.append(qng_dict(arg, mat, vec, mapping, pos))
    return combos
Example #13
def test_transform_update():
    a = Variable('a')
    b = Variable('a.')
    t = Objective(transformation=operator.add, args=[a, b])
    variables = {a: 8, b: 1, a: 9, "c": 17}  # duplicate literal key: the later a: 9 overwrites a: 8, so a + b = 10
    assert np.isclose(float(t(variables)), 10.0)