Example 1
    def minimize(self,
                 initial_state,
                 method='Powell',
                 options=None,
                 compile=False,
                 processes=None):
        """Search for parameters which minimizes the hamiltonian expectation.

        Args:
            initial_state (array): an initial guess for the parameters of the
                variational circuit.
            method (str): the desired minimization method.
                See :meth:`qibo.optimizers.optimize` for available optimization
                methods.
            options (dict): a dictionary with options for the different optimizers.
            compile (bool): whether the TensorFlow graph should be compiled.
            processes (int): number of processes when using the parallel BFGS method.

        Returns:
            The final expectation value.
            The corresponding best parameters.
        """
        def _loss(params, circuit, hamiltonian):
            circuit.set_parameters(params)
            final_state = circuit()
            return hamiltonian.expectation(final_state)

        if compile:
            if get_backend() == "custom":
                raise_error(
                    RuntimeError,
                    "Cannot compile VQE that uses custom operators. "
                    "Set the compile flag to False.")
            from qibo import K
            loss = K.function(_loss)

        if method == 'sgd':
            # check if gates are using the MatmulEinsum backend
            from qibo.tensorflow.gates import TensorflowGate
            for gate in self.circuit.queue:
                if not isinstance(gate, TensorflowGate):
                    raise_error(
                        RuntimeError, 'SGD VQE requires native Tensorflow '
                        'gates because gradients are not '
                        'supported in the custom kernels.')
            loss = _loss
        else:
            loss = lambda p, c, h: _loss(p, c, h).numpy()
        result, parameters = self.optimizers.optimize(loss,
                                                      initial_state,
                                                      args=(self.circuit,
                                                            self.hamiltonian),
                                                      method=method,
                                                      options=options,
                                                      compile=compile,
                                                      processes=processes)
        self.circuit.set_parameters(parameters)
        return result, parameters
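For context, a minimal usage sketch of this method follows. It is a hypothetical example, assuming the standard qibo objects (Circuit, gates, hamiltonians and the VQE model) that this snippet belongs to; the circuit layout and parameter values are illustrative only.

# Hypothetical usage sketch of VQE.minimize; circuit, hamiltonian and values
# below are illustrative assumptions, not taken from the source.
import numpy as np
from qibo import models, gates, hamiltonians

nqubits = 4
circuit = models.Circuit(nqubits)
circuit.add(gates.RY(q, theta=0) for q in range(nqubits))

hamiltonian = hamiltonians.XXZ(nqubits)
vqe = models.VQE(circuit, hamiltonian)

initial_parameters = np.random.uniform(0, 2 * np.pi, nqubits)
best_energy, best_parameters = vqe.minimize(initial_parameters, method='Powell')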
Example 2
def sgd(loss, initial_parameters, options=None, compile=False):
    """Stochastic Gradient Descent (SGD) optimizer using Tensorflow backpropagation.

    See `tf.keras.Optimizers <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers>`_
    for a list of the available optimizers.

    Args:
        loss (callable): Loss as a function of variational parameters to be
            optimized.
        initial_parameters (np.ndarray): Initial guess for the variational
            parameters.
        options (dict): Dictionary with options for the SGD optimizer. Supports
            the following keys:
              - ``'optimizer'`` (str, default: ``'Adagrad'``): Name of optimizer.
              - ``'learning_rate'`` (float, default: ``1e-3``): Learning rate.
              - ``'nepochs'`` (int, default: ``1e6``): Number of epochs for optimization.
              - ``'nmessage'`` (int, default: ``1e3``): Every how many epochs to print
                a message of the loss function.
    """
    from qibo import K
    from qibo.config import log
    sgd_options = {
        "nepochs": 1000000,
        "nmessage": 1000,
        "optimizer": "Adagrad",
        "learning_rate": 0.001
    }
    if options is not None:
        sgd_options.update(options)

    # proceed with the training
    vparams = K.Variable(initial_parameters)
    optimizer = getattr(
        K.optimizers,
        sgd_options["optimizer"])(learning_rate=sgd_options["learning_rate"])

    def opt_step():
        with K.GradientTape() as tape:
            l = loss(vparams)
        grads = tape.gradient(l, [vparams])
        optimizer.apply_gradients(zip(grads, [vparams]))
        return l

    if compile:
        opt_step = K.function(opt_step)

    for e in range(sgd_options["nepochs"]):
        l = opt_step()
        if e % sgd_options["nmessage"] == 1:
            log.info('ite %d : loss %f', e, l.numpy())

    return loss(vparams).numpy(), vparams.numpy()
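As an illustration, a toy call of the function above might look as follows. The quadratic loss is a placeholder assumption, and TensorFlow is assumed to be installed, since the optimizer relies on its backpropagation.

# Hypothetical sketch: optimizing a toy quadratic loss with the sgd helper above.
# The loss must be differentiable by TensorFlow; in real use it would evaluate a
# circuit expectation value.
import numpy as np
import tensorflow as tf

def toy_loss(params):
    return tf.reduce_sum((params - 0.5) ** 2)

best_loss, best_parameters = sgd(toy_loss, np.zeros(3),
                                 options={"nepochs": 2000, "nmessage": 500})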
Example 3
    def minimize(self, initial_state, method='Powell', options=None, compile=True):
        """Search for parameters which minimizes the hamiltonian expectation.

        Args:
            initial_state (array): an initial guess for the parameters of the
                variational circuit.
            method (str): the desired minimization method.
                One of ``"cma"`` (genetic optimizer), ``"sgd"`` (gradient descent) or
                any of the methods supported by `scipy.optimize.minimize <https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html>`_.
            options (dict): a dictionary with options for the different optimizers.
            compile (bool): whether the TensorFlow graph should be compiled.

        Returns:
            The final expectation value.
            The corresponding best parameters.
        """
        def loss(params):
            self.circuit.set_parameters(params)
            final_state = self.circuit()
            return self.hamiltonian.expectation(final_state)

        if compile:
            if not self.circuit.using_tfgates:
                raise_error(RuntimeError, "Cannot compile VQE that uses custom operators. "
                                          "Set the compile flag to False.")
            from qibo import K
            loss = K.function(loss)

        if method == 'sgd':
            # check if gates are using the MatmulEinsum backend
            from qibo.tensorflow.gates import TensorflowGate
            for gate in self.circuit.queue:
                if not isinstance(gate, TensorflowGate):
                    raise_error(RuntimeError, 'SGD VQE requires native Tensorflow '
                                              'gates because gradients are not '
                                              'supported in the custom kernels.')

            result, parameters = self.optimizers.optimize(loss, initial_state,
                                                          "sgd", options,
                                                          compile)
        else:
            result, parameters = self.optimizers.optimize(
                lambda p: loss(p).numpy(), initial_state, method, options)

        self.circuit.set_parameters(parameters)
        return result, parameters
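The call pattern for this variant is the same as in the first example, except that there is no processes argument and compile defaults to True. A hedged sketch, assuming a vqe object and initial parameters built as above:

# Hypothetical call of this variant; `vqe` and `initial_parameters` are assumed
# to be built as in the first example. compile=False skips the custom-operator check.
best_energy, best_parameters = vqe.minimize(initial_parameters,
                                            method='sgd',
                                            options={"nepochs": 5000,
                                                     "nmessage": 1000},
                                            compile=False)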
Example 4
    def minimize(self, method='BFGS', options=None, compile=True):
        """Minimize the fidelity cost function using the chosen optimization method."""
        loss = self.cost_function_fidelity

        if method == 'cma':
            # Genetic optimizer
            import cma
            r = cma.fmin2(lambda p: K.to_numpy(loss(p)), self.params, 2)
            result = r[1].result.fbest
            parameters = r[1].result.xbest

        elif method == 'sgd':
            circuit = self.circuit(self.training_set[0])
            # gradient descent requires a backend that supports automatic differentiation
            if not K.supports_gradients:
                from qibo.config import raise_error
                raise_error(
                    RuntimeError,
                    'Use tensorflow backend in order to compute gradients.'
                )

            sgd_options = {
                "nepochs": 5001,
                "nmessage": 1000,
                "optimizer": "Adamax",
                "learning_rate": 0.5
            }
            if options is not None:
                sgd_options.update(options)

            # proceed with the training
            vparams = K.Variable(self.params)
            optimizer = getattr(K.optimizers, sgd_options["optimizer"])(
                learning_rate=sgd_options["learning_rate"])

            def opt_step():
                with K.GradientTape() as tape:
                    l = loss(vparams)
                grads = tape.gradient(l, [vparams])
                optimizer.apply_gradients(zip(grads, [vparams]))
                return l, vparams

            if compile:
                opt_step = K.function(opt_step)

            # keep the best parameters seen during training (10 is just a large initial loss)
            l_optimal, params_optimal = 10, self.params
            for e in range(sgd_options["nepochs"]):
                l, vparams = opt_step()
                if l < l_optimal:
                    l_optimal, params_optimal = l, vparams
                if e % sgd_options["nmessage"] == 0:
                    print('ite %d : loss %f' % (e, K.to_numpy(l)))

            result = K.to_numpy(self.cost_function(params_optimal))
            parameters = K.to_numpy(params_optimal)

        else:
            from scipy.optimize import minimize
            m = minimize(lambda p: K.to_numpy(loss(p)),
                         self.params,
                         method=method,
                         options=options)
            result = m.fun
            parameters = m.x

        return result, parameters
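A sketch of how this method might be invoked follows. The model name and option values are assumptions for illustration; the snippet only shows that the object exposes cost_function_fidelity, cost_function, circuit, training_set and params.

# Hypothetical usage: `model` stands for whatever object defines this minimize
# method; the option values are illustrative.
best_cost, best_parameters = model.minimize(
    method='sgd',
    options={"nepochs": 2001, "nmessage": 500, "learning_rate": 0.1})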
Example 5
def sgd(loss, initial_parameters, args=(), options=None, compile=False):
    """Stochastic Gradient Descent (SGD) optimizer using Tensorflow backpropagation.

    See `tf.keras.Optimizers <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers>`_
    for a list of the available optimizers.

    Args:
        loss (callable): Loss as a function of variational parameters to be
            optimized.
        initial_parameters (np.ndarray): Initial guess for the variational
            parameters.
        args (tuple): optional arguments for the loss function.
        options (dict): Dictionary with options for the SGD optimizer. Supports
            the following keys:
              - ``'optimizer'`` (str, default: ``'Adagrad'``): Name of optimizer.
              - ``'learning_rate'`` (float, default: ``1e-3``): Learning rate.
              - ``'nepochs'`` (int, default: ``1e6``): Number of epochs for optimization.
              - ``'nmessage'`` (int, default: ``1e3``): Every how many epochs to print
                a message of the loss function.
    """
    # check if gates are using the MatmulEinsum backend
    from qibo.tensorflow.gates import TensorflowGate
    from qibo.tensorflow.circuit import TensorflowCircuit
    for argument in args:
        if isinstance(argument, TensorflowCircuit):
            circuit = argument
            for gate in circuit.queue:
                if not isinstance(gate, TensorflowGate):  # pragma: no cover
                    from qibo.config import raise_error
                    raise_error(
                        RuntimeError, 'SGD requires native Tensorflow '
                        'gates because gradients are not '
                        'supported in the custom kernels.')

    from qibo import K
    from qibo.config import log
    sgd_options = {
        "nepochs": 1000000,
        "nmessage": 1000,
        "optimizer": "Adagrad",
        "learning_rate": 0.001
    }
    if options is not None:
        sgd_options.update(options)

    # proceed with the training
    vparams = K.Variable(initial_parameters)
    optimizer = getattr(
        K.optimizers,
        sgd_options["optimizer"])(learning_rate=sgd_options["learning_rate"])

    def opt_step():
        with K.GradientTape() as tape:
            l = loss(vparams, *args)
        grads = tape.gradient(l, [vparams])
        optimizer.apply_gradients(zip(grads, [vparams]))
        return l

    if compile:
        opt_step = K.function(opt_step)

    for e in range(sgd_options["nepochs"]):
        l = opt_step()
        if e % sgd_options["nmessage"] == 1:
            log.info('ite %d : loss %f', e, l.numpy())

    return loss(vparams, *args).numpy(), vparams.numpy()
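Finally, a toy call of this variant shows how extra positional arguments are forwarded through args. The weighted loss is an assumption for illustration, and TensorFlow is again assumed to be installed.

# Hypothetical sketch: extra positional arguments are passed to the loss via args.
import numpy as np
import tensorflow as tf

def weighted_loss(params, weight):
    # Toy differentiable loss; a real loss would take a circuit and hamiltonian.
    return weight * tf.reduce_sum(params ** 2)

best_loss, best_parameters = sgd(weighted_loss, np.ones(4), args=(0.5,),
                                 options={"nepochs": 1000, "nmessage": 200})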