Code example #1
def sgd(loss, initial_parameters, options=None, compile=False):
    """Stochastic Gradient Descent (SGD) optimizer using Tensorflow backpropagation.

    See `tf.keras.Optimizers <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers>`_
    for a list of the available optimizers.

    Args:
        loss (callable): Loss as a function of variational parameters to be
            optimized.
        initial_parameters (np.ndarray): Initial guess for the variational
            parameters.
        options (dict): Dictionary with options for the SGD optimizer. Supports
            the following keys:
              - ``'optimizer'`` (str, default: ``'Adagrad'``): Name of optimizer.
              - ``'learning_rate'`` (float, default: ``1e-3``): Learning rate.
              - ``'nepochs'`` (int, default: ``1e6``): Number of epochs for optimization.
              - ``'nmessage'`` (int, default: ``1e3``): Every how many epochs to print
                a message of the loss function.
    """
    from qibo import K
    from qibo.config import log
    sgd_options = {
        "nepochs": 1000000,
        "nmessage": 1000,
        "optimizer": "Adagrad",
        "learning_rate": 0.001
    }
    if options is not None:
        sgd_options.update(options)

    # proceed with the training
    vparams = K.Variable(initial_parameters)
    optimizer = getattr(
        K.optimizers,
        sgd_options["optimizer"])(learning_rate=sgd_options["learning_rate"])

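    # One optimization step: evaluate the loss under a gradient tape and
    # apply the resulting gradients to the variational parameters.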
    def opt_step():
        with K.GradientTape() as tape:
            l = loss(vparams)
        grads = tape.gradient(l, [vparams])
        optimizer.apply_gradients(zip(grads, [vparams]))
        return l

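    # When requested, wrap the step with ``K.function`` so repeated calls
    # reuse a compiled graph instead of running eagerly.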
    if compile:
        opt_step = K.function(opt_step)

    for e in range(sgd_options["nepochs"]):
        l = opt_step()
        if e % sgd_options["nmessage"] == 1:
            log.info('ite %d : loss %f', e, l.numpy())

    return loss(vparams).numpy(), vparams.numpy()
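To show how the signature and option keys documented above fit together, here is a minimal usage sketch. The quadratic toy loss and its target vector are invented for illustration; the sketch assumes the TensorFlow backend is active (so ``K.Variable`` and ``K.GradientTape`` resolve to their TensorFlow counterparts) and that the backend exposes a ``K.sum`` reduction.

import numpy as np
from qibo import K

# Hypothetical quadratic loss: any callable that maps the variational
# parameters to a scalar, differentiable tensor will do.
target = np.array([0.5, -1.0, 2.0])

def toy_loss(params):
    return K.sum((params - target) ** 2)

best_loss, best_params = sgd(
    toy_loss,
    initial_parameters=np.zeros(3),
    options={"optimizer": "Adam", "learning_rate": 0.1,
             "nepochs": 2000, "nmessage": 500},
)

Any name available under ``K.optimizers``, i.e. a ``tf.keras.optimizers`` class such as ``Adam`` or ``Adagrad``, can be passed through the ``'optimizer'`` option.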
Code example #2
    def minimize(self, method='BFGS', options=None, compile=True):
        loss = self.cost_function_fidelity

        if method == 'cma':
            # CMA-ES (Covariance Matrix Adaptation Evolution Strategy) optimizer
            import cma
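            # cma.fmin2(objective, x0, sigma0) returns (xopt, es); the best
            # loss and parameters are then read from ``es.result``.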
            r = cma.fmin2(lambda p: K.to_numpy(loss(p)), self.params, 2)
            result = r[1].result.fbest
            parameters = r[1].result.xbest

        elif method == 'sgd':
            # Gradient-based training needs a backend with automatic
            # differentiation support; the flag does not depend on individual
            # gates, so a single check suffices.
            if not K.supports_gradients:
                from qibo.config import raise_error
                raise_error(
                    RuntimeError,
                    'Use the tensorflow backend in order to compute gradients.'
                )

            sgd_options = {
                "nepochs": 5001,
                "nmessage": 1000,
                "optimizer": "Adamax",
                "learning_rate": 0.5
            }
            if options is not None:
                sgd_options.update(options)

            # proceed with the training
            vparams = K.Variable(self.params)
            optimizer = getattr(K.optimizers, sgd_options["optimizer"])(
                learning_rate=sgd_options["learning_rate"])

            def opt_step():
                with K.GradientTape() as tape:
                    l = loss(vparams)
                grads = tape.gradient(l, [vparams])
                optimizer.apply_gradients(zip(grads, [vparams]))
                return l, vparams

            if compile:
                opt_step = K.function(opt_step)

            # Track the best loss seen so far; copy the variable values when
            # storing them so later in-place updates do not overwrite the best
            # parameters.
            l_optimal, params_optimal = float('inf'), self.params
            for e in range(sgd_options["nepochs"]):
                l, vparams = opt_step()
                if l < l_optimal:
                    l_optimal, params_optimal = l, K.to_numpy(vparams)
                if e % sgd_options["nmessage"] == 0:
                    print('ite %d : loss %f' % (e, K.to_numpy(l)))

            result = K.to_numpy(self.cost_function(params_optimal))
            parameters = params_optimal

        else:
            import numpy as np
            from scipy.optimize import minimize
            m = minimize(lambda p: K.to_numpy(loss(p)),
                         self.params,
                         method=method,
                         options=options)
            result = m.fun
            parameters = m.x

        return result, parameters
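The class that owns ``minimize`` is not shown in this snippet; assuming an instance ``model`` of it, with ``params``, ``circuit``, ``cost_function`` and ``cost_function_fidelity`` defined and ``from qibo import K`` at module level, a hypothetical call sequence would be:

# ``model`` is a hypothetical instance of the (unshown) class that defines
# ``minimize``; the option values below are purely illustrative.
best_cost, best_params = model.minimize(
    method='sgd', options={"nepochs": 2000, "learning_rate": 0.1})

# Any other ``method`` string falls through to scipy.optimize.minimize.
best_cost, best_params = model.minimize(method='Powell')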
Code example #3
File: optimizers.py  Project: tuliplan/qibo
def sgd(loss, initial_parameters, args=(), options=None, compile=False):
    """Stochastic Gradient Descent (SGD) optimizer using Tensorflow backpropagation.

    See `tf.keras.Optimizers <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers>`_
    for a list of the available optimizers.

    Args:
        loss (callable): Loss as a function of variational parameters to be
            optimized.
        initial_parameters (np.ndarray): Initial guess for the variational
            parameters.
        args (tuple): optional arguments for the loss function.
        options (dict): Dictionary with options for the SGD optimizer. Supports
            the following keys:
              - ``'optimizer'`` (str, default: ``'Adagrad'``): Name of optimizer.
              - ``'learning_rate'`` (float, default: ``1e-3``): Learning rate.
              - ``'nepochs'`` (int, default: ``1e6``): Number of epochs for optimization.
              - ``'nmessage'`` (int, default: ``1e3``): Every how many epochs to print
                a message of the loss function.
    """
    # check if gates are using the MatmulEinsum backend
    from qibo.tensorflow.gates import TensorflowGate
    from qibo.tensorflow.circuit import TensorflowCircuit
    for argument in args:
        if isinstance(argument, TensorflowCircuit):
            circuit = argument
            for gate in circuit.queue:
                if not isinstance(gate, TensorflowGate):  # pragma: no cover
                    from qibo.config import raise_error
                    raise_error(
                        RuntimeError, 'SGD requires native Tensorflow '
                        'gates because gradients are not '
                        'supported in the custom kernels.')

    from qibo import K
    from qibo.config import log
    sgd_options = {
        "nepochs": 1000000,
        "nmessage": 1000,
        "optimizer": "Adagrad",
        "learning_rate": 0.001
    }
    if options is not None:
        sgd_options.update(options)

    # proceed with the training
    vparams = K.Variable(initial_parameters)
    optimizer = getattr(
        K.optimizers,
        sgd_options["optimizer"])(learning_rate=sgd_options["learning_rate"])

    def opt_step():
        with K.GradientTape() as tape:
            l = loss(vparams, *args)
        grads = tape.gradient(l, [vparams])
        optimizer.apply_gradients(zip(grads, [vparams]))
        return l

    if compile:
        opt_step = K.function(opt_step)

    for e in range(sgd_options["nepochs"]):
        l = opt_step()
        if e % sgd_options["nmessage"] == 1:
            log.info('ite %d : loss %f', e, l.numpy())

    return loss(vparams, *args).numpy(), vparams.numpy()
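Because this variant forwards ``args`` to the loss and inspects any ``TensorflowCircuit`` passed there, a typical caller threads the circuit through ``args``. The sketch below is hypothetical: the two-qubit ansatz and the Hamiltonian are invented, and it assumes the qibo 0.1-era ``models``/``gates``/``hamiltonians`` API that these snippets are written against.

import numpy as np
from qibo import models, gates, hamiltonians

nqubits = 2
circuit = models.Circuit(nqubits)
circuit.add(gates.RY(q, theta=0) for q in range(nqubits))
hamiltonian = hamiltonians.Z(nqubits)

def expectation_loss(params, circuit):
    # ``params`` arrives as the optimizer's K.Variable; the remaining
    # positional arguments are taken from ``args``.
    circuit.set_parameters(params)
    final_state = circuit()
    return hamiltonian.expectation(final_state)

best_loss, best_params = sgd(expectation_loss,
                             initial_parameters=np.zeros(nqubits),
                             args=(circuit,),
                             options={"nepochs": 10000, "nmessage": 1000})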