Example #1
def _create_normalize_state(self, solver):
    # Runge-Kutta solvers do not preserve the state norm, so the state
    # must be renormalized after every step; the exact exponential
    # solver is unitary and needs no correction.
    if "rk" in solver:
        norm = Norm()
        log.info('Normalizing state during RK solution.')
        return lambda s: s / K.cast(norm(s), dtype=s.dtype)
    return lambda s: s
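For context, here is a self-contained sketch of the same pattern using plain NumPy in place of qibo's backend K and the Norm callback; every name below is a stand-in, not part of qibo's API.

import numpy as np

def create_normalize_state(solver):
    # Runge-Kutta integration does not preserve the state norm, so
    # renormalize after every step; exponential solvers are unitary
    # and skip the correction.
    if "rk" in solver:
        return lambda s: s / np.linalg.norm(s)
    return lambda s: s

normalize = create_normalize_state("rk4")
state = np.array([1.0, 1.0], dtype=complex)
print(normalize(state))  # [0.70710678+0.j 0.70710678+0.j]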
Example #2
File: evolution.py Project: hixio-mh/qibo
    def __init__(self,
                 hamiltonian,
                 dt,
                 solver="exp",
                 callbacks=[],
                 accelerators=None,
                 memory_device="/CPU:0"):
        if isinstance(hamiltonian, hamiltonians.HAMILTONIAN_TYPES):
            ham = hamiltonian
        else:
            # Time-dependent case: ``hamiltonian`` is a callable t -> H(t).
            ham = hamiltonian(0)
            if not isinstance(ham, hamiltonians.HAMILTONIAN_TYPES):
                raise_error(TypeError, "Hamiltonian type {} not understood."
                                       "".format(type(ham)))
        self.nqubits = ham.nqubits
        if dt <= 0:
            raise_error(ValueError,
                        f"Time step dt should be positive but is {dt}.")
        self.dt = dt

        # Distributed (multi-GPU) evolution only works with the Trotter
        # decomposition and the exponential solver.
        if (accelerators is not None
                and (not isinstance(ham, hamiltonians.TrotterHamiltonian)
                     or solver != "exp")):
            raise_error(
                NotImplementedError, "Distributed evolution is only "
                "implemented using the Trotter "
                "exponential solver.")
        if isinstance(ham, hamiltonians.TrotterHamiltonian):
            # Build the Trotter circuit that implements one dt step.
            ham.circuit(dt, accelerators, memory_device)
        # Instantiate the solver; pass the original (possibly callable)
        # hamiltonian so that time dependence is preserved.
        self.solver = solvers.factory[solver](self.dt, hamiltonian)

        self.callbacks = callbacks
        if "rk" in solver:
            # Runge-Kutta integration does not preserve the norm, so the
            # state is renormalized after every step.
            norm = Norm()
            self.normalize_state = lambda s: s / K.cast(norm(s), dtype=s.dtype)
            log.info('Normalizing state during RK solution.')
        else:
            self.normalize_state = lambda s: s

        self.accelerators = accelerators

        def calculate_callbacks(state):
            # qibo callbacks store their own history: calling the callback
            # computes the value and ``append`` records it.
            for callback in self.callbacks:
                callback.append(callback(state))

        if accelerators is None:
            self._calculate_callbacks = calculate_callbacks
        else:

            def calculate_callbacks_distributed(state):
                # Gather the distributed pieces into a full vector on the
                # memory device before evaluating callbacks.
                with K.device(memory_device):
                    if not isinstance(state, (np.ndarray, K.Tensor)):
                        state = state.vector
                    calculate_callbacks(state)

            self._calculate_callbacks = calculate_callbacks_distributed
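For context, a minimal sketch of how this constructor is typically reached; it assumes this __init__ belongs to qibo's StateEvolution model, and exact names and signatures may differ between qibo versions.

from qibo import hamiltonians, models

ham = hamiltonians.TFIM(4, h=1.0)  # time-independent transverse-field Ising Hamiltonian
evolution = models.StateEvolution(ham, dt=1e-2, solver="exp")
# final_state = evolution(final_time=1.0, initial_state=psi0)  # psi0: user-provided state vector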
Example #3
File: qlassifier.py Project: tuliplan/qibo
    def minimize(self, method='BFGS', options=None, compile=True):
        loss = self.cost_function_fidelity

        if method == 'cma':
            # CMA-ES evolutionary optimizer; fmin2 returns (xopt, es) and
            # the best point is stored on the strategy object ``es.result``.
            import cma
            r = cma.fmin2(lambda p: loss(p).numpy(), self.params, 2)
            result = r[1].result.fbest
            parameters = r[1].result.xbest

        elif method == 'sgd':
            from qibo.tensorflow.gates import TensorflowGate
            circuit = self.circuit(self.training_set[0])
            for gate in circuit.queue:
                if not isinstance(gate, TensorflowGate):
                    raise RuntimeError('SGD VQE requires native Tensorflow '
                                       'gates because gradients are not '
                                       'supported in the custom kernels.')

            sgd_options = {
                "nepochs": 5001,
                "nmessage": 1000,
                "optimizer": "Adamax",
                "learning_rate": 0.5
            }
            if options is not None:
                sgd_options.update(options)

            # proceed with the training
            from qibo.config import K
            vparams = K.Variable(self.params)
            optimizer = getattr(K.optimizers, sgd_options["optimizer"])(
                learning_rate=sgd_options["learning_rate"])

            def opt_step():
                # One SGD step: evaluate the loss under a gradient tape,
                # then back-propagate and update the variational parameters.
                with K.GradientTape() as tape:
                    l = loss(vparams)
                grads = tape.gradient(l, [vparams])
                optimizer.apply_gradients(zip(grads, [vparams]))
                return l, vparams

            if compile:
                # Compile the step into a graph for faster execution.
                opt_step = K.function(opt_step)

            # Track the best loss seen during training.
            l_optimal, params_optimal = float("inf"), self.params
            for e in range(sgd_options["nepochs"]):
                l, vparams = opt_step()
                if l < l_optimal:
                    l_optimal, params_optimal = l, vparams
                if e % sgd_options["nmessage"] == 0:
                    print('Epoch %d: loss %f' % (e, l.numpy()))

            result = self.cost_function(params_optimal).numpy()
            parameters = params_optimal.numpy()

        else:
            # Fall back to scipy for any other optimization method.
            from scipy.optimize import minimize
            m = minimize(lambda p: loss(p).numpy(),
                         self.params,
                         method=method,
                         options=options)
            result = m.fun
            parameters = m.x

        return result, parameters
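A hedged usage sketch: clf stands for an instance of the classifier class that defines minimize in qlassifier.py, constructed elsewhere in the project, and the option values are illustrative.

# Hypothetical usage; clf is assumed to be an already-built classifier.
best_loss, best_params = clf.minimize(
    method='sgd',
    options={"nepochs": 2001, "nmessage": 500, "learning_rate": 0.1},
)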