Example #1
    def optimizer(self, optimizer: Optional[Optimizer] = None) -> None:
        """
        Set optimizer.

        Args:
            optimizer (Optimizer): optimizer to use with the generator.

        Raises:
            QiskitMachineLearningError: invalid input.
        """
        if optimizer:
            if isinstance(optimizer, Optimizer):
                self._optimizer = optimizer
            else:
                raise QiskitMachineLearningError(
                    'Please provide an Optimizer object to use '
                    'as the generator optimizer.')
        else:
            self._optimizer = ADAM(maxiter=1,
                                   tol=1e-6,
                                   lr=1e-3,
                                   beta_1=0.7,
                                   beta_2=0.99,
                                   noise_factor=1e-6,
                                   eps=1e-6,
                                   amsgrad=True,
                                   snapshot_dir=self._snapshot_dir)
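A minimal usage sketch of this setter (assuming the QuantumGenerator class from Example #7 is in scope and that the optimizer classes come from qiskit.algorithms.optimizers; adjust the import path for Aqua-era code):

import numpy as np
from qiskit.algorithms.optimizers import COBYLA

generator = QuantumGenerator(bounds=np.array([0., 3.]), num_qubits=[2])
generator.optimizer = COBYLA()  # any Optimizer subclass is stored as-is
generator.optimizer = None      # falls back to the single-step ADAM default above
# generator.optimizer = "adam"  # would raise QiskitMachineLearningError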
Example #2
    def __init__(
        self,
        method: str,
        optimizer_kwargs: Optional[Dict] = None,
        recorder: RecorderFactory = _recorder,
    ):
        """
        Args:
            method: specifies optimizer to be used. Currently supports "ADAM", "AMSGRAD" and "SPSA".
            optimizer_kwargs: dictionary with additional keyword arguments passed to the optimizer.
            recorder: recorder object which defines how to store the optimization history.

        """
        super().__init__(recorder=recorder)
        self.method = method
        if optimizer_kwargs is None:
            self.optimizer_kwargs = {}
        else:
            self.optimizer_kwargs = optimizer_kwargs

        if self.method == "SPSA":
            self.optimizer = SPSA(**self.optimizer_kwargs)
        elif self.method == "ADAM" or self.method == "AMSGRAD":
            if self.method == "AMSGRAD":
                self.optimizer_kwargs["amsgrad"] = True
            self.optimizer = ADAM(**self.optimizer_kwargs)
Example #3
    def test_adam(self):
        """Test ADAM is serializable."""

        adam = ADAM(maxiter=100, amsgrad=True)
        settings = adam.settings

        self.assertEqual(settings["maxiter"], 100)
        self.assertTrue(settings["amsgrad"])
Example #4
    def __init__(self, n_features: int = 1, n_out: int = 1) -> None:
        """
        Args:
            n_features: Dimension of input data vector.
            n_out: Dimension of the discriminator's output vector.
        """
        super().__init__()
        self._n_features = n_features
        self._n_out = n_out
        self._discriminator = DiscriminatorNet(self._n_features, self._n_out)
        self._optimizer = ADAM(maxiter=1,
                               tol=1e-6,
                               lr=1e-3,
                               beta_1=0.7,
                               beta_2=0.99,
                               noise_factor=1e-4,
                               eps=1e-6,
                               amsgrad=True)

        self._ret = {}  # type: Dict[str, Any]
Example #5
    def test_adam(self):
        """adam test"""
        optimizer = ADAM(maxiter=10000, tol=1e-06)
        res = self._optimize(optimizer)
        self.assertLessEqual(res[2], 10000)
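Here res is the (parameters, value, nfev) triple returned by the optimizer's optimize method, so the assertion bounds the number of objective-function evaluations.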
Example #6
class NumPyDiscriminator(DiscriminativeNetwork):
    """
    Discriminator based on NumPy
    """
    def __init__(self, n_features: int = 1, n_out: int = 1) -> None:
        """
        Args:
            n_features: Dimension of input data vector.
            n_out: Dimension of the discriminator's output vector.
        """
        super().__init__()
        self._n_features = n_features
        self._n_out = n_out
        self._discriminator = DiscriminatorNet(self._n_features, self._n_out)
        self._optimizer = ADAM(maxiter=1,
                               tol=1e-6,
                               lr=1e-3,
                               beta_1=0.7,
                               beta_2=0.99,
                               noise_factor=1e-4,
                               eps=1e-6,
                               amsgrad=True)

        self._ret = {}  # type: Dict[str, Any]

    def set_seed(self, seed):
        """
        Set seed.
        Args:
            seed (int): seed
        """
        algorithm_globals.random_seed = seed

    def save_model(self, snapshot_dir):
        """
        Save discriminator model

        Args:
            snapshot_dir (str): directory path for saving the model
        """
        # save self._discriminator.params_values
        # NOTE: np.save appends a '.npy' extension, so the file names use it
        # explicitly to stay consistent with load_model below.
        np.save(
            os.path.join(snapshot_dir, 'np_discriminator_architecture.npy'),
            self._discriminator.architecture)
        np.save(os.path.join(snapshot_dir, 'np_discriminator_memory.npy'),
                self._discriminator.memory)
        np.save(os.path.join(snapshot_dir, 'np_discriminator_params.npy'),
                self._discriminator.parameters)
        self._optimizer.save_params(snapshot_dir)

    def load_model(self, load_dir):
        """
        Load discriminator model

        Args:
            load_dir (str): directory containing the stored numpy discriminator model to be loaded
        """
        self._discriminator.architecture = \
            np.load(os.path.join(load_dir, 'np_discriminator_architecture.npy'))
        self._discriminator.memory = np.load(
            os.path.join(load_dir, 'np_discriminator_memory.npy'))
        self._discriminator.parameters = np.load(
            os.path.join(load_dir, 'np_discriminator_params.npy'))
        self._optimizer.load_params(load_dir)

    @property
    def discriminator_net(self):
        """
        Get discriminator

        Returns:
            DiscriminatorNet: discriminator object
        """
        return self._discriminator

    @discriminator_net.setter
    def discriminator_net(self, net):
        self._discriminator = net

    def get_label(self, x, detach=False):  # pylint: disable=arguments-differ,unused-argument
        """
        Get data sample labels, i.e. true or fake.

        Args:
            x (numpy.ndarray): Discriminator input, i.e. data sample.
            detach (bool): deprecated for the numpy network

        Returns:
            numpy.ndarray: Discriminator output, i.e. data label
        """

        return self._discriminator.forward(x)

    def loss(self, x, y, weights=None):
        """
        Loss function

        Args:
            x (numpy.ndarray): sample label (equivalent to discriminator output)
            y (numpy.ndarray): target label
            weights (numpy.ndarray): customized scaling for each sample (optional)

        Returns:
            float: loss function
       """
        if weights is not None:
            # Use weights as scaling factors for the samples and compute the sum
            return (-1) * np.dot(
                np.multiply(
                    y, np.log(np.maximum(np.ones(np.shape(x)) * 1e-4, x))) +
                np.multiply(
                    np.ones(np.shape(y)) - y,
                    np.log(
                        np.maximum(
                            np.ones(np.shape(x)) * 1e-4,
                            np.ones(np.shape(x)) - x))), weights)
        else:
            # Unweighted binary cross-entropy: compute the mean over the samples
            return (-1) * np.mean(
                np.multiply(
                    y, np.log(np.maximum(np.ones(np.shape(x)) * 1e-4, x))) +
                np.multiply(
                    np.ones(np.shape(y)) - y,
                    np.log(
                        np.maximum(
                            np.ones(np.shape(x)) * 1e-4,
                            np.ones(np.shape(x)) - x))))

    def _get_objective_function(self, data, weights):
        """
        Get the objective function

        Args:
            data (tuple): training and generated data
            weights (numpy.ndarray): weights corresponding to training resp. generated data

        Returns:
            objective_function: objective function for the optimization
        """
        real_batch = data[0]
        real_prob = weights[0]
        generated_batch = data[1]
        generated_prob = weights[1]

        def objective_function(params):
            self._discriminator.parameters = params
            # Train on Real Data
            prediction_real = self.get_label(real_batch)
            loss_real = self.loss(prediction_real,
                                  np.ones(np.shape(prediction_real)),
                                  real_prob)
            prediction_fake = self.get_label(generated_batch)
            loss_fake = self.loss(prediction_fake,
                                  np.zeros(np.shape(prediction_fake)),
                                  generated_prob)
            return 0.5 * (loss_real[0] + loss_fake[0])

        return objective_function

    def _get_gradient_function(self, data, weights):
        """
        Get the gradient function

        Args:
            data (tuple): training and generated data
            weights (numpy.ndarray): weights corresponding to training resp. generated data

        Returns:
            gradient_function: Gradient function for the optimization
        """
        real_batch = data[0]
        real_prob = weights[0]
        generated_batch = data[1]
        generated_prob = weights[1]

        def gradient_function(params):
            self._discriminator.parameters = params
            prediction_real = self.get_label(real_batch)
            grad_real = self._discriminator.backward(
                prediction_real, np.ones(np.shape(prediction_real)), real_prob)
            prediction_generated = self.get_label(generated_batch)
            grad_generated = self._discriminator.backward(
                prediction_generated, np.zeros(np.shape(prediction_generated)),
                generated_prob)
            return np.add(grad_real, grad_generated)

        return gradient_function

    def train(self,
              data,
              weights,
              penalty=False,
              quantum_instance=None,
              shots=None) -> Dict[str, Any]:
        """
        Perform one training step w.r.t. the discriminator's parameters

        Args:
            data (tuple(numpy.ndarray, numpy.ndarray)):
                real_batch: array, Training data batch.
                generated_batch: array, Generated data batch.
            weights (tuple): real_prob, generated_prob - weights for the real
                resp. generated data batch.
            penalty (bool): Deprecated for classical networks.
            quantum_instance (QuantumInstance): Deprecated for classical networks.
            shots (int): Number of shots for hardware or qasm execution.
                Ignored for classical networks.

        Returns:
            dict: Discriminator loss and updated parameters.
        """

        # Train on Generated Data
        # Force single optimization iteration
        self._optimizer._maxiter = 1
        self._optimizer._t = 0
        objective = self._get_objective_function(data, weights)
        gradient = self._get_gradient_function(data, weights)
        self._discriminator.parameters, loss, _ = \
            self._optimizer.optimize(num_vars=len(self._discriminator.parameters),
                                     objective_function=objective,
                                     initial_point=np.array(self._discriminator.parameters),
                                     gradient_function=gradient)

        self._ret['loss'] = loss
        self._ret['params'] = self._discriminator.parameters

        return self._ret
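A hedged sketch of one alternating-GAN training step with this class (the batch shapes and uniform weights are illustrative assumptions, not taken from the source):

import numpy as np

discriminator = NumPyDiscriminator(n_features=1, n_out=1)
real_batch = np.random.rand(8, 1)   # samples drawn from the training data
fake_batch = np.random.rand(8, 1)   # samples produced by the generator
real_prob = np.full(8, 1 / 8)       # per-sample weights (probabilities)
fake_prob = np.full(8, 1 / 8)
result = discriminator.train((real_batch, fake_batch), (real_prob, fake_prob))
print(result['loss'], result['params'])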
Example #7
class QuantumGenerator(GenerativeNetwork):
    """Quantum Generator.

    The quantum generator is a parametrized quantum circuit which can be trained with the
    :class:`~qiskit_machine_learning.algorithms.QGAN` algorithm
    to generate a quantum state which approximates the probability
    distribution of given training data. At the beginning of the training the parameters are
    set randomly; thus, the output is random. Throughout the training the quantum
    generator learns to represent the target distribution.
    Eventually, the trained generator can be used for state preparation, e.g. in QAE.
    """
    def __init__(self,
                 bounds: np.ndarray,
                 num_qubits: Union[List[int], np.ndarray],
                 generator_circuit: Optional[QuantumCircuit] = None,
                 init_params: Optional[Union[List[float], np.ndarray]] = None,
                 optimizer: Optional[Optimizer] = None,
                 gradient_function: Optional[Union[Callable, Gradient]] = None,
                 snapshot_dir: Optional[str] = None) -> None:
        """
        Args:
            bounds: k min/max data values [[min_1,max_1],...,[min_k,max_k]],
                given input data dim k
            num_qubits: k numbers of qubits to determine representation resolution,
                i.e. n qubits enable the representation of 2**n values [n_1,..., n_k]
            generator_circuit: a QuantumCircuit implementing the generator.
            init_params: 1D numpy array or list, Initialization for
                the generator's parameters.
            optimizer: optimizer to be used for the training of the generator
            gradient_function: A Gradient object, or a function returning partial
                derivatives of the loss function w.r.t. the generator variational
                params.
            snapshot_dir: str or None, if not None, save the optimizer's parameters after every
                update step to the given directory

        Raises:
            QiskitMachineLearningError: if multivariate data is given but the variational
                                        distribution is not set up to represent it
        """
        super().__init__()
        self._bounds = bounds
        self._num_qubits = num_qubits
        self.generator_circuit = generator_circuit
        if generator_circuit is None:
            circuit = QuantumCircuit(sum(num_qubits))
            circuit.h(circuit.qubits)
            var_form = TwoLocal(sum(num_qubits),
                                'ry',
                                'cz',
                                reps=1,
                                entanglement='circular')
            circuit.compose(var_form, inplace=True)

            # Set generator circuit
            self.generator_circuit = circuit

        self._free_parameters = sorted(self.generator_circuit.parameters,
                                       key=lambda p: p.name)

        if init_params is None:
            init_params = \
                algorithm_globals.random.random(self.generator_circuit.num_parameters) * 2e-2

        self._bound_parameters = init_params

        # Set optimizer for updating the generator network
        self._snapshot_dir = snapshot_dir
        self.optimizer = optimizer

        self._gradient_function = gradient_function

        if np.ndim(self._bounds) == 1:
            bounds = np.reshape(self._bounds, (1, len(self._bounds)))
        else:
            bounds = self._bounds
        # Build the discrete data grid: for each dimension j, 2**n_j evenly
        # spaced points within its bounds; self._grid_elements accumulates the
        # Cartesian product across dimensions
        for j, prec in enumerate(self._num_qubits):
            # prepare data grid for dim j
            grid = np.linspace(bounds[j, 0], bounds[j, 1], (2**prec))
            if j == 0:
                if len(self._num_qubits) > 1:
                    self._data_grid = [grid]
                else:
                    self._data_grid = grid  # type: ignore
                self._grid_elements = grid
            elif j == 1:
                self._data_grid.append(grid)
                temp = []
                for g_e in self._grid_elements:
                    for g in grid:
                        temp0 = [g_e]
                        temp0.append(g)
                        temp.append(temp0)
                self._grid_elements = temp  # type: ignore
            else:
                self._data_grid.append(grid)
                temp = []
                for g_e in self._grid_elements:
                    for g in grid:
                        temp0 = deepcopy(g_e)
                        temp0.append(g)
                        temp.append(temp0)
                self._grid_elements = deepcopy(temp)  # type: ignore
        self._data_grid = np.array(self._data_grid,
                                   dtype=object)  # type: ignore

        self._seed = 7
        self._shots = None
        self._discriminator: Optional[DiscriminativeNetwork] = None
        self._ret: Dict[str, Any] = {}

    @property
    def parameter_values(self) -> Union[List, np.ndarray]:
        """
        Get parameter values from the quantum generator

        Returns:
            Current parameter values
        """
        return self._bound_parameters

    @parameter_values.setter
    def parameter_values(self, p_values: Union[List, np.ndarray]) -> None:
        """
        Set parameter values for the quantum generator

        Args:
            p_values: Parameter values
        """
        self._bound_parameters = p_values

    @property
    def seed(self) -> int:
        """
        Get seed.
        """
        return self._seed

    @seed.setter
    def seed(self, seed: int) -> None:
        """
        Set seed.

        Args:
            seed (int): seed to use.
        """
        self._seed = seed
        algorithm_globals.random_seed = seed

    @property
    def discriminator(self) -> DiscriminativeNetwork:
        """
        Get discriminator.
        """
        return self._discriminator

    @discriminator.setter
    def discriminator(self, discriminator: DiscriminativeNetwork) -> None:
        """
        Set discriminator.

        Args:
            discriminator (DiscriminativeNetwork): Discriminator used to
                compute the loss function.
        """
        self._discriminator = discriminator

    @property
    def optimizer(self) -> Optimizer:
        """
        Get optimizer.
        """
        return self._optimizer

    @optimizer.setter
    def optimizer(self, optimizer: Optional[Optimizer] = None) -> None:
        """
        Set optimizer.

        Args:
            optimizer (Optimizer): optimizer to use with the generator.

        Raises:
            QiskitMachineLearningError: invalid input.
        """
        if optimizer:
            if isinstance(optimizer, Optimizer):
                self._optimizer = optimizer
            else:
                raise QiskitMachineLearningError(
                    'Please provide an Optimizer object to use '
                    'as the generator optimizer.')
        else:
            self._optimizer = ADAM(maxiter=1,
                                   tol=1e-6,
                                   lr=1e-3,
                                   beta_1=0.7,
                                   beta_2=0.99,
                                   noise_factor=1e-6,
                                   eps=1e-6,
                                   amsgrad=True,
                                   snapshot_dir=self._snapshot_dir)

    def construct_circuit(self, params=None):
        """
        Construct generator circuit.

        Args:
            params (list | dict): parameters which should be used to run the generator.

        Returns:
            Instruction: construct the quantum circuit and return as gate
        """
        if params is None:
            return self.generator_circuit

        if isinstance(params, (list, np.ndarray)):
            params = dict(zip(self._free_parameters, params))

        return self.generator_circuit.assign_parameters(params)

    def get_output(self,
                   quantum_instance: QuantumInstance,
                   params: Optional[np.ndarray] = None,
                   shots: Optional[int] = None) -> Tuple[List, List]:
        """
        Get classical data samples from the generator.
        Running the quantum generator circuit results in a quantum state.
        To train this generator with a classical discriminator, we need to sample classical outputs
        by measuring the quantum state and mapping them to feature space defined by the training
        data.

        Args:
            quantum_instance: Quantum Instance, used to run the generator
                circuit.
            params: array or None, parameters which should
                be used to run the generator, if None use self._params
            shots: if not None use a number of shots that is different from the
                number set in quantum_instance

        Returns:
            generated samples, array: relative frequency (probability) of each sample
        """
        instance_shots = quantum_instance.run_config.shots
        q = QuantumRegister(sum(self._num_qubits), name='q')
        qc = QuantumCircuit(q)
        if params is None:
            params = cast(np.ndarray, self._bound_parameters)
        qc.append(self.construct_circuit(params), q)
        if quantum_instance.is_statevector:
            pass
        else:
            c = ClassicalRegister(sum(self._num_qubits), name='c')
            qc.add_register(c)
            qc.measure(q, c)

        if shots is not None:
            quantum_instance.set_config(shots=shots)

        result = quantum_instance.execute(qc)

        generated_samples = []
        if quantum_instance.is_statevector:
            result = result.get_statevector(qc)
            values = np.multiply(result, np.conj(result))
            values = list(values.real)
            keys = []
            for j in range(len(values)):
                keys.append(np.binary_repr(j, int(sum(self._num_qubits))))
        else:
            result = result.get_counts(qc)
            keys = list(result)
            values = list(result.values())
            values = [float(v) / np.sum(values) for v in values]
        generated_samples_weights = values
        # Map each measured bitstring back onto the data grid: split the
        # bitstring per data dimension, convert each slice to an integer
        # index, and look up the corresponding grid value
        for i, _ in enumerate(keys):
            index = 0
            temp = []
            for k, p in enumerate(self._num_qubits):
                bin_rep = 0
                j = 0
                while j < p:
                    bin_rep += int(keys[i][index]) * 2**(int(p) - j - 1)
                    j += 1
                    index += 1
                if len(self._num_qubits) > 1:
                    temp.append(self._data_grid[k][int(bin_rep)])
                else:
                    temp.append(self._data_grid[int(bin_rep)])
            generated_samples.append(temp)

        if shots is not None:
            # Restore the initial quantum_instance configuration
            quantum_instance.set_config(shots=instance_shots)
        return generated_samples, generated_samples_weights

    def loss(self, x, weights):  # pylint: disable=arguments-differ
        """
        Loss function for training the generator's parameters.

        Args:
            x (numpy.ndarray): sample label (equivalent to discriminator output)
            weights (numpy.ndarray): probability for measuring the sample

        Returns:
            float: loss function
        """
        try:
            # pylint: disable=no-member
            loss = (-1) * np.dot(np.log(x).transpose(), weights)
        except Exception:  # pylint: disable=broad-except
            # fall back when the label array cannot be transposed as above
            loss = (-1) * np.dot(np.log(x), weights)
        return loss.flatten()

    def _get_objective_function(self, quantum_instance, discriminator):
        """
        Get objective function

        Args:
            quantum_instance (QuantumInstance): used to run the quantum circuit.
            discriminator (torch.nn.Module): discriminator network to compute the sample labels.

        Returns:
            objective_function: objective function for quantum generator optimization
        """
        def objective_function(params):
            """
            Objective function

            Args:
                params (numpy.ndarray): generator parameters

            Returns:
                self.loss: loss function
            """
            generated_data, generated_prob = self.get_output(quantum_instance,
                                                             params=params,
                                                             shots=self._shots)
            prediction_generated = discriminator.get_label(generated_data,
                                                           detach=True)
            return self.loss(prediction_generated, generated_prob)

        return objective_function

    def _convert_to_gradient_function(self, gradient_object, quantum_instance,
                                      discriminator):
        """
        Convert to gradient function

        Args:
            gradient_object (Gradient): the gradient object to be used to
                compute analytical gradients.
            quantum_instance (QuantumInstance): used to run the quantum circuit.
            discriminator (torch.nn.Module): discriminator network to compute the sample labels.

        Returns:
            gradient_function: gradient function that takes the current
                parameter values and returns partial derivatives of the loss
                function w.r.t. the variational parameters.
        """
        def gradient_function(current_point):
            """
            Gradient function

            Args:
                current_point (np.ndarray): Current values for the variational parameters.

            Returns:
                np.ndarray: array of partial derivatives of the loss
                    function w.r.t. the variational parameters.
            """
            free_params = self._free_parameters
            generated_data, _ = self.get_output(quantum_instance,
                                                params=current_point,
                                                shots=self._shots)
            prediction_generated = discriminator.get_label(generated_data,
                                                           detach=True)
            op = ~CircuitStateFn(primitive=self.generator_circuit)
            grad_object = gradient_object.convert(operator=op,
                                                  params=free_params)
            value_dict = {
                free_params[i]: current_point[i]
                for i in range(len(free_params))
            }
            analytical_gradients = np.array(
                grad_object.assign_parameters(value_dict).eval())
            loss_gradients = self.loss(prediction_generated,
                                       analytical_gradients).real
            return loss_gradients

        return gradient_function

    def train(self, quantum_instance=None, shots=None):
        """
        Perform one training step w.r.t. the generator's parameters

        Args:
            quantum_instance (QuantumInstance): used to run the generator circuit.
            shots (int): Number of shots for hardware or qasm execution.

        Returns:
            dict: generator loss (float) and updated parameters (array).
        """
        self._shots = shots

        # TODO: Improve access to maxiter (e.g. via an options getter) to avoid private member
        # access. Since not all optimizers use that exact attribute name, find a better approach
        # so the checks below need not warn when the attribute is named differently but the
        # maximum number of iterations is truly 1 anyway.
        try:
            if self._optimizer._maxiter != 1:
                warnings.warn(
                    'Please set the optimizer maxiter argument to 1 '
                    'to ensure that the generator '
                    'and discriminator are updated in an alternating fashion.')
        except AttributeError:
            maxiter = self._optimizer._options.get('maxiter')
            if maxiter is not None and maxiter != 1:
                warnings.warn(
                    'Please set the optimizer maxiter argument to 1 '
                    'to ensure that the generator '
                    'and discriminator are updated in an alternating fashion.')
            elif maxiter is None:
                warnings.warn(
                    'Please ensure the optimizer max iterations are set to 1 '
                    'to ensure that the generator '
                    'and discriminator are updated in an alternating fashion.')

        if isinstance(self._gradient_function, Gradient):
            self._gradient_function = self._convert_to_gradient_function(
                self._gradient_function, quantum_instance, self._discriminator)

        objective = self._get_objective_function(quantum_instance,
                                                 self._discriminator)
        self._bound_parameters, loss, _ = self._optimizer.optimize(
            num_vars=len(self._bound_parameters),
            objective_function=objective,
            initial_point=self._bound_parameters,
            gradient_function=self._gradient_function)

        self._ret['loss'] = loss
        self._ret['params'] = self._bound_parameters

        return self._ret
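For orientation, a sketch of drawing samples from an untrained generator (the backend choice and bounds are illustrative; the QuantumInstance/BasicAer import paths assume the Qiskit version this code targets):

import numpy as np
from qiskit import BasicAer
from qiskit.utils import QuantumInstance

bounds = np.array([0., 3.])                     # one data dimension on [0, 3]
generator = QuantumGenerator(bounds=bounds, num_qubits=[2])
q_instance = QuantumInstance(BasicAer.get_backend('statevector_simulator'))
samples, weights = generator.get_output(q_instance)
# samples: the 2**2 grid points; weights: their measured probabilities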
Example #8
class QiskitOptimizer(Optimizer):
    def __init__(
        self,
        method: str,
        optimizer_kwargs: Optional[Dict] = None,
        recorder: RecorderFactory = _recorder,
    ):
        """
        Args:
            method: specifies optimizer to be used. Currently supports "ADAM", "AMSGRAD" and "SPSA".
            optimizer_kwargs: dictionary with additional keyword arguments passed to the optimizer.
            recorder: recorder object which defines how to store the optimization history.

        """
        super().__init__(recorder=recorder)
        self.method = method
        if optimizer_kwargs is None:
            self.optimizer_kwargs = {}
        else:
            self.optimizer_kwargs = optimizer_kwargs

        if self.method == "SPSA":
            self.optimizer = SPSA(**self.optimizer_kwargs)
        elif self.method == "ADAM" or self.method == "AMSGRAD":
            if self.method == "AMSGRAD":
                self.optimizer_kwargs["amsgrad"] = True
            self.optimizer = ADAM(**self.optimizer_kwargs)

    def _minimize(
        self,
        cost_function: CallableWithGradient,
        initial_params: Optional[np.ndarray] = None,
        keep_history: bool = False,
    ):
        """
        Minimizes given cost function using optimizers from Qiskit Aqua.

        Args:
            cost_function: python method which takes numpy.ndarray as input.
            initial_params (np.ndarray): initial parameters to be used for optimization.
            keep_history: flag indicating whether the optimization history should be recorded.

        Returns:
            optimization_results (scipy.optimize.OptimizeResult): results of the optimization.
        """
        history = []

        number_of_variables = len(initial_params)

        gradient_function = None
        if hasattr(cost_function, "gradient") and callable(
            getattr(cost_function, "gradient")
        ):
            gradient_function = cost_function.gradient

        solution, value, nfev = self.optimizer.optimize(
            num_vars=number_of_variables,
            objective_function=cost_function,
            initial_point=initial_params,
            gradient_function=gradient_function,
        )

        if self.method == "ADAM" or self.method == "AMSGRAD":
            nit = self.optimizer._t
        else:
            nit = self.optimizer.maxiter

        return optimization_result(
            opt_value=value,
            opt_params=solution,
            nit=nit,
            nfev=nfev,
            **construct_history_info(cost_function, keep_history)
        )
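A usage sketch (assuming the base Optimizer exposes a public minimize wrapper that delegates to _minimize, and that the returned object exposes the fields passed to optimization_result above):

import numpy as np

optimizer = QiskitOptimizer(method="ADAM", optimizer_kwargs={"maxiter": 200})
result = optimizer.minimize(lambda x: np.sum(x ** 2), np.ones(3))
print(result.opt_value, result.nit, result.nfev)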
Example #9
    def test_adam(self):
        """adam test"""
        optimizer = ADAM(maxiter=10000, tol=1e-06)
        self.run_optimizer(optimizer, max_nfev=10000)
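run_optimizer here is presumably the shared harness of the newer test suite; its max_nfev argument bounds the allowed number of objective evaluations, playing the same role as the res[2] assertion in the earlier variant of this test.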