    @staticmethod
    def _create_poly(d):
        """Creates a polynomial function in d dimensions."""
        def _poly_fun(x):
            return (x**4).sum() + x.sum()**2

        def _poly_grad(x):
            # gradient of (x ** 4).sum() is 4 * x ** 3; the gradient of
            # x.sum() ** 2 is 2 * x.sum() in every component
            return (4 * x ** 3) + 2 * x.sum()

        int_fun = CFunction(fun=_poly_fun, gradient=_poly_grad)
        int_fun.global_min = lambda: 0.
        int_fun.global_min_x = lambda: CArray.zeros(d, )

        return int_fun
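As a quick sanity check of the analytical gradient above, the same `CFunction(...).check_grad(...)` pattern used throughout these examples can be applied directly. This is a minimal sketch assuming the forward-difference `check_grad` interface shown in the other snippets; the test point and tolerance are illustrative choices, not values taken from the original tests.

# Sketch: verify that _poly_fun and _poly_grad agree with a numerical gradient.
poly = CFunction(fun=lambda x: (x ** 4).sum() + x.sum() ** 2,
                 gradient=lambda x: 4 * x ** 3 + 2 * x.sum())
x_check = CArray([0.5, -1.5])
grad_err = poly.check_grad(x_check, epsilon=1e-6)
assert grad_err < 1e-3, "analytical and numerical gradients disagree"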
    def _init_solver(self):
        """Create solver instance."""

        if self._solver_clf is None or self.discrete is None:
            raise ValueError('Solver not set properly!')

        # map attributes to fun, constr, box
        fun = CFunction(fun=self._objective_function,
                        gradient=self._objective_function_gradient,
                        n_dim=self._classifier.n_features)

        bounds, constr = self._constraint_creation()

        # FIXME: many solvers do not work in discrete spaces.
        #  This is a workaround to raise a proper error, but we should
        #  handle these problems better.
        solver_params = self.solver_params
        if self.discrete is True:
            solver_params['discrete'] = True

        self._solver = COptimizer.create(
            self._solver_type,
            fun=fun, constr=constr,
            bounds=bounds,
            **solver_params)

        self._solver.verbose = 0
        self._warm_start = None
Example #4
    def test_grad(self):
        """Compare analytical gradients with its numerical approximation."""
        def _loss_wrapper(scores, loss, true_labels):
            return loss.loss(true_labels, scores)

        def _dloss_wrapper(scores, loss, true_labels):
            return loss.dloss(true_labels, scores)

        for loss_id in ('hinge', 'hinge-squared', 'square', 'log'):
            self.logger.info("Creating loss: {:}".format(loss_id))
            loss_class = CLoss.create(loss_id)

            n_elems = 1
            y_true = CArray.randint(0, 2, n_elems).todense()
            score = CArray.randn((n_elems, ))

            check_grad_val = CFunction(
                _loss_wrapper, _dloss_wrapper).check_grad(score,
                                                          1e-8,
                                                          loss=loss_class,
                                                          true_labels=y_true)
            self.logger.info(
                "Gradient difference between analytical loss "
                "gradient and numerical gradient: %s", str(check_grad_val))
            self.assertLess(
                check_grad_val, 1e-4,
                "the gradient is wrong {:} for {:} loss".format(
                    check_grad_val, loss_id))
    def _test_grad_tr_params(self, clf):
        """Compare `grad_tr_params` output with numerical gradient.

        Parameters
        ----------
        clf : CClassifier

        """
        i = self.ds.X.randsample(
            CArray.arange(self.ds.num_samples), 1, random_state=self.seed)
        x, y = self.ds.X[i, :], self.ds.Y[i]
        self.logger.info("idx {:}: x {:}, y {:}".format(i.item(), x, y))

        params = self.clf_grads_class.params(clf)

        # Compare the analytical grad with the numerical grad
        gradient = clf.grad_tr_params(x, y).ravel()
        num_gradient = CFunction(self._grad_tr_fun).approx_fprime(
            params, epsilon=1e-6,
            x0=x, y0=y, clf_grads=self.clf_grads_class, clf=clf)

        error = (gradient - num_gradient).norm()
        self.logger.info("Analytical gradient:\n{:}".format(gradient))
        self.logger.info("Numerical gradient:\n{:}".format(num_gradient))
        self.logger.info("norm(grad - grad_num): {:}".format(error))
        self.assertLess(error, 1e-2)

        self.assertTrue(gradient.is_vector_like)
        self.assertEqual(params.size, gradient.size)
        self.assertEqual(params.issparse, gradient.issparse)
        self.assertIsSubDtype(gradient.dtype, float)
Example #6
    def _init_solver(self):
        """Create solver instance."""
        if self._solver_clf is None or self.distance is None \
                or self.discrete is None:
            raise ValueError('Solver not set properly!')

        # map attributes to fun, constr, box
        fun = CFunction(fun=self._objective_function,
                        gradient=self._objective_function_gradient,
                        n_dim=self.n_dim)

        constr = CConstraint.create(self._distance)
        constr.center = self._x0
        constr.radius = self.dmax

        # only feature increments or decrements are allowed
        lb = self._x0.todense() if self.lb == 'x0' else self.lb
        ub = self._x0.todense() if self.ub == 'x0' else self.ub

        bounds = CConstraint.create('box', lb=lb, ub=ub)

        self._solver = COptimizer.create(self._solver_type,
                                         fun=fun,
                                         constr=constr,
                                         bounds=bounds,
                                         discrete=self._discrete,
                                         **self._solver_params)

        # TODO: fix this verbose level propagation
        self._solver.verbose = self.verbose
Example #7
    def _test_gradient(self, c, p, th=1e-6):
        """Compare the analytical with the numerical gradient.

        Parameters
        ----------
        c : CConstraint
        p : CArray
            The point on which the gradient is computed.
        th : float
            Tolerance for approximation check.

        """
        self.logger.info("Testing `.gradient({:})` for {:}".format(p, c))
        gradient = c.gradient(p)

        self.assertTrue(gradient.is_vector_like)
        self.assertEqual(p.size, gradient.size)

        # Numerical gradient
        num_gradient = CFunction(c.constraint).approx_fprime(p, 1e-8)

        # Compute the norm of the difference
        error = (gradient - num_gradient).norm()

        self.logger.info("Analytic grad: {:}".format(gradient))
        self.logger.info("Numeric grad: {:}".format(num_gradient))

        self.logger.info("norm(grad - num_grad): {:}".format(error))
        self.assertLess(error, th)

        self.assertIsSubDtype(gradient.dtype, float)
    def test_multiclass_gradient(self):
        """Test if gradient is correct when requesting for all classes with w"""

        multiclass = CClassifierMulticlassOVA(classifier=CClassifierSVM,
                                              class_weight='balanced')
        multiclass.fit(self.dataset.X, self.dataset.Y)
        div = CArray.rand(shape=multiclass.n_classes, random_state=0)

        def f_x(x):
            x = multiclass.predict(x, return_decision_function=True)[1]
            return CArray((x / div).mean())

        def grad_f_x(x):
            w = CArray.ones(shape=multiclass.n_classes) / \
                (div * multiclass.n_classes)
            return multiclass.gradient(x, w=w)

        i = 5  # Sample to test
        x = self.dataset.X[i, :]

        from secml.optim.function import CFunction
        check_grad_val = CFunction(f_x, grad_f_x).check_grad(x, epsilon=1e-1)
        self.logger.info(
            "norm(grad - num_grad): %s", str(check_grad_val))
        self.assertLess(check_grad_val, 1e-3)
    def setUp(self):

        avail_funcs = ['3h-camel', 'beale', 'mc-cormick', 'rosenbrock']

        # Instantiate the available functions to test the optimizer
        self.funcs = {}
        for fun_id in avail_funcs:
            self.funcs[fun_id] = CFunction.create(fun_id)
    def test_2D(self):
        """Plot of a 2D example."""
        grid_limits = [(-4, 4), (-4, 4)]

        A = CArray.eye(2, 2)
        b = CArray.zeros(2).T
        circle = CFunction.create('quadratic', A, b, 0)

        self._test_2D(circle, grid_limits, levels=[16])
Example #11
        def compare_analytical_and_numerical_grad(array, norm_type):
            def _get_transform_component(x, y):
                trans = norm.transform(x).todense()
                return trans[y]

            norm = CNormalizerUnitNorm(norm=norm_type).fit(array)

            if norm_type == "l1":
                # if the norm is one we are computing a sub-gradient
                decimal = 1
            else:
                decimal = 4

            # check if they are almost equal
            self.logger.info("Norm: {:}".format(norm))

            # check the gradient comparing it with the numerical one
            n_feats = array.size

            for f in range(n_feats):
                self.logger.info(
                    "Compare the gradient of feature: {:}".format(f))

                # compute analytical gradient
                w = CArray.zeros(array.size)
                w[f] = 1

                an_grad = norm.gradient(array, w=w)
                self.logger.info("Analytical gradient is: {:}".format(
                    an_grad.todense()))

                num_grad = CFunction(_get_transform_component).approx_fprime(
                    array.todense(), epsilon=1e-5, y=f)
                self.logger.info("Numerical gradient is: {:}".format(
                    num_grad.todense()))

                self.assert_array_almost_equal(an_grad,
                                               num_grad,
                                               decimal=decimal)
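The one-hot `w` above isolates the gradient of a single transformed feature: for a mapping t(x) with Jacobian J, the gradient of w . t(x) is J^T w, which for w = e_f is simply the f-th row of J. Below is a small numpy sketch of that identity; the toy mapping is an illustrative assumption, independent of CNormalizerUnitNorm.

# Illustrative toy mapping R^2 -> R^2 and its Jacobian.
import numpy as np

def t(x):
    return np.array([x[0] * x[1], x[0] + x[1] ** 2])

x = np.array([1.0, 2.0])
J = np.array([[x[1], x[0]],          # row 0: gradient of t_0
              [1.0, 2.0 * x[1]]])    # row 1: gradient of t_1
f = 0
w = np.eye(2)[f]                     # one-hot selector for output f
assert np.allclose(J.T @ w, J[f])    # J^T e_f picks out the f-th row of J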
    def _init_internal_solver(self, x0: CArray):
        fun = CFunction(
            fun=self._objective_function,
            gradient=self._objective_function_gradient,
            n_dim=self.n_dim,
        )
        constraint = CConstraint.create("l1")
        constraint.center = x0
        constraint.radius = 0
        lb = x0.todense() if self.lb == "x0" else self.lb
        ub = x0.todense() if self.ub == "x0" else self.ub
        bounds = CConstraint.create("box", lb=lb, ub=ub)
        self._solver = COptimizer.create(
            "pgd-ls", fun=fun, constr=constraint, bounds=bounds, discrete=True
        )
    def test_softmax_gradient(self):
        """Unittests for the softmax gradient:
           compare the analytical gradient with its numerical approximation."""

        self.softmax = CSoftmax()

        def _sigma_pos_label(s, y):
            """
            Compute the softmax of the scores in s and return the y-th
            element of the resulting vector.

            Parameters
            ----------
            s : CArray
                Scores.
            y : int
                Index of the considered score within the vector.

            Returns
            -------
            softmax : CArray
            """
            softmax = self.softmax.softmax(s).ravel()
            return softmax[y]

        score = self.scores[0, :]

        for pos_label in (0, 1, 2):
            self.logger.info("POS_LABEL: {:}".format(pos_label))

            # real value of the gradient on x
            grad = self.softmax.gradient(score, pos_label)

            self.logger.info("ANALITICAL GRAD: {:}".format(grad))

            approx = CFunction(_sigma_pos_label).approx_fprime(
                score, 1e-5, pos_label)

            self.logger.info("NUMERICAL GRADIENT: {:}".format(approx))

            check_grad_val = (grad - approx).norm()

            self.logger.info(
                "The norm of the difference between the "
                "analytical and the numerical gradient is: %s",
                str(check_grad_val))
            self.assertLess(check_grad_val, 1e-4,
                            "the gradient is wrong {:}".format(check_grad_val))
class TestLineSearch(CUnitTest):
    """Test for COptimizer class."""

    def test_minimize(self):
        """Testing the bisect line-search algorithm."""
        self.logger.info("Test for bisect line search ...")

        def fun_test(x):
            return x ** 2 - 1

        self.fun = CFunction(fun=fun_test)

        line_search = CLineSearchBisect(fun=self.fun, max_iter=40)
        line_search.verbose = 2

        x = CArray([-5.0])
        fx = self.fun.fun(x)
        self.logger.info("x: " + str(x))
        self.logger.info("f(x): " + str(fx))

        d = CArray([1.0])
        x0, f0 = line_search.minimize(x, d, fx=fx)

        self.logger.info("x*: " + str(x0))
        self.logger.info("f(x*): " + str(f0))
        self.logger.info("num. iter.: " + str(line_search.n_iter))
        self.logger.info("num. eval.: " + str(self.fun.n_fun_eval))

        self._save_fig()

        self.assertTrue(x0.norm() <= 1e-6,
                        "Wrong solution: expected x0 = 0, got {:}".format(x0))

    def _save_fig(self):
        """Visualizing the function being optimized with line search."""
        x_range = CArray.arange(-5, 20, 0.5, )
        score_range = x_range.T.apply_along_axis(self.fun.fun, axis=1)
        ref_line = CArray.zeros(x_range.size)
        fig = CFigure(height=6, width=12)
        fig.sp.plot(x_range, score_range, color='b')
        fig.sp.plot(x_range, ref_line, color='k')
        filename = fm.join(fm.abspath(__file__), 'test_line_search_bisect.pdf')
        fig.savefig(filename)
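CLineSearchBisect is used above as a black box. The sketch below illustrates the underlying idea of a bisection line search along a direction d in plain Python; it is not secml's implementation, and the bracket [0, 10] and the probing offset are illustrative assumptions for the convex example f(x) = x ** 2 - 1.

# Plain-Python sketch of a bisection line search along direction d,
# assuming f is unimodal along the ray x + t * d (illustration only).
def bisect_line_search(f, x, d, t_lo=0.0, t_hi=10.0, max_iter=40):
    for _ in range(max_iter):
        t_mid = 0.5 * (t_lo + t_hi)
        # probe slightly left/right of the midpoint to decide which half
        # of the interval still contains the minimizer
        if f(x + (t_mid - 1e-6) * d) < f(x + (t_mid + 1e-6) * d):
            t_hi = t_mid
        else:
            t_lo = t_mid
    t_best = 0.5 * (t_lo + t_hi)
    return x + t_best * d, f(x + t_best * d)

x_star, f_star = bisect_line_search(lambda z: z ** 2 - 1, -5.0, 1.0)
# for this example x_star is close to 0.0 and f_star close to -1.0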
Example #15
    def maximize(self, x_init, args=(), **kwargs):
        """Interface for maximizers.

        Implementing:
            max fun(x)
            s.t. constraint

        This is implemented by inverting the sign of fun and gradient and
        running the `COptimizer.minimize()`.

        Parameters
        ----------
        x_init : CArray
            The initial input point.
        args : tuple, optional
            Extra arguments passed to the objective function and its gradient.
        kwargs
            Additional parameters of the minimization method.

        """

        # Invert sign of fun(x) and grad(x) and run minimize
        # We use def statements and partial to respect PEP8 and scopes

        def fun_inv(wrapped_fun, z, *f_args, **f_kwargs):
            return -wrapped_fun(z, *f_args, **f_kwargs)

        def grad_inv(wrapped_grad, z, *f_args, **f_kwargs):
            return -wrapped_grad(z, *f_args, **f_kwargs)

        self._fun = CFunction(fun=partial(fun_inv, self._f.fun),
                              gradient=partial(grad_inv, self._f.gradient))

        x = self.minimize(x_init, args=args, **kwargs)

        # fix solution variables
        self._f_seq = -self._f_seq

        # restore fun to its default
        self._fun = self.f

        return x
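The sign-inversion trick documented above can be seen in isolation with plain Python callables; the sketch below is illustrative and is not tied to a concrete COptimizer subclass.

# Maximizing f is the same as minimizing -f and flipping the sign of the
# optimal value; this mirrors the fun_inv/grad_inv wrappers above.
from functools import partial

def fun_inv(wrapped_fun, z):
    return -wrapped_fun(z)

def f(z):
    return -(z - 3.0) ** 2 + 5.0  # maximum value 5.0 attained at z = 3.0

neg_f = partial(fun_inv, f)
# a minimizer applied to neg_f would find z* = 3.0 with neg_f(z*) = -5.0;
# flipping the sign recovers the maximum f(z*) = 5.0
assert -neg_f(3.0) == 5.0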
    @staticmethod
    def _quadratic_fun(d):
        """Creates a quadratic function in d dimensions."""
        def _quadratic_fun_min(A, b):
            from scipy import linalg
            # solve 2*A*x = -b for the unconstrained minimizer;
            # `assume_a='pos'` replaces the deprecated `sym_pos=True` flag
            min_x_scipy = linalg.solve((2 * A).tondarray(),
                                       -b.tondarray(),
                                       assume_a='pos')
            return CArray(min_x_scipy).ravel()

        A = CArray.eye(d, d)
        b = CArray.ones((d, 1)) * 2

        discr_fun = CFunction.create('quadratic', A, b, c=0)

        min_x = _quadratic_fun_min(A, b)
        min_val = discr_fun.fun(min_x)

        discr_fun.global_min = lambda: min_val
        discr_fun.global_min_x = lambda: min_x

        return discr_fun
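For d = 2, the closed-form minimizer computed above can be checked by hand, assuming the quadratic has the form f(x) = x^T A x + b^T x + c implied by solving 2 A x + b = 0.

# Hand check with numpy (illustrative, independent of CFunction):
# A = I, b = [2, 2], c = 0  ->  x* = -(1/2) b = [-1, -1],
# f(x*) = (1 + 1) + (-2 - 2) + 0 = -2.
import numpy as np
A = np.eye(2)
b = np.full(2, 2.0)
x_star = np.linalg.solve(2 * A, -b)        # [-1., -1.]
f_min = x_star @ A @ x_star + b @ x_star   # -2.0
assert np.isclose(f_min, -2.0)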
Example #17
    def _init_solver(self):
        """Create solver instance."""

        if self.classifier is None:
            raise ValueError('Solver not set properly!')

        # map attributes to fun, constr, box
        fun = CFunction(fun=self.objective_function,
                        gradient=self.objective_function_gradient,
                        n_dim=self._classifier.n_features)

        bounds, constr = self._constraint_creation()

        self._solver = COptimizer.create(
            self._solver_type,
            fun=fun, constr=constr,
            bounds=bounds,
            **self.solver_params)

        self._solver.verbose = 0
        self._warm_start = None
    def _test_gradient(self):
        """Test for kernel gradients with dense points."""

        if not self._has_gradient():
            self.logger.info(
                "Gradient is not implemented for %s. "
                "Skipping gradient dense tests.", self.kernel.class_type)
            return

        # we invert the order of input patterns as we compute the kernel
        # gradient wrt the second point but check_grad needs it as first input
        def kern_f_for_test(p2, p1, kernel_func):
            return kernel_func.k(p2, p1)

        def kern_grad_for_test(p2, p1, kernel_func):
            kernel_func.rv = p1
            return kernel_func.gradient(p2)

        self.logger.info("Testing gradient with dense data.")
        self.logger.info("Kernel type: %s", self.kernel.class_type)

        for i in range(self.d_dense.num_samples):
            self.logger.info("x point: " + str(self.p2_dense))
            self.logger.info("y point: " + str(self.d_dense.X[i, :]))

            # TODO: implement centered numerical differences.
            #  If the analytical gradient is zero, the numerical estimation
            #  does not work, as it uses a one-sided estimate. We should use
            #  centered numerical differences to gain precision.
            self.kernel.rv = self.d_dense.X[i, :]
            grad = self.kernel.gradient(self.p2_dense)
            if grad.norm() >= 1e-10:
                grad_error = CFunction(kern_f_for_test,
                                       kern_grad_for_test).check_grad(
                                           self.p2_dense, 1e-8,
                                           self.d_dense.X[i, :], self.kernel)
                self.logger.info("Gradient approx. error: {:}"
                                 "".format(grad_error))
                self.assertTrue(grad_error < 1e-4)
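The TODO above mentions centered numerical differences. A minimal sketch of that idea is shown below; it is independent of secml's approx_fprime, whose internals are not part of this snippet.

# Centered-difference gradient estimate: accurate to O(eps ** 2), while the
# one-sided (forward) estimate is only O(eps).
import numpy as np

def centered_fprime(f, x, eps=1e-6):
    x = np.asarray(x, dtype=float)
    grad = np.zeros_like(x)
    for i in range(x.size):
        step = np.zeros_like(x)
        step[i] = eps
        grad[i] = (f(x + step) - f(x - step)) / (2.0 * eps)
    return grad

# e.g. centered_fprime(lambda v: (v ** 2).sum(), np.array([1.0, -2.0]))
# returns approximately [2.0, -4.0]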
Example #19
    def __init__(self, clf, out_dims=None):

        self._clf = clf

        if isinstance(clf, CClassifierReject):
            raise ValueError("classifier with reject cannot be "
                             "converted to a tensorflow model")

        if not clf.is_fitted():
            raise NotFittedError("The classifier should be already trained!")

        self._out_dims = out_dims

        # classifier output tensor name. Either "probs" or "logits".
        self._output_layer = 'logits'

        # Given a trained CClassifier, creates a tensorflow node for the
        # network output and one for its gradient
        self._fun = CFunction(fun=self._decision_function,
                              gradient=clf.gradient)
        self._callable_fn = _CClassifierToTF(self._fun, self._out_dims)

        super(_CModelCleverhans, self).__init__(nb_classes=clf.n_classes)
    def _single_param_grad_check(self, xc, f_param, df_param, param_name):
        """

        Parameters
        ----------
        xc CArray
            poisoning point
        f_param function
            the function that update the parameter value
        df_param function
            the function that compute the gradient value
        param_name the parameter name
        """

        # Compare analytical gradient with its numerical approximation
        check_grad_val = CFunction(f_param, df_param).check_grad(xc,
                                                                 epsilon=100)
        self.logger.info(
            "Gradient difference between analytical {:} gradient "
            "and numerical gradient: {:}".format(param_name, check_grad_val))
        self.assertLess(
            check_grad_val, 1,
            "poisoning gradient is wrong {:}".format(check_grad_val))
Example #21
    def test_grad(self):
        """Compare analytical gradients with its numerical approximation."""
        def _loss_wrapper(scores, loss, true_labels):
            return loss.loss(true_labels, scores)

        loss_class = CLossCrossEntropy()

        y_true = CArray.randint(0, 2, 1)
        score = CArray.randn((1, 3))

        self.logger.info("Y_TRUE: {:} SCORES: {:}".format(y_true, score))

        for pos_label in (None, 0, 1, 2):
            self.logger.info("POS_LABEL: {:}".format(pos_label))

            # real value of the gradient on x
            grad = loss_class.dloss(y_true, score, pos_label)

            self.logger.info("GRAD: {:}".format(grad))

            approx = CFunction(_loss_wrapper).approx_fprime(
                score, eps, loss_class, y_true)
            self.logger.info("APPROX (FULL): {:}".format(approx))

            pos_label = pos_label if pos_label is not None else y_true.item()
            approx = approx[pos_label]

            self.logger.info("APPROX (POS_LABEL): {:}".format(approx))

            check_grad_val = (grad - approx).norm()

            self.logger.info("Gradient difference between analytical svm "
                             "gradient and numerical gradient: %s",
                             str(check_grad_val))
            self.assertLess(check_grad_val, 1e-4,
                            "the gradient is wrong {:}".format(check_grad_val))
    def setUp(self):
        self.fun = CFunction.create('3h-camel')
Example #23
    def setUp(self):
        self.fun = CFunction.create('mc-cormick')
    def setUp(self):
        self.fun = CFunction.create('rosenbrock')
    def setUp(self):
        A = CArray.eye(2, 2)
        b = CArray.zeros((2, 1))
        c = 0
        self.fun = CFunction.create('quadratic', A, b, c)
Example #26
    def test_aspreprocess(self):
        """Test for normalizer used as preprocess."""
        from secml.ml.classifiers import CClassifierSVM
        from secml.ml.classifiers.multiclass import CClassifierMulticlassOVA

        model = mlp(input_dims=20, hidden_dims=(40,), output_dims=3)
        loss = nn.CrossEntropyLoss()
        optimizer = optim.SGD(model.parameters(), lr=1e-1)
        net = CClassifierPyTorch(model=model, loss=loss,
                                 optimizer=optimizer, random_state=0,
                                 epochs=10, preprocess='min-max')
        net.fit(self.ds.X, self.ds.Y)

        norm = CNormalizerDNN(net=net)

        clf = CClassifierMulticlassOVA(
            classifier=CClassifierSVM, preprocess=norm)

        self.logger.info("Testing last layer")

        clf.fit(self.ds.X, self.ds.Y)

        y_pred, scores = clf.predict(
            self.ds.X, return_decision_function=True)
        self.logger.info("TRUE:\n{:}".format(self.ds.Y.tolist()))
        self.logger.info("Predictions:\n{:}".format(y_pred.tolist()))
        self.logger.info("Scores:\n{:}".format(scores))

        x = self.ds.X[0, :]

        self.logger.info("Testing last layer gradient")

        for c in self.ds.classes:
            self.logger.info("Gradient w.r.t. class {:}".format(c))

            grad = clf.grad_f_x(x, y=c)

            self.logger.info("Output of grad_f_x:\n{:}".format(grad))

            check_grad_val = CFunction(
                clf.decision_function, clf.grad_f_x).check_grad(
                    x, y=c, epsilon=1e-1)
            self.logger.info(
                "norm(grad - num_grad): %s", str(check_grad_val))
            self.assertLess(check_grad_val, 1e-3)

            self.assertTrue(grad.is_vector_like)
            self.assertEqual(x.size, grad.size)

        layer = 'linear1'
        norm.out_layer = layer

        self.logger.info("Testing layer {:}".format(norm.out_layer))

        clf.fit(self.ds.X, self.ds.Y)

        y_pred, scores = clf.predict(
            self.ds.X, return_decision_function=True)
        self.logger.info("TRUE:\n{:}".format(self.ds.Y.tolist()))
        self.logger.info("Predictions:\n{:}".format(y_pred.tolist()))
        self.logger.info("Scores:\n{:}".format(scores))

        self.logger.info("Testing 'linear1' layer gradient")
        grad = clf.grad_f_x(x, y=0)  # y is required for multiclassova
        self.logger.info("Output of grad_f_x:\n{:}".format(grad))

        self.assertTrue(grad.is_vector_like)
        self.assertEqual(x.size, grad.size)
    def setUp(self):
        self.fun = CFunction.create('beale')
    def _test_gradient_numerical(self,
                                 clf,
                                 x,
                                 extra_classes=None,
                                 th=1e-3,
                                 epsilon=eps,
                                 **grad_kwargs):
        """Test for clf.grad_f_x comparing to numerical gradient.

        Parameters
        ----------
        clf : CClassifier
        x : CArray
        extra_classes : None or list of int, optional
            Any extra class w.r.t. which the gradient should be tested.
        th : float, optional
            The threshold for the check with numerical gradient.
        epsilon : float, optional
            The epsilon to use for computing the numerical gradient.
        grad_kwargs : kwargs
            Any extra parameter for the gradient function.

        Returns
        -------
        grads : list of CArray
            A list with the gradients computed wrt each class.

        """
        if 'y' in grad_kwargs:
            raise ValueError("`y` cannot be passed to this unittest.")

        if extra_classes is not None:
            classes = clf.classes.append(extra_classes)
        else:
            classes = clf.classes

        grads = []
        for c in classes:
            grad_kwargs['y'] = c  # set the class to compute the gradient for

            # Analytical gradient
            gradient = clf.grad_f_x(x, **grad_kwargs)
            grads.append(gradient)

            self.assertTrue(gradient.is_vector_like)
            self.assertEqual(x.size, gradient.size)
            self.assertEqual(x.issparse, gradient.issparse)

            # Numerical gradient
            num_gradient = CFunction(clf.decision_function).approx_fprime(
                x.todense(), epsilon, y=c)

            # Compute the norm of the difference
            error = (gradient - num_gradient).norm()

            self.logger.info("Analytic grad wrt. class {:}:\n{:}".format(
                c, gradient))
            self.logger.info("Numeric gradient wrt. class {:}:\n{:}".format(
                c, num_gradient))

            self.logger.info("norm(grad - num_grad): {:}".format(error))
            self.assertLess(error, th)

            self.assertIsSubDtype(gradient.dtype, float)

        return grads
    def setUp(self):

        self.test_funcs = dict()

        # Instantiate the available functions to test the optimizer
        self.test_funcs['3h-camel'] = {
            'fun': CFunction.create('3h-camel'),
            'x0': CArray([1, 1]),
            'grid_limits': [(-2, 2), (-2, 2)],
            'vmin': 0,
            'vmax': 5
        }
        self.test_funcs['beale'] = {
            'fun': CFunction.create('beale'),
            'x0': CArray([0, 0]),
            'grid_limits': [(-1, 4.5), (-1, 1.5)],
            'vmin': 0,
            'vmax': 1
        }
        self.test_funcs['mc-cormick'] = {
            'fun': CFunction.create('mc-cormick'),
            'x0': CArray([0, 1]),
            'grid_limits': [(-2, 3), (-3, 1)],
            'vmin': -2,
            'vmax': 2
        }
        self.test_funcs['rosenbrock'] = {
            'fun': CFunction.create('rosenbrock'),
            'x0': CArray([-1, -1]),
            'grid_limits': [(-2.1, 1.1), (-2.1, 1.1)],
            'vmin': 0,
            'vmax': 10
        }
        quad = self._quadratic_fun(2)
        self.test_funcs['quad-2'] = {
            'fun': quad,
            'x0': CArray([4, -4]),
            'grid_limits': [(-5, 5), (-5, 5)],
            'vmin': None,
            'vmax': None
        }
        n = 100
        quad = self._quadratic_fun(n)
        self.test_funcs['quad-100-sparse'] = {
            'fun': quad,
            'x0': CArray.zeros((n, ), dtype=int).tosparse(dtype=int),
        }
        n = 2
        poly = self._create_poly(d=n)
        self.test_funcs['poly-2'] = {
            'fun': poly,
            'x0': CArray.ones((n, )) * 2,
            'vmin': -10,
            'vmax': 5,
            'grid_limits': [(-1, 1), (-1, 1)]
        }
        n = 100
        # x0 is an integer CArray and the solution is the zero vector
        poly = self._create_poly(d=n)
        self.test_funcs['poly-100-int'] = {
            'fun': poly,
            'x0': CArray.ones((n, ), dtype=int) * 2
        }
        n = 100
        poly = self._create_poly(d=n)
        self.test_funcs['poly-100-int-sparse'] = {
            'fun': poly,
            'x0': CArray.ones((n, ), dtype=int).tosparse(dtype=int) * 2
        }
    def test_approx_fprime_check_param_passage(self):
        """Test that approx_fprime() and check_grad() correctly pass the
        extra parameters to the main function and to the one that computes
        the gradient.

        """
        self.logger.info("Test the parameters passage made up by "
                         "COptimizer.approx_fprime() "
                         "and .check_grad() methods.")

        x0 = CArray([1.])  # Starting point for minimization
        epsilon = 0.1

        self.logger.info(
            "Testing when the function and the gradient take two parameters")

        fun = CFunction(fun=self._fun_2_params, gradient=self._dfun_2_params)
        self.logger.info("Testing check_grad")

        grad_err = fun.check_grad(x0, epsilon, 1)
        self.logger.info("Grad error: {:}".format(grad_err))
        self.assertEqual(0, grad_err)

        self.logger.info("Testing approx_fprime")

        grad_err = fun.approx_fprime(x0, epsilon, 1).item()
        self.logger.info("Grad error: {:}".format(grad_err))
        self.assertEqual(0, grad_err)

        self.logger.info("Testing fun/grad accepting only *args")

        fun = CFunction(fun=self._fun_args, gradient=self._dfun_args)

        self.logger.info("Testing check_grad ")

        grad_err = fun.check_grad(x0, epsilon, 1)
        self.logger.info("Grad error: {:}".format(grad_err))
        self.assertEqual(0, grad_err)

        self.logger.info("Testing approx_fprime ")

        grad_err = fun.approx_fprime(x0, epsilon, 1)
        self.logger.info("Grad error: {:}".format(grad_err))
        self.assertEqual(0, grad_err)

        # TypeError expected as `_fun_args` does not accept kwargs
        with self.assertRaises(TypeError):
            fun.approx_fprime(x0, epsilon, y=1)

        self.logger.info("Testing fun/grad accepting only **kwargs")

        fun = CFunction(fun=self._fun_kwargs, gradient=self._dfun_kwargs)

        self.logger.info("Testing check_grad ")

        grad_err = fun.check_grad(x0, epsilon, y=1)
        self.logger.info("Grad error: {:}".format(grad_err))
        self.assertEqual(0, grad_err)

        self.logger.info("Testing approx_fprime ")

        grad_err = fun.approx_fprime(x0, epsilon, y=1)
        self.logger.info("Grad error: {:}".format(grad_err))
        self.assertEqual(0, grad_err)
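The helper methods referenced above (_fun_2_params, _dfun_2_params, _fun_args, _dfun_args, _fun_kwargs, _dfun_kwargs) are not included in this snippet. One hypothetical shape consistent with the assertions (a numerical gradient and a gradient error of exactly 0) is a function that is constant in x paired with a zero gradient, for example:

    # Hypothetical fixtures, not part of the original snippet. Being constant
    # in x, approx_fprime returns exactly 0 and check_grad reports no error.
    def _fun_2_params(self, x, y):
        return float(y)

    def _dfun_2_params(self, x, y):
        return CArray.zeros(x.size)

    def _fun_args(self, x, *args):
        return float(args[0])

    def _dfun_args(self, x, *args):
        return CArray.zeros(x.size)

    def _fun_kwargs(self, x, **kwargs):
        return float(kwargs['y'])

    def _dfun_kwargs(self, x, **kwargs):
        return CArray.zeros(x.size)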