Example #1
    def _inverse_transform(self, x):
        """Map data back to its original space.

        Parameters
        ----------
        x : CArray
            Array to transform back to its original space.

        Returns
        -------
        CArray
            Input array mapped back to its original space.

        Examples
        --------
        >>> from secml.array import CArray
        >>> from secml.ml.features.reduction import CPCA

        >>> array = CArray([[1., 0., 2.], [2., 5., 0.], [0., 1., -9.]])
        >>> pca = CPCA().fit(array)
        >>> array_pca = pca.transform(array)
        >>> pca.inverse_transform(array_pca).round(6)
        CArray(3, 3)(dense: [[ 1. -0.  2.] [ 2.  5. -0.] [-0.  1. -9.]])

        """
        data_carray = CArray(x).atleast_2d()
        if data_carray.shape[1] != self.n_components:
            raise ValueError("array to revert must have {:} "
                             "features (columns).".format(self.n_components))

        out = CArray(data_carray.dot(self._components) + self.mean)

        return out.atleast_2d() if x.ndim >= 2 else out
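
The inverse transform is a single affine map: multiply the reduced data by the
component matrix and add the training mean back. Below is a minimal NumPy
sketch of the same computation, not the secml implementation; the components
and mean are recomputed from scratch via the SVD, purely for illustration:

import numpy as np

# toy data: same 3x3 matrix as in the docstring example above
X = np.array([[1., 0., 2.], [2., 5., 0.], [0., 1., -9.]])

mean = X.mean(axis=0)
# principal components from the SVD of the centered data (one per row)
_, _, components = np.linalg.svd(X - mean, full_matrices=False)

X_reduced = (X - mean).dot(components.T)   # forward: project onto components
X_back = X_reduced.dot(components) + mean  # inverse: the affine map above

assert np.allclose(X_back, X)  # lossless here, since no components were dropped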
Example #2
    def _gradient_fk_xc(self, xc, yc, clf, loss_grad, tr, k=None):
        """
        Derivative of the classifier's discriminant function f(xk),
        computed on a set of points xk, w.r.t. a single poisoning point xc.

        This computation is classifier-specific, so its implementation is
        delegated to the inherited classes.

        """
        xc0 = xc.deepcopy()

        d = xc.size

        if hasattr(clf, 'C'):
            C = clf.C
        elif hasattr(clf, 'alpha'):
            C = 1.0 / clf.alpha
        else:
            raise ValueError("Error: The classifier does not have neither C "
                             "nor alpha")

        H = clf.hessian_tr_params(tr.X, tr.Y)

        # change vector dimensions to match the mathematical formulation...
        yc = convert_binary_labels(yc)
        xc = CArray(xc.ravel()).atleast_2d()  # xc is a row vector

        w = CArray(clf.w.ravel()).T  # column vector
        b = clf.b
        grad_loss_fk = CArray(loss_grad.ravel()).T  # column vector

        # validation points
        xk = self.val.X.atleast_2d()

        # handle normalizer, if present
        xc = xc if clf.preprocess is None else clf.preprocess.transform(xc)

        s_c = self._s(xc, w, b)
        sigm_c = self._sigm(yc, s_c)
        z_c = sigm_c * (1 - sigm_c)

        dbx_c = z_c * w  # column vector
        dwx_c = ((yc * (-1 + sigm_c)) *
                 CArray.eye(d, d)) + z_c * (w.dot(xc))  # matrix d*d

        G = C * (dwx_c.append(dbx_c, axis=1))

        fd_params = self.classifier.grad_f_params(xk)
        grad_loss_params = fd_params.dot(grad_loss_fk)

        gt = self._compute_grad_inv(G, H, grad_loss_params)
        # gt = self._compute_grad_solve(G, H, grad_loss_params)
        # gt = self._compute_grad_solve_iterative(G, H, grad_loss_params)

        # propagating gradient back to input space
        if clf.preprocess is not None:
            return clf.preprocess.gradient(xc0, w=gt)

        return gt
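
The _s and _sigm helpers used above are the linear score and the sigmoid of
logistic regression. Below is a minimal NumPy sketch of the intermediate
quantities s_c, sigm_c, z_c, dbx_c and dwx_c; the helper names mirror the ones
above, but the standalone functions, shapes and toy numbers are assumptions
made for illustration, not secml code:

import numpy as np

def s(x, w, b):
    # decision score of a linear classifier: s(x) = x.dot(w) + b
    return x.dot(w) + b

def sigm(y, s):
    # sigmoid of the signed score: 1 / (1 + exp(-y * s))
    return 1.0 / (1.0 + np.exp(-y * s))

xc = np.array([[0.5, -1.0]])  # poisoning point as a row vector (1 x d)
w = np.array([[2.0], [1.0]])  # weights as a column vector (d x 1)
b, yc = 0.1, -1               # bias; label already converted to {-1, +1}

s_c = s(xc, w, b)             # score of the poisoning point
sigm_c = sigm(yc, s_c)        # sigmoid at yc * s_c
z_c = sigm_c * (1 - sigm_c)   # derivative of the sigmoid at yc * s_c

d = xc.size
dbx_c = z_c * w  # derivative of b w.r.t. xc: column vector (d x 1)
dwx_c = (yc * (-1 + sigm_c)) * np.eye(d) + z_c * w.dot(xc)  # d x d matrix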
Example #3

from secml.array import CArray


def test_dot():
    a = CArray([1, 2, 3])
    b = CArray([10, 20, 30])
    # the dot product of two dense vectors is their scalar product
    assert a.dot(b) == 140  # 1*10 + 2*20 + 3*30
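
The expected value in the assertion is just the ordinary scalar product;
assuming CArray, like numpy, reduces the dot of two 1-D arrays to a scalar,
the same number falls out with plain NumPy and no secml involved:

import numpy as np

a = np.array([1, 2, 3])
b = np.array([10, 20, 30])
assert a.dot(b) == 140  # 1*10 + 2*20 + 3*30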