Example #1
    def test_plot_density(self):

        N = 200
        np.random.seed(1)
        X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                            np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]

        X_plot = CArray(np.linspace(-5, 10, 1000)[:, np.newaxis])

        true_dens = CArray(0.3 * norm(0, 1).pdf(X_plot[:, 0].tondarray())
                           + 0.7 * norm(5, 1).pdf(X_plot[:, 0].tondarray()))

        fig = CFigure(width=7)
        fig.sp._sp.fill(X_plot[:, 0].tondarray(), true_dens.tondarray(),
                        fc='black', alpha=0.2,
                        label='input distribution')

        for kernel in ['gaussian', 'tophat', 'epanechnikov']:
            kde = CDensityEstimation(kernel=kernel, bandwidth=0.5)
            x, y = kde.estimate_density(CArray(X), n_points=N)
            fig.sp.plot(x, y, '-',
                        label="kernel = '{0}'".format(kernel))

        fig.sp.text(6, 0.38, "N={0} points".format(N))

        fig.sp.legend(loc='upper left')
        fig.sp.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')

        fig.sp.xlim(-4, 9)
        fig.sp.ylim(-0.02, 0.4)
        fig.show()
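Note: the shaded curve is the true generating density, the Gaussian mixture 0.3*N(0, 1) + 0.7*N(5, 1) from which X is sampled; the three KDE curves returned by estimate_density should roughly follow it.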
Example #2
    def test_explain(self):
        """Unittest for explain method."""
        i = 67
        x = self.ds.X[i, :]

        attr = self.explainer.explain(x, y=1)

        self.logger.info("Attributions:\n{:}".format(attr.tolist()))

        self.assertIsInstance(attr, CArray)
        self.assertEqual(attr.shape, x.shape)

        fig = CFigure(height=3, width=6)

        # Plotting original image
        fig.subplot(1, 2, 1)
        fig.sp.imshow(x.reshape((8, 8)), cmap='gray')

        th = max(abs(attr.min()), abs(attr.max()))

        # Plotting attributions
        fig.subplot(1, 2, 2)
        fig.sp.imshow(attr.reshape((8, 8)),
                      cmap='seismic',
                      vmin=-1 * th,
                      vmax=th)

        fig.show()
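Note on the color scale: th is the largest absolute attribution value, and passing vmin=-th, vmax=th centers the diverging 'seismic' colormap at zero, so negative and positive attributions are shown with symmetric intensities.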
Example #3
def plot_loss_after_attack(evasAttack):
    """
	This function plots the evolution of the loss function of the surrogate classifier
	after an attack is performed.
	The loss function is normalized between 0 and 1.
	It helps to know whether parameters given to the attack algorithm are well tuned are not;
	the loss should be as minimal as possible.
	The script is inspired from https://secml.gitlab.io/tutorials/11-ImageNet_advanced.html#Visualize-and-check-the-attack-optimization
	"""
    n_iter = evasAttack.x_seq.shape[0]
    itrs = CArray.arange(n_iter)

    # create a plot that shows the loss during the attack iterations
    # note that the loss is not available for all attacks
    fig = CFigure(width=10, height=4, fontsize=14)

    # apply a linear scaling to have the loss in [0,1]
    loss = evasAttack.f_seq
    if loss is not None:
        loss = CNormalizerMinMax().fit_transform(CArray(loss).T).ravel()
        fig.subplot(1, 2, 1)
        fig.sp.xlabel('iteration')
        fig.sp.ylabel('loss')
        fig.sp.plot(itrs, loss, c='black')

    fig.tight_layout()
    fig.show()
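A minimal sketch of the rescaling step mentioned in the docstring, shown in isolation; the CNormalizerMinMax import path is an assumption, while the fit_transform call mirrors the one used inside the function:

from secml.array import CArray
from secml.ml.features.normalization import CNormalizerMinMax  # assumed import path

raw_loss = CArray([3.2, 2.1, 1.4, 1.1, 1.05])  # toy per-iteration loss values
scaled = CNormalizerMinMax().fit_transform(raw_loss.T).ravel()
print(scaled)  # min-max scaling: the largest value maps to 1.0, the smallest to 0.0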
Example #4
    def test_plot(self):
        """Test for LDA. Check LDA Result Graphically.

        Apply Lda to Sklearn Iris Dataset and compare it with
        "Linear Discriminant Analysis bit by bit" by Sebastian Raschka
        http://sebastianraschka.com/Articles/2014_python_lda.html
        into the plot we must see approximatively:
        x axes: from -2 to -1 virginica, from -1 to 0 versicolor, from 1 to 2,3 setosa
        y axes: from -1 to -1 virginica, from -1 to 0.5 versicolor, from -1 to 1 setosa

        """
        from sklearn.datasets import load_iris

        iris_db = load_iris()
        patterns = CArray(iris_db.data)
        labels = CArray(iris_db.target)

        lda = CLDA()
        lda.fit(patterns, labels)
        # store the dataset reduced with LDA
        red_dts = lda.fit_transform(patterns, labels)

        fig = CFigure(width=10, markersize=8)
        fig.sp.scatter(red_dts[:, 0].ravel(),
                       red_dts[:, 1].ravel(),
                       c=labels)
        fig.show()
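To check the ranges claimed in the docstring numerically rather than by eye, a small follow-up sketch (it reuses red_dts and labels from the test above; class ids follow the load_iris ordering 0=setosa, 1=versicolor, 2=virginica):

for c in (0, 1, 2):
    xs = red_dts[labels == c, 0].ravel()
    ys = red_dts[labels == c, 1].ravel()
    # print the projected range of each class on both LDA components
    print("class", c, "x:", xs.min(), "..", xs.max(), "y:", ys.min(), "..", ys.max())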
Example #5
    def test_fun(self):
        """Test for CPlotFunction.plot_fun method."""
        fig = CFigure()
        fig.sp.plot_ds(self.dataset)

        fig.sp.plot_fun(self.clf.decision_function, y=1)
        fig.show()
Example #6
    def test_plot_decision_function(self):
        """Test plot of multiclass classifier decision function."""
        # generate synthetic data
        ds = CDLRandom(n_classes=3, n_features=2, n_redundant=0,
                       n_clusters_per_class=1, class_sep=1,
                       random_state=0).load()

        multiclass = CClassifierMulticlassOVA(
            classifier=CClassifierSVM,
            class_weight='balanced',
            preprocess='min-max')

        # Training and classification
        multiclass.fit(ds.X, ds.Y)
        y_pred, score_pred = multiclass.predict(
            ds.X, return_decision_function=True)

        def plot_hyperplane(img, clf, min_v, max_v, linestyle, label):
            """Plot the hyperplane associated to the OVA clf."""
            xx = CArray.linspace(
                min_v - 5, max_v + 5)  # make sure the line is long enough
            # get the separating hyperplane
            yy = -(clf.w[0] * xx + clf.b) / clf.w[1]
            img.sp.plot(xx, yy, linestyle, label=label)

        fig = CFigure(height=7, width=8)
        fig.sp.title('{:} ({:})'.format(multiclass.__class__.__name__,
                                        multiclass.classifier.__name__))

        x_bounds, y_bounds = ds.get_bounds()

        styles = ['go-', 'yp--', 'rs-.', 'bD--', 'c-.', 'm-', 'y-.']

        for c_idx, c in enumerate(ds.classes):
            # Plot boundary and predicted label for each OVA classifier

            plot_hyperplane(fig, multiclass._binary_classifiers[c_idx],
                            x_bounds[0], x_bounds[1], styles[c_idx],
                            'Boundary\nfor class {:}'.format(c))

            fig.sp.scatter(ds.X[ds.Y == c, 0],
                           ds.X[ds.Y == c, 1],
                           s=40, c=styles[c_idx][0])
            fig.sp.scatter(ds.X[y_pred == c, 0], ds.X[y_pred == c, 1], s=160,
                           edgecolors=styles[c_idx][0],
                           facecolors='none', linewidths=2)

        # Plotting multiclass decision function
        fig.sp.plot_decision_regions(multiclass, n_grid_points=100,
                                     grid_limits=ds.get_bounds(offset=5))

        fig.sp.xlim(x_bounds[0] - .5 * x_bounds[1],
                    x_bounds[1] + .5 * x_bounds[1])
        fig.sp.ylim(y_bounds[0] - .5 * y_bounds[1],
                    y_bounds[1] + .5 * y_bounds[1])

        fig.sp.legend(loc=4)  # lower, right

        fig.show()
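For reference, plot_hyperplane draws the decision boundary of each binary OVA classifier, i.e. the points where w[0]*x + w[1]*y + b = 0, which the helper solves for y as yy = -(clf.w[0] * xx + clf.b) / clf.w[1].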
Example #7
    def test_compute(self):

        self.roc.compute(self.ds1.Y, self.s1[:, 1].ravel())

        fig = CFigure()
        fig.sp.semilogx(self.roc.fpr, self.roc.tpr)
        fig.sp.grid()
        fig.show()
Example #8
    def test_fgrads(self):
        """Test for CPlotFunction.plot_fgrads method."""
        fig = CFigure()
        fig.sp.plot_ds(self.dataset)

        fig.sp.plot_fun(self.clf.decision_function, y=1)
        fig.sp.plot_fgrads(lambda x: self.clf.grad_f_x(x, y=1))
        fig.show()
Example #9
def test_simple():
    """Plot the result of a dot product operation."""
    def test_dot():
        a = CArray([1, 2, 3])
        b = CArray([10, 20, 30])
        return a.dot(b)

    fig = CFigure()
    fig.sp.plot(test_dot(), marker='o')
    fig.show()
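For reference, a.dot(b) evaluates to the scalar 1*10 + 2*20 + 3*30 = 140, so the plot shows a single marker rather than a curve.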
Example #10
    def test_confusion_matrix(self):
        """Test for `CPlot.plot_confusion_matrix()` method."""
        y_true = CArray([2, 0, 2, 2, 0, 1])
        y_pred = CArray([0, 0, 2, 2, 0, 2])
        fig = CFigure()
        fig.sp.plot_confusion_matrix(y_true,
                                     y_pred,
                                     labels=['one', 'two', 'three'],
                                     colorbar=True,
                                     normalize=False)
        fig.show()
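For reference, with these vectors and the usual rows = true class / columns = predicted class layout, the non-normalized confusion matrix is [[2, 0, 0], [0, 0, 1], [1, 0, 2]].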
Example #11
    def _plot_sec_eval(sec_eval):

        figure = CFigure(height=5, width=5)

        figure.sp.plot_sec_eval(sec_eval.sec_eval_data,
                                label='SVM', marker='o',
                                show_average=True, mean=True)

        figure.sp.title(sec_eval.attack.__class__.__name__)
        figure.subplots_adjust()
        figure.show()
Example #12
    def test_standard(self):
        """Plot of standard ROC."""

        # Testing without input CFigure
        roc_plot = CFigure()
        roc_plot.sp.title('ROC Curve Standard')
        # Plot the mean ROC curve
        roc_plot.sp.plot_roc(self.roc_wmean.mean_fpr, self.roc_wmean.mean_tpr)

        roc_plot.show()
Example #13
    def _plot_sec_eval(self):
        # figure creation
        figure = CFigure(height=5, width=5)

        sec_eval_data = [
            sec_eval.sec_eval_data for sec_eval in self.sec_eval]
        # plot security evaluation
        figure.sp.plot_sec_eval(sec_eval_data, label='SVM', marker='o',
                                show_average=True, mean=True)

        figure.subplots_adjust()
        figure.show()
Example #14
    def test_mean(self):

        self.roc.compute([self.ds1.Y, self.ds2.Y],
                         [self.s1[:, 1].ravel(), self.s2[:, 1].ravel()])
        mean_fp, mean_tp, mean_std = self.roc.average(return_std=True)
        fig = CFigure(linewidth=2)
        fig.sp.errorbar(self.roc.mean_fpr, self.roc.mean_tpr, yerr=mean_std)
        for rep in range(self.roc.n_reps):
            fig.sp.semilogx(self.roc.fpr[rep], self.roc.tpr[rep])
        fig.sp.semilogx(mean_fp, mean_tp)
        fig.sp.grid()
        fig.show()
Example #15
    def test_explain(self):
        """Unittest for explain method."""
        i = 67
        ds_i = self.ds[i, :]
        x, y_true = ds_i.X, ds_i.Y.item()

        self.logger.info("Explaining P{:} c{:}".format(i, y_true))

        x_pred, x_score = self.clf.predict(x, return_decision_function=True)

        self.logger.info("Predicted class {:}, scores:\n{:}".format(
            x_pred.item(), x_score))
        self.logger.info("Candidates: {:}".format(x_score.argsort()[::-1]))

        fig = CFigure(height=1.5, width=12)

        # Plotting original image
        fig.subplot(1, self.ds.num_classes + 1, 1)
        fig.sp.imshow(x.reshape((8, 8)), cmap='gray')
        fig.sp.title("Origin c{:}".format(y_true))
        fig.sp.yticks([])
        fig.sp.xticks([])

        attr = CArray.empty(shape=(self.ds.num_classes, x.size))

        # Computing attributions
        for c in self.ds.classes:

            attr_c = self.explainer.explain(x, y=c)
            attr[c, :] = attr_c
            self.logger.info("Attributions class {:}:\n{:}".format(
                c, attr_c.tolist()))

            self.assertIsInstance(attr_c, CArray)
            self.assertEqual(attr_c.size, x.size)

        th = max(abs(attr.min()), abs(attr.max()))

        # Plotting attributions
        for c in self.ds.classes:

            fig.subplot(1, self.ds.num_classes + 1, 2 + c)
            fig.sp.imshow(attr[c, :].reshape((8, 8)),
                          cmap='seismic',
                          vmin=-1 * th,
                          vmax=th)
            fig.sp.title("Attr c{:}".format(c))
            fig.sp.yticks([])
            fig.sp.xticks([])

        fig.tight_layout()

        fig.show()
Example #16
    def test_single(self):
        """Plot of ROC repetitions."""

        # Testing without input CFigure
        roc_plot = CFigure()
        roc_plot.sp.title('ROC Curve Repetitions')
        # Plotting 2 times (to show multiple curves)
        # add one curve for repetition and call it rep 0 and rep 1 of roc 1
        roc_plot.sp.plot_roc_reps(self.roc_nomean, label='roc1')
        # add one curve for repetition and call it rep 0 and rep 1 of roc 2
        roc_plot.sp.plot_roc_reps(self.roc_nomean, label='roc2')

        roc_plot.show()
Example #17
    def test_explain(self):
        """Unittest for explain method."""
        i = 67
        ds_i = self.ds[i, :]
        x, y_true = ds_i.X, ds_i.Y.item()

        self.logger.info("Explaining P{:} c{:}".format(i, y_true))

        x_pred, x_score = self.clf.predict(x, return_decision_function=True)

        self.logger.info("Predicted class {:}, scores:\n{:}".format(
            x_pred.item(), x_score))
        self.logger.info("Candidates: {:}".format(x_score.argsort()[::-1]))

        ref_img = None  # Use default reference image
        m = 100  # Number of steps

        attr = CArray.empty(shape=(0, x.shape[1]), sparse=x.issparse)
        for c in self.ds.classes:  # Compute attributions for each class
            a = self.explainer.explain(x, y=c, reference=ref_img, m=m)
            attr = attr.append(a, axis=0)

        self.assertIsInstance(attr, CArray)
        self.assertEqual(attr.shape[1], x.shape[1])
        self.assertEqual(attr.shape[0], self.ds.num_classes)

        fig = CFigure(height=1.5, width=12)

        # Plotting original image
        fig.subplot(1, self.ds.num_classes + 1, 1)
        fig.sp.imshow(x.reshape((8, 8)), cmap='gray')
        fig.sp.title("Origin c{:}".format(y_true))
        fig.sp.yticks([])
        fig.sp.xticks([])

        th = max(abs(attr.min()), abs(attr.max()))

        # Plotting attributions
        for c in self.ds.classes:
            fig.subplot(1, self.ds.num_classes + 1, 2 + c)
            fig.sp.imshow(attr[c, :].reshape((8, 8)),
                          cmap='seismic',
                          vmin=-1 * th,
                          vmax=th)
            fig.sp.title("Attr c{:}".format(c))
            fig.sp.yticks([])
            fig.sp.xticks([])

        fig.tight_layout()
        fig.show()
Example #18
    def test_custom_params(self):
        """Plot of ROC altering default parameters."""

        # Testing without input CFigure
        roc_plot = CFigure()
        roc_plot.sp.title('ROC Curve - Custom')
        roc_plot.sp.xlim(0.1, 100)
        roc_plot.sp.ylim(30, 100)
        roc_plot.sp.yticks([70, 80, 90, 100])
        roc_plot.sp.yticklabels(['70', '80', '90', '100'])
        # Plotting 2 times (to show 2 curves)
        roc_plot.sp.plot_roc_mean(self.roc_wmean, label='roc1')
        roc_plot.sp.plot_roc_mean(self.roc_wmean, label='roc2')

        roc_plot.show()
Example #19
    def test_plot_decision_regions(self):
        """Test for `.plot_decision_regions` method."""
        fig = CFigure(width=10, height=5)

        fig.subplot(1, 2, 1)
        fig.sp.plot_ds(self.dataset)
        fig.sp.plot_decision_regions(
            self.clf, n_grid_points=200, plot_background=False)

        fig.subplot(1, 2, 2)
        fig.sp.plot_ds(self.dataset)
        fig.sp.plot_decision_regions(
            self.clf, n_grid_points=200)

        fig.show()
Example #20
    def test_mean(self):
        """Plot of average ROC."""

        # Testing without input CFigure
        roc_plot = CFigure()
        roc_plot.sp.title('ROC Curve')
        # Plotting 2 times (to show 2 curves)
        roc_plot.sp.plot_roc_mean(self.roc_wmean,
                                  label='roc1 mean',
                                  plot_std=True)
        roc_plot.sp.plot_roc_reps(self.roc_wmean, label='roc1')

        roc_plot.show()

        # Testing mean plot with no average
        with self.assertRaises(ValueError):
            roc_plot.sp.plot_roc_mean(self.roc_nomean)
Example #21
    def test_quiver(self):
        """Test for `CPlot.quiver()` method."""

        # gradient values creation
        xv = CArray.arange(0, 2 * constants.pi, .2)
        yv = CArray.arange(0, 2 * constants.pi, .2)

        X, Y = CArray.meshgrid((xv, yv))
        U = CArray.cos(X)
        V = CArray.sin(Y)

        plot = CFigure()
        plot.sp.title('Gradient arrow')

        plot.sp.quiver(U, V)

        plot.show()
Example #22
    def test_plot(self):

        ds = CDLRandom(n_samples=100,
                       n_features=2,
                       n_redundant=0,
                       random_state=100).load()

        self.logger.info("Train Sec SVM")
        sec_svm = CClassifierSecSVM(C=1, eta=0.1, eps=1e-3, lb=-0.1, ub=0.5)
        sec_svm.verbose = 2
        sec_svm.fit(ds.X, ds.Y)

        self.logger.info("Train SVM")
        svm = CClassifierSVM(C=1)
        svm.fit(ds.X, ds.Y)

        self._compute_alignment(ds, sec_svm, svm)

        fig = CFigure(height=5, width=8)
        fig.subplot(1, 2, 1)
        # Plot dataset points
        fig.sp.plot_ds(ds)
        # Plot objective function
        fig.sp.plot_fun(svm.predict,
                        multipoint=True,
                        plot_background=True,
                        plot_levels=False,
                        n_grid_points=100,
                        grid_limits=ds.get_bounds())
        fig.sp.title("SVM")

        fig.subplot(1, 2, 2)
        # Plot dataset points
        fig.sp.plot_ds(ds)
        # Plot objective function
        fig.sp.plot_fun(sec_svm.predict,
                        multipoint=True,
                        plot_background=True,
                        plot_levels=False,
                        n_grid_points=100,
                        grid_limits=ds.get_bounds())
        fig.sp.title("Sec-SVM")

        fig.show()
Example #23
    def test_draw(self):
        """Drawing the loss functions.

        Inspired by: https://en.wikipedia.org/wiki/Loss_functions_for_classification

        """
        fig = CFigure()
        x = CArray.arange(-1, 3.01, 0.01)

        for loss_id in ('e-insensitive', 'e-insensitive-squared', 'quadratic'):

            self.logger.info("Creating loss: {:}".format(loss_id))
            loss_class = CLoss.create(loss_id)
            fig.sp.plot(x, loss_class.loss(CArray([1]), x), label=loss_id)

        fig.sp.grid()
        fig.sp.legend()

        fig.show()
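A quick numeric sanity check of the curves above (a sketch reusing CLoss.create and CArray as in the test; the default epsilon of the e-insensitive loss is assumed to be smaller than 2):

loss = CLoss.create('e-insensitive')
print(loss.loss(CArray([1]), CArray([1.0])))  # zero: the error falls inside the epsilon tube
print(loss.loss(CArray([1]), CArray([3.0])))  # positive: the error exceeds epsilon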
Example #24
class TestCFigure(CUnitTest):
    """Unittest for CFigure."""
    def test_svm(self):

        self.X = CArray([[1, 2], [3, 4], [5, 6], [7, 8]])
        self.Y = CArray([[0], [1], [1], [0]]).ravel()
        self.dataset = CDataset(self.X, self.Y)

        self.classifier = CClassifierSVM(kernel=CKernelRBF())
        self.classifier.fit(self.dataset.X, self.dataset.Y)

        self.x_min, self.x_max = (self.X[:, [0]].min() - 1,
                                  self.X[:, [0]].max() + 1)
        self.y_min, self.y_max = (self.X[:, [1]].min() - 1,
                                  self.X[:, [1]].max() + 1)

        self.fig = CFigure(height=7,
                           width=10,
                           linewidth=5,
                           fontsize=24,
                           markersize=20)
        self.fig.sp.title("Svm Test")

        self.logger.info("Test plot dataset method...")

        self.fig.sp.plot_ds(self.dataset)

        self.logger.info("Test plot path method...")
        path = CArray([[1, 2], [1, 3], [1.5, 5]])
        self.fig.sp.plot_path(path)

        self.logger.info("Test plot function method...")
        bounds = [(self.x_min, self.x_max), (self.y_min, self.y_max)]
        self.fig.sp.plot_fun(self.classifier.decision_function,
                             plot_levels=False,
                             grid_limits=bounds,
                             y=1)

        self.fig.sp.xlim(self.x_min, self.x_max)
        self.fig.sp.ylim(self.y_min, self.y_max)

        self.fig.show()
Example #25
    def test_draw(self):
        """Compare the classifiers graphically."""
        self.logger.info("Testing classifiers graphically")

        fig = CFigure(width=10, markersize=8)
        # Plot dataset points

        # mark the rejected samples
        y = self.clf.predict(self.dataset.X)
        fig.sp.plot_ds(
            self.dataset[y == -1, :], colors=['k', 'k'], markersize=12)

        # plot the dataset
        fig.sp.plot_ds(self.dataset)

        # Plot objective function
        fig.sp.plot_fun(self.clf.decision_function,
                        grid_limits=self.dataset.get_bounds(),
                        levels=[0], y=1)
        fig.sp.title('Classifier with reject threshold')

        fig.show()
Example #26
    def test_draw(self):
        """Drawing the loss functions.

        Inspired by: https://en.wikipedia.org/wiki/Loss_functions_for_classification

        """
        fig = CFigure()
        x = CArray.arange(-1, 3.01, 0.01)

        fig.sp.plot(x,
                    CArray([1 if i <= 0 else 0 for i in x]),
                    label='0-1 indicator')

        for loss_id in ('hinge', 'hinge-squared', 'square', 'log'):

            self.logger.info("Creating loss: {:}".format(loss_id))
            loss_class = CLoss.create(loss_id)
            fig.sp.plot(x, loss_class.loss(CArray([1]), x), label=loss_id)

        fig.sp.grid()
        fig.sp.legend()

        fig.show()
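For reference, the hinge loss plotted here is max(0, 1 - y*s) for label y = +1: it is zero for scores s >= 1, while the 0-1 indicator drops to zero as soon as s > 0, so the hinge curve upper-bounds the indicator everywhere.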
Example #27
    def test_compare_sklearn(self):

        import numpy as np

        from sklearn import svm, datasets
        from sklearn.metrics import roc_curve, auc
        from sklearn.model_selection import StratifiedKFold

        from secml.figure import CFigure
        roc_fig = CFigure(width=12)

        # import some data to play with
        iris = datasets.load_iris()
        X = iris.data
        y = iris.target
        X, y = X[y != 2], y[y != 2]
        n_samples, n_features = X.shape

        # Add noisy features
        random_state = np.random.RandomState(0)
        X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]

        # Classification and ROC analysis

        # Run classifier with cross-validation and plot ROC curves
        classifier = svm.SVC(kernel='linear',
                             probability=True,
                             random_state=random_state)

        roc_fig.subplot(1, 2, 1)

        mean_tpr = 0.0
        mean_fpr = np.linspace(0, 1, 1000)

        cv = StratifiedKFold(n_splits=6)
        for i, (train, test) in enumerate(cv.split(X, y)):
            probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
            # Compute ROC curve and the area under the curve
            fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
            mean_tpr += np.interp(mean_fpr, fpr, tpr)
            mean_tpr[0] = 0.0
            roc_auc = auc(fpr, tpr)
            roc_fig.sp.plot(fpr,
                            tpr,
                            linewidth=1,
                            label='ROC fold %d (area = %0.2f)' % (i, roc_auc))

        roc_fig.sp.plot([0, 1], [0, 1],
                        '--',
                        color=(0.6, 0.6, 0.6),
                        label='Luck')

        mean_tpr /= cv.get_n_splits()
        mean_tpr[-1] = 1.0
        mean_auc = auc(mean_fpr, mean_tpr)

        roc_fig.sp.plot(mean_fpr,
                        mean_tpr,
                        'k--',
                        label='Mean ROC (area = %0.2f)' % mean_auc,
                        linewidth=2)

        roc_fig.sp.xlim([-0.05, 1.05])
        roc_fig.sp.ylim([-0.05, 1.05])
        roc_fig.sp.xlabel('False Positive Rate')
        roc_fig.sp.ylabel('True Positive Rate')
        roc_fig.sp.title('Sklearn Receiver operating characteristic example')
        roc_fig.sp.legend(loc="lower right")
        roc_fig.sp.grid()

        self.logger.info("Plotting using our CPLotRoc")

        roc_fig.subplot(1, 2, 2)

        score = []
        true_y = []
        for i, (train, test) in enumerate(cv.split(X, y)):
            probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
            true_y.append(CArray(y[test]))
            score.append(CArray(probas_[:, 1]))

        self.roc_wmean = CRoc()
        self.roc_wmean.compute(true_y, score)
        fp, tp = self.roc_wmean.average()

        roc_fig.sp.plot([0, 100], [0, 100],
                        '--',
                        color=(0.6, 0.6, 0.6),
                        label='Luck')

        roc_fig.sp.xticks([0, 20, 40, 60, 80, 100])
        roc_fig.sp.xticklabels(['0', '20', '40', '60', '80', '100'])

        roc_fig.sp.plot_roc_mean(self.roc_wmean,
                                 plot_std=True,
                                 logx=False,
                                 style='go-',
                                 label='Mean ROC (area = %0.2f)' %
                                 (auc(fp.tondarray(), tp.tondarray())))

        roc_fig.sp.xlim([-0.05 * 100, 1.05 * 100])
        roc_fig.sp.ylim([-0.05 * 100, 1.05 * 100])
        roc_fig.sp.title('SecML Receiver operating characteristic example')
        roc_fig.sp.legend(loc="lower right")
        roc_fig.show()
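Note the different scales: the sklearn subplot works with rates in [0, 1], while the secml subplot is drawn on a 0-100 (percentage) scale, as shown by its luck line, ticks, and axis limits.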
Example #28
from secml.array import CArray
from secml.figure import CFigure

fig = CFigure(fontsize=14)
fig.title('loglog base 2 on x')

t = CArray.arange(0.01, 20.0, 0.01)
fig.sp.loglog(t, 20 * (-t / 10.0).exp(), basex=2)

fig.sp.grid()
fig.show()
Example #29
    def test_constraint(self):
        """Test for the `plot_constraint` method."""
        fig = CFigure()
        for constraint in self.constraints:
            fig.sp.plot_constraint(constraint)
        fig.show()