Example #1
0
    def test_grad(self):
        """Compare analytical gradients with its numerical approximation."""
        # Wrappers adapting the loss API to the (x, *args) signature
        # expected by CFunction.check_grad.
        def _fun(scores, loss, true_labels):
            return loss.loss(true_labels, scores)

        def _dfun(scores, loss, true_labels):
            return loss.dloss(true_labels, scores)

        loss_ids = ('hinge', 'hinge-squared', 'square', 'log')
        for loss_id in loss_ids:
            self.logger.info("Creating loss: {:}".format(loss_id))
            loss_class = CLoss.create(loss_id)

            # Single random binary label and a matching random score
            n_samples = 1
            y_true = CArray.randint(0, 2, n_samples).todense()
            score = CArray.randn((n_samples, ))

            checker = CFunction(_fun, _dfun)
            check_grad_val = checker.check_grad(
                score, 1e-8, loss=loss_class, true_labels=y_true)

            self.logger.info(
                "Gradient difference between analytical svm "
                "gradient and numerical gradient: %s", str(check_grad_val))
            self.assertLess(
                check_grad_val, 1e-4,
                "the gradient is wrong {:} for {:} loss".format(
                    check_grad_val, loss_id))
Example #2
0
    def test_randn(self):
        """Test for CArray.randn() classmethod."""
        self.logger.info("Test for CArray.randn() classmethod.")

        # Cover vectors and matrices of different shapes
        shapes = ((1, ), (2, ), (1, 2), (2, 1), (2, 2))
        for shape in shapes:
            array = CArray.randn(shape=shape)
            self.logger.info("CArray.randn(shape={:}):\n{:}".format(
                shape, array))

            # Result must be a CArray of floats with the requested shape
            self.assertIsInstance(array, CArray)
            self.assertEqual(array.shape, shape)
            self.assertIsSubDtype(array.dtype, float)
Example #3
0
    def test_grad(self):
        """Compare analytical gradients with its numerical approximation."""
        # Wrapper adapting the loss API to the (x, *args) signature
        # expected by CFunction.approx_fprime.
        def _fun(scores, loss, true_labels):
            return loss.loss(true_labels, scores)

        loss_class = CLossCrossEntropy()

        # One random binary label and a 1x3 random score matrix
        y_true = CArray.randint(0, 2, 1)
        score = CArray.randn((1, 3))

        self.logger.info("Y_TRUE: {:} SCORES: {:}".format(y_true, score))

        for pos_label in (None, 0, 1, 2):
            self.logger.info("POS_LABEL: {:}".format(pos_label))

            # Analytical gradient of the loss on the scores
            grad = loss_class.dloss(y_true, score, pos_label)
            self.logger.info("GRAD: {:}".format(grad))

            # Numerical approximation over all classes.
            # NOTE(review): `eps` is not defined in this method; it is
            # assumed to come from the enclosing module/class — confirm.
            approx = CFunction(_fun).approx_fprime(
                score, eps, loss_class, y_true)
            self.logger.info("APPROX (FULL): {:}".format(approx))

            # None means "use the true label" for the comparison index
            label_idx = y_true.item() if pos_label is None else pos_label
            approx = approx[label_idx]
            self.logger.info("APPROX (POS_LABEL): {:}".format(approx))

            check_grad_val = (grad - approx).norm()

            self.logger.info("Gradient difference between analytical svm "
                             "gradient and numerical gradient: %s",
                             str(check_grad_val))
            self.assertLess(check_grad_val, 1e-4,
                            "the gradient is wrong {:}".format(check_grad_val))
from secml.array import CArray
from secml.figure import CFigure

fig = CFigure(fontsize=14)

# Synthetic IQ-like data: normal samples scaled/shifted to the target
# mean and standard deviation.
mu = 100  # mean of distribution
sigma = 15  # standard deviation of distribution
samples = mu + sigma * CArray.randn((10000, ))

# Normalized histogram of the samples
n_bins = 50
n, bins, patches = fig.sp.hist(
    samples, n_bins, density=1, facecolor='green', alpha=0.5)

# Overlay the analytical normal density as a 'best fit' line
fit_line = bins.normpdf(mu, sigma)
fig.sp.plot(bins, fit_line, 'r--')

fig.sp.xlabel('Smarts')
fig.sp.ylabel('Probability')
# NOTE(review): title is set on the figure, not the subplot (fig.sp);
# presumably intentional in the CFigure API — confirm.
fig.title(r'Histogram of IQ: $\mu=100$, $\sigma=15$')

# Tweak spacing to prevent clipping of ylabel
fig.subplots_adjust(left=0.15)

fig.sp.grid()
fig.show()