Example 1
# imports assumed by this snippet (NumPy, Theano, Lasagne, Foolbox)
import numpy as np
import theano.tensor as T
from lasagne.layers import InputLayer, GlobalPoolLayer
from foolbox.models import LasagneModel


# num_classes is supplied by pytest parametrization in the original test suite
def test_lasagne_gradient(num_classes):
    bounds = (0, 255)
    channels = num_classes

    def mean_brightness_net(images):
        logits = GlobalPoolLayer(images)
        return logits

    images_var = T.tensor4('images', dtype='float32')
    images = InputLayer((None, channels, 5, 5), images_var)
    logits = mean_brightness_net(images)

    # mean and std must broadcast against the channels-first (channels, 5, 5) input
    preprocessing = (np.arange(num_classes)[:, None, None],
                     np.random.uniform(size=(channels, 5, 5)) + 1)

    model = LasagneModel(images,
                         logits,
                         preprocessing=preprocessing,
                         bounds=bounds)

    epsilon = 1e-2

    np.random.seed(23)
    test_image = np.random.rand(channels, 5, 5).astype(np.float32)
    test_label = 7

    _, g1 = model.predictions_and_gradient(test_image, test_label)

    l1 = model._loss_fn(test_image[None] - epsilon / 2 * g1, [test_label])[0]
    l2 = model._loss_fn(test_image[None] + epsilon / 2 * g1, [test_label])[0]

    # central-difference check: l2 - l1 should approximate
    # epsilon * ||g1||^2, i.e. the gradient is numerically correct
    np.testing.assert_array_almost_equal(1e4 * (l2 - l1),
                                         1e4 * epsilon * np.linalg.norm(g1)**2,
                                         decimal=1)

Example 2
# (uses the same imports as Example 1)
def test_lasagne_gradient(num_classes):
    bounds = (0, 255)
    channels = num_classes

    def mean_brightness_net(images):
        logits = GlobalPoolLayer(images)
        return logits

    images_var = T.tensor4('images', dtype='float32')
    images = InputLayer((None, channels, 5, 5), images_var)
    logits = mean_brightness_net(images)

    preprocessing = (np.arange(num_classes)[:, None, None],
                     np.random.uniform(size=(channels, 5, 5)) + 1)

    model = LasagneModel(
        images,
        logits,
        preprocessing=preprocessing,
        bounds=bounds)

    # theano and lasagne calculate the cross-entropy from the probabilities
    # rather than combining the softmax and cross-entropy calculations; they
    # therefore have lower numerical accuracy
    epsilon = 1e-3

    np.random.seed(23)
    test_image = np.random.rand(channels, 5, 5).astype(np.float32)
    test_label = 7

    _, g1 = model.predictions_and_gradient(test_image, test_label)

    l1 = model._loss_fn(test_image[None] - epsilon / 2 * g1, [test_label])[0]
    l2 = model._loss_fn(test_image[None] + epsilon / 2 * g1, [test_label])[0]

    # the loss difference must be clearly non-zero, otherwise the
    # comparison below would pass trivially with 0 ~ 0
    assert 1e5 * (l2 - l1) > 1

    # central-difference check: l2 - l1 should approximate
    # epsilon * ||g1||^2, i.e. the gradient is numerically correct
    np.testing.assert_array_almost_equal(
        1e5 * (l2 - l1),
        1e5 * epsilon * np.linalg.norm(g1)**2,
        decimal=1)
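
Both examples verify the gradient returned by predictions_and_gradient with the same central-difference identity: for g1 = grad(loss)(x), loss(x + epsilon/2 * g1) - loss(x - epsilon/2 * g1) is approximately epsilon * ||g1||^2. Below is a minimal, framework-agnostic sketch of that check; the helper name check_gradient and the toy quadratic loss are illustrative and not part of Foolbox:

import numpy as np

def check_gradient(loss_fn, grad_fn, x, epsilon=1e-3, scale=1e5, decimal=1):
    # central-difference check: for g = grad_fn(x),
    # loss(x + eps/2 * g) - loss(x - eps/2 * g) ~= eps * ||g||**2
    g = grad_fn(x)
    l1 = loss_fn(x - epsilon / 2 * g)
    l2 = loss_fn(x + epsilon / 2 * g)
    np.testing.assert_array_almost_equal(
        scale * (l2 - l1),
        scale * epsilon * np.linalg.norm(g)**2,
        decimal=decimal)

# toy quadratic loss with known gradient: loss(x) = 0.5 * ||x||^2, grad = x
check_gradient(lambda x: 0.5 * np.sum(x**2),
               lambda x: x,
               np.random.rand(3, 5, 5))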