import random

import numpy

# Project-local modules, imported under the names the snippets use; the actual
# package paths in the source project may differ.
import calculate
import helpers


def test_dsoftmax_matrix():
    tensor_shape = [random.randint(2, 10) for _ in range(2)]

    helpers.check_gradient(calculate.softmax,
                           lambda X: calculate.dsoftmax(calculate.softmax(X)),
                           f_arg_tensor=numpy.random.random(tensor_shape),
                           f_shape='jac-stack')
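

# For reference, the quantity these gradient checks verify is the softmax
# Jacobian: for s = softmax(x), ds_i/dx_j = s_i * (delta_ij - s_j). A minimal
# sketch of a dsoftmax that, like the call above, takes the softmax output
# (hypothetical stand-in for the vector case; the 'jac-stack' shape above
# presumably stacks one such Jacobian per row of a matrix input):
def dsoftmax_sketch(softmax_out):
    s = softmax_out
    # J = diag(s) - outer(s, s), i.e. J[i, j] = s[i] * (delta_ij - s[j])
    return numpy.diag(s) - numpy.outer(s, s)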


def test_softmax_transfer():
    assert list(calculate.softmax(numpy.array([1.0, 1.0]))) == [0.5, 0.5]

    assert helpers.approx_equal(calculate.softmax(numpy.array([1.0, 0.0])),
                                [0.7310585, 0.2689414])

    softmax_out = calculate.softmax(numpy.array([1.0, -1.0]))
    assert softmax_out[0] > 0.5 and softmax_out[1] < 0.5
    assert helpers.approx_equal(sum(softmax_out), 1.0)


def test_softmax_matrix():
    assert helpers.approx_equal(
        calculate.softmax(numpy.array([[1.0, 1.0], [1.0, 0.0]])),
        [[0.5, 0.5], [0.7310585, 0.2689414]])

    assert helpers.approx_equal(
        calculate.softmax(numpy.array([[1.0, 0.0], [0.5, 0.5]])),
        [[0.7310585, 0.2689414], [0.5, 0.5]])

    shape = (random.randint(2, 10), random.randint(2, 10))
    softmax_out = calculate.softmax(
        numpy.sort(numpy.random.random(shape), axis=1))
    assert (numpy.sort(softmax_out, axis=1) == softmax_out).all()
    assert helpers.approx_equal(numpy.sum(softmax_out, axis=1),
                                numpy.ones(shape[0]))


def test_softmax_vector():
    assert list(calculate.softmax(numpy.array([1.0, 1.0]))) == [0.5, 0.5]

    assert helpers.approx_equal(calculate.softmax(numpy.array([1.0, 0.0])),
                                [0.7310585, 0.2689414])

    softmax_out = calculate.softmax(numpy.array([1.0, -1.0]))
    assert softmax_out[0] > 0.5 and softmax_out[1] < 0.5
    assert helpers.approx_equal(sum(softmax_out), 1.0)

    shape = random.randint(2, 10)
    softmax_out = calculate.softmax(
        numpy.array(sorted(numpy.random.random(shape))))
    assert sorted(softmax_out) == list(softmax_out)
    assert helpers.approx_equal(sum(softmax_out), 1.0)


def test_softmax_large_input():
    """Softmax includes an exponential, which can cause overflows.

    Our softmax implementation should protect against overflow.
    """
    assert list(calculate.softmax(numpy.array([-1000.0,
                                               1000.0]))) == [0.0, 1.0]
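

# Softmax is invariant to a constant shift: softmax(x) == softmax(x - c). The
# usual overflow guard subtracts the per-row maximum before exponentiating, so
# exp never sees a large positive argument. A sketch of that trick, assuming
# this is how calculate.softmax protects itself (not confirmed by the source):
def softmax_stable_sketch(x, axis=-1):
    shifted = x - numpy.max(x, axis=axis, keepdims=True)  # largest entry becomes 0
    exps = numpy.exp(shifted)
    return exps / numpy.sum(exps, axis=axis, keepdims=True)
# e.g. softmax_stable_sketch(numpy.array([-1000.0, 1000.0])) -> [0.0, 1.0]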


def test_dsoftmax():
    helpers.check_gradient(calculate.softmax,
                           lambda x: calculate.dsoftmax(calculate.softmax(x)),
                           f_shape='jac')


class SoftmaxTransfer(object):  # hypothetical wrapper; the source shows only __call__
    def __call__(self, input_vec):
        # Apply softmax as this layer's transfer (activation) function.
        return calculate.softmax(input_vec)