Example no. 1
import numpy as np


def test_softmax_activation():
    # Simple case
    lay = SoftmaxLayer(1, 1)
    input_arr = np.array([[7]])
    expected_arr = np.array([[1]])
    actual_arr = lay.activation(input_arr)
    np.testing.assert_array_equal(expected_arr, actual_arr)

    # Simple case with batches
    lay = SoftmaxLayer(1, 1)
    input_arr = np.array([[2], [7]])
    expected_arr = np.array([[1], [1]])
    actual_arr = lay.activation(input_arr)
    np.testing.assert_array_equal(expected_arr, actual_arr)

    # Multiple inputs
    lay = SoftmaxLayer(4, 4)
    input_arr = np.array([[1, 2, 3, 4]])
    exp_sum = np.exp(1) + np.exp(2) + np.exp(3) + np.exp(4)
    expected_arr = np.exp(np.array([[1, 2, 3, 4]])) / exp_sum
    actual_arr = lay.activation(input_arr)

    # Compare to 10 decimal places; exact equality is too strict for
    # floating-point softmax results.
    np.testing.assert_array_almost_equal(expected_arr, actual_arr, decimal=10)

    # Multiple inputs with batches
    lay = SoftmaxLayer(4, 4)
    input_arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])

    exp_sum = np.exp(1) + np.exp(2) + np.exp(3) + np.exp(4)
    expected_arr1 = np.exp(np.array([1, 2, 3, 4])) / exp_sum

    exp_sum = np.exp(5) + np.exp(6) + np.exp(7) + np.exp(8)
    expected_arr2 = np.exp(np.array([5, 6, 7, 8])) / exp_sum

    expected_arr = np.array([expected_arr1, expected_arr2])

    actual_arr = lay.activation(input_arr)

    # Compare to 10 decimal places; exact equality is too strict for
    # floating-point softmax results.
    np.testing.assert_array_almost_equal(expected_arr, actual_arr, decimal=10)
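
For reference, here is a minimal sketch of the behavior these assertions rely on; the class body and constructor arguments are assumptions, since the real SoftmaxLayer implementation is not shown here. activation() applies a row-wise softmax, so a single-element row maps to exactly 1 and each batch row sums to 1.

class SoftmaxLayer:
    # Sketch only: the constructor arguments are assumed to be the input and
    # output sizes passed when the layer is created in the test above.
    def __init__(self, num_inputs, num_outputs):
        self.num_inputs = num_inputs
        self.num_outputs = num_outputs

    def activation(self, x):
        # Row-wise softmax: subtract the per-row max for numerical stability
        # (this does not change the result), exponentiate, then normalize
        # each row so it sums to 1.
        shifted = x - np.max(x, axis=-1, keepdims=True)
        exps = np.exp(shifted)
        return exps / np.sum(exps, axis=-1, keepdims=True)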