Code Example #1
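These snippets come from a pytest suite exercising categorical cross-entropy and related distance losses, but they reference imports and fixtures that are not shown. The setup below is a minimal sketch that makes them runnable, assuming the loss classes come from thinc's thinc.api module; the fixture values are inferred from the assertions in Examples #2 and #4, and guesses2 is a pure placeholder, since any single row of valid probabilities satisfies the identity test in Example #6.

import numpy
import pytest
from thinc.api import (
    CategoricalCrossentropy,
    CosineDistance,
    L2Distance,
    SequenceCategoricalCrossentropy,
)

eps = 0.0001  # tolerance for pytest.approx, as defined in Example #6

# Assumed fixtures. scores0/labels0 only need compatible shapes and a
# float32 score array; the rows of guesses1 are pinned down by the gradient
# and loss assertions in Examples #2 and #4.
scores0 = numpy.zeros((3, 4), dtype="float32")
labels0 = numpy.asarray([0, 1, 1], dtype="int32")
guesses1 = numpy.asarray(
    [[0.1, 0.5, 0.6], [0.4, 0.6, 0.3], [1.0, 1.0, 1.0], [0.0, 0.0, 0.0]]
)
labels1 = numpy.asarray([2, 1, 0, 2])
labels1_full = numpy.asarray(  # one-hot form of labels1
    [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
)
guesses2 = numpy.asarray([[0.1, 0.4, 0.5]])  # placeholder: any valid probability row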
def test_loss():
    d_scores = CategoricalCrossentropy().get_grad(scores0, labels0)
    assert d_scores.dtype == "float32"
    assert d_scores.shape == scores0.shape
    d_scores = SequenceCategoricalCrossentropy().get_grad([scores0], [labels0])
    assert d_scores[0].dtype == "float32"
    assert d_scores[0].shape == scores0.shape
    assert SequenceCategoricalCrossentropy().get_grad([], []) == []
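As the test shows, SequenceCategoricalCrossentropy applies the loss across a list of sequences: get_grad takes a list of score arrays and a list of label arrays and returns one gradient array per sequence, with an empty input list mapping to an empty result.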
Code Example #2
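Example #2 pins down the gradient values: with normalize=True, the difference between each guess vector and its one-hot truth is divided by the number of vectors in the batch (four here), and the total loss is checked against a fixed value.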
def test_categorical_crossentropy(guesses, labels):
    d_scores = CategoricalCrossentropy(normalize=True).get_grad(guesses, labels)
    assert d_scores.shape == guesses.shape

    # The normalization divides the difference (e.g. 0.4) by the number of vectors (4)
    assert d_scores[1][0] == pytest.approx(0.1, eps)
    assert d_scores[1][1] == pytest.approx(-0.1, eps)

    # The third vector predicted all labels, but only the first one was correct
    assert d_scores[2][0] == pytest.approx(0, eps)
    assert d_scores[2][1] == pytest.approx(0.25, eps)
    assert d_scores[2][2] == pytest.approx(0.25, eps)

    # The fourth vector predicted no labels but should have predicted the last one
    assert d_scores[3][0] == pytest.approx(0, eps)
    assert d_scores[3][1] == pytest.approx(0, eps)
    assert d_scores[3][2] == pytest.approx(-0.25, eps)

    loss = CategoricalCrossentropy(normalize=True).get_loss(guesses, labels)
    assert loss == pytest.approx(0.239375, eps)
Code Example #3
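Example #3 covers input validation: guesses or truth values outside the [0, 1] probability range cause get_grad to raise a ValueError.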
def test_crossentropy_incorrect_scores_targets():
    labels = numpy.asarray([2])

    guesses_neg = numpy.asarray([[-0.1, 0.5, 0.6]])
    with pytest.raises(ValueError, match=r"Cannot calculate.*guesses"):
        CategoricalCrossentropy(normalize=True).get_grad(guesses_neg, labels)

    guesses_larger_than_one = numpy.asarray([[1.1, 0.5, 0.6]])
    with pytest.raises(ValueError, match=r"Cannot calculate.*guesses"):
        CategoricalCrossentropy(normalize=True).get_grad(
            guesses_larger_than_one, labels)

    guesses_ok = numpy.asarray([[0.1, 0.4, 0.5]])
    targets_neg = numpy.asarray([[-0.1, 0.5, 0.6]])
    with pytest.raises(ValueError, match=r"Cannot calculate.*truth"):
        CategoricalCrossentropy(normalize=True).get_grad(
            guesses_ok, targets_neg)

    targets_larger_than_one = numpy.asarray([[2.0, 0.5, 0.6]])
    with pytest.raises(ValueError, match=r"Cannot calculate.*truth"):
        CategoricalCrossentropy(normalize=True).get_grad(
            guesses_ok, targets_larger_than_one)
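Note that pytest.raises interprets match as a regular expression searched against the exception message, so a pattern like Cannot calculate.*guesses accepts any message containing the two phrases in that order.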
Code Example #4
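Example #4 repeats the gradient checks with missing_value=0: vectors whose label equals the missing value are treated as unannotated, so their gradient is zeroed and they add nothing to the loss.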
def test_categorical_crossentropy_int_list_missing(guesses, labels):
    d_scores = CategoricalCrossentropy(normalize=True, missing_value=0).get_grad(
        guesses, labels
    )
    assert d_scores.shape == guesses.shape

    # The normalization divides the difference (e.g. 0.4) by the number of vectors (4)
    assert d_scores[1][0] == pytest.approx(0.1, eps)
    assert d_scores[1][1] == pytest.approx(-0.1, eps)

    # Label 0 is masked, because it represents the missing value
    assert d_scores[2][0] == 0.0
    assert d_scores[2][1] == 0.0
    assert d_scores[2][2] == 0.0

    # The fourth vector predicted no labels but should have predicted the last one
    assert d_scores[3][0] == pytest.approx(0, eps)
    assert d_scores[3][1] == pytest.approx(0, eps)
    assert d_scores[3][2] == pytest.approx(-0.25, eps)

    loss = CategoricalCrossentropy(normalize=True, missing_value=0).get_loss(
        guesses, labels
    )
    assert loss == pytest.approx(0.114375, eps)
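The masked third vector no longer contributes to the total, which is why the loss drops from 0.239375 in Example #2 to 0.114375 here.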
Code Example #5
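Example #5 shows only the opening shape assertion of the missing-label variant of the cross-entropy test.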
def test_categorical_crossentropy_missing(guesses, labels):
    d_scores = CategoricalCrossentropy(normalize=True).get_grad(
        guesses, labels)
    assert d_scores.shape == guesses.shape
Code Example #6
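Example #6 gives a fuller view of the module: the eps tolerance, the test_loss case from Example #1, a parametrized identity check asserting that gradient and loss both vanish when the guesses equal the truth (for cross-entropy, cosine distance, and L2 distance alike), and the opening of the parametrized cross-entropy test.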
eps = 0.0001


def test_loss():
    d_scores = CategoricalCrossentropy().get_grad(scores0, labels0)
    assert d_scores.dtype == "float32"
    assert d_scores.shape == scores0.shape
    d_scores = SequenceCategoricalCrossentropy().get_grad([scores0], [labels0])
    assert d_scores[0].dtype == "float32"
    assert d_scores[0].shape == scores0.shape
    assert SequenceCategoricalCrossentropy().get_grad([], []) == []


@pytest.mark.parametrize("dist", [
    CategoricalCrossentropy(),
    CosineDistance(ignore_zeros=True),
    L2Distance()
])
@pytest.mark.parametrize("vect", [scores0, guesses1, guesses2])
def test_equality(dist, vect):
    assert int(dist.get_grad(vect, vect)[0][0]) == pytest.approx(0, eps)
    assert dist.get_loss(vect, vect) == pytest.approx(0, eps)


@pytest.mark.parametrize("guesses, labels", [(guesses1, labels1),
                                             (guesses1, labels1_full)])
def test_categorical_crossentropy(guesses, labels):
    d_scores = CategoricalCrossentropy(normalize=True).get_grad(
        guesses, labels)
    assert d_scores.shape == guesses.shape