Example #1
def test_p_hid(tknzr: BaseTknzr):
    r"""``p_hid`` must be an instance of `float` and must be a probability."""
    # Test case: Type mismatch.
    wrong_typed_inputs = [
        False,
        True,
        0,
        1,
        '',
        (),
        [],
        {},
        set(),
        None,
        ...,
        NotImplemented,
    ]

    for bad_p_hid in wrong_typed_inputs:
        with pytest.raises(TypeError) as excinfo:
            RNNModel(
                d_emb=1,
                d_hid=1,
                n_hid_lyr=1,
                n_pre_hid_lyr=1,
                n_post_hid_lyr=1,
                p_emb=0.0,
                p_hid=bad_p_hid,
                tknzr=tknzr,
            )

        assert '`p_hid` must be an instance of `float`' in str(excinfo.value)

    # Test case: Invalid value.
    wrong_value_inputs = [
        -1.0,
        -0.1,
        1.1,
        2.0,
    ]

    for bad_p_hid in wrong_value_inputs:
        with pytest.raises(ValueError) as excinfo:
            RNNModel(
                d_emb=1,
                d_hid=1,
                n_hid_lyr=1,
                n_pre_hid_lyr=1,
                n_post_hid_lyr=1,
                p_emb=0.0,
                p_hid=bad_p_hid,
                tknzr=tknzr,
            )

        assert ('`p_hid` must be in the range from `0.0` to `1.0`'
                in str(excinfo.value))
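For reference, the two assertions above imply a validation pattern along these lines. This is a hypothetical sketch, not the repository's actual `RNNModel.__init__`; only the error messages are taken from the test:

def validate_p_hid(p_hid):
    # Hypothetical helper mirroring the messages asserted above.
    # `isinstance(..., float)` rejects `bool` and `int` look-alikes such as
    # `True` or `0`, matching the wrong-typed inputs in the test.
    if not isinstance(p_hid, float):
        raise TypeError('`p_hid` must be an instance of `float`')
    # Probabilities live in the closed interval [0.0, 1.0].
    if not 0.0 <= p_hid <= 1.0:
        raise ValueError('`p_hid` must be in the range from `0.0` to `1.0`')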
Example #2
def test_n_pre_hid_lyr(tknzr: BaseTknzr):
    r"""``n_pre_hid_lyr`` must be an instance of `int` and be positive."""
    # Test case: Type mismatch.
    wrong_typed_inputs = [
        0.0,
        0.1,
        1.0,
        '',
        (),
        [],
        {},
        set(),
        None,
        ...,
        NotImplemented,
    ]

    for bad_n_pre_hid_lyr in wrong_typed_inputs:
        with pytest.raises(TypeError) as excinfo:
            RNNModel(
                d_emb=1,
                d_hid=1,
                n_hid_lyr=1,
                n_pre_hid_lyr=bad_n_pre_hid_lyr,
                n_post_hid_lyr=1,
                p_emb=0.0,
                p_hid=0.0,
                tknzr=tknzr,
            )

        assert ('`n_pre_hid_lyr` must be an instance of `int`'
                in str(excinfo.value))

    # Test case: Invalid value.
    wrong_value_inputs = [
        0,
        -1,
        -2,
    ]

    for bad_n_pre_hid_lyr in wrong_value_inputs:
        with pytest.raises(ValueError) as excinfo:
            RNNModel(
                d_emb=1,
                d_hid=1,
                n_hid_lyr=1,
                n_pre_hid_lyr=bad_n_pre_hid_lyr,
                n_post_hid_lyr=1,
                p_emb=0.0,
                p_hid=0.0,
                tknzr=tknzr,
            )

        assert ('`n_pre_hid_lyr` must be bigger than or equal to `1`'
                in str(excinfo.value))
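The integer check follows the same pattern; here is a hypothetical sketch mirroring the asserted messages. Note that `bool` is a subclass of `int` in Python, so `True` and `False` would pass a plain `isinstance` check, which is consistent with booleans being absent from the wrong-typed list above:

def validate_n_pre_hid_lyr(n_pre_hid_lyr):
    # Hypothetical helper; only the messages come from the test.
    if not isinstance(n_pre_hid_lyr, int):
        raise TypeError('`n_pre_hid_lyr` must be an instance of `int`')
    # Layer counts must be positive.
    if n_pre_hid_lyr < 1:
        raise ValueError('`n_pre_hid_lyr` must be bigger than or equal to `1`')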
Example #3
def test_input_shape_and_dtype(
    rnn_model: RNNModel,
    batch_prev_tkids: torch.Tensor,
):
    r"""Input must be long tensor."""

    try:
        rnn_model = rnn_model.eval()
        rnn_model.pred(batch_prev_tkids)
    except Exception:
        assert False
Example #4
def test_value_range(
    rnn_model: RNNModel,
    batch_prev_tkids: torch.Tensor,
):
    r"""Return values are probabilities."""
    rnn_model = rnn_model.eval()
    out = rnn_model.pred(batch_prev_tkids)

    # Probabilities are values within range [0, 1].
    assert torch.all(0 <= out).item()
    assert torch.all(out <= 1).item()

    # Sum of the probabilities equals 1.
    accum_out = out.sum(dim=-1)
    assert torch.allclose(accum_out, torch.ones_like(accum_out))
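Both properties hold by construction if `pred` ends with a softmax over the vocabulary dimension. A minimal sketch of that assumption (hypothetical, not the repository's actual implementation):

import torch

def pred_sketch(logits: torch.Tensor) -> torch.Tensor:
    # Softmax over the last (vocabulary) dimension yields values in [0, 1]
    # that sum to 1 along that dimension, which is what the test checks.
    return torch.nn.functional.softmax(logits, dim=-1)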
Example #5
def test_forward_path(
    rnn_model: RNNModel,
    batch_prev_tkids: torch.Tensor,
):
    r"""Parameters used during forward must have gradients."""
    # Make sure model has no gradients.
    rnn_model = rnn_model.train()
    rnn_model.zero_grad()

    rnn_model(batch_prev_tkids).sum().backward()

    # Parameters on the forward path must now hold gradients.
    assert rnn_model.emb.weight.grad is not None
    assert rnn_model.pre_hid[0].weight.grad is not None
    assert rnn_model.hid.weight_ih_l0.grad is not None
    assert rnn_model.post_hid[-1].weight.grad is not None
Example #6
def test_return_shape_and_dtype(
    rnn_model: RNNModel,
    batch_prev_tkids: torch.Tensor,
    batch_next_tkids: torch.Tensor,
):
    r"""Return float tensor with 0 dimension."""
    rnn_model = rnn_model.eval()
    loss = rnn_model.loss_fn(
        batch_prev_tkids=batch_prev_tkids,
        batch_next_tkids=batch_next_tkids,
    )

    # 0-dimensional tensor.
    assert loss.shape == torch.Size([])
    # Return float tensor.
    assert loss.dtype == torch.float
Example #7
def test_input_shape_and_dtype(
    rnn_model: RNNModel,
    batch_prev_tkids: torch.Tensor,
    batch_next_tkids: torch.Tensor,
):
    r"""Input tensors must be long tensors and have the same shape.

    Same shape is required since we are using teacher forcing.
    """
    try:
        rnn_model = rnn_model.eval()
        rnn_model.loss_fn(
            batch_prev_tkids=batch_prev_tkids,
            batch_next_tkids=batch_next_tkids,
        )
    except Exception as exc:
        pytest.fail(f'`loss_fn` raised unexpectedly: {exc}')
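Under teacher forcing the model is conditioned on the ground-truth previous tokens and scored against the next tokens, which is why both tensors must share the shape (B, S). A hypothetical sketch of such a loss (the real `loss_fn` may differ in details):

import torch

def loss_fn_sketch(
    logits: torch.Tensor,
    batch_next_tkids: torch.Tensor,
) -> torch.Tensor:
    # logits: (B, S, V); batch_next_tkids: (B, S).
    # Flatten the batch and sequence dimensions, then apply cross entropy.
    B, S, V = logits.shape
    return torch.nn.functional.cross_entropy(
        logits.reshape(B * S, V),
        batch_next_tkids.reshape(B * S),
    )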
Example #8
def test_return_shape_and_dtype(
    rnn_model: RNNModel,
    batch_prev_tkids: torch.Tensor,
):
    r"""Return float tensor with correct shape."""
    rnn_model = rnn_model.eval()
    out = rnn_model.pred(batch_prev_tkids)

    # Output float tensor.
    assert out.dtype == torch.float

    # Input shape: (B, S).
    # Output shape: (B, S, V).
    assert out.shape == (
        batch_prev_tkids.shape[0],
        batch_prev_tkids.shape[1],
        rnn_model.emb.num_embeddings,
    )
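The (B, S, V) output shape follows from embedding each token, running the recurrent stack, and projecting every hidden state back onto the vocabulary. A hypothetical shape-only sketch (not the repository's `RNNModel`):

import torch

class TinyRNNSketch(torch.nn.Module):
    # Hypothetical stand-in illustrating the shape contract only.
    def __init__(self, vocab_size: int, d_emb: int, d_hid: int):
        super().__init__()
        self.emb = torch.nn.Embedding(vocab_size, d_emb)
        self.hid = torch.nn.RNN(d_emb, d_hid, batch_first=True)
        self.out = torch.nn.Linear(d_hid, vocab_size)

    def forward(self, batch_prev_tkids: torch.Tensor) -> torch.Tensor:
        # (B, S) -> (B, S, d_emb) -> (B, S, d_hid) -> (B, S, V).
        h, _ = self.hid(self.emb(batch_prev_tkids))
        return self.out(h)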
Example #9
def test_back_propagation_path(
    rnn_model: RNNModel,
    batch_prev_tkids: torch.Tensor,
    batch_next_tkids: torch.Tensor,
):
    r"""Gradients with respect to loss must get back propagated."""
    # Make sure model has no gradients.
    rnn_model = rnn_model.train()
    rnn_model.zero_grad()

    rnn_model.loss_fn(
        batch_prev_tkids=batch_prev_tkids,
        batch_next_tkids=batch_next_tkids,
    ).backward()

    # Gradients must be populated on every layer along the backward path.
    assert rnn_model.emb.weight.grad is not None
    assert rnn_model.pre_hid[0].weight.grad is not None
    assert rnn_model.hid.weight_ih_l0.grad is not None
    assert rnn_model.post_hid[-1].weight.grad is not None
Example #10
@pytest.fixture
def rnn_model(
    tknzr: BaseTknzr,
    d_emb: int,
    d_hid: int,
    n_hid_lyr: int,
    n_pre_hid_lyr: int,
    n_post_hid_lyr: int,
    p_emb: float,
    p_hid: float,
) -> RNNModel:
    r"""Example ``RNNModel`` instance."""
    return RNNModel(
        d_emb=d_emb,
        d_hid=d_hid,
        n_hid_lyr=n_hid_lyr,
        n_pre_hid_lyr=n_pre_hid_lyr,
        n_post_hid_lyr=n_post_hid_lyr,
        p_emb=p_emb,
        p_hid=p_hid,
        tknzr=tknzr,
    )
def test_save_and_load(
    tknzr: BaseTknzr,
    ckpt: int,
    exp_name: str,
    clean_model,
):
    r"""Saved parameters are the same as loaded."""
    model = RNNModel(
        d_emb=1,
        d_hid=1,
        n_hid_lyr=1,
        n_pre_hid_lyr=1,
        n_post_hid_lyr=1,
        p_emb=0.5,
        p_hid=0.5,
        tknzr=tknzr,
    )

    # Save model parameters.
    model.save(
        ckpt=ckpt,
        exp_name=exp_name,
    )

    # Load model parameters.
    load_model = RNNModel.load(
        ckpt=ckpt,
        exp_name=exp_name,
        d_emb=1,
        d_hid=1,
        n_hid_lyr=1,
        n_pre_hid_lyr=1,
        n_post_hid_lyr=1,
        p_emb=0.5,
        p_hid=0.5,
        tknzr=tknzr,
    )

    # Ensure parameters are the same.
    for p_1, p_2 in zip(model.parameters(), load_model.parameters()):
        assert torch.equal(p_1, p_2)
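The round trip above only requires that the parameter tensors survive serialization. A minimal sketch of save/load under that assumption (hypothetical; the real `RNNModel.save`/`RNNModel.load` take `ckpt` and `exp_name` and may store extra metadata):

import torch

def save_sketch(model: torch.nn.Module, path: str) -> None:
    # Persist the state dict; the test compares parameters tensor by tensor.
    torch.save(model.state_dict(), path)

def load_sketch(model: torch.nn.Module, path: str) -> torch.nn.Module:
    # Restore parameters in place and return the model for convenience.
    model.load_state_dict(torch.load(path))
    return model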