Example #1
def test_concat_sparse_dense_correct_output_for_dense_input() -> None:
    layer = ConcatenateSparseDenseFeatures(
        attribute=attribute_name,
        feature_type=SEQUENCE,
        feature_type_signature=[
            realistic_feature_signature_dense_1,
            realistic_feature_signature_dense_2,
        ],
        config=dict(
            model_config_basic,
            # also activate all dropout to check that it has no effect on dense features
            **{
                SPARSE_INPUT_DROPOUT: True,
                DENSE_INPUT_DROPOUT: True
            },
        ),
    )
    outputs_expected = [
        [[10.0, 1.0, 2.0], [20.0, 3.0, 4.0], [30.0, 5.0, 6.0]],
        [[40.0, 1.5, 2.5], [50.0, 3.5, 4.5], [0.0, 0.0, 0.0]],
    ]
    inputs = ([realistic_feature_dense_seq_1, realistic_feature_dense_seq_2], )
    train_outputs = layer(inputs, training=True)
    assert (train_outputs.numpy() == outputs_expected).all()
    test_outputs = layer(inputs, training=False)
    assert (test_outputs.numpy() == outputs_expected).all()
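The expected output here is simply the two dense sequence features concatenated along their last axis; the dropout options have no effect because neither feature is sparse. The fixture values are not shown in this excerpt, but a minimal standalone sketch with assumed inputs chosen to match `outputs_expected` would be:

import tensorflow as tf

# Assumed (hypothetical) fixture values: a (2, 3, 1) and a (2, 3, 2) dense
# sequence feature for a batch of 2 sequences of length 3.
dense_seq_1 = tf.constant([[[10.0], [20.0], [30.0]],
                           [[40.0], [50.0], [0.0]]])
dense_seq_2 = tf.constant([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
                           [[1.5, 2.5], [3.5, 4.5], [0.0, 0.0]]])

# Concatenating along the last axis reproduces `outputs_expected`, shape (2, 3, 3).
concatenated = tf.concat([dense_seq_1, dense_seq_2], axis=-1)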
Example #2
def test_concat_sparse_dense_applies_dropout_to_sparse_densified_input(
) -> None:
    layer_dropout_for_sparse_densified = ConcatenateSparseDenseFeatures(
        attribute=attribute_name,
        feature_type=SEQUENCE,
        feature_type_signature=[
            feature_signature_sparse_1, feature_signature_sparse_1
        ],
        config=dict(model_config_basic, **{
            DENSE_INPUT_DROPOUT: True,
            DROP_RATE: 0.99999999
        }),  # keras dropout doesn't accept values >= 1.0
    )

    inputs = ([feature_sparse_seq_1, feature_sparse_seq_1], )
    expected_outputs_train = tf.zeros(
        (batch_size, max_seq_length, units_sparse_to_dense * 2))

    train_outputs = layer_dropout_for_sparse_densified(inputs, training=True)
    assert np.allclose(train_outputs.numpy(), expected_outputs_train.numpy())

    # We can't check the exact output contents for sparse inputs, but no dropout
    # should be applied at test time, so (unlike at training time) the outputs
    # should not be all zeros.
    test_outputs = layer_dropout_for_sparse_densified(inputs, training=False)
    assert not np.allclose(test_outputs.numpy(),
                           expected_outputs_train.numpy())
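The training-time assertion relies on standard Keras dropout semantics: with a drop rate just below 1.0, practically every unit is zeroed when training=True, and dropout is bypassed entirely when training=False. A minimal sketch of that behaviour using tf.keras.layers.Dropout directly (not the Rasa layer under test):

import numpy as np
import tensorflow as tf

dropout = tf.keras.layers.Dropout(rate=0.99999999)  # rate must stay below 1.0
x = tf.ones((2, 3, 4))

train_out = dropout(x, training=True)   # at this rate, effectively all zeros
test_out = dropout(x, training=False)   # dropout is a no-op at inference time

assert np.allclose(train_out.numpy(), 0.0)
assert np.allclose(test_out.numpy(), x.numpy())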
Example #3
def test_concat_sparse_dense_raises_exception_when_inconsistent_sparse_features() -> None:  # noqa: E501
    with pytest.raises(TFLayerConfigException):
        ConcatenateSparseDenseFeatures(
            attribute=attribute_name,
            feature_type=SEQUENCE,
            feature_type_signature=[
                FeatureSignature(is_sparse=True, units=2, number_of_dimensions=3),
                FeatureSignature(is_sparse=True, units=1, number_of_dimensions=3),
            ],
            config=model_config_basic,
        )
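The layer rejects sparse signatures whose units differ, presumably because all sparse features of one attribute and feature type are densified through a shared weight matrix. For contrast, a hypothetical counterpart (reusing the same fixtures as above) in which both sparse signatures share units=2, so construction should not raise:

ConcatenateSparseDenseFeatures(
    attribute=attribute_name,
    feature_type=SEQUENCE,
    feature_type_signature=[
        FeatureSignature(is_sparse=True, units=2, number_of_dimensions=3),
        FeatureSignature(is_sparse=True, units=2, number_of_dimensions=3),
    ],
    config=model_config_basic,
)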