Example #1
def test_transformer_combiner(encoder_outputs: tuple,
                              transformer_output_size: int,
                              output_size: int) -> None:
    encoder_outputs_dict, input_feature_dict = encoder_outputs

    # setup combiner to test
    combiner = TransformerCombiner(
        input_features=input_feature_dict,
        config=load_config(TransformerCombinerConfig)).to(DEVICE)

    # confirm correctness of input_shape property
    assert isinstance(combiner.input_shape, dict)
    for k in encoder_outputs_dict:
        assert k in combiner.input_shape
        assert encoder_outputs_dict[k]["encoder_output"].shape[
            1:] == combiner.input_shape[k]

    # calculate expected hidden size for concatenated tensors
    hidden_size = 0
    for k in encoder_outputs_dict:
        hidden_size += np.prod(
            encoder_outputs_dict[k]["encoder_output"].shape[1:])

    # confirm correctness of concatenated_shape
    assert combiner.concatenated_shape[-1] == hidden_size

    # combine encoder outputs
    combiner_output = combiner(encoder_outputs_dict)

    # check for correctness of combiner output
    check_combiner_output(combiner, combiner_output, BATCH_SIZE)
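The examples above and below rely on shared test scaffolding (BATCH_SIZE, DEVICE, check_combiner_output, and the encoder-output fixtures) that is not shown. The following is a minimal, hypothetical sketch of what that scaffolding could look like; the fixture shapes, the PseudoInputFeature helper, and the exact assertions are assumptions for illustration, not the project's actual code.

import pytest
import torch

# assumed constants used throughout the examples (values are illustrative)
BATCH_SIZE = 16
SEQ_SIZE = 10
OUTPUT_SIZE = 8
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


class PseudoInputFeature:
    # stand-in for a real input feature; combiners only need its output shape
    def __init__(self, feature_name, output_shape):
        self.feature_name = feature_name
        self._output_shape = output_shape

    @property
    def output_shape(self):
        return torch.Size(self._output_shape)


@pytest.fixture
def encoder_outputs():
    # fabricate rank-2 and rank-3 simulated encoder outputs keyed by feature name
    shapes = {
        "feature_1": (BATCH_SIZE, OUTPUT_SIZE),
        "feature_2": (BATCH_SIZE, OUTPUT_SIZE),
        "feature_3": (BATCH_SIZE, SEQ_SIZE, OUTPUT_SIZE),
        "feature_4": (BATCH_SIZE, SEQ_SIZE, OUTPUT_SIZE),
    }
    encoder_outputs_dict = {
        name: {"encoder_output": torch.randn(shape, device=DEVICE)}
        for name, shape in shapes.items()
    }
    input_feature_dict = {
        name: PseudoInputFeature(name, shape[1:])
        for name, shape in shapes.items()
    }
    return encoder_outputs_dict, input_feature_dict


def check_combiner_output(combiner, combiner_output, batch_size):
    # a combiner is expected to return a dict with a "combiner_output" tensor
    # whose leading dimension is the batch size
    assert isinstance(combiner_output, dict)
    assert "combiner_output" in combiner_output
    assert combiner_output["combiner_output"].shape[0] == batch_size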
Example #2
def test_tabtransformer_combiner(
    features_to_test: tuple,
    embed_input_feature_name: Optional[Union[int, str]],
    fc_layers: Optional[list],
    reduce_output: str,
    num_layers: int,
) -> None:
    # retrieve simulated encoder outputs and input features for the test
    encoder_outputs, input_features = features_to_test

    # setup combiner to test
    combiner = TabTransformerCombiner(
        input_features=input_features,
        config=load_config(
            TabTransformerCombinerConfig,
            embed_input_feature_name=embed_input_feature_name,
            # the following emulate parameters passed in from the combiner definition
            num_layers=num_layers,  # number of transformer layers
            fc_layers=fc_layers,  # fully-connected layer definition
            reduce_output=reduce_output,  # sequence reducer
        ),
    ).to(DEVICE)

    # combine encoder outputs
    combiner_output = combiner(encoder_outputs)

    check_combiner_output(combiner, combiner_output, BATCH_SIZE)
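Arguments such as embed_input_feature_name, fc_layers, reduce_output, and num_layers are supplied through pytest parametrization rather than passed by a direct caller. A hypothetical decorator stack for this test might look like the following; the parameter grids are illustrative and need not match the project's actual test matrix.

import pytest

@pytest.mark.parametrize("num_layers", [1, 2])
@pytest.mark.parametrize("reduce_output", ["concat", "sum"])
@pytest.mark.parametrize("fc_layers", [None, [{"output_size": 64}]])
@pytest.mark.parametrize("embed_input_feature_name", [None, 64, "add"])
def test_tabtransformer_combiner(
    features_to_test: tuple,
    embed_input_feature_name,
    fc_layers,
    reduce_output,
    num_layers,
) -> None:
    ...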
Example #3
def test_comparator_combiner(encoder_comparator_outputs: Tuple,
                             fc_layer: Optional[List[Dict]], entity_1: str,
                             entity_2: str) -> None:
    encoder_comparator_outputs_dict, input_features_dict = encoder_comparator_outputs
    # remove encoder outputs not needed by the two entities being compared
    del encoder_comparator_outputs_dict["text_feature_3"]
    del encoder_comparator_outputs_dict["image_feature_3"]
    del encoder_comparator_outputs_dict["text_feature_4"]
    del encoder_comparator_outputs_dict["image_feature_4"]

    # set up combiner to test; use 256 (the default output size) when no fc_layer is given
    output_size = fc_layer[0]["output_size"] if fc_layer else 256
    combiner = ComparatorCombiner(
        input_features_dict,
        config=load_config(ComparatorCombinerConfig,
                           entity_1=entity_1,
                           entity_2=entity_2,
                           fc_layers=fc_layer,
                           output_size=output_size),
    ).to(DEVICE)

    # combine encoder outputs
    combiner_output = combiner(encoder_comparator_outputs_dict)

    # check for correctness of combiner output
    check_combiner_output(combiner, combiner_output, BATCH_SIZE)
Example #4
def test_tabnet_combiner(features_to_test: Dict, size: int,
                         output_size: int) -> None:
    encoder_outputs, input_features = features_to_test

    # setup combiner to test
    combiner = TabNetCombiner(
        input_features,
        config=load_config(
            TabNetCombinerConfig,
            size=size,
            output_size=output_size,
            num_steps=3,
            num_total_blocks=4,
            num_shared_blocks=2,
            dropout=0.1,
        ),
    ).to(DEVICE)

    # combine encoder outputs
    combiner_output = combiner(encoder_outputs)

    # confirm the required keys are present
    assert "combiner_output" in combiner_output
    assert "attention_masks" in combiner_output
    assert "aggregated_attention_masks" in combiner_output

    assert isinstance(combiner_output["combiner_output"], torch.Tensor)
    assert combiner_output["combiner_output"].shape == (BATCH_SIZE,
                                                        output_size)
Example #5
def test_sequence_concat_combiner(encoder_outputs: Tuple,
                                  main_sequence_feature: Optional[str],
                                  reduce_output: Optional[str]) -> None:
    # extract encoder outputs and input feature dictionaries
    encoder_outputs_dict, input_feature_dict = encoder_outputs

    # setup combiner for testing
    combiner = SequenceConcatCombiner(
        input_feature_dict,
        config=load_config(SequenceConcatCombinerConfig,
                           main_sequence_feature=main_sequence_feature,
                           reduce_output=reduce_output),
    ).to(DEVICE)

    # confirm correctness of input_shape property
    assert isinstance(combiner.input_shape, dict)
    for k in encoder_outputs_dict:
        assert k in combiner.input_shape
        assert encoder_outputs_dict[k]["encoder_output"].shape[
            1:] == combiner.input_shape[k]

    # calculate expected hidden size for concatenated tensors
    hidden_size = 0
    for k in encoder_outputs_dict:
        hidden_size += encoder_outputs_dict[k]["encoder_output"].shape[-1]

    # confirm correctness of concatenated_shape
    assert combiner.concatenated_shape[-1] == hidden_size

    # combine encoder outputs
    combiner_output = combiner(encoder_outputs_dict)

    # check for correctness of combiner output
    check_combiner_output(combiner, combiner_output, BATCH_SIZE)
Example #6
def test_concat_combiner(encoder_outputs: Tuple,
                         fc_layer: Optional[List[Dict]], flatten_inputs: bool,
                         number_inputs: Optional[int]) -> None:
    encoder_outputs_dict, input_features_dict = encoder_outputs

    # setup encoder inputs to combiner based on test case
    if not flatten_inputs:
        # clean out rank-3 encoder outputs
        for feature in ["feature_3", "feature_4"]:
            del encoder_outputs_dict[feature]
            del input_features_dict[feature]
        if number_inputs == 1:
            # need only one encoder output for the test
            del encoder_outputs_dict["feature_2"]
            del input_features_dict["feature_2"]
    elif number_inputs == 1:
        # require only one rank-3 encoder output for testing
        for feature in ["feature_1", "feature_2", "feature_3"]:
            del encoder_outputs_dict[feature]
            del input_features_dict[feature]

    # setup combiner to test with pseudo input features
    combiner = ConcatCombiner(input_features_dict,
                              config=load_config(
                                  ConcatCombinerConfig,
                                  fc_layers=fc_layer,
                                  flatten_inputs=flatten_inputs)).to(DEVICE)

    # confirm correctness of input_shape property
    assert isinstance(combiner.input_shape, dict)
    for k in encoder_outputs_dict:
        assert k in combiner.input_shape
        assert encoder_outputs_dict[k]["encoder_output"].shape[
            1:] == combiner.input_shape[k]

    # combine encoder outputs
    combiner_output = combiner(encoder_outputs_dict)

    # check for correctness of combiner output
    check_combiner_output(combiner, combiner_output, BATCH_SIZE)
Example #7
def test_sequence_combiner(encoder_outputs: Tuple,
                           main_sequence_feature: Optional[str], encoder: str,
                           reduce_output: Optional[str]) -> None:
    encoder_outputs_dict, input_features_dict = encoder_outputs

    combiner = SequenceCombiner(
        input_features_dict,
        config=load_config(
            SequenceCombinerConfig,
            main_sequence_feature=main_sequence_feature,
            encoder=encoder,
            reduce_output=reduce_output,
        ),
        # the following emulates encoder parameters passed in from the config file
        output_size=OUTPUT_SIZE,
        num_fc_layers=3,
    ).to(DEVICE)

    # confirm correctness of input_shape property
    assert isinstance(combiner.input_shape, dict)
    for k in encoder_outputs_dict:
        assert k in combiner.input_shape
        assert encoder_outputs_dict[k]["encoder_output"].shape[
            1:] == combiner.input_shape[k]

    # calculate expected hidden size for concatenated tensors
    hidden_size = 0
    for k in encoder_outputs_dict:
        hidden_size += encoder_outputs_dict[k]["encoder_output"].shape[-1]

    # confirm correctness of concatenated_shape
    assert combiner.concatenated_shape[-1] == hidden_size

    # combine encoder outputs
    combiner_output = combiner(encoder_outputs_dict)

    # check for correctness of combiner output
    check_combiner_output(combiner, combiner_output, BATCH_SIZE)