Example #1
def test_preserve_sentence_and_sequence_features_old_config():
    attribute = "text"
    message = Message.build("hi there")

    transformers_nlp = HFTransformersNLP({
        "model_name": "bert",
        "model_weights": "bert-base-uncased"
    })
    transformers_nlp.process(message)
    lm_tokenizer = LanguageModelTokenizer()
    lm_tokenizer.process(message)

    lm_featurizer = LanguageModelFeaturizer({"model_name": "gpt2"})
    lm_featurizer.process(message)

    message.set(LANGUAGE_MODEL_DOCS[attribute], None)
    lm_docs = lm_featurizer._get_docs_for_batch([message],
                                                attribute=attribute,
                                                inference_mode=True)[0]
    hf_docs = transformers_nlp._get_docs_for_batch([message],
                                                   attribute=attribute,
                                                   inference_mode=True)[0]
    assert not (message.features[0].features
                == lm_docs[SEQUENCE_FEATURES]).any()
    assert not (message.features[1].features
                == lm_docs[SENTENCE_FEATURES]).any()
    assert (message.features[0].features == hf_docs[SEQUENCE_FEATURES]).all()
    assert (message.features[1].features == hf_docs[SENTENCE_FEATURES]).all()
Example #2
def test_lm_tokenizer_edge_cases(
    model_name,
    model_weights,
    texts,
    expected_tokens,
    expected_indices,
    expected_num_token_ids,
):

    if model_weights is None:
        model_weights_config = {}
    else:
        model_weights_config = {"model_weights": model_weights}
    transformers_config = {"model_name": model_name, **model_weights_config}

    transformers_nlp = HFTransformersNLP(transformers_config)
    lm_tokenizer = LanguageModelTokenizer()

    for text, gt_tokens, gt_indices, gt_num_indices in zip(
            texts, expected_tokens, expected_indices, expected_num_token_ids):

        message = Message.build(text=text)
        transformers_nlp.process(message)
        tokens = lm_tokenizer.tokenize(message, TEXT)
        token_ids = message.get(LANGUAGE_MODEL_DOCS[TEXT])[TOKEN_IDS]

        assert [t.text for t in tokens] == gt_tokens
        assert [t.start for t in tokens] == [i[0] for i in gt_indices]
        assert [t.end for t in tokens] == [i[1] for i in gt_indices]
        assert len(token_ids) == gt_num_indices
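The parameters above are supplied by a `@pytest.mark.parametrize` decorator that the snippet omits. A hypothetical parameter set to show the shape of the data (the values are illustrative, not Rasa's actual test fixtures):

import pytest

# Hypothetical parametrization; the real values live in the omitted decorator.
# The expected counts here are illustrative: with bert-base-uncased, "hi" maps
# to a single wordpiece.
@pytest.mark.parametrize(
    "model_name, model_weights, texts, expected_tokens, "
    "expected_indices, expected_num_token_ids",
    [("bert", None, ["hi"], [["hi"]], [[(0, 2)]], [1])],
)
def test_lm_tokenizer_edge_cases(model_name, model_weights, texts,
                                 expected_tokens, expected_indices,
                                 expected_num_token_ids):
    ...  # body as in the example above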
Example #3
def test_attention_mask(actual_sequence_length: int,
                        max_input_sequence_length: int, zero_start_index: int):
    component = HFTransformersNLP({"model_name": "bert"}, skip_model_load=True)

    attention_mask = component._compute_attention_mask(
        [actual_sequence_length], max_input_sequence_length)
    mask_ones = attention_mask[0][:zero_start_index]
    mask_zeros = attention_mask[0][zero_start_index:]

    assert np.all(mask_ones == 1)
    assert np.all(mask_zeros == 0)
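The masking logic this test exercises can be sketched in a few lines of NumPy; this is a simplified stand-in, not Rasa's `_compute_attention_mask` itself:

import numpy as np

# Simplified sketch: ones over the real token positions, zeros over padding.
def compute_attention_mask(actual_sequence_lengths, max_input_sequence_length):
    return np.array(
        [[1] * length + [0] * (max_input_sequence_length - length)
         for length in actual_sequence_lengths],
        dtype=np.int32,
    )

assert compute_attention_mask([2], 4).tolist() == [[1, 1, 0, 0]]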
Example #4
def train_texts(texts: List[Text], model_name: Text,
                model_weights: Text) -> List[Message]:
    config = create_pretrained_transformers_config(model_name, model_weights)
    whitespace_tokenizer = WhitespaceTokenizer()
    transformer = HFTransformersNLP(config)

    messages = [Message.build(text=text) for text in texts]
    td = TrainingData(messages)

    whitespace_tokenizer.train(td)
    transformer.train(td)
    return messages
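`create_pretrained_transformers_config` is defined elsewhere in the test module. Judging by how its result is passed to `HFTransformersNLP`, a plausible sketch is:

from typing import Dict, Optional, Text

# Sketch only: build the component config, omitting "model_weights" when none
# are given so the component falls back to the model's default weights.
def create_pretrained_transformers_config(
    model_name: Text, model_weights: Optional[Text]
) -> Dict[Text, Text]:
    config = {"model_name": model_name}
    if model_weights:
        config["model_weights"] = model_weights
    return config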
Example #5
def process_texts(texts: List[Text], model_name: Text,
                  model_weights: Text) -> List[Message]:
    config = create_pretrained_transformers_config(model_name, model_weights)
    whitespace_tokenizer = WhitespaceTokenizer()
    transformer = HFTransformersNLP(config)

    messages = []
    for text in texts:
        message = Message.build(text=text)
        whitespace_tokenizer.process(message)
        transformer.process(message)
        messages.append(message)
    return messages
Example #6
def test_log_longer_sequence(sequence_length: int, model_name: Text,
                             should_overflow: bool, caplog):
    transformers_config = {"model_name": model_name}

    transformers_nlp = HFTransformersNLP(transformers_config)

    text = " ".join(["hi"] * sequence_length)
    message = Message(text)

    caplog.set_level(logging.DEBUG)
    transformers_nlp.process(message)
    if should_overflow:
        assert "hi hi hi" in caplog.text
    assert message.get("text_language_model_doc") is not None
Example #7
def test_input_padding(
    token_ids: List[List[int]],
    max_sequence_length_model: int,
    resulting_length: int,
    padding_added: bool,
):
    component = HFTransformersNLP({"model_name": "bert"}, skip_model_load=True)
    component.pad_token_id = 0
    padded_input = component._add_padding_to_batch(token_ids,
                                                   max_sequence_length_model)
    assert len(padded_input[0]) == resulting_length
    if padding_added:
        original_length = len(token_ids[0])
        assert np.all(np.array(padded_input[0][original_length:]) == 0)
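A simplified sketch of the padding behavior these assertions check (not Rasa's exact `_add_padding_to_batch`):

# Simplified stand-in: right-pad each sequence in the batch with the pad token
# id up to the longest sequence, capped at the model's maximum length.
def add_padding_to_batch(batch_token_ids, max_sequence_length_model,
                         pad_token_id=0):
    target_length = min(max(len(ids) for ids in batch_token_ids),
                        max_sequence_length_model)
    return [ids[:target_length] + [pad_token_id] * (target_length - len(ids))
            for ids in batch_token_ids]

assert add_padding_to_batch([[5, 6, 7], [8]], 512) == [[5, 6, 7], [8, 0, 0]]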
Example #8
def test_lm_featurizer_shape_values(model_name, texts, expected_shape,
                                    expected_sequence_vec, expected_cls_vec):
    transformers_config = {"model_name": model_name}

    transformers_nlp = HFTransformersNLP(transformers_config)
    lm_featurizer = LanguageModelFeaturizer()

    messages = []
    for text in texts:
        messages.append(Message.build(text=text))
    td = TrainingData(messages)

    transformers_nlp.train(td)
    lm_featurizer.train(td)

    for index in range(len(texts)):

        computed_sequence_vec, computed_sentence_vec = (
            messages[index].get_dense_features(TEXT, [])
        )
        if computed_sequence_vec:
            computed_sequence_vec = computed_sequence_vec.features
        if computed_sentence_vec:
            computed_sentence_vec = computed_sentence_vec.features

        assert computed_sequence_vec.shape[0] == expected_shape[index][0] - 1
        assert computed_sequence_vec.shape[1] == expected_shape[index][1]
        assert computed_sentence_vec.shape[0] == 1
        assert computed_sentence_vec.shape[1] == expected_shape[index][1]

        # Look at the value of first dimension for a few starting timesteps
        assert np.allclose(
            computed_sequence_vec[:len(expected_sequence_vec[index]), 0],
            expected_sequence_vec[index],
            atol=1e-5,
        )

        # Look at the first value of first five dimensions
        assert np.allclose(computed_sentence_vec[0][:5],
                           expected_cls_vec[index],
                           atol=1e-5)

        intent_sequence_vec, intent_sentence_vec = (
            messages[index].get_dense_features(INTENT, [])
        )
        if intent_sequence_vec:
            intent_sequence_vec = intent_sequence_vec.features
        if intent_sentence_vec:
            intent_sentence_vec = intent_sentence_vec.features

        assert intent_sequence_vec is None
        assert intent_sentence_vec is None
Example #9
def test_lm_tokenizer_number_of_sub_tokens(text, expected_number_of_sub_tokens):
    transformers_config = {"model_name": "bert"}  # Test for one should be enough

    transformers_nlp = HFTransformersNLP(transformers_config)
    lm_tokenizer = LanguageModelTokenizer()

    message = Message(text)

    td = TrainingData([message])

    transformers_nlp.train(td)
    lm_tokenizer.train(td)

    assert [
        t.get(NUMBER_OF_SUB_TOKENS) for t in message.get(TOKENS_NAMES[TEXT])[:-1]
    ] == expected_number_of_sub_tokens
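The omitted parametrization pairs each text with per-word sub-token counts. A hypothetical example: with bert-base-uncased wordpieces, "embeddings" splits into `em ##bed ##ding ##s`, i.e. four sub-tokens (the `[:-1]` above skips the trailing `__CLS__` token that old Rasa tokenizers appended):

import pytest

# Hypothetical parameters; the real ones live in the omitted decorator.
@pytest.mark.parametrize(
    "text, expected_number_of_sub_tokens",
    [("sentence embeddings", [1, 4])],
)
def test_lm_tokenizer_number_of_sub_tokens(text, expected_number_of_sub_tokens):
    ...  # body as in the example above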
Example #10
def test_log_deprecation_warning_with_old_config(caplog: LogCaptureFixture):
    message = Message.build("hi there")

    transformers_nlp = HFTransformersNLP(
        {"model_name": "bert", "model_weights": "bert-base-uncased"}
    )
    transformers_nlp.process(message)

    caplog.set_level(logging.DEBUG)
    lm_tokenizer = LanguageModelTokenizer()
    lm_tokenizer.process(message)
    lm_featurizer = LanguageModelFeaturizer(skip_model_load=True)
    caplog.clear()
    with caplog.at_level(logging.DEBUG):
        lm_featurizer.process(message)

    assert "deprecated component HFTransformersNLP" in caplog.text
Example #11
def test_lm_tokenizer_custom_intent_symbol(text, expected_tokens):
    component_config = {"intent_tokenization_flag": True, "intent_split_symbol": "+"}

    transformers_config = {"model_name": "bert"}  # Test for one should be enough

    transformers_nlp = HFTransformersNLP(transformers_config)
    lm_tokenizer = LanguageModelTokenizer(component_config)

    message = Message(text)
    message.set(INTENT, text)

    td = TrainingData([message])

    transformers_nlp.train(td)
    lm_tokenizer.train(td)

    assert [t.text for t in message.get(TOKENS_NAMES[INTENT])] == expected_tokens
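The parametrization is omitted here as well; an illustrative pair for the `+` split symbol configured above:

import pytest

# Illustrative values: with intent_split_symbol "+" and intent tokenization
# enabled, a composite intent label is split on that character.
@pytest.mark.parametrize(
    "text, expected_tokens",
    [("faq+ask_weather", ["faq", "ask_weather"])],
)
def test_lm_tokenizer_custom_intent_symbol(text, expected_tokens):
    ...  # body as in the example above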
Example #12
def test_lm_tokenizer_edge_cases(model_name, texts, expected_tokens,
                                 expected_indices):

    transformers_config = {"model_name": model_name}

    transformers_nlp = HFTransformersNLP(transformers_config)
    lm_tokenizer = LanguageModelTokenizer()

    for text, gt_tokens, gt_indices in zip(texts, expected_tokens,
                                           expected_indices):

        message = Message.build(text=text)
        transformers_nlp.process(message)
        tokens = lm_tokenizer.tokenize(message, TEXT)

        assert [t.text for t in tokens] == gt_tokens
        assert [t.start for t in tokens] == [i[0] for i in gt_indices]
        assert [t.end for t in tokens] == [i[1] for i in gt_indices]
Example #13
def test_lm_featurizer_shape_values():
    model_name, texts, expected_shape, expected_sequence_vec, expected_cls_vec = samples[3]
    transformers_config = {"model_name": model_name}

    transformers_nlp = HFTransformersNLP(transformers_config)
    lm_featurizer = LanguageModelFeaturizer()

    messages = []
    for text in texts:
        messages.append(Message.build(text=text))
    td = TrainingData(messages)
    show_training_data(td)
    transformers_nlp.train(td)
    show_training_data(td)
    lm_featurizer.train(td)
    show_training_data(td)


    for index in range(len(texts)):
        computed_feature_vec = messages[index].get(DENSE_FEATURE_NAMES[TEXT])
        computed_sequence_vec, computed_sentence_vec = (
            computed_feature_vec[:-1],
            computed_feature_vec[-1],
        )

        assert computed_feature_vec.shape == expected_shape[index]

        # Look at the value of first dimension for a few starting timesteps
        assert np.allclose(
            computed_sequence_vec[: len(expected_sequence_vec[index]), 0],
            expected_sequence_vec[index],
            atol=1e-5,
        )

        # Look at the first value of first five dimensions
        assert np.allclose(
            computed_sentence_vec[:5], expected_cls_vec[index], atol=1e-5
        )

        intent_vec = messages[index].get(DENSE_FEATURE_NAMES[INTENT])

        assert intent_vec is None
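`show_training_data` is a local debugging helper that the snippet does not include; a minimal stand-in consistent with its use here:

# Hypothetical helper: print each example's text and dense text features so
# the effect of every train() call on the training data is visible.
def show_training_data(td):
    for example in td.training_examples:
        print(example.text, example.get(DENSE_FEATURE_NAMES[TEXT]))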
Example #14
def test_long_sequences_extra_padding(
    sequence_embeddings: np.ndarray,
    actual_sequence_lengths: List[int],
    model_name: Text,
    padding_needed: bool,
):
    component = HFTransformersNLP({"model_name": model_name},
                                  skip_model_load=True)
    modified_sequence_embeddings = component._add_extra_padding(
        sequence_embeddings, actual_sequence_lengths)
    if not padding_needed:
        # no padding expected: the embeddings must come back unchanged
        assert np.all(modified_sequence_embeddings == sequence_embeddings)
    else:
        assert modified_sequence_embeddings.shape[1] == actual_sequence_lengths[0]
        assert (modified_sequence_embeddings[0].shape[-1]
                == sequence_embeddings[0].shape[-1])
        zero_embeddings = modified_sequence_embeddings[0][
            sequence_embeddings.shape[1]:
        ]
        assert np.all(zero_embeddings == 0)
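Based on the assertions above, the behavior under test can be sketched as follows (a stand-in, not Rasa's `_add_extra_padding`): when a sequence was truncated to the model window, zero vectors are appended so the embeddings line up with the original token counts.

import numpy as np

# Simplified stand-in for the behavior under test.
def add_extra_padding(sequence_embeddings, actual_sequence_lengths):
    batch_size, seq_len, dim = sequence_embeddings.shape
    max_length = max(actual_sequence_lengths)
    if seq_len >= max_length:
        return sequence_embeddings  # nothing to pad
    padded = np.zeros((batch_size, max_length, dim),
                      dtype=sequence_embeddings.dtype)
    padded[:, :seq_len, :] = sequence_embeddings
    return padded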
Example #15
def test_hf_transformer_edge_cases(
    model_name, model_weights, texts, expected_tokens, expected_indices
):

    if model_weights is None:
        model_weights_config = {}
    else:
        model_weights_config = {"model_weights": model_weights}
    transformers_config = {"model_name": model_name, **model_weights_config}

    hf_transformer = HFTransformersNLP(transformers_config)
    whitespace_tokenizer = WhitespaceTokenizer()

    for text, gt_tokens, gt_indices in zip(texts, expected_tokens, expected_indices):

        message = Message.build(text=text)
        tokens = whitespace_tokenizer.tokenize(message, TEXT)
        message.set(TOKENS_NAMES[TEXT], tokens)
        hf_transformer.process(message)

        assert [t.text for t in tokens] == gt_tokens
        assert [t.start for t in tokens] == [i[0] for i in gt_indices]
        assert [t.end for t in tokens] == [i[1] for i in gt_indices]
Example #16
def test_lm_featurizer_shape_values():
    model_name, texts, expected_shape, expected_sequence_vec, expected_cls_vec = samples[0]
    transformers_config = {"model_name": model_name}

    transformers_nlp_bert = HFTransformersNLP({"model_name": "bert"})
    transformers_nlp_gpt = HFTransformersNLP({"model_name": "gpt"})
    lm_featurizer = LanguageModelFeaturizer()

    messages = []
    for text in texts:
        messages.append(Message.build(text=text))
    td = TrainingData(messages)
    show_training_data(td)
    transformers_nlp_bert.train(td)
    show_training_data(td)
    transformers_nlp_gpt.train(td)
    show_training_data(td)
    lm_featurizer.train(td)
    show_training_data(td)
Example #17
def test_sequence_length_overflow_train(
    input_sequence_length: int, model_name: Text, should_overflow: bool
):
    component = HFTransformersNLP({"model_name": model_name}, skip_model_load=True)
    message = Message.build(text=" ".join(["hi"] * input_sequence_length))
    if should_overflow:
        with pytest.raises(RuntimeError):
            component._validate_sequence_lengths(
                [input_sequence_length], [message], "text", inference_mode=False
            )
    else:
        component._validate_sequence_lengths(
            [input_sequence_length], [message], "text", inference_mode=False
        )
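The omitted parametrization contrasts inputs inside and beyond the model window; hypothetical values using BERT's 512-token limit:

import pytest

# Hypothetical parameter sets: 20 "hi" tokens fit well within BERT's window,
# 1000 do not and should raise during training.
@pytest.mark.parametrize(
    "input_sequence_length, model_name, should_overflow",
    [(20, "bert", False), (1000, "bert", True)],
)
def test_sequence_length_overflow_train(
    input_sequence_length, model_name, should_overflow
):
    ...  # body as in the example above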
Example #18
from rasa.nlu.constants import TEXT, SPACY_DOCS
from rasa.nlu.tokenizers.convert_tokenizer import ConveRTTokenizer
from rasa.nlu.tokenizers.lm_tokenizer import LanguageModelTokenizer
from rasa.nlu.tokenizers.mitie_tokenizer import MitieTokenizer
from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.nlu.training_data import Message
from rasa.nlu.utils.hugging_face.hf_transformers import HFTransformersNLP

# `logging_setup()` and `spacy_nlp` are assumed to be defined earlier in the
# original script (a logging helper and a loaded spaCy model, respectively).
logger = logging_setup()

test_input = "Okay, pick up this yellow banana for me."
message = Message(test_input)

tk = WhitespaceTokenizer()
tokens = tk.tokenize(message, attribute=TEXT)
logger.info('Whitespace: {}'.format([t.text for t in tokens]))

tk = SpacyTokenizer()

message.set(SPACY_DOCS[TEXT], spacy_nlp(test_input))
tokens = tk.tokenize(message, attribute=TEXT)
logger.info('SpaCy: {}'.format([t.text for t in tokens]))

tk = MitieTokenizer()
tokens = tk.tokenize(message, attribute=TEXT)
logger.info('Mitie: {}'.format([t.text for t in tokens]))

tk = ConveRTTokenizer()
tokens = tk.tokenize(message, attribute=TEXT)
logger.info('ConveRT: {}'.format([t.text for t in tokens]))

tk = LanguageModelTokenizer()
transformers_nlp = HFTransformersNLP({"model_name": "bert"})
transformers_nlp.process(message)
tokens = tk.tokenize(message, attribute=TEXT)
logger.info('BERT: {}'.format([t.text for t in tokens]))