Example #1
def test_entity_type(payload) -> None:
    """
    Evaluate a set of cases from a file.
    """
    body = payload["input"]
    mock_entity_json = payload["mock_entity_json"]
    expected = payload.get("expected")
    exception = payload.get("exception")

    duckling_plugin = DucklingPlugin(
        dest="output.entities",
        dimensions=["people", "time", "date", "duration"],
        locale="en_IN",
        timezone="Asia/Kolkata",
    )

    request_callback = request_builder(mock_entity_json)
    httpretty.register_uri(httpretty.POST,
                           "http://0.0.0.0:8000/parse",
                           body=request_callback)

    workflow = Workflow([duckling_plugin])

    if expected:
        _, output = workflow.run(Input(utterances=body))
        entities = output["entities"]
        for i, entity in enumerate(entities):
            assert entity["entity_type"] == expected[i]["entity_type"]
    elif exception:
        with pytest.raises(EXCEPTIONS[exception]):
            workflow.run(Input(utterances=body))
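These Duckling tests mock the HTTP endpoint through `request_builder`, a helper defined elsewhere in the suite. A minimal sketch of what it could look like, assuming it simply serializes the canned entities back as the response body (httpretty passes `(request, uri, response_headers)` to a callable body and expects `(status, headers, body)` in return):

import json

def request_builder(mock_entity_json, response_code=200):
    def request_callback(request, uri, response_headers):
        # Echo the canned Duckling entities back as the HTTP response.
        return response_code, response_headers, json.dumps(mock_entity_json)
    return request_callback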
Example #2
def test_input_extension():
    instance = Input(utterances="test", reference_time=1644238676772)
    extended = Input.from_dict({
        "utterances": "test",
        "reference_time": 1644238676772
    })
    assert instance == extended
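Since an `Input` built directly and one built via `Input.from_dict` compare equal, the dict round-trip is convenient when payloads come from JSON fixtures. An illustration, assuming `from_dict` accepts the same keys as the constructor:

payload = {"utterances": "test", "reference_time": 1644238676772}
assert Input.from_dict(payload) == Input(**payload)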
Example #3
def test_get_list_entities(payload):
    input_ = payload.get("input")
    lang_ = payload.get("lang")
    expected = payload.get("expected")
    exception = payload.get("exception")
    config = payload.get("config")
    transcripts = [expectation["text"] for expectation in input_]

    if expected:
        list_entity_plugin = ListSearchPlugin(dest="output.entities", **config)

        workflow = Workflow([list_entity_plugin])
        _, output = workflow.run(Input(utterances=transcripts, lang=lang_))
        entities = output["entities"]

        if not entities and expected:
            pytest.fail("No entities found!")

        for i, entity in enumerate(entities):
            assert entity["value"] == expected[i]["value"]
            assert entity["type"] == expected[i]["type"]
            if "score" in expected[i]:
                assert entity["score"] == expected[i]["score"]
    else:
        with pytest.raises(EXCEPTIONS.get(exception)):
            list_entity_plugin = ListSearchPlugin(dest="output.entities",
                                                  **config)

            workflow = Workflow([list_entity_plugin])
            workflow.run(Input(utterances=transcripts, lang=lang_))
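This test, like several others below, resolves exception names from the payload through an `EXCEPTIONS` mapping defined elsewhere in the suite. One plausible shape for it, assuming plain string names keyed to exception classes:

EXCEPTIONS = {
    "TypeError": TypeError,
    "ValueError": ValueError,
    "KeyError": KeyError,
}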
Example #4
def test_plugin_working_cases(payload) -> None:
    """
    An end-to-end example showing how to use `DucklingPlugin` with a `Workflow`.
    """
    body = payload["input"]
    mock_entity_json = payload["mock_entity_json"]
    expected_types = payload.get("expected")
    exception = payload.get("exception")
    duckling_args = payload.get("duckling")
    response_code = payload.get("response_code", 200)
    locale = payload.get("locale")
    reference_time = payload.get("reference_time")
    use_latent = payload.get("use_latent")

    duckling_plugin = DucklingPlugin(dest="output.entities", **duckling_args)

    request_callback = request_builder(mock_entity_json,
                                       response_code=response_code)
    httpretty.register_uri(httpretty.POST,
                           "http://0.0.0.0:8000/parse",
                           body=request_callback)

    workflow = Workflow([duckling_plugin])
    if isinstance(reference_time, str):
        reference_time = make_unix_ts("Asia/Kolkata")(reference_time)

    if expected_types is not None:
        input_ = Input(
            utterances=body,
            locale=locale,
            reference_time=reference_time,
            latent_entities=use_latent,
        )
        _, output = workflow.run(input_)

        if not output["entities"]:
            assert output["entities"] == []

        for i, entity in enumerate(output["entities"]):
            expected_entity_type = expected_types[i]["entity_type"]
            assert entity["entity_type"] == expected_entity_type
    else:
        with pytest.raises(EXCEPTIONS[exception]):
            input_ = Input(
                utterances=body,
                locale=locale,
                reference_time=reference_time,
                latent_entities=use_latent,
            )
            workflow.run(input_)
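`make_unix_ts` above converts an ISO-8601 string into the millisecond epoch integer Duckling expects as a reference time. A minimal sketch under that assumption (the real helper may handle more edge cases):

from datetime import datetime
from zoneinfo import ZoneInfo  # assumes Python 3.9+

def make_unix_ts(tz_name):
    def to_ts(iso_string):
        dt = datetime.fromisoformat(iso_string)
        if dt.tzinfo is None:
            # Localize naive timestamps to the given timezone.
            dt = dt.replace(tzinfo=ZoneInfo(tz_name))
        return int(dt.timestamp() * 1000)
    return to_ts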
Example #5
def test_calibration_model_utility():
    input_ = Input(utterances=[[{
        "transcript": "hello",
        "am_score": -100,
        "lm_score": -200
    }]])
    assert calibration_model.utility(input_, Output()) == ["hello"]
    calibration_model.threshold = float("inf")
    input_ = Input(utterances=[[{
        "transcript": "hello world hello world",
        "am_score": -100,
        "lm_score": -200,
    }]])
    assert calibration_model.utility(input_,
                                     Output()) == ["hello world hello world"]
Example #6
def test_plugin_exit_at_missing_tracker():
    combine_date_time_plugin = CombineDateTimeOverSlots(
        trigger_intents=["_callback_"], dest="output.entities")

    workflow = Workflow(plugins=[combine_date_time_plugin])
    _, output = workflow.run(Input(utterances=[""]))
    assert output[const.ENTITIES] == []
Example #7
def test_duckling_timeout() -> None:
    """
    [summary]

    :return: [description]
    :rtype: [type]
    """
    locale = "en_IN"
    wait_time = 0.1

    def raise_timeout(_, __, headers):
        # Respond slower than the plugin's 0.01s timeout to force a timeout.
        time.sleep(wait_time)
        return 200, headers, "received"

    httpretty.register_uri(httpretty.POST,
                           "http://0.0.0.0:8000/parse",
                           body=raise_timeout)

    duckling_plugin = DucklingPlugin(
        locale=locale,
        dimensions=["time"],
        timezone="Asia/Kolkata",
        threshold=0.2,
        timeout=0.01,
        dest="output.entities",
    )

    workflow = Workflow([duckling_plugin])
    _, output = workflow.run(Input(utterances="test"))
    assert output["entities"] == []
Example #8
def test_max_workers_greater_than_zero() -> None:
    """Checks that "ValueError: max_workers must be greater than 0" is not raised when there are no transcriptions

    When we get an empty transcription from ASR in a production setup, FSM does not send the empty transcription to the SLU service.

    Whereas in a development setup, when one tries to run `slu test` with atleast one data point that does not have any transcriptions(`[]`)
    it will raise a `ValueError: max_workers must be greater than 0` exception.

    The corresponding fix has been done and this test ensures that the exception is not raised when there are no transcriptions even in development setup

    :return: None
    :rtype: None
    """
    locale = "en_IN"

    duckling_plugin = DucklingPlugin(
        dest="output.entities",
        dimensions=["time"],
        timezone="Asia/Kolkata",
        url="https://duckling/parse",
    )

    workflow = Workflow([duckling_plugin])
    alternatives = []  # When ASR returns empty transcriptions.
    try:
        workflow.run(Input(utterances=alternatives, locale=locale))
    except ValueError as exc:
        pytest.fail(f"{exc}")
Example #9
def test_plugin_cases(payload) -> None:
    """
    Test cases where the plugin should work.
    """
    entities = payload.get("inputs", {}).get("entities", [])
    tracker = payload.get("inputs", {}).get("tracker", [])
    expected = payload.get("expected", {})
    duckling_plugin = DucklingPlugin(dimensions=["date", "time"],
                                     timezone="Asia/Kolkata",
                                     dest="output.entities")

    # Note: each pass overwrites `current_turn_entities`, so only the last
    # turn's reshaped entities reach the workflow below.
    for i, entity in enumerate(entities):
        current_turn_entities = duckling_plugin._reshape(entity, i)

    combine_date_time_plugin = CombineDateTimeOverSlots(
        trigger_intents=["_callback_"],
        dest="output.entities",
    )

    workflow = Workflow(plugins=[combine_date_time_plugin])
    workflow.output = Output(entities=current_turn_entities)
    _, output = workflow.run(Input(utterances=[""], slot_tracker=tracker))
    entity_values = [entity["value"] for entity in output[const.ENTITIES]]

    if len(entity_values) != len(expected):
        pytest.fail("Expected {} entities but got {}".format(
            len(expected), len(entity_values)))

    for entity_value, expected_value in zip(entity_values, expected):
        try:
            expected_dt = datetime.fromisoformat(expected_value)
            generated_dt = datetime.fromisoformat(entity_value)
            assert generated_dt == expected_dt, f"Expected {expected_dt} but got {generated_dt}"
        except (ValueError, TypeError):
            assert entity_value == expected_value
Example #10
def test_slot_invalid_intent() -> None:
    """
    Here, we will see that an entity will not fill an intent unless the intent has a slot for it.
    `intent_1` doesn't have a slot for an entity of type `entity_2`.
    """
    intent_name = "intent_1"
    # ... a mock `Intent`
    intent = Intent(name=intent_name, score=0.8)

    # Set up the slot-filler plugin.
    slot_filler = RuleBasedSlotFillerPlugin(rules=rules, dest="output.intents")

    # Create a mock `workflow`
    workflow = Workflow([slot_filler])

    # and a mock `Entity`.
    body = "12th december"
    entity = BaseEntity(
        range={
            "from": 0,
            "to": len(body)
        },
        body=body,
        dim="default",
        entity_type="entity_1",
        values=[{
            "key": "value"
        }],
    )

    # The RuleBasedSlotFillerPlugin expects `Tuple[Intent, List[Entity]]` from the workflow.
    workflow.set("output.intents", [1]).set("output.entities", [entity])

    with pytest.raises(AttributeError):
        workflow.run(Input(utterances=body))
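The slot-filler tests in this section share a module-level `rules` fixture. Judging from the assertions, `intent_1` owns `entity_1_slot` and `intent_2` owns both slots; one plausible shape (the real fixture may differ):

rules = {
    "intent_1": {"entity_1_slot": ["entity_1"]},
    "intent_2": {
        "entity_1_slot": ["entity_1"],
        "entity_2_slot": ["entity_2"],
    },
}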
Example #11
def test_slot_filling_multiple() -> None:
    """
    Let's try filling both the slots this time with fill_multiple=True!
    `intent_2` supports both `entity_1` and `entity_2`.
    """
    intent_name = "intent_2"

    # Set up the slot-filler plugin.
    slot_filler = RuleBasedSlotFillerPlugin(rules=rules,
                                            dest="output.intents",
                                            fill_multiple=True)

    # Create a mock `workflow`
    workflow = Workflow([slot_filler])

    # ... a mock `Intent`
    intent = Intent(name=intent_name, score=0.8)

    # and mock `Entity`-ies.
    body = "12th december"
    entity_1 = BaseEntity(
        range={
            "from": 0,
            "to": len(body)
        },
        body=body,
        dim="default",
        entity_type="entity_1",
        values=[{
            "key": "value"
        }],
    )

    entity_2 = BaseEntity(
        range={
            "from": 0,
            "to": len(body)
        },
        body=body,
        dim="default",
        entity_type="entity_2",
        values=[{
            "key": "value"
        }],
    )

    # The RuleBasedSlotFillerPlugin expects `Tuple[Intent, List[Entity]]` from the workflow.
    workflow.set("output.intents", [intent]).set("output.entities",
                                                 [entity_1, entity_2])
    _, output = workflow.run(Input(utterances=body))

    # `workflow.output[0]` is the `Intent` we created.
    # The `entity_1_slot` and `entity_2_slot` are filled.
    assert output[const.INTENTS][0]["slots"]["entity_1_slot"]["values"] == [
        entity_1.json()
    ]
    assert output[const.INTENTS][0]["slots"]["entity_2_slot"]["values"] == [
        entity_2.json()
    ]
Example #12
def test_plugin_no_set_on_invalid_output():
    arbitrary_plugin = ArbitraryPlugin(
        dest="output.intents",
        guards=[lambda i, _: i.current_state == "COF"],
    )
    workflow = Workflow()
    workflow.input = Input(utterances="hello")
    workflow.output = None
    assert arbitrary_plugin(workflow) is None
Example #13
def test_slot_competition_fill_multiple() -> None:
    """
    What happens when we have two entities of the same type but different value?
    """
    intent_name = "intent_1"

    # Set up the slot-filler plugin.
    slot_filler = RuleBasedSlotFillerPlugin(rules=rules,
                                            dest="output.intents",
                                            fill_multiple=True)

    # Create a mock `workflow`
    workflow = Workflow([slot_filler])

    # ... a mock `Intent`
    intent = Intent(name=intent_name, score=0.8)

    # Here we have two entities which compete for the same slot but have different values.
    body = "12th december"
    entity_1 = BaseEntity(
        range={
            "from": 0,
            "to": len(body)
        },
        body=body,
        dim="default",
        entity_type="entity_1",
        values=[{
            "key": "value_1"
        }],
    )

    entity_2 = BaseEntity(
        range={
            "from": 0,
            "to": len(body)
        },
        body=body,
        dim="default",
        entity_type="entity_1",
        values=[{
            "key": "value_2"
        }],
    )

    workflow.set("output.intents", [intent]).set("output.entities",
                                                 [entity_1, entity_2])
    _, output = workflow.run(Input(utterances=body))

    # `workflow.output[0]` is the `Intent` we created.
    # With `fill_multiple=True`, both competing entities land in `entity_1_slot`.

    assert output[const.INTENTS][0]["slots"]["entity_1_slot"]["values"] == [
        entity_1.json(),
        entity_2.json(),
    ]
Example #14
def test_voting_0_intents():
    """
    The code uses division. So its always good to
    have a test to see if it takes care of division 0.
    """
    intents: List[Intent] = []
    vote_plugin = VotePlugin(dest="output.intents")
    workflow = Workflow([vote_plugin])
    workflow.output = Output(intents=intents)
    _, output = workflow.run(Input(utterances=["some text"]))
    assert output["intents"][0]["name"] == const.S_INTENT_OOS
Example #15
def test_workflow_history_logs() -> None:
    """
    We can execute the workflow.
    """
    workflow = Workflow(
        [MergeASROutputPlugin(dest="input.clf_feature", debug=True)],
        debug=True,
    )
    input_, _ = workflow.run(Input(utterances=["apples"]))
    assert input_["clf_feature"] == ["<s> apples </s>"]
    assert workflow.input is None
    assert workflow.output == Output()
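The `<s> apples </s>` expectation shows the transformation `MergeASROutputPlugin` performs: each transcript is wrapped in sentence-boundary tokens and the alternatives are joined into a single classifier feature. A toy version of that step, assuming this is all the plugin does with the transcripts:

def merge_asr_output(transcripts):
    # Wrap each alternative in <s>...</s> and join into one feature string.
    return [" ".join(f"<s> {t} </s>" for t in transcripts)]

assert merge_asr_output(["apples"]) == ["<s> apples </s>"]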
Example #16
def test_representation_oos():
    intents = [
        Intent(name="a", score=0.99),
        Intent(name="b", score=0.1),
        Intent(name="b", score=0.4),
        Intent(name="b", score=0.31),
        Intent(name="d", score=0.44),
    ]

    vote_plugin = VotePlugin(dest="output.intents")
    workflow = Workflow([vote_plugin])
    workflow.output = Output(intents=intents)
    _, output = workflow.run(Input(utterances=["some text"]))
    assert output["intents"][0]["name"] == "_oos_"
Example #17
def test_get_list_entities(payload):
    input_ = payload.get("input")
    expected = payload.get("expected")
    exception = payload.get("exception")
    config = payload.get("config")
    spacy_mocker = None
    transcripts = [expectation["text"] for expectation in input_]

    if config["style"] == "spacy":
        spacy_mocker = SpacyMocker(input_)

    if expected:
        list_entity_plugin = ListEntityPlugin(dest="output.entities",
                                              spacy_nlp=spacy_mocker,
                                              **config)

        workflow = Workflow([list_entity_plugin])
        _, output = workflow.run(input_=Input(utterances=transcripts))
        entities = output["entities"]

        if not entities and expected:
            pytest.fail("No entities found!")

        for i, entity in enumerate(entities):
            assert entity["value"] == expected[i]["value"]
            assert entity["type"] == expected[i]["type"]
            if "score" in expected[i]:
                assert entity["score"] == expected[i]["score"]
    else:
        with pytest.raises(EXCEPTIONS.get(exception)):
            list_entity_plugin = ListEntityPlugin(dest="output.entities",
                                                  spacy_nlp=spacy_mocker,
                                                  **config)

            workflow = Workflow([list_entity_plugin])
            workflow.run(input_=Input(utterances=transcripts))
Example #18
def test_voting_on_weak_signals():
    """
    Testing all weak intents.
    """
    intents = [
        Intent(name="a", score=0.3),
        Intent(name="a", score=0.2),
        Intent(name="b", score=0.1),
        Intent(name="b", score=0.1),
    ]
    vote_plugin = VotePlugin(dest="output.intents")
    workflow = Workflow([vote_plugin])
    workflow.output = Output(intents=intents)
    _, output = workflow.run(Input(utterances=["some text"]))
    assert output["intents"][0]["name"] == "_oos_"
Example #19
def test_voting_on_conflicts():
    """
    Testing the case with conflicts.
    """
    intents = [
        Intent(name="a", score=1),
        Intent(name="a", score=1),
        Intent(name="b", score=1),
        Intent(name="b", score=1),
    ]
    vote_plugin = VotePlugin(dest="output.intents")
    workflow = Workflow([vote_plugin])
    workflow.output = Output(intents=intents)
    _, output = workflow.run(Input(utterances=["some text"]))
    assert output["intents"][0]["name"] == "_oos_"
Example #20
def test_voting_n_intents():
    """
    Testing the usual case.
    """
    intents = [
        Intent(name="a", score=1),
        Intent(name="a", score=1),
        Intent(name="b", score=0.13),
        Intent(name="a", score=1),
    ]
    vote_plugin = VotePlugin(
        debug=False,
        dest="output.intents",
    )
    workflow = Workflow([vote_plugin])
    workflow.output = Output(intents=intents)
    _, output = workflow.run(Input(utterances=["some text"]))
    assert output["intents"][0]["name"] == "a"
Example #21
def test_arbitrary_plugin() -> None:
    """
    We will test how an arbitrary-class-based plugin works with a workflow.
    """
    # create an instance of `ArbitraryPlugin`.
    arbitrary_plugin = ArbitraryPlugin(dest="output.intents")

    # create an instance of a `Workflow`,
    # registering `arbitrary_plugin` so that the workflow runs it.
    workflow = Workflow([arbitrary_plugin])
    input_ = Input(utterances=[[{"transcript": "hello"}]])

    # This runs all the plugins registered with the workflow,
    # so we can expect our `arbitrary_plugin` to be applied.
    _, output = workflow.run(input_)
    first_intent, *rest = output["intents"]

    # This test would pass only if our plugin works correctly!
    assert first_intent["name"] == "_greeting_"
    assert rest == []
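`ArbitraryPlugin` is a test helper defined elsewhere. Given the assertion above, a minimal sketch of it, assuming the `Plugin` base class calls `utility(input, output)` and writes the return value to `dest` (the import paths and signature are assumptions, not confirmed API):

from dialogy.base import Input, Output, Plugin
from dialogy.types import Intent

class ArbitraryPlugin(Plugin):
    def utility(self, input_: Input, output: Output):
        # Always predict a greeting; enough to exercise the workflow wiring.
        return [Intent(name="_greeting_", score=1.0)]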
Example #22
def test_aggregate_fn_incorrect():
    intents = [
        Intent(name="a", score=0.99),
        Intent(name="a", score=0.99),
        Intent(name="a", score=0.91),
        Intent(name="b", score=0.1),
        Intent(name="c", score=0.31),
        Intent(name="d", score=0.44),
    ]

    vote_plugin = VotePlugin(
        dest="output.intents",
        aggregate_fn=5,
    )
    workflow = Workflow([vote_plugin])
    workflow.output = Output(intents=intents)

    with pytest.raises(TypeError):
        _, output = workflow.run(Input(utterances=[""]))
        assert output["intents"][0]["name"] == "a"
Example #23
def test_duckling_connection_error() -> None:
    """
    [summary]

    :return: [description]
    :rtype: [type]
    """
    locale = "en_IN"

    duckling_plugin = DucklingPlugin(
        locale=locale,
        dimensions=["time"],
        timezone="Asia/Kolkata",
        dest="output.entities",
        threshold=0.2,
        timeout=0.01,
        url="https://duckling/parse",
    )

    workflow = Workflow([duckling_plugin])
    _, output = workflow.run(Input(utterances="test", locale=locale))
    assert output["entities"] == []
Example #24
def test_slot_no_fill() -> None:
    """
    Here, we will see that an entity will not fill an intent unless the intent has a slot for it.
    `intent_1` doesn't have a slot for an entity of type `entity_2`.
    """
    intent_name = "intent_1"

    # Set up the slot-filler plugin.
    slot_filler = RuleBasedSlotFillerPlugin(rules=rules, dest="output.intents")

    # Create a mock `workflow`
    workflow = Workflow([slot_filler])

    # ... a mock `Intent`
    intent = Intent(name=intent_name, score=0.8)

    # and a mock `Entity`.
    body = "12th december"
    entity = BaseEntity(
        range={
            "from": 0,
            "to": len(body)
        },
        body=body,
        dim="default",
        entity_type="entity_2",
        values=[{
            "key": "value"
        }],
    )

    # The RuleBasedSlotFillerPlugin expects `Tuple[Intent, List[Entity]]` from the workflow.
    workflow.set("output.intents", [intent]).set("output.entities", [entity])

    _, output = workflow.run(Input(utterances=body))

    # `workflow.output[0]` is the `Intent` we created.
    # `entity_1_slot` stays unfilled since `intent_1` has no slot for `entity_2`.
    assert "entity_1_slot" not in output[const.INTENTS][0]["slots"]
Example #25
def test_slot_filling() -> None:
    """
    This test case covers a trivial usage of a slot-filler.
    We have `rules` that demonstrate association of intents with entities and their respective slot-configuration.
    """
    intent_name = "intent_1"

    slot_filler = RuleBasedSlotFillerPlugin(rules=rules, dest="output.intents")

    # Create a mock `workflow`
    workflow = Workflow([slot_filler])

    # ... a mock `Intent`
    intent = Intent(name=intent_name, score=0.8)

    # and a mock `Entity`.
    body = "12th december"
    entity = BaseEntity(
        range={
            "from": 0,
            "to": len(body)
        },
        body=body,
        dim="default",
        entity_type="entity_1",
        values=[{
            "key": "value"
        }],
    )

    # The RuleBasedSlotFillerPlugin expects `Tuple[Intent, List[Entity]]` from the workflow.
    workflow.set("output.intents", [intent]).set("output.entities", [entity])

    _, output = workflow.run(Input(utterances=body))
    intent, *_ = output[const.INTENTS]

    # `workflow.output[0]` is the `Intent` we created,
    # so we check that `entity_1_slot` is filled by our mock entity.
    assert intent["slots"]["entity_1_slot"]["values"][0] == entity.json()
Example #26
def test_inference(payload):
    save_module_name = const.XLMR_MODULE
    save_model_name = const.XLMR_MULTI_CLASS_MODEL
    const.XLMR_MODULE = "tests.plugin.text.classification.test_xlmr"
    const.XLMR_MULTI_CLASS_MODEL = "MockClassifier"
    directory = "/tmp"
    file_path = os.path.join(directory, const.LABELENCODER_FILE)
    if os.path.exists(file_path):
        os.remove(file_path)

    transcripts = payload.get("input")
    intent = payload["expected"]["label"]

    xlmr_clf = XLMRMultiClass(
        model_dir=directory,
        dest="output.intents",
        debug=False,
    )

    merge_asr_output_plugin = MergeASROutputPlugin(dest="input.clf_feature",
                                                   debug=False)

    workflow = Workflow([merge_asr_output_plugin, xlmr_clf])

    train_df = pd.DataFrame([
        {
            "data": json.dumps([[{
                "transcript": "yes"
            }]]),
            "labels": "_confirm_",
        },
        {
            "data": json.dumps([[{
                "transcript": "yea"
            }]]),
            "labels": "_confirm_",
        },
        {
            "data": json.dumps([[{
                "transcript": "no"
            }]]),
            "labels": "_cancel_",
        },
        {
            "data": json.dumps([[{
                "transcript": "nope"
            }]]),
            "labels": "_cancel_",
        },
    ])

    workflow.train(train_df)
    assert isinstance(
        xlmr_clf.model,
        MockClassifier), "model should be a MockClassifier after training."

    _, output = workflow.run(input_=Input(utterances=[[{
        "transcript": transcript
    } for transcript in transcripts]]))
    assert output[const.INTENTS][0]["name"] == intent
    assert output[const.INTENTS][0]["score"] > 0.9

    if os.path.exists(file_path):
        os.remove(file_path)
    const.XLMR_MODULE = save_module_name
    const.XLMR_MULTI_CLASS_MODEL = save_model_name
Example #27
def test_inference(payload):
    directory = "/tmp"
    file_path = os.path.join(directory, const.MLPMODEL_FILE)
    if os.path.exists(file_path):
        os.remove(file_path)

    USE = "use"
    fake_args = {
        const.TRAIN: {
            const.NUM_TRAIN_EPOCHS: 5,
            const.USE_GRIDSEARCH: {
                USE: False,
                const.CV: 2,
                const.VERBOSE_LEVEL: 2,
                const.PARAMS: {
                    "activation": ["relu", "tanh"],
                    "hidden_layer_sizes": [(10, ), (2, 2)],
                    "ngram_range": [(1, 1), (1, 2)],
                    "max_iter": [20, 2],
                },
            },
        },
        const.TEST: {},
        const.PRODUCTION: {},
    }

    transcripts = payload.get("input")
    intent = payload["expected"]["label"]

    mlp_clf = MLPMultiClass(
        model_dir=directory,
        dest="output.intents",
        args_map=fake_args,
        debug=False,
    )

    merge_asr_output_plugin = MergeASROutputPlugin(
        dest="input.clf_feature",
        debug=False,
    )

    workflow = Workflow([merge_asr_output_plugin, mlp_clf])

    train_df = pd.DataFrame([
        {
            "data": json.dumps([[{
                "transcript": "yes"
            }]]),
            "labels": "_confirm_",
        },
        {
            "data": json.dumps([[{
                "transcript": "ye"
            }]]),
            "labels": "_confirm_",
        },
        {
            "data": json.dumps([[{
                "transcript": "<s> yes </s> <s> ye </s>"
            }]]),
            "labels": "_confirm_",
        },
        {
            "data": json.dumps([[{
                "transcript": "no"
            }]]),
            "labels": "_cancel_",
        },
        {
            "data": json.dumps([[{
                "transcript": "new"
            }]]),
            "labels": "_cancel_",
        },
        {
            "data": json.dumps([[{
                "transcript": "<s> new </s> <s> no </s>"
            }]]),
            "labels": "_cancel_",
        },
    ])

    workflow.train(train_df)
    _, output = workflow.run(
        Input(utterances=[[{
            "transcript": transcript
        } for transcript in transcripts]]))
    assert output[const.INTENTS][0]["name"] == intent
    assert output[const.INTENTS][0]["score"] > 0.5
    if os.path.exists(file_path):
        os.remove(file_path)
Example #28
def test_canonicalization_utility():
    # `workflow` is a module-level fixture assembled elsewhere in this test file.
    input_ = Input(utterances=[[{"transcript": "hello apple"}]])
    input_, _ = workflow.run(input_)
    assert input_["clf_feature"] == ["MASK <fruits>"]
Example #29
def test_invalid_reftime():
    with pytest.raises(ValueError):
        Input(utterances="test", reference_time=18**15)