Example #1
from spacy.language import Language
from spacy.vocab import Vocab


def test_evaluate_no_pipe(nlp):
    """Test that docs are processed correctly within Language.pipe if the
    component doesn't expose a .pipe method."""
    # A plain function component with no .pipe method (spaCy v2-style API).
    def pipe(doc):
        return doc

    text = "hello world"
    annots = {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}
    nlp = Language(Vocab())  # replaces the fixture-provided nlp with a blank pipeline
    nlp.add_pipe(pipe)
    nlp.evaluate([(text, annots)])
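Note: Example #1 uses the spaCy v2 API, where pipeline components are plain callables and Language.evaluate accepts (text, annotations) tuples. Example #2 below is the same test written against the spaCy v3 API.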
Example #2
from spacy.language import Language
from spacy.training import Example
from spacy.vocab import Vocab


def test_evaluate_no_pipe(nlp):
    """Test that docs are processed correctly within Language.pipe if the
    component doesn't expose a .pipe method."""
    # spaCy v3 equivalent: the component is registered by name, then added.
    @Language.component("test_evaluate_no_pipe")
    def pipe(doc):
        return doc

    text = "hello world"
    annots = {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}
    nlp = Language(Vocab())
    doc = nlp(text)
    nlp.add_pipe("test_evaluate_no_pipe")
    nlp.evaluate([Example.from_dict(doc, annots)])
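For reference, a minimal self-contained sketch of the same v3 call pattern outside a test fixture (assuming spaCy v3, where evaluate returns a plain dict of scores):

from spacy.language import Language
from spacy.training import Example
from spacy.vocab import Vocab

nlp = Language(Vocab())
annots = {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}
example = Example.from_dict(nlp.make_doc("hello world"), annots)
scores = nlp.evaluate([example])  # maps metric names to values
print(scores)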
Example #3
from typing import Dict

from spacy.language import Language
from spacy.scorer import Scorer


def evaluate(cfg: Config, nlp: Language, val_data: InputData) -> Dict:
    # Config, InputData, and report_fail are defined by the host project.
    try:
        scorer: Scorer = nlp.evaluate(val_data, batch_size=cfg.nbatch * 2)
    except Exception:
        report_fail(val_data)  # surface the failing data before re-raising
        raise
    return scorer.scores
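Note: the scorer.scores access above reflects spaCy v2, where Language.evaluate returned a Scorer object; in spaCy v3 the method returns the scores dict directly, as in Example #4 below.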
Example #4
from spacy.language import Language
from spacy.util import fix_random_seed


def test_simple_train():
    # SPAN_KEY and make_get_examples are defined elsewhere in the test module.
    fix_random_seed(0)
    nlp = Language()
    spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY})
    get_examples = make_get_examples(nlp)
    nlp.initialize(get_examples)
    sgd = nlp.create_optimizer()
    assert len(spancat.labels) != 0
    for i in range(40):
        losses = {}
        nlp.update(list(get_examples()), losses=losses, drop=0.1, sgd=sgd)
    doc = nlp("I like London and Berlin.")
    assert doc.spans[spancat.key] == doc.spans[SPAN_KEY]
    assert len(doc.spans[spancat.key]) == 2
    assert doc.spans[spancat.key][0].text == "London"
    scores = nlp.evaluate(get_examples())
    assert f"spans_{SPAN_KEY}_f" in scores
    assert scores[f"spans_{SPAN_KEY}_f"] == 1.0
Example #5
File: test_cli.py Project: kokizzu/spaCy
from spacy.language import Language
from spacy.training import Example

def test_issue4924():
    nlp = Language()
    example = Example.from_dict(nlp.make_doc(""), {})
    nlp.evaluate([example])
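Named after spaCy issue #4924, this regression test simply checks that evaluating an Example built from an empty document does not raise.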
Example #6
def test_evaluate(nlp: Language):
    # TESTCASES is a module-level list of input texts in the host project.
    docs_golds = [(text, {}) for text in TESTCASES]
    nlp.evaluate(docs_golds, batch_size=1)
Example #7
def test_eval(nlp: Language, docs_golds):
    # The docs_golds fixture supplies (text, annotations) pairs; the returned
    # score object exposes per-category text classification metrics.
    score = nlp.evaluate(docs_golds)
    assert score.textcat_per_cat
Example #8
def test_example_batch_eval(nlp: Language, example_gold):
    nlp.evaluate(example_gold)
Example #9
File: test_ner.py Project: tamuhey/camphr
def test_example_batch_eval(nlp: Language, example_gold):
    # In this project the evaluation result is indexable and reports a loss.
    score = nlp.evaluate(example_gold)
    assert score["loss"]