def test_roboy(capture, basic_input):
    """After preprocessing, the roboy-mention flag is set and logged.

    NOTE(review): a later ``def test_roboy`` in this module shadows this
    definition, so pytest never collects it — confirm which version is
    intended and remove the other.
    """
    nlp_preprocess(basic_input)
    expected = True
    assert basic_input[prop_roboy] == expected
    record = (f"{FILE_NAME}", 'INFO', f"[NLP:roboy]: {expected}")
    capture.check_present(record)
def test_tags(capture, basic_input):
    """Preprocessing attaches the expected Penn Treebank tags and logs them.

    NOTE(review): shadowed by a later ``def test_tags`` in this module —
    pytest only collects the last definition; confirm which is current.
    """
    nlp_preprocess(basic_input)
    expected = ('UH', 'NN', 'PRP$', 'NN', 'VBZ', 'JJ')
    assert basic_input[prop_tags] == expected
    record = (f"{FILE_NAME}", 'INFO', f"[NLP:tags]: {expected}")
    capture.check_present(record)
def test_ner(capture, basic_input):
    """Preprocessing extracts the expected named entities and logs them.

    NOTE(review): shadowed by a later ``def test_ner`` in this module —
    pytest only collects the last definition; confirm which is current.
    """
    nlp_preprocess(basic_input)
    expected = (('roboy', 'ORG'),)
    assert basic_input[prop_ner] == expected
    record = (f"{FILE_NAME}", 'INFO', f"[NLP:ner]: {expected}")
    capture.check_present(record)
def test_postags(capture, basic_input):
    """Preprocessing attaches the expected universal POS tags and logs them.

    NOTE(review): shadowed by a later ``def test_postags`` in this module —
    pytest only collects the last definition; confirm which is current.
    """
    nlp_preprocess(basic_input)
    expected = ('INTJ', 'NOUN', 'DET', 'NOUN', 'VERB', 'ADJ')
    assert basic_input[prop_postags] == expected
    record = (f"{FILE_NAME}", 'INFO', f"[NLP:postags]: {expected}")
    capture.check_present(record)
def test_lemmas(capture, basic_input):
    """Preprocessing attaches the expected lemmas and logs them.

    NOTE(review): shadowed by a later ``def test_lemmas`` in this module —
    pytest only collects the last definition; confirm which is current.
    """
    nlp_preprocess(basic_input)
    expected = ('hello', 'world', '-PRON-', 'name', 'be', 'roboy')
    assert basic_input[prop_lemmas] == expected
    record = (f"{FILE_NAME}", 'INFO', f"[NLP:lemmas]: {expected}")
    capture.check_present(record)
def test_roboy(capture, basic_input):
    """After preprocessing, the "nlp:roboy" flag is set and an ANSI-colored
    INFO record with the module PREFIX is logged.

    NOTE(review): redefines ``test_roboy`` from earlier in this module and
    shadows it — pytest only runs this version; confirm that is intended.
    """
    nlp_preprocess(basic_input)
    expected = True
    assert basic_input["nlp:roboy"] == expected
    record = (f"{FILE_NAME}", '\x1b[1;32mINFO\x1b[0m', f"{PREFIX} [NLP:roboy]: {expected}")
    capture.check_present(record)
def test_tokenization(capture, basic_input):
    """Preprocessing tokenizes the input (lowercased) and logs the tokens.

    NOTE(review): shadowed by a later ``def test_tokenization`` in this
    module — pytest only collects the last definition; confirm which is
    current.
    """
    nlp_preprocess(basic_input)
    expected = ('hello', 'world', 'my', 'name', 'is', 'roboy')
    assert basic_input[prop_tokens] == expected
    record = (f"{FILE_NAME}", 'INFO', f"[NLP:tokens]: {expected}")
    capture.check_present(record)
def test_ner(capture, basic_input):
    """Preprocessing stores NER pairs under "nlp:ner" and logs a colored
    INFO record prefixed with PREFIX.

    NOTE(review): redefines ``test_ner`` from earlier in this module and
    shadows it — pytest only runs this version; confirm that is intended.
    """
    nlp_preprocess(basic_input)
    expected = (('Roboy', 'ORG'),)
    assert basic_input["nlp:ner"] == expected
    record = (f"{FILE_NAME}", '\x1b[1;32mINFO\x1b[0m', f"{PREFIX} [NLP:ner]: {expected}")
    capture.check_present(record)
def test_tags(capture, basic_input):
    """Preprocessing stores Penn Treebank tags under "nlp:tags" and logs a
    colored INFO record prefixed with PREFIX.

    NOTE(review): redefines ``test_tags`` from earlier in this module and
    shadows it — pytest only runs this version; confirm that is intended.
    """
    nlp_preprocess(basic_input)
    expected = ('UH', 'VB', 'PRP$', 'NN', 'VBZ', 'NNP')
    assert basic_input["nlp:tags"] == expected
    record = (f"{FILE_NAME}", '\x1b[1;32mINFO\x1b[0m', f"{PREFIX} [NLP:tags]: {expected}")
    capture.check_present(record)
def test_lemmas(capture, basic_input):
    """Preprocessing stores lemmas under "nlp:lemmas" and logs a colored
    INFO record prefixed with PREFIX.

    NOTE(review): redefines ``test_lemmas`` from earlier in this module and
    shadows it — pytest only runs this version; confirm that is intended.
    """
    nlp_preprocess(basic_input)
    expected = ('hello', 'world', '-PRON-', 'name', 'be', 'roboy')
    assert basic_input["nlp:lemmas"] == expected
    record = (f"{FILE_NAME}", '\x1b[1;32mINFO\x1b[0m', f"{PREFIX} [NLP:lemmas]: {expected}")
    capture.check_present(record)
def test_postags(capture, basic_input):
    """Preprocessing stores universal POS tags under "nlp:postags" and logs
    a colored INFO record prefixed with PREFIX.

    NOTE(review): redefines ``test_postags`` from earlier in this module and
    shadows it — pytest only runs this version; confirm that is intended.
    """
    nlp_preprocess(basic_input)
    expected = ('INTJ', 'VERB', 'ADJ', 'NOUN', 'VERB', 'PROPN')
    assert basic_input["nlp:postags"] == expected
    record = (f"{FILE_NAME}", '\x1b[1;32mINFO\x1b[0m', f"{PREFIX} [NLP:postags]: {expected}")
    capture.check_present(record)
def test_tokenization(capture, basic_input):
    """Preprocessing stores case-preserving tokens under "nlp:tokens" and
    logs a colored INFO record prefixed with PREFIX.

    NOTE(review): redefines ``test_tokenization`` from earlier in this
    module and shadows it — pytest only runs this version; confirm that is
    intended.
    """
    nlp_preprocess(basic_input)
    expected = ('Hello', 'world', 'my', 'name', 'is', 'Roboy')
    assert basic_input["nlp:tokens"] == expected
    record = (f"{FILE_NAME}", '\x1b[1;32mINFO\x1b[0m', f"{PREFIX} [NLP:tokens]: {expected}")
    capture.check_present(record)