def tokenize(s, *args, **kwargs):
    """ Returns a list of sentences, where punctuation marks have been split from words.
    """
    return parser.find_tokens(text_type(s), *args, **kwargs)
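# Usage sketch (an assumption: this module is imported as in pattern's English
# package, e.g. `from pattern.en import tokenize`, so a `parser` with
# find_tokens() is already set up; the expected output reflects how the parser
# splits punctuation and sentence boundaries):
#
#     >>> tokenize("I ate pizza. Then I slept.")
#     ['I ate pizza .', 'Then I slept .']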
def split(s, token=[WORD, POS, CHUNK, PNP]):
    """ Returns a parsed Text from the given parsed string.
    """
    return Text(text_type(s), token)
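# Usage sketch (an assumption: `parse` comes from the same module, e.g.
# `from pattern.en import parse, split`, and Sentence/Word with .string and
# .type are the tree objects this module builds on):
#
#     >>> s = parse("The cat sat on the mat.")
#     >>> for sentence in split(s):
#     ...     for word in sentence.words:
#     ...         print(word.string, word.type)   # token + its part-of-speech tag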