def __init__(
    self,
    preserve_case: BooleanValue(),
    reduce_len: BooleanValue(),
    strip_handles: BooleanValue(),
):
    self.preserve_case = preserve_case
    self.reduce_len = reduce_len
    self.strip_handles = strip_handles
    NltkTokenizer.__init__(self)
    _TweetTokenizer.__init__(
        self,
        preserve_case=preserve_case,
        reduce_len=reduce_len,
        strip_handles=strip_handles,
    )
def run(self, input: Sentence) -> Seq[Word]:
    return NltkTokenizer.run(self, input)
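# Hedged usage sketch: this wrapper delegates to nltk.tokenize.TweetTokenizer
# (assuming the NltkTokenizer base forwards run() to the mixin's tokenize()),
# so the effect of the three flags can be checked against plain NLTK. The
# import alias below is illustrative only.
from nltk.tokenize import TweetTokenizer as _ExampleTweetTokenizer

_tk = _ExampleTweetTokenizer(preserve_case=False, reduce_len=True, strip_handles=True)
# strip_handles drops "@remy", preserve_case=False lowercases non-emoticon
# tokens, and reduce_len trims runs of 3+ repeated characters down to 3:
print(_tk.tokenize("@remy: This is waaaaayyyy too much for you!!!!!!"))
# -> [':', 'this', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!']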
def __init__(self):
    NltkTokenizer.__init__(self)
    _TreebankWordTokenizer.__init__(self)

def __init__(self):
    NltkTokenizer.__init__(self)
    _ToktokTokenizer.__init__(self)
def run(self, input: Document) -> Seq[Sentence]:
    return NltkTokenizer.run(self, input)
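# The two run() signatures above are the whole surface of these wrappers:
# word-level tokenizers map Sentence -> Seq[Word], document-level ones map
# Document -> Seq[Sentence]. The distinction mirrors the underlying NLTK
# tokenizers; for example, the Toktok tokenizer wrapped above behaves like
# this in plain NLTK (independent of the wrappers):
from nltk.tokenize import ToktokTokenizer as _ExampleToktok

print(_ExampleToktok().tokenize("A wrapper, not a rewrite."))
# -> ['A', 'wrapper', ',', 'not', 'a', 'rewrite', '.']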
def __init__(self):
    NltkTokenizer.__init__(self)
    _SpaceTokenizer.__init__(self)

def __init__(self):
    NltkTokenizer.__init__(self)
    _LineTokenizer.__init__(self)
def __init__(self, strict: BooleanValue()):
    self.strict = strict
    NltkTokenizer.__init__(self)
    _SExprTokenizer.__init__(self, strict=strict)
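# Hedged sketch of what the strict flag controls in the underlying
# nltk.tokenize.SExprTokenizer: strict=True (the default) raises ValueError
# on unbalanced parentheses, while strict=False keeps unmatched material as
# ordinary tokens. The import alias is illustrative only.
from nltk.tokenize import SExprTokenizer as _ExampleSExprTokenizer

print(_ExampleSExprTokenizer(strict=True).tokenize("(a b (c d)) e f (g)"))
# -> ['(a b (c d))', 'e', 'f', '(g)']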
def __init__(self):
    NltkTokenizer.__init__(self)
    _WordPunctTokenizer.__init__(self)

def __init__(self):
    NltkTokenizer.__init__(self)
    _WhitespaceTokenizer.__init__(self)
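# The zero-argument wrappers above differ only in which NLTK tokenizer they
# delegate to; comparing two of the underlying behaviors side by side
# (plain NLTK, aliases illustrative only):
from nltk.tokenize import WhitespaceTokenizer as _WS, WordPunctTokenizer as _WP

_s = "Good muffins cost $3.88."
print(_WS().tokenize(_s))  # -> ['Good', 'muffins', 'cost', '$3.88.']
print(_WP().tokenize(_s))  # -> ['Good', 'muffins', 'cost', '$', '3', '.', '88', '.']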
def __init__(self):
    NltkTokenizer.__init__(self)
    _BlanklineTokenizer.__init__(self)

def __init__(self):
    NltkTokenizer.__init__(self)
    _PunktSentenceTokenizer.__init__(self)
</def __init__>
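# Punkt is an unsupervised sentence splitter; even the untrained instance
# constructed above handles simple text out of the box (a sketch using plain
# NLTK; the alias is illustrative only):
from nltk.tokenize.punkt import PunktSentenceTokenizer as _ExamplePunkt

print(_ExamplePunkt().tokenize("Hello world. How are you? Fine."))
# -> ['Hello world.', 'How are you?', 'Fine.']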
def __init__(self):
    NltkTokenizer.__init__(self)
    _MWETokenizer.__init__(self)
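# Constructed with no arguments (as above), the multiword-expression lexicon
# starts empty and tokens pass through unchanged; merging only happens once
# expressions are registered via add_mwe. Note that MWETokenizer.tokenize
# takes an already-tokenized list, not a raw string:
from nltk.tokenize import MWETokenizer as _ExampleMWETokenizer

_mwe = _ExampleMWETokenizer()
_mwe.add_mwe(("New", "York"))
print(_mwe.tokenize("I love New York".split()))
# -> ['I', 'love', 'New_York']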
def run(self, input: Sentence) -> Seq[Word]:
    return NltkTokenizer.run(self, input)

def run(self, input: Document) -> Seq[Sentence]:
    return NltkTokenizer.run(self, input)
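# A minimal, self-contained sketch of the pattern all of the methods above
# follow (the class names here are stand-ins, since only method bodies appear
# in this section): inherit from both the shared base and an NLTK tokenizer,
# initialize both cooperatively, and expose the NLTK tokenize() through run().
from nltk.tokenize import LineTokenizer as _LineTok

class _SketchBase:
    def run(self, input):
        # Every concrete wrapper also inherits an NLTK mixin that provides
        # tokenize(); run() simply forwards to it.
        return self.tokenize(input)

class _SketchLineTokenizer(_SketchBase, _LineTok):
    def __init__(self):
        _SketchBase.__init__(self)
        _LineTok.__init__(self)

print(_SketchLineTokenizer().run("one\ntwo\nthree"))
# -> ['one', 'two', 'three']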