Example #1
    def __init__(
            self,
            dm: Discrete(min=0, max=2),
            dbow_words: Discrete(min=-100, max=100),
            dm_concat: Discrete(min=-100, max=100),
            dm_tag_count: Discrete(min=0, max=2),
            alpha: Continuous(min=0.001, max=0.075),
            epochs: Discrete(min=2, max=10),
            window: Discrete(min=2, max=10),
            inner_tokenizer: algorithm(Sentence(), List(Word())),
            inner_stemmer: algorithm(Word(), Stem()),
            inner_stopwords: algorithm(List(Word()), List(Word())),
            lowercase: Boolean(),
            stopwords_remove: Boolean(),
    ):

        self.inner_tokenizer = inner_tokenizer
        self.inner_stemmer = inner_stemmer
        self.inner_stopwords = inner_stopwords
        self.lowercase = lowercase
        self.stopwords_remove = stopwords_remove

        super().__init__(
            dm=dm,
            dbow_words=dbow_words,
            dm_concat=dm_concat,
            dm_tag_count=dm_tag_count,
            alpha=alpha,
            epochs=epochs,
            window=window,
        )
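The annotations in the signature above (Discrete, Continuous, Boolean, and the nested algorithm(...) dependencies) are not ordinary Python types; they declare the space of admissible hyperparameter values. A minimal sketch of how such annotation classes could be modeled, assuming a simple sample() convention (illustrative only, not the library's actual implementation):

import random

# Hypothetical, simplified annotation classes: each one only knows how to
# sample a value from the space it declares.
class Discrete:
    def __init__(self, min=0, max=10):
        self.min, self.max = min, max

    def sample(self):
        return random.randint(self.min, self.max)   # integer hyperparameter

class Continuous:
    def __init__(self, min=0.0, max=1.0):
        self.min, self.max = min, max

    def sample(self):
        return random.uniform(self.min, self.max)   # float hyperparameter

class Boolean:
    def sample(self):
        return random.choice([True, False])

class Categorical:
    def __init__(self, *options):
        self.options = options

    def sample(self):
        return random.choice(self.options)

# e.g. Discrete(min=2, max=10).sample() -> a candidate value for epochs or window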
Example #2
 def __init__(self, Trained: Boolean(), N: Discrete(min=500, max=2000),
              C: Boolean()):
     self.Trained = Trained
     self.N = N
     self.C = C
     NltkTrainedTagger.__init__(self)
     _TnT.__init__(self, Trained=Trained, N=N, C=C)
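The wrapper above forwards Trained, N and C to NLTK's TnT tagger. A short sketch of the wrapped NLTK object on its own, assuming nltk is installed:

from nltk.tag import tnt

# The underlying NLTK TnT tagger configured by the wrapper above.
tagger = tnt.TnT(Trained=False, N=1000, C=False)
# tagger.train(tagged_sentences)        # sentences as lists of (word, tag) pairs
# tagger.tag(["a", "new", "sentence"])  # tag an untagged token list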
Example #3
 def __init__(self, preserve_case: Boolean(), reduce_len: Boolean(),
              strip_handles: Boolean()):
     self.preserve_case = preserve_case
     self.reduce_len = reduce_len
     self.strip_handles = strip_handles
     NltkTokenizer.__init__(self)
     _TweetTokenizer.__init__(
         self,
         preserve_case=preserve_case,
         reduce_len=reduce_len,
         strip_handles=strip_handles,
     )
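Example #3 passes its three flags straight to NLTK's TweetTokenizer. For reference, the wrapped tokenizer on its own, assuming nltk is installed:

from nltk.tokenize import TweetTokenizer

# The underlying NLTK tokenizer configured by the wrapper above.
tok = TweetTokenizer(preserve_case=False, reduce_len=True, strip_handles=True)
tokens = tok.tokenize("@user Soooooo happyyyy!!! http://example.com")
# handles stripped, elongated words shortened, text lowercased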
Example #4
    def __init__(
            self,
            lowercase: Boolean(),
            stopwords_remove: Boolean(),
            binary: Boolean(),
            inner_tokenizer: algorithm(Sentence(), List(Word())),
            inner_stemmer: algorithm(Word(), Stem()),
            inner_stopwords: algorithm(List(Word()), List(Word())),
    ):
        self.stopwords_remove = stopwords_remove
        self.inner_tokenizer = inner_tokenizer
        self.inner_stemmer = inner_stemmer
        self.inner_stopwords = inner_stopwords

        SklearnTransformer.__init__(self)
        _CountVectorizer.__init__(self, lowercase=lowercase, binary=binary)
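Here lowercase and binary go straight to scikit-learn's CountVectorizer, while the inner tokenizer, stemmer and stopword algorithms are kept on the wrapper itself. The wrapped scikit-learn transformer on its own, as a sketch:

from sklearn.feature_extraction.text import CountVectorizer

# The underlying scikit-learn transformer configured by the wrapper above.
vec = CountVectorizer(lowercase=True, binary=True)
X = vec.fit_transform(["Spam spam spam", "ham and eggs"])
print(X.toarray())  # binary=True clips every count to 0/1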
Example #5
 def __init__(
     self,
     tokenizer: algorithm(Sentence(), List(Word())),
     feature_extractor: algorithm(Word(), Flags()),
     include_text: Boolean(),
 ):
     self.tokenizer = tokenizer
     self.feature_extractor = feature_extractor
     self.include_text = include_text
Example #6
def _get_arg_values(arg, value, cls):
    # bool is checked before int, because bool is a subclass of int
    if isinstance(value, bool):
        return Boolean()
    if isinstance(value, int):
        return Discrete(*_get_integer_values(arg, value, cls))
    if isinstance(value, float):
        return Continuous(*_get_float_values(arg, value, cls))
    if isinstance(value, str):
        values = _find_parameter_values(arg, cls)
        return Categorical(*values) if values else None
    return None
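_get_arg_values maps a default argument value to an annotation according to its runtime type. The helpers it relies on are not shown here; the hypothetical stubs below (names and return shapes are assumptions) only illustrate the expected contract: the integer and float helpers yield (min, max) bounds, the string helper yields admissible options or nothing, and the annotation classes are assumed to be in scope.

# Hypothetical stand-ins for the helpers used above, for illustration only.
def _get_integer_values(arg, value, cls):
    return max(0, value // 2), value * 2        # a range around the default

def _get_float_values(arg, value, cls):
    return value / 2.0, value * 2.0

def _find_parameter_values(arg, cls):
    return None                                 # no known string options

print(type(_get_arg_values("n_estimators", 100, object)).__name__)  # Discrete
print(type(_get_arg_values("tol", 1e-3, object)).__name__)          # Continuous
print(type(_get_arg_values("shuffle", True, object)).__name__)      # Boolean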
Example #7
 def __init__(
         self,
         featurewise_center: Boolean(),
         samplewise_center: Boolean(),
         featurewise_std_normalization: Boolean(),
         samplewise_std_normalization: Boolean(),
         rotation_range: Discrete(0, 15),
         width_shift_range: Continuous(0, 0.25),
         height_shift_range: Continuous(0, 0.25),
         shear_range: Continuous(0, 15),
         zoom_range: Continuous(0, 0.25),
         horizontal_flip: Boolean(),
         vertical_flip: Boolean(),
 ):
     super().__init__(
         featurewise_center=featurewise_center,
         samplewise_center=samplewise_center,
         featurewise_std_normalization=featurewise_std_normalization,
         samplewise_std_normalization=samplewise_std_normalization,
         rotation_range=rotation_range,
         width_shift_range=width_shift_range,
         height_shift_range=height_shift_range,
         shear_range=shear_range,
         zoom_range=zoom_range,
         horizontal_flip=horizontal_flip,
         vertical_flip=vertical_flip,
     )
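Example #7 appears to subclass Keras' ImageDataGenerator, bounding each augmentation parameter. The underlying Keras object on its own, assuming tensorflow.keras is available:

from tensorflow.keras.preprocessing.image import ImageDataGenerator

# The augmentation pipeline the wrapper above parameterizes.
datagen = ImageDataGenerator(
    rotation_range=10,        # within Discrete(0, 15)
    width_shift_range=0.1,    # within Continuous(0, 0.25)
    height_shift_range=0.1,
    zoom_range=0.1,
    horizontal_flip=True,
)
# datagen.flow(x_train, y_train, batch_size=32) then yields augmented batches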
Example #8
def _get_arg_values(arg, value, cls):
    print(f"Computing valid values for: {arg}={value}")

    try:
        if isinstance(value, bool):
            annotation = Boolean()
        elif isinstance(value, int):
            annotation = _get_integer_values(arg, value, cls)
        elif isinstance(value, float):
            annotation = _get_float_values(arg, value, cls)
        elif isinstance(value, str):
            annotation = _find_parameter_values(arg, cls)
        else:
            annotation = None
    except Exception:  # avoid a bare except, which would also swallow KeyboardInterrupt
        annotation = None

    print(f"Found annotation {arg}:{annotation}")

    return annotation
Example #9
 def __init__(
         self,
         language: Categorical("en", "es"),
         extract_pos: Boolean(),
         extract_lemma: Boolean(),
         extract_pos_tag: Boolean(),
         extract_dep: Boolean(),
         extract_entity: Boolean(),
         extract_details: Boolean(),
         extract_sentiment: Boolean(),
 ):
     self.language = language
     self.extract_pos = extract_pos
     self.extract_lemma = extract_lemma
     self.extract_pos_tag = extract_pos_tag
     self.extract_dep = extract_dep
     self.extract_entity = extract_entity
     self.extract_details = extract_details
     self.extract_sentiment = extract_sentiment
     self._nlp = None
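The self._nlp = None slot suggests the spaCy pipeline is loaded lazily. A sketch of what that load might look like, assuming the standard spaCy model names (an assumption, since the loading code is not shown):

import spacy

# Hypothetical lazy loading of the pipeline for the language chosen above.
def _load_nlp(language):
    model = "en_core_web_sm" if language == "en" else "es_core_news_sm"
    return spacy.load(model)

# doc = _load_nlp("en")("Some text to analyze")
# token.pos_, token.lemma_, token.dep_ and doc.ents match the extract_* flags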
Example #10
 def __init__(self, strip_prefix_flag: Boolean()):
     self.strip_prefix_flag = strip_prefix_flag
     NltkStemmer.__init__(self)
     _LancasterStemmer.__init__(self, strip_prefix_flag=strip_prefix_flag)
Example #11
 def __init__(self, strict: Boolean()):
     self.strict = strict
     NltkTokenizer.__init__(self)
     _SExprTokenizer.__init__(self, strict=strict)
Example #12
 def __init__(self, case_insensitive: Boolean()):
     self.case_insensitive = case_insensitive
     NltkStemmer.__init__(self)
     _Cistem.__init__(self, case_insensitive=case_insensitive)
Example #13
 def __init__(self, ngram: Discrete(1, 3), use_idf: Boolean()):
     super().__init__(ngram_range=(1, ngram), use_idf=use_idf)
     self.ngram = ngram
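Example #13 forwards its parameters to what looks like scikit-learn's TfidfVectorizer, turning the single ngram annotation into a (1, ngram) range. The wrapped vectorizer on its own, as a sketch:

from sklearn.feature_extraction.text import TfidfVectorizer

# The underlying scikit-learn vectorizer configured by the wrapper above.
vec = TfidfVectorizer(ngram_range=(1, 2), use_idf=True)
X = vec.fit_transform(["the cat sat", "the dog sat on the cat"])
print(X.shape)  # (documents, unigram + bigram features)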