Example #1
    def __init__(
        self,
        language: Categorical(
            "danish",
            "dutch",
            "english",
            "finnish",
            "french",
            "german",
            "hungarian",
            "italian",
            "norwegian",
            "portuguese",
            "russian",
            "spanish",
            "swedish",
            "turkish",
        ),
    ):
        self.language = language

        nltk.download("stopwords")
        from nltk.corpus import stopwords

        self.words = stopwords.words(language)
        SklearnWrapper.__init__(self)
Example #2
 def __init__(
         self,
         preprocessor: KerasImagePreprocessor,
         optimizer: Categorical("sgd", "adam", "rmsprop"),
         **kwargs,
 ):
     self.preprocessor = preprocessor
     super().__init__(optimizer=optimizer, **kwargs)
Example #3
    def __init__(
        self,
        decode: Categorical("dense", "crf"),
        optimizer: Categorical("sgd", "adam", "rmsprop"),
        grammar=None,
        **kwargs,
    ):
        self._classes = None
        self._num_classes = None

        if decode not in ["dense", "crf"]:
            raise ValueError(f"Invalid decode={decode}")

        self.decode = decode
        super().__init__(grammar=grammar or self._build_grammar(),
                         optimizer=optimizer,
                         **kwargs)
Example #4
 def __init__(
         self,
         units: Discrete(32, 1024),
         activation: Categorical("tanh", "sigmoid", "relu", "linear"),
         recurrent_activation: Categorical("tanh", "sigmoid", "relu",
                                           "linear"),
         dropout: Continuous(0, 0.5),
         recurrent_dropout: Continuous(0, 0.5),
 ):
     super().__init__(
         units=units,
         activation=activation,
         recurrent_activation=recurrent_activation,
         dropout=dropout,
         recurrent_dropout=recurrent_dropout,
         return_sequences=False,
     )
Example #5
 def __init__(self,
              optimizer: Categorical("sgd", "adam", "rmsprop"),
              grammar=None,
              **kwargs):
     self._classes = None
     self._num_classes = None
     super().__init__(grammar=grammar or self._build_grammar(),
                      optimizer=optimizer,
                      **kwargs)
Example #6
def _get_args(cls):
    full_specs = inspect.getfullargspec(cls.__init__)

    args = full_specs.args
    specs = full_specs.defaults

    if not args or not specs:
        return {}

    non_kwargs = [arg for arg in args[: -len(specs)] if arg != "self"]

    args = args[-len(specs) :]

    args_map = {k: v for k, v in zip(args, specs)}

    drop_args = [
        "url",
        "n_jobs",
        "max_iter",
        "class_weight",
        "warm_start",
        "copy_X",
        "copy_x",
        "copy",
        "eps",
        "ignore_stopwords",
        "verbose",
        "load",
    ]

    for arg in drop_args:
        args_map.pop(arg, None)

    result = {}

    for arg, value in args_map.items():
        values = _get_arg_values(arg, value, cls)
        if not values:
            continue
        result[arg] = values

    for arg in non_kwargs:
        # special handling of language
        if arg.lower() == "language":
            values = _find_language_values(cls)
            if values:
                result[arg] = Categorical(*values)
                continue

        if arg.lower() == "train" and _is_tagger(cls):
            continue

        raise Exception(f"No values found for positional argument {arg}")
    return result
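A minimal, self-contained sketch of the signature introspection behind _get_args above: inspect.getfullargspec aligns the trailing defaults with the last arguments, which splits the signature into required positional arguments and keyword arguments whose defaults can be mapped to search spaces. ExampleEstimator is a hypothetical stand-in, not a class from the library.

import inspect

class ExampleEstimator:
    def __init__(self, language, alpha=0.5, fit_intercept=True, penalty="l2"):
        pass

spec = inspect.getfullargspec(ExampleEstimator.__init__)
# the defaults tuple aligns with the *last* len(defaults) entries of spec.args
kwargs_with_defaults = dict(zip(spec.args[-len(spec.defaults):], spec.defaults))
positional = [a for a in spec.args[: -len(spec.defaults)] if a != "self"]

print(positional)            # ['language']
print(kwargs_with_defaults)  # {'alpha': 0.5, 'fit_intercept': True, 'penalty': 'l2'}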
Example #7
def _get_arg_values(arg, value, cls):
    if isinstance(value, bool):
        return Boolean()
    if isinstance(value, int):
        return Discrete(*_get_integer_values(arg, value, cls))
    if isinstance(value, float):
        return Continuous(*_get_float_values(arg, value, cls))
    if isinstance(value, str):
        values = _find_parameter_values(arg, cls)
        return Categorical(*values) if values else None
    return None
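A small, self-contained illustration of the type dispatch in _get_arg_values, with plain strings standing in for the Boolean/Discrete/Continuous/Categorical spaces (hypothetical output, not the library's API). The ordering matters: bool is a subclass of int, so the bool check has to come first.

def describe_default(value):
    # bool must be tested before int: isinstance(True, int) is True
    if isinstance(value, bool):
        return "Boolean()"
    if isinstance(value, int):
        return f"Discrete(... around {value})"
    if isinstance(value, float):
        return f"Continuous(... around {value})"
    if isinstance(value, str):
        return f"Categorical(... options mined from the docstring, default {value!r})"
    return None  # unsupported default types are skipped

for default in (True, 100, 1e-3, "l2", None):
    print(type(default).__name__, "->", describe_default(default))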
Example #8
 def __init__(
         self,
         merge_mode: Categorical("sum", "mul", "concat", "ave"),
         units: Discrete(32, 1024),
         activation: Categorical("tanh", "sigmoid", "relu", "linear"),
         recurrent_activation: Categorical("tanh", "sigmoid", "relu",
                                           "linear"),
         dropout: Continuous(0, 0.5),
         recurrent_dropout: Continuous(0, 0.5),
 ):
     super().__init__(
         layer=_LSTM(
             units=units,
             activation=activation,
             recurrent_activation=recurrent_activation,
             dropout=dropout,
             recurrent_dropout=recurrent_dropout,
             return_sequences=False,
         ),
         merge_mode=merge_mode,
     )
Example #9
 def __init__(
     self,
     function: Categorical(
         "elu",
         "selu",
         "relu",
         "tanh",
         "sigmoid",
         "hard_sigmoid",
         "exponential",
         "linear",
     ),
 ):
     self.function = function
     super().__init__(activation=function)
Example #10
 def __init__(
         self,
         filters: Discrete(2, 8),
         kernel_size: Categorical(3, 5, 7),
         l1: Continuous(0, 1e-3),
         l2: Continuous(0, 1e-3),
 ):
     self.l1 = l1
     self.l2 = l2
     super().__init__(
          filters=2**filters,  # the sampled value is an exponent; 2**filters is the actual filter count
         kernel_size=(kernel_size, kernel_size),
         kernel_regularizer=regularizers.l1_l2(l1=l1, l2=l2),
         padding="same",
         data_format="channels_last",
     )
Example #11
 def __init__(
     self,
     grammar: GraphGrammar,
     optimizer: Categorical("sgd", "adam", "rmsprop"),
     epochs=10,
     early_stop=3,
     validation_split=0.1,
     **compile_kwargs,
 ):
     self.optimizer = optimizer
     self._grammar = grammar
     self._epochs = epochs
     self._compile_kwargs = compile_kwargs
     self._model: Optional[Model] = None
     self._mode = "train"
     self._graph = None
     self._validation_split = validation_split
     self._early_stop = early_stop
Example #12
def _find_parameter_values(parameter, cls):
    documentation = []
    lines = cls.__doc__.split("\n")

    while lines:
        l = lines.pop(0)
        if l.strip().startswith(parameter):
            documentation.append(l)
            tabs = l.index(parameter)
            break

    while lines:
        l = lines.pop(0)

        if not l.strip():
            continue

        if l.startswith(" " * (tabs + 1)):
            documentation.append(l)
        else:
            break

    options = set(re.findall(r"'(\w+)'", " ".join(documentation)))
    valid = []
    invalid = []
    skip = set(["deprecated", "auto_deprecated", "precomputed"])

    for opt in options:
        opt = opt.lower()

        if opt in skip:
            continue

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            if _try(cls, parameter, opt):
                valid.append(opt)
            else:
                invalid.append(opt)

    if valid:
        return Categorical(*sorted(valid))

    return None
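A self-contained sketch of the docstring-mining step used by _find_parameter_values: locate the block that documents a parameter by its indentation and collect the quoted option values with a regex. The sample docstring and the quoted_options helper are hypothetical; the real function then validates each candidate via _try before keeping it.

import re

DOC = """\
    kernel : str, default='rbf'
        Specifies the kernel type. Must be one of 'linear', 'poly',
        'rbf' or 'sigmoid'.
    degree : int, default=3
        Degree of the polynomial kernel function.
"""

def quoted_options(doc, parameter):
    lines = doc.split("\n")
    block, indent = [], None
    for line in lines:
        if indent is None:
            # find the line that introduces the parameter
            if line.strip().startswith(parameter):
                block.append(line)
                indent = line.index(parameter)
        elif not line.strip():
            continue
        elif line.startswith(" " * (indent + 1)):
            # deeper-indented lines belong to the parameter's description
            block.append(line)
        else:
            break
    return sorted(set(re.findall(r"'(\w+)'", " ".join(block))))

print(quoted_options(DOC, "kernel"))  # ['linear', 'poly', 'rbf', 'sigmoid']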
Example #13
 def __init__(
         self,
         language: Categorical("en", "es"),
         extract_pos: Boolean(),
         extract_lemma: Boolean(),
         extract_pos_tag: Boolean(),
         extract_dep: Boolean(),
         extract_entity: Boolean(),
         extract_details: Boolean(),
         extract_sentiment: Boolean(),
 ):
     self.language = language
     self.extract_pos = extract_pos
     self.extract_lemma = extract_lemma
     self.extract_pos_tag = extract_pos_tag
     self.extract_dep = extract_dep
     self.extract_entity = extract_entity
     self.extract_details = extract_details
     self.extract_sentiment = extract_sentiment
     self._nlp = None
Example #14
 def __init__(
     self,
     language: Categorical(
         "italian",
         "portuguese",
         "hungarian",
         "english",
         "german",
         "arabic",
         "danish",
         "norwegian",
         "finnish",
         "dutch",
         "romanian",
         "russian",
         "swedish",
         "spanish",
         "french",
     ),
 ):
     self.language = language
     NltkStemmer.__init__(self)
     _SnowballStemmer.__init__(self, language=language)
Example #15
 def __init__(self, filters: Discrete(2, 8),
              kernel_size: Categorical(3, 5, 7)):
     super().__init__(filters=2**filters,
                      kernel_size=kernel_size,
                      padding="causal")
Example #16
 def __init__(
     self, algorithm: Categorical('lbfgs', 'l2sgd', 'ap', 'pa', 'arow')
 ) -> None:
     SklearnEstimator.__init__(self)
     super().__init__(algorithm=algorithm)
Example #17
 def __init__(self, penalty: Categorical("l1", "l2"),
              reg: Continuous(0.1, 10)):
     super().__init__(penalty=penalty, C=reg, solver="liblinear")
     self.penalty = penalty
     self.reg = reg
Example #18
 def __init__(self, penalty: Categorical("l1", "l2"), C: Continuous(0.1, 10)):
     super().__init__(penalty=penalty, C=C, solver="liblinear")
Example #19
 def __init__(self, criterion: Categorical("gini", "entropy")):
     super().__init__(criterion=criterion)
Example #20
 def __init__(
     self, kernel: Categorical("rbf", "linear", "poly"), C: Continuous(0.1, 10)
 ):
     super().__init__(C=C, kernel=kernel)
Example #21
 def __init__(self, x: Categorical('A', 'B', 'C')):
     self.x = x
Example #22
 def __init__(self, mode: Categorical("mean", "max")):
     self.mode = mode
Example #23
 def __init__(self, x: Categorical("A", "B", "C")):
     self.x = x
Example #24
 def __init__(self, features: Subset('Subset', Discrete(1, 5),
                                     Categorical('adam', 'sgd'))):
     pass
Example #25
 def __init__(self, kernel: Categorical("rbf", "linear", "poly"),
              reg: Continuous(0.1, 10)):
     super().__init__(C=reg, kernel=kernel)
     self.kernel = kernel
     self.reg = reg