def __init__(
    self,
    featurewise_center: BooleanValue(),
    samplewise_center: BooleanValue(),
    featurewise_std_normalization: BooleanValue(),
    samplewise_std_normalization: BooleanValue(),
    rotation_range: DiscreteValue(0, 15),
    width_shift_range: ContinuousValue(0, 0.25),
    height_shift_range: ContinuousValue(0, 0.25),
    shear_range: ContinuousValue(0, 15),
    zoom_range: ContinuousValue(0, 0.25),
    horizontal_flip: BooleanValue(),
    vertical_flip: BooleanValue(),
):
    super().__init__(
        featurewise_center=featurewise_center,
        samplewise_center=samplewise_center,
        featurewise_std_normalization=featurewise_std_normalization,
        samplewise_std_normalization=samplewise_std_normalization,
        rotation_range=rotation_range,
        width_shift_range=width_shift_range,
        height_shift_range=height_shift_range,
        shear_range=shear_range,
        zoom_range=zoom_range,
        horizontal_flip=horizontal_flip,
        vertical_flip=vertical_flip,
    )
def __init__(
    self,
    merge_mode: CategoricalValue("sum", "mul", "concat", "ave"),
    units: DiscreteValue(32, 1024),
    activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
    recurrent_activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
    dropout: ContinuousValue(0, 0.5),
    recurrent_dropout: ContinuousValue(0, 0.5),
):
    super().__init__(
        layer=_LSTM(
            units=units,
            activation=activation_fn,
            recurrent_activation=recurrent_activation_fn,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            return_sequences=True,
        ),
        merge_mode=merge_mode,
    )
    self.activation_fn = activation_fn
    self.recurrent_activation_fn = recurrent_activation_fn
def __init__(
    self,
    extract_word: BooleanValue() = True,
    window_size: DiscreteValue(0, 5) = 0,
):
    self.extract_word = extract_word
    self.window_size = window_size
def __init__(
    self,
    cutoff: DiscreteValue(min=0, max=10),
):
    self.cutoff = cutoff
    self.tagger = _UnigramTagger
    self.values = dict(cutoff=cutoff)
    NltkTagger.__init__(self)
def __init__(
    self,
    # min_length will automatically take values between zero and five across
    # different pipelines. This parameter lets the search try different word
    # lengths and find out which one works best.
    min_length: DiscreteValue(min=0, max=5),
    # lower is a parameter that will be True in some cases and False in others.
    # We can use it to decide whether or not to lowercase the text.
    lower: BooleanValue(),
):
    self.min_length = min_length
    self.lower = lower
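A minimal sketch of how a class annotated this way can be sampled, assuming the annotations come from autogoal.grammar; generate_cfg and .sample() are used exactly as in the test_sample_subset snippet below, and the class name TextPreprocessor is hypothetical:

from autogoal.grammar import BooleanValue, DiscreteValue, generate_cfg

class TextPreprocessor:
    def __init__(self, min_length: DiscreteValue(min=0, max=5), lower: BooleanValue()):
        self.min_length = min_length
        self.lower = lower

# Build a context-free grammar from the annotated signature and draw
# one concrete configuration from it.
grammar = generate_cfg(TextPreprocessor)
instance = grammar.sample()  # e.g. TextPreprocessor(min_length=3, lower=True)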
def _get_arg_values(arg, value, cls):
    # Dispatch on the type of the parameter's default value to infer an
    # appropriate value annotation for it.
    if isinstance(value, bool):
        return BooleanValue()
    if isinstance(value, int):
        # _get_integer_values (defined below) already returns a constructed
        # DiscreteValue, or None when no valid range is found, so its result
        # is passed through rather than unpacked; _get_float_values is
        # assumed to behave the same way for ContinuousValue.
        return _get_integer_values(arg, value, cls)
    if isinstance(value, float):
        return _get_float_values(arg, value, cls)
    if isinstance(value, str):
        values = _find_parameter_values(arg, cls)
        return CategoricalValue(*values) if values else None
    return None
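A hedged sketch of how _get_arg_values could be driven from a class's constructor; only _get_arg_values itself comes from the snippet above, and _infer_signature is a hypothetical helper:

import inspect

def _infer_signature(cls):
    # Map each constructor parameter with a concrete default to an inferred
    # value annotation; parameters that cannot be classified are skipped.
    annotations = {}
    for name, param in inspect.signature(cls.__init__).parameters.items():
        if name == "self" or param.default is inspect.Parameter.empty:
            continue
        value = _get_arg_values(name, param.default, cls)
        if value is not None:
            annotations[name] = value
    return annotations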
def __init__(
    self,
    Trained: BooleanValue(),
    N: DiscreteValue(min=500, max=2000),
    C: BooleanValue(),
):
    self.Trained = Trained
    self.N = N
    self.C = C
    NltkTrainedTagger.__init__(self)
    _TnT.__init__(self, Trained=Trained, N=N, C=C)
def test_sample_subset():
    class A:
        def __init__(self, features: Subset("Subset", DiscreteValue(1, 5), "Hello", 1, None)):
            self.features = features

    g = generate_cfg(A)
    selected_features = g.sample().features
    selected = {repr(feature) for feature in selected_features}
    assert selected.issubset(
        {repr(feature) for feature in [DiscreteValue(1, 5), "Hello", 1, None]}
    )
def __init__(
    self,
    filters: DiscreteValue(2, 8),
    kernel_size: CategoricalValue(3, 5, 7),
    l1: ContinuousValue(0, 1e-3),
    l2: ContinuousValue(0, 1e-3),
    **kwargs
):
    self.l1 = l1
    self.l2 = l2
    super().__init__(
        filters=2**filters,
        kernel_size=(kernel_size, kernel_size),
        kernel_regularizer=regularizers.l1_l2(l1=l1, l2=l2),
        padding="same",
        data_format="channels_last",
        **kwargs
    )
def __init__(
    self,
    dm: DiscreteValue(min=0, max=2),
    dbow_words: DiscreteValue(min=-100, max=100),
    dm_concat: DiscreteValue(min=-100, max=100),
    dm_tag_count: DiscreteValue(min=0, max=2),
    alpha: ContinuousValue(min=0.001, max=0.075),
    epochs: DiscreteValue(min=2, max=10),
    window: DiscreteValue(min=2, max=10),
    inner_tokenizer: algorithm(Sentence, Seq[Word]),
    inner_stemmer: algorithm(Word, Stem),
    inner_stopwords: algorithm(Seq[Word], Seq[Word]),
    lowercase: BooleanValue(),
    stopwords_remove: BooleanValue(),
):
    self.inner_tokenizer = inner_tokenizer
    self.inner_stemmer = inner_stemmer
    self.inner_stopwords = inner_stopwords
    self.lowercase = lowercase
    self.stopwords_remove = stopwords_remove
    super().__init__(
        dm=dm,
        dbow_words=dbow_words,
        dm_concat=dm_concat,
        dm_tag_count=dm_tag_count,
        alpha=alpha,
        epochs=epochs,
        window=window,
    )
def __init__(
    self,
    affix_length: DiscreteValue(min=2, max=6),
    min_stem_length: DiscreteValue(min=1, max=4),
    cutoff: DiscreteValue(min=0, max=10),
    backoff: algorithm(
        Seq[Seq[Word]], Supervised[Seq[Seq[Postag]]], Seq[Seq[Postag]]
    ),
):
    self.affix_length = affix_length
    self.min_stem_length = min_stem_length
    self.cutoff = cutoff
    self.backoff = backoff
    self.tagger = _AffixTagger
    self.values = dict(
        affix_length=affix_length,
        min_stem_length=min_stem_length,
        cutoff=cutoff,
        backoff=backoff,
    )
    NltkTagger.__init__(self)
def _get_integer_values(arg, value, cls):
    # Start from a coarse interval around the default value, then tighten
    # both endpoints by probing the class with candidate values.
    if value > 0:
        min_value = 0
        max_value = 2 * value
    elif value == 0:
        min_value = -100
        max_value = 100
    else:
        return None

    # Binary search for the minimum value the class accepts.
    left = min_value
    right = value
    while left < right:
        current_value = int((left + right) / 2)
        if current_value in (left, right):
            break
        if _try(cls, arg, current_value):
            right = current_value
        else:
            left = current_value
    min_value = right

    # Binary search for the maximum value the class accepts.
    left = value
    right = max_value
    while left < right:
        current_value = int((left + right) / 2)
        if current_value in (left, right):
            break
        if _try(cls, arg, current_value):
            left = current_value
        else:
            right = current_value
    max_value = left

    if min_value < max_value:
        return DiscreteValue(min=min_value, max=max_value)
    return None
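This search relies on a _try helper that reports whether the class accepts a candidate value for the argument. A minimal sketch of one plausible implementation, under the assumption that an out-of-range value raises on instantiation; the real helper may differ:

def _try(cls, arg, value):
    # Attempt to instantiate the class with a single overridden argument.
    # Any exception is taken to mean the value is out of range.
    try:
        cls(**{arg: value})
        return True
    except Exception:
        return False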
def __init__(
    self,
    units: DiscreteValue(32, 1024),
    activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
    recurrent_activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
    dropout: ContinuousValue(0, 0.5),
    recurrent_dropout: ContinuousValue(0, 0.5),
    **kwargs
):
    super().__init__(
        units=units,
        activation=activation_fn,
        recurrent_activation=recurrent_activation_fn,
        dropout=dropout,
        recurrent_dropout=recurrent_dropout,
        return_sequences=False,
        **kwargs
    )
    self.activation_fn = activation_fn
    self.recurrent_activation_fn = recurrent_activation_fn
def __init__(
    self,
    dm: DiscreteValue(min=0, max=2),
    dbow_words: DiscreteValue(min=-100, max=100),
    dm_concat: DiscreteValue(min=-100, max=100),
    dm_tag_count: DiscreteValue(min=0, max=2),
    alpha: ContinuousValue(min=0.001, max=0.075),
    epochs: DiscreteValue(min=2, max=10),
    window: DiscreteValue(min=2, max=10),
):
    self.dm = int(dm)
    self.dbow_words = int(dbow_words)
    self.dm_concat = int(dm_concat)
    self.dm_tag_count = int(dm_tag_count)
    self.alpha = alpha
    self.epochs = int(epochs)
    self.window = int(window)
def __init__(self, features: Subset("Subset", DiscreteValue(1, 5), "Hello", 1, None)):
    self.features = features
def __init__(self, output_dim: DiscreteValue(32, 128), **kwargs):
    super().__init__(input_dim=1000, output_dim=output_dim, **kwargs)
def __init__(self, ngram: DiscreteValue(1, 3), use_idf: BooleanValue()):
    super().__init__(ngram_range=(1, ngram), use_idf=use_idf)
    self.ngram = ngram
def f(x: DiscreteValue(1, 5)):
    pass
def __init__(self, x: DiscreteValue(1, 5)):
    pass
def __init__(self, n: DiscreteValue(50, 200)):
    super().__init__(n_components=n)
    self.n = n
def __init__(self, ngram: DiscreteValue(1, 3)):
    super().__init__(ngram_range=(1, ngram))
    self.ngram = ngram
def __init__(
    self,
    filters: DiscreteValue(2, 8),
    kernel_size: CategoricalValue(3, 5, 7),
    **kwargs
):
    super().__init__(
        filters=2**filters,
        kernel_size=kernel_size,
        padding="causal",
        **kwargs
    )
def __init__(
    self,
    features: Subset("Subset", DiscreteValue(1, 5), CategoricalValue("adam", "sgd")),
):
    pass
def __init__(self, x: DiscreteValue(-10, 10), y: DiscreteValue(-10, 10)):
    self.x = x
    self.y = y
def __init__(self, units: DiscreteValue(128, 1024), **kwargs):
    super().__init__(units=units, **kwargs)