def __init__(
    self,
    merge_mode: CategoricalValue("sum", "mul", "concat", "ave"),
    units: DiscreteValue(32, 1024),
    activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
    recurrent_activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
    dropout: ContinuousValue(0, 0.5),
    recurrent_dropout: ContinuousValue(0, 0.5),
):
    super().__init__(
        layer=_LSTM(
            units=units,
            activation=activation_fn,
            recurrent_activation=recurrent_activation_fn,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            return_sequences=True,
        ),
        merge_mode=merge_mode,
    )
    self.activation_fn = activation_fn
    self.recurrent_activation_fn = recurrent_activation_fn
def __init__(
    self,
    featurewise_center: BooleanValue(),
    samplewise_center: BooleanValue(),
    featurewise_std_normalization: BooleanValue(),
    samplewise_std_normalization: BooleanValue(),
    rotation_range: DiscreteValue(0, 15),
    width_shift_range: ContinuousValue(0, 0.25),
    height_shift_range: ContinuousValue(0, 0.25),
    shear_range: ContinuousValue(0, 15),
    zoom_range: ContinuousValue(0, 0.25),
    horizontal_flip: BooleanValue(),
    vertical_flip: BooleanValue(),
):
    super().__init__(
        featurewise_center=featurewise_center,
        samplewise_center=samplewise_center,
        featurewise_std_normalization=featurewise_std_normalization,
        samplewise_std_normalization=samplewise_std_normalization,
        rotation_range=rotation_range,
        width_shift_range=width_shift_range,
        height_shift_range=height_shift_range,
        shear_range=shear_range,
        zoom_range=zoom_range,
        horizontal_flip=horizontal_flip,
        vertical_flip=vertical_flip,
    )
def __init__(
    self,
    filters: DiscreteValue(2, 8),
    kernel_size: CategoricalValue(3, 5, 7),
    l1: ContinuousValue(0, 1e-3),
    l2: ContinuousValue(0, 1e-3),
    **kwargs
):
    self.l1 = l1
    self.l2 = l2
    super().__init__(
        filters=2 ** filters,  # `filters` is searched as an exponent, i.e. 4 to 256 actual filters
        kernel_size=(kernel_size, kernel_size),
        kernel_regularizer=regularizers.l1_l2(l1=l1, l2=l2),
        padding="same",
        data_format="channels_last",
        **kwargs
    )
def __init__(
    self,
    dm: DiscreteValue(min=0, max=2),
    dbow_words: DiscreteValue(min=-100, max=100),
    dm_concat: DiscreteValue(min=-100, max=100),
    dm_tag_count: DiscreteValue(min=0, max=2),
    alpha: ContinuousValue(min=0.001, max=0.075),
    epochs: DiscreteValue(min=2, max=10),
    window: DiscreteValue(min=2, max=10),
    inner_tokenizer: algorithm(Sentence, Seq[Word]),
    inner_stemmer: algorithm(Word, Stem),
    inner_stopwords: algorithm(Seq[Word], Seq[Word]),
    lowercase: BooleanValue(),
    stopwords_remove: BooleanValue(),
):
    self.inner_tokenizer = inner_tokenizer
    self.inner_stemmer = inner_stemmer
    self.inner_stopwords = inner_stopwords
    self.lowercase = lowercase
    self.stopwords_remove = stopwords_remove
    super().__init__(
        dm=dm,
        dbow_words=dbow_words,
        dm_concat=dm_concat,
        dm_tag_count=dm_tag_count,
        alpha=alpha,
        epochs=epochs,
        window=window,
    )
def __init__(
    self,
    units: DiscreteValue(32, 1024),
    activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
    recurrent_activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
    dropout: ContinuousValue(0, 0.5),
    recurrent_dropout: ContinuousValue(0, 0.5),
    **kwargs
):
    super().__init__(
        units=units,
        activation=activation_fn,
        recurrent_activation=recurrent_activation_fn,
        dropout=dropout,
        recurrent_dropout=recurrent_dropout,
        return_sequences=False,
        **kwargs
    )
    self.activation_fn = activation_fn
    self.recurrent_activation_fn = recurrent_activation_fn
def _get_arg_values(arg, value, cls):
    # Note: bool is checked before int because bool is a subclass of int.
    if isinstance(value, bool):
        return BooleanValue()
    if isinstance(value, int):
        return DiscreteValue(*_get_integer_values(arg, value, cls))
    if isinstance(value, float):
        # _get_float_values may return None when no usable range is found.
        values = _get_float_values(arg, value, cls)
        return ContinuousValue(*values) if values else None
    if isinstance(value, str):
        values = _find_parameter_values(arg, cls)
        return CategoricalValue(*values) if values else None
    return None
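# Illustrative usage of _get_arg_values (a sketch, not part of the original module):
# given a keyword argument name, its default value, and the estimator class, it
# produces the matching hyperparameter annotation. Using sklearn's LogisticRegression
# as the target class here is an assumption for illustration only.
#
#   _get_arg_values("fit_intercept", True, LogisticRegression)  -> BooleanValue()
#   _get_arg_values("C", 1.0, LogisticRegression)               -> ContinuousValue(...) or None
#   _get_arg_values("penalty", "l2", LogisticRegression)        -> CategoricalValue(...) or None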
def __init__(
    self,
    dm: DiscreteValue(min=0, max=2),
    dbow_words: DiscreteValue(min=-100, max=100),
    dm_concat: DiscreteValue(min=-100, max=100),
    dm_tag_count: DiscreteValue(min=0, max=2),
    alpha: ContinuousValue(min=0.001, max=0.075),
    epochs: DiscreteValue(min=2, max=10),
    window: DiscreteValue(min=2, max=10),
):
    self.dm = int(dm)
    self.dbow_words = int(dbow_words)
    self.dm_concat = int(dm_concat)
    self.dm_tag_count = int(dm_tag_count)
    self.alpha = alpha
    self.epochs = int(epochs)
    self.window = int(window)
def _get_float_values(arg, value, cls):
    # Reject non-finite defaults (assumes `from math import isinf, isnan` at module level;
    # a membership test against [inf, nan] would not reliably catch nan or -inf).
    if isinf(value) or isnan(value):
        return None

    if value > 0:
        min_value = -10 * value
        max_value = 10 * value
    elif value == 0:
        min_value = -1
        max_value = 1
    else:
        return None

    # binary search for the minimum accepted value
    left = min_value
    right = value
    while abs(left - right) > 1e-2:
        current_value = round((left + right) / 2, 3)
        if _try(cls, arg, current_value):
            right = current_value
        else:
            left = current_value
    min_value = right

    # binary search for the maximum accepted value
    left = value
    right = max_value
    while abs(left - right) > 1e-2:
        current_value = round((left + right) / 2, 3)
        if _try(cls, arg, current_value):
            left = current_value
        else:
            right = current_value
    max_value = left

    # Only report a range when it is wide enough to be worth searching over;
    # the caller (_get_arg_values) unpacks this (min, max) pair into ContinuousValue.
    if max_value - min_value >= 2 * value:
        return min_value, max_value
    return None
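# `_try` is referenced above but not shown. A minimal sketch of what such a probe
# could look like (an assumption, not the original implementation): instantiate the
# class with only the argument under test and report whether the value is accepted.
def _try(cls, arg, value):
    try:
        cls(**{arg: value})
        return True
    except Exception:
        return False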
def __init__(self, rate: ContinuousValue(0, 0.5), **kwargs):
    super().__init__(rate=rate, **kwargs)
def __init__(self, rate: ContinuousValue(0, 0.5)):
    super().__init__(rate=rate)
def __init__(
    self,
    penalty: CategoricalValue("l1", "l2"),
    C: ContinuousValue(0.1, 10),
):
    # solver="liblinear" supports both the "l1" and "l2" penalties.
    super().__init__(penalty=penalty, C=C, solver="liblinear")
def __init__(self, var_smoothing: ContinuousValue(1e-10, 0.1)):
    super().__init__(var_smoothing=var_smoothing)
def __init__(
    self,
    kernel: CategoricalValue("rbf", "linear", "poly"),
    C: ContinuousValue(0.1, 10),
):
    super().__init__(C=C, kernel=kernel)
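# How these annotated signatures can be consumed (a hypothetical sketch, not the
# library's actual sampling API): inspect the __init__ annotations and draw one
# random value per hyperparameter. CategoricalValue / DiscreteValue / ContinuousValue /
# BooleanValue come from the same module as the wrappers above; the attribute names
# `options`, `min`, and `max` used below are assumptions about those classes.
import inspect
import random


def sample_kwargs(cls):
    """Draw a random value for every annotated hyperparameter of cls.__init__."""
    kwargs = {}
    for name, param in inspect.signature(cls.__init__).parameters.items():
        ann = param.annotation  # `self` and **kwargs have no annotation and are skipped
        if isinstance(ann, CategoricalValue):
            kwargs[name] = random.choice(ann.options)        # assumes an `options` attribute
        elif isinstance(ann, DiscreteValue):
            kwargs[name] = random.randint(ann.min, ann.max)  # assumes `min`/`max` attributes
        elif isinstance(ann, ContinuousValue):
            kwargs[name] = random.uniform(ann.min, ann.max)
        elif isinstance(ann, BooleanValue):
            kwargs[name] = random.choice([True, False])
    return kwargs


# Example (hypothetical wrapper name): sample_kwargs(SVCWrapper)
# might yield something like {"kernel": "poly", "C": 3.7}.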