Example No. 1
def keras_jiggly_7(us, n_inputs: int):
    """ Maps cube onto model and search params """
    search_params = {'epochs': 500, 'patience': 6, 'jiggle_fraction': 0.5}
    learning_rate = to_log_space_1d(us[0], low=0.0001, high=0.001)
    info = {'keras_optimizer': "Adamax", 'learning_rate': learning_rate}
    loss = 'mse'
    unit0 = int(to_log_space_1d(us[1], low=1.5, high=20))
    units = [unit0, 5, 3, 5, 16, 1]
    activations = ['linear', 'relu', 'relu', 'gelu', 'swish', 'linear']
    model = keras.Sequential()
    keras_optimizer = keras_optimizer_from_name(
        'Adamax', learning_rate=info['learning_rate'])

    for layer_ndx, (n_units, activation) in enumerate(zip(units, activations)):
        if layer_ndx == len(units) - 1:
            # Last layer
            model.add(keras.layers.Dense(1))
        else:
            bias_size = us[layer_ndx + 2]
            bias_initializer_0 = keras.initializers.RandomUniform(
                minval=-bias_size, maxval=bias_size, seed=None)
            if layer_ndx == 0:
                model.add(
                    keras.layers.Dense(n_units,
                                       activation=activation,
                                       input_shape=(1, n_inputs),
                                       bias_initializer=bias_initializer_0))
            else:
                model.add(
                    keras.layers.Dense(n_units,
                                       activation=activation,
                                       bias_initializer=bias_initializer_0))
    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
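All of these factories lean on a to_log_space_1d helper to stretch a unit-cube coordinate across a log-spaced interval. Its implementation is not part of this listing; a minimal sketch consistent with the call sites above (u in [0, 1] mapped onto [low, high]) might be:

import math

def to_log_space_1d(u: float, low: float, high: float) -> float:
    """ Map u in [0, 1] onto [low, high], uniformly in log space (assumed behavior) """
    return low * math.exp(u * math.log(high / low))

Log spacing is the natural choice here because learning rates and layer widths span several orders of magnitude; a uniform draw would concentrate samples near the top of the range.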
Example No. 2
def keras_cnn_factory_light(us, n_inputs: int, layer_choices: dict,
                            filters_low: int, filters_high: int):
    """ Maps cube onto model and search params """
    search_params = {'epochs': 10000, 'patience': 6, 'jiggle_fraction': 0.6}
    n_search_params = 0  # the search params above are fixed, so no cube dims are consumed by them
    n_cnn_layers = choice_from_dict(us[n_search_params], layer_choices)
    learning_rate = to_log_space_1d(us[n_search_params + 1],
                                    low=0.000001,
                                    high=0.001)
    opt_name = keras_optimizer_name(us[n_search_params + 2])
    dropout_rate = to_log_space_1d(us[n_search_params + 3],
                                   low=0.00001,
                                   high=0.2)  # consumes a cube dim but is unused in this light variant
    keras_optimizer = keras_optimizer_from_name(opt_name=opt_name,
                                                learning_rate=learning_rate)
    info = {'keras_optimizer': opt_name, 'learning_rate': learning_rate}
    loss = mostly_mse(us[n_search_params + 4])
    offset = n_search_params + 5

    model = keras.Sequential()
    # First layer is a permutation, to match the (1, n_inputs) input convention of the dense networks
    model.add(keras.layers.Permute((2, 1), input_shape=(1, n_inputs)))

    # n_inputs = timesteps; the Permute layer already fixed the input shape,
    # so the Conv1D layers need no input_shape of their own
    for layer_ndx in range(n_cnn_layers):
        n_filters = int(
            to_log_space_1d(us[layer_ndx + offset],
                            low=filters_low,
                            high=filters_high))
        model.add(keras.layers.Conv1D(filters=n_filters, kernel_size=3))

    # Tails
    layer_ndx = n_cnn_layers
    n_units = int(to_log_space_1d(us[layer_ndx + offset], low=10, high=200))
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(n_units))
    model.add(keras.layers.Dense(1, activation='linear'))

    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
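choice_from_dict is another external helper; judging from its call sites it turns a cube coordinate into a weighted draw over the dictionary keys, with the values acting as relative weights. A plausible sketch under that assumption:

def choice_from_dict(u: float, weighted_choices: dict):
    """ Pick a key from weighted_choices, treating values as relative weights (assumed behavior) """
    total = sum(weighted_choices.values())
    threshold = u * total
    running = 0.0
    for key, weight in weighted_choices.items():
        running += weight
        if threshold <= running:
            return key
    return key  # u == 1.0 falls through to the last key

With {1: 5, 2: 10, 3: 20, 4: 5, 5: 5} as in Example No. 4, three-layer networks would be drawn four times as often as one-layer networks.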
Example No. 3
def keras_transformer_5(us, n_inputs: int):
    """ Maps cube onto model and search params """
    # Inspired by https://towardsdatascience.com/using-lstms-to-forecast-time-series-4ab688386b1f
    # But see https://shiva-verma.medium.com/understanding-input-and-output-shape-in-lstm-keras-c501ee95c65e
    # Or https://towardsdatascience.com/3-steps-to-forecast-time-series-lstm-with-tensorflow-keras-ba88c6f05237
    # THIS IS NOT WORKING CURRENTLY
    # Maybe ... https://stackoverflow.com/questions/66373169/tensorflow-2-object-detection-api-numpy-version-errors/66486051#66486051
    search_params = {
        'epochs': int(to_log_space_1d(us[0], low=50, high=20000)),
        'patience': int(to_log_space_1d(us[1], low=5, high=500)),
        'jiggle_fraction': us[2]**2
    }
    head_size = choice_from_dict(us[3], {32: 5, 64: 10, 128: 10})
    model = ModelTrunk()  # incomplete: ModelTrunk is undefined in this listing, head_size is unused, and nothing is returned
Example No. 4
def keras_mostly_linear_27(us, n_inputs: int):
    """ Maps cube onto model and search params """
    search_params = {
        'epochs': int(to_log_space_1d(us[0], low=50, high=5000)),
        'patience': int(to_log_space_1d(us[1], low=5, high=150)),
        'jiggle_fraction': us[2]
    }

    n_search_params = len(search_params)
    n_layers = choice_from_dict(us[n_search_params], {
        1: 5,
        2: 10,
        3: 20,
        4: 5,
        5: 5
    })
    learning_rate = to_log_space_1d(us[n_search_params + 1],
                                    low=0.000001,
                                    high=0.001)
    opt_name = keras_optimizer_name(us[n_search_params + 2])
    keras_optimizer = keras_optimizer_from_name(opt_name=opt_name,
                                                learning_rate=learning_rate)
    info = {'keras_optimizer': opt_name, 'learning_rate': learning_rate}
    loss = mostly_mse(us[n_search_params + 3])
    offset = n_search_params + 4

    model = keras.Sequential()
    for layer_ndx in range(n_layers):
        n_units = int(
            to_log_space_1d(us[4 * layer_ndx + offset], low=2, high=128))
        activation = mostly_linear(us[4 * layer_ndx + offset + 1])
        # kernel_size and bias_size here are initializer ranges, not conv kernel sizes
        kernel_size = us[4 * layer_ndx + offset + 2]
        bias_size = us[4 * layer_ndx + offset + 3]
        kernel_initializer_0 = keras.initializers.RandomUniform(
            minval=-kernel_size, maxval=kernel_size, seed=None)
        bias_initializer_0 = keras.initializers.RandomUniform(
            minval=-bias_size, maxval=bias_size, seed=None)
        # Only the first layer needs an input shape
        first_layer_kwargs = {'input_shape': (1, n_inputs)} if layer_ndx == 0 else {}
        model.add(
            keras.layers.Dense(n_units,
                               activation=activation,
                               kernel_initializer=kernel_initializer_0,
                               bias_initializer=bias_initializer_0,
                               **first_layer_kwargs))
    model.add(keras.layers.Dense(1, activation='linear'))
    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
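mostly_linear and mostly_mse are likewise not defined in this listing. Their names and call sites suggest they bias the draw towards a 'linear' activation and an 'mse' loss while occasionally returning an alternative. A hypothetical sketch, with the 80/20 split and the fallback choices invented purely for illustration:

def mostly_linear(u: float) -> str:
    """ Return 'linear' most of the time, otherwise another activation (illustrative split) """
    return 'linear' if u < 0.8 else 'relu'

def mostly_mse(u: float) -> str:
    """ Return 'mse' most of the time, otherwise a robust alternative (illustrative split) """
    return 'mse' if u < 0.8 else 'mae'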
Example No. 5
def keras_nearby(us: list[float],
                 n_input: int,
                 skater_name: str,
                 k: int,
                 fixed_search_params: list[str] | None = None):
    """
       fixed_search_params : optional list naming parameters to hold fixed, any of
                             'epochs', 'patience', 'jiggle_fraction', 'learning_rate'
    """
    # Guard before the membership checks below, which would fail on None
    fixed_search_params = [] if fixed_search_params is None else fixed_search_params
    model = load_champion_model(skater_name=skater_name, k=k, n_input=n_input)
    info = read_champion_info(skater_name=skater_name, k=k, n_input=n_input)

    if 'learning_rate' in fixed_search_params:
        learning_rate_scaling = 1
    else:
        learning_rate_scaling = to_log_space_1d(u=us[0], low=0.1, high=10.0)
    learning_rate = info['learning_rate'] * learning_rate_scaling
    keras_optimizer = keras_optimizer_from_name(info['keras_optimizer'],
                                                learning_rate=learning_rate)

    model.compile(loss='mse', optimizer=keras_optimizer)

    DEFAULT_LOW = {'epochs': 500, 'patience': 3, 'jiggle_fraction': 0.001}
    DEFAULT_HIGH = {'epochs': 5000, 'patience': 30, 'jiggle_fraction': 0.2}
    AS_INT = ['epochs', 'patience']

    u_ndx = 1
    search_params = dict()
    for thing in ['epochs', 'patience', 'jiggle_fraction']:
        if thing in fixed_search_params:
            search_params[thing] = info[thing]
        else:
            low = DEFAULT_LOW[thing]
            high = DEFAULT_HIGH[thing]
            number = to_log_space_1d(us[u_ndx], low=low, high=high)
            if thing in AS_INT:
                number = int(number)
            u_ndx += 1
            search_params[thing] = number

    return model, search_params, info
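The optimizer helpers are also external. keras_optimizer_name presumably maps a cube coordinate onto an optimizer class name, and keras_optimizer_from_name instantiates that class at a given learning rate. A sketch under those assumptions, using only standard Keras optimizers (the candidate list is a guess):

from tensorflow import keras

def keras_optimizer_name(u: float) -> str:
    """ Map u in [0, 1] onto an optimizer class name (the candidate list is an assumption) """
    names = ['SGD', 'RMSprop', 'Adam', 'Adamax', 'Nadam']
    return names[min(int(u * len(names)), len(names) - 1)]

def keras_optimizer_from_name(opt_name: str, learning_rate: float):
    """ Instantiate a Keras optimizer by class name at the given learning rate """
    return getattr(keras.optimizers, opt_name)(learning_rate=learning_rate)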
Example No. 6
def keras_deeper_swish_17(us, n_inputs: int):
    """ Maps cube onto model and search params """
    search_params = {
        'epochs': int(to_log_space_1d(us[0], low=10, high=100)),
        'patience': int(to_log_space_1d(us[1], low=1, high=10)),
        'jiggle_fraction': us[2]**2
    }

    n_search_params = len(search_params)
    n_layers = choice_from_dict(us[n_search_params], {
        5: 10,
        6: 10,
        7: 10,
        8: 10,
        9: 10,
        10: 10
    })
    learning_rate = to_log_space_1d(us[n_search_params + 1],
                                    low=0.000001,
                                    high=0.001)
    opt_name = keras_optimizer_name(us[n_search_params + 2])
    keras_optimizer = keras_optimizer_from_name(opt_name=opt_name,
                                                learning_rate=learning_rate)
    info = {'keras_optimizer': opt_name, 'learning_rate': learning_rate}
    loss = mostly_mse(us[n_search_params + 3])
    offset = n_search_params + 4

    model = keras.Sequential()
    for layer_ndx in range(n_layers):
        n_units = int(
            to_log_space_1d(us[layer_ndx + offset], low=2, high=128))
        # Only the first layer needs an input shape; note these Dense layers
        # keep the default linear activation despite the 'swish' in the name
        first_layer_kwargs = {'input_shape': (1, n_inputs)} if layer_ndx == 0 else {}
        model.add(keras.layers.Dense(n_units, **first_layer_kwargs))
        model.add(keras.layers.Dropout(0.1))
    model.add(keras.layers.Dense(1, activation='linear'))
    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
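Driving any of these factories takes nothing more than a point sampled from the unit cube with enough coordinates for the shared parameters plus one (or several) per layer. A minimal usage sketch for keras_deeper_swish_17 above, which consumes 7 shared coordinates and up to 10 per-layer coordinates:

import numpy as np

us = np.random.rand(7 + 10)  # 7 shared coordinates plus one per possible layer
model, search_params, info = keras_deeper_swish_17(us, n_inputs=8)
print(search_params, info)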
Example No. 7
def keras_lstm_factory(us, n_inputs: int, layer_choices: dict, units_low=4,
                       units_high=128):
    """ Maps cube onto model and search params """
    # Inspired by https://towardsdatascience.com/using-lstms-to-forecast-time-series-4ab688386b1f
    # But see https://shiva-verma.medium.com/understanding-input-and-output-shape-in-lstm-keras-c501ee95c65e
    # Or https://towardsdatascience.com/3-steps-to-forecast-time-series-lstm-with-tensorflow-keras-ba88c6f05237
    # THIS IS NOT WORKING CURRENTLY on M1
    # Maybe ... https://stackoverflow.com/questions/66373169/tensorflow-2-object-detection-api-numpy-version-errors/66486051#66486051
    search_params = {
        'epochs': int(to_log_space_1d(us[0], low=5, high=5000)),
        'patience': int(to_log_space_1d(us[1], low=1, high=10)),
        'jiggle_fraction': us[2]**2
    }

    n_search_params = len(search_params)
    n_lstm_layers = choice_from_dict(us[n_search_params], layer_choices)
    learning_rate = to_log_space_1d(us[n_search_params + 1],
                                    low=0.000001,
                                    high=0.001)
    opt_name = keras_optimizer_name(us[n_search_params + 2])
    dropout_rate = to_log_space_1d(us[n_search_params + 3],
                                   low=0.00001,
                                   high=0.2)
    keras_optimizer = keras_optimizer_from_name(opt_name=opt_name,
                                                learning_rate=learning_rate)
    info = {'keras_optimizer': opt_name, 'learning_rate': learning_rate}
    loss = mostly_mse(us[n_search_params + 4])
    offset = n_search_params + 5

    model = keras.Sequential()
    # First layer is a permutation, to match the (1, n_inputs) input convention of the dense networks
    model.add(keras.layers.Permute((2, 1), input_shape=(1, n_inputs)))

    # n_inputs = timesteps; the Permute layer already fixed the input shape
    layer_ndx = 0
    n_units = int(
        to_log_space_1d(us[layer_ndx + offset], low=units_low,
                        high=units_high))
    model.add(keras.layers.LSTM(units=n_units, return_sequences=True))
    model.add(keras.layers.Dropout(dropout_rate))

    # Intermediate layers
    for layer_ndx in range(1, n_lstm_layers - 1):
        n_units = int(
            to_log_space_1d(us[layer_ndx + offset],
                            low=units_low,
                            high=units_high))
        model.add(keras.layers.LSTM(units=n_units, return_sequences=True))
        model.add(keras.layers.Dropout(dropout_rate))

    # Last layer consumes the final per-layer cube dim and drops the sequence
    layer_ndx = n_lstm_layers - 1
    n_units = int(
        to_log_space_1d(us[layer_ndx + offset], low=units_low,
                        high=units_high))
    model.add(keras.layers.LSTM(units=n_units, return_sequences=False))

    # Dense head
    model.add(keras.layers.Dense(1, activation='linear'))

    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
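The Permute layer that opens each of the sequence-model factories deserves a word: it swaps the last two axes, so the dense-network input convention of (batch, 1, n_inputs) becomes (batch, n_inputs, 1), i.e. n_inputs timesteps of a single feature, which is what LSTM, Conv1D and TCN layers expect. A quick shape check (assuming TensorFlow 2 Keras):

import numpy as np
from tensorflow import keras

# (batch, 1, n_inputs) -> (batch, n_inputs, 1): feature axis becomes the timestep axis
x = np.zeros((4, 1, 8), dtype='float32')
print(keras.layers.Permute((2, 1))(x).shape)  # (4, 8, 1)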
Example No. 8
def keras_linear(us, n_inputs: int):
    min_kernel = us[0]
    max_kernel = us[0] + us[1] + 0.001  # keep maxval strictly above minval
    bias_size = us[2]
    layers_0 = int(to_log_space_1d(us[3], low=8, high=128))
    layers_1 = int(to_log_space_1d(us[4], low=2, high=128))
    layers_2 = int(to_log_space_1d(us[5], low=2, high=16))
    learning_rate = to_log_space_1d(us[6], low=0.00001, high=0.05)
    epochs = int(to_log_space_1d(us[7], low=50, high=1000))  # alternative high: 5000
    patience = int(to_log_space_1d(us[8], low=5, high=50))  # alternative low: 5
    jiggle_fraction = us[9]

    search_params = {
        'epochs': epochs,
        'patience': patience,
        'jiggle_fraction': jiggle_fraction,
    }

    def build_linear_model(n_inputs):
        model = keras.Sequential()
        kernel_initializer_0 = keras.initializers.RandomUniform(
            minval=min_kernel, maxval=max_kernel, seed=None)
        bias_initializer_0 = keras.initializers.RandomUniform(
            minval=-bias_size, maxval=bias_size, seed=None)
        model.add(
            keras.layers.Dense(layers_0,
                               activation="linear",
                               input_shape=(1, n_inputs),
                               kernel_initializer=kernel_initializer_0,
                               bias_initializer=bias_initializer_0))
        model.add(keras.layers.Dense(layers_1, activation='linear'))
        model.add(keras.layers.Dense(layers_2, activation="linear"))  # alternative activation: 'selu'
        model.add(keras.layers.Dense(1, activation="linear"))
        model.compile(
            loss='mse',
            optimizer=keras.optimizers.RMSprop(learning_rate=learning_rate))
        return model

    model = build_linear_model(n_inputs=n_inputs)

    return model, search_params  # unlike the other factories, no info dict is returned
Example No. 9
def keras_tcn_factory(us, n_inputs: int, layer_choices: dict, filters_low: int,
                      filters_high: int):
    """ Maps cube onto model and search params """
    search_params = {
        'epochs': int(to_log_space_1d(us[0], low=5, high=5000)),
        'patience': int(to_log_space_1d(us[1], low=1, high=10)),
        'jiggle_fraction': us[2]**2
    }

    n_search_params = len(search_params)
    n_tcn_layers = choice_from_dict(us[n_search_params], layer_choices)
    learning_rate = to_log_space_1d(us[n_search_params + 1],
                                    low=0.000001,
                                    high=0.001)
    opt_name = keras_optimizer_name(us[n_search_params + 2])
    dropout_rate = to_log_space_1d(us[n_search_params + 3],
                                   low=0.00001,
                                   high=0.2)
    keras_optimizer = keras_optimizer_from_name(opt_name=opt_name,
                                                learning_rate=learning_rate)
    info = {'keras_optimizer': opt_name, 'learning_rate': learning_rate}
    loss = mostly_mse(us[n_search_params + 4])
    offset = n_search_params + 5

    model = keras.Sequential()
    # First layer is a permutation, to match the (1, n_inputs) input convention of the dense networks
    model.add(keras.layers.Permute((2, 1), input_shape=(1, n_inputs)))

    # n_inputs = timesteps
    layer_ndx = 0
    return_sequences = n_tcn_layers > 1
    n_filters = int(
        to_log_space_1d(us[layer_ndx + offset],
                        low=filters_low,
                        high=filters_high))
    model.add(
        TCN(input_shape=(n_inputs, 1),
            nb_filters=n_filters,
            dropout_rate=dropout_rate,
            return_sequences=return_sequences))

    for layer_ndx in range(1, n_tcn_layers - 1):
        return_sequences = True
        n_filters = int(
            to_log_space_1d(us[layer_ndx + offset],
                            low=filters_low,
                            high=filters_high))
        model.add(
            TCN(nb_filters=n_filters,
                dropout_rate=dropout_rate,
                return_sequences=return_sequences))

    if n_tcn_layers > 1:
        # Final TCN layer consumes the last per-layer cube dim and drops the sequence
        layer_ndx = n_tcn_layers - 1
        return_sequences = False
        n_filters = int(
            to_log_space_1d(us[layer_ndx + offset],
                            low=filters_low,
                            high=filters_high))
        model.add(
            TCN(nb_filters=n_filters,
                dropout_rate=dropout_rate,
                return_sequences=return_sequences))

    # Dense
    model.add(keras.layers.Dense(1, activation='linear'))

    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
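One external dependency worth flagging: the TCN layer in Example No. 9 comes from the third-party keras-tcn package, not from Keras itself, and nb_filters, dropout_rate and return_sequences are constructor arguments of that class. The import is roughly:

# pip install keras-tcn
from tcn import TCN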