Example #1
# Assumes TensorFlow's Keras throughout these examples; helper functions such
# as to_log_space_1d, choice_from_dict and keras_optimizer_from_name come from
# the enclosing module and are sketched after the examples that use them.
from tensorflow import keras


def keras_jiggly_7(us, n_inputs: int):
    search_params = {'epochs': 500, 'patience': 6, 'jiggle_fraction': 0.5}
    learning_rate = to_log_space_1d(us[0], low=0.0001, high=0.001)
    info = {'keras_optimizer': "Adamax", 'learning_rate': learning_rate}
    loss = 'mse'
    unit0 = int(to_log_space_1d(us[1], low=1.5, high=20))
    units = [unit0, 5, 3, 5, 16, 1]  # per-layer widths; the last entry is superseded by the fixed output layer
    activations = ['linear', 'relu', 'relu', 'gelu', 'swish', 'linear']
    model = keras.Sequential()
    keras_optimizer = keras_optimizer_from_name(
        'Adamax', learning_rate=info['learning_rate'])

    for layer_ndx, (n_units, activation) in enumerate(zip(units, activations)):
        if layer_ndx == len(units) - 1:
            # Last layer: single output unit, default linear activation
            model.add(keras.layers.Dense(1))
        else:
            bias_size = us[layer_ndx + 2]
            bias_initializer_0 = keras.initializers.RandomUniform(
                minval=-bias_size, maxval=bias_size, seed=None)
            if layer_ndx == 0:
                model.add(
                    keras.layers.Dense(n_units,
                                       activation=activation,
                                       input_shape=(1, n_inputs),
                                       bias_initializer=bias_initializer_0))
            else:
                model.add(
                    keras.layers.Dense(n_units,
                                       activation=activation,
                                       bias_initializer=bias_initializer_0))
    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
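
Example #1 and the factories below all lean on to_log_space_1d, which maps a
coordinate u in [0, 1] onto [low, high] uniformly in log space, so that
quantities like learning rates are searched multiplicatively. Its definition
lives outside these listings; a minimal sketch consistent with the call sites:

import math

def to_log_space_1d(u: float, low: float, high: float) -> float:
    # u = 0 maps to low, u = 1 maps to high, uniform in log space
    # (requires low > 0, as in every call above)
    return low * math.exp(u * math.log(high / low))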
Example #2
def keras_cnn_factory(us, n_inputs: int, layer_choices: dict, filters_low: int,
                      filters_high: int):
    """ Maps cube onto model and search params """
    search_params = {
        'epochs': int(to_log_space_1d(us[0], low=5, high=5000)),
        'patience': int(to_log_space_1d(us[1], low=1, high=10)),
        'jiggle_fraction': us[2]**2
    }

    n_search_params = len(search_params)
    n_cnn_layers = choice_from_dict(us[n_search_params], layer_choices)
    learning_rate = to_log_space_1d(us[n_search_params + 1],
                                    low=0.000001,
                                    high=0.001)
    opt_name = keras_optimizer_name(us[n_search_params + 2])
    # NB: dropout_rate consumes a cube coordinate but is never applied
    # to the model below
    dropout_rate = to_log_space_1d(us[n_search_params + 3],
                                   low=0.00001,
                                   high=0.2)
    keras_optimizer = keras_optimizer_from_name(opt_name=opt_name,
                                                learning_rate=learning_rate)
    info = {'keras_optimizer': opt_name, 'learning_rate': learning_rate}
    loss = mostly_mse(us[n_search_params + 4])
    offset = n_search_params + 5

    model = keras.Sequential()
    # First layer is permutation, so as to align with conventions for the dense networks
    model.add(keras.layers.Permute((2, 1), input_shape=(1, n_inputs)))

    # n_inputs = timesteps. The Permute layer above already fixed the input
    # shape, so input_shape arguments on the Conv1D layers would be ignored
    # by Keras and no first-layer special case is needed.
    for layer_ndx in range(n_cnn_layers):
        n_filters = int(
            to_log_space_1d(us[layer_ndx + offset],
                            low=filters_low,
                            high=filters_high))
        model.add(keras.layers.Conv1D(filters=n_filters, kernel_size=3))

    # Tail: flatten, one dense layer, then a single linear output
    n_units = int(
        to_log_space_1d(us[n_cnn_layers + offset], low=10, high=200))
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(n_units))
    model.add(keras.layers.Dense(1, activation='linear'))

    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
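
The CNN factory draws its depth with choice_from_dict, which turns a cube
coordinate into a key sampled from a dict of relative weights. A plausible
implementation, assuming the weights need not be normalised:

def choice_from_dict(u: float, weighted_choices: dict):
    # Treat u in [0, 1] as a position along the cumulative weights and
    # return the key whose bucket contains it
    total = sum(weighted_choices.values())
    cumulative = 0.0
    for key, weight in weighted_choices.items():
        cumulative += weight / total
        if u <= cumulative:
            return key
    return key  # guard against floating-point shortfall when u is near 1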
Example #3
def keras_lstm_factory(us, n_inputs: int, layer_choices: dict,
                       units_low=4, units_high=128):
    """ Maps cube onto model and search params """
    # Inspired by https://towardsdatascience.com/using-lstms-to-forecast-time-series-4ab688386b1f
    # But see https://shiva-verma.medium.com/understanding-input-and-output-shape-in-lstm-keras-c501ee95c65e
    # Or https://towardsdatascience.com/3-steps-to-forecast-time-series-lstm-with-tensorflow-keras-ba88c6f05237
    # NOTE: this is not currently working on Apple M1.
    # A possible fix: https://stackoverflow.com/questions/66373169/tensorflow-2-object-detection-api-numpy-version-errors/66486051#66486051
    search_params = {
        'epochs': int(to_log_space_1d(us[0], low=5, high=5000)),
        'patience': int(to_log_space_1d(us[1], low=1, high=10)),
        'jiggle_fraction': us[2]**2
    }

    n_search_params = len(search_params)
    n_lstm_layers = choice_from_dict(us[n_search_params], layer_choices)
    learning_rate = to_log_space_1d(us[n_search_params + 1],
                                    low=0.000001,
                                    high=0.001)
    opt_name = keras_optimizer_name(us[n_search_params + 2])
    dropout_rate = to_log_space_1d(us[n_search_params + 3],
                                   low=0.00001,
                                   high=0.2)
    keras_optimizer = keras_optimizer_from_name(opt_name=opt_name,
                                                learning_rate=learning_rate)
    info = {'keras_optimizer': opt_name, 'learning_rate': learning_rate}
    loss = mostly_mse(us[n_search_params + 4])
    offset = n_search_params + 5

    model = keras.Sequential()
    # First layer is permutation, so as to align with conventions for the dense networks
    model.add(keras.layers.Permute((2, 1), input_shape=(1, n_inputs)))

    # n_inputs = timesteps. The Permute layer above already fixed the input
    # shape, so an input_shape argument on the LSTM would be ignored by Keras
    # ((1, n_inputs) would also be wrong: after the permute the shape is
    # (n_inputs, 1)).
    layer_ndx = 0
    n_units = int(to_log_space_1d(us[layer_ndx + offset], low=units_low, high=units_high))
    model.add(keras.layers.LSTM(units=n_units, return_sequences=True))
    model.add(keras.layers.Dropout(dropout_rate))

    # Intermediate layers
    for layer_ndx in range(1, n_lstm_layers - 1):
        n_units = int(to_log_space_1d(us[layer_ndx + offset], low=units_low, high=units_high))
        model.add(keras.layers.LSTM(units=n_units, return_sequences=True))
        model.add(keras.layers.Dropout(dropout_rate))

    # Last LSTM layer uses the next unused cube coordinate
    layer_ndx = n_lstm_layers - 1
    n_units = int(to_log_space_1d(us[layer_ndx + offset], low=units_low, high=units_high))
    model.add(keras.layers.LSTM(units=n_units, return_sequences=False))

    # Dense
    model.add(keras.layers.Dense(1, activation='linear'))

    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
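
Each factory consumes a fixed prefix of the cube (three search parameters,
then layer count, learning rate, optimizer, dropout and loss) before the
per-layer coordinates, so the caller must supply enough coordinates for the
deepest possible network. A usage sketch for the LSTM factory; the
layer_choices weights here are purely illustrative:

import numpy as np

layer_choices = {2: 1, 3: 1, 4: 1}   # illustrative weights over LSTM depths
n_dim = 3 + 5 + max(layer_choices)   # search params + prefix + per-layer coords
us = np.random.rand(n_dim)
model, search_params, info = keras_lstm_factory(us, n_inputs=10,
                                                layer_choices=layer_choices)
model.summary()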
Example #4
def keras_mostly_linear_27(us, n_inputs: int):
    """ Maps cube onto model and search params """
    search_params = {
        'epochs': int(to_log_space_1d(us[0], low=50, high=5000)),
        'patience': int(to_log_space_1d(us[1], low=5, high=150)),
        'jiggle_fraction': us[2]
    }

    n_search_params = len(search_params)
    n_layers = choice_from_dict(us[n_search_params], {
        1: 5,
        2: 10,
        3: 20,
        4: 5,
        5: 5
    })  # keys: candidate layer counts; values: relative weights
    learning_rate = to_log_space_1d(us[n_search_params + 1],
                                    low=0.000001,
                                    high=0.001)
    opt_name = keras_optimizer_name(us[n_search_params + 2])
    keras_optimizer = keras_optimizer_from_name(opt_name=opt_name,
                                                learning_rate=learning_rate)
    info = {'keras_optimizer': opt_name, 'learning_rate': learning_rate}
    loss = mostly_mse(us[n_search_params + 3])
    offset = n_search_params + 4

    model = keras.Sequential()
    for layer_ndx in range(n_layers):
        n_units = int(
            to_log_space_1d(us[4 * layer_ndx + offset], low=2, high=128))
        activation = mostly_linear(us[4 * layer_ndx + offset + 1])
        kernel_size = us[4 * layer_ndx + offset + 2]
        bias_size = us[4 * layer_ndx + offset + 3]
        kernel_initializer_0 = keras.initializers.RandomUniform(
            minval=-kernel_size, maxval=kernel_size, seed=None)
        bias_initializer_0 = keras.initializers.RandomUniform(
            minval=-bias_size, maxval=bias_size, seed=None)
        # Keras only honours input_shape on the first layer; it is ignored
        # thereafter
        model.add(
            keras.layers.Dense(n_units,
                               activation=activation,
                               input_shape=(1, n_inputs),
                               kernel_initializer=kernel_initializer_0,
                               bias_initializer=bias_initializer_0))
    model.add(keras.layers.Dense(1, activation='linear'))
    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
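
mostly_linear and mostly_mse are not shown in these listings; judging from
their names and call sites they bias a categorical choice towards 'linear'
and 'mse' respectively. Plausible stand-ins, with both the split point and
the alternatives assumed:

def mostly_linear(u: float) -> str:
    # Mostly 'linear', occasionally something curvier (threshold and
    # alternative are assumptions, not the source's actual choices)
    return 'linear' if u < 0.8 else 'relu'

def mostly_mse(u: float) -> str:
    # Mostly 'mse', occasionally a more robust loss (same caveat)
    return 'mse' if u < 0.8 else 'mean_absolute_error'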
Example #5
from typing import List


def keras_nearby(us: List[float],
                 n_input: int,
                 skater_name: str,
                 k: int,
                 fixed_search_params: List[str] = None):
    """
       fixed_search_params : optional list naming any of 'epochs', 'patience',
                             'jiggle_fraction' and 'learning_rate' to hold fixed
    """
    model = load_champion_model(skater_name=skater_name, k=k, n_input=n_input)
    info = read_champion_info(skater_name=skater_name, k=k, n_input=n_input)

    # Normalise before the membership tests below (the default is None)
    fixed_search_params = [] if fixed_search_params is None else fixed_search_params

    if 'learning_rate' in fixed_search_params:
        learning_rate_scaling = 1
    else:
        learning_rate_scaling = to_log_space_1d(u=us[0], low=0.1, high=10.0)
    learning_rate = info['learning_rate'] * learning_rate_scaling
    keras_optimizer = keras_optimizer_from_name(info['keras_optimizer'],
                                                learning_rate=learning_rate)

    model.compile(loss='mse', optimizer=keras_optimizer)

    DEFAULT_LOW = {'epochs': 500, 'patience': 3, 'jiggle_fraction': 0.001}
    DEFAULT_HIGH = {'epochs': 5000, 'patience': 30, 'jiggle_fraction': 0.2}
    AS_INT = ['epochs', 'patience']

    u_ndx = 1
    search_params = dict()
    for thing in ['epochs', 'patience', 'jiggle_fraction']:
        if thing in fixed_search_params:
            search_params[thing] = info[thing]
        else:
            low = DEFAULT_LOW[thing]
            high = DEFAULT_HIGH[thing]
            number = to_log_space_1d(us[u_ndx], low=low, high=high)
            if thing in AS_INT:
                number = int(number)
            u_ndx += 1
            search_params[thing] = number

    return model, search_params, info
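
keras_nearby perturbs a previously saved champion rather than building a
model from scratch, so only the unfixed search parameters consume cube
coordinates. A usage sketch, assuming a champion has already been saved for
this skater (the skater_name value is purely illustrative):

us = [0.5, 0.3, 0.7]   # learning-rate scaling, then epochs, then patience
model, search_params, info = keras_nearby(us, n_input=10,
                                          skater_name='some_saved_skater',
                                          k=3,
                                          fixed_search_params=['jiggle_fraction'])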
Example #6
def keras_jiggly_10(us, n_inputs: int):
    search_params = {'epochs': 5000, 'patience': 6, 'jiggle_fraction': 0.5}
    info = {'keras_optimizer': "Adamax", 'learning_rate': 0.000345}
    loss = 'mse'
    units = [2, 5, 3, 5, 16, 1]  # per-layer widths; the last entry is superseded by the fixed output layer
    activations = ['linear', 'relu', 'relu', 'gelu', 'swish', 'linear']
    model = keras.Sequential()
    keras_optimizer = keras_optimizer_from_name(
        'Adamax', learning_rate=info['learning_rate'])

    for layer_ndx, (n_units, activation) in enumerate(zip(units, activations)):
        if layer_ndx == len(units) - 1:
            # Last layer: single output unit, default linear activation
            model.add(keras.layers.Dense(1))
        else:
            kernel_size = us[2 * layer_ndx]
            bias_size = us[2 * layer_ndx + 1]
            kernel_initializer_0 = keras.initializers.RandomUniform(
                minval=-kernel_size, maxval=kernel_size, seed=None)
            bias_initializer_0 = keras.initializers.RandomUniform(
                minval=-bias_size, maxval=bias_size, seed=None)
            if layer_ndx == 0:
                model.add(
                    keras.layers.Dense(n_units,
                                       activation=activation,
                                       input_shape=(1, n_inputs),
                                       kernel_initializer=kernel_initializer_0,
                                       bias_initializer=bias_initializer_0))
            else:
                model.add(
                    keras.layers.Dense(n_units,
                                       activation=activation,
                                       kernel_initializer=kernel_initializer_0,
                                       bias_initializer=bias_initializer_0))
    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
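
Several factories pick the optimizer via keras_optimizer_name and
keras_optimizer_from_name, whose definitions also sit outside these listings.
A minimal pair consistent with the call sites; the candidate list is an
assumption:

from tensorflow import keras

def keras_optimizer_name(u: float) -> str:
    # Candidate list is assumed; any fixed ordering of Keras optimizers works
    names = ['SGD', 'RMSprop', 'Adam', 'Adadelta', 'Adagrad', 'Adamax', 'Nadam']
    return names[min(int(u * len(names)), len(names) - 1)]

def keras_optimizer_from_name(opt_name: str, learning_rate: float):
    # Resolve the optimizer class by name and instantiate it
    return getattr(keras.optimizers, opt_name)(learning_rate=learning_rate)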
Example #7
def keras_deeper_swish_17(us, n_inputs: int):
    """ Maps cube onto model and search params """
    search_params = {
        'epochs': int(to_log_space_1d(us[0], low=10, high=100)),
        'patience': int(to_log_space_1d(us[1], low=1, high=10)),
        'jiggle_fraction': us[2]**2
    }

    n_search_params = len(search_params)
    n_layers = choice_from_dict(us[n_search_params], {
        5: 10,
        6: 10,
        7: 10,
        8: 10,
        9: 10,
        10: 10
    })  # equal relative weights over 5-10 layers
    learning_rate = to_log_space_1d(us[n_search_params + 1],
                                    low=0.000001,
                                    high=0.001)
    opt_name = keras_optimizer_name(us[n_search_params + 2])
    keras_optimizer = keras_optimizer_from_name(opt_name=opt_name,
                                                learning_rate=learning_rate)
    info = {'keras_optimizer': opt_name, 'learning_rate': learning_rate}
    loss = mostly_mse(us[n_search_params + 3])
    offset = n_search_params + 4

    model = keras.Sequential()
    for layer_ndx in range(n_layers):
        n_units = int(
            to_log_space_1d(us[layer_ndx + offset], low=2, high=128))
        # Keras honours input_shape only on the first layer and ignores it
        # thereafter. Note that, despite the function's name, these Dense
        # layers use the default linear activation rather than swish.
        model.add(keras.layers.Dense(n_units, input_shape=(1, n_inputs)))
        model.add(keras.layers.Dropout(0.1))
    model.add(keras.layers.Dense(1, activation='linear'))
    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
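
As elsewhere, the caller sizes the cube to the worst case. For
keras_deeper_swish_17 the prefix after the three search parameters is four
coordinates (layer count, learning rate, optimizer, loss), plus one
coordinate per layer:

import numpy as np

max_layers = 10                      # largest key in the layer-count dict
us = np.random.rand(3 + 4 + max_layers)
model, search_params, info = keras_deeper_swish_17(us, n_inputs=5)
model.summary()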