Code example #1
from tensorflow import keras

# The helpers to_log_space_1d, choice_from_dict, keras_optimizer_name,
# keras_optimizer_from_name, mostly_mse and mostly_linear come from the
# surrounding project; a sketch of plausible stand-ins follows this example.


def keras_cnn_factory(us, n_inputs: int, layer_choices: dict, filters_low: int,
                      filters_high: int):
    """ Maps a point us on the unit cube onto a Keras model and search params """
    search_params = {
        'epochs': int(to_log_space_1d(us[0], low=5, high=5000)),
        'patience': int(to_log_space_1d(us[1], low=1, high=10)),
        'jiggle_fraction': us[2]**2
    }

    n_search_params = len(search_params)
    n_cnn_layers = choice_from_dict(us[n_search_params], layer_choices)
    learning_rate = to_log_space_1d(us[n_search_params + 1],
                                    low=0.000001,
                                    high=0.001)
    opt_name = keras_optimizer_name(us[n_search_params + 2])
    dropout_rate = to_log_space_1d(us[n_search_params + 3],
                                   low=0.00001,
                                   high=0.2)  # computed but unused in this CNN variant
    keras_optimizer = keras_optimizer_from_name(opt_name=opt_name,
                                                learning_rate=learning_rate)
    info = {'keras_optimizer': opt_name, 'learning_rate': learning_rate}
    loss = mostly_mse(us[n_search_params + 4])
    offset = n_search_params + 5

    model = keras.Sequential()
    # First layer is permutation, so as to align with conventions for the dense networks
    model.add(keras.layers.Permute((2, 1), input_shape=(1, n_inputs)))

    # n_inputs = timesteps
    # Convolutional layers; input_shape is ignored after the Permute layer but
    # retained from the original per-layer construction
    for layer_ndx in range(n_cnn_layers):
        n_filters = int(
            to_log_space_1d(us[layer_ndx + offset],
                            low=filters_low,
                            high=filters_high))
        model.add(
            keras.layers.Conv1D(filters=n_filters,
                                input_shape=(n_inputs, 1),
                                kernel_size=3))

    # Tail: flatten, then dense head sized by the next cube coordinate
    layer_ndx = n_cnn_layers
    n_units = int(to_log_space_1d(us[layer_ndx + offset], low=10,
                                  high=200))
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(n_units))
    model.add(keras.layers.Dense(1, activation='linear'))

    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
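
All four factories lean on a handful of small helpers from the surrounding project whose implementations are not shown here. The sketch below gives minimal plausible stand-ins, assuming to_log_space_1d maps a coordinate in [0, 1] onto an interval with logarithmic spacing, choice_from_dict makes a weighted choice over a dict's keys, and the remaining helpers select an optimizer and a loss. These are assumptions for illustration, not the project's actual code.

import math
import numpy as np
from tensorflow import keras


def to_log_space_1d(u: float, low: float, high: float) -> float:
    """ Map u in [0, 1] onto [low, high] with logarithmic spacing (assumed behavior). """
    return math.exp(math.log(low) + u * (math.log(high) - math.log(low)))


def choice_from_dict(u: float, d: dict):
    """ Weighted choice: keys of d picked with probability proportional to values (assumed). """
    keys = list(d.keys())
    weights = np.array([d[k] for k in keys], dtype=float)
    cdf = np.cumsum(weights) / weights.sum()
    return keys[min(int(np.searchsorted(cdf, u)), len(keys) - 1)]


def keras_optimizer_name(u: float) -> str:
    """ Pick an optimizer name from a fixed menu (the menu itself is an assumption). """
    names = ['SGD', 'RMSprop', 'Adam', 'Adadelta', 'Adagrad', 'Adamax', 'Nadam']
    return names[min(int(u * len(names)), len(names) - 1)]


def keras_optimizer_from_name(opt_name: str, learning_rate: float):
    """ Instantiate a Keras optimizer by class name with the given learning rate. """
    return keras.optimizers.get({'class_name': opt_name,
                                 'config': {'learning_rate': learning_rate}})


def mostly_mse(u: float) -> str:
    """ Usually mean squared error, occasionally an alternative loss (assumed split). """
    return 'mean_squared_error' if u < 0.9 else 'mean_absolute_error'


def mostly_linear(u: float) -> str:
    """ Usually a linear activation, occasionally a nonlinear one (assumed split). """
    return 'linear' if u < 0.8 else 'tanh'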
Code example #2
def keras_lstm_factory(us, n_inputs: int, layer_choices: dict, units_low=4,
                       units_high=128):
    """ Maps cube onto model and search params """
    # Inspired by https://towardsdatascience.com/using-lstms-to-forecast-time-series-4ab688386b1f
    # But see https://shiva-verma.medium.com/understanding-input-and-output-shape-in-lstm-keras-c501ee95c65e
    # Or https://towardsdatascience.com/3-steps-to-forecast-time-series-lstm-with-tensorflow-keras-ba88c6f05237
    # THIS IS NOT WORKING CURRENTLY on M1
    # Maybe ... https://stackoverflow.com/questions/66373169/tensorflow-2-object-detection-api-numpy-version-errors/66486051#66486051
    search_params = {
        'epochs': int(to_log_space_1d(us[0], low=5, high=5000)),
        'patience': int(to_log_space_1d(us[1], low=1, high=10)),
        'jiggle_fraction': us[2]**2
    }

    n_search_params = len(search_params)
    n_lstm_layers = choice_from_dict(us[n_search_params], layer_choices)
    learning_rate = to_log_space_1d(us[n_search_params + 1],
                                    low=0.000001,
                                    high=0.001)
    opt_name = keras_optimizer_name(us[n_search_params + 2])
    dropout_rate = to_log_space_1d(us[n_search_params + 3],
                                   low=0.00001,
                                   high=0.2)
    keras_optimizer = keras_optimizer_from_name(opt_name=opt_name,
                                                learning_rate=learning_rate)
    info = {'keras_optimizer': opt_name, 'learning_rate': learning_rate}
    loss = mostly_mse(us[n_search_params + 4])
    offset = n_search_params + 5

    model = keras.Sequential()
    # First layer is permutation, so as to align with conventions for the dense networks
    model.add(keras.layers.Permute((2, 1), input_shape=(1, n_inputs)))

    # n_inputs = timesteps
    layer_ndx = 0
    n_units = int(to_log_space_1d(us[layer_ndx + offset],
                                  low=units_low, high=units_high))
    model.add(keras.layers.LSTM(units=n_units, input_shape=(1, n_inputs),
                                return_sequences=True))
    model.add(keras.layers.Dropout(dropout_rate))

    # Intermediate layers
    for layer_ndx in range(1, n_lstm_layers - 1):
        n_units = int(to_log_space_1d(us[layer_ndx + offset],
                                      low=units_low, high=units_high))
        model.add(keras.layers.LSTM(units=n_units, return_sequences=True))
        model.add(keras.layers.Dropout(dropout_rate))

    # Last layer (index n_lstm_layers - 1, so no cube coordinate is skipped;
    # the original used n_lstm_layers here, which left one coordinate unused)
    layer_ndx = n_lstm_layers - 1
    n_units = int(to_log_space_1d(us[layer_ndx + offset],
                                  low=units_low, high=units_high))
    model.add(keras.layers.LSTM(units=n_units, return_sequences=False))

    # Dense head
    model.add(keras.layers.Dense(1, activation='linear'))

    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
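
Factories like these are meant to be driven by a search over the unit cube. The following hypothetical glue code, assuming the factory above together with the helper sketch after example #1, samples one random cube point, builds the LSTM model, and fits it on toy data with early stopping wired to the patience search parameter.

import numpy as np
from tensorflow import keras

layer_choices = {1: 10, 2: 10, 3: 5}   # hypothetical weighting over LSTM layer counts
n_inputs = 32
n_dim = 16                             # generous upper bound on coordinates consumed (assumed)
us = np.random.rand(n_dim)

model, search_params, info = keras_lstm_factory(us, n_inputs=n_inputs,
                                                layer_choices=layer_choices)

# Toy regression data shaped to match the Permute layer's (1, n_inputs) input
x = np.random.randn(200, 1, n_inputs)
y = np.random.randn(200, 1)

es = keras.callbacks.EarlyStopping(patience=search_params['patience'],
                                   restore_best_weights=True)
model.fit(x, y, epochs=min(search_params['epochs'], 50),  # capped for the sketch
          validation_split=0.2, callbacks=[es], verbose=0)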
Code example #3
def keras_mostly_linear_27(us, n_inputs: int):
    """ Maps cube onto model and search params """
    search_params = {
        'epochs': int(to_log_space_1d(us[0], low=50, high=5000)),
        'patience': int(to_log_space_1d(us[1], low=5, high=150)),
        'jiggle_fraction': us[2]
    }

    n_search_params = len(search_params)
    # Weighted choice over the number of hidden layers
    n_layers = choice_from_dict(us[n_search_params],
                                {1: 5, 2: 10, 3: 20, 4: 5, 5: 5})
    learning_rate = to_log_space_1d(us[n_search_params + 1],
                                    low=0.000001,
                                    high=0.001)
    opt_name = keras_optimizer_name(us[n_search_params + 2])
    keras_optimizer = keras_optimizer_from_name(opt_name=opt_name,
                                                learning_rate=learning_rate)
    info = {'keras_optimizer': opt_name, 'learning_rate': learning_rate}
    loss = mostly_mse(us[n_search_params + 3])
    offset = n_search_params + 4

    model = keras.Sequential()
    for layer_ndx in range(n_layers):
        n_units = int(
            to_log_space_1d(us[4 * layer_ndx + offset], low=2, high=128))
        activation = mostly_linear(us[4 * layer_ndx + offset + 1])
        # kernel_size and bias_size are initializer half-widths here,
        # not convolution kernel sizes
        kernel_size = us[4 * layer_ndx + offset + 2]
        bias_size = us[4 * layer_ndx + offset + 3]
        kernel_initializer_0 = keras.initializers.RandomUniform(
            minval=-kernel_size, maxval=kernel_size, seed=None)
        bias_initializer_0 = keras.initializers.RandomUniform(
            minval=-bias_size, maxval=bias_size, seed=None)
        model.add(
            keras.layers.Dense(n_units,
                               activation=activation,
                               input_shape=(1, n_inputs),  # only honored on the first layer
                               kernel_initializer=kernel_initializer_0,
                               bias_initializer=bias_initializer_0))
    model.add(keras.layers.Dense(1, activation='linear'))
    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
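
This factory consumes four cube coordinates per layer (units, activation, and the two initializer half-widths) on top of seven global ones, so the maximum of five layers gives 7 + 4 * 5 = 27, which presumably explains the 27 in the function name. A small sanity check:

def cube_dim_for_mostly_linear_27(n_layers: int) -> int:
    """ Cube coordinates consumed: 3 search params, then layer count,
        learning rate, optimizer and loss, then 4 per layer. """
    offset = 3 + 4
    return offset + 4 * n_layers

assert cube_dim_for_mostly_linear_27(5) == 27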
Code example #4
def keras_deeper_swish_17(us, n_inputs: int):
    """ Maps cube onto model and search params """
    search_params = {
        'epochs': int(to_log_space_1d(us[0], low=10, high=100)),
        'patience': int(to_log_space_1d(us[1], low=1, high=10)),
        'jiggle_fraction': us[2]**2
    }

    n_search_params = len(search_params)
    # Weighted choice over the number of hidden layers (uniform over 5..10)
    n_layers = choice_from_dict(us[n_search_params],
                                {5: 10, 6: 10, 7: 10, 8: 10, 9: 10, 10: 10})
    learning_rate = to_log_space_1d(us[n_search_params + 1],
                                    low=0.000001,
                                    high=0.001)
    opt_name = keras_optimizer_name(us[n_search_params + 2])
    keras_optimizer = keras_optimizer_from_name(opt_name=opt_name,
                                                learning_rate=learning_rate)
    info = {'keras_optimizer': opt_name, 'learning_rate': learning_rate}
    loss = mostly_mse(us[n_search_params + 3])
    offset = n_search_params + 4

    model = keras.Sequential()
    for layer_ndx in range(n_layers):
        n_units = int(
            to_log_space_1d(us[layer_ndx + offset], low=2, high=128))
        # Note: no activation is specified, so these layers are linear despite
        # the 'swish' in the function name
        model.add(keras.layers.Dense(n_units, input_shape=(1, n_inputs)))
        model.add(keras.layers.Dropout(0.1))
    model.add(keras.layers.Dense(1, activation='linear'))
    model.compile(loss=loss, optimizer=keras_optimizer)
    return model, search_params, info
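
By the same counting, keras_deeper_swish_17 uses seven global coordinates plus one per layer, so ten layers give 7 + 10 = 17, matching its name. Since each factory is just a map from [0, 1]^n to a compiled model, any derivative-free optimizer over the cube can drive it. The random-search driver below is hypothetical glue code, not part of the project; the toy data, epoch cap and trial count are assumptions.

import numpy as np
from tensorflow import keras

def random_search(factory, n_dim: int, n_trials: int, x, y):
    """ Evaluate the factory at random cube points; return the best result found. """
    best_loss, best = float('inf'), None
    for _ in range(n_trials):
        us = np.random.rand(n_dim)
        model, search_params, info = factory(us, n_inputs=x.shape[-1])
        es = keras.callbacks.EarlyStopping(patience=search_params['patience'],
                                           restore_best_weights=True)
        hist = model.fit(x, y, epochs=min(search_params['epochs'], 50),  # capped for the sketch
                         validation_split=0.2, callbacks=[es], verbose=0)
        val_loss = min(hist.history['val_loss'])
        if val_loss < best_loss:
            best_loss, best = val_loss, (model, search_params, info)
    return best_loss, best

x = np.random.randn(200, 1, 8)
y = np.random.randn(200, 1, 1)
best_loss, (model, params, info) = random_search(keras_deeper_swish_17, n_dim=17,
                                                 n_trials=3, x=x, y=y)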