Ejemplo n.º 1
0
 def testPreNet(self):
     """Chaining a pre-net into a post-net via pre_model should train."""
     features = np.random.random((100, 180))
     targets = np.random.random(100)
     # Front half: a single 40-unit sigmoid layer taking the raw input.
     front_net = dense_model(nb_units=40,
                             activations=sigmoid,
                             input_shape=(180, ))
     # Back half is stacked on top of the front via pre_model.
     full_net = dense_model(nb_units=[40, 1],
                            activations=[sigmoid, linear],
                            pre_model=front_net)
     full_net.fit(x=features, y=targets, epochs=1)
Ejemplo n.º 2
0
def rnn_model(input_dim: int = 20,
              output_dim: int = 50,
              input_length: int = 9,
              nb_rnn_neurons: Optional[List[int]] = None,
              dense_nb_neurons: Optional[List[int]] = None,
              dense_activations: Optional[List] = None,
              rnn_cell_type=GRU,
              pre_model=None,
              dropout: float = 0.5,
              name: Optional[str] = None,
              *args,
              **kwargs) -> Sequential:
    """
    Build an embedding + stacked bidirectional RNN model, optionally
    followed by a dense post-net.

    :param input_dim: size of the vocabulary (embedding input dimension)
    :param output_dim: dimensionality of the embedding vectors
    :param input_length: length of the tokenized input sequences
    :param nb_rnn_neurons: nb of neurons per RNN layer; defaults to a
        single 32-unit layer when None
    :param dense_nb_neurons: nb of neurons per dense layer of the post-net
    :param dense_activations: activation per dense layer of the post-net
    :param rnn_cell_type: RNN cell class to stack (e.g. GRU, LSTM)
    :param pre_model: existing keras Sequential to extend instead of
        starting a fresh model
    :param dropout: dropout rate applied after each RNN layer
    :param name: name of the network (ignored when pre_model is given)
    :param args: extra positional params forwarded to dense_model
    :param kwargs: extra keyword params forwarded to dense_model
    :return: keras Sequential model
    """

    model = pre_model if pre_model else Sequential(name=name)

    # Embedding layer: token ids -> dense vectors
    model.add(
        Embedding(input_dim=input_dim,
                  output_dim=output_dim,
                  input_length=input_length))

    # Stacked bidirectional RNN (return_sequences so layers can be chained)
    for nb_neurons in (nb_rnn_neurons if nb_rnn_neurons else [32]):
        model.add(
            Bidirectional(rnn_cell_type(nb_neurons, return_sequences=True)))
        model.add(Dropout(dropout))

    # Dense post-net: only attached when its layout is fully specified
    model.add(Flatten())
    if dense_activations and dense_nb_neurons:
        return dense_model(nb_units=dense_nb_neurons,
                           activations=dense_activations,
                           pre_model=model,
                           *args,
                           **kwargs)

    return model
Ejemplo n.º 3
0
 def testReg(self):
     """A mismatched units/activations layout raises ValueError; a valid
     layout trains with L1 and dropout regularization enabled."""
     features = np.random.random((100, 180))
     targets = np.random.random(100)
     bad_units = [40, 1]
     good_units = [40, 40, 1]
     acts = [sigmoid, sigmoid, linear]
     shape = (180, )
     # Two layer sizes against three activations must be rejected.
     self.assertRaises(ValueError, dense_model, bad_units, acts, shape)
     net = dense_model(nb_units=good_units,
                       activations=acts,
                       input_shape=shape,
                       l1_regularization=[0.01, 0.01, 0.01],
                       dropout_reg=0.5)
     net.fit(x=features, y=targets, epochs=1)
Ejemplo n.º 4
0
 def testDenseModel(self):
     """Every supported optimizer builds and fits a valid dense model;
     an inconsistent units/activations layout raises ValueError."""
     data, labels = np.random.random((100, 180)), np.random.random(100)
     nb_units_false = [40, 1]
     nb_units_true = [40, 40, 1]
     activations = [sigmoid, sigmoid, linear]
     input_shape = (180, )
     # Two layer sizes against three activations must be rejected.
     self.assertRaises(ValueError, dense_model,
                       *[nb_units_false, activations, input_shape])
     lr, m1, m2, rho, epsilon, decay = 0.01, 0.9, 0.999, 0.95, 1e-8, 0.
     # Fix: adamax was previously listed twice; each optimizer is now
     # exercised exactly once.
     for opt in (sgd, rmsprop, adamax, adam, adagrad, adadelta, nadam):
         model = dense_model(nb_units=nb_units_true,
                             activations=activations,
                             input_shape=input_shape,
                             optimizer=opt,
                             loss=mean_squared_error,
                             learning_rate=lr,
                             momentum_1=m1,
                             momentum_2=m2,
                             epsilon=epsilon,
                             decay=decay,
                             rho=rho)
         model.fit(x=data, y=labels, epochs=1)
Ejemplo n.º 5
0
data = load_data()
enc, x_oh, y = get_one_hot_data(data)
x_train, x_val, y_train, y_val, data_train, data_val = train_test_split(
    x_oh, y, data, train_size=0.9)

# NOTE(review): categories / inner_cv / outer_cv are built but never used
# below — presumably left over from a nested-CV setup; confirm before removing.
categories = meas_discretize(pd.Series(y.reshape(-1)))
inner_cv = KFold(n_splits=10, shuffle=True, random_state=123)
outer_cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=321)

# Grid-search the learning rate.  Fix: GridSearchCV's `fit_params`
# constructor argument was deprecated in scikit-learn 0.19 and removed in
# 0.21 — fit-time parameters are passed to fit() directly instead.
a = GridSearchCV(reg, {"learning_rate": [0.01, 0.005]}, n_jobs=2)
a.fit(x_train, y_train, epochs=20, batch_size=128)
b = a.predict(x_val)

# np.exp inverts the model output — presumably the targets were
# log-transformed upstream; verify against the training pipeline.
c = pd.Series(np.exp(b), name="predicted", index=data_val.index)
pd.concat([data_val, c], axis=1).to_csv("test.tsv", sep='\t', index=False)

best_params = a.best_params_

# Retrain a final model with the best hyper-parameters and persist it,
# together with the one-hot encoder needed to reproduce the features.
model = dense_model(**merge(best_params, d1[1]))
model.fit(x_train, y_train)
model.save("output/model.mdl")

with open("output/encoder.pck", "wb") as f:
    pickle.dump(enc, f)
Ejemplo n.º 6
0
def cnn_model(conv_layout: Union[conv_operation, List[conv_operation]],
              input_shape=None,
              pre_model=None,
              dense_nb_neurons: Optional[List[int]] = None,
              dense_activations=None,
              name: Optional[str] = None,
              *args,
              **kwargs) -> Sequential:
    """
    CNN model with a variable number of convolution (+ pooling) layers,
    optionally followed by a dense feed-forward post-net.

    :param conv_layout: a conv_operation or list of conv_operations, each
        describing one convolution layer and its optional pooling
    :param input_shape: shape of the input data; required when building a
        fresh model (no pre_model)
    :param pre_model: existing keras Sequential to extend
    :param dense_nb_neurons: nb of neurons per layer in the dense post-net
    :param dense_activations: activation per layer in the dense post-net
    :param name: name of the network (ignored when pre_model is given)
    :param args: extra positional params forwarded to dense_model
    :param kwargs: extra keyword params forwarded to dense_model
    :return: keras Sequential model
    """

    model = pre_model if pre_model else Sequential(name=name)

    if not isinstance(conv_layout, list):
        conv_layout = [conv_layout]

    # First layer needs an explicit input_shape when starting from scratch.
    conv_start = 0
    if input_shape and not pre_model:
        conv_op = conv_layout[0]
        model.add(
            conv_op.conv_dim(filters=conv_op.filters,
                             kernel_size=conv_op.kernel_size,
                             strides=conv_op.strides,
                             activation=conv_op.activation,
                             padding=conv_op.padding,
                             input_shape=input_shape))
        if conv_op.pool_type:
            model.add(conv_op.pool_type(pool_size=conv_op.pool_size))
        conv_start = 1

    # Remaining convolution (+ optional pooling) layers
    for conv, filters, kernel_size, strides, padding, activation, \
            pool_type, pool_size in conv_layout[conv_start:]:
        model.add(
            conv(filters=filters,
                 strides=strides,
                 kernel_size=kernel_size,
                 padding=padding,
                 activation=activation))
        if pool_type:
            model.add(pool_type(pool_size=pool_size))

    model.add(Flatten())

    # Fix: consistent with rnn_model — only attach the dense post-net when
    # its layout is fully specified; previously dense_model was called even
    # with the None defaults.
    if dense_nb_neurons and dense_activations:
        return dense_model(nb_units=dense_nb_neurons,
                           activations=dense_activations,
                           pre_model=model,
                           *args,
                           **kwargs)

    return model