Example 1
    # Requires (assumed): import tensorflow as tf
    # and: from tensorflow.keras.models import Sequential
    # and: from tensorflow.keras.layers import LSTM, Bidirectional, Dense
    def create_model(self):
        # log the hyperparameter globals used for this run
        print(CELLS)
        print(DROPOUT_PROBABILITY)
        print(EMBEDDING_DIM)
        print(LEARNING_RATE)
        print(DECAY)
        model = Sequential(name="Practice_Model")

        # forward LSTM for layer 1
        lstm_fw_layer1 = LSTM(CELLS,
                              dropout=DROPOUT_PROBABILITY,
                              activation='relu',
                              return_sequences=True)
        # wrap it in a bidirectional (BLSTM) layer; input_shape is
        # (timesteps, features) -- the batch dimension is implicit
        blstm_layer1 = Bidirectional(lstm_fw_layer1,
                                     input_shape=(self.batch_size, 1),
                                     name="LSTM_Layer_1")

        # forward LSTM for layer 2
        lstm_fw_layer2 = LSTM(CELLS,
                              dropout=DROPOUT_PROBABILITY,
                              activation='relu',
                              return_sequences=True)
        # wrap it in a bidirectional (BLSTM) layer
        blstm_layer2 = Bidirectional(lstm_fw_layer2,
                                     input_shape=(self.batch_size, 1),
                                     name="LSTM_Layer_2")

        # dense projection down to the embedding dimension
        feedforward_layer = Dense(EMBEDDING_DIM,
                                  activation='tanh',
                                  name="Embedding_Layer")

        # creating the model
        model.add(blstm_layer1)
        model.add(blstm_layer2)
        model.add(feedforward_layer)

        # 'lr' is the deprecated alias of 'learning_rate'; 'decay' is the
        # legacy per-step learning-rate decay argument of tf.keras optimizers
        opt = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE, decay=DECAY)
        model.compile(loss=self.loss, optimizer=opt, metrics=['accuracy'])

        print('Model compiled successfully')

        return model
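A minimal sketch of the context this method assumes: module-level hyperparameter constants plus an owning class that provides self.batch_size and self.loss. All names and values below are illustrative, not from the original source:

# Illustrative values for the hyperparameter globals (hypothetical)
CELLS = 64
DROPOUT_PROBABILITY = 0.2
EMBEDDING_DIM = 128
LEARNING_RATE = 1e-3
DECAY = 1e-6

class Trainer:  # hypothetical owning class
    def __init__(self, batch_size, loss='mse'):
        self.batch_size = batch_size
        self.loss = loss

    # ... create_model(self) as defined above ...

model = Trainer(batch_size=32).create_model()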
Example 2
import os
from contextlib import redirect_stdout

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Save the model description to a text file
with open(os.path.join(save_dir, "model_description.txt"), "w") as file:
    with redirect_stdout(file):
        print(model_description)

############################################################################################
# MODEL CONFIGURATION
############################################################################################
# MODEL ARCHITECTURE
####################
# Copy every layer except the final classification layer into a new sequential model
model = Sequential()
for layer in loaded_model.layers[:-1]:
    model.add(layer)
# Rename the model (Sequential has no public name setter, hence the private attribute)
model._name = model_name
# Freeze the weights of the first blocks and leave the later ones trainable
for layer in model.layers[:limit_layer]:
    layer.trainable = False
for layer in model.layers[limit_layer:]:
    layer.trainable = True
# Add a new softmax output layer with one unit per class
model.add(Dense(len(class_names), activation="softmax"))

# Print the model summary to the console, then save it to a text file
model.summary()
with open(os.path.join(save_dir, "model_summary.txt"), "w") as file:
    with redirect_stdout(file):
        model.summary()
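The snippet rebuilds and freezes the network but never compiles it; before fine-tuning, a compile-and-fit step like the following is still needed. A minimal sketch: train_ds and val_ds are hypothetical datasets, and the optimizer, loss, and epoch count are illustrative, not from the original source.

# Hypothetical fine-tuning step (train_ds / val_ds are assumed tf.data datasets)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",  # matches the softmax head added above
              metrics=["accuracy"])
history = model.fit(train_ds, validation_data=val_ds, epochs=5)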
Example 3
from tensorflow import keras as ks  # assumed alias; the original imports are not shown
from tensorflow.keras import regularizers
from tensorflow.keras.layers import BatchNormalization, Dense
from tensorflow.keras.models import Sequential


def NN(feat_train, target_train, feat_val, target_val, params):
    # unpack the hyperparameters from the params dict
    epoch = params['epoch']
    batch = params['batch']
    layer = params['layer']
    nodes = params['nodes']
    wl2 = params['wl2']
    lr = params['lr']
    flr = params['flr']
    flrstep = params['flrstep']
    in_weight = params['in_weight']
    import_weights = params['import_weights']
    silent = params['silent']

    dim_in = len(feat_train[0])     # number of input features
    dim_out = len(target_train[0])  # number of output targets
    ## input layer
    model = Sequential([
        Dense(nodes,
              input_shape=(dim_in, ),
              kernel_regularizer=regularizers.l2(wl2),
              activation='tanh'),
        BatchNormalization()
    ], name="single")
    ## hidden layers
    for _ in range(layer):
        model.add(
            Dense(nodes,
                  kernel_regularizer=regularizers.l2(wl2),
                  activation='tanh'))
        model.add(BatchNormalization())
    ## output layer
    model.add(
        Dense(dim_out,
              kernel_regularizer=regularizers.l2(wl2),
              activation='linear'))

    adam = ks.optimizers.Adam(learning_rate=lr,
                              beta_1=0.9,
                              beta_2=0.999,
                              amsgrad=False)
    model.compile(
        optimizer=adam,
        loss='mean_squared_error',
        # rmse and dmax are custom metric functions defined elsewhere in the source
        metrics=['mae', rmse, dmax],
    )

    if silent == 0:
        print(model.summary())

    if in_weight == -1:
        # inference-only mode: load the supplied weights and just predict
        model.set_weights(import_weights)
        print('Successfully loaded weights')
        # model.load_weights('%s-%s' % (model_name, weights_h5))
        history = model.predict(feat_train)  # returns predictions, not a History object
    else:
        if in_weight > 0:
            model.set_weights(import_weights)
            print('Successfully loaded weights')
            # model.load_weights('%s-%s' % (model_name, weights_h5))
        history = model.fit(feat_train,
                            target_train,
                            epochs=epoch,
                            batch_size=batch,
                            callbacks=[
                                # lr_scheduler is a schedule function defined elsewhere
                                ks.callbacks.LearningRateScheduler(
                                    lr_scheduler, verbose=0)
                            ],
                            validation_data=(feat_val, target_val),
                            shuffle=True)

    return history, model
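A hedged usage sketch for NN(): the params keys mirror the unpacking at the top of the function, and the toy shapes are illustrative. Note that rmse, dmax, and lr_scheduler are custom helpers defined elsewhere in the original source and must exist for the call to run.

import numpy as np

# Toy data: 100 training and 20 validation samples, 8 features, 2 targets
feat_train = np.random.rand(100, 8)
target_train = np.random.rand(100, 2)
feat_val = np.random.rand(20, 8)
target_val = np.random.rand(20, 2)

params = {
    'epoch': 50, 'batch': 16, 'layer': 2, 'nodes': 32,
    'wl2': 1e-4, 'lr': 1e-3, 'flr': 0.5, 'flrstep': 10,
    'in_weight': 0,          # 0 trains from freshly initialized weights
    'import_weights': None,  # only read when in_weight != 0
    'silent': 1,
}
history, model = NN(feat_train, target_train, feat_val, target_val, params)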