def train_discriminator(X_train, Y_train, model):
    """Compile *model* and train it on (X_train, Y_train).

    Trains with SGD(lr=0.01), categorical cross-entropy, a 10% validation
    split, early stopping, LR reduction on plateau, best-weights
    checkpointing to 'bestmodel.hdf5', and CSV logging.

    Returns the Keras ``History`` object produced by ``model.fit``.
    """
    sgd = optimizers.SGD(lr=0.01)

    # Pass the configured optimizer instance — the original passed the
    # string 'sgd', which silently ignored the lr=0.01 setting above.
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    early_stopping = callbacks.EarlyStopping(monitor='val_loss',
                                             patience=10,
                                             verbose=0,
                                             mode='min')
    mcp_save = callbacks.ModelCheckpoint('bestmodel.hdf5',
                                         save_best_only=True,
                                         monitor='val_loss',
                                         mode='min')
    reduce_lr_loss = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                 factor=0.1,
                                                 patience=10,
                                                 verbose=1,
                                                 # 'epsilon' was renamed to
                                                 # 'min_delta' in Keras 2.x
                                                 min_delta=1e-4,
                                                 mode='min')
    csv_logger = callbacks.CSVLogger('Discriminator_stats_cnn.csv')
    # early_stopping was previously constructed but never registered with
    # fit(); include it so training actually stops on a val_loss plateau.
    history = model.fit(X_train,
                        Y_train,
                        callbacks=[early_stopping, mcp_save,
                                   reduce_lr_loss, csv_logger],
                        validation_split=0.1,
                        epochs=100,
                        batch_size=16)
    return history
# --- Code example #2 (score: 0) ---
# File: nlp_trainer.py — Project: xcalibersword/chatbot
# Final classification head: one softmax unit per intent class.
outs = Dense(units=num_intents,
             activation='softmax')(flat)  # Sigmoid or softmax?
model = Model(inputs=main_input, outputs=outs)

# NOTE(review): 10e-5 == 1e-4 — confirm this wasn't meant to be 1e-5.
INITIAL_LEARN_RATE = 10e-5
optimizer = Adam(learning_rate=INITIAL_LEARN_RATE)
# optimizer = RMSprop(learning_rate = 3e-5)

# BUG FIX: `categorical_crossentropy(label_smoothing=0.005)` raises a
# TypeError — the functional loss requires (y_true, y_pred) positionally.
# Wrap it so label smoothing is applied on every batch instead.
model.compile(optimizer,
              loss=lambda y_true, y_pred: categorical_crossentropy(
                  y_true, y_pred, label_smoothing=0.005),
              metrics=['accuracy'])

cbks = [
    callbacks.TerminateOnNaN(),
    # NOTE(review): this monitors 'val_loss' while EarlyStopping below
    # monitors 'loss'. 'val_loss' only exists if the fit() call passes
    # validation data (presumably test_set) — verify, otherwise this
    # callback never fires.
    callbacks.ReduceLROnPlateau(monitor='val_loss',
                                factor=0.1,
                                patience=5,
                                verbose=1),
    callbacks.EarlyStopping(monitor='loss',
                            min_delta=0,
                            patience=10,
                            verbose=1,
                            baseline=None,
                            restore_best_weights=True),
]

model.summary()

# Held-out evaluation pair; presumably passed to fit() as validation_data.
test_set = (test_x, test_y)
# Can't have validation split because too many intents.
# Val accuracy of 0.94 is good
model.fit(x=embed_xvals,