# NOTE(review): this chunk is a flattened fragment — it begins mid-argument-list
# (the enclosing `ModelCheckpoint(os.path.join(` opens before this view) and the
# trailing `else:` pairs with an `if` outside this view, so the code below is
# deliberately left byte-identical. Issues to address where the full file is
# visible:
#  - `period=1` on ModelCheckpoint and `Model.fit_generator` are deprecated in
#    TF 2.x Keras — prefer `save_freq='epoch'` and plain `model.fit(...)`,
#    which accepts generators directly. TODO: confirm the TF/Keras version in use.
#  - `assert model.layers[7] == model.get_layer('attention')` is fragile (a
#    positional index breaks if the model graph changes) and is silently
#    stripped under `python -O`; prefer an explicit `if ...: raise` check, or
#    rely on `get_layer('attention')` alone, which already raises ValueError
#    for a missing layer name.
#  - `min_delta=0` in EarlyStopping means any val_loss improvement, however
#    tiny, resets patience — presumably intentional, but worth confirming.
log_dir, 'ep{epoch:03d}-val_loss{val_loss:.3f}-val_acc{val_acc:.3f}.h5'), monitor='val_loss', save_weights_only=True, save_best_only=True, period=1) reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1) early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1) training_generator = reader.generate_batch(X_train, y_train, args.batch_size) validation_generator = reader.generate_batch(X_val, y_val, args.batch_size) print('\nRun: `tensorboard --logdir={}`\n'.format(log_dir)) model.fit_generator( generator=training_generator, steps_per_epoch=train_samples // args.batch_size, epochs=args.epochs, validation_data=validation_generator, validation_steps=val_samples // args.batch_size, callbacks=[logging, checkpoint, reduce_lr, early_stopping]) else: model, encoder_model, decoder_model = nmt.build(inference=True) assert (model.layers[7] == model.get_layer('attention'))