def train(self):
    model = self.__build_model()
    # Checkpoint after each epoch; with save_best_only=True only the model
    # that improves val_acc is kept.
    checkpoint = ModelCheckpoint(os.path.join(self.model_path,
                                              'weights.{epoch:03d}.h5'),
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='auto')
    # Stop early if the monitored quantity (val_loss by default) has not
    # improved for 3 consecutive epochs.
    early_stop = EarlyStopping(patience=3, verbose=1)
    history = model.fit(self.x_train,
                        self.y_train,
                        batch_size=self.batch_size,
                        epochs=self.epochs,
                        verbose=1,
                        callbacks=[checkpoint, early_stop],
                        validation_data=(self.x_test, self.y_test))
    # Save the model once more after training finishes; not strictly necessary,
    # since the checkpoints above already cover it.
    model.save(os.path.join(self.model_path, 'model.h5'))
    self.__save_config()
    plot(history)
    return model
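# NOTE: every train() variant in this section hands the Keras History object
# to a plot() helper that is not defined here. The sketch below is an
# assumption of what such a helper could look like (the function name and
# metric keys come from the calls in this section; the body is hypothetical),
# using matplotlib to draw loss and, if recorded, accuracy curves.
import matplotlib.pyplot as plt


def plot(history):
    """Plot training/validation loss (and accuracy, if recorded) per epoch."""
    metrics = history.history
    epochs = range(1, len(metrics['loss']) + 1)

    # Loss curves are always available in History.history.
    plt.figure()
    plt.plot(epochs, metrics['loss'], label='train loss')
    if 'val_loss' in metrics:
        plt.plot(epochs, metrics['val_loss'], label='val loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()

    # Accuracy curves only exist if the model was compiled with an accuracy metric.
    acc_key = next((k for k in ('acc', 'accuracy') if k in metrics), None)
    if acc_key is not None:
        plt.figure()
        plt.plot(epochs, metrics[acc_key], label='train acc')
        if 'val_' + acc_key in metrics:
            plt.plot(epochs, metrics['val_' + acc_key], label='val acc')
        plt.xlabel('epoch')
        plt.ylabel('accuracy')
        plt.legend()

    plt.show()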
def train(self):
    # Build the model with or without the attention layer.
    if self.attention:
        model = self.__build_model()
    else:
        model = self.__build_model_no_attention()
    # Keep only the weights of the best model by val_loss.
    checkpoint = ModelCheckpoint(os.path.join(self.model_path,
                                              'weights.{epoch:03d}.h5'),
                                 monitor='val_loss',
                                 save_weights_only=True,
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    # Stop if val_loss has not improved for 10 consecutive epochs.
    early = EarlyStopping(monitor="val_loss", mode="min", patience=10)
    model_trained = model.fit(self.x_train,
                              self.y_train,
                              batch_size=128,
                              epochs=25,
                              validation_data=(self.x_test, self.y_test),
                              callbacks=[checkpoint, early])
    plot(model_trained)
    # Save the final weights under a name that records whether attention was used.
    if self.attention:
        model.save_weights(
            os.path.join(self.model_path, 'final_model_weights.h5'))
    else:
        model.save_weights(
            os.path.join(self.model_path, 'final_model_weights_no_attention.h5'))
    self.__save_config()
    return model
def train(self, batch_size=512, epochs=20):
    model = self.build_model()
    # Use early_stop together with checkpoint so the saved model is the one
    # with the lowest val_loss.
    early_stop = EarlyStopping(patience=3, verbose=1)
    checkpoint = ModelCheckpoint(os.path.join(
        self.model_path, 'weights.{epoch:03d}-{val_loss:.3f}.h5'),
        verbose=1,
        save_best_only=True)
    history = model.fit(self.x_train,
                        self.y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        callbacks=[checkpoint, early_stop],
                        validation_data=(self.x_test, self.y_test))
    plot(history)
    return model
def train(self, x, y, test_size=0.2, batch_size=512, epochs=20):
    model = self.build_model()
    # Use early_stop together with checkpoint so the saved model is the one
    # with the lowest val_loss.
    early_stop = self.early_stop()
    checkpoint = self.check_point()
    # Hold out a validation split (requires sklearn's train_test_split).
    x_train, x_val, y_train, y_val = train_test_split(x,
                                                      y,
                                                      test_size=test_size,
                                                      random_state=42)
    history = model.fit(x_train,
                        y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        callbacks=[checkpoint, early_stop],
                        validation_data=(x_val, y_val))
    plot(history)
    return model
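# NOTE: self.early_stop() and self.check_point() are factory methods that are
# not shown in this section. The sketch below is an assumption, modelled on the
# callbacks configured inline in the other train() variants, and it presumes
# the class exposes a model_path attribute as those variants do.
import os
from keras.callbacks import EarlyStopping, ModelCheckpoint


class CallbackFactories:
    """Hypothetical mixin illustrating the assumed callback factories."""

    def early_stop(self):
        # Stop once val_loss has not improved for 3 consecutive epochs.
        return EarlyStopping(patience=3, verbose=1)

    def check_point(self):
        # Keep only the checkpoint with the lowest val_loss so far.
        return ModelCheckpoint(os.path.join(
            self.model_path, 'weights.{epoch:03d}-{val_loss:.3f}.h5'),
            verbose=1,
            save_best_only=True)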
def train(self):
    model = self.__build_model()
    checkpoint = ModelCheckpoint(os.path.join(
        self.model_path, 'weights.{epoch:03d}-{val_acc:.4f}.h5'),
        monitor='val_acc',
        verbose=1,
        save_best_only=True,
        mode='auto')
    early_stop = EarlyStopping(patience=3, verbose=1)
    history = model.fit(self.x_train,
                        self.y_train,
                        batch_size=self.batch_size,
                        epochs=self.epochs,
                        verbose=1,
                        callbacks=[checkpoint, early_stop],
                        validation_data=(self.x_test, self.y_test))
    plot(history)
    model.save(os.path.join(self.model_path, 'model.h5'))
    self.__save_config()
    return model
def train(self, weights_only=True, call_back=False):
    model = self._build_model()
    if call_back:
        # Early stopping, best-model checkpointing and TensorBoard logging,
        # written to a timestamped checkpoint directory.
        early_stopping = EarlyStopping(monitor='val_loss', patience=30)
        stamp = 'lstm_%d' % self.n_hidden
        checkpoint_dir = os.path.join(
            self.model_path, 'checkpoints/' + str(int(time.time())) + '/')
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        bst_model_path = checkpoint_dir + stamp + '.h5'
        if weights_only:
            model_checkpoint = ModelCheckpoint(bst_model_path,
                                               save_best_only=True,
                                               save_weights_only=True)
        else:
            model_checkpoint = ModelCheckpoint(bst_model_path,
                                               save_best_only=True)
        tensor_board = TensorBoard(
            log_dir=checkpoint_dir + "logs/{}".format(time.time()))
        callbacks = [early_stopping, model_checkpoint, tensor_board]
    else:
        callbacks = None
    # The model takes two inputs: the left and right sequences.
    model_trained = model.fit(
        [self.x_train['left'], self.x_train['right']],
        self.y_train,
        batch_size=self.batch_size,
        epochs=self.epochs,
        validation_data=([self.x_val['left'], self.x_val['right']],
                         self.y_val),
        verbose=1,
        callbacks=callbacks)
    # Without callbacks, persist the model (or just its weights) once at the end.
    if weights_only and not call_back:
        model.save_weights(os.path.join(self.model_path, 'weights_only.h5'))
    elif not weights_only and not call_back:
        model.save(os.path.join(self.model_path, 'model.h5'))
    self._save_config()
    plot(model_trained)
    return model
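# NOTE: illustrative usage only, showing how the artefacts written above could
# be restored for inference. The `trainer` parameter stands for an instance of
# the last class above; the file names match the save calls in its train().
import os
from keras.models import load_model


def restore(trainer):
    """Reload the weights-only file and the full saved model."""
    # Weights-only saves need the architecture to be rebuilt first.
    model = trainer._build_model()
    model.load_weights(os.path.join(trainer.model_path, 'weights_only.h5'))

    # Full-model saves can be reloaded directly; models with custom layers
    # (e.g. an attention layer) would additionally need custom_objects=...
    full_model = load_model(os.path.join(trainer.model_path, 'model.h5'))
    return model, full_model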