Example #1
	def TrainModel(self, datapath, epoch = 2, save_step = 1000, batch_size = 32, filename = 'model_speech/speech_model24'):
		'''
		Train the model
		Parameters:
			datapath: path where the training data is stored
			epoch: number of training epochs
			save_step: save the model every this many steps
			filename: default filename for saving the model, without the file extension
		'''
		data=DataSpeech(datapath, 'train')
		
		num_data = data.GetDataNum() # get the number of data samples
		
		yielddatas = data.data_genetator(batch_size, self.AUDIO_LENGTH)
		
		for epoch in range(epoch): # loop over training epochs
			print('[running] train epoch %d .' % epoch)
			n_step = 0 # counter of completed save_step-sized training passes
			while True:
				try:
					print('[message] epoch %d . Have train datas %d+'%(epoch, n_step*save_step))
					# data_genetator is a generator function
					
					#self._model.fit_generator(yielddatas, save_step, nb_worker=2)
					self._model.fit_generator(yielddatas, save_step)
					n_step += 1
				except StopIteration:
					print('[error] generator error. please check data format.')
					break
				
				self.SaveModel(comment='_e_'+str(epoch)+'_step_'+str(n_step * save_step))
				self.TestModel(self.datapath, str_dataset='train', data_count = 4)
				self.TestModel(self.datapath, str_dataset='dev', data_count = 4)
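
The loop above calls fit_generator repeatedly with save_step as the step count, so each call trains on save_step batches before the model is saved and tested. A self-contained sketch of that pattern on a toy Keras model (the toy model and generator are illustrative assumptions, not the speech model):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

def toy_generator(batch_size=8):
    # Endless generator of (inputs, targets) batches, as fit_generator expects.
    while True:
        x = np.random.rand(batch_size, 4)
        y = np.random.rand(batch_size, 1)
        yield x, y

model = Sequential([Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')

gen = toy_generator()
save_step = 10                            # steps per fit_generator call, as in the example
for epoch in range(2):
    model.fit_generator(gen, save_step)   # trains on save_step batches
    # model saving / evaluation would go here, mirroring SaveModel / TestModel
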
Example #2
 def TrainModel(self, datapath, epoch = 2, save_step = 1000, batch_size = 32, filename = abspath + 'model_speech/m' + ModelName + '/speech_model'+ModelName):
     '''
     Train the model
     Parameters:
         datapath: path where the training data is stored
         epoch: number of training epochs
         save_step: save the model every this many steps
         filename: default filename for saving the model, without the file extension
     '''
     data=DataSpeech(datapath, 'train')  # first load the 'train' dataset
     
     num_data = data.GetDataNum() # get the number of data samples
     
     yielddatas = data.data_genetator(batch_size, self.AUDIO_LENGTH) # wrap all data into batch_size-sized batches through a generator
     
     for epoch in range(epoch): # loop over training epochs
         print('[running] train epoch %d .' % epoch)
         n_step = 0 # counter of completed save_step-sized training passes
         while True:
             try:
                 print('[message] epoch %d . Have train datas %d+'%(epoch, n_step*save_step))
                 # data_genetator is a generator function
                 
                 #self._model.fit_generator(yielddatas, save_step, nb_worker=2)
                 # Use a Python generator to produce batches one by one for training. The generator
                 # and the model run in parallel for efficiency, e.g. real-time data augmentation on
                 # the CPU while the model trains on the GPU.
                 self._model.fit_generator(yielddatas, save_step)    # self._model is the model returned by creatmodel during initialization
                 # samples_per_epoch: an integer; once the model has processed this many samples,
                 # the current epoch is considered finished and the next one starts
                 n_step += 1
             except StopIteration:
                 print('[error] generator error. please check data format.')
                 break
             
             self.SaveModel(comment='_e_'+str(epoch)+'_step_'+str(n_step * save_step))  # save the model
             self.TestModel(self.datapath, str_dataset='train', data_count = 4) # evaluate on the training set
             self.TestModel(self.datapath, str_dataset='dev', data_count = 4) # evaluate on the dev (validation) set
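
The comments in the variant above describe data_genetator as a generator that packages the data into batch_size-sized batches for fit_generator. A sketch of the batch format such a generator typically yields for CTC-style speech training; the exact array layout and sizes here are illustrative assumptions, not the actual DataSpeech implementation:

import numpy as np

def toy_speech_batch_generator(batch_size, audio_length, feature_dim=200, label_len=64):
    # Endlessly yields (inputs, outputs) tuples in a shape typical for CTC training:
    # acoustic features, padded label sequences, and their true lengths.
    while True:
        x = np.zeros((batch_size, audio_length, feature_dim, 1))    # spectrogram-like features
        labels = np.zeros((batch_size, label_len))                  # padded label ids
        input_length = np.full((batch_size, 1), audio_length // 8)  # time steps after downsampling
        label_length = np.full((batch_size, 1), label_len)          # actual label lengths
        y = np.zeros((batch_size,))                                 # dummy target for the CTC loss output
        yield [x, labels, input_length, label_length], y

# Example: peek at one batch
inputs, outputs = next(toy_speech_batch_generator(batch_size=4, audio_length=1600))
print([a.shape for a in inputs], outputs.shape)
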
Example #3
    def TrainModel(self, datapath, epoch=2, save_step=1000, batch_size=32):
        '''
        Train the model
        Parameters:
            datapath: path where the training data is stored
            epoch: number of training epochs
            save_step: save the model every this many steps
        '''
        data = DataSpeech(datapath, 'train')

        # num_data = data.GetDataNum()  # get the number of data samples
        txt_loss = open(
            os.path.join(os.getcwd(), 'speech_log_file', 'Test_Report_loss.txt'),
            mode='a', encoding='UTF-8')

        txt_obj = open(
            os.path.join(os.getcwd(), 'speech_log_file', 'Test_Report_accuracy.txt'),
            mode='a', encoding='UTF-8')

        saver = tf.train.Saver()
        with tf.Session() as sess:
            # sess.run(tf.global_variables_initializer())
            saver.restore(sess,os.path.join(os.getcwd(), 'speech_model_file','speech.module-50'))
            summary_merge = tf.summary.merge_all()
            train_writter = tf.summary.FileWriter('summary_file',sess.graph)
            for i in range(51,epoch):
                yielddatas = data.data_genetator(batch_size, self.MAX_TIME)
                pbar = tqdm(yielddatas)
                train_epoch = 0
                train_epoch_size = save_step
                for input,_ in pbar:
                    feed = {self.input_data: input[0],self.label_data: input[1],self.input_length:input[2],self.label_length:input[3],
                            self.is_train:True}
                    _,loss,train_summary = sess.run([self.optimize,self.loss,summary_merge],feed_dict=feed)
                    train_writter.add_summary(train_summary,train_epoch+i*train_epoch_size)
                    pr = 'epoch:%d/%d,train_epoch: %d/%d ,loss: %s'% (epoch,i,train_epoch_size,train_epoch,loss)
                    pbar.set_description(pr)
                    txt = pr + '\n'
                    txt_loss.write(txt)
                    if train_epoch == train_epoch_size:
                        break
                    train_epoch +=1
                    if train_epoch%3000==0:
                        self.TestMode(data, sess, i,txt_obj)
                saver.save(sess, os.path.join(os.getcwd(), 'speech_model_file', 'speech.module'), global_step=i)
            txt_loss.close()
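
The variant above drives training with a raw TensorFlow 1.x session: it restores a checkpoint, logs summaries per step, and saves with a global step. A self-contained sketch of that bookkeeping on a toy graph, assuming the TensorFlow 1.x API used above; the toy_* names and paths are placeholders:

import os
import tensorflow as tf  # TensorFlow 1.x API, as in the example above

# Toy graph standing in for the real speech model: one variable and a scalar summary.
toy_loss = tf.Variable(1.0, name='toy_loss')
train_op = tf.assign_sub(toy_loss, 0.1)
tf.summary.scalar('toy_loss', toy_loss)
summary_merge = tf.summary.merge_all()

saver = tf.train.Saver()
ckpt_dir = os.path.join(os.getcwd(), 'toy_model_file')
os.makedirs(ckpt_dir, exist_ok=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())        # or saver.restore(sess, <checkpoint>) to resume
    train_writer = tf.summary.FileWriter('toy_summary_file', sess.graph)
    for i in range(3):
        _, summary = sess.run([train_op, summary_merge])
        train_writer.add_summary(summary, global_step=i)
        saver.save(sess, os.path.join(ckpt_dir, 'toy.module'), global_step=i)  # writes toy.module-<i>
    train_writer.close()
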
Example #4
    def TrainModel(self,
                   epoch=2,
                   save_step=1000,
                   batch_size=32,
                   start_nstep=0):
        '''
        Train the model
        Parameters:
            epoch: number of training epochs
            save_step: save the model every this many steps
            start_nstep: initial step counter, used to resume a previous run
        '''
        data = DataSpeech(self.datapath_thchs30, self.datapath_stcmds, 'train')

        num_data = data.GetDataNum()  # get the number of data samples

        yielddatas = data.data_genetator(batch_size, self.AUDIO_LENGTH)

        for epoch in range(epoch):  # loop over training epochs
            self.logger.debug("train epoch %s." % epoch)
            # 			print('[running] train epoch %d .' % epoch)
            n_step = start_nstep  # counter of completed save_step-sized training passes
            while True:
                try:
                    self.logger.debug('epoch %d . Have train datas %d+' %
                                      (epoch, n_step * save_step))
                    # 					print('[message] epoch %d . Have train datas %d+'%(epoch, n_step*save_step))
                    # data_genetator is a generator function

                    #self._model.fit_generator(yielddatas, save_step, nb_worker=2)
                    self._model.fit_generator(yielddatas, save_step)
                    n_step += 1
                except StopIteration:
                    self.logger.error(
                        "generator error. please check data format.")
                    # 					print('[error] generator error. please check data format.')
                    break

                self.SaveModel(filename='speech_model_%s_e_%s_step_%s' %
                               (ModelName, epoch, n_step * save_step))
                self.TestModel(str_dataset='train', data_count=4)
                self.TestModel(str_dataset='dev', data_count=4)
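
Example #4 reports progress through self.logger instead of print. A minimal logging setup that would produce comparable debug and error output; the logger name and format string are assumptions, not part of the example:

import logging

logger = logging.getLogger('speech_train')            # assumed logger name
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)

logger.debug('train epoch %s.', 0)
logger.error('generator error. please check data format.')
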
Example #5
    def TrainModel(self,
                   datapath,
                   epoch=2,
                   save_step=1000,
                   batch_size=32,
                   filename=abspath + 'model_speech/m' + ModelName +
                   '/speech_model' + ModelName):
        '''
        Train the model
        Parameters:
            datapath: path where the training data is stored
            epoch: number of training epochs
            save_step: save the model every this many steps
            filename: default filename for saving the model, without the file extension
        '''
        data = DataSpeech(datapath, 'train')

        num_data = data.GetDataNum()  # get the number of data samples

        yielddatas = data.data_genetator(batch_size, self.AUDIO_LENGTH)

        # Freeze layers
        for layer in self._model.layers:
            layerName = str(layer.name)
            print("layerNAME:" + layerName)
            if layerName.startswith("conv2d_3") or layerName.startswith(
                    "conv2d_4"
            ) or layerName.startswith("conv2d_5") or layerName.startswith(
                    "conv2d_6") or layerName.startswith(
                        "conv2d_7") or layerName.startswith(
                            "conv2d_8") or layerName.startswith("conv2d_9"):
                layer.trainable = False
        self._model.compile(optimizer='rmsprop', loss='mse')

        # Trainable layers
        for x in self._model.trainable_weights:
            print("trainable layer: " + x.name)
            print('\n')

        # Frozen (non-trainable) layers
        for x in self._model.non_trainable_weights:
            print("frozen layer: " + x.name)
            print('\n')

        for epoch in range(epoch):  # loop over training epochs
            print('[running] train epoch %d .' % epoch)
            n_step = 0  # counter of completed save_step-sized training passes
            while True:
                try:
                    print('[message] epoch %d . Have train datas %d+' %
                          (epoch, n_step * save_step))
                    # data_genetator is a generator function

                    # self._model.fit_generator(yielddatas, save_step, nb_worker=2)
                    self._model.fit_generator(yielddatas, save_step)
                    n_step += 1
                except StopIteration:
                    print('[error] generator error. please check data format.')
                    break

                self.SaveModel(comment='_e_' + str(epoch) + '_step_' +
                               str(n_step * save_step))
                self.TestModel(self.datapath,
                               str_dataset='train',
                               data_count=4)
                self.TestModel(self.datapath, str_dataset='dev', data_count=4)
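
Example #5 freezes the middle convolution layers by name prefix and recompiles before training, so only the remaining layers are updated. A self-contained sketch of the same pattern on a toy Keras model; the toy architecture, layer names, and frozen prefix are illustrative, not the actual speech model:

from keras.models import Sequential
from keras.layers import Conv2D, Flatten, Dense

# Toy model standing in for the speech model; layer names follow Keras's conv2d_N pattern.
model = Sequential([
    Conv2D(8, (3, 3), activation='relu', input_shape=(32, 32, 1), name='conv2d_1'),
    Conv2D(8, (3, 3), activation='relu', name='conv2d_2'),
    Flatten(name='flatten_1'),
    Dense(10, activation='softmax', name='dense_1'),
])

# Freeze every layer whose name matches one of the prefixes, then recompile so the
# trainable flags take effect for subsequent training.
frozen_prefixes = ('conv2d_2',)
for layer in model.layers:
    if layer.name.startswith(frozen_prefixes):
        layer.trainable = False
model.compile(optimizer='rmsprop', loss='mse')

print('trainable:', [w.name for w in model.trainable_weights])      # conv2d_1 and dense_1 weights
print('frozen:   ', [w.name for w in model.non_trainable_weights])  # conv2d_2 weights
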