# Inference demo: run the DFCNN acoustic model on a single WAV file, then
# decode the resulting pinyin sequence into Chinese text with the language
# model.
# NOTE(review): relies on names imported elsewhere in the file
# (tf, set_session, ModelSpeech, ModelLanguage) — not visible in this chunk.

# TF1-style session setup: allow soft device placement and cap this process
# at 90% of GPU memory.
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.per_process_gpu_memory_fraction = 0.9
#config.gpu_options.allow_growth=True  # alternative: don't reserve all GPU memory up front; allocate on demand
set_session(tf.Session(config=config))

datapath = ''
modelpath = 'model_speech/'

# Load the trained DFCNN acoustic model (epoch 0, step 64000 checkpoint).
ms = ModelSpeech(datapath)
ms.LoadModel(modelpath + 'm_dfcnn/speech_model_dfcnn_e_0_step_64000.model')
# Earlier checkpoints / model variants kept for reference:
#ms.LoadModel(modelpath + 'm_DFCNN/speech_model_DFCNN_e_0_step_410000.model')
#ms.LoadModel(modelpath + 'm26/speech_model26_e_0_step_122500.model')

# Optional batch evaluation over the test set:
#ms.TestModel(datapath, str_dataset='test', data_count = 64, out_report = True)

# Recognize a single THCHS-30 utterance; the result `r` is a pinyin sequence.
#r = ms.RecognizeSpeech_FromFile('/home/speech.AI/github/DFCNN/dataset/data_thchs30/test/D11_750.wav')
r = ms.RecognizeSpeech_FromFile(
    '/home/speech.AI/github/DFCNN/dataset/data_thchs30/train/A33_100.wav')
print('*[提示] 语音识别结果:\n', r)

# Decode pinyin -> Chinese text with the language model.
ml = ModelLanguage('model_language')
ml.LoadModel()

# Hand-written pinyin sequences kept for quick manual testing:
#str_pinyin = ['zhe4','zhen1','shi4','ji2', 'hao3','de5']
#str_pinyin = ['jin1', 'tian1', 'shi4', 'xing1', 'qi1', 'san1']
#str_pinyin = ['ni3', 'hao3','a1']
str_pinyin = r
#str_pinyin = ['su1', 'bei3', 'jun1', 'de5', 'yi4','xie1', 'ai4', 'guo2', 'jiang4', 'shi4', 'ma3', 'zhan4', 'shan1', 'ming2', 'yi1', 'dong4', 'ta1', 'ju4', 'su1', 'bi3', 'ai4', 'dan4', 'tian2','mei2', 'bai3', 'ye3', 'fei1', 'qi3', 'kan4', 'zhan4']
r = ml.SpeechToText(str_pinyin)
print('语音转文字结果:\n', r)
# Inference demo (AISHELL variant): load a DFCNN checkpoint selected by EPOCH,
# recognize one test utterance, then prepare the language model for decoding.
# NOTE(review): `modelpath` and `EPOCH` are referenced before any assignment
# visible in this chunk — both must be defined earlier in the original file,
# and EPOCH must already be a str (it is concatenated below). Confirm against
# the full source. The chunk also appears truncated: the language model is
# loaded but never used here.

datapath = 'dataset'
modelpath = modelpath + '/'

ms = ModelSpeech(datapath)
# Load the DFCNN checkpoint; the step count is built as '<EPOCH>000'.
ms.LoadModel(modelpath + 'm_dfcnn/speech_model_dfcnn_e_0_step_' + EPOCH + '000.model')
#ms.LoadModel(modelpath + 'm_DFCNN/speech_model_DFCNN_e_0_step_8000.model')
#ms.LoadModel(modelpath + 'm26/speech_model26_e_0_step_122500.model')

# Optional batch evaluation over the test set:
#ms.TestModel(datapath, str_dataset='test', data_count = 64, out_report = True)

# Sample utterances from various corpora kept for manual testing:
#r = ms.RecognizeSpeech_FromFile('myspeech.wav')
#r = ms.RecognizeSpeech_FromFile('/home/speech.AI/github/DFCNN/dataset/data_thchs30/test/D11_750.wav')

# aishell
r = ms.RecognizeSpeech_FromFile(
    '/home/speech.AI/github/DFCNN/dataset/data_aishell/wav/test/S0764/BAC009S0764W0122.wav'
)
#r = ms.RecognizeSpeech_FromFile('/home/speech.AI/github/DFCNN/dataset/data_aishell/wav/test/S0764/BAC009S0764W0124.wav')
#r = ms.RecognizeSpeech_FromFile('/home/speech.AI/github/DFCNN/dataset/data_aishell/wav/test/S0764/BAC009S0764W0179.wav')
#r = ms.RecognizeSpeech_FromFile('/home/speech.AI/github/DFCNN/dataset/data_aishell/wav/test/S0764/BAC009S0764W0226.wav')

#ST_CMDS
#r = ms.RecognizeSpeech_FromFile('/home/speech.AI/data/ST-CMDS-20170001_1-OS/20170001P00215A0075.wav')
#r = ms.RecognizeSpeech_FromFile('/home/speech.AI/data/ST-CMDS-20170001_1-OS/20170001P00091A0063.wav')
#r = ms.RecognizeSpeech_FromFile('/home/speech.AI/data/ST-CMDS-20170001_1-OS/20170001P00272A0101.wav')
#r = ms.RecognizeSpeech_FromFile('/home/speech.AI/data/ST-CMDS-20170001_1-OS/20170001P00087A0045.wav')
#r = ms.RecognizeSpeech_FromFile('/home/speech.AI/github/DFCNN/dataset/data_thchs30/train/A33_100.wav')

print('*[提示] 语音识别结果:\n', r)

# Load the language model for pinyin -> Chinese decoding.
ml = ModelLanguage('model_language')
ml.LoadModel()
# Post-training evaluation: choose platform-specific data paths, evaluate the
# DFCNN model on the test set, then recognize one sample utterance.
# NOTE(review): `modelpath` and `iters_num` must be defined earlier in the
# original file (iters_num is string-concatenated below, so it must already
# be a str); `plat` is presumably `import platform as plat` — confirm.

if(not os.path.exists(modelpath)):  # check whether the model save directory exists
    os.makedirs(modelpath)  # create it if missing so saving the model later doesn't fail

system_type = plat.system()  # file path separators differ across OSes, so branch on platform
if(system_type == 'Windows'):
    datapath = 'E:\\语音数据集'
    modelpath = modelpath + '\\'
elif(system_type == 'Linux'):
    datapath = 'dataset'
    modelpath = modelpath + '/'
else:
    # Unknown platform: warn, then fall back to Linux-style paths.
    print('*[Message] Unknown System\n')
    datapath = 'dataset'
    modelpath = modelpath + '/'

ms = ModelSpeech(datapath)
# Load the DFCNN checkpoint whose step count is '<iters_num>000'.
ms.LoadModel(modelpath + 'm_dfcnn/speech_model_dfcnn_e_0_step_'+iters_num+'000.model')
#ms.LoadModel(modelpath + 'm_DFCNN/speech_model_DFCNN_e_0_step_84000.model')

# Batch evaluation on 64 test utterances, writing an error report.
ms.TestModel(datapath, str_dataset='test', data_count = 64, out_report = True)

# Recognize one AISHELL dev-set utterance.
r = ms.RecognizeSpeech_FromFile('dataset/data_aishell/wav/dev/S0733/BAC009S0733W0234.wav')
# Windows sample paths kept for manual testing:
#r = ms.RecognizeSpeech_FromFile('E:\\语音数据集\\ST-CMDS-20170001_1-OS\\20170001P00020I0087.wav')
#r = ms.RecognizeSpeech_FromFile('E:\\语音数据集\\wav\\train\\A11\\A11_167.WAV')
#r = ms.RecognizeSpeech_FromFile('E:\\语音数据集\\wav\\test\\D4\\D4_750.wav')
print('*[提示] 语音识别结果:\n',r)