Example #1
    def recognize(self, wavs, fs):
        """Recognize a waveform: acoustic model -> pinyin -> language model -> text."""
        datapath = 'data/'
        modelpath = 'model_speech/'

        # Decode the waveform into a pinyin sequence with the acoustic model.
        ms = ModelSpeech(datapath)
        ms.LoadModel(modelpath + 'speech_model22_e_0_step_6500.model')
        str_pinyin = ms.RecognizeSpeech(wavs, fs)

        # Convert the pinyin sequence into Chinese text with the language model.
        ml = ModelLanguage('model_language')
        ml.LoadModel()
        r = ml.SpeechToText(str_pinyin)
        return r
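A minimal usage sketch for the method above (the SpeechRecognizer wrapper class and the WAV file name are assumptions; the audio is read with the standard-library wave module):

import wave
import numpy as np

# Hypothetical class exposing the recognize() method shown above.
recognizer = SpeechRecognizer()

# Read a 16-bit mono WAV file into a numpy array plus its sample rate.
with wave.open('example.wav', 'rb') as f:
    fs = f.getframerate()
    wavs = np.frombuffer(f.readframes(f.getnframes()), dtype=np.int16)

print(recognizer.recognize(wavs, fs))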
Example #2
    # Fallback branch of an OS check (the Windows/Linux branches are omitted here);
    # `modelpath` is assumed to be initialized earlier, e.g. modelpath = 'model_speech'.
    print('*[Message] Unknown System\n')
    datapath = 'dataset'
    modelpath = modelpath + '/'

ms = ModelSpeech(datapath)

#ms.LoadModel(modelpath + 'm22_2\\0\\speech_model22_e_0_step_257000.model')
ms.LoadModel(modelpath + 'm22_2/0/speech_model22_e_0_step_257000.model')

#ms.TestModel(datapath, str_dataset='test', data_count = 64, out_report = True)
r = ms.RecognizeSpeech_FromFile(
    'E:\\语音数据集\\ST-CMDS-20170001_1-OS\\20170001P00241I0052.wav')
#r = ms.RecognizeSpeech_FromFile('E:\\VS2015解决方案\\wav文件读写样例\\wav文件读写样例\\bin\\Debug\\1.wav')
#r = ms.RecognizeSpeech_FromFile('/home/nl/01.wav')
#r = ms.RecognizeSpeech_FromFile('C:\\Users\\nl\\Desktop\\01.wav')
#r = ms.RecognizeSpeech_FromFile('E:\\语音数据集\\ST-CMDS-20170001_1-OS\\20170001P00241I0053.wav')
#r = ms.RecognizeSpeech_FromFile('E:\\语音数据集\\ST-CMDS-20170001_1-OS\\20170001P00020I0087.wav')
#r = ms.RecognizeSpeech_FromFile('E:\\语音数据集\\wav\\train\\A11\\A11_167.WAV')
#r = ms.RecognizeSpeech_FromFile('E:\\语音数据集\\wav\\test\\D4\\D4_750.wav')
print('*[提示] 语音识别结果:\n', r)  # "[Info] Speech recognition result:"

ml = ModelLanguage('model_language')
ml.LoadModel()

#str_pinyin = ['zhe4','zhen1','shi4','ji2', 'hao3','de5']
#str_pinyin = ['jin1', 'tian1', 'shi4', 'xing1', 'qi1', 'san1']
#str_pinyin = ['ni3', 'hao3','a1']
str_pinyin = r
r = ml.SpeechToText(str_pinyin)
print('语音转文字结果:\n', r)  # "Speech-to-text result:"
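The script above runs the two models in sequence; a short sketch that wraps the same flow into a reusable helper (the function name and example path are illustrative, not from the original):

def wav_file_to_text(wav_path, ms, ml):
    """Run the acoustic model, then the language model, on a single WAV file."""
    pinyin_seq = ms.RecognizeSpeech_FromFile(wav_path)  # waveform -> pinyin sequence
    return ml.SpeechToText(pinyin_seq)                  # pinyin -> Chinese text

print(wav_file_to_text('example.wav', ms, ml))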
Example #3
"""
@author: nl8590687
HTTP server program for the speech recognition API
"""
import http.server
import urllib
import keras
from SpeechModel25 import ModelSpeech
from LanguageModel import ModelLanguage

# Load the acoustic model once at startup.
datapath = 'data/'
modelpath = 'model_speech/'
ms = ModelSpeech(datapath)
ms.LoadModel(modelpath + 'm25/speech_model25_e_0_step_545500.model')

# Load the language model that turns pinyin sequences into Chinese text.
ml = ModelLanguage('model_language')
ml.LoadModel()

class TestHTTPHandle(http.server.BaseHTTPRequestHandler):  
	def setup(self):
		self.request.settimeout(10)
		http.server.BaseHTTPRequestHandler.setup(self)
	
	def _set_response(self):
		self.send_response(200)
		self.send_header('Content-type', 'text/html')
		self.end_headers()
		
	def do_GET(self):
		# Answer GET requests with a simple identification string.
		buf = 'ASRT_SpeechRecognition API'
		self._set_response()
		self.wfile.write(bytes(buf, encoding='utf-8'))
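A minimal sketch of serving this handler (the port number is an assumption; the original startup code is not shown in this excerpt):

if __name__ == '__main__':
    # Bind on all interfaces; port 20000 is an assumed value.
    server = http.server.HTTPServer(('', 20000), TestHTTPHandle)
    server.serve_forever()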
Example #4
#!/usr/bin/env python3
# encoding: utf-8

import logging
import os
from flask import request, Blueprint, abort, jsonify
from werkzeug.utils import secure_filename

from LanguageModel import ModelLanguage
from SpeechModel251 import ModelSpeech

# Load the acoustic and language models once at import time so requests reuse them.
data_path = 'data/train_data/'
ms = ModelSpeech(data_path)
ms.LoadModel('data/speech_model/speech_model251_e_0_step_12000.model')

ml = ModelLanguage('data/model_language/')
ml.LoadModel()

detect_speech_api = Blueprint('detect_language_api',
                              __name__,
                              template_folder='templates')

ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])


def allowed_file(filename):
    return '.' in filename and \
           filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
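A sketch of how this blueprint might be registered in an application (the app module and port are assumptions, not part of the original source):

from flask import Flask

app = Flask(__name__)
app.register_blueprint(detect_speech_api)

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)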


@detect_speech_api.route('/language/recognize/chinese/offline',