def face_rank(page):
    cover_id_list, name_list = get_cover_id_list(page)
    cover_content_list = get_cover_content_list(cover_id_list)
    for cover_content, name, ids in zip(cover_content_list, name_list, cover_id_list):
        ai_obj = apiutil.AiPlat(AppID, AppKey)
        rsp = ai_obj.face_detectface(cover_content, 0)
        if rsp['ret'] == 0:
            for face in rsp['data']['face_list']:
                if face['beauty'] > face_min_rank and face['gender'] < 50 and face['age'] < max_age:
                    print('{0} beauty check passed, score {1}, age {2}'.format(name, face['beauty'], face['age']))
                    saveimage(cover_content, name)
        elif rsp['ret'] == -2147483636:
            # Service busy: retry once with a fallback app id/key
            print('{0}: detection service busy, retrying'.format(name))
            ai_obj = apiutil.AiPlat('1106858595', 'bNUNgOpY6AeeJjFu')
            rsp = ai_obj.face_detectface(cover_content, 0)
            if rsp['ret'] == 0:
                for face in rsp['data']['face_list']:
                    if face['beauty'] > face_min_rank and face['gender'] < 50 and face['age'] < max_age:
                        print('{0} beauty check passed, score {1}, age {2}'.format(name, face['beauty'], face['age']))
                        saveimage(cover_content, name)
            elif rsp['ret'] == -2147483636:
                print('{0}: second detection attempt failed, giving up'.format(name))
                saveerrorimage(cover_content, name)
            else:
                print('{2}: detection error, code {0}, message {1}'.format(rsp['ret'], rsp['msg'], name))
        else:
            print('{2}: detection error, code {0}, message {1}'.format(rsp['ret'], rsp['msg'], name))
def test_aai_ailab(text='今天天气怎么样'):
    speaker = 1
    for_mat = {"PCM": 1, "WAV": 2, "MP3": 3}
    volume = 0
    speed = 100
    aht = 0
    apc = 58
    ai_obj = apiutil.AiPlat(app_id, app_key)
    print('----------------------SEND REQ----------------------')
    rsp = ai_obj.getAaiAiLab(speaker, for_mat['WAV'], volume, speed, text, aht, apc)
    if rsp['ret'] == 0:
        print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))
        speech = rsp['data']['speech']
        speech = base64.b64decode(speech)
        md5sum = rsp['data']['md5sum']
        has_md5 = hashlib.md5(speech)
        speech_md5 = has_md5.hexdigest().upper()
        if md5sum == speech_md5:
            with open('./data/ailab.wav', 'wb') as f:
                f.write(speech)
            print('----------------------API SUCC----------------------')
    else:
        print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))
        print('----------------------API FAIL----------------------')
def Get_Nlp_TextTrans_api(str_text, type=0):
    ai_obj = apiutil.AiPlat(app_id, app_key)
    rsp = ai_obj.getNlpTextTrans(str_text, type)
    data_string = ''
    if rsp['ret'] == 0:
        data_string = rsp['data']['trans_text']
    return data_string
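# A minimal usage sketch for Get_Nlp_TextTrans_api above, assuming app_id and app_key are
# already defined at module level; type=0 requests automatic source-language detection,
# per the note in acquiredata further down. The sample text is illustrative only.
if __name__ == '__main__':
    print(Get_Nlp_TextTrans_api('今天天气怎么样', type=0))  # prints '' if the API call failed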
def Get_vision_ImgageToText_api(image_name, mediaId='1535646454'):
    with open(image_name, 'rb') as bin_data:
        image_data = bin_data.read()
    ai_obj = apiutil.AiPlat(app_id, app_key)
    rsp = ai_obj.gevision_imgtotext(image_data, mediaId)
    text_string = ''
    if rsp['ret'] == 0:
        text_string = rsp['data']['text']
    return text_string
def getWord(self, st):
    api_obj = apiutil.AiPlat(self.app_id, self.app_key)
    type = 10000
    rsp = api_obj.getNlpTextChat(type, st)
    if rsp['ret'] == 0:
        print(rsp['data']['answer'])
    else:
        print("failed")
        print(rsp['ret'])
        print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))
def Get_Nlp_WordCom_api(str_text):
    ai_obj = apiutil.AiPlat(app_id, app_key)
    rsp = ai_obj.getNlpWordCom(str_text)
    data_list = []
    if rsp['ret'] == 0:
        intent_dict = {'intent': rsp['data']['intent']}
        data_list.append(intent_dict)
        for obj in rsp['data']['com_tokens']:
            if obj['com_type'] < 50:
                data_list.append(obj)
    return data_list
def anso(questionS):
    str_question = questionS
    session = 10000
    ai_obj = apiutil.AiPlat(app_id, app_key)
    rsp = ai_obj.getNlpTextChat(session, str_question)
    if rsp['ret'] == 0:
        ask = rsp['data']['answer']
        return ask
    else:
        print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))
def get_Aai_ToTts_api(str_text, mediaId='451454543', Speech_ID=6, speed=0):
    ai_obj = apiutil.AiPlat(app_id, app_key)
    # Speaker IDs: 1 Mandarin male voice, 5 Jingqi female voice, 6 Huanxin female voice
    rsp = ai_obj.getAaiToTts(str_text, Speech_ID, speed)
    if rsp['ret'] == 0:
        str_data = rsp['data']['voice']
        speech_chunk = base64.b64decode(str_data)
        file_name = './data/%s.mp3' % mediaId
        with open(file_name, 'wb') as file_data:
            file_data.write(speech_chunk)
        return file_name
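# A minimal usage sketch for get_Aai_ToTts_api above. It assumes app_id and app_key are
# configured at module level and that a ./data/ directory exists; the text and mediaId
# are made-up example values, not values from the original project.
if __name__ == '__main__':
    mp3_path = get_Aai_ToTts_api('你好，这是一段测试语音', mediaId='tts_demo', Speech_ID=6, speed=0)
    print(mp3_path)  # './data/tts_demo.mp3' on success, None if the API call failed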
def Get_vision_objectr_api(image_name, topk_number=1):
    with open(image_name, 'rb') as bin_data:
        image_data = bin_data.read()
    ai_obj = apiutil.AiPlat(app_id, app_key)
    # topk_number: number of results to return (already sorted by confidence, descending)
    rsp = ai_obj.getvision_objectr(image_data, topk_number)
    data_lists = []
    if rsp['ret'] == 0:
        for obj in rsp['data']['object_list']:
            data_list = [str(obj['label_id']), str(obj['label_confd'])]
            data_lists.append(data_list)
    return data_lists
def Get_Ocr_GeneralOcr_api(image_name):
    with open(image_name, 'rb') as bin_data:
        image_data = bin_data.read()
    ai_obj = apiutil.AiPlat(app_id, app_key)
    rsp = ai_obj.getOcrGeneralocr(image_data)
    data_list = []
    if rsp['ret'] == 0:
        for i in rsp['data']['item_list']:
            data_list.append(i['itemstring'])
    return data_list
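# A minimal usage sketch for Get_Ocr_GeneralOcr_api above, assuming app_id and app_key are
# set; './sample.jpg' is a hypothetical path. Each returned element is one recognized text
# line; an empty list means the call failed or found nothing.
if __name__ == '__main__':
    for line in Get_Ocr_GeneralOcr_api('./sample.jpg'):
        print(line)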
def test_nlp_texttrans():
    str_text = '今天天气怎么样'
    type = 0
    ai_obj = apiutil.AiPlat(app_id, app_key)
    print('----------------------SEND REQ----------------------')
    rsp = ai_obj.getNlpTextTrans(str_text, type)
    if rsp['ret'] == 0:
        print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))
        print('----------------------API SUCC----------------------')
    else:
        print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))
        print('----------------------API FAIL----------------------')
def getWordTEST(self, st):
    print(time.asctime())
    print("NOW START TO TEST THE AI CHAT")
    api_obj = apiutil.AiPlat(self.app_id, self.app_key)
    type = 10000
    rsp = api_obj.getNlpTextChat(type, st)
    if rsp['ret'] == 0:
        print("success")
        print(rsp['data']['answer'])
    else:
        print("failed")
        print(rsp['ret'])
        print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))
def Get_face_sticker_api(image_name, mediaId, model=random.randint(1, 31)):
    # Note: the default for `model` is evaluated once at definition time, not on every call.
    with open(image_name, 'rb') as bin_data:
        # Raw image data; the original image is limited to 500 KB
        image_data = bin_data.read()
    ai_obj = apiutil.AiPlat(app_id, app_key)
    rsp = ai_obj.getfacesticker(image_data, model)
    image_file_name = "./image/%s_s.jpg" % mediaId
    if rsp['ret'] == 0:
        strs = rsp['data']['image']
        imgdata = base64.b64decode(strs)
        with open(image_file_name, 'wb') as file_image:
            file_image.write(imgdata)
        return image_file_name
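# A minimal usage sketch for Get_face_sticker_api above, assuming app_id and app_key are set
# and an ./image/ directory exists; the input path, mediaId, and model value are illustrative only.
if __name__ == '__main__':
    sticker_path = Get_face_sticker_api('./portrait.jpg', 'sticker_demo', model=5)
    print(sticker_path)  # './image/sticker_demo_s.jpg' on success, None if the API call failed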
def test_aai_wxasrs():
    seq = 0
    for_mat = 8
    rate = 16000
    bits = 16
    cont_res = 1
    once_size = 6400
    file_path = './data/wxasrs.mp3'
    # Compute the MD5 of the whole audio file, used as the speech id
    with open(file_path, 'rb') as f:
        md5obj = hashlib.md5()
        md5obj.update(f.read())
        hash = md5obj.hexdigest()
    speech_id = str(hash).upper()
    f = open(file_path, 'rb')
    file_size = os.path.getsize(file_path)
    try:
        while True:
            chunk = f.read(once_size)
            if not chunk:
                break
            chunk_size = len(chunk)
            end = 1 if (seq + chunk_size) == file_size else 0
            ai_obj = apiutil.AiPlat(app_id, app_key)
            print('----------------------SEND REQ----------------------')
            rsp = ai_obj.getAaiWxAsrs(chunk, speech_id, end, for_mat, rate, bits, seq, chunk_size, cont_res)
            seq += chunk_size
            if rsp['ret'] == 0:
                print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))
                print('----------------------API SUCC----------------------')
            else:
                print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))
                print('----------------------API FAIL----------------------')
    finally:
        f.close()
def ai_image(image_data):
    time.sleep(0.5)
    ai_obj = apiutil.AiPlat(App_ID, App_Key)
    print('-----------')
    rsp = ai_obj.getRenlianFenxi(image_data)
    if rsp['ret'] == 0:
        # Returns the beauty score of the first detected face
        for i in rsp['data']['face_list']:
            print(i['beauty'])
            print('----')
            return int(i['beauty'])
    else:
        # print('no result')
        print(rsp['ret'])
        return int(rsp['ret'])
def recognize_image(self):
    # https://ai.qq.com/product/ocr.shtml#identify  Youtu OCR docs; credentials are free to apply for
    app_id = 'appid'
    app_key = 'appkey'
    with open('./captcha.jpg', 'rb') as bin_data:
        image_data = bin_data.read()
    ai_obj = apiutil.AiPlat(app_id, app_key)
    print('----------------------SEND REQ----------------------')
    rsp = ai_obj.getOcrGeneralocr(image_data)
    print(rsp)
    return rsp
def Get_Nlp_ImageTrans_api(image_name, mediaId='451454543', source='auto', target='auto'):
    with open(image_name, 'rb') as bin_data:
        image_data = bin_data.read()
    ai_obj = apiutil.AiPlat(app_id, app_key)
    rsp = ai_obj.getNlpImageTrans(image_data, mediaId, source, target)
    data_lists = []
    if rsp['ret'] == 0:
        for obj in rsp['data']['image_records']:
            data_list = [obj['source_text'], obj['target_text']]
            data_lists.append(data_list)
    return data_lists
def ocr(image):
    with open(image, 'rb') as bin_data:
        image_data = bin_data.read()
    ai_obj = apiutil.AiPlat(app_id, app_key)
    # print('----------------------SEND REQ----------------------')
    rsp = ai_obj.getOcrGeneralocr(image_data)
    if rsp['ret'] == 0:
        # Returns only the first recognized line of text
        for i in rsp['data']['item_list']:
            return i['itemstring']
        # print('----------------------API SUCC----------------------')
    else:
        print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))
        print('----------------------API FAIL----------------------')
def get_response(questionS):
    # Get the AI's reply
    app_id = 'XXXX'    # APP ID from the console
    app_key = 'XXXX'   # APP Key from the console
    str_question = questionS  # question text
    session = 10000
    ai_obj = apiutil.AiPlat(app_id, app_key)  # create the SDK AiPlat client
    rsp = ai_obj.getNlpTextChat(session, str_question)  # call the SDK chat method
    if rsp['ret'] == 0:  # check the return code and print the answer
        ask = rsp['data']['answer']
        print(ask)
    else:
        print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))
def soundTotext():
    app_key = 'YOUR_APP_KEY'  # the key you applied for
    app_id = 'YOUR_APP_ID'    # the id you applied for
    seq = 0
    for_mat = 2
    rate = 16000
    bits = 16
    cont_res = 0
    once_size = 41000
    file_path = FILE_NAME
    # Compute the MD5 of the audio file, used as the speech id
    with open(file_path, 'rb') as f:
        md5obj = hashlib.md5()
        md5obj.update(f.read())
        hash = md5obj.hexdigest()
    speech_id = str(hash).upper()
    # Read the audio contents, 41000 bytes at a time
    f = open(file_path, 'rb')
    file_size = os.path.getsize(file_path)
    try:
        while True:
            chunk = f.read(once_size)
            if not chunk:
                break
            chunk_size = len(chunk)
            end = 1 if (seq + chunk_size) == file_size else 0
            # Initialize the AiPlat client
            ai_obj = apiutil.AiPlat(app_id, app_key)
            # Call the streaming speech recognition (WeChat AI) endpoint
            rsp = ai_obj.getAaiWxAsrs(chunk, speech_id, end, for_mat, rate, bits, seq, chunk_size, cont_res)
            seq += chunk_size
            if rsp['ret'] == 0:
                # Note: returns after the first successful chunk
                return rsp['data']['speech_text']
            else:
                print("Tencent API call failed")
                return None
    finally:
        f.close()
def TransWord(self, st):
    print("Translation started")
    api_obj = apiutil.AiPlat(self.app_id, self.app_key)
    type = 0
    print("Opening connection")
    rsp = api_obj.getNlpTextChat(type, st)
    if rsp['ret'] == 0:
        print("Success")
        print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))
        print(rsp['data']['answer'])
    else:
        print("Failed")
        print(rsp['ret'])
        # self.chatLOG(rsp['ret'])
        print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))
    print("exit code 0")
    exit(0)
def Start(self, content, sender):
    global authorg
    authorg = sender
    if not content.startswith("echo"):
        return
    str_text = content.replace("echo ", "")
    ai_obj = apiutil.AiPlat(app_id, app_key)
    rsp = ai_obj.getAaiTts(str_text)
    print(rsp['ret'])
    if rsp['ret'] == 0:
        with open('test.wav', 'wb') as f:
            f.write(base64.b64decode(rsp["data"]["speech"]))
        # Note: the `commands` module is Python 2 only (use subprocess.getoutput on Python 3)
        res = commands.getoutput("omxplayer test.wav")
    else:
        res = 'API FAIL'
    sender.send(res)
def start(content):
    print("ok")
    print(app_key)
    print(app_id)
    if not content.startswith("echo"):
        return
    str_text = content.replace("echo ", "")
    ai_obj = apiutil.AiPlat(app_id, app_key)
    rsp = ai_obj.getAaiTts(str_text)
    print(rsp['ret'])
    if rsp['ret'] == 0:
        with open('test.wav', 'wb') as f:
            f.write(base64.b64decode(rsp["data"]["speech"]))
        res = commands.getoutput("omxplayer test.wav")
    else:
        res = 'API FAIL'
    print(res)
def test_aai_youtu():
    speed = 0
    text = '今天天气怎么样'
    model_type = 0
    ai_obj = apiutil.AiPlat(app_id, app_key)
    print('----------------------SEND REQ----------------------')
    rsp = ai_obj.getAaiYoutu(speed, text, model_type)
    if rsp['ret'] == 0:
        print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))
        speech = rsp['data']['voice']
        speech = base64.b64decode(speech)
        with open('./data/youtu.mp3', 'wb') as f:
            f.write(speech)
        print('----------------------API SUCC----------------------')
    else:
        print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))
        print('----------------------API FAIL----------------------')
def acquiredata(self, word):
    if self.args == 'Tencent':
        app_id = '1106881265'         # You can replace it with your own app id.
        app_key = 'cdjZ2xHc3vRoQrUi'  # You can replace it with your own app key.
        type = 0  # 0: automatic language identification (https://ai.qq.com/doc/nlptrans.shtml)
        ai_obj = apiutil.AiPlat(app_id, app_key)
        return ai_obj.getNlpTextTrans(word, type)
    if self.args == 'Youdao':
        request = _YOUDAO_API + quote(self.word)
    elif self.args == 'Jinshan':
        request = _CIBA_API + quote(self.word) + "&type=json&key=0EAE08A016D6688F64AB3EBB2337BFB0"
    else:
        print("Invalid dictionary!")
    try:
        response = urllib.request.urlopen(request)
    except urllib.error.URLError:
        raise Exception(SNIPPET_ERROR_TIMEOUT)
    data = response.read().decode('utf-8')
    return json.loads(data)
def image2json(image_file, json_dir):
    '''
    Convert an image to a json file via the Tencent common OCR API.
    :image_file: image to process
    :json_dir: directory in which to save the json file
    :return None:
    '''
    # Build the json file name and skip images that have already been converted
    json_file = os.path.split(image_file)[1]
    json_file = json_file.split('.')[0] + '.json'
    json_file = os.path.join(json_dir, json_file)
    if os.path.exists(json_file):
        return
    ai_obj = apiutil.AiPlat(app_id, app_key)
    rsp = ai_obj.getOcrGeneralocr(image_file)
    if rsp['ret'] == 16447:  # general OCR found no characters
        print('No character in image %s' % image_file)
        return
    with open(json_file, 'w') as f:
        json.dump(rsp, f)
    print('Image %s has been converted to json.' % image_file)
def Get_face_detectface_api(image_name):
    with open(image_name, 'rb') as bin_data:
        image_data = bin_data.read()
    ai_obj = apiutil.AiPlat(app_id, app_key)
    rsp = ai_obj.getdetectface(image_data)
    detect_data_dict = []
    if rsp['ret'] == 0:
        for obj in rsp['data']['face_list']:
            gender = "male" if obj['gender'] > 50 else "female"
            # Map the expression score to a rough description
            if obj['expression'] < 10:
                smile = "downcast"
            elif obj['expression'] < 20:
                smile = "half vexed, half pleased"
            elif obj['expression'] < 30:
                smile = "faint smile"
            elif obj['expression'] < 40:
                smile = "beaming"
            elif obj['expression'] < 60:
                smile = "visibly delighted"
            elif obj['expression'] < 80:
                smile = "elated"
            else:
                smile = "dazzling smile"
            detect_data_dict.append({
                'gender': gender,
                'age': obj['age'],
                'expression': smile,
                'beauty': obj['beauty'],
            })
    return detect_data_dict
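# A minimal usage sketch for Get_face_detectface_api above, assuming app_id and app_key are
# set; './portrait.jpg' is a hypothetical input path. Each dict describes one detected face.
if __name__ == '__main__':
    for face_info in Get_face_detectface_api('./portrait.jpg'):
        print(face_info)  # e.g. {'gender': 'female', 'age': 23, 'expression': 'beaming', 'beauty': 85}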
def get_Aai_WxAsrs_api(file_path, for_mat=8, rate=16000, bits=16, seq=0, cont_res=1):
    once_size = 6400
    # Compute the MD5 of the whole file as the speech id (the file must be opened in binary mode)
    with open(file_path, 'rb') as f:
        md5obj = hashlib.md5()
        md5obj.update(f.read())
        hash = md5obj.hexdigest()
    speech_id = str(hash).upper()
    f = open(file_path, 'rb')
    file_size = os.path.getsize(file_path)
    data_list = []
    try:
        while True:
            chunk = f.read(once_size)
            if not chunk:
                break
            chunk_size = len(chunk)
            end = 1 if (seq + chunk_size) == file_size else 0
            ai_obj = apiutil.AiPlat(app_id, app_key)
            rsp = ai_obj.getAaiWxAsrs(chunk, speech_id, end, for_mat, rate, bits, seq, chunk_size, cont_res)
            seq += chunk_size
            if rsp['ret'] == 0:
                data_list.append(rsp['data']['speech_text'])
        return data_list
    finally:
        f.close()
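# A minimal usage sketch for get_Aai_WxAsrs_api above. It assumes app_id and app_key are set
# and that './data/speech_sample' (a hypothetical path) holds audio matching the default
# for_mat/rate/bits parameters; the recognized text of each chunk is joined into one string.
if __name__ == '__main__':
    pieces = get_Aai_WxAsrs_api('./data/speech_sample')
    print(''.join(pieces))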
f.close()
f = open(file_path, 'rb')
file_size = os.path.getsize(file_path)
try:
    while True:
        chunk = f.read(once_size)
        if not chunk:
            break
        chunk_size = len(chunk)
        if (seq + chunk_size) == file_size:
            end = 1
        else:
            end = 0
        ai_obj = apiutil.AiPlat(app_id, app_key)
        print '----------------------SEND REQ----------------------'
        rsp = ai_obj.getAaiWxAsrs(chunk, speech_id, end, for_mat, rate, bits, seq, chunk_size, cont_res)
        seq += chunk_size
        if rsp['ret'] == 0:
            print json.dumps(rsp, encoding="UTF-8", ensure_ascii=False, sort_keys=False, indent=4)
            print '----------------------API SUCC----------------------'
        else:
            print json.dumps(rsp, encoding="UTF-8", ensure_ascii=False, sort_keys=False, indent=4)
            print '----------------------API FAIL----------------------'
finally:
    f.close()
# -*- coding: utf-8 -*-
import apiutil
from PIL import Image
import json

AppID = '1106954466'
AppKey = '5u6ehLVCgxhznKhX'

ai_obj = apiutil.AiPlat(AppID, AppKey)
with open('1.png', 'rb') as bin_data:
    image_data = bin_data.read()

# rsp = ai_obj.getFaceInfo(image_data, 0)
rsp = ai_obj.getOcrGeneralocr(image_data)
print json.dumps(rsp)
exit()

# The code below is never reached and expects a face-detection response, not an OCR one
if rsp['ret'] == 0:
    beauty = 0
    for face in rsp['data']['face_list']:
        # print(face)
        face_area = (face['x'], face['y'], face['x'] + face['width'], face['y'] + face['height'])
        # print(face_area)
        img = Image.open("1.png")
        cropped_img = img.crop(face_area).convert('RGB')
        cropped_img.save('data/' + face['face_id'] + '.png')