def paser_response_text_l2china(data):
    """Parse the l2china translation HTML into a TranslateResult.

    data: HTML text of the translation response page.

    Returns a TranslateResult with one (word, pronounce) entry per <span>
    inside the first ``div.generated_text`` element. A span whose ``title``
    attribute is missing, or is the literal string "null", contributes
    ``pronounce=None``.

    Raises TranslationException when the page contains no
    ``div.generated_text`` element (i.e. no translation result).
    """
    soup = bs4.BeautifulSoup(data, "lxml")
    texts = soup.select("div.generated_text")
    # Guard clause: an empty select() result means the page carried no
    # translation; fail early instead of falling through.
    if not texts:
        raise TranslationException("暂无翻译结果")
    result = TranslateResult()
    for span in texts[0].select("span"):
        word = span.text
        # Use .get() rather than span["title"] so a span without a title
        # attribute yields None instead of raising KeyError; the site also
        # encodes "no pronunciation" as the literal string "null".
        pronounce = span.get("title")
        if pronounce == "null":
            pronounce = None
        result.add(word, pronounce)
    return result
# NOTE(review): dead code — an identical redefinition of
# get_notations_result appears immediately below and shadows this one at
# import time. Kept byte-for-byte; consider deleting one of the two copies.
def get_notations_result(userid, content):
    """Return a phonetic-notation string for *content*.

    On success the raw query is cached for *userid* (prefixed with "@"); when
    that caching succeeds a TranslateResult is also stored via
    cache_notations — presumably so a follow-up "#" reply can fetch audio
    (inferred from the hint string appended below; confirm against caller).
    """
    r = phonetic.get_notations_result(content)
    if r:
        # Only when the raw "@"-prefixed message is cached do we also build
        # and cache the TranslateResult used for the audio follow-up.
        if cache_user_msg(userid, "@" + content):
            result = TranslateResult()
            result.words = r.in_str
            result.pronounce_list = r.plist
            result.has_pronounce = True
            cache_notations(userid, result)
            # Hint line: "reply # to get audio".
            return r.pretty() + u"\n--回复#获得语音--"
        else:
            return r.pretty()
    else:
        # No parse result available for this content.
        return u"暂无解析1"
def get_notations_result(userid, content):
    """Build the phonetic-notation reply text for *content*.

    When notation lookup succeeds and the "@"-prefixed query can be cached
    for *userid*, a TranslateResult is additionally stored via
    cache_notations and the returned text carries a follow-up hint line;
    otherwise just the pretty-printed notations (or a "no result" message)
    are returned.
    """
    notations = phonetic.get_notations_result(content)
    # Guard clause: nothing could be parsed for this content.
    if not notations:
        return u"暂无解析1"
    # Record the raw query first; only on success do we cache the
    # TranslateResult that backs the audio follow-up.
    if not cache_user_msg(userid, "@" + content):
        return notations.pretty()
    cached = TranslateResult()
    cached.words = notations.in_str
    cached.pronounce_list = notations.plist
    cached.has_pronounce = True
    cache_notations(userid, cached)
    return notations.pretty() + u"\n--回复#获得语音--"
raise e trans_result = node["trans_result"] result_text = "" for t in trans_result: dst = t["dst"] if type(dst)==unicode: dst = dst.encode("utf-8") # print dst result_text = result_text + "\n" + dst # print result_text result_text = result_text[1:] r = phonetic.get_notations_result(result_text.decode("utf-8")) print r.plist result = TranslateResult() result.words = r.in_str result.pronounce_list = r.plist result.has_pronounce = True return result _traslate_url = "http://www.l2china.com/yueyu/" _text_len_limit = 100 def get_translation_l2china(text): uni = type(text) == unicode if uni: txt_len = len(text) utf8_txt = text.encode('utf-8') else: txt_len = len(text.decode('utf-8'))