def reset(self):
    """Return the odometry state to its initial condition.

    Zeroes the velocity, places both the current and previous pose at the
    origin with zero heading, and clears the stored encoder positions.
    """
    def _origin_pose():
        # Build a fresh origin pose each time so the two pose fields
        # never alias the same object.
        return RigidTransform(Translation(0, 0), Rotation.from_degrees(0))

    self.velocity = Twist(0, 0)
    self.pose = _origin_pose()
    self.previous_pose = _origin_pose()
    self.prev_left_pos = 0
    self.prev_right_pos = 0
def __init__(self, start_x, start_y, end_x, end_y, center_x=None, center_y=None):
    """Build a path segment between two points.

    With no center given the segment is a straight LINE; with a center it
    is an ARC whose radius is the (common) distance from the center to the
    endpoints.

    Fixes:
    - `== None` replaced with `is None` (identity check).
    - The equidistance test now uses abs(); the raw signed difference let
      a center that was much *closer* to the start than the end slip
      through (negative difference < 1e-9).
    """
    self.start = Translation(start_x, start_y)
    self.end = Translation(end_x, end_y)
    if center_x is None and center_y is None:
        self.slope = Translation.from_translations(self.start, self.end)
        self.segment_type = Type.LINE
    else:
        self.segment_type = Type.ARC
        self.center = Translation(center_x, center_y)
        center_to_start = Translation.from_translations(self.center, self.start)
        center_to_end = Translation.from_translations(self.center, self.end)
        if abs(center_to_start.norm() - center_to_end.norm()) < 1E-9:
            self.radius = center_to_start.norm()
        else:
            # Sentinel radius marking an inconsistent (non-equidistant) arc
            # definition — presumably checked downstream; TODO confirm.
            self.radius = -3256
def my_record(self, TIME=10, INTERVAL=10):
    '''Record audio and save it to a wave file.

    TIME is the recording time and INTERVAL the test interval (both
    apparently in seconds, with two reads per unit — TODO confirm the
    units against __NUM_SAMPLES/__framerate).

    Every INTERVAL*2 reads a probe clip is saved as '00.wav' and run
    through Translation.get_word; when get_word returns err == 0 the loop
    stops early, otherwise the probe counter and buffer are reset and
    recording continues. The full recording is finally saved as '01.wav'.
    '''
    pa = PyAudio()
    tr = Translation()
    stream = pa.open(format=paInt16, channels=self.__channels,
                     rate=self.__framerate, input=True,
                     frames_per_buffer=self.__NUM_SAMPLES)
    my_buf = []  # main audio buffer (the whole recording)
    buf = []     # probe audio buffer (the test clip)
    i = 0
    while i < TIME * 2:  # controls the recording time
        string_audio_data = stream.read(self.__NUM_SAMPLES)  # grab one audio chunk
        my_buf.append(string_audio_data)
        if i < INTERVAL * 2:
            buf.append(string_audio_data)
            i += 1
        elif i == INTERVAL * 2:
            self.__save_wave_file('00.wav', buf)  # save the probe clip
            err, a = tr.get_word('00.wav')  # check whether the probe clip is usable
            if err == 0:
                break
            i = 0     # restart the probe window
            buf = []  # likewise reset the probe buffer
        print('.', end=' ')  # progress indicator
    self.__save_wave_file('01.wav', my_buf)
    stream.close()
def import_vignettes():
    """Merge translated vignette strings into the exported CSVs.

    Reads the EXAPUNKS_vignettes translation table, walks the exported
    vignette CSVs, appends French/Chinese/Japanese columns to rows whose
    English text has a translation, and writes changed files under
    ../patch (mirroring the ../export_txt tree).

    Fixes: input and output files are now closed deterministically via
    `with`, and the blanket `except BaseException: pass` around makedirs
    is replaced by `exist_ok=True`.
    """
    print_percent('EXAPUNKS_vignettes.json')
    trans = Translation('EXAPUNKS_vignettes.json').get_translation()
    trans = insert_dot(trans)
    for root, dirs, files in os.walk('../export_txt/Content/vignettes'):
        for f in files:
            name = os.path.join(root, f)
            out = StringIO()
            csv_writer = csv.writer(out, lineterminator='\n', escapechar='\\')
            need_save = False
            with open(name, 'r', encoding='utf_8_sig') as src:
                for row in csv.reader(src):
                    if len(row) > 1:
                        en = row[1]
                        if en in trans and len(trans[en]) > 0:
                            need_save = True
                            row.extend([
                                trans[en][lang]
                                for lang in ('French', 'Chinese', 'Japanese')
                            ])
                    csv_writer.writerow(row)
            if need_save:
                name = name.replace('../export_txt', '../patch')
                os.makedirs(os.path.split(name)[0], exist_ok=True)
                with open(name, 'w', encoding='utf_8_sig') as dst:
                    dst.write(out.getvalue())
def get_args(self):
    """Prompt the user for the parameters of the pending operation and
    return the corresponding transform (or None).

    Consumes the required number of points from the point buffer.

    Fixes: `== None` → `is None`; `T` is now initialised so an
    unrecognised `self.op` no longer raises NameError at `return T`;
    mutually-exclusive branches use `elif`.
    """
    if self.op is None:
        return
    pts = self._requirements.get(self.op)  # Get requirements
    T = None  # default when op matches no branch (previously: NameError)
    if self.op == 'rotate':
        p = PopupWindow(self.pz.frame, "Insira o angulo (em graus)")
        self.pz.frame.wait_window(p.top)
        p_list = list(sum(self.pz.buffer[:pts], ()))
        T = Rotation(float(p.getval()), p_list[0], p_list[1])
    elif self.op == 'scale':
        px = PopupWindow(self.pz.frame, "Insira um fator para x")
        self.pz.frame.wait_window(px.top)
        py = PopupWindow(self.pz.frame, "Insira um fator para y")
        self.pz.frame.wait_window(py.top)
        p_list = list(sum(self.pz.buffer[:pts], ()))
        T = Scale(float(px.getval()), float(py.getval()), p_list[0], p_list[1])
    elif self.op == 'translate':
        px = PopupWindow(self.pz.frame, "Insira um offset para x")
        self.pz.frame.wait_window(px.top)
        py = PopupWindow(self.pz.frame, "Insira um offset para y")
        self.pz.frame.wait_window(py.top)
        T = Translation(float(px.getval()), float(py.getval()))
    elif self.op == 'zoom':
        self.zoom(self.pz.buffer[:pts])
        T = None
    self.pz.buffer = self.pz.buffer[pts:]  # Remove points of buffer
    return T
def test_translation(self):
    """A Translation exposes its strokes, RTF/CRE tuple and English text."""
    dictionary = StenoDictionary()
    dictionary[('S', 'T')] = 'translation'
    strokes = [Stroke('S'), Stroke('T')]
    translation = Translation(strokes, dictionary)
    self.assertEqual(translation.strokes, strokes)
    self.assertEqual(translation.rtfcre, ('S', 'T'))
    self.assertEqual(translation.english, 'translation')
def export_vignettes():
    """Collect unique English vignette strings into a translation table.

    Walks Content/vignettes, takes column 1 of each CSV row as the English
    text, reuses an existing translation entry when one exists, and saves
    the combined table as EXAPUNKS_vignettes.json.

    Fix: each CSV file is now closed deterministically via `with`
    (handles previously leaked until garbage collection).
    """
    trans = try_to_get_translation('../import_txt/EXAPUNKS_vignettes.json')
    original = set()
    data = []
    for root, dirs, files in os.walk('Content/vignettes'):
        for f in files:
            with open(os.path.join(root, f), 'r', encoding='utf_8_sig') as src:
                for row in csv.reader(src):
                    if len(row) > 1:
                        en = row[1]
                        if en not in original:
                            original.add(en)
                            if en in trans:
                                data.append(trans[en])
                            else:
                                data.append({
                                    'FileName': f,
                                    'Role': row[0],
                                    'English': en
                                })
    translation = Translation()
    translation.set_data(
        data,
        ('FileName', 'Role', 'English', 'French', 'Chinese', 'Japanese'))
    translation.save('EXAPUNKS_vignettes.json')
def createObjectsFromFile(dataSet):
    """Load Translation objects for one classification data set.

    The sentence file holds records of four lines each (source, reference,
    hypothesis, numeric translation id); the matching feature vector is
    line currIndex/4 of the feature file.

    Fixes: both files are closed deterministically via `with`; the
    redundant deepcopy is dropped (a fresh Translation is created every
    iteration, so the appended object is never mutated afterwards).
    """
    sentence_path = (const.CLASSIFICATION_DATASET + "/" + dataSet + "/" +
                     const.CLASSIFICATION_SENTENCES)
    feature_path = (const.CLASSIFICATION_DATASET + "/" + dataSet + "/" +
                    const.CLASSIFICATION_FEATURES)
    with open(sentence_path, "r") as sentenceFile:
        sentenceData = sentenceFile.readlines()
    with open(feature_path, "r") as featureFile:
        featureData = featureFile.readlines()
    translations = []
    currIndex = 0
    while currIndex < len(sentenceData):
        currTranslation = Translation()
        currTranslation.source = sentenceData[currIndex]
        currTranslation.reference = sentenceData[currIndex + 1]
        currTranslation.hypothesis = sentenceData[currIndex + 2]
        currTranslation.trnID = float(sentenceData[currIndex + 3])
        currTranslation.loadProperties([
            float(i.strip('\n'))
            for i in featureData[int(currIndex / 4)].split(" ")
        ])
        currIndex += 4
        translations.append(currTranslation)
    return translations
def parseGenerationResult():
    """Parse a fairseq-generate log into Translation objects.

    Line prefixes: S- source, T- reference, H- hypothesis, P- per-token
    scores. A P- line completes one record: summary statistics are stored
    on the Translation, mirrored to SENTENCE_STATS, and the record is
    appended. The source/reference/hypothesis streams are echoed to their
    respective output files.

    Fixes: all five files (the input log was previously never closed) are
    managed with a single `with`; the deepcopy is dropped because
    currTranslation is rebound to a fresh Translation right after being
    appended, so the stored object is never mutated again.
    """
    translations = []
    translation_id = None
    currTranslation = Translation()
    with open(const.FAIRSEQ_GENERATE_FILE, "r") as bleu_res, \
            open(const.NMT_ORIGINAL, "w") as NMT_original, \
            open(const.NMT_GROUND_TRUTH, "w") as NMT_ground_truth, \
            open(const.NMT_OUTPUT, "w") as NMT_output, \
            open(const.SENTENCE_STATS, "w") as Sentence_stats:
        for line in bleu_res:
            if line.startswith("H-"):
                hypothesis = line.split("\t")[2]
                currTranslation.hypothesis = hypothesis
                NMT_output.write(hypothesis)
            elif line.startswith("T-"):
                reference = line.split("\t")[1]
                currTranslation.reference = reference
                NMT_ground_truth.write(reference)
            elif line.startswith("S-"):
                translation_id = float(line.split("\t")[0].split("-")[1])
                source = line.split("\t")[1]
                currTranslation.source = source
                currTranslation.trnID = int(translation_id)
                NMT_original.write(source)
            elif line.startswith("P-"):
                scores = [float(i) for i in line.split("\t")[1].split(" ")]
                currTranslation.avgLP = mean(scores)
                currTranslation.minLP = min(scores)
                currTranslation.medianLP = median(scores)
                currTranslation.maxLP = max(scores)
                currTranslation.sumLP = sum(scores)
                Sentence_stats.write(
                    str(mean(scores)) + " " + str(min(scores)) + " " +
                    str(median(scores)) + " " + str(max(scores)) + " " +
                    str(sum(scores)) + " " + str(translation_id) + "\n")
                translations.append(currTranslation)
                currTranslation = Translation()
    return translations
def __init__(self, config_file, translation_folder):
    """Set up the recorder: load config/translations, initialise state,
    create the Minecraft connection (offline or authenticated) and
    register the packet listeners."""
    self.config = config.Config(config_file)
    self.translations = Translation(translation_folder)
    self.working = False
    self.online = False
    self.stop_by_user = False  # set to true once PCRC is stopped by user; reset to false when PCRC starts
    self.file_thread = None
    self.chat_thread = None
    self.file_buffer = bytearray()
    self.file_name = None
    self.file_urls = []
    self.mc_version = None   # filled in later, presumably on join — TODO confirm
    self.mc_protocol = None  # likewise
    self.logger = Logger(name='Recorder', display_debug=self.config.get('debug_mode'))
    self.print_config()
    if not self.config.get('online_mode'):
        # Offline mode: connect with just the configured username.
        self.logger.log("Login in offline mode")
        self.connection = Connection(
            self.config.get('address'), self.config.get('port'),
            username=self.config.get('username'), recorder=self,
            allowed_versions=utils.ALLOWED_VERSIONS,
            handle_exception=self.onConnectionException)
    else:
        # Online mode: authenticate first, then store the profile name
        # back into the config so it matches the authenticated account.
        self.logger.log("Login in online mode")
        auth_token = authentication.AuthenticationToken()
        auth_token.authenticate(self.config.get('username'),
                                self.config.get('password'))
        self.logger.log("Logged in as %s" % auth_token.profile.name)
        self.config.set_value('username', auth_token.profile.name)
        self.connection = Connection(
            self.config.get('address'), self.config.get('port'),
            auth_token=auth_token, recorder=self,
            allowed_versions=utils.ALLOWED_VERSIONS,
            handle_exception=self.onConnectionException)
    # Raw packet taps (both directions) plus specific clientbound handlers.
    self.connection.register_packet_listener(self.onPacketReceived, PycraftPacket)
    self.connection.register_packet_listener(self.onPacketSent, PycraftPacket, outgoing=True)
    self.connection.register_packet_listener(
        self.onGameJoin, clientbound.play.JoinGamePacket)
    self.connection.register_packet_listener(
        self.onDisconnect, clientbound.play.DisconnectPacket)
    self.connection.register_packet_listener(
        self.onChatMessage, clientbound.play.ChatMessagePacket)
    self.connection.register_packet_listener(
        self.onPlayerPositionAndLook,
        clientbound.play.PlayerPositionAndLookPacket)
    self.protocolMap = {}
    self.logger.log('init finish')
def test_rigidtransform(self):
    """Exercise RigidTransform: default/explicit construction, composition
    (transform), inverse, and line intersection."""
    # test constructor
    pose1 = RigidTransform()
    self.assertEqual(pose1.get_translation().get_x(), 0.0)
    self.assertEqual(pose1.get_translation().get_y(), 0.0)
    self.assertEqual(pose1.get_rotation().get_theta(), 0.0)
    pose2 = RigidTransform(Translation(10.0, 15.0), Rotation.from_degrees(45.0))
    self.assertEqual(pose2.get_translation().get_x(), 10.0)
    self.assertEqual(pose2.get_translation().get_y(), 15.0)
    self.assertEqual(pose2.get_rotation().get_theta(), 45.0)
    # test transform: composing with the identity (either side) is a no-op
    pose3 = pose1.transform(pose2)
    self.assertAlmostEqual(pose3.get_translation().get_x(), 10.0)
    self.assertAlmostEqual(pose3.get_translation().get_y(), 15.0)
    self.assertAlmostEqual(pose3.get_rotation().get_theta(), 45.0)
    pose4 = pose2.transform(pose1)
    self.assertAlmostEqual(pose4.get_translation().get_x(), 10.0)
    self.assertAlmostEqual(pose4.get_translation().get_y(), 15.0)
    self.assertAlmostEqual(pose4.get_rotation().get_theta(), 45.0)
    # non-trivial composition; note transform is not commutative
    pose5 = RigidTransform(Translation(10.0, 10.0), Rotation.from_degrees(45.0))
    pose6 = pose5.transform(pose2)
    # used this link to verify: http://www.wolframalpha.com/widgets/view.jsp?id=bd71841fce4a834c804930bd48e7b6cf
    self.assertAlmostEqual(pose6.get_translation().get_x(),
                           10 - (25 / math.sqrt(2)) + 10 * math.sqrt(2))
    self.assertAlmostEqual(pose6.get_translation().get_y(),
                           25 / math.sqrt(2) + 10 * math.sqrt(2) - 10 * (-1 + math.sqrt(2)))
    self.assertAlmostEqual(pose6.get_rotation().get_theta(), 90.0)
    pose7 = pose2.transform(pose5)
    self.assertAlmostEqual(pose7.get_translation().get_x(),
                           -25.0 / math.sqrt(2) + 10 * math.sqrt(2) + 5.0 / 2.0 * (4 + math.sqrt(2)))
    self.assertAlmostEqual(pose7.get_translation().get_y(),
                           25.0 / math.sqrt(2) + 10 * math.sqrt(2) - 5.0 / 2.0 * (-6 + 5 * math.sqrt(2)))
    self.assertAlmostEqual(pose7.get_rotation().get_theta(), 90.0)
    # test inverse: a pose composed with its inverse is the identity
    pose6inverse = pose6.inverse()
    pose8 = pose6.transform(pose6inverse)
    self.assertAlmostEqual(pose8.get_translation().get_x(), 0)
    self.assertAlmostEqual(pose8.get_translation().get_y(), 0)
    self.assertAlmostEqual(pose8.get_rotation().get_theta(), 0)
    # test intersection of the lines defined by the two poses
    intersection_point = pose1.intersection(pose2)
    self.assertAlmostEqual(intersection_point.get_x(), -5.0)
    self.assertAlmostEqual(intersection_point.get_y(), 0.0)
def find_supported_translations(self):
    """
    Retrieves a list of supported translations from BibleGateway's
    translation page.

    Fixes: the HTTP response is now closed after reading; the testament
    flags are default-initialised (full bible, no apocrypha) so an
    unrecognised testament label can no longer leave them unbound or
    carrying values from a previous loop iteration.
    """
    url = "https://www.biblegateway.com/versions/"
    translations = []
    page = urlopen(url)
    try:
        soup = BeautifulSoup(page.read())
    finally:
        page.close()
    trans = soup.findAll("tr", {"class": "language-row"})
    for t in trans:
        if not t.find("a").has_attr("title"):
            t_text = t.find("td", {"class": "translation-name"}).get_text()
            t_name = t_text[:t_text.rfind("(") - 1]
            t_abbreviation = t_text[t_text.rfind("(") + 1:t_text.rfind(")")]
            t_language = t["data-language"]
            # Default: full bible without deuterocanon; a "testament" span
            # narrows or extends this.
            t_has_ot = True
            t_has_nt = True
            t_has_deut = False
            if t.find("span", {"class": "testament"}):
                section = t.find("span", {"class": "testament"}).get_text()
                if section == "OT":
                    t_has_ot, t_has_nt, t_has_deut = True, False, False
                elif section == "NT":
                    t_has_ot, t_has_nt, t_has_deut = False, True, False
                elif section == "with Apocrypha":
                    t_has_ot, t_has_nt, t_has_deut = True, True, True
            new_trans = Translation(t_name, t_abbreviation, t_language,
                                    t_has_ot, t_has_nt, t_has_deut)
            translations.append(new_trans)
    # Add local translations to supported translations list
    translations.append(
        Translation("JPS Tanakh", "JPS", "en", True, False, False))
    return translations
def collapse_and_rename(self):
    """Collapses names to ints and indexes 0 .. self.n-1"""
    mapping = Translation(*self.edgelist.keys())
    # Rebuild the adjacency dict with every node and neighbour renamed.
    self.edgelist = {
        mapping[node]: [mapping[neighbor] for neighbor in neighbors]
        for node, neighbors in self.edgelist.items()
    }
    self.translation = mapping
    self.translation.lock()
def __init__(self, translation_=None, rotation_=None):
    """Create a transform from an optional translation and rotation;
    each component defaults to its identity when omitted.

    Fix: `== None` replaced with `is None` — an identity check cannot be
    hijacked by a custom `__eq__` on Translation/Rotation.
    """
    self.translation = Translation() if translation_ is None else translation_
    self.rotation = Rotation() if rotation_ is None else rotation_
def intersection(self, other_transform):
    """Return the Translation where the lines defined by this transform's
    and other_transform's headings cross.

    Parallel headings have no finite intersection, so an (inf, inf)
    point is returned as a sentinel.
    """
    other_rot = other_transform.get_rotation()
    if (self.rotation.is_parallel(other_rot)):
        #should never reach here
        return Translation(float("inf"), float("inf"))
    # Order the arguments by |cos(theta)| — presumably so intersection_
    # divides by the better-conditioned heading; TODO confirm against
    # intersection_'s implementation. Note intersection_ takes both
    # transforms explicitly (self is passed as an argument).
    if (math.fabs(self.rotation.get_cos()) < math.fabs(
            other_rot.get_cos())):
        return self.intersection_(self, other_transform)
    else:
        return self.intersection_(other_transform, self)
def import_strings():
    """Export the EXAPUNKS_exe translation table to strings.csv.

    First warns about placeholder-variable mismatches (e.g. {0}) between
    the English and Chinese columns, then writes one CSV row per key with
    an empty second column followed by the five translations.

    Fix: strings.csv is now closed deterministically via `with` (the
    handle previously leaked, risking unflushed output).
    """
    print_percent('EXAPUNKS_exe.json')
    for v in Translation('EXAPUNKS_exe.json').check_variables(
            regex=r'\{\d*\}', org_index='English', trans_index='Chinese',
            ordered=False):
        print('Warning: ', v)
    trans = Translation('EXAPUNKS_exe.json').get_translation()
    trans = insert_dot(trans)
    with open('strings.csv', 'w', encoding='utf-8') as out:
        csv_writer = csv.writer(out, lineterminator='\n', escapechar='\\')
        for key, value in trans.items():
            row = [key, '']
            row.extend([
                value[lang]
                for lang in ('German', 'French', 'Russian', 'Chinese', 'Japanese')
            ])
            csv_writer.writerow(row)
def change_lang(new_lang):
    """
    Change language of the user and apply the required changes

    Rebuilds every keyboard layout from the new language's strings; for
    RTL languages (fa, ar) the opt-in buttons are mirrored.

    :param new_lang: New language to be stored
    :return: None on failure to load the language file

    Fix: the caught exception is now included in the error log instead of
    being silently discarded.
    """
    try:
        globalvars.lang = Translation(new_lang, CONFIG['LANGUAGE_FILE'])
    except Exception as exc:
        logger.error("Error in Language file! %s", exc)
        return None
    globalvars.HOME_KEYBOARD = [
        [
            globalvars.lang.text('MENU_HOME_EXISTING_KEY'),
            globalvars.lang.text('MENU_HOME_NEW_KEY')
        ],
        [
            globalvars.lang.text('MENU_HOME_FAQ'),
            globalvars.lang.text('MENU_HOME_INSTRUCTION')
        ],
        [
            globalvars.lang.text('MENU_HOME_CHANGE_LANGUAGE'),
            globalvars.lang.text('MENU_HOME_PRIVACY_POLICY')
        ],
        [
            globalvars.lang.text('MENU_HOME_SUPPORT'),
            globalvars.lang.text('MENU_HOME_DELETE_ACCOUNT')
        ],
        [
            globalvars.lang.text('MENU_CHECK_STATUS'),
        ]
    ]
    globalvars.BACK_TO_HOME_KEYBOARD = [[
        globalvars.lang.text('MENU_BACK_HOME')
    ]]
    # RTL languages get the confirm/decline buttons in mirrored order.
    if new_lang in ['fa', 'ar']:
        globalvars.OPT_IN_KEYBOARD = [[
            globalvars.lang.text('MENU_PRIVACY_POLICY_DECLINE'),
            globalvars.lang.text('MENU_PRIVACY_POLICY_CONFIRM')
        ]]
    else:
        globalvars.OPT_IN_KEYBOARD = [[
            globalvars.lang.text('MENU_PRIVACY_POLICY_CONFIRM'),
            globalvars.lang.text('MENU_PRIVACY_POLICY_DECLINE')
        ]]
    globalvars.OPT_IN_DECLINED_KEYBOARD = [[
        globalvars.lang.text('MENU_BACK_PRIVACY_POLICY'),
        globalvars.lang.text('MENU_HOME_CHANGE_LANGUAGE')
    ]]
def convert(input_name, output_name, auto=False):
    """Convert input_name into output_name via Translation.

    If the output already exists and is newer than the input, skip it —
    silently in auto mode, otherwise after asking the user.
    """
    if os.path.exists(output_name):
        newer = os.path.getmtime(input_name) < os.path.getmtime(output_name)
        if newer:
            if auto:
                print('file "%s" is newer, skipped.' % output_name)
                return
            answer = input('file "%s" is newer, overwrite? [Y/N]' % output_name)
            if answer.lower() != 'y':
                return
    print('%s ==> %s' % (input_name, output_name))
    Translation(input_name).save(output_name)
def test_listeners(self):
    """Listeners are notified on translate, adding the same listener twice
    must not double-fire it, and removed listeners stop receiving."""
    output1 = []

    def listener1(undo, do, prev):
        output1.append((undo, do, prev))

    output2 = []

    def listener2(undo, do, prev):
        output2.append((undo, do, prev))

    t = Translator()
    s = Stroke('S')
    tr = Translation([s], StenoDictionary())
    # Each translate of 'S' undoes nothing, emits [tr], with tr as the
    # previous translation (from the warm-up translate below).
    expected_output = [([], [tr], tr)]
    t.translate(s)
    t.add_listener(listener1)
    t.translate(s)
    self.assertEqual(output1, expected_output)
    del output1[:]
    t.add_listener(listener2)
    t.translate(s)
    self.assertEqual(output1, expected_output)
    self.assertEqual(output2, expected_output)
    del output1[:]
    del output2[:]
    # Adding listener2 a second time must not make it fire twice.
    t.add_listener(listener2)
    t.translate(s)
    self.assertEqual(output1, expected_output)
    self.assertEqual(output2, expected_output)
    del output1[:]
    del output2[:]
    t.remove_listener(listener1)
    t.translate(s)
    self.assertEqual(output1, [])
    self.assertEqual(output2, expected_output)
    del output1[:]
    del output2[:]
    t.remove_listener(listener2)
    t.translate(s)
    self.assertEqual(output1, [])
    self.assertEqual(output2, [])
def exp(twist):
    """Exponential map: convert a constant-curvature Twist into the
    RigidTransform reached by following it.

    NOTE(review): both translation components are scaled from twist.dx
    only — this presumes dy == 0 (no sideways motion, as for a
    differential drive); confirm that assumption holds for all callers.
    """
    cos_theta = math.cos(twist.dtheta)
    sin_theta = math.sin(twist.dtheta)
    rotation = Rotation(cos_theta, sin_theta)
    # if theta is very small, use taylor series to approximate (we can't divide by zero)
    # `zero` is a module-level epsilon threshold defined elsewhere.
    if (math.fabs(twist.dtheta) < zero):
        # sin(t)/t  ~ 1 - t^2/6 + t^4/120
        sin_theta_over_theta = 1.0 - math.pow(
            twist.dtheta, 2) / 6.0 + math.pow(twist.dtheta, 4) / 120.0
        # (1-cos(t))/t ~ t/2 - t^3/24 + t^5/720
        one_minus_cos_theta_over_theta = 1.0 / 2.0 * twist.dtheta - math.pow(
            twist.dtheta, 3) / 24.0 + math.pow(twist.dtheta, 5) / 720.0
    else:
        sin_theta_over_theta = sin_theta / twist.dtheta
        one_minus_cos_theta_over_theta = (1.0 - cos_theta) / twist.dtheta
    translation = Translation(sin_theta_over_theta * twist.dx,
                              one_minus_cos_theta_over_theta * twist.dx)
    return RigidTransform(translation, rotation)
def commit_verb_info(db_session, language, verb_info):
    """Persist a verb's conjugations and its English translations.

    Duplicate-key inserts are tolerated: on IntegrityError the session is
    rolled back and the conflict is logged. Translations sharing the same
    English gloss are merged by concatenating their descriptions.

    Always returns True, even after a rollback.

    NOTE(review): json.dumps(..., encoding='utf-8') is a Python 2-only
    keyword; on Python 3 this call raises TypeError — confirm the target
    interpreter before reusing this code.
    """
    verb_data_json = json.dumps(verb_info['modes'],
                                sort_keys=True,
                                indent=4,
                                separators=(',', ': '),
                                ensure_ascii=False,
                                encoding='utf-8').encode('utf-8')
    verb = Verb(lang=language, verb=verb_info['name'],
                conjugations=verb_data_json)
    try:
        db_session.add(verb)
        db_session.commit()
    except IntegrityError:
        db_session.rollback()
        logging.error('Verb already in DB %s' % verb_info['name'])
    translations = []
    for meaning in verb_info['meanings']:
        translation = Translation(lang=language,
                                  english=meaning['eng'],
                                  description=meaning['description'],
                                  verb=verb.verb)
        # Merge meanings that map to the same English word.
        duped_translation = next(
            (t for t in translations if t.english == translation.english),
            None)
        if duped_translation:
            duped_translation.description += ', ' + translation.description
        else:
            translations.append(translation)
    try:
        for translation in translations:
            db_session.add(translation)
        db_session.commit()
    except IntegrityError:
        db_session.rollback()
        logging.error('Translations already exist for %s' % verb_info['name'])
    return True
def export_descriptions():
    """Collect unique description lines into a translation table.

    Walks Content/descriptions, takes each non-empty stripped line as
    English text, reuses an existing translation entry when available,
    and saves the result as EXAPUNKS_descriptions.json.

    Fix: each description file is now closed deterministically via `with`
    (handles previously leaked until garbage collection).
    """
    trans = try_to_get_translation('../import_txt/EXAPUNKS_descriptions.json')
    original = set()
    data = []
    for root, dirs, files in os.walk('Content/descriptions'):
        for f in files:
            with open(os.path.join(root, f), 'r', encoding='utf_8_sig') as src:
                for line in src:
                    line = line.strip()
                    if len(line) > 0 and line not in original:
                        original.add(line)
                        if line in trans:
                            data.append(trans[line])
                        else:
                            data.append({'FileName': f, 'English': line})
    translation = Translation()
    translation.set_data(data,
                         ('FileName', 'English', 'German', 'French',
                          'Russian', 'Chinese', 'Japanese'))
    translation.save('EXAPUNKS_descriptions.json')
class CrawlhtmlPipeline(object):
    """Scrapy pipeline: runs each item's content through a shared
    Translation instance and stores the item in MongoDB."""

    # Class-level, so one Translation is shared by every pipeline instance.
    t = Translation()

    def __init__(self):
        # NOTE(review): pymongo.Connection is the legacy (pre-3.x) API;
        # modern pymongo uses MongoClient — confirm the pinned version.
        connection = pymongo.Connection(settings['MONGODB_SERVER'],
                                        settings['MONGODB_PORT'])
        db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]

    def process_item(self, item, spider):
        """Translate the item's content and insert it; drop empty items."""
        valid = True
        if not item or item['content'] == '':
            # NOTE(review): raise exits here, so `valid = False` and the
            # `if valid` guard below are effectively dead code.
            valid = False
            raise DropItem("Missing content of page from %s" %
                           (getattr(item, 'url', '')))
        else:
            new_text = CrawlhtmlPipeline.t.translateTxt(item['content'])
            item['content'] = new_text
        if valid:
            self.collection.insert(dict(item))
        return item
def testSaveTranslation(self):
    """Saving an en->es translation lets find() return the Spanish phrase."""
    db = Db()
    # Point the Db at a throwaway file and start from a fresh table.
    db.dbFile = "testmetabrainwallet.db"
    db.translation.dropTable()
    db.translation.createTable()
    hello = Phrase({'language': 'en', 'content': 'hello'})
    hola = Phrase({'language': 'es', 'content': 'hola'})
    db.phrase.save(hello)
    db.phrase.save(hola)
    # Link the two phrases by their ids (set by phrase.save).
    translation = Translation({'originalId': hello.id, 'translatedId': hola.id})
    db.translation.save(translation)
    phrases = db.translation.find(hello, 'es')
    self.assertTrue(len(phrases) == 1)
    self.assertTrue(phrases[0].id == hola.id)
    db.close()
def import_descriptions():
    """Generate per-language description files from the translation table.

    For every English description file under ../export_txt, writes one
    file per target language under ../patch (swapping /en for the
    language code), replacing each line that has a non-empty translation.

    Fixes: files are closed deterministically via `with`; the blanket
    `except BaseException: pass` around makedirs is replaced by
    `exist_ok=True`.
    """
    LANGS = {
        'German': 'de',
        'French': 'fr',
        'Russian': 'ru',
        'Chinese': 'zh',
        'Japanese': 'ja'
    }
    print_percent('EXAPUNKS_descriptions.json')
    trans = Translation('EXAPUNKS_descriptions.json').get_translation()
    trans = insert_dot(trans)
    for root, dirs, files in os.walk('../export_txt/Content/descriptions/en'):
        for f in files:
            name = os.path.join(root, f)
            with open(name, 'r', encoding='utf_8_sig') as src:
                lines = src.readlines()
            for country, abbr in LANGS.items():
                out = StringIO()
                # NOTE(review): need_save is forced True so every file is
                # written even when nothing was translated; the
                # commented-out assignment below suggests per-line
                # tracking was disabled deliberately — confirm before
                # changing.
                need_save = True
                for line in lines:
                    line = line.strip()
                    if len(line) > 0 \
                            and line in trans \
                            and len(trans[line][country]) > 0:
                        line = trans[line][country]
                        # need_save = True
                    out.write(line + '\n')
                if need_save:
                    new_name = name.replace('../export_txt',
                                            '../patch').replace(
                                                '/en', '/' + abbr, 1)
                    os.makedirs(os.path.split(new_name)[0], exist_ok=True)
                    # NOTE(review): 'utf' (UTF-8, no BOM) differs from the
                    # 'utf_8_sig' used when reading — confirm which
                    # encoding the game expects.
                    with open(new_name, 'w', encoding='utf') as dst:
                        dst.write(out.getvalue())
def find(input='input.fasta', frame='all'):
    """Find ORFs in the FASTA file ./test_data/<input> and write them to
    the module-level `output` file.

    NOTE(review): the `frame` parameter is unused — the code reads the
    module-level globals frame_number, min_size, output and write_file
    instead; confirm whether `frame` was meant to drive frame_number.

    Fixes: the input file is closed via `with`; the bare `except:` (which
    also swallowed SystemExit/KeyboardInterrupt) is narrowed to
    `except Exception`.
    """
    try:
        with open('./test_data/' + input, 'r') as f:
            lines = f.readlines()
        # Header line like ">xx.NAME..." -> first 5 chars after the dot.
        name_start = lines[0].split()[0].split('.')[1][:5].upper()
        seq = ''.join(lines[1:]).replace('\n', '')
        all_length = len(seq)
        rc_seqs = ReverseComplement(seq)
        frames_6 = {}
        if (frame_number):
            frames_6 = rc_seqs.fillOneFrame(frame_number)
        else:
            frames_6 = rc_seqs.fillAll6()
        for key in rc_seqs.frame_6_set:
            frames_6[key] = Translation(frames_6[key]).proteinTranslation()
            ORFs = FindORF(frames_6[key], min_size)
            results = ORFs.findLongestORF(key)
            if len(results):
                index = 1
                for start, orf in results.items():
                    if '-' in key:
                        # Reverse-strand frame: report the position on the
                        # forward coordinate system.
                        start = all_length - start + 1
                    output.write(
                        '>%s_F%s_%04d\t%s\t%d\t%d\n' %
                        (name_start, key[1], index, key, len(orf), start))
                    output.write('\n'.join(
                        [orf[i:i + 30] for i in range(0, len(orf), 30)]) + '\n')
                    index += 1
        print(
            'ORFs have been found for: %s, please check in output file: %s (under ./test_data folder)'
            % (input, write_file))
    except Exception:
        # NOTE(review): any processing error is still reported as a
        # missing file; consider narrowing further to FileNotFoundError.
        print(
            'Cannot find file %s under ./test_data folder. Please input exist filename.'
            % input)
def test_changing_state(self):
    """Translator state can be saved, restored and cleared, and listener
    output reflects the active state after each transition."""
    output = []

    def listener(undo, do, prev):
        output.append((undo, do, prev))

    d = StenoDictionary()
    d[('S', 'P')] = 'hi'
    dc = StenoDictionaryCollection()
    dc.set_dicts([d])
    t = Translator()
    t.set_dictionary(dc)
    t.translate(stroke('T'))
    t.translate(stroke('S'))
    # Snapshot the state after T, S so it can be restored later.
    s = copy.deepcopy(t.get_state())
    t.add_listener(listener)
    # P after S completes the S/P -> 'hi' entry: the bare S translation is
    # undone, S+P ('hi') is emitted, and T is the previous translation.
    expected = [([Translation([stroke('S')], None)],
                 [Translation([stroke('S'), stroke('P')], 'hi')],
                 Translation([stroke('T')], None))]
    t.translate(stroke('P'))
    self.assertEqual(output, expected)
    del output[:]
    # Restoring the snapshot reproduces the same transition.
    t.set_state(s)
    t.translate(stroke('P'))
    self.assertEqual(output, expected)
    del output[:]
    # With cleared state P stands alone: nothing undone, no previous.
    t.clear_state()
    t.translate(stroke('P'))
    self.assertEqual(output, [([], [Translation([stroke('P')], None)], None)])
    del output[:]
    # After the restored S/P -> 'hi', another P is a fresh untranslated
    # stroke whose previous translation is 'hi'.
    t.set_state(s)
    t.translate(stroke('P'))
    self.assertEqual(output,
                     [([], [Translation([stroke('P')], None)],
                       Translation([stroke('S'), stroke('P')], 'hi'))])
# -*- coding: utf-8 -*- """ Created on Sat Jan 20 14:05:16 2018 测试代码 @author: mxwbq """ from wav import WWAV from translation import Translation w = WWAV() x = Translation() count = 0 while count <= 10: # 录制测试次数 w.my_record() # 开始录制,无参数传入,则为默认TIME为60,INTERVAL为5 count += 1 condition, word = x.get_word("01.wav") # 得到转换后的信息 print("\n") if condition == 1: print("转换内容为:" + word) # 有效转换则返回转换后的文本 else: print("等待语音录入")
def t(self, strokes):
    """A quick way to make a translation."""
    # Split the slash-separated steno string into individual strokes.
    stroke_objects = [Stroke(part) for part in strokes.split('/')]
    return Translation(stroke_objects, self.d)
def setUp(self):
    """Create three translations of increasing stroke count, all backed
    by the same (empty) dictionary."""
    dictionary = StenoDictionary()

    def make(keys):
        return Translation([Stroke(k) for k in keys], dictionary)

    self.a = make(['S'])
    self.b = make(['T', '-D'])
    self.c = make(['-Z', 'P', 'T*'])