def _create_menu_items(self, files, group):
    """Build the Nautilus context-menu items (copy path/URI/name) for *files*.

    Only the actions enabled in ``self.config["items"]`` are created.  The
    label uses the plural translation key when more than one file is
    selected.

    :param files: list of selected Nautilus file objects
    :param group: suffix appended to each item name to keep names unique
    :return: list of created Nautilus.MenuItem objects
    """
    plural = len(files) > 1
    config_items = self.config["items"]
    active_items = []
    # One spec per action: (config key, name suffix, plural key,
    # singular key, activate handler).  Replaces three copy-pasted blocks.
    specs = (
        ("path", "CopyPath", "copy_paths", "copy_path", self._copy_paths),
        ("uri", "CopyUri", "copy_uris", "copy_uri", self._copy_uris),
        ("name", "CopyName", "copy_names", "copy_name", self._copy_names),
    )
    for key, suffix, plural_key, singular_key, handler in specs:
        if not config_items[key]:
            continue
        item = Nautilus.MenuItem(
            name="NautilusCopyPath::" + suffix + group,
            label=Translation.t(plural_key if plural else singular_key),
        )
        item.connect("activate", handler, files)
        active_items.append(item)
    return active_items
def createObjectsFromFile(dataSet):
    """Load Translation objects for *dataSet* from its sentence/feature files.

    The sentence file stores each translation as 4 consecutive lines
    (source, reference, hypothesis, translation id); the feature file has
    one space-separated feature line per translation.

    :param dataSet: dataset sub-directory name under CLASSIFICATION_DATASET
    :return: list of populated Translation objects
    """
    sentence_path = (const.CLASSIFICATION_DATASET + "/" + dataSet + "/" +
                     const.CLASSIFICATION_SENTENCES)
    feature_path = (const.CLASSIFICATION_DATASET + "/" + dataSet + "/" +
                    const.CLASSIFICATION_FEATURES)
    # Context managers close the files; the original leaked both handles.
    with open(sentence_path, "r") as sentenceFile:
        sentenceData = sentenceFile.readlines()
    with open(feature_path, "r") as featureFile:
        featureData = featureFile.readlines()
    translations = []
    # One record every 4 sentence lines; feature line index is record index.
    for currIndex in range(0, len(sentenceData), 4):
        currTranslation = Translation()
        currTranslation.source = sentenceData[currIndex]
        currTranslation.reference = sentenceData[currIndex + 1]
        currTranslation.hypothesis = sentenceData[currIndex + 2]
        currTranslation.trnID = float(sentenceData[currIndex + 3])
        currTranslation.loadProperties([
            float(i.strip('\n'))
            for i in featureData[currIndex // 4].split(" ")
        ])
        # A fresh Translation is built each iteration, so the original
        # copy.deepcopy was unnecessary.
        translations.append(currTranslation)
    return translations
def reset(self):
    """Reset odometry state: velocity, current/previous pose and the
    cached encoder readings all return to zero."""
    self.prev_left_pos = 0
    self.prev_right_pos = 0
    self.velocity = Twist(0, 0)
    # Distinct objects for the two poses so they never alias each other.
    self.pose = RigidTransform(Translation(0, 0), Rotation.from_degrees(0))
    self.previous_pose = RigidTransform(Translation(0, 0),
                                        Rotation.from_degrees(0))
def __init__(self):
    """Initialise both clipboards and load user configuration.

    Defaults are defined inline; a ``config.json`` next to this module may
    override any of them.  A missing or malformed config file leaves the
    defaults in place (the original crashed when the file was absent,
    because ``open`` sat outside the try block).
    """
    self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
    self.clipboard_primary = Gtk.Clipboard.get(Gdk.SELECTION_PRIMARY)
    self.config = {
        "items": {
            "path": True,
            "uri": True,
            "name": True
        },
        "selections": {
            "clipboard": True,
            "primary": True
        },
        "language": "auto",
        "separator": ", "
    }
    config_path = os.path.join(os.path.dirname(__file__), "config.json")
    try:
        with open(config_path) as json_file:
            self.config.update(json.load(json_file))
        if self.config["language"]:
            Translation.select_language(self.config["language"])
    except (OSError, ValueError):
        # Best-effort config: keep defaults when the file is missing or
        # invalid.  ValueError covers json.JSONDecodeError; the original
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        pass
def my_record(self, TIME=10, INTERVAL=10):
    '''Record and save audio files; TIME is the recording duration and
    INTERVAL the test interval (both apparently in half-second chunks —
    TODO confirm against __NUM_SAMPLES/__framerate).'''
    pa = PyAudio()
    tr = Translation()
    stream = pa.open(format=paInt16,
                     channels=self.__channels,
                     rate=self.__framerate,
                     input=True,
                     frames_per_buffer=self.__NUM_SAMPLES)
    my_buf = []  # main audio buffer (everything recorded so far)
    buf = []  # test audio buffer (first INTERVAL worth of chunks)
    i = 0
    while i < TIME * 2:  # limit the total recording time
        string_audio_data = stream.read(self.__NUM_SAMPLES)  # grab one audio chunk
        my_buf.append(string_audio_data)
        if i < INTERVAL * 2:
            buf.append(string_audio_data)
            i += 1
        elif i == INTERVAL * 2:
            self.__save_wave_file('00.wav', buf)  # save the test audio
            err, a = tr.get_word('00.wav')  # check whether the test audio is usable
            if err == 0:
                # err == 0 presumably means recognition succeeded — confirm
                break
            i = 0  # reset the chunk counter and retry
            buf = []  # reset the test buffer likewise
            print('.', end=' ')
    self.__save_wave_file('01.wav', my_buf)
    stream.close()
def __init__(self, config_file, translation_folder):
    """Set up the recorder: load config and translations, authenticate if
    online mode is enabled, create the connection and register all packet
    listeners.

    :param config_file: path handed to config.Config
    :param translation_folder: folder handed to Translation for i18n strings
    """
    self.config = config.Config(config_file)
    self.translations = Translation(translation_folder)
    self.working = False
    self.online = False
    self.stop_by_user = False  # set to true once PCRC is stopped by user; reset to false when PCRC starts
    self.file_thread = None
    self.chat_thread = None
    self.file_buffer = bytearray()
    self.file_name = None
    self.file_urls = []
    self.mc_version = None
    self.mc_protocol = None
    self.logger = Logger(name='Recorder', display_debug=self.config.get('debug_mode'))
    self.print_config()
    if not self.config.get('online_mode'):
        # Offline mode: connect with the plain username, no auth token.
        self.logger.log("Login in offline mode")
        self.connection = Connection(
            self.config.get('address'),
            self.config.get('port'),
            username=self.config.get('username'),
            recorder=self,
            allowed_versions=utils.ALLOWED_VERSIONS,
            handle_exception=self.onConnectionException)
    else:
        # Online mode: authenticate (presumably against Mojang — see the
        # authentication module) and use the canonical profile name.
        self.logger.log("Login in online mode")
        auth_token = authentication.AuthenticationToken()
        auth_token.authenticate(self.config.get('username'), self.config.get('password'))
        self.logger.log("Logged in as %s" % auth_token.profile.name)
        # Keep the stored config in sync with the profile's real name.
        self.config.set_value('username', auth_token.profile.name)
        self.connection = Connection(
            self.config.get('address'),
            self.config.get('port'),
            auth_token=auth_token,
            recorder=self,
            allowed_versions=utils.ALLOWED_VERSIONS,
            handle_exception=self.onConnectionException)
    # Raw taps on all packets in both directions, then specific handlers.
    self.connection.register_packet_listener(self.onPacketReceived, PycraftPacket)
    self.connection.register_packet_listener(self.onPacketSent, PycraftPacket, outgoing=True)
    self.connection.register_packet_listener(
        self.onGameJoin, clientbound.play.JoinGamePacket)
    self.connection.register_packet_listener(
        self.onDisconnect, clientbound.play.DisconnectPacket)
    self.connection.register_packet_listener(
        self.onChatMessage, clientbound.play.ChatMessagePacket)
    self.connection.register_packet_listener(
        self.onPlayerPositionAndLook,
        clientbound.play.PlayerPositionAndLookPacket)
    self.protocolMap = {}
    self.logger.log('init finish')
def __init__(self, translation_=None, rotation_=None):
    """Create a transform from optional translation/rotation components.

    A missing component defaults to a freshly constructed identity
    Translation()/Rotation() (one per instance, so instances never share
    a mutable default object).

    :param translation_: Translation component, or None for the identity
    :param rotation_: Rotation component, or None for the identity
    """
    # `is None` instead of `== None`: identity check cannot be hijacked
    # by a type that overloads __eq__.
    self.translation = Translation() if translation_ is None else translation_
    self.rotation = Rotation() if rotation_ is None else rotation_
def get_translation(word, anki_media_dir):
    """Look up *word* on the (legacy) Google Translate dictionary endpoint
    and build a Translation with typed meanings plus a pronunciation mp3.

    :param word: English word to translate (target is Polish per 'tl': 'pl')
    :param anki_media_dir: directory the mp3 pronunciation is written into
    :return: Translation object (without meanings when no 'dict' section)
    """
    GOOGLE_TRASLATE_URL = 'http://translate.google.com/translate_a/t'
    GOOGLE_TRASLATE_PARAMETERS = {
        # 't' client will receiver non-standard json format
        # change client to something other than 't' to get standard json response
        'client': 'z',
        'sl': 'en',
        'tl': 'pl',
        'ie': 'UTF-8',
        'oe': 'UTF-8',
        'text': word
    }
    # Google part-of-speech label -> internal Translation.Type value.
    typ_mapping = {
        u'noun': Translation.Type.NOUN,
        u'verb': Translation.Type.VERB,
        u'adjective': Translation.Type.ADJECTIVE,
        u'adverb': Translation.Type.ADVERB,
        u'preposition': Translation.Type.PREPOSITION,
        u'pronoun': Translation.Type.PRONOUN,
        u'conjunction': Translation.Type.CONJUNCTION,
    }
    url = '?'.join((GOOGLE_TRASLATE_URL, urlencode(GOOGLE_TRASLATE_PARAMETERS)))
    # Browser-like UA header — presumably to avoid being blocked; confirm.
    req = urllib2.Request(url, headers={'User-Agent': 'Mozilla/4.0'})
    t = urlopen2(req).read()
    w = json.loads(t)
    translation = Translation(word)
    if 'dict' not in w:
        # No dictionary section: return a bare Translation for the word.
        return translation
    w = w['dict']
    idx0 = 1  # 1-based index of the part-of-speech group
    for typ_dict in w:
        entries = typ_dict.get('entry', None)
        typ = typ_mapping.get(typ_dict['pos'], None)
        # Skip derived forms and unknown/empty part-of-speech groups.
        if typ_dict.get('base_form', "") != word:
            continue
        if typ is None or entries is None:
            continue
        translation.add_typ(idx0, typ)
        idx1 = 1  # 1-based index of the meaning inside this group
        for entry in entries:
            # NOTE: rebinds the outer `w`; safe because the for-loop above
            # iterates via its own iterator over the original list.
            w = entry['word']
            if entry.get('score', 0) >= WORD_GOOGLE_TRANSLATE_MINIMAL_SCORE:
                translation.add_meaning(idx0, idx1, w)
                idx1 += 1
        idx0 += 1
    # Audio: prefer wikislownik's recording; otherwise download a TTS mp3.
    tr2 = wikislownik.get_translation(word, anki_media_dir)
    if 'audio' not in tr2:
        blob = download_mp3(word)
        output = open(os.path.join(anki_media_dir, word + ".mp3"), 'wb')
        output.write(blob)
        output.close()
        translation.add_audio(word + ".mp3")
    else:
        translation.add_audio(tr2['audio'])
    return translation
class Reception(Station):
    """Front-desk station (Python 2 code): routes customers to eye testing,
    license translation and licensing until every expected customer is
    finished or failed."""

    type = 'reception'
    prefix = 'R'

    def __init__(self, queue, customers):
        self.queue = queue
        self.finished = []  # customers fully processed
        self.failures = []  # customers that failed at some station
        self.desired = len(customers)  # number of customers to resolve
        # Downstream stations; each reports back to this reception.
        self.translation = Translation(self)
        self.testing = EyeTesting(self)
        self.licensing = Licensing(self)

    def process(self, customer):
        # Route the customer to whichever prerequisite is still missing.
        print "Processing reception for %s" % customer.emirates_id.first_name
        if (customer in self.failures):
            return
        if (customer.eye_test is None):
            self.testing.add(customer)
            print "Eye testing queue is %s people" % self.testing.peek()
        if (customer.drivers_license_translation is None):
            self.translation.add(customer)
            print "Translation queue is %s people" % self.translation.peek()
        # Licensing only once both prerequisites exist and no license yet.
        if (customer.eye_test is not None and customer.drivers_license_translation is not None and customer.uae_license is None):
            self.licensing.add(customer)
        if (customer.uae_license is not None):
            print "%s is finished at %s" % (customer.emirates_id.first_name, time.time())
            self.finished.append(customer)

    def fail(self, customer, cycle=True):
        # Record the failure; optionally re-queue the customer for retry.
        self.failures.append(customer)
        if (cycle):
            self.add(customer)

    def run(self):
        # Pump the queue until every expected customer finished or failed.
        while len(self.finished) + len(self.failures) < self.desired:
            customer = self.queue.get()
            self.process(customer)
            self.queue.done(customer)
        # Presumably waits for queue workers to drain — confirm semantics.
        self.queue.wait()
        print "Finished the DLD queue: %s" % time.time()
        return self.finished
class Captcha:
    """Generates captcha exercises: small math problems or clock reading."""

    def __init__(self, language, captcha_type, captcha_difficulty=1):
        """
        :param language: language code passed to Translation
        :param captcha_type: "math" or "clock"
        :param captcha_difficulty: 1 (easy) .. 3 (hard), math captchas only
        """
        self.translation = Translation(language)
        self.captcha_type = captcha_type
        self.captcha_difficulty = captcha_difficulty

    @staticmethod
    def _apply(first, operator, second):
        """Evaluate `first <operator> second` for "+"/"-" without eval()."""
        return first + second if operator == "+" else first - second

    def new_captcha(self, dtobj=None):
        """Create a new exercise.

        :param dtobj: unused; kept for interface compatibility
        :return: (excercise_text, solution); the solution is a string for
                 math captchas and an (hours, minutes) tuple for clocks
        """
        if self.captcha_type == "math":
            operator = random.choice(["+", "-"])
            if operator == "-" and self.captcha_difficulty == 1:
                # Easy subtraction: keep the result positive.
                while True:
                    first = random.randrange(1, 20)
                    second = random.randrange(1, 20)
                    if first > second:
                        break
            elif self.captcha_difficulty == 1:
                first = random.randrange(1, 20)
                second = random.randrange(1, 20)
            elif self.captcha_difficulty == 2:
                first = random.randrange(11, 20)
                second = random.randrange(11, 20)
            elif self.captcha_difficulty == 3:
                # Hard: force a result with magnitude above 20.
                while True:
                    first = random.randrange(21, 60)
                    second = random.randrange(21, 60)
                    if abs(self._apply(first, operator, second)) > 20:
                        break
            else:
                first = random.randrange(1, 10)
                second = random.randrange(1, 10)
            term = "{first} {operator} {second}".format(first=first,
                                                        operator=operator,
                                                        second=second)
            # Compute directly instead of eval() on a constructed string.
            solution = str(self._apply(first, operator, second))
            excercise = "Was ist {term} ?".format(term=term)
            return excercise, solution
        elif self.captcha_type == "clock":
            now_time = datetime.datetime.now()
            hours = ftime.get_alarm_hour(now_time)
            minutes = ftime.get_alarm_minute(now_time)
            solution = (hours, minutes)
            clock_part = self.translation.get("It's {h}:{min} .", {
                'h': hours,
                'min': minutes
            })
            excercise = self.translation.get(
                "Repeat the following sentence.") + " " + clock_part
            return excercise, solution
def get_args(self):
    """Prompt the user (via popup windows) for the parameters of the
    pending operation and build the matching transformation.

    Consumes the points the operation needs from ``self.pz.buffer``.

    :return: a Rotation/Scale/Translation object, or None when no
             operation is selected, for 'zoom', or for unknown operations
    """
    if self.op is None:
        return None
    pts = self._requirements.get(self.op)  # number of buffered points needed
    # Initialised up front: the original raised UnboundLocalError at the
    # final return when self.op matched none of the branches.
    T = None
    if self.op == 'rotate':
        p = PopupWindow(self.pz.frame, "Insira o angulo (em graus)")
        self.pz.frame.wait_window(p.top)
        p_list = list(sum(self.pz.buffer[:pts], ()))
        T = Rotation(float(p.getval()), p_list[0], p_list[1])
    elif self.op == 'scale':
        px = PopupWindow(self.pz.frame, "Insira um fator para x")
        self.pz.frame.wait_window(px.top)
        py = PopupWindow(self.pz.frame, "Insira um fator para y")
        self.pz.frame.wait_window(py.top)
        p_list = list(sum(self.pz.buffer[:pts], ()))
        T = Scale(float(px.getval()), float(py.getval()), p_list[0], p_list[1])
    elif self.op == 'translate':
        px = PopupWindow(self.pz.frame, "Insira um offset para x")
        self.pz.frame.wait_window(px.top)
        py = PopupWindow(self.pz.frame, "Insira um offset para y")
        self.pz.frame.wait_window(py.top)
        T = Translation(float(px.getval()), float(py.getval()))
    elif self.op == 'zoom':
        self.zoom(self.pz.buffer[:pts])
    self.pz.buffer = self.pz.buffer[pts:]  # Remove consumed points from buffer
    return T
def _create_menu_items(self, files, group):
    """Build the CopyPath and CopyName context-menu entries for *files*,
    choosing plural labels when several files are selected."""
    plural = len(files) > 1
    path_label = Translation.t("copy_paths" if plural else "copy_path")
    name_label = Translation.t("copy_names" if plural else "copy_name")
    item_path = Nautilus.MenuItem(
        name="NautilusCopyPath::CopyPath" + group,
        label=path_label,
    )
    item_path.connect("activate", self._copy_paths, files)
    item_name = Nautilus.MenuItem(
        name="NautilusCopyPath::CopyName" + group,
        label=name_label,
    )
    item_name.connect("activate", self._copy_names, files)
    return [item_path, item_name]
def test_translation(self):
    """A Translation built from two strokes exposes the strokes, the
    RTF/CRE key tuple and the dictionary's English text."""
    steno_dict = StenoDictionary()
    steno_dict[('S', 'T')] = 'translation'
    result = Translation([Stroke('S'), Stroke('T')], steno_dict)
    self.assertEqual(result.strokes, [Stroke('S'), Stroke('T')])
    self.assertEqual(result.rtfcre, ('S', 'T'))
    self.assertEqual(result.english, 'translation')
def import_vignettes():
    """Merge translated vignette strings into the CSVs under
    ../export_txt/Content/vignettes, writing patched copies to ../patch.

    Rows whose English text (column 1) has a translation get French,
    Chinese and Japanese columns appended; files with no translated row
    are not written out.
    """
    print_percent('EXAPUNKS_vignettes.json')
    trans = Translation('EXAPUNKS_vignettes.json').get_translation()
    trans = insert_dot(trans)
    for root, dirs, files in os.walk('../export_txt/Content/vignettes'):
        for f in files:
            name = os.path.join(root, f)
            out = StringIO()
            csv_writer = csv.writer(out, lineterminator='\n', escapechar='\\')
            need_save = False
            # Context manager closes the input (the original leaked it).
            with open(name, 'r', encoding='utf_8_sig') as infile:
                for row in csv.reader(infile):
                    if len(row) > 1:
                        en = row[1]
                        if en in trans and len(trans[en]) > 0:
                            need_save = True
                            row.extend([
                                trans[en][lang]
                                for lang in ('French', 'Chinese', 'Japanese')
                            ])
                    csv_writer.writerow(row)
            if need_save:
                name = name.replace('../export_txt', '../patch')
                # exist_ok replaces the original blanket `except BaseException`,
                # which also hid genuine errors such as permission failures.
                os.makedirs(os.path.split(name)[0], exist_ok=True)
                with open(name, 'w', encoding='utf_8_sig') as outfile:
                    outfile.write(out.getvalue())
def __init__(self, waypoint_one, waypoint_two):
    """Build the line segment between two waypoints, with each end pulled
    in along the segment by that waypoint's radius."""
    start_pos = waypoint_one.get_position()
    end_pos = waypoint_two.get_position()
    self.slope = Translation.from_translations(start_pos, end_pos)
    length = self.slope.norm()
    # Move the start forward and the end backward along the slope.
    self.start = start_pos.translate(
        self.slope.scale(waypoint_one.get_radius() / length))
    self.end = end_pos.translate(
        self.slope.scale(-1.0 * (waypoint_two.get_radius() / length)))
def find_supported_translations(self):
    """
    Retrieves a list of supported translations from BibleGateway's
    translation page.

    :return: list of Translation objects, plus the local JPS Tanakh entry
    """
    url = "https://www.biblegateway.com/versions/"
    translations = []
    page = urlopen(url)
    soup = BeautifulSoup(page.read())
    # Testament badge text -> (has_ot, has_nt, has_deuterocanon).
    sections = {
        "OT": (True, False, False),
        "NT": (False, True, False),
        "with Apocrypha": (True, True, True),
    }
    for t in soup.findAll("tr", {"class": "language-row"}):
        # Rows whose link carries a title are not translation rows.
        if t.find("a").has_attr("title"):
            continue
        t_text = t.find("td", {"class": "translation-name"}).get_text()
        t_name = t_text[:t_text.rfind("(") - 1]
        t_abbreviation = t_text[t_text.rfind("(") + 1:t_text.rfind(")")]
        t_language = t["data-language"]
        # Defaults reset every iteration: the original set these flags only
        # inside the testament branch, so rows without a testament span
        # inherited the previous row's values (or raised NameError on the
        # first such row).  Default matches the original's `else` branch.
        t_has_ot, t_has_nt, t_has_deut = True, True, False
        testament = t.find("span", {"class": "testament"})
        if testament:
            t_has_ot, t_has_nt, t_has_deut = sections.get(
                testament.get_text(), (True, True, False))
        translations.append(
            Translation(t_name, t_abbreviation, t_language, t_has_ot,
                        t_has_nt, t_has_deut))
    # Add local translations to supported translations list
    translations.append(
        Translation("JPS Tanakh", "JPS", "en", True, False, False))
    return translations
def test_rigidtransform(self):
    """Exercise RigidTransform: constructor defaults, composition
    (transform), inverse, and heading-line intersection."""
    #test constructor
    pose1 = RigidTransform()
    self.assertEqual(pose1.get_translation().get_x(), 0.0)
    self.assertEqual(pose1.get_translation().get_y(), 0.0)
    self.assertEqual(pose1.get_rotation().get_theta(), 0.0)
    pose2 = RigidTransform(Translation(10.0, 15.0), Rotation.from_degrees(45.0))
    self.assertEqual(pose2.get_translation().get_x(), 10.0)
    self.assertEqual(pose2.get_translation().get_y(), 15.0)
    self.assertEqual(pose2.get_rotation().get_theta(), 45.0)
    #test transform
    # Composing with the identity in either order leaves pose2 unchanged.
    pose3 = pose1.transform(pose2)
    self.assertAlmostEqual(pose3.get_translation().get_x(), 10.0)
    self.assertAlmostEqual(pose3.get_translation().get_y(), 15.0)
    self.assertAlmostEqual(pose3.get_rotation().get_theta(), 45.0)
    pose4 = pose2.transform(pose1)
    self.assertAlmostEqual(pose4.get_translation().get_x(), 10.0)
    self.assertAlmostEqual(pose4.get_translation().get_y(), 15.0)
    self.assertAlmostEqual(pose4.get_rotation().get_theta(), 45.0)
    # Two non-trivial poses; composition is order-dependent.
    pose5 = RigidTransform(Translation(10.0, 10.0), Rotation.from_degrees(45.0))
    pose6 = pose5.transform(pose2)
    #used this link to verify: http://www.wolframalpha.com/widgets/view.jsp?id=bd71841fce4a834c804930bd48e7b6cf
    self.assertAlmostEqual(pose6.get_translation().get_x(), 10-(25/math.sqrt(2))+10*math.sqrt(2))
    self.assertAlmostEqual(pose6.get_translation().get_y(), 25/math.sqrt(2)+10*math.sqrt(2)-10*(-1+math.sqrt(2)))
    self.assertAlmostEqual(pose6.get_rotation().get_theta(), 90.0)
    pose7 = pose2.transform(pose5)
    self.assertAlmostEqual(pose7.get_translation().get_x(), -25.0/math.sqrt(2) + 10*math.sqrt(2) + 5.0/2.0*(4+math.sqrt(2)))
    self.assertAlmostEqual(pose7.get_translation().get_y(), 25.0/math.sqrt(2) + 10*math.sqrt(2) - 5.0/2.0*(-6+5*math.sqrt(2)))
    self.assertAlmostEqual(pose7.get_rotation().get_theta(), 90.0)
    #test inverse
    # A pose composed with its inverse must yield the identity transform.
    pose6inverse = pose6.inverse()
    pose8 = pose6.transform(pose6inverse)
    self.assertAlmostEqual(pose8.get_translation().get_x(), 0)
    self.assertAlmostEqual(pose8.get_translation().get_y(), 0)
    self.assertAlmostEqual(pose8.get_rotation().get_theta(), 0)
    #test intersection
    intersection_point = pose1.intersection(pose2)
    self.assertAlmostEqual(intersection_point.get_x(), -5.0)
    self.assertAlmostEqual(intersection_point.get_y(), 0.0)
def collapse_and_rename(self):
    """Collapses names to ints and indexes 0 .. self.n-1"""
    mapping = Translation(*self.edgelist.keys())
    # Rebuild the adjacency structure with every name run through the map.
    relabeled = {
        mapping[node]: [mapping[neighbor] for neighbor in neighbors]
        for node, neighbors in self.edgelist.items()
    }
    self.edgelist = relabeled
    self.translation = mapping
    self.translation.lock()
def __init__(self, queue, customers):
    """Initialise the station's queue, bookkeeping lists and the three
    sub-stations that report back to it."""
    self.queue = queue
    self.desired = len(customers)  # number of customers we expect to resolve
    self.finished = []
    self.failures = []
    # Sub-stations, each constructed with a back-reference to this station.
    self.translation = Translation(self)
    self.testing = EyeTesting(self)
    self.licensing = Licensing(self)
def intersection(self, other_transform):
    """Return the Translation where this transform's heading line crosses
    the other transform's heading line; parallel headings yield a point
    at infinity."""
    other_rot = other_transform.get_rotation()
    if self.rotation.is_parallel(other_rot):
        #should never reach here
        return Translation(float("inf"), float("inf"))
    # Pass the transform with the smaller |cos| first — presumably for
    # numerical stability in intersection_; confirm against its contract.
    if math.fabs(self.rotation.get_cos()) < math.fabs(other_rot.get_cos()):
        return self.intersection_(self, other_transform)
    return self.intersection_(other_transform, self)
def import_strings():
    """Export the EXAPUNKS exe string translations to strings.csv.

    Prints a warning for every entry whose ``{n}`` placeholders differ
    between the English source and the Chinese translation.
    """
    print_percent('EXAPUNKS_exe.json')
    # Load once and reuse (the original constructed Translation twice).
    translation = Translation('EXAPUNKS_exe.json')
    for v in translation.check_variables(regex=r'\{\d*\}',
                                         org_index='English',
                                         trans_index='Chinese',
                                         ordered=False):
        print('Warning: ', v)
    trans = insert_dot(translation.get_translation())
    # Context manager closes the output (the original leaked the handle).
    with open('strings.csv', 'w', encoding='utf-8') as out:
        csv_writer = csv.writer(out, lineterminator='\n', escapechar='\\')
        for key, value in trans.items():
            row = [key, '']
            row.extend(value[lang] for lang in
                       ('German', 'French', 'Russian', 'Chinese', 'Japanese'))
            csv_writer.writerow(row)
def __init__(self, line_one_, line_two_):
    """Build the arc joining line_one's end point to line_two's start.

    The arc center is the intersection of the two lines' normals at those
    endpoints; both endpoints should be equidistant from it.

    :param line_one_: incoming line segment (arc starts at its end)
    :param line_two_: outgoing line segment (arc ends at its start)
    """
    self.line_one = line_one_
    self.line_two = line_two_
    # Normal to each line, anchored at the junction endpoint.
    normal_line_one = RigidTransform(
        self.line_one.end,
        Rotation.from_translation(self.line_one.slope, True).normal())
    normal_line_two = RigidTransform(
        self.line_two.start,
        Rotation.from_translation(self.line_two.slope, True).normal())
    self.center = normal_line_one.intersection(normal_line_two)
    center_to_end_dist = Translation.from_translations(
        self.center, self.line_one.end).norm()
    start_to_center_dist = Translation.from_translations(
        self.center, self.line_two.start).norm()
    # NOTE(review): this signed comparison only rejects centers farther
    # from line_one's end than from line_two's start; an abs() of the
    # difference may be intended — confirm.
    if (center_to_end_dist - start_to_center_dist > 1E-9):
        #should never enter here
        print("ERROR, CENTER OF ARC IS CALCULATED INCORRECTLY")
        self.radius = 7777  # sentinel radius marking an inconsistent arc
    else:
        self.radius = center_to_end_dist
def parseGenerationResult():
    """Parse a fairseq-generate output file into Translation objects.

    Splits the combined log into separate source / reference / hypothesis
    files and writes per-sentence log-probability statistics.

    :return: list of Translation objects, one per generated sentence
    """
    translations = []
    translation_id = None
    currTranslation = Translation()
    # Context managers close every file (the original never closed bleu_res).
    with open(const.FAIRSEQ_GENERATE_FILE, "r") as bleu_res, \
            open(const.NMT_ORIGINAL, "w") as NMT_original, \
            open(const.NMT_GROUND_TRUTH, "w") as NMT_ground_truth, \
            open(const.NMT_OUTPUT, "w") as NMT_output, \
            open(const.SENTENCE_STATS, "w") as Sentence_stats:
        for line in bleu_res:
            if line.startswith("H-"):
                # Hypothesis line: "H-<id>\t<score>\t<text>"
                hypothesis = line.split("\t")[2]
                currTranslation.hypothesis = hypothesis
                NMT_output.write(hypothesis)
            elif line.startswith("T-"):
                # Target/reference line: "T-<id>\t<text>"
                reference = line.split("\t")[1]
                currTranslation.reference = reference
                NMT_ground_truth.write(reference)
            elif line.startswith("S-"):
                # Source line: "S-<id>\t<text>"
                translation_id = float(line.split("\t")[0].split("-")[1])
                source = line.split("\t")[1]
                currTranslation.source = source
                currTranslation.trnID = int(translation_id)
                NMT_original.write(source)
            elif line.startswith("P-"):
                # Per-token log probabilities; a P- line closes the record.
                scores = [float(i) for i in line.split("\t")[1].split(" ")]
                currTranslation.avgLP = mean(scores)
                currTranslation.minLP = min(scores)
                currTranslation.medianLP = median(scores)
                currTranslation.maxLP = max(scores)
                currTranslation.sumLP = sum(scores)
                Sentence_stats.write(
                    str(mean(scores)) + " " + str(min(scores)) + " " +
                    str(median(scores)) + " " + str(max(scores)) + " " +
                    str(sum(scores)) + " " + str(translation_id) + "\n")
                # A fresh Translation replaces the old one immediately, so
                # the original copy.deepcopy was unnecessary.
                translations.append(currTranslation)
                currTranslation = Translation()
    return translations
def export_vignettes():
    """Collect the unique English vignette strings from Content/vignettes
    into EXAPUNKS_vignettes.json, reusing any existing translations found
    under ../import_txt."""
    trans = try_to_get_translation('../import_txt/EXAPUNKS_vignettes.json')
    original = set()  # English strings already seen
    data = []
    for root, dirs, files in os.walk('Content/vignettes'):
        for f in files:
            # Context manager closes each CSV (the original leaked handles).
            with open(os.path.join(root, f), 'r',
                      encoding='utf_8_sig') as infile:
                for row in csv.reader(infile):
                    if len(row) > 1:
                        en = row[1]
                        if en not in original:
                            original.add(en)
                            if en in trans:
                                data.append(trans[en])
                            else:
                                data.append({
                                    'FileName': f,
                                    'Role': row[0],
                                    'English': en
                                })
    translation = Translation()
    translation.set_data(
        data,
        ('FileName', 'Role', 'English', 'French', 'Chinese', 'Japanese'))
    translation.save('EXAPUNKS_vignettes.json')
def change_lang(new_lang):
    """
    Change language of the user and apply the required changes
    :param new_lang: New language to be stored
    """
    try:
        globalvars.lang = Translation(new_lang, CONFIG['LANGUAGE_FILE'])
    except Exception as exc:
        logger.error("Error in Language file!")
        # Abort without touching the keyboards when the language is broken.
        return None
    # Rebuild every cached keyboard layout with the new language's strings.
    globalvars.HOME_KEYBOARD = [
        [
            globalvars.lang.text('MENU_HOME_EXISTING_KEY'),
            globalvars.lang.text('MENU_HOME_NEW_KEY')
        ],
        [
            globalvars.lang.text('MENU_HOME_FAQ'),
            globalvars.lang.text('MENU_HOME_INSTRUCTION')
        ],
        [
            globalvars.lang.text('MENU_HOME_CHANGE_LANGUAGE'),
            globalvars.lang.text('MENU_HOME_PRIVACY_POLICY')
        ],
        [
            globalvars.lang.text('MENU_HOME_SUPPORT'),
            globalvars.lang.text('MENU_HOME_DELETE_ACCOUNT')
        ],
        [
            globalvars.lang.text('MENU_CHECK_STATUS'),
        ]
    ]
    globalvars.BACK_TO_HOME_KEYBOARD = [[
        globalvars.lang.text('MENU_BACK_HOME')
    ]]
    # Swap the confirm/decline button order for right-to-left languages.
    if new_lang in ['fa', 'ar']:
        globalvars.OPT_IN_KEYBOARD = [[
            globalvars.lang.text('MENU_PRIVACY_POLICY_DECLINE'),
            globalvars.lang.text('MENU_PRIVACY_POLICY_CONFIRM')
        ]]
    else:
        globalvars.OPT_IN_KEYBOARD = [[
            globalvars.lang.text('MENU_PRIVACY_POLICY_CONFIRM'),
            globalvars.lang.text('MENU_PRIVACY_POLICY_DECLINE')
        ]]
    globalvars.OPT_IN_DECLINED_KEYBOARD = [[
        globalvars.lang.text('MENU_BACK_PRIVACY_POLICY'),
        globalvars.lang.text('MENU_HOME_CHANGE_LANGUAGE')
    ]]
def convert(input_name, output_name, auto=False):
    """Convert *input_name* to *output_name* via Translation.

    When the destination already exists and is newer than the source,
    skip it silently in auto mode, otherwise ask the user first.
    """
    if os.path.exists(output_name):
        source_mtime = os.path.getmtime(input_name)
        dest_mtime = os.path.getmtime(output_name)
        if source_mtime < dest_mtime:
            if auto:
                print('file "%s" is newer, skipped.' % output_name)
                return
            answer = input('file "%s" is newer, overwrite? [Y/N]' % output_name)
            if answer.lower() != 'y':
                return
    print('%s ==> %s' % (input_name, output_name))
    Translation(input_name).save(output_name)
def test_listeners(self):
    """Translator listener management: callbacks fire only while
    registered, duplicate add_listener calls do not double-deliver, and
    removed listeners stop receiving (undo, do, prev) notifications."""
    output1 = []

    def listener1(undo, do, prev):
        output1.append((undo, do, prev))

    output2 = []

    def listener2(undo, do, prev):
        output2.append((undo, do, prev))

    t = Translator()
    s = Stroke('S')
    tr = Translation([s], StenoDictionary())
    # Every notification carries: nothing undone, [tr] done, tr as prev.
    expected_output = [([], [tr], tr)]
    # No listeners yet: nothing should be recorded.
    t.translate(s)
    t.add_listener(listener1)
    t.translate(s)
    self.assertEqual(output1, expected_output)
    del output1[:]
    t.add_listener(listener2)
    t.translate(s)
    self.assertEqual(output1, expected_output)
    self.assertEqual(output2, expected_output)
    del output1[:]
    del output2[:]
    # Re-adding an existing listener must not deliver twice.
    t.add_listener(listener2)
    t.translate(s)
    self.assertEqual(output1, expected_output)
    self.assertEqual(output2, expected_output)
    del output1[:]
    del output2[:]
    t.remove_listener(listener1)
    t.translate(s)
    self.assertEqual(output1, [])
    self.assertEqual(output2, expected_output)
    del output1[:]
    del output2[:]
    t.remove_listener(listener2)
    t.translate(s)
    self.assertEqual(output1, [])
    self.assertEqual(output2, [])
def exp(twist):
    """Exponential map of a twist: integrate a constant velocity
    (dx, dtheta) over unit time into a RigidTransform.

    Only twist.dx and twist.dtheta are read, so this assumes the twist has
    no lateral (dy) component — typical for differential drive; confirm
    against the Twist definition.
    """
    cos_theta = math.cos(twist.dtheta)
    sin_theta = math.sin(twist.dtheta)
    rotation = Rotation(cos_theta, sin_theta)
    #if theta is very small, use taylor series to approximate (we can't divide by zero)
    if (math.fabs(twist.dtheta) < zero):
        sin_theta_over_theta = 1.0 - math.pow(
            twist.dtheta, 2) / 6.0 + math.pow(twist.dtheta, 4) / 120.0
        one_minus_cos_theta_over_theta = 1.0 / 2.0 * twist.dtheta - math.pow(
            twist.dtheta, 3) / 24.0 + math.pow(twist.dtheta, 5) / 720.0
    else:
        sin_theta_over_theta = sin_theta / twist.dtheta
        one_minus_cos_theta_over_theta = (1.0 - cos_theta) / twist.dtheta
    # Chord displacement of the arc traced while rotating by dtheta.
    translation = Translation(sin_theta_over_theta * twist.dx,
                              one_minus_cos_theta_over_theta * twist.dx)
    return RigidTransform(translation, rotation)
def commit_verb_info(db_session, language, verb_info):
    """Persist a verb's conjugations and its deduplicated English
    translations to the database.

    :param db_session: database session (SQLAlchemy-style add/commit/rollback)
    :param language: language code the verb belongs to
    :param verb_info: dict with 'name', 'modes' (conjugation tree) and
                      'meanings' (list of {'eng', 'description'} dicts)
    :return: True (always; duplicate rows are logged and skipped)
    """
    # NOTE(review): json.dumps(..., encoding=...) exists only on Python 2.
    verb_data_json = json.dumps(verb_info['modes'],
                                sort_keys=True,
                                indent=4,
                                separators=(',', ': '),
                                ensure_ascii=False,
                                encoding='utf-8').encode('utf-8')
    verb = Verb(lang=language,
                verb=verb_info['name'],
                conjugations=verb_data_json)
    try:
        db_session.add(verb)
        db_session.commit()
    except IntegrityError:
        # Verb row already exists: roll back and continue with translations.
        db_session.rollback()
        logging.error('Verb already in DB %s' % verb_info['name'])
    translations = []
    for meaning in verb_info['meanings']:
        translation = Translation(lang=language,
                                  english=meaning['eng'],
                                  description=meaning['description'],
                                  verb=verb.verb)
        # Merge descriptions of meanings sharing the same English text.
        duped_translation = next(
            (t for t in translations if t.english == translation.english),
            None)
        if duped_translation:
            duped_translation.description += ', ' + translation.description
        else:
            translations.append(translation)
    try:
        for translation in translations:
            db_session.add(translation)
        db_session.commit()
    except IntegrityError:
        db_session.rollback()
        logging.error('Translations already exist for %s' % verb_info['name'])
    return True
class CrawlhtmlPipeline(object):
    """Scrapy pipeline: translates each page's content and stores the
    item in MongoDB."""

    # Shared translator used by every pipeline instance.
    t = Translation()

    def __init__(self):
        connection = pymongo.Connection(settings['MONGODB_SERVER'],
                                        settings['MONGODB_PORT'])
        db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]

    def process_item(self, item, spider):
        """Translate the item's content and insert it into MongoDB.

        :raises DropItem: when the item is empty or has no content
        """
        if not item or item['content'] == '':
            raise DropItem("Missing content of page from %s" %
                           (getattr(item, 'url', '')))
        # The original kept a `valid` flag that was necessarily True at the
        # insert point (the invalid path raises above), so it is dropped.
        item['content'] = CrawlhtmlPipeline.t.translateTxt(item['content'])
        self.collection.insert(dict(item))
        return item
def __init__(self, start_x, start_y, end_x, end_y, center_x = None, center_y = None):
    """Create a path segment: a LINE when no center is given, otherwise
    an ARC around (center_x, center_y).

    :param start_x: segment start x
    :param start_y: segment start y
    :param end_x: segment end x
    :param end_y: segment end y
    :param center_x: arc center x, or None for a straight line
    :param center_y: arc center y, or None for a straight line
    """
    self.start = Translation(start_x, start_y)
    self.end = Translation(end_x, end_y)
    # `is None` replaces the original `== None` equality comparisons.
    if center_x is None and center_y is None:
        self.slope = Translation.from_translations(self.start, self.end)
        self.segment_type = Type.LINE
    else:
        self.segment_type = Type.ARC
        self.center = Translation(center_x, center_y)
        center_to_start = Translation.from_translations(self.center, self.start)
        center_to_end = Translation.from_translations(self.center, self.end)
        # Both endpoints must be (nearly) equidistant from the center.
        # The original compared the signed difference, which wrongly
        # accepted any center merely *closer* to the start than the end.
        if abs(center_to_start.norm() - center_to_end.norm()) < 1E-9:
            self.radius = center_to_start.norm()
        else:
            self.radius = -3256  # sentinel marking an inconsistent arc
def testSaveTranslation(self):
    """Saving a translation links two phrases and makes the translated
    phrase findable by source phrase and target language."""
    db = Db()
    db.dbFile = "testmetabrainwallet.db"
    db.translation.dropTable()
    db.translation.createTable()
    hello = Phrase({'language': 'en', 'content': 'hello'})
    hola = Phrase({'language': 'es', 'content': 'hola'})
    for phrase in (hello, hola):
        db.phrase.save(phrase)
    db.translation.save(
        Translation({'originalId': hello.id, 'translatedId': hola.id}))
    matches = db.translation.find(hello, 'es')
    self.assertTrue(len(matches) == 1)
    self.assertTrue(matches[0].id == hola.id)
    db.close()
def export_descriptions():
    """Collect the unique description lines from Content/descriptions into
    EXAPUNKS_descriptions.json, reusing any existing translations found
    under ../import_txt."""
    trans = try_to_get_translation('../import_txt/EXAPUNKS_descriptions.json')
    original = set()  # lines already collected
    data = []
    for root, dirs, files in os.walk('Content/descriptions'):
        for f in files:
            # Context manager closes each file (the original leaked one
            # handle per walked file).
            with open(os.path.join(root, f), 'r',
                      encoding='utf_8_sig') as infile:
                for line in infile:
                    line = line.strip()
                    if len(line) > 0 and line not in original:
                        original.add(line)
                        if line in trans:
                            data.append(trans[line])
                        else:
                            data.append({'FileName': f, 'English': line})
    translation = Translation()
    translation.set_data(data,
                         ('FileName', 'English', 'German', 'French',
                          'Russian', 'Chinese', 'Japanese'))
    translation.save('EXAPUNKS_descriptions.json')
def __init__(self, id, binDir, binName):
    """Initialise the base Translation entry and remember where the
    backing binary lives (directory joined with file name)."""
    Translation.__init__(self, id)
    self.__binLocation = "{}/{}".format(binDir, binName)
def haplotype_calling(self, hap, win, ref, format, out):
    """Write called haplotypes to *out* in one of four formats.

    format 0: tab-separated reconstructed sequence + frequency
    format 1: CSV with one column per variant position + frequency
    format 2: raw haplotype key + frequency, tab-separated
    format 3: protein-level diff vs the reference (needs self.gb GenBank)

    Haplotype structure (inferred from usage — confirm): hap.haplotype maps
    some key to {variant-tuple: frequency}; each variant is a tuple whose
    elements look like (position, type, base(s)).

    NOTE(review): the parameters `format` and `out` shadow the builtin and
    are themselves shadowed below (`out` is rebound to an OrderedDict);
    `map(...)` concatenated with lists implies Python 2.

    :param hap: haplotype container (see above)
    :param win: window object; win.windows[0] gives (start, end)
    :param ref: reference with a `.sequence` string (1-based positions)
    :param format: output format selector, 0-3
    :param out: output file path
    """
    outfile = open(out, "w")
    if format == 0 or format == 1:
        # collect set of variant positions
        unique = []
        for k1, v1 in hap.haplotype.items():
            for k2 in v1.keys():
                for k3 in k2:
                    unique.append(k3[0])
        sorted_unique = sorted(set(unique))
        if format == 1:
            # CSV header: id, one column per variant position, frequency.
            outfile.write(",".join(["id"] + map(str, sorted_unique) + ["frequency"]) + "\n")
        # collect the bases at this positions
        for k1, v1 in hap.haplotype.items():
            # Haplotypes ordered by descending frequency.
            for n, (k2, v2) in enumerate(sorted(v1.items(), key=lambda q: q[1], reverse=True)):
                out = collections.OrderedDict()  # position -> base call
                for u in sorted_unique:  # set([2816...2819])
                    added = False
                    for k3 in k2:
                        if k3[0] == u:
                            # check whether its a deletion greater than 2
                            if "-" in k3[2]:
                                if len(k3[2]) > 1:
                                    # Multi-base deletion: mark every
                                    # covered position with "-".
                                    for i in range(len(k3[2])):
                                        out[u+i] = "-"
                                else:
                                    out[u] = k3[2]
                            elif "I" in k3[1]:
                                # Insertion: reference base + inserted bases.
                                out[u] = ref.sequence[u-1] + k3[2]
                            else:
                                out[u] = k3[2]
                            added = True
                    if not added:
                        # No variant here: fall back to the reference base,
                        # unless a prior deletion already filled the slot.
                        try:
                            out[u]
                        except KeyError:
                            out[u] = ref.sequence[u-1]
                if format == 0:
                    outfile.write("\t".join(["".join(out.values()), str(v2)]) + "\n")
                elif format == 1:
                    outfile.write(",".join([str(n)] + out.values() + [str(v2)]) + "\n")
    elif format == 2:
        # Raw dump: rank, haplotype key, frequency.
        for k1, v1 in hap.haplotype.items():
            for n1, (k2, v2) in enumerate(sorted(v1.items(), key=lambda q: q[1], reverse = True)):
                outfile.write(str(n1) + "\t" + str(k2) + "\t" + str(v2) + "\n")
    elif format == 3:
        # TEST
        if self.gb:
            gb = GenBank(self.gb)
            gb.load()
            cds = gb.get_cds_from_genbank()
            t = Translation(ref, cds[0], cds[1])
        for k1, v1 in hap.haplotype.items():
            for n1, (k2, v2) in enumerate(sorted(v1.items(), key=lambda q: q[1], reverse = True)):
                # TEST
                # NOTE(review): `t` (and this call) only exist when self.gb
                # is set; without it this line raises NameError — confirm
                # whether it belongs inside the `if self.gb` branch.
                ref_protein = t.get_translation(ref.sequence, start=win.windows[0][0], end=win.windows[0][1])
                if self.gb:
                    s = t.load_variants(k2)
                    q = t.get_translation(s, start=win.windows[0][0], end=win.windows[0][1])
                    p = t.diff(ref_protein, q)
                    outfile.write(str(n1) + "\t" + str(k2) + "\t" + str(p) + "\t" + str(v2) + "\n")
                else:
                    outfile.write(str(n1) + "\t" + str(k2) + "\t" + str(v2) + "\n")
    outfile.close()