def struct_result(self, _):
    """
    Produce wordlist output for structural/multilevel mode.

    If a frequency criterion (fcrit) is configured, a plain frequency
    distribution is computed instead. Otherwise the arguments are validated
    and dispatched to the multilevel frequency calculation (freqml) with
    1-3 levels depending on which wlposattrN values are filled in.

    Raises:
        WordlistError: on unsupported argument combinations
    """
    if self.args.fcrit:
        self._make_wl_query()
        return self.freqs(self.args.fcrit, self.args.flimit, self.args.freq_sort, 1)
    # a structural attribute (struct.attr) implies text-type mode which only
    # supports the simple output
    if '.' in self.args.wlattr:
        raise WordlistError('Text types are limited to Simple output')
    if self.args.wlnums != 'frq':
        raise WordlistError(
            'Multilevel lists are limited to Word counts frequencies')
    # the deepest configured level wins; wlposattr1 is mandatory
    level = 3
    if not self.args.wlposattr1:
        raise WordlistError(translate('No output attribute specified'))
    if not self.args.wlposattr3:
        level = 2
    if not self.args.wlposattr2:
        level = 1
    if not self.args.wlpat and not self.args.wlwords:
        raise WordlistError(
            translate(
                'You must specify either a pattern or a file to get the multilevel wordlist'
            ))
    self._make_wl_query()
    self.args.flimit = self.args.wlminfreq
    return self.freqml(flimit=self.args.wlminfreq, freqlevel=level,
                       ml1attr=self.args.wlposattr1, ml2attr=self.args.wlposattr2,
                       ml3attr=self.args.wlposattr3)
def callback_select_words(call):
    """
    Handle the topic-selection keyboard callback: when the user confirms the
    selection, deactivate the whole temp subject, re-activate only the chosen
    topics and send a summary listing; otherwise keep the selection dialog open.

    State transitions follow fsm.next_state[(SELECT_TRAINING, GET_TOPICS)].
    """
    user_id = get_id(call.message)
    user = user_manager.get_user(user_id)
    user.set_state(fsm.LOCKED)
    done, btn_set, btn = user.parse_keyboard_ans(call)
    if done:  # was `done == True`; truthiness is equivalent and idiomatic
        subject = user.temp_subject
        user.set_subject_active(subject, 0)
        txt = translation.translate("#active_topics_listing", user.get_native_language()) + "\n"
        txt_args = (subject, )
        for i in btn_set:
            user.set_topic_active(subject, btn[i][0], 1)
            txt += "*.%s*"
            txt_args += (btn[i][0], )
        # only the subject in txt_args -> no topic was selected
        if len(txt_args) == 1:
            txt += translation.translate("#no_active_topics", user.get_native_language())
        user.send_message(txt, txt_args=txt_args, translate_flag=False)
        user.set_state(fsm.next_state[(fsm.SELECT_TRAINING, fsm.GET_TOPICS)]['done'])
    else:
        user.set_state(fsm.next_state[(fsm.SELECT_TRAINING, fsm.GET_TOPICS)]['continue'])
def viewattrs(self, _):
    """
    attrs, refs, structs form

    Builds the data required by the 'view options' form: positional
    attributes, available structures, available references (structural
    attributes) and current view settings. Returns a plain dict consumed
    by the client-side form.
    """
    from collections import defaultdict
    out = {}
    if self.args.maincorp:
        corp = self.cm.get_corpus(self.args.maincorp)
    else:
        corp = self.corp
    out['AttrList'] = [{
        'label': corp.get_conf(f'{n}.LABEL') or n,
        'n': n,
        'multisep': corp.get_conf(f'{n}.MULTISEP')
    } for n in corp.get_posattrs() if n]
    out['fixed_attr'] = 'word'
    out['attr_vmode'] = self.args.attr_vmode
    availstruct = corp.get_structs()
    # structures currently selected either directly or via a struct.attr value
    structlist = set(self.args.structs.split(',')).union(
        set([x.split('.')[0] for x in self.args.structattrs]))
    out['Availstructs'] = [{'n': n,
                            'sel': 'selected' if n in structlist else '',
                            'label': corp.get_conf(f'{n}.LABEL')}
                           for n in availstruct if n and n != '#']
    out['base_viewattr'] = self.args.base_viewattr
    availref = corp.get_structattrs()
    reflist = self.args.refs.split(',') if self.args.refs else []
    structattrs = defaultdict(list)
    out['qs_enabled'] = self.args.qs_enabled

    def ref_is_allowed(r):
        # '#' and the configured speech segment are excluded from references
        return r and r not in (
            '#', self.get_corpus_info(self.args.corpname).speech_segment)

    for item in availref:
        if ref_is_allowed(item):
            k, v = item.split('.', 1)
            structattrs[k].append(v)
    out['Availrefs'] = [dict(n='#', label=translate('Token number'),
                             sel='selected' if '#' in reflist else '')]
    for n in availref:
        if ref_is_allowed(n):
            out['Availrefs'].append(dict(n='=' + n,
                                         sel='selected' if ('=' + n) in reflist else '',
                                         label=(corp.get_conf(f'{n}.LABEL') or n)))
    doc = corp.get_conf('DOCSTRUCTURE')
    if doc in availstruct:
        # the document structure gets a fixed position right after '#'
        out['Availrefs'].insert(1, dict(n=doc, label=translate('Document number'),
                                        sel=(doc in reflist and 'selected' or '')))
    # kwicleftctx is stored with a leading '-'; strip it for display
    out['newctxsize'] = self.args.kwicleftctx[1:]
    out['structattrs'] = structattrs
    out['curr_structattrs'] = self.args.structattrs
    out['query_overview'] = self.concdesc_json().get('Desc', [])
    out['CurrentAttrs'] = self.args.attrs.split(',')
    out['use_conc_toolbar'] = settings.get_bool('global', 'use_conc_toolbar')
    return out
def overwrite_check(msg):
    """
    Handle the 'overwrite?' keyboard answer of the copy-from-user flow:
    copy each selected topic from the temp user and report, per topic,
    which items were copied and which were overwritten.

    Keyboard option 0 means 'overwrite existing items'.
    """
    user_id = get_id(msg)
    user = user_manager.get_user(user_id)
    user.set_state(fsm.LOCKED)
    valid, overwrite, keyboard_option, keyboard_len = user.parse_keyboard_ans(msg)
    if not valid:  # was `valid == False`
        user.send_message("#choose_from_keyboard", markup=None)
        user.set_state(fsm.next_state[(fsm.COPY_FROM_USER, fsm.GET_OVERWRITE)]['error'])
        return
    # option 0 = overwrite, anything else = keep existing
    overwrite = keyboard_option == 0
    selected_topics = user.temp_topics_list
    text = translation.translate('#copy_results', user.get_native_language()) + "\n"
    user.send_message(text, translate_flag=False)
    for topic in selected_topics:
        txt_args = ()
        text = ""
        copied, overwritten_items = user_manager.copy_topic(
            user, user.temp_user, user.temp_subject, topic, overwrite)
        text += translation.translate('#topic', user.get_native_language()) + "\n"
        txt_args += (topic, )
        if len(overwritten_items) > 0:
            text += translation.translate(
                '#overwritten_items', user.get_native_language()) + "\n"
            for item in overwritten_items:
                text += '_.%s_\n'
                txt_args += (item, )
        if len(copied) > 0:
            text += translation.translate(
                '#copied_items', user.get_native_language()) + "\n"
            for item in copied:
                text += '_.%s_\n'
                txt_args += (item, )
        # removed: debug print(text) and the never-read `count` accumulator
        user.send_message(text, txt_args, translate_flag=False)
    user.set_state(fsm.next_state[(fsm.COPY_FROM_USER, fsm.GET_OVERWRITE)]['done'])
def generate5(center, width, renderer, alpha=0):
    """
    Draw one frame of a swirling line pattern: for each screen row y, draw a
    line between two points obtained by rotating the band edges (x1, x2) by y
    radians in opposite directions.

    center -- horizontal centre of the band
    width -- band width (edges at center +/- width/2)
    renderer -- SDL2 renderer to draw on
    alpha -- 0..1 opacity folded into the top byte of the ARGB colour

    Relies on module globals: rect, translation, math.
    """
    # white with alpha in the high byte (ARGB)
    color = 0x00ffffff+(int(0xFF*alpha)<<24)
    x1=center-width/2
    x2=center+width/2
    for y in range(0, rect.bottom):
        # p1 rotates by +y, p2 by -y; the (0, 0) offset means no translation
        p1 = translation.translate((x1*math.sin(y)+y*math.cos(y), x1*math.cos(y) - y*math.sin(y)), (0, 0))
        p2 = translation.translate((x2*math.sin(-y)+y*math.cos(-y), x2*math.cos(-y)- y*math.sin(-y)), (0, 0))
        renderer.draw_line((p1[0], p1[1], p2[0], p2[1]), color)
def collocs(self, cattr='-', csortfn='m', cbgrfns='mt', cfromw=-5, ctow=5, cminfreq=5, cminbgr=3, max_lines=0):
    """
    Compute a collocation table via manatee.CollocItems.

    cattr -- collocation attribute
    csortfn -- sorting function code (see statdesc keys)
    cbgrfns -- string of statistic codes to show per item
    cfromw, ctow -- context window (in tokens) around the KWIC
    cminfreq, cminbgr -- minimum frequency / minimum bigram frequency
    max_lines -- maximum number of result lines (0 = no explicit limit)

    Returns a dict with 'Head' (column descriptors) and 'Items'
    (one dict per collocate incl. P/N filter queries).
    """
    statdesc = {'t': translate('T-score'),
                'm': translate('MI'),
                '3': translate('MI3'),
                'l': translate('log likelihood'),
                's': translate('min. sensitivity'),
                'p': translate('MI.log_f'),
                'r': translate('relative freq. [%]'),
                'f': translate('absolute freq.'),
                'd': translate('logDice')
                }
    items = []
    colls = manatee.CollocItems(self, cattr, csortfn, cminfreq, cminbgr, cfromw, ctow, max_lines)
    # template: first %s is 'P'/'N' (positive/negative filter), second is the collocate value
    qfilter = '%%s%i %i 1 [%s="%%s"]' % (cfromw, ctow, cattr)
    i = 0
    while not colls.eos():
        # NOTE(review): this breaks only once i EXCEEDS max_lines, so up to
        # max_lines + 1 items can be appended — confirm whether '<=' was intended
        # (CollocItems is already constructed with max_lines, so this is a backstop)
        if 0 < max_lines < i:
            break
        items.append(dict(
            str=colls.get_item(),
            freq=colls.get_cnt(),
            Stats=[{'s': '%.3f' % colls.get_bgr(s)} for s in cbgrfns],
            pfilter=qfilter % ('P', escape(self.import_string(colls.get_item()))),
            nfilter=qfilter % ('N', escape(self.import_string(colls.get_item())))
        ))
        colls.next()
        i += 1
    head = [{'n': ''}, {'n': 'Freq', 's': 'f'}] + \
        [{'n': statdesc.get(s, s), 's': s} for s in cbgrfns]
    return dict(Head=head, Items=items)
def viewattrs(self, _):
    """
    attrs, refs, structs form

    Older variant reading attribute/structure lists from corpus
    configuration strings (ATTRLIST, STRUCTLIST, STRUCTATTRLIST).
    Builds the data for the 'view options' form and returns it as a dict.
    """
    from collections import defaultdict
    out = {}
    if self.args.maincorp:
        corp = corplib.manatee.Corpus(self.args.maincorp)
    else:
        corp = self.corp
    out['AttrList'] = [{'label': corp.get_conf(n + '.LABEL') or n, 'n': n}
                       for n in corp.get_conf('ATTRLIST').split(',') if n]
    out['fixed_attr'] = 'word'
    out['attr_allpos'] = self.args.attr_allpos
    out['attr_vmode'] = self.args.attr_vmode
    availstruct = corp.get_conf('STRUCTLIST').split(',')
    # structures selected either directly or implied by a struct.attr value
    structlist = set(self.args.structs.split(',')).union(
        set([x.split('.')[0] for x in self.args.structattrs]))
    out['Availstructs'] = [{'n': n,
                            'sel': 'selected' if n in structlist else '',
                            'label': corp.get_conf(n + '.LABEL')}
                           for n in availstruct if n and n != '#']
    availref = corp.get_conf('STRUCTATTRLIST').split(',')
    reflist = self.args.refs.split(',') if self.args.refs else []
    structattrs = defaultdict(list)

    def ref_is_allowed(r):
        # '#' and the configured speech segment cannot serve as references
        return r and r not in (
            '#', self.get_corpus_info(self.args.corpname).get('speech_segment'))

    for item in availref:
        if ref_is_allowed(item):
            k, v = item.split('.', 1)
            structattrs[k].append(v)
    out['Availrefs'] = [dict(n='#', label=translate('Token number'),
                             sel='selected' if '#' in reflist else '')]
    for n in availref:
        if ref_is_allowed(n):
            out['Availrefs'].append(dict(n='=' + n,
                                         sel='selected' if ('=' + n) in reflist else '',
                                         label=(corp.get_conf(n + '.LABEL') or n)))
    doc = corp.get_conf('DOCSTRUCTURE')
    if doc in availstruct:
        # document structure gets a fixed position right after '#'
        out['Availrefs'].insert(1, dict(n=doc, label=translate('Document number'),
                                        sel=(doc in reflist and 'selected' or '')))
    # kwicleftctx carries a leading '-'; strip it for the form value
    out['newctxsize'] = self.args.kwicleftctx[1:]
    out['structattrs'] = structattrs
    out['curr_structattrs'] = self.args.structattrs
    out['query_overview'] = self.concdesc_json().get('Desc', [])
    out['CurrentAttrs'] = self.args.attrs.split(',')
    out['use_conc_toolbar'] = settings.get_bool('global', 'use_conc_toolbar')
    return out
def ajax_wipe_subcorpus(self, request):
    """
    Permanently delete a stored subcorpus query of the current user.

    Requires the SUBC_RESTORE plug-in; reports an error message when the
    plug-in is not installed. Always returns an empty dict.
    """
    if not plugins.runtime.SUBC_RESTORE.exists:
        self.add_system_message('error', translate(
            'Unsupported operation (plug-in not present)'))
        return {}
    corpus_id = request.form['corpname']
    subcorp_name = request.form['subcname']
    with plugins.runtime.SUBC_RESTORE as sr:
        sr.delete_query(self.session_get('user', 'id'), corpus_id, subcorp_name)
    self.add_system_message(
        'info',
        translate('Subcorpus %s has been deleted permanently.') % subcorp_name)
    return {}
def _addTranslationTags(jobId, doc):
    """
    Attach machine-translated title/summary/content tags to a non-English
    document; English documents are returned untouched.
    """
    docLang = doc.tags[FEEDTAG_LANG]
    if docLang == LANG_ENGLISH:
        return doc
    translations = (
        (DOCTAG_TRANSLATED_TITLE, doc.tags[LINKTAG_TITLE]),
        (DOCTAG_TRANSLATED_SUMMARYTEXT, doc.tags[LINKTAG_SUMMARYTEXT]),
        (DOCTAG_TRANSLATED_CONTENT, doc.content),
    )
    for targetTag, sourceText in translations:
        doc.tags[targetTag] = translate(jobId, sourceText, docLang)
    return doc
def generate(center, width, renderer, alpha=1):
    """
    Draw one frame of a rotating ribbon. For each y in a symmetric vertical
    range, two edge points are rotated by an ever-increasing phase and a line
    is drawn between them.

    Uses the function attribute ``generate.i`` as a persistent phase
    accumulator across calls — it must be initialised by the caller before
    the first invocation.

    Relies on module globals: rect, window_size, translation, math.
    """
    x1=center-width/2
    x2=center+width/2
    for y in range(int(-rect.bottom//2), int( rect.bottom//2)):
        # advance the global phase a little per scan line
        generate.i+=0.00003;
        # NOTE(review): p1 mixes y/2 and y/4 factors while p2 uses y/4 twice —
        # possibly intentional for the effect, possibly a typo; confirm
        p1 = translation.translate((rect.bottom/2-width/2+x1/4*math.cos(y+generate.i) - y/2*math.sin(y+generate.i), rect.right/2-width/2+x1/4*math.sin(y+generate.i)+y/4*math.cos(y+generate.i)), (window_size[0]/4, -window_size[1]/2))
        p2 = translation.translate((rect.bottom/2+width/2+x2/4*math.cos(-y-generate.i)- y/4*math.sin(-y-generate.i), rect.right/2+width/2+x2/4*math.sin(-y-generate.i)+y/4*math.cos(-y-generate.i)),(window_size[0]/4,-window_size[1]/2))
        # colour varies with y; alpha goes to the top ARGB byte
        color = y*0xff+(int(alpha*0xff)<<24)
        renderer.draw_line((p1[0],p1[1],p2[0],p2[1]), color)
def replyToReporter(response='', queue_id='', uid='', keep=True, ticketText=''):
    """
    Confirm (or cancel) a ticket to its reporter.

    keep=True  -- confirm: either edit the original queue message (modes '1'/'2')
                  or send a fresh direct message (mode '0') with a link to the
                  created ticket (response = [url, key]).
    keep=False -- cancel: edit the original queue message with a cancellation note.

    Always removes the request from the message list afterwards.
    Relies on module globals: mode, lang, bot, common.
    """
    if keep:  # was `keep == True`
        if mode in ('1', '2'):
            mids = findMidsWithQueueId(str(queue_id))
            message = bot.messaging.get_messages_by_id(mids)[0]
            bot.messaging.update_message(message, translate(lang,'ticketConfirmation') + ticketText + '\n***\n' + translate(lang,'ticketSent') + ' [' + response[1] + '](' + response[0] + ')')
            common.conMsg('bot','Replied to reporter with editing queue_id=' + str(queue_id) + ' response=' + str(response))
        elif mode == '0':
            bot.messaging.send_message(bot.users.get_user_peer_by_id(int(uid)),translate(lang,'ticketConfirmation') + ticketText + '\n***\n' + translate(lang,'ticketSent') + ' [' + response[1] + '](' + response[0] + ')')
            common.conMsg('bot','Replied to reporter with messaging uid=' + str(uid) + ' response=' + str(response))
    else:
        mids = findMidsWithQueueId(str(queue_id))
        message = bot.messaging.get_messages_by_id(mids)[0]
        bot.messaging.update_message(message, translate(lang,'ticketConfirmation') + ticketText + '\n***\n' + translate(lang,'cancelRequest'))
        common.conMsg('bot','Cancelling Replied to reporter with editing queue_id=' + str(queue_id))
    removeFromRequestMessageList(queue_id)
def __init__(self, corp, action, params, sample_size=0, full_size=-1, orig_corp=None):
    """
    Create a Manatee concordance wrapper.

    corp -- a manatee corpus instance
    action -- one-letter concordance action:
        'q' = raw query,
        'a' = query with a default attribute ('attr,query'),
        'l' = load from an explicit file path,
        's' = load a stored concordance from the corpus _conc_dir
    params -- action-specific parameter (query string or file path)
    sample_size -- random sample size (0 = no sampling)
    full_size -- known full concordance size (-1 = unknown)
    orig_corp -- original (e.g. unfiltered) corpus; defaults to corp

    Raises RuntimeError for unknown actions or when the query contains
    characters the corpus encoding cannot represent.
    """
    self.pycorp = corp
    self.corpname = corp.get_conffile()
    self.orig_corp = orig_corp or self.pycorp
    self.corpus_encoding = corp.get_conf('ENCODING')
    # converters between internal strings and the corpus' native encoding
    self.import_string = partial(import_string, from_encoding=self.corpus_encoding)
    self.export_string = partial(export_string, to_encoding=self.corpus_encoding)
    self._conc_file = None
    try:
        if action == 'q':
            params = self.export_string(params)
            manatee.Concordance.__init__(
                self, corp, params, sample_size, full_size)
        elif action == 'a':
            # query with a default attribute
            default_attr, query = params.split(',', 1)
            corp.set_default_attr(default_attr)
            manatee.Concordance.__init__(
                self, corp, self.export_string(query), sample_size, full_size)
        elif action == 'l':
            # load from a file
            self._conc_file = params
            manatee.Concordance.__init__(self, corp, self._conc_file)
        elif action == 's':
            # stored in _conc_dir
            self._conc_file = os.path.join(
                self.pycorp._conc_dir, corp.corpname, params + '.conc')
            manatee.Concordance.__init__(self, corp, self._conc_file)
        else:
            raise RuntimeError(translate('Unknown concordance action: %s') % action)
    except UnicodeEncodeError:
        raise RuntimeError('Character encoding of this corpus ({0}) does not support one or more characters in the query.'
                           .format(self.corpus_encoding))
def ajax_get_corp_details(self, request):
    """
    Return basic corpus metadata as a dict: display name, description, size,
    positional attributes (with value-range sizes), structures (with sizes),
    web URL and citation info.

    A failure to load the attribute list is logged and reported inside the
    answer instead of failing the whole request.
    """
    corp_conf_info = self.get_corpus_info(request.args['corpname'])
    corpus = self.cm.get_Corpus(request.args['corpname'])
    citation_info = corp_conf_info.get('citation_info', None)
    citation_info = citation_info.to_dict() if citation_info else {}
    import_str = partial(l10n.import_string, from_encoding=corpus.get_conf('ENCODING'))
    # prefer the configured display name, fall back to the corpus identifier
    corpus_name = corpus.get_conf('NAME') or corpus.corpname
    ans = {
        'corpname': import_str(corpus_name),
        'description': import_str(corpus.get_info()),
        'size': int(corpus.size()),
        'attrlist': [],
        'structlist': [],
        'web_url': corp_conf_info['web'] if corp_conf_info is not None else '',
        'citation_info': citation_info
    }
    try:
        ans['attrlist'] = [{'name': item, 'size': int(corpus.get_attr(item).id_range())}
                           for item in corpus.get_conf('ATTRLIST').split(',')]
    except RuntimeError as e:
        # Logger.warn is a deprecated alias of Logger.warning
        logging.getLogger(__name__).warning('%s' % e)
        ans['attrlist'] = {'error': translate('Failed to load')}
    ans['structlist'] = [{'name': item, 'size': int(corpus.get_struct(item).size())}
                         for item in corpus.get_conf('STRUCTLIST').split(',')]
    return ans
def getPlotDeta(dic):
    """
    Translate every 'mRNA' row found in *dic* and return all resulting
    residues concatenated into a single flat list.
    """
    mrna_rows = getRows(dic, 'mRNA')
    return [residue
            for seq in mrna_rows
            for residue in translation.translate(seq)]
def init_session(self) -> None:
    """
    Starts/reloads user's web session data. It can
    be called even if there is no 'sessions' plugin
    installed (in such case, it just creates an empty
    dictionary with some predefined keys to allow other
    parts of the application to operate properly)
    """
    with plugins.runtime.AUTH as auth:
        if auth is None:
            raise RuntimeError('Auth plugin was not initialized')
        # make sure there is always a user entry (possibly anonymous)
        if 'user' not in self._session:
            self._session['user'] = auth.anonymous_user()
        if hasattr(auth, 'revalidate'):
            try:
                auth.revalidate(self._plugin_ctx)  # type: ignore
            except Exception as ex:
                # any revalidation failure degrades the session to anonymous
                # and informs the user instead of breaking the whole request
                self._session['user'] = auth.anonymous_user()
                logging.getLogger(__name__).error(
                    'Revalidation error: %s' % ex)
                self.add_system_message(
                    'error',
                    translate(
                        'User authentication error. Please try to reload the page or '
                        'contact system administrator.'))
def test_translate_without_token():
    """translate() must report an error and yield no result when called without a token."""
    error, result = translate(text='katze',
                              lang_from='de',
                              lang_to='en',
                              token=None)
    assert error
    assert not result
def fin2persian(message):
    """
    Translate a Finglish chat message to Persian and post the result back to
    the chat; the bot message id is stored so later user edits can update it.
    Every 200 handled messages the stored message ids are purged.
    """
    try:
        # TABLE: EditMsg, Groups, Users
        translated_msg = translate(message)
        if translated_msg:
            translated_msg = postprocess_msg(message, translated_msg)
            bot_message = bot.send_message(message.chat.id, translated_msg, parse_mode='HTML')
            DBhandler.add_msgId(table='EditMsg', message=message, bot_message=bot_message)
        # # Update User and Group Info
        # if message.chat.type == 'group':
        #     DBhandler.update_group(message)
        # DBhandler.update_user(message)
        #
        # REMOVING expired messages for edit
        global UPDATE_COUNTER
        UPDATE_COUNTER += 1
        if UPDATE_COUNTER % 200 == 0:
            DBhandler.update_msg_ids(message)
            UPDATE_COUNTER = 0
    except Exception as e:
        # broad catch: a bot handler must never propagate; errors are printed only
        print(e)
def do_translate(form, translate_state):
    """
    Handle a voice-assistant translation intent.

    form -- parsed NLU result with 'slots' ('phrase', 'from', 'to')
    translate_state -- per-dialog state dict carrying previously collected
        'text' / 'lang_from' / 'lang_to' values; mutated in place

    Returns a (reply_text, translate_state) pair; reply_text is either a
    clarifying question (in Russian) or the translation result/error.
    """
    api_req = {
        'text': form['slots'].get('phrase', {}).get('value'),
        'lang_from': form['slots'].get('from', {}).get('value'),
        'lang_to': form['slots'].get('to', {}).get('value'),
    }
    # drop empty slots so they don't overwrite values remembered in the state
    api_req = {k: v for k, v in api_req.items() if v}
    # NOTE(review): the error texts below look swapped — the lang_from branch
    # asks "into which language" and the lang_to branch asks "from which
    # language"; confirm against the intended UX before changing
    if 'lang_from' in api_req:
        code = lang_to_code(api_req['lang_from'])
        if not code:
            return 'Не поняла, на какой язык переводить', translate_state
        api_req['lang_from'] = code
    if 'lang_to' in api_req:
        code = lang_to_code(api_req['lang_to'])
        if not code:
            return 'Не поняла, c какого языка переводить', translate_state
        api_req['lang_to'] = code
    translate_state.update(api_req)
    if 'text' not in translate_state:
        return 'Не поняла, какой текст нужно перевести', translate_state
    # for Russian-looking text the target language is required,
    # for non-Russian text the source language is required
    if is_like_russian(
            translate_state['text']) and 'lang_to' not in translate_state:
        return 'На какой язык нужно перевести?', translate_state
    if not is_like_russian(
            translate_state['text']) and 'lang_from' not in translate_state:
        return 'С какого языка нужно перевести?', translate_state
    tran_error, tran_result = translate(**translate_state)
    text = tran_error or tran_result
    return text, translate_state
def translate_online_cb(self, menuoption):
    """Translate the current clipboard text via the online translator and put
    the translated text back on the clipboard (Python 2 / pywin32 code).

    Shows a desktop notification when enabled in the configuration.
    """
    print "translating online"
    win32clipboard.OpenClipboard()
    try:
        # we try to get the data
        self.string = win32clipboard.GetClipboardData(win32con.CF_TEXT)
    except TypeError:
        print "Clipboard is empty"
        # this generally means there's no data in the clipboard
        # and we escape the function
        win32clipboard.CloseClipboard()
        return
    else:
        win32clipboard.CloseClipboard()
    # self.string = str(self.clipboard.wait_for_text())
    self.which_language()
    # we get the selected languages and turned them into google translate options.
    self.string = translate(self.string, self.lang1, self.lang2)
    self.string = self.string.decode("UTF-8")
    # if self.configManager.general_clipboardtraslation:
    #     self.clipboard.set_text(self.string, len=-1)
    win32clipboard.OpenClipboard()
    win32clipboard.EmptyClipboard()
    try:
        print self.string
        win32clipboard.SetClipboardData(win32con.CF_UNICODETEXT, self.string)
    except pywintypes.error:
        # for some reason we can't set the clipboard
        # there's nothing to do about it but to catch the exception
        pass
    win32clipboard.CloseClipboard()
    if self.configManager.general_notifications:
        self.notify(self.string)
def edited_message(message):
    """
    Edits the translated message if user edits the message.

    NOTE: if user edits the message to non-finglish, the bot message will be
    edited to: 'Message Edited to NON-Finglish.'
    """
    try:
        # TABLE: EditMsg
        bot_MsgId = DBhandler.get_botMsgId(table='EditMsg', message=message)
        # removed: debug print(bot_MsgId)
        if bot_MsgId:
            translated_msg = translate(message)
            if translated_msg:
                translated_msg = postprocess_msg(message, translated_msg)
                bot.edit_message_text(translated_msg, message.chat.id,
                                      bot_MsgId, parse_mode='HTML')
            else:
                bot.edit_message_text('Message Edited to NON-Finglish.',
                                      message.chat.id, bot_MsgId, parse_mode='HTML')
    except Exception as e:
        # broad catch: a bot handler must never propagate; errors are printed only
        print(e)
def splicing(sequence, introns):
    """
    Remove every intron from *sequence*, transcribe the remaining exonic DNA
    and return its translation (protein sequence).
    """
    exonic = sequence.getSequence()
    for intron in introns:
        exonic = exonic.replace(intron.getSequence(), '')
    pre_mrna = fastaForm.NucleicFasta()
    pre_mrna.assignSequence(exonic)
    return translation.translate(transcription.transcribe(pre_mrna))
def _translate_feedback(data):
    """
    Detect the language of the feedback message text and attach its
    translation. Mutates data['Feedback']['Message'] in place; does nothing
    when no message block is present.
    """
    feedback_msg = data.get('Feedback', {}).get('Message')
    if feedback_msg:
        trans = translation.translate(config['googleApiServers'],
                                      config['googleApiKey'],
                                      feedback_msg['text'])
        feedback_msg['text_lang_code'] = trans[0]
        feedback_msg['text_lang_name'] = trans[1]
        feedback_msg['text_translated'] = trans[2]
def _wlnums2structattr(self, wlnums): if wlnums == 'arf': raise WordlistError(translate('ARF cannot be used with text types')) elif wlnums == 'frq': return 'doc sizes' elif wlnums == 'docf': return 'docf' else: return wlnums
def returnError(code, userId):
    """
    Send the canned bot reply matching the given error *code* to the user and
    log the event. Unknown codes do nothing. Always returns None.

    Codes: '100' media not allowed, '101' invalid input,
           '200' greeting for a new session, '300' rate limited.
    """
    # (message key, console-log format) per error code; replaces the
    # previous copy-pasted if-chain
    handlers = {
        '100': ('imTooSeriousForYourMedia',
                'Attempting to send request from id %s with media'),
        '101': ('invalidInput',
                'Attempting to send invalid command from id %s'),
        '200': ('greetings',
                'New user with id %s started session'),
        '300': ('tooManyRequests',
                'Attempting to send request from id %s with non-expired delay'),
    }
    peer = bot.users.get_user_peer_by_id(userId)
    entry = handlers.get(code)
    if entry is None:
        return None
    msg_key, log_fmt = entry
    bot.messaging.send_message(peer, translate(lang, msg_key))
    common.conMsg('bot', log_fmt % str(userId))
    return None
def run():
    """
    Main loop of the 'TriFan' SDL2 demo: repeatedly draws fans of rotated,
    progressively shortening lines from the window centre until the window
    is closed or ESC is pressed.

    Relies on module globals: window_size, divisions, clear_color,
    length_inc_percentage, translation, rotation. Returns 0 on exit.
    """
    sdl2.ext.init()
    window = sdl2.ext.Window("TriFan", window_size)
    renderer = sdl2.ext.Renderer(window)
    sprite_factory = sdl2.ext.SpriteFactory(sdl2.ext.TEXTURE, renderer=renderer)
    renderer.blendmode = sdl2.SDL_BLENDMODE_BLEND
    window.show()
    renderer.clear(clear_color)
    rot_per_div = 2*math.pi/divisions
    center = (window_size[0]/2, window_size[1]/2)
    rot_inc = 0  # math.pi/600
    rot = 2*math.pi/600
    length = center[1]/4
    total_rot = 0
    sub = 1
    total_lip = length_inc_percentage
    running = True
    while running:
        events = sdl2.ext.get_events()
        for event in events:
            if event.type == sdl2.SDL_QUIT or (event.type == sdl2.SDL_KEYDOWN and event.key.keysym.sym == sdl2.SDLK_ESCAPE):
                running = False
        renderer.clear(clear_color)
        total_rot += rot_inc
        total_lip -= 0.01
        for p in range(0, 6):
            for m in range(0, divisions):
                # renamed from `len` — it shadowed the builtin len()
                shrink = (6-p)*length*total_lip  # length_inc_percentage
                current_length = length-(shrink)
                for n in range(0, 30):  # divisions):
                    p2 = translation.translate(rotation.rotate((0, current_length), total_rot), center)
                    renderer.draw_line((center[0], center[1], p2[0], p2[1]), 0x2f606060+p*0x001010)
                    total_rot += rot  # rot_per_div
                    current_length -= sub
        renderer.present()
    return 0
def generate2(center, width, renderer, alpha=0):
    """
    Draw one frame of a spiral ribbon: for each y a line connects two edge
    points rotated by an accumulated phase. Uses the function attribute
    ``generate2.i`` as a persistent phase accumulator across calls — it must
    be initialised by the caller before first use.

    Relies on module globals: rect, translation, math.
    """
    # leftover notes from the original C version:
    #drawbits=(char *)ddsd.lpSurface;
    #memset(drawbits, 0, ddsd.lPitch*rect.bottom);
    x1=center-width
    x2=center+width
    for y in range(-3000, 3000):
        #do a y for every line
        #draw line with midpoint on center x and y+rotation, y+rotation[tablesize/2] is half period
        ypi=generate2.i+y*3.14
        generate2.i+=.000003
        p1=translation.translate((rect.bottom/2+x1/6*math.cos(ypi+generate2.i) - y/6*math.sin(ypi+generate2.i), rect.right/2+x1/6*math.sin(ypi+generate2.i)+y/6*math.cos(ypi+generate2.i)), (200, -200))
        # NOTE(review): p2 mixes plain y and ypi (and one sin/cos pair has no
        # phase at all) unlike p1 — possibly intentional visual jitter,
        # possibly a typo; confirm before changing
        p2=translation.translate( (rect.bottom/2+x2/6*math.cos(y+generate2.i)- y/6*math.sin(y+generate2.i), rect.right/2+x2/6*math.sin(y)+y/6*math.cos(y)),(200, -200))
        # colour cycles a bit through the RGB bytes with y; alpha in top byte
        renderer.draw_line((p1[0], p1[1], p2[0], p2[1]), (0x00ffffff&0xff<<y%32) +( int(0xFF*alpha)<<24))
def _wlnums2structattr(self, wlnums): if wlnums == 'arf': raise WordlistError( translate('ARF cannot be used with text types')) elif wlnums == 'frq': return 'doc sizes' elif wlnums == 'docf': return 'docf' else: return wlnums
def ajax_fetch_conc_form_args(self, request: Request) -> Dict[str, Any]:
    """
    Return the stored form arguments of a single concordance operation,
    addressed by pipeline key ('last_key') and position ('idx').

    Missing keys/indices produce an error system message and an empty dict.
    """
    try:
        # Hidden ('nop') operations are filtered out so the indices line up
        # with the breadcrumb-like operations bar shown to the user.
        visible_ops = [
            op for op in self.load_pipeline_ops(request.args['last_key'])
            if op.form_type != 'nop'
        ]
        selected = visible_ops[int(request.args['idx'])]
        return selected.to_dict()
    except (IndexError, KeyError):
        self.add_system_message('error', translate('Operation not found in the storage'))
        return {}
def _worker(self, toTranslate, l1, l2):
    """
    Run one translation job through the (l1, l2) language-pair pipeline.

    The pipeline is (re)started if necessary, the text is translated under
    the pair's dedicated lock and the result is stored in self.res. Timing
    is logged before/after. Pipelines that do not support flushing are
    one-shot and get shut down right after use.
    """
    before = self.logBeforeTranslation()
    self.runPipeline(l1, l2)
    self.res = translate(toTranslate, self.pipeline_locks[(l1, l2)], self.pipelines[(l1, l2)])
    self.logAfterTranslation(before, toTranslate)
    _, _, do_flush = self.pipelines[(l1, l2)]
    # non-flushing pipelines cannot be reused — tear the pair down
    if not do_flush:
        self.shutdownPair((l1, l2))
def load_product_name_translations(product_names):
    """
    Return a {product_name: translation} mapping, cached on disk.

    If 'product_name_translations.json' exists in the current directory it is
    loaded and returned as-is. Otherwise every name in *product_names* is
    translated, the mapping is written to the cache file and returned.
    """
    cache_file = 'product_name_translations.json'
    if cache_file in os.listdir('.'):
        # use a context manager instead of leaking the file handle
        with open(cache_file, 'r') as fh:
            return json.load(fh)
    product_name_translations = {name: translate(name) for name in product_names}
    with open(cache_file, 'w', encoding='utf8') as fh:
        json.dump(product_name_translations, fh, indent=4, ensure_ascii=False)
    # BUG FIX: the original built and saved the mapping but never returned it,
    # so first-time callers always received None
    return product_name_translations
def run():
    """
    Main loop of the 'Leaf' SDL2 demo: each frame rotates a seed point
    through `steps` increments around the window centre and plots it with a
    colour dimmed by the angle fraction. Exits on window close or ESC;
    returns 0.

    Relies on module globals: window_size, steps, clear_color, draw_color,
    translation, rotation.
    """
    sdl2.ext.init()
    window = sdl2.ext.Window("Leaf", window_size)
    renderer = sdl2.ext.Renderer(window)
    sprite_factory = sdl2.ext.SpriteFactory(sdl2.ext.TEXTURE, renderer=renderer)
    renderer.blendmode = sdl2.SDL_BLENDMODE_BLEND
    window.show()
    renderer.clear(clear_color)
    delta_theta = 2 * math.pi / steps
    running = True
    while (running):
        events = sdl2.ext.get_events()
        for event in events:
            if event.type == sdl2.SDL_QUIT or (event.type == sdl2.SDL_KEYDOWN and
                                               event.key.keysym.sym == sdl2.SDLK_ESCAPE):
                running = False
        center = (int(window_size[0] / 2), int(window_size[1] / 2))
        point = (int(window_size[0] / 4), int(window_size[1] / 3))
        renderer.draw_point(translation.translate(point, center), draw_color)
        for i in range(0, steps):
            new_point = rotation.rotate(point, delta_theta * i)
            percent = ((2 * math.pi) / (i + delta_theta))
            # NOTE(review): argument order here (center, new_point) is reversed
            # w.r.t. the call above (point, center) — confirm translate() is
            # symmetric, otherwise one of the calls is likely wrong.
            # Also `draw_color - int(percent * 0xffffffff)` can go negative for
            # small i — confirm the renderer tolerates that.
            renderer.draw_point(translation.translate(center, new_point),
                                draw_color - (int(percent * 0xffffffff)))
        renderer.present()
    return 0
def _add_save_menu(self):
    """
    Populate the save menu with the quick-save formats (CSV, XLSX, XML),
    all sharing the same size-limit hint, plus the trailing 'Custom' entry.
    """
    # build the shared hint once instead of three copy-pasted calls
    quick_save_hint = translate(
        'Saves at most {0} items. Use "Custom" for more options.'.
        format(self.PQUERY_QUICK_SAVE_MAX_LINES))
    for save_format in ('csv', 'xlsx', 'xml'):
        self._add_save_menu_item(
            save_format.upper(), save_format=save_format, hint=quick_save_hint)
    self._add_save_menu_item(translate('Custom'))
def classify(self, news=''):
    """
    Classify a news text with the trained model.

    Non-English input is first translated to English; in that case the label
    names are translated back into the detected language for display.
    Returns a list of (label, probability-percentage) pairs sorted by
    probability (then label) in descending order.
    """
    x = ' '.join([word for word in stringClean(news).split()])
    x, lang = translate(key, x)
    if(lang != 'en'):
        # translate all label names at once as a single ' - '-joined string
        label = (" - ").join(self.lables)
        label = translate(key, label, lang)[0]
        # NOTE(review): splitting on '-' keeps surrounding spaces on each label
        # and would break labels containing a hyphen — confirm this is intended
        labels = label.split("-")
    else:
        labels = self.lables
    X = self.vect.transform([x])
    prob = self.model.predict_proba(X)
    prob = list(prob[0])
    # convert to percentages rounded to 2 decimals
    for i in range(len(prob)):
        prob[i] = round(prob[i]*100, 2)
    results = dict(zip(labels, prob))
    results = sorted(results.items(), key=lambda kv: (
        kv[1], kv[0]), reverse=True)
    return results
def _set_new_viewopts(self, newctxsize='', ctxunit='', line_numbers=0, cql_editor=0): if ctxunit == '@pos': ctxunit = '' if "%s%s" % (newctxsize, ctxunit) != self.args.kwicrightctx: if not newctxsize.isdigit(): raise Exception( translate('Value [%s] cannot be used as a context width. Please use numbers 0,1,2,...') % newctxsize) self.args.kwicleftctx = '-%s%s' % (newctxsize, ctxunit) self.args.kwicrightctx = '%s%s' % (newctxsize, ctxunit) self.args.line_numbers = line_numbers self.args.cql_editor = cql_editor
def sendTicketManually(queue_id, requestTypeId):
    """
    Manually create a Jira ticket for the queued request identified by
    queue_id, using the given Jira request type.

    On success the reporter is notified, the request is removed from the
    queue/message lists and the Jira response is returned. If the reporter
    does not exist in Jira, the queue message is edited with an explanation
    and None is returned. Jira connection/parse failures edit the message
    with an auth error and also return None.

    Relies on module globals: queue, bot, jira, credentials, projectId,
    link, lang, common.
    """
    try:
        i = 0
        while i < len(queue):
            if queue_id == queue[i][0]:
                # queue rows: [queue_id, ?, uid, requestMessage]
                queueMember = queue[i]
                reporter = bot.users.get_user_by_id(int(queueMember[2])).data.nick.value
                requestMessage = queueMember[3]
                if jira.checkIfUserExists(credentials, reporter) == True:
                    response = jira.parseResponseCreatingTicket(
                        jira.createTicket(credentials, projectId, requestTypeId, reporter, requestMessage))
                    jira.deleteUserFromWatchers(credentials, response[1])
                    replyToReporter(response=response, queue_id=queueMember[0],
                                    uid=queueMember[2], ticketText=requestMessage)
                    common.conMsg('bot', 'Ticket sent request manually TypeId=' + str(requestTypeId) + ' reporter=' + str(reporter))
                    removeFromRequestMessageList(queue_id)
                    removeFromQueue(str(queueMember[0]))
                    return response
                else:
                    # reporter missing in Jira: explain via the queue message
                    optional = [None]*2
                    optional[0] = link
                    optional[1] = projectId
                    common.conMsg('bot', 'Cannot send message because missing user in Jira')
                    message = bot.messaging.get_messages_by_id(findMidsWithQueueId(queue_id))[0]
                    bot.messaging.update_message(message, translate(lang,'ticketConfirmation') + requestMessage + '\n***\n' + translate(lang,'jiraUserNotExists',optional))
                    removeFromBanList(findUidWithQueueId(queue_id))
                    removeFromRequestMessageList(queue_id)
                    removeFromQueue(queue_id)
                    return None
            i += 1
        else:
            # while/else: no queue entry matched queue_id
            return None
    except (TypeError, json.decoder.JSONDecodeError):
        # Jira auth/connection failure or unparsable response
        optional = [None]*2
        optional[0] = link
        optional[1] = projectId
        common.conMsg('bot', 'Failed to send ticket cause failed to authorize or connect to Jira')
        message = bot.messaging.get_messages_by_id(findMidsWithQueueId(queue_id))[0]
        bot.messaging.update_message(message, translate(lang,'jiraAuthError',optional))
        removeFromBanList(findUidWithQueueId(queue_id))
        removeFromRequestMessageList(queue_id)
        removeFromQueue(queue_id)
        return None
def language_translate_v2():
    """
    Flask endpoint: translate query parameter ``q`` from ``source`` to
    ``target``; when either language parameter is missing, fall back to
    plain language detection. Returns the result as JSON under 'data'.
    """
    source = request.args.get('source', '')
    target = request.args.get('target', '')
    text = request.args.get('q', '')
    # was `len(source) == 0 or len(target) == 0` — truthiness is equivalent
    if not source or not target:
        data = detect(text)
    else:
        data = translate(source, target, text)
    return jsonify(data=data)
def formatRequest(request, charLimit):
    """
    Build a two-part representation of a request text.

    Returns [headline, fullText]: the headline is the request truncated
    word-by-word from the end until it fits under charLimit (or a canned
    placeholder when even the first word is too long); the full text gets a
    '\\n***\\n' separator and a 'createdBy' attribution appended.
    """
    newRequest = [''] * 2
    newRequest[0] = request
    newRequest[1] = request
    words = request.split(' ')
    while len(newRequest[0]) >= charLimit and len(words) > 1:
        words = words[:-1]
        # replaces the original manual concat-then-strip-trailing-space loop
        newRequest[0] = ' '.join(words)
    if len(newRequest[0]) >= charLimit:
        newRequest[0] = translate('ru', 'veryLongFirstWordInRequest')
    newRequest[1] = newRequest[1] + '\n***\n' + translate('ru', 'createdBy')
    return newRequest
def gloss(self):
    """
    Build a gloss list for the parsed sentences.

    For each (structure, sentence) pair, the selected word indices are
    translated; the first translation of each word is appended. Untranslatable
    words contribute a single None (consecutive failures are collapsed —
    a None is only appended when the previous entry isn't already None).
    Sentences are separated by "/" markers; the trailing separator is
    stripped from the result.
    """
    gloss = []
    for struct, sent in zip(self.structures, self.sentences):
        forms, idxs = struct
        for idx in idxs:
            words = translation.translate(sent[idx])
            if words:
                gloss.append(words[0])
            elif not gloss or gloss[-1] is not None:
                gloss.append(None)
        gloss.append("/")
    # drop the trailing sentence separator
    return gloss[:-1]
def normalize(allTriPolygones):
    """
    Pre-process a triangle mesh: translate it so that its centre of mass
    becomes the spatial origin, then scale it to the final normalized model.
    """
    # translation step: move the model's centroid to the origin
    mp, transed_tripoints = translation.translate(allTriPolygones)
    '''
    # 旋转变换
    rotated_points = rotation.rotate(transed_points)
    '''
    # scaling step: returns the final pre-processed model;
    # final_tripoints is an n x 3 2-D array, each row is one triangle vertex
    final_tripoints = scaling.scale(mp, transed_tripoints)
    return final_tripoints
def command_x(self, options):
    """
    Handle the concordance 'x' command.

    options starting with '-' switch to the aligned corpus named by the rest
    of the string (adding the alignment first and raising
    EmptyParallelCorporaIntersection when no alignment exists); otherwise
    options is interpreted as an integer argument to swap_kwic_coll.
    """
    if options[0] == '-':
        # start from the original corpus before attaching the alignment
        self.switch_aligned(self.orig_corp.get_conffile())
        try:
            self.add_aligned(options[1:])
        except RuntimeError as e:
            logging.getLogger(__name__).warning('Failed to add aligned corpus: %s' % e)
            raise EmptyParallelCorporaIntersection(
                translate('No alignment available for the selected languages'))
        self.switch_aligned(options[1:])
        self.corpname = options[1:]
    else:
        self.swap_kwic_coll(int(options))
def _get_email_info(msg):
    """
    Build a normalized info dict from a parsed email message.

    The subject and body are run through the translation service and stored
    together with detected language code/name. The sender address is taken
    from the From header (falling back to Return-Path) and stripped via
    _email_stripper_regex; returns None when stripping fails to match.
    """
    logger.debug_log('maildecryptor._get_email_info start')
    subject_translation = translation.translate(config['googleApiServers'],
                                                config['googleApiKey'],
                                                msg['subject'])
    subject = dict(text=msg['subject'],
                   text_lang_code=subject_translation[0],
                   text_lang_name=subject_translation[1],
                   text_translated=subject_translation[2])
    body_translation = translation.translate(config['googleApiServers'],
                                             config['googleApiKey'],
                                             msg['body'])
    body = dict(text=msg['body'],
                text_lang_code=body_translation[0],
                text_lang_name=body_translation[1],
                text_translated=body_translation[2],
                html=msg['html'])
    # prefer the From header; fall back to Return-Path
    raw_address = msg['from'] or msg['msgobj'].get('Return-Path')
    stripped_address = None
    if raw_address:
        match = _email_stripper_regex.match(raw_address)
        if not match:
            logger.error('when stripping email address failed to match: %s' % str(raw_address))
            return None
        stripped_address = match.group(2)
    email_info = dict(address=stripped_address,
                      to=msg['to'],
                      message_id=msg['msgobj']['Message-ID'],
                      subject=subject,
                      body=body)
    logger.debug_log('maildecryptor._get_email_info end')
    return email_info
def struct_result(self, _):
    """
    Compute a structural/multilevel wordlist result.

    With a frequency criterion set, returns a regular frequency
    distribution; otherwise validates the arguments and delegates to the
    multilevel frequency function with a level derived from the configured
    wlposattr1..3 attributes.

    Raises:
        WordlistError: for unsupported argument combinations
    """
    if self.args.fcrit:
        self._make_wl_query()
        return self.freqs(self.args.fcrit, self.args.flimit, self.args.freq_sort, 1)
    # a dotted attribute means a text-type list, which supports only the
    # simple output mode
    if '.' in self.args.wlattr:
        raise WordlistError('Text types are limited to Simple output')
    if self.args.wlnums != 'frq':
        raise WordlistError('Multilevel lists are limited to Word counts frequencies')
    # wlposattr1 is required; level shrinks if attrs 3 and/or 2 are empty
    level = 3
    if not self.args.wlposattr1:
        raise WordlistError(translate('No output attribute specified'))
    if not self.args.wlposattr3:
        level = 2
    if not self.args.wlposattr2:
        level = 1
    if not self.args.wlpat and not self.args.wlwords:
        raise WordlistError(
            translate('You must specify either a pattern or a file to get the multilevel wordlist'))
    self._make_wl_query()
    self.args.flimit = self.args.wlminfreq
    return self.freqml(flimit=self.args.wlminfreq, freqlevel=level,
                       ml1attr=self.args.wlposattr1, ml2attr=self.args.wlposattr2,
                       ml3attr=self.args.wlposattr3)
def _create_user_action_err_result(self, ex, return_type):
    """
    Build template arguments describing a failed user action.

    arguments:
    ex -- a risen exception
    return_type -- output format of the action ('json' or a template-based type)

    returns:
    a dict of template arguments; for 'json' it carries the exception's
    error_code/error_args (when present), otherwise an empty dict
    """
    e2 = self._normalize_error(ex)
    # Detailed message only in debug mode or for expected (user-level) errors;
    # otherwise a generic apology is used.
    if settings.is_debug_mode() or isinstance(e2, UserActionException):
        user_msg = fetch_exception_msg(e2)
    else:
        user_msg = translate('Failed to process your request. '
                             'Please try again later or contact system support.')
    # NOTE(review): user_msg is computed but never included in either return
    # value — the caller (run()) passes ex.message separately, but the
    # normalized/generic message built here is silently dropped. Confirm
    # whether it should be part of the returned dict.
    if return_type == 'json':
        return dict(error_code=getattr(ex, 'error_code', None),
                    error_args=getattr(ex, 'error_args', {}))
    else:
        return dict()
def _normalize_error(self, err):
    """
    This method is intended to extract as much details as possible
    from a broad range of errors and rephrase them in a more
    specific ones (including exception object type).

    It is quite a lame solution but it appears that in case of
    syntax errors, attribute errors etc. Manatee raises only
    RuntimeError without further type distinction.

    Please note that some of the decoding is dependent on how Manatee
    phrases its errors which may change between versions
    (as it probably happened in 2.150.x).

    arguments:
    err -- an instance of Exception

    returns:
    a (possibly different) instance of Exception with (possibly)
    rephrased error message.
    """
    # User-level errors are already in their final form.
    if isinstance(err, UserActionException):
        return err
    # Python 2: coerce the message to unicode, decoding byte strings
    # using the corpus encoding (bad bytes are replaced, not raised).
    if err.message:
        if type(err.message) == unicode:
            text = err.message
        else:
            text = str(err.message).decode(self.corp_encoding, errors='replace')
    else:
        text = unicode(err)
    err.message = text  # in case we return the original error
    if 'syntax error' in text.lower():
        # Try to extract the error position from Manatee's message.
        srch = re.match(r'.+ position (\d+)', text)
        if srch:
            text = translate('Query failed: Syntax error at position %s.') % srch.groups()[0]
        else:
            text = translate('Query failed: Syntax error.')
        new_err = UserActionException(
            translate('%s Please make sure the query and selected query type are correct.') % text)
    elif 'AttrNotFound' in text:
        # Extract the offending attribute name when present.
        srch = re.match(r'AttrNotFound\s+\(([^)]+)\)', text)
        if srch:
            text = translate('Attribute "%s" not found.') % srch.groups()[0]
        else:
            text = translate('Attribute not found.')
        new_err = UserActionException(text)
    elif 'EvalQueryException' in text:
        new_err = UserActionException(
            translate('Failed to evaluate the query. Please check the syntax and used attributes.'))
    else:
        # Unknown error type — pass it through (with the normalized message).
        new_err = err
    return new_err
def init_session(self):
    """
    Starts/reloads user's web session data. It can be called even
    if there is no 'sessions' plugin installed (in such case, it just
    creates an empty dictionary with some predefined keys to allow
    other parts of the application to operate properly)
    """
    with plugins.runtime.AUTH as auth:
        # Every session must carry a user record; default to anonymous.
        if 'user' not in self._session:
            self._session['user'] = auth.anonymous_user()
        if not hasattr(auth, 'revalidate'):
            return
        try:
            auth.revalidate(self._plugin_api)
        except Exception as err:
            # On any revalidation failure, downgrade to an anonymous user,
            # log the problem and show an error message to the user.
            self._session['user'] = auth.anonymous_user()
            logging.getLogger(__name__).error('Revalidation error: %s' % err)
            self.add_system_message(
                'error',
                translate('User authentication error. Please try to reload the page or '
                          'contact system administrator.'))
def normalize(allPolygones, name, hist_w=200, hist_h=200):
    """Preprocess a polygon model and emit its triangle histogram.

    Pipeline: translate the model so its centre of mass becomes the
    origin, rotate it into a canonical orientation, scale it, then group
    the flat vertex list into triangles and pass everything to
    histogram.getTriHist.

    arguments:
    allPolygones -- raw model polygons, as accepted by translation.translate
    name -- model identifier forwarded to histogram.getTriHist
    hist_w, hist_h -- histogram resolution (previously hard-coded 200x200)
    """
    # Translation: move the model's centre of mass to the origin.
    mp, transed_points = translation.translate(allPolygones)
    # Rotation into a canonical orientation.
    rotated_points = rotation.rotate(transed_points)
    # Scaling returns the final preprocessed model; final_points is an
    # n x 3 array — one row per triangle vertex.
    final_points = np.array(scaling.scale(mp, rotated_points))
    # Group consecutive vertex triples into triangles. `//` keeps the
    # division integral on both Python 2 and 3 (the original plain `/`
    # makes range() fail under Python 3).
    allTris = [final_points[i * 3:i * 3 + 3]
               for i in range(len(final_points) // 3)]
    histogram.getTriHist(allTris, final_points, name, hist_w, hist_h)
def _create_subcorpus(self, request):
    """
    Create a new subcorpus from one of several input modes
    (raw CQL, a custom 'within' JSON, live-attributes selection with
    aligned corpora, or a plain text-type query).

    req. arguments:
    subcname -- name of new subcorpus
    create -- bool, sets whether to create new subcorpus
    cql -- custom within condition
    """
    subcname = request.form['subcname']
    within_json = request.form.get('within_json')
    raw_cql = request.form.get('cql')
    aligned_corpora = request.form.getlist('aligned_corpora')
    # NOTE(review): int() raises if 'publish' is missing or non-numeric —
    # presumably the form always sends it; verify against the client.
    publish = bool(int(request.form.get('publish')))
    corpus_info = self.get_corpus_info(self.args.corpname)
    description = request.form.get('description')
    if not subcname:
        raise UserActionException(translate('No subcorpus name specified!'))
    if publish and not description:
        raise UserActionException(translate('No description specified'))
    if raw_cql:
        # Mode 1: user supplied a raw CQL 'within' condition directly.
        aligned_corpora = []
        tt_query = ()
        within_cql = raw_cql
        full_cql = 'aword,[] %s' % raw_cql
        imp_cql = (full_cql,)
    elif within_json:  # user entered a subcorpus query manually
        # Mode 2: structured 'within' description serialized as JSON.
        aligned_corpora = []
        tt_query = ()
        within_cql = self._deserialize_custom_within(json.loads(within_json))
        full_cql = 'aword,[] %s' % within_cql
        imp_cql = (full_cql,)
    elif len(aligned_corpora) > 0 and plugins.runtime.LIVE_ATTRIBUTES.exists:
        # Mode 3: selection across aligned corpora via the live-attributes
        # plug-in; requires bibliography (label/id) attrs in corpus metadata.
        if corpus_info.metadata.label_attr and corpus_info.metadata.id_attr:
            within_cql = None
            attrs = json.loads(request.form.get('attrs', '{}'))
            sel_match = plugins.runtime.LIVE_ATTRIBUTES.instance.get_attr_values(
                self._plugin_api, corpus=self.corp, attr_map=attrs,
                aligned_corpora=aligned_corpora, limit_lists=False)
            values = sel_match['attr_values'][corpus_info.metadata.label_attr]
            args = argmapping.Args()
            # Translate the matched labels into an id-attribute selection.
            setattr(args, 'sca_{0}'.format(
                corpus_info.metadata.id_attr), [v[1] for v in values])
            tt_query = TextTypeCollector(self.corp, args).get_query()
            tmp = ['<%s %s />' % item for item in tt_query]
            full_cql = ' within '.join(tmp)
            full_cql = 'aword,[] within %s' % full_cql
            full_cql = import_string(full_cql, from_encoding=self.corp_encoding)
            imp_cql = (full_cql,)
        else:
            raise FunctionNotSupported(
                'Corpus must have a bibliography item defined to support this function')
    else:
        # Mode 4: plain text-type query taken straight from the form.
        within_cql = None
        tt_query = TextTypeCollector(self.corp, request).get_query()
        tmp = ['<%s %s />' % item for item in tt_query]
        full_cql = ' within '.join(tmp)
        full_cql = 'aword,[] within %s' % full_cql
        full_cql = import_string(full_cql, from_encoding=self.corp_encoding)
        imp_cql = (full_cql,)
    basecorpname = self.args.corpname.split(':')[0]
    path = self.prepare_subc_path(basecorpname, subcname, publish=False)
    publish_path = self.prepare_subc_path(
        basecorpname, subcname, publish=True) if publish else None
    # Python 2: manatee expects a byte-string path.
    if type(path) == unicode:
        path = path.encode('utf-8')
    if len(tt_query) == 1 and len(aligned_corpora) == 0:
        # Simple case — create the subcorpus synchronously.
        result = corplib.create_subcorpus(path, self.corp, tt_query[0][0], tt_query[0][1])
        if result and publish_path:
            corplib.mk_publish_links(path, publish_path, self.session_get(
                'user', 'fullname'), description)
    elif len(tt_query) > 1 or within_cql or len(aligned_corpora) > 0:
        # Complex case — delegate to a background worker.
        backend = settings.get('calc_backend', 'type')
        if backend in ('celery', 'konserver'):
            import bgcalc
            app = bgcalc.calc_backend_client(settings)
            res = app.send_task('worker.create_subcorpus',
                                (self.session_get('user', 'id'), self.args.corpname, path,
                                 publish_path, tt_query, imp_cql,
                                 self.session_get('user', 'fullname'), description),
                                time_limit=TASK_TIME_LIMIT)
            self._store_async_task(AsyncTaskStatus(
                status=res.status, ident=res.id,
                category=AsyncTaskStatus.CATEGORY_SUBCORPUS,
                label=u'%s:%s' % (basecorpname, subcname),
                args=dict(subcname=subcname, corpname=basecorpname)))
            result = {}
        elif backend == 'multiprocessing':
            from bgcalc import subc_calc
            import functools
            import multiprocessing
            worker = subc_calc.CreateSubcorpusTask(user_id=self.session_get('user', 'id'),
                                                   corpus_id=self.args.corpname)
            # Fire-and-forget background process; status not tracked here.
            multiprocessing.Process(target=functools.partial(
                worker.run, tt_query, imp_cql, path, publish_path, description)).start()
            result = {}
        # NOTE(review): with any other backend value, `result` stays unbound
        # and the `if result is not False` test below raises NameError —
        # confirm the supported backend set.
    else:
        raise UserActionException(translate('Nothing specified!'))
    if result is not False:
        # Store the generating query so the subcorpus can be restored later.
        with plugins.runtime.SUBC_RESTORE as sr:
            try:
                sr.store_query(user_id=self.session_get('user', 'id'),
                               corpname=self.args.corpname, subcname=subcname,
                               cql=full_cql.strip().split('[]', 1)[-1])
            except Exception as e:
                # Best-effort: the subcorpus itself was created; only warn.
                logging.getLogger(__name__).warning('Failed to store subcorpus query: %s' % e)
                self.add_system_message(
                    'warning',
                    translate('Subcorpus created but there was a problem saving a backup copy.'))
        unfinished_corpora = filter(lambda at: not at.is_finished(),
                                    self.get_async_tasks(category=AsyncTaskStatus.CATEGORY_SUBCORPUS))
        return dict(processed_subc=[uc.to_dict() for uc in unfinished_corpora])
    else:
        raise SubcorpusError(translate('Empty subcorpus!'))
def get_translation(self, text):
    """Translate *text* from English to Russian and return it as a JSON object string."""
    settings = json.loads(load_settings())
    api_key = settings["yandex_api_key"]
    translated = translate(api_key, "en-ru", text)
    return json.dumps({"value": translated})
# change settings for protein alignment hygo.set_alphabet(HyGo.amino_alphabet) hygo.set_matrix(HyGo.blosum62) hygo.set_gap_open(7, True) hygo.set_gap_open(7, False) hygo.set_gap_extend(1, True) hygo.set_gap_extend(1, False) # prepare outputs outputs = {} h77 = {} with open('data/h77-genes.csv', 'rU') as f: for line in f: gene, seq = line.strip('\n').split(',') prot = translate(seq, 0).strip('*') h77.update({gene: prot}) outfile = open('%s.%s.csv' % (args.root, gene), 'w') outputs.update({gene: outfile}) outfile.write('accession') for i, aa in enumerate(prot): outfile.write(',%s%d' % (aa, i+1)) outfile.write(',inserts\n') reader = DictReader(open(args.infile, 'rU')) for rcount, row in enumerate(reader): accno = row['accession'] for gene, refseq in h77.iteritems():
def _validate_http_method(self, action_metadata):
    """Raise a 404 UserActionException unless the request's HTTP method is allowed for the action.

    The action's metadata may declare 'http_method' as a single string or a
    tuple of strings; a missing entry defaults to 'GET'.
    """
    declared = action_metadata.get('http_method', 'GET')
    # Exact tuple check kept on purpose (matches the original semantics).
    allowed = declared if type(declared) is tuple else (declared,)
    if self.get_http_method() not in allowed:
        raise UserActionException(translate('Unknown action'), code=404)
def run(self, path=None):
    """
    This method wraps all the processing of an HTTP request.

    arguments:
    path -- path part of URL

    returns:
    a 4-tuple: HTTP status, HTTP headers, valid SID flag, response body
    """
    self._install_plugin_actions()
    self._proc_time = time.time()
    path = path if path is not None else self._import_req_path()
    methodname = path[0]
    named_args = {}
    headers = []
    action_metadata = self._get_method_metadata(methodname)
    if not action_metadata:
        # Unknown action: synthesize default metadata from an empty
        # @exposed-decorated function so downstream code keeps working.
        def null():
            pass
        action_metadata = {}
        action_metadata.update(exposed()(null).__dict__)
    try:
        self.init_session()
        if self.is_action(methodname, action_metadata):
            named_args = self.pre_dispatch(methodname, named_args, action_metadata)
            self._pre_action_validate()
            tmpl, result = self.process_action(methodname, named_args)
        else:
            # Reroute to the generic 'message' page and report the problem.
            orig_method = methodname
            methodname = 'message'
            raise NotFoundException(translate('Unknown action [%s]') % orig_method)
    except CorpusForbiddenException as ex:
        self._status = ex.code
        tmpl, result = self._run_message_action(
            named_args, action_metadata, 'error', ex.message)
    except ImmediateRedirectException as ex:
        tmpl, result = None, None
        self.redirect(ex.url, ex.code)
    except UserActionException as ex:
        self._status = ex.code
        msg_args = self._create_user_action_err_result(ex, action_metadata['return_type'])
        tmpl, result = self._run_message_action(
            msg_args, action_metadata, 'error', ex.message)
    except werkzeug.exceptions.BadRequest as ex:
        self._status = ex.code
        tmpl, result = self._run_message_action(named_args, action_metadata, 'error',
                                                '{0}: {1}'.format(ex.name, ex.description))
    except Exception as ex:
        # an error outside the action itself (i.e. pre_dispatch, action validation,
        # post_dispatch etc.)
        logging.getLogger(__name__).error(u'%s\n%s' % (ex, ''.join(get_traceback())))
        self._status = 500
        if settings.is_debug_mode():
            message = fetch_exception_msg(ex)
        else:
            message = translate(
                'Failed to process your request. '
                'Please try again later or contact system support.')
        tmpl, result = self._run_message_action(named_args, action_metadata, 'error', message)
    self._proc_time = round(time.time() - self._proc_time, 4)
    self.post_dispatch(methodname, action_metadata, tmpl, result)
    # response rendering; body is skipped for 3xx responses
    headers += self.output_headers(action_metadata['return_type'])
    output = StringIO.StringIO()
    if self._status < 300 or self._status >= 400:
        self.output_result(methodname, tmpl, result, action_metadata,
                           return_type=action_metadata['return_type'], outf=output)
    ans_body = output.getvalue()
    output.close()
    return self._export_status(), headers, self._uses_valid_sid, ans_body
from yaml_importer import from_file_with_list

# CLI: choose exactly one processing stage for the given YAML package list.
parser = argparse.ArgumentParser(description='Translate some packages')
parser.add_argument('--git-branch', dest="git_branch", default="master")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--prepare', dest='target', action='store_const', const='prepare')
group.add_argument('--translate', dest='target', action='store_const', const='translate')
group.add_argument('--commit', dest='target', action='store_const', const='commit')
parser.add_argument('file', metavar='filename.yml', type=str,
                    help='an integer for the accumulator')
args = parser.parse_args()

settings = json.loads(settings_keeper.load_settings())
project_group = settings["abf_projects_group"]
yandex_api_key = settings["yandex_api_key"]

# Smoke-test the translation API before touching any project.
# This used to be a bare `assert`, which is silently stripped when the
# interpreter runs with -O — an explicit check cannot be optimized away.
if translate(yandex_api_key, "en-ru",
             "Lazy cat jumps over talking dog") != "Ленивый кот перепрыгивает через говорящая собака":
    raise RuntimeError('translation API smoke test failed')

project_info = [full_project_info(project_group, f, ["Name", "Comment"])
                for f in from_file_with_list(args.file)]
for one in project_info:
    random_str = uuid.uuid4().hex.capitalize()
    for f in one["desktop_files"]:
        for i in f["strings"]:
            if args.target in ['translate', 'commit']:
                # Fill the Russian translation from the English source string.
                i["value"]["ru"] = translate(yandex_api_key, "en-ru", i["value"]["en"])
            else:
                # 'prepare' stage: placeholder value only.
                i["value"]["ru"] = " "
    prepare_patch(random_str, one["git"], one["package_name"],
                  json.dumps(one["desktop_files"]), args.git_branch)
    if (args.target == 'commit'):
        push_patch(random_str)
def xfreq_dist(self, crit, limit=1, sortkey='f', ml='', ftt_include_empty='', rel_mode=0,
               collator_locale='en_US'):
    """
    Calculates data (including data for visual output) of a frequency
    distribution specified by the 'crit' parameter

    arguments:
    crit -- specified criteria (CQL)
    limit -- str type!, minimal frequency accepted, this value is exclusive! (i.e. accepted
             values must be greater than the limit)
    sortkey -- a key according to which the distribution will be sorted
    ml -- str, if non-empty then multi-level freq. distribution is generated
    ftt_include_empty -- str, TODO
    rel_mode -- {0, 1}, TODO
    collator_locale -- locale used for collation-based sorting of words
    """
    # ml = determines how the bar appears (multilevel x text type)
    # import math
    # Bar widths (in abstract units) used for visual scaling below.
    normwidth_freq = 100
    normwidth_rel = 100

    def calc_scale(freqs, norms):
        """
        Create proper scaling coefficients for freqs and norms
        to match a 100 units length bar.
        """
        from operator import add
        sumn = float(reduce(add, norms))
        if sumn == 0:
            # No norm data — only the frequency bar can be scaled.
            return float(normwidth_rel) / max(freqs), 0
        else:
            sumf = float(reduce(add, freqs))
            corr = min(sumf / max(freqs), sumn / max(norms))
            return normwidth_rel / sumf * corr, normwidth_rel / sumn * corr

    def label(attr):
        # Human-readable label for an attribute (strips any '/flag' suffix).
        if '/' in attr:
            attr = attr[:attr.index('/')]
        lab = self.pycorp.get_conf(attr + '.LABEL')
        return self.import_string(lab if lab else attr)

    words = manatee.StrVector()
    freqs = manatee.NumVector()
    norms = manatee.NumVector()
    # freq_dist fills words/freqs/norms in-place.
    self.pycorp.freq_dist(self.RS(), crit, limit, words, freqs, norms)
    words = [self.import_string(w) for w in words]
    if not len(freqs):
        return {}
    # now we intentionally rewrite norms as filled in by freq_dist()
    # because of "hard to explain" metrics they lead to
    if rel_mode == 0:
        norms2_dict = self.get_attr_values_sizes(crit)
        norms = [norms2_dict.get(x, 0) for x in words]
    sumf = float(sum([x for x in freqs]))
    attrs = crit.split()
    # Header columns: one per criterion attribute, plus the Freq column.
    head = [dict(n=label(attrs[x]), s=x / 2) for x in range(0, len(attrs), 2)]
    head.append(dict(n=translate('Freq'), s='freq', title=translate('Frequency')))
    tofbar, tonbar = calc_scale(freqs, norms)
    if tonbar and not ml:
        # Single-level distribution with norm data: compute relative bars.
        maxf = max(freqs)  # because of bar height
        minf = min(freqs)
        maxrel = 0  # because of bar width
        for index, (f, nf) in enumerate(zip(freqs, norms)):
            if nf == 0:
                # Guard against division by zero; 100000 acts as a sentinel norm.
                nf = 100000
                norms[index] = 100000
            newrel = (f * tofbar / (nf * tonbar))
            if maxrel < newrel:
                maxrel = newrel
        if rel_mode == 0:
            head.append(dict(
                n='i.p.m.',
                title=translate(
                    'instances per million positions (refers to the respective category)'),
                s='rel'
            ))
        else:
            head.append(dict(n='Freq [%]', title='', s='rel'))
        lines = []
        for w, f, nf in zip(words, freqs, norms):
            w = self.import_string(w)
            # rel_mode 0: i.p.m.; rel_mode 1: percentage of total frequency.
            rel_norm_freq = {
                0: round(f * 1e6 / nf, 2),
                1: round(f / sumf * 100, 2)
            }[rel_mode]
            rel_bar = {
                0: 1 + int(f * tofbar * normwidth_rel / (nf * tonbar * maxrel)),
                1: 1 + int(float(f) / maxf * normwidth_rel)
            }[rel_mode]
            freq_bar = {
                0: int(normwidth_freq * float(f) / (maxf - minf + 1) + 1),
                1: 10
            }[rel_mode]
            lines.append(dict(
                Word=[{'n': ' '.join(n.split('\v'))} for n in w.split('\t')],
                freq=f,
                fbar=int(f * tofbar) + 1,
                norm=nf,
                nbar=int(nf * tonbar),
                relbar=rel_bar,
                norel=ml,
                freqbar=freq_bar,
                rel=rel_norm_freq
            ))
    else:
        # Multi-level (or no-norm) variant: frequency bars only.
        lines = []
        for w, f, nf in zip(words, freqs, norms):
            w = self.import_string(w)
            lines.append(dict(
                Word=[{'n': ' '.join(n.split('\v'))} for n in w.split('\t')],
                freq=f,
                fbar=int(f * tofbar) + 1,
                norel=1,
                relbar=None
            ))
    if ftt_include_empty and limit == 0 and '.' in attrs[0]:
        # Append zero-frequency rows for attribute values absent from the result.
        attr = self.pycorp.get_attr(attrs[0])
        all_vals = [attr.id2str(i) for i in range(attr.id_range())]
        used_vals = [line['Word'][0]['n'] for line in lines]
        for v in all_vals:
            if v in used_vals:
                continue
            lines.append(dict(
                Word=[{'n': self.import_string(v)}],
                freq=0,
                rel=0,
                norm=0,
                nbar=0,
                relbar=0,
                norel=ml,
                freqbar=0,
                fbar=0
            ))
    # Sorting: by one of the word columns (collated) or by freq/rel value.
    if (sortkey in ('0', '1', '2')) and (int(sortkey) < len(lines[0]['Word'])):
        sortkey = int(sortkey)
        lines = l10n.sort(lines, loc=collator_locale, key=lambda v: v['Word'][sortkey]['n'])
    else:
        if sortkey not in ('freq', 'rel'):
            sortkey = 'freq'
        lines = sorted(lines, key=lambda v: v[sortkey], reverse=True)
    return dict(Head=head, Items=lines)
from ocr import read ## For Translation from translation import translate ## Open Serial connection wtih Arduino board arduino = serial.Serial('/dev/ttyACM0', 9600) # find port number arduino.write('000000') ## Camera Setup and Capture camera = PiCamera() camera.start_preview() sleep(5) camera.capture('image.jpg') camera.stop_preview() ## Optical Character Recognition text = read() ## Translation for letter in text: braille = translate(letter) print letter + ": " + braille arduino.write(braille) sleep(2) ## Delete image file captured os.remove("/home/pi/Desktop/read-real/image.jpg")
temp = file(file_name) code = temp.read() lines = code.split("\n") temp.close() for i in range(len(lines)): lines[i] = lines[i].lstrip().rstrip().upper() print "Enter language of the file: " language_input = raw_input () lines=macroExpand.macroExpand(lines) lines=predefExpand.predefExpand(lines) lines = translation.translate(language_input,lines) for line in lines: print line symboltable, length = pass1.pass1(lines) lines = pass1.out(symboltable,lines) print lines temp1 = file(file_name.split('.')[0]+'1.txt', 'w') for line in lines: temp1.write(line+'\n') temp1.write('#\n') for key in symboltable:
def translate(self):
    """Translate the text from the source entry and display the result."""
    # Fetch the text the user wants to translate.
    self.string = self.original_entry.get_text()
    # Refresh self.lang1/self.lang2 from the current UI selection.
    self.which_language()
    # NB: this calls the module-level translate() helper, not this method.
    translated = translate(self.string, self.lang1, self.lang2)
    self.string = translated.decode("UTF-8")
    self.translated_entry.set_text(self.string.encode("latin-1"))