def test_updated_timestamp(self):
    self._generate()
    r = self.client.get(urlparams(self.url, sort='updated'))
    items = pq(r.content)('.primary .item')
    for idx, c in enumerate(r.context['pager'].object_list):
        assert trim_whitespace(items.eq(idx).find('.modified').text()) == (
            'Updated %s' % trim_whitespace(datetime_filter(c.modified)))
def _trim_text(tokens):
    # Collapse internal whitespace in every token, then drop the single
    # leading space left on the first token and the single trailing space
    # left on the last one.
    for i, token in enumerate(tokens):
        token.contents = trim_whitespace(token.contents)
        if i == 0 and token.contents[0] == " ":  # first tag
            token.contents = token.contents[1:]
        elif i == len(tokens) - 1 and token.contents[-1] == " ":  # last tag
            token.contents = token.contents[:-1]
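Nearly every snippet in this section leans on trim_whitespace from django.utils.translation. Here is a minimal sketch of the behavior these snippets assume (trim_whitespace_sketch and _trim_re below are illustrative names, not the library's):

import re

# Hypothetical re-implementation for illustration only; the snippets in
# this section import the real helper from django.utils.translation.
_trim_re = re.compile(r'\s*\n\s*')

def trim_whitespace_sketch(s):
    # Strip both ends, then collapse every whitespace run that spans a
    # newline into a single space.
    return _trim_re.sub(' ', s.strip())

assert trim_whitespace_sketch('  Updated\n    Jan. 1, 2015  ') == 'Updated Jan. 1, 2015'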
def test_added_date(self):
    doc = pq(self.client.get(urlparams(self.url, sort='created')).content)
    for item in doc('.items .item'):
        item = pq(item)
        addon_id = item('.install').attr('data-addon')
        ts = Addon.objects.get(id=addon_id).created
        assert item('.updated').text() == (
            u'Added %s' % trim_whitespace(datetime_filter(ts)))
def test_updated_date(self):
    doc = pq(self.client.get(urlparams(self.url, sort='updated')).content)
    for item in doc('.items .item'):
        item = pq(item)
        addon_id = item('.install').attr('data-addon')
        ts = Addon.objects.get(id=addon_id).last_updated
        assert item('.updated').text() == (
            u'Updated %s' % translation.trim_whitespace(format_date(ts)))
def _get_system_information(self):
    # Each entry: [display name, regex-escaped name, match keywords].
    # The final element of each list is the fallback label.
    linux = [
        ["Debian-based (Debian, Ubuntu, Mint, Elementary OS, SteamOS)",
         r"Debian-based \(Debian\, Ubuntu\, Mint\, Elementary OS\, SteamOS\)",
         ["Debian", "Ubuntu", "Mint", "Elementary", "Steam"]],
        ["Arch-based (Arch, Manjaro)", r"Arch-based \(Arch\, Manjaro\)",
         ["Arch", "Manjaro"]],
        ["Red Hat-based (RedHat, Fedora, CentOS)",
         r"Red Hat-based \(RedHat\, Fedora\, CentOS\)",
         ["RedHat", "RHEL", "Fedora", "Cent"]],
        ["Gentoo-based (Gentoo, Chromium, Funtoo)",
         r"Gentoo-based \(Gentoo\, Chromium\, Funtoo\)",
         ["Gentoo", "Chromium", "Funtoo"]],
        ["SUSE-based", "SUSE-based", ["SUSE"]],
        ["Slackware-based", "Slackware-based", ["Slack"]],
        ["Mandriva-based", "Mandriva-based", ["Mageia"]],
        "Linux-other",
    ]
    windows = [
        ["Windows 10", r"Windows\ 10", ["10"]],
        ["Windows 8", r"Windows\ 8", ["8"]],
        ["Windows 7", r"Windows\ 7", ["7"]],
        ["Windows Vista", r"Windows\ Vista", ["vista", "VISTA"]],
        ["Windows XP", r"Windows\ XP", ["XP"]],
        "Windows-other",
    ]
    system_information = system_info.SystemInformations()
    distro_type = system_information.system
    check = 0
    if re.search("Windows", distro_type) is None:
        table = linux
    else:
        table = windows
    # Scan every entry except the fallback; first keyword hit wins.
    for entry in table[:-1]:
        for keyword in entry[2]:
            if re.search(keyword, distro_type) is not None:
                distro_type = [entry[0], entry[1]]
                check = 1
                break
        if check == 1:
            break
    if check == 0:
        distro_type = [table[-1], table[-1]]
    system_information.system = distro_type[0]
    system_specs = [system_information.cpu,
                    trim_whitespace(system_information.gpu[0]),
                    system_information.gpu[2],
                    distro_type[1]]
    return system_specs, system_information
def handle_endtag(self, tag):
    if self.in_translate:
        if len(self.inner_tags) > 0:
            tag = self.inner_tags.pop()
            self.data += "</%s>" % tag
            return
        if self.plural_form:
            messages = (
                trim_whitespace(self.data.strip()),
                trim_whitespace(self.plural_form)
            )
            func_name = u'ngettext'
        else:
            messages = trim_whitespace(self.data.strip())
            func_name = u'gettext'
        self.strings.append(
            (self.line, func_name, messages, self.comments)
        )
        self.in_translate = False
        self.data = ''
        self.comments = []
def render_token_list(self, tokens):
    result = []
    vars = []
    for token in tokens:
        if token.token_type == TOKEN_TEXT:
            result.append(token.contents.replace("%", "%%"))
        elif token.token_type == TOKEN_VAR:
            result.append("%%(%s)s" % token.contents)
            vars.append(token.contents)
    msg = "".join(result)
    if self.trimmed:
        msg = translation.trim_whitespace(msg)
    return msg, vars
def render_token_list(self, tokens):
    result = []
    vars = []
    for token in tokens:
        if token.token_type == TokenType.TEXT:
            result.append(token.contents.replace('%', '%%'))
        elif token.token_type == TokenType.VAR:
            result.append('%%(%s)s' % token.contents)
            vars.append(token.contents)
    msg = ''.join(result)
    if self.trimmed:
        msg = translation.trim_whitespace(msg)
    return msg, vars
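Both render_token_list variants compute the same message. For orientation, here is a self-contained sketch of what they do; Token, TokenType, and render_token_list_sketch are illustrative stand-ins, not Django's actual classes:

import re
from enum import Enum

class TokenType(Enum):
    TEXT = 0
    VAR = 1

class Token:
    def __init__(self, token_type, contents):
        self.token_type = token_type
        self.contents = contents

def render_token_list_sketch(tokens, trimmed=False):
    # TEXT tokens are copied with % escaped for gettext; VAR tokens become
    # %(name)s placeholders and their names are collected in `vars`.
    result, vars = [], []
    for token in tokens:
        if token.token_type == TokenType.TEXT:
            result.append(token.contents.replace('%', '%%'))
        elif token.token_type == TokenType.VAR:
            result.append('%%(%s)s' % token.contents)
            vars.append(token.contents)
    msg = ''.join(result)
    if trimmed:
        # Same normalization trim_whitespace applies (see the sketch above),
        # so msgids stay stable no matter how the template is indented.
        msg = re.sub(r'\s*\n\s*', ' ', msg.strip())
    return msg, vars

tokens = [Token(TokenType.TEXT, '\n    Hello '),
          Token(TokenType.VAR, 'name'),
          Token(TokenType.TEXT, ',\n    welcome back!\n')]
assert render_token_list_sketch(tokens, trimmed=True) == (
    'Hello %(name)s, welcome back!', ['name'])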
def _cpu(self):
    if SystemInformations._platform(self) != "Windows":
        # Grab the "model name" line from /proc/cpuinfo.
        with open("/proc/cpuinfo", 'r') as f:
            for line in f:
                if re.match("model name", str(line)):
                    cpuM = str(line)
    else:
        cpuM = subprocess.getoutput("wmic cpu get name")
        cpuM = re.sub("Name", "", cpuM)
    # Strip the label, the "CPU ..." suffix, and (R)/(TM) marks.
    cpuM = re.sub("model name *.: ", "", cpuM)
    cpuM = re.sub(" CPU.*", "", cpuM)
    cpuM = re.sub("(.(R)).", "", cpuM)
    cpuM = re.sub("(.(TM).)", "", cpuM)
    cpuM = trim_whitespace(cpuM)
    return cpuM
def test_created_not_updated(self):
    """Don't display the updated date but the created date for themes."""
    r = self.client.get(self.url)
    doc = pq(r.content)
    details = doc('.addon-info li')

    # There's no "Last Updated" entry.
    assert not any('Last Updated' in node.text_content()
                   for node in details)

    # But there's a "Created" entry.
    for detail in details:
        if detail.find('h3').text_content() == 'Created':
            created = detail.find('p').text_content()
            assert created == (
                trim_whitespace(datetime_filter(self.addon.created)))
            break  # Needed, or we fall through to the "else" clause.
    else:
        assert False, 'No "Created" entry found.'
def loc(s):
    """A noop function for strings that are not ready to be localized."""
    return trim_whitespace(s)
def join_tokens(tokens, trim=False):
    message = ''.join(tokens)
    if trim:
        message = trim_whitespace(message)
    return message
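Usage is straightforward, assuming trim_whitespace behaves as in the sketch above (strip, then collapse newline-spanning whitespace):

# Concatenation only:
assert join_tokens(['  one\n', '  two  ']) == '  one\n  two  '
# With trim=True the joined message is also normalized:
assert join_tokens(['  one\n', '  two  '], trim=True) == 'one two'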
from __future__ import unicode_literals
def templatize(src, origin=None):
    """
    Turns a Django template into something that is understood by xgettext. It
    does so by translating the Django translation tags into standard gettext
    function invocations.
    """
    from django.conf import settings
    from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
                                 TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
    src = force_text(src, settings.FILE_CHARSET)
    out = StringIO()
    message_context = None
    intrans = False
    inplural = False
    trimmed = False
    singular = []
    plural = []
    incomment = False
    comment = []
    lineno_comment_map = {}
    comment_lineno_cache = None

    for t in Lexer(src, origin).tokenize():
        if incomment:
            if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
                content = ''.join(comment)
                translators_comment_start = None
                for lineno, line in enumerate(content.splitlines(True)):
                    if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                        translators_comment_start = lineno
                for lineno, line in enumerate(content.splitlines(True)):
                    if translators_comment_start is not None and lineno >= translators_comment_start:
                        out.write(' # %s' % line)
                    else:
                        out.write(' #\n')
                incomment = False
                comment = []
            else:
                comment.append(t.contents)
        elif intrans:
            if t.token_type == TOKEN_BLOCK:
                endbmatch = endblock_re.match(t.contents)
                pluralmatch = plural_re.match(t.contents)
                if endbmatch:
                    if trimmed:
                        singular = trim_whitespace(''.join(singular))
                    else:
                        singular = ''.join(singular)
                    if inplural:
                        if trimmed:
                            plural = trim_whitespace(''.join(plural))
                        else:
                            plural = ''.join(plural)
                        if message_context:
                            out.write(' npgettext(%r, %r, %r, count) ' % (message_context, singular, plural))
                        else:
                            out.write(' ngettext(%r, %r, count) ' % (singular, plural))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                        for part in plural:
                            out.write(blankout(part, 'P'))
                    else:
                        if message_context:
                            out.write(' pgettext(%r, %r) ' % (message_context, singular))
                        else:
                            out.write(' gettext(%r) ' % singular)
                        for part in singular:
                            out.write(blankout(part, 'S'))
                    message_context = None
                    intrans = False
                    inplural = False
                    singular = []
                    plural = []
                elif pluralmatch:
                    inplural = True
                else:
                    filemsg = ''
                    if origin:
                        filemsg = 'file %s, ' % origin
                    raise SyntaxError(
                        "Translation blocks must not include other block tags: "
                        "%s (%sline %d)" % (t.contents, filemsg, t.lineno))
            elif t.token_type == TOKEN_VAR:
                if inplural:
                    plural.append('%%(%s)s' % t.contents)
                else:
                    singular.append('%%(%s)s' % t.contents)
            elif t.token_type == TOKEN_TEXT:
                contents = one_percent_re.sub('%%', t.contents)
                if inplural:
                    plural.append(contents)
                else:
                    singular.append(contents)
        else:
            # Handle comment tokens (`{# ... #}`) plus other constructs on
            # the same line:
            if comment_lineno_cache is not None:
                cur_lineno = t.lineno + t.contents.count('\n')
                if comment_lineno_cache == cur_lineno:
                    if t.token_type != TOKEN_COMMENT:
                        for c in lineno_comment_map[comment_lineno_cache]:
                            filemsg = ''
                            if origin:
                                filemsg = 'file %s, ' % origin
                            warn_msg = (
                                "The translator-targeted comment '%s' "
                                "(%sline %d) was ignored, because it wasn't the last item "
                                "on the line.") % (c, filemsg, comment_lineno_cache)
                            warnings.warn(warn_msg, TranslatorCommentWarning)
                        lineno_comment_map[comment_lineno_cache] = []
                else:
                    out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
                comment_lineno_cache = None

            if t.token_type == TOKEN_BLOCK:
                imatch = inline_re.match(t.contents)
                bmatch = block_re.match(t.contents)
                cmatches = constant_re.findall(t.contents)
                if imatch:
                    g = imatch.group(1)
                    if g[0] == '"':
                        g = g.strip('"')
                    elif g[0] == "'":
                        g = g.strip("'")
                    g = one_percent_re.sub('%%', g)
                    if imatch.group(2):
                        # A context is provided
                        context_match = context_re.match(imatch.group(2))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                        out.write(' pgettext(%r, %r) ' % (message_context, g))
                        message_context = None
                    else:
                        out.write(' gettext(%r) ' % g)
                elif bmatch:
                    for fmatch in constant_re.findall(t.contents):
                        out.write(' _(%s) ' % fmatch)
                    if bmatch.group(1):
                        # A context is provided
                        context_match = context_re.match(bmatch.group(1))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                    intrans = True
                    inplural = False
                    trimmed = 'trimmed' in t.split_contents()
                    singular = []
                    plural = []
                elif cmatches:
                    for cmatch in cmatches:
                        out.write(' _(%s) ' % cmatch)
                elif t.contents == 'comment':
                    incomment = True
                else:
                    out.write(blankout(t.contents, 'B'))
            elif t.token_type == TOKEN_VAR:
                parts = t.contents.split('|')
                cmatch = constant_re.match(parts[0])
                if cmatch:
                    out.write(' _(%s) ' % cmatch.group(1))
                for p in parts[1:]:
                    if p.find(':_(') >= 0:
                        out.write(' %s ' % p.split(':', 1)[1])
                    else:
                        out.write(blankout(p, 'F'))
            elif t.token_type == TOKEN_COMMENT:
                if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                    lineno_comment_map.setdefault(t.lineno, []).append(t.contents)
                    comment_lineno_cache = t.lineno
            else:
                out.write(blankout(t.contents, 'X'))
    return force_str(out.getvalue())
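For orientation, a rough before/after for a trimmed block, assuming the module's usual tag regexes (inline_re, block_re, and friends) are in scope; the exact spacing of the emitted call may differ:

# Hypothetical input template:
src = (
    '{% blocktrans trimmed %}\n'
    '    Hello {{ name }}!\n'
    '{% endblocktrans %}'
)
# templatize(src) should produce roughly:
#     " gettext('Hello %(name)s!') "
# with the remaining template text blanked out (via blankout) so that
# xgettext reports line numbers matching the original template.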
def _upload_data(self, game_info):
    try:
        login = ""
        while len(login) == 0:
            username = input("User name:")
            password = getpass.getpass("Password:")
            url = "http://www.opengamebenchmarks.org/accounts/login/"
            client = requests.session()
            csrf = client.get(url).cookies['csrftoken']
            login_data = dict(username=username, password=password,
                              csrfmiddlewaretoken=csrf)
            client.post(url, data=login_data, headers=dict(Referer=url))
            url = "http://www.opengamebenchmarks.org/accounts/profile/"
            if platform.system() == "Windows":
                parser = "html.parser"
            else:
                parser = "lxml"
            profile = bs4.BeautifulSoup(client.get(url).content, parser)
            check = profile.find("table")
            if check is not None:
                login = "ok"  # any non-empty string exits the retry loop

        system = Upload._get_system_information("")
        system_specs = system[0]
        system_id = ""

        # Checks whether a similar system is already registered and
        # returns its link id.
        def _system_check(specs):
            link = ""
            url = "http://www.opengamebenchmarks.org/accounts/profile/"
            info = bs4.BeautifulSoup(client.get(url).content, parser)
            tr = info.find_all("tr")
            for tr in tr:
                count = 0
                for td in tr.findAll("td", {"class": ""}):
                    if re.search(system_specs[0], str(td)) is not None:
                        count = count + 1
                    if re.search(system_specs[1], str(td)) is not None:
                        count = count + 1
                    if re.search(system_specs[2], str(td)) is not None:
                        count = count + 1
                    if re.search(system_specs[3], str(td)) is not None:
                        count = count + 1
                    if count == 4:
                        link = td.find("a", {"class": "nounderline label label-xs label-danger"})
                        link = str(link)
                if len(link) != 0:
                    break
            return link

        link = _system_check(system_specs)
        if len(link) != 0:
            system_id = re.sub("[^0-9]", "", link)
            url = "http://www.opengamebenchmarks.org/system_edit/" + system_id
            page = bs4.BeautifulSoup(client.get(url).content, parser)
            sys_name = page.find_all("h2")
            sys_name = trim_whitespace(re.sub("Edit system:", "", str(sys_name)))
            sys_name = re.sub(r"\<\/h2\>\]", "", sys_name)
            sys_name = re.sub(r"\[\<h2\>", "", sys_name)
            sys_name = trim_whitespace(sys_name)
            csrf = client.get(url).cookies['csrftoken']
            data = dict(csrfmiddlewaretoken=csrf,
                        descriptive_name=sys_name,
                        cpu_model=system[1].cpu,
                        gpu_model=trim_whitespace(system[1].gpu[0]),
                        dual_gpu="None",
                        resolution=system[1].resolution,
                        driver=system[1].gpu[2],
                        operating_system=system[1].system,
                        desktop_environment=system[1].desktop_env,
                        kernel=system[1].kernel_version,
                        gpu_driver_version=str(system[1].gpu[1]),
                        additional_details="VRAM: " + str(system[1].gpu[3]) +
                                           " RAM: " + str(system[1].memory))
            client.post(url, data=data, headers=dict(Referer=url))
        else:
            print("\n")
            system_name = input("Insert a name for your system:")
            url = "http://www.opengamebenchmarks.org/system_add/"
            csrf = client.get(url).cookies['csrftoken']
            data = dict(csrfmiddlewaretoken=csrf,
                        descriptive_name=system_name,
                        cpu_model=system[1].cpu,
                        gpu_model=trim_whitespace(system[1].gpu[0]),
                        dual_gpu="None",
                        resolution=system[1].resolution,
                        driver=system[1].gpu[2],
                        operating_system=system[1].system,
                        desktop_environment=system[1].desktop_env,
                        kernel=system[1].kernel_version,
                        gpu_driver_version=str(system[1].gpu[1]),
                        additional_details="VRAM: " + str(system[1].gpu[3]) +
                                           " RAM: " + str(system[1].memory))
            client.post(url, data=data, headers=dict(Referer=url))
            link = _system_check(system_specs)
            system_id = re.sub("[^0-9]", "", link)

        # Get the game name to look up its id in OpenGameBenchmarks.
        conn = sqlite3.connect('ogbatdb.db')
        c = conn.cursor()
        # Special handling for when the user chose the voglperf filepath
        # game start method.
        if game_info[1] != "ns":
            c.execute("SELECT stdb_game,name_game FROM game WHERE stdb_game=?",
                      [game_info[1]])
            s = 0
        else:
            c.execute("SELECT stdb_game,name_game FROM game")
        g = c.fetchall()
        if game_info[1] == "ns":
            print("Which game did you benchmark?")
            i = 0
            for a in range(0, len(g)):
                print(str(i) + ") " + g[a][1])
                i = i + 1
            s = input("Choice: ")
            s = int(s)
        print("\n")
        benchmark_notes = input("Additional information about the benchmark (optional):")
        preset = ["Ultra", "Very High", "High", "Medium", "Low", "n.a."]
        print("\nWhat game preset was used?")
        i = 0
        for a in range(0, len(preset)):
            print(str(i) + ") " + preset[a])
            i = i + 1
        p = input("Choice: ")
        p = int(p)

        url = "http://www.opengamebenchmarks.org/benchmark_add/"
        csrf = client.get(url).cookies['csrftoken']
        info = bs4.BeautifulSoup(client.get(url).content, parser)
        select = info.find_all("select", {"id": "id_game"})
        replace = ['<option value.*">', r"<\/option>", "b'",
                   '<option value="', '".*', "b'"]
        for select in select:
            for option in select.findAll("option"):
                game_option_name = str(option.encode(encoding='ascii', errors='ignore'))
                for a in range(0, 3):
                    game_option_name = re.sub(replace[a], "", game_option_name)
                game_option_name = re.sub("\\\\'s", "'s", game_option_name)
                game_option_name = re.sub("\\\\'S", "'S", game_option_name)
                game_option_name = game_option_name[0:len(game_option_name) - 1]
                game_option = str(option.encode(encoding='ascii', errors='ignore'))
                for a in range(0, 3):
                    game_option = trim_whitespace(re.sub(replace[a + 3], "", game_option))
                # s is 0 when the game was found directly via stdb_game.
                if re.fullmatch(g[s][1], game_option_name) is not None:
                    break
        file = open(game_info[0], 'rb')
        data = dict(csrfmiddlewaretoken=csrf,
                    game=game_option,
                    user_system=system_id,
                    frames_file={game_info[0]: file},
                    game_quality_preset=preset[p],
                    additional_notes=benchmark_notes)
        client.post(url, data=data, headers=dict(Referer=url))
        client.close()
    except KeyboardInterrupt:
        pass
    except UnboundLocalError:
        print("Some variable was left empty and produced an error")
    except Exception as e:
        print("An error occurred: " + str(e))
def find_trans_nodes(tokens, output):
    start_tag = None
    buf = []
    plural_buf = []
    in_plural = False
    group = DEFAULT_TRANSLATION_GROUP
    context = None

    for i, (token_type, token) in enumerate(tokens):
        parts = list(smart_split(token))
        if "endblocktrans" in parts:
            buf_joined = "".join(buf)
            plural_buf_joined = "".join(plural_buf)
            if "trimmed" in list(smart_split(start_tag)):
                buf_joined = buf_joined.strip()
                plural_buf_joined = plural_buf_joined.strip()
            output.append((start_tag, buf_joined, plural_buf_joined,
                           context or u"", group))
            start_tag = None
            buf = []
            plural_buf = []
            in_plural = False
            context = ""
            group = DEFAULT_TRANSLATION_GROUP
        elif "blocktrans" in parts:
            start_tag = token
            try:
                context = _strip_quotes(parts[parts.index("context") + 1])
            except ValueError:
                context = None
            try:
                group = _strip_quotes(parts[parts.index("group") + 1])
            except ValueError:
                group = DEFAULT_TRANSLATION_GROUP
        elif "{%plural%}" in token.replace(" ", ""):
            in_plural = True
        elif start_tag:
            # Convert django {{ vars }} into gettext friendly %(vars)s
            part = TEMPLATE_VAR_RE.sub(r'%(\1)s', token)
            # Escape lone percentage signs
            part = re.sub(r'%(?!\()', u'%%', part)
            start_tag_parts = list(smart_split(start_tag))
            if start_tag_parts[1] == "blocktrans" and "trimmed" in start_tag_parts:
                part = trim_whitespace(part)
            if not in_plural:
                buf.append(part)
            else:
                plural_buf.append(part)
        elif "trans" in token:
            match = re.compile(TRANS_TAG_REGEX[0]).match(token)
            if match:
                group = (_strip_quotes(match.group('group')).strip()
                         if match.group('group') else DEFAULT_TRANSLATION_GROUP)
                hint = _strip_quotes(match.group('hint') or u"")
                output.append(
                    (match.group(0), _strip_quotes(match.group('text')),
                     u"", hint, group)
                )
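TEMPLATE_VAR_RE and TRANS_TAG_REGEX are defined elsewhere in that module; a hypothetical pattern like the one below reproduces the two substitutions applied to block contents:

import re

# Hypothetical stand-in for the module's TEMPLATE_VAR_RE (the real pattern
# may also handle dotted variables and filters).
TEMPLATE_VAR_RE = re.compile(r'{{\s*(\w+)\s*}}')

part = '50% off for {{ user }} today'
part = TEMPLATE_VAR_RE.sub(r'%(\1)s', part)  # {{ user }} -> %(user)s
part = re.sub(r'%(?!\()', '%%', part)        # escape lone % signs for gettext
assert part == '50%% off for %(user)s today'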
def _gpu(self):
    if SystemInformations._platform(self) != "Windows":
        vendor = str(subprocess.getoutput("grep 'Creating default' /var/log/Xorg.0.log"))
        if re.findall("NVIDIA", vendor):
            vendorMemory = vendor = "NVIDIA"
        elif re.findall("Advanced", vendor):
            vendorMemory = vendor = "AMD"
        elif re.findall("intel", vendor):
            vendor = "Intel"
            vendorMemory = vendor.lower()
        gpuMem = str(subprocess.getoutput("grep '" + vendorMemory + "(0): Memory' /var/log/Xorg.0.log"))
        gpuMem = re.sub(r"\[ .*]", "", gpuMem)
        gpuMem = re.sub(r"\(.*\):", "", gpuMem)
        gpuMem = trim_whitespace(re.sub("Memory: ", "", gpuMem))
        gpuMem = re.sub("k.*", "", gpuMem)
        if len(gpuMem) == 0:
            gpuMem = "0"
        gpuMem = str(int(gpuMem) / 1024) + "MB"
        try:
            '''
            ##Old vendor discovery.
            vendor = str(subprocess.getoutput("lspci -v | grep 'VGA compatible controller:'"))
            vendor = re.sub("0[0-9].[0-9].\.[0-9] VGA compatible controller*: ", "", vendor)
            vendor = vendor[0:8]
            vendor = re.sub(" .*", "", vendor)
            '''
            if vendor == "Intel":
                gpuCard = str(subprocess.getoutput("grep '(--) intel(0): Integrated' /var/log/Xorg.0.log"))
                intelReplace = ["Integrated Graphics Chipset: ", r"\(R\)",
                                r"\[ [0-9][0-9].[0-9][0-9][0-9]\] \(--\) intel\(0\): ",
                                "OpenGL version string:", " [0-9].[0-9]",
                                "Video memory:", " "]
                for a in range(0, 3):
                    gpuCard = re.sub(intelReplace[a], "", gpuCard)
                if subprocess.getoutput("command -v glxinfo >/dev/null 2>&1 || { return 0; }") != 0:
                    gpuDriver = str(subprocess.getoutput("glxinfo | grep 'OpenGL version string'"))
                    for a in range(0, 2):
                        gpuDriver = re.sub(intelReplace[a + 3], "", gpuDriver)
                    gpuMem = str(subprocess.getoutput("glxinfo | grep 'Video memory:'"))
                    for a in range(0, 2):
                        gpuMem = re.sub(intelReplace[a + 5], "", gpuMem)
                else:
                    gpuDriver = "Unknown"
                driverType = "Opensource"
            elif vendor == "NVIDIA":
                gpuCard = str(subprocess.getoutput("grep 'NVIDIA GPU GeF' /var/log/Xorg.0.log"))
                gpuCard = gpuCard[40:56]
                if subprocess.getoutput("command -v nvidia-smi >/dev/null 2>&1 || { return 0; }") != 0:
                    gpu = str(subprocess.getoutput("nvidia-smi | grep Driver"))
                    gpuDriver = gpu[12:20]
                    gpuDriver = trim_whitespace(gpuDriver)
                    '''
                    ##Old nvidia card discovery.
                    gpu = str(subprocess.getoutput("nvidia-smi | grep GeForce"))
                    gpuCard = gpu[5:27]
                    gpuCard = "NVidia " + trim_whitespace(gpuCard)
                    '''
                    driverType = "Proprietary"
                else:
                    gpuDriver = ""
                    driverType = "Opensource"
                gpuCard = "NVidia " + gpuCard
            else:
                if subprocess.getoutput("command -v fglrxinfo >/dev/null 2>&1 || { return 0; }") != 0:
                    gpu = str(subprocess.getoutput("fglrxinfo | grep Radeon"))
                    gpuCard = gpu[28]
                    gpuCard = trim_whitespace(gpuCard)
                    driverType = "Proprietary"
                    gpu = str(subprocess.getoutput("fglrxinfo | grep version"))
                    gpuDriver = gpu[23:37]
                    gpuDriver = re.sub("[a-z].*[A-Z].*", "", gpuDriver)
                    gpuDriver = trim_whitespace(gpuDriver)
                else:
                    gpu = str(subprocess.getoutput("lspci -v | grep Radeon"))
                    gpu = re.sub(r"^(\[Radeon .*\] )", "", gpu)
                    gpuCard = re.sub(r"$\[\]", "", gpu)
                    driverType = "Opensource"
                    gpuDriver = ""
                gpuCard = "AMD " + gpuCard
        except:
            if vendor != "AMD":
                vendor = "NVIDIA"
            else:
                vendor = "AMD"
            print("Error while executing a gpu detection function of " + vendor)
    else:
        gpuCard = re.sub("Name", "", subprocess.getoutput("wmic path win32_VideoController get name"))
        gpuCard = re.sub("\n", "", gpuCard)
        gpuCard = trim_whitespace(gpuCard)
        gpuMem = re.sub("AdapterRAM", "", subprocess.getoutput("wmic path win32_VideoController get AdapterRAM"))
        gpuMem = str(int(gpuMem) / 1048576) + "MB"
        gpuDriver = re.sub("DriverVersion", "", subprocess.getoutput("wmic path win32_VideoController get DriverVersion"))
        gpuDriver = re.sub("\n", "", gpuDriver)
        gpuCard = trim_whitespace(gpuCard)
        driverType = "Proprietary"
    return gpuCard, gpuDriver, driverType, gpuMem