def mail(app_conf, project_root='.'):
    smtp_conf = {
        k[5:]: v for k, v in app_conf.__dict__.items() if k.startswith('smtp_')
    }
    mailer = SMTPMailer(**smtp_conf) if smtp_conf else DummyMailer()
    emails = {}
    emails_dir = project_root+'/emails/'
    i = len(emails_dir)
    for spt in find_files(emails_dir, '*.spt'):
        base_name = spt[i:-4]
        emails[base_name] = compile_email_spt(spt)

    def log_email(message):
        message = dict(message)
        html, text = message.pop('html'), message.pop('text')
        print('\n', ' ', '='*26, 'BEGIN EMAIL', '='*26)
        print(json.dumps(message))
        print('[---] text/html')
        print(html)
        print('[---] text/plain')
        print(text)
        print(' ', '='*27, 'END EMAIL', '='*27)

    log_email = log_email if app_conf.log_emails else lambda *a, **kw: None

    return {'emails': emails, 'log_email': log_email, 'mailer': mailer}
def mail(app_conf, project_root='.'):
    if not app_conf:
        return
    smtp_conf = {
        k[5:]: v for k, v in app_conf.__dict__.items() if k.startswith('smtp_')
    }
    if smtp_conf:
        smtp_conf.setdefault('timeout', app_conf.socket_timeout)
    mailer = SMTPMailer(**smtp_conf) if smtp_conf else DummyMailer()
    emails = {}
    emails_dir = project_root + '/emails/'
    i = len(emails_dir)
    for spt in find_files(emails_dir, '*.spt'):
        base_name = spt[i:-4]
        emails[base_name] = compile_email_spt(spt)

    def log_email(message):
        message = dict(message)
        html, text = message.pop('html'), message.pop('text')
        print('\n', ' ', '=' * 26, 'BEGIN EMAIL', '=' * 26)
        print(json.dumps(message))
        print('[---] text/html')
        print(html)
        print('[---] text/plain')
        print(text)
        print(' ', '=' * 27, 'END EMAIL', '=' * 27)

    log_email = log_email if app_conf.log_emails else lambda *a, **kw: None

    return {'emails': emails, 'log_email': log_email, 'mailer': mailer}
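# A minimal standalone sketch (not from the codebase) of the config-extraction
# idiom above: attributes prefixed with 'smtp_' lose the prefix (len('smtp_')
# == 5, hence k[5:]) and become keyword arguments for the mailer. The AppConf
# class here is a hypothetical stand-in for the real config object.
class AppConf:
    def __init__(self, **kw):
        self.__dict__.update(kw)

conf = AppConf(smtp_host='localhost', smtp_port=25, log_emails=True)
smtp_conf = {k[5:]: v for k, v in conf.__dict__.items() if k.startswith('smtp_')}
assert smtp_conf == {'host': 'localhost', 'port': 25}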
def setUpClass(cls):
    super(BrowseTestHarness, cls).setUpClass()
    i = len(cls.client.www_root)

    def f(spt):
        if spt[spt.rfind('/') + 1:].startswith('index.'):
            return spt[i:spt.rfind('/') + 1]
        return spt[i:-4]

    urls = OrderedDict()
    for url in sorted(map(f, find_files(cls.client.www_root, '*.spt'))):
        url = url.replace('/%username/membership/', '/team/membership/') \
                 .replace('/team/membership/%action', '/team/membership/join') \
                 .replace('/%username/%action', '/%username/subscribe') \
                 .replace('/for/%name/', '/for/wonderland/') \
                 .replace('/for/wonderland/%action', '/for/wonderland/leave') \
                 .replace('/%platform', '/github') \
                 .replace('/%user_name/', '/liberapay/') \
                 .replace('/%redirect_to', '/giving') \
                 .replace('/%back_to', '/') \
                 .replace('/payday/%id', '/payday/') \
                 .replace('/%type', '/receiving.js')
        urls[url.replace('/%username/', '/david/')] = None
        urls[url.replace('/%username/', '/team/')] = None
    cls.urls = list(urls)
def setUpClass(cls):
    super(BrowseTestHarness, cls).setUpClass()
    i = len(cls.client.www_root)

    def f(spt):
        if spt[spt.rfind('/')+1:].startswith('index.'):
            return spt[i:spt.rfind('/')+1]
        return spt[i:-4]

    urls = OrderedDict()
    for url in sorted(map(f, find_files(cls.client.www_root, '*.spt'))):
        url = url.replace('/%username/membership/', '/team/membership/') \
                 .replace('/team/membership/%action', '/team/membership/join') \
                 .replace('/%username/news/%action', '/%username/news/subscribe') \
                 .replace('/for/%name/', '/for/wonderland/') \
                 .replace('/for/wonderland/%action', '/for/wonderland/leave') \
                 .replace('/%platform', '/github') \
                 .replace('/%user_name/', '/liberapay/') \
                 .replace('/%redirect_to', '/giving') \
                 .replace('/%back_to', '/') \
                 .replace('/%provider', '/stripe') \
                 .replace('/%payment_id', '/') \
                 .replace('/%payin_id', '/') \
                 .replace('/payday/%id', '/payday/') \
                 .replace('/%type', '/receiving.js')
        urls[url.replace('/%username/', '/david/')] = None
        urls[url.replace('/%username/', '/team/')] = None
    cls.urls = list(urls)
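# A standalone sketch (paths invented) of the f() helper above: an index
# simplate maps to its directory URL, any other simplate just loses the
# '.spt' extension. The www_root value is hypothetical.
www_root = '/path/to/www'
i = len(www_root)

def f(spt):
    if spt[spt.rfind('/')+1:].startswith('index.'):
        return spt[i:spt.rfind('/')+1]
    return spt[i:-4]

assert f('/path/to/www/about/index.html.spt') == '/about/'
assert f('/path/to/www/about/faq.html.spt') == '/about/faq.html'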
def clean_assets(www_root, older_than=None):
    for spt in find_files(www_root+'/assets/', '*.spt'):
        try:
            path = spt[:-4]
            if older_than and os.stat(path).st_mtime > older_than:
                continue
            os.unlink(path)
        except OSError:
            # The compiled asset may not exist. Catch OSError specifically:
            # a bare `except` would also swallow KeyboardInterrupt et al.
            pass
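# A standalone sketch of the mtime cutoff used above: files modified after
# `older_than` (e.g. the start of the current build) are kept, older ones are
# deleted. The scratch directory and file name are invented for the demo.
import os, tempfile, time

with tempfile.TemporaryDirectory() as d:
    path = os.path.join(d, 'foo.css')
    open(path, 'w').close()
    cutoff = time.time() - 60       # pretend the build started a minute ago
    if os.stat(path).st_mtime > cutoff:
        print('keeping', path)      # freshly compiled, skip deletion
    else:
        os.unlink(path)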
def compile_assets(website):
    cleanup = []
    for spt in find_files(website.www_root+'/assets/', '*.spt'):
        filepath = spt[:-4]  # /path/to/www/assets/foo.css
        if not os.path.exists(filepath):
            cleanup.append(filepath)
        dispatch_result = DispatchResult(DispatchStatus.okay, spt, {}, "Found.", {}, True)
        state = dict(dispatch_result=dispatch_result, response=Response())
        state['state'] = state
        content = resources.get(website, spt).respond(state).body
        if not isinstance(content, bytes):
            content = content.encode('utf8')
        tmpfd, tmpfpath = mkstemp(dir='.')
        os.write(tmpfd, content)
        os.close(tmpfd)
        os.rename(tmpfpath, filepath)
    atexit.register(lambda: rm_f(*cleanup))
def compile_assets(website):
    cleanup = []
    for spt in find_files(website.www_root+'/assets/', '*.spt'):
        filepath = spt[:-4]  # /path/to/www/assets/foo.css
        if not os.path.exists(filepath):
            cleanup.append(filepath)
        dispatch_result = DispatchResult(DispatchStatus.okay, spt, {}, "Found.", {}, True)
        state = dict(dispatch_result=dispatch_result, response=Response())
        state['state'] = state
        content = resources.get(website.request_processor, spt).render(state).body
        if not isinstance(content, bytes):
            content = content.encode('utf8')
        tmpfd, tmpfpath = mkstemp(dir='.')
        os.write(tmpfd, content)
        os.close(tmpfd)
        os.rename(tmpfpath, filepath)
    if website.env.clean_assets:
        atexit.register(lambda: rm_f(*cleanup))
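# A self-contained sketch of the write-then-rename idiom used above: writing
# to a temp file and renaming it over the target makes the final file appear
# atomically, so a concurrent reader never sees a half-written asset. This
# helper is an illustration, not part of the codebase.
import os
from tempfile import mkstemp

def atomic_write(filepath, content):
    tmpfd, tmpfpath = mkstemp(dir=os.path.dirname(filepath) or '.')
    try:
        os.write(tmpfd, content)
    finally:
        os.close(tmpfd)
    os.rename(tmpfpath, filepath)  # atomic on POSIX within one filesystem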
def test_read_only_sessions_are_not_admin_sessions(self):
    alice = self.make_participant('alice', privileges=1)
    alice.session = alice.start_session(suffix='.ro')
    i = len(self.client.www_root)

    def f(spt):
        if spt[spt.rfind('/') + 1:].startswith('index.'):
            return spt[i:spt.rfind('/') + 1]
        return spt[i:-4]

    for url in sorted(map(f, find_files(self.client.www_root + '/admin', '*.spt'))):
        r = self.client.GxT(url, auth_as=alice)
        assert r.code == 403, r.text
    self.make_participant('bob')
    r = self.client.GxT('/bob/admin', auth_as=alice)
    assert r.code == 403, r.text
    r = self.client.GxT('/bob/giving/', auth_as=alice)
    assert r.code == 403, r.text
def compile_assets(website):
    client = Client(website.www_root, website.project_root)
    client._website = website
    for spt in find_files(website.www_root+'/assets/', '*.spt'):
        filepath = spt[:-4]                      # /path/to/www/assets/foo.css
        urlpath = spt[spt.rfind('/assets/'):-4]  # /assets/foo.css
        try:
            # Remove any existing compiled asset, so we can access the dynamic
            # one instead (Aspen prefers foo.css over foo.css.spt).
            os.unlink(filepath)
        except OSError:
            # The file may simply not exist; don't use a bare `except` here.
            pass
        content = client.GET(urlpath).body
        tmpfd, tmpfpath = mkstemp(dir='.')
        os.write(tmpfd, content)
        os.close(tmpfd)
        os.rename(tmpfpath, filepath)
    compilation_time = time()
    atexit.register(lambda: clean_assets(website.www_root, compilation_time))
def mail(app_conf, env, project_root='.'):
    if not app_conf:
        return
    smtp_conf = {
        k[5:]: v for k, v in app_conf.__dict__.items() if k.startswith('smtp_')
    }
    if smtp_conf:
        smtp_conf.setdefault('timeout', app_conf.socket_timeout)
    if getattr(app_conf, 'ses_region', None):
        mailer = AmazonSESMailer(
            env.aws_access_key_id, env.aws_secret_access_key,
            region_name=app_conf.ses_region
        )
    elif smtp_conf:
        mailer = SMTPMailer(**smtp_conf)
    else:
        mailer = ToConsoleMailer()
    emails = {}
    emails_dir = project_root + '/emails/'
    i = len(emails_dir)
    for spt in find_files(emails_dir, '*.spt'):
        base_name = spt[i:-4]
        emails[base_name] = compile_email_spt(spt)

    def log_email(message):
        message = dict(message)
        html, text = message.pop('html'), message.pop('text')
        print('\n', ' ', '=' * 26, 'BEGIN EMAIL', '=' * 26)
        print(json.dumps(message))
        print('[---] text/html')
        print(html)
        print('[---] text/plain')
        print(text)
        print(' ', '=' * 27, 'END EMAIL', '=' * 27)

    if app_conf.log_emails and not isinstance(mailer, ToConsoleMailer):
        log_email = log_email
    else:
        log_email = lambda *a, **kw: None

    return {'emails': emails, 'log_email': log_email, 'mailer': mailer}
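# ToConsoleMailer is imported from elsewhere in the codebase; the class below
# is only a hypothetical sketch of the kind of development fallback it could
# be, shown to illustrate why log_email is disabled above when it's in use:
# the mailer already prints each message, so logging it would duplicate output.
class ConsoleMailerSketch:
    def send(self, **message):
        print('Would send email:', message)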
def setUpClass(cls):
    super(TestPages, cls).setUpClass()
    i = len(cls.client.www_root)

    def f(spt):
        if spt[spt.rfind('/')+1:].startswith('index.'):
            return spt[i:spt.rfind('/')+1]
        return spt[i:-4]

    urls = OrderedDict()
    for url in sorted(map(f, find_files(cls.client.www_root, '*.spt'))):
        url = url.replace('/%username/membership/', '/team/membership/') \
                 .replace('/for/%name/', '/for/wonderland/') \
                 .replace('/%platform/', '/github/') \
                 .replace('/%user_name/', '/liberapay/') \
                 .replace('/%action', '/leave') \
                 .replace('/%redirect_to', '/giving') \
                 .replace('/%back_to', '/Li4=') \
                 .replace('/%type', '/receiving.js') \
                 .replace('/%endpoint', '/public')
        urls[url.replace('/%username/', '/david/')] = None
        urls[url.replace('/%username/', '/team/')] = None
    cls.urls = list(urls)
def mail(app_conf, env, project_root='.'):
    if not app_conf:
        return
    smtp_conf = {
        k[5:]: v for k, v in app_conf.__dict__.items() if k.startswith('smtp_')
    }
    if smtp_conf:
        smtp_conf.setdefault('timeout', app_conf.socket_timeout)
    if getattr(app_conf, 'ses_region', None):
        mailer = AmazonSESMailer(
            env.aws_access_key_id, env.aws_secret_access_key,
            region_name=app_conf.ses_region
        )
    elif smtp_conf:
        mailer = SMTPMailer(**smtp_conf)
    else:
        mailer = DummyMailer()
    emails = {}
    emails_dir = project_root+'/emails/'
    i = len(emails_dir)
    for spt in find_files(emails_dir, '*.spt'):
        base_name = spt[i:-4]
        emails[base_name] = compile_email_spt(spt)

    def log_email(message):
        message = dict(message)
        html, text = message.pop('html'), message.pop('text')
        print('\n', ' ', '='*26, 'BEGIN EMAIL', '='*26)
        print(json.dumps(message))
        print('[---] text/html')
        print(html)
        print('[---] text/plain')
        print(text)
        print(' ', '='*27, 'END EMAIL', '='*27)

    log_email = log_email if app_conf.log_emails else lambda *a, **kw: None

    return {'emails': emails, 'log_email': log_email, 'mailer': mailer}
def mail(app_conf, project_root='.'):
    mailer = mandrill.Mandrill(app_conf.mandrill_key)
    emails = {}
    emails_dir = project_root+'/emails/'
    i = len(emails_dir)
    for spt in find_files(emails_dir, '*.spt'):
        base_name = spt[i:-4]
        emails[base_name] = compile_email_spt(spt)

    def log_email(message):
        message = dict(message)
        html, text = message.pop('html'), message.pop('text')
        print('\n', ' ', '='*26, 'BEGIN EMAIL', '='*26)
        print(json.dumps(message))
        print('[---] text/html')
        print(html)
        print('[---] text/plain')
        print(text)
        print(' ', '='*27, 'END EMAIL', '='*27)

    log_email = log_email if app_conf.log_emails else lambda *a, **kw: None

    return {'emails': emails, 'log_email': log_email, 'mailer': mailer}
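# A standalone sketch (paths invented) of the base_name computation used by
# the mail() functions above: the emails dict is keyed by the template path
# relative to emails/, with the '.spt' extension stripped.
emails_dir = './emails/'
i = len(emails_dir)
assert './emails/verification.spt'[i:-4] == 'verification'
assert './emails/branch/foo.spt'[i:-4] == 'branch/foo'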
def load_i18n(canonical_host, canonical_scheme, project_root, tell_sentry):
    # Load the locales
    localeDir = os.path.join(project_root, 'i18n', 'core')
    locales = LOCALES
    source_strings = {}
    for file in os.listdir(localeDir):
        try:
            parts = file.split(".")
            if not (len(parts) == 2 and parts[1] == "po"):
                continue
            lang = parts[0]
            with open(os.path.join(localeDir, file)) as f:
                l = locales[lang.lower()] = Locale(lang)
                c = l.catalog = read_po(f)
                share_source_strings(c, source_strings)
                c.plural_func = get_function_from_rule(c.plural_expr)
                replace_unused_singulars(c)
                try:
                    l.countries = make_sorted_dict(COUNTRIES, l.territories)
                except KeyError:
                    l.countries = COUNTRIES
                try:
                    l.languages_2 = make_sorted_dict(LANGUAGES_2, l.languages)
                except KeyError:
                    l.languages_2 = LANGUAGES_2
        except Exception as e:
            tell_sentry(e, {})
    del source_strings

    # Prepare a unique and sorted list for use in the language switcher
    percent = lambda l, total: sum(
        (percent(s, len(s)) if isinstance(s, tuple) else 1) for s in l if s
    ) / total
    for l in list(locales.values()):
        if l.language == 'en':
            l.completion = 1
            continue
        l.completion = percent(
            [m.string for m in l.catalog if m.id and not m.fuzzy], len(l.catalog)
        )
        if l.completion == 0:
            del locales[l.language]
    loc_url = canonical_scheme + '://%s.' + canonical_host
    domain, port = (canonical_host.split(':') + [None])[:2]
    port = int(port) if port else socket.getservbyname(canonical_scheme, 'tcp')
    subdomains = {
        k: loc_url % k for k in locales if resolve(k + '.' + domain, port)
    }
    lang_list = sorted(
        (
            (l.completion, l.language, l.language_name.title(), loc_url % l.language)
            for l in set(locales.values()) if l.completion > 0.5
        ),
        key=lambda t: (-t[0], t[1]),
    )

    # Add year-less date format
    year_re = re.compile(r'(^y+[^a-zA-Z]+|[^a-zA-Z]+y+$)')
    for l in locales.values():
        short_format = l.date_formats['short'].pattern
        assert short_format[0] == 'y' or short_format[-1] == 'y', (l.language, short_format)
        l.date_formats['short_yearless'] = year_re.sub('', short_format)

    # Add aliases
    for k, v in list(locales.items()):
        locales.setdefault(ALIASES.get(k, k), v)
        locales.setdefault(ALIASES_R.get(k, k), v)
    for k, v in list(locales.items()):
        locales.setdefault(k.split('_', 1)[0], v)

    # Patch the locales to look less formal
    locales['fr'].currency_formats['standard'] = parse_pattern('#,##0.00\u202f\xa4')
    locales['fr'].currency_symbols['USD'] = '$'
    locales['fr'].currencies['USD'] = 'dollar états-unien'

    # Load the markdown files
    docs = {}
    heading_re = re.compile(r'^(#+ )', re.M)
    for path in find_files(os.path.join(project_root, 'i18n'), '*.md'):
        d, b = os.path.split(path)
        doc = os.path.basename(d)
        lang = b[:-3]
        with open(path, 'rb') as f:
            md = f.read().decode('utf8')
        if md.startswith('# '):
            md = '\n'.join(md.split('\n')[1:]).strip()
            md = heading_re.sub(r'##\1', md)
        docs.setdefault(doc, {}).__setitem__(lang, markdown.render(md))

    return {'docs': docs, 'lang_list': lang_list, 'locales': locales, 'subdomains': subdomains}
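# resolve() isn't defined in this snippet. A plausible minimal implementation
# using only the standard library could look like this (an assumption, not the
# codebase's actual definition): it reports whether a hostname resolves, so
# that only working language subdomains are advertised.
import socket

def resolve(domain, port):
    try:
        socket.getaddrinfo(domain, port)
    except socket.gaierror:
        return False
    return True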
def load_i18n(canonical_host, canonical_scheme, project_root, tell_sentry):
    # Load the locales
    localeDir = os.path.join(project_root, 'i18n', 'core')
    locales = LOCALES
    for file in os.listdir(localeDir):
        try:
            parts = file.split(".")
            if not (len(parts) == 2 and parts[1] == "po"):
                continue
            lang = parts[0]
            with open(os.path.join(localeDir, file)) as f:
                l = locales[lang.lower()] = Locale(lang)
                c = l.catalog = read_po(f)
                c.plural_func = get_function_from_rule(c.plural_expr)
                try:
                    l.countries = make_sorted_dict(COUNTRIES, l.territories)
                except KeyError:
                    l.countries = COUNTRIES
                try:
                    l.languages_2 = make_sorted_dict(LANGUAGES_2, l.languages)
                except KeyError:
                    l.languages_2 = LANGUAGES_2
        except Exception as e:
            tell_sentry(e, {}, allow_reraise=True)

    # Prepare a unique and sorted list for use in the language switcher
    percent = lambda l: sum(
        (percent(s) if isinstance(s, tuple) else 1) for s in l if s
    ) / len(l)
    for l in locales.values():
        if l.language == 'en':
            l.completion = 1
            continue
        l.completion = percent([m.string for m in l.catalog if m.id])
    loc_url = canonical_scheme + '://%s.' + canonical_host
    lang_list = sorted(
        (
            (l.completion, l.language, l.language_name.title(), loc_url % l.language)
            for l in set(locales.values()) if l.completion
        ),
        key=lambda t: (-t[0], t[1]),
    )

    # Add aliases
    for k, v in list(locales.items()):
        locales.setdefault(ALIASES.get(k, k), v)
        locales.setdefault(ALIASES_R.get(k, k), v)
    for k, v in list(locales.items()):
        locales.setdefault(k.split('_', 1)[0], v)

    # Patch the locales to look less formal
    locales['fr'].currency_formats[None] = parse_pattern('#,##0.00\u202f\xa4')
    locales['fr'].currency_symbols['USD'] = '$'

    # Load the markdown files
    docs = {}
    heading_re = re.compile(r'^(#+ )', re.M)
    for path in find_files(os.path.join(project_root, 'i18n'), '*.md'):
        d, b = os.path.split(path)
        doc = os.path.basename(d)
        lang = b[:-3]
        with open(path, 'rb') as f:
            md = f.read().decode('utf8')
        if md.startswith('# '):
            md = '\n'.join(md.split('\n')[1:]).strip()
            md = heading_re.sub(r'##\1', md)
        docs.setdefault(doc, {}).__setitem__(lang, markdown.render(md))

    return {'docs': docs, 'lang_list': lang_list, 'locales': locales}
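# A standalone sketch of the recursive percent() lambda above: a tuple stands
# for a message with plural forms and contributes the fraction of its forms
# that are translated. The sample strings are invented.
percent = lambda l: sum(
    (percent(s) if isinstance(s, tuple) else 1) for s in l if s
) / len(l)
strings = ['Bonjour', ('Pomme', 'Pommes'), ('Poire', ''), '']
assert percent(strings) == (1 + 1 + 0.5 + 0) / 4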
def load_i18n(canonical_host, canonical_scheme, project_root, tell_sentry):
    # Load the locales
    localeDir = os.path.join(project_root, 'i18n', 'core')
    locales = LOCALES
    for file in os.listdir(localeDir):
        try:
            parts = file.split(".")
            if not (len(parts) == 2 and parts[1] == "po"):
                continue
            lang = parts[0]
            with open(os.path.join(localeDir, file)) as f:
                l = locales[lang.lower()] = Locale(lang)
                c = l.catalog = read_po(f)
                c.plural_func = get_function_from_rule(c.plural_expr)
                try:
                    l.countries = make_sorted_dict(COUNTRIES, l.territories)
                except KeyError:
                    l.countries = COUNTRIES
                try:
                    l.languages_2 = make_sorted_dict(LANGUAGES_2, l.languages)
                except KeyError:
                    l.languages_2 = LANGUAGES_2
        except Exception as e:
            tell_sentry(e, {}, allow_reraise=True)

    # Prepare a unique and sorted list for use in the language switcher
    for l in locales.values():
        strings = [m.string for m in l.catalog]
        l.completion = sum(1 for s in strings if s) / len(strings)
    loc_url = canonical_scheme+'://%s.'+canonical_host
    lang_list = sorted(
        (
            (l.completion, l.language, l.language_name.title(), loc_url % l.language)
            for l in set(locales.values())
        ),
        key=lambda t: (-t[0], t[1]),
    )

    # Add aliases
    for k, v in list(locales.items()):
        locales.setdefault(ALIASES.get(k, k), v)
        locales.setdefault(ALIASES_R.get(k, k), v)
    for k, v in list(locales.items()):
        locales.setdefault(k.split('_', 1)[0], v)

    # Patch the locales to look less formal
    locales['fr'].currency_formats[None] = parse_pattern('#,##0.00\u202f\xa4')
    locales['fr'].currency_symbols['USD'] = '$'

    # Load the markdown files
    docs = {}
    heading_re = re.compile(r'^(#+ )', re.M)
    for path in find_files(os.path.join(project_root, 'i18n'), '*.md'):
        d, b = os.path.split(path)
        doc = os.path.basename(d)
        lang = b[:-3]
        with open(path, 'rb') as f:
            md = f.read().decode('utf8')
        if md.startswith('# '):
            md = '\n'.join(md.split('\n')[1:]).strip()
            md = heading_re.sub(r'##\1', md)
        docs.setdefault(doc, {}).__setitem__(lang, markdown.render(md))

    return {'docs': docs, 'lang_list': lang_list, 'locales': locales}
def clean_assets(www_root):
    rm_f(*[spt[:-4] for spt in find_files(www_root+'/assets/', '*.spt')])
def load_i18n(canonical_host, canonical_scheme, project_root, tell_sentry):
    # Load the locales
    localeDir = os.path.join(project_root, 'i18n', 'core')
    locales = LOCALES
    source_strings = {}
    for file in os.listdir(localeDir):
        try:
            parts = file.split(".")
            if not (len(parts) == 2 and parts[1] == "po"):
                continue
            lang = parts[0]
            with open(os.path.join(localeDir, file), 'rb') as f:
                l = locales[lang.lower()] = Locale(lang)
                c = l.catalog = read_po(f)
                share_source_strings(c, source_strings)
                c.plural_func = get_function_from_rule(c.plural_expr)
                replace_unused_singulars(c)
                try:
                    l.countries = make_sorted_dict(COUNTRIES, l.territories)
                except KeyError:
                    l.countries = COUNTRIES
                try:
                    l.languages_2 = make_sorted_dict(LANGUAGES_2, l.languages)
                except KeyError:
                    l.languages_2 = LANGUAGES_2
        except Exception as e:
            tell_sentry(e, {})
    del source_strings

    # Prepare a unique and sorted list for use in the language switcher
    percent = lambda l, total: sum(
        (percent(s, len(s)) if isinstance(s, tuple) else 1) for s in l if s
    ) / total
    for l in list(locales.values()):
        if l.language == 'en':
            l.completion = 1
            continue
        l.completion = percent(
            [m.string for m in l.catalog if m.id and not m.fuzzy], len(l.catalog)
        )
        if l.completion == 0:
            del locales[l.language]
    loc_url = canonical_scheme+'://%s.'+canonical_host
    domain, port = (canonical_host.split(':') + [None])[:2]
    port = int(port) if port else socket.getservbyname(canonical_scheme, 'tcp')
    subdomains = {
        l.subdomain: loc_url % l.subdomain for l in locales.values()
        if resolve(l.subdomain + '.' + domain, port)
    }
    lang_list = sorted(
        (
            (l.completion, l.language, l.language_name.title(), loc_url % l.subdomain)
            for l in set(locales.values()) if l.completion > 0.5
        ),
        key=lambda t: (-t[0], t[1]),
    )

    # Add year-less date format
    year_re = re.compile(r'(^y+[^a-zA-Z]+|[^a-zA-Z]+y+$)')
    for l in locales.values():
        short_format = l.date_formats['short'].pattern
        assert short_format[0] == 'y' or short_format[-1] == 'y', (l.language, short_format)
        l.date_formats['short_yearless'] = year_re.sub('', short_format)

    # Add aliases
    for k, v in list(locales.items()):
        locales.setdefault(ALIASES.get(k, k), v)
        locales.setdefault(ALIASES_R.get(k, k), v)
    for k, v in list(locales.items()):
        locales.setdefault(k.split('_', 1)[0], v)

    # Patch the locales to look less formal
    locales['fr'].currency_formats['standard'] = parse_pattern('#,##0.00\u202f\xa4')
    locales['fr'].currencies['USD'] = 'dollar états-unien'

    # Load the markdown files
    docs = {}
    heading_re = re.compile(r'^(#+ )', re.M)
    for path in find_files(os.path.join(project_root, 'i18n'), '*.md'):
        d, b = os.path.split(path)
        doc = os.path.basename(d)
        lang = b[:-3]
        with open(path, 'rb') as f:
            md = f.read().decode('utf8')
        if md.startswith('# '):
            md = '\n'.join(md.split('\n')[1:]).strip()
            md = heading_re.sub(r'##\1', md)
        docs.setdefault(doc, {}).__setitem__(lang, markdown.render(md))

    return {'docs': docs, 'lang_list': lang_list, 'locales': locales, 'subdomains': subdomains}
def load_i18n(canonical_host, canonical_scheme, project_root, tell_sentry):

    def compute_percentage(it, total):
        return sum(
            (compute_percentage(s, len(s)) if isinstance(s, tuple) else 1)
            for s in it if s
        ) / total

    # Load the base locales
    localeDir = os.path.join(project_root, 'i18n', 'core')
    locales = LOCALES
    source_strings = {}
    for file in os.listdir(localeDir):
        try:
            parts = file.split(".")
            if not (len(parts) == 2 and parts[1] == "po"):
                continue
            lang = parts[0]
            with open(os.path.join(localeDir, file), 'rb') as f:
                l = Locale(lang)
                c = l.catalog = read_po(f)
                share_source_strings(c, source_strings)
                c.plural_func = get_function_from_rule(c.plural_expr)
                replace_unused_singulars(c)
                l.completion = compute_percentage(
                    (m.string for m in c if m.id and not m.fuzzy), len(c)
                )
            if l.completion == 0:
                continue
            else:
                locales[lang.lower()] = l
            try:
                l.countries = make_sorted_dict(COUNTRIES, l.territories)
            except KeyError:
                l.countries = COUNTRIES
            try:
                l.languages_2 = make_sorted_dict(LANGUAGES_2, l.languages)
            except KeyError:
                l.languages_2 = LANGUAGES_2
        except Exception as e:
            tell_sentry(e)
    del source_strings

    # Load the variants
    for loc_id in babel.localedata.locale_identifiers():
        if loc_id in locales:
            continue
        i = loc_id.rfind('_')
        if i == -1:
            continue
        base = locales.get(loc_id[:i])
        if base:
            l = locales[loc_id.lower()] = Locale.parse(loc_id)
            l.catalog = base.catalog
            l.completion = base.completion
            l.countries = base.countries
            l.languages_2 = base.languages_2

    # Unload the Babel data that we no longer need
    # We load a lot of data to populate the LANGUAGE_NAMES dict, we don't want
    # to keep it all in RAM.
    used_data_dict_addresses = set(id(l._data._data) for l in locales.values())
    for key, data_dict in list(babel.localedata._cache.items()):
        if id(data_dict) not in used_data_dict_addresses:
            del babel.localedata._cache[key]

    # Prepare a unique and sorted list for use in the language switcher
    loc_url = canonical_scheme + '://%s.' + canonical_host
    domain, port = (canonical_host.split(':') + [None])[:2]
    port = int(port) if port else socket.getservbyname(canonical_scheme, 'tcp')
    subdomains = {
        l.subdomain: loc_url % l.subdomain for l in locales.values()
        if not l.territory and resolve(l.subdomain + '.' + domain, port)
    }
    lang_list = sorted(
        (
            (l.completion, l.language, l.language_name.title(), loc_url % l.subdomain)
            for l in set(locales.values())
            if not l.territory and l.completion > 0.5
        ),
        key=lambda t: (-t[0], t[1]),
    )

    # Add year-less date format
    year_re = re.compile(r'(^y+[^a-zA-Z]+|[^a-zA-Z]+y+$)')
    for l in locales.values():
        short_format = l.date_formats['short'].pattern
        assert short_format[0] == 'y' or short_format[-1] == 'y', (l.language, short_format)
        l.date_formats['short_yearless'] = year_re.sub('', short_format)

    # Add aliases
    for k, v in list(locales.items()):
        locales.setdefault(ALIASES.get(k, k), v)
        locales.setdefault(ALIASES_R.get(k, k), v)
    for k, v in list(locales.items()):
        locales.setdefault(k.split('_', 1)[0], v)

    # Add universal strings
    # These strings don't need to be translated, but they have to be in the
    # catalogs so that they're counted as translated.
    for l in locales.values():
        l.catalog.add("PayPal", "PayPal")

    # Patch the locales to look less formal
    locales['fr'].currency_formats['standard'] = parse_pattern('#,##0.00\u202f\xa4')
    locales['fr'].currencies['USD'] = 'dollar états-unien'

    # Load the markdown files
    docs = {}
    heading_re = re.compile(r'^(#+ )', re.M)
    for path in find_files(os.path.join(project_root, 'i18n'), '*.md'):
        d, b = os.path.split(path)
        doc = os.path.basename(d)
        lang = b[:-3]
        with open(path, 'rb') as f:
            md = f.read().decode('utf8')
        if md.startswith('# '):
            md = '\n'.join(md.split('\n')[1:]).strip()
            md = heading_re.sub(r'##\1', md)
        docs.setdefault(doc, {}).__setitem__(lang, markdown.render(md))

    return {'docs': docs, 'lang_list': lang_list, 'locales': locales, 'subdomains': subdomains}
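# A standalone check of the year-stripping regex used above: the 'y' (year)
# tokens plus the adjacent separator are removed from either end of a CLDR
# short date pattern. The sample patterns below are typical CLDR-style values,
# chosen here purely as illustrations.
import re
year_re = re.compile(r'(^y+[^a-zA-Z]+|[^a-zA-Z]+y+$)')
assert year_re.sub('', 'dd/MM/y') == 'dd/MM'
assert year_re.sub('', 'y-MM-dd') == 'MM-dd'
assert year_re.sub('', 'd.M.yyyy') == 'd.M'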