def _create_lang_script_to_names(lang_script_data):
  """Generate a map from lang-script to English (and possibly native) names.

  Whether the script is included in the name depends on the number of used
  and unused scripts.  If there's one used script, that script is omitted.
  Else if there's no used script and one unused script, that script is
  omitted.  Else the script is included.  If there's no English name for
  the lang_script, it is excluded.

  Args:
    lang_script_data: map from lang to a (used, unused) pair of script sets.

  Returns:
    Map from 'lang-script' to [english_name] or [english_name, native_name].
  """
  lang_to_names = {}
  for lang in lang_script_data:
    used, unused = lang_script_data[lang]
    # When the script is unambiguous for this lang, omit it from the name.
    # next(iter(s)) is portable across Python 2 and 3, unlike s.next().
    if len(used) == 1:
      exclude_script = next(iter(used))
    elif not used and len(unused) == 1:
      exclude_script = next(iter(unused))
    else:
      exclude_script = ''

    for script in (used | unused):
      lang_script = lang + '-' + script
      target = lang if script == exclude_script else lang_script
      # special case, not generally useful
      if target.startswith('und-'):
        en_name = cldr_data.get_english_script_name(target[4:]) + ' script'
      else:
        en_name = cldr_data.get_english_language_name(target)
      if not en_name:
        # Single-argument call form prints identically on Python 2 and 3.
        print('!No english name for %s' % lang_script)
        continue
      native_name = cldr_data.get_native_language_name(
          lang_script, exclude_script)
      # Don't repeat the English name as a native name.
      if native_name == en_name:
        native_name = None
      lang_to_names[lang_script] = (
          [en_name, native_name] if native_name else [en_name])
  return lang_to_names
def _create_lang_script_to_names(lang_script_data):
  """Generate a map from lang-script to English (and possibly native) names.

  Whether the script is included in the name depends on the number of used
  and unused scripts.  If there's one used script, that script is omitted.
  Else if there's no used script and one unused script, that script is
  omitted.  Else the script is included.  If there's no English name for
  the lang_script, it is excluded.

  Args:
    lang_script_data: map from lang to a (used, unused) pair of script sets.

  Returns:
    Map from 'lang-script' to [english_name] or [english_name, native_name].
  """
  lang_to_names = {}
  for lang in lang_script_data:
    used, unused = lang_script_data[lang]
    # When the script is unambiguous for this lang, omit it from the name.
    # next(iter(s)) is portable across Python 2 and 3, unlike s.next().
    if len(used) == 1:
      exclude_script = next(iter(used))
    elif not used and len(unused) == 1:
      exclude_script = next(iter(unused))
    else:
      exclude_script = ''

    for script in (used | unused):
      lang_script = lang + '-' + script
      target = lang if script == exclude_script else lang_script
      # special case, not generally useful
      if target.startswith('und-'):
        en_name = cldr_data.get_english_script_name(
            target[4:]) + ' script'
      else:
        en_name = cldr_data.get_english_language_name(target)
      if not en_name:
        # Easier than patching the cldr_data, not sure I want to go there.
        if lang_script == 'tlh-Piqd':
          en_name = u'Klingon'
        else:
          _log('No english name for %s' % lang_script)
          continue
      native_name = cldr_data.get_native_language_name(
          lang_script, exclude_script)
      # Don't repeat the English name as a native name.
      if native_name == en_name:
        native_name = None
      lang_to_names[lang_script] = (
          [en_name, native_name] if native_name else [en_name])
  return lang_to_names
def build_data_json(self, families, family_zip_info, universal_zip_info,
                    family_id_to_lang_tags, family_id_to_regions,
                    lang_tag_to_family_ids, region_to_family_ids):
  """Build and write the 'data' json file.

  The object holds, in order: per-family summary info (name, zip sizes,
  font/lang/region counts), the family display order, an index from lang
  tag to families, an index from region to families, and the universal
  package sizes.

  Args:
    families: map from family id to a family object with name and member
      attributes.
    family_zip_info: map from family id to (zip_name, hinted_size,
      unhinted_size).
    universal_zip_info: (name, hinted_size, unhinted_size) for the
      universal package.
    family_id_to_lang_tags: map from family id to supported lang tags.
    family_id_to_regions: map from family id to supported regions.
    lang_tag_to_family_ids: map from lang tag to supporting family ids.
    region_to_family_ids: map from region to supporting family ids.
  """
  data_obj = collections.OrderedDict()
  families_obj = collections.OrderedDict()
  # Sort families by English name, except 'Noto Sans' and 'Noto Serif'
  # come first.
  family_ids = [family_id for family_id in families
                if family_id not in ('sans', 'serif')]
  family_ids = sorted(family_ids, key=lambda f: families[f].name)
  sorted_ids = ['sans', 'serif']
  sorted_ids.extend(family_ids)
  for k in sorted_ids:
    family = families[k]
    family_obj = {}
    family_obj['name'] = family.name
    # The zip base name is not needed here, only the sizes.
    _, hinted_size, unhinted_size = family_zip_info[k]
    pkg_obj = collections.OrderedDict()
    if hinted_size:
      pkg_obj['hinted'] = hinted_size
    if unhinted_size:
      pkg_obj['unhinted'] = unhinted_size
    family_obj['pkgSize'] = pkg_obj
    family_obj['fonts'] = len(
        family.hinted_members or family.unhinted_members)
    family_obj['langs'] = len(family_id_to_lang_tags[k])
    family_obj['regions'] = len(family_id_to_regions[k])
    families_obj[k] = family_obj
  data_obj['family'] = families_obj
  data_obj['familyOrder'] = sorted_ids

  langs_obj = collections.OrderedDict()
  # Don't list 'und-' lang tags, these are for default samples and not
  # listed in the UI.
  lang_tags = [lang for lang in lang_tag_to_family_ids
               if not lang.startswith('und-')]
  lang_tags = sorted(
      lang_tags, key=lambda l: cldr_data.get_english_language_name(l))
  for lang in lang_tags:
    lang_obj = collections.OrderedDict()
    english_name = cldr_data.get_english_language_name(lang)
    lang_obj['name'] = english_name
    lang_obj['families'] = sorted(lang_tag_to_family_ids[lang])
    native_name = cldr_data.get_native_language_name(lang)
    # Only keep a native name that adds information over the English one.
    if native_name and native_name != english_name:
      lang_obj['keywords'] = [native_name]
    langs_obj[lang] = lang_obj
  data_obj['lang'] = langs_obj

  regions_obj = collections.OrderedDict()
  for region in sorted(
      region_to_family_ids,
      key=lambda r: cldr_data.get_english_region_name(r)):
    region_obj = collections.OrderedDict()
    region_obj['families'] = sorted(region_to_family_ids[region])
    region_obj['keywords'] = [cldr_data.get_english_region_name(region)]
    regions_obj[region] = region_obj
  data_obj['region'] = regions_obj

  pkg_obj = collections.OrderedDict()
  pkg_obj['hinted'] = universal_zip_info[1]
  pkg_obj['unhinted'] = universal_zip_info[2]
  data_obj['pkgSize'] = pkg_obj

  self.write_json(data_obj, 'data')
def build_data_json(self, families, family_zip_info, universal_zip_info,
                    family_id_to_lang_tags, family_id_to_regions,
                    lang_tag_to_family_ids, region_to_family_ids):
  """Assemble the 'data' json object and write it out.

  The object contains, in order: per-family summary data (name, package
  sizes, font/lang/region counts), the family display order, a lang-tag
  index into families, a region index into families, and the universal
  package sizes.
  """
  root = collections.OrderedDict()

  # Families: 'Noto Sans' and 'Noto Serif' lead, the rest sorted by
  # English family name.
  order = ['sans', 'serif']
  order.extend(sorted(
      (fid for fid in families if fid not in ('sans', 'serif')),
      key=lambda fid: families[fid].name))
  fam_index = collections.OrderedDict()
  for fid in order:
    fam = families[fid]
    entry = {}
    entry['name'] = fam.name
    zip_name, hinted, unhinted = family_zip_info[fid]
    sizes = collections.OrderedDict()
    if hinted:
      sizes['hinted'] = hinted
    if unhinted:
      sizes['unhinted'] = unhinted
    entry['pkgSize'] = sizes
    entry['fonts'] = len(fam.hinted_members or fam.unhinted_members)
    entry['langs'] = len(family_id_to_lang_tags[fid])
    entry['regions'] = len(family_id_to_regions[fid])
    fam_index[fid] = entry
  root['family'] = fam_index
  root['familyOrder'] = order

  # Languages: skip 'und-' tags (default samples, not shown in the UI),
  # sort the rest by English language name.
  lang_index = collections.OrderedDict()
  visible_tags = sorted(
      (tag for tag in lang_tag_to_family_ids if not tag.startswith('und-')),
      key=lambda tag: cldr_data.get_english_language_name(tag))
  for tag in visible_tags:
    entry = collections.OrderedDict()
    en_name = cldr_data.get_english_language_name(tag)
    entry['name'] = en_name
    entry['families'] = sorted(lang_tag_to_family_ids[tag])
    native = cldr_data.get_native_language_name(tag)
    if native and native != en_name:
      entry['keywords'] = [native]
    lang_index[tag] = entry
  root['lang'] = lang_index

  # Regions, sorted by English region name.
  region_index = collections.OrderedDict()
  for region in sorted(
      region_to_family_ids,
      key=lambda r: cldr_data.get_english_region_name(r)):
    entry = collections.OrderedDict()
    entry['families'] = sorted(region_to_family_ids[region])
    entry['keywords'] = [cldr_data.get_english_region_name(region)]
    region_index[region] = entry
  root['region'] = region_index

  # Universal package sizes.
  universal_sizes = collections.OrderedDict()
  universal_sizes['hinted'] = universal_zip_info[1]
  universal_sizes['unhinted'] = universal_zip_info[2]
  root['pkgSize'] = universal_sizes

  self.write_json(root, 'data')