def wiki(fonts, _library=library):
    """Print a MediaWiki-markup coverage table for each font file.

    fonts: iterable of font file paths.
    _library: charset library to report against (module-level default).
    """
    for font_filename in fonts:
        font = FontFactory.openfont(font_filename)
        # Section heading per font; ASCII-fold in case of non-ASCII names.
        print('=== %s ===' % font.common_name.encode('ascii', 'ignore'))
        print('{|')
        print('| colspan=3 |')
        for subset in _library.charsets:
            charsetinfo = CharsetInfo(font, subset)
            # Skip charsets the font does not support at all.
            if charsetinfo.support_level == SUPPORT_LEVEL_UNSUPPORTED:
                continue
            # Some charset objects expose no glyph list; nothing to count.
            if not hasattr(subset, 'glyphs'):
                continue
            # `glyphs` may be either a callable producing the glyph list
            # or the list itself.
            if callable(subset.glyphs):
                glyphs = list(subset.glyphs())
            else:
                glyphs = list(subset.glyphs)
            print('|-')
            # One table row: charset name, hits/total, then a bartable
            # cell rendering the coverage percentage.
            print(
                "| [[ %s ]] (%s/%s) || style='text-align:right'" % (
                    subset.common_name,
                    len(glyphs) - len(list(charsetinfo.missing)),
                    len(glyphs)),
                " | {{bartable|%s|%%|2||background:green}}"
                % charsetinfo.coverage)
        print('|}')
def generate(config, outfile='review.html'):
    """Render the orthography review page into ``config['path']/outfile``.

    config: dict with at least a 'path' key pointing at the family dir.
    outfile: name of the template and of the output HTML file.
    """
    directory = UpstreamDirectory(config['path'])
    fonts = [(path, FontFactory.openfont(op.join(config['path'], path)))
             for path in directory.BIN]
    # Read family metadata, closing the handle instead of leaking it.
    with open(op.join(config['path'], 'METADATA.json')) as metadata_fp:
        metadata_file = metadata_fp.read()
    family_metadata = Metadata.get_family_metadata(metadata_file)
    faces = []
    for f in family_metadata.fonts:
        faces.append({'name': f.full_name,
                      'basename': f.post_script_name,
                      'path': f.filename,
                      'meta': f})
    app_version = report_utils.git_info(config)
    report_app = report_utils.ReportApp(config)
    fonts_orthography = get_orthography(fonts)
    report_app.review_page.dump_file(fonts_orthography, 'orthography.json')
    # BUGFIX: the destination file name previously hard-coded
    # 'review.html' and ignored the `outfile` parameter; also close the
    # file when done.
    with open(op.join(config['path'], outfile), 'w') as destfile:
        print(report_utils.render_template(
            outfile,
            fonts=faces,
            markdown=markdown,
            current_page=outfile,
            get_weight_name=get_weight_name,
            build_repo_url=report_utils.build_repo_url,
            app_version=app_version,
            get_orthography=get_orthography_old,
            fontaineFonts=fonts), file=destfile)
def get_subsets_coverage_data(source_fonts_paths):
    """ Return dict mapping key to the corresponding subsets coverage

        {'subsetname':
            {'fontname-light': 13, 'fontname-bold': 45},
         'subsetname':
            {'fontname-light': 9, 'fontname-bold': 100}
        }
    """
    subsets_library = Library(collections=['subsets'])
    coverage_by_subset = {}
    for path in source_fonts_paths:
        # FontForge sources cannot be inspected here; skip them.
        if path.lower().endswith('.sfd'):
            continue
        try:
            fontaine_font = FontFactory.openfont(path)
        except AssertionError:
            # Unreadable/unsupported font: leave it out of the report.
            continue
        for info in fontaine_font.get_orthographies(_library=subsets_library):
            name = info.charset.common_name.replace('Subset ', '')
            coverage_by_subset.setdefault(name, {})[path] = info.coverage
    return coverage_by_subset
def __generateTests__(cls):
    """Attach one ``test_charset_<name>`` method per subset enabled in
    bakery.yaml, each asserting 100% coverage of that subset."""
    # Strips non-identifier characters from charset names so they can be
    # embedded into method names.
    pattern = re.compile(r'[\W_]+')
    library = Library(collections=['subsets'])
    directory = UpstreamDirectory(cls.operator.path)
    yamlpath = op.join(cls.operator.path, 'bakery.yaml')
    try:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input, and the handle is never closed — confirm
        # whether bakery.yaml is always trusted here.
        bakerydata = yaml.load(open(yamlpath))
    except IOError:
        # Project has no bakery.yaml: fall back to bundled defaults.
        bakerydata = yaml.load(open(BAKERY_CONFIGURATION_DEFAULTS))
    for fontpath in directory.UFO + directory.TTX:
        font = FontFactory.openfont(op.join(cls.operator.path, fontpath))
        for charmap, _, coverage, _ in \
                font.get_orthographies(_library=library):
            common_name = charmap.common_name.replace('Subset ', '')
            shortname = pattern.sub('', common_name)
            # Only generate tests for subsets the project opted into.
            if shortname not in bakerydata['subset']:
                continue
            # NOTE: Python 2 `exec` statements — this block is not
            # Python 3 compatible as written.
            exec 'cls.test_charset_%s = get_test_subset_function(%s)' % (
                shortname, coverage)
            exec 'cls.test_charset_%s.__func__.__doc__ = "Is %s covered 100%%?"' % (
                shortname, common_name)
def project_fontaine(project, build):
    """Run pyFontaine over the build's TTFs, store dashboard coverage
    totals in the project state, and return the fontaine Font objects.

    project: bakery project (provides login/id/config/save_state).
    build: build record (provides revision/id).
    Returns an iterator of fontaine Font instances, or None when the
    output directory is missing.
    """
    from fontaine.font import FontFactory
    param = {'login': project.login, 'id': project.id,
             'revision': build.revision, 'build': build.id}
    _out = op.join(current_app.config['DATA_ROOT'],
                   '%(login)s/%(id)s.out/%(build)s.%(revision)s/' % param)
    # Its very likely that _out exists, but just in case:
    if op.exists(_out):
        os.chdir(_out)
    else:
        # This is very unlikely, but should it happen, just return
        return
    # Run pyFontaine on all the TTF fonts
    fonts = {}
    for filename in glob.glob("*.ttf"):
        fontaine = FontFactory.openfont(filename)
        fonts[filename] = fontaine
    # Make a plain dictionary, unlike the fancy data structures
    # used by pyFontaine :)
    family = {}
    for fontfilename, fontaine in fonts.iteritems():
        # Use the font file name as a key to a dictionary of char sets
        family[fontfilename] = {}
        for orthography in fontaine.get_orthographies():
            charset, coverage, percent_complete, missing_chars = orthography
            # Use each charset name as a key to dictionary of font's
            # coverage details
            charset = charset.common_name
            family[fontfilename][charset] = {}
            # unsupport, fragmentary, partial, full
            family[fontfilename][charset]['coverage'] = coverage
            family[fontfilename][charset]['percentcomplete'] = percent_complete
            # list of ord numbers
            family[fontfilename][charset]['missingchars'] = missing_chars
            # Use the char set name as a key to a list of the family's
            # average coverage
            if not charset in family:
                family[charset] = []
            # Append the char set percentage of each font file to the list
            family[charset].append(percent_complete)  # [10, 32, 40, 40] etc
            # And finally, if the list now has all the font files, make it
            # the mean average percentage
            # NOTE(review): with integer percentages this is Python 2
            # floor division, so the mean is truncated — confirm intended.
            if len(family[charset]) == len(fonts.items()):
                family[charset] = sum(family[charset]) / len(fonts.items())
    # Make a plain dictionary with just the bits we want on the dashboard
    totals = {}
    totals['gwf'] = family.get('GWF latin', None)
    totals['al3'] = family.get('Adobe Latin 3', None)
    # Store it in the $(id).state.yaml file
    project.config['local']['charsets'] = totals
    project.save_state()
    # fonts.itervalues() emits fontaine.font.Font instances that are used
    # for the rfiles.html template
    return fonts.itervalues()
def construct_tree(self, fonts):
    """Build an ordered description of each font and its orthography
    coverage, for report serialization.

    fonts: iterable of font file paths.
    Returns OrderedDict {'fonts': [{'font': {...}}, ...]}.
    """
    if self.show_hilbert:
        # matplotlib is only needed for the optional coverage PNGs.
        try:
            import matplotlib
        except ImportError:
            raise Exception('Install matplotlib to use --show-hilbert feature')
    tree = OrderedDict({'fonts': []})
    for font_filename in fonts:
        font = FontFactory.openfont(font_filename, charmaps=self.charmaps)
        F = OrderedDict()
        desc = OrderedDict()
        desc['commonName'] = font.common_name
        desc['subFamily'] = font.sub_family
        desc['style'] = font.style_flags
        desc['weight'] = font.weight
        desc['fixedWidth'] = yesno(font.is_fixed_width)
        desc['fixedSizes'] = yesno(font.has_fixed_sizes)
        desc['copyright'] = extract_firstline(font.copyright or '')
        desc['license'] = extract_firstline(font.license or '')
        desc['licenseUrl'] = font.license_url
        desc['version'] = font.version
        desc['vendor'] = extract_firstline(font.vendor or '')
        desc['vendorUrl'] = font.vendor_url
        desc['designer'] = font.designer
        desc['designerUrl'] = font.designer_url
        desc['glyphCount'] = font.glyph_num
        desc['characterCount'] = font.character_count
        for charmap, support_level, coverage, missing \
                in font.get_orthographies():
            # Unsupported orthographies are omitted entirely.
            if support_level == SUPPORT_LEVEL_UNSUPPORTED:
                continue
            if 'orthographies' not in desc:
                desc['orthographies'] = []
            orth = OrderedDict({'orthography': OrderedDict()})
            orth['orthography']['commonName'] = charmap.common_name
            orth['orthography']['nativeName'] = charmap.native_name
            orth['orthography']['supportLevel'] = support_level
            # Coverage details are only recorded when support is partial.
            if support_level != SUPPORT_LEVEL_FULL:
                values = u'\n%s' % u'\n'.join(unicodevalues_asstring(missing))
                orth['orthography']['percentCoverage'] = coverage
                if self.missingValues:
                    orth['orthography']['missingValues'] = values
            desc['orthographies'].append(orth)
        if self.show_hilbert:
            self.represent_coverage_png(font)
        F['font'] = desc
        tree['fonts'].append(F)
    return tree
def __generateTests__(cls):
    """Attach one ``test_charset_<name>`` method per orthography subset,
    each asserting 100% coverage of that subset.

    Uses ``setattr`` instead of the previous string-built ``exec``
    statements: behavior is identical (in Python 2, functions assigned to
    a class become methods and ``__func__.__doc__`` is the function's
    ``__doc__``), but no code is constructed from strings and the syntax
    is valid on both Python 2 and 3.
    """
    # Raw string avoids the invalid-escape warning of '[\W_]+'.
    pattern = re.compile(r'[\W_]+')
    library = Library(collections=['subsets'])
    ufo_files, ttx_files, _ = get_sources_lists(cls.path)
    for fontpath in ufo_files + ttx_files:
        font = FontFactory.openfont(fontpath)
        for charmap, _, coverage, _ in \
                font.get_orthographies(_library=library):
            common_name = charmap.common_name.replace('Subset ', '')
            # Identifier-safe suffix for the generated method name.
            shortname = pattern.sub('', common_name)
            test_method = get_test_subset_function(coverage)
            test_method.__doc__ = "Is %s covered 100%%?" % common_name
            setattr(cls, 'test_charset_%s' % shortname, test_method)
def __generateTests__(cls):
    """Attach one ``test_charset_<name>`` method per orthography subset,
    each asserting 100% coverage of that subset.

    Uses ``setattr`` instead of the previous string-built ``exec``
    statements: behavior is identical, but no code is constructed from
    strings and the syntax is valid on both Python 2 and 3.
    """
    # Raw string avoids the invalid-escape warning of '[\W_]+'.
    pattern = re.compile(r'[\W_]+')
    library = Library(collections=['subsets'])
    directory = UpstreamDirectory(cls.path)
    for fontpath in directory.UFO + directory.TTX:
        font = FontFactory.openfont(op.join(cls.path, fontpath))
        for charmap, _, coverage, _ in \
                font.get_orthographies(_library=library):
            common_name = charmap.common_name.replace('Subset ', '')
            # Identifier-safe suffix for the generated method name.
            shortname = pattern.sub('', common_name)
            test_method = get_test_subset_function(coverage)
            test_method.__doc__ = "Is %s covered 100%%?" % common_name
            setattr(cls, 'test_charset_%s' % shortname, test_method)
def csv_(fonts, _library=library):
    """Return CSV text describing each font plus per-charset coverage."""
    buf = StringIO.StringIO()
    writer = csv.writer(buf, delimiter=',', quoting=csv.QUOTE_MINIMAL)
    column_names = [
        'Filename', 'commonName', 'subFamily', 'style', 'weight',
        'fixedWidth', 'fixedSizes', 'copyright', 'license', 'licenseUrl',
        'version', 'vendor', 'vendorUrl', 'designer', 'designerUrl',
        'glyphCount', 'characterCount'
    ]
    column_names.extend(charset.common_name.encode('ascii', 'ignore')
                        for charset in _library.charsets)
    writer.writerow(column_names)

    def fold(text):
        # ASCII-fold a text field for CSV output.
        return text.encode('ascii', 'ignore')

    for path in fonts:
        font = FontFactory.openfont(path)
        record = [
            fold(os.path.basename(path)),
            fold(font.common_name),
            fold(font.sub_family),
            fold(font.style_flags),
            fold(font.weight),
            yesno(font.is_fixed_width),
            yesno(font.has_fixed_sizes),
            fold(extract_firstline(font.copyright or '')),
            fold(extract_firstline(font.license or '')),
            fold(font.license_url),
            fold(font.version),
            fold(extract_firstline(font.vendor or '')),
            fold(font.vendor_url),
            fold(font.designer),
            fold(font.designer_url),
            str(font.glyph_num),
            str(font.character_count),
        ]
        record.extend(str(CharsetInfo(font, charset).coverage)
                      for charset in _library.charsets)
        writer.writerow(record)
    buf.seek(0)
    return buf.read()
def compact_(fonts, _library=library):
    """Print a terse per-font summary of supported charset coverage."""
    for path in fonts:
        font = FontFactory.openfont(path)
        print('Filename:', font.common_name)
        print('Glyph count: ', font.glyph_num)
        print('Character count:', font.character_count)
        for charset in _library.charsets:
            info = CharsetInfo(font, charset)
            # Entirely unsupported charsets are left out of the summary.
            if info.support_level == SUPPORT_LEVEL_UNSUPPORTED:
                continue
            summary = '{}% {}/{} {}'.format(
                info.coverage,
                info.hits,
                info.glyphs_count,
                charset.common_name.encode('ascii', 'ignore'))
            print(summary)
def compact_(fonts, _library=library):
    """Print a compact coverage report: one header plus one line per
    supported charset for every font given."""
    for font_path in fonts:
        current = FontFactory.openfont(font_path)
        print('Filename:', current.common_name)
        print('Glyph count: ', current.glyph_num)
        print('Character count:', current.character_count)
        for charset in _library.charsets:
            details = CharsetInfo(current, charset)
            if details.support_level != SUPPORT_LEVEL_UNSUPPORTED:
                # "<pct>% <hits>/<total> <name>"
                print('{}% {}/{} {}'.format(
                    details.coverage, details.hits, details.glyphs_count,
                    charset.common_name.encode('ascii', 'ignore')))
def generate(config, outfile='review.html'):
    """Dump orthography data for the review page.

    config: dict with at least a 'path' key pointing at the family dir.
    outfile: kept for interface compatibility with the other report
    generators; this variant only writes orthography.json.
    """
    directory = UpstreamDirectory(config['path'])
    fonts = [(path, FontFactory.openfont(op.join(config['path'], path)))
             for path in directory.BIN]
    # FIX: close METADATA.json instead of leaking the file handle.
    with open(op.join(config['path'], 'METADATA.json')) as metadata_fp:
        metadata_file = metadata_fp.read()
    family_metadata = Metadata.get_family_metadata(metadata_file)
    faces = []
    for f in family_metadata.fonts:
        faces.append({'name': f.full_name,
                      'basename': f.post_script_name,
                      'path': f.filename,
                      'meta': f})
    report_app = report_utils.BuildInfo(config)
    fonts_orthography = get_orthography(fonts)
    report_app.review_page.dump_file(fonts_orthography, 'orthography.json')
def wiki(fonts, _library=library):
    """Print a MediaWiki-markup coverage table for each font file.

    fonts: iterable of font file paths.
    _library: charmap library to report against (module-level default).
    """
    for font_filename in fonts:
        font = FontFactory.openfont(font_filename)
        # Section heading per font; ASCII-fold in case of non-ASCII names.
        print('=== %s ===' % font.common_name.encode('ascii', 'ignore'))
        print('{|')
        print('| colspan=3 |')
        for subset in _library.charmaps:
            charmapinfo = CharmapInfo(font, subset)
            # Skip charmaps the font does not support at all.
            if charmapinfo.support_level == SUPPORT_LEVEL_UNSUPPORTED:
                continue
            # `glyphs` may be either a callable producing the glyph list
            # or the list itself.
            glyphs = subset.glyphs
            if callable(glyphs):
                glyphs = glyphs()
            print('|-')
            # One table row: charmap name, hits/total, then a bartable
            # cell rendering the coverage percentage.
            print("| [[ %s ]] (%s/%s) || style='text-align:right'" % (
                subset.common_name,
                len(glyphs) - len(charmapinfo.missing),
                len(glyphs)),
                " | {{bartable|%s|%%|2||background:green}}"
                % charmapinfo.coverage)
        print('|}')
def csv_(fonts, _library=library):
    """Return CSV text with family, style and per-charmap coverage."""
    buf = StringIO.StringIO()
    writer = csv.writer(buf, delimiter=',', quoting=csv.QUOTE_MINIMAL)
    header = ['Family', 'Style'] + [
        charmap.common_name.encode('ascii', 'ignore')
        for charmap in _library.charmaps]
    writer.writerow(header)
    for path in fonts:
        font = FontFactory.openfont(path)
        record = [font.common_name.encode('ascii', 'ignore'),
                  font.sub_family.encode('ascii', 'ignore')]
        record.extend(str(CharmapInfo(font, charmap).coverage)
                      for charmap in _library.charmaps)
        writer.writerow(record)
    buf.seek(0)
    return buf.read()
def generate(config, outfile='review.html'):
    """Dump orthography data for the review page.

    config: dict with at least a 'path' key pointing at the family dir.
    outfile: kept for interface compatibility with the other report
    generators; this variant only writes orthography.json.
    """
    directory = UpstreamDirectory(config['path'])
    fonts = [(path, FontFactory.openfont(op.join(config['path'], path)))
             for path in directory.BIN]
    # FIX: close METADATA.json instead of leaking the file handle.
    with open(op.join(config['path'], 'METADATA.json')) as metadata_fp:
        metadata_file = metadata_fp.read()
    family_metadata = Metadata.get_family_metadata(metadata_file)
    faces = []
    for f in family_metadata.fonts:
        faces.append({
            'name': f.full_name,
            'basename': f.post_script_name,
            'path': f.filename,
            'meta': f
        })
    report_app = report_utils.BuildInfo(config)
    fonts_orthography = get_orthography(fonts)
    report_app.review_page.dump_file(fonts_orthography, 'orthography.json')
def get_subsets_coverage_data(source_fonts_paths, log=None):
    """ Return dict mapping key to the corresponding subsets coverage.

    For example:

    {'latin': 86, 'devanagari': 72}

    source_fonts_paths: iterable of font file paths.
    log: optional file-like object; openfont failures are written to it.
    """
    from fontaine.font import FontFactory
    from fontaine.cmap import Library
    library = Library(collections=['subsets'])
    subsets = {}
    for fontpath in source_fonts_paths:
        try:
            font = FontFactory.openfont(fontpath)
        # FIX: `except AssertionError, ex` was Python-2-only syntax;
        # `as` works on Python 2.6+ and 3.
        except AssertionError as ex:
            if log:
                # NOTE(review): `ex.message` is Python-2-only; replace
                # with str(ex) if this ever runs on Python 3.
                log.write('Error: [%s] %s' % (fontpath, ex.message))
            continue
        for charmap, _, coverage, _ in \
                font.get_orthographies(_library=library):
            subsets[charmap.common_name.replace('Subset ', '')] = coverage
    # BUGFIX: the dict was built but never returned (the function
    # returned None despite its documented contract).
    return subsets
def csv_(fonts, _library=library):
    """Serialize font metadata plus per-charset coverage as CSV text."""
    output = StringIO.StringIO()
    writer = csv.writer(output, delimiter=',', quoting=csv.QUOTE_MINIMAL)
    field_names = ['Filename', 'commonName', 'subFamily', 'style', 'weight',
                   'fixedWidth', 'fixedSizes', 'copyright', 'license',
                   'licenseUrl', 'version', 'vendor', 'vendorUrl',
                   'designer', 'designerUrl', 'glyphCount', 'characterCount']
    for charset in _library.charsets:
        field_names.append(charset.common_name.encode('ascii', 'ignore'))
    writer.writerow(field_names)
    for path in fonts:
        font = FontFactory.openfont(path)
        # Leading text fields, ASCII-folded in one pass.
        leading = [os.path.basename(path), font.common_name,
                   font.sub_family, font.style_flags, font.weight]
        row = [value.encode('ascii', 'ignore') for value in leading]
        row.append(yesno(font.is_fixed_width))
        row.append(yesno(font.has_fixed_sizes))
        trailing = [extract_firstline(font.copyright or ''),
                    extract_firstline(font.license or ''),
                    font.license_url,
                    font.version,
                    extract_firstline(font.vendor or ''),
                    font.vendor_url,
                    font.designer,
                    font.designer_url]
        row.extend(value.encode('ascii', 'ignore') for value in trailing)
        row.append(str(font.glyph_num))
        row.append(str(font.character_count))
        for charset in _library.charsets:
            row.append(str(CharsetInfo(font, charset).coverage))
        writer.writerow(row)
    output.seek(0)
    return output.read()
def __generateTests__(cls):
    """Attach one ``test_charset_<name>`` method per subset enabled in
    bakery.yaml, each asserting 100% coverage of that subset."""
    # Strips non-identifier characters from charset names so they can be
    # embedded into method names.
    pattern = re.compile(r'[\W_]+')
    library = Library(collections=['subsets'])
    directory = UpstreamDirectory(cls.operator.path)
    yamlpath = os.path.join(cls.operator.path, 'bakery.yaml')
    try:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input, and the handle is never closed — confirm
        # whether bakery.yaml is always trusted here.
        bakerydata = yaml.load(open(yamlpath))
    except IOError:
        # Project has no bakery.yaml: fall back to bundled defaults.
        from bakery_cli.bakery import BAKERY_CONFIGURATION_DEFAULTS
        bakerydata = yaml.load(open(BAKERY_CONFIGURATION_DEFAULTS))
    for fontpath in directory.UFO + directory.TTX:
        font = FontFactory.openfont(os.path.join(cls.operator.path, fontpath))
        for charmap in font.get_orthographies(_library=library):
            common_name = charmap.charset.common_name.replace('Subset ', '')
            shortname = pattern.sub('', common_name)
            # Only generate tests for subsets the project opted into.
            if shortname not in bakerydata['subset']:
                continue
            # NOTE: Python 2 `exec` statements — this block is not
            # Python 3 compatible as written.
            exec 'cls.test_charset_%s = get_test_subset_function(%s)' % (shortname, charmap.coverage)
            exec 'cls.test_charset_%s.__func__.__doc__ = "Is %s covered 100%%?"' % (shortname, common_name)
def get_subsets_coverage_data(source_fonts_paths):
    """ Return dict mapping key to the corresponding subsets coverage

        {'subsetname':
            {'fontname-light': 13, 'fontname-bold': 45},
         'subsetname':
            {'fontname-light': 9, 'fontname-bold': 100}
        }
    """
    subsets_library = Library(collections=['subsets'])
    result = {}
    for path in source_fonts_paths:
        try:
            font = FontFactory.openfont(path)
        except AssertionError:
            # Unreadable/unsupported font: leave it out of the report.
            continue
        for info in font.get_orthographies(_library=subsets_library):
            name = info.charmap.common_name.replace('Subset ', '')
            result.setdefault(name, {})[path] = info.coverage
    return result
# Script entry: expects one CLI argument, the directory to scan.
# NOTE: Python 2 `print` statements and dict.iteritems() throughout.
if len(sys.argv) < 2:
    print __doc__
    sys.exit()
# Check the arg is a directory
workingDir = sys.argv[1]
if os.path.exists(workingDir):
    # If it is a directory, change context to it
    os.chdir(workingDir)
else:
    print __doc__
    sys.exit()
# Run pyFontaine on all the TTF fonts
fonts = {}
for filename in glob.glob("*.*tf"):
    fontaine = FontFactory.openfont(filename)
    fonts[filename] = fontaine
# Make a plain dictionary
family = {}
for fontfilename, fontaine in fonts.iteritems():
    # Use the font file name as a key to a dictionary of char sets
    family[fontfilename] = {}
    #print fontfilename
    for charset, coverage, percentcomplete, missingchars in fontaine.get_orthographies():
        # Use each char set name as a key to a dictionary of this font's coverage details
        charsetname = charset.common_name
        family[fontfilename][charsetname] = {}
        family[fontfilename][charsetname]['coverage'] = coverage  # unsupport, fragmentary, partial, full
# Script entry: expects one CLI argument, the directory to scan.
# NOTE: Python 2 `print` statements and dict.iteritems() throughout.
if len(sys.argv) < 2:
    print __doc__
    sys.exit()
# Check the arg is a directory
workingDir = sys.argv[1]
if os.path.exists(workingDir):
    # If it is a directory, change context to it
    os.chdir(workingDir)
else:
    print __doc__
    sys.exit()
# Run pyFontaine on all the TTF fonts
fonts = {}
for filename in glob.glob("*.*tf"):
    fontaine = FontFactory.openfont(filename)
    fonts[filename] = fontaine
# Make a plain dictionary
family = {}
for fontfilename, fontaine in fonts.iteritems():
    # Use the font file name as a key to a dictionary of char sets
    family[fontfilename] = {}
    #print fontfilename
    for charset, coverage, percentcomplete, missingchars in fontaine.get_orthographies():
        # Use each char set name as a key to a dictionary of this font's coverage details
        charsetname = charset.common_name
        family[fontfilename][charsetname] = {}
        family[fontfilename][charsetname]['coverage'] = coverage  # unsupport, fragmentary, partial, full
        family[fontfilename][charsetname]['percentcomplete'] = percentcomplete  # int
        family[fontfilename][charsetname]['missingchars'] = missingchars  # list of ord numbers
def construct_tree(self, fonts):
    """Build the report tree for `fonts`, including an ``identical`` flag
    that says whether every font covers the same character sets.

    fonts: iterable of font file paths.
    Returns OrderedDict {'fonts': [...], 'identical': bool}; the flag is
    dropped when only one font was given.
    """
    if self.show_hilbert:
        # matplotlib is only needed for the optional coverage PNGs.
        try:
            import matplotlib
        except ImportError:
            raise Exception(
                'Install matplotlib to use --show-hilbert feature')
    tree = OrderedDict({'fonts': [], 'identical': True})
    # in process of generating fonts information tree collect for each
    # font character set. then compare them and if they are not identical
    # set to tree flag `identical` to `False`
    fonts_charactersets_names = []
    for font_filename in fonts:
        font = FontFactory.openfont(font_filename, charsets=self.charsets)
        F = OrderedDict()
        desc = OrderedDict()
        desc['Filename'] = os.path.basename(font_filename)
        desc['commonName'] = font.common_name
        desc['subFamily'] = font.sub_family
        desc['style'] = font.style_flags
        desc['weight'] = font.weight
        desc['fixedWidth'] = yesno(font.is_fixed_width)
        desc['fixedSizes'] = yesno(font.has_fixed_sizes)
        desc['copyright'] = extract_firstline(font.copyright or '')
        desc['license'] = extract_firstline(font.license or '')
        desc['licenseUrl'] = font.license_url
        desc['version'] = font.version
        desc['vendor'] = extract_firstline(font.vendor or '')
        desc['vendorUrl'] = font.vendor_url
        desc['designer'] = font.designer
        desc['designerUrl'] = font.designer_url
        desc['glyphCount'] = font.glyph_num
        desc['characterCount'] = font.character_count
        font_charactersets_names = []
        for charsetinfo in font.get_orthographies(self.library):
            # Unsupported charsets are omitted entirely.
            if charsetinfo.support_level == SUPPORT_LEVEL_UNSUPPORTED:
                continue
            if 'orthographies' not in desc:
                desc['orthographies'] = []
            orth = OrderedDict({'orthography': OrderedDict()})
            # Defaults describe full support; overwritten below when the
            # support is only partial.
            orth['orthography']['Coverage'] = 0
            orth['orthography']['SetTotal'] = 0
            orth['orthography']['percentCoverage'] = 0
            orth['orthography']['missingValues'] = ''
            orth['orthography'][
                'commonName'] = charsetinfo.charset.common_name
            orth['orthography'][
                'nativeName'] = charsetinfo.charset.native_name
            if charsetinfo.support_level != SUPPORT_LEVEL_FULL:
                orth['orthography']['Coverage'] = charsetinfo.glyphs_count
                orth['orthography'][
                    'SetTotal'] = charsetinfo.glyphs_in_charset_count
                orth['orthography'][
                    'percentCoverage'] = charsetinfo.coverage
                if self.missingValues:
                    values = u'\n%s' % u'\n'.join(
                        unicodevalues_asstring(charsetinfo.missing))
                    orth['orthography']['missingValues'] = values
            orth['orthography']['supportLevel'] = charsetinfo.support_level
            desc['orthographies'].append(orth)
            font_charactersets_names.append(
                charsetinfo.charset.common_name)
        if 'orthographies' in desc:
            # Best-covered orthographies first; full support (no
            # percentCoverage key) sorts as 100.
            desc['orthographies'] = sorted(
                desc['orthographies'], reverse=True,
                key=lambda x: x['orthography'].get('percentCoverage', 100))
        if fonts_charactersets_names:
            # Compare this font's charset list with the first font's.
            if (tree['identical']
                    and fonts_charactersets_names != font_charactersets_names):
                tree['identical'] = False
        if not fonts_charactersets_names:
            # First font seen: remember its charset list as the baseline.
            fonts_charactersets_names = font_charactersets_names
        if self.show_hilbert:
            self.represent_coverage_png(font)
        F['font'] = desc
        tree['fonts'].append(F)
    # `identical` is meaningless for a single font.
    if len(tree['fonts']) == 1:
        tree.pop('identical')
    return tree
def construct_tree(self, fonts):
    """Build the report tree for `fonts`, including an ``identical`` flag
    that says whether every font covers the same character sets.

    fonts: iterable of font file paths.
    Returns OrderedDict {'fonts': [...], 'identical': bool}; the flag is
    dropped when only one font was given.
    """
    if self.show_hilbert:
        # matplotlib is only needed for the optional coverage PNGs.
        try:
            import matplotlib
        except ImportError:
            raise Exception('Install matplotlib to use --show-hilbert feature')
    tree = OrderedDict({'fonts': [], 'identical': True})
    # in process of generating fonts information tree collect for each
    # font character set. then compare them and if they are not identical
    # set to tree flag `identical` to `False`
    fonts_charactersets_names = []
    for font_filename in fonts:
        font = FontFactory.openfont(font_filename, charmaps=self.charmaps)
        F = OrderedDict()
        desc = OrderedDict()
        desc['commonName'] = font.common_name
        desc['subFamily'] = font.sub_family
        desc['style'] = font.style_flags
        desc['weight'] = font.weight
        desc['fixedWidth'] = yesno(font.is_fixed_width)
        desc['fixedSizes'] = yesno(font.has_fixed_sizes)
        desc['copyright'] = extract_firstline(font.copyright or '')
        desc['license'] = extract_firstline(font.license or '')
        desc['licenseUrl'] = font.license_url
        desc['version'] = font.version
        desc['vendor'] = extract_firstline(font.vendor or '')
        desc['vendorUrl'] = font.vendor_url
        desc['designer'] = font.designer
        desc['designerUrl'] = font.designer_url
        desc['glyphCount'] = font.glyph_num
        desc['characterCount'] = font.character_count
        font_charactersets_names = []
        for charmapinfo in font.get_orthographies(self.library):
            # Unsupported charmaps are omitted entirely.
            if charmapinfo.support_level == SUPPORT_LEVEL_UNSUPPORTED:
                continue
            if 'orthographies' not in desc:
                desc['orthographies'] = []
            orth = OrderedDict({'orthography': OrderedDict()})
            orth['orthography']['commonName'] = charmapinfo.charmap.common_name
            orth['orthography']['nativeName'] = charmapinfo.charmap.native_name
            orth['orthography']['supportLevel'] = charmapinfo.support_level
            # Coverage details are only recorded when support is partial.
            if charmapinfo.support_level != SUPPORT_LEVEL_FULL:
                values = u'\n%s' % u'\n'.join(
                    unicodevalues_asstring(charmapinfo.missing))
                orth['orthography']['percentCoverage'] = charmapinfo.coverage
                if self.missingValues:
                    orth['orthography']['missingValues'] = values
            desc['orthographies'].append(orth)
            font_charactersets_names.append(charmapinfo.charmap.common_name)
        if fonts_charactersets_names:
            # Compare this font's charmap list with the first font's.
            if (tree['identical']
                    and fonts_charactersets_names != font_charactersets_names):
                tree['identical'] = False
        if not fonts_charactersets_names:
            # First font seen: remember its charmap list as the baseline.
            fonts_charactersets_names = font_charactersets_names
        if self.show_hilbert:
            self.represent_coverage_png(font)
        F['font'] = desc
        tree['fonts'].append(F)
    # `identical` is meaningless for a single font.
    if len(tree['fonts']) == 1:
        tree.pop('identical')
    return tree
def generate(config, outfile='index.html'):
    """Render the summary/index report page and dump its JSON side files.

    config: dict with at least a 'path' key; 'failed' triggers a minimal
    failure page instead of the full report.
    outfile: template name and output HTML file name.
    """
    if config.get('failed'):
        # Build failed: emit a minimal page linking to the Travis build.
        destfile = open(op.join(config['path'], outfile), 'w')
        slug = os.environ.get('TRAVIS_REPO_SLUG', 'fontdirectory/dummy')
        link = 'https://travis-ci.org/{}'.format(slug)
        print(report_utils.render_template('failedbuild.html', link=link),
              file=destfile)
        return
    directory = UpstreamDirectory(config['path'])
    # NOTE(review): file handles opened with open(...).read() and
    # yaml.load(open(...)) below are never closed.
    metadata_file = open(op.join(config['path'], 'METADATA.json')).read()
    family_metadata = Metadata.get_family_metadata(metadata_file)
    faces = []
    for f in family_metadata.fonts:
        faces.append({'name': f.full_name, 'basename': f.post_script_name,
                      'path': f.filename, 'meta': f})
    destfile = open(op.join(config['path'], outfile), 'w')
    data = yaml.load(open(op.join(config['path'], 'METADATA.yaml')))
    basenames = [op.basename(font['path']) for font in faces]
    fontpaths = [op.join(config['path'], path) for path in directory.BIN]
    # Per-table font size statistics (raw, mean, grouped, deltas).
    ttftablesizes = get_fonts_table_sizes(fontpaths)
    ftables_data = get_fonts_table_sizes_grouped(fontpaths)
    ttftablesizes_mean = sorted(
        [list(item) for item in ftables_data.mean.items()])
    ttftablesizes_grouped = ftables_data.grouped
    ttftablesizes_delta = ftables_data.delta
    buildstate = yaml.load(open(op.join(config['path'], 'build.state.yaml')))
    autohint_sizes = buildstate.get('autohinting_sizes', [])
    vmet = get_metric_view(fontpaths)
    fonts = [(path, FontFactory.openfont(op.join(config['path'], path)))
             for path in directory.BIN]
    app_version = report_utils.git_info(config)
    # Flatten the {name: {...}} test data into a list of dicts.
    new_data = []
    for k in data:
        d = {'name': k}
        d.update(data[k])
        new_data.append(d)
    report_app = report_utils.ReportApp(config)
    metrics = {'data': vmet._its_metrics,
               'headings': vmet._its_metrics_header}
    table_sizes = {'tables': ttftablesizes[0], 'sizes': ttftablesizes[1:]}
    # Dump the JSON data files consumed by the report front-end.
    report_app.summary_page.dump_file(metrics, 'metrics.json')
    report_app.summary_page.dump_file(table_sizes, 'table_sizes.json')
    report_app.summary_page.dump_file(autohint_sizes, 'autohint_sizes.json')
    report_app.summary_page.dump_file(new_data, 'tests.json')
    report_app.summary_page.dump_file({'mean': ftables_data.mean,
                                       'grouped': ftables_data.grouped,
                                       'delta': ftables_data.delta},
                                      'fonts_tables_grouped.json')
    # Append an @font-face rule per face so the report can render samples.
    for face in family_metadata.fonts:
        face_template = "@font-face {{ font-family: {}; src: url(fonts/{});}}\n".format(face.metadata_object['postScriptName'], face.metadata_object['filename'])
        report_app.write_file(face_template,
                              op.join(report_app.css_dir, 'faces.css'),
                              mode='a')
    fonts_serialized = dict([(str(path),
                              font_factory_instance_to_dict(fontaine))
                             for path, fontaine in fonts])
    report_app.summary_page.dump_file(fonts_serialized,
                                      'fontaine_fonts.json')
    fonts_orthography = get_orthography(fonts)
    report_app.summary_page.dump_file({'fonts_list': fonts_orthography[0],
                                       'coverage_averages': fonts_orthography[1],
                                       'fonts_info': fonts_orthography[2]},
                                      'fonts_orthography.json')
    # Finally render the HTML page itself with all collected data.
    print(report_utils.render_template(
        outfile,
        current_page=outfile,
        fonts=faces,
        tests=data,
        basenames=basenames,
        filter_with_tag=filter_with_tag,
        filter_by_results_with_tag=filter_by_results_with_tag,
        vmet=vmet._its_metrics,
        vhead=vmet._its_metrics_header,
        autohinting_sizes=autohint_sizes,
        ttftablesizes=ttftablesizes,
        fontaineFonts=fonts,
        get_orthography=get_orthography,
        to_google_data_list=to_google_data_list,
        font_table_to_google_data_list=font_table_to_google_data_list,
        ttftablesizes_mean=ttftablesizes_mean,
        ttftablesizes_grouped=ttftablesizes_grouped,
        ttftablesizes_delta=ttftablesizes_delta,
        average_table_size=average_table_size,
        build_repo_url=report_utils.build_repo_url,
        hex=hex,
        sort=sort,
        app_version=app_version,
        failed_build=config.get('failed')), file=destfile)
def generate(config):
    """Collect build/test data for the family and dump the JSON side
    files consumed by the summary report page.

    config: dict with at least a 'path' key; a truthy 'failed' aborts.
    """
    if config.get('failed'):
        return
    directory = UpstreamDirectory(config['path'])
    # Prefer the regenerated metadata when present.
    # NOTE(review): file handles opened with open(...).read() and
    # yaml.load(open(...)) below are never closed.
    if op.exists(op.join(config['path'], 'METADATA.json.new')):
        metadata_file = open(op.join(config['path'], 'METADATA.json.new')).read()
    else:
        metadata_file = open(op.join(config['path'], 'METADATA.json')).read()
    family_metadata = Metadata.get_family_metadata(metadata_file)
    faces = []
    for f in family_metadata.fonts:
        faces.append({'name': f.full_name, 'basename': f.post_script_name,
                      'path': f.filename, 'meta': f})
    metadata = yaml.load(open(op.join(config['path'], 'METADATA.yaml')))
    upstreamdata = {}
    upstreamdatafile = op.join(config['path'], 'upstream.yaml')
    if op.exists(upstreamdatafile):
        upstreamdata = yaml.load(open(upstreamdatafile))
    # Merge per-binary YAML results with metadata and upstream data.
    data = {}
    for fp in directory.BIN:
        # fp[:-4] strips the 4-char extension (e.g. '.ttf').
        path = op.join(config['path'], '{}.yaml'.format(fp[:-4]))
        if op.exists(path):
            data[fp] = yaml.load(open(path))
    data.update(metadata)
    data.update(upstreamdata)
    fontpaths = [op.join(config['path'], path) for path in directory.BIN]
    ttftablesizes = get_fonts_table_sizes(fontpaths)
    ftables_data = get_fonts_table_sizes_grouped(fontpaths)
    buildstate = yaml.load(open(op.join(config['path'], 'build.state.yaml')))
    autohint_sizes = buildstate.get('autohinting_sizes', [])
    vmet = get_metric_view(fontpaths)
    fonts = [(path, FontFactory.openfont(op.join(config['path'], path)))
             for path in directory.BIN]
    stems = [get_stem_info(op.join(config['path'], path))
             for path in directory.BIN]
    # Flatten the {name: {...}} test data into a list of dicts.
    new_data = []
    for k in data:
        d = {'name': k}
        d.update(data[k])
        new_data.append(d)
    report_app = report_utils.BuildInfo(config)
    metrics = {'data': vmet._its_metrics,
               'headings': vmet._its_metrics_header}
    table_sizes = {'tables': ttftablesizes[0], 'sizes': ttftablesizes[1:]}
    # Dump the JSON data files consumed by the report front-end.
    report_app.summary_page.dump_file(metrics, 'metrics.json')
    report_app.summary_page.dump_file(stems, 'stems.json')
    report_app.summary_page.dump_file(table_sizes, 'table_sizes.json')
    report_app.summary_page.dump_file(autohint_sizes, 'autohint_sizes.json')
    report_app.summary_page.dump_file(new_data, 'tests.json')
    report_app.summary_page.dump_file({'mean': ftables_data.mean,
                                       'grouped': ftables_data.grouped,
                                       'delta': ftables_data.delta},
                                      'fonts_tables_grouped.json')
    # Append an @font-face rule per face so the report can render samples.
    for face in family_metadata.fonts:
        face_template = "@font-face {{ font-family: {}; src: url(fonts/{});}}\n".format(face.metadata_object['postScriptName'], face.metadata_object['filename'])
        report_app.write_file(face_template,
                              op.join(report_app.css_dir, 'faces.css'),
                              mode='a')
    fonts_serialized = dict([(str(path),
                              font_factory_instance_to_dict(fontaine))
                             for path, fontaine in fonts])
    report_app.summary_page.dump_file(fonts_serialized,
                                      'fontaine_fonts.json')
    fonts_orthography = get_orthography(fonts)
    report_app.summary_page.dump_file({'fonts_list': fonts_orthography[0],
                                       'coverage_averages': fonts_orthography[1],
                                       'fonts_info': fonts_orthography[2]},
                                      'fonts_orthography.json')
def project_fontaine(project, build):
    """Run pyFontaine charset-coverage analysis over a build's TTF files.

    Changes into the build's output directory, opens every ``*.ttf`` with
    pyFontaine, records per-font/per-charset coverage, stores the family
    averages for 'GWF latin' and 'Adobe Latin 3' in the project state, and
    returns an iterator of the ``fontaine.font.Font`` instances (consumed
    by the rfiles.html template).

    Parameters:
        project: project object providing ``login``, ``id``, ``config``
            and ``save_state()``.
        build: build object providing ``id`` and ``revision``.

    Returns:
        An iterator over the opened fontaine Font instances, or ``None``
        when the output directory does not exist.
    """
    from fontaine.font import FontFactory

    param = {
        'login': project.login,
        'id': project.id,
        'revision': build.revision,
        'build': build.id,
    }
    _out = op.join(current_app.config['DATA_ROOT'],
                   '%(login)s/%(id)s.out/%(build)s.%(revision)s/' % param)

    # Its very likely that _out exists, but just in case:
    if op.exists(_out):
        os.chdir(_out)
    else:
        # This is very unlikely, but should it happen, just return
        return

    # Run pyFontaine on all the TTF fonts
    fonts = {}
    for filename in glob.glob("*.ttf"):
        fonts[filename] = FontFactory.openfont(filename)

    # Make a plain dictionary, unlike the fancy data structures
    # used by pyFontaine :)
    family = {}
    # FIX: was fonts.iteritems(), which is Python-2-only and raises
    # AttributeError on Python 3.
    for fontfilename, fontaine in fonts.items():
        # Use the font file name as a key to a dictionary of char sets
        family[fontfilename] = {}
        for orthography in fontaine.get_orthographies():
            charset, coverage, percent_complete, missing_chars = orthography
            # Use each charset name as a key to dictionary of font's
            # coverage details
            charset = charset.common_name
            family[fontfilename][charset] = {
                # unsupport, fragmentary, partial, full
                'coverage': coverage,
                'percentcomplete': percent_complete,
                # list of ord numbers
                'missingchars': missing_chars,
            }
            # Use the char set name as a key to a list of the family's
            # average coverage
            if charset not in family:
                family[charset] = []
            # Append the char set percentage of each font file to the list
            family[charset].append(percent_complete)  # [10, 32, 40, 40] etc
            # And finally, if the list now has all the font files, make it
            # the mean average percentage
            if len(family[charset]) == len(fonts):
                family[charset] = sum(family[charset]) / len(fonts)

    # Make a plain dictionary with just the bits we want on the dashboard
    totals = {
        'gwf': family.get('GWF latin', None),
        'al3': family.get('Adobe Latin 3', None),
    }

    # Store it in the $(id).state.yaml file
    project.config['local']['charsets'] = totals
    project.save_state()

    # FIX: was fonts.itervalues() (Python-2-only); iter(dict.values())
    # preserves the original iterator-returning contract on Python 3.
    return iter(fonts.values())
def generate(config):
    """Generate the JSON data files backing the build summary report.

    Reads the family metadata, per-font YAML test results, font table
    sizes, vertical metrics, stem info and pyFontaine serializations for
    every binary font under ``config['path']``, and dumps each dataset
    through a ``report_utils.BuildInfo`` summary page. Also appends one
    ``@font-face`` rule per face to the report stylesheet. The orthography
    dump is currently disabled (see the TODO at the bottom).

    Parameters:
        config: build configuration mapping; must contain 'path' (the
            family directory). When ``config['failed']`` is truthy, report
            generation is skipped entirely.
    """
    if config.get('failed'):
        return

    directory = UpstreamDirectory(config['path'])

    # Prefer the freshly regenerated metadata file when the build made one.
    metadata_path = op.join(config['path'], 'METADATA.json.new')
    if not op.exists(metadata_path):
        metadata_path = op.join(config['path'], 'METADATA.json')
    with open(metadata_path) as metadata_fp:
        family_metadata = Metadata.get_family_metadata(metadata_fp.read())

    # NOTE(review): safe_load avoids the arbitrary-object construction that
    # plain yaml.load permits; these files are plain data, so it suffices.
    with open(op.join(config['path'], 'METADATA.yaml')) as yaml_fp:
        metadata = yaml.safe_load(yaml_fp)

    upstreamdata = {}
    upstreamdatafile = op.join(config['path'], 'upstream.yaml')
    if op.exists(upstreamdatafile):
        with open(upstreamdatafile) as yaml_fp:
            upstreamdata = yaml.safe_load(yaml_fp)

    # Per-font test results live in '<fontname>.yaml' next to each binary.
    data = {}
    for binname in directory.BIN:
        path = op.join(config['path'], '{}.yaml'.format(binname[:-4]))
        if op.exists(path):
            with open(path) as yaml_fp:
                data[binname] = yaml.safe_load(yaml_fp)
    data.update(metadata)
    data.update(upstreamdata)

    fontpaths = [op.join(config['path'], path) for path in directory.BIN]
    ttftablesizes = get_fonts_table_sizes(fontpaths)
    ftables_data = get_fonts_table_sizes_grouped(fontpaths)

    with open(op.join(config['path'], 'build.state.yaml')) as yaml_fp:
        buildstate = yaml.safe_load(yaml_fp)
    autohint_sizes = buildstate.get('autohinting_sizes', [])

    vmet = get_metric_view(fontpaths)

    fonts = [(path, FontFactory.openfont(op.join(config['path'], path)))
             for path in directory.BIN]
    stems = [get_stem_info(op.join(config['path'], path))
             for path in directory.BIN]

    # Flatten {fontname: results} into a list of {'name': ..., **results}.
    new_data = []
    for k in data:
        d = {'name': k}
        d.update(data[k])
        new_data.append(d)

    report_app = report_utils.BuildInfo(config)
    metrics = {'data': vmet._its_metrics,
               'headings': vmet._its_metrics_header}
    # First row of ttftablesizes is the table names; the rest are sizes.
    table_sizes = {'tables': ttftablesizes[0], 'sizes': ttftablesizes[1:]}
    report_app.summary_page.dump_file(metrics, 'metrics.json')
    report_app.summary_page.dump_file(stems, 'stems.json')
    report_app.summary_page.dump_file(table_sizes, 'table_sizes.json')
    report_app.summary_page.dump_file(autohint_sizes, 'autohint_sizes.json')
    report_app.summary_page.dump_file(new_data, 'tests.json')
    report_app.summary_page.dump_file({'mean': ftables_data.mean,
                                       'grouped': ftables_data.grouped,
                                       'delta': ftables_data.delta},
                                      'fonts_tables_grouped.json')

    # Append one @font-face rule per face to the report stylesheet.
    for face in family_metadata.fonts:
        face_template = (
            "@font-face {{ font-family: {}; src: url(fonts/{});}}\n".format(
                face.metadata_object['postScriptName'],
                face.metadata_object['filename']))
        report_app.write_file(face_template,
                              op.join(report_app.css_dir, 'faces.css'),
                              mode='a')

    fonts_serialized = {str(path): font_factory_instance_to_dict(fontaine)
                        for path, fontaine in fonts}
    report_app.summary_page.dump_file(fonts_serialized, 'fontaine_fonts.json')

    # TODO(review): orthography dump deliberately disabled upstream
    # ("Temporarily remove this broken piece of code"); restore once
    # get_orthography is fixed.
    if False:
        fonts_orthography = get_orthography(fonts)
        report_app.summary_page.dump_file(
            {'fonts_list': fonts_orthography[0],
             'coverage_averages': fonts_orthography[1],
             'fonts_info': fonts_orthography[2]},
            'fonts_orthography.json')