Code example #1
def generate_ttc_zips_with_7za():
    """Generate zipped versions of the ttc files and put in pkgs directory."""

    # The font family code skips the ttc files, but we want them in the
    # package directory. Instead of mucking with the family code to add the ttcs
    # and then exclude them from the other handling, we'll just handle them
    # separately.
    # For now at least, the only .ttc fonts are the CJK fonts

    pkg_dir = path.join(OUTPUT_DIR, 'pkgs')
    tool_utils.ensure_dir_exists(pkg_dir)
    filenames = [path.basename(f) for f in os.listdir(CJK_DIR) if f.endswith('.ttc')]
    for filename in filenames:
        zip_basename = filename + '.zip'
        zip_path = path.join(pkg_dir, zip_basename)
        if path.isfile(zip_path):
            print("Continue: assuming built %s is valid." % zip_basename)
            continue
        oldsize = os.stat(path.join(CJK_DIR, filename)).st_size
        pairs = [(path.join(CJK_DIR, filename), filename),
                 (SIL_LICENSE_LOC, 'LICENSE_CJK.txt')]
        tool_utils.generate_zip_with_7za_from_filepairs(pairs, zip_path)
        newsize = os.stat(zip_path).st_size
        print "Wrote " + zip_path
        print 'Compressed from {0:,}B to {1:,}B.'.format(oldsize, newsize)
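
The examples in this listing all funnel directory creation through tool_utils.ensure_dir_exists. As a rough sketch of the behavior the call sites rely on (the real nototools implementation may differ in detail): the helper creates the directory if it is missing, optionally empties it first when clean=True, and returns the path, which is why callers write dstdir = tool_utils.ensure_dir_exists(dstdir).

import os
import shutil

def ensure_dir_exists(dirpath, clean=False):
    """Sketch only: create dirpath if missing, optionally empty it, return it."""
    if clean and os.path.isdir(dirpath):
        shutil.rmtree(dirpath)  # clean=True starts over with an empty directory
    if not os.path.isdir(dirpath):
        os.makedirs(dirpath)    # also creates intermediate directories
    return dirpath              # callers rely on getting the path back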
Code example #2
def generate_samples(dstdir, imgdir, summary):
  if imgdir:
    imgdir = tool_utils.ensure_dir_exists(imgdir)
    print 'writing images to %s' % imgdir

  if dstdir:
    dstdir = tool_utils.ensure_dir_exists(dstdir)
    print 'writing files to %s' % dstdir

  verbose = summary
  script_map = get_script_to_exemplar_data_map()
  for script in sorted(script_map):
    sample, info = generate_sample_for_script(script, script_map[script])
    if summary:
      print
      print info
      print sample

    if imgdir:
      path = os.path.join(imgdir, 'und-%s_chars.png' % script)
      print 'writing image %s.png' % script
      rtl = script in ['Adlm', 'Arab', 'Hebr', 'Nkoo', 'Syrc', 'Tfng', 'Thaa']
      create_image.create_png(
          sample, path, font_size=34, line_spacing=40, width=800, rtl=rtl)

    if dstdir:
      filename = 'und-%s_chars.txt' % script
      print 'writing data %s' % filename
      filepath = os.path.join(dstdir, filename)
      with codecs.open(filepath, 'w', 'utf-8') as f:
        f.write(sample + '\n')
Code example #3
File: ttc_utils.py Project: dougfelt/nototools
def ttcfile_build(output_ttc_path, fontpath_list, tool_path=_BUILD_TOOL_PATH):
  """Build a .ttc from a list of font files."""
  otf2otc = tool_utils.resolve_path(tool_path)
  if not otf2otc:
    raise ValueError('can not resolve %s' % tool_path)

  tool_utils.ensure_dir_exists(path.dirname(output_ttc_path))
  # capture and discard standard output, the tool is noisy
  subprocess.check_output([otf2otc, '-o', output_ttc_path] + fontpath_list)
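
As a hedged usage sketch, here is how ttcfile_build pairs with ttcfile_extract (code example #14). The font paths are hypothetical, and both calls assume nototools is importable and that the otf2otc/otc2otf tools behind the default tool paths resolve on the local system.

from nototools import ttc_utils  # assumes the nototools package is importable

# hypothetical member fonts to pack into one collection
fonts = ['fonts/NotoSansCJKjp-Regular.otf', 'fonts/NotoSansCJKkr-Regular.otf']
ttc_utils.ttcfile_build('build/NotoSansCJK-Regular.ttc', fonts)

# the reverse step: unpack the members and report their file names
names = ttc_utils.ttcfile_extract('build/NotoSansCJK-Regular.ttc', 'build/extracted')
print(names)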
Code example #4
File: android_patches.py Project: moyogo/nototools
def patch_fonts(srcdir, dstdir):
  """Remove dstdir and repopulate with patched contents of srcdir (and
  its 'cjk' subdirectory if it exists)."""

  srcdir = tool_utils.resolve_path(srcdir)
  dstdir = tool_utils.resolve_path(dstdir)

  tool_utils.ensure_dir_exists(dstdir, clean=True)

  patch_hyphen(srcdir, dstdir)
  patch_cjk_ttcs(path.join(srcdir, 'cjk'), path.join(dstdir, 'cjk'))
  subset_symbols(srcdir, dstdir)
  patch_post_table(srcdir, dstdir)
Code example #5
File: android_patches.py Project: moyogo/nototools
def patch_cjk_ttcs(srcdir, dstdir):
  """Call patch_cjk_ttc for each ttc file in srcdir, writing the
  result to dstdir using the same name."""

  if not path.isdir(srcdir):
    print '%s is not a directory' % srcdir
    return

  ttc_files = [f for f in os.listdir(srcdir) if f.endswith('.ttc')]
  if not ttc_files:
    print 'no .ttc file to patch in %s' % srcdir
    return

  tool_utils.ensure_dir_exists(dstdir)
  for f in ttc_files:
    patch_cjk_ttc(path.join(srcdir, f), path.join(dstdir, f))
Code example #6
def autofix_fonts(
    font_names, dstdir, release_dir, version, version_info, autohint, dry_run):
  dstdir = tool_utils.ensure_dir_exists(dstdir)

  font_names.sort()
  print 'Processing\n  %s' % '\n  '.join(font_names)
  print 'Dest dir: %s' % dstdir

  if release_dir is None:
    reldir = None
  else:
    reldir = tool_utils.resolve_path(release_dir)
    if not path.isdir(reldir):
      raise Exception('release dir "%s" does not exist' % reldir)

  if version_info is None or version_info == '[fonts]':
    if version_info is None:
      version_info = _get_version_info(font_names)
    else:
      version_info = _get_fonts_repo_version_info()

    if not version_info:
      raise Exception('could not compute version info from fonts')
    print 'Computed version_info: %s' % version_info
  else:
    _check_version_info(version_info)

  _check_version(version)
  _check_autohint(autohint)

  if dry_run:
    print '*** dry run %s***' % ('(autohint) ' if autohint else '')
  for f in font_names:
    fix_font(f, dstdir, reldir, version, version_info, autohint, dry_run)
Code example #7
def autohint_font(src, dst, script, dry_run):
  code = _autohint_code(src, script)
  if code == 'not-hinted':
    print 'Warning: no hinting information for %s, script %s' % (src, script)
    return

  if code is None:
    print 'Warning: unable to autohint %s' % src
    return

  if code == 'no-script':
    args = ['ttfautohint', '-t', '-W', src, dst]
  else:
    args = ['ttfautohint', '-t', '-W', '-f', code, src, dst]
  if dry_run:
    print 'dry run would autohint:\n  "%s"' % ' '.join(args)
    return

  hinted_dir = tool_utils.ensure_dir_exists(path.dirname(dst))
  try:
    subprocess.check_call(args)
  except Exception:
    print '### failed to autohint %s' % src
    # we failed to autohint; keep going with the remaining fonts, but
    # ttfautohint will have left an empty file behind, so remove it.
    try:
      os.remove(dst)
    except OSError:
      pass
    return

  print 'wrote autohinted %s using %s' % (dst, code)
Code example #8
File: svg_cleaner.py Project: dougfelt/noto-emoji
def clean_svg_files(in_dir, out_dir, match_pat=None, clean=False, strip=False):
  regex = re.compile(match_pat) if match_pat else None
  count = 0

  if clean and path.samefile(in_dir, out_dir):
    logging.error('Cannot clean %s (same as in_dir)', out_dir)
    return

  out_dir = tool_utils.ensure_dir_exists(out_dir, clean=clean)

  cleaner = SvgCleaner(strip)
  for file_name in os.listdir(in_dir):
    if regex and not regex.match(file_name):
      continue
    in_path = os.path.join(in_dir, file_name)
    logging.debug('read: %s', in_path)
    with open(in_path) as in_fp:
      result = cleaner.clean_svg(in_fp.read())
    out_path = os.path.join(out_dir, file_name)
    with codecs.open(out_path, 'w', 'utf-8') as out_fp:
      logging.debug('write: %s', out_path)
      out_fp.write(result)
      count += 1
  if not count:
    logging.warning('Failed to match any files')
  else:
    logging.info('Wrote %s files to %s', count, out_dir)
Code example #9
def update_samples(
    sample_dir, udhr_dir, bcp_to_code_attrib_sample, in_repo, no_stage):
  """Create samples in sample_dir based on the bcp to c_a_s map.  Stage
  if sample_dir is in the repo.  If sample_dir is in the repo, don't
  overwrite samples whose most recent log entry does not start with
  'Updated by tool'."""

  tool_utils.check_dir_exists(udhr_dir)

  if (in_repo and not no_stage and os.path.isdir(sample_dir) and
      not tool_utils.git_is_clean(sample_dir)):
    raise ValueError('Please clean %s.' % sample_dir)

  if in_repo:
    repo, subdir = os.path.split(sample_dir)
    tool_samples = frozenset(tool_utils.get_tool_generated(repo, subdir))
    print 'allowing overwrite of %d files:\n  %s' % (
        len(tool_samples), ', '.join(sorted(tool_samples)))

  comments = [
    '# Attributions for sample excerpts:',
    '#   original - in the public domain, no attribution',
    '#   UN - UN, OHCHR, or affiliate, attribute to UN',
    '#   other - not a UN translation',
    '#   none - not on ohchr, not a UN translation'
  ]
  sample_attrib_list = []
  sample_dir = tool_utils.ensure_dir_exists(sample_dir)
  count = 0
  for bcp, (code, attrib, sample) in bcp_to_code_attrib_sample.iteritems():
    dst_file = '%s_udhr.txt' % bcp
    dst_path = os.path.join(sample_dir, dst_file)
    if in_repo and os.path.isfile(dst_path) and dst_file not in tool_samples:
      print 'Not overwriting modified file %s' % dst_file
    else:
      with codecs.open(dst_path, 'w', 'utf8') as f:
        f.write(sample)
      count += 1
    sample_attrib_list.append('%s: %s' % (dst_file, attrib))
  print 'Created %d samples' % count

  # Some existing samples that we don't overwrite are not in
  # bcp_to_code_attrib_sample, so they're not listed.  Readers of the
  # attributions.txt file will need to default these to 'none'.
  attrib_data = '\n'.join(comments + sorted(sample_attrib_list)) + '\n'
  with open(os.path.join(sample_dir, 'attributions.txt'), 'w') as f:
    f.write(attrib_data)

  if in_repo and not no_stage:
    tool_utils.git_add_all(sample_dir)

  date = datetime.datetime.now().strftime('%Y-%m-%d')
  dst = 'in %s ' % sample_dir if not in_repo else ''
  noto_ix = udhr_dir.find('nototools')
  src = udhr_dir if noto_ix == -1 else udhr_dir[noto_ix:]

  # prefix of this sample commit message indicates that these were
  # tool-generated
  print 'Updated by tool - sample files %sfrom %s as of %s.' % (dst, src, date)
Code example #10
def write_block_coverage(block_data, names, msg, fmt=None, out_file=sys.stdout):
  if not fmt:
    if not out_file:
      fmt = 'text'
    else:
      ext = path.splitext(out_file)[1]
      if not ext or ext in ['.txt', '.text']:
        fmt = 'text'
      elif ext in ['.htm', '.html']:
        fmt = 'html'
      elif ext in ['.csv']:
        fmt = 'csv'
  if out_file:
    tool_utils.ensure_dir_exists(path.dirname(out_file))
    with codecs.open(out_file, 'w', 'utf-8') as f:
      _write_block_coverage_fmt(block_data, names, msg, fmt, f)
  else:
    _write_block_coverage_fmt(block_data, names, msg, fmt, sys.stdout)
Code example #11
def _copy_files(src, dst):
  """Copies files named 'emoji_u*.png' from dst to src, and return a set of
  the names with 'emoji_u' and the extension stripped."""
  code_strings = set()
  tool_utils.check_dir_exists(src)
  dst = tool_utils.ensure_dir_exists(dst, clean=True)
  for f in glob.glob(path.join(src, 'emoji_u*.png')):
    shutil.copy(f, dst)
    code_strings.add(path.splitext(path.basename(f))[0][7:])
  return code_strings
Code example #12
def generate_names(
    srcdir, outfile, ordering_file, eext=None, esep=None, force=False,
    pretty_print=False, verbose=False):
  if not path.isdir(srcdir):
    print >> sys.stderr, '%s is not a directory' % srcdir
    return

  parent = tool_utils.ensure_dir_exists(path.dirname(outfile))
  if path.exists(outfile):
    if not force:
      print >> sys.stderr, '%s already exists' % outfile
      return
    if not path.isfile(outfile):
      print >> sys.stderr, '%s is not a file' % outfile
      return

  eo = emoji_ordering.from_file(ordering_file, ext=eext, sep=esep)

  estr_to_file = _estr_to_file(srcdir, eo, verbose)

  skipped = collections.defaultdict(list)
  data = []
  for category in eo.category_names():
    name_data = []
    for estr in eo.emoji_in_category(category):
      if estr not in estr_to_file:
        skipped[category].append(estr)
      else:
        name_data.append(_name_data(estr, estr_to_file))
    data.append({'category': category, 'emojis': name_data})

  if verbose and skipped:
    # report sequences in the categories for which we have no images
    total = 0
    print '\nskipped items (no images):'
    for category in eo.category_names():
      estrs = skipped.get(category)
      if not estrs:
        continue
      count = len(estrs)
      total += count
      cps_list = [[ord(cp) for cp in estr]
                for estr in estrs]
      _report_info(category, cps_list)
    print 'skipped %d items total' % total

  with open(outfile, 'w') as f:
    indent = 2 if pretty_print else None
    separators = None if pretty_print else (',', ':')
    json.dump(data, f, indent=indent, separators=separators)
  print 'wrote %s' % outfile
Code example #13
def generate_names(
    srcdir, outfile, ordering_file, eext=None, esep=None, force=False,
    pretty_print=False, verbose=False):
  if not path.isdir(srcdir):
    print >> sys.stderr, '%s is not a directory' % srcdir
    return

  parent = tool_utils.ensure_dir_exists(path.dirname(outfile))
  if path.exists(outfile):
    if not force:
      print >> sys.stderr, '%s already exists' % outfile
      return
    if not path.isfile(outfile):
      print >> sys.stderr, '%s is not a file' % outfile
      return

  eo = emoji_ordering.from_file(ordering_file, ext=eext, sep=esep)

  estr_to_file = _estr_to_file(srcdir, eo, verbose)

  skipped = collections.defaultdict(list)
  data = []
  for category in eo.category_names():
    name_data = []
    for estr in eo.emoji_in_category(category):
      if estr not in estr_to_file:
        skipped[category].append(estr)
      else:
        name_data.append(_name_data(estr, estr_to_file))
    data.append({'category': category, 'emojis': name_data})

  if verbose and skipped:
    total = 0
    print 'skipped items (no images):'
    for category in eo.category_names():
      estrs = skipped.get(category)
      if not estrs:
        continue
      count = len(estrs)
      total += count
      print '%s skipped %d items:' % (category, count)
      cps_list = [[ord(cp) for cp in estr]
                for estr in estrs]
      _report_info(' ', cps_list)
    print 'skipped %d items total' % total

  with open(outfile, 'w') as f:
    indent = 2 if pretty_print else None
    separators = None if pretty_print else (',', ':')
    json.dump(data, f, indent=indent, separators=separators)
  print 'wrote %s' % outfile
Code example #14
def ttcfile_extract(input_ttc_path, output_dir, tool_path=_EXTRACT_TOOL_PATH):
  """Extract .ttf/.otf fonts from a .ttc file, and return a list of the names of
  the extracted fonts."""

  otc2otf = tool_utils.resolve_path(tool_path)
  if not otc2otf:
    raise ValueError('can not resolve %s' % tool_path)

  input_ttc_path = tool_utils.resolve_path(input_ttc_path)
  output_dir = tool_utils.ensure_dir_exists(output_dir)
  with tool_utils.temp_chdir(output_dir):
    # capture and discard standard output, the tool is noisy
    subprocess.check_output([otc2otf, input_ttc_path])
  return ttcfile_filenames(input_ttc_path)
Code example #15
def generate_samples(dstdir, imgdir, summary):
    if imgdir:
        imgdir = tool_utils.ensure_dir_exists(imgdir)
        print 'writing images to %s' % imgdir

    if dstdir:
        dstdir = tool_utils.ensure_dir_exists(dstdir)
        print 'writing files to %s' % dstdir

    verbose = summary
    script_map = get_script_to_exemplar_data_map()
    for script in sorted(script_map):
        sample, info = generate_sample_for_script(script, script_map[script])
        if summary:
            print
            print info
            print sample

        if imgdir:
            path = os.path.join(imgdir, 'und-%s_chars.png' % script)
            print 'writing image %s.png' % script
            rtl = script in [
                'Adlm', 'Arab', 'Hebr', 'Nkoo', 'Syrc', 'Tfng', 'Thaa'
            ]
            create_image.create_png(sample,
                                    path,
                                    font_size=34,
                                    line_spacing=40,
                                    width=800,
                                    rtl=rtl)

        if dstdir:
            filename = 'und-%s_chars.txt' % script
            print 'writing data %s' % filename
            filepath = os.path.join(dstdir, filename)
            with codecs.open(filepath, 'w', 'utf-8') as f:
                f.write(sample + '\n')
Code example #16
def generate_samples(dstdir, imgdir, summary):
    if imgdir:
        imgdir = tool_utils.ensure_dir_exists(imgdir)
        print("writing images to %s" % imgdir)

    if dstdir:
        dstdir = tool_utils.ensure_dir_exists(dstdir)
        print("writing files to %s" % dstdir)

    verbose = summary
    script_map = get_script_to_exemplar_data_map()
    for script in sorted(script_map):
        sample, info = generate_sample_for_script(script, script_map[script])
        if summary:
            print()
            print(info)
            print(sample)

        if imgdir:
            path = os.path.join(imgdir, "und-%s_chars.png" % script)
            print("writing image %s.png" % script)
            rtl = script in [
                "Adlm", "Arab", "Hebr", "Nkoo", "Syrc", "Tfng", "Thaa"
            ]
            create_image.create_png(sample,
                                    path,
                                    font_size=34,
                                    line_spacing=40,
                                    width=800,
                                    rtl=rtl)

        if dstdir:
            filename = "und-%s_chars.txt" % script
            print("writing data %s" % filename)
            filepath = os.path.join(dstdir, filename)
            with codecs.open(filepath, "w", "utf-8") as f:
                f.write(sample + "\n")
Code example #17
def build_svg_dir(dst_dir, clean=False, emoji_dir='', flags_dir=''):
  """Copies/renames files from emoji_dir and then flags_dir, giving them the
  standard format and prefix ('emoji_u' followed by codepoints expressed in hex
  separated by underscore).  If clean, removes the target dir before proceeding.
  If either emoji_dir or flags_dir are empty, skips them."""

  dst_dir = tool_utils.ensure_dir_exists(dst_dir, clean=clean)

  if not emoji_dir and not flags_dir:
    logging.warning('Nothing to do.')
    return

  if emoji_dir:
    copy_with_rename(
        emoji_dir, dst_dir, accept_pred=_is_svg_and_startswith_emoji)

  if flags_dir:
    copy_with_rename(
        flags_dir, dst_dir, accept_pred=_is_svg, rename=_flag_rename)
Code example #18
def autofix_fonts(
    font_names, src_root, dst_dir, release_dir, version, version_info, autohint,
    dry_run):
  dst_dir = tool_utils.resolve_path(dst_dir)
  dst_dir = tool_utils.ensure_dir_exists(dst_dir)

  font_names = sorted(_expand_font_names(font_names))
  print 'Processing %d fonts\n  %s' % (
      len(font_names), '\n  '.join(font_names[:5]) + '...')

  src_root = tool_utils.resolve_path(src_root)
  print 'Src root: %s' % src_root
  print 'Dest dir: %s' % dst_dir

  if release_dir is None:
    rel_dir = None
  else:
    rel_dir = tool_utils.resolve_path(release_dir)
    if not path.isdir(rel_dir):
      raise Exception('release dir "%s" does not exist' % rel_dir)

  if (version_info is None or version_info == '[fonts]' or
      version_info == '[fonts_alpha]'):
    if version_info is None:
      version_info = _get_version_info(font_names)
    else:
      version_info = _get_fonts_repo_version_info()

    if not version_info:
      raise Exception('could not compute version info from fonts')
    print 'Computed version_info: %s' % version_info
  else:
    _check_version_info(version_info)

  _check_version(version)
  _check_autohint(autohint)

  if dry_run:
    print '*** dry run %s***' % ('(autohint) ' if autohint else '')
  for f in font_names:
    f = path.join(src_root, f)
    fix_font(f, dst_dir, rel_dir, version, version_info, autohint, dry_run)
Code example #19
def autofix_fonts(
    font_names, src_root, dst_dir, release_dir, version, version_info, autohint,
    dry_run):
  dst_dir = tool_utils.resolve_path(dst_dir)
  dst_dir = tool_utils.ensure_dir_exists(dst_dir)

  font_names = sorted(_expand_font_names(font_names))
  print('Processing %d fonts\n  %s' % (
      len(font_names), '\n  '.join(font_names[:5]) + '...'))

  src_root = tool_utils.resolve_path(src_root)
  print('Src root: %s' % src_root)
  print('Dest dir: %s' % dst_dir)

  if release_dir is None:
    rel_dir = None
  else:
    rel_dir = tool_utils.resolve_path(release_dir)
    if not path.isdir(rel_dir):
      raise Exception('release dir "%s" does not exist' % rel_dir)

  if (version_info is None or version_info == '[fonts]' or
      version_info == '[fonts_alpha]'):
    if version_info is None:
      version_info = _get_version_info(font_names)
    else:
      version_info = _get_fonts_repo_version_info()

    if not version_info:
      raise Exception('could not compute version info from fonts')
    print('Computed version_info: %s' % version_info)
  else:
    _check_version_info(version_info)

  _check_version(version)
  _check_autohint(autohint)

  if dry_run:
    print('*** dry run %s***' % ('(autohint) ' if autohint else ''))
  for f in font_names:
    f = path.join(src_root, f)
    fix_font(f, dst_dir, rel_dir, version, version_info, autohint, dry_run)
Code example #20
def generate(cmapdata, dst_dir, scripts, namepats):
  if not scripts:
    raise ValueError('no scripts')

  if not namepats:
    raise ValueError('no namepats')

  if len(scripts) != len(namepats):
    if len(namepats) != 1:
      raise ValueError(
          'Have %d script%s but %d namepats' %
          (len(scripts), '' if len(scripts) == 1 else 's', len(namepats)))
    if '%s' not in namepats[0] and len(scripts) > 1:
      raise ValueError(
          'Have multiple scripts but single namepat "%s" has no substitution'
          % namepats[0])
    namepats = [namepats[0]] * len(scripts)

  dst_dir = tool_utils.ensure_dir_exists(dst_dir)
  for s, n in zip(scripts, namepats):
    outfile = path.join(dst_dir, (n % s) if '%s' in n else n)
    generate_single(cmapdata, s, outfile)
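
The namepats handling above lets a single pattern serve several scripts only when it contains '%s'. A minimal standalone sketch of that expansion rule (expand_namepats is a hypothetical helper, not part of the module):

def expand_namepats(scripts, namepats):
    # mirrors the pairing logic in generate(): one namepat may serve many
    # scripts only if it carries a '%s' substitution slot
    if len(scripts) != len(namepats):
        if len(namepats) != 1:
            raise ValueError('have %d scripts but %d namepats'
                             % (len(scripts), len(namepats)))
        if '%s' not in namepats[0] and len(scripts) > 1:
            raise ValueError('single namepat %r has no substitution' % namepats[0])
        namepats = [namepats[0]] * len(scripts)
    return [(n % s) if '%s' in n else n for s, n in zip(scripts, namepats)]

print(expand_namepats(['Arab', 'Deva'], ['cmap_%s.txt']))  # ['cmap_Arab.txt', 'cmap_Deva.txt']
print(expand_namepats(['Arab'], ['arabic_cmap.txt']))      # ['arabic_cmap.txt']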
Code example #21
def autohint_font(src, dst, script, dry_run):
  code = _autohint_code(src, script)
  if code == 'not-hinted':
    print 'Warning: no hinting information for %s, script %s' % (src, script)
    return

  if code is None:
    print 'Warning: unable to autohint %s' % src
    return

  if code == 'no-script':
    args = ['ttfautohint', '-t', '-W', src, dst]
  else:
    args = ['ttfautohint', '-t', '-W', '-f', code, src, dst]
  if dry_run:
    print 'dry run would autohint:\n  "%s"' % ' '.join(args)
    return

  hinted_dir = tool_utils.ensure_dir_exists(path.dirname(dst))
  subprocess.check_call(args)

  print 'wrote autohinted %s using %s' % (dst, code)
Code example #22
def build_svg_dir(dst_dir, clean=False, emoji_dir='', flags_dir=''):
    """Copies/renames files from emoji_dir and then flag_dir, giving them the
  standard format and prefix ('emoji_u' followed by codepoints expressed in hex
  separated by underscore).  If clean, removes the target dir before proceding.
  If either emoji_dir or flag_dir are empty, skips them."""

    dst_dir = tool_utils.ensure_dir_exists(dst_dir, clean=clean)

    if not emoji_dir and not flags_dir:
        logging.warning('Nothing to do.')
        return

    if emoji_dir:
        copy_with_rename(emoji_dir,
                         dst_dir,
                         accept_pred=_is_svg_and_startswith_emoji)

    if flags_dir:
        copy_with_rename(flags_dir,
                         dst_dir,
                         accept_pred=_is_svg,
                         rename=_flag_rename)
Code example #24
def create_thumbnails_and_aliases(src_dir, dst_dir, crop, dst_prefix):
  """Creates thumbnails in dst_dir based on sources in src.dir, using
  dst_prefix. Assumes the source prefix is 'emoji_u' and the common suffix
  is '.png'."""

  src_dir = tool_utils.resolve_path(src_dir)
  if not path.isdir(src_dir):
    raise ValueError('"%s" is not a directory')

  dst_dir = tool_utils.ensure_dir_exists(tool_utils.resolve_path(dst_dir))

  src_prefix = 'emoji_u'
  suffix = '.png'

  inv_aliases = get_inv_aliases()

  for src_file in os.listdir(src_dir):
    try:
      seq = unicode_data.strip_emoji_vs(
          filename_to_sequence(src_file, src_prefix, suffix))
    except ValueError as ve:
      logger.warning('Error (%s), skipping' % ve)
      continue

    src_path = path.join(src_dir, src_file)

    dst_file = sequence_to_filename(seq, dst_prefix, suffix)
    dst_path = path.join(dst_dir, dst_file)

    create_thumbnail(src_path, dst_path, crop)
    logger.info('wrote thumbnail%s: %s' % (
        ' with crop' if crop else '', dst_file))

    for alias_seq in inv_aliases.get(seq, ()):
      alias_file = sequence_to_filename(alias_seq, dst_prefix, suffix)
      alias_path = path.join(dst_dir, alias_file)
      shutil.copy2(dst_path, alias_path)
      logger.info('wrote alias: %s' % alias_file)
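
The thumbnail and alias helpers above rely on the file-name convention described in the build_svg_dir docstring (code example #17): 'emoji_u' followed by the codepoints in hex, joined by underscores. A minimal sketch of that round trip, assuming lowercase hex padded to at least four digits; the project's real filename_to_sequence/sequence_to_filename may handle more cases.

def sequence_to_filename(seq, prefix='emoji_u', suffix='.png'):
    # e.g. (0x1f1ef, 0x1f1f5) -> 'emoji_u1f1ef_1f1f5.png'
    return prefix + '_'.join('%04x' % cp for cp in seq) + suffix

def filename_to_sequence(name, prefix='emoji_u', suffix='.png'):
    if not (name.startswith(prefix) and name.endswith(suffix)):
        raise ValueError('unexpected file name: %s' % name)
    core = name[len(prefix):-len(suffix)]
    return tuple(int(part, 16) for part in core.split('_'))

print(sequence_to_filename((0x1f1ef, 0x1f1f5)))        # emoji_u1f1ef_1f1f5.png
print(filename_to_sequence('emoji_u1f1ef_1f1f5.png'))  # (127471, 127477)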
Code example #25
def create_thumbnails_and_aliases(src_dir, dst_dir, crop, dst_prefix):
    """Creates thumbnails in dst_dir based on sources in src.dir, using
  dst_prefix. Assumes the source prefix is 'emoji_u' and the common suffix
  is '.png'."""

    src_dir = tool_utils.resolve_path(src_dir)
    if not path.isdir(src_dir):
        raise ValueError('"%s" is not a directory')

    dst_dir = tool_utils.ensure_dir_exists(tool_utils.resolve_path(dst_dir))

    src_prefix = 'emoji_u'
    suffix = '.png'

    inv_aliases = get_inv_aliases()

    for src_file in os.listdir(src_dir):
        try:
            seq = unicode_data.strip_emoji_vs(
                filename_to_sequence(src_file, src_prefix, suffix))
        except ValueError as ve:
            logger.warning('Error (%s), skipping' % ve)
            continue

        src_path = path.join(src_dir, src_file)

        dst_file = sequence_to_filename(seq, dst_prefix, suffix)
        dst_path = path.join(dst_dir, dst_file)

        create_thumbnail(src_path, dst_path, crop)
        logger.info('wrote thumbnail%s: %s' %
                    (' with crop' if crop else '', dst_file))

        for alias_seq in inv_aliases.get(seq, ()):
            alias_file = sequence_to_filename(alias_seq, dst_prefix, suffix)
            alias_path = path.join(dst_dir, alias_file)
            shutil.copy2(dst_path, alias_path)
            logger.info('wrote alias: %s' % alias_file)
Code example #26
File: update_udhr_samples.py Project: hmr/nototools
def update_samples(sample_dir, udhr_dir, bcp_to_code_attrib_sample, in_repo, no_stage):
    """Create samples in sample_dir based on the bcp to c_a_s map.  Stage
  if sample_dir is in the repo.  If sample_dir is in the repo, don't
  overwrite samples whose most recent log entry does not start with
  'Updated by tool'."""

    tool_utils.check_dir_exists(udhr_dir)

    if (
        in_repo
        and not no_stage
        and os.path.isdir(sample_dir)
        and not tool_utils.git_is_clean(sample_dir)
    ):
        raise ValueError("Please clean %s." % sample_dir)

    if in_repo:
        repo, subdir = os.path.split(sample_dir)
        tool_samples = frozenset(tool_utils.get_tool_generated(repo, subdir))
        print(
            "allowing overwrite of %d files:\n  %s"
            % (len(tool_samples), ", ".join(sorted(tool_samples)))
        )

    comments = [
        "# Attributions for sample excerpts:",
        "#   original - in the public domain, no attribution",
        "#   UN - UN, OHCHR, or affiliate, attribute to UN",
        "#   other - not a UN translation",
        "#   none - not on ohchr, not a UN translation",
    ]
    sample_attrib_list = []
    sample_dir = tool_utils.ensure_dir_exists(sample_dir)
    count = 0
    for bcp, (code, attrib, sample) in bcp_to_code_attrib_sample.items():
        dst_file = "%s_udhr.txt" % bcp
        dst_path = os.path.join(sample_dir, dst_file)
        if in_repo and os.path.isfile(dst_path) and dst_file not in tool_samples:
            print("Not overwriting modified file %s" % dst_file)
        else:
            with codecs.open(dst_path, "w", "utf8") as f:
                f.write(sample)
            count += 1
        sample_attrib_list.append("%s: %s" % (dst_file, attrib))
    print("Created %d samples" % count)

    # Some existing samples that we don't overwrite are not in
    # bcp_to_code_attrib_sample, so they're not listed.  Readers of the
    # attributions.txt file will need to default these to 'none'.
    attrib_data = "\n".join(comments + sorted(sample_attrib_list)) + "\n"
    with open(os.path.join(sample_dir, "attributions.txt"), "w") as f:
        f.write(attrib_data)

    if in_repo and not no_stage:
        tool_utils.git_add_all(sample_dir)

    date = datetime.datetime.now().strftime("%Y-%m-%d")
    dst = "in %s " % sample_dir if not in_repo else ""
    noto_ix = udhr_dir.find("nototools")
    src = udhr_dir if noto_ix == -1 else udhr_dir[noto_ix:]

    # prefix of this sample commit message indicates that these were
    # tool-generated
    print("Updated by tool - sample files %sfrom %s as of %s." % (dst, src, date))
Code example #27
File: update_udhr_samples.py Project: hmr/nototools
def fetch_udhr(fetch_dir):
    """Fetch UDHR xml bundle from unicode.org to fetch_dir."""
    fetch_dir = tool_utils.ensure_dir_exists(fetch_dir)
    dstfile = os.path.join(fetch_dir, UDHR_XML_ZIP_NAME)
    result = urlretrieve(UDHR_XML_ZIP_URL, dstfile)
    print("Fetched: " + result[0])
Code example #28
def main():
  tool_utils.ensure_dir_exists(DST_DIR, clean=True)
  _patch_hyphen()
  # TODO: first unpack from ttc, then rebuild
  # _remove_cjk_emoji()
  _subset_symbols()
Code example #29
def generate_names(
    src_dir, dst_dir, skip_limit=20, omit_groups=None, pretty_print=False,
    verbose=False):
  srcdir = tool_utils.resolve_path(src_dir)
  if not path.isdir(srcdir):
    print('%s is not a directory' % src_dir, file=sys.stderr)
    return

  if omit_groups:
    unknown_groups = set(omit_groups) - set(unicode_data.get_emoji_groups())
    if unknown_groups:
      print('did not recognize %d group%s: %s' % (
          len(unknown_groups), '' if len(unknown_groups) == 1 else 's',
          ', '.join('"%s"' % g for g in omit_groups if g in unknown_groups)), file=sys.stderr)
      print('valid groups are:\n  %s' % (
          '\n  '.join(g for g in unicode_data.get_emoji_groups())), file=sys.stderr)
      return
    print('omitting %d group%s: %s' % (
        len(omit_groups), '' if len(omit_groups) == 1 else 's',
        ', '.join('"%s"' % g for g in omit_groups)))
  else:
    # might be None
    print('keeping all groups')
    omit_groups = []

  # make sure the destination exists
  dstdir = tool_utils.ensure_dir_exists(
      tool_utils.resolve_path(dst_dir))

  # _get_image_data returns canonical cp sequences
  print('src dir:', srcdir)
  seq_to_file = generate_emoji_html._get_image_data(srcdir, 'png', 'emoji_u')
  print('seq to file has %d sequences' % len(seq_to_file))

  # Aliases add non-gendered versions using gendered images for the most part.
  # But when we display the images, we don't distinguish genders in the
  # naming, we rely on the images-- so these look redundant. So we
  # intentionally don't generate images for these.
  # However, the alias file also includes the flag aliases, which we do want,
  # and it also fails to exclude the unknown flag pua (since it doesn't
  # map to anything), so we need to adjust for this.
  canonical_aliases = generate_emoji_html._get_canonical_aliases()

  aliases = set([
      cps for cps in canonical_aliases.keys()
      if not unicode_data.is_regional_indicator_seq(cps)])
  aliases.add((0xfe82b,))  # unknown flag PUA
  excluded = aliases | generate_emoji_html._get_canonical_excluded()

  # The flag aliases have distinct names, so we _do_ want to show them
  # multiple times.
  to_add = {}
  for seq in canonical_aliases:
    if unicode_data.is_regional_indicator_seq(seq):
      replace_seq = canonical_aliases[seq]
      if seq in seq_to_file:
        print('warning, alias %s has file %s' % (
            unicode_data.regional_indicator_seq_to_string(seq),
            seq_to_file[seq]))
        continue
      replace_file = seq_to_file.get(replace_seq)
      if replace_file:
        to_add[seq] = replace_file
  seq_to_file.update(to_add)

  data = []
  last_skipped_group = None
  skipcount = 0
  for group in unicode_data.get_emoji_groups():
    if group in omit_groups:
      continue
    name_data = []
    for seq in unicode_data.get_emoji_in_group(group):
      if seq in excluded:
        continue
      seq_file = seq_to_file.get(seq, None)
      if seq_file is None:
        skipcount += 1
        if verbose:
          if group != last_skipped_group:
            print('group %s' % group)
            last_skipped_group = group
          print('  %s (%s)' % (
              unicode_data.seq_to_string(seq),
              ', '.join(unicode_data.name(cp, 'x') for cp in seq)))
        if skip_limit >= 0 and skipcount > skip_limit:
          raise Exception('skipped too many items')
      else:
        name_data.append(_name_data(seq, seq_file))
    data.append({'category': group, 'emojis': name_data})

  outfile = path.join(dstdir, 'data.json')
  with open(outfile, 'w') as f:
    indent = 2 if pretty_print else None
    separators = None if pretty_print else (',', ':')
    json.dump(data, f, indent=indent, separators=separators)
  print('wrote %s' % outfile)
Code example #30
File: android_patches.py Project: wendelas/nototools
def main():
    tool_utils.ensure_dir_exists(DST_DIR, clean=True)
    _patch_hyphen()
    # TODO: first unpack from ttc, then rebuild
    # _remove_cjk_emoji()
    _subset_symbols()
Code example #31
def fix_font(f, dst_dir, rel_dir, version, version_info, autohint, dry_run):
  print('\n-----\nfont:', f)
  font = ttLib.TTFont(f)

  relfont = _get_release_font(f, rel_dir)
  expected_font_revision = get_new_version(font, relfont, version)
  if expected_font_revision is not None:
    font_revision = font_data.printable_font_revision(font, 3)
    if font_revision != expected_font_revision:
      _alert('revision', font_revision, expected_font_revision)
      font['head'].fontRevision = float(expected_font_revision)

    names = font_data.get_name_records(font)
    NAME_ID = 5
    font_version = names[NAME_ID]
    expected_version = (
        'Version %s;%s' % (expected_font_revision, version_info))
    if font_version != expected_version:
      _alert('version string', font_version, expected_version)
      font_data.set_name_record(font, NAME_ID, expected_version)

  expected_upem = 1000
  upem = font['head'].unitsPerEm
  if upem != expected_upem:
    print('expected %d upem but got %d upem' % (expected_upem, upem))

  if _is_ui_metrics(f):
    if upem == 2048:
      expected_ascent = 2163
      expected_descent = -555
    elif upem == 1000:
      expected_ascent = 1069
      expected_descent = -293
    else:
      raise Exception('no expected ui ascent/descent for upem: %d' % upem)

    font_ascent = font['hhea'].ascent
    font_descent = font['hhea'].descent
    if font_ascent != expected_ascent:
      _alert_and_check('ascent', font_ascent, expected_ascent, 2)
      font['hhea'].ascent = expected_ascent
      font['OS/2'].sTypoAscender = expected_ascent
      font['OS/2'].usWinAscent = expected_ascent

    if font_descent != expected_descent:
      _alert_and_check('descent', font_descent, expected_descent, -2)
      font['hhea'].descent = expected_descent
      font['OS/2'].sTypoDescender = expected_descent
      font['OS/2'].usWinDescent = -expected_descent

  tool_utils.ensure_dir_exists(path.join(dst_dir, 'unhinted'))

  fname = path.basename(f)
  udst = path.join(dst_dir, 'unhinted', fname)
  if dry_run:
    print('dry run would write:\n  "%s"' % udst)
  else:
    font.save(udst)
    print('wrote %s' % udst)

  if autohint:
    hdst = path.join(dst_dir, 'hinted', fname)
    autohint_font(udst, hdst, autohint, dry_run)
Code example #32
def generate_names(src_dir,
                   dst_dir,
                   skip_limit=20,
                   omit_groups=None,
                   pretty_print=False,
                   verbose=False):
    srcdir = tool_utils.resolve_path(src_dir)
    if not path.isdir(srcdir):
        print('%s is not a directory' % src_dir, file=sys.stderr)
        return

    if omit_groups:
        unknown_groups = set(omit_groups) - set(
            unicode_data.get_emoji_groups())
        if unknown_groups:
            print(
                'did not recognize %d group%s: %s' %
                (len(unknown_groups), '' if len(unknown_groups) == 1 else 's',
                 ', '.join('"%s"' % g
                           for g in omit_groups if g in unknown_groups)),
                file=sys.stderr)
            print('valid groups are:\n  %s' %
                  ('\n  '.join(g for g in unicode_data.get_emoji_groups())),
                  file=sys.stderr)
            return
        print('omitting %d group%s: %s' %
              (len(omit_groups), '' if len(omit_groups) == 1 else 's',
               ', '.join('"%s"' % g for g in omit_groups)))
    else:
        # might be None
        print('keeping all groups')
        omit_groups = []

    # make sure the destination exists
    dstdir = tool_utils.ensure_dir_exists(tool_utils.resolve_path(dst_dir))

    # _get_image_data returns canonical cp sequences
    print('src dir:', srcdir)
    seq_to_file = generate_emoji_html._get_image_data(srcdir, 'png', 'emoji_u')
    print('seq to file has %d sequences' % len(seq_to_file))

    # Aliases add non-gendered versions using gendered images for the most part.
    # But when we display the images, we don't distinguish genders in the
    # naming, we rely on the images-- so these look redundant. So we
    # intentionally don't generate images for these.
    # However, the alias file also includes the flag aliases, which we do want,
    # and it also fails to exclude the unknown flag pua (since it doesn't
    # map to anything), so we need to adjust for this.
    canonical_aliases = generate_emoji_html._get_canonical_aliases()

    aliases = set([
        cps for cps in canonical_aliases.keys()
        if not unicode_data.is_regional_indicator_seq(cps)
    ])
    aliases.add((0xfe82b, ))  # unknown flag PUA
    excluded = aliases | generate_emoji_html._get_canonical_excluded()

    # The flag aliases have distinct names, so we _do_ want to show them
    # multiple times.
    to_add = {}
    for seq in canonical_aliases:
        if unicode_data.is_regional_indicator_seq(seq):
            replace_seq = canonical_aliases[seq]
            if seq in seq_to_file:
                print('warning, alias %s has file %s' %
                      (unicode_data.regional_indicator_seq_to_string(seq),
                       seq_to_file[seq]))
                continue
            replace_file = seq_to_file.get(replace_seq)
            if replace_file:
                to_add[seq] = replace_file
    seq_to_file.update(to_add)

    data = []
    last_skipped_group = None
    skipcount = 0
    for group in unicode_data.get_emoji_groups():
        if group in omit_groups:
            continue
        name_data = []
        for seq in unicode_data.get_emoji_in_group(group):
            if seq in excluded:
                continue
            seq_file = seq_to_file.get(seq, None)
            if seq_file is None:
                skipcount += 1
                if verbose:
                    if group != last_skipped_group:
                        print('group %s' % group)
                        last_skipped_group = group
                    print('  %s (%s)' %
                          (unicode_data.seq_to_string(seq), ', '.join(
                              unicode_data.name(cp, 'x') for cp in seq)))
                if skip_limit >= 0 and skipcount > skip_limit:
                    raise Exception('skipped too many items')
            else:
                name_data.append(_name_data(seq, seq_file))
        data.append({'category': group, 'emojis': name_data})

    outfile = path.join(dstdir, 'data.json')
    with open(outfile, 'w') as f:
        indent = 2 if pretty_print else None
        separators = None if pretty_print else (',', ':')
        json.dump(data, f, indent=indent, separators=separators)
    print('wrote %s' % outfile)
Code example #33
def fix_font(f, dst_dir, rel_dir, version, version_info, autohint, dry_run):
  print '\n-----\nfont:', f
  font = ttLib.TTFont(f)

  relfont = _get_release_font(f, rel_dir)
  expected_font_revision = get_new_version(font, relfont, version)
  if expected_font_revision is not None:
    font_revision = font_data.printable_font_revision(font, 3)
    if font_revision != expected_font_revision:
      _alert('revision', font_revision, expected_font_revision)
      font['head'].fontRevision = float(expected_font_revision)

    names = font_data.get_name_records(font)
    NAME_ID = 5
    font_version = names[NAME_ID]
    expected_version = (
        'Version %s;%s' % (expected_font_revision, version_info))
    if font_version != expected_version:
      _alert('version string', font_version, expected_version)
      font_data.set_name_record(font, NAME_ID, expected_version)

  expected_upem = 1000
  upem = font['head'].unitsPerEm
  if upem != expected_upem:
    print 'expected %d upem but got %d upem' % (expected_upem, upem)

  if _is_ui_metrics(f):
    if upem == 2048:
      expected_ascent = 2163
      expected_descent = -555
    elif upem == 1000:
      expected_ascent = 1069
      expected_descent = -293
    else:
      raise Exception('no expected ui ascent/descent for upem: %d' % upem)

    font_ascent = font['hhea'].ascent
    font_descent = font['hhea'].descent
    if font_ascent != expected_ascent:
      _alert_and_check('ascent', font_ascent, expected_ascent, 2)
      font['hhea'].ascent = expected_ascent
      font['OS/2'].sTypoAscender = expected_ascent
      font['OS/2'].usWinAscent = expected_ascent

    if font_descent != expected_descent:
      _alert_and_check('descent', font_descent, expected_descent, -2)
      font['hhea'].descent = expected_descent
      font['OS/2'].sTypoDescender = expected_descent
      font['OS/2'].usWinDescent = -expected_descent

  tool_utils.ensure_dir_exists(path.join(dst_dir, 'unhinted'))

  fname = path.basename(f)
  udst = path.join(dst_dir, 'unhinted', fname)
  if dry_run:
    print 'dry run would write:\n  "%s"' % udst
  else:
    font.save(udst)
    print 'wrote %s' % udst

  if autohint:
    hdst = path.join(dst_dir, 'hinted', fname)
    autohint_font(udst, hdst, autohint, dry_run)
Code example #34
def subset_fonts_cmap(
    fonts, dstdir, exclude=None, include=None, bump_version=True):
  dstdir = tool_utils.ensure_dir_exists(dstdir)
  for srcname in fonts:
    dstname = path.join(dstdir, path.basename(srcname))
    subset_font_cmap(srcname, dstname, exclude, include, bump_version)
Code example #35
def fetch_udhr(fetch_dir):
  """Fetch UDHR xml bundle from unicode.org to fetch_dir."""
  fetch_dir = tool_utils.ensure_dir_exists(fetch_dir)
  dstfile = os.path.join(fetch_dir, UDHR_XML_ZIP_NAME)
  result = urllib.urlretrieve(UDHR_XML_ZIP_URL, dstfile)
  print 'Fetched: ' + result[0]