def list_fonts():
    """Yield (family name, local .bdf path, source url) for the CU fonts.

    Downloads the index page, follows every .tgz link, and extracts each
    .bdf font it contains, reading the FAMILY_NAME record for the name.
    """
    page_url = 'http://sofia.nmsu.edu/~mleisher/Software/cu/'
    harvester = html_link_collector()
    with io.open(acquire(page_url, 'local'), mode='r', encoding='utf-8') as page:
        for chunk in page:
            harvester.feed(chunk)
    harvester.close()
    for href in harvester.links:
        if not href.endswith('.tgz'):
            continue
        # hack for broken link as of Feb 2019
        href = href.replace('www.math.nmsu.edu', 'sofia.nmsu.edu')
        # NOTE(review): tarfile.extract on remote archives is vulnerable to
        # path traversal (CVE-2007-4559); consider the `filter=` argument
        # once the supported Python floor allows it.
        with tarfile.open(acquire(href, 'local'), 'r') as archive:
            for member in archive.getnames():
                if not member.endswith('.bdf'):
                    continue
                archive.extract(member, cache_path())
                bdf_path = os.path.join(cache_path(), member)
                # fall back to the file stem if no FAMILY_NAME is found
                family = member.split('/')[-1][:-4]
                with open(bdf_path, 'r') as bdf:
                    for record in bdf:
                        if record.startswith('FAMILY_NAME'):
                            family = record[11:].strip()
                            if family.startswith('"') and family.endswith('"'):
                                family = family[1:-1]
                yield (family, bdf_path, page_url)
def list_fonts():
    """Yield (name, local path, source url) for the New Athena Unicode font.

    Scans the NAUdownload page for /NAU<date>.zip links, downloads the one
    with the most recent date stamp, and yields every .ttf it contains.
    """
    u = 'https://apagreekkeys.org/NAUdownload.html'
    collector = html_link_collector()
    with io.open(acquire(u, 'local'), mode='r', encoding='utf-8') as f:
        for line in f:
            collector.feed(line)
    collector.close()
    latest_date = None
    latest_link = None
    for link in collector.links:
        m = re.search('/NAU([0-9_]+)[.]zip$', link)
        # the date portion is compared lexically; zero-padded dates sort correctly
        if m is not None and (latest_date is None or m.group(1) > latest_date):
            latest_date = m.group(1)
            latest_link = link
    if latest_link is None:
        # Fix: previously a page with no matching link crashed with a
        # TypeError on `'...' + None`; now we simply yield nothing.
        return
    with zipfile.ZipFile(
            acquire('https://apagreekkeys.org/' + latest_link, 'local'),
            'r') as archive:
        for info in archive.infolist():
            if info.filename.endswith('.ttf'):
                name = info.filename.split('/')[-1][:-4]
                name = re.sub('newathu', 'New Athena Unicode', name)
                # split camel case into words, then drop version digits
                name = re.sub('([a-z])([A-Z])', '\\1 \\2', name)
                name = re.sub('[0-9_]+', '', name)
                yield (name, archive.extract(info, cache_path()), u)
def list_fonts():
    """Yield (name, local path, page url) for BabelStone fonts.

    Walks every .html sub-page linked from the fonts index, then follows
    each .ttf link found there, deduplicating via a visited list.
    """
    seen = []
    index_url = 'https://www.babelstone.co.uk/Fonts/'
    index_harvester = html_link_collector()
    with io.open(acquire(index_url, 'local'), mode='r', encoding='utf-8') as page:
        for chunk in page:
            index_harvester.feed(chunk)
    index_harvester.close()
    for sub_page in index_harvester.links:
        # only same-directory .html pages, each visited at most once
        if '/' in sub_page or not sub_page.endswith('.html') or sub_page in seen:
            continue
        seen.append(sub_page)
        sub_url = index_url + sub_page
        sub_harvester = html_link_collector()
        with io.open(acquire(sub_url, 'local'), mode='r', encoding='utf-8') as page:
            for chunk in page:
                sub_harvester.feed(chunk)
        sub_harvester.close()
        for font_href in sub_harvester.links:
            if font_href.endswith('.ttf') and font_href not in seen:
                seen.append(font_href)
                family = font_href.split('/')[-1][:-4]
                family = re.sub('([a-z])([A-Z])', '\\1 \\2', family)
                yield (family.strip(), acquire(index_url + font_href, 'local'), sub_url)
def list_fonts():
    """Yield (name, local path, source url) for George Douros's fonts.

    Follows every .zip link on the page and yields the unhinted .ttf
    files inside each archive.
    """
    site = 'http://users.teilar.gr/~g1951d/'
    harvester = html_link_collector()
    # the page is decoded as UTF-16 (intentional — kept from the original)
    with io.open(acquire(site, 'local'), mode='r', encoding='utf16') as page:
        for chunk in page:
            harvester.feed(chunk)
    harvester.close()
    for href in harvester.links:
        if not href.endswith('.zip'):
            continue
        with zipfile.ZipFile(acquire(site + href, 'local'), 'r') as archive:
            for entry in archive.infolist():
                filename = entry.filename
                # skip hinted variants; keep plain TrueType files only
                if filename.endswith('.ttf') and 'hint' not in filename:
                    family = filename[:-4].split('/')[-1]
                    yield (family, archive.extract(entry, cache_path()), site)
def list_fonts():
    """Yield (name, local path, source url) for the Kurinto fonts.

    Downloads every *_Full.zip archive linked from the download page and
    yields the regular-weight ("-Rg") TrueType file of each family.
    """
    site = 'https://www.kurinto.com/'
    harvester = html_link_collector()
    with io.open(acquire(site + 'download.htm', 'local'),
                 mode='r', encoding='utf-8') as page:
        for chunk in page:
            harvester.feed(chunk)
    harvester.close()
    for href in harvester.links:
        if not href.endswith('_Full.zip'):
            continue
        with zipfile.ZipFile(acquire(site + href, 'local'), 'r') as archive:
            for entry in archive.infolist():
                if entry.filename.endswith('-Rg.ttf'):
                    # strip the "-Rg.ttf" suffix and any directory prefix
                    family = entry.filename[:-7].split('/')[-1]
                    yield (family, archive.extract(entry, cache_path()), site)
def list_fonts():
    """Yield (name, local path, source url) for the GNU Unifont builds.

    Picks every non-sample .ttf link on the index page and derives a
    display name from the file stem.
    """
    page_url = 'http://unifoundry.com/unifont/index.html'
    harvester = html_link_collector()
    with io.open(acquire(page_url, 'local'), mode='r',
                 encoding='iso-8859-1') as page:
        for chunk in page:
            harvester.feed(chunk)
    harvester.close()
    for href in harvester.links:
        if href.endswith('.ttf') and 'sample' not in href:
            family = href.split('/')[-1].split('-')[0]
            # normalize the lowercase file stem into a display name
            family = (family.replace('unifont', 'Unifont')
                            .replace('upper', 'Upper')
                            .replace('csur', 'CSUR')
                            .replace('_', ' '))
            yield (family, acquire('http://unifoundry.com' + href, 'local'), page_url)
def list_fonts():
    """Yield (name, local path, source url) for Google's Noto fonts.

    Follows .zip links on the Noto page (links containing '/Noto-' are
    excluded) and yields every .ttf/.otf inside each archive.
    """
    page_url = 'https://www.google.com/get/noto/'
    harvester = html_link_collector()
    with io.open(acquire(page_url, 'local'), mode='r', encoding='utf-8') as page:
        for chunk in page:
            harvester.feed(chunk)
    harvester.close()
    for href in harvester.links:
        if '/Noto-' in href or not href.endswith('.zip'):
            continue
        with zipfile.ZipFile(acquire(href, 'local'), 'r') as archive:
            for entry in archive.infolist():
                if entry.filename.endswith(('.ttf', '.otf')):
                    family = entry.filename[:-4]
                    # split camel case, drop "-Regular", turn dashes into spaces
                    family = re.sub('([a-z])([A-Z])', '\\1 \\2', family)
                    family = family.replace('-Regular', '').replace('-', ' ')
                    yield (family, archive.extract(entry, cache_path()), page_url)
def list_fonts():
    """Yield (name, local path, repo url) for Adobe's Source Pro families.

    Scans each repository's GitHub releases page for direct .ttf/.otf
    asset links, stopping at the first archive (.zip/.tar.gz) link.
    """
    for repo in ['source-code-pro', 'source-sans-pro', 'source-serif-pro']:
        repo_url = 'https://github.com/adobe-fonts/' + repo
        harvester = html_link_collector()
        with io.open(acquire(repo_url + '/releases', 'local'),
                     mode='r', encoding='utf-8') as page:
            for chunk in page:
                harvester.feed(chunk)
        harvester.close()
        for href in harvester.links:
            if href.endswith(('.ttf', '.otf')):
                download_url = 'https://github.com' + href
                family = href.split('/')[-1][:-4]
                family = re.sub('([a-z])([A-Z])', '\\1 \\2', family)
                # collapse "…Variable-Roman" style suffixes into " Pro "
                family = re.sub('( *Variable)?-(Roman)?', ' Pro ', family)
                yield (family.strip(), acquire(download_url, 'local'), repo_url)
            elif href.endswith(('.zip', '.tar.gz')):
                # archive links follow the font assets; stop scanning here
                break
def list_fonts():
    """Yield (name, local path, project url) for the Liberation fonts.

    Downloads the first '-ttf-' .tar.gz release linked from the project
    page and yields every .ttf it contains.
    """
    project_url = 'https://pagure.io/liberation-fonts'
    harvester = html_link_collector()
    with io.open(acquire(project_url, 'local'), mode='r', encoding='utf-8') as page:
        for chunk in page:
            harvester.feed(chunk)
    harvester.close()
    for href in harvester.links:
        if not (href.endswith('.tar.gz') and '-ttf-' in href):
            continue
        with tarfile.open(acquire(href, 'local'), 'r') as archive:
            for member in archive.getnames():
                if member.endswith('.ttf'):
                    archive.extract(member, cache_path())
                    font_path = os.path.join(cache_path(), member)
                    family = member.split('/')[-1][:-4]
                    family = re.sub('([a-z])([A-Z])', '\\1 \\2', family)
                    family = family.replace('-', ' ')
                    yield (family, font_path, project_url)
        # only the first matching tarball is processed
        break
def list_fonts():
    """Yield (name, local path, page url) for the Ubuntu font family.

    Downloads the first .zip linked from the font page and yields every
    .ttf inside, skipping macOS metadata entries.
    """
    page_url = 'https://design.ubuntu.com/font/'
    harvester = html_link_collector()
    with io.open(acquire(page_url, 'local', compressed=True),
                 mode='r', encoding='iso-8859-1') as page:
        for chunk in page:
            harvester.feed(chunk)
    harvester.close()
    for href in harvester.links:
        if not href.endswith('.zip'):
            continue
        with zipfile.ZipFile(acquire(href, 'local'), 'r') as archive:
            for entry in archive.infolist():
                # skip macOS resource-fork metadata entries
                if '__MACOSX' in entry.filename:
                    continue
                if entry.filename.endswith('.ttf'):
                    family = entry.filename.split('/')[-1][:-4]
                    family = re.sub('([a-z])([A-Z])', '\\1 \\2', family)
                    family = family.replace('-', ' ')
                    yield (family, archive.extract(entry, cache_path()), page_url)
        # only the first zip link is processed
        break
def list_fonts():
    """Yield (name, local path, site url) for the DejaVu fonts.

    Scans the download page for dejavu-fonts-ttf*.zip archives and
    yields every .ttf each archive contains.
    """
    home_url = 'https://dejavu-fonts.github.io/'
    download_url = 'https://dejavu-fonts.github.io/Download.html'
    harvester = html_link_collector()
    with io.open(acquire(download_url, 'local'), mode='r', encoding='utf-8') as page:
        for chunk in page:
            harvester.feed(chunk)
    harvester.close()
    for href in harvester.links:
        basename = href.split('/')[-1]
        if not (basename.startswith('dejavu-fonts-ttf') and basename.endswith('.zip')):
            continue
        with zipfile.ZipFile(acquire(href, 'local', None), 'r') as archive:
            for entry in archive.infolist():
                if entry.filename.endswith('.ttf'):
                    family = entry.filename.split('/')[-1][:-4]
                    # camel-split, then undo the split inside the brand name
                    family = re.sub('([a-z])([A-Z])', '\\1 \\2', family)
                    family = family.replace('Deja Vu', 'DejaVu')
                    family = family.replace('-', ' ')
                    yield (family, archive.extract(entry, cache_path()), home_url)
def list_fonts():
    """Yield (name, local path, page url) for the Cardo font.

    Downloads the first .zip linked from the Cardo page and maps each
    contained .ttf to a fixed display name by its file-stem prefix.
    """
    site = 'http://scholarsfonts.net/'
    page_url = site + 'cardofnt.html'
    harvester = html_link_collector()
    with io.open(acquire(page_url, 'local'), mode='r',
                 encoding='windows-1252') as page:
        for chunk in page:
            harvester.feed(chunk)
    harvester.close()
    for href in harvester.links:
        if not href.endswith('.zip'):
            continue
        with zipfile.ZipFile(acquire(site + href, 'local'), 'r') as archive:
            for entry in archive.infolist():
                if not entry.filename.endswith('.ttf'):
                    continue
                stem = entry.filename[:-4].split('/')[-1]
                # the zip uses stems like "Cardob…"/"Cardoi…" for bold/italic
                if stem.startswith('Cardob'):
                    family = 'Cardo Bold'
                elif stem.startswith('Cardoi'):
                    family = 'Cardo Italic'
                else:
                    family = 'Cardo'
                yield (family, archive.extract(entry, cache_path()), page_url)
        # only the first zip link is processed
        break
def list_fonts():
    """Yield (name, local path, project url) for GNU FreeFont.

    Scans the GNU FTP directory listing for freefont-[ot]tf-<date>.zip
    archives, downloads the one with the latest date stamp, and yields
    every .ttf/.otf file it contains.
    """
    u = 'http://savannah.gnu.org/projects/freefont/'
    du = 'http://ftp.gnu.org/gnu/freefont/'
    collector = html_link_collector()
    with io.open(acquire(du, 'local'), mode='r', encoding='utf-8') as f:
        for line in f:
            collector.feed(line)
    collector.close()
    latest_date = None
    latest_link = None
    for link in collector.links:
        m = re.match('^freefont-[ot]tf-([0-9]+)[.]zip$', link)
        # date stamps are all-digit strings, so lexical comparison works
        if m is not None and (latest_date is None or m.group(1) > latest_date):
            latest_date = m.group(1)
            latest_link = link
    if latest_link is None:
        # Fix: previously an empty listing crashed with a TypeError on
        # `du + None`; now we simply yield nothing.
        return
    with zipfile.ZipFile(acquire(du + latest_link, 'local'), 'r') as archive:
        for info in archive.infolist():
            if info.filename.endswith(('.ttf', '.otf')):
                name = info.filename.split('/')[-1][:-4]
                name = re.sub('([a-z])([A-Z])', '\\1 \\2', name)
                # camel-split breaks "FreeSans" apart; rejoin the brand name
                name = re.sub('Free (Sans|Serif|Mono)', 'Free\\1', name)
                yield (name, archive.extract(info, cache_path()), u)