コード例 #1
0
ファイル: clearlyu.py プロジェクト: kreativekorp/charset
def list_fonts():
    """Yield (name, path, url) triples for the ClearlyU BDF fonts.

    Downloads the ClearlyU page, follows every .tgz link, extracts the
    .bdf members, and reads FAMILY_NAME from each BDF header when present.
    """
    page = 'http://sofia.nmsu.edu/~mleisher/Software/cu/'
    lc = html_link_collector()
    with io.open(acquire(page, 'local'), mode='r', encoding='utf-8') as src:
        for ln in src:
            lc.feed(ln)
    lc.close()
    for href in lc.links:
        if not href.endswith('.tgz'):
            continue
        # hack for broken link as of Feb 2019
        href = href.replace('www.math.nmsu.edu', 'sofia.nmsu.edu')
        with tarfile.open(acquire(href, 'local'), 'r') as archive:
            for member in archive.getnames():
                if not member.endswith('.bdf'):
                    continue
                archive.extract(member, cache_path())
                bdf_path = os.path.join(cache_path(), member)
                # Fall back to the file name if no FAMILY_NAME is found.
                font_name = member.split('/')[-1][:-4]
                with open(bdf_path, 'r') as bdf:
                    for ln in bdf:
                        if ln.startswith('FAMILY_NAME'):
                            font_name = ln[11:].strip()
                            if font_name.startswith('"') and font_name.endswith('"'):
                                font_name = font_name[1:-1]
                yield (font_name, bdf_path, page)
コード例 #2
0
def list_fonts():
    """Yield (name, path, url) for the newest New Athena Unicode release."""
    page = 'https://apagreekkeys.org/NAUdownload.html'
    lc = html_link_collector()
    with io.open(acquire(page, 'local'), mode='r', encoding='utf-8') as src:
        for ln in src:
            lc.feed(ln)
    lc.close()
    # Track the newest (date-stamp, link) pair seen on the page.
    best_date, best_link = None, None
    for href in lc.links:
        m = re.search('/NAU([0-9_]+)[.]zip$', href)
        if m is None:
            continue
        if best_date is None or m.group(1) > best_date:
            best_date, best_link = m.group(1), href
    with zipfile.ZipFile(
            acquire('https://apagreekkeys.org/' + best_link, 'local'),
            'r') as archive:
        for info in archive.infolist():
            if not info.filename.endswith('.ttf'):
                continue
            font_name = info.filename.split('/')[-1][:-4]
            font_name = re.sub('newathu', 'New Athena Unicode', font_name)
            font_name = re.sub('([a-z])([A-Z])', '\\1 \\2', font_name)
            font_name = re.sub('[0-9_]+', '', font_name)
            yield (font_name, archive.extract(info, cache_path()), page)
コード例 #3
0
def list_fonts():
    """Yield (name, path, url) for the Unicode fonts hosted at teilar.gr."""
    home = 'http://users.teilar.gr/~g1951d/'
    lc = html_link_collector()
    with io.open(acquire(home, 'local'), mode='r', encoding='utf16') as src:
        for ln in src:
            lc.feed(ln)
    lc.close()
    for href in lc.links:
        if not href.endswith('.zip'):
            continue
        with zipfile.ZipFile(acquire(home + href, 'local'), 'r') as archive:
            for info in archive.infolist():
                # Take unhinted .ttf members only.
                if info.filename.endswith('.ttf') and 'hint' not in info.filename:
                    yield (info.filename[:-4].split('/')[-1],
                           archive.extract(info, cache_path()), home)
コード例 #4
0
ファイル: unifont.py プロジェクト: kreativekorp/charset
def list_fonts():
    """Yield (name, path, url) for the GNU Unifont TrueType builds."""
    page = 'http://unifoundry.com/unifont/index.html'
    lc = html_link_collector()
    with io.open(acquire(page, 'local'), mode='r', encoding='iso-8859-1') as src:
        for ln in src:
            lc.feed(ln)
    lc.close()
    for href in lc.links:
        if 'sample' in href or not href.endswith('.ttf'):
            continue
        # Derive a display name from the first dash-separated piece.
        font_name = href.split('/')[-1].split('-')[0]
        font_name = re.sub('unifont', 'Unifont', font_name)
        font_name = re.sub('upper', 'Upper', font_name)
        font_name = re.sub('csur', 'CSUR', font_name)
        font_name = re.sub('_', ' ', font_name)
        yield (font_name, acquire('http://unifoundry.com' + href, 'local'), page)
コード例 #5
0
def list_fonts():
    """Yield (name, path, url) for the regular-weight Kurinto fonts."""
    home = 'https://www.kurinto.com/'
    lc = html_link_collector()
    with io.open(acquire(home + 'download.htm', 'local'),
                 mode='r',
                 encoding='utf-8') as src:
        for ln in src:
            lc.feed(ln)
    lc.close()
    for href in lc.links:
        if not href.endswith('_Full.zip'):
            continue
        with zipfile.ZipFile(acquire(home + href, 'local'), 'r') as archive:
            for info in archive.infolist():
                if info.filename.endswith('-Rg.ttf'):
                    yield (info.filename[:-7].split('/')[-1],
                           archive.extract(info, cache_path()), home)
コード例 #6
0
def list_fonts():
	"""Yield (name, path, url) for KreativeKorp's standalone fonts."""
	fonts = [
		('Alco Sans', 'AlcoSans.ttf', 'alco'),
		('Constructium', 'Constructium.ttf', 'constructium'),
		('Fairfax', 'Fairfax.ttf', 'fairfax'),
		('Fairfax Bold', 'FairfaxBold.ttf', 'fairfax'),
		('Fairfax Italic', 'FairfaxItalic.ttf', 'fairfax'),
		('Fairfax Serif', 'FairfaxSerif.ttf', 'fairfax'),
		('Fairfax HD', 'FairfaxHD.ttf', 'fairfaxhd'),
		('Kreative Square', 'KreativeSquare.ttf', 'ksquare'),
	]
	for font_name, fname, page in fonts:
		yield (font_name,
		       acquire('http://www.kreativekorp.com/lib/font/' + fname, 'local'),
		       'http://www.kreativekorp.com/software/fonts/' + page + '.shtml')
コード例 #7
0
ファイル: microsoft.py プロジェクト: kreativekorp/charset
def list_vendors():
	"""Yield each vendor parsed from Microsoft's font vendor ID index."""
	parser = __vendor_id_index_parser()
	with io.open(acquire('https://docs.microsoft.com/en-us/typography/vendors/', 'local'), mode='r', encoding='utf-8') as src:
		for ln in src:
			parser.feed(ln)
	parser.close()
	for vendor in parser.vendors:
		yield vendor
コード例 #8
0
def list_fonts():
    """Yield (name, path, url) for every TTF in the Nishiki-teki bundle."""
    bundle = acquire('https://umihotaru.work/nishiki-teki.zip', 'local')
    with zipfile.ZipFile(bundle, 'r') as archive:
        for info in archive.infolist():
            if info.filename.endswith('.ttf'):
                yield ('Nishiki-teki', archive.extract(info, cache_path()),
                       'https://umihotaru.work/')
コード例 #9
0
ファイル: noto.py プロジェクト: kreativekorp/charset
def list_fonts():
    """Yield (name, path, url) for the per-family Noto font downloads."""
    page = 'https://www.google.com/get/noto/'
    lc = html_link_collector()
    with io.open(acquire(page, 'local'), mode='r', encoding='utf-8') as src:
        for ln in src:
            lc.feed(ln)
    lc.close()
    for href in lc.links:
        # Skip the all-in-one "Noto-..." bundles; take per-family zips.
        if '/Noto-' in href or not href.endswith('.zip'):
            continue
        with zipfile.ZipFile(acquire(href, 'local'), 'r') as archive:
            for info in archive.infolist():
                if info.filename.endswith('.ttf') or info.filename.endswith('.otf'):
                    font_name = info.filename[:-4]
                    font_name = re.sub('([a-z])([A-Z])', '\\1 \\2', font_name)
                    font_name = re.sub('-Regular', '', font_name)
                    font_name = re.sub('-', ' ', font_name)
                    yield (font_name, archive.extract(info, cache_path()), page)
コード例 #10
0
ファイル: everson-mono.py プロジェクト: kreativekorp/charset
def list_fonts():
    """Yield (name, path, url) for the Everson Mono fonts."""
    bundle = acquire('http://www.evertype.com/emono/evermono.zip', 'local')
    with zipfile.ZipFile(bundle, 'r') as archive:
        for info in archive.infolist():
            # '._*' members are macOS resource-fork stubs; skip them.
            if info.filename.endswith('.ttf') and '._' not in info.filename:
                yield (info.filename[:-4].split('/')[-1],
                       archive.extract(info, cache_path()),
                       'http://www.evertype.com/emono/')
コード例 #11
0
def list_fonts():
	"""Yield (name, path, url) for the Ubuntu font family (first zip only)."""
	page = 'https://design.ubuntu.com/font/'
	lc = html_link_collector()
	with io.open(acquire(page, 'local', compressed=True), mode='r', encoding='iso-8859-1') as src:
		for ln in src:
			lc.feed(ln)
	lc.close()
	for href in lc.links:
		if not href.endswith('.zip'):
			continue
		with zipfile.ZipFile(acquire(href, 'local'), 'r') as archive:
			for info in archive.infolist():
				# Skip macOS metadata entries.
				if '__MACOSX' in info.filename:
					continue
				if not info.filename.endswith('.ttf'):
					continue
				font_name = info.filename.split('/')[-1][:-4]
				font_name = re.sub('([a-z])([A-Z])', '\\1 \\2', font_name)
				font_name = re.sub('-', ' ', font_name)
				yield (font_name, archive.extract(info, cache_path()), page)
		break
コード例 #12
0
def list_fonts():
    """Yield (name, path, url) for the Kreative Vexillo flag fonts."""
    codes = [
        'GA', 'GB', 'GC', 'GD', 'GE', 'GF', 'GH', 'GK', 'GV', 'GW', 'MA',
        'MB', 'MC', 'MD', 'ME', 'MF', 'MH', 'MK', 'MV', 'MW', 'VB', 'VF',
        'VN'
    ]
    for code in codes:
        url = 'https://github.com/kreativekorp/vexillo/raw/master/fonts/Vexillo/Vexillo%s.ttf.sbix.ttf' % code
        yield ('Kreative Vexillo %s' % code, acquire(url, 'local'),
               'https://github.com/kreativekorp/vexillo')
コード例 #13
0
def list_fonts():
	"""Yield (name, path, url) for the Junicode fonts."""
	home = 'http://junicode.sourceforge.net/'
	download = 'https://sourceforge.net/projects/junicode/files/latest/download'
	with zipfile.ZipFile(acquire(download, 'local', None), 'r') as archive:
		for info in archive.infolist():
			if not info.filename.endswith('.ttf'):
				continue
			font_name = info.filename.split('/')[-1][:-4]
			font_name = re.sub('([a-z])([A-Z])', '\\1 \\2', font_name)
			font_name = re.sub('-', ' ', font_name)
			yield (font_name, archive.extract(info, cache_path()), home)
コード例 #14
0
def list_fonts():
    """Yield (name, path, url) for the Fluorine relay fonts."""
    for bundle in ['fluorine', 'fluorinem']:
        url = 'http://www.kreativekorp.com/swdownload/fonts/relay/%s.zip' % bundle
        with zipfile.ZipFile(acquire(url, 'local'), 'r') as archive:
            for info in archive.infolist():
                if info.filename.endswith('.ttf'):
                    yield (info.filename[:-4],
                           archive.extract(info, cache_path()),
                           'http://www.kreativekorp.com/software/fonts/')
コード例 #15
0
ファイル: dejavu.py プロジェクト: kreativekorp/charset
def list_fonts():
	"""Yield (name, path, url) for the DejaVu fonts (ttf release zips)."""
	home = 'https://dejavu-fonts.github.io/'
	download = 'https://dejavu-fonts.github.io/Download.html'
	lc = html_link_collector()
	with io.open(acquire(download, 'local'), mode='r', encoding='utf-8') as src:
		for ln in src:
			lc.feed(ln)
	lc.close()
	for href in lc.links:
		base = href.split('/')[-1]
		if not (base.startswith('dejavu-fonts-ttf') and base.endswith('.zip')):
			continue
		with zipfile.ZipFile(acquire(href, 'local', None), 'r') as archive:
			for info in archive.infolist():
				if not info.filename.endswith('.ttf'):
					continue
				font_name = info.filename.split('/')[-1][:-4]
				font_name = re.sub('([a-z])([A-Z])', '\\1 \\2', font_name)
				# Undo the camel-case split inside the brand name itself.
				font_name = re.sub('Deja Vu', 'DejaVu', font_name)
				font_name = re.sub('-', ' ', font_name)
				yield (font_name, archive.extract(info, cache_path()), home)
コード例 #16
0
def list_fonts():
    """Yield (name, path, url) for the Liberation fonts (first tarball)."""
    home = 'https://pagure.io/liberation-fonts'
    lc = html_link_collector()
    with io.open(acquire(home, 'local'), mode='r', encoding='utf-8') as src:
        for ln in src:
            lc.feed(ln)
    lc.close()
    for href in lc.links:
        if '-ttf-' not in href or not href.endswith('.tar.gz'):
            continue
        with tarfile.open(acquire(href, 'local'), 'r') as archive:
            for member in archive.getnames():
                if not member.endswith('.ttf'):
                    continue
                archive.extract(member, cache_path())
                ttf_path = os.path.join(cache_path(), member)
                font_name = member.split('/')[-1][:-4]
                font_name = re.sub('([a-z])([A-Z])', '\\1 \\2', font_name)
                font_name = re.sub('-', ' ', font_name)
                yield (font_name, ttf_path, home)
        break
コード例 #17
0
def list_fonts():
    """Yield (name, path, url) for the Voynich fonts."""
    bundle = acquire(
        'http://www.kreativekorp.com/swdownload/fonts/xlang/voynich.zip',
        'local')
    with zipfile.ZipFile(bundle, 'r') as archive:
        for info in archive.infolist():
            if info.filename.endswith('.ttf'):
                yield (
                    info.filename[:-4], archive.extract(info, cache_path()),
                    'http://www.kreativekorp.com/software/fonts/voynich.shtml')
コード例 #18
0
ファイル: source.py プロジェクト: kreativekorp/charset
def list_fonts():
    """Yield (name, path, url) for Adobe's Source Pro font families.

    Scans each project's releases page for direct .ttf/.otf links and
    stops at the first archive link, which follows the loose font files.
    """
    for repo in ['source-code-pro', 'source-sans-pro', 'source-serif-pro']:
        home = 'https://github.com/adobe-fonts/' + repo
        lc = html_link_collector()
        with io.open(acquire(home + '/releases', 'local'),
                     mode='r',
                     encoding='utf-8') as src:
            for ln in src:
                lc.feed(ln)
        lc.close()
        for href in lc.links:
            if href.endswith('.ttf') or href.endswith('.otf'):
                download = 'https://github.com' + href
                font_name = href.split('/')[-1][:-4]
                font_name = re.sub('([a-z])([A-Z])', '\\1 \\2', font_name)
                font_name = re.sub('( *Variable)?-(Roman)?', ' Pro ', font_name)
                yield (font_name.strip(), acquire(download, 'local'), home)
            elif href.endswith('.zip') or href.endswith('.tar.gz'):
                break
コード例 #19
0
def list_entities():
    """Yield (codepoint, '&name;') pairs from the W3C character reference."""
    charref = acquire('https://dev.w3.org/html5/html-author/charref', 'local')
    with io.open(charref, mode='r', encoding='utf-8') as src:
        for ln in src:
            dm = __dec_matcher.search(ln)
            if dm is None:
                continue
            cp = int(dm.group(1))
            nm = __named_matcher.search(ln)
            if nm is not None:
                yield cp, '&' + nm.group(1) + ';'
コード例 #20
0
def list_fonts():
    """Yield (name, path, url) for the Cardo fonts (first zip only)."""
    base = 'http://scholarsfonts.net/'
    page = base + 'cardofnt.html'
    lc = html_link_collector()
    with io.open(acquire(page, 'local'), mode='r', encoding='windows-1252') as src:
        for ln in src:
            lc.feed(ln)
    lc.close()
    for href in lc.links:
        if not href.endswith('.zip'):
            continue
        with zipfile.ZipFile(acquire(base + href, 'local'), 'r') as archive:
            for info in archive.infolist():
                if not info.filename.endswith('.ttf'):
                    continue
                stem = info.filename[:-4].split('/')[-1]
                # Map the short file names onto full display names.
                if stem.startswith('Cardob'):
                    font_name = 'Cardo Bold'
                elif stem.startswith('Cardoi'):
                    font_name = 'Cardo Italic'
                else:
                    font_name = 'Cardo'
                yield (font_name, archive.extract(info, cache_path()), page)
        break
コード例 #21
0
def list_fonts():
    """Yield (name, path, url) for the BabelStone fonts.

    Crawls each page linked from the font index and yields every .ttf
    link found, deduplicating pages and font files already visited.
    """
    seen = []
    index = 'https://www.babelstone.co.uk/Fonts/'
    ic = html_link_collector()
    with io.open(acquire(index, 'local'), mode='r', encoding='utf-8') as src:
        for ln in src:
            ic.feed(ln)
    ic.close()
    for page_link in ic.links:
        if '/' in page_link or not page_link.endswith('.html') or page_link in seen:
            continue
        seen.append(page_link)
        page = index + page_link
        pc = html_link_collector()
        with io.open(acquire(page, 'local'), mode='r', encoding='utf-8') as src:
            for ln in src:
                pc.feed(ln)
        pc.close()
        for font_link in pc.links:
            if font_link.endswith('.ttf') and font_link not in seen:
                seen.append(font_link)
                font_name = font_link.split('/')[-1][:-4]
                font_name = re.sub('([a-z])([A-Z])', '\\1 \\2', font_name)
                yield (font_name.strip(), acquire(index + font_link, 'local'), page)
コード例 #22
0
def list_fonts():
    """Yield (name, path, url) for GNU FreeFont from the newest release zip.

    Scrapes the GNU FTP directory listing, picks the freefont zip with the
    highest date stamp, and yields every .ttf/.otf member of that archive.
    """
    u = 'http://savannah.gnu.org/projects/freefont/'
    du = 'http://ftp.gnu.org/gnu/freefont/'
    collector = html_link_collector()
    with io.open(acquire(du, 'local'), mode='r', encoding='utf-8') as f:
        for line in f:
            collector.feed(line)
    collector.close()
    latestDate = None
    latestLink = None
    for link in collector.links:
        m = re.match('^freefont-[ot]tf-([0-9]+)[.]zip$', link)
        if m is not None:
            # Compare the date stamps numerically rather than as strings,
            # so a shorter stamp can never lexicographically beat a later,
            # longer one (e.g. '991231' vs '20120503').
            stamp = int(m.group(1))
            if latestDate is None or stamp > latestDate:
                latestDate = stamp
                latestLink = link
    if latestLink is None:
        # No release zip found on the listing page; yield nothing instead
        # of crashing on `du + None` below.
        return
    with zipfile.ZipFile(acquire(du + latestLink, 'local'), 'r') as zip:
        for info in zip.infolist():
            if info.filename.endswith('.ttf') or info.filename.endswith(
                    '.otf'):
                name = info.filename.split('/')[-1][:-4]
                name = re.sub('([a-z])([A-Z])', '\\1 \\2', name)
                name = re.sub('Free (Sans|Serif|Mono)', 'Free\\1', name)
                yield (name, zip.extract(info, cache_path()), u)
コード例 #23
0
def list_fonts():
    """Yield (name, path, url) for the KreativeKorp retro fonts."""
    for bundle in ['pr', 'petme']:
        url = 'http://www.kreativekorp.com/swdownload/fonts/retro/%s.zip' % bundle
        with zipfile.ZipFile(acquire(url, 'local'), 'r') as archive:
            for info in archive.infolist():
                if not info.filename.endswith('.ttf'):
                    continue
                font_name = info.filename[:-4]
                # Expand the squashed file names into readable ones.
                font_name = re.sub('PrintChar', 'Print Char ', font_name)
                font_name = re.sub('PRNumber', 'PR Number ', font_name)
                font_name = re.sub('PetMe', 'Pet Me ', font_name)
                font_name = re.sub('(64|128)(2X|2Y)', '\\1 \\2', font_name)
                yield (font_name.strip(), archive.extract(info, cache_path()),
                       'http://www.kreativekorp.com/software/fonts/')
コード例 #24
0
ファイル: microsoft.py プロジェクト: kreativekorp/charset
def list_files():
	"""Write Microsoft's code-page table to codepages-microsoft.txt and yield it."""
	parser = html_table_parser()
	with io.open(acquire('https://msdn.microsoft.com/en-us/library/windows/desktop/dd317756.aspx', 'local'), mode='r', encoding='utf-8') as src:
		for ln in src:
			parser.feed(ln)
	parser.close()

	path = charset_path('identifiers', 'codepages-microsoft.txt')
	with io.open(path, mode='w', encoding='utf-8') as out:
		print(u'@codepage\t@charset', file=out)
		for row in parser.rows:
			if not re.match('[0-9]+', row[0]):
				continue
			cpid = int(row[0])
			charset = row[1] if row[1] else '--'
			# Zero-pad code-page ids below 100 to three digits.
			if cpid < 100:
				print(u'%03d\t%s\t# %s' % (cpid, charset, row[2]), file=out)
			else:
				print(u'%d\t%s\t# %s' % (cpid, charset, row[2]), file=out)
	yield ('codepages-microsoft.txt', path)
コード例 #25
0
ファイル: kreative-erin.py プロジェクト: kreativekorp/charset
def list_fonts():
	"""Yield (name, path, url) for the Erin relay bitmap fonts."""
	for bundle in ['magdalena', 'mcmillen', 'mischke', 'monterey']:
		url = 'http://www.kreativekorp.com/swdownload/fonts/relaybm/%s.zip' % bundle
		with zipfile.ZipFile(acquire(url, 'local'), 'r') as archive:
			for info in archive.infolist():
				if info.filename.endswith('.ttf'):
					yield (re.sub('Bold', ' Bold', info.filename[:-4]),
					       archive.extract(info, cache_path()),
					       'http://www.kreativekorp.com/software/fonts/')
コード例 #26
0
def verify(path):
    """Compare the mapping file at *path* against its reference mapping.

    Parses the local mapping, downloads the reference mapping named by the
    file's URL line, applies a series of per-source normalization hacks,
    and returns the symmetric difference of the two mappings as a set of
    (byte-tuple, char-tuple) items — an empty set means a perfect match.
    Returns None when the file declares no reference URL.
    """
    url = None
    local_map = {}
    for line in expand(path):
        u = __verify_matcher.match(line)
        if u is not None:
            # NOTE(review): the match result itself is stored, yet `url` is
            # later used as a string (.endswith, `in`). This only works if
            # __verify_matcher.match returns a string-like object --
            # confirm against its definition; otherwise this should be
            # u.group(...).
            url = u
        else:
            b, c, ba, ca = split_mapline(line)
            if b is not None and c is not None:
                # First binding wins unless the byte side is unambiguous.
                if not ba or tuple(b) not in local_map:
                    local_map[tuple(b)] = tuple(c)
    if url is None:
        return None
    else:
        expmap = {}
        with open(acquire(url, 'local'), 'r') as f:
            for line in f:
                # Hacks for reference encodings: JIS X 0208 has three columns.
                if url.endswith('/MAPPINGS/OBSOLETE/EASTASIA/JIS/JIS0208.TXT'):
                    m = __jis_x_0208_matcher.match(line)
                    if m is not None:
                        line = line[:m.start(1)] + m.group(2) + '\t' + m.group(
                            3) + line[m.end(3):]
                # Hacks for reference encodings: Adobe reverses bytes and chars.
                if '/MAPPINGS/VENDORS/ADOBE/' in url:
                    m = __adobe_line_matcher.match(line)
                    if m is not None:
                        if (int(m.group(2), 16), ) in expmap:
                            continue
                        elif url.endswith('/symbol.txt') and m.group(
                                1) == '00B5' and m.group(2) == '6D':
                            continue
                        else:
                            line = line[:m.start(1)] + '0x' + m.group(
                                2) + '\t0x' + m.group(1) + line[m.end(2):]
                # Hacks for reference encodings: Bytes delimited by +0x.
                m = __delimited_byte_matcher.match(line)
                if m is not None:
                    line = line[:m.start(1)] + __delimited_byte_sub.sub(
                        '', m.group(1)) + line[m.end(1):]
                # Hacks for reference encodings: Odd number of hex digits in bytes.
                line = __odd_hex_digits_sub.sub('\\g<1>0\\g<2>', line)
                # Hacks for reference encodings: Apple's <LR> and <RL> markup.
                line = __apple_lr_sub.sub('\\g<1>0x202D\\g<2>+0x202C', line)
                line = __apple_rl_sub.sub('\\g<1>0x202E\\g<2>+0x202C', line)
                # End hacks.
                b, c, ba, ca = split_mapline(line)
                if b is not None and c is not None:
                    expmap[tuple(b)] = tuple(c)
        if not url.endswith('/MAPPINGS/OBSOLETE/EASTASIA/OTHER/CNS11643.TXT'):
            # Hacks for reference encodings: No ASCII characters.
            if all((x, ) not in expmap and (x, x) not in expmap
                   for x in range(0, 128)):
                for x in range(0, 128):
                    expmap[(x, )] = (x, )
            # Hacks for reference encodings: No C0 control characters.
            # list(range(...)) keeps this working on Python 3, where a
            # range object cannot be concatenated with a list.
            c0 = list(range(0, 32)) + [127]
            if all((x, ) not in expmap and (x, x) not in expmap for x in c0):
                for x in c0:
                    expmap[(x, )] = (x, )
            # Hacks for reference encodings: No C1 control characters.
            if '/MAPPINGS/VENDORS/ADOBE/' in url or '/MAPPINGS/OBSOLETE/EASTASIA/' in url:
                if any((x, ) in expmap
                       for x in range(160, 256)) or url.endswith('/BIG5.TXT'):
                    if all((x, ) not in expmap and (x, x) not in expmap
                           for x in range(128, 160)):
                        for x in range(128, 160):
                            expmap[(x, )] = (x, )
        # Hacks for reference encodings: Undefined characters mapped to U+FFFD.
        # Snapshot the keys first: deleting from a dict while iterating it
        # raises RuntimeError on Python 3.
        for k in [k for k, v in expmap.items() if v == (0xFFFD, )]:
            del expmap[k]
        # End hacks.
        return set(local_map.items()) ^ set(expmap.items())
コード例 #27
0
def list_fonts():
    """Yield (name, path, url) for the Catrinity font."""
    font_path = acquire('http://catrinity-font.de/downloads/Catrinity.otf',
                        'local')
    yield ('Catrinity', font_path, 'http://catrinity-font.de/')
コード例 #28
0
def list_fonts():
    """Yield (name, path, url) for the Conlang Unicode font (no home page)."""
    font_path = acquire('http://dedalvs.free.fr/writing/ConlangUnicode.ttf',
                        'local')
    yield ('Conlang Unicode', font_path, None)
コード例 #29
0
ファイル: neoletters.py プロジェクト: kreativekorp/charset
def list_fonts():
    """Yield (name, path, url) for the Neoletters font."""
    font_path = acquire('http://www.orenwatson.be/neoletters.ttf', 'local')
    yield ('Neoletters', font_path, 'http://www.orenwatson.be/fontdemo.htm')
コード例 #30
0
ファイル: ibm.py プロジェクト: kreativekorp/charset
def _scrape_table_rows(url):
    """Download *url* and return the parsed rows of its HTML table."""
    parser = html_table_parser()
    with io.open(acquire(url, 'local'), mode='r', encoding='utf-8') as f:
        for line in f:
            parser.feed(line)
    parser.close()
    return parser.rows


def _write_id_file(filename, rows, is_header, name_column):
    """Write an identifier file (decimal id, hex id, name); return its path.

    *is_header* is a predicate on the first cell that identifies header
    rows to skip; *name_column* is the index of the display-name cell.
    """
    path = charset_path('identifiers', filename)
    with io.open(path, mode='w', encoding='utf-8') as f:
        for row in rows:
            if not is_header(row[0]):
                numeric_id = int(row[0])
                print(u'%05d\t%04X\t%s' % (numeric_id, numeric_id,
                                           row[name_column]), file=f)
    return path


def list_files():
    """Generate the IBM GCSGID/CPGID/CCSID identifier files and the IBM
    code-page list, yielding (filename, path) for each file written.

    The three identifier tables share the same scrape-and-write shape, so
    the work is factored into _scrape_table_rows and _write_id_file.
    """
    rows = _scrape_table_rows(
        'https://www-01.ibm.com/software/globalization/cs/cs_gcsgid.html')
    yield ('ibm-gcsgid.txt',
           _write_id_file('ibm-gcsgid.txt', rows,
                          lambda h: h == 'GCSGID', 1))

    rows = _scrape_table_rows(
        'https://www-01.ibm.com/software/globalization/cp/cp_cpgid.html')
    yield ('ibm-cpgid.txt',
           _write_id_file('ibm-cpgid.txt', rows,
                          lambda h: h == 'CPGID', 1))

    rows = _scrape_table_rows(
        'https://www-01.ibm.com/software/globalization/ccsid/ccsid_registered.html')
    yield ('ibm-ccsid.txt',
           _write_id_file('ibm-ccsid.txt', rows,
                          lambda h: h.startswith('CCSID'), 2))

    # The code-page list reuses the CCSID rows scraped above.
    path = charset_path('identifiers', 'codepages-ibm.txt')
    with io.open(path, mode='w', encoding='utf-8') as f:
        print(u'@codepage', file=f)
        for row in rows:
            if not row[0].startswith('CCSID'):
                numeric_id = int(row[0])
                name = row[2]
                # Zero-pad ids below 100 to three digits.
                if numeric_id < 100:
                    print(u'%03d\t# %s' % (numeric_id, name), file=f)
                else:
                    print(u'%d\t# %s' % (numeric_id, name), file=f)
    yield ('codepages-ibm.txt', path)