Example #1
	def _saveXML(self, writer,
		     writeVersion=True,
		     quiet=None, tables=None, skipTables=None, splitTables=False,
		     splitGlyphs=False, disassembleInstructions=True,
		     bitmapGlyphDataFormat='raw'):

		if quiet is not None:
			deprecateArgument("quiet", "configure logging instead")

		self.disassembleInstructions = disassembleInstructions
		self.bitmapGlyphDataFormat = bitmapGlyphDataFormat
		if not tables:
			tables = list(self.keys())
			if "GlyphOrder" not in tables:
				tables = ["GlyphOrder"] + tables
			if skipTables:
				for tag in skipTables:
					if tag in tables:
						tables.remove(tag)
		numTables = len(tables)

		if writeVersion:
			from fontTools import version
			version = ".".join(version.split('.')[:2])
			writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1],
					ttLibVersion=version)
		else:
			writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1])
		writer.newline()

		# always splitTables if splitGlyphs is enabled
		splitTables = splitTables or splitGlyphs

		if not splitTables:
			writer.newline()
		else:
			path, ext = os.path.splitext(writer.filename)
			fileNameTemplate = path + ".%s" + ext

		for i in range(numTables):
			tag = tables[i]
			if splitTables:
				tablePath = fileNameTemplate % tagToIdentifier(tag)
				tableWriter = xmlWriter.XMLWriter(tablePath,
						newlinestr=writer.newlinestr)
				tableWriter.begintag("ttFont", ttLibVersion=version)
				tableWriter.newline()
				tableWriter.newline()
				writer.simpletag(tagToXML(tag), src=os.path.basename(tablePath))
				writer.newline()
			else:
				tableWriter = writer
			self._tableToXML(tableWriter, tag, splitGlyphs=splitGlyphs)
			if splitTables:
				tableWriter.endtag("ttFont")
				tableWriter.newline()
				tableWriter.close()
		writer.endtag("ttFont")
		writer.newline()
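
The private _saveXML method above is normally reached through the public TTFont.saveXML() wrapper; a minimal usage sketch (the font paths are hypothetical):

from fontTools.ttLib import TTFont

font = TTFont("MyFont.ttf")  # hypothetical input font
# Dump selected tables to TTX; splitTables=True would instead write one .ttx per table.
font.saveXML("MyFont.ttx", tables=["head", "name"], splitTables=False)
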
Example #2
	def decompileEntryList(self, data):
		# data starts with the first entry of the entry list.
		pos = subTableStart = self.offsetToSVGDocIndex
		self.numEntries = numEntries = struct.unpack(">H", data[pos:pos+2])[0]
		pos += 2
		if self.numEntries > 0:
			data2 = data[pos:]
			self.docList = []
			self.entries = entries = []
			for i in range(self.numEntries):
				docIndexEntry, data2 = sstruct.unpack2(doc_index_entry_format_0, data2, DocumentIndexEntry())
				entries.append(docIndexEntry)

			for entry in entries:
				start = entry.svgDocOffset + subTableStart
				end = start + entry.svgDocLength
				doc = data[start:end]
				if doc.startswith(b"\x1f\x8b"):
					import gzip
					bytesIO = BytesIO(doc)
					with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper:
						doc = gunzipper.read()
					self.compressed = True
					del bytesIO
				doc = tostr(doc, "utf_8")
				self.docList.append( [doc, entry.startGlyphID, entry.endGlyphID] )
Example #3
def escape(data):
    data = tostr(data, 'utf_8')
    data = data.replace("&", "&")
    data = data.replace("<", "&lt;")
    data = data.replace(">", "&gt;")
    data = data.replace("\r", "&#13;")
    return data
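
A quick sketch of what escape() produces, assuming tostr is imported from fontTools.misc.textTools (older releases expose it from fontTools.misc.py23):

from fontTools.misc.textTools import tostr

print(escape("A & B <tag>\r"))
# -> A &amp; B &lt;tag&gt;&#13;
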
Example #4
    def checkSkipGlyph(self, glyphName, newSrcHash, doAll):
        skip = False
        if self.log_only:
            return skip

        srcHash = None
        historyList = []

        # Get hash entry for glyph
        if glyphName in self.hashMap:
            srcHash, historyList = self.hashMap[glyphName]

        if srcHash == newSrcHash:
            if AUTOHINT_NAME in historyList:
                # The glyph has already been autohinted, and there have been no
                # changes since.
                skip = not doAll
            if not skip and AUTOHINT_NAME not in historyList:
                historyList.append(AUTOHINT_NAME)
        else:
            if CHECKOUTLINE_NAME in historyList:
                log.error(
                    "Glyph '%s' has been edited. You must first "
                    "run '%s' before running '%s'. Skipping.", glyphName,
                    CHECKOUTLINE_NAME, AUTOHINT_NAME)
                skip = True

            # If the source hash has changed, we need to delete the processed
            # layer glyph.
            self.hashMapChanged = True
            self.hashMap[glyphName] = [tostr(newSrcHash), [AUTOHINT_NAME]]
            if glyphName in self.processedLayerGlyphMap:
                del self.processedLayerGlyphMap[glyphName]

        return skip
Example #5
def unpackPStrings(data, n):
    # extract n Pascal strings from data.
    # if there is not enough data, use ""

    strings = []
    index = 0
    dataLen = len(data)

    for _ in range(n):
        if dataLen <= index:
            length = 0
        else:
            length = byteord(data[index])
        index += 1

        if dataLen <= index + length - 1:
            name = ""
        else:
            name = tostr(data[index:index + length], encoding="latin1")
        strings.append(name)
        index += length

    if index < dataLen:
        log.warning("%d extra bytes in post.stringData array", dataLen - index)

    elif dataLen < index:
        log.warning("not enough data in post.stringData array")

    return strings
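
A small sketch of the Pascal-string layout this parses, assuming byteord and tostr come from fontTools.misc.textTools and log is the module's logger (not triggered here, since the sample data is consumed exactly):

from fontTools.misc.textTools import byteord, tostr

data = b"\x05hello\x05world"      # each string is preceded by a length byte
print(unpackPStrings(data, 2))    # -> ['hello', 'world']
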
Example #6
def guessFileType(fileName):
    base, ext = os.path.splitext(fileName)
    try:
        with open(fileName, "rb") as f:
            header = f.read(256)
    except IOError:
        return None

    if header.startswith(b'\xef\xbb\xbf<?xml'):
        header = header.lstrip(b'\xef\xbb\xbf')
    cr, tp = getMacCreatorAndType(fileName)
    if tp in ("sfnt", "FFIL"):
        return "TTF"
    if ext == ".dfont":
        return "TTF"
    head = Tag(header[:4])
    if head == "OTTO":
        return "OTF"
    elif head == "ttcf":
        return "TTC"
    elif head in ("\0\1\0\0", "true"):
        return "TTF"
    elif head == "wOFF":
        return "WOFF"
    elif head == "wOF2":
        return "WOFF2"
    elif head == "<?xm":
        # Use 'latin1' because that can't fail.
        header = tostr(header, 'latin1')
        if opentypeheaderRE.search(header):
            return "OTX"
        else:
            return "TTX"
    return None
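
guessFileType() is also importable from fontTools.ttx; a minimal sketch with a hypothetical path:

from fontTools.ttx import guessFileType

# Returns "TTF", "OTF", "TTC", "WOFF", "WOFF2", "TTX", "OTX",
# or None if the file cannot be read or is not recognized.
print(guessFileType("MyFont.ttf"))  # hypothetical path
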
Example #7
 def getNamedResource(self, resType, name):
     """Return the named resource of given type, else return None."""
     name = tostr(name, encoding='mac-roman')
     for res in self.get(resType, []):
         if res.name == name:
             return res
     return None
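
This method lives on fontTools.misc.macRes.ResourceReader; a hedged usage sketch (the .dfont path and resource name are made up):

from fontTools.misc.macRes import ResourceReader

reader = ResourceReader("Fonts.dfont")            # hypothetical suitcase file
res = reader.getNamedResource("sfnt", "MyFont")   # None if not found
if res is not None:
    print(len(res.data), "bytes of sfnt data")
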
Example #8
def ot_tag_to_script(tag):
    """ Return the Unicode script code for the given OpenType script tag, or
    None for "DFLT" tag or if there is no Unicode script associated with it.
    Raises ValueError if the tag is invalid.
    """
    tag = tostr(tag).strip()
    if not tag or " " in tag or len(tag) > 4:
        raise ValueError("invalid OpenType tag: %r" % tag)

    while len(tag) != 4:
        tag += str(" ")  # pad with spaces

    if tag == OTTags.DEFAULT_SCRIPT:
        # it's unclear which Unicode script the "DFLT" OpenType tag maps to,
        # so here we return None
        return None

    if tag in OTTags.NEW_SCRIPT_TAGS_REVERSED:
        return OTTags.NEW_SCRIPT_TAGS_REVERSED[tag]

    # This side of the conversion is fully algorithmic

    # Any spaces at the end of the tag are replaced by repeating the last
    # letter. Eg 'nko ' -> 'Nkoo'.
    # Change first char to uppercase
    script_code = tag[0].upper() + tag[1]
    for i in range(2, 4):
        script_code += (script_code[i - 1] if tag[i] == " " else tag[i])

    if script_code not in Scripts.NAMES:
        return None
    return script_code
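
The same function is exposed as fontTools.unicodedata.ot_tag_to_script; a short sketch of the three cases handled above:

from fontTools.unicodedata import ot_tag_to_script

print(ot_tag_to_script("latn"))   # 'Latn' (algorithmic mapping)
print(ot_tag_to_script("nko "))   # 'Nkoo' (trailing space repeats the last letter)
print(ot_tag_to_script("DFLT"))   # None (no Unicode script for the default tag)
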
Example #9
def getPSName(data):
	format, n, stringOffset = struct.unpack(">HHH", data[:6])
	expectedStringOffset = 6 + n * nameRecordSize
	if stringOffset != expectedStringOffset:
		# XXX we need a warn function
		print("Warning: 'name' table stringOffset incorrect.", end=' ')
		print("Expected: %s; Actual: %s" % (expectedStringOffset, stringOffset))
	stringData = data[stringOffset:]
	startNameRecordData = data[6:]
	psName = None
	
	for nameRecordKey in [ AppleLatinKey, MSUnicodeKey ]:
		if psName is not None:
			break
		
		data = startNameRecordData
		for i in range(n):
			if len(data) < 12:
				# compensate for buggy font
				break
			platformID, platEncID, langID, nameID, length, offset = struct.unpack(nameRecordFormat, data[:nameRecordSize])
			data = data[nameRecordSize:]
			if ((platformID, platEncID, nameID) == nameRecordKey):
				psName = stringData[offset:offset+length]
				if nameRecordKey == MSUnicodeKey:
					psName = psName.decode('utf-16be')
				else:
					assert len(psName) == length
				break
			else:
				continue

	if psName is None:
		psName = "PSNameUndefined"
	return tostr(psName)
    def toUnicode(self, errors='strict'):
        """
		If self.string is a Unicode string, return it; otherwise try decoding the
		bytes in self.string to a Unicode string using the encoding of this
		entry as returned by self.getEncoding(); Note that  self.getEncoding()
		returns 'ascii' if the encoding is unknown to the library.

		Certain heuristics are performed to recover data from bytes that are
		ill-formed in the chosen encoding, or that otherwise look misencoded
		(mostly around bad UTF-16BE encoded bytes, or bytes that look like UTF-16BE
		but marked otherwise).  If the bytes are ill-formed and the heuristics fail,
		the error is handled according to the errors parameter to this function, which is
		passed to the underlying decode() function; by default it throws a
		UnicodeDecodeError exception.

		Note: The mentioned heuristics mean that roundtripping a font to XML and back
		to binary might recover some misencoded data whereas just loading the font
		and saving it back will not change them.
		"""
        def isascii(b):
            return (b >= 0x20 and b <= 0x7E) or b in [0x09, 0x0A, 0x0D]

        encoding = self.getEncoding()
        string = self.string

        if isinstance(
                string,
                bytes) and encoding == 'utf_16_be' and len(string) % 2 == 1:
            # Recover badly encoded UTF-16 strings that have an odd number of bytes:
            # - If the last byte is zero, drop it.  Otherwise,
            # - If all the odd bytes are zero and all the even bytes are ASCII,
            #   prepend one zero byte.  Otherwise,
            # - If first byte is zero and all other bytes are ASCII, insert zero
            #   bytes between consecutive ASCII bytes.
            #
            # (Yes, I've seen all of these in the wild... sigh)
            if byteord(string[-1]) == 0:
                string = string[:-1]
            elif all(
                    byteord(b) == 0 if i % 2 else isascii(byteord(b))
                    for i, b in enumerate(string)):
                string = b'\0' + string
            elif byteord(string[0]) == 0 and all(
                    isascii(byteord(b)) for b in string[1:]):
                string = bytesjoin(b'\0' + bytechr(byteord(b))
                                   for b in string[1:])

        string = tostr(string, encoding=encoding, errors=errors)

        # If decoded strings still looks like UTF-16BE, it suggests a double-encoding.
        # Fix it up.
        if all(
                ord(c) == 0 if i % 2 == 0 else isascii(ord(c))
                for i, c in enumerate(string)):
            # If string claims to be Mac encoding, but looks like UTF-16BE with ASCII text,
            # narrow it down.
            string = ''.join(c for c in string[1::2])

        return string
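
toUnicode() is typically called on the records of a font's name table; a minimal sketch with a hypothetical font path:

from fontTools.ttLib import TTFont

font = TTFont("MyFont.ttf")  # hypothetical path
for record in font["name"].names:
    # Decodes record.string using the encoding implied by platformID/platEncID.
    print(record.nameID, record.toUnicode())
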
Example #11
    def buildTables(self):
        if not self.features.strip():
            return

        import subprocess
        from fontTools.misc.py23 import tostr

        outline_path = feasrc_path = fea_path = None
        try:
            fd, outline_path = tempfile.mkstemp()
            os.close(fd)
            self.ttFont.save(outline_path)

            fd, feasrc_path = tempfile.mkstemp()
            os.close(fd)

            fd, fea_path = tempfile.mkstemp()
            os.write(fd, tobytes(self.features, encoding='utf-8'))
            os.close(fd)

            process = subprocess.Popen(
                [
                    "makeotf",
                    "-o",
                    feasrc_path,
                    "-f",
                    outline_path,
                    "-ff",
                    fea_path,
                ],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            stdout, stderr = process.communicate()
            retcode = process.poll()

            report = tostr(stdout + (b"\n" + stderr if stderr else b""))
            logger.info(report)

            # before afdko >= 2.7.1rc1, makeotf did not exit with non-zero
            # on failure, so we have to parse the error message
            if retcode != 0:
                success = False
            else:
                success = (
                    "makeotf [Error] Failed to build output font" not in report
                )
                if success:
                    with TTFont(feasrc_path) as feasrc:
                        for table in ["GDEF", "GPOS", "GSUB"]:
                            if table in feasrc:
                                self.ttFont[table] = feasrc[table]
            if not success:
                raise FontmakeError("Feature syntax compilation failed.")
        finally:
            for path in (outline_path, fea_path, feasrc_path):
                if path is not None:
                    os.remove(path)
Example #12
def b64encode(b):
    s = base64.b64encode(b)
    # Line-break at 76 chars.
    items = []
    while s:
        items.append(tostr(s[:76]))
        items.append('\n')
        s = s[76:]
    return strjoin(items)
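
A sketch of the line wrapping, assuming strjoin and tostr are imported from fontTools.misc.textTools:

import base64
from fontTools.misc.textTools import strjoin, tostr

print(b64encode(b"\x00\x01\x02\x03" * 60))
# prints base64 text broken into 76-character lines
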
Example #13
def runShellCmdLogging(cmd):
    try:
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        while 1:
            output = proc.stdout.readline().rstrip()
            if output:
                print(tostr(output))
            if proc.poll() is not None:
                output = proc.stdout.readline().rstrip()
                if output:
                    print(tostr(output))
                break
    except (subprocess.CalledProcessError, OSError) as err:
        msg = "Error executing command '%s'\n%s" % (cmd, err)
        print(msg)
        return 1
    return 0
Example #14
    def recalcHashEntry(self, glyphName, glyph):
        hashBefore, historyList = self.hashMap[glyphName]

        hash_pen = HashPointPen(glyph)
        glyph.drawPoints(hash_pen)
        hashAfter = hash_pen.getHash()

        if hashAfter != hashBefore:
            self.hashMap[glyphName] = [tostr(hashAfter), historyList]
            self.hashMapChanged = True
Example #15
def unpackPStrings(data):
    strings = []
    index = 0
    dataLen = len(data)
    while index < dataLen:
        length = byteord(data[index])
        strings.append(
            tostr(data[index + 1:index + 1 + length], encoding="latin1"))
        index = index + 1 + length
    return strings
Example #17
    def test_non_BMP_text_arg_input(self):
        _, fontpath = self.compile_font(
            self.getpath("TestTTF-Regular_non_BMP_char.ttx"), ".ttf")
        subsetpath = self.temp_path(".ttf")
        text = tostr(u"A\U0001F6D2", encoding='utf-8')

        subset.main([fontpath, "--text=%s" % text, "--output-file=%s" % subsetpath])
        subsetfont = TTFont(subsetpath)

        self.assertEqual(subsetfont['maxp'].numGlyphs, 3)
        self.assertEqual(subsetfont.getGlyphOrder(), ['.notdef', 'A', 'u1F6D2'])
Example #18
	def toXML(self, writer, ttFont):
		data = tostr(self.data)
		# removing null bytes. XXX needed??
		data = data.split('\0')
		data = strjoin(data)
		writer.begintag("source")
		writer.newline()
		writer.write_noindent(data)
		writer.newline()
		writer.endtag("source")
		writer.newline()
Example #19
    def getnexttoken(
            self,
            # localize some stuff, for performance
            len=len,
            ps_special=ps_special,
            stringmatch=stringRE.match,
            hexstringmatch=hexstringRE.match,
            commentmatch=commentRE.match,
            endmatch=endofthingRE.match):

        self.skipwhite()
        if self.pos >= self.len:
            return None, None
        pos = self.pos
        buf = self.buf
        char = bytechr(byteord(buf[pos]))
        if char in ps_special:
            if char in b'{}[]':
                tokentype = 'do_special'
                token = char
            elif char == b'%':
                tokentype = 'do_comment'
                _, nextpos = commentmatch(buf, pos).span()
                token = buf[pos:nextpos]
            elif char == b'(':
                tokentype = 'do_string'
                m = stringmatch(buf, pos)
                if m is None:
                    raise PSTokenError('bad string at character %d' % pos)
                _, nextpos = m.span()
                token = buf[pos:nextpos]
            elif char == b'<':
                tokentype = 'do_hexstring'
                m = hexstringmatch(buf, pos)
                if m is None:
                    raise PSTokenError('bad hexstring at character %d' % pos)
                _, nextpos = m.span()
                token = buf[pos:nextpos]
            else:
                raise PSTokenError('bad token at character %d' % pos)
        else:
            if char == b'/':
                tokentype = 'do_literal'
                m = endmatch(buf, pos + 1)
            else:
                tokentype = ''
                m = endmatch(buf, pos)
            if m is None:
                raise PSTokenError('bad token at character %d' % pos)
            _, nextpos = m.span()
            token = buf[pos:nextpos]
        self.pos = pos + len(token)
        token = tostr(token, encoding=self.encoding)
        return tokentype, token
Example #20
def normalizeStringForPostscript(s, allowSpaces=True):
    s = tounicode(s)
    normalized = []
    for c in s:
        if c == " " and not allowSpaces:
            continue
        if c in _postscriptFontNameExceptions:
            continue
        if c not in _postscriptFontNameAllowed:
            c = unicodedata.normalize("NFKD", c)
        normalized.append(tostr(c))
    return "".join(normalized)
Example #21
    def decompile(self, data, ttFont):
        dummy, rest = sstruct.unpack2(SINGFormat, data, self)
        self.uniqueName = self.decompileUniqueName(self.uniqueName)
        self.nameLength = byteord(self.nameLength)
        assert len(rest) == self.nameLength
        self.baseGlyphName = tostr(rest)

        rawMETAMD5 = self.METAMD5
        self.METAMD5 = "[" + hex(byteord(self.METAMD5[0]))
        for char in rawMETAMD5[1:]:
            self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char))
        self.METAMD5 = self.METAMD5 + "]"
Example #23
 def decompile(self, data, ttFont):
     dummy, newData = sstruct.unpack2(GMAPFormat, data, self)
     self.psFontName = tostr(newData[:self.fontNameLength])
     assert (self.recordsOffset %
             4) == 0, "GMAP error: recordsOffset is not 32 bit aligned."
     newData = data[self.recordsOffset:]
     self.gmapRecords = []
     for i in range(self.recordsCount):
         gmapRecord, newData = sstruct.unpack2(GMAPRecordFormat1, newData,
                                               GMAPRecord())
         gmapRecord.name = gmapRecord.name.strip('\0')
         self.gmapRecords.append(gmapRecord)
Example #24
 def decompile(self, refData, reader):
     sstruct.unpack(ResourceRefItem, refData, self)
     # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
     self.dataOffset, = struct.unpack('>L',
                                      bytesjoin([b"\0", self.dataOffset]))
     absDataOffset = reader.dataOffset + self.dataOffset
     dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
     self.data = reader._read(dataLength)
     if self.nameOffset == -1:
         return
     absNameOffset = reader.absNameListOffset + self.nameOffset
     nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
     name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
     self.name = tostr(name, encoding='mac-roman')
Example #25
def set_cvt_table(font, data):
    data = re.sub(r"/\*.*?\*/", "", data, flags=re.DOTALL)
    values = array.array(tostr("h"))
    # control values are defined in VTT Control Program as colon-separated
    # INDEX: VALUE pairs
    for m in re.finditer(r"^\s*([0-9]+)\s*:\s*(-?[0-9]+)", data, re.MULTILINE):
        index, value = int(m.group(1)), int(m.group(2))
        for i in range(1 + index - len(values)):
            # missing CV indexes default to zero
            values.append(0)
        values[index] = value
    if len(values):
        if "cvt " not in font:
            font["cvt "] = newTable("cvt ")
        font["cvt "].values = values
Example #26
def normalizeStringForPostscript(s, allowSpaces=True):
    s = tounicode(s)
    normalized = []
    for c in s:
        if c == " " and not allowSpaces:
            continue
        if c in _postscriptFontNameExceptions:
            continue
        if c not in _postscriptFontNameAllowed:
            # Use compatibility decomposed form, to keep parts in ascii
            c = unicodedata.normalize("NFKD", c)
            if not set(c) < _postscriptFontNameAllowed:
                c = tounicode(tobytes(c, errors="replace"))
        normalized.append(tostr(c))
    return "".join(normalized)
Example #28
 def _readTypeList(self):
     absTypeListOffset = self.absTypeListOffset
     numTypesData = self._read(2, absTypeListOffset)
     self.numTypes, = struct.unpack('>H', numTypesData)
     absTypeListOffset2 = absTypeListOffset + 2
     for i in range(self.numTypes + 1):
         resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
         resTypeItemData = self._read(ResourceTypeItemSize,
                                      resTypeItemOffset)
         item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
         resType = tostr(item['type'], encoding='mac-roman')
         refListOffset = absTypeListOffset + item['refListOffset']
         numRes = item['numRes'] + 1
         resources = self._readReferenceList(resType, refListOffset, numRes)
         self._resources[resType] = resources
Example #30
 def test_decompile_badOffset(self):
     # https://github.com/fonttools/fonttools/issues/525
     table = table__n_a_m_e()
     badRecord = {
         "platformID": 1,
         "platEncID": 3,
         "langID": 7,
         "nameID": 1,
         "length": 3,
         "offset": 8765  # out of range
     }
     data = bytesjoin([
         struct.pack(tostr(">HHH"), 1, 1, 6 + nameRecordSize),
         sstruct.pack(nameRecordFormat, badRecord)
     ])
     table.decompile(data, ttFont=None)
     self.assertEqual(table.names, [])
Example #31
def test_fontv_fonttools_lib_unicode():
    test_string = tobytes("hello")
    test_string_str = tostr("hello")
    test_string_unicode = tounicode(test_string, 'utf-8')
    test_string_str_unicode = tounicode(test_string_str, 'utf-8')

    assert (isinstance(test_string, unicode)) is False
    if sys.version_info[0] == 2:
        assert (isinstance(test_string_str,
                           unicode)) is False  # str != unicode in Python 2
    elif sys.version_info[0] == 3:
        assert (isinstance(test_string_str,
                           unicode)) is True  # str = unicode in Python 3
    assert (isinstance(test_string_unicode, unicode)
            ) is True  # after cast with fonttools function, Py2+3 = unicode
    assert (isinstance(test_string_str_unicode, unicode)) is True  # ditto
    assert test_string_unicode == "hello"
Example #32
def _dict_element(d: Mapping[str, PlistEncodable],
                  ctx: SimpleNamespace) -> etree.Element:
    el = etree.Element("dict")
    items = d.items()
    if ctx.sort_keys:
        items = sorted(items)  # type: ignore
    ctx.indent_level += 1
    for key, value in items:
        if not isinstance(key, str):
            if ctx.skipkeys:
                continue
            raise TypeError("keys must be strings")
        k = etree.SubElement(el, "key")
        k.text = tostr(key, "utf-8")
        el.append(_make_element(value, ctx))
    ctx.indent_level -= 1
    return el
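
_dict_element() is a private helper behind fontTools.misc.plistlib's public API; a hedged sketch of the usual entry point:

from fontTools.misc import plistlib

# dumps() serializes to XML plist bytes, building <dict>/<key> elements as above.
print(plistlib.dumps({"familyName": "Test", "weight": 400}).decode("utf-8"))
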
Example #33
 def _tounicode(s):
     """Test if a string is valid user input and decode it to unicode string
     using ASCII encoding if it's a bytes string.
     Reject all bytes/unicode input that contains non-XML characters.
     Reject all bytes input that contains non-ASCII characters.
     """
     try:
         s = tostr(s, encoding="ascii", errors="strict")
     except UnicodeDecodeError:
         raise ValueError(
             "Bytes strings can only contain ASCII characters. "
             "Use unicode strings for non-ASCII characters.")
     except AttributeError:
         _raise_serialization_error(s)
     if s and _invalid_xml_string.search(s):
         raise ValueError(
             "All strings must be XML compatible: Unicode or ASCII, "
             "no NULL bytes or control characters"
         )
     return s
Example #34
    def setupFile_featureTables(self):
        if self.mtiFeaFiles is not None:
            super(FDKFeatureCompiler, self).setupFile_featureTables()

        elif not self.features.strip():
            return

        import subprocess
        from fontTools.misc.py23 import tostr

        fd, outline_path = tempfile.mkstemp()
        os.close(fd)
        self.outline.save(outline_path)

        fd, feasrc_path = tempfile.mkstemp()
        os.close(fd)

        fd, fea_path = tempfile.mkstemp()
        os.write(fd, tobytes(self.features, encoding='utf-8'))
        os.close(fd)

        report = tostr(
            subprocess.check_output([
                "makeotf", "-o", feasrc_path, "-f", outline_path, "-ff",
                fea_path
            ]))
        os.remove(outline_path)
        os.remove(fea_path)

        print(report)
        success = "Done." in report
        if success:
            feasrc = TTFont(feasrc_path)
            for table in ["GDEF", "GPOS", "GSUB"]:
                if table in feasrc:
                    self.outline[table] = feasrc[table]
            feasrc.close()

        os.remove(feasrc_path)
        if not success:
            raise ValueError("Feature syntax compilation failed.")
Example #35
    def setupFile_featureTables(self):
        if self.mtiFeaFiles is not None:
            super(FDKFeatureCompiler, self).setupFile_featureTables()

        elif not self.features.strip():
            return

        import subprocess
        from fontTools.ttLib import TTFont
        from fontTools.misc.py23 import tostr

        fd, outline_path = tempfile.mkstemp()
        os.close(fd)
        self.outline.save(outline_path)

        fd, feasrc_path = tempfile.mkstemp()
        os.close(fd)

        fd, fea_path = tempfile.mkstemp()
        os.write(fd, self.features)
        os.close(fd)

        report = tostr(subprocess.check_output([
            "makeotf", "-o", feasrc_path, "-f", outline_path,
            "-ff", fea_path]))
        os.remove(outline_path)
        os.remove(fea_path)

        print(report)
        success = "Done." in report
        if success:
            feasrc = TTFont(feasrc_path)
            for table in ["GDEF", "GPOS", "GSUB"]:
                if table in feasrc:
                    self.outline[table] = feasrc[table]

        feasrc.close()
        os.remove(feasrc_path)
        if not success:
            raise ValueError("Feature syntax compilation failed.")
Example #36
def getformat(fmt, keep_pad_byte=False):
    fmt = tostr(fmt, encoding="ascii")
    try:
        formatstring, names, fixes = _formatcache[fmt]
    except KeyError:
        lines = re.split("[\n;]", fmt)
        formatstring = ""
        names = []
        fixes = {}
        for line in lines:
            if _emptyRE.match(line):
                continue
            m = _extraRE.match(line)
            if m:
                formatchar = m.group(1)
                if formatchar != 'x' and formatstring:
                    raise Error("a special fmt char must be first")
            else:
                m = _elementRE.match(line)
                if not m:
                    raise Error("syntax error in fmt: '%s'" % line)
                name = m.group(1)
                formatchar = m.group(2)
                if keep_pad_byte or formatchar != "x":
                    names.append(name)
                if m.group(3):
                    # fixed point
                    before = int(m.group(3))
                    after = int(m.group(4))
                    bits = before + after
                    if bits not in [8, 16, 32]:
                        raise Error(
                            "fixed point must be 8, 16 or 32 bits long")
                    formatchar = _fixedpointmappings[bits]
                    assert m.group(5) == "F"
                    fixes[name] = after
            formatstring = formatstring + formatchar
        _formatcache[fmt] = formatstring, names, fixes
    return formatstring, names, fixes
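
getformat() belongs to fontTools.misc.sstruct; a short sketch of the mini-language it parses (the field names here are made up):

from fontTools.misc import sstruct

fmt = """
    >  # big endian
    version:  16.16F
    flags:    H
"""
print(sstruct.getformat(fmt))
# expected along the lines of ('>lH', ['version', 'flags'], {'version': 16})
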
Example #37
 def __fspath__(self):
     return tostr(self._path, sys.getfilesystemencoding())
Example #38
def run(args):
	tagOverrideMap, fileList, ttcFilePath = parseArgs(args)
	print("Input fonts:", fileList)
	
	fontList = []
	tableMap = {}
	tableList = []
	
	# Read each font file into a list of tables in a fontEntry
	for fontPath in fileList:
		fontEntryList = readFontFile(fontPath)
		fontList += fontEntryList
	# Add the fontEntry tableEntries to tableList.
	for fontEntry in fontList:
		tableIndex = 0
		numTables = len(fontEntry.tableList)
		while tableIndex < numTables:
			tableEntry = fontEntry.tableList[tableIndex]
			
			try:
				fontIndex = tagOverrideMap[tableEntry.tag]
				tableEntry = fontList[fontIndex].getTable(tableEntry.tag)
				fontEntry.tableList[tableIndex] = tableEntry
			except KeyError:
				pass
				
			try:
				tableEntryList = tableMap[tableEntry.tag]
				matched = 0
				for tEntry in tableEntryList:
					if (tEntry.checksum == tableEntry.checksum) and (tEntry.length == tableEntry.length) and (tEntry.data == tableEntry.data):
						matched = 1
						fontEntry.tableList[tableIndex] = tEntry
						break
				if not matched:
					tableEntryList.append(tableEntry)
			except KeyError:
				tableEntryList = [tableEntry]
				tableMap[tableEntry.tag] = tableEntryList
				tableList.insert(tableIndex, tableEntryList)
				
			tableIndex += 1
			

	writeTTC(fontList, tableList, ttcFilePath)
	print("Output font:", ttcFilePath)

	# report which tables are shared.
	sharedTables = []
	unSharedTables = []
	for tableEntryList in tableList:
		if len(tableEntryList) > 1:
			unSharedTables.append(tostr(tableEntryList[0].tag.decode('ascii')))
		else:
			sharedTables.append(tostr(tableEntryList[0].tag.decode('ascii')))
	if len(sharedTables) == 0:
		print("No tables are shared")
	else:
		print("Shared tables: %s" % repr(sharedTables))
	if len(unSharedTables) == 0:
		print("All tables are shared")
	else:
		print("Un-shared tables: %s" % repr(unSharedTables))


	print("Done")