コード例 #1
0
 def _compileColorRecords(self):
     # Deduplicate packed palettes: identical palettes share one run of
     # color records, and each palette's index entry points at the first
     # color-record position of its (possibly shared) data.
     colorRecords = []
     colorRecordIndices = []
     seen = {}
     for palette in self.palettes:
         packed = self._compilePalette(palette)
         if packed not in seen:
             seen[packed] = len(colorRecords)
             colorRecords.append(packed)
         recordIndex = seen[packed]
         colorRecordIndices.append(
             struct.pack(">H", recordIndex * self.numPaletteEntries))
     return bytesjoin(colorRecordIndices), bytesjoin(colorRecords)
コード例 #2
0
    def createData(self):
        """Assemble the Type 1 font program as bytes.

        Emits the cleartext (pre-eexec) portion of ``self.font`` line by
        line.  Once the first key listed in ``_type1_post_eexec_order``
        (usually 'Private') is reached, that key and every later key is
        diverted into ``eexec_dict`` and emitted encrypted via
        ``self.encode_eexec``.  Returns the joined byte string.
        """
        sf = self.font

        eexec_began = False
        eexec_dict = {}
        lines = []
        lines.extend([
            self._tobytes(f"%!FontType1-1.1: {sf['FontName']}"),
            self._tobytes(f"%t1Font: ({fontTools.version})"),
            self._tobytes(f"%%BeginResource: font {sf['FontName']}")
        ])
        # follow t1write.c:writeRegNameKeyedFont
        size = 3  # Headroom for new key addition
        size += 1  # FontMatrix is always counted
        size += 1 + 1  # Private, CharStrings
        for key in font_dictionary_keys:
            size += int(key in sf)
        lines.append(self._tobytes(f"{size} dict dup begin"))

        for key, value in sf.items():
            if eexec_began:
                # Everything after the first post-eexec key belongs to the
                # encrypted portion.
                eexec_dict[key] = value
                continue

            if key == "FontInfo":
                fi = sf["FontInfo"]
                # follow t1write.c:writeFontInfoDict
                size = 3  # Headroom for new key addition
                for subkey in FontInfo_dictionary_keys:
                    size += int(subkey in fi)
                lines.append(self._tobytes(f"/FontInfo {size} dict dup begin"))

                for subkey, subvalue in fi.items():
                    lines.extend(self._make_lines(subkey, subvalue))
                lines.append(b"end def")
            elif key in _type1_post_eexec_order:  # usually 'Private'
                eexec_dict[key] = value
                eexec_began = True
            else:
                lines.extend(self._make_lines(key, value))
        lines.append(b"end")
        eexec_portion = self.encode_eexec(eexec_dict)
        lines.append(bytesjoin([b"currentfile eexec ", eexec_portion]))

        # Conventional Type 1 trailer: 512 zeros (8 lines of 64), then
        # cleartomark and the DSC closing comments.
        for _ in range(8):
            lines.append(self._tobytes("0" * 64))
        lines.extend([b"cleartomark", b"%%EndResource", b"%%EOF"])

        data = bytesjoin(lines, "\n")
        return data
コード例 #3
0
File: S_V_G_.py  Project: googlefonts/fonttools
    def compile(self, ttFont):
        """Compile the 'SVG ' table (format 0).

        Layout: header, then the SVGDocIndex (entry count plus one
        (startGID, endGID, offset, length) record per document), then the
        UTF-8 (optionally gzip-compressed) documents themselves.
        Identical documents are stored once and shared by offset.
        """
        version = 0
        offsetToSVGDocIndex = SVG_format_0Size  # I start the SVGDocIndex right after the header.
        # get SGVDoc info.
        docList = []
        entryList = []
        numEntries = len(self.docList)
        datum = struct.pack(">H", numEntries)
        entryList.append(datum)
        curOffset = len(datum) + doc_index_entry_format_0Size * numEntries
        seenDocs = {}
        allCompressed = getattr(self, "compressed", False)
        for i, doc in enumerate(self.docList):
            if isinstance(doc, (list, tuple)):
                # Upgrade legacy (data, startGID, endGID) tuples to
                # SVGDocument objects in place.
                doc = SVGDocument(*doc)
                self.docList[i] = doc
            docBytes = tobytes(doc.data, encoding="utf_8")
            if (allCompressed or
                    doc.compressed) and not docBytes.startswith(b"\x1f\x8b"):
                import gzip
                bytesIO = BytesIO()
                # mtime=0 strips the useless timestamp and makes gzip output reproducible;
                # equivalent to `gzip -n`
                with gzip.GzipFile(None, "w", fileobj=bytesIO,
                                   mtime=0) as gzipper:
                    gzipper.write(docBytes)
                gzipped = bytesIO.getvalue()
                # Keep the compressed form only when it is actually smaller.
                if len(gzipped) < len(docBytes):
                    docBytes = gzipped
                del gzipped, bytesIO
            docLength = len(docBytes)
            if docBytes in seenDocs:
                docOffset = seenDocs[docBytes]
            else:
                docOffset = curOffset
                curOffset += docLength
                seenDocs[docBytes] = docOffset
                docList.append(docBytes)
            entry = struct.pack(">HHLL", doc.startGlyphID, doc.endGlyphID,
                                docOffset, docLength)
            entryList.append(entry)
        entryList.extend(docList)
        svgDocData = bytesjoin(entryList)

        reserved = 0
        header = struct.pack(">HLL", version, offsetToSVGDocIndex, reserved)
        data = [header, svgDocData]
        data = bytesjoin(data)
        return data
コード例 #4
0
    def getData(self):
        """Assemble the data for this writer/table, without subtables."""
        basePos = self.pos
        resolved = list(self.items)  # shallow copy; offsets packed in place
        for idx, entry in enumerate(resolved):
            if not hasattr(entry, "getData"):
                continue
            delta = entry.pos - basePos
            if entry.offsetSize == 4:
                resolved[idx] = packULong(delta)
            elif entry.offsetSize == 2:
                try:
                    resolved[idx] = packUShort(delta)
                except struct.error:
                    # The 16-bit offset overflowed: hand the caller enough
                    # context to attempt a table re-layout.
                    overflowErrorRecord = self.getOverflowErrorRecord(entry)

                    raise OTLOffsetOverflowError(overflowErrorRecord)
            elif entry.offsetSize == 3:
                resolved[idx] = packUInt24(delta)
            else:
                raise ValueError(entry.offsetSize)

        return bytesjoin(resolved)
コード例 #5
0
    def compile(self, ttFont):
        """Compile an EBLC indexSubTable format 4.

        Emits the common subtable header, the glyph count, then one
        (glyphID, offset) pair per glyph plus a trailing pad pair.
        """
        # First make sure that all the data lines up properly. Format 4
        # must have all its data lined up consecutively. If not this will fail.
        for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
            assert curLoc[1] == nxtLoc[
                0], "Data must be consecutive in indexSubTable format 4"

        offsets = list(
            self.locations[0]) + [loc[1] for loc in self.locations[1:]]
        # Image data offset must be less than or equal to the minimum of locations.
        # Resetting this offset may change the value for round tripping but is safer
        # and allows imageDataOffset to not be required to be in the XML version.
        self.imageDataOffset = min(offsets)
        offsets = [offset - self.imageDataOffset for offset in offsets]
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        # Create an iterator over the ids plus a padding value.
        idsPlusPad = list(itertools.chain(glyphIds, [0]))

        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", len(glyphIds)))
        tmp = [
            struct.pack(codeOffsetPairFormat, *cop)
            for cop in zip(idsPlusPad, offsets)
        ]
        dataList += tmp
        data = bytesjoin(dataList)
        return data
コード例 #6
0
    def getAllData(self):
        """Assemble all data, including all subtables."""
        internedTables = {}
        self._doneWriting(internedTables)
        tables, extTables, done = [], [], {}
        self._gatherTables(tables, extTables, done)
        tables.reverse()
        extTables.reverse()

        # First pass: assign absolute positions.  Regular tables come
        # first, extension tables are laid out after them; positions must
        # be known before any table can serialize its offsets.
        pos = 0
        for table in tables + extTables:
            table.pos = pos
            pos += table.getDataLength()

        # Second pass: serialize in the same order.
        serialized = [table.getData() for table in tables + extTables]
        return bytesjoin(serialized)
コード例 #7
0
def readLWFN(path, onlyHeader=False):
    """reads an LWFN font file, returns raw data"""
    from fontTools.misc.macRes import ResourceReader
    reader = ResourceReader(path)
    try:
        chunks = []
        for res in reader.get('POST', []):
            code = byteord(res.data[0])
            if byteord(res.data[1]) != 0:
                raise T1Error('corrupt LWFN file')
            if code == 0:
                continue  # comment chunk, ignore
            if code in (1, 2):
                if onlyHeader and code == 2:
                    break
                chunks.append(res.data[2:])
            elif code in (3, 5):
                break
            elif code == 4:
                # Font data lives in the data fork of the file itself.
                with open(path, "rb") as f:
                    chunks.append(f.read())
            else:
                raise T1Error('bad chunk code: ' + repr(code))
    finally:
        reader.close()
    data = bytesjoin(chunks)
    assertType1(data)
    return data
コード例 #8
0
 def compile(self, ttFont):
     # The 'meta' table stores data maps sorted by tag; their payloads
     # follow the maps in the same order.
     tags = sorted(self.data.keys())
     headerSize = sstruct.calcsize(META_HEADER_FORMAT)
     offset = headerSize + len(tags) * sstruct.calcsize(DATA_MAP_FORMAT)
     header = sstruct.pack(
         META_HEADER_FORMAT, {
             "version": 1,
             "flags": 0,
             "dataOffset": offset,
             "numDataMaps": len(tags)
         })
     maps = []
     blocks = []
     for tag in tags:
         # 'dlng' and 'slng' hold text; every other tag is raw bytes.
         if tag in ("dlng", "slng"):
             payload = self.data[tag].encode("utf-8")
         else:
             payload = self.data[tag]
         maps.append(
             sstruct.pack(DATA_MAP_FORMAT, {
                 "tag": tag,
                 "dataOffset": offset,
                 "dataLength": len(payload)
             }))
         blocks.append(payload)
         offset += len(payload)
     return bytesjoin([header] + maps + blocks)
コード例 #9
0
    def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
        """Pack a list of per-row byte strings into ``self.imageData``.

        Rows are bit-packed according to *bitDepth* and *metrics*.  When
        *reverseBytes* is false, the input rows are first bit-reversed to
        match the internal (reversed-bit) working order.
        """
        if metrics is None:
            metrics = self.metrics
        if not reverseBytes:
            dataRows = list(map(_reverseBytes, dataRows))

        # Keep track of a list of ordinal values as they are easier to modify
        # than a list of strings. Map to actual strings later.
        numBytes = (self._getBitRange(len(dataRows), bitDepth, metrics)[0] +
                    7) // 8
        ordDataList = [0] * numBytes
        for row, data in enumerate(dataRows):
            bitRange = self._getBitRange(row, bitDepth, metrics)
            stepRange = bitRange + (8, )
            for curBit, curByte in zip(range(*stepRange), data):
                # A source byte may straddle two destination bytes when the
                # row's bit range is not byte-aligned.
                endBit = min(curBit + 8, bitRange[1])
                cutPoint = curBit % 8
                firstByteLoc = curBit // 8
                secondByteLoc = endBit // 8
                if firstByteLoc < secondByteLoc:
                    numBitsCut = 8 - cutPoint
                else:
                    numBitsCut = endBit - curBit
                curByte = byteord(curByte)
                firstByte = curByte & ((1 << numBitsCut) - 1)
                ordDataList[firstByteLoc] |= (firstByte << cutPoint)
                if firstByteLoc < secondByteLoc and secondByteLoc < numBytes:
                    secondByte = (curByte >> numBitsCut) & (
                        (1 << 8 - numBitsCut) - 1)
                    ordDataList[secondByteLoc] |= secondByte

        # Save the image data with the bits going the correct way.
        self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList)))
コード例 #10
0
    def compile(self, ttFont):
        """Compile the GPKG table: header, GMAP and glyphlet offset
        arrays (each with a trailing end-offset entry), then the data."""
        self.numGMAPs = len(self.GMAPs)
        self.numGlyplets = len(self.glyphlets)
        GMAPoffsets = [0] * (self.numGMAPs + 1)
        glyphletOffsets = [0] * (self.numGlyplets + 1)

        dataList = [sstruct.pack(GPKGFormat, self)]

        # Offsets are absolute from the start of the table; the data
        # begins after the header plus both (n + 1)-entry uint32 arrays.
        pos = len(
            dataList[0]) + (self.numGMAPs + 1) * 4 + (self.numGlyplets + 1) * 4
        GMAPoffsets[0] = pos
        for i in range(1, self.numGMAPs + 1):
            pos += len(self.GMAPs[i - 1])
            GMAPoffsets[i] = pos
        gmapArray = array.array("I", GMAPoffsets)
        if sys.byteorder != "big": gmapArray.byteswap()  # table data is big-endian
        dataList.append(gmapArray.tobytes())

        glyphletOffsets[0] = pos
        for i in range(1, self.numGlyplets + 1):
            pos += len(self.glyphlets[i - 1])
            glyphletOffsets[i] = pos
        glyphletArray = array.array("I", glyphletOffsets)
        if sys.byteorder != "big": glyphletArray.byteswap()  # table data is big-endian
        dataList.append(glyphletArray.tobytes())
        dataList += self.GMAPs
        dataList += self.glyphlets
        data = bytesjoin(dataList)
        return data
コード例 #11
0
def decrypt(cipherstring, R):
    r"""
	Decrypts a string using the Type 1 encryption algorithm.

	Args:
		cipherstring: String of ciphertext.
		R: Initial key.

	Returns:
		decryptedStr: Plaintext string.
		R: Output key for subsequent decryptions.

	Examples::

		>>> testStr = b"\0\0asdadads asds\265"
		>>> decryptedStr, R = decrypt(testStr, 12321)
		>>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
		True
		>>> R == 36142
		True
	"""
    # Feed each ciphertext byte through _decryptChar, threading the
    # evolving key R from one byte to the next.
    decoded = []
    for cipherByte in cipherstring:
        plainByte, R = _decryptChar(cipherByte, R)
        decoded.append(plainByte)
    return bytesjoin(decoded), int(R)
コード例 #12
0
    def toUnicode(self, errors='strict'):
        """
		If self.string is a Unicode string, return it; otherwise try decoding the
		bytes in self.string to a Unicode string using the encoding of this
		entry as returned by self.getEncoding(); Note that  self.getEncoding()
		returns 'ascii' if the encoding is unknown to the library.

		Certain heuristics are performed to recover data from bytes that are
		ill-formed in the chosen encoding, or that otherwise look misencoded
		(mostly around bad UTF-16BE encoded bytes, or bytes that look like UTF-16BE
		but marked otherwise).  If the bytes are ill-formed and the heuristics fail,
		the error is handled according to the errors parameter to this function, which is
		passed to the underlying decode() function; by default it throws a
		UnicodeDecodeError exception.

		Note: The mentioned heuristics mean that roundtripping a font to XML and back
		to binary might recover some misencoded data whereas just loading the font
		and saving it back will not change them.
		"""
        # Printable ASCII plus tab/newline/carriage-return.
        def isascii(b):
            return (b >= 0x20 and b <= 0x7E) or b in [0x09, 0x0A, 0x0D]

        encoding = self.getEncoding()
        string = self.string

        if isinstance(
                string,
                bytes) and encoding == 'utf_16_be' and len(string) % 2 == 1:
            # Recover badly encoded UTF-16 strings that have an odd number of bytes:
            # - If the last byte is zero, drop it.  Otherwise,
            # - If all the odd bytes are zero and all the even bytes are ASCII,
            #   prepend one zero byte.  Otherwise,
            # - If first byte is zero and all other bytes are ASCII, insert zero
            #   bytes between consecutive ASCII bytes.
            #
            # (Yes, I've seen all of these in the wild... sigh)
            if byteord(string[-1]) == 0:
                string = string[:-1]
            elif all(
                    byteord(b) == 0 if i % 2 else isascii(byteord(b))
                    for i, b in enumerate(string)):
                string = b'\0' + string
            elif byteord(string[0]) == 0 and all(
                    isascii(byteord(b)) for b in string[1:]):
                string = bytesjoin(b'\0' + bytechr(byteord(b))
                                   for b in string[1:])

        string = tostr(string, encoding=encoding, errors=errors)

        # If decoded strings still looks like UTF-16BE, it suggests a double-encoding.
        # Fix it up.
        if all(
                ord(c) == 0 if i % 2 == 0 else isascii(ord(c))
                for i, c in enumerate(string)):
            # If string claims to be Mac encoding, but looks like UTF-16BE with ASCII text,
            # narrow it down.
            # Keep only the ASCII payload characters (odd positions).
            string = ''.join(c for c in string[1::2])

        return string
コード例 #13
0
 def _compilePaletteTypes(self):
     # Version 0 tables carry no palette-type array; it is likewise
     # omitted when every type is 0 (the default).
     if self.version == 0 or not any(self.paletteTypes):
         return b''
     assert len(self.paletteTypes) == len(self.palettes)
     packed = [struct.pack(">I", paletteType)
               for paletteType in self.paletteTypes]
     result = bytesjoin(packed)
     assert len(result) == 4 * len(self.palettes)
     return result
コード例 #14
0
 def compile(self, ttFont):
     # Big glyph metrics, then a component count, then one record per
     # component (with its glyph code resolved from the glyph name).
     parts = [
         sstruct.pack(bigGlyphMetricsFormat, self.metrics),
         struct.pack(">H", len(self.componentArray)),
     ]
     for component in self.componentArray:
         component.glyphCode = ttFont.getGlyphID(component.name)
         parts.append(sstruct.pack(ebdtComponentFormat, component))
     return bytesjoin(parts)
コード例 #15
0
 def compile(self, axisTags, includePostScriptName):
     # Instance record: fixed-size header, one 16.16 coordinate per axis,
     # and an optional trailing postscriptNameID.
     pieces = [sstruct.pack(FVAR_INSTANCE_FORMAT, self)]
     pieces.extend(
         struct.pack(">l", fl2fi(self.coordinates[axis], 16))
         for axis in axisTags)
     if includePostScriptName:
         pieces.append(struct.pack(">H", self.postscriptNameID))
     return bytesjoin(pieces)
コード例 #16
0
	def compile(self, ttFont):
		"""Compile the cmap subtable into its binary group format.

		If raw ``self.data`` is present it is re-emitted verbatim behind a
		fresh header.  Otherwise the code->glyph mapping is resolved to
		glyph IDs (allowing virtual 'gidNNN' names) and run-length encoded
		into sequential groups per ``self._IsInSameRun``.
		"""
		if self.data:
			# Pre-existing raw data: just prepend the 16-byte header.
			return struct.pack(">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups) + self.data
		charCodes = list(self.cmap.keys())
		names = list(self.cmap.values())
		nameMap = ttFont.getReverseGlyphMap()
		try:
			gids = [nameMap[name] for name in names]
		except KeyError:
			nameMap = ttFont.getReverseGlyphMap(rebuild=True)
			try:
				gids = [nameMap[name] for name in names]
			except KeyError:
				# allow virtual GIDs in format 12 tables
				gids = []
				for name in names:
					try:
						gid = nameMap[name]
					except KeyError:
						try:
							if (name[:3] == 'gid'):
								gid = int(name[3:])
							else:
								gid = ttFont.getGlyphID(name)
						except Exception:
							# Narrowed from a bare 'except:' so that
							# KeyboardInterrupt/SystemExit still propagate.
							raise KeyError(name)

					gids.append(gid)

		cmap = {}  # code:glyphID mapping
		for code, gid in zip(charCodes, gids):
			cmap[code] = gid

		charCodes.sort()
		startCharCode = charCodes[0]
		startGlyphID = cmap[startCharCode]
		# Seed the "previous" values so the first glyph never extends a run.
		lastGlyphID = startGlyphID - self._format_step
		lastCharCode = startCharCode - 1
		nGroups = 0
		dataList = []
		maxIndex = len(charCodes)
		for index in range(maxIndex):
			charCode = charCodes[index]
			glyphID = cmap[charCode]
			if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode):
				# Close the previous run and start a new group.
				dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
				startCharCode = charCode
				startGlyphID = glyphID
				nGroups = nGroups + 1
			lastGlyphID = glyphID
			lastCharCode = charCode
		# Flush the final group.
		dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
		nGroups = nGroups + 1
		data = bytesjoin(dataList)
		lengthSubtable = len(data) + 16
		assert len(data) == (nGroups * 12) == (lengthSubtable - 16)
		return struct.pack(">HHLLL", self.format, self.reserved, lengthSubtable, self.language, nGroups) + data
コード例 #17
0
 def test_unsupportedLookupType(self):
     # Patch the lookup-type byte of known-good 'morx' data to the bogus
     # value 66 and check that decompilation rejects it.
     mutated = bytesjoin([
         MORX_NONCONTEXTUAL_DATA[:67],
         bytechr(66),
         MORX_NONCONTEXTUAL_DATA[69:],
     ])
     morx = newTable('morx')
     with self.assertRaisesRegex(AssertionError,
                                 r"unsupported 'morx' lookup type 66"):
         morx.decompile(mutated, FakeFont(['.notdef']))
コード例 #18
0
 def _compilePaletteEntryLabels(self):
     # Omit the label array on version 0, or when every entry is unnamed.
     labels = self.paletteEntryLabels
     if self.version == 0 or all(label == self.NO_NAME_ID for label in labels):
         return b''
     assert len(labels) == self.numPaletteEntries
     result = bytesjoin([struct.pack(">H", label) for label in labels])
     assert len(result) == 2 * self.numPaletteEntries
     return result
コード例 #19
0
def _reverseBytes(data):
    # Recurse over multi-byte input, reversing each byte independently.
    if len(data) != 1:
        return bytesjoin(map(_reverseBytes, data))
    # Mirror the 8 bits of the single byte,
    # e.g. 0b11010000 -> 0b00001011.
    mirrored = int('{:08b}'.format(byteord(data))[::-1], 2)
    return bytechr(mirrored)
コード例 #20
0
    def compile(self, ttFont):
        """Compile the 'SVG ' table (format 0): header, SVGDocIndex, then
        the UTF-8 (optionally gzip-compressed, deduplicated) documents.
        """
        version = 0
        offsetToSVGDocIndex = SVG_format_0Size  # I start the SVGDocIndex right after the header.
        # get SGVDoc info.
        docList = []
        entryList = []
        numEntries = len(self.docList)
        datum = struct.pack(">H", numEntries)
        entryList.append(datum)
        curOffset = len(datum) + doc_index_entry_format_0Size * numEntries
        seenDocs = {}
        for doc, startGlyphID, endGlyphID in self.docList:
            docBytes = tobytes(doc, encoding="utf_8")
            if getattr(self, "compressed",
                       False) and not docBytes.startswith(b"\x1f\x8b"):
                import gzip
                bytesIO = BytesIO()
                # mtime=0 strips the useless timestamp so the gzip output is
                # reproducible (equivalent to `gzip -n`), matching the newer
                # SVG compiler in this codebase.
                with gzip.GzipFile(None, "w", fileobj=bytesIO,
                                   mtime=0) as gzipper:
                    gzipper.write(docBytes)
                gzipped = bytesIO.getvalue()
                # Keep the compressed form only when it is actually smaller.
                if len(gzipped) < len(docBytes):
                    docBytes = gzipped
                del gzipped, bytesIO
            docLength = len(docBytes)
            # Identical documents are stored once and shared by offset.
            if docBytes in seenDocs:
                docOffset = seenDocs[docBytes]
            else:
                docOffset = curOffset
                curOffset += docLength
                seenDocs[docBytes] = docOffset
                docList.append(docBytes)
            entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset,
                                docLength)
            entryList.append(entry)
        entryList.extend(docList)
        svgDocData = bytesjoin(entryList)

        reserved = 0
        header = struct.pack(">HLL", version, offsetToSVGDocIndex, reserved)
        data = [header, svgDocData]
        data = bytesjoin(data)
        return data
コード例 #21
0
 def compile(self, ttFont):
     # Common header, fixed image size, shared big metrics, then a
     # glyph-id array padded with a zero to a 32-bit boundary.
     self.imageDataOffset = min(next(iter(zip(*self.locations))))
     glyphIds = [ttFont.getGlyphID(name) for name in self.names]
     parts = [
         EblcIndexSubTable.compile(self, ttFont),
         struct.pack(">L", self.imageSize),
         sstruct.pack(bigGlyphMetricsFormat, self.metrics),
         struct.pack(">L", len(glyphIds)),
     ]
     parts.extend(struct.pack(">H", glyphId) for glyphId in glyphIds)
     if len(glyphIds) % 2:
         parts.append(struct.pack(">H", 0))
     return bytesjoin(parts)
コード例 #22
0
 def test_indent_dedent(self):
     # Only "bar", written between indent() and dedent(), should be
     # prefixed with the two-space indent.
     writer = XMLWriter(BytesIO())
     writer.write("foo")
     writer.newline()
     writer.indent()
     writer.write("bar")
     writer.newline()
     writer.dedent()
     writer.write("baz")
     expected = HEADER + bytesjoin(["foo", "  bar", "baz"], "\n")
     self.assertEqual(expected, writer.file.getvalue())
コード例 #23
0
def _binary2data(binary):
    # Pack a string of '0'/'1' characters into bytes.  Within each group
    # of eight, the first character is the least significant bit; any
    # character other than '1' counts as a zero bit.
    packed = []
    for start in range(0, len(binary), 8):
        chunk = binary[start:start + 8]
        value = sum(1 << shift
                    for shift, bit in enumerate(chunk) if bit == '1')
        packed.append(bytechr(value))
    return bytesjoin(packed)
コード例 #24
0
    def compile(self, ttFont):
        # Format 2 stores no per-glyph index, so the glyph ids must form
        # exactly the run firstGlyphIndex..lastGlyphIndex.
        glyphIds = [ttFont.getGlyphID(name) for name in self.names]
        expected = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
        assert glyphIds == expected, "Format 2 ids must be consecutive."
        self.imageDataOffset = min(next(iter(zip(*self.locations))))

        return bytesjoin([
            EblcIndexSubTable.compile(self, ttFont),
            struct.pack(">L", self.imageSize),
            sstruct.pack(bigGlyphMetricsFormat, self.metrics),
        ])
コード例 #25
0
def readOther(path):
    """reads any (font) file, returns raw data"""
    with open(path, "rb") as f:
        raw = f.read()
    assertType1(raw)
    # Encrypted chunks stored as ASCII hexadecimal are decoded to binary;
    # everything else passes through untouched.
    decoded = [
        deHexString(chunk) if isEncrypted and isHex(chunk[:4]) else chunk
        for isEncrypted, chunk in findEncryptedChunks(raw)
    ]
    return bytesjoin(decoded)
コード例 #26
0
 def test_dumphex(self):
     # dumphex renders bytes as 8-hex-digit (4-byte) words, four words per
     # line, padding the final short line with spaces.
     writer = XMLWriter(BytesIO())
     writer.dumphex(
         "Type is a beautiful group of letters, not a group of beautiful letters."
     )
     self.assertEqual(
         HEADER + bytesjoin([
             "54797065 20697320 61206265 61757469",
             "66756c20 67726f75 70206f66 206c6574",
             "74657273 2c206e6f 74206120 67726f75",
             "70206f66 20626561 75746966 756c206c", "65747465 72732e  ", ""
         ],
                            joiner="\n"), writer.file.getvalue())
コード例 #27
0
    def compile(self, ttFont):
        """Compile the EBDT/CBDT bitmap data table.

        Serializes every glyph referenced by the companion locator table's
        index subtables, deduplicating glyph data by object identity, and
        records each glyph's (start, end) byte interval back into the
        index subtables' ``locations``.
        """

        dataList = []
        dataList.append(sstruct.pack(ebdtTableVersionFormat, self))
        dataSize = len(dataList[0])

        # Keep a dict of glyphs that have been seen so they aren't remade.
        # This dict maps the id of the BitmapGlyph to the interval
        # in the data.
        glyphDict = {}

        # Go through the bitmap glyph data. Just in case the data for a glyph
        # changed the size metrics should be recalculated. There are a variety
        # of formats and they get stored in the EBLC table. That is why
        # recalculation is deferred to the EblcIndexSubTable class and just
        # pass what is known about bitmap glyphs from this particular table.
        locator = ttFont[self.__class__.locatorName]
        for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
            for curIndexSubTable in curStrike.indexSubTables:
                dataLocations = []
                for curName in curIndexSubTable.names:
                    # Handle the data placement based on seeing the glyph or not.
                    # Just save a reference to the location if the glyph has already
                    # been saved in compile. This code assumes that glyphs will only
                    # be referenced multiple times from indexFormat5. By luck the
                    # code may still work when referencing poorly ordered fonts with
                    # duplicate references. If there is a font that is unlucky the
                    # respective compile methods for the indexSubTables will fail
                    # their assertions. All fonts seem to follow this assumption.
                    # More complicated packing may be needed if a counter-font exists.
                    glyph = curGlyphDict[curName]
                    objectId = id(glyph)
                    if objectId not in glyphDict:
                        data = glyph.compile(ttFont)
                        data = curIndexSubTable.padBitmapData(data)
                        startByte = dataSize
                        dataSize += len(data)
                        endByte = dataSize
                        dataList.append(data)
                        dataLoc = (startByte, endByte)
                        glyphDict[objectId] = dataLoc
                    else:
                        dataLoc = glyphDict[objectId]
                    dataLocations.append(dataLoc)
                # Just use the new data locations in the indexSubTable.
                # The respective compile implementations will take care
                # of any of the problems in the conversion that may arise.
                curIndexSubTable.locations = dataLocations

        return bytesjoin(dataList)
コード例 #28
0
 def compile(self, ttFont):
     # 'ltag' layout: 12-byte header (version, flags, numTags), one
     # (offset, length) record per tag, then a string pool holding the
     # tag text with shared substrings reused.
     dataList = [
         struct.pack(">LLL", self.version, self.flags, len(self.tags))
     ]
     stringPool = ""
     recordsEnd = 12 + len(self.tags) * 4
     for tag in self.tags:
         pos = stringPool.find(tag)
         if pos < 0:
             # Tag text not yet in the pool: append it.
             pos = len(stringPool)
             stringPool = stringPool + tag
         dataList.append(struct.pack(">HH", pos + recordsEnd, len(tag)))
     dataList.append(tobytes(stringPool))
     return bytesjoin(dataList)
コード例 #29
0
    def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
        """Extract one row of bitmap data from ``self.imageData``.

        Returns the row as a byte string, zero-padded to a whole number of
        bytes.  When *reverseBytes* is false, the result is bit-reversed
        back from the internal storage order before returning.
        """
        if metrics is None:
            metrics = self.metrics
        assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"

        # Loop through each byte. This can cover two bytes in the original data or
        # a single byte if things happen to be aligned. The very last entry might
        # not be aligned so take care to trim the binary data to size and pad with
        # zeros in the row data. Bit aligned data is somewhat tricky.
        #
        # Example of data cut. Data cut represented in x's.
        # '|' represents byte boundary.
        # data = ...0XX|XXXXXX00|000... => XXXXXXXX
        #		or
        # data = ...0XX|XXXX0000|000... => XXXXXX00
        #   or
        # data = ...000|XXXXXXXX|000... => XXXXXXXX
        #   or
        # data = ...000|00XXXX00|000... => XXXX0000
        #
        dataList = []
        bitRange = self._getBitRange(row, bitDepth, metrics)
        stepRange = bitRange + (8, )
        for curBit in range(*stepRange):
            endBit = min(curBit + 8, bitRange[1])
            numBits = endBit - curBit
            cutPoint = curBit % 8
            firstByteLoc = curBit // 8
            secondByteLoc = endBit // 8
            if firstByteLoc < secondByteLoc:
                numBitsCut = 8 - cutPoint
            else:
                numBitsCut = endBit - curBit
            curByte = _reverseBytes(self.imageData[firstByteLoc])
            firstHalf = byteord(curByte) >> cutPoint
            firstHalf = ((1 << numBitsCut) - 1) & firstHalf
            newByte = firstHalf
            if firstByteLoc < secondByteLoc and secondByteLoc < len(
                    self.imageData):
                # The row's bits straddle a byte boundary: merge in the
                # remaining bits from the following source byte.
                curByte = _reverseBytes(self.imageData[secondByteLoc])
                secondHalf = byteord(curByte) << numBitsCut
                newByte = (firstHalf | secondHalf) & ((1 << numBits) - 1)
            dataList.append(bytechr(newByte))

        # The way the data is kept is opposite the algorithm used.
        data = bytesjoin(dataList)
        if not reverseBytes:
            data = _reverseBytes(data)
        return data
コード例 #30
0
 def test_decompile_badOffset(self):
     # https://github.com/fonttools/fonttools/issues/525
     # A record whose string offset points past the end of the data must
     # be skipped instead of crashing decompilation.
     badRecord = {
         "platformID": 1,
         "platEncID": 3,
         "langID": 7,
         "nameID": 1,
         "length": 3,
         "offset": 8765  # out of range
     }
     payload = bytesjoin([
         struct.pack(tostr(">HHH"), 1, 1, 6 + nameRecordSize),
         sstruct.pack(nameRecordFormat, badRecord)
     ])
     table = table__n_a_m_e()
     table.decompile(payload, ttFont=None)
     self.assertEqual(table.names, [])