示例#1
0
	def compileFormat0(self, ttFont):
		"""Compile a version-0 SVG table: fixed header, SVG document
		index, then an optional color-palettes block.

		Returns the packed table as bytes.
		"""
		version = 0
		offsetToSVGDocIndex = SVG_format_0Size # I start the SVGDocIndex right after the header.
		# get SVGDoc info.
		docList = []
		entryList = []
		numEntries = len(self.docList)
		datum = struct.pack(">H",numEntries)
		entryList.append(datum)
		# Document offsets are relative to the start of the doc index:
		# skip the 2-byte entry count plus one fixed-size entry per doc.
		curOffset = len(datum) + doc_index_entry_format_0Size*numEntries
		for doc, startGlyphID, endGlyphID in self.docList:
			docOffset = curOffset
			docBytes = tobytes(doc, encoding="utf_8")
			# Optionally gzip the document, unless it already starts with
			# the gzip magic bytes (\x1f\x8b), i.e. is compressed already.
			if getattr(self, "compressed", False) and not docBytes.startswith(b"\x1f\x8b"):
				import gzip
				bytesIO = BytesIO()
				with gzip.GzipFile(None, "w", fileobj=bytesIO) as gzipper:
					gzipper.write(docBytes)
				gzipped = bytesIO.getvalue()
				# Keep the compressed form only when it is actually smaller.
				if len(gzipped) < len(docBytes):
					docBytes = gzipped
				del gzipped, bytesIO
			docLength = len(docBytes)
			curOffset += docLength
			entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset, docLength)
			entryList.append(entry)
			docList.append(docBytes)
		entryList.extend(docList)
		svgDocData = bytesjoin(entryList)

		# get colorpalette info.
		if self.colorPalettes is None:
			offsetToColorPalettes = 0
			# NOTE(review): empty str relies on bytesjoin() converting it
			# to bytes; b"" would be the clearer choice.
			palettesData = ""
		else:
			offsetToColorPalettes = SVG_format_0Size + len(svgDocData)
			dataList = []
			numColorParams = len(self.colorPalettes.colorParamUINameIDs)
			datum = struct.pack(">H", numColorParams)
			dataList.append(datum)
			for uiNameId in self.colorPalettes.colorParamUINameIDs:
				datum = struct.pack(">H", uiNameId)
				dataList.append(datum)
			numColorPalettes = len(self.colorPalettes.colorPaletteList)
			datum = struct.pack(">H", numColorPalettes)
			dataList.append(datum)
			for colorPalette in self.colorPalettes.colorPaletteList:
				datum = struct.pack(">H", colorPalette.uiNameID)
				dataList.append(datum)
				for colorRecord in colorPalette.paletteColors:
					# RGBA, one byte per channel.
					data = struct.pack(">BBBB", colorRecord.red, colorRecord.green, colorRecord.blue, colorRecord.alpha)
					dataList.append(data)
			palettesData = bytesjoin(dataList)

		header = struct.pack(">HLL", version, offsetToSVGDocIndex, offsetToColorPalettes)
		data = [header, svgDocData, palettesData]
		data = bytesjoin(data)
		return data
示例#2
0
	def _compileColorRecords(self):
		"""Pack the palettes into a shared color-record pool.

		Returns a (colorRecordIndices, colorRecords) pair of byte
		strings; identical packed palettes share one pooled record.
		"""
		packedRecords = []
		indices = []
		seen = {}
		for palette in self.palettes:
			packed = self._compilePalette(palette)
			if packed not in seen:
				# First time we see this palette; add it to the pool.
				seen[packed] = len(packedRecords)
				packedRecords.append(packed)
			indices.append(struct.pack(">H", seen[packed] * self.numPaletteEntries))
		return bytesjoin(indices), bytesjoin(packedRecords)
示例#3
0
 def compile(self, ttFont):
     """Compile the 'meta' table: header, then data maps, then the
     corresponding data blocks, with tags emitted in sorted order."""
     keys = sorted(self.data.keys())
     headerSize = sstruct.calcsize(META_HEADER_FORMAT)
     dataOffset = headerSize + len(keys) * sstruct.calcsize(DATA_MAP_FORMAT)
     header = sstruct.pack(META_HEADER_FORMAT, {
             "version": 1,
             "flags": 0,
             "dataOffset": dataOffset,
             "numDataMaps": len(keys)
     })
     dataMaps = []
     dataBlocks = []
     for tag in keys:
         # 'dlng'/'slng' hold text; everything else is stored raw.
         data = self.data[tag].encode("utf-8") if tag in ("dlng", "slng") else self.data[tag]
         dataMaps.append(sstruct.pack(DATA_MAP_FORMAT, {
             "tag": tag,
             "dataOffset": dataOffset,
             "dataLength": len(data)
         }))
         dataBlocks.append(data)
         dataOffset += len(data)
     return bytesjoin([header] + dataMaps + dataBlocks)
    def getAllData(self):
        """Assemble all data, including all subtables."""
        internedTables = {}
        self._doneWriting(internedTables)
        tables = []
        extTables = []
        done = {}
        self._gatherTables(tables, extTables, done)
        tables.reverse()
        extTables.reverse()
        # Extension tables follow the regular ones; the combined order is
        # what both the position pass and the data pass walk.
        allTables = tables + extTables

        # Two passes: every subtable needs its absolute position before
        # any of the actual data can be assembled.
        pos = 0
        for table in allTables:
            table.pos = pos
            pos += table.getDataLength()

        return bytesjoin([table.getData() for table in allTables])
示例#5
0
    def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
        """Pack a list of per-row byte strings into this bitmap's
        bit-aligned imageData.

        dataRows: one bytes object per bitmap row.
        bitDepth: bits per pixel, used to compute each row's bit range.
        metrics: glyph metrics used for sizing; defaults to self.metrics.
        reverseBytes: when False, the incoming rows are bit-reversed
            first so the packing loop can work least-significant-bit
            first; the result is bit-reversed back at the end.
        """
        if metrics is None:
            metrics = self.metrics
        if not reverseBytes:
            dataRows = list(map(_reverseBytes, dataRows))

        # Keep track of a list of ordinal values as they are easier to modify
        # than a list of strings. Map to actual strings later.
        numBytes = (self._getBitRange(len(dataRows), bitDepth, metrics)[0] +
                    7) // 8
        ordDataList = [0] * numBytes
        for row, data in enumerate(dataRows):
            bitRange = self._getBitRange(row, bitDepth, metrics)
            stepRange = bitRange + (8, )
            # Walk the row 8 bits at a time; a source byte may straddle
            # two destination bytes when the row is not byte-aligned.
            for curBit, curByte in zip(range(*stepRange), data):
                endBit = min(curBit + 8, bitRange[1])
                cutPoint = curBit % 8
                firstByteLoc = curBit // 8
                secondByteLoc = endBit // 8
                if firstByteLoc < secondByteLoc:
                    numBitsCut = 8 - cutPoint
                else:
                    numBitsCut = endBit - curBit
                curByte = byteord(curByte)
                # The low numBitsCut bits land in the first destination byte...
                firstByte = curByte & ((1 << numBitsCut) - 1)
                ordDataList[firstByteLoc] |= (firstByte << cutPoint)
                # ...and the remainder spills into the next byte, if any.
                if firstByteLoc < secondByteLoc and secondByteLoc < numBytes:
                    secondByte = (curByte >> numBitsCut) & (
                        (1 << 8 - numBitsCut) - 1)
                    ordDataList[secondByteLoc] |= secondByte

        # Save the image data with the bits going the correct way.
        self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList)))
示例#6
0
    def compile(self, ttFont):
        """Compile an indexSubTable format 4: a sparse list of
        (glyphId, offset) pairs."""
        # Format 4 requires the glyph data to be stored back to back;
        # verify that each location ends exactly where the next begins.
        for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
            assert curLoc[1] == nxtLoc[
                0], "Data must be consecutive in indexSubTable format 4"

        offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
        # Image data offset must be less than or equal to the minimum of locations.
        # Resetting this offset may change the value for round tripping but is safer
        # and allows imageDataOffset to not be required to be in the XML version.
        self.imageDataOffset = min(offsets)
        offsets = [offset - self.imageDataOffset for offset in offsets]
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        # One trailing pad id pairs up with the final end-offset.
        idsPlusPad = glyphIds + [0]

        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", len(glyphIds)))
        dataList.extend(struct.pack(codeOffsetPairFormat, glyphId, offset)
                        for glyphId, offset in zip(idsPlusPad, offsets))
        return bytesjoin(dataList)
示例#7
0
	def compile(self, ttFont):
		"""Compile the GPKG table: header, two offset arrays (each with a
		terminating entry), then the GMAP and glyphlet data blocks."""
		self.numGMAPs = len(self.GMAPs)
		self.numGlyplets = len(self.glyphlets)

		header = sstruct.pack(GPKGFormat, self)
		dataList = [header]

		def packOffsets(offsets):
			# Offsets are stored as uint32; byteswap to big-endian on
			# little-endian machines.
			offsetArray = array.array("I", offsets)
			if sys.byteorder != "big":
				offsetArray.byteswap()
			return offsetArray.tobytes()

		# Both offset arrays sit right after the header.
		pos = len(header) + (self.numGMAPs + 1)*4 + (self.numGlyplets + 1)*4
		GMAPoffsets = [pos]
		for gmap in self.GMAPs:
			pos += len(gmap)
			GMAPoffsets.append(pos)
		dataList.append(packOffsets(GMAPoffsets))

		glyphletOffsets = [pos]
		for glyphlet in self.glyphlets:
			pos += len(glyphlet)
			glyphletOffsets.append(pos)
		dataList.append(packOffsets(glyphletOffsets))

		dataList += self.GMAPs
		dataList += self.glyphlets
		return bytesjoin(dataList)
示例#8
0
    def compile(self, ttFont):
        """Compile the 'gvar' table: header, per-glyph offsets, shared
        tuples, then the per-glyph variation data."""
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        sharedTuples = tv.compileSharedTuples(
            axisTags, itertools.chain(*self.variations.values()))
        sharedTupleIndices = {coord: i for i, coord in enumerate(sharedTuples)}
        sharedTupleSize = sum(len(c) for c in sharedTuples)
        compiledGlyphs = self.compileGlyphs_(ttFont, axisTags,
                                             sharedTupleIndices)
        # Offsets table: one entry per glyph plus a final end offset.
        offsets = [0]
        for glyph in compiledGlyphs:
            offsets.append(offsets[-1] + len(glyph))
        compiledOffsets, tableFormat = self.compileOffsets_(offsets)

        header = {
            "version": self.version,
            "reserved": self.reserved,
            "axisCount": len(axisTags),
            "sharedTupleCount": len(sharedTuples),
            "offsetToSharedTuples": GVAR_HEADER_SIZE + len(compiledOffsets),
            "glyphCount": len(compiledGlyphs),
            "flags": tableFormat,
        }
        # Glyph variation data follows directly after the shared tuples.
        header["offsetToGlyphVariationData"] = (
            header["offsetToSharedTuples"] + sharedTupleSize)
        compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header)

        return bytesjoin([compiledHeader, compiledOffsets]
                         + sharedTuples + compiledGlyphs)
	def compile(self, axisTags, sharedCoordIndices, sharedPoints):
		"""Compile this tuple variation, returning a
		(tupleData, auxData, usesSharedPoints) triple."""
		assert all(tag in axisTags for tag in self.axes.keys()), ("Unknown axis tag found.", self.axes.keys(), axisTags)

		optionalParts = []
		coord = self.compileCoord(axisTags)
		if coord in sharedCoordIndices:
			# Refer to the shared peak tuple by its index.
			flags = sharedCoordIndices[coord]
		else:
			# Embed the peak tuple inline after the header.
			flags = EMBEDDED_PEAK_TUPLE
			optionalParts.append(coord)

		intermediateCoord = self.compileIntermediateCoord(axisTags)
		if intermediateCoord is not None:
			flags |= INTERMEDIATE_REGION
			optionalParts.append(intermediateCoord)

		points = self.getUsedPoints()
		usesSharedPoints = (sharedPoints == points)
		if usesSharedPoints:
			# Only use the shared points if they are identical to the actually used points
			auxData = self.compileDeltas(sharedPoints)
		else:
			flags |= PRIVATE_POINT_NUMBERS
			numPointsInGlyph = len(self.coordinates)
			auxData = (self.compilePoints(points, numPointsInGlyph)
				   + self.compileDeltas(points))

		tupleData = struct.pack('>HH', len(auxData), flags) + bytesjoin(optionalParts)
		return (tupleData, auxData, usesSharedPoints)
示例#10
0
    def getData(self):
        """Assemble the data for this writer/table, without subtables."""
        items = list(self.items)  # make a shallow copy
        pos = self.pos
        for index, item in enumerate(items):
            if not hasattr(item, "getData"):
                continue
            # Replace each subtable reference with its packed relative offset.
            relOffset = item.pos - pos
            if item.offsetSize == 4:
                items[index] = packULong(relOffset)
            elif item.offsetSize == 2:
                try:
                    items[index] = packUShort(relOffset)
                except struct.error:
                    # provide data to fix overflow problem.
                    overflowErrorRecord = self.getOverflowErrorRecord(item)
                    raise OTLOffsetOverflowError(overflowErrorRecord)
            elif item.offsetSize == 3:
                items[index] = packUInt24(relOffset)
            else:
                raise ValueError(item.offsetSize)

        return bytesjoin(items)
示例#11
0
def decrypt(cipherstring, R):
    r"""
	Decrypts a string using the Type 1 encryption algorithm.

	Args:
		cipherstring: String of ciphertext.
		R: Initial key.

	Returns:
		decryptedStr: Plaintext string.
		R: Output key for subsequent decryptions.

	Examples::

		>>> testStr = b"\0\0asdadads asds\265"
		>>> decryptedStr, R = decrypt(testStr, 12321)
		>>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
		True
		>>> R == 36142
		True
	"""
    # The key R is threaded through the per-byte decryption.
    decrypted = []
    for cipherByte in cipherstring:
        plainByte, R = _decryptChar(cipherByte, R)
        decrypted.append(plainByte)
    return bytesjoin(decrypted), int(R)
示例#12
0
def readLWFN(path, onlyHeader=False):
    """reads an LWFN font file, returns raw data"""
    from fontTools.misc.macRes import ResourceReader
    reader = ResourceReader(path)
    try:
        data = []
        # Each 'POST' resource starts with a one-byte chunk code that
        # determines how its payload is handled; the second byte must be 0.
        for res in reader.get('POST', []):
            code = byteord(res.data[0])
            if byteord(res.data[1]) != 0:
                raise T1Error('corrupt LWFN file')
            if code in [1, 2]:
                # Font data chunk; its payload follows the two code bytes.
                if onlyHeader and code == 2:
                    # Header-only read stops at the first code-2 chunk.
                    break
                data.append(res.data[2:])
            elif code in [3, 5]:
                # Terminating chunk codes; stop reading.
                break
            elif code == 4:
                # Payload lives in the file's data fork; read the whole file.
                with open(path, "rb") as f:
                    data.append(f.read())
            elif code == 0:
                pass  # comment, ignore
            else:
                raise T1Error('bad chunk code: ' + repr(code))
    finally:
        reader.close()
    data = bytesjoin(data)
    assertType1(data)
    return data
示例#13
0
    def toUnicode(self, errors: str = 'strict') -> str:
        """
		If self.string is a Unicode string, return it; otherwise try decoding the
		bytes in self.string to a Unicode string using the encoding of this
		entry as returned by self.getEncoding(); Note that  self.getEncoding()
		returns 'ascii' if the encoding is unknown to the library.

		Certain heuristics are performed to recover data from bytes that are
		ill-formed in the chosen encoding, or that otherwise look misencoded
		(mostly around bad UTF-16BE encoded bytes, or bytes that look like UTF-16BE
		but marked otherwise).  If the bytes are ill-formed and the heuristics fail,
		the error is handled according to the errors parameter to this function, which is
		passed to the underlying decode() function; by default it throws a
		UnicodeDecodeError exception.

		Note: The mentioned heuristics mean that roundtripping a font to XML and back
		to binary might recover some misencoded data whereas just loading the font
		and saving it back will not change them.
		"""
        def isascii(b):
            # Printable ASCII, or tab/LF/CR.
            return (b >= 0x20 and b <= 0x7E) or b in [0x09, 0x0A, 0x0D]

        encoding = self.getEncoding()
        string = self.string

        if isinstance(
                string,
                bytes) and encoding == 'utf_16_be' and len(string) % 2 == 1:
            # Recover badly encoded UTF-16 strings that have an odd number of bytes:
            # - If the last byte is zero, drop it.  Otherwise,
            # - If all the odd bytes are zero and all the even bytes are ASCII,
            #   prepend one zero byte.  Otherwise,
            # - If first byte is zero and all other bytes are ASCII, insert zero
            #   bytes between consecutive ASCII bytes.
            #
            # (Yes, I've seen all of these in the wild... sigh)
            if byteord(string[-1]) == 0:
                string = string[:-1]
            elif all(
                    byteord(b) == 0 if i % 2 else isascii(byteord(b))
                    for i, b in enumerate(string)):
                string = b'\0' + string
            elif byteord(string[0]) == 0 and all(
                    isascii(byteord(b)) for b in string[1:]):
                string = bytesjoin(b'\0' + bytechr(byteord(b))
                                   for b in string[1:])

        string = tostr(string, encoding=encoding, errors=errors)

        # If decoded strings still looks like UTF-16BE, it suggests a double-encoding.
        # Fix it up.
        if all(
                ord(c) == 0 if i % 2 == 0 else isascii(ord(c))
                for i, c in enumerate(string)):
            # If string claims to be Mac encoding, but looks like UTF-16BE with ASCII text,
            # narrow it down.
            # Keeping every second character drops the interleaved NULs.
            string = ''.join(c for c in string[1::2])

        return string
示例#14
0
 def compile(self, ttFont):
     """Compile a composite bitmap glyph: big metrics, component count,
     then one ebdtComponent record per component."""
     parts = [sstruct.pack(bigGlyphMetricsFormat, self.metrics),
              struct.pack(">H", len(self.componentArray))]
     for component in self.componentArray:
         # Resolve the component's glyph name to its numeric id first.
         component.glyphCode = ttFont.getGlyphID(component.name)
         parts.append(sstruct.pack(ebdtComponentFormat, component))
     return bytesjoin(parts)
示例#15
0
	def _compilePaletteTypes(self):
		"""Pack the per-palette type flags as big-endian uint32 values.

		Returns b'' for version 0 tables or when no palette sets a flag.
		"""
		if self.version == 0 or not any(self.paletteTypes):
			return b''
		assert len(self.paletteTypes) == len(self.palettes)
		packed = [struct.pack(">I", paletteType) for paletteType in self.paletteTypes]
		result = bytesjoin(packed)
		assert len(result) == 4 * len(self.palettes)
		return result
示例#16
0
	def _compilePaletteEntryLabels(self):
		"""Pack the palette entry label name ids as big-endian uint16.

		Returns b'' for version 0 tables or when every label is the
		NO_NAME_ID sentinel.
		"""
		if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteEntryLabels):
			return b''
		assert len(self.paletteEntryLabels) == self.numPaletteEntries
		packed = [struct.pack(">H", labelID) for labelID in self.paletteEntryLabels]
		result = bytesjoin(packed)
		assert len(result) == 2 * self.numPaletteEntries
		return result
示例#17
0
 def test_unsupportedLookupType(self):
     """Patching the lookup type byte to 66 must make decompile fail."""
     data = bytesjoin([
         MORX_NONCONTEXTUAL_DATA[:67],
         bytechr(66),
         MORX_NONCONTEXTUAL_DATA[69:],
     ])
     with self.assertRaisesRegex(AssertionError,
                                 r"unsupported 'morx' lookup type 66"):
         morx = newTable('morx')
         morx.decompile(data, FakeFont(['.notdef']))
示例#18
0
def deHexStr(hexdata):
	"""Convert a hex string to binary data.

	Whitespace anywhere in the input is ignored; an odd number of hex
	digits is padded with a trailing zero nibble.

	Raises ValueError if the input contains non-hexadecimal characters.
	"""
	hexdata = "".join(hexdata.split())
	if len(hexdata) % 2:
		hexdata = hexdata + "0"
	# bytes.fromhex is the C-level equivalent of the old per-byte
	# bytechr(int(...)) loop.
	return bytes.fromhex(hexdata)
示例#19
0
	def test_dumphex(self):
		"""dumphex() must emit the data as space-separated 8-digit hex
		groups, 16 bytes per line, padding the final short line."""
		writer = XMLWriter(BytesIO())
		writer.dumphex("Type is a beautiful group of letters, not a group of beautiful letters.")
		expected = HEADER + bytesjoin([
		    "54797065 20697320 61206265 61757469",
		    "66756c20 67726f75 70206f66 206c6574",
		    "74657273 2c206e6f 74206120 67726f75",
		    "70206f66 20626561 75746966 756c206c",
		    "65747465 72732e  ", ""], joiner=linesep)
		self.assertEqual(expected, writer.file.getvalue())
	def compilePoints(points, numPointsInGlyph):
		"""Serialize a set of point numbers into the packed-point format
		used by tuple variations.

		points: the point indices referenced by this variation.
		numPointsInGlyph: total point count of the glyph; when every
		point is used, the special single-zero-byte encoding is emitted.
		"""
		# If the set consists of all points in the glyph, it gets encoded with
		# a special encoding: a single zero byte.
		if len(points) == numPointsInGlyph:
			return b"\0"

		# In the 'gvar' table, the packing of point numbers is a little surprising.
		# It consists of multiple runs, each being a delta-encoded list of integers.
		# For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as
		# [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1.
		# There are two types of runs, with values being either 8 or 16 bit unsigned
		# integers.
		points = list(points)
		points.sort()
		numPoints = len(points)

		# The binary representation starts with the total number of points in the set,
		# encoded into one or two bytes depending on the value.
		if numPoints < 0x80:
			result = [bytechr(numPoints)]
		else:
			result = [bytechr((numPoints >> 8) | 0x80) + bytechr(numPoints & 0xff)]

		MAX_RUN_LENGTH = 127
		pos = 0
		lastValue = 0
		# Emit runs until every point has been consumed.
		while pos < numPoints:
			run = io.BytesIO()
			runLength = 0
			# Decide byte- vs word-encoding from the first delta of the run.
			useByteEncoding = None
			while pos < numPoints and runLength <= MAX_RUN_LENGTH:
				curValue = points[pos]
				delta = curValue - lastValue
				if useByteEncoding is None:
					useByteEncoding = 0 <= delta <= 0xff
				if useByteEncoding and (delta > 0xff or delta < 0):
					# we need to start a new run (which will not use byte encoding)
					break
				# TODO This never switches back to a byte-encoding from a short-encoding.
				# That's suboptimal.
				if useByteEncoding:
					run.write(bytechr(delta))
				else:
					run.write(bytechr(delta >> 8))
					run.write(bytechr(delta & 0xff))
				lastValue = curValue
				pos += 1
				runLength += 1
			# Run header: length-1, with the word-size flag when needed.
			if useByteEncoding:
				runHeader = bytechr(runLength - 1)
			else:
				runHeader = bytechr((runLength - 1) | POINTS_ARE_WORDS)
			result.append(runHeader)
			result.append(run.getvalue())

		return bytesjoin(result)
示例#21
0
def _reverseBytes(data):
    if len(data) != 1:
        return bytesjoin(map(_reverseBytes, data))
    byte = byteord(data)
    result = 0
    for i in range(8):
        result = result << 1
        result |= byte & 1
        byte = byte >> 1
    return bytechr(result)
示例#22
0
	def test_indent_dedent(self):
		"""indent()/dedent() must grow and shrink the prefix applied to
		subsequently written lines."""
		writer = XMLWriter(BytesIO())
		writer.write("foo")
		writer.newline()
		writer.indent()
		writer.write("bar")
		writer.newline()
		writer.dedent()
		writer.write("baz")
		expected = HEADER + bytesjoin(["foo", "  bar", "baz"], linesep)
		self.assertEqual(expected, writer.file.getvalue())
示例#23
0
 def compile(self, ttFont):
     """Compile an indexSubTable with constant metrics: image size, big
     metrics, then the glyph id array padded to a 32-bit boundary."""
     self.imageDataOffset = min(next(iter(zip(*self.locations))))
     dataList = [EblcIndexSubTable.compile(self, ttFont),
                 struct.pack(">L", self.imageSize),
                 sstruct.pack(bigGlyphMetricsFormat, self.metrics)]
     glyphIds = [ttFont.getGlyphID(name) for name in self.names]
     dataList.append(struct.pack(">L", len(glyphIds)))
     dataList.extend(struct.pack(">H", glyphId) for glyphId in glyphIds)
     # Pad with a zero id when the count is odd.
     if len(glyphIds) % 2 == 1:
         dataList.append(struct.pack(">H", 0))
     return bytesjoin(dataList)
示例#24
0
def _binary2data(binary):
    byteList = []
    for bitLoc in range(0, len(binary), 8):
        byteString = binary[bitLoc:bitLoc + 8]
        curByte = 0
        for curBit in reversed(byteString):
            curByte = curByte << 1
            if curBit == '1':
                curByte |= 1
        byteList.append(bytechr(curByte))
    return bytesjoin(byteList)
示例#25
0
    def compile(self, ttFont):
        """Compile an indexSubTable format 2: one image size and one set
        of big metrics shared by a dense range of glyph ids."""
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        # Make sure all the ids are consecutive. This is required by Format 2.
        expectedIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
        assert glyphIds == expectedIds, "Format 2 ids must be consecutive."
        self.imageDataOffset = min(next(iter(zip(*self.locations))))

        return bytesjoin([
            EblcIndexSubTable.compile(self, ttFont),
            struct.pack(">L", self.imageSize),
            sstruct.pack(bigGlyphMetricsFormat, self.metrics),
        ])
示例#26
0
	def compile(self, ttFont):
		"""Compile the table: fixed header, one (offset, length) record
		per tag, then the string pool. Tags that already occur as a
		substring of the pool reuse those pool bytes."""
		dataList = [struct.pack(">LLL", self.version, self.flags, len(self.tags))]
		stringPool = ""
		# String offsets are measured from the start of the table, past
		# the 12-byte header and the per-tag records.
		headerSize = 12 + len(self.tags) * 4
		for tag in self.tags:
			offset = stringPool.find(tag)
			if offset < 0:
				# Not reusable; append the tag to the pool.
				offset = len(stringPool)
				stringPool += tag
			dataList.append(struct.pack(">HH", offset + headerSize, len(tag)))
		dataList.append(tobytes(stringPool))
		return bytesjoin(dataList)
示例#27
0
def readOther(path):
    """reads any (font) file, returns raw data"""
    with open(path, "rb") as f:
        data = f.read()
    assertType1(data)
    # Hex-encoded encrypted chunks are converted to binary; everything
    # else passes through untouched.
    decoded = []
    for isEncrypted, chunk in findEncryptedChunks(data):
        if isEncrypted and isHex(chunk[:4]):
            decoded.append(deHexString(chunk))
        else:
            decoded.append(chunk)
    return bytesjoin(decoded)
示例#28
0
    def compile(self, ttFont):
        """Compile the bitmap data table: version header followed by the
        padded data for every glyph, deduplicated by glyph object
        identity, while recording each glyph's (start, end) byte
        interval back onto the locator's index subtables."""

        dataList = []
        dataList.append(sstruct.pack(ebdtTableVersionFormat, self))
        dataSize = len(dataList[0])

        # Keep a dict of glyphs that have been seen so they aren't remade.
        # This dict maps the id of the BitmapGlyph to the interval
        # in the data.
        glyphDict = {}

        # Go through the bitmap glyph data. Just in case the data for a glyph
        # changed the size metrics should be recalculated. There are a variety
        # of formats and they get stored in the EBLC table. That is why
        # recalculation is deferred to the EblcIndexSubTable class and just
        # pass what is known about bitmap glyphs from this particular table.
        locator = ttFont[self.__class__.locatorName]
        for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
            for curIndexSubTable in curStrike.indexSubTables:
                dataLocations = []
                for curName in curIndexSubTable.names:
                    # Handle the data placement based on seeing the glyph or not.
                    # Just save a reference to the location if the glyph has already
                    # been saved in compile. This code assumes that glyphs will only
                    # be referenced multiple times from indexFormat5. By luck the
                    # code may still work when referencing poorly ordered fonts with
                    # duplicate references. If there is a font that is unlucky the
                    # respective compile methods for the indexSubTables will fail
                    # their assertions. All fonts seem to follow this assumption.
                    # More complicated packing may be needed if a counter-font exists.
                    glyph = curGlyphDict[curName]
                    objectId = id(glyph)
                    if objectId not in glyphDict:
                        data = glyph.compile(ttFont)
                        data = curIndexSubTable.padBitmapData(data)
                        startByte = dataSize
                        dataSize += len(data)
                        endByte = dataSize
                        dataList.append(data)
                        dataLoc = (startByte, endByte)
                        glyphDict[objectId] = dataLoc
                    else:
                        dataLoc = glyphDict[objectId]
                    dataLocations.append(dataLoc)
                # Just use the new data locations in the indexSubTable.
                # The respective compile implementations will take care
                # of any of the problems in the conversion that may arise.
                curIndexSubTable.locations = dataLocations

        return bytesjoin(dataList)
示例#29
0
 def decompile(self, refData, reader):
     """Decompile a single resource reference record.

     Fills in the fixed fields from refData, then reads the resource's
     data (and, if present, its name) through the reader.
     """
     sstruct.unpack(ResourceRefItem, refData, self)
     # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
     self.dataOffset, = struct.unpack('>L',
                                      bytesjoin([b"\0", self.dataOffset]))
     absDataOffset = reader.dataOffset + self.dataOffset
     # Resource data: a 4-byte big-endian length followed by the bytes.
     dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
     self.data = reader._read(dataLength)
     if self.nameOffset == -1:
         # No name for this resource.
         return
     absNameOffset = reader.absNameListOffset + self.nameOffset
     # Names are Pascal-style: a length byte followed by the name bytes.
     nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
     name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
     self.name = tostr(name, encoding='mac-roman')
示例#30
0
    def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
        """Extract a single row of this bitmap as bytes.

        row: row index; must satisfy 0 <= row < metrics.height.
        bitDepth: bits per pixel, used to locate the row's bit range.
        metrics: glyph metrics; defaults to self.metrics.
        reverseBytes: when False the result is bit-reversed at the end
            (the extraction itself works on bit-reversed bytes).
        """
        if metrics is None:
            metrics = self.metrics
        assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"

        # Loop through each byte. This can cover two bytes in the original data or
        # a single byte if things happen to be aligned. The very last entry might
        # not be aligned so take care to trim the binary data to size and pad with
        # zeros in the row data. Bit aligned data is somewhat tricky.
        #
        # Example of data cut. Data cut represented in x's.
        # '|' represents byte boundary.
        # data = ...0XX|XXXXXX00|000... => XXXXXXXX
        #		or
        # data = ...0XX|XXXX0000|000... => XXXXXX00
        #   or
        # data = ...000|XXXXXXXX|000... => XXXXXXXX
        #   or
        # data = ...000|00XXXX00|000... => XXXX0000
        #
        dataList = []
        bitRange = self._getBitRange(row, bitDepth, metrics)
        stepRange = bitRange + (8, )
        for curBit in range(*stepRange):
            endBit = min(curBit + 8, bitRange[1])
            numBits = endBit - curBit
            cutPoint = curBit % 8
            firstByteLoc = curBit // 8
            secondByteLoc = endBit // 8
            if firstByteLoc < secondByteLoc:
                numBitsCut = 8 - cutPoint
            else:
                numBitsCut = endBit - curBit
            # Take the high part of the first source byte...
            curByte = _reverseBytes(self.imageData[firstByteLoc])
            firstHalf = byteord(curByte) >> cutPoint
            firstHalf = ((1 << numBitsCut) - 1) & firstHalf
            newByte = firstHalf
            # ...and, when the window straddles a boundary, merge in the
            # low part of the second source byte.
            if firstByteLoc < secondByteLoc and secondByteLoc < len(
                    self.imageData):
                curByte = _reverseBytes(self.imageData[secondByteLoc])
                secondHalf = byteord(curByte) << numBitsCut
                newByte = (firstHalf | secondHalf) & ((1 << numBits) - 1)
            dataList.append(bytechr(newByte))

        # The way the data is kept is opposite the algorithm used.
        data = bytesjoin(dataList)
        if not reverseBytes:
            data = _reverseBytes(data)
        return data
def _cmap_format_12_or_13_compile(self, ttFont):
  if self.data:
    return struct.pack(">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups) + self.data
  charCodes = list(self.cmap.keys())
  lenCharCodes = len(charCodes)
  names = list(self.cmap.values())
  nameMap = ttFont.getReverseGlyphMap()
  try:
    gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
  except KeyError:
    nameMap = ttFont.getReverseGlyphMap(rebuild=True)
    try:
      gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
    except KeyError:
      # allow virtual GIDs in format 12 tables
      gids = []
      for name in names:
        try:
          gid = nameMap[name]
        except KeyError:
          try:
            if (name[:3] == 'gid'):
              gid = eval(name[3:])
            else:
              gid = ttFont.getGlyphID(name)
          except:
            raise KeyError(name)

        gids.append(gid)

  cmap = {}  # code:glyphID mapping
  list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids))

  charCodes.sort()
  nGroups = 0
  dataList =  []
  maxIndex = len(charCodes)
  for index in range(maxIndex):
    charCode = charCodes[index]
    glyphID = cmap[charCode]
    dataList.append(struct.pack(">LLL", charCode, charCode, glyphID))
    nGroups = nGroups + 1
  data = bytesjoin(dataList)
  lengthSubtable = len(data) +16
  assert len(data) == (nGroups*12) == (lengthSubtable-16)
  return struct.pack(">HHLLL", self.format, self.reserved , lengthSubtable, self.language, nGroups) + data