Example #1
 def writeXref(self, f):
     self.startxref = f.tell()
     f.write(tobytes('xref' + LINEEND, encoding='utf-8'))
     f.write(tobytes('0 %d' % (len(self.objects) + 1) + LINEEND, encoding='utf-8'))
     f.write(tobytes('0000000000 65535 f' + LINEEND, encoding='utf-8'))
     for pos in self.xref:
         f.write(tobytes('%0.10d 00000 n' % pos + LINEEND, encoding='utf-8'))
Example #2
def hint_bez_glyph(info,
                   glyph,
                   allow_edit=True,
                   allow_hint_sub=True,
                   round_coordinates=True,
                   report_zones=False,
                   report_stems=False,
                   report_all_stems=False,
                   use_autohintexe=False):
    if use_autohintexe:
        hinted = _hint_with_autohintexe(info, glyph, allow_edit,
                                        allow_hint_sub, round_coordinates,
                                        report_zones, report_stems,
                                        report_all_stems)
    else:
        report = 0
        if report_zones:
            report = 1
        elif report_stems:
            report = 2
        hinted = _psautohint.autohint(tobytes(info), tobytes(glyph),
                                      allow_edit, allow_hint_sub,
                                      round_coordinates, report,
                                      report_all_stems)

    return tounicode(hinted)
Example #3
    def save(self, file):
        #avoid crashes if they wrote nothing in the page
        if self.data is None:
            self.data = TestStream

        if self.compression == 1:
            comp = zlib.compress(tobytes(self.data, encoding='utf-8'))   #this bit is very fast...
            base85 = pdfutils._AsciiBase85Encode(comp) #...sadly this isn't
            data_to_write = pdfutils._wrap(base85)
        else:
            data_to_write = self.data
        # the PDF length key should contain the length including
        # any extra LF pairs added by Print on DOS.

        #lines = len(string.split(self.data,'\n'))
        #length = len(self.data) + lines   # one extra LF each
        length = len(data_to_write) + len(LINEEND)    #AR 19980202
        if self.fontType is None:
            fontStreamEntry = ""
        else:
            fontStreamEntry = "/Subtype %s" % (self.fontType)

        if self.compression:
            file.write(tobytes('<<  %s /Length %d /Filter [/ASCII85Decode /FlateDecode] >>' % (fontStreamEntry, length) + LINEEND, encoding='utf-8'))
        else:
            file.write(tobytes('<< /Length %d %s >>' % (length,  fontStreamEntry) + LINEEND, encoding='utf-8'))
        file.write(tobytes('stream' + LINEEND, encoding='utf-8'))
        file.write(tobytes(data_to_write + LINEEND, encoding='utf-8'))
        file.write(tobytes('endstream' + LINEEND, encoding='utf-8'))
Example #4
def parseXML(xmlSnippet):
    """Parses a snippet of XML.

    Input can be either a single string (unicode or UTF-8 bytes), or a
    sequence of strings.

    The result is in the same format that would be returned by
    XMLReader, but the parser imposes no constraints on the root
    element so it can be called on small snippets of TTX files.
    """
    # To support snippets with multiple elements, we add a fake root.
    reader = TestXMLReader_()
    xml = b"<root>"
    if isinstance(xmlSnippet, bytes):
        xml += xmlSnippet
    elif isinstance(xmlSnippet, str):
        xml += tobytes(xmlSnippet, 'utf-8')
    elif isinstance(xmlSnippet, Iterable):
        xml += b"".join(tobytes(s, 'utf-8') for s in xmlSnippet)
    else:
        raise TypeError("expected string or sequence of strings; found %r" %
                        type(xmlSnippet).__name__)
    xml += b"</root>"
    reader.parser.Parse(xml, 0)
    return reader.root[2]
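A short usage sketch (assuming the parseXML above is in scope; it matches fontTools.misc.testTools.parseXML): each parsed element comes back as a (tag, attrib, content) tuple, in document order, just as XMLReader would report it.

events = parseXML(['<foo bar="1"/>', '<baz/>'])
print(events)  # expected, roughly: [('foo', {'bar': '1'}, []), ('baz', {}, [])]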
Example #5
 def writeTrailer(self, f):
     f.write(tobytes('trailer' + LINEEND, encoding='utf-8'))
     f.write(
         tobytes('<< /Size %d /Root %d 0 R /Info %d 0 R>>' %
                 (len(self.objects) + 1, 1, self.infopos) + LINEEND,
                 encoding='utf-8'))
     f.write(tobytes('startxref' + LINEEND, encoding='utf-8'))
     f.write(tobytes(str(self.startxref) + LINEEND, encoding='utf-8'))
Example #6
def hint_bez_glyph(info,
                   glyph,
                   allow_edit=True,
                   allow_hint_sub=True,
                   round_coordinates=True):
    hinted = _psautohint.autohint(tobytes(info), tobytes(glyph), allow_edit,
                                  allow_hint_sub, round_coordinates)

    return tounicode(hinted)
Example #7
 def writeXref(self, f):
     self.startxref = f.tell()
     f.write(tobytes('xref' + LINEEND, encoding='utf-8'))
     f.write(
         tobytes('0 %d' % (len(self.objects) + 1) + LINEEND,
                 encoding='utf-8'))
     f.write(tobytes('0000000000 65535 f' + LINEEND, encoding='utf-8'))
     for pos in self.xref:
         f.write(tobytes('%0.10d 00000 n' % pos + LINEEND,
                         encoding='utf-8'))
Example #8
	def test_newlinestr(self):
		header = b'<?xml version="1.0" encoding="UTF-8"?>'

		for nls in (None, '\n', '\r\n', '\r', ''):
			writer = XMLWriter(BytesIO(), newlinestr=nls)
			writer.write("hello")
			writer.newline()
			writer.write("world")
			writer.newline()

			linesep = tobytes(os.linesep) if nls is None else tobytes(nls)

			self.assertEqual(
				header + linesep + b"hello" + linesep + b"world" + linesep,
				writer.file.getvalue())
Example #9
    def buildTables(self):
        """
        Compile OpenType feature tables from the source.
        Raises a FeatureLibError if the feature compilation was unsuccessful.

        **This should not be called externally.** Subclasses
        may override this method to handle the table compilation
        in a different way if desired.
        """

        if not self.features:
            return

        # the path is used by the lexer to follow 'include' statements;
        # if we generated some automatic features, includes have already been
        # resolved, and we work from a string which doesn't exist on disk
        path = self.ufo.path if not self.featureWriters else None
        try:
            addOpenTypeFeaturesFromString(self.ttFont,
                                          self.features,
                                          filename=path)
        except FeatureLibError:
            if path is None:
                # if compilation fails, create temporary file for inspection
                data = tobytes(self.features, encoding="utf-8")
                with NamedTemporaryFile(delete=False) as tmp:
                    tmp.write(data)
                logger.error("Compilation failed! Inspect temporary file: %r",
                             tmp.name)
            raise
Example #10
def makeGAFile(gaPath, fontPath, glyphList, fontDictList, fdGlyphDict,
               removeNotdef):
    """
    Creates a glyph alias file for each FDDict.
    These files will be used by the 'mergefonts' tool.
    For documentation on the format of this file, run 'mergefonts -h'.
    """
    glyph_list = getGlyphList(fontPath, removeNotdef)

    try:
        fdIndex = fdGlyphDict[glyph_list[0]][0]  # [fdIndex value, gi]
    except KeyError:
        fdIndex = 0

    fdDict = fontDictList[fdIndex]
    lineList = [""]

    lang_group = fdDict.LanguageGroup
    if lang_group is None:
        langGroup = " 0"
    else:
        langGroup = " %s" % lang_group

    dictName = "%s_%s" % (fdDict.FontName, fdDict.DictName)

    for glyph_name in glyph_list:
        gid = glyphList.index(glyph_name)
        lineList.append("%s\t%s" % (gid, glyph_name))

    lineList.append("")
    gaText = "mergefonts %s%s%s" % (dictName, langGroup, '\n'.join(lineList))

    with open(gaPath, "wb") as gf:
        gf.write(tobytes(gaText))
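What the generated alias text looks like, per the format string above: a 'mergefonts' header line followed by one tab-separated "<gid>\t<glyph name>" row per glyph (the names below are made up).

dictName, langGroup = "MyFont_MyDict", " 0"            # hypothetical FDDict values
lineList = ["", "0\t.notdef", "1\tA", "2\tB", ""]
print("mergefonts %s%s%s" % (dictName, langGroup, '\n'.join(lineList)))
# -> "mergefonts MyFont_MyDict 0" on the first line, then one
#    "<gid>\t<glyph name>" row per glyph, ending with a newline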
Example #11
def makeGAFile(gaPath, fontPath, glyphList, fontDictList, fdGlyphDict,
               removeNotdef):
    """
    Creates a glyph alias file for each FDDict.
    These files will be used by the 'mergefonts' tool.
    For documentation on the format of this file, run 'mergefonts -h'.
    """
    glyph_list = getGlyphList(fontPath, removeNotdef)

    try:
        fdIndex = fdGlyphDict[glyph_list[0]][0]  # [fdIndex value, gi]
    except KeyError:
        fdIndex = 0

    fdDict = fontDictList[fdIndex]
    lineList = [""]

    lang_group = fdDict.LanguageGroup
    if lang_group is None:
        langGroup = " 0"
    else:
        langGroup = " %s" % lang_group

    dictName = "%s_%s" % (fdDict.FontName, fdDict.DictName)

    for glyph_name in glyph_list:
        gid = glyphList.index(glyph_name)
        lineList.append("%s\t%s" % (gid, glyph_name))

    lineList.append("")
    gaText = "mergefonts %s%s%s" % (dictName, langGroup, '\n'.join(lineList))

    with open(gaPath, "wb") as gf:
        gf.write(tobytes(gaText))
Example #12
 def save(self, file):
     file.write(
         tobytes(LINEEND.join([
             "<</Title (%s)", "/Author (%s)", "/CreationDate (D:%s)",
             "/Producer (PDFgen)", "/Subject (%s)", ">>"
         ]) % (pdfutils._escape(self.title), pdfutils._escape(self.author),
               self.datestr, pdfutils._escape(self.subject)) + LINEEND,
                 encoding='utf-8'))
Example #13
 def SaveToFileObject(self, fileobj):
     """Open a file, and ask each object in turn to write itself to
     the file.  Keep track of the file position at each point for
     use in the index at the end"""
     f = fileobj
     self.xref = []
     f.write(tobytes("%PDF-1.2" + LINEEND, encoding='utf-8'))  # for CID support
     f.write(tobytes(b"%\xed\xec\xb6\xbe\r\n", encoding='utf-8'))
     for i, obj in enumerate(self.objects, 1):
         pos = f.tell()
         self.xref.append(pos)
         f.write(tobytes(str(i) + ' 0 obj' + LINEEND, encoding='utf-8'))
         obj.save(f)
         f.write(tobytes('endobj' + LINEEND, encoding='utf-8'))
     self.writeXref(f)
     self.writeTrailer(f)
     f.write(tobytes('%%EOF', encoding='utf-8'))  # no lineend needed on this one!
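The index bookkeeping above hinges on f.tell(): every byte offset recorded in self.xref is later written by writeXref (Examples #1 and #7) as a fixed-width, 10-digit entry. A minimal sketch of that formatting with made-up offsets:

offsets = [15, 74, 182]            # hypothetical f.tell() values collected in self.xref
for pos in offsets:
    print('%0.10d 00000 n' % pos)  # -> '0000000015 00000 n', '0000000074 00000 n', ...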
Example #14
 def __init__(self, buf=b'', encoding="ascii"):
     # Force self.buf to be a byte string
     buf = tobytes(buf)
     self.buf = buf
     self.len = len(buf)
     self.pos = 0
     self.closed = False
     self.encoding = encoding
Example #15
    def buildTables(self):
        if not self.features.strip():
            return

        import subprocess
        from fontTools.misc.py23 import tostr

        outline_path = feasrc_path = fea_path = None
        try:
            fd, outline_path = tempfile.mkstemp()
            os.close(fd)
            self.ttFont.save(outline_path)

            fd, feasrc_path = tempfile.mkstemp()
            os.close(fd)

            fd, fea_path = tempfile.mkstemp()
            os.write(fd, tobytes(self.features, encoding='utf-8'))
            os.close(fd)

            process = subprocess.Popen(
                [
                    "makeotf",
                    "-o",
                    feasrc_path,
                    "-f",
                    outline_path,
                    "-ff",
                    fea_path,
                ],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            stdout, stderr = process.communicate()
            retcode = process.poll()

            report = tostr(stdout + (b"\n" + stderr if stderr else b""))
            logger.info(report)

            # before afdko 2.7.1rc1, makeotf did not exit with a non-zero code
            # on failure, so we have to parse the error message
            if retcode != 0:
                success = False
            else:
                success = (
                    "makeotf [Error] Failed to build output font" not in report
                )
                if success:
                    with TTFont(feasrc_path) as feasrc:
                        for table in ["GDEF", "GPOS", "GSUB"]:
                            if table in feasrc:
                                self.ttFont[table] = feasrc[table]
            if not success:
                raise FontmakeError("Feature syntax compilation failed.")
        finally:
            for path in (outline_path, fea_path, feasrc_path):
                if path is not None:
                    os.remove(path)
Example #16
	def compileFormat0(self, ttFont):
		version = 0
		offsetToSVGDocIndex = SVG_format_0Size # I start the SVGDocIndex right after the header.
		# get SVGDoc info.
		docList = []
		entryList = []
		numEntries = len(self.docList)
		datum = struct.pack(">H",numEntries)
		entryList.append(datum)
		curOffset = len(datum) + doc_index_entry_format_0Size*numEntries
		for doc, startGlyphID, endGlyphID in self.docList:
			docOffset = curOffset
			docBytes = tobytes(doc, encoding="utf_8")
			if getattr(self, "compressed", False) and not docBytes.startswith(b"\x1f\x8b"):
				import gzip
				bytesIO = BytesIO()
				with gzip.GzipFile(None, "w", fileobj=bytesIO) as gzipper:
					gzipper.write(docBytes)
				gzipped = bytesIO.getvalue()
				if len(gzipped) < len(docBytes):
					docBytes = gzipped
				del gzipped, bytesIO
			docLength = len(docBytes)
			curOffset += docLength
			entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset, docLength)
			entryList.append(entry)
			docList.append(docBytes)
		entryList.extend(docList)
		svgDocData = bytesjoin(entryList)

		# get colorpalette info.
		if self.colorPalettes is None:
			offsetToColorPalettes = 0
			palettesData = ""
		else:
			offsetToColorPalettes = SVG_format_0Size + len(svgDocData)
			dataList = []
			numColorParams = len(self.colorPalettes.colorParamUINameIDs)
			datum = struct.pack(">H", numColorParams)
			dataList.append(datum)
			for uiNameId in self.colorPalettes.colorParamUINameIDs:
				datum = struct.pack(">H", uiNameId)
				dataList.append(datum)
			numColorPalettes = len(self.colorPalettes.colorPaletteList)
			datum = struct.pack(">H", numColorPalettes)
			dataList.append(datum)
			for colorPalette in self.colorPalettes.colorPaletteList:
				datum = struct.pack(">H", colorPalette.uiNameID)
				dataList.append(datum)
				for colorRecord in colorPalette.paletteColors:
					data = struct.pack(">BBBB", colorRecord.red, colorRecord.green, colorRecord.blue, colorRecord.alpha)
					dataList.append(data)
			palettesData = bytesjoin(dataList)

		header = struct.pack(">HLL", version, offsetToSVGDocIndex, offsetToColorPalettes)
		data = [header, svgDocData, palettesData]
		data = bytesjoin(data)
		return data
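The offset arithmetic above follows directly from the struct formats used: the header is packed as ">HLL" and each doc index entry as ">HHLL", so (assuming SVG_format_0Size and doc_index_entry_format_0Size are the sizes of those formats) the first document starts right after the 2-byte entry count plus numEntries entries.

import struct
print(struct.calcsize(">HLL"))    # 10 -- size of the (version, offsetToSVGDocIndex, offsetToColorPalettes) header
print(struct.calcsize(">HHLL"))   # 12 -- size of one (startGlyphID, endGlyphID, docOffset, docLength) entry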
Example #17
def test_decompile_magic_length_last_extra(empty_font):
    indextable = empty_font['TSI0']
    indextable.extra_indices[-1] = (0xFFFD, 0x8000, 0)
    content = "0" * (0x8000 + 1)
    data = tobytes(content)

    table = table_T_S_I__1()
    table.decompile(data, empty_font)

    assert table.extraPrograms['fpgm'] == content
Example #18
def test_decompile_magic_length_last_extra(empty_font):
    indextable = empty_font['TSI0']
    indextable.extra_indices[-1] = (0xFFFD, 0x8000, 0)
    content = "0" * (0x8000 + 1)
    data = tobytes(content)

    table = table_T_S_I__1()
    table.decompile(data, empty_font)

    assert table.extraPrograms['fpgm'] == content
Example #19
 def compile(self, ttFont):
     self.recordsCount = len(self.gmapRecords)
     self.fontNameLength = len(self.psFontName)
     self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4)
     data = sstruct.pack(GMAPFormat, self)
     data = data + tobytes(self.psFontName)
     data = data + b"\0" * (self.recordsOffset - len(data))
     for record in self.gmapRecords:
         data = data + record.compile(ttFont)
     return data
Example #20
 def save(self, file):
     lines = [
         '<<', '/Type /Pages',
         '/Count %d' % len(self.PageList), '/Kids ['
     ]
     for page in self.PageList:
         lines.append(str(page) + ' 0 R ')
     lines.append(']')
     lines.append('>>')
     text = LINEEND.join(lines)
     file.write(tobytes(text + LINEEND, encoding='utf-8'))
Example #21
    def save(self, file):
        self.info['pagewidth'] = self.pagewidth
        self.info['pageheight'] = self.pageheight
        # check for image support
        if self.hasImages:
            self.info['procsettext'] = '[/PDF /Text /ImageC]'
        else:
            self.info['procsettext'] = '[/PDF /Text]'
        self.info['transitionString'] = self.pageTransitionString

        file.write(tobytes(self.template % self.info + LINEEND, encoding='utf-8'))
Example #22
def readDesignSpaceFile(options):
    """ Read design space file.
    Build a new instancesList with all the instances from the ds file.

    - Promote all the source and instance filename attributes from relative
      to absolute paths
    - Write a temporary ds file
    - Return a path to the temporary ds file, and the current instances list.
    """

    instanceEntryList = []
    logger.info("Reading design space file '%s' ..." % options.dsPath)

    with open(options.dsPath, "r", encoding='utf-8') as f:
        data = f.read()

    ds = ET.XML(data)

    instances = ds.find("instances")

    # Remove any instances that are not in the specified list of instance
    # indices, from the option -i.
    if options.indexList:
        newInstanceXMLList = instances.findall("instance")
        numInstances = len(newInstanceXMLList)
        instanceIndex = numInstances
        while instanceIndex > 0:
            instanceIndex -= 1
            instanceXML = newInstanceXMLList[instanceIndex]
            if instanceIndex not in options.indexList:
                instances.remove(instanceXML)

    # We want to build all remaining instances.
    for instanceXML in instances:
        familyName = instanceXML.attrib["familyname"]
        styleName = instanceXML.attrib["stylename"]
        curPath = instanceXML.attrib["filename"]
        logger.info("Adding %s %s to build list." % (familyName, styleName))
        instanceEntryList.append(curPath)
        if os.path.exists(curPath):
            glyphDir = os.path.join(curPath, "glyphs")
            if os.path.exists(glyphDir):
                shutil.rmtree(glyphDir, ignore_errors=True)
    if not instanceEntryList:
        logger.error("Failed to find any instances in the ds file '%s' that "
                     "have the postscriptfilename attribute" % options.dsPath)
        return None, None

    dsPath = "{}.temp".format(options.dsPath)
    data = ET.tostring(ds)
    with open(dsPath, "wb") as f:
        f.write(tobytes(data, encoding='utf-8'))

    return dsPath, instanceEntryList
Example #23
    def test_from_svg_file(self):
        pen = RecordingPen()
        with NamedTemporaryFile(delete=False) as tmp:
            tmp.write(tobytes(SVG_DATA))
        try:
            svg = SVGPath(tmp.name)
            svg.draw(pen)
        finally:
            os.remove(tmp.name)

        assert pen.value == EXPECTED_PEN_COMMANDS
Example #24
    def toBytes(self, errors='strict'):
        """ If self.string is a bytes object, return it; otherwise try encoding
		the Unicode string in self.string to bytes using the encoding of this
		entry as returned by self.getEncoding(); Note that self.getEncoding()
		returns 'ascii' if the encoding is unknown to the library.

		If the Unicode string cannot be encoded to bytes in the chosen encoding,
		the error is handled according to the errors parameter to this function,
		which is passed to the underlying encode() function; by default it throws a
		UnicodeEncodeError exception.
		"""
        return tobytes(self.string, encoding=self.getEncoding(), errors=errors)
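A minimal usage sketch (assumes fontTools is installed; the record values are made up): a Windows name record (platform 3, encoding 1) reports 'utf_16_be' from getEncoding(), so toBytes() yields two bytes per ASCII character.

from fontTools.ttLib import newTable

name = newTable("name")
name.names = []
name.setName("Test", 4, 3, 1, 0x409)   # nameID 4 (full name), Windows, Unicode BMP, en-US
record = name.getName(4, 3, 1, 0x409)
print(record.getEncoding())            # 'utf_16_be'
print(record.toBytes())                # b'\x00T\x00e\x00s\x00t'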
Example #25
 def SaveToFileObject(self, fileobj):
     """Open a file, and ask each object in turn to write itself to
     the file.  Keep track of the file position at each point for
     use in the index at the end"""
     f = fileobj
     i = 1
     self.xref = []
     f.write(tobytes("%PDF-1.2" + LINEEND,
                     encoding='utf-8'))  # for CID support
     f.write(tobytes(b"%\xed\xec\xb6\xbe\r\n", encoding='utf-8'))
     for obj in self.objects:
         pos = f.tell()
         self.xref.append(pos)
         f.write(tobytes(str(i) + ' 0 obj' + LINEEND, encoding='utf-8'))
         obj.save(f)
         f.write(tobytes('endobj' + LINEEND, encoding='utf-8'))
         i = i + 1
     self.writeXref(f)
     self.writeTrailer(f)
     f.write(tobytes('%%EOF',
                     encoding='utf-8'))  # no lineend needed on this one!
Example #26
 def save(self, file):
     lines = [ '<<',
             '/Type /Pages',
             '/Count %d' % len(self.PageList),
             '/Kids ['
             ]
     for page in self.PageList:
         lines.append(str(page) + ' 0 R ')
     lines.append(']')
     lines.append('>>')
     text = LINEEND.join(lines)
     file.write(tobytes(text + LINEEND, encoding='utf-8'))
Example #27
    def save(self, file):
        self.info['pagewidth'] = self.pagewidth
        self.info['pageheight'] = self.pageheight
        # check for image support
        if self.hasImages:
            self.info['procsettext'] = '[/PDF /Text /ImageC]'
        else:
            self.info['procsettext'] = '[/PDF /Text]'
        self.info['transitionString'] = self.pageTransitionString

        file.write(
            tobytes(self.template % self.info + LINEEND, encoding='utf-8'))
Example #28
	def compile(self, ttFont):
		dataList = [struct.pack(">LLL", self.version, self.flags, len(self.tags))]
		stringPool = ""
		for tag in self.tags:
			offset = stringPool.find(tag)
			if offset < 0:
				offset = len(stringPool)
				stringPool = stringPool + tag
			offset = offset + 12 + len(self.tags) * 4
			dataList.append(struct.pack(">HH", offset, len(tag)))
		dataList.append(tobytes(stringPool))
		return bytesjoin(dataList)
Example #29
    def compile(self, ttFont):
        if not hasattr(self, "glyphPrograms"):
            self.glyphPrograms = {}
            self.extraPrograms = {}
        data = b''
        indextable = ttFont[self.indextable]
        glyphNames = ttFont.getGlyphOrder()

        indices = []
        for i in range(len(glyphNames)):
            if len(data) % 2:
                data = data + b"\015"  # align on 2-byte boundaries, fill with return chars. Yum.
            name = glyphNames[i]
            if name in self.glyphPrograms:
                text = tobytes(self.glyphPrograms[name], encoding="utf-8")
            else:
                text = b""
            textLength = len(text)
            if textLength >= 0x8000:
                textLength = 0x8000
            indices.append((i, textLength, len(data)))
            data = data + text

        extra_indices = []
        codes = sorted(self.extras.items())
        for i in range(len(codes)):
            if len(data) % 2:
                data = data + b"\015"  # align on 2-byte boundaries, fill with return chars.
            code, name = codes[i]
            if name in self.extraPrograms:
                text = tobytes(self.extraPrograms[name], encoding="utf-8")
            else:
                text = b""
            textLength = len(text)
            if textLength >= 0x8000:
                textLength = 0x8000
            extra_indices.append((code, textLength, len(data)))
            data = data + text
        indextable.set(indices, extra_indices)
        return data
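A small sketch of the 0x8000 "magic" length used above (and exercised by the TSI decompile tests elsewhere on this page): programs of 0x8000 bytes or more are recorded with a capped length of 0x8000, and the real length is recovered on decompile from the next entry's text offset.

actual_lengths = [5, 0x7FFF, 0x8000, 0x8001]
stored_lengths = [min(n, 0x8000) for n in actual_lengths]  # same cap as 'if textLength >= 0x8000' above
print(stored_lengths)   # [5, 32767, 32768, 32768]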
Example #30
    def save(self, file):
        #avoid crashes if they wrote nothing in the page
        if self.data is None:
            self.data = TestStream

        if self.compression == 1:
            comp = zlib.compress(tobytes(
                self.data, encoding='utf-8'))  #this bit is very fast...
            base85 = pdfutils._AsciiBase85Encode(comp)  #...sadly this isn't
            wrapped = pdfutils._wrap(base85)
            data_to_write = wrapped
        else:
            data_to_write = self.data
        # the PDF length key should contain the length including
        # any extra LF pairs added by Print on DOS.

        #lines = len(string.split(self.data,'\n'))
        #length = len(self.data) + lines   # one extra LF each
        length = len(data_to_write) + len(LINEEND)  #AR 19980202
        if self.compression:
            file.write(
                tobytes(
                    '<< /Length %d /Filter [/ASCII85Decode /FlateDecode]>>' %
                    length + LINEEND,
                    encoding='utf-8'))
        else:
            file.write(
                tobytes('<< /Length %d >>' % length + LINEEND,
                        encoding='utf-8'))
        file.write(tobytes('stream' + LINEEND, encoding='utf-8'))
        file.write(tobytes(data_to_write + LINEEND, encoding='utf-8'))
        file.write(tobytes('endstream' + LINEEND, encoding='utf-8'))
Example #31
def test_decompile_offset_past_end(empty_font):
    empty_font.glyphOrder = ['foo', 'bar']
    content = 'baz'
    data = tobytes(content)
    empty_font['TSI0'].indices = [(0, len(data), 0), (1, 1, len(data)+1)]

    table = table_T_S_I__1()
    with CapturingLogHandler(table.log, "WARNING") as captor:
        table.decompile(data, empty_font)

    # the 'bar' program is skipped because its offset > len(data)
    assert table.glyphPrograms == {'foo': 'baz'}
    assert any("textOffset > totalLength" in r.msg for r in captor.records)
Example #32
 def compile(self, ttFont):
     d = self.__dict__.copy()
     d["nameLength"] = bytechr(len(self.baseGlyphName))
     d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28)
     METAMD5List = eval(self.METAMD5)
     d["METAMD5"] = b""
     for val in METAMD5List:
         d["METAMD5"] += bytechr(val)
     assert (len(d["METAMD5"]) == 16
             ), "Failed to pack 16 byte MD5 hash in SING table"
     data = sstruct.pack(SINGFormat, d)
     data = data + tobytes(self.baseGlyphName)
     return data
Example #33
def test_decompile_offset_past_end(empty_font):
    empty_font.glyphOrder = ['foo', 'bar']
    content = 'baz'
    data = tobytes(content)
    empty_font['TSI0'].indices = [(0, len(data), 0), (1, 1, len(data) + 1)]

    table = table_T_S_I__1()
    with CapturingLogHandler(table.log, "WARNING") as captor:
        table.decompile(data, empty_font)

    # the 'bar' program is skipped because its offset > len(data)
    assert table.glyphPrograms == {'foo': 'baz'}
    assert any("textOffset > totalLength" in r.msg for r in captor.records)
Example #34
 def save(self, file):
     file.write(tobytes(LINEEND.join([
             "<</Title (%s)",
             "/Author (%s)",
             "/CreationDate (D:%s)",
             "/Producer (PDFgen)",
             "/Subject (%s)",
             ">>"
             ]) % (
 pdfutils._escape(self.title),
 pdfutils._escape(self.author),
 self.datestr,
 pdfutils._escape(self.subject)
 ) + LINEEND, encoding='utf-8'))
Example #35
def addSTATTable(varFont, varFontPath):
    print("Adding STAT table")
    kSTAT_OverrideName = "override.STAT.ttx"
    statPath = os.path.dirname(varFontPath)
    statPath = os.path.join(statPath, kSTAT_OverrideName)
    if not os.path.exists(statPath):
        print("Note: Generating simple STAT table from 'fvar' table in "
              "'%s'." % (statPath))
        fvar = varFont["fvar"]
        xmlSTATData = tobytes(makeSTAT(fvar))
        statFile = io.BytesIO(xmlSTATData)
        varFont.importXML(statFile)
        varFont.saveXML(statPath, tables=["STAT"])
    else:
        varFont.importXML(statPath)
Example #36
def addSTATTable(varFont, varFontPath):
    print("Adding STAT table")
    kSTAT_OverrideName = "override.STAT.ttx"
    statPath = os.path.dirname(varFontPath)
    statPath = os.path.join(statPath, kSTAT_OverrideName)
    if not os.path.exists(statPath):
        print("Note: Generating simple STAT table from 'fvar' table in "
              "'%s'." % (statPath))
        fvar = varFont["fvar"]
        xmlSTATData = tobytes(makeSTAT(fvar))
        statFile = io.BytesIO(xmlSTATData)
        varFont.importXML(statFile)
        varFont.saveXML(statPath, tables=["STAT"])
    else:
        varFont.importXML(statPath)
Example #37
def normalizeStringForPostscript(s, allowSpaces=True):
    s = tounicode(s)
    normalized = []
    for c in s:
        if c == " " and not allowSpaces:
            continue
        if c in _postscriptFontNameExceptions:
            continue
        if c not in _postscriptFontNameAllowed:
            # Use compatibility decomposed form, to keep parts in ascii
            c = unicodedata.normalize("NFKD", c)
            if not set(c) < _postscriptFontNameAllowed:
                c = tounicode(tobytes(c, errors="replace"))
        normalized.append(tostr(c))
    return "".join(normalized)
Example #38
def pack(fmt, obj):
    formatstring, names, fixes = getformat(fmt, keep_pad_byte=True)
    elements = []
    if not isinstance(obj, dict):
        obj = obj.__dict__
    for name in names:
        value = obj[name]
        if name in fixes:
            # fixed point conversion
            value = fl2fi(value, fixes[name])
        elif isinstance(value, str):
            value = tobytes(value)
        elements.append(value)
    data = struct.pack(*(formatstring, ) + tuple(elements))
    return data
Example #39
def normalizeStringForPostscript(s, allowSpaces=True):
    s = tounicode(s)
    normalized = []
    for c in s:
        if c == " " and not allowSpaces:
            continue
        if c in _postscriptFontNameExceptions:
            continue
        if c not in _postscriptFontNameAllowed:
            # Use compatibility decomposed form, to keep parts in ascii
            c = unicodedata.normalize("NFKD", c)
            if not set(c) < _postscriptFontNameAllowed:
                c = tounicode(tobytes(c, errors="replace"))
        normalized.append(tostr(c))
    return "".join(normalized)
Example #40
def _makeMacName(name, nameID, language, font=None):
    """Create a NameRecord for Apple platforms

	'language' is an arbitrary IETF BCP 47 language identifier such
	as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. When possible, we
	create a Macintosh NameRecord that is understood by old applications
	(platform ID 1 and an old-style Macintosh language enum). If this
	is not possible, we create a Unicode NameRecord (platform ID 0)
	whose language points to the font’s 'ltag' table. The latter
	can encode any string in any language, but legacy applications
	might not recognize the format (in which case they will ignore
	those names).

	'font' should be the TTFont for which you want to create a name.
	If 'font' is None, we only return NameRecords for legacy Macintosh;
	in that case, the result will be None for names that need to
	be encoded with an 'ltag' table.

	See the section “The language identifier” in Apple’s specification:
	https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html
	"""
    macLang = _MAC_LANGUAGE_CODES.get(language.lower())
    macScript = _MAC_LANGUAGE_TO_SCRIPT.get(macLang)
    if macLang is not None and macScript is not None:
        encoding = getEncoding(1, macScript, macLang, default="ascii")
        # Check if we can actually encode this name. If we can't,
        # for example because we have no support for the legacy
        # encoding, or because the name string contains Unicode
        # characters that the legacy encoding cannot represent,
        # we fall back to encoding the name in Unicode and put
        # the language tag into the ltag table.
        try:
            _ = tobytes(name, encoding, errors="strict")
            return makeName(name, nameID, 1, macScript, macLang)
        except UnicodeEncodeError:
            pass
    if font is not None:
        ltag = font.tables.get("ltag")
        if ltag is None:
            ltag = font["ltag"] = newTable("ltag")
        # 0 = Unicode; 4 = “Unicode 2.0 or later semantics (non-BMP characters allowed)”
        # “The preferred platform-specific code for Unicode would be 3 or 4.”
        # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html
        return makeName(name, nameID, 0, 4, ltag.addTag(language))
    else:
        log.warning("cannot store language %s into 'ltag' table "
                    "without having access to the TTFont object" % language)
        return None
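The "can this name survive a legacy Mac encoding?" probe above is just a strict tobytes() call. A standalone sketch of that check (the helper name and sample strings are made up):

from fontTools.misc.py23 import tobytes

def fits_legacy_mac_encoding(name, encoding="mac_roman"):
    # True if the name encodes cleanly, i.e. a platform-1 Macintosh record can hold it
    try:
        tobytes(name, encoding, errors="strict")
        return True
    except UnicodeEncodeError:
        return False

print(fits_legacy_mac_encoding("Grüße"))     # True: every character exists in Mac Roman
print(fits_legacy_mac_encoding("Ελληνικά"))  # False: would need the Unicode + 'ltag' fallback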
Example #41
 def test_include_absolute_path(self):
     with tempfile.NamedTemporaryFile(delete=False) as included:
         included.write(
             tobytes("""
             feature kern {
                 pos A B -40;
             } kern;
             """,
                     encoding="utf-8"))
     including = StringIO("include(%s);" % included.name)
     try:
         lexer = IncludingLexer(including)
         files = set(loc.file for _, _, loc in lexer)
         self.assertIn(included.name, files)
     finally:
         os.remove(included.name)
Example #42
def test_decompile_magic_length_last_glyph(empty_font):
    empty_font.glyphOrder = ['foo', 'bar']
    indextable = empty_font['TSI0']
    indextable.indices = [
        (0, 3, 0),
        (1, 0x8000, 3)]           # the actual length of 'bar' program is
    indextable.extra_indices = [  # the difference between the first extra's
        (0xFFFA, 0, 0x8004),      # offset and 'bar' offset: 0x8004 - 3
        (0xFFFB, 0, 0x8004),
        (0xFFFC, 0, 0x8004),
        (0xFFFD, 0, 0x8004)]
    foo_content = "0" * 3
    bar_content = "1" * (0x8000 + 1)
    data = tobytes(foo_content + bar_content)

    table = table_T_S_I__1()
    table.decompile(data, empty_font)

    assert table.glyphPrograms['foo'] == foo_content
    assert table.glyphPrograms['bar'] == bar_content
Example #43
    def setupFile_featureTables(self):
        if self.mtiFeaFiles is not None:
            super(FDKFeatureCompiler, self).setupFile_featureTables()

        elif not self.features.strip():
            return

        import subprocess
        from fontTools.misc.py23 import tostr

        fd, outline_path = tempfile.mkstemp()
        os.close(fd)
        self.outline.save(outline_path)

        fd, feasrc_path = tempfile.mkstemp()
        os.close(fd)

        fd, fea_path = tempfile.mkstemp()
        os.write(fd, tobytes(self.features, encoding='utf-8'))
        os.close(fd)

        report = tostr(subprocess.check_output([
            "makeotf", "-o", feasrc_path, "-f", outline_path,
            "-ff", fea_path]))
        os.remove(outline_path)
        os.remove(fea_path)

        print(report)
        success = "Done." in report
        if success:
            feasrc = TTFont(feasrc_path)
            for table in ["GDEF", "GPOS", "GSUB"]:
                if table in feasrc:
                    self.outline[table] = feasrc[table]
            feasrc.close()

        os.remove(feasrc_path)
        if not success:
            raise ValueError("Feature syntax compilation failed.")
Example #44
def test_decompile_magic_length_non_last(empty_font):
    indextable = empty_font['TSI0']
    indextable.extra_indices = [
        (0xFFFA, 3, 0),
        (0xFFFB, 0x8000, 3),  # the actual length of 'cvt' program is:
        (0xFFFC, 0, 0x8004),  # nextTextOffset - textOffset: 0x8004 - 3
        (0xFFFD, 0, 0x8004)]
    ppgm_content = "0" * 3
    cvt_content = "1" * (0x8000 + 1)
    data = tobytes(ppgm_content + cvt_content)

    table = table_T_S_I__1()
    table.decompile(data, empty_font)

    assert table.extraPrograms['ppgm'] == ppgm_content
    assert table.extraPrograms['cvt'] == cvt_content

    table = table_T_S_I__1()
    with CapturingLogHandler(table.log, "WARNING") as captor:
        table.decompile(data[:-1], empty_font)  # last entry is truncated
    captor.assertRegex("nextTextOffset > totalLength")
    assert table.extraPrograms['cvt'] == cvt_content[:-1]
Example #45
 def save(self, file):
     file.write(tobytes(self.template % (self.RefPages, self.RefOutlines) + LINEEND, encoding='utf-8'))
Example #46
	def make_temp(data):
		with tempfile.NamedTemporaryFile(delete=False) as f:
			f.write(tobytes(data))
		return f.name
Example #47
from fontTools.misc.py23 import tobytes

from runner import main as runner
from differ import main as differ
from test_utils import get_expected_path, get_temp_file_path

TOOL = 'otf2otc'
CMD = ['-t', TOOL]

REGULAR = 'SourceSansPro-Regular.otf'
ITALIC = 'SourceSansPro-It.otf'
BOLD = 'SourceSansPro-Bold.otf'

MSG_1 = tobytes(
    "Shared tables: "
    "['BASE', 'DSIG', 'GDEF', 'GSUB', 'cmap', 'maxp', 'post']%s"
    "Un-shared tables: "
    "['CFF ', 'GPOS', 'OS/2', 'head', 'hhea', 'hmtx', 'name']" % os.linesep)

MSG_2 = tobytes(
    "Shared tables: ['BASE', 'DSIG']%s"
    "Un-shared tables: ['CFF ', 'GDEF', 'GPOS', 'GSUB', 'OS/2', 'cmap', "
    "'head', 'hhea', 'hmtx', 'maxp', 'name', 'post']" % os.linesep)


# -----
# Tests
# -----

@pytest.mark.parametrize('otf_filenames, ttc_filename, tables_msg', [
    ([REGULAR, BOLD], 'RgBd.ttc', MSG_1),
Example #48
 def writeTrailer(self, f):
     f.write(tobytes('trailer' + LINEEND, encoding='utf-8'))
     f.write(tobytes('<< /Size %d /Root %d 0 R /Info %d 0 R>>' % (len(self.objects) + 1, 1, self.infopos)  + LINEEND, encoding='utf-8'))
     f.write(tobytes('startxref' + LINEEND, encoding='utf-8'))
     f.write(tobytes(str(self.startxref)  + LINEEND, encoding='utf-8'))
Example #49
 def save(self, file):
     file.write(tobytes(self.template % (self.keyname, self.fontname) + LINEEND, encoding='utf-8'))
Example #50
 def save(self, file):
     file.write(tobytes(self.text + LINEEND, encoding='utf-8'))