Example #1
0
 def _encoding(self):
     """Extract the font's encoding from data1 into self.encoding.

     Recognizes the shorthand ``/Encoding StandardEncoding def`` and
     otherwise parses an explicit ``dup <code> /<glyph> put`` vector,
     recording the vector's start and end positions in
     ``self.encodingstart``/``self.encodingend`` for later use.
     """
     tokenizer = reader.PStokenizer(self.data1, "/Encoding")
     first = tokenizer.gettoken()
     second = tokenizer.gettoken()
     if (first, second) == ("StandardEncoding", "def"):
         self.encoding = adobestandardencoding
     else:
         self.encoding = 256 * [None]
         # skip ahead to the first "dup", remembering where it begins
         while True:
             self.encodingstart = tokenizer.pos
             if tokenizer.gettoken() == "dup":
                 break
         while True:
             code = tokenizer.getint()
             glyphname = tokenizer.gettoken()
             if 0 <= code < 256:
                 # drop the leading "/" of the glyph name token
                 self.encoding[code] = glyphname[1:]
             assert tokenizer.gettoken() == "put"
             self.encodingend = tokenizer.pos
             token = tokenizer.gettoken()
             if token in ("readonly", "def"):
                 break
             assert token == "dup"
Example #2
0
    def __init__(self, bytes):
        """Parse a PostScript encoding file given as a string.

        Reads the encoding name followed by a 256-entry vector of glyph
        names (``/Name [ /glyph0 ... /glyph255 ] def``) into
        ``self.name`` and ``self.vector``.

        :raises ENCfileError: when the input deviates from the expected
            layout (missing bracket, wrong entry count, a token without
            a leading slash, or a missing trailing ``def``).
        """
        c = reader.PStokenizer(bytes, "")

        # name of encoding (token keeps its leading slash)
        self.name = c.gettoken()
        token = c.gettoken()
        if token != "[":
            # NOTE: the original code formatted these messages with an
            # undefined name `filename`, raising NameError on the error
            # path; report the encoding name instead.
            raise ENCfileError(
                "cannot parse encoding '%s', expecting '[' got '%s'" %
                (self.name, token))
        self.vector = []
        for i in range(256):
            token = c.gettoken()
            if token == "]":
                raise ENCfileError(
                    "not enough charcodes in encoding '%s'" % self.name)
            if not token[0] == "/":
                raise ENCfileError(
                    "token does not start with / in encoding '%s'" %
                    self.name)
            # store the glyph name without the leading slash
            self.vector.append(token[1:])
        if c.gettoken() != "]":
            raise ENCfileError("too many charcodes in encoding '%s'" %
                               self.name)
        token = c.gettoken()
        if token != "def":
            raise ENCfileError(
                "cannot parse encoding '%s', expecting 'def' got '%s'" %
                (self.name, token))
Example #3
0
    def _data2decode(self):
        """decodes data2eexec to the data2 string and the subr and glyphs dictionary

        It doesn't make sense to call this method twice -- check the content of
        data2 before calling. The method also keeps the subrs and charstrings
        start and end positions for later use."""
        # eexec-decrypt the encrypted part of the font
        self._data2 = self._eexecdecode(self._data2eexec)

        # /lenIV is the number of leading random bytes of each encrypted
        # charstring; Adobe's documented default is 4 when absent
        m = self.lenIVpattern.search(self._data2)
        if m:
            self.lenIV = int(m.group(1))
        else:
            self.lenIV = 4
        # an "empty" subr: a charstring holding just command code 11
        # (the Type 1 "return" operator)
        self.emptysubr = self._charstringencode(chr(11))

        # extract Subrs
        c = reader.PStokenizer(self._data2, "/Subrs")
        self.subrsstart = c.pos
        arraycount = c.getint()
        token = c.gettoken(); assert token == "array"
        self.subrs = []
        for i in range(arraycount):
            # each entry reads: dup <index> <size> <RD-token> <bytes> <NP-token>
            token = c.gettoken(); assert token == "dup"
            token = c.getint(); assert token == i
            size = c.getint()
            # the RD/NP procedure names are font-specific: remember the ones
            # used by the first entry and require every later entry to match
            if not i:
                self.subrrdtoken = c.gettoken()
            else:
                token = c.gettoken(); assert token == self.subrrdtoken
            self.subrs.append(c.getbytes(size))
            token = c.gettoken()
            if token == "noaccess":
                # "noaccess put"/"noaccess def" are treated as one NP token
                token = "%s %s" % (token, c.gettoken())
            if not i:
                self.subrnptoken = token
            else:
                assert token == self.subrnptoken
        self.subrsend = c.pos

        # hasflexhintsubrs is a boolean indicating that the font uses flex or
        # hint replacement subrs as specified by Adobe (tm). When it does, the
        # first 4 subrs should all be copied except when none of them are used
        # in the stripped version of the font since we then get a font not
        # using flex or hint replacement subrs at all.
        self.hasflexhintsubrs = (arraycount >= len(self.flexhintsubrs) and
                                 [self.getsubrcmds(i)
                                  for i in range(len(self.flexhintsubrs))] == self.flexhintsubrs)

        # extract glyphs
        self.glyphs = {}
        self.glyphlist = [] # we want to keep the order of the glyph names
        c = reader.PStokenizer(self._data2, "/CharStrings")
        self.charstringsstart = c.pos
        c.getint()
        token = c.gettoken(); assert token == "dict"
        token = c.gettoken(); assert token == "dup"
        token = c.gettoken(); assert token == "begin"
        first = 1
        while 1:
            # each entry reads: /<glyphname> <size> <RD-token> <bytes> <ND-token>
            chartoken = c.gettoken()
            if chartoken == "end":
                break
            assert chartoken[0] == "/"
            size = c.getint()
            # as with the subrs, capture the RD/ND tokens from the first
            # entry and assert that the remaining entries agree
            if first:
                self.glyphrdtoken = c.gettoken()
            else:
                token = c.gettoken(); assert token == self.glyphrdtoken
            self.glyphlist.append(chartoken[1:])
            self.glyphs[chartoken[1:]] = c.getbytes(size)
            if first:
                self.glyphndtoken = c.gettoken()
            else:
                token = c.gettoken(); assert token == self.glyphndtoken
            first = 0
        self.charstringsend = c.pos
        # subrs and charstrings are expected to use the same RD procedure;
        # when there are no subrs, subrrdtoken was never set, so skip the check
        assert not self.subrs or self.subrrdtoken == self.glyphrdtoken