Example #1
def _validate(obj, **kwArgs):
    d = obj.__dict__
    logger = kwArgs['logger']
    editor = kwArgs['editor']
    
    if (editor is None) or (not editor.reallyHas(b'cmap')):
        logger.error((
          'V0553',
          (),
          "Unable to validate Unicode ranges, because the Editor and/or "
          "Cmap are missing or empty."))
        
        return False
    
    uMap = editor.cmap.getUnicodeMap()
    
    if not len(uMap):
        uMap = editor.cmap.getSymbolMap()
    
    uSpan = span.Span(uMap)
    kwArgs['threshold'] = 0
    obj2 = obj.recalculated(unicodeSpan=uSpan, **kwArgs)
    d2 = obj2.__dict__
    r = True
    
    for rangeID, key in obj.rangeIDToName.items():
        if d[key] and (not d2[key]):
            if rangeID == 57:
                logger.error((
                  'E2123',
                  (),
                  "Surrogates indicated but none present in the font"))
            
            else:
                logger.error((
                  'E2113',
                  (key,),
                  "Unicode range %r claimed to be present but is not."))
            
            r = False
        
        elif d2[key] and (not d[key]):
            logger.warning((
              'V0795',
              (key,),
              "Unicode range %r claimed not to be present, but at least "
              "one glyph from the range is in the font."))
    
    if (
      utilities.safeMax(t[1] for t in uSpan) > 0xFFFF and
      (not d['hasNonPlaneZero'])):
        
        logger.error((
          'E2122',
          (),
          "Font has non-BMP characters but surrogates bit not set."))
        
        r = False
    
    return r
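A note on `utilities.safeMax`, which appears in most of these examples: its exact fontio3 behavior is not shown here, so the sketch below is only an assumed stand-in, namely a max() that tolerates an empty iterable by returning a default instead of raising.

# Hypothetical stand-in for fontio3's utilities.safeMax; the real helper may
# use a different default (e.g. None) or signature.
def safeMax(iterable, default=0):
    """Like max(), but returns `default` for an empty iterable."""
    return max(iterable, default=default)

# Mirrors the usage in Example #1: the highest code point covered by the span,
# or 0 when the span is empty.
# highestCodePoint = safeMax(t[1] for t in uSpan)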
Example #2
    @classmethod
    def fromwalker(cls, w, **kwArgs):
        """
        Creates and returns a new Format1 object from the specified walker.
        
        >>> d = {'coverage': _testingValues[1].coverage.__copy__()}
        >>> obj = _testingValues[1]
        >>> obj == Format1.frombytes(obj.binaryString(), **d)
        True
        """

        numClasses, oCT, oSA, oET, oVT = w.unpack("5L")

        wCT, wSA, wET, wVT = stutils.offsetsToSubWalkers(
            w.subWalker(0), oCT, oSA, oET, oVT)

        wETCopy = wET.subWalker(0, relative=True)
        v = wETCopy.unpackRest("3H", strict=False)
        numStates = 1 + utilities.safeMax(x[0] for x in v)
        numEntries = len(v)

        nsObj = namestash.NameStash.readormake(w, (oCT, oSA, oET, oVT),
                                               numStates, numClasses)

        stateNames = nsObj.allStateNames()
        classNames = nsObj.allClassNames()

        classTable = classtable.ClassTable.fromwalker(wCT,
                                                      classNames=classNames)

        kwArgs.pop('classTable', None)

        r = cls({},
                classTable=classTable,
                **utilities.filterKWArgs(cls, kwArgs))

        # build value table
        valueDict = {}
        fw = valuetuple.ValueTuple.fromwalker
        index = 0

        while wVT.stillGoing():
            valueDict[index] = fw(wVT)
            index += 1

        # build entry table
        fw = entry.Entry.fromwalker
        d = {'stateNames': stateNames, 'valueDict': valueDict}
        entries = [fw(wET, **d) for i in range(numEntries)]

        # finally, build state table
        fw = staterow.StateRow.fromwalker
        d = {'classNames': classNames, 'entries': entries}

        for stateName in stateNames:
            r[stateName] = fw(wSA, **d)

        return r
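A note on the `numStates` computation above: the entry table is unpacked as 3-uint16 records, and the code treats the first field of each record as a zero-based state number, so the state count is one more than the largest value seen. A toy illustration (not fontio3 code):

entryRecords = [(0, 1, 0), (2, 0, 1), (1, 3, 0)]     # stand-in for wETCopy.unpackRest("3H")
numStates = 1 + max(rec[0] for rec in entryRecords)  # largest state index is 2, so 3 states
numEntries = len(entryRecords)                       # 3 entries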
Example #3
def _validate(obj, **kwArgs):
    logger = kwArgs['logger']

    markClassCount = 1 + utilities.safeMax(rec.markClass
                                           for rec in obj.mark.values())

    # all BaseRecords must be exactly markClassCount entries long
    if any(len(rec) != markClassCount for rec in obj.base.values()):
        logger.error(
            ('V0347', (),
             "The BaseRecords' lengths do not match the mark class count."))

        return False

    return True
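The check in Example #3 is purely arithmetic: the mark class count is one more than the highest markClass used by any mark record, and every BaseRecord must contain exactly that many entries. A toy illustration, with plain values standing in for the real record objects:

# Plain dicts stand in for obj.mark and obj.base; the real fontio3 records are
# richer objects (the mark values expose a .markClass attribute).
markClasses = {10: 0, 11: 2, 12: 1}                        # mark glyph -> markClass
markClassCount = 1 + max(markClasses.values())             # -> 3
baseRecords = {40: (None, None, None), 41: (None, None)}   # base glyph -> anchors
assert any(len(rec) != markClassCount for rec in baseRecords.values())
# glyph 41 has only 2 entries, so the validation above would log V0347 and fail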
Example #4
    @classmethod
    def fromvalidatedwalker(cls, w, **kwArgs):
        """
        Creates and returns a new Format4 object from the specified walker,
        doing source validation.
        
        >>> obj = _makePointExample()
        >>> k = {'coverage': obj.coverage, 'tupleIndex': obj.tupleIndex}
        >>> k['logger'] = utilities.makeDoctestLogger("fvw")
        >>> bs = obj.binaryString()
        >>> obj2 = Format4.fromvalidatedbytes(bs, **k)
        fvw.format4 - DEBUG - Walker has 152 remaining bytes.
        fvw.format4.namestash - DEBUG - Walker has 132 remaining bytes.
        fvw.format4 - DEBUG - Walker has 28 remaining bytes.
        fvw.format4.lookup_aat - DEBUG - Walker has 28 remaining bytes.
        fvw.format4.lookup_aat.binsearch.binsrch header - DEBUG - Walker has 26 remaining bytes.
        fvw.format4.actions.[0].pointentry - DEBUG - Walker has 8 remaining bytes.
        fvw.format4.actions.[1].pointentry - DEBUG - Walker has 4 remaining bytes.
        fvw.format4.entries.[0].entry4 - DEBUG - Walker has 24 remaining bytes.
        fvw.format4.entries.[1].entry4 - DEBUG - Walker has 18 remaining bytes.
        fvw.format4.entries.[2].entry4 - DEBUG - Walker has 12 remaining bytes.
        fvw.format4.entries.[3].entry4 - DEBUG - Walker has 6 remaining bytes.
        fvw.format4.state Start of text.staterow - DEBUG - Walker has 44 remaining bytes.
        fvw.format4.state Start of line.staterow - DEBUG - Walker has 30 remaining bytes.
        fvw.format4.state Saw x.staterow - DEBUG - Walker has 16 remaining bytes.
        >>> obj == obj2
        True
        """

        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("format4")

        logger.debug(
            ('V0001', (w.length(), ), "Walker has %d remaining bytes."))

        if w.length() < 20:
            logger.error(('V0004', (), "Insufficient bytes."))
            return None

        stBaseOffset = w.getOffset()
        numClasses, oCT, oSA, oET, kind, oVT = w.unpack("4LBT")

        if numClasses < 4:
            logger.error(
                ('V0634', (numClasses, ),
                 "The number of classes in the state table must be at least "
                 "four, but is only %d."))

            return None

        if kind & 0x3F:
            logger.warning((
                'V0879', (kind, ),
                "One or more reserved bits in the flag byte %02X are not zero."
            ))

        kind >>= 6

        if kind == 3:
            logger.error(
                ('V0880', (), "Action type mask is 3, which is undefined."))

            return None

        t = (oCT, oSA, oET, oVT)
        firstValid = w.getOffset() - stBaseOffset
        lastValidPlusOne = firstValid + w.length()

        if any(o < firstValid or o >= lastValidPlusOne for o in t):
            logger.error(
                ('V0635', (),
                 "One or more offsets to state table components are outside "
                 "the bounds of the state table itself."))

            return None

        wCT, wSA, wET, wVT = stutils.offsetsToSubWalkers(w.subWalker(0), *t)

        wETCopy = wET.subWalker(0, relative=True)
        v = wETCopy.unpackRest("3H", strict=False)
        wET = wET.subWalker(0, relative=True, newLimit=6 * len(v))
        numStates = max(2, 1 + utilities.safeMax(x[0] for x in v))
        fvw = namestash.NameStash.readormake_validated
        nsObj = fvw(w, t, numStates, numClasses, logger=logger)

        if nsObj is None:
            return None

        stateNames = nsObj.allStateNames()
        classNames = nsObj.allClassNames()
        fvw = classtable.ClassTable.fromvalidatedwalker
        classTable = fvw(wCT, classNames=classNames, logger=logger)

        if classTable is None:
            return None

        kwArgs.pop('classTable', None)

        r = cls({},
                classTable=classTable,
                **utilities.filterKWArgs(cls, kwArgs))

        wVTCopy = wVT.subWalker(0, relative=True)
        v = wVTCopy.unpackRest(("2H" if kind < 2 else "4h"), strict=False)

        wVT = wVT.subWalker(0,
                            relative=True,
                            newLimit=(4 if kind < 2 else 8) * len(v))

        gfvw = _actionClasses[kind].groupfromvalidatedwalker
        v = gfvw(wVT, logger=logger.getChild("actions"), **kwArgs)

        if v is None:
            return None

        actionMap = dict(enumerate(v))

        entries = entry4.Entry.groupfromvalidatedwalker(
            wET,
            actionMap=actionMap,
            stateNames=stateNames,
            logger=logger.getChild("entries"))

        if entries is None:
            return None

        fvw = staterow.StateRow.fromvalidatedwalker

        for stateName in stateNames:
            obj = fvw(wSA,
                      classNames=classNames,
                      entries=entries,
                      logger=logger.getChild("state %s" % (stateName, )))

            if obj is None:
                return None

            r[stateName] = obj

        return r
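The flag byte handled above packs two pieces of information: the low six bits are reserved (a warning is logged if any are set) and the top two bits select the action type, with the value 3 rejected as undefined. A small sketch of that bit handling:

kindByte = 0b10_000000           # example: action type 2, reserved bits clear
reservedBits = kindByte & 0x3F   # nonzero here would trigger the V0879 warning
actionType = kindByte >> 6       # 0, 1 or 2; 3 triggers the V0880 error
assert reservedBits == 0 and actionType == 2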
Example #5
class PSChainCoverage(dict, metaclass=mapmeta.FontDataMetaclass):
    """
    Objects containing format 3 chaining contextual subtables.
    
    These are dicts mapping a single Key to a PSLookupGroup. (Note that in the
    future, if OpenType permits a format for multiple entries instead of a
    single entry, the existing dict will suffice).
    
    >>> _testingValues[0].pprint(namer=namer.testingNamer())
    (({xyz21, xyz22}, {xyz31, xyz32}), ({xyz31, xyz32}, {afii60001, afii60002, xyz95}), ({afii60001, afii60002, xyz95}, {xyz21, xyz31, xyz41})):
      Effect #1:
        Sequence index: 0
        Lookup:
          Subtable 0 (Pair (glyph) positioning table):
            (xyz11, xyz21):
              First adjustment:
                FUnit adjustment to origin's x-coordinate: 30
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
              Second adjustment:
                Device for origin's x-coordinate:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
                Device for origin's y-coordinate:
                  Tweak at 12 ppem: -5
                  Tweak at 13 ppem: -3
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 2
                  Tweak at 20 ppem: 3
            (xyz9, xyz16):
              Second adjustment:
                FUnit adjustment to origin's x-coordinate: -10
            (xyz9, xyz21):
              First adjustment:
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
          Lookup flags:
            Right-to-left for Cursive: False
            Ignore base glyphs: True
            Ignore ligatures: False
            Ignore marks: False
          Sequence order (lower happens first): 1
      Effect #2:
        Sequence index: 1
        Lookup:
          Subtable 0 (Pair (class) positioning table):
            (First class 1, Second class 1):
              Second adjustment:
                FUnit adjustment to origin's x-coordinate: -10
            (First class 2, Second class 0):
              First adjustment:
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
            (First class 2, Second class 1):
              First adjustment:
                FUnit adjustment to origin's x-coordinate: 30
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
              Second adjustment:
                Device for origin's x-coordinate:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
                Device for origin's y-coordinate:
                  Tweak at 12 ppem: -5
                  Tweak at 13 ppem: -3
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 2
                  Tweak at 20 ppem: 3
            Class definition table for first glyph:
              xyz16: 1
              xyz6: 1
              xyz7: 1
              xyz8: 2
            Class definition table for second glyph:
              xyz21: 1
              xyz22: 1
              xyz23: 1
          Lookup flags:
            Right-to-left for Cursive: True
            Ignore base glyphs: False
            Ignore ligatures: False
            Ignore marks: False
          Sequence order (lower happens first): 2
    
    >>> _testingValues[0].gatheredMaxContext()
    6
    """

    #
    # Class definition variables
    #

    mapSpec = dict(
        item_followsprotocol=True,
        item_keyfollowsprotocol=True,
        item_pprintlabelpresortfunc=operator.itemgetter(1),
        item_renumberdeepkeys=True,
        item_usenamerforstr=True,
        map_compactiblefunc=(lambda d, k, **kw: False),
        #map_compactremovesfalses = True,
        map_maxcontextfunc=(lambda d: sum(
            utilities.safeMax(len(k[i]) for k in d) for i in range(3))),
        map_validatefunc_partial=_validate)

    #
    # Methods
    #

    def buildBinary(self, w, **kwArgs):
        """
        Adds the binary data for the PSChainCoverage to the specified
        LinkedWriter.
        
        NOTE! There will be unresolved lookup list indices in the LinkedWriter
        after this method is finished. The caller (or somewhere higher up) is
        responsible for adding an index map to the LinkedWriter with the tag
        "lookupList" before the LinkedWriter's binaryString() method is called.
        
        >>> w = writer.LinkedWriter()
        >>> _testingValues[0].buildBinary(w, forGPOS=True)
        >>> ltv = lookup._testingValues
        >>> w.addIndexMap(
        ...   "lookupList_GPOS",
        ...   { ltv[1].asImmutable(): 11,
        ...     ltv[2].asImmutable(): 25})
        >>> utilities.hexdump(w.binaryString())
               0 | 0003 0002 0030 001E  0002 0030 0038 0002 |.....0.....0.8..|
              10 | 0038 0026 0002 0000  000B 0001 0019 0001 |.8.&............|
              20 | 0002 0014 0015 0001  0003 0014 001E 0028 |...............(|
              30 | 0001 0002 001E 001F  0001 0003 005E 0060 |.............^.`|
              40 | 0061                                     |.a              |
        
        >>> PSChainCoverage().binaryString()
        Traceback (most recent call last):
          ...
        ValueError: Cannot write empty PSChainCoverages!
        """

        if 'stakeValue' in kwArgs:
            stakeValue = kwArgs.pop('stakeValue')
            w.stakeCurrentWithValue(stakeValue)
        else:
            stakeValue = w.stakeCurrent()

        if not len(self):
            raise ValueError("Cannot write empty PSChainCoverages!")

        w.add("H", 3)  # format
        pool = {}  # immutable for Coverage -> (coverage, stake)

        # Note the strong assumption that Python correctly walks the iterators
        # over keys and values in the same way in the following two loops.

        for key in self:
            for it in (reversed(key[0]), iter(key[1]), iter(key[2])):
                v = list(it)
                w.add("H", len(v))

                for c in v:
                    immut = tuple(sorted(c))

                    if immut not in pool:
                        pool[immut] = (c, w.getNewStake())

                    w.addUnresolvedOffset("H", stakeValue, pool[immut][1])

        for value in self.values():
            w.add("H", len(value))
            value.buildBinary(w, **kwArgs)

        for immut, (obj, stake) in sorted(pool.items()):
            obj.buildBinary(w, stakeValue=stake)

    def compacted(self, **kwArgs):
        """
        Returns a compacted version of the object. This first does canonical
        compacting, and then walks the Key to make sure there are no "emptied
        out" entries (perhaps as a result of glyph renumbering).
        """

        r = mapmeta.M_compacted(self, **kwArgs)

        if not r:
            return None

        k = next(iter(r))

        for ct in k:
            for cs in ct:
                if not cs:
                    return None

        return r

    @classmethod
    def fromValidatedFontWorkerSource(cls, fws, **kwArgs):
        """
        Creates and returns a new PSChainCoverage from the specified
        FontWorkerSource, doing source validation.

        >>> logger = utilities.makeDoctestLogger("FW_test")
        >>> obj = PSChainCoverage.fromValidatedFontWorkerSource(
        ...   _test_FW_fws2,
        ...   namer = _test_FW_namer,
        ...   forGPOS = True,
        ...   lookupDict = _test_FW_lookupDict,
        ...   logger = logger,
        ...   editor = {})
        FW_test.pschaincoverage - WARNING - line 16 -- unexpected token: foo
        FW_test.pschaincoverage - WARNING - line 0 -- did not find matching 'subtable end/lookup end'
        >>> obj.pprint()
        Key((CoverageTuple((CoverageSet(frozenset({2})),)), CoverageTuple((CoverageSet(frozenset({5, 7})),)), CoverageTuple((CoverageSet(frozenset({11, 13})),)))):
          Effect #1:
            Sequence index: 0
            Lookup:
              3:
                FUnit adjustment to horizontal advance: 678
          Effect #2:
            Sequence index: 0
            Lookup:
              3:
                FUnit adjustment to horizontal advance: 901
        """
        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("pschaincoverage")

        terminalStrings = ('subtable end', 'lookup end')
        startingLineNumber = fws.lineNumber

        CoverageTuple = pschaincoverage_coveragetuple.CoverageTuple
        Key = pschaincoverage_key.Key
        fVFWS = coverageset.CoverageSet.fromValidatedFontWorkerSource

        # initial lists; will be converted to CoverageTuple later
        backtracks = []
        inputs = []
        lookaheads = []

        lookupGroups = {}

        for line in fws:
            if len(line) > 0:
                tokens = [x.strip() for x in line.split('\t')]

                # NOTE: added '.lower()' to token parsing as FontWorker seems to
                # write 'Backtrackcoverage', 'Inputcoverage', and
                # 'LookAheadcoverage', unlike other lookups which are all
                # lowercase.

                if tokens[0].lower() in terminalStrings:
                    return cls(lookupGroups)

                if tokens[0].lower() == 'backtrackcoverage definition begin':
                    backtracks.append(fVFWS(fws, logger=logger, **kwArgs))

                elif tokens[0].lower() == 'inputcoverage definition begin':
                    inputs.append(fVFWS(fws, logger=logger, **kwArgs))

                elif tokens[0].lower() == 'lookaheadcoverage definition begin':
                    lookaheads.append(fVFWS(fws, logger=logger, **kwArgs))

                elif tokens[0].lower() == 'coverage':
                    lookupList = []

                    for effect in tokens[1:]:
                        effectTokens = [x.strip() for x in effect.split(',')]
                        sequenceIndex = int(effectTokens[0]) - 1
                        lookupName = effectTokens[1]

                        lookupList.append(
                            pslookuprecord.PSLookupRecord(
                                sequenceIndex,
                                lookup.Lookup.fromValidatedFontWorkerSource(
                                    fws, lookupName, logger=logger, **kwArgs)))

                    key = Key(
                        (CoverageTuple(reversed(backtracks)),
                         CoverageTuple(inputs), CoverageTuple(lookaheads)))

                    lookupGroup = pslookupgroup.PSLookupGroup(lookupList)
                    lookupGroups[key] = lookupGroup

                else:
                    logger.warning(('V0960', (fws.lineNumber, tokens[0]),
                                    'line %d -- unexpected token: %s'))

        logger.warning(
            ('V0958', (startingLineNumber, "/".join(terminalStrings)),
             "line %d -- did not find matching '%s'"))

        return cls(lookupGroups)

    @classmethod
    def fromvalidatedwalker(cls, w, **kwArgs):
        """
        Creates and returns a new PSChainCoverage object from the specified
        walker, doing source validation.
        
        >>> w = writer.LinkedWriter()
        >>> _testingValues[0].buildBinary(w, forGPOS=True)
        >>> ltv = lookup._testingValues
        >>> d = {
        ...   ltv[1].asImmutable(): 11,
        ...   ltv[2].asImmutable(): 25}
        >>> w.addIndexMap("lookupList_GPOS", d)
        >>> s = w.binaryString()
        >>> FL = []
        >>> fvb = PSChainCoverage.fromvalidatedbytes
        >>> logger = utilities.makeDoctestLogger("pschaincoverage_test")
        >>> obj = fvb(s, fixupList=FL, logger=logger)
        pschaincoverage_test.pschaincoverage - DEBUG - Walker has 66 remaining bytes.
        pschaincoverage_test.pschaincoverage - DEBUG - Format is 3
        pschaincoverage_test.pschaincoverage - DEBUG - Backtrack count is 2
        pschaincoverage_test.pschaincoverage - DEBUG - Backtrack offsets (reversed) are (30, 48)
        pschaincoverage_test.pschaincoverage.backtrack coverage 0.coverageset - DEBUG - Walker has 18 remaining bytes.
        pschaincoverage_test.pschaincoverage.backtrack coverage 0.coverageset - DEBUG - Format is 1, count is 2
        pschaincoverage_test.pschaincoverage.backtrack coverage 0.coverageset - DEBUG - Raw data are [30, 31]
        pschaincoverage_test.pschaincoverage.backtrack coverage 1.coverageset - DEBUG - Walker has 36 remaining bytes.
        pschaincoverage_test.pschaincoverage.backtrack coverage 1.coverageset - DEBUG - Format is 1, count is 2
        pschaincoverage_test.pschaincoverage.backtrack coverage 1.coverageset - DEBUG - Raw data are [20, 21]
        pschaincoverage_test.pschaincoverage - DEBUG - Input count is 2
        pschaincoverage_test.pschaincoverage - DEBUG - Input offsets are (48, 56)
        pschaincoverage_test.pschaincoverage.input coverage 0.coverageset - DEBUG - Walker has 18 remaining bytes.
        pschaincoverage_test.pschaincoverage.input coverage 0.coverageset - DEBUG - Format is 1, count is 2
        pschaincoverage_test.pschaincoverage.input coverage 0.coverageset - DEBUG - Raw data are [30, 31]
        pschaincoverage_test.pschaincoverage.input coverage 1.coverageset - DEBUG - Walker has 10 remaining bytes.
        pschaincoverage_test.pschaincoverage.input coverage 1.coverageset - DEBUG - Format is 1, count is 3
        pschaincoverage_test.pschaincoverage.input coverage 1.coverageset - DEBUG - Raw data are [94, 96, 97]
        pschaincoverage_test.pschaincoverage - DEBUG - Lookahead count is 2
        pschaincoverage_test.pschaincoverage - DEBUG - Lookahead offsets are (56, 38)
        pschaincoverage_test.pschaincoverage.lookahead coverage 0.coverageset - DEBUG - Walker has 10 remaining bytes.
        pschaincoverage_test.pschaincoverage.lookahead coverage 0.coverageset - DEBUG - Format is 1, count is 3
        pschaincoverage_test.pschaincoverage.lookahead coverage 0.coverageset - DEBUG - Raw data are [94, 96, 97]
        pschaincoverage_test.pschaincoverage.lookahead coverage 1.coverageset - DEBUG - Walker has 28 remaining bytes.
        pschaincoverage_test.pschaincoverage.lookahead coverage 1.coverageset - DEBUG - Format is 1, count is 3
        pschaincoverage_test.pschaincoverage.lookahead coverage 1.coverageset - DEBUG - Raw data are [20, 30, 40]
        pschaincoverage_test.pschaincoverage - DEBUG - Action count is 2
        pschaincoverage_test.pschaincoverage.pslookupgroup - DEBUG - Walker has 44 bytes remaining.
        pschaincoverage_test.pschaincoverage.pslookupgroup.[0].pslookuprecord - DEBUG - Walker has 44 remaining bytes.
        pschaincoverage_test.pschaincoverage.pslookupgroup.[0].pslookuprecord - DEBUG - Sequence index is 0
        pschaincoverage_test.pschaincoverage.pslookupgroup.[0].pslookuprecord - DEBUG - Lookup index is 11
        pschaincoverage_test.pschaincoverage.pslookupgroup.[1].pslookuprecord - DEBUG - Walker has 40 remaining bytes.
        pschaincoverage_test.pschaincoverage.pslookupgroup.[1].pslookuprecord - DEBUG - Sequence index is 1
        pschaincoverage_test.pschaincoverage.pslookupgroup.[1].pslookuprecord - DEBUG - Lookup index is 25

        >>> fvb(s[:25], fixupList=FL, logger=logger)
        pschaincoverage_test.pschaincoverage - DEBUG - Walker has 25 remaining bytes.
        pschaincoverage_test.pschaincoverage - DEBUG - Format is 3
        pschaincoverage_test.pschaincoverage - DEBUG - Backtrack count is 2
        pschaincoverage_test.pschaincoverage - DEBUG - Backtrack offsets (reversed) are (30, 48)
        pschaincoverage_test.pschaincoverage.backtrack coverage 0.coverageset - DEBUG - Walker has 0 remaining bytes.
        pschaincoverage_test.pschaincoverage.backtrack coverage 0.coverageset - ERROR - Insufficient bytes.
        """

        assert 'fixupList' in kwArgs
        fixupList = kwArgs.pop('fixupList')

        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("pschaincoverage")

        logger.debug(
            ('V0001', (w.length(), ), "Walker has %d remaining bytes."))

        if w.length() < 4:
            logger.error(('V0004', (), "Insufficient bytes."))
            return None

        format, backCount = w.unpack("2H")

        if format != 3:
            logger.error(
                ('V0002', (format, ), "Expected format 3, but got format %d."))

            return None

        else:
            logger.debug(('Vxxxx', (), "Format is 3"))

        logger.debug(('Vxxxx', (backCount, ), "Backtrack count is %d"))

        if w.length() < 2 * backCount:
            logger.error(
                ('V0391', (),
                 "The Backtrack Coverage offsets are missing or incomplete."))

            return None

        backOffsets = w.group("H", backCount)

        logger.debug(('Vxxxx', (tuple(reversed(backOffsets)), ),
                      "Backtrack offsets (reversed) are %s"))

        v = [None] * backCount
        CoverageTuple = pschaincoverage_coveragetuple.CoverageTuple
        Key = pschaincoverage_key.Key
        fvw = coverageset.CoverageSet.fromvalidatedwalker

        for i, offset in enumerate(backOffsets):
            subLogger = logger.getChild("backtrack coverage %d" % (i, ))
            obj = fvw(w.subWalker(offset), logger=subLogger)

            if obj is None:
                return None

            v[i] = obj

        kBack = CoverageTuple(reversed(v))

        if w.length() < 2:
            logger.error(
                ('V0392', (), "The InputGlyphCount is missing or incomplete."))

            return None

        inCount = w.unpack("H")
        logger.debug(('Vxxxx', (inCount, ), "Input count is %d"))

        if w.length() < 2 * inCount:
            logger.error(
                ('V0393', (),
                 "The Input Coverage offsets are missing or incomplete."))

            return None

        inOffsets = w.group("H", inCount)
        logger.debug(('Vxxxx', (inOffsets, ), "Input offsets are %s"))
        v = [None] * inCount

        for i, offset in enumerate(inOffsets):
            subLogger = logger.getChild("input coverage %d" % (i, ))
            obj = fvw(w.subWalker(offset), logger=subLogger)

            if obj is None:
                return None

            v[i] = obj

        kIn = CoverageTuple(v)

        if w.length() < 2:
            logger.error(('V0394', (),
                          "The LookaheadGlyphCount is missing or incomplete."))

            return None

        lookCount = w.unpack("H")
        logger.debug(('Vxxxx', (lookCount, ), "Lookahead count is %d"))

        if w.length() < 2 * lookCount:
            logger.error(
                ('V0395', (),
                 "The Lookahead Coverage offsets are missing or incomplete."))

            return None

        lookOffsets = w.group("H", lookCount)
        logger.debug(('Vxxxx', (lookOffsets, ), "Lookahead offsets are %s"))
        v = [None] * lookCount

        for i, offset in enumerate(lookOffsets):
            subLogger = logger.getChild("lookahead coverage %d" % (i, ))
            obj = fvw(w.subWalker(offset), logger=subLogger)

            if obj is None:
                return None

            v[i] = obj

        kLook = CoverageTuple(v)
        key = Key([kBack, kIn, kLook])

        if w.length() < 2:
            logger.error(
                ('V0396', (),
                 "The count of lookup records is missing or incomplete."))

            return None

        count = w.unpack("H")
        logger.debug(('Vxxxx', (count, ), "Action count is %d"))

        group = pslookupgroup.PSLookupGroup.fromvalidatedwalker(
            w, count=count, fixupList=fixupList, logger=logger, **kwArgs)

        return cls({key: group})

    @classmethod
    def fromwalker(cls, w, **kwArgs):
        """
        Creates and returns a new PSChainCoverage from the specified walker.
        
        There is one required keyword argument:
        
            fixupList   A list, to which (lookupListIndex, fixupFunc) pairs
                        will be appended. The actual lookup won't be set in the
                        PSLookupRecord until this call is made, usually by the
                        top-level GPOS construction logic. The fixup call takes
                        one argument, the Lookup being set into it.
        
        >>> w = writer.LinkedWriter()
        >>> _testingValues[0].buildBinary(w, forGPOS=True)
        >>> ltv = lookup._testingValues
        >>> d = {
        ...   ltv[1].asImmutable(): 11,
        ...   ltv[2].asImmutable(): 25}
        >>> w.addIndexMap("lookupList_GPOS", d)
        >>> s = w.binaryString()
        >>> FL = []
        >>> obj = PSChainCoverage.frombytes(s, fixupList=FL)
        >>> d = {11: ltv[1], 25: ltv[2]}
        >>> for index, func in FL:
        ...     func(d[index])
        >>> obj == _testingValues[0]
        True
        """

        assert 'fixupList' in kwArgs
        format = w.unpack("H")
        assert format == 3
        f = coverageset.CoverageSet.fromwalker
        backOffsets = w.group("H", w.unpack("H"))
        CoverageTuple = pschaincoverage_coveragetuple.CoverageTuple
        Key = pschaincoverage_key.Key

        kBack = CoverageTuple(
            reversed([f(w.subWalker(offset)) for offset in backOffsets]))

        inOffsets = w.group("H", w.unpack("H"))
        kIn = CoverageTuple(f(w.subWalker(offset)) for offset in inOffsets)
        lookOffsets = w.group("H", w.unpack("H"))
        kLook = CoverageTuple(f(w.subWalker(offset)) for offset in lookOffsets)
        key = Key([kBack, kIn, kLook])
        count = w.unpack("H")

        group = pslookupgroup.PSLookupGroup.fromwalker(
            w, count=count, fixupList=kwArgs['fixupList'])

        return cls({key: group})

    def writeFontWorkerSource(self, s, **kwArgs):
        """
        Writes contents of lookup to provided stream 's'. Uses
        namer.bestNameForGlyphIndex if a namer is provided, otherwise uses Font
        Worker glyph index labeling ("# <id>").
        """

        namer = kwArgs.get('namer')
        # Fall back to Font Worker's "# <id>" index labeling when no namer is given
        bnfgi = (namer.bestNameForGlyphIndex if namer
                 else (lambda g: "# %d" % (g,)))

        for k in iter(self):
            coverageBacktrack = k[0]
            coverageInput = k[1]
            coverageLookahead = k[2]
            lookupList = self[k]

            if coverageBacktrack:
                for cbi in reversed(coverageBacktrack):  # N.B.!!
                    s.write("backtrackcoverage definition begin\n")
                    for g in sorted(cbi):
                        s.write("%s\n" % (bnfgi(g), ))
                    s.write("coverage definition end\n\n")

            if coverageInput:
                for cii in coverageInput:
                    s.write("inputcoverage definition begin\n")
                    for g in sorted(cii):
                        s.write("%s\n" % (bnfgi(g), ))
                    s.write("coverage definition end\n\n")

            if coverageLookahead:
                for cli in coverageLookahead:
                    s.write("lookaheadcoverage definition begin\n")
                    for g in sorted(cli):
                        s.write("%s\n" % (bnfgi(g), ))
                    s.write("coverage definition end\n\n")

            s.write("coverage")
            for lkp in lookupList:
                s.write("\t%d,%d" %
                        (lkp.sequenceIndex + 1, lkp.lookup.sequence))
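Both `fromwalker()` and `fromvalidatedwalker()` above defer lookup resolution through the required `fixupList` keyword: the walker appends (lookupListIndex, fixupFunc) pairs, and the caller resolves them once the full lookup list is known. The sketch below mirrors the doctest in `fromwalker()`; `binaryData` and `lookupList` are hypothetical placeholders for the subtable bytes and the index-to-Lookup mapping.

# binaryData: the subtable's bytes; lookupList: mapping of lookup index -> Lookup
fixupList = []
obj = PSChainCoverage.frombytes(binaryData, fixupList=fixupList)

# Later, once the GPOS lookup list is available, resolve the deferred lookups.
for lookupIndex, fixupFunc in fixupList:
    fixupFunc(lookupList[lookupIndex])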
Example #6
class Ligature(dict, metaclass=mapmeta.FontDataMetaclass):
    """
    Ligature substitution subtables for a GSUB table. These are dicts whose keys
    are GlyphTuples and whose values are glyph indices. There is one attribute:
    
        keyOrder    A list of keys for the dict in their order of application.
                    Note that this class has a custom __iter__() method that
                    guarantees the keys will always be produced in an order
                    that respects this keyOrder list; see the docstring for the
                    __iter__() method for more details.
    
    >>> _testingValues[1].pprint()
    Ligature_GlyphTuple((4, 11, 29)): 97
    
    >>> _testingValues[1].pprint(namer=namer.testingNamer())
    (xyz5, xyz12, xyz30): afii60002
    
    >>> _testingValues[2].pprint(namer=namer.testingNamer())
    (xyz6, xyz10): xyz33
    (xyz6, xyz4): xyz32
    (xyz12, xyz13): xyz14
    """
    
    #
    # Class definition variables
    #
    
    mapSpec = dict(
        item_pprintlabelnosort = True,  # we have a custom __iter__()
        item_renumberdeepkeysnoshrink = True,
        item_renumberdirectvalues = True,
        item_usenamerforstr = True,
        item_valueisoutputglyph = True,
        map_maxcontextfunc = (lambda d: utilities.safeMax(len(k) for k in d)),
        map_validatefunc_partial = _validate)
    
    attrSpec = dict(
        keyOrder = dict(
            attr_followsprotocol = True,
            attr_ignoreforcomparisons = True,
            attr_initfunc = _GlyphList))
    
    attrSorted = ()
    
    kind = ('GSUB', 4)
    kindString = "Ligature substitution table"
    
    #
    # Methods
    #
    
    def __iter__(self):
        """
        Returns an iterator over keys. The keys are returned sorted by first
        glyph in the key, and then as specified by the keyOrder list. Any keys
        not present in the keyOrder list will be returned last in their
        respective first-glyph groups.
        
        >>> for k in _testingValues[2]: print(k)
        (5, 9)
        (5, 3)
        (11, 12)
        """
        
        actuals = set(super(Ligature, self).__iter__())
        ko = list(self.keyOrder)
        ko.extend(actuals - set(ko))
        firsts = {k[0] for k in actuals}
        
        for first in sorted(firsts):
            it = (x for x in ko if x[0] == first)
            
            for k, g in itertools.groupby(it, key=operator.itemgetter(0)):
                for obj in g:
                    yield obj
    
    @staticmethod
    def _classTableFromKeyGroup(keyGroup, nm):
        """
        Creates and returns a new ClassTable object based on an analysis of the
        specified keyGroup, which should be a dict mapping input glyph tuples
        to ligature glyphs. This analysis takes into account sharing classes
        for glyphs whose behaviors are similar enough.
        
        Note that partial overlaps should already have been resolved (i.e. the
        keyGroup passed in should have passed a prior _separatedKeys() call)
        before this method is called.
        
        <<< tm = _getTestDataModule()
        <<< ligObj = tm.makeGSUBObj()
        <<< postObj = tm.makePOSTObj()
        <<< vKeyGroups = ligObj._separatedKeys()
        <<< len(vKeyGroups)
        1
        <<< nm = lambda n: postObj[n]
        <<< Ligature._classTableFromKeyGroup(ligObj, nm).pprint()
        4: group candrabindudeva
        5: group candrabindudeva
        11: group iideva
        16: group iideva
        17: group iideva
        19: group iideva
        20: group iideva
        21: group iideva
        22: group iideva
        23: group iideva
        65: group iivowelsigndeva
        70: group iivowelsigndeva
        71: group iivowelsigndeva
        72: group iivowelsigndeva
        73: group iivowelsigndeva
        74: group iivowelsigndeva
        75: group iivowelsigndeva
        76: group iivowelsigndeva
        77: group iivowelsigndeva
        110: group iideva
        122: group iideva
        125: group iideva
        126: group iideva
        128: group iideva
        129: group iideva
        130: group iideva
        131: group iideva
        132: group iideva
        158: group rephdeva
        412: group rephdeva
        """
        
        from fontio3.morx import classtable
        ct = classtable.ClassTable()
        d = {}
        
        for inTuple, lig in keyGroup.items():
            currState = "SOT"
            
            for i, glyph in enumerate(inTuple):
                sr = d.setdefault(currState, {})
                
                if i == (len(inTuple) - 1):
                    t = ("SOT", lig)
                
                else:
                    if i:
                        s = "%s-%d" % (currState, glyph)
                    else:
                        s = "Saw %d" % (glyph,)
                    
                    t = (s, None)
                
                sr[glyph] = t
                currState = t[0]
        
        leafTriggers = {k[-1] for k in keyGroup}
        dTailSames = {}
        
        for trigger in leafTriggers:
            s = {
              stateName
              for stateName, dSub in d.items()
              if (stateName != "SOT") and (trigger in dSub)}
            
            dTailSames.setdefault(frozenset(s), set()).add(trigger)
        
        for tailSameSet in dTailSames.values():
            if len(tailSameSet) == 1:
                trigger = tailSameSet.pop()
                ct[trigger] = nm(trigger)
            
            else:
                s = "group %s" % (nm(min(tailSameSet)),)
                
                for trigger in tailSameSet:
                    ct[trigger] = s
        
        headTriggers = set(d) - {'SOT'}
        dHeadSames = {}
        
        for trigger in headTriggers:
            dSub = d[trigger]
            dHeadSames.setdefault(frozenset(dSub), set()).add(trigger)
        
        for headSameSet in dHeadSames.values():
            if len(headSameSet) > 1:
                glyphs = {int(s[4:]) for s in headSameSet if '-' not in s}
                
                if glyphs:
                    s = "group %s" % (nm(min(glyphs)),)
                    
                    for glyph in glyphs:
                        ct[glyph] = s
        
        allSingleGlyphs = {glyph for key in keyGroup for glyph in key}
        
        for glyph in allSingleGlyphs:
            if glyph not in ct:
                ct[glyph] = nm(glyph)
        
        return ct
    
    def _separatedKeys(self):
        """
        Returns a list of sets of keys. The union of all sets in the list is
        the same as the set of self. The reason for this is to separate keys
        which have potential intersecting effects and to segregate them into
        their own separate AAT ligature table. This is important since AAT does
        not allow control over the ordering in the same way OpenType does, so
        it's possible to have an OpenType Ligature object that cannot be
        represented in a single-pass AAT ligature subtable. For instance, if
        there are rules for both "ff" and "ffi", and the "ffi" rule has higher
        priority, a single AAT subtable must not form the "ff" ligature first,
        because the "ffi" rule expects simple, un-ligated glyphs as its input.
        This results from AAT's single-pass approach.
        
        >>> k1 = GT(['f', 'f', 'i'])
        >>> k2 = GT(['f', 'f'])
        >>> Ligature({k1: 30, k2: 40}, keyOrder=[k1, k2])._separatedKeys()
        [[Ligature_GlyphTuple(('f', 'f', 'i'))], [Ligature_GlyphTuple(('f', 'f'))]]
        >>> Ligature({k1: 30, k2: 40}, keyOrder=[k2, k1])._separatedKeys()
        [[Ligature_GlyphTuple(('f', 'f')), Ligature_GlyphTuple(('f', 'f', 'i'))]]
        """
        
        fullList = list(self)  # order is important from obj's __iter__()
        rv = []
    
        while fullList:
            v = []
            startSet = set()
        
            for key in fullList:
                testSet = {key[n:] for n in range(1, len(key))}
            
                if not (testSet & startSet):
                    v.append(key)
                    startSet.update(key[:n] for n in range(1, len(key)))
        
            thisGroup = set(v)
        
            for i in range(len(fullList) - 1, -1, -1):
                if fullList[i] in thisGroup:
                    del fullList[i]
        
            rv.append(v)
    
        return rv
    
    def asAAT(self, **kwArgs):
        """
        Returns a list of AAT 'morx' subtable objects that have the same effect
        as this Ligature object.
        <<<
        <<< tm = _getTestDataModule()
        <<< ligObj = tm.makeGSUBObj()
        <<< v = ligObj.asAAT()
        <<< len(v)
        1
        <<< v[0].pprint(onlySignificant=True)
        State 'Start of text':
          Class 'group glyph 11':
            Remember this glyph, then go to state 'Saw group glyph 11'
          Class 'group glyph 65':
            Remember this glyph, then go to state 'Saw group glyph 65'
        State 'Start of line':
          Class 'group glyph 11':
            Remember this glyph, then go to state 'Saw group glyph 11'
          Class 'group glyph 65':
            Remember this glyph, then go to state 'Saw group glyph 65'
        State 'Saw group glyph 11':
          Class 'group glyph 4':
            Remember this glyph, then go to state 'Start of text' after doing these substitutions:
              (11, 4) becomes (413, None)
              (11, 5) becomes (413, None)
              (16, 4) becomes (414, None)
              (16, 5) becomes (414, None)
              (17, 4) becomes (415, None)
              (17, 5) becomes (415, None)
              (19, 4) becomes (416, None)
              (19, 5) becomes (416, None)
              (20, 4) becomes (417, None)
              (20, 5) becomes (417, None)
              (21, 4) becomes (418, None)
              (21, 5) becomes (418, None)
              (22, 4) becomes (419, None)
              (22, 5) becomes (419, None)
              (23, 4) becomes (420, None)
              (23, 5) becomes (420, None)
              (110, 4) becomes (429, None)
              (110, 5) becomes (429, None)
              (122, 4) becomes (421, None)
              (122, 5) becomes (421, None)
              (125, 4) becomes (422, None)
              (125, 5) becomes (422, None)
              (126, 4) becomes (423, None)
              (126, 5) becomes (423, None)
              (128, 4) becomes (424, None)
              (128, 5) becomes (424, None)
              (129, 4) becomes (425, None)
              (129, 5) becomes (425, None)
              (130, 4) becomes (426, None)
              (130, 5) becomes (426, None)
              (131, 4) becomes (427, None)
              (131, 5) becomes (427, None)
              (132, 4) becomes (428, None)
              (132, 5) becomes (428, None)
        State 'Saw group glyph 65':
          Class 'group glyph 158':
            Remember this glyph, then go to state 'Start of text' after doing these substitutions:
              (65, 158) becomes (386, None)
              (65, 412) becomes (387, None)
              (70, 158) becomes (389, None)
              (70, 412) becomes (390, None)
              (71, 158) becomes (392, None)
              (71, 412) becomes (393, None)
              (72, 158) becomes (395, None)
              (72, 412) becomes (396, None)
              (73, 158) becomes (398, None)
              (73, 412) becomes (399, None)
              (74, 158) becomes (401, None)
              (74, 412) becomes (402, None)
              (75, 158) becomes (404, None)
              (75, 412) becomes (405, None)
              (76, 158) becomes (407, None)
              (76, 412) becomes (408, None)
              (77, 158) becomes (410, None)
              (77, 412) becomes (411, None)
          Class 'group glyph 4':
            Remember this glyph, then go to state 'Start of text' after doing these substitutions:
              (65, 4) becomes (385, None)
              (65, 5) becomes (385, None)
              (70, 4) becomes (388, None)
              (70, 5) becomes (388, None)
              (71, 4) becomes (391, None)
              (71, 5) becomes (391, None)
              (72, 4) becomes (394, None)
              (72, 5) becomes (394, None)
              (73, 4) becomes (397, None)
              (73, 5) becomes (397, None)
              (74, 4) becomes (400, None)
              (74, 5) becomes (400, None)
              (75, 4) becomes (403, None)
              (75, 5) becomes (403, None)
              (76, 4) becomes (406, None)
              (76, 5) becomes (406, None)
              (77, 4) becomes (409, None)
              (77, 5) becomes (409, None)
        Class table:
          4: group glyph 4
          5: group glyph 4
          11: group glyph 11
          16: group glyph 11
          17: group glyph 11
          19: group glyph 11
          20: group glyph 11
          21: group glyph 11
          22: group glyph 11
          23: group glyph 11
          65: group glyph 65
          70: group glyph 65
          71: group glyph 65
          72: group glyph 65
          73: group glyph 65
          74: group glyph 65
          75: group glyph 65
          76: group glyph 65
          77: group glyph 65
          110: group glyph 11
          122: group glyph 11
          125: group glyph 11
          126: group glyph 11
          128: group glyph 11
          129: group glyph 11
          130: group glyph 11
          131: group glyph 11
          132: group glyph 11
          158: group glyph 158
          412: group glyph 158
        Mask value: (no data)
        Coverage: (no data)
        """
        
        if not self:
            return []
        
        from fontio3.morx import (
          entry_ligature,
          glyphtuple,
          glyphtupledict,
          ligature,
          staterow_ligature)
        
        if 'namerObj' in kwArgs:
            nm = kwArgs['namerObj'].bestNameForGlyphIndex
        else:
            nm = (lambda n: "glyph %d" % (n,))
    
        keyGroups = self._separatedKeys()
        rv = []
        entryNOP = entry_ligature.Entry()
    
        for keyGroup in keyGroups:
            dLigPiece = {k: self[k] for k in keyGroup}
            
            # Each keyGroup represents a complete Ligature subtable. All
            # potential conflicts have already been ironed out by the
            # separation logic, so here we can just put things together.
        
            ct = self._classTableFromKeyGroup(dLigPiece, nm)
            d = {}
        
            for key in keyGroup:
                currState = 'Start of text'
            
                for i, inGlyph in enumerate(key):
                    if currState not in d:
                        d[currState] = staterow_ligature.StateRow({
                          'End of text': entryNOP,
                          'Out of bounds': entryNOP,
                          'Deleted glyph': entryNOP,
                          'End of line': entryNOP})
                
                    currClass = ct[inGlyph]
                
                    if currClass not in d[currState]:
                        if i == len(key) - 1:
                            tIn = glyphtuple.GlyphTupleInput(key)
                            v = [None] * len(tIn)
                            v[0] = self[key]
                            tOut = glyphtuple.GlyphTupleOutput(v)
                            gtd = glyphtupledict.GlyphTupleDict({tIn: tOut})
                        
                            d[currState][currClass] = entry_ligature.Entry(
                              newState = "Start of text",
                              push = True,
                              actions = gtd)
                    
                        else:
                            if i:
                                newState = "%s-%s" % (currState, currClass)
                            else:
                                newState = "Saw %s" % (currClass,)
                    
                            d[currState][currClass] = entry_ligature.Entry(
                              newState = newState,
                              push = True)
                
                    elif i == len(key) - 1:
                        tIn = glyphtuple.GlyphTupleInput(key)
                        v = [None] * len(tIn)
                        v[0] = self[key]
                        tOut = glyphtuple.GlyphTupleOutput(v)
                        gtd = glyphtupledict.GlyphTupleDict({tIn: tOut})
                        d[currState][currClass].actions[tIn] = tOut
                
                    currState = d[currState][currClass].newState
        
            allClasses = set(ct.values())
        
            for stateName, row in d.items():
                for className in allClasses:
                    if className not in row:
                        row[className] = entryNOP
        
            d['Start of line'] = d['Start of text']
            rv.append(ligature.Ligature(d, classTable=ct))
    
        return rv
    
    def buildBinary(self, w, **kwArgs):
        """
        Adds the binary data for the Ligature object to the specified
        LinkedWriter.
        
        >>> utilities.hexdump(_testingValues[0].binaryString())
               0 | 0001 0006 0000 0001  0000                |..........      |
        
        >>> utilities.hexdump(_testingValues[1].binaryString())
               0 | 0001 0008 0001 000E  0001 0001 0004 0001 |................|
              10 | 0004 0061 0003 000B  001D                |...a......      |
        
        >>> utilities.hexdump(_testingValues[2].binaryString())
               0 | 0001 000A 0002 0012  0018 0001 0002 0005 |................|
              10 | 000B 0002 000A 0010  0001 0010 0020 0002 |............. ..|
              20 | 0009 001F 0002 0003  000D 0002 000C      |..............  |
        """
        
        if 'stakeValue' in kwArgs:
            stakeValue = kwArgs.pop('stakeValue')
            w.stakeCurrentWithValue(stakeValue)
        else:
            stakeValue = w.stakeCurrent()
        
        w.add("H", 1)  # format
        
        # Create a dict whose keys are firstGlyphs and whose values are lists,
        # in sequence order, of (fullKey, ligature) pairs. This will make the
        # writing of the sets much easier later on.
        
        fgMap = {}
        
        for k, g in itertools.groupby(self, operator.itemgetter(0)):
            fgMap[k] = [(key, self[key]) for key in g]
        
        sortedFirstGlyphs = sorted(fgMap)
        covTable = coverage.Coverage.fromglyphset(sortedFirstGlyphs)
        covStake = w.getNewStake()
        w.addUnresolvedOffset("H", stakeValue, covStake)
        w.add("H", len(sortedFirstGlyphs))
        setStakes = list(w.getNewStake() for firstGlyph in sortedFirstGlyphs)
        
        for setStake in setStakes:
            w.addUnresolvedOffset("H", stakeValue, setStake)
        
        covTable.buildBinary(w, stakeValue=covStake)
        ligStakes = {}  # firstGlyph -> list of stakes
        
        for firstGlyph, setStake in zip(sortedFirstGlyphs, setStakes):
            w.stakeCurrentWithValue(setStake)
            v = fgMap[firstGlyph]
            w.add("H", len(v))
            ligStakes[firstGlyph] = list(w.getNewStake() for obj in v)
            
            for ligStake in ligStakes[firstGlyph]:
                w.addUnresolvedOffset("H", setStake, ligStake)
        
        for firstGlyph in sortedFirstGlyphs:
            for t, ligStake in zip(fgMap[firstGlyph], ligStakes[firstGlyph]):
                w.stakeCurrentWithValue(ligStake)
                key, lig = t
                w.add("H", lig)
                w.add("H", len(key))
                w.addGroup("H", key[1:])
    
    def componentCounts(self):
        """
        Returns a dict mapping ligature glyph indices to counts of input glyphs
        that went into that ligature's making. This only counts the substantive
        contributors, and is only one-level.
        
        >>> _testingValues[2].pprint()
        Ligature_GlyphTuple((5, 9)): 32
        Ligature_GlyphTuple((5, 3)): 31
        Ligature_GlyphTuple((11, 12)): 13
        
        >>> d = _testingValues[2].componentCounts()
        >>> for ligGlyph in sorted(d):
        ...   print(ligGlyph, d[ligGlyph])
        13 2
        31 2
        32 2
        """
        
        return {g: len(t) for t, g in self.items()}
    
    def effects(self, **kwArgs):
        raise DeprecationWarning(
          "The effects() method is deprecated; "
          "please use effectsSummary() instead.")
    
    def effectsSummary(self, **kwArgs):
        """
        Returns an EffectsSummary object. If a memo dict is provided in kwArgs,
        notes are made in it so already-processed objects are not reprocessed,
        which should avoid a combinatorial explosion.
        
        >>> obj = _testingValues[2]
        >>> memo = {}
        >>> es = obj.effectsSummary(memo=memo)
        >>> es.pprint()
        3:
          31
        5:
          31
          32
        9:
          32
        11:
          13
        12:
          13
        >>> id(obj) in memo
        True
        """
        
        memo = kwArgs.pop('memo', {})
        
        if id(self) in memo:
            return memo[id(self)]
        
        r = EffectsSummary()
        
        for tIn, gOut in self.items():
            for gIn in tIn:
                r[gIn].add(gOut)
        
        memo[id(self)] = r
        return r
    
    @classmethod
    def fromValidatedFontWorkerSource(cls, fws, **kwArgs):
        """
        Returns a new Ligature constructed from the specified FontWorkerSource,
        with source validation.
        
        >>> logger = utilities.makeDoctestLogger("FW_test")
        >>> _test_FW_fws.goto(1) # go back to start of file
        >>> l = Ligature.fromValidatedFontWorkerSource(_test_FW_fws, namer=_test_FW_namer, logger=logger)
        >>> l.pprint()
        Ligature_GlyphTuple((2, 2, 3)): 11
        Ligature_GlyphTuple((2, 2)): 15
        Ligature_GlyphTuple((2, 3)): 7
        Ligature_GlyphTuple((2, 5)): 17
        >>> l = Ligature.fromValidatedFontWorkerSource(_test_FW_fws2, namer=_test_FW_namer, logger=logger)
        FW_test.ligature - WARNING - line 3 -- incorrect number of tokens, expected 2 or more, found 1
        FW_test.ligature - WARNING - line 6 -- ignoring duplicated entry for 'f,f,i'
        FW_test.ligature - WARNING - line 8 -- glyph 'B' not found
        FW_test.ligature - WARNING - line 8 -- glyph 'C' not found
        FW_test.ligature - ERROR - line 0 -- did not find matching 'subtable end/lookup end'
        >>> l.pprint()
        Ligature_GlyphTuple((2, 2, 3)): 11
        Ligature_GlyphTuple((2, 2)): 15
        Ligature_GlyphTuple((2, 3)): 7
        Ligature_GlyphTuple((2, 5)): 17
        """
        
        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("ligature")
        namer = kwArgs['namer']
        terminalStrings = ('subtable end', 'lookup end')
        startingLineNumber = fws.lineNumber
        sawTerminalString = False
        keyOrder = _GlyphList()
        ligatureDict = {}
        
        for line in fws:
            if line in terminalStrings:
                sawTerminalString = True
                break
            
            if len(line) > 0:
                tokens = [x.strip() for x in line.split('\t')]
                
                if len(tokens) < 2:
                    logger.warning((
                      'V0957',
                      (fws.lineNumber, len(tokens)), 
                      "line %d -- incorrect number of tokens, expected "
                      "2 or more, found %d"))
                    
                    continue

                glyphsOK = True
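                # tokens[0] names the ligature (output) glyph; the remaining
                # tokens name its component glyphs, in order.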
                glyphIndices = [namer.glyphIndexFromString(t) for t in tokens]
                
                for i in range(len(tokens)):
                    if glyphIndices[i] is None:
                        logger.warning((
                          'V0956',
                          (fws.lineNumber, tokens[i]),
                          "line %d -- glyph '%s' not found"))
                        
                        glyphsOK = False

                if glyphsOK:
                    key = ligature_glyphtuple.Ligature_GlyphTuple(glyphIndices[1:])
                    value = glyphIndices[0]
                    
                    if key not in ligatureDict:  # there are occasionally duplicates
                        ligatureDict[key] = value
                        keyOrder.append(key)
                    
                    else:
                        keyStr = ",".join([t for t in tokens[1:]])
                        
                        logger.warning((
                          'V0963',
                          (fws.lineNumber, keyStr),
                          "line %d -- ignoring duplicated entry for '%s'"))
        
        if not sawTerminalString:
            logger.error((
              'V0958',
              (startingLineNumber, "/".join(terminalStrings)),
              'line %d -- did not find matching \'%s\''))

        keyOrder.doCanonicalKeyOrdering()
        return cls(ligatureDict, keyOrder=keyOrder)

    @classmethod
    def fromvalidatedwalker(cls, w, **kwArgs):
        """
        Creates and returns a new Ligature object from the specified walker,
        doing source validation.
        
        >>> logger = utilities.makeDoctestLogger("ligature_test")
        >>> fvb = Ligature.fromvalidatedbytes
        >>> s = _testingValues[2].binaryString()
        >>> obj = fvb(s, logger=logger)
        ligature_test.ligature - DEBUG - Walker has 46 remaining bytes.
        ligature_test.ligature.coverage - DEBUG - Walker has 36 remaining bytes.
        ligature_test.ligature.coverage - DEBUG - Format is 1, count is 2
        ligature_test.ligature.coverage - DEBUG - Raw data are [5, 11]
        
        >>> h = utilities.fromhex
        >>> fvb(s[:2], logger=logger)
        ligature_test.ligature - DEBUG - Walker has 2 remaining bytes.
        ligature_test.ligature - ERROR - Insufficient bytes.
        
        >>> fvb(h("00 02") + s[2:], logger=logger)
        ligature_test.ligature - DEBUG - Walker has 46 remaining bytes.
        ligature_test.ligature - ERROR - Expected format 1, but got format 2.
        
        >>> fvb(s[:6], logger=logger)
        ligature_test.ligature - DEBUG - Walker has 6 remaining bytes.
        ligature_test.ligature.coverage - DEBUG - Walker has 0 remaining bytes.
        ligature_test.ligature.coverage - ERROR - Insufficient bytes.
        
        >>> s = h("00 01 00 08 00 01 00 12 00 01 00 02 00 05 00 0B")
        >>> fvb(s, logger=logger)
        ligature_test.ligature - DEBUG - Walker has 16 remaining bytes.
        ligature_test.ligature.coverage - DEBUG - Walker has 8 remaining bytes.
        ligature_test.ligature.coverage - DEBUG - Format is 1, count is 2
        ligature_test.ligature.coverage - DEBUG - Raw data are [5, 11]
        ligature_test.ligature - ERROR - The LigSetCount (1) does not match the length of the Coverage (2).
        """
        
        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("ligature")
        
        logger.debug((
          'V0001',
          (w.length(),),
          "Walker has %d remaining bytes."))
        
        if w.length() < 6:
            logger.error(('V0004', (), "Insufficient bytes."))
            return None
        
        format, covOffset, setCount = w.unpack("3H")
        
        if format != 1:
            logger.error((
              'V0002',
              (format,),
              "Expected format 1, but got format %d."))
            
            return None
        
        covTable = coverage.Coverage.fromvalidatedwalker(
          w.subWalker(covOffset),
          logger = logger,
          **kwArgs)
        
        if covTable is None:
            return None
        
        if setCount != len(covTable):
            logger.error((
              'V0430',
              (setCount, len(covTable)),
              "The LigSetCount (%d) does not match the length of "
              "the Coverage (%d)."))
            
            return None
        
        firstGlyphs = sorted(covTable)
        r = cls()
        
        if w.length() < 2 * setCount:
            logger.error((
              'V0431',
              (),
              "The LigatureSet offsets are missing or incomplete."))
            
            return None
        
        if not setCount:
            logger.warning((
              'V0432',
              (),
              "The LigSetCount is zero, so the Lookup has no effect."))
            
            return r
        
        setOffsets = w.group("H", setCount)
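        # Each LigatureSet offset pairs, in order, with one first glyph from
        # the sorted Coverage table.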
        
        for firstGlyph, setOffset in zip(firstGlyphs, setOffsets):
            wSet = w.subWalker(setOffset)
            setLogger = logger.getChild("first glyph %d" % (firstGlyph,))
            
            if wSet.length() < 2:
                setLogger.error((
                  'V0433',
                  (),
                  "The LigatureCount is missing or incomplete."))
                
                return None
            
            ligCount = wSet.unpack("H")
            
            if wSet.length() < 2 * ligCount:
                setLogger.error((
                  'V0434',
                  (),
                  "The Ligature offsets are missing or incomplete."))
                
                return None
            
            if not ligCount:
                setLogger.warning((
                  'V0438',
                  (),
                  "The LigatureCount is zero, so this first glyph "
                  "will not have any ligatures made."))
            
            for i, ligOffset in enumerate(wSet.group("H", ligCount)):
                wLig = wSet.subWalker(ligOffset)
                ligLogger = setLogger.getChild("array entry %d" % (i,))
                
                if wLig.length() < 4:
                    ligLogger.error((
                      'V0435',
                      (),
                      "The Ligature table header is missing or incomplete."))
                    
                    return None
                
                ligGlyph, fullCount = wLig.unpack("2H")
                
                if not fullCount:
                    ligLogger.error((
                      'V0439',
                      (),
                      "The CompCount is zero, which is invalid."))
                    
                    return None
                
                elif fullCount == 1:
                    ligLogger.warning((
                      'V0440',
                      (),
                      "The CompCount is one, which means substitution on "
                      "the first glyph alone. Use a Single Lookup instead."))
                
                if wLig.length() < 2 * (fullCount - 1):
                    ligLogger.error((
                      'V0436',
                      (),
                      "The Component array is missing or incomplete."))
                    
                    return None
                
                key = GT((firstGlyph,) + wLig.group("H", fullCount - 1))
                
                if key in r:
                    ligLogger.warning((
                      'V0437',
                      (key,),
                      "There are duplicate entries with key %s."))
                
                else:
                    r.keyOrder.append(key)
                    r[key] = ligGlyph
        
        return r
    
    @classmethod
    def fromwalker(cls, w, **kwArgs):
        """
        Creates and returns a new Ligature object from the specified walker.
        
        >>> _testingValues[0] == Ligature.frombytes(_testingValues[0].binaryString())
        True
        
        >>> _testingValues[1] == Ligature.frombytes(_testingValues[1].binaryString())
        True
        
        >>> _testingValues[2] == Ligature.frombytes(_testingValues[2].binaryString())
        True
        """
        
        format, covOffset, setCount = w.unpack("3H")
        
        if format != 1:
            raise ValueError("Unknown format for Ligature subtable: %s" % (format,))
        
        covTable = coverage.Coverage.fromwalker(w.subWalker(covOffset))
        
        if setCount != len(covTable):
            raise ValueError("Internal conflict in Ligature data!")
        
        firstGlyphs = sorted(covTable)
        r = cls()
        setOffsets = w.group("H", setCount)
        
        for firstGlyph, setOffset in zip(firstGlyphs, setOffsets):
            wSet = w.subWalker(setOffset)
            
            for ligOffset in wSet.group("H", wSet.unpack("H")):
                wLig = wSet.subWalker(ligOffset)
                ligGlyph, fullCount = wLig.unpack("2H")
                key = GT((firstGlyph,) + wLig.group("H", fullCount - 1))
                
                if key not in r:  # there are occasionally duplicates
                    r.keyOrder.append(key)
                    r[key] = ligGlyph
        
        return r
    
    def glyphsRenumbered(self, oldToNew, **kwArgs):
        """
        We put a 'shim' layer here to ensure that keyOrder is renumbered first,
        since the __iter__() method uses it to walk the actual dict.
        """
        
        r = mapmeta.M_glyphsRenumbered(self, oldToNew, **kwArgs)
        s = set(super(Ligature, r).__iter__())
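        # Keep only the keyOrder entries that still exist as keys in the
        # renumbered dict.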
        r.keyOrder[:] = [x for x in r.keyOrder if x in s]
        return r
    
    def run(self, glyphArray, **kwArgs):
        raise DeprecationWarning(
          "The run() method is deprecated; "
          "please use runOne() instead.")
    
    def runOne(self, glyphArray, startIndex, **kwArgs):
        """
        Do the processing for a single (initial) glyph in a glyph array. This
        method is called by the Lookup object's run() method (and possibly by
        actions within contextual or related subtables).
        
        This method returns a pair: the new output GlyphList, and a count of
        the number of glyph indices involved (or zero, if no action happened).
        
        Note that the igsFunc and useEmpties keyword arguments are used in
        this method.
        
        >>> inTuple = ligature_glyphtuple.Ligature_GlyphTuple([4, 5, 7])
        >>> obj = Ligature({inTuple: 77}, keyOrder=_GlyphList([(4, 5, 7)]))
        >>> ga = runningglyphs.GlyphList.fromiterable([3, 4, 5, 60, 7, 8])
        >>> igsFunc = lambda *a, **k: [False, False, False, True, False, False]
        >>> r, count = obj.runOne(ga, 0, igsFunc=igsFunc)
        >>> count
        0
        
        When no match is found, the same (input) glyphArray is returned:
        
        >>> r is ga
        True
        
        >>> r, count = obj.runOne(ga, 1, igsFunc=igsFunc)
        >>> count
        4
        
        >>> r.pprint()
        0:
          Value: 3
          originalOffset: 0
        1:
          Value: 77
          originalOffset: 1
        2:
          Value: -1
          originalOffset: 2
        3:
          Value: 60
          originalOffset: 3
        4:
          Value: -1
          originalOffset: 4
        5:
          Value: 8
          originalOffset: 5
        
        >>> r[1].ligInputOffsets
        (1, 2, 4)
        
        >>> r, count = obj.runOne(ga, 1, igsFunc=igsFunc, useEmpties=False)
        >>> count
        2
        
        >>> r.pprint()
        0:
          Value: 3
          originalOffset: 0
        1:
          Value: 77
          originalOffset: 1
        2:
          Value: 60
          originalOffset: 3
        3:
          Value: 8
          originalOffset: 5
        
        Note that the ligInputOffsets will refer to offsets whose associated
        glyphs are no longer present!
        
        >>> r[1].ligInputOffsets
        (1, 2, 4)
        """
        
        igs = kwArgs['igsFunc'](glyphArray, **kwArgs)
        useEmpties = kwArgs.get('useEmpties', True)
        firstGlyph = glyphArray[startIndex]
        
        # To make comparisons in the loop easier, we use the igs data to
        # extract just the non-ignorables starting with startIndex into a
        # separate list called vNonIgs.
        
        v = [
          (g, i)
          for i, g in enumerate(glyphArray[startIndex:], start=startIndex)
          if (not igs[i])]
        
        vNonIgs = [x[0] for x in v]
        vBackMap = [x[1] for x in v]
        G = runningglyphs.Glyph
        
        for key in self:  # custom order, remember...
            if firstGlyph != key[0]:
                continue
            
            if len(key) > len(vNonIgs):
                continue
            
            if not all(a == b for a, b in zip(key, vNonIgs)):
                continue
            
            # If we get here the key is a match
            
            r = glyphArray.fromiterable(glyphArray)  # preserves offsets
            it = (g.shaperClass for g in vNonIgs[:len(key)] if g.shaperClass)
            sc = '+'.join(it) or None
            lastIndex = None
            lio = []
            toDel = []
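            # The first matched position receives the new ligature glyph;
            # every other matched position either becomes an empty glyph (-1)
            # or, when useEmpties is False, is queued for deletion.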
            
            for i in vBackMap[:len(key)]:
                if i == startIndex:
                    r[i] = G(
                      self[key],
                      originalOffset = firstGlyph.originalOffset,
                      shaperClass = sc)
                    
                    lio.append(firstGlyph.originalOffset)
                
                else:
                    lastIndex = i
                    lio.append(r[i].originalOffset)
                    
                    if useEmpties:
                        r[i] = G(-1, originalOffset=r[i].originalOffset)
                    else:
                        toDel.append(i)
            
            for i in reversed(toDel):
                del r[i]
            
            r[startIndex].ligInputOffsets = tuple(lio)
            assert lastIndex is not None
            count = lastIndex - startIndex + 1
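            # When empties are not kept, the component slots have been deleted
            # from the output, so shrink the reported count accordingly.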
            
            if not useEmpties:
                count -= (len(key) - 1)
            
            return (r, count)
        
        return (glyphArray, 0)

    def writeFontWorkerSource(self, s, **kwArgs):
        """
        Writes contents of lookup to provided stream 's'. Uses
        namer.bestNameForGlyphIndex if a namer is provided, otherwise
        uses Font Worker glyph index labeling ("# <id>").
        """
        namer = kwArgs.get('namer')

        if namer is not None:
            bnfgi = namer.bestNameForGlyphIndex
        else:
            # Fall back to Font Worker glyph index labeling, per the docstring.
            bnfgi = lambda g: "# %d" % (g,)

        for inTuple in self:
            outGlyph = self[inTuple]
            inTupleStr = "\t".join(bnfgi(g) for g in inTuple)
            s.write("%s\t%s\n" % (bnfgi(outGlyph), inTupleStr))
Ejemplo n.º 7
0
    def fromvalidatedwalker(cls, w, **kwArgs):
        """
        Creates and returns a new Format1 object from the specified walker,
        doing source validation.
        """

        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("format1")

        logger.debug(
            ('V0001', (w.length(), ), "Walker has %d remaining bytes."))

        if w.length() < 20:
            logger.error(('V0004', (), "Insufficient bytes."))
            return None

        stBaseOffset = w.getOffset()
        t = w.unpack("5L")
        numClasses, oCT, oSA, oET, oVT = t

        if numClasses < 4:
            logger.error(
                ('V0634', (numClasses, ),
                 "The number of classes in the state table must be at least "
                 "four, but is only %d."))

            return None

        firstValid = w.getOffset() - stBaseOffset
        lastValidPlusOne = firstValid + w.length()

        if any(o < firstValid or o >= lastValidPlusOne for o in t[1:]):
            logger.error(
                ('V0635', (),
                 "One or more offsets to state table components are outside "
                 "the bounds of the state table itself."))

            return None

        wCT, wSA, wET, wVT = stutils.offsetsToSubWalkers(
            w.subWalker(0), *t[1:])

        wETCopy = wET.subWalker(0, relative=True)
        v = wETCopy.unpackRest("3H", strict=False)
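        # numStates is one more than the largest state index referenced by any
        # entry record; numEntries is the total number of entry records.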
        numStates = 1 + utilities.safeMax(x[0] for x in v)
        numEntries = len(v)

        nsObj = namestash.NameStash.readormake_validated(w,
                                                         (oCT, oSA, oET, oVT),
                                                         numStates,
                                                         numClasses,
                                                         logger=logger)

        if nsObj is None:
            return None

        stateNames = nsObj.allStateNames()
        classNames = nsObj.allClassNames()

        classTable = classtable.ClassTable.fromvalidatedwalker(
            wCT, classNames=classNames, logger=logger)

        if classTable is None:
            return None

        kwArgs.pop('classTable', None)

        r = cls({},
                classTable=classTable,
                **utilities.filterKWArgs(cls, kwArgs))

        # build value table
        valueDict = {}
        fvw = valuetuple.ValueTuple.fromvalidatedwalker
        index = 0

        while wVT.stillGoing():
            obj = fvw(wVT, logger=logger.getChild("value %d" % (index, )))

            if obj is None:
                return None

            valueDict[index] = obj
            index += 1

        # build entry table
        entries = []
        index = 0
        fvw = entry.Entry.fromvalidatedwalker

        while index < numEntries:
            obj = fvw(wET,
                      stateNames=stateNames,
                      valueDict=valueDict,
                      logger=logger.getChild("entry %d" % (index, )))

            if obj is None:
                return None

            entries.append(obj)
            index += 1

        # finally, build state table
        fvw = staterow.StateRow.fromvalidatedwalker

        for stateName in stateNames:
            obj = fvw(wSA,
                      classNames=classNames,
                      entries=entries,
                      logger=logger.getChild("state %s" % (stateName, )))

            if obj is None:
                return None

            r[stateName] = obj

        return r
Ejemplo n.º 8
0
    def fromvalidatedwalker(cls, w, **kwArgs):
        """
        Creates and returns a new MarkToBase object from the specified walker,
        doing source validation.
        
        >>> obj, ed = _makeTest()
        >>> s = obj.binaryString()
        >>> logger = utilities.makeDoctestLogger("marktobase_test")
        >>> fvb = MarkToBase.fromvalidatedbytes
        >>> obj = fvb(s, logger=logger)
        marktobase_test.marktobase - DEBUG - Walker has 106 remaining bytes.
        marktobase_test.marktobase.mark.coverage - DEBUG - Walker has 94 remaining bytes.
        marktobase_test.marktobase.mark.coverage - DEBUG - Format is 2, count is 1
        marktobase_test.marktobase.mark.coverage - DEBUG - Raw data are [(12, 15, 0)]
        marktobase_test.marktobase.base.coverage - DEBUG - Walker has 84 remaining bytes.
        marktobase_test.marktobase.base.coverage - DEBUG - Format is 1, count is 2
        marktobase_test.marktobase.base.coverage - DEBUG - Raw data are [40, 45]
        marktobase_test.marktobase.markarray - DEBUG - Walker has 76 remaining bytes.
        marktobase_test.marktobase.markarray.glyph 12.markrecord - DEBUG - Walker has 74 bytes remaining.
        marktobase_test.marktobase.markarray.glyph 12.markrecord.anchor_coord - DEBUG - Walker has 48 remaining bytes.
        marktobase_test.marktobase.markarray.glyph 13.markrecord - DEBUG - Walker has 70 bytes remaining.
        marktobase_test.marktobase.markarray.glyph 13.markrecord.anchor_coord - DEBUG - Walker has 42 remaining bytes.
        marktobase_test.marktobase.markarray.glyph 14.markrecord - DEBUG - Walker has 66 bytes remaining.
        marktobase_test.marktobase.markarray.glyph 14.markrecord.anchor_coord - DEBUG - Walker has 36 remaining bytes.
        marktobase_test.marktobase.markarray.glyph 15.markrecord - DEBUG - Walker has 62 bytes remaining.
        marktobase_test.marktobase.markarray.glyph 15.markrecord.anchor_coord - DEBUG - Walker has 30 remaining bytes.
        marktobase_test.marktobase.basearray - DEBUG - Walker has 58 remaining bytes.
        marktobase_test.marktobase.basearray.glyph 40.baserecord - DEBUG - Walker has 56 bytes remaining.
        marktobase_test.marktobase.basearray.glyph 40.baserecord.[0].anchor_coord - DEBUG - Walker has 24 remaining bytes.
        marktobase_test.marktobase.basearray.glyph 40.baserecord.[1].anchor_coord - DEBUG - Walker has 18 remaining bytes.
        marktobase_test.marktobase.basearray.glyph 45.baserecord - DEBUG - Walker has 52 bytes remaining.
        marktobase_test.marktobase.basearray.glyph 45.baserecord.[0].anchor_coord - DEBUG - Walker has 12 remaining bytes.
        marktobase_test.marktobase.basearray.glyph 45.baserecord.[1].anchor_coord - DEBUG - Walker has 6 remaining bytes.
        
        >>> fvb(s[:5], logger=logger)
        marktobase_test.marktobase - DEBUG - Walker has 5 remaining bytes.
        marktobase_test.marktobase - ERROR - Insufficient bytes.
        """

        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("marktobase")

        logger.debug(
            ('V0001', (w.length(), ), "Walker has %d remaining bytes."))

        if w.length() < 12:
            logger.error(('V0004', (), "Insufficient bytes."))
            return None

        format = w.unpack("H")

        if format != 1:
            logger.error(
                ('V0002', (format, ),
                 "Was expecting format 1, but got format %d instead."))

            return None

        markCovTable = coverage.Coverage.fromvalidatedwalker(
            w.subWalker(w.unpack("H")),
            logger=logger.getChild("mark"),
            **kwArgs)

        if markCovTable is None:
            return None

        baseCovTable = coverage.Coverage.fromvalidatedwalker(
            w.subWalker(w.unpack("H")),
            logger=logger.getChild("base"),
            **kwArgs)

        if baseCovTable is None:
            return None

        classCount, mOffset, bOffset = w.unpack("3H")
        kwArgs.pop('classCount', None)
        kwArgs.pop('coverage', None)
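        # Discard any caller-supplied classCount/coverage from kwArgs so they
        # don't collide with the values read from this subtable when the
        # MarkArray and BaseArray factories are called below.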

        m = markarray.MarkArray.fromvalidatedwalker(w.subWalker(mOffset),
                                                    coverage=markCovTable,
                                                    logger=logger,
                                                    **kwArgs)

        if m is None:
            return None

        actualCount = 1 + utilities.safeMax(x.markClass for x in m.values())

        if classCount != actualCount:
            logger.error(('V0343', (
                actualCount, classCount
            ), "The number of classes should be %d (based on the actual values "
                          "in the MarkRecords), but is actually %d."))

            return None

        b = basearray.BaseArray.fromvalidatedwalker(w.subWalker(bOffset),
                                                    coverage=baseCovTable,
                                                    classCount=classCount,
                                                    logger=logger,
                                                    **kwArgs)

        if b is None:
            return None

        return cls(mark=m, base=b)
Ejemplo n.º 9
0
 def buildBinary(self, w, **kwArgs):
     """
     Adds the binary data for the PairClasses object to the specified
     LinkedWriter.
     
     >>> utilities.hexdump(_testingValues[0].binaryString())
            0 | 0002 004C 0081 0031  0058 006E 0003 0002 |...L...1.X.n....|
           10 | 0000 0000 0000 0000  0000 0000 0000 0000 |................|
           20 | 0000 0000 0000 0000  0000 0000 0000 0000 |................|
           30 | 0000 FFF6 0000 0000  0000 0084 0000 0000 |................|
           40 | 0000 001E 0084 0000  0084 0078 0001 0004 |...........x....|
           50 | 0005 0006 0007 000F  0002 0003 0005 0006 |................|
           60 | 0001 0007 0007 0002  000F 000F 0001 0002 |................|
           70 | 0001 0014 0016 0001  000C 0014 0002 BDF0 |................|
           80 | 0020 3000 000C 0012  0001 8C04           |. 0.........    |
     """
     
     if 'stakeValue' in kwArgs:
         stakeValue = kwArgs.pop('stakeValue')
         w.stakeCurrentWithValue(stakeValue)
     else:
         stakeValue = w.stakeCurrent()
     
     w.add("H", 2)  # format 2
     s = set(self.classDef1) | self.coverageExtras
     covTable = coverage.Coverage.fromglyphset(s)
     covStake = w.getNewStake()
     w.addUnresolvedOffset("H", stakeValue, covStake)
     vf1, vf2 = self.getMasks()
     w.add("HH", vf1, vf2)
     cd1Stake = w.getNewStake()
     cd2Stake = w.getNewStake()
     w.addUnresolvedOffset("H", stakeValue, cd1Stake)
     w.addUnresolvedOffset("H", stakeValue, cd2Stake)
     count1 = 1 + utilities.safeMax(self.classDef1.values())
     count2 = 1 + utilities.safeMax(self.classDef2.values())
     w.add("HH", count1, count2)
     emptyPV = pairvalues.PairValues(value.Value(), value.Value())
     devicePool = {}
     Key = pairclasses_key.Key
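     # Emit a PairValues record for every (class1, class2) cell; cells with no
     # explicit entry in the dict are written using the shared empty record.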
     
     for c1 in range(count1):
         for c2 in range(count2):
             obj = self.get(Key([c1, c2]), emptyPV)
             
             obj.buildBinary(
               w,
               devicePool = devicePool,
               posBase = stakeValue,
               valueFormatFirst = vf1,
               valueFormatSecond = vf2,
               **kwArgs)
     
     # Now add the deferred objects
     covTable.buildBinary(w, stakeValue=covStake)
     self.classDef1.buildBinary(w, stakeValue=cd1Stake)
     self.classDef2.buildBinary(w, stakeValue=cd2Stake)
     
     it = sorted(
       (sorted(obj.asImmutable()[1]), obj, stake)
       for obj, stake in devicePool.values())
     
     for t in it:
         t[1].buildBinary(w, stakeValue=t[2], **kwArgs)
Ejemplo n.º 10
0
class Reverse(dict, metaclass=mapmeta.FontDataMetaclass):
    """
    Objects containing format 1 reverse chaining contextual single substitution
    subtables (coverage-based).
    
    These are dicts mapping a single Key to a GlyphTuple. The [1] element of
    the key must have the same number of entries as the GlyphTuple, since this
    format does not use the PSLookupGroup construct at all.
    
    >>> _testingValues[0].pprint(namer=namer.testingNamer())
    (({xyz21, xyz22}, {xyz31, xyz32}), {xyz51, xyz52, xyz54, xyz57, xyz58}, ({afii60001, afii60002, xyz95}, {xyz21, xyz31, xyz41})):
      0: xyz61
      1: xyz62
      2: xyz64
      3: xyz67
      4: xyz68
    """

    #
    # Class definition variables
    #

    mapSpec = dict(
        item_followsprotocol=True,
        item_pprintlabelpresortfunc=(lambda obj: obj[1]),
        item_renumberdeepkeys=True,
        item_usenamerforstr=True,
        map_maxcontextfunc=(lambda d: utilities.safeMax(len(k[0]) for k in d) +
                            1 + utilities.safeMax(len(k[2]) for k in d)),
        map_validatefunc_partial=_validate)

    kind = ('GSUB', 8)
    kindString = "Reverse chaining table"

    #
    # Methods
    #

    def buildBinary(self, w, **kwArgs):
        """
        Adds the binary data for the Reverse object to the specified
        LinkedWriter.
        
        >>> utilities.hexdump(_testingValues[0].binaryString())
               0 | 0001 001C 0002 003C  002A 0002 0044 0032 |.......<.*...D.2|
              10 | 0005 003C 003D 003F  0042 0043 0001 0005 |...<.=.?.B.C....|
              20 | 0032 0033 0035 0038  0039 0001 0002 0014 |.2.3.5.8.9......|
              30 | 0015 0001 0003 0014  001E 0028 0001 0002 |...........(....|
              40 | 001E 001F 0001 0003  005E 0060 0061      |.........^.`.a  |
        """

        if 'stakeValue' in kwArgs:
            stakeValue = kwArgs.pop('stakeValue')
            w.stakeCurrentWithValue(stakeValue)
        else:
            stakeValue = w.stakeCurrent()

        w.add("H", 1)  # format
        pool = {}
        covStake = w.getNewStake()
        w.addUnresolvedOffset("H", stakeValue, covStake)

        for key in self:
            for it in (reversed(key[0]), iter(key[2])):
                v = list(it)
                w.add("H", len(v))

                for c in v:
                    immut = tuple(sorted(c))

                    if immut not in pool:
                        pool[immut] = (c, w.getNewStake())

                    w.addUnresolvedOffset("H", stakeValue, pool[immut][1])

        for value in self.values():
            w.add("H", len(value))
            w.addGroup("H", value)

        key[1].buildBinary(w, stakeValue=covStake)

        for immut, (obj, stake) in sorted(pool.items()):
            obj.buildBinary(w, stakeValue=stake)

    def effectsSummary(self, **kwArgs):
        """
        Returns an EffectsSummary object. If a 'memo' kwArg is provided, the
        result is cached there so that repeated processing of the same object
        can be skipped, avoiding a combinatoric explosion.
        
        >>> obj = _testingValues[0]
        >>> memo = {}
        >>> es = obj.effectsSummary(memo=memo)
        >>> es.pprint()
        50:
          60
        51:
          61
        53:
          63
        56:
          66
        57:
          67
        """

        memo = kwArgs.pop('memo', {})

        if id(self) in memo:
            return memo[id(self)]

        r = EffectsSummary()

        for key, tOut in self.items():
            for gIn, gOut in zip(sorted(key[1]), tOut):
                r[gIn].add(gOut)

        memo[id(self)] = r
        return r

    @classmethod
    def fromValidatedFontWorkerSource(cls, fws, **kwArgs):
        """
        Creates and returns a new Reverse from the specified FontWorkerSource,
        doing source validation.

        >>> logger = utilities.makeDoctestLogger("FW_test")
        >>> _test_FW_fws.goto(1) # go back to start of file
        >>> obj = Reverse.fromValidatedFontWorkerSource(
        ...   _test_FW_fws,
        ...   logger=logger,
        ...   namer = _test_FW_namer)
        >>> obj.pprint()
        Key((CoverageTuple((CoverageSet(frozenset({2, 3})),)), CoverageSet(frozenset({8})), CoverageTuple((CoverageSet(frozenset({5})),)))):
          0: 9

        >>> _test_FW_fws2.goto(1) # go back to start of file
        >>> obj = Reverse.fromValidatedFontWorkerSource(
        ...  _test_FW_fws2,
        ...  logger=logger,
        ...  namer = _test_FW_namer)
        FW_test.reverse - WARNING - line 3 -- glyph 'foo' not found
        FW_test.reverse - WARNING - line 3 -- glyph 'bar' not found
        FW_test.reverse - ERROR - Must define at least one backtrackcoverage or lookaheadcoverage for reversechaining lookup type.
        >>> obj is None
        True
        """
        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("reverse")

        namer = kwArgs['namer']
        terminalStrings = ('subtable end', 'lookup end')
        startingLineNumber = fws.lineNumber

        CoverageTuple = pschaincoverage_coveragetuple.CoverageTuple
        Key = reverse_key.Key
        fFWS = coverageset.CoverageSet.fromValidatedFontWorkerSource

        backtrackcvgsets = []
        singlesubin = []
        singlesubout = []
        lookaheadcvgsets = []

        r = cls()

        for line in fws:
            if line.lower() in terminalStrings:
                if len(backtrackcvgsets) == 0 and len(lookaheadcvgsets) == 0:
                    logger.error(('Vxxxx', (),
                                  "Must define at least one backtrackcoverage "
                                  "or lookaheadcoverage for reversechaining "
                                  "lookup type."))

                    return None

                if len(singlesubin) == 0:
                    logger.error(
                        ('Vxxxx', (), "Single substitution is empty."))

                    return None

                # sort output to match order of input
                ssinsorted, ssoutsorted = list(
                    zip(*sorted(zip(singlesubin, singlesubout))))

                ss = coverageset.CoverageSet(ssinsorted)
                k = Key(CoverageTuple(backtrackcvgsets), ss,
                        CoverageTuple(lookaheadcvgsets))
                v = glyphtuple.GlyphTuple(ssoutsorted)
                r[k] = v

                return r

            if len(line) > 0:
                tokens = [x.strip() for x in line.split('\t')]

                if tokens[0].lower() == 'backtrackcoverage definition begin':
                    # key[0]
                    backtrackcvgsets.append(fFWS(fws, **kwArgs))

                elif tokens[0].lower() == 'lookaheadcoverage definition begin':
                    # key[2]
                    lookaheadcvgsets.append(fFWS(fws, **kwArgs))

                elif len(tokens) == 2:
                    glyphsOK = True
                    glyphIndices = [
                        namer.glyphIndexFromString(t) for t in tokens
                    ]

                    for i in range(2):
                        if glyphIndices[i] is None:
                            logger.warning(
                                ('V0956', (fws.lineNumber, tokens[i]),
                                 "line %d -- glyph '%s' not found"))
                            glyphsOK = False

                    if glyphsOK:
                        singlesubin.append(glyphIndices[0])
                        singlesubout.append(glyphIndices[1])

        logger.error(('V0958', (startingLineNumber, "/".join(terminalStrings)),
                      'line %d -- did not find matching \'%s\''))

        return r

    @classmethod
    def fromvalidatedwalker(cls, w, **kwArgs):
        """
        Creates and returns a new Reverse object from the specified walker,
        doing source validation.
        
        >>> logger = utilities.makeDoctestLogger("reverse_test")
        >>> fvb = Reverse.fromvalidatedbytes
        >>> s = _testingValues[0].binaryString()
        >>> obj = fvb(s, logger=logger)
        reverse_test.reverse - DEBUG - Walker has 78 bytes remaining.
        reverse_test.reverse.input.coverageset - DEBUG - Walker has 50 remaining bytes.
        reverse_test.reverse.input.coverageset - DEBUG - Format is 1, count is 5
        reverse_test.reverse.input.coverageset - DEBUG - Raw data are [50, 51, 53, 56, 57]
        reverse_test.reverse.backtrack index 0.coverageset - DEBUG - Walker has 18 remaining bytes.
        reverse_test.reverse.backtrack index 0.coverageset - DEBUG - Format is 1, count is 2
        reverse_test.reverse.backtrack index 0.coverageset - DEBUG - Raw data are [30, 31]
        reverse_test.reverse.backtrack index 1.coverageset - DEBUG - Walker has 36 remaining bytes.
        reverse_test.reverse.backtrack index 1.coverageset - DEBUG - Format is 1, count is 2
        reverse_test.reverse.backtrack index 1.coverageset - DEBUG - Raw data are [20, 21]
        reverse_test.reverse.lookahead index 0.coverageset - DEBUG - Walker has 10 remaining bytes.
        reverse_test.reverse.lookahead index 0.coverageset - DEBUG - Format is 1, count is 3
        reverse_test.reverse.lookahead index 0.coverageset - DEBUG - Raw data are [94, 96, 97]
        reverse_test.reverse.lookahead index 1.coverageset - DEBUG - Walker has 28 remaining bytes.
        reverse_test.reverse.lookahead index 1.coverageset - DEBUG - Format is 1, count is 3
        reverse_test.reverse.lookahead index 1.coverageset - DEBUG - Raw data are [20, 30, 40]

        >>> fvb(s[:5], logger=logger)
        reverse_test.reverse - DEBUG - Walker has 5 bytes remaining.
        reverse_test.reverse - ERROR - Insufficient bytes.
        """

        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("reverse")

        logger.debug(
            ('V0001', (w.length(), ), "Walker has %d bytes remaining."))

        if w.length() < 6:
            logger.error(('V0004', (), "Insufficient bytes."))
            return None

        format, inOffset, backCount = w.unpack("3H")

        if format != 1:
            logger.error(
                ('V0002', (format, ), "Expected format 1, but got format %d."))

            return None

        fvw = coverageset.CoverageSet.fromvalidatedwalker

        kIn = fvw(w.subWalker(inOffset),
                  logger=logger.getChild("input"),
                  **kwArgs)

        if kIn is None:
            return None

        if w.length() < 2 * backCount:
            logger.error(('V0446', (),
                          "The Backtrack offsets are missing or incomplete."))

            return None

        backOffsets = w.group("H", backCount)
        v = [None] * backCount

        for i, offset in enumerate(backOffsets):
            obj = fvw(w.subWalker(offset),
                      logger=logger.getChild("backtrack index %d" % (i, )),
                      **kwArgs)

            if obj is None:
                return None

            v[i] = obj

        kBack = CoverageTuple(reversed(v))

        if w.length() < 2:
            logger.error(
                ('V0447', (), "The Lookahead count is missing or incomplete."))

            return None

        lookCount = w.unpack("H")

        if w.length() < 2 * lookCount:
            logger.error(('V0448', (),
                          "The Lookahead offsets are missing or incomplete."))

            return None

        lookOffsets = w.group("H", lookCount)
        v = [None] * lookCount

        for i, offset in enumerate(lookOffsets):
            obj = fvw(w.subWalker(offset),
                      logger=logger.getChild("lookahead index %d" % (i, )),
                      **kwArgs)

            if obj is None:
                return None

            v[i] = obj

        kLook = CoverageTuple(v)
        key = Key([kBack, kIn, kLook])

        if w.length() < 2:
            logger.error(('V0449', (),
                          "The Substitute count is missing or incomplete."))

            return None

        substCount = w.unpack("H")

        if w.length() < 2 * substCount:
            logger.error(('V0450', (),
                          "The Substitute array is missing or incomplete."))

            return None

        if substCount != len(kIn):
            logger.error(
                ('V0451', (),
                 "The Substitute count does not match the length of the "
                 "input Coverage."))

            return None

        group = glyphtuple.GlyphTuple(w.group("H", substCount))
        return cls({key: group})

    @classmethod
    def fromwalker(cls, w, **kwArgs):
        """
        Creates and returns a new Reverse object from the specified walker.
        
        >>> obj = _testingValues[0]
        >>> obj == Reverse.frombytes(obj.binaryString())
        True
        """

        format = w.unpack("H")
        assert format == 1
        f = coverageset.CoverageSet.fromwalker
        kIn = f(w.subWalker(w.unpack("H")))
        backOffsets = w.group("H", w.unpack("H"))

        kBack = CoverageTuple(
            reversed([f(w.subWalker(offset)) for offset in backOffsets]))

        lookOffsets = w.group("H", w.unpack("H"))
        kLook = CoverageTuple(f(w.subWalker(offset)) for offset in lookOffsets)
        key = Key([kBack, kIn, kLook])
        group = glyphtuple.GlyphTuple(w.group("H", w.unpack("H")))
        return cls({key: group})

    def run(self, glyphArray, **kwArgs):
        raise DeprecationWarning("The run() method is deprecated; "
                                 "please use runOne() instead.")

    def runOne(self, glyphArray, startIndex, **kwArgs):
        """
        Do the processing for a single (initial) glyph in a glyph array. This
        method is called by the Lookup object's run() method (and possibly by
        actions within contextual or related subtables).
        
        This method returns a pair: the new output GlyphList, and a count of
        the number of glyph indices involved (or zero, if no action happened).
        
        Note that the igsFunc keyword argument is used in this method.
        
        >>> obj = _testingValues[0]
        >>> ga = runningglyphs.GlyphList.fromiterable([20, 77, 30, 57, 94, 77, 20])
        >>> igsFunc = lambda *a, **k: [False, True, False, False, False, True, False]
        >>> r, count = obj.runOne(ga, 0, igsFunc=igsFunc)
        >>> count
        0
        >>> r is ga
        True
        
        >>> r, count = obj.runOne(ga, 3, igsFunc=igsFunc)
        >>> count
        1
        >>> r.pprint()
        0:
          Value: 20
          originalOffset: 0
        1:
          Value: 77
          originalOffset: 1
        2:
          Value: 30
          originalOffset: 2
        3:
          Value: 67
          originalOffset: 3
        4:
          Value: 94
          originalOffset: 4
        5:
          Value: 77
          originalOffset: 5
        6:
          Value: 20
          originalOffset: 6
        """

        igsFunc = kwArgs['igsFunc']
        igs = igsFunc(glyphArray, **kwArgs)
        firstGlyph = glyphArray[startIndex]

        # Find all non-ignorables (not just starting with startIndex, since we
        # potentially need backtrack here too...)

        v = [(g, i) for i, g in enumerate(glyphArray) if (not igs[i])]

        vNonIgs = [x[0] for x in v]
        vBackMap = [x[1] for x in v]
        startIndexNI = vBackMap.index(startIndex)

        for key in self:
            if firstGlyph not in key[1]:
                continue

            backLen, inLen, lookLen = len(key[0]), 1, len(key[2])
            totalLen = backLen + inLen + lookLen

            if backLen > startIndexNI:
                continue

            if (inLen + lookLen) > (len(vNonIgs) - startIndexNI):
                continue

            pieceStart = startIndexNI - backLen
            piece = vNonIgs[pieceStart:pieceStart + totalLen]
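            # Every glyph in the candidate piece must be a member of the
            # corresponding coverage set (backtrack, then input, then
            # lookahead, all in textual order).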

            if not all(a in b
                       for a, b in zip(piece, key[0] + (key[1], ) + key[2])):
                continue

            # If we get here the key is a match

            r = glyphArray.fromiterable(glyphArray)  # preserves offsets
            it = list(zip(sorted(key[1]), self[key]))
            d = {gIn: gOut for gIn, gOut in it}

            r[startIndex] = runningglyphs.Glyph(
                d[firstGlyph],
                originalOffset=glyphArray[startIndex].originalOffset)

            return (r, 1)

        return (glyphArray, 0)

    def writeFontWorkerSource(self, s, **kwArgs):
        """
        Writes contents of lookup to provided stream 's'. Uses
        namer.bestNameForGlyphIndex if a namer is provided, otherwise
        uses Font Worker glyph index labeling ("# <id>").
        """

        namer = kwArgs.get('namer')

        if namer is not None:
            bnfgi = namer.bestNameForGlyphIndex
        else:
            # Fall back to Font Worker glyph index labeling, per the docstring.
            bnfgi = lambda g: "# %d" % (g,)

        # backtrackcoverage
        k = list(self.keys())[0]  # the single Key for this Reverse subtable

        if k[0]:
            for btc in k[0]:
                s.write("\nbacktrackcoverage definition begin\n")

                for g in sorted(btc):
                    s.write("%s\n" % (bnfgi(g), ))

                s.write("coverage definition end\n")

        # lookaheadcoverage
        if k[2]:
            for lac in k[2]:
                s.write("\nlookaheadcoverage definition begin\n")

                for g in sorted(lac):
                    s.write("%s\n" % (bnfgi(g), ))

                s.write("coverage definition end\n")

        # single substitution lookup (input coverage -> output coverage)
        in_cvg = k[1]
        out_cvg = self[k]
        s.write("\n")

        for i, inGlyph in enumerate(sorted(in_cvg)):
            outGlyph = out_cvg[i]
            s.write("%s\t%s\n" % (bnfgi(inGlyph), bnfgi(outGlyph)))

        s.write("")
Ejemplo n.º 11
0
    def buildBinary(self, w, **kwArgs):
        """
        Adds the binary data for the ContextClass to the specified
        LinkedWriter.
        
        NOTE! There will be unresolved lookup list indices in the LinkedWriter
        after this method is finished. The caller (or somewhere higher up) is
        responsible for adding an index map to the LinkedWriter with the tag
        "lookupList" before the LinkedWriter's binaryString() method is called.
        
        >>> w = writer.LinkedWriter()
        >>> obj = _testingValues[1]
        >>> obj.buildBinary(w, forGPOS=False)
        >>> d = {obj[k][0].lookup.asImmutable(): 22 for k in obj}
        >>> w.addIndexMap("lookupList_GSUB", d)
        >>> utilities.hexdump(w.binaryString())
               0 | 0002 0014 001C 0026  003C 0004 0000 0044 |.......&.<.....D|
              10 | 0000 0000 0001 0002  0014 0015 0001 000A |................|
              20 | 0002 0001 0001 0002  0003 0014 0015 0001 |................|
              30 | 0016 0016 0002 0028  0029 0003 0001 001E |.......(.)......|
              40 | 0001 0001 0001 0004  0001 0001 0002 0002 |................|
              50 | 0001 0001 0001 0000  0016                |..........      |
        """

        if 'stakeValue' in kwArgs:
            stakeValue = kwArgs.pop('stakeValue')
            w.stakeCurrentWithValue(stakeValue)
        else:
            stakeValue = w.stakeCurrent()

        w.add("H", 2)  # format

        inputClassToGlyphs = utilities.invertDictFull(self.classDefInput,
                                                      asSets=True)

        firstInputClasses = set(k[1][0] for k in self)
        firstInputClassesSorted = sorted(firstInputClasses)

        firstInputGlyphs = functools.reduce(set.union,
                                            (inputClassToGlyphs[c]
                                             for c in firstInputClasses))
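
        # The format 2 Coverage lists only the glyphs whose input class starts
        # at least one rule in this subtable.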

        covTable = coverage.Coverage.fromglyphset(firstInputGlyphs)
        covStake = w.getNewStake()
        w.addUnresolvedOffset("H", stakeValue, covStake)

        v = [
            self.classDefBacktrack, self.classDefInput, self.classDefLookahead
        ]

        vImm = [(obj.asImmutable()[1] if obj else None) for obj in v]
        cdPool = {}
        objStakes = {}

        for cd, cdImm in zip(v, vImm):
            if len(cd):
                obj = cdPool.setdefault(cdImm, cd)

                objStake = objStakes.setdefault(obj.asImmutable(),
                                                w.getNewStake())

                w.addUnresolvedOffset("H", stakeValue, objStake)

            else:
                w.add("H", 0)

        count = utilities.safeMax(self.classDefInput.values(), -1) + 1
        w.add("H", count)

        setStakes = dict((firstInputClass, w.getNewStake())
                         for firstInputClass in firstInputClassesSorted)

        for firstInputClass in range(count):
            if firstInputClass in firstInputClasses:
                w.addUnresolvedOffset("H", stakeValue,
                                      setStakes[firstInputClass])

            else:
                w.add("H", 0)

        covTable.buildBinary(w, stakeValue=covStake)

        for cdImm in sorted(cdPool):  # sort to guarantee repeatable ordering
            obj = cdPool[cdImm]
            obj.buildBinary(w, stakeValue=objStakes[obj.asImmutable()])

        orderings = {}
        ruleStakes = {}

        for firstInputClass in firstInputClassesSorted:
            setStake = setStakes[firstInputClass]
            w.stakeCurrentWithValue(setStake)

            o = orderings[firstInputClass] = sorted(
                (k.ruleOrder, k[1], k) for k in self
                if k[1][0] == firstInputClass)

            w.add("H", len(o))

            for order, ignore, key in o:
                ruleStake = w.getNewStake()
                ruleStakes[(firstInputClass, order)] = ruleStake
                w.addUnresolvedOffset("H", setStake, ruleStake)

        for firstInputClass in firstInputClassesSorted:
            for order, ignore, key in orderings[firstInputClass]:
                w.stakeCurrentWithValue(ruleStakes[(firstInputClass, order)])
                obj = self[key]
                w.add("H", len(key[0]))
                w.addGroup("H", reversed(key[0]))
                w.add("H", len(key[1]))
                w.addGroup("H", key[1][1:])
                w.add("H", len(key[2]))
                w.addGroup("H", key[2])
                w.add("H", len(obj))
                obj.buildBinary(w, **kwArgs)
Ejemplo n.º 12
0
class PSChainClass(dict, metaclass=mapmeta.FontDataMetaclass):
    """
    Objects containing format 2 chaining contextual lookups. Note that these
    work for both GPOS and GSUB tables.
    
    These are dicts mapping Keys to PSLookupGroups.
    
    >>> _testingValues[1].pprint(namer=namer.testingNamer())
    Key((ClassTuple((1,)), ClassTuple((1, 2)), ClassTuple((1,))), ruleOrder=0):
      Effect #1:
        Sequence index: 0
        Lookup:
          Subtable 0 (Single substitution table):
            xyz21: xyz41
            xyz22: xyz42
          Lookup flags:
            Right-to-left for Cursive: False
            Ignore base glyphs: False
            Ignore ligatures: False
            Ignore marks: False
          Sequence order (lower happens first): 22
    Class definition table (backtrack):
      xyz11: 1
      xyz12: 1
    Class definition table (input):
      xyz21: 1
      xyz22: 1
      xyz23: 2
      xyz41: 3
      xyz42: 3
    Class definition table (lookahead):
      xyz31: 1
    """

    #
    # Class definition variables
    #

    mapSpec = dict(
        item_followsprotocol=True,
        item_pprintlabelpresortfunc=(lambda obj: (obj[1][0], obj.ruleOrder)),
        map_compactiblefunc=(lambda d, k, **kw: False),
        #map_compactiblefunc = _canRemove,
        map_maxcontextfunc=(lambda d: utilities.safeMax(len(k) for k in d)),
        map_validatefunc_partial=_validate)

    attrSpec = dict(
        classDefBacktrack=dict(
            attr_followsprotocol=True,
            attr_ignoreforbool=True,
            attr_initfunc=classdef.ClassDef,
            attr_label="Class definition table (backtrack)"),
        classDefInput=dict(attr_followsprotocol=True,
                           attr_ignoreforbool=True,
                           attr_initfunc=classdef.ClassDef,
                           attr_label="Class definition table (input)"),
        classDefLookahead=dict(
            attr_followsprotocol=True,
            attr_ignoreforbool=True,
            attr_initfunc=classdef.ClassDef,
            attr_label="Class definition table (lookahead)"),
        coverageExtras=dict(attr_followsprotocol=True,
                            attr_ignoreforbool=True,
                            attr_initfunc=glyphset.GlyphSet,
                            attr_label="Coverage glyphs not in ClassDef",
                            attr_showonlyiftrue=True))

    #
    # Methods
    #

    def __iter__(self):
        """
        We provide a custom iterator to make sure the ruleOrder is correctly
        being followed.
        
        >>> for k in _testingValues[1]: print(k)
        ((1,), (1, 2), (1,)), Relative order = 0
        """

        v = list(super(PSChainClass, self).__iter__())
        return iter(sorted(v, key=_keySort))

    def buildBinary(self, w, **kwArgs):
        """
        Adds the binary data for the PSChainClass to the specified
        LinkedWriter.
        
        NOTE! There will be unresolved lookup list indices in the LinkedWriter
        after this method is finished. The caller (or somewhere higher up) is
        responsible for adding an index map to the LinkedWriter with the tag
        "lookupList" before the LinkedWriter's binaryString() method is called.
        
        >>> w = writer.LinkedWriter()
        >>> obj = _testingValues[1]
        >>> obj.buildBinary(w, forGPOS=False)
        >>> d = {obj[k][0].lookup.asImmutable(): 22 for k in obj}
        >>> w.addIndexMap("lookupList_GSUB", d)
        >>> utilities.hexdump(w.binaryString())
               0 | 0002 0014 001C 0026  003C 0004 0000 0044 |.......&.<.....D|
              10 | 0000 0000 0001 0002  0014 0015 0001 000A |................|
              20 | 0002 0001 0001 0002  0003 0014 0015 0001 |................|
              30 | 0016 0016 0002 0028  0029 0003 0001 001E |.......(.)......|
              40 | 0001 0001 0001 0004  0001 0001 0002 0002 |................|
              50 | 0001 0001 0001 0000  0016                |..........      |
        """

        if 'stakeValue' in kwArgs:
            stakeValue = kwArgs.pop('stakeValue')
            w.stakeCurrentWithValue(stakeValue)
        else:
            stakeValue = w.stakeCurrent()

        w.add("H", 2)  # format

        inputClassToGlyphs = utilities.invertDictFull(self.classDefInput,
                                                      asSets=True)

        firstInputClasses = set(k[1][0] for k in self)
        firstInputClassesSorted = sorted(firstInputClasses)

        firstInputGlyphs = functools.reduce(set.union,
                                            (inputClassToGlyphs[c]
                                             for c in firstInputClasses))

        covTable = coverage.Coverage.fromglyphset(firstInputGlyphs)
        covStake = w.getNewStake()
        w.addUnresolvedOffset("H", stakeValue, covStake)

        v = [
            self.classDefBacktrack, self.classDefInput, self.classDefLookahead
        ]

        vImm = [(obj.asImmutable()[1] if obj else None) for obj in v]
        cdPool = {}
        objStakes = {}

        for cd, cdImm in zip(v, vImm):
            if len(cd):
                obj = cdPool.setdefault(cdImm, cd)

                objStake = objStakes.setdefault(obj.asImmutable(),
                                                w.getNewStake())

                w.addUnresolvedOffset("H", stakeValue, objStake)

            else:
                w.add("H", 0)

        count = utilities.safeMax(self.classDefInput.values(), -1) + 1
        w.add("H", count)

        setStakes = dict((firstInputClass, w.getNewStake())
                         for firstInputClass in firstInputClassesSorted)

        for firstInputClass in range(count):
            if firstInputClass in firstInputClasses:
                w.addUnresolvedOffset("H", stakeValue,
                                      setStakes[firstInputClass])

            else:
                w.add("H", 0)

        covTable.buildBinary(w, stakeValue=covStake)

        for cdImm in sorted(cdPool):  # sort to guarantee repeatable ordering
            obj = cdPool[cdImm]
            obj.buildBinary(w, stakeValue=objStakes[obj.asImmutable()])

        orderings = {}
        ruleStakes = {}

        for firstInputClass in firstInputClassesSorted:
            setStake = setStakes[firstInputClass]
            w.stakeCurrentWithValue(setStake)

            o = orderings[firstInputClass] = sorted(
                (k.ruleOrder, k[1], k) for k in self
                if k[1][0] == firstInputClass)

            w.add("H", len(o))

            for order, ignore, key in o:
                ruleStake = w.getNewStake()
                ruleStakes[(firstInputClass, order)] = ruleStake
                w.addUnresolvedOffset("H", setStake, ruleStake)

        for firstInputClass in firstInputClassesSorted:
            for order, ignore, key in orderings[firstInputClass]:
                w.stakeCurrentWithValue(ruleStakes[(firstInputClass, order)])
                obj = self[key]
                w.add("H", len(key[0]))
                w.addGroup("H", reversed(key[0]))
                w.add("H", len(key[1]))
                w.addGroup("H", key[1][1:])
                w.add("H", len(key[2]))
                w.addGroup("H", key[2])
                w.add("H", len(obj))
                obj.buildBinary(w, **kwArgs)

    @classmethod
    def fromValidatedFontWorkerSource(cls, fws, **kwArgs):
        """
        Creates and returns a new PSChainClass from the specified
        FontWorkerSource.

        >>> logger = utilities.makeDoctestLogger("FW_test")
        >>> obj = PSChainClass.fromValidatedFontWorkerSource(
        ...   _test_FW_fws2,
        ...   namer = _test_FW_namer,
        ...   forGPOS = True,
        ...   lookupDict = _test_FW_lookupDict,
        ...   logger = logger,
        ...   editor = {})
        FW_test.pschainclass - WARNING - line 20 -- unexpected token: foo
        FW_test.pschainclass - WARNING - line 22 -- invalid backtrack class: 4
        FW_test.pschainclass - WARNING - line 22 -- invalid input class: 7
        FW_test.pschainclass - WARNING - line 22 -- invalid lookahead class: 6
        FW_test.pschainclass - WARNING - line 0 -- did not find matching 'subtable end/lookup end'
        >>> obj.pprint()
        Key((ClassTuple((2, 1, 0)), ClassTuple((0, 5)), ClassTuple((7, 8, 9, 0))), ruleOrder=0):
          Effect #1:
            Sequence index: 0
            Lookup:
              3:
                FUnit adjustment to horizontal advance: 678
          Effect #2:
            Sequence index: 0
            Lookup:
              3:
                FUnit adjustment to horizontal advance: 901
        Class definition table (backtrack):
          1: 1
          2: 2
          3: 3
        Class definition table (input):
          1: 4
          2: 5
          3: 6
        Class definition table (lookahead):
          1: 7
          2: 8
          3: 9
        """

        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("pschainclass")
        terminalStrings = ('subtable end', 'lookup end')
        startingLineNumber = fws.lineNumber

        # place-holders
        classDefBacktrack = classdef.ClassDef()
        classDefInput = classdef.ClassDef()
        classDefLookahead = classdef.ClassDef()

        ruleOrders = {}
        lookupGroups = {}
        stringKeys = {}

        for line in fws:
            if line.lower() in terminalStrings:
                r = cls(lookupGroups,
                        classDefBacktrack=classDefBacktrack,
                        classDefInput=classDefInput,
                        classDefLookahead=classDefLookahead)

                return r

            if len(line) > 0:
                tokens = [x.strip() for x in line.split('\t')]
                fVFWS = classdef.ClassDef.fromValidatedFontWorkerSource

                if tokens[0].lower() == 'backtrackclass definition begin':
                    classDefBacktrack = fVFWS(fws, logger=logger, **kwArgs)
                    cdBackSet = set(classDefBacktrack.values())

                elif tokens[0].lower() == 'class definition begin':
                    classDefInput = fVFWS(fws, logger=logger, **kwArgs)
                    cdInputSet = set(classDefInput.values())

                elif tokens[0].lower() == 'lookaheadclass definition begin':
                    classDefLookahead = fVFWS(fws,
                                              logger=logger,
                                              dbg=True,
                                              **kwArgs)
                    cdLookSet = set(classDefLookahead.values())

                elif tokens[0].lower() == 'class-chain':
                    CT = pschainclass_classtuple.ClassTuple
                    classTuple1 = CT()
                    classTuple2 = CT()
                    classTuple3 = CT()
                    classesOK = True

                    if tokens[1] != '':
                        classList1 = []

                        try:
                            classList1 = [int(x) for x in tokens[1].split(',')]
                            # backtrack goes in reverse order
                            classList1.reverse()
                        except ValueError:
                            logger.warning((
                                'Vxxxx', (fws.lineNumber, tokens[1]),
                                'line %d -- invalid backtrack definition: %s'))

                            classesOK = False

                        for classNum in classList1:
                            if classNum == 0:
                                continue

                            if classNum not in cdBackSet:
                                logger.warning(
                                    ('V0962', (fws.lineNumber, classNum),
                                     'line %d -- invalid backtrack class: %d'))

                                classesOK = False

                        classTuple1 = CT(classList1)

                    if tokens[2] != '':
                        classList2 = []

                        try:
                            classList2 = [int(x) for x in tokens[2].split(',')]
                        except ValueError:
                            logger.warning(
                                ('Vxxxx', (fws.lineNumber, tokens[2]),
                                 'line %d -- invalid input definition: %s'))

                            classesOK = False

                        for classNum in classList2:
                            if classNum == 0:
                                continue

                            if classNum not in cdInputSet:
                                logger.warning(
                                    ('V0962', (fws.lineNumber, classNum),
                                     'line %d -- invalid input class: %d'))

                                classesOK = False

                        classTuple2 = CT(classList2)

                    if tokens[3] != '':
                        classList3 = []

                        try:
                            classList3 = [int(x) for x in tokens[3].split(',')]
                        except ValueError:
                            logger.warning((
                                'Vxxxx', (fws.lineNumber, tokens[3]),
                                'line %d -- invalid lookahead definition: %s'))

                            classesOK = False

                        for classNum in classList3:
                            if classNum == 0:
                                continue

                            if classNum not in cdLookSet:
                                logger.warning(
                                    ('V0962', (fws.lineNumber, classNum),
                                     'line %d -- invalid lookahead class: %d'))

                                classesOK = False

                        classTuple3 = CT(classList3)

                    if not classesOK:
                        continue

                    lookupList = []

                    for effect in tokens[4:]:
                        effectTokens = [x.strip() for x in effect.split(',')]
                        sequenceIndex = int(effectTokens[0]) - 1
                        lookupName = effectTokens[1]

                        lookupList.append(
                            pslookuprecord.PSLookupRecord(
                                sequenceIndex,
                                lookup.Lookup.fromValidatedFontWorkerSource(
                                    fws, lookupName, logger=logger, **kwArgs)))

                    stringKey = "(%s), (%s), (%s)" % (",".join([
                        str(ci) for ci in classTuple1[::-1]
                    ]), ",".join([str(ci) for ci in classTuple2]), ",".join(
                        [str(ci) for ci in classTuple3]))

                    if stringKey in stringKeys:
                        logger.warning((
                            'Vxxxx',
                            (fws.lineNumber, stringKey, stringKeys[stringKey]),
                            "line %d -- context '%s' previously defined "
                            "at line %d"))

                    else:
                        stringKeys[stringKey] = fws.lineNumber

                        key = pschainclass_key.Key(
                            [classTuple1, classTuple2, classTuple3])

                        ruleOrder = ruleOrders.get(classTuple2[0], 0)
                        key.ruleOrder = ruleOrder
                        ruleOrders[classTuple2[0]] = ruleOrder + 1
                        lookupGroup = pslookupgroup.PSLookupGroup(lookupList)
                        lookupGroups[key] = lookupGroup

                else:
                    logger.warning(('V0960', (fws.lineNumber, tokens[0]),
                                    'line %d -- unexpected token: %s'))

        logger.warning(
            ('V0958', (startingLineNumber, "/".join(terminalStrings)),
             "line %d -- did not find matching '%s'"))

        r = cls(lookupGroups,
                classDefBacktrack=classDefBacktrack,
                classDefInput=classDefInput,
                classDefLookahead=classDefLookahead)

        return r

    @classmethod
    def fromvalidatedwalker(cls, w, **kwArgs):
        """
        Creates and returns a new PSChainClass object from the specified
        walker, doing source validation.
        
        >>> logger = utilities.makeDoctestLogger("pschainclass_test")
        >>> w = writer.LinkedWriter()
        >>> obj = _testingValues[1]
        >>> obj.buildBinary(w, forGPOS=False)
        >>> d = {obj[k][0].lookup.asImmutable(): 22 for k in obj}
        >>> w.addIndexMap("lookupList_GSUB", d)
        >>> s = w.binaryString()
        >>> FL = []
        >>> fvb = PSChainClass.fromvalidatedbytes
        >>> obj2 = fvb(s, fixupList=FL, logger=logger)
        pschainclass_test.pschainclass - DEBUG - Walker has 90 remaining bytes.
        pschainclass_test.pschainclass - DEBUG - Format is 2
        pschainclass_test.pschainclass.coverage - DEBUG - Walker has 70 remaining bytes.
        pschainclass_test.pschainclass.coverage - DEBUG - Format is 1, count is 2
        pschainclass_test.pschainclass.coverage - DEBUG - Raw data are [20, 21]
        pschainclass_test.pschainclass - DEBUG - Backtrack offset is 28
        pschainclass_test.pschainclass.classDef - DEBUG - Walker has 62 remaining bytes.
        pschainclass_test.pschainclass.classDef - DEBUG - ClassDef is format 1.
        pschainclass_test.pschainclass.classDef - DEBUG - First is 10, and count is 2
        pschainclass_test.pschainclass.classDef - DEBUG - Raw data are (1, 1)
        pschainclass_test.pschainclass - DEBUG - Input offset is 38
        pschainclass_test.pschainclass.classDef - DEBUG - Walker has 52 remaining bytes.
        pschainclass_test.pschainclass.classDef - DEBUG - ClassDef is format 2.
        pschainclass_test.pschainclass.classDef - DEBUG - Count is 3
        pschainclass_test.pschainclass.classDef - DEBUG - Raw data are [(20, 21, 1), (22, 22, 2), (40, 41, 3)]
        pschainclass_test.pschainclass - DEBUG - Lookahead offset is 60
        pschainclass_test.pschainclass.classDef - DEBUG - Walker has 30 remaining bytes.
        pschainclass_test.pschainclass.classDef - DEBUG - ClassDef is format 1.
        pschainclass_test.pschainclass.classDef - DEBUG - First is 30, and count is 1
        pschainclass_test.pschainclass.classDef - DEBUG - Raw data are (1,)
        pschainclass_test.pschainclass - DEBUG - Set offsets are (0, 68, 0, 0)
        pschainclass_test.pschainclass.class index 0 - DEBUG - Set offset is zero
        pschainclass_test.pschainclass.class index 1 - DEBUG - Set offset is 68
        pschainclass_test.pschainclass.class index 1 - DEBUG - Rule count is 1
        pschainclass_test.pschainclass.class index 1 - DEBUG - Raw rule offsets are (4,)
        pschainclass_test.pschainclass.class index 1.rule order 0 - DEBUG - Backtrack count is 1
        pschainclass_test.pschainclass.class index 1.rule order 0 - DEBUG - Backtrack classes (reversed) are (1,)
        pschainclass_test.pschainclass.class index 1.rule order 0 - DEBUG - Input count is 2
        pschainclass_test.pschainclass.class index 1.rule order 0 - DEBUG - Input classes are (1, 2)
        pschainclass_test.pschainclass.class index 1.rule order 0 - DEBUG - Lookahead count is 1
        pschainclass_test.pschainclass.class index 1.rule order 0 - DEBUG - Lookahead classes are (1,)
        pschainclass_test.pschainclass.class index 1.rule order 0 - DEBUG - Action count is 1
        pschainclass_test.pschainclass.class index 1.rule order 0.pslookupgroup - DEBUG - Walker has 4 bytes remaining.
        pschainclass_test.pschainclass.class index 1.rule order 0.pslookupgroup.[0].pslookuprecord - DEBUG - Walker has 4 remaining bytes.
        pschainclass_test.pschainclass.class index 1.rule order 0.pslookupgroup.[0].pslookuprecord - DEBUG - Sequence index is 0
        pschainclass_test.pschainclass.class index 1.rule order 0.pslookupgroup.[0].pslookuprecord - DEBUG - Lookup index is 22
        pschainclass_test.pschainclass.class index 2 - DEBUG - Set offset is zero
        pschainclass_test.pschainclass.class index 3 - DEBUG - Set offset is zero
        pschainclass_test.pschainclass - WARNING - The classes [3] in the ClassDef are not used in any key, so the corresponding glyphs [40, 41] will be removed from it.
        pschainclass_test.pschainclass - INFO - The following glyphs appear only in the ClassDef and are not present in the Coverage: [22]
        pschainclass_test.pschainclass - INFO - The following glyphs appear in the Coverage and the ClassDef: [20, 21]
        >>> d = {22: obj[k][0].lookup for k in obj}
        >>> for index, func in FL:
        ...     func(d[index])
        
        >>> fvb(s[:33], logger=logger, fixupList=FL)
        pschainclass_test.pschainclass - DEBUG - Walker has 33 remaining bytes.
        pschainclass_test.pschainclass - DEBUG - Format is 2
        pschainclass_test.pschainclass.coverage - DEBUG - Walker has 13 remaining bytes.
        pschainclass_test.pschainclass.coverage - DEBUG - Format is 1, count is 2
        pschainclass_test.pschainclass.coverage - DEBUG - Raw data are [20, 21]
        pschainclass_test.pschainclass - DEBUG - Backtrack offset is 28
        pschainclass_test.pschainclass.classDef - DEBUG - Walker has 5 remaining bytes.
        pschainclass_test.pschainclass.classDef - DEBUG - ClassDef is format 1.
        pschainclass_test.pschainclass.classDef - ERROR - Insufficient bytes for format 1 header.
        """

        assert 'fixupList' in kwArgs
        fixupList = kwArgs.pop('fixupList')

        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("pschainclass")

        logger.debug(
            ('V0001', (w.length(), ), "Walker has %d remaining bytes."))

        if w.length() < 12:
            logger.error(('V0004', (), "Insufficient bytes."))
            return None

        format = w.unpack("H")

        if format != 2:
            logger.error(
                ('V0002', (format, ), "Expected format 2, but got format %d."))

            return None

        else:
            logger.debug(('Vxxxx', (), "Format is 2"))

        covTable = coverage.Coverage.fromvalidatedwalker(
            w.subWalker(w.unpack("H")),
            logger=logger)

        if covTable is None:
            return None

        fvw = classdef.ClassDef.fromvalidatedwalker
        backOffset = w.unpack("H")
        logger.debug(('Vxxxx', (backOffset, ), "Backtrack offset is %d"))

        if backOffset:
            cdBack = fvw(w.subWalker(backOffset), logger=logger)

            if cdBack is None:
                return None

        else:
            cdBack = classdef.ClassDef()

        inOffset = w.unpack("H")
        logger.debug(('Vxxxx', (inOffset, ), "Input offset is %d"))
        cdIn = fvw(w.subWalker(inOffset), logger=logger)

        if cdIn is None:
            return None

        lookOffset = w.unpack("H")
        logger.debug(('Vxxxx', (lookOffset, ), "Lookahead offset is %d"))

        if lookOffset:
            cdLook = fvw(w.subWalker(lookOffset), logger=logger)

            if cdLook is None:
                return None

        else:
            cdLook = classdef.ClassDef()

        r = cls({},
                classDefBacktrack=cdBack,
                classDefInput=cdIn,
                classDefLookahead=cdLook)

        setCount = w.unpack("H")

        if w.length() < 2 * setCount:
            logger.error(
                ('V0378', (),
                 "The ChainClassSet offsets are missing or incomplete."))

            return None

        setOffsets = w.group("H", setCount)
        logger.debug(('Vxxxx', (setOffsets, ), "Set offsets are %s"))
        ClassTuple = pschainclass_classtuple.ClassTuple
        Key = pschainclass_key.Key
        fvw = pslookupgroup.PSLookupGroup.fromvalidatedwalker

        for firstClassIndex, setOffset in enumerate(setOffsets):
            subLogger = logger.getChild("class index %d" % (firstClassIndex, ))

            if setOffset:
                subLogger.debug(('Vxxxx', (setOffset, ), "Set offset is %d"))
                wSet = w.subWalker(setOffset)

                if wSet.length() < 2:
                    subLogger.error(
                        ('V0379', (),
                         "The ChainClassRuleCount is missing or incomplete."))

                    return None

                ruleCount = wSet.unpack("H")
                subLogger.debug(('Vxxxx', (ruleCount, ), "Rule count is %d"))

                if wSet.length() < 2 * ruleCount:
                    subLogger.error((
                        'V0380', (),
                        "The ChainClassRule offsets are missing or incomplete."
                    ))

                    return None

                ruleOffsets = wSet.group("H", ruleCount)

                subLogger.debug(
                    ('Vxxxx', (ruleOffsets, ), "Raw rule offsets are %s"))

                for ruleOrder, ruleOffset in enumerate(ruleOffsets):
                    wRule = wSet.subWalker(ruleOffset)

                    subLogger2 = subLogger.getChild("rule order %d" %
                                                    (ruleOrder, ))

                    if wRule.length() < 2:
                        subLogger2.error((
                            'V0381', (),
                            "The BacktrackGlyphCount is missing or incomplete."
                        ))

                        return None

                    backCount = wRule.unpack("H")

                    subLogger2.debug(
                        ('Vxxxx', (backCount, ), "Backtrack count is %d"))

                    if wRule.length() < 2 * backCount:
                        subLogger2.error((
                            'V0382', (),
                            "The Backtrack classes are missing or incomplete."))

                        return None

                    tBack = ClassTuple(reversed(wRule.group("H", backCount)))

                    subLogger2.debug(('Vxxxx', (tBack, ),
                                      "Backtrack classes (reversed) are %s"))

                    if wRule.length() < 2:
                        subLogger2.error(
                            ('V0383', (),
                             "The InputGlyphCount is missing or incomplete."))

                        return None

                    inCount = wRule.unpack("H") - 1

                    subLogger2.debug(
                        ('Vxxxx', (inCount + 1, ), "Input count is %d"))

                    if wRule.length() < 2 * inCount:
                        subLogger2.error(
                            ('V0384', (),
                             "The Input classes are missing or incomplete."))

                        return None

                    tIn = ClassTuple((firstClassIndex, ) +
                                     wRule.group("H", inCount))

                    subLogger2.debug(
                        ('Vxxxx', (tIn, ), "Input classes are %s"))

                    if wRule.length() < 2:
                        subLogger2.error((
                            'V0385', (),
                            "The LookaheadGlyphCount is missing or incomplete."
                        ))

                        return None

                    lookCount = wRule.unpack("H")

                    subLogger2.debug(
                        ('Vxxxx', (lookCount, ), "Lookahead count is %d"))

                    if wRule.length() < 2 * lookCount:
                        subLogger2.error((
                            'V0386', (),
                            "The Lookahead classes are missing or incomplete."))

                        return None

                    tLook = ClassTuple(wRule.group("H", lookCount))

                    subLogger2.debug(
                        ('Vxxxx', (tLook, ), "Lookahead classes are %s"))

                    if wRule.length() < 2:
                        subLogger2.error(
                            ('V0387', (),
                             "The lookup count is missing or incomplete."))

                        return None

                    posCount = wRule.unpack("H")

                    subLogger2.debug(
                        ('Vxxxx', (posCount, ), "Action count is %d"))

                    key = Key([tBack, tIn, tLook], ruleOrder=ruleOrder)

                    obj = fvw(wRule,
                              count=posCount,
                              fixupList=fixupList,
                              logger=subLogger2,
                              **kwArgs)

                    if obj is None:
                        return None

                    r[key] = obj

            else:
                subLogger.debug(('Vxxxx', (), "Set offset is zero"))

        # Now that we have the keys we can reconcile

        okToProceed, covSet = coverageutilities.reconcile(
            covTable,
            {k[1] for k in r},
            [cdIn],
            logger=logger,
            **kwArgs)

        r.coverageExtras.update(covSet - set(cdIn))
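        # remember any Coverage glyphs that do not appear in the input ClassDef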

        if not okToProceed:
            r.clear()

        return r

    @classmethod
    def fromwalker(cls, w, **kwArgs):
        """
        Creates and returns a new PSChainClass from the specified walker.
        
        There is one required keyword argument:
        
            fixupList   A list, to which (lookupListIndex, fixupFunc) pairs
                        will be appended. The actual Lookup won't be set in the
                        PSLookupRecord until the fixupFunc is called, usually by
                        the top-level GPOS construction logic. The fixup call
                        takes one argument: the Lookup being set into it.
        
        >>> w = writer.LinkedWriter()
        >>> obj = _testingValues[1]
        >>> obj.buildBinary(w, forGPOS=False)
        >>> d = {obj[k][0].lookup.asImmutable(): 22 for k in obj}
        >>> w.addIndexMap("lookupList_GSUB", d)
        >>> s = w.binaryString()
        >>> FL = []
        >>> obj2 = PSChainClass.frombytes(s, fixupList=FL)
        >>> d = {22: obj[k][0].lookup for k in obj}
        >>> for index, func in FL:
        ...     func(d[index])
        
        At this point we have the object; note that the reconciliation phase
        has removed some unneeded classes from the ClassDef (see the doctest
        output for the validated method to see more details on this).
        
        >>> obj2.pprint_changes(obj)
        Class definition table (input):
          Deleted records:
            40: 3
            41: 3
        """

        assert 'fixupList' in kwArgs
        format = w.unpack("H")
        assert format == 2
        covTable = coverage.Coverage.fromwalker(w.subWalker(w.unpack("H")))
        f = classdef.ClassDef.fromwalker

        backOffset = w.unpack("H")

        if backOffset:
            cdBack = f(w.subWalker(backOffset))
        else:
            cdBack = classdef.ClassDef()

        cdIn = classdef.ClassDef.fromwalker(w.subWalker(w.unpack("H")))

        lookOffset = w.unpack("H")

        if lookOffset:
            cdLook = f(w.subWalker(lookOffset))
        else:
            cdLook = classdef.ClassDef()

        r = cls({},
                classDefBacktrack=cdBack,
                classDefInput=cdIn,
                classDefLookahead=cdLook)

        setOffsets = w.group("H", w.unpack("H"))
        f = pslookupgroup.PSLookupGroup.fromwalker
        fixupList = kwArgs['fixupList']
        ClassTuple = pschainclass_classtuple.ClassTuple
        Key = pschainclass_key.Key

        for firstClassIndex, setOffset in enumerate(setOffsets):
            if setOffset:
                wSet = w.subWalker(setOffset)
                it = enumerate(wSet.group("H", wSet.unpack("H")))

                for ruleOrder, ruleOffset in it:
                    wRule = wSet.subWalker(ruleOffset)

                    tBack = ClassTuple(
                        reversed(wRule.group("H", wRule.unpack("H"))))

                    tIn = ClassTuple((firstClassIndex, ) +
                                     wRule.group("H",
                                                 wRule.unpack("H") - 1))

                    tLook = ClassTuple(wRule.group("H", wRule.unpack("H")))
                    key = Key([tBack, tIn, tLook], ruleOrder=ruleOrder)

                    r[key] = f(wRule,
                               count=wRule.unpack("H"),
                               fixupList=fixupList)

        # Now that we have the keys we can reconcile

        okToProceed, covSet = coverageutilities.reconcile(
            covTable,
            {k[1] for k in r},
            [cdIn],
            **kwArgs)

        r.coverageExtras.update(covSet - set(cdIn))

        if not okToProceed:
            r.clear()

        return r

    def writeFontWorkerSource(self, s, **kwArgs):
        """
        Writes the contents of the lookup to the provided stream 's'. Uses
        namer.bestNameForGlyphIndex if a namer is provided; otherwise falls
        back to Font Worker glyph index labeling ("# <id>").
        """

        namer = kwArgs.get('namer')
        # fall back to Font Worker "# <index>" labels if no namer was provided
        bnfgi = (namer.bestNameForGlyphIndex if namer is not None
                 else (lambda n: "# %d" % (n,)))

        if self.classDefBacktrack:
            s.write("backtrackclass definition begin\n")

            it = sorted(self.classDefBacktrack,
                        key=(lambda x: (self.classDefBacktrack[x], x)))

            for k in it:
                v = self.classDefBacktrack[k]
                s.write("%s\t%d\n" % (bnfgi(k), v))

            s.write("class definition end\n\n")

        s.write("class definition begin\n")

        it = sorted(self.classDefInput,
                    key=(lambda x: (self.classDefInput[x], x)))

        for k in it:
            v = self.classDefInput[k]
            s.write("%s\t%d\n" % (bnfgi(k), v))

        s.write("class definition end\n\n")

        if self.classDefLookahead:
            s.write("lookaheadclass definition begin\n")

            it = sorted(self.classDefLookahead,
                        key=(lambda x: (self.classDefLookahead[x], x)))

            for k in it:
                v = self.classDefLookahead[k]
                s.write("%s\t%d\n" % (bnfgi(k), v))

            s.write("class definition end\n\n")

        for k in iter(self):
            v = self[k]
            btSeq, inSeq, laSeq = k[0], k[1], k[2]

            actionStr = "\t".join([
                "%d, %d" % (vi.sequenceIndex + 1, vi.lookup.sequence)
                for vi in v
            ])

            if btSeq:
                btStr = ", ".join(str(x) for x in reversed(btSeq))

            else:
                btStr = ""

            inStr = ", ".join([str(sv) for sv in inSeq])
            laStr = ", ".join([str(sv) for sv in laSeq]) if laSeq else ""

            s.write("class-chain\t%s\t%s\t%s\t%s\n" %
                    (btStr, inStr, laStr, actionStr))
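

# A minimal, hedged sketch (not part of the class above) of the fixupList
# protocol used by frombytes()/fromwalker() in the doctests: parsing leaves
# each PSLookupRecord's Lookup unresolved and appends (lookupListIndex,
# fixupFunc) pairs to the caller-supplied list; once the real Lookup objects
# are known, each fixup is applied. 'resolvedLookups' is a hypothetical
# mapping from lookup list index to Lookup object.

def _applyLookupFixups(fixupList, resolvedLookups):
    """Apply each deferred (lookupListIndex, fixupFunc) pair."""
    for lookupListIndex, fixupFunc in fixupList:
        fixupFunc(resolvedLookups[lookupListIndex])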
Ejemplo n.º 13
0
class PSContextGlyph(dict, metaclass=mapmeta.FontDataMetaclass):
    """
    Objects containing format 1 contextual lookups. Note that these work for
    both GPOS and GSUB tables.
    
    These are dicts mapping Keys to PSLookupGroups. There is an explicit
    iterator present for this class that ensures the keys' ruleOrder attribute
    is respected.
    
    >>> _testingValues[0].pprint(namer=namer.testingNamer())
    (xyz26, xyz51), Relative order = 0:
      Effect #1:
        Sequence index: 1
        Lookup:
          Subtable 0 (Pair (class) positioning table):
            (First class 1, Second class 1):
              Second adjustment:
                FUnit adjustment to origin's x-coordinate: -10
            (First class 2, Second class 0):
              First adjustment:
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
            (First class 2, Second class 1):
              First adjustment:
                FUnit adjustment to origin's x-coordinate: 30
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
              Second adjustment:
                Device for origin's x-coordinate:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
                Device for origin's y-coordinate:
                  Tweak at 12 ppem: -5
                  Tweak at 13 ppem: -3
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 2
                  Tweak at 20 ppem: 3
            Class definition table for first glyph:
              xyz16: 1
              xyz6: 1
              xyz7: 1
              xyz8: 2
            Class definition table for second glyph:
              xyz21: 1
              xyz22: 1
              xyz23: 1
          Lookup flags:
            Right-to-left for Cursive: True
            Ignore base glyphs: False
            Ignore ligatures: False
            Ignore marks: False
          Sequence order (lower happens first): 2
    (xyz26, xyz41), Relative order = 1:
      Effect #1:
        Sequence index: 0
        Lookup:
          Subtable 0 (Pair (glyph) positioning table):
            (xyz11, xyz21):
              First adjustment:
                FUnit adjustment to origin's x-coordinate: 30
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
              Second adjustment:
                Device for origin's x-coordinate:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
                Device for origin's y-coordinate:
                  Tweak at 12 ppem: -5
                  Tweak at 13 ppem: -3
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 2
                  Tweak at 20 ppem: 3
            (xyz9, xyz16):
              Second adjustment:
                FUnit adjustment to origin's x-coordinate: -10
            (xyz9, xyz21):
              First adjustment:
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
          Lookup flags:
            Right-to-left for Cursive: False
            Ignore base glyphs: True
            Ignore ligatures: False
            Ignore marks: False
          Sequence order (lower happens first): 1
      Effect #2:
        Sequence index: 1
        Lookup:
          Subtable 0 (Pair (class) positioning table):
            (First class 1, Second class 1):
              Second adjustment:
                FUnit adjustment to origin's x-coordinate: -10
            (First class 2, Second class 0):
              First adjustment:
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
            (First class 2, Second class 1):
              First adjustment:
                FUnit adjustment to origin's x-coordinate: 30
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
              Second adjustment:
                Device for origin's x-coordinate:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
                Device for origin's y-coordinate:
                  Tweak at 12 ppem: -5
                  Tweak at 13 ppem: -3
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 2
                  Tweak at 20 ppem: 3
            Class definition table for first glyph:
              xyz16: 1
              xyz6: 1
              xyz7: 1
              xyz8: 2
            Class definition table for second glyph:
              xyz21: 1
              xyz22: 1
              xyz23: 1
          Lookup flags:
            Right-to-left for Cursive: True
            Ignore base glyphs: False
            Ignore ligatures: False
            Ignore marks: False
          Sequence order (lower happens first): 2
    (xyz31, xyz11, xyz31), Relative order = 0:
      Effect #1:
        Sequence index: 2
        Lookup:
          Subtable 0 (Single positioning table):
            xyz11:
              FUnit adjustment to origin's x-coordinate: -10
          Lookup flags:
            Right-to-left for Cursive: False
            Ignore base glyphs: False
            Ignore ligatures: False
            Ignore marks: False
            Mark attachment type: 4
          Sequence order (lower happens first): 0
    """

    #
    # Class definition variables
    #

    mapSpec = dict(
        item_followsprotocol=True,
        item_pprintlabelpresortfunc=_keySort,
        item_renumberdeepkeys=True,
        item_usenamerforstr=True,
        map_compactiblefunc=(lambda d, k, **kw: False),
        #map_compactremovesfalses = True,
        map_maxcontextfunc=(lambda d: utilities.safeMax(len(k) for k in d)),
        map_validatefunc_partial=_validate)

    #
    # Methods
    #

    def __iter__(self):
        """
        We provide a custom iterator to make sure the ruleOrder is correctly
        being followed.
        
        >>> for k in _testingValues[0]: print(k)
        (25, 50), Relative order = 0
        (25, 40), Relative order = 1
        (30, 10, 30), Relative order = 0
        """

        v = list(super(PSContextGlyph, self).__iter__())
        return iter(sorted(v, key=_keySort))

    def buildBinary(self, w, **kwArgs):
        """
        Adds the binary data for the PSContextGlyph to the specified
        LinkedWriter.
        
        NOTE! There will be unresolved lookup list indices in the LinkedWriter
        after this method is finished. The caller (or somewhere higher up) is
        responsible for adding an index map to the LinkedWriter with the tag
        "lookupList" before the LinkedWriter's binaryString() method is called.
        
        >>> w = writer.LinkedWriter()
        >>> _testingValues[0].buildBinary(w, forGPOS=True)
        >>> ltv = lookup._testingValues
        >>> w.addIndexMap(
        ...   "lookupList_GPOS",
        ...   { ltv[0].asImmutable(): 10,
        ...     ltv[1].asImmutable(): 11,
        ...     ltv[2].asImmutable(): 25})
        >>> utilities.hexdump(w.binaryString())
               0 | 0001 000A 0002 0012  0018 0001 0002 0019 |................|
              10 | 001E 0002 000A 0014  0001 001C 0002 0001 |................|
              20 | 0032 0001 0019 0002  0002 0028 0000 000B |.2.........(....|
              30 | 0001 0019 0003 0001  000A 001E 0002 000A |................|
        """

        if 'stakeValue' in kwArgs:
            stakeValue = kwArgs.pop('stakeValue')
            w.stakeCurrentWithValue(stakeValue)
        else:
            stakeValue = w.stakeCurrent()

        w.add("H", 1)  # format
        firstGlyphs = sorted(set(k[0] for k in self))
        covTable = coverage.Coverage.fromglyphset(firstGlyphs)
        covStake = w.getNewStake()
        w.addUnresolvedOffset("H", stakeValue, covStake)
        w.add("H", len(firstGlyphs))

        setStakes = dict(
            (firstGlyph, w.getNewStake()) for firstGlyph in firstGlyphs)

        for firstGlyph in firstGlyphs:
            w.addUnresolvedOffset("H", stakeValue, setStakes[firstGlyph])

        covTable.buildBinary(w, stakeValue=covStake)
        orderings = {}
        ruleStakes = {}

        for firstGlyph in firstGlyphs:
            w.stakeCurrentWithValue(setStakes[firstGlyph])

            o = orderings[firstGlyph] = sorted(
                (k.ruleOrder, k) for k in self if k[0] == firstGlyph)

            w.add("H", len(o))

            for order, key in o:
                stake = ruleStakes[(firstGlyph, order)] = w.getNewStake()
                w.addUnresolvedOffset("H", setStakes[firstGlyph], stake)

        for firstGlyph in firstGlyphs:
            for order, key in orderings[firstGlyph]:
                w.stakeCurrentWithValue(ruleStakes[(firstGlyph, order)])
                obj = self[key]
                w.add("HH", len(key), len(obj))
                w.addGroup("H", key[1:])
                obj.buildBinary(w, **kwArgs)

    @classmethod
    def fromValidatedFontWorkerSource(cls, fws, **kwArgs):
        """
        Creates and returns a new PSContextGlyph from the specified
        FontWorkerSource, doing source validation.

        >>> logger = utilities.makeDoctestLogger("FW_test")
        >>> obj = PSContextGlyph.fromValidatedFontWorkerSource(
        ...   _test_FW_fws2,
        ...   namer = _test_FW_namer,
        ...   forGPOS = True,
        ...   lookupDict = _test_FW_lookupDict,
        ...   logger = logger,
        ...   editor={})
        FW_test.pscontextglyph - WARNING - line 2 -- unexpected token: foo
        FW_test.pscontextglyph - WARNING - line 0 -- did not find matching 'subtable end/lookup end'
        >>> obj.pprint()
        Key((1, 3, 5), ruleOrder=0):
          Effect #1:
            Sequence index: 0
            Lookup:
              3:
                FUnit adjustment to horizontal advance: 678
          Effect #2:
            Sequence index: 0
            Lookup:
              3:
                FUnit adjustment to horizontal advance: 901
        """

        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("pscontextglyph")
        terminalStrings = ('subtable end', 'lookup end')
        namer = kwArgs['namer']
        startingLineNumber = fws.lineNumber
        ruleOrder = 0
        lookupGroups = {}

        gIFS = namer.glyphIndexFromString

        for line in fws:
            if line in terminalStrings:
                return cls(lookupGroups)

            if len(line) > 0:
                tokens = [x.strip() for x in line.split('\t')]

                if tokens[0].lower() == 'glyph':
                    glyphNames = tokens[1].split(',')
                    glyphIndices = [gIFS(t.strip()) for t in glyphNames]
                    glyphsOK = True
                    for i in range(len(glyphIndices)):
                        if glyphIndices[i] is None:
                            glyphsOK = False
                            logger.warning(
                                ('V0956', (fws.lineNumber, glyphNames[i]),
                                 "line %d -- glyph '%s' not found; "
                                 "will not make entry for this line."))

                    if glyphsOK:
                        glyphTuple = tuple(glyphIndices)

                        lookupList = []

                        for effect in tokens[2:]:
                            effectTokens = [
                                x.strip() for x in effect.split(',')
                            ]
                            sequenceIndex = int(effectTokens[0]) - 1
                            lookupName = effectTokens[1]

                            lookupList.append(
                                pslookuprecord.PSLookupRecord(
                                    sequenceIndex,
                                    lookup.Lookup.
                                    fromValidatedFontWorkerSource(
                                        fws,
                                        lookupName,
                                        logger=logger,
                                        **kwArgs)))

                        key = pscontextglyph_key.Key(glyphTuple)
                        key.ruleOrder = ruleOrder
                        ruleOrder += 1
                        lookupGroup = pslookupgroup.PSLookupGroup(lookupList)
                        lookupGroups[key] = lookupGroup

                else:
                    logger.warning(('V0960', (fws.lineNumber, tokens[0]),
                                    'line %d -- unexpected token: %s'))

        logger.warning(
            ('V0958', (startingLineNumber, "/".join(terminalStrings)),
             'line %d -- did not find matching \'%s\''))

        return cls(lookupGroups)

    @classmethod
    def fromvalidatedwalker(cls, w, **kwArgs):
        """
        Creates and returns a new PSContextGlyph object from the specified
        walker, doing source validation. The following keyword arguments are
        supported:
        
            fixupList   A list, to which (lookupListIndex, fixupFunc) pairs
                        will be appended. The actual lookup won't be set in the
                        PSLookupRecord until this fixupFunc is called by
                        lookuplist.fromvalidatedwalker(). The fixup call takes
                        one argument: the Lookup being set into it.
            
            logger      A logger to which messages will be posted.
        
        >>> w = writer.LinkedWriter()
        >>> _testingValues[0].buildBinary(w, forGPOS=True)
        >>> ltv = lookup._testingValues
        >>> d = {
        ...   ltv[0].asImmutable(): 10,
        ...   ltv[1].asImmutable(): 11,
        ...   ltv[2].asImmutable(): 25}
        >>> w.addIndexMap("lookupList_GPOS", d)
        >>> s = w.binaryString()
        >>> FL = []
        >>> logger = utilities.makeDoctestLogger("pscontextglyph_test")
        >>> fvb = PSContextGlyph.fromvalidatedbytes
        >>> obj = fvb(s, fixupList=FL, logger=logger)
        pscontextglyph_test.pscontextglyph - DEBUG - Walker has 64 bytes remaining.
        pscontextglyph_test.pscontextglyph - DEBUG - Format is 1
        pscontextglyph_test.pscontextglyph.coverage - DEBUG - Walker has 54 remaining bytes.
        pscontextglyph_test.pscontextglyph.coverage - DEBUG - Format is 1, count is 2
        pscontextglyph_test.pscontextglyph.coverage - DEBUG - Raw data are [25, 30]
        pscontextglyph_test.pscontextglyph - DEBUG - RuleSetCount is 2
        pscontextglyph_test.pscontextglyph - DEBUG - RuleSet offsets are (18, 24)
        pscontextglyph_test.pscontextglyph.rule set 0 - DEBUG - Rule count is 2
        pscontextglyph_test.pscontextglyph.rule set 0.rule order 0 - DEBUG - Glyph count is 2
        pscontextglyph_test.pscontextglyph.rule set 0.rule order 0 - DEBUG - Action count is 1
        pscontextglyph_test.pscontextglyph.rule set 0.rule order 0.pslookupgroup - DEBUG - Walker has 30 bytes remaining.
        pscontextglyph_test.pscontextglyph.rule set 0.rule order 0.pslookupgroup.[0].pslookuprecord - DEBUG - Walker has 30 remaining bytes.
        pscontextglyph_test.pscontextglyph.rule set 0.rule order 0.pslookupgroup.[0].pslookuprecord - DEBUG - Sequence index is 1
        pscontextglyph_test.pscontextglyph.rule set 0.rule order 0.pslookupgroup.[0].pslookuprecord - DEBUG - Lookup index is 25
        pscontextglyph_test.pscontextglyph.rule set 0.rule order 1 - DEBUG - Glyph count is 2
        pscontextglyph_test.pscontextglyph.rule set 0.rule order 1 - DEBUG - Action count is 2
        pscontextglyph_test.pscontextglyph.rule set 0.rule order 1.pslookupgroup - DEBUG - Walker has 20 bytes remaining.
        pscontextglyph_test.pscontextglyph.rule set 0.rule order 1.pslookupgroup.[0].pslookuprecord - DEBUG - Walker has 20 remaining bytes.
        pscontextglyph_test.pscontextglyph.rule set 0.rule order 1.pslookupgroup.[0].pslookuprecord - DEBUG - Sequence index is 0
        pscontextglyph_test.pscontextglyph.rule set 0.rule order 1.pslookupgroup.[0].pslookuprecord - DEBUG - Lookup index is 11
        pscontextglyph_test.pscontextglyph.rule set 0.rule order 1.pslookupgroup.[1].pslookuprecord - DEBUG - Walker has 16 remaining bytes.
        pscontextglyph_test.pscontextglyph.rule set 0.rule order 1.pslookupgroup.[1].pslookuprecord - DEBUG - Sequence index is 1
        pscontextglyph_test.pscontextglyph.rule set 0.rule order 1.pslookupgroup.[1].pslookuprecord - DEBUG - Lookup index is 25
        pscontextglyph_test.pscontextglyph.rule set 1 - DEBUG - Rule count is 1
        pscontextglyph_test.pscontextglyph.rule set 1.rule order 0 - DEBUG - Glyph count is 3
        pscontextglyph_test.pscontextglyph.rule set 1.rule order 0 - DEBUG - Action count is 1
        pscontextglyph_test.pscontextglyph.rule set 1.rule order 0.pslookupgroup - DEBUG - Walker has 4 bytes remaining.
        pscontextglyph_test.pscontextglyph.rule set 1.rule order 0.pslookupgroup.[0].pslookuprecord - DEBUG - Walker has 4 remaining bytes.
        pscontextglyph_test.pscontextglyph.rule set 1.rule order 0.pslookupgroup.[0].pslookuprecord - DEBUG - Sequence index is 2
        pscontextglyph_test.pscontextglyph.rule set 1.rule order 0.pslookupgroup.[0].pslookuprecord - DEBUG - Lookup index is 10
        """

        assert 'fixupList' in kwArgs
        fixupList = kwArgs.pop('fixupList')
        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("pscontextglyph")

        logger.debug(
            ('V0001', (w.length(), ), "Walker has %d bytes remaining."))

        if w.length() < 6:
            logger.error(('V0004', (), "Insufficient bytes."))
            return None

        format = w.unpack("H")

        if format != 1:
            logger.error(
                ('V0002', (format, ), "Expected format 1, but got format %d."))

            return None

        logger.debug(('Vxxxx', (), "Format is 1"))

        covTable = coverage.Coverage.fromvalidatedwalker(
            w.subWalker(w.unpack("H")),
            logger=logger,
            **kwArgs)

        if covTable is None:
            return None

        firstGlyphs = sorted(covTable)
        count = w.unpack("H")
        logger.debug(('Vxxxx', (count, ), "RuleSetCount is %d"))

        if count != len(firstGlyphs):
            logger.error((
                'V0350', (),
                "The RuleSetCount does not match the length of the Coverage."))

            return None

        if w.length() < 2 * count:
            logger.error(('V0351', (),
                          "The RuleSet is missing or only partially present."))

            return None

        setOffsets = w.group("H", count)
        logger.debug(('Vxxxx', (setOffsets, ), "RuleSet offsets are %s"))
        r = cls()
        fvw = pslookupgroup.PSLookupGroup.fromvalidatedwalker
        Key = pscontextglyph_key.Key

        for i, setOffset in enumerate(setOffsets):
            subLogger = logger.getChild("rule set %d" % (i, ))
            wSet = w.subWalker(setOffset)

            if wSet.length() < 2:
                subLogger.error(
                    ('V0352', (),
                     "The RuleCount is missing or only partially present."))

                return None

            ruleCount = wSet.unpack("H")
            subLogger.debug(('Vxxxx', (ruleCount, ), "Rule count is %d"))

            if wSet.length() < 2 * ruleCount:
                subLogger.error((
                    'V0353', (),
                    "The Rule offsets are missing or only partially present."))

                return None

            it = enumerate(wSet.group("H", ruleCount))

            for ruleOrder, ruleOffset in it:
                subLogger2 = subLogger.getChild("rule order %d" %
                                                (ruleOrder, ))
                wRule = wSet.subWalker(ruleOffset)

                if wRule.length() < 4:
                    subLogger2.error(
                        ('V0354', (),
                         "Rule is missing or only partially present."))

                    return None

                glyphCount, posCount = wRule.unpack("2H")
                subLogger2.debug(
                    ('Vxxxx', (glyphCount, ), "Glyph count is %d"))
                subLogger2.debug(('Vxxxx', (posCount, ), "Action count is %d"))

                if wRule.length() < 2 * (glyphCount - 1):
                    subLogger2.error(
                        ('V0354', (),
                         "Rule is missing or only partially present."))

                    return None

                key = Key(
                    (firstGlyphs[i], ) + wRule.group("H", glyphCount - 1),
                    ruleOrder=ruleOrder)

                obj = fvw(wRule,
                          count=posCount,
                          fixupList=fixupList,
                          logger=subLogger2,
                          **kwArgs)

                if obj is None:
                    return None

                r[key] = obj

        return r

    @classmethod
    def fromwalker(cls, w, **kwArgs):
        """
        Creates and returns a new PSContextGlyph from the specified walker.
        
        There is one required keyword argument:
        
            fixupList   A list, to which (lookupListIndex, fixupFunc) pairs
                        will be appended. The actual lookup won't be set in the
                        PSLookupRecord until this fixupFunc is called by
                        lookuplist.fromwalker(). The fixup call takes one
                        argument: the Lookup being set into it.
        
        >>> w = writer.LinkedWriter()
        >>> _testingValues[0].buildBinary(w, forGPOS=True)
        >>> ltv = lookup._testingValues
        >>> d = {
        ...   ltv[0].asImmutable(): 10,
        ...   ltv[1].asImmutable(): 11,
        ...   ltv[2].asImmutable(): 25}
        >>> w.addIndexMap("lookupList_GPOS", d)
        >>> s = w.binaryString()
        >>> FL = []
        >>> obj = PSContextGlyph.frombytes(s, fixupList=FL)
        >>> d = {10: ltv[0], 11: ltv[1], 25: ltv[2]}
        >>> for index, func in FL:
        ...     func(d[index])
        >>> obj == _testingValues[0]
        True
        """

        format = w.unpack("H")
        assert format == 1
        covTable = coverage.Coverage.fromwalker(w.subWalker(w.unpack("H")))
        firstGlyphs = sorted(covTable)
        count = w.unpack("H")
        assert count == len(covTable)
        setOffsets = w.group("H", count)
        r = cls()
        fixupList = kwArgs['fixupList']
        f = pslookupgroup.PSLookupGroup.fromwalker
        Key = pscontextglyph_key.Key

        for i, setOffset in enumerate(setOffsets):
            wSet = w.subWalker(setOffset)
            it = enumerate(wSet.group("H", wSet.unpack("H")))

            for ruleOrder, ruleOffset in it:
                wRule = wSet.subWalker(ruleOffset)
                glyphCount, posCount = wRule.unpack("2H")

                key = Key(
                    (firstGlyphs[i], ) + wRule.group("H", glyphCount - 1),
                    ruleOrder=ruleOrder)

                r[key] = f(wRule, count=posCount, fixupList=fixupList)

        return r

    def writeFontWorkerSource(self, s, **kwArgs):
        """
        Writes the contents of the lookup to the provided stream 's'. Uses
        namer.bestNameForGlyphIndex if a namer is provided; otherwise falls
        back to Font Worker glyph index labeling ("# <id>").
        """

        namer = kwArgs.get('namer')
        # fall back to Font Worker "# <index>" labels if no namer was provided
        bnfgi = (namer.bestNameForGlyphIndex if namer is not None
                 else (lambda n: "# %d" % (n,)))

        for k in iter(self):
            v = self[k]
            ctxStr = ", ".join([bnfgi(g) for g in k])

            actionStr = "\t".join([
                "%d, %d" % (vi.sequenceIndex + 1, vi.lookup.sequence)
                for vi in v
            ])

            s.write("glyph\t%s\t%s\n" % (ctxStr, actionStr))
Ejemplo n.º 14
0
 @classmethod
 def fromvalidatedwalker(cls, w, **kwArgs):
     """
     Creates and returns a new PairClasses object from the specified walker,
     with validation of the source data.
     
     >>> s = _testingValues[0].binaryString()
     >>> logger = utilities.makeDoctestLogger("pctest")
     >>> fvb = PairClasses.fromvalidatedbytes
     >>> obj = fvb(s, logger=logger)
     pctest.pairclasses - DEBUG - Walker has 140 remaining bytes.
     pctest.pairclasses.coverage - DEBUG - Walker has 64 remaining bytes.
     pctest.pairclasses.coverage - DEBUG - Format is 1, count is 4
     pctest.pairclasses.coverage - DEBUG - Raw data are [5, 6, 7, 15]
     pctest.pairclasses.first.classDef - DEBUG - Walker has 52 remaining bytes.
     pctest.pairclasses.first.classDef - DEBUG - ClassDef is format 2.
     pctest.pairclasses.first.classDef - DEBUG - Count is 3
     pctest.pairclasses.first.classDef - DEBUG - Raw data are [(5, 6, 1), (7, 7, 2), (15, 15, 1)]
     pctest.pairclasses.second.classDef - DEBUG - Walker has 30 remaining bytes.
     pctest.pairclasses.second.classDef - DEBUG - ClassDef is format 2.
     pctest.pairclasses.second.classDef - DEBUG - Count is 1
     pctest.pairclasses.second.classDef - DEBUG - Raw data are [(20, 22, 1)]
     pctest.pairclasses.class (0,0).pairvalues - DEBUG - Walker has 124 remaining bytes.
     pctest.pairclasses.class (0,0).pairvalues.value - DEBUG - Walker has 124 remaining bytes.
     pctest.pairclasses.class (0,0).pairvalues.value - DEBUG - Walker has 120 remaining bytes.
     pctest.pairclasses.class (0,1).pairvalues - DEBUG - Walker has 114 remaining bytes.
     pctest.pairclasses.class (0,1).pairvalues.value - DEBUG - Walker has 114 remaining bytes.
     pctest.pairclasses.class (0,1).pairvalues.value - DEBUG - Walker has 110 remaining bytes.
     pctest.pairclasses.class (1,0).pairvalues - DEBUG - Walker has 104 remaining bytes.
     pctest.pairclasses.class (1,0).pairvalues.value - DEBUG - Walker has 104 remaining bytes.
     pctest.pairclasses.class (1,0).pairvalues.value - DEBUG - Walker has 100 remaining bytes.
     pctest.pairclasses.class (1,1).pairvalues - DEBUG - Walker has 94 remaining bytes.
     pctest.pairclasses.class (1,1).pairvalues.value - DEBUG - Walker has 94 remaining bytes.
     pctest.pairclasses.class (1,1).pairvalues.value - DEBUG - Walker has 90 remaining bytes.
     pctest.pairclasses.class (2,0).pairvalues - DEBUG - Walker has 84 remaining bytes.
     pctest.pairclasses.class (2,0).pairvalues.value - DEBUG - Walker has 84 remaining bytes.
     pctest.pairclasses.class (2,0).pairvalues.value.yAdvDevice.device - DEBUG - Walker has 8 remaining bytes.
     pctest.pairclasses.class (2,0).pairvalues.value.yAdvDevice.device - DEBUG - StartSize=12, endSize=18, format=1
     pctest.pairclasses.class (2,0).pairvalues.value.yAdvDevice.device - DEBUG - Data are (35844,)
     pctest.pairclasses.class (2,0).pairvalues.value - DEBUG - Walker has 80 remaining bytes.
     pctest.pairclasses.class (2,1).pairvalues - DEBUG - Walker has 74 remaining bytes.
     pctest.pairclasses.class (2,1).pairvalues.value - DEBUG - Walker has 74 remaining bytes.
     pctest.pairclasses.class (2,1).pairvalues.value.yAdvDevice.device - DEBUG - Walker has 8 remaining bytes.
     pctest.pairclasses.class (2,1).pairvalues.value.yAdvDevice.device - DEBUG - StartSize=12, endSize=18, format=1
     pctest.pairclasses.class (2,1).pairvalues.value.yAdvDevice.device - DEBUG - Data are (35844,)
     pctest.pairclasses.class (2,1).pairvalues.value - DEBUG - Walker has 70 remaining bytes.
     pctest.pairclasses.class (2,1).pairvalues.value.xPlaDevice.device - DEBUG - Walker has 8 remaining bytes.
     pctest.pairclasses.class (2,1).pairvalues.value.xPlaDevice.device - DEBUG - StartSize=12, endSize=18, format=1
     pctest.pairclasses.class (2,1).pairvalues.value.xPlaDevice.device - DEBUG - Data are (35844,)
     pctest.pairclasses.class (2,1).pairvalues.value.yPlaDevice.device - DEBUG - Walker has 20 remaining bytes.
     pctest.pairclasses.class (2,1).pairvalues.value.yPlaDevice.device - DEBUG - StartSize=12, endSize=20, format=2
     pctest.pairclasses.class (2,1).pairvalues.value.yPlaDevice.device - DEBUG - Data are (48624, 32, 12288)
     pctest.pairclasses - INFO - The following glyphs appear in non-first ClassDefs only, and are not in the Coverage: [20, 21, 22]
     pctest.pairclasses - INFO - The following glyphs appear in the Coverage and in only the first ClassDef: [5, 6, 7, 15]
     """
     
     logger = kwArgs.pop('logger', logging.getLogger())
     logger = logger.getChild("pairclasses")
     
     logger.debug((
       'V0001',
       (w.length(),),
       "Walker has %d remaining bytes."))
     
     if w.length() < 16:
         logger.error(('V0004', (), "Insufficient bytes."))
         return None
     
     format = w.unpack("H")
     
     if format != 2:
         logger.error((
           'V0002',
           (format,),
           "Expected format 2, but got %d instead."))
         
         return None
     
     covOffset = w.unpack("H")
     
     if not covOffset:
         logger.error((
           'V0330',
           (),
           "The offset to the Coverage is zero."))
         
         return None
     
     covTable = coverage.Coverage.fromvalidatedwalker(
       w.subWalker(covOffset),
       logger = logger)
     
     if covTable is None:
         return None
     
     vf1, vf2 = w.unpack("2H")
     
     if vf1 & 0xFF00:
         logger.error((
           'E4110',
           (vf1,),
           "Reserved bits are set in the 0x%04X ValueFormat1 field."))
         
         return None
     
     if vf2 & 0xFF00:
         logger.error((
           'E4110',
           (vf2,),
           "Reserved bits are set in the 0x%04X ValueFormat2 field."))
         
         return None
     
     if not (vf1 or vf2):
         logger.warning((
           'V0328',
           (),
           "Both ValueFormat1 and ValueFormat2 are zero, so there is "
           "no data to unpack."))
         
         return None
     
     cdOffset1, cdOffset2, count1, count2 = w.unpack("4H")
     r = cls()
     fvw = classdef.ClassDef.fromvalidatedwalker
     subLogger = logger.getChild("first")
     r.classDef1 = fvw(w.subWalker(cdOffset1), logger=subLogger)
     
     if r.classDef1 is None:
         return None
     
     if count1 != len({0} | set(r.classDef1.values())):
         if count1 == (utilities.safeMax(r.classDef1.values()) + 1):
             logger.warning((
               'V0911',
               (),
               "The values in ClassDef1 are sparse; they should be dense."))
         
         else:
             logger.error((
               'V0332',
               (),
               "The Class1Count does not match the values in ClassDef1."))
         
             return None
     
     subLogger = logger.getChild("second")
     r.classDef2 = fvw(w.subWalker(cdOffset2), logger=subLogger)
     
     if r.classDef2 is None:
         return None
     
     if count2 != len({0} | set(r.classDef2.values())):
         if count2 == (utilities.safeMax(r.classDef2.values()) + 1):
             logger.warning((
               'V0911',
               (),
               "The values in ClassDef2 are sparse; they should be dense."))
         
         else:
             logger.error((
               'V0332',
               (),
               "The Class2Count does not match the values in ClassDef2."))
         
             return None
     
     fvw = pairvalues.PairValues.fromvalidatedwalker
     Key = pairclasses_key.Key
     unusedFirst = set(range(1, count1))
     unusedSecond = set(range(1, count2))
     
     for c1 in range(count1):
         for c2 in range(count2):
             subLogger = logger.getChild("class (%d,%d)" % (c1, c2))
             
             obj = fvw(
               w,
               posBase = w,
               valueFormatFirst = vf1,
               valueFormatSecond = vf2,
               logger = subLogger,
               **kwArgs)
             
             if obj is None:
                 return None
             
             if obj:  # only bother with nonzero PairValues
                 r[Key([c1, c2])] = obj
                 unusedFirst.discard(c1)
                 unusedSecond.discard(c2)
             
             elif c1 and c2:
                 logger.info((
                   'V0333',
                   (c1, c2),
                   "The PairValue for class (%d,%d) has no effect."))
     
     if unusedFirst:
         logger.warning((
           'V1073',
           (sorted(unusedFirst),),
           "The following classes are defined for ClassDef1 but "
           "no rules using them are present: %s"))
     
     if unusedSecond:
         logger.warning((
           'V1073',
           (sorted(unusedSecond),),
           "The following classes are defined for ClassDef2 but "
           "no rules using them are present: %s"))
     
      # Now that we have the keys, reconcile the Coverage against the ClassDefs.
     
     okToProceed, covSet = coverageutilities.reconcile(
       covTable,
       r,
       [r.classDef1, r.classDef2],
       logger = logger,
       **kwArgs)
     
     r.coverageExtras.update(covSet - set(r.classDef1))
     
     if not okToProceed:
         r.clear()
     
     return r
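
# --- Editor's note: illustrative sketch, not part of the library code above. ---
# A minimal illustration of the fixed PairPos format 2 header that the
# validating reader above walks field by field (format, Coverage offset, the
# two ValueFormats, the two ClassDef offsets, and the two class counts). It
# uses only the standard 'struct' module and assumes big-endian OpenType data;
# the helper name is hypothetical.
import struct

def sketch_parse_pairpos2_header(data):
    # >8H: posFormat, coverageOffset, valueFormat1, valueFormat2,
    #      classDef1Offset, classDef2Offset, class1Count, class2Count
    names = (
        "posFormat", "coverageOffset", "valueFormat1", "valueFormat2",
        "classDef1Offset", "classDef2Offset", "class1Count", "class2Count")
    header = dict(zip(names, struct.unpack_from(">8H", data, 0)))
    assert header["posFormat"] == 2, "expected a format 2 subtable"
    # Reserved high bits of either ValueFormat are an error, as validated above.
    assert not (header["valueFormat1"] & 0xFF00)
    assert not (header["valueFormat2"] & 0xFF00)
    return header

demo = struct.pack(">8H", 2, 0x000E, 0x0004, 0x0000, 0x0020, 0x0030, 3, 2)
assert sketch_parse_pairpos2_header(demo)["class1Count"] == 3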
Ejemplo n.º 15
0
    def fromwalker(cls, w, **kwArgs):
        """
        Creates and returns a new Format4 object from the specified walker.
        
        >>> obj = _makePointExample()
        >>> k = {'coverage': obj.coverage, 'tupleIndex': obj.tupleIndex}
        >>> bs = obj.binaryString()
        >>> obj2 = Format4.frombytes(bs, **k)
        >>> obj == obj2
        True
        
        >>> obj = _makeAnchorExample()
        >>> bs = obj.binaryString()
        >>> obj2 = Format4.frombytes(bs, **k)
        >>> obj == obj2
        True
        
        >>> obj = _makeCoordExample()
        >>> bs = obj.binaryString()
        >>> obj2 = Format4.frombytes(bs, **k)
        >>> obj == obj2
        True
        """

        numClasses, oCT, oSA, oET, kind, oVT = w.unpack("4LBT")
        kind >>= 6

        if kind not in {0, 1, 2}:
            raise ValueError("Unknown action type mask!")

        t = (oCT, oSA, oET, oVT)
        wCT, wSA, wET, wVT = stutils.offsetsToSubWalkers(w.subWalker(0), *t)
        wETCopy = wET.subWalker(0, relative=True)
        v = wETCopy.unpackRest("3H", strict=False)
        wET = wET.subWalker(0, relative=True, newLimit=6 * len(v))
        numStates = max(2, 1 + utilities.safeMax(x[0] for x in v))
        nsObj = namestash.NameStash.readormake(w, t, numStates, numClasses)
        stateNames = nsObj.allStateNames()
        classNames = nsObj.allClassNames()
        fw = classtable.ClassTable.fromwalker
        classTable = fw(wCT, classNames=classNames)
        kwArgs.pop('classTable', None)

        r = cls({},
                classTable=classTable,
                **utilities.filterKWArgs(cls, kwArgs))

        wVTCopy = wVT.subWalker(0, relative=True)
        v = wVTCopy.unpackRest(("2H" if kind < 2 else "4h"), strict=False)

        wVT = wVT.subWalker(0,
                            relative=True,
                            newLimit=(4 if kind < 2 else 8) * len(v))

        v = _actionClasses[kind].groupfromwalker(wVT, **kwArgs)
        actionMap = dict(enumerate(v))
        gfw = entry4.Entry.groupfromwalker
        entries = gfw(wET, actionMap=actionMap, stateNames=stateNames)
        fw = staterow.StateRow.fromwalker

        for stateName in stateNames:
            r[stateName] = fw(wSA, classNames=classNames, entries=entries)

        return r
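
# --- Editor's note: illustrative sketch, not part of the library code above. ---
# How the state count is inferred above: each 6-byte entry-table record begins
# with a new-state index, so the state count is one more than the largest index
# referenced, with a floor of 2 for the two predefined AAT states (start of
# text, start of line). The helper name is hypothetical.
def sketch_infer_num_states(entries):
    """'entries' is an iterable of (newState, flags, valueIndex) triples."""
    highest = max((e[0] for e in entries), default=-1)
    return max(2, 1 + highest)

assert sketch_infer_num_states([(0, 0, 0), (3, 0x8000, 1)]) == 4
assert sketch_infer_num_states([]) == 2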
Ejemplo n.º 16
0
    def buildBinary(self, w, **kwArgs):
        """
        Adds the binary data to the specified LinkedWriter.
        
        >>> obj, ed = _makeTest()
        >>> utilities.hexdump(obj.binaryString())
               0 | 0001 000C 0016 0002  001E 0030 0002 0001 |...........0....|
              10 | 000C 000F 0000 0001  0002 0028 002D 0004 |...........(.-..|
              20 | 0000 001C 0000 0022  0001 0028 0001 002E |......."...(....|
              30 | 0002 0022 0028 002E  0034 0001 00FA 006E |...".(...4.....n|
              40 | 0001 015E 0064 0001  015E FFEC 0001 00FF |...^.d...^......|
              50 | 0005 0001 012C 06A4  0001 0122 FFB5 0001 |.....,....."....|
              60 | 01C2 0640 0001 01C2  FFE2                |...@......      |
        """

        if 'stakeValue' in kwArgs:
            stakeValue = kwArgs.pop('stakeValue')
            w.stakeCurrentWithValue(stakeValue)
        else:
            stakeValue = w.stakeCurrent()

        markBackMap, baseBackMap = {}, {}

        markCovTable = coverage.Coverage.fromglyphset(self.mark,
                                                      backMap=markBackMap)

        markCovStake = w.getNewStake()

        baseCovTable = coverage.Coverage.fromglyphset(self.base,
                                                      backMap=baseBackMap)

        baseCovStake = w.getNewStake()

        markClassCount = 1 + utilities.safeMax(obj.markClass
                                               for obj in self.mark.values())

        w.add("H", 1)  # format 1
        w.addUnresolvedOffset("H", stakeValue, markCovStake)
        w.addUnresolvedOffset("H", stakeValue, baseCovStake)
        w.add("H", markClassCount)
        markStake = w.getNewStake()
        baseStake = w.getNewStake()
        w.addUnresolvedOffset("H", stakeValue, markStake)
        w.addUnresolvedOffset("H", stakeValue, baseStake)

        # Resolve the references
        markCovTable.buildBinary(w, stakeValue=markCovStake, **kwArgs)
        baseCovTable.buildBinary(w, stakeValue=baseCovStake, **kwArgs)
        orderedKeys = []
        anchorPool = {}
        devicePool = {}

        d = {
            'anchorPool': anchorPool,
            'devicePool': devicePool,
            'orderedKeys': orderedKeys
        }

        self.mark.buildBinary(w, stakeValue=markStake, **d)
        self.base.buildBinary(w, stakeValue=baseStake, **d)
        kwArgs.pop('devicePool', None)

        for key in orderedKeys:
            obj, objStake = anchorPool[key]

            obj.buildBinary(w,
                            stakeValue=objStake,
                            devicePool=devicePool,
                            **kwArgs)

        it = sorted((obj.asImmutable(), obj, stake)
                    for obj, stake in devicePool.values())

        for immut, obj, objStake in it:
            obj.buildBinary(w, stakeValue=objStake, **kwArgs)
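
# --- Editor's note: illustrative sketch, not the library's LinkedWriter API. ---
# The method above leans on "stakes" and unresolved offsets: placeholder offsets
# are emitted while the parent table is written, the positions of child tables
# are recorded as they are written, and the placeholders are patched once every
# position is known. The tiny class below (entirely hypothetical) shows that
# two-pass idea in miniature.
import struct

class TinyOffsetWriter:
    def __init__(self):
        self.buf = bytearray()
        self.stakes = {}    # stake name -> byte position
        self.pending = []   # (patch position, base stake, target stake)

    def add_u16(self, value):
        self.buf += struct.pack(">H", value)

    def stake_here(self, name):
        self.stakes[name] = len(self.buf)

    def add_unresolved_offset(self, base, target):
        self.pending.append((len(self.buf), base, target))
        self.buf += b"\x00\x00"  # placeholder, patched later

    def binary_string(self):
        for pos, base, target in self.pending:
            offset = self.stakes[target] - self.stakes[base]
            struct.pack_into(">H", self.buf, pos, offset)
        return bytes(self.buf)

tw = TinyOffsetWriter()
tw.stake_here("table")
tw.add_u16(1)                                   # format
tw.add_unresolved_offset("table", "coverage")   # offset resolved at the end
tw.stake_here("coverage")
tw.add_u16(0xBEEF)                              # stand-in for a child table
assert tw.binary_string() == b"\x00\x01\x00\x04\xbe\xef"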
Ejemplo n.º 17
0
class PSChainGlyph(dict, metaclass=mapmeta.FontDataMetaclass):
    """
    These are objects representing chained contextual (glyph) mappings. They
    are dicts mapping Keys to PSLookupGroups.
    
    >>> _testingValues[0].pprint(namer=namer.testingNamer())
    ((xyz86, xyz87), (xyz26, xyz51), ()), Relative order = 0:
      Effect #1:
        Sequence index: 1
        Lookup:
          Subtable 0 (Pair (class) positioning table):
            (First class 1, Second class 1):
              Second adjustment:
                FUnit adjustment to origin's x-coordinate: -10
            (First class 2, Second class 0):
              First adjustment:
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
            (First class 2, Second class 1):
              First adjustment:
                FUnit adjustment to origin's x-coordinate: 30
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
              Second adjustment:
                Device for origin's x-coordinate:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
                Device for origin's y-coordinate:
                  Tweak at 12 ppem: -5
                  Tweak at 13 ppem: -3
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 2
                  Tweak at 20 ppem: 3
            Class definition table for first glyph:
              xyz16: 1
              xyz6: 1
              xyz7: 1
              xyz8: 2
            Class definition table for second glyph:
              xyz21: 1
              xyz22: 1
              xyz23: 1
          Lookup flags:
            Right-to-left for Cursive: True
            Ignore base glyphs: False
            Ignore ligatures: False
            Ignore marks: False
          Sequence order (lower happens first): 2
    ((), (xyz26, xyz41), (xyz81,)), Relative order = 1:
      Effect #1:
        Sequence index: 0
        Lookup:
          Subtable 0 (Pair (glyph) positioning table):
            (xyz11, xyz21):
              First adjustment:
                FUnit adjustment to origin's x-coordinate: 30
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
              Second adjustment:
                Device for origin's x-coordinate:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
                Device for origin's y-coordinate:
                  Tweak at 12 ppem: -5
                  Tweak at 13 ppem: -3
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 2
                  Tweak at 20 ppem: 3
            (xyz9, xyz16):
              Second adjustment:
                FUnit adjustment to origin's x-coordinate: -10
            (xyz9, xyz21):
              First adjustment:
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
          Lookup flags:
            Right-to-left for Cursive: False
            Ignore base glyphs: True
            Ignore ligatures: False
            Ignore marks: False
          Sequence order (lower happens first): 1
      Effect #2:
        Sequence index: 1
        Lookup:
          Subtable 0 (Pair (class) positioning table):
            (First class 1, Second class 1):
              Second adjustment:
                FUnit adjustment to origin's x-coordinate: -10
            (First class 2, Second class 0):
              First adjustment:
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
            (First class 2, Second class 1):
              First adjustment:
                FUnit adjustment to origin's x-coordinate: 30
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
              Second adjustment:
                Device for origin's x-coordinate:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
                Device for origin's y-coordinate:
                  Tweak at 12 ppem: -5
                  Tweak at 13 ppem: -3
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 2
                  Tweak at 20 ppem: 3
            Class definition table for first glyph:
              xyz16: 1
              xyz6: 1
              xyz7: 1
              xyz8: 2
            Class definition table for second glyph:
              xyz21: 1
              xyz22: 1
              xyz23: 1
          Lookup flags:
            Right-to-left for Cursive: True
            Ignore base glyphs: False
            Ignore ligatures: False
            Ignore marks: False
          Sequence order (lower happens first): 2
    ((), (xyz31, xyz11, xyz31), ()), Relative order = 0:
      Effect #1:
        Sequence index: 2
        Lookup:
          Subtable 0 (Single positioning table):
            xyz11:
              FUnit adjustment to origin's x-coordinate: -10
          Lookup flags:
            Right-to-left for Cursive: False
            Ignore base glyphs: False
            Ignore ligatures: False
            Ignore marks: False
            Mark attachment type: 4
          Sequence order (lower happens first): 0
    """

    #
    # Class definition variables
    #

    mapSpec = dict(
        item_followsprotocol=True,
        item_pprintlabelpresortfunc=_keySort,
        item_renumberdeepkeys=True,
        item_usenamerforstr=True,
        map_compactiblefunc=(lambda d, k, **kw: False),
        #map_compactremovesfalses = True,
        map_maxcontextfunc=(lambda d: utilities.safeMax(len(k) for k in d)),
        map_validatefunc_partial=_validate)

    #
    # Methods
    #

    def __iter__(self):
        """
        We provide a custom iterator to make sure the ruleOrder is correctly
        being followed.
        
        >>> for k in _testingValues[0]: print(k)
        ((85, 86), (25, 50), ()), Relative order = 0
        ((), (25, 40), (80,)), Relative order = 1
        ((), (30, 10, 30), ()), Relative order = 0
        """

        v = list(super(PSChainGlyph, self).__iter__())
        return iter(sorted(v, key=_keySort))

    def buildBinary(self, w, **kwArgs):
        """
        Adds the binary data for the PSChainGlyph to the specified
        LinkedWriter.
        
        NOTE! There will be unresolved lookup list indices in the LinkedWriter
        after this method is finished. The caller (or somewhere higher up) is
        responsible for adding an index map to the LinkedWriter with the tag
        "lookupList" before the LinkedWriter's binaryString() method is called.
        
        >>> w = writer.LinkedWriter()
        >>> _testingValues[0].buildBinary(w, forGPOS=True)
        >>> ltv = lookup._testingValues
        >>> w.addIndexMap(
        ...   "lookupList_GPOS",
        ...   { ltv[0].asImmutable(): 10,
        ...     ltv[1].asImmutable(): 11,
        ...     ltv[2].asImmutable(): 25})
        >>> utilities.hexdump(w.binaryString())
               0 | 0001 000A 0002 0012  0018 0001 0002 0019 |................|
              10 | 001E 0002 000A 001C  0001 002A 0002 0056 |...........*...V|
              20 | 0055 0002 0032 0000  0001 0001 0019 0000 |.U...2..........|
              30 | 0002 0028 0001 0050  0002 0000 000B 0001 |...(...P........|
              40 | 0019 0000 0003 000A  001E 0000 0001 0002 |................|
              50 | 000A                                     |..              |
        """

        if 'stakeValue' in kwArgs:
            stakeValue = kwArgs.pop('stakeValue')
            w.stakeCurrentWithValue(stakeValue)
        else:
            stakeValue = w.stakeCurrent()

        w.add("H", 1)  # format
        firstGlyphs = sorted(set(k[1][0] for k in self))
        covTable = coverage.Coverage.fromglyphset(firstGlyphs)
        covStake = w.getNewStake()
        w.addUnresolvedOffset("H", stakeValue, covStake)
        w.add("H", len(firstGlyphs))

        setStakes = dict(
            (firstGlyph, w.getNewStake()) for firstGlyph in firstGlyphs)

        for firstGlyph in firstGlyphs:
            w.addUnresolvedOffset("H", stakeValue, setStakes[firstGlyph])

        covTable.buildBinary(w, stakeValue=covStake)
        orderings = {}
        ruleStakes = {}

        for firstGlyph in firstGlyphs:
            setStake = setStakes[firstGlyph]
            w.stakeCurrentWithValue(setStake)

            o = orderings[firstGlyph] = sorted(
                (k.ruleOrder, k[1], k) for k in self if k[1][0] == firstGlyph)

            w.add("H", len(o))

            for order, ignore, key in o:
                ruleStake = ruleStakes[(firstGlyph, order)] = w.getNewStake()
                w.addUnresolvedOffset("H", setStake, ruleStake)

        for firstGlyph in firstGlyphs:
            for order, ignore, key in orderings[firstGlyph]:
                w.stakeCurrentWithValue(ruleStakes[(firstGlyph, order)])
                obj = self[key]
                w.add("H", len(key[0]))
                w.addGroup("H", reversed(key[0]))
                w.add("H", len(key[1]))
                w.addGroup("H", key[1][1:])
                w.add("H", len(key[2]))
                w.addGroup("H", key[2])
                w.add("H", len(obj))
                obj.buildBinary(w, **kwArgs)

    @classmethod
    def fromValidatedFontWorkerSource(cls, fws, **kwArgs):
        """
        Creates and returns a new PSChainGlyph from the specified
        FontWorkerSource, doing source validation.

        >>> logger = utilities.makeDoctestLogger("FW_test")
        >>> obj = PSChainGlyph.fromValidatedFontWorkerSource(
        ...   _test_FW_fws2,
        ...   namer = _test_FW_namer,
        ...   forGPOS = True,
        ...   lookupDict = _test_FW_lookupDict,
        ...   logger=logger,
        ...   editor={})
        FW_test.pschainglyph - WARNING - line 2 -- unexpected token: foo
        FW_test.pschainglyph - WARNING - line 3 -- glyph 'X' not found
        FW_test.pschainglyph - WARNING - line 3 -- glyph 'Y' not found
        FW_test.pschainglyph - WARNING - line 3 -- glyph 'Z' not found
        FW_test.pschainglyph - WARNING - line 5 -- context '(A,B), (C,D,E), (F,G)' previously defined at line 4
        FW_test.pschainglyph - WARNING - line 0 -- did not find matching 'subtable end/lookup end'
        >>> obj.pprint()
        Key((GlyphTuple((2, 1)), GlyphTuple((3, 4, 5)), GlyphTuple((6, 7))), ruleOrder=0):
          Effect #1:
            Sequence index: 0
            Lookup:
              3:
                FUnit adjustment to horizontal advance: 678
          Effect #2:
            Sequence index: 0
            Lookup:
              3:
                FUnit adjustment to horizontal advance: 901
        """

        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("pschainglyph")

        terminalStrings = ('subtable end', 'lookup end')
        namer = kwArgs['namer']
        startingLineNumber = fws.lineNumber

        ruleOrders = {}
        lookupGroups = {}
        stringKeys = {}

        GT = pschainglyph_glyphtuple.GlyphTuple
        gIFS = namer.glyphIndexFromString
        bNFGI = namer.bestNameForGlyphIndex

        for line in fws:
            if line in terminalStrings:
                return cls(lookupGroups)

            if len(line) > 0:
                tokens = [x.strip() for x in line.split('\t')]

                if tokens[0].lower() == 'glyph':

                    if len(tokens) < 4:
                        logger.warning(('Vxxxx', (fws.lineNumber, ),
                                        "line %d -- unexpected input"))
                        continue

                    glyphTuples = {}

                    glyphNamesOK = True
                    for i in range(1, 4):
                        if tokens[i].strip():
                            glyphNames = tokens[i].split(',')
                            glyphIndices = [
                                gIFS(t.strip()) for t in glyphNames
                            ]
                            for j in range(len(glyphIndices)):
                                if glyphIndices[j] is None:
                                    glyphNamesOK = False
                                    logger.warning(
                                        ('V0956', (fws.lineNumber,
                                                   glyphNames[j]),
                                         "line %d -- glyph '%s' not found"))
                        else:
                            glyphIndices = []

                        glyphTuples[i] = glyphIndices

                    if glyphNamesOK:
                        glyphTuples[1].reverse()
                        glyphTuple1 = GT(glyphTuples[1])
                        glyphTuple2 = GT(glyphTuples[2])
                        glyphTuple3 = GT(glyphTuples[3])

                        lookupList = []

                        for effect in tokens[4:]:
                            effectTokens = [
                                x.strip() for x in effect.split(',')]
                            sequenceIndex = int(effectTokens[0]) - 1
                            lookupName = effectTokens[1]

                            lookupList.append(
                                pslookuprecord.PSLookupRecord(
                                    sequenceIndex,
                                    lookup.Lookup.fromValidatedFontWorkerSource(
                                        fws,
                                        lookupName,
                                        logger=logger,
                                        **kwArgs)))

                        stringKey = "(%s), (%s), (%s)" % (",".join(
                            [bNFGI(gi) for gi in glyphTuple1[::-1]]), ",".join(
                                [bNFGI(gi) for gi in glyphTuple2]), ",".join(
                                    [bNFGI(gi) for gi in glyphTuple3]))

                        if stringKey in stringKeys:
                            logger.warning(('Vxxxx', (
                                fws.lineNumber, stringKey,
                                stringKeys[stringKey]
                            ), "line %d -- context '%s' previously defined at line %d"
                                            ))
                        else:
                            stringKeys[stringKey] = fws.lineNumber

                            key = pschainglyph_key.Key(
                                [glyphTuple1, glyphTuple2, glyphTuple3])

                            ruleOrder = ruleOrders.get(glyphTuple2[0], 0)
                            key.ruleOrder = ruleOrder
                            ruleOrders[glyphTuple2[0]] = ruleOrder + 1
                            lookupGroup = pslookupgroup.PSLookupGroup(
                                lookupList)
                            lookupGroups[key] = lookupGroup

                else:
                    logger.warning(('V0960', (fws.lineNumber, tokens[0]),
                                    'line %d -- unexpected token: %s'))

        logger.warning(
            ('V0958', (startingLineNumber, "/".join(terminalStrings)),
             "line %d -- did not find matching '%s'"))

        return cls(lookupGroups)

    @classmethod
    def fromvalidatedwalker(cls, w, **kwArgs):
        """
        Creates and returns a new PSChainGlyph object from the specified
        walker, doing source validation.
        
        >>> w = writer.LinkedWriter()
        >>> _testingValues[0].buildBinary(w, forGPOS=True)
        >>> ltv = lookup._testingValues
        >>> d = {
        ...   ltv[0].asImmutable(): 10,
        ...   ltv[1].asImmutable(): 11,
        ...   ltv[2].asImmutable(): 25}
        >>> w.addIndexMap("lookupList_GPOS", d)
        >>> s = w.binaryString()
        >>> FL = []
        >>> logger = utilities.makeDoctestLogger("pschainglyph_test")
        >>> fvb = PSChainGlyph.fromvalidatedbytes
        >>> obj = fvb(s, fixupList=FL, logger=logger)
        pschainglyph_test.pschainglyph - DEBUG - Walker has 82 remaining bytes.
        pschainglyph_test.pschainglyph - DEBUG - Format is 1
        pschainglyph_test.pschainglyph - DEBUG - Coverage offset is 10, and set count is 2
        pschainglyph_test.pschainglyph.coverage - DEBUG - Walker has 72 remaining bytes.
        pschainglyph_test.pschainglyph.coverage - DEBUG - Format is 1, count is 2
        pschainglyph_test.pschainglyph.coverage - DEBUG - Raw data are [25, 30]
        pschainglyph_test.pschainglyph - DEBUG - Set offsets are (18, 24)
        pschainglyph_test.pschainglyph.first glyph 25 - DEBUG - Rule count is 2
        pschainglyph_test.pschainglyph.first glyph 25 - DEBUG - Rule offsets are (10, 28)
        pschainglyph_test.pschainglyph.first glyph 25.rule order 0 - DEBUG - Backtrack count is 2
        pschainglyph_test.pschainglyph.first glyph 25.rule order 0 - DEBUG - Backtrack glyphs (reversed) are (85, 86)
        pschainglyph_test.pschainglyph.first glyph 25.rule order 0 - DEBUG - Input count is 2
        pschainglyph_test.pschainglyph.first glyph 25.rule order 0 - DEBUG - Input glyphs are (25, 50)
        pschainglyph_test.pschainglyph.first glyph 25.rule order 0 - DEBUG - Lookahead count is 0
        pschainglyph_test.pschainglyph.first glyph 25.rule order 0 - DEBUG - Lookahead glyphs are ()
        pschainglyph_test.pschainglyph.first glyph 25.rule order 0 - DEBUG - Action count is 1
        pschainglyph_test.pschainglyph.first glyph 25.rule order 0.pslookupgroup - DEBUG - Walker has 40 bytes remaining.
        pschainglyph_test.pschainglyph.first glyph 25.rule order 0.pslookupgroup.[0].pslookuprecord - DEBUG - Walker has 40 remaining bytes.
        pschainglyph_test.pschainglyph.first glyph 25.rule order 0.pslookupgroup.[0].pslookuprecord - DEBUG - Sequence index is 1
        pschainglyph_test.pschainglyph.first glyph 25.rule order 0.pslookupgroup.[0].pslookuprecord - DEBUG - Lookup index is 25
        pschainglyph_test.pschainglyph.first glyph 25.rule order 1 - DEBUG - Backtrack count is 0
        pschainglyph_test.pschainglyph.first glyph 25.rule order 1 - DEBUG - Backtrack glyphs (reversed) are ()
        pschainglyph_test.pschainglyph.first glyph 25.rule order 1 - DEBUG - Input count is 2
        pschainglyph_test.pschainglyph.first glyph 25.rule order 1 - DEBUG - Input glyphs are (25, 40)
        pschainglyph_test.pschainglyph.first glyph 25.rule order 1 - DEBUG - Lookahead count is 1
        pschainglyph_test.pschainglyph.first glyph 25.rule order 1 - DEBUG - Lookahead glyphs are (80,)
        pschainglyph_test.pschainglyph.first glyph 25.rule order 1 - DEBUG - Action count is 2
        pschainglyph_test.pschainglyph.first glyph 25.rule order 1.pslookupgroup - DEBUG - Walker has 24 bytes remaining.
        pschainglyph_test.pschainglyph.first glyph 25.rule order 1.pslookupgroup.[0].pslookuprecord - DEBUG - Walker has 24 remaining bytes.
        pschainglyph_test.pschainglyph.first glyph 25.rule order 1.pslookupgroup.[0].pslookuprecord - DEBUG - Sequence index is 0
        pschainglyph_test.pschainglyph.first glyph 25.rule order 1.pslookupgroup.[0].pslookuprecord - DEBUG - Lookup index is 11
        pschainglyph_test.pschainglyph.first glyph 25.rule order 1.pslookupgroup.[1].pslookuprecord - DEBUG - Walker has 20 remaining bytes.
        pschainglyph_test.pschainglyph.first glyph 25.rule order 1.pslookupgroup.[1].pslookuprecord - DEBUG - Sequence index is 1
        pschainglyph_test.pschainglyph.first glyph 25.rule order 1.pslookupgroup.[1].pslookuprecord - DEBUG - Lookup index is 25
        pschainglyph_test.pschainglyph.first glyph 30 - DEBUG - Rule count is 1
        pschainglyph_test.pschainglyph.first glyph 30 - DEBUG - Rule offsets are (42,)
        pschainglyph_test.pschainglyph.first glyph 30.rule order 0 - DEBUG - Backtrack count is 0
        pschainglyph_test.pschainglyph.first glyph 30.rule order 0 - DEBUG - Backtrack glyphs (reversed) are ()
        pschainglyph_test.pschainglyph.first glyph 30.rule order 0 - DEBUG - Input count is 3
        pschainglyph_test.pschainglyph.first glyph 30.rule order 0 - DEBUG - Input glyphs are (30, 10, 30)
        pschainglyph_test.pschainglyph.first glyph 30.rule order 0 - DEBUG - Lookahead count is 0
        pschainglyph_test.pschainglyph.first glyph 30.rule order 0 - DEBUG - Lookahead glyphs are ()
        pschainglyph_test.pschainglyph.first glyph 30.rule order 0 - DEBUG - Action count is 1
        pschainglyph_test.pschainglyph.first glyph 30.rule order 0.pslookupgroup - DEBUG - Walker has 4 bytes remaining.
        pschainglyph_test.pschainglyph.first glyph 30.rule order 0.pslookupgroup.[0].pslookuprecord - DEBUG - Walker has 4 remaining bytes.
        pschainglyph_test.pschainglyph.first glyph 30.rule order 0.pslookupgroup.[0].pslookuprecord - DEBUG - Sequence index is 2
        pschainglyph_test.pschainglyph.first glyph 30.rule order 0.pslookupgroup.[0].pslookuprecord - DEBUG - Lookup index is 10
        
        >>> fvb(s[:20], fixupList=FL, logger=logger)
        pschainglyph_test.pschainglyph - DEBUG - Walker has 20 remaining bytes.
        pschainglyph_test.pschainglyph - DEBUG - Format is 1
        pschainglyph_test.pschainglyph - DEBUG - Coverage offset is 10, and set count is 2
        pschainglyph_test.pschainglyph.coverage - DEBUG - Walker has 10 remaining bytes.
        pschainglyph_test.pschainglyph.coverage - DEBUG - Format is 1, count is 2
        pschainglyph_test.pschainglyph.coverage - DEBUG - Raw data are [25, 30]
        pschainglyph_test.pschainglyph - DEBUG - Set offsets are (18, 24)
        pschainglyph_test.pschainglyph.first glyph 25 - DEBUG - Rule count is 2
        pschainglyph_test.pschainglyph.first glyph 25 - ERROR - The ChainRule offsets are missing or incomplete.
        """

        assert 'fixupList' in kwArgs
        fixupList = kwArgs.pop('fixupList')
        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("pschainglyph")

        logger.debug(
            ('V0001', (w.length(), ), "Walker has %d remaining bytes."))

        if w.length() < 6:
            logger.error(('V0004', (), "Insufficient bytes."))
            return None

        format = w.unpack("H")

        if format != 1:
            logger.error(
                ('V0002', (format, ), "Expected format 1, but got format %d."))

            return None

        logger.debug(('Vxxxx', (), "Format is 1"))
        covOffset, setCount = w.unpack("2H")

        logger.debug(('Vxxxx', (covOffset, setCount),
                      "Coverage offset is %d, and set count is %d"))

        covTable = coverage.Coverage.fromvalidatedwalker(
            w.subWalker(covOffset), logger=logger)

        if covTable is None:
            return None

        firstGlyphs = sorted(covTable)

        if setCount != len(firstGlyphs):
            logger.error((
                'V0364', (setCount, len(firstGlyphs)),
                "The ChainSetRuleCount is %d, but the Coverage length is %d."))

            return None

        if w.length() < 2 * setCount:
            logger.error(
                ('V0365', (),
                 "The ChainRuleSet offsets are missing or incomplete."))

            return None

        setOffsets = w.group("H", setCount)
        logger.debug(('Vxxxx', (setOffsets, ), "Set offsets are %s"))
        r = cls()
        fvw = pslookupgroup.PSLookupGroup.fromvalidatedwalker
        GlyphTuple = pschainglyph_glyphtuple.GlyphTuple
        Key = pschainglyph_key.Key

        for firstGlyph, setOffset in zip(firstGlyphs, setOffsets):
            wSet = w.subWalker(setOffset)
            subLogger = logger.getChild("first glyph %d" % (firstGlyph, ))

            if wSet.length() < 2:
                subLogger.error(
                    ('V0366', (),
                     "The ChainRuleCount is missing or incomplete."))

                return None

            ruleCount = wSet.unpack("H")
            subLogger.debug(('Vxxxx', (ruleCount, ), "Rule count is %d"))

            if wSet.length() < 2 * ruleCount:
                subLogger.error(
                    ('V0367', (),
                     "The ChainRule offsets are missing or incomplete."))

                return None

            ruleOffsets = wSet.group("H", ruleCount)
            subLogger.debug(('Vxxxx', (ruleOffsets, ), "Rule offsets are %s"))

            for ruleOrder, ruleOffset in enumerate(ruleOffsets):
                wRule = wSet.subWalker(ruleOffset)
                subLogger2 = subLogger.getChild("rule order %d" %
                                                (ruleOrder, ))

                if wRule.length() < 2:
                    subLogger2.error(
                        ('V0368', (),
                         "The BacktrackGlyphCount is missing or incomplete."))

                    return None

                backCount = wRule.unpack("H")

                subLogger2.debug(
                    ('Vxxxx', (backCount, ), "Backtrack count is %d"))

                if wRule.length() < 2 * backCount:
                    subLogger2.error(
                        ('V0369', (),
                         "The Backtrack glyphs are missing or incomplete."))

                    return None

                backTuple = GlyphTuple(reversed(wRule.group("H", backCount)))

                subLogger2.debug(('Vxxxx', (backTuple, ),
                                  "Backtrack glyphs (reversed) are %s"))

                if wRule.length() < 2:
                    subLogger2.error(
                        ('V0370', (),
                         "The InputGlyphCount is missing or incomplete."))

                    return None

                inCount = wRule.unpack("H") - 1  # firstGlyph is already there
                subLogger2.debug(
                    ('Vxxxx', (inCount + 1, ), "Input count is %d"))

                if wRule.length() < 2 * inCount:
                    subLogger2.error(
                        ('V0371', (),
                         "The Input glyphs are missing or incomplete."))

                    return None

                inTuple = GlyphTuple((firstGlyph, ) +
                                     wRule.group("H", inCount))
                subLogger2.debug(('Vxxxx', (inTuple, ), "Input glyphs are %s"))

                if wRule.length() < 2:
                    subLogger2.error(
                        ('V0372', (),
                         "The LookaheadGlyphCount is missing or incomplete."))

                    return None

                lookCount = wRule.unpack("H")

                subLogger2.debug(
                    ('Vxxxx', (lookCount, ), "Lookahead count is %d"))

                if wRule.length() < 2 * lookCount:
                    subLogger2.error(
                        ('V0373', (),
                         "The Lookahead glyphs are missing or incomplete."))

                    return None

                lookTuple = GlyphTuple(wRule.group("H", lookCount))

                subLogger2.debug(
                    ('Vxxxx', (lookTuple, ), "Lookahead glyphs are %s"))

                key = Key((backTuple, inTuple, lookTuple), ruleOrder=ruleOrder)

                if wRule.length() < 2:
                    subLogger2.error(
                        ('V0374', (),
                         "The LookupCount is missing or incomplete."))

                    return None

                posCount = wRule.unpack("H")
                subLogger2.debug(('Vxxxx', (posCount, ), "Action count is %d"))

                obj = fvw(wRule,
                          count=posCount,
                          fixupList=fixupList,
                          logger=subLogger2,
                          **kwArgs)

                if obj is None:
                    return None

                r[key] = obj

        return r

    @classmethod
    def fromwalker(cls, w, **kwArgs):
        """
        Creates and returns a new PSChainGlyph from the specified walker.
        
        There is one required keyword argument:
        
            fixupList   A list, to which (lookupListIndex, fixupFunc) pairs
                        will be appended. The actual lookup won't be set in the
                        PSLookupRecord until this call is made, usually by the
                        top-level GPOS construction logic. The fixup call takes
                        one argument, the Lookup being set into it.
        
        >>> w = writer.LinkedWriter()
        >>> _testingValues[0].buildBinary(w, forGPOS=True)
        >>> ltv = lookup._testingValues
        >>> d = {
        ...   ltv[0].asImmutable(): 10,
        ...   ltv[1].asImmutable(): 11,
        ...   ltv[2].asImmutable(): 25}
        >>> w.addIndexMap("lookupList_GPOS", d)
        >>> s = w.binaryString()
        >>> FL = []
        >>> obj = PSChainGlyph.frombytes(s, fixupList=FL)
        >>> d = {10: ltv[0], 11: ltv[1], 25: ltv[2]}
        >>> for index, func in FL:
        ...     func(d[index])
        >>> obj == _testingValues[0]
        True
        """

        assert 'fixupList' in kwArgs
        format = w.unpack("H")
        assert format == 1
        covTable = coverage.Coverage.fromwalker(w.subWalker(w.unpack("H")))
        firstGlyphs = sorted(covTable)
        setOffsets = w.group("H", w.unpack("H"))
        r = cls()
        f = pslookupgroup.PSLookupGroup.fromwalker
        GlyphTuple = pschainglyph_glyphtuple.GlyphTuple
        Key = pschainglyph_key.Key

        for setIndex, firstGlyph in enumerate(firstGlyphs):
            wSet = w.subWalker(setOffsets[setIndex])
            it = enumerate(wSet.group("H", wSet.unpack("H")))

            for ruleOrder, ruleOffset in it:
                wRule = wSet.subWalker(ruleOffset)

                backTuple = GlyphTuple(
                    reversed(wRule.group("H", wRule.unpack("H"))))

                inTuple = GlyphTuple((firstGlyph, ) +
                                     wRule.group("H",
                                                 wRule.unpack("H") - 1))

                lookTuple = GlyphTuple(wRule.group("H", wRule.unpack("H")))
                key = Key((backTuple, inTuple, lookTuple), ruleOrder=ruleOrder)
                posCount = wRule.unpack("H")
                r[key] = f(wRule, count=posCount, **kwArgs)

        return r

    def writeFontWorkerSource(self, s, **kwArgs):
        """
        Writes the contents of the lookup to the provided stream 's'. Uses
        namer.bestNameForGlyphIndex if a namer is provided, otherwise
        uses Font Worker glyph index labeling ("# <id>").
        """
        namer = kwArgs.get('namer')
        bnfgi = namer.bestNameForGlyphIndex

        for k in iter(self):
            v = self[k]
            btSeq, inSeq, laSeq = k[0], k[1], k[2]
            actionStr = "\t".join([
                "%d, %d" % (vi.sequenceIndex + 1, vi.lookup.sequence)
                for vi in v
            ])
            btStr = ", ".join(
                [bnfgi(btSeq[sv - 1])
                 for sv in range(len(btSeq), 0, -1)]) if btSeq else ""
            inStr = ", ".join([bnfgi(sv) for sv in inSeq])
            laStr = ", ".join([bnfgi(sv) for sv in laSeq]) if laSeq else ""

            s.write("glyph\t%s\t%s\t%s\t%s\n" %
                    (btStr, inStr, laStr, actionStr))
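
# --- Editor's note: illustrative sketch, not part of the class above. ---
# The byte layout of a single ChainRule as written by PSChainGlyph.buildBinary
# and read back by fromwalker: backtrack glyphs are stored in reverse order,
# the first input glyph is omitted (it is implied by the Coverage table), and
# each action is a (sequenceIndex, lookupListIndex) pair. Uses only 'struct';
# the helper name is hypothetical, and input_seq must hold at least one glyph.
import struct

def sketch_pack_chainrule(backtrack, input_seq, lookahead, lookup_records):
    out = struct.pack(
        ">H%dH" % len(backtrack), len(backtrack), *reversed(backtrack))
    out += struct.pack(
        ">H%dH" % (len(input_seq) - 1), len(input_seq), *input_seq[1:])
    out += struct.pack(">H%dH" % len(lookahead), len(lookahead), *lookahead)
    out += struct.pack(">H", len(lookup_records))
    for seq_index, lookup_index in lookup_records:
        out += struct.pack(">2H", seq_index, lookup_index)
    return out

# The rule ((85, 86), (25, 50), ()) with one action (1, 25), matching the bytes
# at offset 0x1C of the hexdump in the buildBinary doctest above.
packed = sketch_pack_chainrule((85, 86), (25, 50), (), [(1, 25)])
assert packed == bytes.fromhex("000200560055000200320000000100010019")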
Ejemplo n.º 18
0
class PSContextCoverage(dict, metaclass=mapmeta.FontDataMetaclass):
    """
    Objects containing format 3 contextual lookups. Note that these work for
    both GPOS and GSUB tables.
    
    These are dicts mapping a single Key to a PSLookupGroup. (Note that in the
    future, if OpenType permits a format for multiple entries instead of a
    single entry, the existing dict will suffice.)
    
    >>> _testingValues[0].pprint(namer=namer.testingNamer())
    ({xyz21, xyz22}, {xyz31, xyz32}, {afii60001, afii60002, xyz95}):
      Effect #1:
        Sequence index: 0
        Lookup:
          Subtable 0 (Pair (glyph) positioning table):
            (xyz11, xyz21):
              First adjustment:
                FUnit adjustment to origin's x-coordinate: 30
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
              Second adjustment:
                Device for origin's x-coordinate:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
                Device for origin's y-coordinate:
                  Tweak at 12 ppem: -5
                  Tweak at 13 ppem: -3
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 2
                  Tweak at 20 ppem: 3
            (xyz9, xyz16):
              Second adjustment:
                FUnit adjustment to origin's x-coordinate: -10
            (xyz9, xyz21):
              First adjustment:
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
          Lookup flags:
            Right-to-left for Cursive: False
            Ignore base glyphs: True
            Ignore ligatures: False
            Ignore marks: False
          Sequence order (lower happens first): 1
      Effect #2:
        Sequence index: 1
        Lookup:
          Subtable 0 (Pair (class) positioning table):
            (First class 1, Second class 1):
              Second adjustment:
                FUnit adjustment to origin's x-coordinate: -10
            (First class 2, Second class 0):
              First adjustment:
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
            (First class 2, Second class 1):
              First adjustment:
                FUnit adjustment to origin's x-coordinate: 30
                Device for vertical advance:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
              Second adjustment:
                Device for origin's x-coordinate:
                  Tweak at 12 ppem: -2
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 1
                Device for origin's y-coordinate:
                  Tweak at 12 ppem: -5
                  Tweak at 13 ppem: -3
                  Tweak at 14 ppem: -1
                  Tweak at 18 ppem: 2
                  Tweak at 20 ppem: 3
            Class definition table for first glyph:
              xyz16: 1
              xyz6: 1
              xyz7: 1
              xyz8: 2
            Class definition table for second glyph:
              xyz21: 1
              xyz22: 1
              xyz23: 1
          Lookup flags:
            Right-to-left for Cursive: True
            Ignore base glyphs: False
            Ignore ligatures: False
            Ignore marks: False
          Sequence order (lower happens first): 2
    """
    
    #
    # Class definition variables
    #
    
    mapSpec = dict(
        item_followsprotocol = True,
        item_renumberdeepkeys = True,
        item_usenamerforstr = True,
        map_compactiblefunc = (lambda d, k, **kw: False),
        #map_compactremovesfalses = True,
        map_maxcontextfunc = (lambda d: utilities.safeMax(len(k) for k in d)))
    
    #
    # Methods
    #
    
    def buildBinary(self, w, **kwArgs):
        """
        Adds the binary data for the PSContextCoverage to the specified
        LinkedWriter.
        
        NOTE! There will be unresolved lookup list indices in the LinkedWriter
        after this method is finished. The caller (or somewhere higher up) is
        responsible for adding an index map to the LinkedWriter with the tag
        "lookupList" before the LinkedWriter's binaryString() method is called.
        
        >>> w = writer.LinkedWriter()
        >>> _testingValues[0].buildBinary(w, forGPOS=True)
        >>> ltv = lookup._testingValues
        >>> w.addIndexMap(
        ...   "lookupList_GPOS",
        ...   { ltv[1].asImmutable(): 11,
        ...     ltv[2].asImmutable(): 25})
        >>> utilities.hexdump(w.binaryString())
               0 | 0003 0003 0002 0014  001C 0024 0000 000B |...........$....|
              10 | 0001 0019 0001 0002  0014 0015 0001 0002 |................|
              20 | 001E 001F 0001 0003  005E 0060 0061      |.........^.`.a  |
        
        >>> PSContextCoverage().binaryString()
        Traceback (most recent call last):
          ...
        ValueError: Cannot write empty PSContextCoverages!
        """
        
        if 'stakeValue' in kwArgs:
            stakeValue = kwArgs.pop('stakeValue')
            w.stakeCurrentWithValue(stakeValue)
        else:
            stakeValue = w.stakeCurrent()
        
        if not len(self):
            raise ValueError("Cannot write empty PSContextCoverages!")
        
        w.add("H", 3)
        ctStakes = []
        
        # Note the strong assumption that the two loops below iterate over the
        # keys and the values in the same order (which Python dicts guarantee).
        
        for i, key in enumerate(self):
            if i == 0:
                w.add("2H", len(key), len(self[key]))
            
            for ctIndex, covTable in enumerate(key):
                ctStakes.append(w.getNewStake())
                w.addUnresolvedOffset("H", stakeValue, ctStakes[-1])
        
        for value in self.values():
            value.buildBinary(w, **kwArgs)
        
        for ctIndex, covTable in enumerate(key):
            w.stakeCurrentWithValue(ctStakes[ctIndex])
            covTable.buildBinary(w)
    
    @classmethod
    def fromValidatedFontWorkerSource(cls, fws, **kwArgs):
        """
        Creates and returns a new PSContextCoverage from the specified
        FontWorkerSource, with source validation.

        >>> logger = utilities.makeDoctestLogger("FW_test")
        >>> obj = PSContextCoverage.fromValidatedFontWorkerSource(
        ...   _test_FW_fws2,
        ...   namer = _test_FW_namer,
        ...   forGPOS = True,
        ...   lookupDict = _test_FW_lookupDict,
        ...   logger = logger,
        ...   editor={})
        FW_test.pscontextcoverage - WARNING - line 14 -- unexpected token: foo
        FW_test.pscontextcoverage - WARNING - line 0 -- did not find matching 'subtable end/lookup end'
        >>> obj.pprint()
        Key((CoverageSet(frozenset({2})), CoverageSet(frozenset({5})), CoverageSet(frozenset({11})))):
          Effect #1:
            Sequence index: 0
            Lookup:
              3:
                FUnit adjustment to horizontal advance: 678
          Effect #2:
            Sequence index: 0
            Lookup:
              3:
                FUnit adjustment to horizontal advance: 901
        """
        
        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("pscontextcoverage")
        terminalStrings = ('subtable end', 'lookup end')
        startingLineNumber = fws.lineNumber

        Key = pscontextcoverage_key.Key
        CoverageSet = coverageset.CoverageSet

        coverageList = []
        lookupGroups = {}
        fVFWS = CoverageSet.fromValidatedFontWorkerSource
        
        for line in fws:
            if line.lower() in terminalStrings:
                return cls(lookupGroups)
            
            if len(line) > 0:
                tokens = [x.strip() for x in line.split('\t')]
                
                if tokens[0].lower() == 'coverage definition begin':
                    coverageSet = fVFWS(fws, logger=logger, **kwArgs)
                    coverageList.append(coverageSet)
                
                elif tokens[0].lower() == 'coverage':
                    lookupList = []
                    
                    for effect in tokens[1:]:
                        effectTokens = [x.strip() for x in effect.split(',')]
                        sequenceIndex = int(effectTokens[0]) - 1
                        lookupName = effectTokens[1]
                        
                        lookupList.append(
                          pslookuprecord.PSLookupRecord(
                            sequenceIndex,
                            lookup.Lookup.fromValidatedFontWorkerSource(
                              fws,
                              lookupName,
                              logger=logger,
                              **kwArgs)))
                    
                    key = Key(coverageList)
                    lookupGroup = pslookupgroup.PSLookupGroup(lookupList)
                    lookupGroups[key] = lookupGroup
                
                else:
                    logger.warning((
                      'V0960',
                      (fws.lineNumber, tokens[0]),
                      'line %d -- unexpected token: %s'))
                
        logger.warning((
          'V0958',
          (startingLineNumber, "/".join(terminalStrings)),
          "line %d -- did not find matching '%s'"))

        return cls(lookupGroups)
    
    @classmethod
    def fromvalidatedwalker(cls, w, **kwArgs):
        """
        Creates and returns a new PSContextCoverage object from the specified
        walker, doing source validation. The following keyword arguments are
        supported:
        
            fixupList   A list, to which (lookupListIndex, fixupFunc) pairs
                        will be appended. The actual lookup won't be set in the
                        PSLookupRecord until this fixupFunc is called by
                        lookuplist.fromvalidatedwalker(). The fixup call takes
                        one argument: the Lookup being set into it.
            
            logger      A logger to which messages will be posted.
        
        >>> w = writer.LinkedWriter()
        >>> _testingValues[0].buildBinary(w, forGPOS=True)
        >>> ltv = lookup._testingValues
        >>> d = {ltv[1].asImmutable(): 11, ltv[2].asImmutable(): 25}
        >>> w.addIndexMap("lookupList_GPOS", d)
        >>> s = w.binaryString()
        >>> FL = []
        >>> logger = utilities.makeDoctestLogger("pscontextcoverage_test")
        >>> fvb = PSContextCoverage.fromvalidatedbytes
        >>> obj = fvb(s, fixupList=FL, logger=logger)
        pscontextcoverage_test.pscontextcoverage - DEBUG - Walker has 46 bytes remaining.
        pscontextcoverage_test.pscontextcoverage - DEBUG - Format is 3
        pscontextcoverage_test.pscontextcoverage - DEBUG - Coverage count is 3
        pscontextcoverage_test.pscontextcoverage - DEBUG - Action count is 2
        pscontextcoverage_test.pscontextcoverage - DEBUG - Coverage offsets are (20, 28, 36)
        pscontextcoverage_test.pscontextcoverage.coverage 0.coverageset - DEBUG - Walker has 26 remaining bytes.
        pscontextcoverage_test.pscontextcoverage.coverage 0.coverageset - DEBUG - Format is 1, count is 2
        pscontextcoverage_test.pscontextcoverage.coverage 0.coverageset - DEBUG - Raw data are [20, 21]
        pscontextcoverage_test.pscontextcoverage.coverage 1.coverageset - DEBUG - Walker has 18 remaining bytes.
        pscontextcoverage_test.pscontextcoverage.coverage 1.coverageset - DEBUG - Format is 1, count is 2
        pscontextcoverage_test.pscontextcoverage.coverage 1.coverageset - DEBUG - Raw data are [30, 31]
        pscontextcoverage_test.pscontextcoverage.coverage 2.coverageset - DEBUG - Walker has 10 remaining bytes.
        pscontextcoverage_test.pscontextcoverage.coverage 2.coverageset - DEBUG - Format is 1, count is 3
        pscontextcoverage_test.pscontextcoverage.coverage 2.coverageset - DEBUG - Raw data are [94, 96, 97]
        pscontextcoverage_test.pscontextcoverage.pslookupgroup - DEBUG - Walker has 34 bytes remaining.
        pscontextcoverage_test.pscontextcoverage.pslookupgroup.[0].pslookuprecord - DEBUG - Walker has 34 remaining bytes.
        pscontextcoverage_test.pscontextcoverage.pslookupgroup.[0].pslookuprecord - DEBUG - Sequence index is 0
        pscontextcoverage_test.pscontextcoverage.pslookupgroup.[0].pslookuprecord - DEBUG - Lookup index is 11
        pscontextcoverage_test.pscontextcoverage.pslookupgroup.[1].pslookuprecord - DEBUG - Walker has 30 remaining bytes.
        pscontextcoverage_test.pscontextcoverage.pslookupgroup.[1].pslookuprecord - DEBUG - Sequence index is 1
        pscontextcoverage_test.pscontextcoverage.pslookupgroup.[1].pslookuprecord - DEBUG - Lookup index is 25
        
        >>> fvb(s[:5], fixupList=FL, logger=logger)
        pscontextcoverage_test.pscontextcoverage - DEBUG - Walker has 5 bytes remaining.
        pscontextcoverage_test.pscontextcoverage - ERROR - Insufficient bytes.
        """
        
        assert 'fixupList' in kwArgs
        fixupList = kwArgs.pop('fixupList')
        
        logger = kwArgs.pop('logger', logging.getLogger())
        logger = logger.getChild("pscontextcoverage")
        
        logger.debug((
          'V0001',
          (w.length(),),
          "Walker has %d bytes remaining."))
        
        if w.length() < 6:
            logger.error(('V0004', (), "Insufficient bytes."))
            return None
        
        format = w.unpack("H")
        
        if format != 3:
            logger.error((
              'V0002',
              (format,),
              "Expected format 3, but got format %d instead."))
            
            return None
        
        logger.debug(('Vxxxx', (), "Format is 3"))
        covCount, posCount = w.unpack("2H")
        logger.debug(('Vxxxx', (covCount,), "Coverage count is %d"))
        logger.debug(('Vxxxx', (posCount,), "Action count is %d"))
        
        if w.length() < 2 * covCount:
            logger.error((
              'V0362',
              (),
              "The offsets to the Coverages are missing or incomplete."))
            
            return None
        
        covOffsets = w.group("H", covCount)
        logger.debug(('Vxxxx', (covOffsets,), "Coverage offsets are %s"))
        fvw = coverageset.CoverageSet.fromvalidatedwalker
        v = [None] * covCount
        
        for i, offset in enumerate(covOffsets):
            cov = fvw(
              w.subWalker(offset),
              logger = logger.getChild("coverage %d" % (i,)))
            
            if cov is None:
                return None
            
            v[i] = cov
        
        key = pscontextcoverage_key.Key(v)
        
        group = pslookupgroup.PSLookupGroup.fromvalidatedwalker(
          w,
          count = posCount,
          fixupList = fixupList,
          logger = logger)
        
        if group is None:
            return None
        
        return cls({key: group})
    
    @classmethod
    def fromwalker(cls, w, **kwArgs):
        """
        Creates and returns a new PSContextCoverage from the specified walker.
        
        There is one required keyword argument:
        
            fixupList   A list, to which (lookupListIndex, fixupFunc) pairs
                        will be appended. The actual lookup won't be set in the
                        PSLookupRecord until this fixupFunc is called by
                        lookuplist.fromwalker(). The fixup call takes one
                        argument: the Lookup being set into it.
        
        >>> w = writer.LinkedWriter()
        >>> _testingValues[0].buildBinary(w, forGPOS=True)
        >>> ltv = lookup._testingValues
        >>> d = {ltv[1].asImmutable(): 11, ltv[2].asImmutable(): 25}
        >>> w.addIndexMap("lookupList_GPOS", d)
        >>> s = w.binaryString()
        >>> FL = []
        >>> obj = PSContextCoverage.frombytes(s, fixupList=FL)
        >>> d = {11: ltv[1], 25: ltv[2]}
        >>> for index, func in FL:
        ...     func(d[index])
        >>> obj == _testingValues[0]
        True
        """
        
        format = w.unpack("H")
        assert format == 3
        covCount, posCount = w.unpack("2H")
        covOffsets = w.group("H", covCount)
        f = coverageset.CoverageSet.fromwalker
        
        key = pscontextcoverage_key.Key(
          f(w.subWalker(offset))
          for offset in covOffsets)
        
        fixupList = kwArgs['fixupList']
        
        group = pslookupgroup.PSLookupGroup.fromwalker(
          w,
          count = posCount,
          fixupList = fixupList)
        
        return cls({key: group})


    def writeFontWorkerSource(self, s, **kwArgs):
        """
        Writes the contents of the lookup to the provided stream 's'. Uses
        namer.bestNameForGlyphIndex if a namer is provided, otherwise uses
        Font Worker glyph index labeling ("# <id>").
        """

        namer = kwArgs.get('namer')
        bnfgi = namer.bestNameForGlyphIndex

        for k in iter(self):
            lookupList = self[k]

            for i, cvg in enumerate(k):
                s.write("coverage definition begin\t%d\n" % (i,))
                for g in sorted(cvg):
                    s.write("%s\n" % (bnfgi(g),))
                s.write("coverage definition end\n\n")

            s.write("coverage")
            for lkp in lookupList:
                s.write("\t%d,%d" % (lkp.sequenceIndex + 1, lkp.lookup.sequence))