def setDeferredValue(self, stakeName, format, value):
    """
    Given a deferred value added earlier, replace it with a new value. Note
    that the new value must take the same number of bytes as the original
    value; if you need to add a potentially variably-sized piece of data, use
    the addReplaceableString()/setReplaceableString() methods instead.
    
    >>> w = LinkedFileWriter()
    >>> w.addString(b"ab")
    >>> stake = w.addDeferredValue("H")
    >>> w.addString(b"yz")
    >>> utilities.hexdump(w.binaryString())
    0 | 6162 0000 797A |ab..yz |
    >>> w.setDeferredValue(stake, "h", -1)
    >>> utilities.hexdump(w.binaryString())
    0 | 6162 FFFF 797A |ab..yz |
    """
    
    assert stakeName in self.stakes, "Undefined stake!"
    bs = utilitiesbackend.utPack(format, value)
    pieceIndex = self.stakes[stakeName]
    piece = self.pieces[pieceIndex]
    assert piece[2] is None, "Cannot set deferred value with bit width!"
    test = (len(bs) == piece[1])
    assert test, "Cannot change length via setDeferredValue()!"
    self.pieces[pieceIndex] = (self.backingFile.tell(), len(bs), None)
    self.backingFile.write(bs)
def add(self, format, *args):
    """
    Adds the specified arguments according to the specified format.
    
    >>> w = LinkedFileWriter()
    >>> w.add("h", -15)
    >>> w.add("BB", 2, 3)
    >>> utilities.hexdump(w.binaryString())
    0 | FFF1 0203 |.... |
    """
    
    self.addString(utilitiesbackend.utPack(format, *args))
def _packCFFNumber(n):
    """
    Converts a number to a bytestring, formatted for writing in a CFF DICT.
    This function is passed in for LinkedWriter.addUnresolvedOffset for
    offset operators such as 'Encoding', 'charset', 'charStrings', and
    'Private'. The function will expand to the CFF DICT-specific byte
    encoding and length. Note that in CFF DICTs, following the PostScript
    model, operands (values) precede operators (keys).
    """
    
    if -107 <= n <= 107:
        return utilitiesbackend.utPack('B', n + 139)
    
    elif 108 <= n <= 1131:
        return utilitiesbackend.utPack('H', n + 0xF694)
    
    elif -1131 <= n <= -108:
        return utilitiesbackend.utPack('H', 0xFA94 - n)
    
    elif -32768 <= n <= 32767:
        return utilitiesbackend.utPack('Bh', 28, n)
    
    else:
        return utilitiesbackend.utPack('Bl', 29, n)
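
# Illustrative sketch only (hypothetical helper, not part of this module's
# API): the same CFF DICT operand encoding expressed with struct.pack, on the
# assumption that utPack's 'B', 'H', 'h', and 'l' codes behave like the
# big-endian struct codes '>B', '>H', '>h', and '>l'.

import struct

def _packCFFNumberSketch(n):
    """
    Hypothetical stand-alone version of _packCFFNumber, for illustration.
    
    >>> _packCFFNumberSketch(0) == bytes([139])
    True
    >>> _packCFFNumberSketch(108) == bytes([0xF7, 0x00])
    True
    >>> _packCFFNumberSketch(-108) == bytes([0xFB, 0x00])
    True
    >>> _packCFFNumberSketch(2000) == struct.pack('>Bh', 28, 2000)
    True
    """
    
    if -107 <= n <= 107:
        return struct.pack('>B', n + 139)       # single byte, b0 in 32..246
    elif 108 <= n <= 1131:
        return struct.pack('>H', n + 0xF694)    # two bytes, b0 in 247..250
    elif -1131 <= n <= -108:
        return struct.pack('>H', 0xFA94 - n)    # two bytes, b0 in 251..254
    elif -32768 <= n <= 32767:
        return struct.pack('>Bh', 28, n)        # b0 = 28, then int16
    else:
        return struct.pack('>Bl', 29, n)        # b0 = 29, then int32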
def bestNameForGlyphIndex(self, glyphIndex, **kwArgs):
    """
    Finds the best name for a glyph.
    
    :param int glyphIndex: The glyph needing a name
    :param kwArgs: Optional keyword arguments (none currently used)
    :return: The best name for the specified glyph
    :rtype: str
    
    The name will be obtained from the first source found in the following
    prioritized list:
    
    - ``'Zapf'`` name
    - ``'post'`` name
    - Unicode name (if enabled and Unicode is present)
    - U+nnnn or U+nnnnnn name (if Unicode is present)
    - The glyph index as a string
    
    >>> d = {'remapAFII': False, 'useUnicodeName': False, 'annotate': False}
    >>> f = testingNamer(**d).bestNameForGlyphIndex
    >>> f(10), f(77), f(95), f(99), f(300)
    ('xyz11', 'xyz78', 'afii60000', 'U+0163', '300')
    
    >>> d['useUnicodeName'] = True
    >>> f = testingNamer(**d).bestNameForGlyphIndex
    >>> f(10), f(77), f(95), f(99)
    ('xyz11', 'xyz78', 'afii60000', 'LATIN SMALL LETTER T WITH CEDILLA')
    
    >>> d['remapAFII'] = True
    >>> d['useUnicodeName'] = False
    >>> f = testingNamer(**d).bestNameForGlyphIndex
    >>> f(10), f(77), f(95), f(99)
    ('xyz11', 'xyz78', 'U+015F', 'U+0163')
    
    >>> d['useUnicodeName'] = True
    >>> f = testingNamer(**d).bestNameForGlyphIndex
    >>> f(10), f(77), f(95), f(99)
    ('xyz11', 'xyz78', 'LATIN SMALL LETTER S WITH CEDILLA', 'LATIN SMALL LETTER T WITH CEDILLA')
    
    >>> d = {'remapAFII': False, 'useUnicodeName': False, 'annotate': True}
    >>> f = testingNamer(**d).bestNameForGlyphIndex
    >>> for s in (f(10), f(77), f(95), f(99), f(300)): print(s)
    xyz11 (glyph 10)
    xyz78 (glyph 77)
    afii60000 (glyph 95)
    U+0163 (glyph 99)
    300
    
    >>> d['useUnicodeName'] = True
    >>> f = testingNamer(**d).bestNameForGlyphIndex
    >>> for s in (f(10), f(77), f(95), f(99)): print(s)
    xyz11 (glyph 10)
    xyz78 (glyph 77)
    afii60000 (glyph 95)
    LATIN SMALL LETTER T WITH CEDILLA (glyph 99)
    
    >>> d['remapAFII'] = True
    >>> d['useUnicodeName'] = False
    >>> f = testingNamer(**d).bestNameForGlyphIndex
    >>> for s in (f(10), f(77), f(95), f(99)): print(s)
    xyz11 (glyph 10)
    xyz78 (glyph 77)
    U+015F (glyph 95)
    U+0163 (glyph 99)
    
    >>> d['useUnicodeName'] = True
    >>> f = testingNamer(**d).bestNameForGlyphIndex
    >>> for s in (f(10), f(77), f(95), f(99)): print(s)
    xyz11 (glyph 10)
    xyz78 (glyph 77)
    LATIN SMALL LETTER S WITH CEDILLA (glyph 95)
    LATIN SMALL LETTER T WITH CEDILLA (glyph 99)
    
    >>> d = {'useFontWorkerName': True}
    >>> f = testingNamer(**d).bestNameForGlyphIndex
    >>> for s in (f(10), f(95), f(99), f(500)): print(s)
    xyz11
    afii60000
    u 0163
    # 500
    
    >>> f = CFFtestingNamer().bestNameForGlyphIndex
    >>> for s in (f(5), f(10)): print(s)
    cff5
    cff10
    
    >>> f = CFFtestingNamer(annotate=True, useUnicodeName=True).bestNameForGlyphIndex
    >>> for s in (f(49), f(50)): print(s)
    cff49 (glyph 49)
    LATIN CAPITAL LETTER R (glyph 50)
    
    >>> f = CFFtestingNamer().bestNameForGlyphIndex
    >>> f(None)
    
    >>> d['useUnicodeName'] = False
    >>> f = testingNamer(**d).bestNameForGlyphIndex
    >>> for s in (f(65535), f(65536), f(65537), f(65534), f(65533)): print(s)
    # 65535
    # 65536
    # 65537
    # 65534
    # 65533
    
    >>> d = {'remapAFII': False, 'useUnicodeName': False, 'annotate': False}
    """
    
    #>>> a = fontedit.Editor()
    #>>> currentDir = os.getcwd()
    #>>> pathFont4 = os.path.join(currentDir, 'qe', 'testfontdata', 'Zapfino.ttf')
    #>>> b4 = a.frompath(pathFont4)
    #>>> nZapfinottf = Namer(b4, **d)
    #>>> nZapfinottf.bestNameForGlyphIndex(79)
    #'S.4'
    
    #>>> d['useUnicodeName'] = True
    #>>> nZapfinottf = Namer(b4, **d)
    #>>> nZapfinottf.bestNameForGlyphIndex(34)
    #'H.3'
    
    #>>> d['useFontWorkerName'] = True
    #>>> nZapfinottf = Namer(b4, **d)
    #>>> nZapfinottf.bestNameForGlyphIndex(65339)
    #'# 65339'
    
    if glyphIndex is None:
        return None
    if self.useFontWorkerName:
        self.annotate = False
        self.remapAFII = False
        self.useUnicodeName = False
    
    self._makeMaps()
    uNameFromZapf = None
    annotation = (" (glyph %d)" % (glyphIndex,) if self.annotate else "")
    
    if glyphIndex in self._zapfTable:
        kn = self._zapfTable[glyphIndex].kindNames
        
        if kn:
            s = kn.bestName()
            
            if s is not None:
                return "%s%s" % (s, annotation)
    
    if glyphIndex in self._cffCharset:
        b = self._cffCharset[glyphIndex]
        s = str(utilities.ensureUnicode(b))
        
        if not (self.remapAFII and s.startswith('afii')):
            return "%s%s" % (s, annotation)
    
    if glyphIndex in self._glyphToPost:
        s = self._glyphToPost[glyphIndex]
        
        if not (self.remapAFII and s.startswith('afii')):
            return "%s%s" % (s, annotation)
    
    if glyphIndex not in self._glyphToUnicode:
        if self.useFontWorkerName:
            return "# %d" % (glyphIndex,)
        
        return str(glyphIndex)
    
    u = self._glyphToUnicode[glyphIndex]
    
    if self.useUnicodeName:
        try:
            s = unicodedata.name(chr(u), None)
        
        except ValueError:  # unichr(0x10001) fails in narrow builds...
            bs = utilitiesbackend.utPack('L', u)
            s = unicodedata.name(str(bs, 'utf-32be'), None)
        
        if s is not None:
            return "%s%s" % (s, annotation)
    
    if self.useFontWorkerName:
        if u <= 0xFFFF:
            return "u %04X" % (u,)
        
        return "u %X" % (u,)
    
    if u <= 0xFFFF:
        return "U+%04X%s" % (u, annotation)
    
    return "U+%06X%s" % (u, annotation)
def _resolveVariableFormatOffsets(self, **kwArgs):
    """
    Builds and returns the resolved pieces list: each link's offset is
    computed, packed, and written to the backing file, and links whose
    format is a callable (variable-length) are re-resolved iteratively
    until the layout stabilizes.
    """
    
    # First, fill out v assuming all variable-length links are zero-length,
    # which is how they are to start with. This will give us our first pass
    # at the pieceStarts array.
    
    v = list(self.pieces) + [(0, 0, None)]
    g = (t[1] * 8 if t[2] is None else t[2] for t in v)
    pieceStarts = utilities.cumulCount(g)
    unresolvedOK = kwArgs.get('unresolvedOK', False)
    sawCallable = False
    
    for i, t in enumerate(self.links):
        if unresolvedOK:
            if t.tagFrom not in self.stakes or t.tagTo not in self.stakes:
                continue
        
        else:
            assert t.tagFrom in self.stakes, "Undefined tagFrom!"
            assert t.tagTo in self.stakes, "Undefined tagTo!"
        
        toStart = pieceStarts[self.stakes[t.tagTo]]
        fromStart = pieceStarts[self.stakes[t.tagFrom]]
        actualBitDelta = (toStart - fromStart) + t.offsetBitDelta
        
        if t.offsetDivisor != 1:
            test = (actualBitDelta & t.offsetDivisor) == 0
            assert test, "Word alignment bad boundary!"
            actualBitDelta //= t.offsetDivisor
        
        test = (actualBitDelta >= 0) or self.negOffsetsOK or t.negOK
        assert test, "Impermissible negative offset!"
        assert (actualBitDelta & 7) == 0, "Not at byte boundary!"
        
        if callable(t.format):
            v[t.pieceIndex] = self.pieces[t.pieceIndex]
            sawCallable = True
        
        elif t.bitLength is None:
            try:
                bs = utilitiesbackend.utPack(t.format, actualBitDelta // 8)
            
            except ValueError:
                if self.linkHistory is not None:
                    print(self.linkHistory[i], file=sys.stderr)
                
                raise
            
            v[t.pieceIndex] = (self.backingFile.tell(), len(bs), None)
            self.backingFile.write(bs)
        
        else:
            bs = self._bitsFromNumber(actualBitDelta // 8, t.bitLength)
            v[t.pieceIndex] = (self.backingFile.tell(), len(bs), t.bitLength)
            self.backingFile.write(bs)
    
    if not sawCallable:
        return v
    
    # Second, given the current (wrong) pieceStarts, replace all the v[]
    # variable-length entries with the results from calling their format()
    # routines.
    
    for t in self.links:
        if not callable(t.format):
            continue
        
        toStart = pieceStarts[self.stakes[t.tagTo]]
        fromStart = pieceStarts[self.stakes[t.tagFrom]]
        actualBitDelta = (toStart - fromStart) + t.offsetBitDelta
        
        if t.offsetDivisor != 1:
            test = (actualBitDelta & t.offsetDivisor) == 0
            assert test, "Word alignment bad boundary!"
            actualBitDelta //= t.offsetDivisor
        
        test = (actualBitDelta >= 0) or self.negOffsetsOK or t.negOK
        assert test, "Impermissible negative offset!"
        assert (actualBitDelta & 7) == 0, "Not at byte boundary!"
        
        bs = t.format(actualBitDelta // 8)  # t.format is the link's callable
        v[t.pieceIndex] = (self.backingFile.tell(), len(bs), None)
        self.backingFile.write(bs)
    
    # Third, adjust the v[] entries corresponding to the variable-length
    # entries, and recalculate pieceStarts again. Keep doing this until
    # things stop wiggling.
    
    pieceStartsHistory = set([tuple(pieceStarts)])
    
    while True:
        g = (t[1] * 8 if t[2] is None else t[2] for t in v)
        newPieceStarts = utilities.cumulCount(g)
        
        if newPieceStarts == pieceStarts:
            break
        
        pieceStarts = newPieceStarts
        
        if tuple(pieceStarts) in pieceStartsHistory:
            raise ValueError("Critical resize loop!")
        
        pieceStartsHistory.add(tuple(pieceStarts))
        
        for t in self.links:
            if not callable(t.format):
                continue
            
            toStart = pieceStarts[self.stakes[t.tagTo]]
            fromStart = pieceStarts[self.stakes[t.tagFrom]]
            actualBitDelta = (toStart - fromStart) + t.offsetBitDelta
            
            if t.offsetDivisor != 1:
                test = (actualBitDelta & t.offsetDivisor) == 0
                assert test, "Word alignment bad boundary!"
                actualBitDelta //= t.offsetDivisor
            
            test = (actualBitDelta >= 0) or self.negOffsetsOK or t.negOK
            assert test, "Impermissible negative offset!"
            assert (actualBitDelta & 7) == 0, "Not at byte boundary!"
            bs = t.format(actualBitDelta // 8)  # t.format is the link's callable
            v[t.pieceIndex] = (self.backingFile.tell(), len(bs), None)
            self.backingFile.write(bs)
    
    return v
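
# Illustrative sketch (hypothetical, independent of LinkedFileWriter): the
# fixed-point iteration the third section above relies on. Offset fields are
# encoded by routines whose output length depends on the offset value, and
# changing one field's length shifts every later piece, so the layout is
# recomputed and re-encoded until nothing moves; a history of layouts guards
# against oscillation.

def _iterateOffsetLayoutSketch(startLengths, offsetPieces):
    """
    startLengths is a list of initial byte lengths, one per piece (offset
    pieces typically start at zero). offsetPieces maps a piece index to a
    (targetIndex, encode) pair, where encode(offset) returns the bytes for
    the offset of piece targetIndex from the start of the stream.
    
    Returns the stabilized list of piece lengths.
    
    >>> def enc(n):
    ...     return b'a' if n < 108 else b'ab'
    >>> _iterateOffsetLayoutSketch([107, 0, 50], {1: (2, enc)})
    [107, 2, 50]
    """
    
    lengths = list(startLengths)
    seen = {tuple(lengths)}
    
    while True:
        # Piece start positions implied by the current lengths
        starts, total = [], 0
        
        for n in lengths:
            starts.append(total)
            total += n
        
        # Re-encode every offset piece against the new starts
        newLengths = list(lengths)
        
        for i, (targetIndex, encode) in offsetPieces.items():
            newLengths[i] = len(encode(starts[targetIndex]))
        
        if newLengths == lengths:
            return lengths  # layout reached a fixed point
        
        lengths = newLengths
        
        if tuple(lengths) in seen:
            raise ValueError("Critical resize loop!")
        
        seen.add(tuple(lengths))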
def _makeResolvedIterator(self, **kwArgs):
    """
    Returns an iterator over bytes objects, based on self.pieces but with
    all references resolved. An AssertionError is raised if one or more of
    the following conditions are true:
    
    - A reference is made to a stakeName that is not defined. (Note that
      this assertion can be neutralized using the unresolvedOK keyword;
      this is useful for checksumming, where the whole writer might not yet
      be resolved, but the part being checksummed is.)
    
    - A computed offset is negative and self.allowNegativeOffsets() has not
      been called.
    
    - A computed offset is not a multiple of 8.
    """
    
    v = self._resolveVariableFormatOffsets(**kwArgs)
    f = self.backingFile
    
    for format, pieceIndex, tag1, tag2 in self.indexLinks:
        # v[pieceIndex] needs to be changed to the value in the related
        # indexMap (note this is only supported for whole strings).
        
        thisMap = self.indexMaps[tag1]
        bs = utilitiesbackend.utPack(format, thisMap[tag2])
        v[pieceIndex] = (f.tell(), len(bs), None)
        f.write(bs)
    
    # Now create the actual string list to be returned
    
    rememberTell = f.tell()
    
    if any((t[2] is not None) and (t[2] & 7) for t in v):
        partial = []
        implode = utilitiesbackend.utImplode
        explode = utilitiesbackend.utExplode
        
        for offset, byteLen, bitCount in v:
            if not byteLen:
                continue
            
            f.seek(offset)
            bs = f.read(byteLen)
            
            if bitCount is None:
                if partial:
                    thisBitLen = 8 * len(bs)
                    partial += explode(bs)
                    yield implode(partial[0:thisBitLen])
                    partial = partial[thisBitLen:]
                
                else:
                    # this is purely an accelerator
                    yield bs
            
            else:
                partial += explode(bs)[0:bitCount]
                
                if len(partial) > 7:
                    thisBitLen = 8 * (len(partial) // 8)
                    yield implode(partial[0:thisBitLen])
                    partial = partial[thisBitLen:]
        
        if partial:
            yield implode(partial)
    
    else:
        for offset, byteLen, bitCount in v:
            f.seek(offset)
            bs = f.read(byteLen)
            yield bs
    
    f.seek(rememberTell)
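
# Illustrative sketch (hypothetical helpers, not the module's utExplode /
# utImplode): the bit-accumulation pattern used above, where byte-aligned
# pieces pass through untouched while bit-level pieces are gathered into a
# running list of bits and flushed a whole byte at a time.

def _explodeSketch(bs):
    """Return a list of bits (0/1), most significant bit first."""
    return [(byte >> (7 - i)) & 1 for byte in bs for i in range(8)]

def _implodeSketch(bits):
    """Pack bits (MSB first) into bytes, zero-padding the final byte."""
    out = bytearray()
    
    for i in range(0, len(bits), 8):
        chunk = bits[i:i + 8]
        chunk = chunk + [0] * (8 - len(chunk))
        out.append(sum(b << (7 - j) for j, b in enumerate(chunk)))
    
    return bytes(out)

def _streamBitPiecesSketch(pieces):
    """
    pieces is an iterable of (bytes, bitCount) pairs, where bitCount is None
    for a byte-aligned piece or the number of leading bits to keep otherwise.
    Yields byte-aligned bytes objects.
    """
    
    partial = []
    
    for bs, bitCount in pieces:
        if bitCount is None:
            if partial:
                # Fold the aligned bytes through the pending bits so the
                # output stays contiguous.
                thisBitLen = 8 * len(bs)
                partial += _explodeSketch(bs)
                yield _implodeSketch(partial[:thisBitLen])
                partial = partial[thisBitLen:]
            else:
                yield bs
        else:
            partial += _explodeSketch(bs)[:bitCount]
            
            if len(partial) > 7:
                thisBitLen = 8 * (len(partial) // 8)
                yield _implodeSketch(partial[:thisBitLen])
                partial = partial[thisBitLen:]
    
    if partial:
        yield _implodeSketch(partial)

# For example, _streamBitPiecesSketch([(b'\xF0', 4), (b'\x0F', 4), (b'AB', None)])
# yields b'\xf0' (the two 4-bit pieces combined) and then b'AB' unchanged.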
def __call__(self, n):
    """Packs n using the smallest unsigned format wide enough for self.maxoffset."""
    
    if self.maxoffset < 256:
        return utPack("B", n)
    elif self.maxoffset < 65536:
        return utPack("H", n)
    elif self.maxoffset < 16777216:
        return utPack("T", n)
    else:
        return utPack("I", n)
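
# Illustrative sketch (hypothetical, using struct instead of utPack): pick the
# narrowest big-endian unsigned field that can hold every offset up to
# maxoffset. The 3-byte case stands in for what the code above packs with
# utPack's "T" format, assumed here (from the 16777216 threshold) to be a
# 24-bit unsigned value.

import struct

def _packOffsetSketch(maxoffset, n):
    if maxoffset < 256:
        return struct.pack(">B", n)        # 1 byte
    elif maxoffset < 65536:
        return struct.pack(">H", n)        # 2 bytes
    elif maxoffset < 16777216:
        return struct.pack(">I", n)[1:]    # 3 bytes (drop the high byte)
    else:
        return struct.pack(">I", n)        # 4 bytes

# e.g. _packOffsetSketch(70000, 1234) -> b'\x00\x04\xd2'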
    51: ('cp864', False, 'has864'),
    52: ('cp863', False, 'has863'),
    53: ('cp862', False, 'has862'),
    54: ('cp861', False, 'has861'),
    55: ('cp860', False, 'has860'),
    56: ('cp857', False, 'has857'),
    57: ('cp855', False, 'has855'),
    58: ('cp852', False, 'has852'),
    59: ('cp775', False, 'has775'),
    60: ('cp737', False, 'has737'),
    
    # bit 61, cp708 (ASMO 708) is not available in a Python encoding
    
    62: ('cp850', False, 'has850'),
    63: ('cp437', False, 'has437')
}

_b8BitSmall = utilitiesbackend.utPack("256B", *list(range(256)))

_b16BitBig = utilitiesbackend.utPack(
    "63232H",
    *(list(range(0x0100, 0xD800)) + list(range(0xE000, 0x10000))))

# -----------------------------------------------------------------------------

#
# Private functions
#

def _getUSpan(key, onlyPrintables=False):
    """
    Given a bit number as the key, returns a Span with the Unicodes in the
    full version of that code page. If onlyPrintables is True, the Unicodes
    with