def get_words(words, font_url):
    woff = requests.get(font_url).content
    with open('fonts.woff', 'wb') as f:
        f.write(woff)
    online_fonts = TTFont('fonts.woff')
    online_fonts.saveXML("text.xml")
    _dict = online_fonts.getBestCmap()
    # print(_dict)
    _dic = {
        "six": "6", "three": "3", "period": ".", "eight": "8", "zero": "0",
        "five": "5", "nine": "9", "four": "4", "seven": "7", "one": "1", "two": "2"
    }
    d = words.split(';')
    cc = []
    for i in d:
        if i:
            add = int(i.replace('&#', ''))
            cc.append(_dic[_dict[add]])
    values = ''.join(cc)
    # print('values', values)
    return values
def getAddrMapping(url):
    res = {}
    json_data = getDict()
    woff = getAddrWoff(url)
    rs = requests.get(woff)
    with open('../dzdp_woff/addrTest.woff', "wb") as f:
        f.write(rs.content)
    font = TTFont('../dzdp_woff/addrTest.woff')  # read the woff file
    font.saveXML('addrTest.xml')  # convert it to xml
    result = font['cmap']
    cmap_dict = result.getBestCmap()
    for key, value in cmap_dict.items():
        k_tmp = str(hex(eval(str(key))))
        b = k_tmp.replace("0x", '')
        glyf = font.get('glyf')
        c = glyf[value]
        coordinates = str(c.coordinates)
        for data in json_data:
            position = data["position"]
            if position == coordinates and data["word"] not in [
                '', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'
            ]:
                res[value.replace("uni", "&#x")] = data["word"]
    print("getAddrMapping", res)
    return res
def _verifyOutput(outPath, tables=None):
    f = TTFont(outPath)
    f.saveXML(outPath + ".ttx", tables=tables)
    with open(outPath + ".ttx") as f:
        testData = strip_VariableItems(f.read())
    refData = strip_VariableItems(getTestData(os.path.basename(outPath) + ".ttx"))
    assert refData == testData
def __init__(self, file1, file2):
    errors = []
    # Open the ttf files
    try:
        font1 = TTFont(file1)
    except Exception as e:
        errors.append("Can't open " + file1)
        errors.append(e.__str__())
    try:
        font2 = TTFont(file2)
    except Exception as e:
        errors.append("Can't open " + file2)
        errors.append(e.__str__())
    if errors:
        self.diff = ""
        self.errors = "\n".join(errors)
        self.returncode = 2
        return
    # Create ttx xml strings from each font
    ttx1 = _ttx()
    ttx2 = _ttx()
    font1.saveXML(ttx1)
    font2.saveXML(ttx2)
    if ttx1.txt() == ttx2.txt():
        self.diff = ""
        self.errors = ""
        self.returncode = 0
    else:
        self.diff = file1 + " and " + file2 + " are different - compare with external tools"
        self.errors = ""
        self.returncode = 1
def initialize_woff():
    while True:
        try:
            # a plain request to the listing page
            response = requests.get(
                'https://www.shixiseng.com/interns?page=10&keyword=%E6%95%B0%E6%8D%AE%E5%BA%93&type=intern&months=&days=&degree=&official=&enterprise=&salary=-0&publishTime=&sortType=&city=%E5%85%A8%E5%9B%BD&internExtend=',
                timeout=5)
            i = r'@font-face{font-family:myFont;src:url((.*?));}'
            # raw match looks like:
            # [('(/interns/iconfonts/file?rand=0.16423818208757135)', '(/interns/iconfonts/file?rand=0.16423818208757135)')]
            base64_code = re.findall(i, response.text.replace(
                ' ', ''))[0][0].replace('(', '').replace(')', '')
            # print(base64_code)
            wofffile = requests.get('https://www.shixiseng.com' + str(base64_code))
            with open(r'E:\vscode_code\爬虫测试\反爬虫\其他\字体.woff', 'wb') as f:
                f.write(wofffile.content)
            font1 = TTFont(r'E:\vscode_code\爬虫测试\反爬虫\其他\字体.woff')
            font1.saveXML(r'E:\vscode_code\爬虫测试\反爬虫\其他\字体.xml')
            break
        except requests.exceptions.RequestException:
            print('Retry woff!')
def save_xml(self):
    font = "d09GRgABAAAAAAgkAAsAAAAAC7gAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAABHU1VCAAABCAAAADMAAABCsP6z7U9TLzIAAAE8AAAARAAAAFZW7lVbY21hcAAAAYAAAAC8AAACTDduo/NnbHlmAAACPAAAA5gAAAQ0l9+jTWhlYWQAAAXUAAAALwAAADYTFodmaGhlYQAABgQAAAAcAAAAJAeKAzlobXR4AAAGIAAAABIAAAAwGhwAAGxvY2EAAAY0AAAAGgAAABoGRAUcbWF4cAAABlAAAAAfAAAAIAEZADxuYW1lAAAGcAAAAVcAAAKFkAhoC3Bvc3QAAAfIAAAAXAAAAI/cSrPVeJxjYGRgYOBikGPQYWB0cfMJYeBgYGGAAJAMY05meiJQDMoDyrGAaQ4gZoOIAgCKIwNPAHicY2Bk0mWcwMDKwMHUyXSGgYGhH0IzvmYwYuRgYGBiYGVmwAoC0lxTGBwYKr4+Ydb5r8MQw6zDcAUozAiSAwDoGAvreJzFkrENg0AMRf8FQgikSJmKCTIBS7AOtBSZIIOgVBmDFU5CICSaA1Ei8g/TRII28emdZPtkW/4H4AjAIXfiAuoNBWsvRtUSdxAscRcP+jdcGfGR6lhPddJEbdb1pjDlkI/VPPPFfmbLFCtuHZtxcGLXACGnPOMAj32hvJ1KPzD1v9bfdlnu5+qFJF3hiDoWrK56ErhJ1IlgdW4iwWrbZoKt0/WC/RemELh7mFKgChhygXpgrAT4H5rhSAB4nD2TzW8aRxjGZ5Zo11kTjMsuG3ACLIt3F7DNer8wsAbCGhJ/UmzAGCfEWAkhbpO4Vpw6idUmpK2UVP0D0kulHnqJesg9larm1KZqfOgfUKnX3hoplwh3Fmj2MNoZ6Z33eX/PMwACcPwPkAEFMADiCk35KBGgD7MW+A77HZDgFACsxmpQGVVojhZGbdDs/gYLF5rN2l/PSvCoK5WevUNnP1kl6L5/4TH2GkTRfT6oyLoq8FyQiDOyrqnoDycEPa4rMuODFE74IE3hXJAXOiPn9XRFCBveEOlIbGR0ZY6sORPJclKe1uTpzPnH7cuHJ39dzFUPBZFchqlZKZPOjdRj097Tta1F98jFwqVHu/WB9uM3SPtrNJUfAJcaTyMdjJtxo16EA3JBgY9Tbll3CnyQwCMeb3tlL3XG6bQ7xq4WrxmFeuneWkS8H5qEzc7CSnkzkjVuZFrCytpC7dXz2/twK5VUcr1Z0bKP+tgRIW7UAQlNR73iCtyvBdri/MyYOJzAJJ/hrARlj8SA/2veopoQAOM0i7TZrBJ9gCYN5YFKwmEj4NuuMEyOiQk+WaTDi0ZmCdZPHvxxwEYpUxJl5oOhctnv88RiWkBaODtzdX6hQLau71Uml2UmI7KTpxnkHbBZPDAw4DGJiPCaqlt9+vgRDrRHRBTZ0hHEbRRihZzr755/vPtibyeX7/x5LluQcqrEsWbr3JngeDAcUOhw+bMS/ELc+fD6raW26L6cu3SYNpqFxg9qJuBvmNnuYyFPuWhKeLBaeu9NF2kJgAlEgu+Fw5qXtkxCmix/kJ5eRNwMpHoZ0iwh8Fs7HVIjgQhjPxXYVNYPk1dyN58smZ9UdM3efSrkeb1UvFPG3CozzvgTZ9f06alOy7w9+92Lo8aqNFXuvpqoROvL8+vVvo4eEw7EBklFNqThLFQFnMB7GpAEC1A/MQLsWURTDErO18OGFEkJDpyAnthEfOPe59tz+0bqTrGi6iRsr86kquHI3eKPhjae1rz62NAJPOL1Pti58eXiN50n31emYhWYWtporBTC0XXwPhfH2EvgQv5oLI3cxwnOSoYVjxg84sw5xeUZ2oSjTn/Kl2Wxm5V8qHn3frb+UaRlHNxKXOQH7+8NdgL7xUr+4P31k+ViaZYYMLbmQxN9Rc7r2VrVjJrUWh5e6f4tBOa4xsNE/tPt2fTQy3xu+2mV95Nwt/yzm3l4bevCuj5TB/8BTZLgwnicY2BkYGAA4rgvAqvj+W2+MnCzMIDA9d8LryPo/29YGJjOA7kcDEwgUQBngg0cAHicY2BkYGDW+a/DEMPCAAJAkpEBFfAAADNiAc14nGNhAIIUBgYmHeIwADeMAjUAAAAAAAAADAAoAGoAngC4APIBOAF8AcQB6AIaAAB4nGNgZGBg4GEwYGBmAAEmIOYCQgaG/2A+AwAOgwFWAHicZZG7bsJAFETHPPIAKUKJlCaKtE3SEMxDqVA6JCgjUdAbswYjv7RekEiXD8h35RPSpcsnpM9grhvHK++eOzN3fSUDuMY3HJyee74ndnDB6sQ1nONBuE79SbhBfhZuoo0X4TPqM+EWungVbuMGb7zBaVyyGuND2EEHn8I1XOFLuE79R7hB/hVu4tZpCp+h49wJt7BwusJtPDrvLaUmRntWr9TyoII0sT3fMybUhk7op8lRmuv1LvJMWZbnQps8TBM1dAelNNOJNuVt+X49sjZQgUljNaWroyhVmUm32rfuxtps3O8Hort+GnM8xTWBgYYHy33FeokD9wApEmo9+PQMV0jfSE9I9eiXqTm9NXaIimzVrdaL4qac+rFWGMLF4F9qxlRSJKuz5djzayOqlunjrIY9MWkqvZqTRGSFrPC2VHzqLjZFV8af3ecKKnm3mCH+A9idcsEAeJxti0kOgCAQBKdxV/yLCC4cVZi/ePFm4vONw9W+VDqVIkVpLf1PQyFDjgIlKtRo0KKDRk94qvs62YThYzR2E86OhQeP4u06Js9B/hRd6vbULSYK/eKJXh5XF6A="
    fontdata = base64.b64decode(font)
    with open("./maoyan.woff", "wb") as f:
        f.write(fontdata)
    maoyan_fonts = TTFont('./maoyan.woff')
    maoyan_fonts.saveXML("text.xml")
def test_build_otf(tmpdir):
    outPath = os.path.join(str(tmpdir), "test.otf")

    fb, advanceWidths, nameStrings = _setupFontBuilder(False)

    pen = T2CharStringPen(600, None)
    drawTestGlyph(pen)
    charString = pen.getCharString()
    charStrings = {".notdef": charString, "A": charString, "a": charString, ".null": charString}
    fb.setupCFF(nameStrings['psName'], {"FullName": nameStrings['psName']}, charStrings, {})

    metrics = {}
    for gn, advanceWidth in advanceWidths.items():
        metrics[gn] = (advanceWidth, 100)  # XXX lsb from glyph
    fb.setupHorizontalMetrics(metrics)

    fb.setupHorizontalHeader(ascent=824, descent=200)
    fb.setupNameTable(nameStrings)
    fb.setupOS2()
    fb.setupPost()
    fb.setupDummyDSIG()

    fb.save(outPath)

    f = TTFont(outPath)
    f.saveXML(outPath + ".ttx")
    with open(outPath + ".ttx") as f:
        testData = strip_VariableItems(f.read())
    refData = strip_VariableItems(getTestData("test.otf.ttx"))
    assert refData == testData
def get_font_dict(res):
    data = re.findall(r'base64,(.*)\) ', res.text)[0]
    res_data = base64.b64decode(data)
    with open('font.woff', 'wb') as f:
        f.write(res_data)
    # load the woff font
    font = TTFont('font.woff')
    # save the woff font in xml format
    font.saveXML('font.xml')
    number_dict = {
        'zero': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4,
        'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9,
    }
    uniList = font['cmap'].tables[0].ttFont.getGlyphOrder()
    font_dict = {}
    for i in range(0, 10):
        key = number_dict[uniList[i + 1]]  # the obfuscated value
        font_dict[key] = i                 # i is the corresponding real digit
    return font_dict
def test_build_ttf(tmpdir):
    outPath = os.path.join(str(tmpdir), "test.ttf")

    fb, advanceWidths, nameStrings = _setupFontBuilder(True)

    pen = TTGlyphPen(None)
    drawTestGlyph(pen)
    glyph = pen.glyph()
    glyphs = {".notdef": glyph, "A": glyph, "a": glyph, ".null": glyph}
    fb.setupGlyf(glyphs)

    metrics = {}
    glyphTable = fb.font["glyf"]
    for gn, advanceWidth in advanceWidths.items():
        metrics[gn] = (advanceWidth, glyphTable[gn].xMin)
    fb.setupHorizontalMetrics(metrics)

    fb.setupHorizontalHeader(ascent=824, descent=200)
    fb.setupNameTable(nameStrings)
    fb.setupOS2()
    fb.setupPost()
    fb.setupDummyDSIG()

    fb.save(outPath)

    f = TTFont(outPath)
    f.saveXML(outPath + ".ttx")
    with open(outPath + ".ttx") as f:
        testData = strip_VariableItems(f.read())
    refData = strip_VariableItems(getTestData("test.ttf.ttx"))
    assert refData == testData
def get_dict():
    '''
    Input: none.
    Output: the mapping dict for the font file (font code -> character) and the list of font codes.
    '''
    r = requests.get('https://www.shixiseng.com/interns/iconfonts/file')
    with open('new_font.woff', 'wb') as f:
        f.write(r.content)
    # font1 = TTFont('file2.woff')
    # font1.saveXML('font2.xml')
    font1 = TTFont('new_font.woff')
    font1.saveXML('new_font.xml')
    # with open('font2.xml') as f:
    with open('new_font.xml') as f:
        xml = f.read()
    keys = re.findall('<map code="(0x.*?)" name="uni.*?"/>', xml)[:99]
    values = re.findall('<map code="0x.*?" name="uni(.*?)"/>', xml)[:99]
    for i in range(len(values)):
        if len(values[i]) < 4:
            values[i] = ('\\u00' + values[i]).encode('utf-8').decode('unicode_escape')
        else:
            values[i] = ('\\u' + values[i]).encode('utf-8').decode('unicode_escape')
    word_dict = dict(zip(keys, values))
    # print(word_dict)
    return word_dict, keys
def find_and_replace_ttx(ttx_path, find_string, replace_string, tables=['name']):
    count = 0
    # 1. modify 'name' table
    if 'name' in tables:
        tree = parse(ttx_path)
        root = tree.getroot()
        for child in root.find('name'):
            if child.text.find(find_string) != -1:
                new_text = child.text.replace(find_string, replace_string)
                child.text = new_text
                count += 1
        tree.write(ttx_path)
    # 2. modify 'CFF ' table
    if 'CFF ' in tables:
        CFF_elements = ['version', 'Notice', 'Copyright', 'FullName', 'FamilyName', 'Weight']
        tt_font = TTFont()
        tt_font.importXML(ttx_path)
        font_dict = tt_font['CFF '].cff.topDictIndex.items[0]
        for element in CFF_elements:
            text = getattr(font_dict, element)
            if text.find(find_string) != -1:
                new_text = text.replace(find_string, replace_string)
                setattr(font_dict, element, new_text)
                count += 1
        tt_font.saveXML(ttx_path)
    # done
    return count
def get_font_xml(font_face, save_name):
    """Convert the base64-encoded font into an xml file and a ttf file."""
    onlineFont = TTFont(io.BytesIO(base64.b64decode(font_face)))
    onlineFont.saveXML(save_name)
    onlineFont.save('58.ttf')
def doStuff(f):
    if f is not None:
        # cut f to just .otf files
        paths = collectSources(f)
        for x in range(0, len(SerialDump)):
            currentNo = str(SerialDump[x])
            makeDestination(f, currentNo)
            for i in paths:
                instanceFolder, instanceName = os.path.split(i)
                q = TTFont(i)
                serialfolder = instanceFolder + "/Serialized/" + currentNo + "/"
                savetemp = serialfolder + instanceName + "-" + currentNo + ".ttx"
                savetemp = savetemp.replace(".otf", "")
                newfile = serialfolder + instanceName.replace(".otf", ".ttx")
                q.saveXML(savetemp)
                seriaLize(savetemp, currentNo)
                os.rename(savetemp, newfile)
                stderr, stdout = executeCommand(['ttx', newfile])
                os.remove(newfile)
def get_glyph_id(self, glyph):
    ttf = TTFont(self.file_path)
    ttf.saveXML('../fonts/01.xml')
    # gly_list = ttf.getGlyphOrder()  # get the values of the GlyphOrder field
    index = ttf.getGlyphID(glyph)
    # os.remove(self.file_path)
    return index
def createTtfAndXml(fontsstr, isxml):
    try:
        b = base64.b64decode(fontsstr)
        curtime = time.strftime("%Y%m%d_%H%M%S")
        pathnameTtf = CreateFile.createFile('zt_' + curtime + '.ttf', 'DataHub/cv')
        with open(pathnameTtf, 'wb') as f:
            f.write(b)
        pathnameXml = ''
        if isxml:
            font = TTFont(pathnameTtf)
            pathnameXml = CreateFile.createFile('zt_' + curtime + '.xml', 'DataHub/cv')
            font.saveXML(pathnameXml)
        return {'ttf': pathnameTtf, 'xml': pathnameXml}
    except Exception as ex:
        print('utils -> createTtfAndXml(fontsstr) has errors. \n', ex)
        return {'ttf': '', 'xml': ''}
    # pathnameTtf = CreateFile.createFile('msyh.ttf', 'DataHub/cv')
    # font = TTFont(pathnameTtf)
    # pathnameXml = CreateFile.createFile('msyh.xml', 'DataHub/cv')
    # font.saveXML(pathnameXml)
def gouzao():
    base64_str_decode = base64.b64decode(base64_str)
    file_name = "验证.ttf"
    with open(file_name, 'wb') as f:
        f.write(base64_str_decode)
    font = TTFont('验证.ttf')  # open the local ttf file
    font.saveXML('验证.xml')   # convert it to xml
def font_dict():
    font = TTFont('shixi.ttf')
    font.saveXML('shixi.xml')
    ccmap = font['cmap'].getBestCmap()
    # print("ccmap:\n", ccmap)
    newmap = {}
    for key, value in ccmap.items():
        # convert the key to hexadecimal
        key = hex(key)
        value = value.replace('uni', '')
        a = 'u' + '0' * (4 - len(value)) + value
        newmap[key] = a
    # print("newmap:\n", newmap)
    # drop the first element, which is not needed
    newmap.pop('0x78')
    # add the \u prefix to turn the value into a unicode character
    for i, j in newmap.items():
        newmap[i] = eval("u" + "\'\\" + j + "\'")
    # print("newmap:\n", newmap)
    new_dict = {}
    # rewrite the keys to match how the characters appear in the page source
    for key, value in newmap.items():
        key_ = key.replace('0x', '&#x')
        new_dict[key_] = value
    return new_dict
def text_ttf(self, url):
    User_Agent = {'User-Agent': User_AgentMiddleware.get_ug()}
    try:
        text = requests.get(url=url, headers=User_Agent)
    except:
        logging.log(msg='Proxy request timeout Wait for two seconds', level=logging.INFO)
        return 0
    else:
        # windows
        # with open("./luntan/text_dazhong1.ttf", "bw") as f:
        #     f.write(text.content)
        # linux
        with open("./luntan/text_dazhong1.ttf", "bw") as f:
            f.write(text.content)
        # windows
        # font = TTFont("./luntan/text_dazhong1.ttf")
        # linux
        font = TTFont("./luntan/text_dazhong1.ttf")
        # windows
        # font.saveXML("text_dazhong1.xml")
        # linux
        font.saveXML("./luntan/text_dazhong1.xml")
        font_map = get_map1(self.be_p1, self.word_list)
        # print(font_map, "/\\" * 50)
        return font_map
def get_font():
    font = TTFont(r'E:\vscode_code\练习\实习僧\file.woff')
    font.saveXML(r'E:\vscode_code\练习\实习僧\file.xml')
    font_names = font.getGlyphOrder()
    ccmap = font['cmap'].getBestCmap()
    newmap = {}
    # these are the characters seen when the font file is opened in FontEditor
    texts = ['', '', '1', '3', '7', '9', '6', '8', '2', '4', '5', '0']
    font_name = {}
    # build a dict of the glyph names and the scrambled codes they correspond to
    for key, value in ccmap.items():
        # convert to hexadecimal
        key = hex(key)
        value = value.replace('uni', '')
        a = 'u' + '0' * (4 - len(value)) + value
        newmap[key] = a
    # drop the first element, which is not needed
    newmap.pop('0x78')
    # add the \u prefix to get a Unicode character
    for i, j in newmap.items():
        newmap[i] = eval("u" + "\'\\" + j + "\'")
    newdict = {}
    # rewrite the keys to match how the characters appear in the page source
    for key, value in newmap.items():
        key_ = key.replace('0x', '&#x')
        newdict[key_] = value
    return newdict
def handle_douyin_web_share(task):
    font = TTFont('DY.woff')
    font.saveXML('font2.xml')
    # extract the cmap mapping from the font
    best_map = font['cmap'].getBestCmap()
    # the keys come back as decimal, so convert them to hex
    new_best_map = {}
    for key, value in best_map.items():
        new_best_map[hex(key)] = value
    # print(new_best_map)
    # build the dictionary used on the site
    new_map = {
        'x': '',
        'num_': '1',
        'num_1': '0',
        'num_2': '3',
        'num_3': '2',
        'num_4': '4',
        'num_5': '5',
        'num_6': '6',
        'num_7': '9',
        'num_8': '7',
        'num_9': '8',
    }
    new_data = {}
    for k, v in new_best_map.items():
        new_data[k] = new_map[v]
    # replace the leading '0' of the keys in new_data with '&#'
    rs = {}
    for k, v in new_data.items():
        rs['&#' + k[1:] + ';'] = v
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'
    }
    url = 'https://www.iesdouyin.com/share/user/%s' % task['share_id']
    temp = requests.get(url=url, headers=headers)
    req = temp.text
    for k, v in rs.items():
        if k in req:
            req = req.replace(k, v)
    share_web_html = etree.HTML(req)
    # collected data
    user_info = {}
    user_info['nickname'] = share_web_html.xpath("//p[@class='nickname']/text()")[0]
    douyin_id1 = share_web_html.xpath("//p[@class='shortid']/text()")[0].replace(' ', '')
    douyin_id2 = ''.join(share_web_html.xpath("//p[@class='shortid']/i/text()")).replace(' ', '')
    search_douyin_str = re.compile(r'抖音ID:')
    user_info['douyin_id'] = re.sub(search_douyin_str, '', douyin_id1 + douyin_id2)
    user_info['job'] = share_web_html.xpath("//span[@class='info']/text()")[0].replace(' ', '')
    user_info['describe'] = share_web_html.xpath("//p[@class='signature']/text()")[0]
    user_info['guanzhu'] = ''.join(share_web_html.xpath("//p[@class='follow-info']/span[1]//i/text()")).replace(' ', '')
    fans = ''.join(share_web_html.xpath("//p[@class='follow-info']/span[2]//i/text()")).replace(' ', '')
    danwei1 = share_web_html.xpath("//p[@class='follow-info']/span[2]/span[1]/text()")[-1]
    if danwei1.strip() == 'w':
        user_info['fans'] = str(int(fans) / 10) + 'w'
    like = ''.join(share_web_html.xpath("//p[@class='follow-info']/span[3]//i/text()")).replace(' ', '')
    danwei2 = share_web_html.xpath("//p[@class='follow-info']/span[3]/span[1]/text()")[-1]
    if danwei2.strip() == 'w':
        user_info['like'] = str(int(like) / 10) + 'w'
    print(user_info)
def fourStyleFamily(self, position, suffix=None):
    """
    Replaces the name table and certain OS/2 values
    with those that will make a four-style family.
    """
    f = self.f
    source = TTFont(fourStyleFamilySources[position])
    tf = tempfile.mkstemp()
    pathToXML = tf[1]
    source.saveXML(pathToXML, tables=['name'])
    os.close(tf[0])
    with open(pathToXML, "r") as temp:
        xml = temp.read()
    # make the changes
    if suffix:
        xml = xml.replace("Input", "Input" + suffix)
    # save the table
    with open(pathToXML, 'w') as temp:
        temp.write(xml)
        temp.write('\r')
    f['OS/2'].usWeightClass = source['OS/2'].usWeightClass
    f['OS/2'].fsType = source['OS/2'].fsType
    # write the table
    f['name'] = newTable('name')
    importXML(f, pathToXML)
def get_font_xml(self):
    """
    Dump the font to xml.
    :return:
    """
    font = TTFont(self.font_path)
    font.saveXML(self.xml_path)
def font_decode(num, html):
    '''
    Crack the font-based anti-scraping obfuscation.
    :param num:
    :return:
    '''
    # save the font mapping file
    font_url = re.findall("charset=utf-8;base64,(.*?)'\) format", html, re.S)[0]
    font_data = base64.b64decode(font_url)
    file = open('../output/font.woff', 'wb')
    file.write(font_data)
    file.close()
    # save it as xml
    fonts = TTFont('../output/font.woff')
    fonts.saveXML('../output/font.xml')
    font = TTFont(BytesIO(base64.decodebytes(font_url.encode())))
    c = font['cmap'].tables[0].ttFont.tables['cmap'].tables[0].cmap
    ret_list = []
    for char in num:
        decode_num = ord(char)
        if decode_num in c:
            num = c[decode_num]
            num = int(num[-2:]) - 1
            ret_list.append(num)
        else:
            ret_list.append(char)
    ret_str_show = ''
    for num in ret_list:
        ret_str_show += str(num)
    return ret_str_show
def getContent(url):
    html = requests.get(url, headers=heard).text
    ttf = re.search(
        "format\('embedded-opentype'\),url\('(//k3.autoimg.cn/g1/.*?\.ttf)", html).group(1)
    ttflink = 'http:' + ttf
    r = requests.get(ttflink)
    with open('../autoHome/auto1.ttf', "wb") as f:
        f.write(r.content)
    font1 = TTFont('../autoHome/auto1.ttf')  # read the ttf file
    font1.saveXML('auto1.xml')  # convert it to xml
    data = mapRelation(mapping('auto1.xml'), mapping())
    for k, v in data.items():
        html = html.replace("&#" + k + ";", v)
    htmltxet = BeautifulSoup(html, "html.parser")
    contents = htmltxet.find_all("div", {"class": "clearfix contstxt outer-section"})
    for c in contents[1:]:
        ct = c.find("div", {"class": "x-reply font14"}).get_text().strip()
        if "本楼已被删除" not in ct:
            pubTime = c.find("span", {"xname": "date"}).get_text().strip()
            print("评论:" + ct)
            print("发布时间:" + pubTime)
            print("=======")
def parse_font():
    font = TTFont(r'E:\vscode_code\练习\58\maoyan.woff')
    font.saveXML(r'E:\vscode_code\练习\58\maoyan.xml')
    font_base_order = font.getGlyphOrder()[2:]
    print(font_base_order)
    # mapping written out by hand from the first downloaded file
    map_list = ['1', '2', '8', '0', '7', '6', '3', '5', '9', '4']
    font_new = TTFont(r'E:\vscode_code\练习\58\maoyan.woff')
    font_new.saveXML(r'E:\vscode_code\练习\58\maoyan.xml')
    font_new_order = font_new.getGlyphOrder()[2:]
    print(font_new_order)
    base_flag_list = list()
    new_flag_list = list()
    # build two nested lists of flags and compare each inner list to find the matching glyph
    for i, j in zip(font_base_order, font_new_order):
        flag_base = font['glyf'][i].flags
        flag_new = font_new['glyf'][j].flags
        base_flag_list.append(list(flag_base))
        new_flag_list.append(list(flag_new))
    memory_dict = dict()
    for index1, x in enumerate(base_flag_list):
        for index2, y in enumerate(new_flag_list):
            if common(x, y):
                key = font_new_order[index2]
                key = '&#x' + key.replace('uni', '').lower() + ';'
                memory_dict[key] = map_list[index1]
    print(memory_dict)
    return memory_dict
def handle_decode():
    # the original downloaded font file
    font = TTFont("DY.woff")
    # the converted xml file
    font.saveXML("font.xml")
    # extract the cmap contents
    best_map = font['cmap'].getBestCmap()
    # print(best_map)
    # the keys use a different base, so convert them (decimal to hex here)
    new_best_map = {}
    for key, value in best_map.items():
        new_best_map[hex(key)] = value
    new_map = {
        'x': '',
        'num_': '1',
        'num_1': '0',
        'num_2': '3',
        'num_3': '2',
        'num_4': '4',
        'num_5': '5',
        'num_6': '6',
        'num_7': '9',
        'num_8': '7',
        'num_9': '8'
    }
    new_date = {}
    for k, v in new_best_map.items():
        new_date[k] = new_map[v]
    rs = {}
    for k, v in new_date.items():
        rs['&#' + k[1:] + '; '] = v
    return rs
def translate_func(self, font_url):
    # mapping between glyph names and digits, confirmed by hand and stored in a dict
    initial_dict = {'glyph00001': '0', 'glyph00002': '1', 'glyph00003': '2',
                    'glyph00004': '3', 'glyph00005': '4', 'glyph00006': '5',
                    'glyph00007': '6', 'glyph00008': '7', 'glyph00009': '8',
                    'glyph00010': '9'}
    b = base64.b64decode(font_url)
    code_file = 0
    while True:
        file_ttf = f"{code_file}.ttf"
        if os.path.exists(file_ttf):
            code_file += 1
        else:
            with open(file_ttf, "wb") as f:
                f.write(b)
            break
    # take the new ttf file and convert it to xml for easier parsing
    font_2 = TTFont(file_ttf)
    file_xml = file_ttf.replace('ttf', 'xml')
    font_2.saveXML(file_xml)
    os.remove(file_ttf)
    pattern_code_obj = re.compile(r"""(
        <map\s+code="(\S+)"\s+name="(\w+\d+)"/>  # captures the new code and glyph-object name from the response
    )""", re.VERBOSE | re.S)
    with open(file_xml, 'r', encoding='utf-8') as online_ttf:
        # live code -> glyph-name dict
        online_codes_dict = dict([(code_obj[1], code_obj[2])
                                  for code_obj in re.findall(pattern_code_obj, online_ttf.read())])
        # resolve it against the hand-made mapping
        result_dict = {online_code: initial_dict[online_obj]
                       for online_code, online_obj in online_codes_dict.items()}
    os.remove(file_xml)
    return result_dict
def parse_ziti(self, class_name, datas):
    if class_name == 'shopNum':
        # review count, average spend, taste/environment/service scores
        woff_name = 'ebb40305.woff'
    elif class_name == 'tagName':
        # shop category, business district
        woff_name = '9b3f551f.woff'
    else:
        # full shop address / rating
        woff_name = '1d742900.woff'
    font_data = TTFont(woff_name)
    font_data.saveXML(woff_name.replace('.woff', '.xml'))  # save the xml to make analysis easier
    words = '1234567890店中美家馆小车大市公酒行国品发电金心业商司超生装园场食有新限天面工服海华水房饰城乐汽香部利子老艺花专东肉菜学福饭人百餐茶务通味所山区门药银农龙停尚安广鑫一容动南具源兴鲜记时机烤文康信果阳理锅宝达地儿衣特产西批坊州牛佳化五米修爱北养卖建材三会鸡室红站德王光名丽油院堂烧江社合星货型村自科快便日民营和活童明器烟育宾精屋经居庄石顺林尔县手厅销用好客火雅盛体旅之鞋辣作粉包楼校鱼平彩上吧保永万物教吃设医正造丰健点汤网庆技斯洗料配汇木缘加麻联卫川泰色世方寓风幼羊烫来高厂兰阿贝皮全女拉成云维贸道术运都口博河瑞宏京际路祥青镇厨培力惠连马鸿钢训影甲助窗布富牌头四多妆吉苑沙恒隆春干饼氏里二管诚制售嘉长轩杂副清计黄讯太鸭号街交与叉附近层旁对巷栋环省桥湖段乡厦府铺内侧元购前幢滨处向座下臬凤港开关景泉塘放昌线湾政步宁解白田町溪十八古双胜本单同九迎第台玉锦底后七斜期武岭松角纪朝峰六振珠局岗洲横边济井办汉代临弄团外塔杨铁浦字年岛陵原梅进荣友虹央桂沿事津凯莲丁秀柳集紫旗张谷的是不了很还个也这我就在以可到错没去过感次要比觉看得说常真们但最喜哈么别位能较境非为欢然他挺着价那意种想出员两推做排实分间甜度起满给热完格荐喝等其再几只现朋候样直而买于般豆量选奶打每评少算又因情找些份置适什蛋师气你姐棒试总定啊足级整带虾如态且尝主话强当更板知己无酸让入啦式笑赞片酱差像提队走嫩才刚午接重串回晚微周值费性桌拍跟块调糕'
    gly_list = font_data.getGlyphOrder()[2:]
    # print(gly_list)
    # ['unie8a0', 'unie910', 'unif6a4', 'unif3d3', 'unie2f4', 'unie7a6', 'uniea32', 'unif0f9', 'unie2ac']
    new_dict = {}
    for index, value in enumerate(words):
        new_dict[gly_list[index]] = value
    print(new_dict)
    rel = ''
    for j in datas:
        if j.startswith('u'):
            rel += new_dict[j]
        else:
            rel += j
    return rel
def main():
    if EXT == 'ttf':
        # convert source to xml
        print('[+] Loading {}..'.format(args.filename))
        font = TTFont(args.filename)
        font.saveXML(OUT_TTX)
        # parsing XML
        print('[+] Generating {} file...'.format(OUT_TTX))
        font_tree = ET.parse(OUT_TTX)
        font_root = font_tree.getroot()
        for letter in font_root.iter('map'):
            if letter.attrib['name'] in subs.keys():
                letter.set('code', hex(ord(subs[letter.attrib['name']])))
        font_tree.write(OUT_TTX)
        # convert to ttf
        print('[+] Generating {} file...'.format(OUT_TTF))
        font = TTFont()
        font.importXML(OUT_TTX)
        font.save(OUT_TTF)
    elif EXT == 'ttx':
        # load the xml
        font_root = ET.parse(args.filename).getroot()
        for letter in asci:
            for item in font_root.iter('map'):
                if item.attrib['code'] == hex(ord(letter)):
                    subs[item.attrib['name']] = letter
        if args.t:
            print('\n' + ''.join(subs[letter] if letter in subs.keys() else letter for letter in args.t))
def woff_to_xml(self):
    dir_name, file_name_stem, _ = Woff2Text.get_dir_file_name(self.woff_target)
    xml_file = os.path.join(dir_name, file_name_stem + '.xml')
    font = TTFont(self.woff_target)
    font.saveXML(xml_file)
    return xml_file
def _verifyOutput(outPath):
    f = TTFont(outPath)
    f.saveXML(outPath + ".ttx")
    with open(outPath + ".ttx") as f:
        testData = strip_VariableItems(f.read())
    refData = strip_VariableItems(getTestData(os.path.basename(outPath) + ".ttx"))
    assert refData == testData
def get_map_url(font_name):
    # read the font file into an object Python can work with
    base_font = TTFont(font_name)
    base_font.saveXML('font.xml')
    # you can inspect the xml file yourself; the cmap section holds the code mapping
    font_set = base_font.getBestCmap()
    print('font_set:->', font_set)
    # print font_set to check it; this step could also just iterate the dict directly
    print(type(font_set), font_set.keys(), type(font_set.keys()))
    # start from the fifth entry, because only the entries after it relate to digits
    new_keys_1 = [(str(chr(i))) for i in list(font_set.keys())[5:]]
    # to work around encoding issues, unicode-escape first and then decode
    new_keys = [
        i.encode('unicode_escape').decode().replace('\\U000', '&#x')
        for i in new_keys_1
    ]
    print(new_keys)
    new_values = []
    # get the digit each glyph name corresponds to
    for i in list(font_set.values())[5:]:
        new_values.append(english_2_num[i])
    print('new_values:->', new_values)
    # build the glyph-to-digit mapping:
    new_font_set_1 = {}
    if len(new_values) == len(new_keys):
        for i in range(len(new_values)):
            new_font_set_1[new_keys[i]] = new_values[i]
    print(new_font_set_1)
    return new_font_set_1
def saveFont(glyphs, outputFileName, asXML, pixelSize, descent, fontName, copyrightYear, creator, version):
    f = TTFont()
    vectorizedGlyphs = {glyph: [vectorizeGlyph(glyphs[glyph][0], pixelSize, descent), glyphs[glyph][1]] for glyph in glyphs}
    unicodes = [code for glyph in glyphs for code in glyphs[glyph][1]]
    # Populate basic tables (there are a few dependencies so order matters)
    makeTable_glyf(f, vectorizedGlyphs)
    makeTable_maxp(f)
    makeTable_loca(f)
    makeTable_head(f)
    makeTable_hmtx(f)
    makeTable_hhea(f, pixelSize, descent)
    makeTable_OS2(f, pixelSize, descent, min(unicodes), max(unicodes))
    makeTable_cmap(f, glyphs)
    makeTable_name(f, fontName, "Regular", copyrightYear, creator, version)
    makeTable_post(f, pixelSize, descent)
    if asXML:
        # We have to compile the TTFont manually when saving as TTX
        # (to auto-calculate stuff here and there)
        f["glyf"].compile(f)
        f["maxp"].compile(f)
        f["loca"].compile(f)
        f["head"].compile(f)
        f["hmtx"].compile(f)
        f["hhea"].compile(f)
        f["OS/2"].compile(f)
        f["cmap"].compile(f)
        f["name"].compile(f)
        f["post"].compile(f)
        print "PLEASE NOTE: When exporting directly to XML, the checkSumAdjustment value in the head table will be 0."
        f.saveXML(outputFileName)
    else:
        f.save(outputFileName)
def generate_ttx(self):
    '''Generate a ``.ttx`` file from the available ``.otf`` font.'''
    otf_path = self.otf_path()
    ttx_path = self.ttx_path()
    if not os.path.exists(otf_path):
        self.generate_otf()
    tt = TTFont(otf_path)
    tt.saveXML(ttx_path)
def test_toXML(self):
    font = TTFont(sfntVersion='OTTO')
    cffTable = font['CFF '] = newTable('CFF ')
    cffTable.decompile(self.cffData, font)
    out = UnicodeIO()
    font.saveXML(out)
    cffXML = strip_ttLibVersion(out.getvalue()).splitlines()
    self.assertEqual(cffXML, self.cffXML)
def test_toXML(self):
    font = TTFont(file=CFF_BIN)
    cffTable = font['CFF2']
    cffData = cffTable.compile(font)
    out = UnicodeIO()
    font.saveXML(out)
    cff2XML = out.getvalue()
    cff2XML = strip_VariableItems(cff2XML)
    cff2XML = cff2XML.splitlines()
    self.assertEqual(cff2XML, self.cff2XML)
def write_checksum(filepaths, stdout_write=False, use_ttx=False, include_tables=None,
                   exclude_tables=None, do_not_cleanup=False):
    checksum_dict = {}
    for path in filepaths:
        if not os.path.exists(path):
            sys.stderr.write("[checksum.py] ERROR: " + path + " is not a valid file path" + os.linesep)
            sys.exit(1)

        if use_ttx:
            # append a .ttx extension to existing extension to maintain data about the binary that
            # was used to generate the .ttx XML dump. This creates unique checksum path values for
            # paths that would otherwise not be unique with a file extension replacement with .ttx
            # An example is woff and woff2 web font files that share the same base file name:
            #
            #   coolfont-regular.woff  ==> coolfont-regular.ttx
            #   coolfont-regular.woff2 ==> coolfont-regular.ttx  (KAPOW! checksum data lost as this would overwrite dict value)
            temp_ttx_path = path + ".ttx"

            tt = TTFont(path)
            # important to keep the newlinestr value defined here as hash values will change across platforms
            # if platform specific newline values are assumed
            tt.saveXML(temp_ttx_path, newlinestr="\n", skipTables=exclude_tables, tables=include_tables)
            checksum_path = temp_ttx_path
        else:
            if include_tables is not None:
                sys.stderr.write("[checksum.py] -i and --include are not supported for font binary filepaths. "
                                 "Use these flags for checksums with the --ttx flag.")
                sys.exit(1)
            if exclude_tables is not None:
                sys.stderr.write("[checksum.py] -e and --exclude are not supported for font binary filepaths. "
                                 "Use these flags for checksums with the --ttx flag.")
                sys.exit(1)
            checksum_path = path

        file_contents = _read_binary(checksum_path)

        # store SHA1 hash data and associated file path basename in the checksum_dict dictionary
        checksum_dict[basename(checksum_path)] = hashlib.sha1(file_contents).hexdigest()

        # remove temp ttx files when present
        if use_ttx and do_not_cleanup is False:
            os.remove(temp_ttx_path)

    # generate the checksum list string for writes
    checksum_out_data = ""
    for key in checksum_dict.keys():
        checksum_out_data += checksum_dict[key] + " " + key + "\n"

    # write to stdout stream or file based upon user request (default = file write)
    if stdout_write:
        sys.stdout.write(checksum_out_data)
    else:
        checksum_report_filepath = "checksum.txt"
        with open(checksum_report_filepath, "w") as file:
            file.write(checksum_out_data)
def ttDump(input, output, options):
    print('Dumping "%s" to "%s"...' % (input, output))
    ttf = TTFont(input, 0,
                 verbose=options.verbose,
                 allowVID=options.allowVID,
                 ignoreDecompileErrors=options.ignoreDecompileErrors,
                 fontNumber=options.fontNumber)
    ttf.saveXML(output,
                tables=options.onlyTables,
                skipTables=options.skipTables,
                splitTables=options.splitTables,
                disassembleInstructions=options.disassembleInstructions)
    ttf.close()
def ttDump(input):
    output = tempfile.TemporaryFile(suffix=".ttx")
    ttf = TTFont(input, 0,
                 allowVID=False,
                 quiet=None,
                 ignoreDecompileErrors=True,
                 fontNumber=-1)
    ttf.saveXML(output,
                tables=[],
                skipTables=[],
                splitTables=False,
                disassembleInstructions=True,
                bitmapGlyphDataFormat='raw')
    ttf.close()
    return output
def otf2ttx(otf_path, ttx_path=None):
    """Generate a ``.ttx`` font from an ``.otf`` file.

    :param str otf_path: Path of the .otf font source.
    :param str ttx_path: Path of the target .ttx font.

    """
    # make ttx path
    if not ttx_path:
        ttx_path = '%s.ttx' % os.path.splitext(otf_path)[0]
    # save ttx font
    tt = TTFont(otf_path)
    tt.saveXML(ttx_path)
def ttDump(input, output, options):
    log.info('Dumping "%s" to "%s"...', input, output)
    if options.unicodedata:
        setUnicodeData(options.unicodedata)
    ttf = TTFont(input, 0,
                 allowVID=options.allowVID,
                 ignoreDecompileErrors=options.ignoreDecompileErrors,
                 fontNumber=options.fontNumber)
    ttf.saveXML(output,
                tables=options.onlyTables,
                skipTables=options.skipTables,
                splitTables=options.splitTables,
                disassembleInstructions=options.disassembleInstructions,
                bitmapGlyphDataFormat=options.bitmapGlyphDataFormat)
    ttf.close()
def __init__(self, fileish):
    self._infile = fileish
    self._xml = StrungIO()  # grrrrrrrr
    stdout = sys.stdout
    sys.stdout = open(os.devnull, 'w')
    tt = TTFont(fileish)
    tt.saveXML(self._xml, tables=['name'], progress=False)
    sys.stdout = stdout
    self._xml.seek(0)
    self._tree = etree.parse(self._xml)
    self._name = self._tree.getroot().find('name')
def test_toXML(self):
    font = TTFont(sfntVersion="\x00\x01\x00\x00")
    glyfTable = font['glyf'] = newTable('glyf')
    font['head'] = newTable('head')
    font['loca'] = newTable('loca')
    font['maxp'] = newTable('maxp')
    font['maxp'].decompile(self.maxpData, font)
    font['head'].decompile(self.headData, font)
    font['loca'].decompile(self.locaData, font)
    glyfTable.decompile(self.glyfData, font)
    out = UnicodeIO()
    font.saveXML(out)
    glyfXML = strip_ttLibVersion(out.getvalue()).splitlines()
    self.assertEqual(glyfXML, self.glyfXML)
def otf2ttx(otf_path, ttx_path=None):
    """Generate a .ttx font from an .otf file.

    otf_path: Path of the .otf font source.
    ttx_path: Path of the target .ttx font.

    """
    # make ttx path
    if not ttx_path:
        ttx_path = '%s.ttx' % os.path.splitext(otf_path)[0]
    # save ttx font
    with SuppressPrint():
        tt = TTFont(otf_path)
        tt.verbose = False
        tt.saveXML(ttx_path)
def ttDump(input, output, options):
    if not options.quiet:
        print('Dumping "%s" to "%s"...' % (input, output))
    ttf = TTFont(input, 0,
                 verbose=options.verbose,
                 allowVID=options.allowVID,
                 quiet=options.quiet,
                 ignoreDecompileErrors=options.ignoreDecompileErrors,
                 fontNumber=options.fontNumber)
    ttf.saveXML(output,
                quiet=options.quiet,
                tables=options.onlyTables,
                skipTables=options.skipTables,
                splitTables=options.splitTables,
                disassembleInstructions=options.disassembleInstructions,
                bitmapGlyphDataFormat=options.bitmapGlyphDataFormat)
    ttf.close()
def vtt_dump(infile, outfile=None, **kwargs):
    if not os.path.exists(infile):
        raise VTTLibArgumentError("'%s' not found" % infile)
    font = TTFont(infile)
    if font.sfntVersion not in ("\x00\x01\x00\x00", "true"):
        raise VTTLibArgumentError("Not a TrueType font (bad sfntVersion)")
    for table_tag in VTT_TABLES:
        if table_tag not in font:
            raise VTTLibArgumentError("Table '%s' not found in input font" % table_tag)

    if not outfile:
        ufo = os.path.splitext(infile)[0] + ".ufo"
    else:
        ufo = outfile
    if not os.path.exists(ufo) or not os.path.isdir(ufo):
        raise VTTLibArgumentError("No such directory: '%s'" % ufo)
    check_ufo_version(ufo)

    folder = os.path.join(ufo, "data", TTX_DATA_FOLDER)
    # create data sub-folder if it doesn't exist already
    try:
        os.makedirs(folder)
    except OSError as e:
        if e.errno != errno.EEXIST or not os.path.isdir(folder):
            raise

    normalize_vtt_programs(font)

    ufo_contents = read_ufo_contents(ufo)
    subset_vtt_glyph_programs(font, list(ufo_contents))

    for tag in VTT_TABLES:
        # dump each table individually instead of using 'splitTables'
        # to avoid creating an extra index file
        outfile = os.path.join(folder, tagToIdentifier(tag) + ".ttx")
        # always use Unix LF newlines
        font.saveXML(outfile, tables=[tag], newlinestr="\n")

    write_maxp_data(font, ufo)
def process(jobs, options):
    for (input, origin) in jobs:
        tt = TTFont()
        tt.importXML(input, quiet=None)
        bc = BytecodeContainer(tt)

        if (options.allGlyphs):
            glyphs = filter(lambda x: x != 'fpgm' and x != 'prep', bc.tag_to_programs.keys())
        else:
            glyphs = map(lambda x: 'glyf.' + x, options.glyphs)

        if options.outputIR or options.reduceFunctions:
            ae, called_functions = analysis(bc, glyphs)

        if (options.outputPrep):
            print ("PREP:")
            if (options.outputIR):
                if 'prep' in bc.tag_to_programs:
                    bc.print_IR(bc.IRs['prep'])
                else:
                    print ("  <no prep>")
            else:
                bc.tag_to_programs['prep'].body.pretty_print()
            print ()
        if (options.outputFunctions):
            for key, value in bc.function_table.items():
                print ("Function #%d:" % (key))
                if (options.outputIR):
                    tag = "fpgm_%s" % key
                    if tag in bc.IRs:
                        bc.print_IR(bc.IRs[tag])
                    else:
                        print ("  <not executed, no IR>")
                else:
                    value.body.pretty_print()
                print ()
        if (options.outputGlyfPrograms):
            for glyph in glyphs:
                print ("%s:" % glyph)
                if (options.outputIR):
                    bc.print_IR(bc.IRs[glyph])
                else:
                    bc.tag_to_programs[glyph].body.pretty_print()
                print ()
        if (options.outputCallGraph):
            print ("called function set:")
            print (called_functions)
            print ("call graph (function, # calls to):")
            for item in ae.global_function_table.items():
                print (item)
        if (options.outputState):
            ae.environment.pretty_print()
        if (options.outputCVT):
            print("CVT = ", ae.environment.cvt)
        if (options.outputMaxStackDepth):
            print("Max Stack Depth =", ae.maximum_stack_depth)
        if (options.reduceFunctions):
            function_set = bc.function_table.keys()
            unused_functions = [item for item in function_set if item not in called_functions]
            bc.removeFunctions(unused_functions)
            bc.updateTTFont(tt)
            output = "Reduced" + origin
            if (options.outputXML):
                output = makeOutputFileName(output, ".ttx")
                tt.saveXML(output)
            else:
                output = makeOutputFileName(output, ".ttf")
                tt.save(output)

        if type(input) is file:
            input.close()
import glob

parser = argparse.ArgumentParser(prog='emoji-extractor',
                                 description="""Extract emojis from NotoColorEmoji.ttf. Requires FontTools.""")
parser.add_argument('-i', '--input', help='the TTF file to parse', default='NotoColorEmoji.ttf', required=False)
parser.add_argument('-o', '--output', help='the png output folder', default='output/', required=False)
args = parser.parse_args()

try:
    shutil.rmtree(args.output)
except:
    pass
os.mkdir(args.output)

font = TTFont(args.input)
font.saveXML('.NotoColorEmoji.ttx')
ttx = ElementTree.parse('.NotoColorEmoji.ttx').getroot()
os.remove('.NotoColorEmoji.ttx')

for element in ttx.find('CBDT').find('strikedata'):
    data = element.find('rawimagedata').text.split()
    name = element.attrib['name'].lower()
    name = name.replace('uni', 'u')
    imagePath = os.path.abspath(args.output + 'emoji_' + name + '.png')
    print 'Extracting emoji_' + name + '.png'
    emoji = open(imagePath, "wb")
    for char in data:
        hexChar = binascii.unhexlify(char)
        emoji.write(hexChar)
    emoji.close()
from fontTools.ttLib import TTFont

ft = TTFont('my.ttf')
ft.saveXML('my.xml')
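# A complementary sketch of the reverse direction: loading the TTX dump written above back into
# a TTFont and saving it as a binary font. TTFont(), importXML() and save() are standard
# fontTools.ttLib calls (importXML is also used in several snippets above); the 'my.xml' and
# 'my_rebuilt.ttf' file names are only illustrative assumptions, not taken from any snippet.
from fontTools.ttLib import TTFont

font = TTFont()
font.importXML('my.xml')     # read the XML/TTX dump produced by saveXML
font.save('my_rebuilt.ttf')  # compile it back into a binary font file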
def extract_tables(otf_path, dest_folder, table_names=['name'], split=True):
    """Extract font tables from an OpenType font as .ttx."""
    ttfont = TTFont(otf_path)
    info_file = os.path.splitext(os.path.split(otf_path)[1])[0]
    info_path = os.path.join(dest_folder, '%s.ttx' % info_file)
    ttfont.saveXML(info_path, tables=table_names, splitTables=split)
def test_build_var(tmpdir):
    outPath = os.path.join(str(tmpdir), "test_var.ttf")

    fb = FontBuilder(1024, isTTF=True)
    fb.setupGlyphOrder([".notdef", ".null", "A", "a"])
    fb.setupCharacterMap({65: "A", 97: "a"})

    advanceWidths = {".notdef": 600, "A": 600, "a": 600, ".null": 600}

    familyName = "HelloTestFont"
    styleName = "TotallyNormal"
    nameStrings = dict(familyName=dict(en="HelloTestFont", nl="HalloTestFont"),
                       styleName=dict(en="TotallyNormal", nl="TotaalNormaal"))
    nameStrings['psName'] = familyName + "-" + styleName

    pen = TTGlyphPen(None)
    pen.moveTo((100, 0))
    pen.lineTo((100, 400))
    pen.lineTo((500, 400))
    pen.lineTo((500, 000))
    pen.closePath()
    glyph = pen.glyph()

    pen = TTGlyphPen(None)
    emptyGlyph = pen.glyph()

    glyphs = {".notdef": emptyGlyph, "A": glyph, "a": glyph, ".null": emptyGlyph}
    fb.setupGlyf(glyphs)
    metrics = {}
    glyphTable = fb.font["glyf"]
    for gn, advanceWidth in advanceWidths.items():
        metrics[gn] = (advanceWidth, glyphTable[gn].xMin)
    fb.setupHorizontalMetrics(metrics)

    fb.setupHorizontalHeader(ascent=824, descent=200)
    fb.setupNameTable(nameStrings)

    axes = [
        ('LEFT', 0, 0, 100, "Left"),
        ('RGHT', 0, 0, 100, "Right"),
        ('UPPP', 0, 0, 100, "Up"),
        ('DOWN', 0, 0, 100, "Down"),
    ]
    instances = [
        dict(location=dict(LEFT=0, RGHT=0, UPPP=0, DOWN=0), stylename="TotallyNormal"),
        dict(location=dict(LEFT=0, RGHT=100, UPPP=100, DOWN=0), stylename="Right Up"),
    ]
    fb.setupFvar(axes, instances)

    variations = {}
    # Four (x, y) pairs and four phantom points:
    leftDeltas = [(-200, 0), (-200, 0), (0, 0), (0, 0), None, None, None, None]
    rightDeltas = [(0, 0), (0, 0), (200, 0), (200, 0), None, None, None, None]
    upDeltas = [(0, 0), (0, 200), (0, 200), (0, 0), None, None, None, None]
    downDeltas = [(0, -200), (0, 0), (0, 0), (0, -200), None, None, None, None]
    variations['a'] = [
        TupleVariation(dict(RGHT=(0, 1, 1)), rightDeltas),
        TupleVariation(dict(LEFT=(0, 1, 1)), leftDeltas),
        TupleVariation(dict(UPPP=(0, 1, 1)), upDeltas),
        TupleVariation(dict(DOWN=(0, 1, 1)), downDeltas),
    ]
    fb.setupGvar(variations)

    fb.setupOS2()
    fb.setupPost()
    fb.setupDummyDSIG()

    fb.save(outPath)

    f = TTFont(outPath)
    f.saveXML(outPath + ".ttx")
    with open(outPath + ".ttx") as f:
        testData = strip_VariableItems(f.read())
    refData = strip_VariableItems(getTestData("test_var.ttf.ttx"))
    assert refData == testData