def RegenerateAll(rootDirectory):
    """Regenerate all the automatically generated files in a Scintilla tree.

    rootDirectory: path (str or Path) to the Scintilla source root.
    Side effects: rewrites generated sections of source/build files,
    temporarily changes the process working directory, and updates
    version numbers and the Xcode project.
    """
    root = pathlib.Path(rootDirectory)
    # Resolve to an absolute path so os.chdir below is not affected by
    # the relative location we started from.
    scintillaBase = root.resolve()
    sci = ScintillaData.ScintillaData(scintillaBase)
    Regenerate(scintillaBase / "src/Catalogue.cxx", "//", sci.lexerModules)
    Regenerate(scintillaBase / "win32/scintilla.mak", "#", sci.lexFiles)
    # The DepGen generators expect to run from their own platform directory,
    # so save and restore the current working directory around them.
    startDir = os.getcwd()
    os.chdir(os.path.join(scintillaBase, "win32"))
    win32.DepGen.Generate()
    os.chdir(os.path.join(scintillaBase, "gtk"))
    gtk.DepGen.Generate()
    os.chdir(startDir)
    RegenerateXcodeProject(
        root / "cocoa/ScintillaFramework/ScintillaFramework.xcodeproj/project.pbxproj",
        sci.lexFiles, sci.lexersXcode)
    UpdateVersionNumbers(sci, root)
    # False: do not print the maximum interface ID while regenerating headers.
    HFacer.RegenerateAll(root, False)
def RegenerateAll(rootDirectory):
    """Regenerate all generated files in a standalone Lexilla tree.

    rootDirectory: path to the Lexilla source root.
    Rewrites lexer lists, version numbers (from version.txt), web pages,
    the Xcode project, headers via LexFacer, and make dependencies.
    """
    root = pathlib.Path(rootDirectory)
    lexillaBase = root.resolve()
    lex = LexillaData.LexillaData(lexillaBase)
    lexillaDir = lexillaBase
    srcDir = lexillaDir / "src"
    docDir = lexillaDir / "doc"
    Regenerate(srcDir / "Lexilla.cxx", "//", lex.lexerModules)
    Regenerate(srcDir / "lexilla.mak", "#", lex.lexFiles)
    # Discover version information
    # version.txt holds three digits, e.g. "532" -> "5.3.2".
    version = (lexillaDir / "version.txt").read_text().strip()
    versionDotted = version[0] + '.' + version[1] + '.' + version[2]
    # Comma form used by Windows RC VERSION_WORDS, e.g. "5, 3, 2, 0".
    versionCommad = versionDotted.replace(".", ", ") + ', 0'
    rcPath = srcDir / "LexillaVersion.rc"
    UpdateLineInFile(rcPath, "#define VERSION_LEXILLA",
        "#define VERSION_LEXILLA \"" + versionDotted + "\"")
    UpdateLineInFile(rcPath, "#define VERSION_WORDS",
        "#define VERSION_WORDS " + versionCommad)
    # Point download links at the current 3-digit version.
    ReplaceREInFile(docDir / "LexillaDownload.html",
        r"/www.scintilla.org/([a-zA-Z]+)\d\d\d",
        r"/www.scintilla.org/\g<1>" + version)
    pathMain = lexillaDir / "doc" / "Lexilla.html"
    UpdateLineInFile(pathMain,
        ' <font color="#FFCC99" size="3"> Release version',
        ' <font color="#FFCC99" size="3"> Release version ' + \
        versionDotted + '<br />')
    UpdateLineInFile(pathMain,
        ' Site last modified',
        ' Site last modified ' + lex.mdyModified + '</font>')
    UpdateLineInFile(pathMain,
        ' <meta name="Date.Modified"',
        ' <meta name="Date.Modified" content="' + lex.dateModified + '" />')
    lexillaXcode = lexillaDir / "src" / "Lexilla"
    lexillaXcodeProject = lexillaXcode / "Lexilla.xcodeproj" / "project.pbxproj"
    lexerReferences = LexillaData.FindLexersInXcode(lexillaXcodeProject)
    UpdateLineInPlistFile(lexillaXcode / "Info.plist",
        "CFBundleShortVersionString", versionDotted)
    ReplaceREInFile(lexillaXcodeProject,
        "CURRENT_PROJECT_VERSION = [0-9.]+;",
        f'CURRENT_PROJECT_VERSION = {versionDotted};')
    RegenerateXcodeProject(lexillaXcodeProject, lex.lexFiles, lexerReferences)
    LexFacer.RegenerateAll(root, False)
    # DepGen must run from the src directory; restore the cwd afterwards.
    currentDirectory = pathlib.Path.cwd()
    os.chdir(srcDir)
    DepGen.Generate()
    os.chdir(currentDirectory)
def RegenerateAll(root):
    """Regenerate the ScintillaCall C++ wrapper files from Scintilla.iface.

    root: string path (with trailing separator) to the project root; the
    Scintilla interface file is expected in a sibling scintilla/ directory.
    """
    f = Face.Face()
    f.ReadFromFile(root + "../scintilla/" + "include/Scintilla.iface")
    Regenerate(root + "src/ScintillaMessages.h", "//", HMessages(f))
    Regenerate(root + "src/ScintillaTypes.h", "//", HEnumerations(f), HConstants(f))
    Regenerate(root + "src/ScintillaCall.h", "//", HMethods(f))
    Regenerate(root + "src/ScintillaCall.cxx", "//", CXXMethods(f))
def RegenerateAll():
    """Regenerate SciTE's generated build and documentation files.

    Assumes it is run from two directories below the checkout root so that
    sibling scintilla/ and scite/ trees are reachable via "../../".
    """
    root="../../"
    sci = ScintillaData.ScintillaData(root + "scintilla/")
    # Generate HTML to document each property
    # This is done because tags can not be safely put inside comments in HTML
    documentProperties = list(sci.propertyDocuments.keys())
    ScintillaData.SortListInsensitive(documentProperties)
    propertiesHTML = []
    for k in documentProperties:
        propertiesHTML.append(
            "\t<tr id='property-%s'>\n\t<td>%s</td>\n\t<td>%s</td>\n\t</tr>" %
            (k, k, sci.propertyDocuments[k]))
    # Find all the SciTE properties files
    # otherProps are infrastructure files excluded from the language list.
    otherProps = [
        "abbrev.properties", "Embedded.properties",
        "SciTEGlobal.properties", "SciTE.properties"]
    propFilePaths = glob.glob(root + "scite/src/*.properties")
    ScintillaData.SortListInsensitive(propFilePaths)
    propFiles = [os.path.basename(f) for f in propFilePaths
        if os.path.basename(f) not in otherProps]
    ScintillaData.SortListInsensitive(propFiles)
    Regenerate(root + "scite/win32/makefile", "#", propFiles)
    Regenerate(root + "scite/win32/scite.mak", "#", propFiles)
    Regenerate(root + "scite/src/SciTEProps.cxx", "//", sci.lexerProperties)
    Regenerate(root + "scite/doc/SciTEDoc.html", "<!--", propertiesHTML)
    Generate(root + "scite/boundscheck/vcproj.gen",
        root + "scite/boundscheck/SciTE.vcproj", "#", sci.lexFiles)
    UpdateVersionNumbers(sci, root)
def main():
    """Regenerate the C# plugin-infrastructure files from Scintilla.iface.

    Relies on module-level scintillaIfacePath and templatePath for input
    and output locations.
    """
    f = Face.Face()
    f.ReadFromFile(os.path.join(scintillaIfacePath,"Scintilla.iface"))
    # "/* " is the comment prefix marking generated sections in C# files.
    Regenerate(os.path.join(templatePath,"Scintilla_iface.cs"), "/* ", printLexCSFile(f))
    Regenerate(os.path.join(templatePath,"ScintillaGateway.cs"), "/* ", printLexGatewayFile(f))
    Regenerate(os.path.join(templatePath,"IScintillaGateway.cs"), "/* ", printLexIGatewayFile(f))
    Regenerate(os.path.join(templatePath,"GatewayDomain.cs"), "/* ", printEnumDefinitions(f))
def RegenerateAll(root):
    """Rebuild the generated lexer lists and refresh version numbers.

    root: string path (with trailing separator) to the Scintilla root.
    """
    data = ScintillaData.ScintillaData(root)
    # Each target file pairs a comment prefix with the generated content.
    targets = (
        (root + "src/Catalogue.cxx", "//", data.lexerModules),
        (root + "win32/scintilla.mak", "#", data.lexFiles),
    )
    for path, prefix, contents in targets:
        Regenerate(path, prefix, contents)
    UpdateVersionNumbers(data, root)
def RegenerateAll(root, showMaxID):
    """Regenerate Scintilla.h and SciLexer.h from the interface definition.

    root: pathlib.Path of the Scintilla root.
    showMaxID: when true, also report the largest interface ID below 3000.
    """
    face = Face.Face()
    face.ReadFromFile(root / "include/Scintilla.iface")
    Regenerate(root / "include/Scintilla.h", "/* ", printHFile(face))
    Regenerate(root / "include/SciLexer.h", "/* ", printLexHFile(face))
    if not showMaxID:
        return
    # IDs at or above 3000 are excluded from the report.
    maximumID = max({int(v) for v in face.values if int(v) < 3000})
    print("Maximum ID is %d" % maximumID)
def updateCharacterCategoryTable(filename):
    """Regenerate the Unicode general-category tables written into *filename*.

    Emits both a range-list form (guarded by CharacterCategoryUseRangeList)
    and a multi-stage table plus a BMP run-length-encoded table.
    """
    categories = findCategories("../lexlib/CharacterCategory.h")
    output = [
        f"// Created with Python {platform.python_version()}, Unicode {unicodedata.unidata_version}"
    ]
    # One category index per Unicode code point.
    indexTable = [0] * UnicodeCharacterCount
    for ch in range(UnicodeCharacterCount):
        uch = chr(ch)
        category = unicodedata.category(uch)
        value = categories.index(category)
        indexTable[ch] = value
    # the sentinel value is used to simplify CharacterMap::Optimize()
    sentinel = UnicodeCharacterCount * 32 + categories.index('Cn')
    valueBit, rangeList = rangeEncode('catRanges', indexTable, sentinel=sentinel)
    assert valueBit == 5
    output.append("#if CharacterCategoryUseRangeList")
    output.append("const int catRanges[] = {")
    # NOTE(review): the range-list data emission is disabled, leaving catRanges
    # empty in the generated file — confirm this is intentional.
    #output.extend(f"{value}," for value in rangeList)
    output.append("};")
    output.append("")
    output.append("#else")
    config = {
        'tableName': 'catTable',
        # C++ preamble for the generated lookup function.
        # NOTE(review): internal layout of this snippet reconstructed from a
        # whitespace-mangled source — verify against the generated output.
        'function': """CharacterCategory CategoriseCharacter(int character) noexcept {
	if (character < 0 || character > maxUnicode) {
		return ccCn;
	}""",
        'returnType': 'CharacterCategory',
    }
    table, function = buildMultiStageTable('CharacterCategory', indexTable,
        config=config, level=3)
    output.append("")
    output.extend(table)
    valueBit, totalBit, data = runLengthEncode(
        'CharacterCategory BMP', indexTable[:BMPCharacterCharacterCount])
    assert valueBit == 5
    assert totalBit == 16
    output.append("")
    output.append('const uint16_t CatTableRLE_BMP[] = {')
    output.extend(dumpArray(data, 20))
    output.append("};")
    output.append("")
    output.append("#endif")
    # Two generated sections in the same file use different markers.
    Regenerate(filename, "//", output)
    Regenerate(filename, "//function", function)
def RegenerateAll(root):
    """Regenerate the Lexilla lexer lists inside a Scintilla tree.

    root: string path to the Scintilla root (joined with os.sep for
    ScintillaData, which expects a trailing separator).
    """
    # Fixed: removed unused local `scintillaBase = os.path.abspath(root)`.
    sci = ScintillaData.ScintillaData(root + os.sep)
    src = os.path.join(root, "lexilla", "src")
    Regenerate(os.path.join(src, "Lexilla.cxx"), "//", sci.lexerModules)
    Regenerate(os.path.join(src, "lexilla.mak"), "#", sci.lexFiles)
def RegenerateAll():
    """Regenerate SciTE's IFaceTable.cxx and PaneAPI.html from the iface file.

    Relies on module-level srcRoot pointing at the checkout root containing
    sibling scintilla/ and scite/ trees.
    """
    f = Face.Face()
    f.ReadFromFile(srcRoot + "/scintilla/include/Scintilla.iface")
    # Menu command IDs come from SciTE.h; documentation order comes from
    # ScintillaDoc.html so PaneAPI.html can match it.
    menuIDs = ReadMenuIDs(srcRoot + "/scite/src/SciTE.h")
    idsInOrder = idsFromDocumentation(srcRoot + "/scintilla/doc/ScintillaDoc.html")
    Regenerate(srcRoot + "/scite/src/IFaceTable.cxx", "//",
        printIFaceTableCXXFile([f, menuIDs]))
    Regenerate(srcRoot + "/scite/doc/PaneAPI.html", "<!--",
        printIFaceTableHTMLFile([f, menuIDs, idsInOrder]))
def updateCharClassifyTable(filename, headfile):
    """Regenerate the Unicode character-class tables.

    filename: receives the RLE BMP table and the multi-stage table data.
    headfile: receives the generated classify function (tab-indented for
    inclusion inside a class body).
    """
    # One CharacterClass value per Unicode code point; CJK characters are
    # forced to CJKWord regardless of their general category.
    indexTable = [0] * UnicodeCharacterCount
    for ch in range(UnicodeCharacterCount):
        uch = chr(ch)
        category = unicodedata.category(uch)
        value = CategoryClassifyMap[category]
        if isCJKCharacter(category, ch):
            value = CharacterClass.CJKWord
        indexTable[ch] = int(value)
    output = [
        f"// Created with Python {platform.python_version()}, Unicode {unicodedata.unidata_version}"
    ]
    head_output = output[:]
    # BMP code points are stored run-length encoded.
    valueBit, totalBit, data = runLengthEncode(
        'CharClassify Unicode BMP', indexTable[:BMPCharacterCharacterCount])
    assert valueBit == 3
    assert totalBit == 16
    output.append('const uint16_t CharClassifyRLE_BMP[] = {')
    output.extend(dumpArray(data, 20))
    output.append("};")
    output.append("")
    output.append("}")
    # namespace
    output.append("")
    config = {
        'tableVarName': 'CharClassify::CharClassifyTable',
        'tableName': 'CharClassifyTable',
        # C++ preamble for the generated lookup function.
        # NOTE(review): internal layout reconstructed from a whitespace-mangled
        # source — verify against the generated output.
        'function': """static CharacterClass ClassifyCharacter(uint32_t ch) noexcept {
	if (ch < sizeof(classifyMap)) {
		return static_cast<CharacterClass>(classifyMap[ch]);
	}
	if (ch > maxUnicode) {
		// Cn
		return CharacterClass::space;
	}
	ch -= sizeof(classifyMap);""",
        'returnType': 'CharacterClass'
    }
    # Supplementary planes are stored as a multi-stage lookup table.
    table = indexTable[BMPCharacterCharacterCount:]
    data, function = buildMultiStageTable('CharClassify Unicode', table,
        config=config, level=3)
    output.extend(data)
    head_output.extend('\t' + line for line in function)
    Regenerate(filename, "//", output)
    Regenerate(headfile, "//", head_output)
def main():
    """Regenerate the Notepad++ C# plugin template files from Scintilla.iface.

    Paths are hard-coded relative to this script's expected location inside
    the plugin-template repository.
    """
    f = Face.Face()
    f.ReadFromFile(
        "../../../notepad-plus-plus/scintilla/include/Scintilla.iface")
    Regenerate(
        "../../Visual Studio Project Template C#/PluginInfrastructure/Scintilla_iface.cs",
        "/* ", printLexCSFile(f))
    Regenerate(
        "../../Visual Studio Project Template C#/PluginInfrastructure/ScintillaGateWay.cs",
        "/* ", printLexGatewayFile(f))
    Regenerate(
        "../../Visual Studio Project Template C#/PluginInfrastructure/IScintillaGateWay.cs",
        "/* ", printLexIGatewayFile(f))
def updateCharClassifyTable(filename, headfile):
    """Regenerate the Unicode character-class tables.

    filename: receives the RLE BMP table and compressed index table.
    headfile: receives the generated classify function (tab-indented for
    inclusion inside a class body).
    """
    # One classification per Unicode code point; CJK characters are forced
    # to ccCJKWord regardless of their general category.
    indexTable = [0] * UnicodeCharacterCount
    for ch in range(UnicodeCharacterCount):
        uch = chr(ch)
        category = unicodedata.category(uch)
        value = ClassifyMap[category]
        if isCJKCharacter(category, ch):
            value = CharClassify.ccCJKWord
        indexTable[ch] = int(value)
    output = [
        "// Created with Python %s, Unicode %s" % (
            platform.python_version(), unicodedata.unidata_version)
    ]
    head_output = output[:]
    # BMP code points are stored run-length encoded.
    data = runLengthEncode('CharClassify Unicode BMP',
        indexTable[:BMPCharacterCharacterCount], int(CharClassify.RLEValueBit))
    # Fixed: was an f-string with no placeholders (doubled braces rendered
    # as a single "{"); a plain literal is equivalent and clearer.
    output.append('const unsigned short CharClassifyRLE_BMP[] = {')
    output.extend(dumpArray(data, 20))
    output.append("};")
    output.append("")
    output.append("}")
    # namespace
    output.append("")
    args = {
        'table_var': 'CharClassify::CharClassifyTable',
        'table': 'CharClassifyTable',
        # C++ preamble for the generated lookup function.
        'function': """static cc ClassifyCharacter(unsigned int ch) noexcept {
	if (ch < sizeof(classifyMap)) {
		return static_cast<cc>(classifyMap[ch]);
	}
	if (ch > maxUnicode) {
		// Cn
		return ccSpace;
	}
	ch -= sizeof(classifyMap);""",
        'returnType': 'cc'
    }
    # Supplementary planes are stored as a compressed index table.
    table, function = compressIndexTable(
        'CharClassify Unicode', indexTable[BMPCharacterCharacterCount:], args)
    output.extend(table.splitlines())
    for line in function.splitlines():
        head_output.append('\t' + line)
    Regenerate(filename, "//", output)
    Regenerate(headfile, "//", head_output)
def RegenerateAll(root):
    """Regenerate catalogue, makefile, Xcode project, versions and headers.

    root: string path (with trailing separator) to the Scintilla root.
    """
    data = ScintillaData.ScintillaData(root)
    cataloguePath = root + "src/Catalogue.cxx"
    makefilePath = root + "win32/scintilla.mak"
    projectPath = (root +
        "cocoa/ScintillaFramework/ScintillaFramework.xcodeproj/project.pbxproj")
    Regenerate(cataloguePath, "//", data.lexerModules)
    Regenerate(makefilePath, "#", data.lexFiles)
    RegenerateXcodeProject(projectPath, data.lexFiles, data.lexersXcode)
    UpdateVersionNumbers(data, root)
    # False: do not print the maximum interface ID while regenerating headers.
    HFacer.RegenerateAll(root, False)
def updateCharacterCategoryTable(filename):
    """Regenerate the Unicode general-category table written into *filename*.

    Emits a compressed index table plus a run-length-encoded BMP table.
    """
    categories = findCategories("../lexlib/CharacterCategory.h")
    output = updateCharacterCategory(categories)
    # One category index per Unicode code point.
    indexTable = [0] * UnicodeCharacterCount
    for ch in range(UnicodeCharacterCount):
        uch = chr(ch)
        category = unicodedata.category(uch)
        value = categories.index(category)
        indexTable[ch] = value
    args = {
        'table': 'catTable',
        'with_function': False,
    }
    table, function = compressIndexTable('CharacterCategoryTable', indexTable, args)
    output.append("")
    output.extend(table.splitlines())
    output.extend(function.splitlines())
    # BMP code points also get a run-length-encoded form (5 value bits).
    data = runLengthEncode('CharacterCategoryTable',
        indexTable[:BMPCharacterCharacterCount], 5)
    output.append("")
    # Fixed: was an f-string with no placeholders (doubled braces rendered
    # as a single "{"); a plain literal is equivalent and clearer.
    output.append('const unsigned short CatTableRLE_BMP[] = {')
    output.extend(dumpArray(data, 20))
    output.append("};")
    output.append("")
    output.append("#endif")
    Regenerate(filename, "//", output)
def updateCharacterCategoryTable(filename):
    """Regenerate the Unicode general-category table and lookup function."""
    categories = findCategories("../lexlib/CharacterCategory.h")
    # One category index per Unicode code point.
    indexTable = [0] * UnicodeCharacterCount
    for ch in range(UnicodeCharacterCount):
        uch = chr(ch)
        category = unicodedata.category(uch)
        value = categories.index(category)
        indexTable[ch] = value
    output = ["// Created with Python %s, Unicode %s" % (
        platform.python_version(), unicodedata.unidata_version)]
    args = {
        'table': 'CharacterCategoryTable',
        # C++ preamble for the generated lookup function.
        # NOTE(review): internal layout reconstructed from a whitespace-mangled
        # source — verify against the generated output.
        'function': """CharacterCategory CategoriseCharacter(unsigned int ch) noexcept {
	if (ch > maxUnicode) {
		// Cn
		return ccCn;
	}""",
        'returnType': 'CharacterCategory'
    }
    table, function = compressIndexTable('CharacterCategoryTable', indexTable, args)
    output.append(table)
    output.append('')
    output.append("}\n")
    # namespace
    output.append(function)
    Regenerate(filename, "//", output)
def updateCharacterCategory(filename):
    """Write run-encoded Unicode general-category ranges into *filename*.

    Each table entry packs startCodePoint*32 + categoryIndex; a final
    sentinel entry past the last code point terminates the table.
    """
    values = [
        "// Created with Python %s, Unicode %s" % (platform.python_version(),
            unicodedata.unidata_version)
    ]
    startRange = 0
    category = unicodedata.category(chr(startRange))
    table = []
    # Walk the code points, emitting an entry at each category change.
    for ch in range(sys.maxunicode):
        uch = chr(ch)
        current = unicodedata.category(uch)
        if current != category:
            value = startRange * 32 + categories.index(category)
            table.append(value)
            category = current
            startRange = ch
    # Close the final run.
    value = startRange * 32 + categories.index(category)
    table.append(value)
    # the sentinel value is used to simplify CharacterCategoryMap::Optimize()
    category = 'Cn'
    value = (sys.maxunicode + 1) * 32 + categories.index(category)
    table.append(value)
    values.extend(["%d," % value for value in table])
    Regenerate(filename, "//", values)
def buildANSICharClassifyTable(filename):
    """Generate character-class tables for the Windows ANSI code pages.

    Code pages whose classification data is identical share one table slice;
    GetANSICharClassifyTable() maps a code page number to its slice.
    """
    encodingList = [
        ('cp1250', 1250, 'Central European (Windows-1250)'),
        ('cp1251', 1251, 'Cyrillic (Windows-1251)'),
        ('cp1252', 1252, 'Western European (Windows-1252)'),
        ('cp1253', 1253, 'Greek (Windows-1253)'),
        ('cp1254', 1254, 'Turkish (Windows-1254)'),
        ('cp1255', 1255, 'Hebrew (Windows-1255)'),
        ('cp1256', 1256, 'Arabic (Windows-1256)'),
        ('cp1257', 1257, 'Baltic (Windows-1257)'),
        ('cp1258', 1258, 'Vietnamese (Windows-1258)'),
        ('cp874', 874, 'Thai (Windows-874)'),
    ]
    # Keyed by the classification signature so duplicate tables are shared;
    # OrderedDict keeps deterministic output order.
    result = OrderedDict()
    offset = 0
    for encoding, codepage, comment in encodingList:
        s, m = buildCharClassify(encoding)
        if not s:
            # Encoding unavailable or produced no data; skip it.
            continue
        if s not in result:
            result[s] = {
                'data': m,
                'offset': offset,
                'codepage': [(codepage, comment)]}
            offset += len(m)
        else:
            # Identical classification: just record the extra code page.
            result[s]['codepage'].append((codepage, comment))
    output = ["// Created with Python %s, Unicode %s" % (
        platform.python_version(), unicodedata.unidata_version)]
    output.append("static const uint8_t ANSICharClassifyTable[] = {")
    for item in result.values():
        for page in item['codepage']:
            output.append('// ' + page[1])
        data = item['data']
        output.extend(dumpArray(data, 16, '0x%02X'))
    output.append("};")
    output.append("")
    # Dispatch function: code page -> (length, pointer into the shared table).
    output.append("static const uint8_t* GetANSICharClassifyTable(UINT cp, int *length) {")
    output.append("\tswitch (cp) {")
    for item in result.values():
        for page in item['codepage']:
            output.append("\tcase %d: // %s" % (page[0], page[1]))
        output.append("\t\t*length = %d;" % len(item['data']))
        output.append("\t\treturn ANSICharClassifyTable + %d;" % item['offset'])
    output.append("\tdefault:")
    output.append("\t\t*length = 0;")
    output.append("\t\treturn NULL;")
    output.append("\t}")
    output.append("}")
    output.append("")
    ellipsis = buildFoldDisplayEllipsis()
    output.extend(ellipsis)
    print('ANSICharClassifyTable:', len(result), len(encodingList))
    Regenerate(filename, "//", output)
def RegenerateAll(root):
    """Regenerate only the lexer catalogue; other targets are disabled.

    root: string path (with trailing separator) to the Scintilla root.
    """
    data = ScintillaData.ScintillaData(root)
    Regenerate(root + "src/Catalogue.cxx", "//", data.lexerModules)
    # The remaining regeneration steps are deliberately disabled:
    #Regenerate(root + "win32/scintilla.mak", "#", sci.lexFiles) #commented out
    #RegenerateXcodeProject(root + "cocoa/ScintillaFramework/ScintillaFramework.xcodeproj/project.pbxproj", sci.lexFiles, sci.lexersXcode) #commented out
def RegenerateAll():
    """Regenerate SciTE's generated build, source and documentation files.

    Assumes it is run from two directories below the checkout root so that
    sibling scintilla/ and scite/ trees are reachable via "../../".
    """
    root = "../../"
    sci = ScintillaData.ScintillaData(root + "scintilla/")
    pathSciTE = os.path.join(root, "scite")
    # Generate HTML to document each property
    # This is done because tags can not be safely put inside comments in HTML
    documentProperties = list(sci.propertyDocuments.keys())
    ScintillaData.SortListInsensitive(documentProperties)
    propertiesHTML = []
    for k in documentProperties:
        propertiesHTML.append(
            "\t<tr id='property-%s'>\n\t<td>%s</td>\n\t<td>%s</td>\n\t</tr>" %
            (k, k, sci.propertyDocuments[k]))
    # Find all the SciTE properties files
    # otherProps are infrastructure files excluded from the language list.
    otherProps = [
        "abbrev.properties", "Embedded.properties",
        "SciTEGlobal.properties", "SciTE.properties"
    ]
    propFilePaths = glob.glob(os.path.join(pathSciTE, "src", "*.properties"))
    ScintillaData.SortListInsensitive(propFilePaths)
    propFiles = [
        os.path.basename(f) for f in propFilePaths
        if os.path.basename(f) not in otherProps
    ]
    ScintillaData.SortListInsensitive(propFiles)
    UpdateEmbedded(pathSciTE, propFiles)
    Regenerate(os.path.join(pathSciTE, "win32", "makefile"), "#", propFiles)
    Regenerate(os.path.join(pathSciTE, "win32", "scite.mak"), "#", propFiles)
    Regenerate(os.path.join(pathSciTE, "src", "SciTEProps.cxx"), "//",
        sci.lexerProperties)
    Regenerate(os.path.join(pathSciTE, "doc", "SciTEDoc.html"), "<!--",
        propertiesHTML)
    # Credits contain non-ASCII names; embed them as octal-escaped UTF-8.
    credits = [OctalEscape(c.encode("utf-8")) for c in sci.credits]
    Regenerate(os.path.join(pathSciTE, "src", "Credits.cxx"), "//", credits)
    win32.AppDepGen.Generate()
    gtk.AppDepGen.Generate()
    UpdateVersionNumbers(sci, pathSciTE)
def RegenerateAll(rootDirectory):
    """Regenerate Lexilla files for a Lexilla tree nested inside Scintilla.

    rootDirectory: path to the Scintilla root; the Lexilla subtree is
    expected at <root>/lexilla.
    """
    root = pathlib.Path(rootDirectory)
    scintillaBase = root.resolve()
    sci = ScintillaData.ScintillaData(scintillaBase)
    lexillaDir = scintillaBase / "lexilla"
    srcDir = lexillaDir / "src"
    Regenerate(srcDir / "Lexilla.cxx", "//", sci.lexerModules)
    Regenerate(srcDir / "lexilla.mak", "#", sci.lexFiles)
    # Discover version information
    # version.txt holds three digits, e.g. "532" -> "5.3.2".
    version = (lexillaDir / "version.txt").read_text().strip()
    versionDotted = version[0] + '.' + version[1] + '.' + version[2]
    # Comma form used by Windows RC VERSION_WORDS, e.g. "5, 3, 2, 0".
    versionCommad = versionDotted.replace(".", ", ") + ', 0'
    rcPath = srcDir / "LexillaVersion.rc"
    UpdateLineInFile(rcPath, "#define VERSION_LEXILLA",
        "#define VERSION_LEXILLA \"" + versionDotted + "\"")
    UpdateLineInFile(rcPath, "#define VERSION_WORDS",
        "#define VERSION_WORDS " + versionCommad)
    lexillaXcode = lexillaDir / "src" / "Lexilla"
    lexillaXcodeProject = lexillaXcode / "Lexilla.xcodeproj" / "project.pbxproj"
    lexerReferences = ScintillaData.FindLexersInXcode(lexillaXcodeProject)
    UpdateLineInPlistFile(lexillaXcode / "Info.plist",
        "CFBundleShortVersionString", versionDotted)
    ReplaceREInFile(lexillaXcodeProject,
        "CURRENT_PROJECT_VERSION = [0-9.]+;",
        f'CURRENT_PROJECT_VERSION = {versionDotted};')
    RegenerateXcodeProject(lexillaXcodeProject, sci.lexFiles, lexerReferences)
    # DepGen must run from the src directory; restore the cwd afterwards.
    currentDirectory = pathlib.Path.cwd()
    os.chdir(srcDir)
    DepGen.Generate()
    os.chdir(currentDirectory)
def update_latex_input_header(latex_map, emoji_map):
    """Regenerate the LaTeX/emoji input-sequence header constants.

    latex_map / emoji_map: dict keyed by input sequence string.
    Writes an enum of min/max sequence lengths and the character-set helper
    into the file at module-level header_path.
    """
    output = [
        '// LaTeX input sequences based on ' + source_info['latex_version']
    ]
    output.append('// documented at ' + source_info['latex_link'] + '.')
    output.append('// Emoji input sequences based on ' + source_info['emoji_link'] + ',')
    output.append('// downloaded on ' + source_info['emoji_version'] + '.')
    output.append('')
    output.append('enum {')
    min_latex_len, max_latex_len = get_input_map_size_info('LaTeX', latex_map)
    output.append('\tMinLaTeXInputSequenceLength = %d,' % min_latex_len)
    output.append('\tMaxLaTeXInputSequenceLength = %d,' % max_latex_len)
    output.append('')
    min_emoji_len, max_emoji_len = get_input_map_size_info('Emoji', emoji_map)
    # Emoji sequences are typed as :name: — prefix required, suffix optional.
    prefix = ':'
    suffix = ':'
    output.append('#if EnableLaTeXLikeEmojiInput')
    output.append('\tEmojiInputSequencePrefixLength = %d,' % len(prefix))
    output.append('\tEmojiInputSequenceSuffixLength = %d,' % len(suffix))
    output.append(
        '\tMinEmojiInputSequenceLength = %d + EmojiInputSequencePrefixLength, // suffix is optional' % min_emoji_len)
    output.append(
        '\tMaxEmojiInputSequenceLength = %d + EmojiInputSequencePrefixLength + EmojiInputSequenceSuffixLength,' % max_emoji_len)
    output.append('')
    # The shared input buffer must hold whichever sequence kind is longer.
    if max_latex_len >= max_emoji_len + len(prefix) + len(suffix):
        output.append(
            '\tMaxLaTeXInputBufferLength = 1 + MaxLaTeXInputSequenceLength + 1,'
        )
    else:
        output.append(
            '\tMaxLaTeXInputBufferLength = 1 + MaxEmojiInputSequenceLength + 1,'
        )
    output.append('#else')
    output.append(
        '\tMaxLaTeXInputBufferLength = 1 + MaxLaTeXInputSequenceLength + 1,')
    output.append('#endif')
    output.append('};')
    # Report sequences containing punctuation for manual review.
    latex_punctuation = find_word_contains_punctuation(latex_map.keys())
    emoji_punctuation = find_word_contains_punctuation(emoji_map.keys())
    print('LaTeX punctuation:', latex_punctuation)
    print('Emoji punctuation:', emoji_punctuation)
    # Build the set of characters that may appear in each sequence kind.
    latex_charset = set(''.join(latex_map.keys()))
    emoji_charset = set(''.join(emoji_map.keys()))
    emoji_charset.update(prefix + suffix)
    build_charset_function(latex_charset, emoji_charset, output)
    Regenerate(header_path, '//', output)
def updateDBCSCharClassifyTable(filename):
    """Regenerate character-class tables for the double-byte code pages.

    Some code pages take an extra predicate identifying reserved or
    user-defined character ranges.
    """
    output = ["// Created with Python %s, Unicode %s" % (
        platform.python_version(), unicodedata.unidata_version)]
    makeDBCSCharClassifyTable(output,
        ['cp932', 'shift_jis', 'shift_jis_2004', 'shift_jisx0213'])
    makeDBCSCharClassifyTable(output, ['cp936', 'gbk'], isReservedOrUDC_GBK)
    makeDBCSCharClassifyTable(output, ['cp949'])
    # UHC
    makeDBCSCharClassifyTable(output,
        ['cp950', 'big5', 'big5hkscs'], isReservedOrUDC_Big5)
    makeDBCSCharClassifyTable(output, ['cp1361'])
    # Johab
    # Drop the trailing separator appended after the last table.
    output.pop()
    Regenerate(filename, "//dbcs", output)
def RegenerateAll(root):
    """Regenerate lexer lists, version numbers and headers.

    root: string path (with trailing separator) to the Scintilla root.
    Dependency generation and the Xcode project update are disabled below.
    """
    data = ScintillaData.ScintillaData(root)
    Regenerate(root + "src/Catalogue.cxx", "//", data.lexerModules)
    Regenerate(root + "win32/scintilla.mak", "#", data.lexFiles)
    # Disabled steps kept for reference:
    #scintillaBase = os.path.abspath(root)
    #startDir = os.getcwd()
    #os.chdir(os.path.join(scintillaBase, "win32"))
    #win32.DepGen.Generate()
    #os.chdir(os.path.join(scintillaBase, "gtk"))
    #gtk.DepGen.Generate()
    #os.chdir(startDir)
    #RegenerateXcodeProject(root + "cocoa/ScintillaFramework/ScintillaFramework.xcodeproj/project.pbxproj",
    #    sci.lexFiles, sci.lexersXcode)
    UpdateVersionNumbers(data, root)
    # False: do not print the maximum interface ID while regenerating headers.
    HFacer.RegenerateAll(root, False)
def RegenerateAll(root):
    """Regenerate Lexilla sources and version resource inside a Scintilla tree.

    root: string path to the Scintilla root; the Lexilla subtree is expected
    at <root>/lexilla.
    """
    scintillaBase = os.path.abspath(root)
    sci = ScintillaData.ScintillaData(root + os.sep)
    lexillaDir = os.path.join(root, "lexilla")
    srcDir = os.path.join(lexillaDir, "src")
    Regenerate(os.path.join(srcDir, "Lexilla.cxx"), "//", sci.lexerModules)
    Regenerate(os.path.join(srcDir, "lexilla.mak"), "#", sci.lexFiles)
    # Discover version information
    # version.txt holds three digits, e.g. "532" -> "5.3.2".
    with open(os.path.join(lexillaDir, "version.txt")) as f:
        version = f.read().strip()
    versionDotted = version[0] + '.' + version[1] + '.' + version[2]
    # Comma form used by Windows RC VERSION_WORDS, e.g. "5, 3, 2, 0".
    versionCommad = versionDotted.replace(".", ", ") + ', 0'
    rcPath = os.path.join(srcDir, "LexillaVersion.rc")
    UpdateLineInFile(rcPath, "#define VERSION_LEXILLA",
        "#define VERSION_LEXILLA \"" + versionDotted + "\"")
    UpdateLineInFile(rcPath, "#define VERSION_WORDS",
        "#define VERSION_WORDS " + versionCommad)
def updateCharacterCategory(filename):
    """Write run-encoded Unicode general-category ranges into *filename*.

    Each emitted entry packs startCodePoint*32 + categoryIndex, one entry
    per run of code points sharing a general category.
    """
    lines = ["// Created with Python %s, Unicode %s" % (
        platform.python_version(), unicodedata.unidata_version)]
    runStart = 0
    runCategory = unicodedata.category(chr(0))
    for codePoint in range(sys.maxunicode):
        currentCategory = unicodedata.category(chr(codePoint))
        if currentCategory != runCategory:
            # Category changed: close the previous run and start a new one.
            lines.append("%d," % (runStart * 32 + categories.index(runCategory)))
            runCategory = currentCategory
            runStart = codePoint
    # Close the final run.
    lines.append("%d," % (runStart * 32 + categories.index(runCategory)))
    Regenerate(filename, "//", lines)
def updateCaseConvert():
    """Regenerate the case-conversion tables in CaseConvert.cxx.

    Symmetric conversions are emitted as range quadruples and pairs;
    complex (multi-character) conversions as pipe-delimited strings.
    """
    symmetrics, complexes = conversionSets()
    rangeGroups, nonRanges = groupRanges(symmetrics)
    print(len(rangeGroups), "ranges")
    groupedLines = ["%d,%d,%d,%d," % group for group in rangeGroups]
    print(len(nonRanges), "non ranges")
    pairLines = ["%d,%d," % pair for pair in nonRanges]
    print(len(symmetrics), "symmetric")
    # Each complex conversion becomes an escaped, pipe-separated C string.
    complexLines = []
    for conversion in complexes:
        complexLines.append('"%s|%s|%s|%s|"' % tuple(escape(t) for t in conversion))
    print(len(complexLines), "complex")
    Regenerate("../src/CaseConvert.cxx", "//",
        groupedLines, pairLines, complexLines)
def update_lexer_keyword_attr(path):
    """Regenerate the switch-case table of per-lexer keyword attributes.

    path: file whose generated "//" section receives the case statements.
    AllKeywordAttrList maps resource ID -> list of (index, attr, comment)
    for keyword lists with non-zero attributes.
    """
    output = []
    for rid, nonzero in sorted(AllKeywordAttrList.items()):
        output.append(f'\tcase {rid}:')
        # Alignment parameters for the trailing comment column.
        tab_width = 4
        max_width = 36
        for index, attr, comment in nonzero:
            expr = KeywordAttr.get_c_expr(attr)
            line = f'attr[{index}] = {expr};'
            if '|' in line:
                # Combined-flag expressions are long; use a single tab.
                padding = 1
            else:
                # Pad with tabs so comments line up at max_width.
                padding = (max_width - len(line) + tab_width - 1) // tab_width
            padding = '\t'*padding
            output.append(f'\t\t{line}{padding}// {comment}')
        output.append('\t\tbreak;')
    Regenerate(path, '//', output)
def RegenerateAll(rootDirectory):
    """Regenerate the makefile, dependencies, versions and headers.

    rootDirectory: path (str or Path) to the Scintilla source root.
    Temporarily changes the process working directory for dependency
    generation.
    """
    root = pathlib.Path(rootDirectory)
    # Resolve to an absolute path so os.chdir below is not affected by
    # the relative location we started from.
    scintillaBase = root.resolve()
    sci = ScintillaData.ScintillaData(scintillaBase)
    Regenerate(scintillaBase / "win32/scintilla.mak", "#", sci.lexFiles)
    # The DepGen generators expect to run from their own platform directory,
    # so save and restore the current working directory around them.
    startDir = os.getcwd()
    os.chdir(os.path.join(scintillaBase, "win32"))
    win32.DepGen.Generate()
    os.chdir(os.path.join(scintillaBase, "gtk"))
    gtk.DepGen.Generate()
    os.chdir(startDir)
    UpdateVersionNumbers(sci, root)
    # False: do not print the maximum interface ID while regenerating headers.
    HFacer.RegenerateAll(root, False)
def updateUnicodeLineBreak(filename):
    """Regenerate the ASCII line-break table and write a review log.

    Only the first 128 entries are emitted into *filename*; the full
    per-code-point classification is computed first so word characters
    can be forced to NonBreak.
    """
    indexTable = [0] * UnicodeCharacterCount
    for ch, prop in enumerate(kUnicodeLineBreak):
        category = unicodedata.category(chr(ch))
        cc = ClassifyMap[category]
        lb = LineBreakMap[prop]
        # Word characters never break unless the property already allows any.
        if cc == CharClassify.ccWord and lb != LineBreak.BreakAny:
            lb = LineBreak.NonBreak
        indexTable[ch] = int(lb)
    # Alternative encodings, currently disabled:
    #runLengthEncode('Unicode LineBreak', indexTable[:BMPCharacterCharacterCount], LineBreak.RLEValueBit)
    #compressIndexTable('Unicode LineBreak', indexTable, args)
    output = [
        "// Created with Python %s, Unicode %s" % (platform.python_version(),
            kUnicodeLineBreakVersion)
    ]
    # Only ASCII entries go into the generated file.
    lines = dumpArray(indexTable[:128], 16)
    output.extend(lines)
    Regenerate(filename, "//", output)
    # Write a human-readable log of the non-alphanumeric ASCII entries.
    escapeMap = {'\r': '\\r', '\n': '\\n', '\t': '\\t', ' ': 'SP'}
    with open('linebreak.log', 'w', encoding='utf-8', newline='\n') as fd:
        for ch, value in enumerate(indexTable[:128]):
            uch = chr(ch)
            if uch.isalnum():
                continue
            prop = kUnicodeLineBreak[ch]
            lb = LineBreakMap[prop]
            category = unicodedata.category(uch)
            name = ''
            try:
                name = unicodedata.name(uch)
            except ValueError:
                # Unnamed code point (e.g. most controls); leave name empty.
                pass
            uch = escapeMap.get(uch, uch)
            fd.write(
                f'{ch :02X} {value}; {category} {prop} {lb.name}; {uch} {name}\n'
            )