def main():
    """Parse the Unicode character database and regenerate the UTF-8 header
    (and optionally the unicode test file)."""
    # fetch UnicodeData.txt (downloaded next to the script if not present)
    database_text = utils.read_all_text_from_file(
        path.join(utils.get_script_folder(), 'UnicodeData.txt'),
        'https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt')

    # each entry looks like "0041;LATIN CAPITAL LETTER A;Lu;..."
    # groups: 1 = hex codepoint, 2 = name, 3 = general category
    entry_pattern = re.compile(r'^([0-9a-fA-F]+);(.+?);([a-zA-Z]+);')
    range_start = -1  # -1 == not currently inside a "<..., First>" range
    codepoints = []
    parsed_codepoints = 0
    for line in database_text.split('\n'):
        entry = entry_pattern.search(line)
        if entry is None:
            if range_start > -1:
                raise Exception(
                    'Previous codepoint indicated the start of a range but the next one was null'
                )
            continue
        codepoint = int('0x{}'.format(entry.group(1)), 16)
        if range_start > -1:
            # this entry is the "<..., Last>" end of a pending range
            # NOTE(review): range() excludes `codepoint` itself, so the
            # "Last" codepoint is not appended — confirm this is intended
            for cp in range(range_start, codepoint):
                parsed_codepoints += 1
                append_codepoint(codepoints, cp, entry.group(3))
            range_start = -1
        elif entry.group(2).endswith(', First>'):
            range_start = codepoint
        else:
            parsed_codepoints += 1
            append_codepoint(codepoints, codepoint, entry.group(3))

    print("Extracted {} of {} codepoints from unicode database file.".format(
        len(codepoints), parsed_codepoints))
    codepoints.sort(key=lambda r: r[0])

    # write the output files
    header_file_path = path.join(utils.get_script_folder(), '..', 'include',
                                 'toml++', 'toml_utf8_generated.h')
    test_file_path = path.join(utils.get_script_folder(), '..', 'tests',
                               'unicode_generated.cpp')
    print("Writing to {}".format(header_file_path))
    with open(header_file_path, 'w', encoding='utf-8', newline='\n') as header_file:
        if G.generate_tests:
            print("Writing to {}".format(test_file_path))
            with open(test_file_path, 'w', encoding='utf-8', newline='\n') as test_file:
                write_to_files(codepoints, header_file, test_file)
        else:
            write_to_files(codepoints, header_file, None)
def __preprocess(self, match):
    """Expand one #include directive (or, when called with a str, the root
    file name) into the full text of the referenced header, recursively."""
    # 'match' is either the root file name (str) or a regex match whose
    # group(1) is the quoted include path
    raw_incl = match if isinstance(match, str) else match.group(1)
    incl = raw_incl.strip().lower()
    if incl in self.__processed_includes:
        return ''  # each header is emitted at most once
    self.__processed_includes.append(incl)
    text = utils.read_all_text_from_file(
        path.join(utils.get_script_folder(), '..', 'include', 'toml++',
                  incl)).strip() + '\n'
    text = text.replace('\r\n', '\n')  # convert windows newlines
    text = self.__re_strip_blocks.sub('', text, 0)  # strip {{ }} blocks
    # recurse into nested #include directives, tracking depth so that only
    # first-level headers receive the divider banners below
    self.__current_level += 1
    text = self.__re_includes.sub(lambda m: self.__preprocess(m), text, 0)
    self.__current_level -= 1
    if (self.__current_level == 1):
        header_text = '↓ ' + raw_incl
        # centre the banner text within the divider; NOTE(review): the
        # 20/25/%4 constants appear tuned for the 113-column dividers —
        # confirm before changing
        lpad = 20 + ((25 * (self.__header_indent % 4)) - int(
            (len(header_text) + 4) / 2))
        self.__header_indent += 1
        text = '#if 1 {}\n{}\n\n#endif {}\n'.format(
            utils.make_divider(header_text, lpad, line_length=113), text,
            utils.make_divider('↑ ' + raw_incl, lpad, line_length=113))
    return '\n\n' + text + '\n\n'  # will get merged later
def __single_tags_substitute(cls, m):
    """Regex-substitution callback for single (self-closing) custom tags.

    <emoji NAME> tags are replaced with the corresponding HTML numeric
    entity, using a lazily downloaded and cached copy of GitHub's emoji
    list; any other tag is re-emitted as '<name attrs>'.
    """
    if (str(m[1]).lower() == 'emoji'):
        emoji = str(m[2]).strip().lower()
        if emoji == '':
            return ''
        if cls.__emojis is None:
            # load emojis.json, downloading it from the GitHub API when the
            # local copy does not exist yet
            file_path = path.join(utils.get_script_folder(), 'emojis.json')
            cls.__emojis = json.loads(
                utils.read_all_text_from_file(
                    file_path, 'https://api.github.com/emojis'))
            if '__processed' not in cls.__emojis:
                # first run on a fresh download: reduce each entry to
                # [HEX_CODEPOINT, uri] and persist the processed form so
                # subsequent runs skip this step
                emojis = {}
                for key, uri in cls.__emojis.items():
                    m2 = cls.__emoji_uri.fullmatch(uri)
                    if m2:
                        emojis[key] = [str(m2[1]).upper(), uri]
                aliases = [('sundae', 'ice_cream')]
                for alias, key in aliases:
                    emojis[alias] = emojis[key]
                emojis['__processed'] = True  # sentinel marking the cache as processed
                with open(file_path, 'w', encoding='utf-8',
                          newline='\n') as f:
                    f.write(json.dumps(emojis, sort_keys=True, indent=4))
                cls.__emojis = emojis
        if emoji not in cls.__emojis:
            return ''
        return '&#x{}'.format(cls.__emojis[emoji][0])
    else:
        return '<{}{}>'.format(m[1], (' ' + str(m[2]).strip()) if m[2] else '')
def main():
    """Gather TOML conformance-suite inputs and emit one generated C++ test
    file per (group, valid/invalid) combination."""
    extern_dir = path.join(utils.get_script_folder(), '..', 'extern')
    suites = {'valid': dict(), 'invalid': dict()}
    load_valid_inputs(suites, extern_dir)
    load_invalid_inputs(suites, extern_dir)
    for suite_type, groups in suites.items():
        for group_name, cases in groups.items():
            write_test_file('{}/{}'.format(group_name, suite_type), cases)
def main():
    """Regenerate the HTML documentation: run doxygen, patch the emitted XML,
    run m.css's doxygen.py, then post-process the HTML with regex fixes
    applied in parallel worker threads.

    Returns 1 if any worker thread reported an error, otherwise None.
    """
    global _threadError
    # os.cpu_count() can return None on exotic platforms; fall back to 1
    num_threads = (os.cpu_count() or 1) * 2
    root_dir = path.join(utils.get_script_folder(), '..')
    docs_dir = path.join(root_dir, 'docs')
    xml_dir = path.join(docs_dir, 'xml')
    html_dir = path.join(docs_dir, 'html')
    mcss_dir = path.join(root_dir, 'extern', 'mcss')
    doxygen = path.join(mcss_dir, 'documentation', 'doxygen.py')

    # delete any previously generated html and xml
    utils.delete_directory(xml_dir)
    utils.delete_directory(html_dir)

    # run doxygen
    # FIX: the original passed shell=True together with an argument *list*;
    # on POSIX that spawns `sh -c 'doxygen' Doxyfile`, silently dropping the
    # 'Doxyfile' argument. Passing the list without a shell is correct on
    # every platform.
    subprocess.check_call(['doxygen', 'Doxyfile'], cwd=docs_dir)

    # fix some shit that's broken in the xml
    preprocess_xml(xml_dir)

    # run doxygen.py (m.css)
    utils.run_python_script(doxygen, path.join(docs_dir, 'Doxyfile-mcss'),
                            '--no-doxygen')

    # delete xml
    utils.delete_directory(xml_dir)

    # post-process html files
    fixes = [
        CustomTagsFix()
        , SyntaxHighlightingFix()
        #, NavBarFix()
        , IndexPageFix()
        , ModifiersFix1()
        , ModifiersFix2()
        , InlineNamespaceFix1()
        , InlineNamespaceFix2()
        , InlineNamespaceFix3()
        , ExtDocLinksFix()
        , EnableIfFix()
        , ExternalLinksFix()
    ]
    files = [path.split(f) for f in utils.get_all_files(html_dir, any=('*.html', '*.htm'))]
    if files:
        with futures.ThreadPoolExecutor(max_workers=min(len(files), num_threads)) as executor:
            jobs = { executor.submit(postprocess_file, dir, file, fixes) : file for dir, file in files }
            for job in futures.as_completed(jobs):
                if _threadError:
                    # a worker failed; stop accepting queued work and bail out
                    executor.shutdown(False)
                    break
                else:
                    file = jobs[job]
                    print('Finished processing {}.'.format(file))
    if _threadError:
        return 1
def main():
    """Check that toml.hpp is identical to what generate_single_header.py
    would regenerate; return 0 when up-to-date, 1 otherwise."""
    hpp_path = path.join(utils.get_script_folder(), '..', 'toml.hpp')
    # sha1 of the current on-disk single header
    digest = lambda: hashlib.sha1(
        utils.read_all_text_from_file(hpp_path).encode('utf-8')).hexdigest()
    hash1 = digest()
    print("Hash 1: {}".format(hash1))
    utils.run_python_script('generate_single_header.py')
    hash2 = digest()
    print("Hash 2: {}".format(hash2))
    if hash1 == hash2:
        print("toml.hpp was up-to-date")
        return 0
    print(
        "toml.hpp wasn't up-to-date!\nRun generate_single_header.py before your commit to prevent this error.",
        file=sys.stderr)
    return 1
def preprocess(self, match):
    """Recursively expand an #include directive (or, for a str argument, the
    root file name) into the text of the referenced header, bracketing
    first-level files with divider banners."""
    include_name = match if isinstance(match, str) else match.group(1)
    key = include_name.strip().lower()
    if key in self.processed_includes:
        # each header is emitted at most once
        return ''
    self.processed_includes.append(key)
    text = utils.read_all_text_from_file(
        path.join(utils.get_script_folder(), '..', 'include', 'toml++', key)).strip() + '\n'
    # convert windows newlines
    text = text.replace('\r\n', '\n')
    # strip //# {{ ... //# }} blocks
    text = re.sub(r'//[#!]\s*[{][{].*?//[#!]\s*[}][}]*?\n', '', text, 0, re.I | re.S)
    # recurse into nested includes, tracking depth so only first-level
    # headers get the banners below
    self.current_level += 1
    text = re.sub(r'^\s*#\s*include\s+"(.+?)"',
                  lambda nested: self.preprocess(nested), text, 0, re.I | re.M)
    self.current_level -= 1
    if self.current_level == 1:
        banner = '↓ ' + include_name
        lpad = 28 + ((25 * (self.header_indent % 4)) - int((len(banner) + 4) / 2))
        self.header_indent += 1
        text = '{}\n#if 1\n\n{}\n\n#endif\n{}\n'.format(
            utils.make_divider(banner, lpad),
            text,
            utils.make_divider('↑ ' + include_name, lpad))
    return '\n\n' + text + '\n\n'  # will get merged later
def main():
    """Generate one Visual Studio .vcxproj per combination of test modes."""
    # '!' prefixes encode defaults-first sort ordering: '!x86' implies a
    # default of '!x64', '!!debug' a default of '!!release'; the prefixes
    # are stripped after sorting
    mode_keys = ['!!debug', '!x86', 'cpplatest', 'unrel', 'noexcept']
    # every combination of the keys, from none to all
    modes = [[]]
    for n in range(1, len(mode_keys)):
        for combo in itertools.combinations(mode_keys, n):
            modes.append([i for i in combo])
    modes.append(mode_keys)
    for mode in modes:
        if '!x86' not in mode:
            mode.insert(0, '!x64')
        if '!!debug' not in mode:
            mode.insert(0, '!!release')
        mode.sort()
        # strip the '!' ordering prefixes now that sorting is done
        for i in range(0, len(mode)):
            while mode[i].startswith('!'):
                mode[i] = mode[i][1:]
    modes.sort()
    test_root = path.join(utils.get_script_folder(), '..', 'vs', 'tests')
    # fixed namespace keeps project GUIDs stable across regenerations
    uuid_namespace = UUID('{51C7001B-048C-4AF0-B598-D75E78FF31F0}')
    configuration_name = lambda x: 'Debug' if x.lower(
    ) == 'debug' else 'Release'
    platform_name = lambda x: 'Win32' if x == 'x86' else x
    for mode in modes:
        file_path = path.join(test_root,
                              'test_{}.vcxproj'.format('_'.join(mode)))
        print("Writing to {}".format(file_path))
        # utf-8-sig + \r\n is Visual Studio's native project file encoding
        with open(file_path, 'w', encoding='utf-8-sig',
                  newline='\r\n') as file:
            write = lambda txt: print(txt, file=file)
            write(r'''
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
	<ItemGroup Label="ProjectConfigurations">
		<ProjectConfiguration Include="{configuration}|{platform}">
			<Configuration>{configuration}</Configuration>
			<Platform>{platform}</Platform>
		</ProjectConfiguration>
	</ItemGroup>
	<PropertyGroup Label="Globals">
		<VCProjectVersion>16.0</VCProjectVersion>
		<ProjectGuid>{{{uuid}}}</ProjectGuid>
		<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>
	</PropertyGroup>
	<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
	<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|{platform}'" Label="Configuration">
		<ConfigurationType>Application</ConfigurationType>
		<UseDebugLibraries>true</UseDebugLibraries>
		<PlatformToolset>v142</PlatformToolset>
		<CharacterSet>MultiByte</CharacterSet>
	</PropertyGroup>
	<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|{platform}'" Label="Configuration">
		<ConfigurationType>Application</ConfigurationType>
		<UseDebugLibraries>false</UseDebugLibraries>
		<PlatformToolset>v142</PlatformToolset>
		<WholeProgramOptimization>true</WholeProgramOptimization>
		<CharacterSet>MultiByte</CharacterSet>
	</PropertyGroup>
	<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
	<ImportGroup Label="PropertySheets">
		<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
	</ImportGroup>
	<Import Project="../toml++.props" />
	<ItemDefinitionGroup>
		<ClCompile>
			<AdditionalIncludeDirectories>..\tests;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
			<ExceptionHandling>{exceptions}</ExceptionHandling>
			<PrecompiledHeader>Use</PrecompiledHeader>
			<PrecompiledHeaderFile>tests.h</PrecompiledHeaderFile>
			<PreprocessorDefinitions>TOML_UNRELEASED_FEATURES={unreleased_features};%(PreprocessorDefinitions)</PreprocessorDefinitions>
			<PreprocessorDefinitions Condition="'%(ExceptionHandling)'=='false'">_HAS_EXCEPTIONS=0;%(PreprocessorDefinitions)</PreprocessorDefinitions>
			<PreprocessorDefinitions Condition="'%(ExceptionHandling)'=='false'">SHOULD_HAVE_EXCEPTIONS=0;%(PreprocessorDefinitions)</PreprocessorDefinitions>
			<PreprocessorDefinitions Condition="'%(ExceptionHandling)'!='false'">SHOULD_HAVE_EXCEPTIONS=1;%(PreprocessorDefinitions)</PreprocessorDefinitions>
			<LanguageStandard>std{standard}</LanguageStandard>
			<MultiProcessorCompilation>true</MultiProcessorCompilation>
		</ClCompile>
	</ItemDefinitionGroup>
	<PropertyGroup>
		<LocalDebuggerWorkingDirectory>..\..\tests\</LocalDebuggerWorkingDirectory>
	</PropertyGroup>
	<ItemGroup>
		<ClCompile Include="..\..\tests\conformance_burntsushi_invalid.cpp" />
		<ClCompile Include="..\..\tests\conformance_burntsushi_valid.cpp" />
		<ClCompile Include="..\..\tests\conformance_iarna_invalid.cpp" />
		<ClCompile Include="..\..\tests\conformance_iarna_valid.cpp" />
		<ClCompile Include="..\..\tests\impl_catch2.cpp">
			<PrecompiledHeader>NotUsing</PrecompiledHeader>
		</ClCompile>
		<ClCompile Include="..\..\tests\impl_toml.cpp">
			<PrecompiledHeader>NotUsing</PrecompiledHeader>
		</ClCompile>
		<ClCompile Include="..\..\tests\manipulating_arrays.cpp" />
		<ClCompile Include="..\..\tests\manipulating_tables.cpp" />
		<ClCompile Include="..\..\tests\manipulating_parse_result.cpp" />
		<ClCompile Include="..\..\tests\manipulating_values.cpp" />
		<ClCompile Include="..\..\tests\parsing_arrays.cpp" />
		<ClCompile Include="..\..\tests\parsing_booleans.cpp" />
		<ClCompile Include="..\..\tests\parsing_comments.cpp" />
		<ClCompile Include="..\..\tests\parsing_dates_and_times.cpp" />
		<ClCompile Include="..\..\tests\parsing_floats.cpp" />
		<ClCompile Include="..\..\tests\parsing_integers.cpp" />
		<ClCompile Include="..\..\tests\parsing_key_value_pairs.cpp" />
		<ClCompile Include="..\..\tests\parsing_spec_example.cpp" />
		<ClCompile Include="..\..\tests\parsing_strings.cpp" />
		<ClCompile Include="..\..\tests\parsing_tables.cpp" />
		<ClCompile Include="..\..\tests\tests.cpp">
			<PrecompiledHeader>Create</PrecompiledHeader>
		</ClCompile>
		<ClCompile Include="..\..\tests\unicode.cpp" />
		<ClCompile Include="..\..\tests\user_feedback.cpp" />
		<ClCompile Include="..\..\tests\windows_compat.cpp" />
	</ItemGroup>
	<ItemGroup>
		<Natvis Include="..\toml++.natvis" />
	</ItemGroup>
	<ItemGroup>
		<ClInclude Include="..\..\tests\catch2.h" />
		<ClInclude Include="..\..\tests\evil_macros.h" />
		<ClInclude Include="..\..\tests\settings.h" />
		<ClInclude Include="..\..\tests\tests.h" />
		<ClInclude Include="..\..\tests\tloptional.h" />
	</ItemGroup>
	<ItemGroup>
		<None Include="..\..\tests\meson.build" />
	</ItemGroup>
	<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
</Project>
'''.strip().format(
                # next() is safe: every mode contains exactly one of each pair
                configuration=next(
                    configuration_name(x) for x in mode
                    if x in ('debug', 'release')),
                platform=next(
                    platform_name(x) for x in mode if x in ('x64', 'x86')),
                uuid=str(uuid5(uuid_namespace, '_'.join(mode))).upper(),
                exceptions='false' if 'noexcept' in mode else 'Sync',
                unreleased_features=1 if 'unrel' in mode else 0,
                standard='cpplatest' if 'cpplatest' in mode else 'cpp17'))
def main():
    """Assemble toml.hpp (the single-header distribution) from the individual
    headers, strip noise, prepend the license preamble, and sanity-check the
    preprocessor #define/#undef hygiene of the result."""
    # preprocess header(s)
    source_text = str(Preprocessor('toml.h'))

    # strip various things:
    # 'pragma once'
    source_text = re.sub(r'^\s*#\s*pragma\s+once\s*$', '', source_text, 0,
                         re.I | re.M)
    # clang-format directives
    source_text = re.sub(r'^\s*//\s*clang-format\s+.+?$', '', source_text, 0,
                         re.I | re.M)
    # spdx license identifiers
    source_text = re.sub(r'^\s*//\s*SPDX-License-Identifier:.+?$', '',
                         source_text, 0, re.I | re.M)
    # 'magic' comment blocks (incl. doxygen)
    source_text = re.sub('(?:(?:\n|^)[ \t]*//[/#!<]+[^\n]*)+\n', '\n',
                         source_text, 0, re.I | re.M)
    # 'magic' comments (incl. doxygen)
    source_text = re.sub('(?://[/#!<].*?)\n', '\n', source_text, 0,
                         re.I | re.M)
    # remove trailing whitespace
    source_text = re.sub('([^ \t])[ \t]+\n', '\\1\n', source_text, 0,
                         re.I | re.M)
    # bookended namespace blocks (a close immediately followed by a reopen
    # collapses into nothing, merging the two blocks)
    source_text = re.sub(
        '}\n+TOML_NAMESPACE_END\n+TOML_NAMESPACE_START\n+{\n+', '\n',
        source_text, 0, re.I | re.M)
    source_text = re.sub(
        '}\n+TOML_IMPL_NAMESPACE_END\n+TOML_IMPL_NAMESPACE_START\n+{\n+',
        '\n', source_text, 0, re.I | re.M)
    # blank lines before some preprocessor directives
    #source_text = re.sub('\n+\n(\s*)#\s*(elif|else|endif)(.*?)\n', '\n\\1#\\2\\3\n', source_text, 0, re.I | re.M)
    # blank lines after some preprocessor directives
    #source_text = re.sub('#\s*(if|ifn?def|elif|else)(.*?)\n\n+', '#\\1\\2\n', source_text, 0, re.I | re.M)
    # blank lines after opening braces
    source_text = re.sub('[{]\s*\n\s*\n+', '{\n', source_text, 0, re.I | re.M)
    # double newlines
    source_text = re.sub('\n(?:[ \t]*\n[ \t]*)+\n', '\n\n', source_text, 0,
                         re.I | re.M)
    # source_text = re.sub(  # blank lines between various preprocessor directives
    #   '[#](endif(?:\s*//[^\n]*)?)\n{2,}[#](ifn?(?:def)?|define)',
    #   '#\\1\n#\\2',
    #   source_text, 0, re.I | re.M
    # )

    # pattern matching the return types of the library's simple functions;
    # used below to collapse blank lines between one-liner definitions
    return_type_pattern \
        = r'(?:' \
        + r'(?:\[\[nodiscard\]\]\s*)?' \
        + r'(?:(?:friend|explicit|virtual|inline|const|operator)\s+)*' \
        + r'(?:' \
            + r'bool|int64_t|(?:const_)?iterator|double|void' \
            + r'|node(?:_(?:view|of)<.+?>|)?|table|array|value(?:<.+?>)?' \
            + r'|T|U|parse_(?:error|result)' \
        + r')' \
        + r'(?:\s*[&*]+)?' \
        + r'(?:\s*[(]\s*[)])?' \
        + r'\s+' \
        + r')'
    blank_lines_between_returns_pattern = '({}[^\n]+)\n\n([ \t]*{})'.format(
        return_type_pattern, return_type_pattern)
    # fixed-point-ish iteration: each pass can expose new adjacent pairs
    for i in range(
            0, 5):  # remove blank lines between simple one-liner definitions
        source_text = re.sub('(using .+?;)\n\n([ \t]*using)', '\\1\n\\2',
                             source_text, 0, re.I | re.M)
        source_text = re.sub(
            '([a-zA-Z_][a-zA-Z0-9_]*[ \t]+[a-zA-Z_][a-zA-Z0-9_]*[ \t]*;)' \
            + '\n\n([ \t]*[a-zA-Z_][a-zA-Z0-9_]*[ \t]+[a-zA-Z_][a-zA-Z0-9_]*[ \t]*;)',
            '\\1\n\\2', source_text, 0, re.I | re.M)
        source_text = re.sub(blank_lines_between_returns_pattern, '\\1\n\\2',
                             source_text, 0, re.I | re.M)
    source_text = source_text.strip() + '\n'

    # change TOML_LIB_SINGLE_HEADER to 1
    source_text = re.sub('#\s*define\s+TOML_LIB_SINGLE_HEADER\s+[0-9]+',
                         '#define TOML_LIB_SINGLE_HEADER 1', source_text, 0,
                         re.I)

    # extract library version (values stay as strings when matched)
    library_version = {'major': 0, 'minor': 0, 'patch': 0}
    match = re.search(r'^\s*#\s*define\s+TOML_LIB_MAJOR\s+([0-9]+)\s*$',
                      source_text, re.I | re.M)
    if match is not None:
        library_version['major'] = match.group(1)
    match = re.search(r'^\s*#\s*define\s+TOML_LIB_MINOR\s+([0-9]+)\s*$',
                      source_text, re.I | re.M)
    if match is not None:
        library_version['minor'] = match.group(1)
    match = re.search(
        r'^\s*#\s*define\s+TOML_LIB_(?:REVISION|PATCH)\s+([0-9]+)\s*$',
        source_text, re.I | re.M)
    if match is not None:
        library_version['patch'] = match.group(1)

    # build the preamble (license etc)
    preamble = []
    preamble.append('''
// toml++ v{major}.{minor}.{patch}
// https://github.com/marzer/tomlplusplus
// SPDX-License-Identifier: MIT'''.format(**library_version))
    preamble.append('''
// - THIS FILE WAS ASSEMBLED FROM MULTIPLE HEADER FILES BY A SCRIPT - PLEASE DON'T EDIT IT DIRECTLY -
//
// If you wish to submit a contribution to toml++, hooray and thanks! Before you crack on, please be aware that this
// file was assembled from a number of smaller files by a python script, and code contributions should not be made
// against it directly. You should instead make your changes in the relevant source file(s). The file names of the files
// that contributed to this header can be found at the beginnings and ends of the corresponding sections of this file.'''
                    )
    preamble.append('''
// TOML Language Specifications:
// latest: https://github.com/toml-lang/toml/blob/master/README.md
// v1.0.0-rc.2: https://toml.io/en/v1.0.0-rc.2
// v1.0.0-rc.1: https://toml.io/en/v1.0.0-rc.1
// v0.5.0: https://toml.io/en/v0.5.0
// changelog: https://github.com/toml-lang/toml/blob/master/CHANGELOG.md''')
    preamble.append(
        utils.read_all_text_from_file(
            path.join(utils.get_script_folder(), '..', 'LICENSE')))

    # write the output
    with StringIO(newline='\n') as output:  # build in a string buffer
        write = lambda txt, end='\n': print(txt, file=output, end=end)
        if (len(preamble) > 0):
            write(utils.make_divider())
        for pre in preamble:
            write('//')
            for line in pre.strip().splitlines():
                if len(line) == 0:
                    write('//')
                    continue
                # prefix any non-comment line (e.g. raw LICENSE text)
                if not line.startswith('//'):
                    write('// ', end='')
                write(line)
            write('//')
            write(utils.make_divider())
        write(source_text)
        write('')
        output_str = output.getvalue().strip()

        # analyze the output to find any potentially missing #undefs
        re_define = re.compile(r'^\s*#\s*define\s+([a-zA-Z0-9_]+)(?:$|\s|\()')
        re_undef = re.compile(r'^\s*#\s*undef\s+([a-zA-Z0-9_]+)(?:$|\s|//)')
        defines = dict()
        # last occurrence wins: True == still defined at end of file
        for output_line in output_str.splitlines():
            defined = True
            m = re_define.match(output_line)
            if not m:
                defined = False
                m = re_undef.match(output_line)
            if m:
                defines[m.group(1)] = defined
        ignore_list = (  # macros that are meant to stay public (user configs etc)
            'INCLUDE_TOMLPLUSPLUS_H', 'TOML_API', 'TOML_UNRELEASED_FEATURES',
            'TOML_LARGE_FILES', 'TOML_PARSER', 'TOML_WINDOWS_COMPAT',
            'TOML_EXCEPTIONS', 'TOML_LIB_SINGLE_HEADER', 'TOML_LIB_MAJOR',
            'TOML_LIB_MINOR', 'TOML_LIB_PATCH', 'TOML_LANG_MAJOR',
            'TOML_LANG_MINOR', 'TOML_LANG_PATCH', 'TOML_UNDEF_MACROS',
            'TOML_HEADER_ONLY', 'TOML_ALL_INLINE')
        set_defines = []
        for define, currently_set in defines.items():
            if currently_set and define not in ignore_list:
                set_defines.append(define)
        if len(set_defines) > 0:
            set_defines.sort()
            print(f"Potentially missing #undefs:")
            for define in set_defines:
                print(f"\t#undef {define}")

        # write the output file
        output_file_path = path.join(utils.get_script_folder(), '..',
                                     'toml.hpp')
        print("Writing to {}".format(output_file_path))
        with open(output_file_path, 'w', encoding='utf-8',
                  newline='\n') as output_file:
            print(output_str, file=output_file)
def write_test_file(name, test_cases):
    """Emit one generated conformance-test .cpp file for the given group.

    'name' is 'group/type' (e.g. 'burntsushi/invalid'); 'test_cases' is the
    list of test objects providing .condition, .expected(), .identifier(),
    and a str() rendering of the test data.
    """
    # collect the distinct preprocessor conditions so tests can be grouped
    # into #if blocks ('' == unconditional)
    conditions = set()
    for test in test_cases:
        conditions.add(test.condition)
    test_file_path = path.join(
        utils.get_script_folder(), '..', 'tests',
        'conformance_{}.cpp'.format(sanitize(name.strip())))
    print("Writing to {}".format(test_file_path))
    with open(test_file_path, 'w', encoding='utf-8',
              newline='\n') as test_file:
        write = lambda txt: print(txt, file=test_file)

        # preamble
        # NOTE(review): 'the the' below reproduces the license header used
        # verbatim across the project's other files
        write(
            '// This file is a part of toml++ and is subject to the the terms of the MIT license.'
        )
        write(
            '// Copyright (c) 2019-2020 Mark Gillard <*****@*****.**>'
        )
        write(
            '// See https://github.com/marzer/tomlplusplus/blob/master/LICENSE for the full license text.'
        )
        write('// SPDX-License-Identifier: MIT')
        write('//-----')
        write(
            '// this file was generated by generate_conformance_tests.py - do not modify it directly'
        )
        write('')
        write('#include "tests.h"')
        write('using namespace toml::impl;')
        write('')

        # test data
        write('TOML_PUSH_WARNINGS')
        write('TOML_DISABLE_ALL_WARNINGS // unused variable spam')
        write('')
        write('namespace')
        write('{')
        for test in test_cases:
            write('\t{}'.format(test))
        write('}')
        write('')
        write('TOML_POP_WARNINGS')
        write('')

        # tests
        write('TEST_CASE("conformance - {}")'.format(name))
        write('{')
        for condition in conditions:
            # wrap conditional groups in #if/#endif
            if condition != '':
                write('')
                write('\t#if {}'.format(condition))
            for test in test_cases:
                if test.condition != condition:
                    continue
                expected = test.expected()
                if isinstance(expected, bool):
                    # bool expected: just pass/fail, no table comparison
                    if expected:
                        write('\tparsing_should_succeed(FILE_LINE_ARGS, {});'.
                              format(test.identifier()))
                    else:
                        write('\tparsing_should_fail(FILE_LINE_ARGS, {});'.
                              format(test.identifier()))
                else:
                    # structured expected value: compare the parsed table
                    write(
                        '\tparsing_should_succeed(FILE_LINE_ARGS, {}, [](toml::table&& tbl)'
                        .format(test.identifier()))
                    write('\t{')
                    write('\t\tauto expected = {};'.format(
                        expected.render('\t\t')))
                    write('\t\tREQUIRE(tbl == expected);')
                    write('\t});')
                write('')
            if condition != '':
                write('\t#endif // {}'.format(condition))
        write('}')
        write('')
def main():
    """Assemble toml.hpp from the individual headers (older variant of the
    single-header generator): preprocess, strip noise, extract the version,
    then write the file with a comment-style preamble and include guard."""
    # preprocess header(s)
    source_text = Preprocessor()('toml.h')
    source_text = re.sub(r'^\s*#\s*pragma\s+once\s*$', '', source_text, 0, re.I | re.M)  # 'pragma once'
    source_text = re.sub(r'^\s*//\s*clang-format\s+.+?$', '', source_text, 0, re.I | re.M)  # clang-format directives
    source_text = re.sub(r'^\s*//\s*SPDX-License-Identifier:.+?$', '', source_text, 0, re.I | re.M)  # spdx
    source_text = re.sub('(?:(?:\n|^)[ \t]*//[/#!<]+[^\n]*)+\n', '\n', source_text, 0, re.I | re.M)  # remove 'magic' comment blocks
    source_text = re.sub('(?://[/#!<].*?)\n', '\n', source_text, 0, re.I | re.M)  # remove 'magic' comments
    source_text = re.sub('([^ \t])[ \t]+\n', '\\1\n', source_text, 0, re.I | re.M)  # remove trailing whitespace
    source_text = re.sub('\n(?:[ \t]*\n[ \t]*)+\n', '\n\n', source_text, 0, re.I | re.M)  # remove double newlines
    # source_text = re.sub(  # blank lines between various preprocessor directives
    #   '[#](endif(?:\s*//[^\n]*)?)\n{2,}[#](ifn?(?:def)?|define)',
    #   '#\\1\n#\\2',
    #   source_text, 0, re.I | re.M
    # )

    # pattern matching the return types of the library's simple functions;
    # used below to collapse blank lines between one-liner definitions
    return_type_pattern \
        = r'(?:' \
        + r'(?:\[\[nodiscard\]\]\s*)?' \
        + r'(?:(?:friend|explicit|virtual|inline|const|operator)\s+)*' \
        + r'(?:' \
            + r'bool|int64_t|(?:const_)?iterator|double|void' \
            + r'|node(?:_(?:view|of)<.+?>|)?|table|array|value(?:<.+?>)?' \
            + r'|T|U|parse_(?:error|result)' \
        + r')' \
        + r'(?:\s*[&*]+)?' \
        + r'(?:\s*[(]\s*[)])?' \
        + r'\s+' \
        + r')'
    blank_lines_between_returns_pattern = '({}[^\n]+)\n\n([ \t]*{})'.format(return_type_pattern, return_type_pattern)
    # fixed-point-ish iteration: each pass can expose new adjacent pairs
    for i in range(0, 5):  # remove blank lines between simple one-liner definitions
        source_text = re.sub('(using .+?;)\n\n([ \t]*using)', '\\1\n\\2', source_text, 0, re.I | re.M)
        source_text = re.sub(
            '([a-zA-Z_][a-zA-Z0-9_]*[ \t]+[a-zA-Z_][a-zA-Z0-9_]*[ \t]*;)' \
            + '\n\n([ \t]*[a-zA-Z_][a-zA-Z0-9_]*[ \t]+[a-zA-Z_][a-zA-Z0-9_]*[ \t]*;)',
            '\\1\n\\2', source_text, 0, re.I | re.M)
        source_text = re.sub(blank_lines_between_returns_pattern, '\\1\n\\2', source_text, 0, re.I | re.M)
    source_text = source_text.strip() + '\n'

    # extract library version (values stay as strings when matched)
    library_version = {
        'major': 0,
        'minor': 0,
        'patch': 0
    }
    match = re.search(r'^\s*#\s*define\s+TOML_LIB_MAJOR\s+([0-9]+)\s*$', source_text, re.I | re.M)
    if match is not None:
        library_version['major'] = match.group(1)
    match = re.search(r'^\s*#\s*define\s+TOML_LIB_MINOR\s+([0-9]+)\s*$', source_text, re.I | re.M)
    if match is not None:
        library_version['minor'] = match.group(1)
    match = re.search(r'^\s*#\s*define\s+TOML_LIB_(?:REVISION|PATCH)\s+([0-9]+)\s*$', source_text, re.I | re.M)
    if match is not None:
        library_version['patch'] = match.group(1)

    # build the preamble (license etc)
    preamble = []
    preamble.append('''
toml++ v{major}.{minor}.{patch}
https://github.com/marzer/tomlplusplus
SPDX-License-Identifier: MIT'''.format(**library_version))
    preamble.append('''
- THIS FILE WAS ASSEMBLED FROM MULTIPLE HEADER FILES BY A SCRIPT - PLEASE DON'T EDIT IT DIRECTLY -

If you wish to submit a contribution to toml++, hooray and thanks! Before you crack on, please be aware that this
file was assembled from a number of smaller files by a python script, and code contributions should not be made
against it directly. You should instead make your changes in the relevant source file(s).
The file names of the files that contributed to this header can be found at the beginnings and ends of the
corresponding sections of this file.''')
    preamble.append('''
TOML language specifications:
Latest: https://github.com/toml-lang/toml/blob/master/README.md
v1.0.0-rc.1: https://toml.io/en/v1.0.0-rc.1
v0.5.0: https://toml.io/en/v0.5.0''')
    preamble.append(utils.read_all_text_from_file(path.join(utils.get_script_folder(), '..', 'LICENSE')))

    # write the output file
    output_file_path = path.join(utils.get_script_folder(), '..', 'toml.hpp')
    print("Writing to {}".format(output_file_path))
    with open(output_file_path,'w', encoding='utf-8', newline='\n') as output_file:
        if (len(preamble) > 0):
            print(utils.make_divider(), file=output_file)
        for pre in preamble:
            print('//', file=output_file)
            for line in pre.strip().splitlines():
                # '//' prefix, plus a space only for non-empty lines
                print('//', file=output_file, end = '')
                if (len(line) > 0):
                    print(' ', file=output_file, end = '')
                    print(line, file=output_file)
                else:
                    print('\n', file=output_file, end = '')
            print('//', file=output_file)
            print(utils.make_divider(), file=output_file)
        print('''// clang-format off
#ifndef INCLUDE_TOMLPLUSPLUS_H
#define INCLUDE_TOMLPLUSPLUS_H

#define TOML_LIB_SINGLE_HEADER 1
''', file=output_file)
        print(source_text, file=output_file)
        print('''
#endif // INCLUDE_TOMLPLUSPLUS_H
// clang-format on''', file=output_file)