def testCombined(self):
    prepare()
    # Lexical analysis
    syntaxFile = os.path.abspath(
        os.path.join(__file__, '../../test/combined_test/syntax.txt'))
    with open(syntaxFile, encoding='utf-8') as f:
        buf = f.read()
    tokens = lexer.analyze(buf, isKeepSpace=False, isKeepComment=False)
    lexOutputFile = os.path.abspath(
        os.path.join(__file__, '../output/lex_output.txt'))
    with open(lexOutputFile, 'w', encoding='utf-8') as f:
        for token in tokens:
            f.write(str(token) + '\n')
    # Syntax analysis
    productionNonterminals = collectNonterminalOfProduction(tokens)
    productionList = analyze(tokens)
    translator.writeProductionList(productionList, productionNonterminals,
                                   TokenType)
    translator.mergeProductionListToFile(
        productionList, productionNonterminals, TokenType,
        os.path.join(__file__, '../../test/cg_test/productions.py'))
    translator.writeNonterminals(productionList, productionNonterminals)
    translator.mergeNonterminalsToFile(
        productionList, productionNonterminals,
        os.path.join(__file__, '../../test/cg_test/nonterminals.py'))
    to_code_translator.writeToCode(productionList, productionNonterminals)
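# A minimal sketch of the path idiom used throughout this module: joining a
# relative path onto __file__ and collapsing it with os.path.abspath resolves
# files relative to this source file, independent of the working directory.
# The file name example.txt is illustrative only.
def _demoSiblingPath():
    import os
    # e.g. /pkg/tests/this_test.py -> /pkg/tests/example.txt
    return os.path.abspath(os.path.join(__file__, '../example.txt'))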
def verifyAll():
    def json2Edges(d):
        # JSON object keys are always strings; rebuild the table with int keys.
        edges = {}
        for key in d:
            edges[int(key)] = d[key]
        return edges

    with open(os.path.join(__file__, '../2_edges.json')) as f:
        edges = json2Edges(json.load(f))
    # Eliminate known conflicts
    edges = known_conflicts.applyTo(edges)
    with open(os.path.join(__file__, '../2_edges_conflict_free.json'),
              'w') as f:
        json.dump(edges, f, indent=4)
    # Verify against real shader files
    shaderFiles = []
    for path, dirs, files in os.walk(
            r'D:\Protable_Program\Sublime Text Build 3126 x64\Data\Packages\UnityShader'
    ):
        for file in files:
            if os.path.splitext(file)[1] == '.shader':
                filePath = os.path.join(path, file)
                shaderFiles.append(filePath)
    count = 0
    for filePath in shaderFiles:
        if os.path.split(filePath)[1] == 'Internal-DeferredShading.shader':
            continue
        with open(filePath) as f:
            inputText = f.read()
        count += 1
        for match in re.finditer(r'CGPROGRAM.*?ENDCG|CGINCLUDE.*?ENDCG',
                                 inputText, re.DOTALL):
            try:
                filterText = match.group()
                tokens = lexer.analyze(filterText, isEnding=True)
                tokens = preprocessor.analyze(tokens)
                # productionList is expected to be resolvable at module scope.
                ast = dfm.run(edges, productionList, tokens, isDebug=False)
                print(count, filePath, 'ok')
            except Exception as e:
                # Keep the failing CG block around for a focused re-run.
                print(filePath, 'failed')
                with open(os.path.join(__file__, '../test.shader'),
                          'w') as testShaderFile:
                    testShaderFile.write(filterText)
                raise e
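# A minimal, self-contained sketch of the CG-block extraction used above, run
# on an inline snippet instead of files on disk; the sample text is
# illustrative only. The non-greedy .*? together with re.DOTALL keeps each
# match from swallowing a following block.
def _demoExtractCgBlocks():
    import re
    sample = ('Shader "Demo" { SubShader { Pass {\n'
              'CGPROGRAM\nfloat4 frag() : SV_Target { return 0; }\nENDCG\n'
              '} } }')
    # Returns one string spanning CGPROGRAM..ENDCG from the sample.
    return [m.group() for m in re.finditer(
        r'CGPROGRAM.*?ENDCG|CGINCLUDE.*?ENDCG', sample, re.DOTALL)]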
def test(self):
    with open(os.path.join(__file__, '../formatted.shader')) as f:
        inputText = f.read()
    tokens = lexer.analyze(inputText, isEnding=True)
    tokens = preprocessor.analyze(tokens)
    ast = parser.analyze(tokens)
    with open(os.path.join(__file__, '../output/lex_output.txt'), 'w') as f:
        for token in tokens:
            f.write(str(token) + '\n')

    from app.extension.formatter import Formatter
    formatter = Formatter(tokens, ast)
    outFilePath = os.path.abspath(
        os.path.join(__file__, '../output/output.shader'))
    with open(outFilePath, 'w') as f:
        f.write(formatter.toCode())
    # Reset the formatter before rendering again for the comparison:
    # formatting an already formatted shader must reproduce it exactly.
    formatter.reset()
    self.maxDiff = None
    self.assertEqual(inputText, formatter.toCode())
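# A hedged sketch of the same round-trip property as a reusable helper,
# assuming lexer, preprocessor, parser and Formatter behave as in test()
# above; the name roundTripsUnchanged is hypothetical, not part of the suite.
def roundTripsUnchanged(sourceText):
    from app.extension.formatter import Formatter
    tokens = preprocessor.analyze(lexer.analyze(sourceText, isEnding=True))
    ast = parser.analyze(tokens)
    # True when formatting already formatted code reproduces it byte for byte.
    return Formatter(tokens, ast).toCode() == sourceText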
def Dtest(self):
    prepare()
    # Lexical analysis
    syntaxFile = os.path.abspath(
        os.path.join(__file__, '../../doc/syntax.txt'))
    with open(syntaxFile, encoding='utf-8') as f:
        buf = f.read()
    tokens = lexer.analyze(buf, isKeepSpace=False, isKeepComment=False)
    lexOutputFile = os.path.abspath(
        os.path.join(__file__, '../lex_output.txt'))
    with open(lexOutputFile, 'w', encoding='utf-8') as f:
        for token in tokens:
            f.write(str(token) + '\n')
    # Syntax analysis
    productionNonterminals = collectNonterminalOfProduction(tokens)
    productionList = analyze(tokens)
    translator.writeProductionList(productionList, productionNonterminals,
                                   TokenType)
    translator.writeNonterminals(productionList, productionNonterminals)
def verify():
    def json2Edges(d):
        # JSON object keys are always strings; rebuild the table with int keys.
        edges = {}
        for key in d:
            edges[int(key)] = d[key]
        return edges

    with open(os.path.join(__file__, '../2_edges.json')) as f:
        edges = json2Edges(json.load(f))
    # Eliminate known conflicts
    edges = known_conflicts.applyTo(edges)
    with open(os.path.join(__file__, '../2_edges_conflict_free.json'),
              'w') as f:
        json.dump(edges, f, indent=4)
    # Verify with a single shader
    with open(os.path.join(__file__, '../test.shader')) as f:
        inputText = f.read()
    tokens = lexer.analyze(inputText, isEnding=True)
    tokens = preprocessor.analyze(tokens)
    with open(os.path.join(__file__, '../1_lex_output.txt'), 'w') as f:
        for token in tokens:
            f.write(str(token))
            f.write('\n')
    ast = dfm.run(edges, productionList, tokens, isDebug=False)
    outputFile = os.path.abspath(
        os.path.join(__file__, '../2_syntax_output.txt'))
    with open(outputFile, 'w') as f:
        json.dump(ast.toDict(), f, indent=4)
    outputFile = os.path.abspath(
        os.path.join(__file__, '../3_formatted_code.shader'))
    with open(outputFile, 'w') as f:
        f.write(ast.toCode())
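# A hedged usage sketch: once verify() has dumped 2_edges_conflict_free.json,
# a later run could load that table directly instead of re-applying
# known_conflicts. loadConflictFreeEdges is a hypothetical helper; the keys
# are rebuilt as ints, mirroring what json2Edges does above.
def loadConflictFreeEdges():
    with open(os.path.join(__file__, '../2_edges_conflict_free.json')) as f:
        return {int(key): value for key, value in json.load(f).items()}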