Example #1
 def test_logging(self):
     # try:
     #     log_dir()
     #     assert False, "AttributeError should be raised when log_dir() is called outside " \
     #                   "a logging context."
     # except AttributeError:
     #     pass
     res = log_dir()
     if res:
         suspend_logging()
     start_logging(self.LOGDIR)
     assert not os.path.exists(self.LOGDIR), \
         "Log dir should be created lazily!"
     dirname = log_dir()
     # print(type(dirname), dirname)
     assert dirname == self.LOGDIR
     assert is_logging(), \
         "is_logging() should return True if logging is on"
     save_log_dir = suspend_logging()
     assert not is_logging(), \
         "is_logging() should return False, if innermost logging context " \
         "has logging turned off."
     resume_logging(save_log_dir)
     assert is_logging(), "is_logging() should return True after logging off " \
                          "context has been left"
     info_path = os.path.join(self.LOGDIR, 'info.txt')
     assert os.path.exists(info_path), "an 'info.txt' file should be " \
         "created within a newly created log dir"
     # cleanup
     os.remove(info_path)
     os.rmdir(self.LOGDIR)
Example #2
 def logging_task(self):
     log_dir()
     assert is_logging(), "Logging should be on inside logging context"
     save_log_dir = suspend_logging()
     assert not is_logging(), \
         "Logging should be off outside logging context"
     resume_logging(save_log_dir)
     # TODO: Some race condition occurs here, but which and why???
     #       Maybe: Some other thread has created logdir but not yet info.txt
     #       Solution: Just return True, cause log_dir() does not guarantee
     #                 existence of 'info.txt', anyway...
     return True
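
The TODO above concerns concurrent use of a single logging context. A minimal
sketch of the situation in which the race can occur; the test-class name
`TestLogging` and the worker count are illustrative assumptions:

from concurrent.futures import ThreadPoolExecutor

start_logging('LOGS')                    # shared logging context
tester = TestLogging()                   # hypothetical class containing logging_task()
with ThreadPoolExecutor(max_workers=4) as pool:
    futures = [pool.submit(tester.logging_task) for _ in range(4)]
# any thread may observe the log dir already created but 'info.txt' not yet written
assert all(f.result() for f in futures)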
Example #3
def grammar_provider(ebnf_src: str, branding="DSL", additional_code: str = '') -> ParserFactoryFunc:
    """
    Compiles an EBNF-grammar and returns a grammar-parser provider
    function for that grammar.

    Args:
        ebnf_src(str):  Either the file name of an EBNF grammar or
            the EBNF-grammar itself as a string.
        branding (str or bool):  Branding name for the compiler
            suite source code.
        additional_code: Python code added to the generated source. This
            typically contains the source code of semantic actions referred
            to in the generated source, e.g. filter functions or
            resume-point search functions.

    Returns:
        A provider function for a grammar object for texts in the
        language defined by ``ebnf_src``.
    """
    grammar_src = compileDSL(ebnf_src, nil_preprocessor, get_ebnf_grammar(),
                             get_ebnf_transformer(), get_ebnf_compiler(branding, ebnf_src))
    log_name = get_config_value('compiled_EBNF_log')
    if log_name:
        if is_logging():
            append_log(log_name, grammar_src)
        else:
            print(grammar_src)
    imports = DHPARSER_IMPORTS.format(dhparser_parentdir=relative_path('.', DHPARSER_PARENTDIR))
    grammar_factory = compile_python_object('\n'.join([imports, additional_code, grammar_src]),
                                            r'get_(?:\w+_)?grammar$')
    if callable(grammar_factory):
        grammar_factory.python_src__ = grammar_src
        return grammar_factory
    raise ValueError('Could not compile grammar provider!')
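
A minimal usage sketch; the EBNF snippet is illustrative and may need
adjustment to DHParser's concrete EBNF dialect:

arithmetic_ebnf = '''
    expression = term { ("+" | "-") term }
    term       = /[0-9]+/~
    '''
factory = grammar_provider(arithmetic_ebnf, branding="Arithmetic")
parser = factory()               # a fresh grammar object on each call
syntax_tree = parser('2 + 3')    # parse a text of the DSL
print(syntax_tree.as_sxpr())     # inspect the resulting tree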
Example #4
def grammar_suite(directory,
                  parser_factory,
                  transformer_factory,
                  fn_patterns=('*test*', ),
                  ignore_unknown_filetypes=False,
                  report='REPORT',
                  verbose=True):
    """
    Runs all grammar unit tests in a directory. A file is considered a test
    unit if its name matches one of ``fn_patterns`` (by default, any file
    with the word "test" in its name).
    """
    assert isinstance(report, str)

    # note: a single string is itself iterable, so check for str explicitly
    if isinstance(fn_patterns, str) \
            or not isinstance(fn_patterns, collections.abc.Iterable):
        fn_patterns = [fn_patterns]
    all_errors = collections.OrderedDict()
    if verbose:
        print("\nScanning test-directory: " + directory)
    save_cwd = os.getcwd()
    os.chdir(directory)
    if is_logging():
        clear_logs()

    tests = [
        fn for fn in sorted(os.listdir('.')) if any(
            fnmatch.fnmatch(fn, pattern) for pattern in fn_patterns)
    ]

    # TODO: fix "handle is closed" error in pypy3 when exiting the interpreter!
    with instantiate_executor(
            get_config_value('test_parallelization') and len(tests) > 1,
            concurrent.futures.ProcessPoolExecutor) as pool:
        results = []
        for filename in tests:
            parameters = filename, parser_factory, transformer_factory, report, verbose
            results.append(pool.submit(grammar_unit, *parameters))
        done, not_done = concurrent.futures.wait(results)
        assert not not_done, str(not_done)
        for filename, err_future in zip(tests, results):
            try:
                errata = err_future.result()
                if errata:
                    all_errors[filename] = errata
            except ValueError as e:
                if not ignore_unknown_filetypes or str(e).find("Unknown") < 0:
                    raise e
            except AssertionError as e:
                e.args = ('When processing "%s":\n%s' %
                          (filename, e.args[0]) if e.args else '', )
                raise e
    os.chdir(save_cwd)
    error_report = []
    err_N = 0
    if all_errors:
        for filename in all_errors:
            error_report.append('\n\nErrors found by unit test "%s":\n' %
                                filename)
            err_N += len(all_errors[filename])
            for error in all_errors[filename]:
                error_report.append('\t' + '\n\t'.join(error.split('\n')))
    if error_report:
        # if verbose:
        #     print("\nFAILURE! %i error%s found!\n" % (err_N, 's' if err_N > 1 else ''))
        return ('Test suite "%s" revealed %s error%s:\n\n' %
                (directory, err_N, 's' if err_N > 1 else '') +
                '\n'.join(error_report))
    if verbose:
        print("\nSUCCESS! All tests passed :-)\n")
    return ''
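
A typical driver, sketched with the factory names that DHParser's generated
parser scripts conventionally expose (`get_grammar` and `get_transformer` are
assumed here; substitute the factories of your own suite):

import sys

report = grammar_suite('tests/grammar_tests',          # directory of *test* files
                       parser_factory=get_grammar,     # assumed factory name
                       transformer_factory=get_transformer)
if report:
    print(report)    # a non-empty string is the failure report
    sys.exit(1)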
Example #5
def grammar_unit(test_unit,
                 parser_factory,
                 transformer_factory,
                 report='REPORT',
                 verbose=False):
    """
    Unit tests for a grammar-parser and ast transformations.
    """
    output = []

    def write(s):
        """Append string `s` to output. The purpose is to defer printing to
        stdout in order to avoid muddled output when several unit tests run
        at the same time."""
        output.append(s)

    def clean_key(k):
        try:
            return k.replace('*', '')
        except AttributeError:
            return k

    def get(tests, category, key) -> str:
        try:
            value = tests[category][key] if key in tests[category] \
                else tests[category][clean_key(key)]
        except KeyError:
            return ''
            # raise AssertionError('%s-test %s for parser %s missing !?'
            #                      % (category, test_name, parser_name))
        return value

    if isinstance(test_unit, str):
        _, unit_name = os.path.split(os.path.splitext(test_unit)[0])
        test_unit = unit_from_file(test_unit)
    else:
        unit_name = 'unit_test_' + str(id(test_unit))
    if verbose:
        write("\nGRAMMAR TEST UNIT: " + unit_name)
    errata = []
    parser = parser_factory()
    transform = transformer_factory()

    def has_lookahead(parser_name: str) -> bool:
        """Returns True if the parser or any of its descendant parsers is a
        Lookahead parser."""
        return parser[parser_name].apply(
            lambda ctx: isinstance(ctx[-1], Lookahead))
        # lookahead_found = False
        #
        # def find_lookahead(p: Parser):
        #     nonlocal lookahead_found
        #     if not lookahead_found:
        #         lookahead_found = isinstance(p, Lookahead)
        #
        # parser[parser_name].apply(find_lookahead)
        # return lookahead_found

    def lookahead_artifact(syntax_tree: Node):
        """
        Returns True, if the error merely occurred, because the parser
        stopped in front of a sequence that was captured by a lookahead
        operator or if a mandatory lookahead failed at the end of data.
        This is required for testing of parsers that put a lookahead
        operator at the end. See test_testing.TestLookahead.
        """
        if not get_config_value('test_suppress_lookahead_failures'):
            return False
        raw_errors = cast(RootNode, syntax_tree).errors_sorted
        is_artifact = (
            {e.code for e in raw_errors}
            <= {PARSER_LOOKAHEAD_FAILURE_ONLY,
                AUTORETRIEVED_SYMBOL_NOT_CLEARED,
                PARSER_LOOKAHEAD_MATCH_ONLY}
            or (len(raw_errors) == 1
                and (raw_errors[-1].code == PARSER_LOOKAHEAD_MATCH_ONLY
                     # case 2: mandatory lookahead failure at end of text
                     or raw_errors[-1].code == MANDATORY_CONTINUATION_AT_EOF)))
        if is_artifact:
            # don't remove the zombie node with the error message at the end,
            # but change its tag_name to indicate that it is an artifact!
            for parent in syntax_tree.select_if(
                    lambda node: any(child.tag_name == ZOMBIE_TAG
                                     for child in node.children),
                    include_root=True,
                    reverse=True):
                zombie = parent.pick_child(ZOMBIE_TAG)
                zombie.tag_name = TEST_ARTIFACT
                zombie.result = 'Artifact can be ignored. Be aware, though, that the ' \
                                'tree structure may also differ from that of a ' \
                                'non-testing environment when a testing artifact ' \
                                'has occurred!'
                # parent.result = tuple(c for c in parent.children if c.tag_name != ZOMBIE_TAG)
                break
        return is_artifact

    for parser_name, tests in test_unit.items():
        # if not get_config_value('test_parallelization'):
        #     print('  Testing parser: ' + parser_name)

        track_history = get_config_value('history_tracking')
        try:
            if has_lookahead(parser_name):
                set_tracer(all_descendants(parser[parser_name]), trace_history)
                track_history = True
        except AttributeError:
            pass

        assert parser_name, "Missing parser name in test %s!" % unit_name
        assert not any(test_type in RESULT_STAGES for test_type in tests), \
            ("Test %s in %s already has results. Use reset_unit() before running again!"
             % (parser_name, unit_name))
        assert set(tests.keys()).issubset(UNIT_STAGES), \
            'Unknown test-types: %s ! Must be one of %s' \
            % (set(tests.keys()) - UNIT_STAGES, UNIT_STAGES)
        if verbose:
            write('  Match-Tests for parser "' + parser_name + '"')
        match_tests = set(tests['match'].keys()) if 'match' in tests else set()
        if 'ast' in tests:
            ast_tests = set(tests['ast'].keys())
            if not {clean_key(k) for k in ast_tests} \
                    <= {clean_key(k) for k in match_tests}:
                raise AssertionError(
                    'AST-Tests %s for parser %s lack corresponding match-tests!'
                    % (str(ast_tests - match_tests), parser_name))
        if 'cst' in tests:
            cst_tests = set(tests['cst'].keys())
            if not {clean_key(k) for k in cst_tests} \
                    <= {clean_key(k) for k in match_tests}:
                raise AssertionError(
                    'CST-Tests %s lack corresponding match-tests!' %
                    str(cst_tests - match_tests))

        # run match tests

        for test_name, test_code in tests.get('match', dict()).items():
            # if not get_config_value('test_parallelization'):
            #     print('    Test: ' + str(test_name))

            errflag = len(errata)
            try:
                cst = parser(test_code, parser_name)
            except AttributeError as upe:
                cst = RootNode()
                cst = cst.new_error(Node(ZOMBIE_TAG, "").with_pos(0), str(upe))
            clean_test_name = str(test_name).replace('*', '')
            tests.setdefault('__cst__', {})[test_name] = cst
            errors = []  # type: List[Error]
            if is_error(cst.error_flag) and not lookahead_artifact(cst):
                errors = [
                    e for e in cst.errors_sorted
                    if e.code not in POSSIBLE_ARTIFACTS
                ]
                errata.append(
                    'Match test "%s" for parser "%s" failed:'
                    '\nExpr.:  %s\n\n%s\n\n' %
                    (test_name, parser_name, md_codeblock(test_code),
                     '\n'.join(str(m) for m in errors)))
            if "ast" in tests or report:
                ast = copy.deepcopy(cst)
                old_errors = set(ast.errors)
                traverse(ast, {'*': remove_children({TEST_ARTIFACT})})
                try:
                    transform(ast)
                except AssertionError as e:
                    e.args = ('Test %s of parser %s failed, because:\n%s' %
                              (test_name, parser_name, e.args[0]), )
                    raise e
                tests.setdefault('__ast__', {})[test_name] = ast
                ast_errors = [e for e in ast.errors if e not in old_errors]
                ast_errors.sort(key=lambda e: e.pos)
                if is_error(max(e.code for e in ast_errors)
                            if ast_errors else 0):
                    if ast_errors:
                        if errata:
                            errata[-1] = errata[-1].rstrip('\n')
                        ast_errors.append('\n')
                        errata.append('\t' + '\n\t'.join(
                            str(msg).replace('\n', '\n\t\t')
                            for msg in ast_errors))

            if verbose:
                infostr = '    match-test "' + test_name + '" ... '
                write(infostr + ("OK" if len(errata) == errflag else "FAIL"))

            if "cst" in tests and len(errata) == errflag:
                try:
                    compare = parse_tree(get(tests, "cst", test_name))
                except ValueError as e:
                    raise SyntaxError(
                        'CST-TEST "%s" of parser "%s" failed with:\n%s' %
                        (test_name, parser_name, str(e)))
                if compare:
                    if not compare.equals(cst):
                        errata.append(
                            'Concrete syntax tree test "%s" for parser "%s" failed:\n%s'
                            % (test_name, parser_name, cst.serialize('cst')))
                    if verbose:
                        infostr = '      cst-test "' + test_name + '" ... '
                        write(infostr +
                              ("OK" if len(errata) == errflag else "FAIL"))

            if "ast" in tests and len(errata) == errflag:
                try:
                    compare = parse_tree(get(tests, "ast", test_name))
                except ValueError as e:
                    raise SyntaxError(
                        'AST-TEST "%s" of parser "%s" failed with:\n%s' %
                        (test_name, parser_name, str(e)))
                if compare:
                    traverse(compare, {'*': remove_children({TEST_ARTIFACT})})
                    if not compare.equals(ast):
                        # no worry: ast is defined if "ast" in tests
                        ast_str = flatten_sxpr(ast.as_sxpr())
                        compare_str = flatten_sxpr(compare.as_sxpr())
                        # differ = difflib.Differ()
                        # difference = ''.join(differ.compare([compare_str + '\n'], [ast_str + '\n']))
                        errata.append(
                            'Abstract syntax tree test "%s" for parser "%s" failed:'
                            '\n\tExpr.:     %s\n\tExpected:  %s\n\tReceived:  %s'
                            % (test_name, parser_name, '\n\t'.join(
                                test_code.split('\n')), compare_str, ast_str))
                    if verbose:
                        infostr = '      ast-test "' + test_name + '" ... '
                        write(infostr +
                              ("OK" if len(errata) == errflag else "FAIL"))

            if len(errata) > errflag:
                tests.setdefault('__err__', {})[test_name] = errata[-1]
                # write parsing-history log only in case of failure!
                if is_logging() and track_history:
                    with local_log_dir('./LOGS'):
                        log_parsing_history(
                            parser,
                            "match_%s_%s.log" % (parser_name, clean_test_name))

        if verbose and 'fail' in tests:
            write('  Fail-Tests for parser "' + parser_name + '"')

        # run fail tests

        for test_name, test_code in tests.get('fail', dict()).items():
            errflag = len(errata)
            try:
                cst = parser(test_code, parser_name)
            except AttributeError as upe:
                node = Node(ZOMBIE_TAG, "").with_pos(0)
                cst = RootNode(node).new_error(node, str(upe))
                errata.append('Unknown parser "{}" in fail test "{}"!'.format(
                    parser_name, test_name))
                tests.setdefault('__err__', {})[test_name] = errata[-1]
            if "ast" in tests or report:
                traverse(cst, {'*': remove_children({TEST_ARTIFACT})})
                transform(cst)
            if not (is_error(cst.error_flag) and not lookahead_artifact(cst)):
                errata.append(
                    'Fail test "%s" for parser "%s" yields match instead of '
                    'expected failure!\n' % (test_name, parser_name))
                tests.setdefault('__err__', {})[test_name] = errata[-1]
                # write parsing-history log only in case of test-failure
                if is_logging() and track_history:
                    with local_log_dir('./LOGS'):
                        log_parsing_history(
                            parser,
                            "fail_%s_%s.log" % (parser_name, test_name))
            if cst.error_flag:
                tests.setdefault('__msg__', {})[test_name] = \
                    "\n".join(str(e) for e in cst.errors_sorted)
            if verbose:
                infostr = '    fail-test  "' + test_name + '" ... '
                write(infostr + ("OK" if len(errata) == errflag else "FAIL"))

    # remove tracers, in case there are any:
    set_tracer(all_descendants(parser.root_parser__), None)

    # write test-report
    if report:
        test_report = get_report(test_unit)
        if test_report:
            try:
                os.mkdir(report)  # is a process lock needed here?
            except FileExistsError:
                pass
            with open(os.path.join(report, unit_name + '.md'),
                      'w',
                      encoding='utf8') as f:
                f.write(test_report)
                f.flush()

    print('\n'.join(output))
    return errata
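
The layout of `test_unit`, as implied by the lookups above (`tests['match']`,
`tests['ast']`, `tests['fail']`, and the check against `UNIT_STAGES`), is
roughly the following; parser name, test names, and snippets are made up for
illustration:

test_unit = {
    'expression': {                   # name of the parser under test
        'match': {'M1': '2 + 3'},     # snippets that must parse
        'ast': {'M1': '(expression (term "2") (:Text "+") (term "3"))'},
        'fail': {'F1': '2 +'},        # snippets that must be rejected
    },
}
# 'factory' and 'transform' as obtained elsewhere, e.g. via grammar_provider()
errata = grammar_unit(test_unit, factory, lambda: transform, verbose=True)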
Example #6
def compile_source(source: str,
                   preprocessor: Optional[PreprocessorFunc],  # str -> str
                   parser: GrammarCallable,  # str -> Node (concrete syntax tree (CST))
                   transformer: TransformationFunc,  # Node (CST) -> Node (abstract ST (AST))
                   compiler: CompilerCallable,  # Node (AST), Source -> Any
                   # out_source_data: list = NOPE,  # Tuple[str, SourceMapFunc]
                   *, preserve_AST: bool = False) \
        -> Tuple[Optional[Any], List[Error], Optional[Node]]:
    """Compiles a source in four stages:

    1. Pre-Processing (if needed)
    2. Parsing
    3. AST-transformation
    4. Compiling.

    The later stages, AST-transformation and compilation, are only invoked
    if no fatal errors occurred in any of the earlier stages of the
    processing pipeline.

    :param source: The input text for compilation or the name of a
            file containing the input text.
    :param preprocessor:  text -> text. A preprocessor function
            or None, if no preprocessor is needed.
    :param parser:  A parsing function or grammar class
    :param transformer:  A transformation function that takes
            the root-node of the concrete syntax tree as an argument and
            transforms it (in place) into an abstract syntax tree.
    :param compiler: A compiler function or compiler class
            instance
    :param preserve_AST: If True, the abstract syntax tree is preserved
            and returned as the third element of the result tuple.

    :returns: The result of the compilation as a 3-tuple
        (result, errors, abstract syntax tree). In detail:

        1. The result as returned by the compiler or ``None`` in case of failure
        2. A list of error or warning messages
        3. The root-node of the abstract syntax tree if ``preserve_AST`` is
           True or ``None`` otherwise.
    """
    ast = None  # type: Optional[Node]
    original_text = load_if_file(source)  # type: str
    source_name = source if is_filename(source) else 'source'
    compiler.source = original_text
    log_file_name = logfile_basename(source, compiler) \
        if is_logging() else ''  # type: str
    if not hasattr(parser, 'free_char_parsefunc__') \
            or parser.history_tracking__:
        # log only for custom parser/transformer/compilers
        log_syntax_trees = get_config_value('log_syntax_trees')
    else:
        log_syntax_trees = set()

    # preprocessing

    errors = []
    if preprocessor is None:
        source_text = original_text  # type: str
        source_mapping = gen_neutral_srcmap_func(source_text, source_name)
        # lambda i: SourceLocation(source_name, 0, i)    # type: SourceMapFunc
    else:
        _, source_text, source_mapping, errors = preprocessor(
            original_text, source_name)

    if has_errors(errors, FATAL):
        return None, errors, None

    # parsing

    syntax_tree = parser(source_text,
                         source_mapping=source_mapping)  # type: RootNode
    for e in errors:
        syntax_tree.add_error(None, e)
    syntax_tree.source = original_text
    syntax_tree.source_mapping = source_mapping
    if 'cst' in log_syntax_trees:
        log_ST(syntax_tree, log_file_name + '.cst')
    if parser.history_tracking__:
        log_parsing_history(parser, log_file_name)

    # assert is_error(syntax_tree.error_flag) or str(syntax_tree) == strip_tokens(source_text), \
    #     str(syntax_tree)  # Only valid if neither tokens nor whitespace are dropped early

    result = None
    if not is_fatal(syntax_tree.error_flag):

        # AST-transformation

        if is_error(syntax_tree.error_flag):
            # catch Python exceptions, because if an error has occurred
            # earlier, the syntax tree might not look as expected,
            # which could (fatally) break AST transformations.
            try:
                transformer(syntax_tree)
            except Exception as e:
                syntax_tree.new_error(
                    syntax_tree,
                    "AST-Transformation failed due to earlier parser errors. "
                    "Crash Message: %s: %s" % (e.__class__.__name__, str(e)),
                    AST_TRANSFORM_CRASH)
        else:
            transformer(syntax_tree)

        if 'ast' in log_syntax_trees:
            log_ST(syntax_tree, log_file_name + '.ast')

        if not is_fatal(syntax_tree.error_flag):
            if preserve_AST:
                ast = copy.deepcopy(syntax_tree)

            # Compilation

            if is_error(syntax_tree.error_flag):
                # assume Python crashes are merely a consequence of earlier
                # errors, so let's catch them
                try:
                    result = compiler(syntax_tree)
                except Exception as e:
                    # raise e
                    node = syntax_tree  # type: Node
                    if isinstance(compiler, Compiler) and compiler.context:
                        node = compiler.context[-1]
                    st = traceback.format_list(
                        traceback.extract_tb(e.__traceback__))
                    trace = ''.join(filter_stacktrace(st))
                    syntax_tree.new_error(
                        node,
                        "Compilation failed, most likely, due to errors earlier "
                        "in the processing pipeline. Crash Message: %s: %s\n%s"
                        % (e.__class__.__name__, str(e), trace),
                        COMPILER_CRASH)
            else:
                # assume Python crashes are programming mistakes, so let
                # the exceptions through
                result = compiler(syntax_tree)

    messages = syntax_tree.errors_sorted  # type: List[Error]
    # Obsolete, because RootNode adjusts error locations whenever an error is added:
    # adjust_error_locations(messages, original_text, source_mapping)
    return result, messages, ast
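
A usage sketch that wires in the same EBNF stage functions that Example #3
combines via `compileDSL()`; the file name is illustrative:

result, messages, ast = compile_source(
    'arithmetic.ebnf',                   # file name or the source text itself
    nil_preprocessor,                    # no preprocessing needed
    get_ebnf_grammar(),                  # parsing: source text -> CST
    get_ebnf_transformer(),              # AST transformation (in place)
    get_ebnf_compiler('Arithmetic', 'arithmetic.ebnf'),  # compilation
    preserve_AST=True)
if has_errors(messages, FATAL):
    for msg in messages:
        print(msg)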
Example #7
def compile_on_disk(source_file: str, compiler_suite="", extension=".xml") -> Iterable[Error]:
    """
    Compiles a source file with a given compiler and writes the
    result to a file.

    If no ``compiler_suite`` is given it is assumed that the source
    file is an EBNF grammar. In this case the result will be a Python
    script containing a parser for that grammar as well as the
    skeletons for a preprocessor, AST transformation table, and compiler.
    If the Python script already exists, only the parser section of the
    script will be updated. (For this to work, the different sections
    need to be delimited by section marker blocks.) ``compile_on_disk()``
    returns a list of error messages or an empty list if no errors
    occurred.

    Parameters:
        source_file(str):  The file name of the source text to be
            compiled.
        compiler_suite(str):  The file name of the parser/compiler-suite
            (usually ending with 'Parser.py'), with which the source
            file shall be compiled. If this is left empty, the source
            file is assumed to be an EBNF-Grammar that will be compiled
            with the internal EBNF-Compiler.
        extension(str):  The result of the compilation (if successful)
            is written to a file with the same name but a different
            extension than the source file. This parameter sets the
            extension.

    Returns:
        A (potentially empty) list of error or warning messages.
    """
    filepath = os.path.normpath(source_file)
    f = None  # Optional[TextIO]
    with open(source_file, encoding="utf-8") as f:
        source = f.read()
    rootname = os.path.splitext(filepath)[0]
    dhpath = relative_path(os.path.dirname(rootname), DHPARSER_PARENTDIR)
    compiler_name = as_identifier(os.path.basename(rootname))
    if compiler_suite:
        sfactory, pfactory, tfactory, cfactory = load_compiler_suite(compiler_suite)
        compiler1 = cfactory()
    else:
        sfactory = get_ebnf_preprocessor  # PreprocessorFactoryFunc
        pfactory = get_ebnf_grammar       # ParserFactoryFunc
        tfactory = get_ebnf_transformer   # TransformerFactoryFunc
        cfactory = get_ebnf_compiler      # CompilerFactoryFunc
        compiler1 = cfactory()            # Compiler

    is_ebnf_compiler = False  # type: bool
    if isinstance(compiler1, EBNFCompiler):
        is_ebnf_compiler = True
        compiler1.set_grammar_name(compiler_name, source_file)

    result, messages, _ = compile_source(source, sfactory(), pfactory(), tfactory(), compiler1)

    if has_errors(messages):
        return messages

    elif is_ebnf_compiler:
        # trans == get_ebnf_transformer or trans == EBNFTransformer:
        # either an EBNF- or no compiler suite given
        ebnf_compiler = cast(EBNFCompiler, compiler1)  # type: EBNFCompiler
        global SECTION_MARKER, RX_SECTION_MARKER, PREPROCESSOR_SECTION, PARSER_SECTION, \
            AST_SECTION, COMPILER_SECTION, END_SECTIONS_MARKER, RX_WHITESPACE
        f = None
        try:
            parser_name = rootname + 'Parser.py'
            f = open(parser_name, 'r', encoding="utf-8")
            source = f.read()
            sections = split_source(parser_name, source)
            intro, imports, preprocessor, _, ast, compiler, outro = sections
            ast_trans_python_src = imports + ast  # DHPARSER_IMPORTS.format(dhparser_parentdir=dhpath)
            ast_trans_table = dict()  # type: TransformationDict
            try:
                ast_trans_table = compile_python_object(ast_trans_python_src,
                                                        r'(?:\w+_)?AST_transformation_table$')
            except Exception as e:
                if isinstance(e, NameError):
                    err_str = 'NameError "{}" while compiling AST-Transformation. ' \
                              'Possibly due to a forgotten import at the beginning ' \
                              'of the AST-Block (!)'.format(str(e))
                else:
                    err_str = 'Exception {} while compiling AST-Transformation: {}' \
                              .format(str(type(e)), str(e))
                messages.append(Error(err_str, 0, CANNOT_VERIFY_TRANSTABLE_WARNING))
                if is_logging():
                    with open(os.path.join(log_dir(), rootname + '_AST_src.py'), 'w',
                              encoding='utf-8') as f:
                        f.write(ast_trans_python_src)
            messages.extend(ebnf_compiler.verify_transformation_table(ast_trans_table))
            # TODO: Verify compiler
        except (PermissionError, FileNotFoundError, IOError):
            intro, imports, preprocessor, _, ast, compiler, outro = '', '', '', '', '', '', ''
        finally:
            if f:
                f.close()
                f = None

        if RX_WHITESPACE.fullmatch(intro):
            intro = '#!/usr/bin/env python3'
        if RX_WHITESPACE.fullmatch(outro):
            outro = read_template('DSLParser.pyi').format(NAME=compiler_name)
        if RX_WHITESPACE.fullmatch(imports):
            imports = DHParser.ebnf.DHPARSER_IMPORTS.format(dhparser_parentdir=dhpath)
        if RX_WHITESPACE.fullmatch(preprocessor):
            preprocessor = ebnf_compiler.gen_preprocessor_skeleton()
        if RX_WHITESPACE.fullmatch(ast):
            ast = ebnf_compiler.gen_transformer_skeleton()
        if RX_WHITESPACE.fullmatch(compiler):
            compiler = ebnf_compiler.gen_compiler_skeleton()

        compilerscript = rootname + 'Parser.py'
        try:
            f = open(compilerscript, 'w', encoding="utf-8")
            f.write(intro)
            f.write(SECTION_MARKER.format(marker=SYMBOLS_SECTION))
            f.write(imports)
            f.write(SECTION_MARKER.format(marker=PREPROCESSOR_SECTION))
            f.write(preprocessor)
            f.write(SECTION_MARKER.format(marker=PARSER_SECTION))
            f.write(cast(str, result))
            f.write(SECTION_MARKER.format(marker=AST_SECTION))
            f.write(ast)
            f.write(SECTION_MARKER.format(marker=COMPILER_SECTION))
            f.write(compiler)
            f.write(SECTION_MARKER.format(marker=END_SECTIONS_MARKER))
            f.write(outro)
        except (PermissionError, FileNotFoundError, IOError) as error:
            print('# Could not write file "' + compilerscript + '" because of: '
                  + "\n# ".join(str(error).split('\n)')))
            print(result)
        finally:
            if f:
                f.close()

        if platform.system() != "Windows":
            # set file permissions so that the compilerscript can be executed
            st = os.stat(compilerscript)
            os.chmod(compilerscript, st.st_mode | stat.S_IEXEC)

    else:
        f = None
        try:
            f = open(rootname + extension, 'w', encoding="utf-8")
            if isinstance(result, Node):
                if extension.lower() == '.xml':
                    f.write(result.as_xml())
                else:
                    f.write(result.as_sxpr())
            elif isinstance(result, str):
                f.write(result)
            else:
                raise AssertionError('Illegal result type: ' + str(type(result)))
        except (PermissionError, FileNotFoundError, IOError) as error:
            print('# Could not write file "' + rootname + '.py" because of: '
                  + "\n# ".join(str(error).split('\n)')))
            print(result)
        finally:
            if f:
                f.close()

    return messages
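
A minimal driver sketch; the grammar file name is illustrative:

# Compiling an EBNF grammar writes an 'arithmeticParser.py' script next to
# the source file and returns a (hopefully empty) list of messages.
messages = compile_on_disk('arithmetic.ebnf')
for msg in messages:
    print(msg)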