Example #1
def __copy_files(FileTxt):

    input_directory  = QUEX_PATH               
    output_directory = Setup.output_directory 

    file_list = map(lambda x: Lng["$code_base"] + x.strip(), FileTxt.split())

    # Ensure that all directories exist
    directory_list = []
    for file in file_list:
        directory = path.dirname(output_directory + file)
        if directory in directory_list: continue
        directory_list.append(directory)

    # Sort directories according to length --> create parent directories before child
    for directory in sorted(directory_list, key=len):
        if os.access(directory, os.F_OK): continue
        # Create also parent directories, if required
        os.makedirs(directory)

    for file in file_list:
        input_file  = input_directory + file
        output_file = output_directory + file
        # Copy
        content     = open_file_or_die(input_file, "rb").read()
        write_safely_and_close(output_file, content)
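A minimal standalone sketch of the same pattern (hypothetical paths; note that os.makedirs already creates missing parents, so the length-sort above is merely a convenience to create parents before children explicitly):

import os
from os import path

def copy_one_file(input_file, output_file):
    # Create the target directory (including parents) if it does not exist.
    directory = path.dirname(output_file)
    if directory != "" and not os.access(directory, os.F_OK):
        os.makedirs(directory)
    # Copy byte-for-byte, as __copy_files does above.
    content = open(input_file, "rb").read()
    out = open(output_file, "wb")
    out.write(content)
    out.close()

copy_one_file("demo/in/lexer.qx", "demo/out/lexer.qx")  # hypothetical paths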
Example #2
def get_codec_list_db():
    """
       ...
       [ CODEC_NAME  [CODEC_NAME_LIST]  [LANGUAGE_NAME_LIST] ]
       ...
    """
    global _codec_list_db
    if _codec_list_db: return _codec_list_db

    fh = open_file_or_die(QUEX_CODEC_DB_PATH + "/00-ALL.txt", "rb")
    # FIELD SEPARATOR:  ';'
    # RECORD SEPARATOR: '\n'
    # FIELDS:           [Python Coding Name]   [Aliases]   [Languages]
    # Aliases and Languages are separated by ','
    _codec_list_db = []
    for line in fh.readlines():
        line = line.strip()
        if len(line) == 0 or line[0] == "#": continue
        fields = map(lambda x: x.strip(), line.split(";"))
        try:
            codec = fields[0]
            aliases_list = map(lambda x: x.strip(), fields[1].split(","))
            language_list = map(lambda x: x.strip(), fields[2].split(","))
        except:
            print "Error in line:\n%s\n" % line
            continue  # skip malformed records instead of appending stale values
        _codec_list_db.append([codec, aliases_list, language_list])

    fh.close()
    return _codec_list_db
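For illustration, how one record of the format described in the comments above is taken apart (';' separates the three fields, ',' separates aliases and languages; the input line is invented):

line = "cp1252; windows-1252, 1252; English, German, French"
fields = [x.strip() for x in line.split(";")]
codec         = fields[0]                                  # 'cp1252'
aliases_list  = [x.strip() for x in fields[1].split(",")]  # ['windows-1252', '1252']
language_list = [x.strip() for x in fields[2].split(",")]  # ['English', 'German', 'French']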
Example #3
def UserCodeFragment_straighten_open_line_pragmas(filename, Language):
    if Language not in UserCodeFragment_OpenLinePragma:
        return

    fh = open_file_or_die(filename)
    norm_filename = Setup.get_file_reference(filename)

    new_content = []
    line_n      = 0
    LinePragmaInfoList = UserCodeFragment_OpenLinePragma[Language]
    for line in fh.readlines():
        line_n += 1
        if Language == "C":
            for info in LinePragmaInfoList:
                if line.find(info[0]) == -1: continue
                line = info[1]
                # Line number pragmas must be < 32768; avoid compiler
                # warnings by clamping to min(line_n + 1, 32767).
                line = line.replace("NUMBER", repr(int(min(line_n + 1, 32767))))
                # Even under Windows (tm), the '/' is accepted. Thus do not rely on 'normpath'
                line = line.replace("FILENAME", norm_filename)
                if len(line) == 0 or line[-1] != "\n":
                    line = line + "\n"
        new_content.append(line)

    fh.close()

    write_safely_and_close(filename, "".join(new_content))
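UserCodeFragment_OpenLinePragma itself is not shown here; from its use above, each entry behaves like a (trigger, template) pair whose template carries the placeholders NUMBER and FILENAME. A sketch of a single replacement step, with an assumed entry and a hypothetical file name:

info   = ('#line', '#line NUMBER "FILENAME"\n')  # assumed (trigger, template) pair
line_n = 41
line = info[1]
line = line.replace("NUMBER", repr(int(min(line_n + 1, 32767))))
line = line.replace("FILENAME", "Simple.qx")     # hypothetical file name
print(line)  # -> #line 42 "Simple.qx"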
Example #4
def UserCodeFragment_straighten_open_line_pragmas(filename, Language):
    if Language not in UserCodeFragment_OpenLinePragma:
        return

    fh = open_file_or_die(filename)
    norm_filename = Setup.get_file_reference(filename)

    new_content = []
    line_n = 0
    LinePragmaInfoList = UserCodeFragment_OpenLinePragma[Language]
    for line in fh.readlines():
        line_n += 1
        if Language == "C":
            for info in LinePragmaInfoList:
                if line.find(info[0]) == -1: continue
                line = info[1]
                # Line number pragmas must be < 32768; avoid compiler
                # warnings by clamping to min(line_n + 1, 32767).
                line = line.replace("NUMBER", repr(int(min(line_n + 1,
                                                           32767))))
                # Even under Windows (tm), the '/' is accepted. Thus do not rely on 'normpath'
                line = line.replace("FILENAME", norm_filename)
                if len(line) == 0 or line[-1] != "\n":
                    line = line + "\n"
        new_content.append(line)

    fh.close()

    write_safely_and_close(filename, "".join(new_content))
Example #5
def get_codec_list_db():
    """
       ...
       [ CODEC_NAME  [CODEC_NAME_LIST]  [LANGUAGE_NAME_LIST] ]
       ...
    """
    global __codec_list_db
    if len(__codec_list_db) != 0: return __codec_list_db

    fh = open_file_or_die(__codec_db_path + "/00-ALL.txt", "rb")
    # FIELD SEPARATOR:  ';'
    # RECORD SEPARATOR: '\n'
    # FIELDS:           [Python Coding Name]   [Aliases]   [Languages] 
    # Aliases and Languages are separated by ','
    __codec_list_db = []
    for line in fh.readlines():
        line = line.strip()
        if len(line) == 0 or line[0] == "#": continue
        fields = map(lambda x: x.strip(), line.split(";"))
        try:
            codec         = fields[0]
            aliases_list  = map(lambda x: x.strip(), fields[1].split(","))
            language_list = map(lambda x: x.strip(), fields[2].split(","))
        except:
            print "Error in line:\n%s\n" % line
            continue  # skip malformed records instead of appending stale values
        __codec_list_db.append([codec, aliases_list, language_list])

    fh.close()
    return __codec_list_db
Example #6
def __copy_files(FileTxt):

    input_directory = QUEX_PATH
    output_directory = Setup.output_directory

    file_list = map(lambda x: Setup.language_db["$code_base"] + x.strip(),
                    FileTxt.split())

    # Ensure that all directories exist
    directory_list = []
    for file in file_list:
        directory = path.dirname(output_directory + file)
        if directory in directory_list: continue
        directory_list.append(directory)

    # Sort directories according to length --> create parent directories before child
    for directory in sorted(directory_list, key=len):
        if os.access(directory, os.F_OK): continue
        # Create also parent directories, if required
        os.makedirs(directory)

    for file in file_list:
        input_file = input_directory + file
        output_file = output_directory + file
        # Copy
        content = open_file_or_die(input_file, "rb").read()
        write_safely_and_close(output_file, content)
Example #7
    def __load(self, FileName, ExitOnErrorF):
        fh = open_file_or_die(FileName, "rb")
        source_set, drain_set, error_str = parser.do(self, fh)

        if error_str is not None:
            error_msg(error_str, fh, DontExitF=not ExitOnErrorF)
            self.__set_invalid()  # Transformation is not valid.

        return source_set, drain_set
Example #8
    def __load(self, FileName, ExitOnErrorF):
        fh = open_file_or_die(FileName, "rb")
        source_set, drain_set, error_str = parser.do(self, fh)

        if error_str is not None:
            error_msg(error_str, fh, DontExitF=not ExitOnErrorF)
            self.__set_invalid() # Transformation is not valid.

        return source_set, drain_set
Example #9
def __extract_extra_options_from_file(FileName):
    """Extract an option section from a given file. The quex command line 
       options may be given in a section surrounded by '<<<QUEX-OPTIONS>>>'
       markers. For example:

           <<<QUEX-OPTIONS>>>
              --token-class-file      Common-token
              --token-class           Common::Token
              --token-id-type         uint32_t
              --buffer-element-type   uint8_t
              --lexeme-null-object    ::Common::LexemeNullObject
              --foreign-token-id-file Common-token_ids
           <<<QUEX-OPTIONS>>>

       This function extracts those options and builds a new 'argv' array, i.e.
       an array of strings, as if they came from the command line.
    """
    MARKER = "<<<QUEX-OPTIONS>>>"
    fh = open_file_or_die(FileName)

    while 1 + 1 == 2:
        line = fh.readline()
        if line == "":
            return None  # Simply no starting marker has been found
        elif line.find(MARKER) != -1:
            pos = fh.tell()
            break

    result = []

    while 1 + 1 == 2:
        line = fh.readline()
        if line == "":
            fh.seek(pos)
            error_msg("Missing terminating '%s'." % MARKER, fh)

        if line.find(MARKER) != -1:
            break

        idx = line.find("-")
        if idx == -1: continue
        options = line[idx:].split()
        result.extend(options)

    if len(result) == 0: return None

    if setup.message_on_extra_options_f:
        if len(result) < 2: arg_str = result[0]
        else:
            arg_str = reduce(lambda x, y: "%s %s" % (x.strip(), y.strip()),
                             result)
        print "## Command line options from file '%s'" % FileName
        print "## %s" % arg_str
        print "## (suppress this message with --no-message-on-extra-options)"

    return result
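Each line between the markers is cut at its first '-' and split on whitespace, so the docstring's sample section yields a flat argv-style list. The core step on one invented line:

line = "   --token-class           Common::Token"
idx = line.find("-")
print(line[idx:].split())  # -> ['--token-class', 'Common::Token']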
Example #10
def __extract_extra_options_from_file(FileName):
    """Extract an option section from a given file. The quex command line 
       options may be given in a section surrounded by '<<<QUEX-OPTIONS>>>'
       markers. For example:

           <<<QUEX-OPTIONS>>>
              --token-class-file      Common-token
              --token-class           Common::Token
              --token-id-type         uint32_t
              --buffer-element-type   uint8_t
              --lexeme-null-object    ::Common::LexemeNullObject
              --foreign-token-id-file Common-token_ids
           <<<QUEX-OPTIONS>>>

       This function extracts those options and builds a new 'argv' array, i.e.
       an array of strings, as if they came from the command line.
    """
    MARKER = "<<<QUEX-OPTIONS>>>"
    fh     = open_file_or_die(FileName)

    while 1 + 1 == 2:
        line = fh.readline()
        if line == "":
            return None # Simply no starting marker has been found
        elif line.find(MARKER) != -1: 
            pos = fh.tell()
            break

    result = []

    while 1 + 1 == 2:
        line = fh.readline()
        if line == "":
            fh.seek(pos)
            error_msg("Missing terminating '%s'." % MARKER, fh)

        if line.find(MARKER) != -1: 
            break
        
        idx = line.find("-")
        if idx == -1: continue
        options = line[idx:].split()
        result.extend(options)

    if len(result) == 0: return None

    if setup.message_on_extra_options_f:
        if len(result) < 2: arg_str = result[0]
        else:               arg_str = reduce(lambda x, y: "%s %s" % (x.strip(), y.strip()), result)
        print "## Command line options from file '%s'" % FileName
        print "## %s" % arg_str
        print "## (suppress this message with --no-message-on-extra-options)"

    return result
Example #11
def __extra_option_extract_from_file(FileName):
    """Extract an option section from a given file. The quex command line 
       options may be given in a section surrounded by '<<<QUEX-OPTIONS>>>'
       markers. For example:

           <<<QUEX-OPTIONS>>>
              --token-class-file      Common-token
              --token-class           Common::Token
              --token-id-type         uint32_t
              --buffer-element-type   uint8_t
              --lexeme-null-object    ::Common::LexemeNullObject
              --foreign-token-id-file Common-token_ids
           <<<QUEX-OPTIONS>>>

       This function extracts those options and builds a new 'argv' array, i.e.
       an array of strings, as if they came from the command line.
    """
    MARKER = "<<<QUEX-OPTIONS>>>"
    fh     = open_file_or_die(FileName)

    while 1 + 1 == 2:
        line = fh.readline()
        if line == "":
            return None, [] # Simply no starting marker has been found
        elif line.find(MARKER) != -1: 
            pos = fh.tell()
            break

    result = []
    location_list = []

    line_n = 0
    while 1 + 1 == 2:
        line_n += 1
        line    = fh.readline()
        if line == "":
            fh.seek(pos)
            error_msg("Missing terminating '%s'." % MARKER, fh)

        if line.find(MARKER) != -1: 
            break
        
        idx = line.find("-")
        if idx == -1: continue
        options = line[idx:].split()

        location_list.append((SourceRef(FileName, line_n), options))
        result.extend(options)

    if len(result) == 0: return None, location_list

    return result, location_list
Example #12
    def get_line_n_of_include(FileName, IncludedFileName):
        fh = open_file_or_die(FileName, Mode="rb")
        line_n = 0
        for line in fh.readlines():
            line_n += 1
            if include_re_obj.search(line) is not None and line.find(IncludedFileName) != -1:
                break
        else:
            # The included file should appear in the including file; tolerate its absence for safety.
            pass

        fh.close()
        return line_n
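include_re_obj comes from the enclosing scope (parse_token_id_file compiles it from its IncludeRE argument). A self-contained sketch with an assumed C-style include pattern:

import re

include_re_obj = re.compile(r'#\s*include\s+["<]([^">]+)[">]')  # assumed pattern

line = '#include "Common-token_ids.h"'
m = include_re_obj.search(line)
if m is not None:
    print(m.group(1))  # -> Common-token_ids.h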
Example #13
def __create_database_file(TargetEncoding, TargetEncodingName):
    """Writes a database file for a given TargetEncodingName. The 
       TargetEncodingName is required to name the file where the 
       data is to be stored.
    """
    encoder = codecs.getencoder(TargetEncoding)
    prev_output = -1
    db = []
    bytes_per_char = -1
    for input in range(0x110000):
        output, n = __get_transformation(encoder, input)

        if bytes_per_char == -1:
            bytes_per_char = n
        elif n != -1 and bytes_per_char != n:
            print "# not a constant size byte format."
            return False

        # Detect discontinuity in the mapping
        if prev_output == -1:
            if output != -1:
                input_interval = Interval(input)
                target_interval_begin = output

        elif output != prev_output + 1:
            # If interval was valid, append it to the database
            input_interval.end = input
            db.append((input_interval, target_interval_begin))
            # If interval ahead is valid, prepare an object for it
            if output != -1:
                input_interval = Interval(input)
                target_interval_begin = output

        prev_output = output

    if prev_output != -1:
        input_interval.end = input
        db.append((input_interval, target_interval_begin))

    fh = open_file_or_die(QUEX_CODEC_DB_PATH + "/%s.dat" % TargetEncoding,
                          "wb")
    fh.write("// Describes mapping from Unicode Code pointer to Character code in %s (%s)\n" \
             % (TargetEncoding, TargetEncodingName))
    fh.write(
        "// [SourceInterval.begin] [SourceInterval.Size]  [TargetInterval.begin] (all in hexidecimal)\n"
    )
    for i, t in db:
        fh.write("0x%X %i 0x%X\n" % (i.begin, i.end - i.begin, t))
    fh.close()

    return True
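Each emitted record describes one contiguous run of the mapping; the begin values are hexadecimal while the size is decimal (see the "0x%X %i 0x%X" format above). Reading one invented record back:

record = "0x41 26 0x61"  # invented: source 0x41..0x5A maps onto targets starting at 0x61
begin_txt, size_txt, target_txt = record.split()
begin, size, target = int(begin_txt, 16), int(size_txt), int(target_txt, 16)
print("%d %d %d" % (begin, begin + size, target))  # -> 65 91 97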
Example #14
    def get_line_n_of_include(FileName, IncludedFileName):
        fh = open_file_or_die(FileName, Mode="rb")
        line_n = 0
        for line in fh.readlines():
            line_n += 1
            if include_re_obj.search(
                    line) is not None and line.find(IncludedFileName) != -1:
                break
        else:
            # The included file should appear in the including file; tolerate its absence for safety.
            pass

        fh.close()
        return line_n
Example #15
def parse_token_id_file(ForeignTokenIdFile, TokenPrefix, CommentDelimiterList,
                        IncludeRE):
    """This function somehow interprets the user defined token id file--if there is
       one. It does this in order to find the names of defined token ids. It does
       some basic interpretation and include file following, but: **it is in no
       way perfect**. Since its only purpose is to avoid warnings about token ids
       that are not defined it is not essential that it may fail sometimes.

       It is more like a nice feature that quex tries to find definitions on its own.
       
       Nevertheless, it should work in the large majority of cases.
    """
    include_re_obj = re.compile(IncludeRE)

    # validate(...) ensured, that the file exists.
    work_list = [ForeignTokenIdFile]
    done_list = []
    while len(work_list) != 0:
        file_name = work_list.pop()
        done_list.append(file_name)  # record processed files so includes are not re-visited
        fh = open_file_or_die(file_name, Mode="rb")
        content = fh.read()

        # delete any comment inside the file
        for opener, closer in CommentDelimiterList:
            content = delete_comment(content,
                                     opener,
                                     closer,
                                     LeaveNewlineDelimiter=True)

        # add any found token id to the list
        token_id_finding_list = extract_identifiers_with_specific_prefix(
            content, TokenPrefix)
        for token_name, line_n in token_id_finding_list:
            prefix_less_token_name = token_name[len(TokenPrefix):]
            # NOTE: The line number might be wrong, because of the comment deletion
            # NOTE: The actual token value is not important, since the token's numeric
            #       identifier is defined in the user's header. We do not care.
            token_id_db[prefix_less_token_name] = \
                    TokenInfo(prefix_less_token_name, None, None, fh.name, line_n)

        # find "#include" statements
        include_file_list = include_re_obj.findall(content)
        include_file_list = filter(lambda file: file not in done_list,
                                   include_file_list)
        include_file_list = filter(lambda file: os.access(file, os.F_OK),
                                   include_file_list)
        work_list.extend(include_file_list)

        fh.close()
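extract_identifiers_with_specific_prefix is a quex helper that returns (name, line_n) pairs; a plausible minimal sketch of the idea, assuming plain identifier matching:

import re

def extract_identifiers_sketch(content, prefix):
    # Collect identifiers starting with 'prefix', with 1-based line numbers.
    result = []
    for line_n, line in enumerate(content.splitlines(), 1):
        for name in re.findall(r"\b%s\w+" % re.escape(prefix), line):
            result.append((name, line_n))
    return result

print(extract_identifiers_sketch("#define TK_IDENTIFIER 5", "TK_"))
# -> [('TK_IDENTIFIER', 1)]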
Example #16
def __create_database_file(TargetEncoding, TargetEncodingName):
    """Writes a database file for a given TargetEncodingName. The 
       TargetEncodingName is required to name the file where the 
       data is to be stored.
    """
    encoder     = codecs.getencoder(TargetEncoding)
    prev_output = -1
    db          = []
    bytes_per_char = -1
    for input in range(0x110000):
        output, n = __get_transformation(encoder, input)

        if bytes_per_char == -1: 
            bytes_per_char = n
        elif n != -1 and bytes_per_char != n:
            print "# not a constant size byte format."
            return False

        # Detect discontinuity in the mapping
        if   prev_output == -1:
            if output != -1:
                input_interval        = Interval(input)
                target_interval_begin = output

        elif output != prev_output + 1:
            # If interval was valid, append it to the database
            input_interval.end    = input
            db.append((input_interval, target_interval_begin))
            # If interval ahead is valid, prepare an object for it
            if output != -1:
                input_interval        = Interval(input)
                target_interval_begin = output

        prev_output = output

    if prev_output != -1:
        input_interval.end = input
        db.append((input_interval, target_interval_begin))

    fh = open_file_or_die(QUEX_CODEC_DB_PATH + "/%s.dat" % TargetEncoding, "wb")
    fh.write("// Describes mapping from Unicode Code pointer to Character code in %s (%s)\n" \
             % (TargetEncoding, TargetEncodingName))
    fh.write("// [SourceInterval.begin] [SourceInterval.Size]  [TargetInterval.begin] (all in hexidecimal)\n")
    for i, t in db:
        fh.write("0x%X %i 0x%X\n" % (i.begin, i.end - i.begin, t))
    fh.close()

    return True
Example #17
def do(file_list):
    if len(file_list) == 0 and not Setup.token_class_only_f:
        error_msg("No input files.")

    prepare_default_standard_token_ids()

    for file in file_list:
        fh = open_file_or_die(file, CodecCheckF=True)

        # read all modes until end of file
        try:
            while 1 + 1 == 2:
                parse_section(fh)
        except EndOfStreamException:
            pass
        except RegularExpressionException, x:
            error_msg(x.message, fh)
Example #18
def get_codec_transformation_info(Codec=None, FileName=None, FH=-1, LineN=None):
    """Provides the information about the relation of character codes in a particular 
       coding to unicode character codes. It is provided in the following form:

       # Codec Values                 Unicode Values
       [ (Source0_Begin, Source0_End, TargetInterval0_Begin), 
         (Source1_Begin, Source1_End, TargetInterval1_Begin),
         (Source2_Begin, Source2_End, TargetInterval2_Begin), 
         ... 
       ]

       Arguments FH and LineN correspond to the arguments of error_msg.
    """
    assert Codec is not None or FileName is not None

    if FileName is not None:
        file_name = FileName
    else:
        distinct_codec = __get_distinct_codec_name_for_alias(Codec)
        file_name      = __codec_db_path + "/%s.dat" % distinct_codec

    fh = open_file_or_die(file_name, "rb")

    # Read coding into data structure
    transformation_list = []
    try:
        while 1 + 1 == 2:
            skip_whitespace(fh)
            source_begin = read_integer(fh)
            if source_begin is None:
                error_msg("Missing integer (source interval begin) in codec file.", fh)
            skip_whitespace(fh)
            source_size = read_integer(fh)
            if source_size is None:
                error_msg("Missing integer (source interval size) in codec file.", fh)
            skip_whitespace(fh)
            target_begin = read_integer(fh)
            if target_begin is None:
                error_msg("Missing integer (target interval begin) in codec file.", fh)

            source_end = source_begin + source_size
            transformation_list.append([source_begin, source_end, target_begin])
    except EndOfStreamException:
        pass

    fh.close()
    return transformation_list
Example #19
def do(file_list):
    if len(file_list) == 0 and not Setup.token_class_only_f: 
        error_msg("No input files.")

    prepare_default_standard_token_ids()

    for file in file_list:
        fh = open_file_or_die(file, CodecCheckF=True)

        # read all modes until end of file
        try:
            while 1 + 1 == 2:
                parse_section(fh)
        except EndOfStreamException:
            pass
        except RegularExpressionException, x:
            error_msg(x.message, fh)
Example #20
    def straighten_open_line_pragmas(self, FileName):
        norm_filename   = Setup.get_file_reference(FileName)
        line_pragma_txt = self._SOURCE_REFERENCE_END().strip()

        new_content = []
        line_n      = 1 # NOT: 0!
        fh          = open_file_or_die(FileName)
        while 1 + 1 == 2:
            line = fh.readline()
            line_n += 1
            if not line: 
                break
            elif line.strip() != line_pragma_txt:
                new_content.append(line)
            else:
                line_n += 1
                new_content.append(self._SOURCE_REFERENCE_BEGIN(SourceRef(norm_filename, line_n)))
        fh.close()
        write_safely_and_close(FileName, "".join(new_content))
Example #21
def do(file_list):
    if len(file_list) == 0 and not Setup.token_class_only_f: 
        error_msg("No input files.")

    # If a foreign token-id file was provided, even the standard token
    # ids must be defined there.
    if not Setup.token_id_foreign_definition:
        prepare_default_standard_token_ids()

    for file in file_list:
        fh = open_file_or_die(file, CodecCheckF=True)

        # read all modes until end of file
        try:
            while 1 + 1 == 2:
                parse_section(fh)
        except EndOfStreamException:
            pass
        except RegularExpressionException, x:
            error_msg(x.message, fh)
Example #22
def parse_token_id_file(ForeignTokenIdFile, TokenPrefix, CommentDelimiterList, IncludeRE):
    """This function somehow interprets the user defined token id file--if there is
       one. It does this in order to find the names of defined token ids. It does
       some basic interpretation and include file following, but: **it is in no
       way perfect**. Since its only purpose is to avoid warnings about token ids
       that are not defined it is not essential that it may fail sometimes.

       It is more like a nice feature that quex tries to find definitions on its own.
       
       Nevertheless, it should work in the large majority of cases.
    """
    include_re_obj = re.compile(IncludeRE)

    # validate(...) ensured, that the file exists.
    work_list    = [ ForeignTokenIdFile ] 
    done_list    = []
    while len(work_list) != 0:
        file_name = work_list.pop()
        done_list.append(file_name)  # record processed files so includes are not re-visited
        fh = open_file_or_die(file_name, Mode="rb")
        content = fh.read()

        # delete any comment inside the file
        for opener, closer in CommentDelimiterList:
            content = delete_comment(content, opener, closer, LeaveNewlineDelimiter=True)

        # add any found token id to the list
        token_id_finding_list = extract_identifiers_with_specific_prefix(content, TokenPrefix)
        for token_name, line_n in token_id_finding_list:
            prefix_less_token_name = token_name[len(TokenPrefix):]
            # NOTE: The line number might be wrong, because of the comment deletion
            # NOTE: The actual token value is not important, since the token's numeric
            #       identifier is defined in the user's header. We do not care.
            token_id_db[prefix_less_token_name] = \
                    TokenInfo(prefix_less_token_name, None, None, fh.name, line_n) 
        
        # find "#include" statements
        include_file_list = include_re_obj.findall(content)
        include_file_list = filter(lambda file: file not in done_list,    include_file_list)
        include_file_list = filter(lambda file: os.access(file, os.F_OK), include_file_list)
        work_list.extend(include_file_list)

        fh.close()
Example #23
    def straighten_open_line_pragmas(self, FileName):
        norm_filename = Setup.get_file_reference(FileName)
        line_pragma_txt = self._SOURCE_REFERENCE_END().strip()

        new_content = []
        line_n = 1  # NOT: 0!
        fh = open_file_or_die(FileName)
        while 1 + 1 == 2:
            line = fh.readline()
            line_n += 1
            if not line:
                break
            elif line.strip() != line_pragma_txt:
                new_content.append(line)
            else:
                line_n += 1
                new_content.append(
                    self._SOURCE_REFERENCE_BEGIN(
                        SourceRef(norm_filename, line_n)))
        fh.close()
        write_safely_and_close(FileName, "".join(new_content))
Example #24
def _do(Descr):
    # The following things must be ensured before the function is called
    assert Descr is not None
    assert Descr.__class__.__name__ == "TokenTypeDescriptor"

    ## ALLOW: Descr.get_member_db().keys() == empty

    TemplateFile = QUEX_PATH \
                   + Lng["$code_base"] \
                   + Lng["$token_template_file"]

    TemplateIFile = QUEX_PATH \
                   + Lng["$code_base"] \
                   + Lng["$token_template_i_file"]

    template_str = open_file_or_die(TemplateFile, Mode="rb").read()
    template_i_str = open_file_or_die(TemplateIFile, Mode="rb").read()

    virtual_destructor_str = ""
    if Descr.open_for_derivation_f: virtual_destructor_str = "virtual "

    if Descr.copy is None:
        # Default copy operation: Plain Copy of token memory
        copy_str = "__QUEX_STD_memcpy((void*)__this, (void*)__That, sizeof(QUEX_TYPE_TOKEN));\n"
    else:
        copy_str = Lng.SOURCE_REFERENCED(Descr.copy)

    if Descr.take_text is None:
        take_text_str = "return true;\n"
    else:
        take_text_str = Lng.SOURCE_REFERENCED(Descr.take_text)

    include_guard_extension_str = get_include_guard_extension(
        Lng.NAMESPACE_REFERENCE(Descr.name_space) + "__" + Descr.class_name)

    # In case of plain 'C' the class name must incorporate the namespace (list)
    token_class_name = Descr.class_name
    if Setup.language == "C":
        token_class_name = Setup.token_class_name_safe

    converter_declaration_include,   \
    converter_implementation_include, \
    converter_string,                 \
    converter_wstring                 = __get_converter_configuration(include_guard_extension_str)

    extra_at_begin_str = lexeme_null_declaration()
    extra_at_end_str = ""
    if Setup.token_class_only_f:
        extra_at_begin_str = QUEX_NAME_TOKEN_define_str % include_guard_extension_str \
                             + extra_at_begin_str
        extra_at_end_str   = QUEX_NAME_TOKEN_undef_str % include_guard_extension_str \
                             + extra_at_end_str

    namespace_open, namespace_close = __namespace_brackets()
    helper_variable_replacements = [
        ["$INCLUDE_CONVERTER_DECLARATION", converter_declaration_include],
        [
            "$INCLUDE_CONVERTER_IMPLEMENTATION",
            converter_implementation_include
        ],
        ["$CONVERTER_STRING", converter_string],
        ["$CONVERTER_WSTRING", converter_wstring],
        ["$NAMESPACE_CLOSE", namespace_close],
        ["$NAMESPACE_OPEN", namespace_open],
        ["$TOKEN_CLASS", token_class_name],
    ]

    txt = blue_print(template_str, [
        ["$$EXTRA_AT_BEGIN$$", extra_at_begin_str],
        ["$$EXTRA_AT_END$$", extra_at_end_str],
    ])
    txt = blue_print(txt, [
        ["$$BODY$$", Lng.SOURCE_REFERENCED(Descr.body)],
        ["$$CONSTRUCTOR$$",
         Lng.SOURCE_REFERENCED(Descr.constructor)],
        ["$$COPY$$", copy_str],
        ["$$DESTRUCTOR$$",
         Lng.SOURCE_REFERENCED(Descr.destructor)],
        ["$$DISTINCT_MEMBERS$$",
         get_distinct_members(Descr)],
        ["$$FOOTER$$", Lng.SOURCE_REFERENCED(Descr.footer)],
        ["$$FUNC_TAKE_TEXT$$", take_text_str],
        ["$$HEADER$$", Lng.SOURCE_REFERENCED(Descr.header)],
        ["$$INCLUDE_GUARD_EXTENSION$$", include_guard_extension_str],
        ["$$NAMESPACE_CLOSE$$",
         Lng.NAMESPACE_CLOSE(Descr.name_space)],
        ["$$NAMESPACE_OPEN$$",
         Lng.NAMESPACE_OPEN(Descr.name_space)],
        ["$$QUICK_SETTERS$$", get_quick_setters(Descr)],
        ["$$SETTERS_GETTERS$$",
         get_setter_getter(Descr)],
        [
            "$$TOKEN_REPETITION_N_GET$$",
            Lng.SOURCE_REFERENCED(Descr.repetition_get)
        ],
        [
            "$$TOKEN_REPETITION_N_SET$$",
            Lng.SOURCE_REFERENCED(Descr.repetition_set)
        ],
        ["$$UNION_MEMBERS$$", get_union_members(Descr)],
        ["$$VIRTUAL_DESTRUCTOR$$", virtual_destructor_str],
        ["$$TOKEN_CLASS_NAME_SAFE$$", Descr.class_name_safe],
    ])

    txt = blue_print(txt, helper_variable_replacements)

    if Setup.language.upper() != "C++" and Setup.token_class_only_f:
        extra_at_begin_str += local_strlen_str % (Descr.class_name_safe,
                                                  Setup.buffer_element_type,
                                                  Setup.buffer_element_type)

    txt_i = blue_print(template_i_str, [
        ["$$EXTRA_AT_BEGIN$$", extra_at_begin_str],
        ["$$EXTRA_AT_END$$", extra_at_end_str],
    ])
    txt_i = blue_print(txt_i, [
        ["$$CONSTRUCTOR$$",
         Lng.SOURCE_REFERENCED(Descr.constructor)],
        ["$$COPY$$", copy_str],
        ["$$DESTRUCTOR$$",
         Lng.SOURCE_REFERENCED(Descr.destructor)],
        ["$$FOOTER$$", Lng.SOURCE_REFERENCED(Descr.footer)],
        ["$$FUNC_TAKE_TEXT$$", take_text_str],
        [
            "$$TOKEN_CLASS_HEADER$$",
            Setup.get_file_reference(
                blackboard.token_type_definition.get_file_name())
        ],
        ["$$INCLUDE_GUARD_EXTENSION$$", include_guard_extension_str],
        ["$$NAMESPACE_OPEN$$",
         Lng.NAMESPACE_OPEN(Descr.name_space)],
        ["$$NAMESPACE_CLOSE$$",
         Lng.NAMESPACE_CLOSE(Descr.name_space)],
        [
            "$$TOKEN_REPETITION_N_GET$$",
            Lng.SOURCE_REFERENCED(Descr.repetition_get)
        ],
        [
            "$$TOKEN_REPETITION_N_SET$$",
            Lng.SOURCE_REFERENCED(Descr.repetition_set)
        ],
        ["$$TOKEN_CLASS_NAME_SAFE$$", Descr.class_name_safe],
    ])

    txt_i = blue_print(txt_i, helper_variable_replacements)

    return txt, txt_i
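blue_print is quex's template-filling helper; its implementation is not shown, but from the calls above it takes a template string and a list of [placeholder, replacement] pairs. A minimal stand-in sketch under that assumption:

def blue_print_sketch(template_str, replacements):
    # Substitute each [placeholder, replacement] pair into the template.
    for placeholder, replacement in replacements:
        template_str = template_str.replace(placeholder, replacement)
    return template_str

print(blue_print_sketch("class $$TOKEN_CLASS$$;", [["$$TOKEN_CLASS$$", "Token"]]))
# -> class Token;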
Example #25
def parse_default_token_definition():
    sub_fh = open_file_or_die(os.environ["QUEX_PATH"] 
                              + Lng["$code_base"] 
                              + Lng["$token-default-file"])
    parse_section(sub_fh)
    sub_fh.close()
Example #26
import os
import sys

sys.path.insert(0, os.environ["QUEX_PATH"])

import quex.output.cpp.configuration as configuration
from quex.blackboard import Lng, setup as Setup
from quex.engine.generator.languages.core import db
import quex.input.command_line.core as command_line
from quex.engine.misc.file_in import open_file_or_die
import quex.input.files.core as quex_file_parser

Setup.language_db = db[Setup.language]

command_line.do([
    "-i", "nothing.qx", "-o", "TestAnalyzer", "--token-policy", "single",
    "--no-include-stack"
])

# Parse default token file
fh = open_file_or_die(os.environ["QUEX_PATH"] + Lng["$code_base"] +
                      Lng["$token-default-file"])
quex_file_parser.parse_section(fh)
fh.close()

BeginOfLineSupportF = True
IndentationSupportF = False

txt = configuration.do({})

open("TestAnalyzer-configuration", "w").write(txt)
Example #27
import os
import sys

sys.path.insert(0, os.environ["QUEX_PATH"])

import quex.output.cpp.configuration   as configuration
from   quex.blackboard                 import setup as Setup
import quex.input.command_line.core    as command_line
from   quex.engine.misc.file_in        import open_file_or_die
import quex.input.files.core           as quex_file_parser


command_line.do(["-i", "nothing", "-o", "TestAnalyzer", "--token-policy", "single", "--no-include-stack"])

# Parse default token file
fh = open_file_or_die(os.environ["QUEX_PATH"] 
                      + Setup.language_db["$code_base"] 
                      + Setup.language_db["$token-default-file"])
quex_file_parser.parse_section(fh)
fh.close()

BeginOfLineSupportF = True
IndentationSupportF = False     

txt = configuration.do({})

open("TestAnalyzer-configuration", "w").write(txt)
Example #28
import os
import sys

sys.path.insert(0, os.environ["QUEX_PATH"])

import quex.output.cpp.configuration as configuration
from quex.blackboard import setup as Setup
import quex.input.command_line.core as command_line
from quex.engine.misc.file_in import open_file_or_die
import quex.input.files.core as quex_file_parser

command_line.do([
    "-i", "nothing", "-o", "TestAnalyzer", "--token-policy", "single",
    "--no-include-stack"
])

# Parse default token file
fh = open_file_or_die(os.environ["QUEX_PATH"] +
                      Setup.language_db["$code_base"] +
                      Setup.language_db["$token-default-file"])
quex_file_parser.parse_section(fh)
fh.close()

BeginOfLineSupportF = True
IndentationSupportF = False

txt = configuration.do({})

open("TestAnalyzer-configuration", "w").write(txt)
Example #29
def _do(Descr):
    # The following things must be ensured before the function is called
    assert Descr is not None
    assert Descr.__class__.__name__ == "TokenTypeDescriptor"

    ## ALLOW: Descr.get_member_db().keys() == empty

    TemplateFile = QUEX_PATH \
                   + Lng["$code_base"] \
                   + Lng["$token_template_file"]

    TemplateIFile = QUEX_PATH \
                   + Lng["$code_base"] \
                   + Lng["$token_template_i_file"]

    template_str   = open_file_or_die(TemplateFile, Mode="rb").read()
    template_i_str = open_file_or_die(TemplateIFile, Mode="rb").read()
    
    virtual_destructor_str = ""
    if Descr.open_for_derivation_f: virtual_destructor_str = "virtual "

    if Descr.copy is None:
        # Default copy operation: Plain Copy of token memory
        copy_str = "__QUEX_STD_memcpy((void*)__this, (void*)__That, sizeof(QUEX_TYPE_TOKEN));\n"
    else:
        copy_str = Lng.SOURCE_REFERENCED(Descr.copy)

    if Descr.take_text is None:
        take_text_str = "return true;\n" 
    else:
        take_text_str = Lng.SOURCE_REFERENCED(Descr.take_text)

    include_guard_extension_str = get_include_guard_extension(
                                        Lng.NAMESPACE_REFERENCE(Descr.name_space) 
                                        + "__" + Descr.class_name)

    # In case of plain 'C' the class name must incorporate the namespace (list)
    token_class_name = Descr.class_name
    if Setup.language == "C":
        token_class_name = Setup.token_class_name_safe

    converter_declaration_include,   \
    converter_implementation_include, \
    converter_string,                 \
    converter_wstring                 = __get_converter_configuration(include_guard_extension_str)

    extra_at_begin_str = lexeme_null_declaration()
    extra_at_end_str   = ""
    if Setup.token_class_only_f:
        extra_at_begin_str = QUEX_NAME_TOKEN_define_str % include_guard_extension_str \
                             + extra_at_begin_str
        extra_at_end_str   = QUEX_NAME_TOKEN_undef_str % include_guard_extension_str \
                             + extra_at_end_str

    namespace_open, namespace_close = __namespace_brackets()
    helper_variable_replacements = [
              ["$INCLUDE_CONVERTER_DECLARATION",    converter_declaration_include],
              ["$INCLUDE_CONVERTER_IMPLEMENTATION", converter_implementation_include],
              ["$CONVERTER_STRING",                 converter_string],
              ["$CONVERTER_WSTRING",                converter_wstring],
              ["$NAMESPACE_CLOSE",                  namespace_close],
              ["$NAMESPACE_OPEN",                   namespace_open],
              ["$TOKEN_CLASS",                      token_class_name],
    ]

    txt = blue_print(template_str, 
            [
              ["$$EXTRA_AT_BEGIN$$",  extra_at_begin_str],
              ["$$EXTRA_AT_END$$",    extra_at_end_str],
            ])
    txt = blue_print(txt,
             [
              ["$$BODY$$",                    Lng.SOURCE_REFERENCED(Descr.body)],
              ["$$CONSTRUCTOR$$",             Lng.SOURCE_REFERENCED(Descr.constructor)],
              ["$$COPY$$",                    copy_str],
              ["$$DESTRUCTOR$$",              Lng.SOURCE_REFERENCED(Descr.destructor)],
              ["$$DISTINCT_MEMBERS$$",        get_distinct_members(Descr)],
              ["$$FOOTER$$",                  Lng.SOURCE_REFERENCED(Descr.footer)],
              ["$$FUNC_TAKE_TEXT$$",          take_text_str],
              ["$$HEADER$$",                  Lng.SOURCE_REFERENCED(Descr.header)],
              ["$$INCLUDE_GUARD_EXTENSION$$", include_guard_extension_str],
              ["$$NAMESPACE_CLOSE$$",         Lng.NAMESPACE_CLOSE(Descr.name_space)],
              ["$$NAMESPACE_OPEN$$",          Lng.NAMESPACE_OPEN(Descr.name_space)],
              ["$$QUICK_SETTERS$$",           get_quick_setters(Descr)],
              ["$$SETTERS_GETTERS$$",         get_setter_getter(Descr)],
              ["$$TOKEN_REPETITION_N_GET$$",  Lng.SOURCE_REFERENCED(Descr.repetition_get)],
              ["$$TOKEN_REPETITION_N_SET$$",  Lng.SOURCE_REFERENCED(Descr.repetition_set)],
              ["$$UNION_MEMBERS$$",           get_union_members(Descr)],
              ["$$VIRTUAL_DESTRUCTOR$$",      virtual_destructor_str],
              ["$$TOKEN_CLASS_NAME_SAFE$$",   Descr.class_name_safe],
             ])

    txt   = blue_print(txt, helper_variable_replacements)

    if Setup.language.upper() != "C++" and Setup.token_class_only_f:
        extra_at_begin_str += local_strlen_str % (Descr.class_name_safe, Setup.buffer_element_type, Setup.buffer_element_type)

    txt_i = blue_print(template_i_str, 
            [
              ["$$EXTRA_AT_BEGIN$$",  extra_at_begin_str],
              ["$$EXTRA_AT_END$$",    extra_at_end_str],
            ])
    txt_i = blue_print(txt_i, 
                       [
                        ["$$CONSTRUCTOR$$",             Lng.SOURCE_REFERENCED(Descr.constructor)],
                        ["$$COPY$$",                    copy_str],
                        ["$$DESTRUCTOR$$",              Lng.SOURCE_REFERENCED(Descr.destructor)],
                        ["$$FOOTER$$",                  Lng.SOURCE_REFERENCED(Descr.footer)],
                        ["$$FUNC_TAKE_TEXT$$",          take_text_str],
                        ["$$TOKEN_CLASS_HEADER$$",      Setup.get_file_reference(blackboard.token_type_definition.get_file_name())],
                        ["$$INCLUDE_GUARD_EXTENSION$$", include_guard_extension_str],
                        ["$$NAMESPACE_OPEN$$",          Lng.NAMESPACE_OPEN(Descr.name_space)],
                        ["$$NAMESPACE_CLOSE$$",         Lng.NAMESPACE_CLOSE(Descr.name_space)],
                        ["$$TOKEN_REPETITION_N_GET$$",  Lng.SOURCE_REFERENCED(Descr.repetition_get)],
                        ["$$TOKEN_REPETITION_N_SET$$",  Lng.SOURCE_REFERENCED(Descr.repetition_set)],
                        ["$$TOKEN_CLASS_NAME_SAFE$$",   Descr.class_name_safe],
                       ])


    txt_i = blue_print(txt_i, helper_variable_replacements)

    return txt, txt_i
Example #30
import os
import sys

sys.path.insert(0, os.environ["QUEX_PATH"])

import quex.output.cpp.configuration   as configuration
from   quex.blackboard                 import Lng, setup as Setup
from   quex.engine.generator.languages.core        import db
import quex.input.command_line.core    as command_line
from   quex.engine.misc.file_in        import open_file_or_die
import quex.input.files.core           as quex_file_parser

Setup.language_db = db[Setup.language]

command_line.do(["-i", "nothing.qx", "-o", "TestAnalyzer", "--token-policy", "single", "--no-include-stack"])

# Parse default token file
fh = open_file_or_die(os.environ["QUEX_PATH"] 
                      + Lng["$code_base"] 
                      + Lng["$token-default-file"])
quex_file_parser.parse_section(fh)
fh.close()

BeginOfLineSupportF = True
IndentationSupportF = False     

txt = configuration.do({})

open("TestAnalyzer-configuration", "w").write(txt)