Example #1
    def str(self, max_len_first, max_len_following=0):
        """ Gives this line as a list of strings. The first line with maximum length max_len_first,
                    the following lines of maximum length max_len_following
		    (The default values max_len_following=0 means 'same as first line')
		    For comments, the maximum lengths can be ignored! 
		"""

        result = ["PARAMETER ("]
        indent = len(result[0])
        max_len_first -= indent + 1  ## prefix plus the trailing ')' must fit
        if max_len_following != 0:
            max_len_following -= indent

        names = []
        for param in self.parameters:
            names += [param[0], "=", param[1], ","]
        del names[-1]

        ## FIXME: a parameter statement may contain a really long right-hand
        ## side; catch the exception there and split the RHS if necessary

        params = tokenizer.join_tokens(names, max_len_first, max_len_following)

        result[0] += params[0]
        for line in params[1:]:
            result.append(indent * " " + line)

        result[-1] += ")"

        return [result]
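
All of these str methods lean on tokenizer.join_tokens, which is not shown on this page. As a rough illustration of the contract they assume, here is a minimal greedy stand-in; it is hypothetical, ignores the nested token groups used in later examples, and guesses that the real helper returns a plain string when called without length limits:

def join_tokens_sketch(tokens, max_len_first=0, max_len_following=0):
    """Toy stand-in for tokenizer.join_tokens: greedily pack string
    tokens into lines. max_len_first limits the first line,
    max_len_following the rest (0 means 'same as the first line')."""
    if max_len_first == 0:
        return "".join(tokens)  ## no limit given: assumed to yield one string
    if max_len_following == 0:
        max_len_following = max_len_first
    lines, limit = [""], max_len_first
    for tok in tokens:
        if len(lines[-1]) + len(tok) <= limit:
            lines[-1] += tok  ## token still fits on the current line
        else:
            lines.append(tok)  ## start a continuation line
            limit = max_len_following
    return lines

print(join_tokens_sketch(["N", "=", "10", ",", "M", "=", "20"], 5))
## -> ['N=10,', 'M=20']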
Example #2
    def str(self, max_len_first, max_len_following=0):
        """ Gives this line as a list of strings. The first line with maximum length max_len_first,
                    the following lines of maximum length max_len_following
		    (The default values max_len_following=0 means 'same as first line')
		    For comments, the maximum lengths can be ignored! 
		"""

        result = ["EQUIVALENCE "]
        indent = len(result[0])
        max_len_first -= indent + 1  ## we assume the prefix fits on the line
        if max_len_following != 0:
            max_len_following -= indent

        names = []
        for group in self.equ_lists:
            names += ["(", []]
            for var_name in group:
                names[-1] += var_name + [","]
            del names[-1][-1]

        params = tokenizer.join_tokens(names, max_len_first, max_len_following)

        result[0] += params[0]
        for line in params[1:]:
            result.append(indent * " " + line)

        return [result]
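
To make the nested token structure concrete: for EQUIVALENCE (A, B(1)), (C, D), and assuming equ_lists stores each group as a list of per-variable token lists, the loop above builds the following (note that the loop itself emits no closing ')' tokens):

## hypothetical input: two equivalence groups
equ_lists = [[["A"], ["B", "(", "1", ")"]], [["C"], ["D"]]]
names = []
for group in equ_lists:
    names += ["(", []]  ## open the group; collect its tokens in a sublist
    for var_name in group:
        names[-1] += var_name + [","]
    del names[-1][-1]  ## drop the trailing comma
print(names)
## -> ['(', ['A', ',', 'B', '(', '1', ')'], '(', ['C', ',', 'D']]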
Example #3
    def str(self, max_len_first, max_len_following=0):
        """ Gives this line as a list of strings. The first line with maximum length max_len_first,
                    the following lines of maximum length max_len_following
		    (The default values max_len_following=0 means 'same as first line')
		    For comments, the maximum lengths can be ignored! 
		"""

        result = ["COMMON /" + self.common_name + "/ "]
        indent = len(result[0])
        max_len_first -= indent  ## we assume the prefix fits on the line
        if max_len_following != 0:
            max_len_following -= indent

        ## for the variable names, build a token list like this:
        ## ["INT1", ",", "INT2", ",", ... ]
        var_names = []
        for var in self.variable_names:
            if not self.remove_dimensions:
                var_names += [var, ","]
            else:
                var_names += [tokenizer.tokenize(var)[0], ","]
        del var_names[-1]

        vars = tokenizer.join_tokens(var_names, max_len_first,
                                     max_len_following)

        result[0] += vars[0]
        for line in vars[1:]:
            result.append(indent * " " + line)

        return [result]
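
The remove_dimensions branch keeps only the first token of each stored name. A toy tokenizer (hypothetical; the project's tokenizer.tokenize is not shown) makes it clear why that strips the array bounds:

import re

def tokenize_sketch(text):
    """Hypothetical stand-in for tokenizer.tokenize: split identifiers,
    numbers and single punctuation characters."""
    return re.findall(r"[A-Za-z_]\w*|\d+|\S", text)

print(tokenize_sketch("ARR(10)"))     ## ['ARR', '(', '10', ')']
print(tokenize_sketch("ARR(10)")[0])  ## 'ARR' -- the bare name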
Example #4
    def __init__(self, tokens):
        unknown.__init__(self, tokens)
        self.is_declaration = True
        self.variables = []
        self.type = "declaration"

        ## we want to build a list like this: [['TYPE'], ['name1', 'name2', ...]]

        ## F90 syntax is easy:
        if "::" in self.tokens:
            declaration = tokenizer.split_token_list(self.tokens, ["::"])
            declaration[1] = tokenizer.split_token_list(declaration[1], [','])

        ## F77 is harder:
        else:
            end = 1
            ## skip a '*<len>' or '*(<len>)' length/kind suffix, e.g. REAL*8
            if self.tokens[end] == '*':
                if self.tokens[end + 1] == '(':
                    end += 3  ## '*', '(' and the nested group
                else:
                    end += 2  ## '*' and the length/kind token
            ## skip a parenthesized type suffix, e.g. CHARACTER(LEN=10)
            if self.tokens[end] == '(':
                end += 2  ## '(' and the nested group
            declaration = [[], []]
            declaration[0] = self.tokens[:end]
            declaration[1] = tokenizer.split_token_list(
                self.tokens[end:], [','])

        self.variable_type = tokenizer.join_tokens(declaration[0])

        ## now create a variable object for each declared name
        for decl in declaration[1]:
            self.variables.append(variable(decl, declaration[0]))
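
Concrete token lists make the F77 index arithmetic easier to follow. The layouts below are guesses about the tokenizer's output; judging by the slices used elsewhere in these examples, a parenthesized group seems to arrive as '(' plus one nested list carrying the closing parenthesis:

examples = {
    "REAL X, Y": ['REAL', 'X', ',', 'Y'],                           ## end -> 1
    "REAL*8 X": ['REAL', '*', '8', 'X'],                            ## end -> 3
    "CHARACTER*(10) S": ['CHARACTER', '*', '(', ['10', ')'], 'S'],  ## end -> 4
}
for stmt, tokens in examples.items():
    end = 1  ## same walk as the F77 branch above
    if tokens[end] == '*':
        end += 3 if tokens[end + 1] == '(' else 2
    if tokens[end] == '(':
        end += 2
    print(stmt, "->", tokens[:end], "+", tokens[end:])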
Example #5
    def __init__(self, tokens):
        unknown.__init__(self, tokens)
        self.is_declaration = True
        self.type = "parameter"

        if tokens[0].upper() != "PARAMETER" or tokens[1] != "(":
            raise error.ParsingError("Invalid PARAMETER statement")

        name_list = tokenizer.split_token_list(tokens[2], [","])
        self.parameters = [(i[0], tokenizer.join_tokens(i[2:]))
                           for i in name_list]
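
A rough sketch of this parsing step, using a hypothetical stand-in for tokenizer.split_token_list and an assumed token shape for PARAMETER (N=10, M=20) (how the closing ')' is tokenized is glossed over here):

def split_token_list_sketch(tokens, separators):
    """Toy stand-in for tokenizer.split_token_list: split a flat
    token list on any of the given separator tokens."""
    groups = [[]]
    for tok in tokens:
        if tok in separators:
            groups.append([])
        else:
            groups[-1].append(tok)
    return groups

body = ['N', '=', '10', ',', 'M', '=', '20']  ## assumed tokens[2]
name_list = split_token_list_sketch(body, [","])
parameters = [(i[0], "".join(i[2:])) for i in name_list]
print(parameters)  ## -> [('N', '10'), ('M', '20')]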
Example #6
    def str(self, max_len_first, max_len_following=0):
        """ Gives this line as a list of strings. The first line with maximum length max_len_first,
                    the following lines of maximum length max_len_following
		    (The default values max_len_following=0 means 'same as first line')
		    For comments, the maximum lengths can be ignored! 
		"""

        ## F77 style of variable declarations

        if self.variable_type in ["", "CHARACTER"]:
            types = []
            for var in self.variables:
                if var.type == "CHARACTER":
                    t = "CHARACTER*" + str(var.char_len)
                else:
                    t = var.type
                if t not in types:
                    types.append(t)
        else:
            types = [self.variable_type]

        result = []

        for t in types:
            result.append([t + " "])
            indent = len(result[-1][-1])
            mlen_first = max_len_first - indent  ## we assume the prefix fits
            if max_len_following != 0:
                mlen_following = max_len_following - indent
            else:
                mlen_following = max_len_following  ## i.e. stays 0

            ## for the variable names, build a token list like this:
            ## ["INT1", ",", "INT2(10)", ",", ...]
            variable_names = []
            for var in self.variables:
                if t.startswith("CHARACTER") and var.type == "CHARACTER":
                    if t[t.find('*') + 1:] == var.char_len:
                        variable_names += [var.name + var.dimension, ","]
                elif var.type == t:
                    variable_names += [var.name + var.dimension, ","]

            del variable_names[-1]

            vars = tokenizer.join_tokens(variable_names, mlen_first,
                                         mlen_following)

            result[-1][-1] += vars[0]
            for line in vars[1:]:
                result[-1].append(indent * " " + line)

        return result
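
To illustrate the type-collection pass with stand-in variable records (names and attributes invented for the example): one output declaration is produced per distinct type, and CHARACTER variables of equal length end up grouped on the same line.

class Var:
    def __init__(self, name, vtype, char_len=None):
        self.name, self.type, self.char_len = name, vtype, char_len

variables = [Var('X', 'REAL*8'), Var('S', 'CHARACTER', '80'),
             Var('T', 'CHARACTER', '80')]
types = []
for var in variables:
    t = "CHARACTER*" + var.char_len if var.type == "CHARACTER" else var.type
    if t not in types:
        types.append(t)
print(types)  ## -> ['REAL*8', 'CHARACTER*80']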
Example #7
    def __init__(self, tokens):
        unknown.__init__(self, tokens)
        self.is_declaration = True
        self.type = "common block"
        self.remove_dimensions = False

        if tokens[0].upper() != "COMMON" or tokens[1] != "/" or tokens[3] != "/":
            raise error.ParsingError("Invalid COMMON statement")
        if "/" in tokens[4:]:
            raise error.NotImplementedError(
                "Two common blocks declared in one COMMON statement", False)

        self.common_name = tokens[2]

        name_list = tokenizer.split_token_list(tokens[4:], [","])
        self.variable_names = [tokenizer.join_tokens(i) for i in name_list]
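
An assumed token shape for COMMON /BLK/ A, B(10); the nested list holding the array bounds is a guess about the tokenizer's grouping:

tokens = ['COMMON', '/', 'BLK', '/', 'A', ',', 'B', '(', ['10', ')']]
common_name = tokens[2]  ## 'BLK'
## splitting tokens[4:] on ',' gives [['A'], ['B', '(', ['10', ')']]],
## which join_tokens would render as the names ['A', 'B(10)'].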
Example #8
    def str(self, max_len_first, max_len_following=0):
        """ Gives this line as a of lists of strings. Each list is one 'program line'
		    (that means: actually belongs to one line, but is maybe split over
		    multiple line that have to be joined by appropriate continuation marks).
		    The first line with maximum length max_len_first,
                    the following lines of maximum length max_len_following
		    (The default values max_len_following=0 means 'same as first line')
		    For comments, the maximum lengths can be ignored! 
		"""

        ## This version indents continuation lines by 1 space
        if max_len_following != 0:
            max_len_following -= 1

        result = tokenizer.join_tokens(self.tokens, max_len_first,
                                       max_len_following)
        result[1:] = [(" " + r) for r in result[1:]]

        return [result]
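
The effect of the one-space continuation indent, assuming join_tokens wrapped a statement into three raw lines:

result = ["CALL FOO(A,", "B,", "C)"]
result[1:] = [(" " + r) for r in result[1:]]
print(result)  ## -> ['CALL FOO(A,', ' B,', ' C)']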
Example #9
def new_statement(tokens):
    """ generic interface to this module:
	    Take a list of tokens and returns a suitable statement-class
	"""

    ## a declaration
    if tokens[0].upper() in [
            "INTEGER", "REAL", "LOGICAL", "DOUBLE PRECISION", "CHARACTER"
    ]:
        return declaration(tokens)
    elif tokens[0].upper() == "COMMON":
        return common_block(tokens)
    elif tokens[0].upper() == "PARAMETER":
        return parameter(tokens)
    elif tokens[0].upper() == "EQUIVALENCE":
        return equivalence(tokens)
    else:
        s = tokenizer.join_tokens(tokens)
        raise error.NotImplementedError("Unknown statement: " + s)
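
Typical dispatches, with illustrative token lists (note that 'DOUBLE PRECISION' is assumed to arrive as a single token, since it is matched as one list entry):

## new_statement(['INTEGER', 'I'])                  -> declaration
## new_statement(['DOUBLE PRECISION', 'X'])         -> declaration
## new_statement(['COMMON', '/', 'BLK', '/', 'A'])  -> common_block
## new_statement(['PARAMETER', '(', ...])           -> parameter
## new_statement(['STOP'])                          -> raises
##                                  error.NotImplementedError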
Example #10
    def __init__(self, decl_token_list, type_token_list=[]):
        """ decl_token_list:    the list of tokens of the (implicit or explicit) declaration,
                                        beginnig with the variable name
                    type_token_list:    the token list declaring the type in an explicit declaration
                                        ( for example INTEGER, REAL*8 or 'INTEGER, DIMENSION(10)'
                                        if empty, implicit typing [IMPLICIT REAL*8 (A-H,O-Z)] is assumed
                """

        self.dimension = ""
        self.type = ""
        implicit = (type_token_list == [])

        ## first process the declaration part
        if not implicit:
            type_decl = tokenizer.split_token_list(type_token_list, [','])

            ## for now we only consider the type name and a DIMENSION clause
            ## type name:
            self.type = tokenizer.join_tokens(type_decl[0])

            if type_decl[0][0].upper() == "CHARACTER":
                self.char_len = '1'
                self.type = "CHARACTER"
                if "*" in type_decl[0]:
                    i = type_decl[0].index("*")
                    if type_decl[0][i + 1] == '(':
                        self.char_len = tokenizer.join_tokens(
                            type_decl[0][i + 1:i + 3])
                    else:
                        self.char_len = type_decl[0][i + 1]

                elif len(type_decl[0]) > 1 and type_decl[0][1] == "(":
                    if type_decl[0][2][0:2] == ["LEN", "="]:
                        self.char_len = type_decl[0][2][2]

            for i in range(1, len(type_decl)):
                if type_decl[i][0].upper() == "DIMENSION":
                    if len(type_decl[i]) < 3 or type_decl[i][1] != '(':
                        raise error.ParsingError("Invalid DIMENSION statement")
                    else:
                        self.dimension = tokenizer.join_tokens(
                            type_decl[i][1:3])
                if type_decl[0][0].upper() == "CHARACTER" and \
                       type_decl[i][0].upper() == "LEN" and type_decl[i][1] == "=":
                    self.char_len = type_decl[i][2]

        ## now process the variable name
        self.name = decl_token_list[0]

        ## is there a dimension specification following the variable name?
        ## (if so, it overrides a possible previous DIMENSION clause)
        if len(decl_token_list) >= 3 and decl_token_list[1] == '(':
            self.dimension = tokenizer.join_tokens(decl_token_list[1:3])

        ## same for the character length
        if self.type == 'CHARACTER' and "*" in decl_token_list:
            i = decl_token_list.index("*")
            if decl_token_list[i + 1] == '(':
                self.char_len = tokenizer.join_tokens(
                    decl_token_list[i + 1:i + 3])
            else:
                self.char_len = decl_token_list[i + 1]

        ## if needed, do implicit typing
        if implicit:
            if self.name[0].upper() in "IJKLMN":
                self.type = "INTEGER"
            else:
                self.type = "REAL*8"