Example #1
    def _readstream(self, nml_file, nml_patch_in=None):
        """Parse an input stream containing a Fortran namelist."""
        nml_patch = nml_patch_in if nml_patch_in is not None else Namelist()

        tokenizer = Tokenizer()
        tokenizer.comment_tokens = self.comment_tokens
        f90lex = []
        for line in nml_file:
            toks = tokenizer.parse(line)
            while tokenizer.prior_delim:
                new_toks = tokenizer.parse(next(nml_file))

                # Skip empty lines
                if not new_toks:
                    continue

                # The tokenizer always pre-tokenizes the whitespace (leftover
                # behaviour from Fortran source parsing) so this must be added
                # manually.
                if new_toks[0].isspace():
                    toks[-1] += new_toks.pop(0)

                # Append the rest of the string (if present)
                if new_toks:
                    toks[-1] += new_toks[0]

                    # Attach the rest of the tokens
                    toks.extend(new_toks[1:])

            toks.append('\n')
            f90lex.extend(toks)

        self.tokens = iter(f90lex)

        nmls = Namelist()

        # Attempt to get first token; abort on empty file
        try:
            self._update_tokens(write_token=False)
        except StopIteration:
            return nmls

        # TODO: Replace "while True" with an update_token() iterator
        while True:
            try:
                # Check for classic group terminator
                if self.token == 'end':
                    self._update_tokens()

                # Ignore tokens outside of namelist groups
                while self.token not in ('&', '$'):
                    self._update_tokens()

            except StopIteration:
                break

            # Create the next namelist
            try:
                self._update_tokens()
            except StopIteration:
                raise ValueError('End-of-file after namelist group token `&`.')
            g_name = self.token

            g_vars = Namelist()
            v_name = None

            # TODO: Edit `Namelist` to support case-insensitive `get` calls
            grp_patch = nml_patch.get(g_name.lower(), Namelist())

            # Populate the namelist group
            while g_name:

                if self.token not in ('=', '%', '('):
                    try:
                        self._update_tokens()
                    except StopIteration:
                        raise ValueError(
                            'End-of-file before end of namelist group: \'&{}\''
                            ''.format(g_name))

                # Set the next active variable
                if self.token in ('=', '(', '%'):

                    v_name, v_values = self._parse_variable(
                        g_vars, patch_nml=grp_patch)

                    if v_name in g_vars:
                        v_prior_values = g_vars[v_name]
                        v_values = merge_values(v_prior_values, v_values)

                    g_vars[v_name] = v_values

                    # Deselect variable
                    v_name = None
                    v_values = []

                # Finalise namelist group
                if self.token in ('/', '&', '$'):

                    # Append any remaining patched variables
                    for v_name, v_val in grp_patch.items():
                        g_vars[v_name] = v_val
                        v_strs = nmls._var_strings(v_name, v_val)
                        for v_str in v_strs:
                            self.pfile.write('{0}{1}\n'.format(
                                nml_patch.indent, v_str))

                    # Append the grouplist to the namelist
                    if g_name in nmls:
                        g_update = nmls[g_name]

                        # Update to list of groups
                        if not isinstance(g_update, list):
                            g_update = [g_update]

                        g_update.append(g_vars)

                    else:
                        g_update = g_vars

                    nmls[g_name] = g_update

                    # Reset state
                    g_name, g_vars = None, None

            try:
                self._update_tokens()
            except StopIteration:
                break

        return nmls
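
The `_readstream` method above is internal: in normal use it is reached through the public `f90nml.read` function or a `Parser` instance. A minimal usage sketch follows; the file name `config.nml` and the group/variable names `physics` and `dt` are placeholders, not taken from the example.

    import f90nml

    # Parse a namelist file into a dict-like Namelist object.
    nml = f90nml.read('config.nml')        # placeholder file name

    # Groups and variables behave like nested dictionaries.
    print(nml['physics']['dt'])            # placeholder group/variable names

    # The same parse can be driven through a Parser instance, which is
    # what ultimately calls _readstream internally.
    parser = f90nml.Parser()
    nml = parser.read('config.nml')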
Example #2
File: parser.py  Project: jamesp/f90nml
    def readstream(self, nml_file, nml_patch):
        """Parse an input stream containing a Fortran namelist."""

        tokenizer = Tokenizer()
        f90lex = []
        for line in nml_file:
            toks = tokenizer.parse(line)
            toks.append('\n')
            f90lex.extend(toks)

        self.tokens = iter(f90lex)

        nmls = Namelist()

        # Attempt to get first token; abort on empty file
        try:
            self.update_tokens(write_token=False)
        except StopIteration:
            return nmls

        # TODO: Replace "while True" with an update_token() iterator
        while True:
            try:
                # Check for classic group terminator
                if self.token == 'end':
                    self.update_tokens()

                # Ignore tokens outside of namelist groups
                while self.token not in ('&', '$'):
                    self.update_tokens()

            except StopIteration:
                break

            # Create the next namelist
            self.update_tokens()
            g_name = self.token

            g_vars = Namelist()
            v_name = None

            # TODO: Edit `Namelist` to support case-insensitive `get` calls
            grp_patch = nml_patch.get(g_name.lower(), {})

            # Populate the namelist group
            while g_name:

                if self.token not in ('=', '%', '('):
                    self.update_tokens()

                # Set the next active variable
                if self.token in ('=', '(', '%'):

                    v_name, v_values = self.parse_variable(g_vars,
                                                           patch_nml=grp_patch)

                    if v_name in g_vars:
                        v_prior_values = g_vars[v_name]
                        v_values = merge_values(v_prior_values, v_values)

                    g_vars[v_name] = v_values

                    # Deselect variable
                    v_name = None
                    v_values = []

                # Finalise namelist group
                if self.token in ('/', '&', '$'):

                    # Append any remaining patched variables
                    for v_name, v_val in grp_patch.items():
                        g_vars[v_name] = v_val
                        v_strs = nmls.var_strings(v_name, v_val)
                        for v_str in v_strs:
                            self.pfile.write('    {0}\n'.format(v_str))

                    # Append the grouplist to the namelist
                    if g_name in nmls:
                        g_update = nmls[g_name]

                        # Update to list of groups
                        if not isinstance(g_update, list):
                            g_update = [g_update]

                        g_update.append(g_vars)

                    else:
                        g_update = g_vars

                    nmls[g_name] = g_update

                    # Reset state
                    g_name, g_vars = None, None

            try:
                self.update_tokens()
            except StopIteration:
                break

        return nmls
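
This revision receives a plain `nml_patch` dict and writes any patched variables to `self.pfile`. In practice that workflow is driven through the public `f90nml.patch` function, roughly as sketched below; the file names and the patch contents are placeholders.

    import f90nml

    # Values in the patch replace the matching variables in the template;
    # everything else in the template file is written through unchanged.
    patch = {'physics': {'dt': 0.5}}       # placeholder group/variable

    f90nml.patch('template.nml', patch, 'patched.nml')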
Example #3
    def _readstream(self, nml_file, nml_patch_in=None):
        """Parse an input stream containing a Fortran namelist."""
        nml_patch = nml_patch_in if nml_patch_in is not None else Namelist()

        tokenizer = Tokenizer()
        f90lex = []
        for line in nml_file:
            toks = tokenizer.parse(line)
            while tokenizer.prior_delim:
                new_toks = tokenizer.parse(next(nml_file))

                # Skip empty lines
                if not new_toks:
                    continue

                # The tokenizer always pre-tokenizes the whitespace (leftover
                # behaviour from Fortran source parsing) so this must be added
                # manually.
                if new_toks[0].isspace():
                    toks[-1] += new_toks.pop(0)

                # Append the rest of the string (if present)
                if new_toks:
                    toks[-1] += new_toks[0]

                    # Attach the rest of the tokens
                    toks.extend(new_toks[1:])

            toks.append('\n')
            f90lex.extend(toks)

        self.tokens = iter(f90lex)

        nmls = Namelist()

        # Attempt to get first token; abort on empty file
        try:
            self._update_tokens(write_token=False)
        except StopIteration:
            return nmls

        # TODO: Replace "while True" with an update_token() iterator
        while True:
            try:
                # Check for classic group terminator
                if self.token == 'end':
                    self._update_tokens()

                # Ignore tokens outside of namelist groups
                while self.token not in ('&', '$'):
                    self._update_tokens()

            except StopIteration:
                break

            # Create the next namelist
            self._update_tokens()
            g_name = self.token

            g_vars = Namelist()
            v_name = None

            # TODO: Edit `Namelist` to support case-insensitive `get` calls
            grp_patch = nml_patch.get(g_name.lower(), Namelist())

            # Populate the namelist group
            while g_name:

                if self.token not in ('=', '%', '('):
                    self._update_tokens()

                # Set the next active variable
                if self.token in ('=', '(', '%'):

                    v_name, v_values = self._parse_variable(
                        g_vars,
                        patch_nml=grp_patch
                    )

                    if v_name in g_vars:
                        v_prior_values = g_vars[v_name]
                        v_values = merge_values(v_prior_values, v_values)

                    g_vars[v_name] = v_values

                    # Deselect variable
                    v_name = None
                    v_values = []

                # Finalise namelist group
                if self.token in ('/', '&', '$'):

                    # Append any remaining patched variables
                    for v_name, v_val in grp_patch.items():
                        g_vars[v_name] = v_val
                        v_strs = nmls._var_strings(v_name, v_val)
                        for v_str in v_strs:
                            self.pfile.write(
                                '{0}{1}\n'.format(nml_patch.indent, v_str)
                            )

                    # Append the grouplist to the namelist
                    if g_name in nmls:
                        g_update = nmls[g_name]

                        # Update to list of groups
                        if not isinstance(g_update, list):
                            g_update = [g_update]

                        g_update.append(g_vars)

                    else:
                        g_update = g_vars

                    nmls[g_name] = g_update

                    # Reset state
                    g_name, g_vars = None, None

            try:
                self._update_tokens()
            except StopIteration:
                break

        return nmls
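
Compared with Example #2, this revision adds the `tokenizer.prior_delim` loop, which stitches together quoted strings that continue across line breaks. A small sketch of the kind of input this handles, assuming `Parser.read` accepts a file-like object as well as a path (the exact form of the rejoined string may vary between f90nml versions):

    import io
    import f90nml

    # A quoted value broken across two lines; prior_delim tells the
    # tokenizer that the string is still open when the line ends.
    src = io.StringIO(
        "&config\n"
        "    note = 'first half of the text\n"
        "second half of the text'\n"
        "/\n"
    )

    nml = f90nml.Parser().read(src)
    print(nml['config']['note'])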
Example #4
    def readstream(self, nml_file, nml_patch):
        """Parse an input stream containing a Fortran namelist."""

        #f90lex = shlex.shlex(nml_file)
        #f90lex.whitespace = ''
        #f90lex.wordchars += '.-+'       # Include floating point tokens
        #if nml_patch:
        #    f90lex.commenters = ''
        #else:
        #    f90lex.commenters = self.comment_tokens

        ##print(list(f90lex))
        #self.tokens = iter(f90lex)

        tokenizer = Tokenizer()
        f90lex = []
        for line in nml_file:
            toks = tokenizer.parse(line)
            toks.append('\n')
            f90lex.extend(toks)

        # Debugging output left in this revision: dumps the token stream.
        print(f90lex)
        self.tokens = iter(f90lex)

        nmls = Namelist()

        # Attempt to get first token; abort on empty file
        try:
            self.update_tokens(write_token=False)
        except StopIteration:
            return nmls

        # TODO: Replace "while True" with an update_token() iterator
        while True:
            try:
                # Check for classic group terminator
                if self.token == 'end':
                    self.update_tokens()

                # Ignore tokens outside of namelist groups
                while self.token not in ('&', '$'):
                    self.update_tokens()

            except StopIteration:
                break

            # Create the next namelist
            self.update_tokens()
            g_name = self.token

            g_vars = Namelist()
            v_name = None

            # TODO: Edit `Namelist` to support case-insensitive `get` calls
            grp_patch = nml_patch.get(g_name.lower(), {})

            # Populate the namelist group
            while g_name:

                if self.token not in ('=', '%', '('):
                    self.update_tokens()

                # Set the next active variable
                if self.token in ('=', '(', '%'):

                    v_name, v_values = self.parse_variable(g_vars,
                                                           patch_nml=grp_patch)

                    if v_name in g_vars:
                        v_prior_values = g_vars[v_name]
                        v_values = merge_values(v_prior_values, v_values)

                    g_vars[v_name] = v_values

                    # Deselect variable
                    v_name = None
                    v_values = []

                # Finalise namelist group
                if self.token in ('/', '&', '$'):

                    # Append any remaining patched variables
                    for v_name, v_val in grp_patch.items():
                        g_vars[v_name] = v_val
                        v_strs = nmls.var_strings(v_name, v_val)
                        for v_str in v_strs:
                            self.pfile.write('    {0}\n'.format(v_str))

                    # Append the grouplist to the namelist
                    if g_name in nmls:
                        g_update = nmls[g_name]

                        # Update to list of groups
                        if not isinstance(g_update, list):
                            g_update = [g_update]

                        g_update.append(g_vars)

                    else:
                        g_update = g_vars

                    nmls[g_name] = g_update

                    # Reset state
                    g_name, g_vars = None, None

            try:
                self.update_tokens()
            except StopIteration:
                break

        return nmls
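
The commented-out block at the top shows the earlier `shlex`-based lexer that this revision replaces with the project's own `Tokenizer`, and the `print(f90lex)` call dumps the assembled token stream. A sketch of inspecting tokens directly, assuming the class is importable from `f90nml.tokenizer` as in the current package layout:

    from f90nml.tokenizer import Tokenizer

    tokenizer = Tokenizer()

    # parse() returns the tokens of one source line, whitespace included,
    # mirroring the per-line loop in readstream().
    for line in ["&config\n", "    x = 1, 2, 3\n", "/\n"]:
        print(tokenizer.parse(line))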