Example #1
    def _parse_proxies(tok: Tokenizer):
        """Parse the proxy block, yielding (name, options) pairs."""

        for token, proxy_name in tok.skipping_newlines():
            if token is Tok.BRACE_CLOSE:
                return
            elif token is not Tok.STRING:
                raise tok.error(token)

            opts = {}

            tok.expect(Tok.BRACE_OPEN)  # Start of proxy values.

            # Looking for key-value parameters for a proxy
            for token, param_name in tok.skipping_newlines():
                if token is Tok.BRACE_CLOSE:
                    break
                elif token is not Tok.STRING:
                    raise tok.error(token)

                token, param_value = tok()

                if token is Tok.STRING:
                    opts[param_name.casefold()] = param_value
                else:
                    raise tok.error(token)
            else:
                raise tok.error('EOF while reading options for "{}" proxy', proxy_name)
            yield proxy_name, opts
        else:
            raise tok.error('Proxy block not closed!')
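A minimal driver for the generator above (a hedged sketch: it assumes `Tok` is srctools' `Token` enum, that `_parse_proxies` is callable directly, and that the caller has already consumed the opening brace of the proxies block):

    from srctools.tokenizer import Token as Tok, Tokenizer

    SAMPLE = '''
        AnimatedTexture
        {
            animatedTextureVar $basetexture
            animatedTextureFrameRate 15
        }
    }
    '''  # The final '}' closes the outer proxies block.

    tok = Tokenizer(SAMPLE, 'example.vmt')
    for name, opts in _parse_proxies(tok):
        print(name, opts)
    # -> AnimatedTexture {'animatedtexturevar': '$basetexture', ...}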
Example #2
    def _parse_file(self, filesys: FileSystem, file: File):
        """Parse one file (recursively if needed)."""

        if file in self._parse_list:
            return

        self._parse_list.append(file)

        with filesys, file.open_str() as f:
            tokeniser = Tokenizer(
                f,
                filename=file.path,
                error=FGDParseError,
                string_bracket=False,
            )
            for token, token_value in tokeniser:
                # The only things at top-level would be bare strings, and empty lines.
                if token is Token.NEWLINE:
                    continue
                if token is not Token.STRING:
                    raise tokeniser.error(token)
                token_value = token_value.casefold()

                if token_value == '@include':
                    include_file = tokeniser.expect(Token.STRING)
                    if not include_file.endswith('.fgd'):
                        include_file += '.fgd'

                    try:
                        include = filesys[include_file]
                    except KeyError:
                        raise FileNotFoundError(include_file)
                    self._parse_file(filesys, include)

                elif token_value == '@mapsize':
                    # Max/min map size definition
                    mapsize_args = tokeniser.expect(Token.PAREN_ARGS)
                    try:
                        min_size, max_size = mapsize_args.split(',')
                        self.map_size_min = int(min_size.strip())
                        self.map_size_max = int(max_size.strip())
                    except ValueError:
                        raise tokeniser.error(
                            'Invalid @MapSize: ({})',
                            mapsize_args,
                        )
                # Entity definition...
                elif token_value[:1] == '@':
                    try:
                        ent_type = EntityTypes(token_value[1:])
                    except ValueError:
                        raise tokeniser.error(
                            'Invalid Entity type "{}"!',
                            token_value[1:],
                        )
                    EntityDef.parse(self, tokeniser, ent_type)
                else:
                    raise tokeniser.error('Bad keyword {!r}', token_value)
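A hypothetical top-level call (RawFileSystem and the FGD class are assumed from srctools; the paths are illustrative):

    from srctools.filesys import RawFileSystem

    fgd = FGD()
    fs = RawFileSystem('path/to/fgd_folder')
    fgd._parse_file(fs, fs['base.fgd'])  # Follows @include lines recursively.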
Example #3
def read_colon_list(tok: Tokenizer, had_colon=False):
    """Read strings seperated by colons, up to the end of the line.
    
    The token found at the end is returned.
    """
    strings = []
    ready_for_string = had_colon  # Did we have a colon before?
    token = Token.EOF
    for token, tok_value in tok:
        if token is Token.STRING:
            if not ready_for_string:
                raise tok.error('Too many strings ({!r})!', tok_value)
            strings.append(tok_value)
            ready_for_string = False
        elif token is Token.COLON:
            if ready_for_string:
                # ': :' means to have an empty string there.
                strings.append('')
            ready_for_string = True
        elif token is Token.PLUS:
            if ready_for_string or not strings:
                raise tok.error('"+" without a string before it!')
            strings[-1] += tok.expect(Token.STRING)
        elif ready_for_string and token is Token.NEWLINE:
            continue  # skip over this in particular..
        else:
            if ready_for_string:
                raise tok.error(token)
            return strings, token
    else:
        raise tok.error(token)
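For instance (a sketch, assuming srctools' Tokenizer and Token), the tail of an FGD line such as `: "Name" : "Default value"` yields two strings plus the terminating token, so the caller can keep going. Note the leading colon: a bare string with no colon before it is rejected unless had_colon was passed.

    from srctools.tokenizer import Token, Tokenizer

    tok = Tokenizer(' : "Name" : "Default value"\n', 'example.fgd')
    strings, end_token = read_colon_list(tok)
    # strings == ['Name', 'Default value'], end_token is Token.NEWLINE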
Example #4
    def parse(
        file_contents: Union[str, Iterator[str]],
        filename='',
        flags: Dict[str, bool]=EmptyMapping,
    ) -> "Property":
        """Returns a Property tree parsed from given text.

        filename, if set, should be the source of the text for debug purposes.
        file_contents should be an iterable of strings or a single string.
        flags should be a mapping for additional flags to accept
        (which overrides defaults).
        """
        # The block we are currently adding to.

        # The special name 'None' marks it as the root property, which
        # just outputs its children when exported. This way we can handle
        # multiple root blocks in the file, while still returning a single
        # Property object which has all the methods.
        cur_block = Property(None, [])

        # A queue of the properties we are currently in (outside to inside).
        open_properties = [cur_block]

        # Grab a reference to the token values, so we avoid global lookups.
        STRING = Token.STRING
        PROP_FLAG = Token.PROP_FLAG
        NEWLINE = Token.NEWLINE
        BRACE_OPEN = Token.BRACE_OPEN
        BRACE_CLOSE = Token.BRACE_CLOSE

        tokenizer = Tokenizer(
            file_contents,
            filename,
            KeyValError,
            string_bracket=True,
        )

        # Do we require a block to be opened next? ("name"\n must have { next.)
        requires_block = False
        # Are we permitted to replace the last property with a flagged version of the same?
        can_flag_replace = False

        for token_type, token_value in tokenizer:
            if token_type is BRACE_OPEN:
                # Open a new block - make sure the last token was a name..
                if not requires_block:
                    raise tokenizer.error(
                        'Property cannot have sub-section if it already '
                        'has an in-line value.',
                    )
                requires_block = can_flag_replace = False
                cur_block = cur_block[-1]
                cur_block.value = []
                open_properties.append(cur_block)
                continue
            # Something else, but followed by '{'
            elif requires_block and token_type is not NEWLINE:
                raise tokenizer.error(
                    "Block opening ('{{') required!",
                )

            if token_type is NEWLINE:
                continue
            if token_type is STRING:   # "string"
                # We need to check the next token to figure out what kind of
                # prop it is.
                prop_type, prop_value = tokenizer()

                # It's a block header followed by a flag. ("name" [flag])
                if prop_type is PROP_FLAG:
                    # That must be the end of the line..
                    tokenizer.expect(NEWLINE)
                    requires_block = True
                    if _read_flag(flags, prop_value):
                        # Special function - if the last prop was a
                        # keyvalue with this name, replace it instead.
                        if (
                            can_flag_replace and
                            cur_block.value[-1].real_name == token_value and
                            cur_block.value[-1].has_children()
                        ):
                            cur_block.value[-1] = Property(token_value, [])
                        else:
                            cur_block.append(Property(token_value, []))
                        # Can't do twice in a row
                        can_flag_replace = False

                elif prop_type is NEWLINE:
                    # It's a block...
                    requires_block = True
                    can_flag_replace = False
                    cur_block.append(Property(token_value, []))
                    continue
                elif prop_type is STRING:
                    # A value..
                    if requires_block:
                        raise tokenizer.error('Keyvalue split across lines!')
                    requires_block = False

                    keyvalue = Property(token_value, prop_value)

                    # Check for flags.
                    flag_token, flag_val = tokenizer()
                    if flag_token is PROP_FLAG:
                        # Should be the end of the line here.
                        tokenizer.expect(NEWLINE)
                        if _read_flag(flags, flag_val):
                            # Special function - if the last prop was a
                            # keyvalue with this name, replace it instead.
                            if (
                                can_flag_replace and
                                cur_block.value[-1].real_name == token_value and
                                not cur_block.value[-1].has_children()
                            ):
                                cur_block.value[-1] = keyvalue
                            else:
                                cur_block.append(keyvalue)
                            # Can't do twice in a row
                            can_flag_replace = False
                    elif flag_token is NEWLINE:
                        # Normal, unconditionally add
                        cur_block.append(keyvalue)
                        can_flag_replace = True
                    # Anything else after the value is an error.
                    else:
                        raise tokenizer.error(flag_token)
                    continue
            elif token_type is BRACE_CLOSE:
                # Move back a block
                open_properties.pop()
                try:
                    cur_block = open_properties[-1]
                except IndexError:
                    # No open blocks!
                    raise tokenizer.error(
                        'Too many closing brackets.',
                    )
                # For replacing the block.
                can_flag_replace = True
            else:
                raise tokenizer.error(token_type)

        if requires_block:
            raise KeyValError(
                "Block opening ('{') required, but hit EOF!",
                tokenizer.filename,
                line=None,
            )
        
        if len(open_properties) > 1:
            raise KeyValError(
                'End of text reached with remaining open sections.',
                tokenizer.filename,
                line=None,
            )
        return open_properties[0]
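Usage sketch (assuming this is srctools' Property.parse; the [flag] syntax comes from the string_bracket=True tokenizer option above, and the flag name is illustrative):

    text = '''
    "Root"
        {
        "key" "generic"
        "key" "hl2-only" [hl2]
        }
    '''
    root = Property.parse(text, 'example.vdf', flags={'hl2': True})
    # The flagged "key" line replaces the earlier one, since the [hl2]
    # flag evaluated to true and the previous property shares its name.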
Example #5
    def parse(cls, data: Iterable[str], filename: str=''):
        """Parse a VMT from the file. 
        
        """
        # Block escapes, so "files\test\tex" doesn't have a tab in it.
        tok = Tokenizer(data, filename, string_bracket=True, allow_escapes=False)
        
        # First look for the shader name -
        # which must be the first string
        # in the file.
        shader_name = None
        for token, shader_name in tok:
            if token is Tok.NEWLINE:
                continue
            elif token is Tok.STRING:
                break
            else:
                raise tok.error(token)

        if not shader_name:
            raise tok.error("No shader name!")

        # Open the parameters body.
        tok.expect(Tok.BRACE_OPEN)
        
        params = {}
        proxies = []
        
        # Look for parameter names
        for token, param_name in tok:
            if token is Tok.NEWLINE:
                continue
            # End of body.
            elif token is Tok.BRACE_CLOSE:
                break
            elif token is Tok.PROP_FLAG:
                tok.expect(Tok.NEWLINE)
                continue
            elif token is not Tok.STRING:
                raise tok.error(token)
            token, param_value = tok()
            
            if token is Tok.STRING:
                # We have the value.
                pass
            elif token is Tok.NEWLINE:
                # Name by itself: '%compilenodraw' etc...
                param_value = None
                # We need to check there's a newline after that - for proxies, 
                # or errors.
                token, ignored = tok()
                while token is Tok.NEWLINE:
                    token, ignored = tok()

                if token is Tok.BRACE_OPEN:
                    if param_name.casefold() == "proxies":
                        proxies.extend(cls._parse_block(tok, 'Proxy'))
                    else:
                        params[
                            param_name.casefold()
                        ] = cls._parse_block(tok, param_name)

                    continue  # Don't replace with None.
                elif token is Tok.BRACE_CLOSE:
                    # End of us after single name.
                    params[param_name.casefold()] = param_value
                    break
                else:
                    raise tok.error(token)
            else:
                raise tok.error(token)
                
            params[param_name.casefold()] = param_value
             
        # We expect nothing else now.
        tok.expect(Tok.EOF)
        
        return cls(shader_name, params, proxies)
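A sketch of a call (assuming the enclosing class is srctools' Material; a name-only parameter such as "%compilenodraw" is stored with a None value, per the code above):

    VMT_TEXT = '''
    "LightmappedGeneric"
        {
        "$basetexture" "concrete/concretefloor001"
        "%compilenodraw"
        }
    '''
    mat = Material.parse(VMT_TEXT.splitlines(keepends=True), 'example.vmt')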
Example #6
    def parse(
        file_contents: Union[str, BaseTokenizer, Iterator[str]],
        filename='', *,
        flags: Mapping[str, bool]=EmptyMapping,
        allow_escapes: bool=True,
        single_line: bool=False,
    ) -> "Property":
        """Returns a Property tree parsed from given text.

        filename, if set, should be the source of the text for debug purposes.
        file_contents should be an iterable of strings or a single string.
        flags should be a mapping for additional flags to accept
        (which overrides defaults).
        allow_escapes allows choosing if \\t or similar escapes are parsed.
        If single_line is set, allow multiple properties to be on the same line.
        This means unterminated strings will be caught late (if at all), but
        it allows parsing some 'internal' data blocks.

        Alternatively, file_contents may be an already created tokenizer.
        In this case allow_escapes is ignored.
        """
        # The block we are currently adding to.

        # The special name 'None' marks it as the root property, which
        # just outputs its children when exported. This way we can handle
        # multiple root blocks in the file, while still returning a single
        # Property object which has all the methods.
        # Skip calling __init__ for speed.
        cur_block = root = Property.__new__(Property)
        cur_block._folded_name = cur_block.real_name = None
        cur_block.value = []

        # A queue of the properties we are currently in (outside to inside).
        # And the line numbers of each of these, for error reporting.
        open_properties = [(cur_block, 1)]

        # Grab a reference to the token values, so we avoid global lookups.
        STRING = Token.STRING
        PROP_FLAG = Token.PROP_FLAG
        NEWLINE = Token.NEWLINE
        BRACE_OPEN = Token.BRACE_OPEN
        BRACE_CLOSE = Token.BRACE_CLOSE

        if isinstance(file_contents, BaseTokenizer):
            tokenizer = file_contents
            tokenizer.filename = filename
            tokenizer.error_type = KeyValError
        else:
            tokenizer = Tokenizer(
                file_contents,
                filename,
                KeyValError,
                string_bracket=True,
                allow_escapes=allow_escapes,
            )

        # If not None, we're requiring a block to open next ("name"\n must have { next.)
        # It's the line number of the header name.
        block_line: Optional[int] = None
        # Are we permitted to replace the last property with a flagged version of the same?
        can_flag_replace = False

        for token_type, token_value in tokenizer:
            if token_type is BRACE_OPEN:  # {
                # Open a new block - make sure the last token was a name..
                if block_line is None:
                    raise tokenizer.error(
                        'Property cannot have sub-section if it already '
                        'has an in-line value.\n\n'
                        'A "name" "value" line cannot then open a block.',
                    )
                can_flag_replace = False
                cur_block = cur_block.value[-1]
                cur_block.value = []
                open_properties.append((cur_block, block_line))
                block_line = None
                continue
            # Something else, but followed by '{'
            elif block_line is not None and token_type is not NEWLINE:
                raise tokenizer.error(
                    'Block opening ("{{") required!\n\n'
                    'A single "name" on a line should next have a open brace '
                    'to begin a block.',
                )

            if token_type is NEWLINE:
                continue
            if token_type is STRING:   # "string"
                # Skip calling __init__ for speed. Value needs to be set
                # before using this, since it's unset here.
                keyvalue = Property.__new__(Property)
                keyvalue._folded_name = sys.intern(token_value.casefold())
                keyvalue.real_name = sys.intern(token_value)

                # We need to check the next token to figure out what kind of
                # prop it is.
                prop_type, prop_value = tokenizer()

                # It's a block followed by flag. ("name" [stuff])
                if prop_type is PROP_FLAG: 
                    # That must be the end of the line..
                    tokenizer.expect(NEWLINE)
                    block_line = tokenizer.line_num
                    if _read_flag(flags, prop_value):
                        keyvalue.value = []

                        # Special function - if the last prop was a
                        # keyvalue with this name, replace it instead.
                        if (
                            can_flag_replace and
                            cur_block.value[-1].real_name == token_value and
                            type(cur_block.value[-1].value) == list
                        ):
                            cur_block.value[-1] = keyvalue
                        else:
                            cur_block.value.append(keyvalue)
                        # Can't do twice in a row
                        can_flag_replace = False

                elif prop_type is STRING:
                    # A value.. ("name" "value")
                    if block_line is not None:
                        raise tokenizer.error(
                            'Keyvalue split across lines!\n\n'
                            'A value like "name" "value" must be on the same '
                            'line.'
                        )
                    block_line = None

                    keyvalue.value = prop_value

                    # Check for flags.
                    flag_token, flag_val = tokenizer()
                    if flag_token is PROP_FLAG:
                        # Should be the end of the line here.
                        tokenizer.expect(NEWLINE)
                        if _read_flag(flags, flag_val):
                            # Special function - if the last prop was a
                            # keyvalue with this name, replace it instead.
                            if (
                                can_flag_replace and
                                cur_block.value[-1].real_name == token_value and
                                type(cur_block.value[-1].value) == str
                            ):
                                cur_block.value[-1] = keyvalue
                            else:
                                cur_block.value.append(keyvalue)
                            # Can't do twice in a row
                            can_flag_replace = False
                    elif flag_token is STRING:
                        # Specifically disallow multiple text on the same line
                        # normally.
                        # ("name" "value" "name2" "value2")
                        if single_line:
                            cur_block.value.append(keyvalue)
                            tokenizer.push_back(flag_token, flag_val)
                            continue
                        else:
                            raise tokenizer.error(
                                "Cannot have multiple names on the same line!"
                            )
                    else:
                        # Otherwise, it's got nothing after.
                        # So insert the keyvalue, and check the token
                        # in the next loop. This allows braces to be
                        # on the same line.
                        cur_block.value.append(keyvalue)
                        can_flag_replace = True
                        tokenizer.push_back(flag_token, flag_val)
                    continue
                else:
                    # Something else - treat this as a block, and
                    # then re-evaluate the token in the next loop.
                    keyvalue.value = []

                    block_line = tokenizer.line_num
                    can_flag_replace = False
                    cur_block.value.append(keyvalue)
                    tokenizer.push_back(prop_type, prop_value)
                    continue

            elif token_type is BRACE_CLOSE:  # }
                # Move back a block
                open_properties.pop()
                try:
                    cur_block, _ = open_properties[-1]
                except IndexError:
                    # It's empty, we've closed one too many properties.
                    raise tokenizer.error(
                        'Too many closing brackets.\n\n'
                        'An extra closing bracket was added which would '
                        'close the outermost level.',
                    )
                # For replacing the block.
                can_flag_replace = True
            else:
                raise tokenizer.error(token_type, token_value)

        # We're out of data, do some final sanity checks.
        
        # We last had a ("name"\n), so we were expecting a block
        # next.
        if block_line is not None:
            raise KeyValError(
                'Block opening ("{") required, but hit EOF!\n'
                'A "name" line was located at the end of the file, which needs'
                ' a {} block to follow.',
                tokenizer.filename,
                line=None,
            )
        
        # All the properties in the file should be closed,
        # so the only thing in open_properties should be the 
        # root one we added.
        
        if len(open_properties) > 1:
            raise KeyValError(
                'End of text reached with remaining open sections.\n\n'
                "File ended with at least one property that didn't "
                'have an ending "}".\n'
                'Open properties: \n- Root at line 1\n' + '\n'.join([
                    f'- "{prop.real_name}" on line {line_num}'
                    for prop, line_num in open_properties[1:]
                ]),
                tokenizer.filename,
                line=None,
            )
        # Return that root property.
        return root
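Compared to the older version in Example #4, the push_back() calls let a brace share a line with its block name, and the new single_line flag relaxes the one-keyvalue-per-line rule (sketch, assuming srctools' Property.parse):

    # A '{' on the same line as the name is now accepted:
    root = Property.parse('"name" { "key" "value" }\n')
    # Multiple pairs on one line still require opting in:
    root = Property.parse('"a" "1" "b" "2"', single_line=True)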
Example #7
def parse_qc(
        qc_loc: Path, qc_path: Path
) -> Optional[Tuple[str, float, Path, float, Optional[Path]]]:
    """Parse a single QC file."""
    model_name = ref_smd = phy_smd = None
    scale_factor = ref_scale = phy_scale = 1.0

    with open(str(qc_path)) as f:
        tok = Tokenizer(
            f,
            qc_path,
            allow_escapes=False,
            allow_star_comments=True,
        )
        for token_type, token_value in tok:
            if token_type is Token.STRING:
                token_value = token_value.casefold()
                if token_value == '$scale':
                    scale_factor = float(tok.expect(Token.STRING))
                elif token_value == '$modelname':
                    model_name = tok.expect(Token.STRING)
                elif token_value in ('$bodygroup', '$body', '$model'):
                    tok.expect(Token.STRING)  # group name.
                    body_type, body_value = tok()
                    if body_type is Token.STRING:
                        # $body name "file.smd"
                        if ref_smd:
                            # Multiple bodygroups, can't deal with that.
                            return None
                        else:
                            ref_smd = qc_loc / body_value
                            ref_scale = scale_factor
                        continue
                    elif body_type is Token.NEWLINE:
                        tok.expect(Token.BRACE_OPEN)
                    elif body_type is not Token.BRACE_OPEN:
                        raise tok.error(body_type)

                    for body_type, body_value in tok:
                        if body_type is Token.BRACE_CLOSE:
                            break
                        elif body_type is Token.STRING:
                            if body_value.casefold() == "studio":
                                if ref_smd:
                                    return None
                                else:
                                    ref_smd = qc_loc / tok.expect(Token.STRING)
                                    ref_scale = scale_factor
                        elif body_type is not Token.NEWLINE:
                            raise tok.error(body_type)

                elif token_value == '$collisionmodel':
                    phy_smd = qc_loc / tok.expect(Token.STRING)
                    phy_scale = scale_factor

                # We can't support this.
                elif token_value in (
                        '$collisionjoints',
                        '$ikchain',
                        '$weightlist',
                        '$poseparameter',
                        '$proceduralbones',
                        '$jigglebone',
                        # Allow LOD models, propcombine is better than that.
                        # '$lod',
                ):
                    return None
            elif token_type is Token.BRACE_OPEN:
                # Skip other "compound" sections we don't care about.
                for body_type, body_value in tok:
                    if body_type is Token.BRACE_CLOSE:
                        break
                else:
                    raise tok.error("EOF reached without closing brace (})!")

    if model_name is None or ref_smd is None:
        # Malformed...
        LOGGER.warning('Cannot parse "{}"... ({}, {})', qc_path, model_name,
                       ref_smd)
        return None

    return (
        model_name,
        ref_scale,
        ref_smd,
        phy_scale,
        phy_smd,
    )
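A hypothetical invocation, grounded only in the signature above:

    from pathlib import Path

    result = parse_qc(Path('props/chairs'), Path('props/chairs/chair.qc'))
    if result is not None:
        model_name, ref_scale, ref_smd, phy_scale, phy_smd = result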
Example #8
    def parse(
        cls,
        fgd: 'FGD',
        tok: Tokenizer,
        ent_type: EntityTypes,
    ):
        """Parse an entity definition."""
        entity = cls(ent_type)

        # First parse the bases part - lots of name(args) sections until an '='
        help_type = None
        for token, token_value in tok:
            if token is Token.NEWLINE:
                continue
            if token is Token.STRING:
                if help_type is None:
                    try:
                        help_type = HelperTypes(token_value)
                    except ValueError:
                        raise tok.error(
                            'Unknown HelperType "{}"!',
                            token_value,
                        )
                    continue
                else:
                    # No arguments for the previous helper - add it in like that.
                    entity.helpers.append((help_type, ''))

            elif token is Token.PAREN_ARGS:
                if help_type is None:
                    raise tok.error('Args without helper type! ({!r})',
                                    token_value)

                args = _RE_HELPER_ARGS.split(token_value)

                if help_type is HelperTypes.INHERIT:
                    for base in args:
                        base = base.strip().casefold()
                        if base not in entity.bases:
                            entity.bases.append(base)
                    help_type = None
                    continue

                entity.helpers.append((help_type, args))

                help_type = None

            elif token is Token.EQUALS:
                break
            else:
                raise tok.error(token)
        else:
            raise tok.error('Entity header never ended!')

        # We were waiting for arguments for the previous helper.
        # Add it with no arguments.
        if help_type:
            entity.helpers.append((help_type, ''))

        entity.classname = tok.expect(Token.STRING).strip()

        # We next might have a ':' then docstring before the [,
        # or directly to [.
        desc = None
        for doc_token, token_value in tok:
            if doc_token is Token.NEWLINE:
                continue
            if doc_token is Token.COLON:
                if desc is None:
                    desc = []
                else:
                    raise tok.error('Two colons in entity description!')
            elif doc_token is Token.STRING:
                if desc is None or desc:
                    # No colon yet, or we have text without '+' between
                    raise tok.error(doc_token)
                desc.append(token_value)
            elif doc_token is Token.PLUS:
                if not desc:
                    raise tok.error('+ without string before it!')
                desc.append(tok.expect(Token.STRING))
            elif doc_token is Token.BRACK_OPEN:
                if desc:
                    entity.desc = ''.join(desc)
                break
            else:
                raise tok.error(doc_token)

        fgd.entities[entity.classname.casefold()] = entity

        # Now parse keyvalues, and input/outputs
        for token, token_value in tok:
            if token is Token.BRACK_CLOSE:
                break  # End of this entity.

            if token is Token.NEWLINE:
                continue

            # IO - keyword at the start.
            if token is not Token.STRING:
                raise tok.error(token)

            io_type = token_value.casefold()
            if io_type in ('input', 'output'):

                name = tok.expect(Token.STRING)
                raw_value_type = tok.expect(Token.PAREN_ARGS).strip()
                try:
                    val_typ = VALUE_TYPE_LOOKUP[raw_value_type.casefold()]
                except KeyError:
                    raise tok.error('Unknown keyvalue type "{}"!',
                                    raw_value_type)

                # Can't have a spawnflags or choices input type...
                if val_typ.has_list:
                    raise tok.error(
                        '"{}" value type is not valid for an input or output!',
                        val_typ.value,
                    )

                # Read desc
                attrs, token = read_colon_list(tok)

                if token is Token.EQUALS:
                    raise tok.error(token)

                if attrs:
                    try:
                        [desc] = attrs
                    except ValueError:
                        raise tok.error('Too many values for IO definition!')
                else:
                    desc = ''

                # entity.inputs or entity.outputs
                getattr(entity,
                        io_type + 's')[name] = IODef(name, val_typ, desc)

            else:
                # Keyvalue
                name = io_type

                raw_value_type = tok.expect(Token.PAREN_ARGS).strip()
                try:
                    val_typ = VALUE_TYPE_LOOKUP[raw_value_type.casefold()]
                except KeyError:
                    raise tok.error('Unknown keyvalue type "{}"!',
                                    raw_value_type)

                next_token, key_flag = tok()

                is_readonly = False
                had_colon = False
                attrs = None

                if next_token is Token.STRING:
                    # 'report' or 'readonly'
                    if key_flag.casefold() == 'readonly':
                        is_readonly = True
                elif next_token is Token.COLON:
                    had_colon = True
                elif next_token is Token.EQUALS:
                    # Special case - spawnflags doesn't have to have
                    # any info - skips straight to the end.
                    if val_typ is ValueTypes.SPAWNFLAGS:
                        attrs = []
                        has_equal = next_token
                elif next_token is Token.NEWLINE:
                    attrs = []
                    has_equal = next_token
                else:
                    raise tok.error(next_token)

                if attrs is None:
                    attrs, has_equal = read_colon_list(tok, had_colon)
                attr_len = len(attrs)

                desc = ''
                default = None
                if attr_len == 3:
                    disp_name, default, desc = attrs
                elif attr_len == 2:
                    disp_name, default = attrs
                elif attr_len == 1:
                    [disp_name] = attrs
                elif attr_len == 0:
                    disp_name = name
                else:
                    raise tok.error('Too many attributes for keyvalue!\n{!r}',
                                    attrs)

                if val_typ.has_list:
                    if has_equal is not Token.EQUALS:
                        raise tok.error('No list for "{}" value type!',
                                        val_typ.name)
                    # Read the choices in the []
                    val_list = []
                    tok.expect(Token.BRACK_OPEN)
                    for choices_token, choices_value in tok:
                        if choices_token is Token.NEWLINE:
                            continue
                        if choices_token is Token.BRACK_CLOSE:
                            break
                        elif choices_token is not Token.STRING:
                            raise tok.error(choices_token)
                        vals, has_equal = read_colon_list(tok, had_colon=False)

                        # Spawnflags can have a default, others don't
                        if len(vals) == 2 and val_typ is ValueTypes.SPAWNFLAGS:
                            val_list.append(
                                (choices_value, vals[0], bool(vals[1])))
                        elif len(vals) == 1:
                            if val_typ is ValueTypes.SPAWNFLAGS:
                                val_list.append((choices_value, vals[0], True))
                            else:
                                val_list.append((choices_value, vals[0]))
                        elif len(vals) == 0:
                            raise tok.error(Token.STRING)
                        else:
                            raise tok.error('Too many values!\n{}', vals)

                        # Handle ] at the end of a : : line.
                        if has_equal is Token.BRACK_CLOSE:
                            break
                    else:
                        raise tok.error(Token.EOF)
                else:
                    val_list = None
                    if has_equal is Token.EQUALS:
                        raise tok.error('"{}" value types can\'t have lists!',
                                        val_typ.name)

                entity.keyvalues[name.casefold()] = KeyValues(
                    name,
                    val_typ,
                    disp_name,
                    default,
                    desc,
                    val_list,
                    is_readonly,
                )
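For reference, a hedged sketch of the FGD source this method consumes - Example #2's _parse_file reads the leading @PointClass keyword and dispatches here, so parsing starts at the base() helper:

    @PointClass base(Targetname) = env_example : "An example entity." [
        radius(integer) : "Radius" : 64 : "Size of the effect."
        spawnflags(flags) =
            [
            1 : "Start active" : 1
            ]
        input Enable(void) : "Turn on the entity."
        output OnTrigger(void) : "Fired when triggered."
    ]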
Example #9
    def parse(
        file_contents: Union[str, Iterator[str]],
        filename='',
        flags: Dict[str, bool] = EmptyMapping,
    ) -> "Property":
        """Returns a Property tree parsed from given text.

        filename, if set, should be the source of the text for debug purposes.
        file_contents should be an iterable of strings or a single string.
        flags should be a mapping for additional flags to accept
        (which overrides defaults).
        """
        # The block we are currently adding to.

        # The special name 'None' marks it as the root property, which
        # just outputs its children when exported. This way we can handle
        # multiple root blocks in the file, while still returning a single
        # Property object which has all the methods.
        cur_block = Property(None, [])

        # A queue of the properties we are currently in (outside to inside).
        open_properties = [cur_block]

        # Grab a reference to the token values, so we avoid global lookups.
        STRING = Token.STRING
        PROP_FLAG = Token.PROP_FLAG
        NEWLINE = Token.NEWLINE
        BRACE_OPEN = Token.BRACE_OPEN
        BRACE_CLOSE = Token.BRACE_CLOSE

        tokenizer = Tokenizer(
            file_contents,
            filename,
            KeyValError,
            string_bracket=True,
        )

        # Do we require a block to be opened next? ("name"\n must have { next.)
        requires_block = False
        # Are we permitted to replace the last property with a flagged version of the same?
        can_flag_replace = False

        for token_type, token_value in tokenizer:
            if token_type is BRACE_OPEN:  # {
                # Open a new block - make sure the last token was a name..
                if not requires_block:
                    raise tokenizer.error(
                        'Property cannot have sub-section if it already '
                        'has an in-line value.\n\n'
                        'A "name" "value" line cannot then open a block.', )
                requires_block = can_flag_replace = False
                cur_block = cur_block[-1]
                cur_block.value = []
                open_properties.append(cur_block)
                continue
            # Something else, but followed by '{'
            elif requires_block and token_type is not NEWLINE:
                raise tokenizer.error(
                    'Block opening ("{{") required!\n\n'
                    'A single "name" on a line should next have a open brace '
                    'to begin a block.', )

            if token_type is NEWLINE:
                continue
            if token_type is STRING:  # "string"
                # We need to check the next token to figure out what kind of
                # prop it is.
                prop_type, prop_value = tokenizer()

                # It's a block followed by flag. ("name" [stuff])
                if prop_type is PROP_FLAG:
                    # That must be the end of the line..
                    tokenizer.expect(NEWLINE)
                    requires_block = True
                    if _read_flag(flags, prop_value):
                        # Special function - if the last prop was a
                        # keyvalue with this name, replace it instead.
                        if (
                            can_flag_replace and
                            cur_block.value[-1].real_name == token_value and
                            cur_block.value[-1].has_children()
                        ):
                            cur_block.value[-1] = Property(token_value, [])
                        else:
                            cur_block.append(Property(token_value, []))
                        # Can't do twice in a row
                        can_flag_replace = False

                elif prop_type is STRING:
                    # A value.. ("name" "value")
                    if requires_block:
                        raise tokenizer.error(
                            'Keyvalue split across lines!\n\n'
                            'A value like "name" "value" must be on the same '
                            'line.')
                    requires_block = False

                    keyvalue = Property(token_value, prop_value)

                    # Check for flags.
                    flag_token, flag_val = tokenizer()
                    if flag_token is PROP_FLAG:
                        # Should be the end of the line here.
                        tokenizer.expect(NEWLINE)
                        if _read_flag(flags, flag_val):
                            # Special function - if the last prop was a
                            # keyvalue with this name, replace it instead.
                            if (
                                can_flag_replace and
                                cur_block.value[-1].real_name == token_value and
                                not cur_block.value[-1].has_children()
                            ):
                                cur_block.value[-1] = keyvalue
                            else:
                                cur_block.append(keyvalue)
                            # Can't do twice in a row
                            can_flag_replace = False
                    elif flag_token is STRING:
                        # Specifically disallow multiple text on the same line.
                        # ("name" "value" "name2" "value2")
                        raise tokenizer.error(
                            "Cannot have multiple names on the same line!")
                    else:
                        # Otherwise, it's got nothing after.
                        # So insert the keyvalue, and check the token
                        # in the next loop. This allows braces to be
                        # on the same line.
                        cur_block.append(keyvalue)
                        can_flag_replace = True
                        tokenizer.push_back(flag_token, flag_val)
                    continue
                else:
                    # Something else - treat this as a block, and
                    # then re-evaluate this in the next loop.
                    requires_block = True
                    can_flag_replace = False
                    cur_block.append(Property(token_value, []))
                    tokenizer.push_back(prop_type, prop_value)
                    continue

            elif token_type is BRACE_CLOSE:  # }
                # Move back a block
                open_properties.pop()
                try:
                    cur_block = open_properties[-1]
                except IndexError:
                    # It's empty, we've closed one too many properties.
                    raise tokenizer.error(
                        'Too many closing brackets.\n\n'
                        'An extra closing bracket was added which would '
                        'close the outermost level.',
                    )
                # For replacing the block.
                can_flag_replace = True
            else:
                raise tokenizer.error(token_type)

        # We're out of data, do some final sanity checks.

        # We last had a ("name"\n), so we were expecting a block
        # next.
        if requires_block:
            raise KeyValError(
                'Block opening ("{") required, but hit EOF!\n'
                'A "name" line was located at the end of the file, which needs'
                ' a {} block to follow.',
                tokenizer.filename,
                line=None,
            )

        # All the properties in the file should be closed,
        # so the only thing in open_properties should be the
        # root one we added.

        if len(open_properties) > 1:
            raise KeyValError(
                'End of text reached with remaining open sections.\n\n'
                "File ended with at least one property that didn't "
                'have an ending "}".',
                tokenizer.filename,
                line=None,
            )
        # Return that root property.
        return open_properties[0]