def is_assign(self, allow_assign_mark=False):
    """Tell whether this search result represents an assignable variable.

    With ``allow_assign_mark`` enabled, a trailing ``=`` is stripped and
    the remaining text is re-parsed with ``search_variable`` before the
    decision is made.
    """
    text = self.string
    if allow_assign_mark and text.endswith('='):
        candidate = search_variable(rstrip(text[:-1]))
        return candidate.is_assign()
    # Step-wise `and` keeps the exact short-circuit semantics of the
    # original single-expression chain.
    result = self.is_variable()
    result = result and self.identifier in '$@&'
    result = result and not self.items
    result = result and not search_variable(self.base)
    return result
Example #2
0
 def _validate_assign_mark(self, variable):
     if self._seen_assign_mark:
         raise DataError("Assign mark '=' can be used only with the last "
                         "variable.")
     if variable.endswith('='):
         self._seen_assign_mark = True
         return rstrip(variable[:-1])
     return variable
Example #3
0
 def __init__(self, name, value):
     """Store *name* (minus a possible trailing ``=`` assign mark) and
     *value*."""
     # TODO: Should this be done already by the parser?
     # Applies also to 'WITH NAME', 'NONE' and 'TASK(S)' handling
     # as well as joining doc/meta lines and tuple() conversion.
     self.name = rstrip(name[:-1]) if name.endswith('=') else name
     self.value = value
Example #4
0
def is_var(string, identifiers='$@&', allow_assign_mark=False):
    """Return True if *string* is exactly one variable like ``${var}``.

    ``identifiers`` lists the accepted variable type characters and
    ``allow_assign_mark`` additionally accepts a trailing ``=``.

    Bug fix: length is now validated *after* removing the assign mark.
    Previously ``is_var('${}=', allow_assign_mark=True)`` returned True
    even though ``is_var('${}')`` is False, and ``'   ='`` (stripping to
    an empty string) raised IndexError on the ``string[0]`` access.
    """
    if not is_string(string):
        return False
    # endswith is safe on the empty string, unlike string[-1].
    if allow_assign_mark and string.endswith('='):
        string = rstrip(string[:-1])
    # Shortest valid variable is e.g. '${x}' -- four characters.
    if len(string) < 4:
        return False
    if string[0] not in identifiers or string[1] != '{' or string[-1] != '}':
        return False
    body = string[2:-1]
    return '{' not in body and '}' not in body
Example #5
0
 def _tokenize_line(self, line, lineno, include_separators=True):
     """Tokenize one physical *line* into data, separator and EOL tokens.

     Data cells become tokens with type ``None``; separators and the
     trailing-whitespace EOL token are emitted only when
     ``include_separators`` is true.  ``offset`` tracks the column of
     each token within the line.
     """
     # Performance optimized code.
     tokens = []
     append = tokens.append
     offset = 0
     # Strip trailing whitespace once; the original computed rstrip(line)
     # twice (for splitting and again for the EOL token).
     stripped = rstrip(line)
     if line[:1] != '|':
         splitter = self._split_from_spaces
     else:
         splitter = self._split_from_pipes
     for value, is_data in splitter(stripped):
         if is_data:
             append(Token(None, value, lineno, offset))
         elif include_separators:
             append(Token(Token.SEPARATOR, value, lineno, offset))
         offset += len(value)
     if include_separators:
         trailing_whitespace = line[len(stripped):]
         append(Token(Token.EOL, trailing_whitespace, lineno, offset))
     return tokens
Example #6
0
 def _split_line(self, line, lineno, data_only=False):
     """Yield DATA tokens for *line*; also SEPARATOR and EOL tokens
     unless ``data_only`` is true."""
     splitter = (self._split_from_pipes if line[:1] == '|'
                 else self._split_from_spaces)
     data, sepa = Token.DATA, Token.SEPARATOR
     offset = 0
     for value, is_data in splitter(rstrip(line)):
         if is_data or not data_only:
             token_type = data if is_data else sepa
             yield Token(token_type, value, lineno, offset)
         offset += len(value)
     if not data_only:
         # The (possibly empty) trailing whitespace becomes the EOL token.
         match = re.search(r'\s*$', line, flags=re.UNICODE)
         yield Token(Token.EOL, match.group(), lineno, offset)
Example #7
0
 def _is_assign(self, value):
     """True if *value* is a variable, optionally followed by a trailing
     ``=`` assign mark."""
     if is_var(value):
         return True
     return value.endswith('=') and is_var(rstrip(value[:-1]))