Example #1
from sqlparse.sql import Identifier
from sqlparse.tokens import Punctuation, Whitespace


def parse_identifier(item):
    """Split an identifier into (catalog_name, schema_name, name, alias)."""
    alias = item.get_alias()
    # Only consider the tokens before the first whitespace, i.e. before any
    # "AS alias" part, reversed so that the object name comes first.
    sp_idx = item.token_next_by(t=Whitespace)[0] or len(item.tokens)
    item_rev = Identifier(list(reversed(item.tokens[:sp_idx])))
    name = item_rev._get_first_name(real_name=True)
    alias = alias or name
    # Walk the dots right to left: name, then schema, then catalog.
    dot_idx, _ = item_rev.token_next_by(m=(Punctuation, '.'))
    if dot_idx is not None:
        schema_name = item_rev._get_first_name(dot_idx, real_name=True)
        dot_idx, _ = item_rev.token_next_by(m=(Punctuation, '.'),
                                            idx=dot_idx)
        if dot_idx is not None:
            catalog_name = item_rev._get_first_name(dot_idx,
                                                    real_name=True)
        else:
            catalog_name = None
    else:
        schema_name = None
        catalog_name = None
    # Quote handling: unquoted schema names are lowercased; mixed-case names
    # are lowercased with the original spelling preserved in the alias.
    schema_quoted = schema_name and item.value[0] == '"'
    if schema_name and not schema_quoted:
        schema_name = schema_name.lower()
    quote_count = item.value.count('"')
    name_quoted = quote_count > 2 or (quote_count and not schema_quoted)
    alias_quoted = alias and item.value[-1] == '"'
    if alias_quoted or name_quoted and not alias and name.islower():
        alias = '"' + (alias or name) + '"'
    if name and not name_quoted and not name.islower():
        if not alias:
            alias = name
        name = name.lower()
    return catalog_name, schema_name, name, alias
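
Taken on its own, parse_identifier can be exercised directly with sqlparse. The driver below is only a sketch: the SQL string and the way the identifier is pulled out of the parsed statement are illustrative assumptions, not part of the original snippet.

import sqlparse
from sqlparse.sql import Identifier

# Hypothetical driver: parse a dotted, aliased column reference and split it.
sql = 'SELECT prod_cat.prod_schema.orders AS o FROM t'
statement = sqlparse.parse(sql)[0]
_, identifier = statement.token_next_by(i=Identifier)
catalog_name, schema_name, name, alias = parse_identifier(identifier)
# The four parts come back in the order (catalog, schema, name, alias);
# parts that are absent from the identifier are returned as None.
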
Example #2
    @classmethod
    def from_identifier(cls,
                        identifier: token_groups.Identifier,
                        position: int = 0) -> Column:
        """Create a column from an SQL identifier token.

        Params:
        -------
        identifier: SQL token with column label.
        position: Position of the column within the query.

        Returns:
        --------
        A Column object based on the given identifier token.
        """
        idx, identifier_name = identifier.token_next_by(
            t=token_types.Name, i=token_groups.Function)

        _, maybe_dot = identifier.token_next(idx, skip_ws=True, skip_cm=True)
        if maybe_dot is None or not maybe_dot.match(token_types.Punctuation,
                                                    "."):
            table_name = None
            name = identifier_name.value
        else:
            table_name = identifier_name.value
            idx, column_name_token = identifier.token_next_by(
                t=token_types.Name, idx=idx)
            # Fauna doesn't have an 'id' field, so we extract the ID value from the 'ref' included
            # in query responses, but we still want to map the field name to aliases as with other
            # fields for consistency when passing results to SQLAlchemy
            name = "ref" if column_name_token.value == "id" else column_name_token.value

        idx, as_keyword = identifier.token_next_by(m=(token_types.Keyword,
                                                      "AS"),
                                                   idx=idx)

        if as_keyword is None:
            alias = "id" if name == "ref" else name
        else:
            _, alias_identifier = identifier.token_next_by(
                i=token_groups.Identifier, idx=idx)
            alias = alias_identifier.value

        function_name: typing.Optional[Function] = None
        if re.match(COUNT_REGEX, name):
            function_name = Function.COUNT
        elif re.match(NOT_SUPPORTED_FUNCTION_REGEX, name):
            raise exceptions.NotSupportedError(
                "MIN, MAX, AVG, and SUM functions are not yet supported.")

        column_params: ColumnParams = {
            "table_name": table_name,
            "name": name,
            "alias": alias,
            "function_name": function_name,
            "position": position,
        }

        return Column(**column_params)
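
A sketch of how this classmethod might be called. The SQL text is invented, Column stands for the class the method is defined on, and token_groups is assumed to be the module's alias for sqlparse.sql.

import sqlparse
from sqlparse import sql as token_groups

# Hypothetical call site: take the projected column of a SELECT statement and
# build a Column from it.
statement = sqlparse.parse("SELECT users.name AS username FROM users")[0]
_, identifier = statement.token_next_by(i=token_groups.Identifier)
column = Column.from_identifier(identifier, position=0)
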
Example #3
    def get_identifier_parents(self):
        """Return (catalog_name, schema_name) parsed from self.identifier."""
        if self.identifier is None:
            return None, None
        item_rev = Identifier(list(reversed(self.identifier.tokens)))
        name = item_rev._get_first_name(real_name=True)
        dot_idx, _ = item_rev.token_next_by(m=(Punctuation, '.'))
        if dot_idx is not None:
            schema_name = item_rev._get_first_name(dot_idx, real_name=True)
            dot_idx, _ = item_rev.token_next_by(m=(Punctuation, '.'), idx=dot_idx)
            if dot_idx is not None:
                catalog_name = item_rev._get_first_name(dot_idx, real_name=True)
            else:
                catalog_name = None
        else:
            schema_name = None
            catalog_name = None
        return catalog_name, schema_name
Example #4
    @classmethod
    def _extract_direction(
            cls, identifier: token_groups.Identifier
    ) -> typing.Optional[OrderDirection]:
        _, direction_token = identifier.token_next_by(t=token_types.Keyword)

        if direction_token is None:
            PENULTIMATE_TOKEN = -2
            # For some reason, when ordering by multiple columns with a direction keyword,
            # sqlparse groups the final column with the direction in an Identifier token.
            # There is an open issue (https://github.com/andialbrecht/sqlparse/issues/606),
            # though without any response, so it seems to be a bug.
            _, direction_identifier = identifier.token_next_by(
                i=token_groups.Identifier, idx=PENULTIMATE_TOKEN)
            if direction_identifier is not None:
                _, direction_token = direction_identifier.token_next_by(
                    t=token_types.Keyword)

        return (getattr(OrderDirection, direction_token.value)
                if direction_token else None)
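
A sketch of a possible call site for the direction extraction. The SQL, the navigation to the ORDER BY target, and the class name OrderBy are assumptions; OrderDirection is taken to be an enum with ASC/DESC members, as the getattr call implies.

import sqlparse
from sqlparse import sql as token_groups
from sqlparse import tokens as token_types

# Hypothetical call site; "OrderBy" stands in for whatever class defines
# _extract_direction in the original module.
sql = "SELECT * FROM users ORDER BY users.age DESC"
statement = sqlparse.parse(sql)[0]
idx, _ = statement.token_next_by(m=(token_types.Keyword, "ORDER BY"))
_, identifier = statement.token_next_by(i=token_groups.Identifier, idx=idx)
direction = OrderBy._extract_direction(identifier)  # e.g. OrderDirection.DESC
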
Example #5
    @classmethod
    def from_identifier(cls, identifier: token_groups.Identifier) -> Table:
        """Create a Table from an SQL identifier token.

        Params:
        -------
        identifier: SQL token that contains the table's name.

        Returns:
        --------
        A new Table object.
        """
        idx, name = identifier.token_next_by(t=token_types.Name)
        assert name is not None

        idx, _ = identifier.token_next_by(m=(token_types.Keyword, "AS"),
                                          idx=idx)
        if idx is None:
            return cls(name=name.value)

        _, alias = identifier.token_next_by(i=token_groups.Identifier, idx=idx)
        if alias is None:
            return cls(name=name.value)

        return cls(name=name.value, alias=alias.value)
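
As with the column case, a sketch of how the table variant might be driven; the SQL and the FROM-clause navigation are illustrative assumptions, and Table is the class the method belongs to.

import sqlparse
from sqlparse import sql as token_groups
from sqlparse import tokens as token_types

# Hypothetical call site: take the identifier that follows FROM and build a
# Table from it.
statement = sqlparse.parse("SELECT * FROM users AS u")[0]
idx, _ = statement.token_next_by(m=(token_types.Keyword, "FROM"))
_, identifier = statement.token_next_by(i=token_groups.Identifier, idx=idx)
table = Table.from_identifier(identifier)  # name "users", alias "u"
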
Example #6
from sqlparse.sql import Identifier
from sqlparse.tokens import Punctuation, Whitespace


def parse_identifier(item):
    alias = item.get_alias()
    sp_idx = item.token_next_by(t=Whitespace)[0] or len(item.tokens)
    item_rev = Identifier(list(reversed(item.tokens[:sp_idx])))
    name = item_rev._get_first_name(real_name=True)
    alias = alias or name
    dot_idx, _ = item_rev.token_next_by(m=(Punctuation, '.'))
    if dot_idx is not None:
        schema_name = item_rev._get_first_name(dot_idx, real_name=True)
        dot_idx, _ = item_rev.token_next_by(m=(Punctuation, '.'),
                                            idx=dot_idx)
        if dot_idx is not None:
            catalog_name = item_rev._get_first_name(dot_idx,
                                                    real_name=True)
        else:
            catalog_name = None
    else:
        schema_name = None
        catalog_name = None
    # TODO: this business below needs help
    # for one we need to apply this logic to catalog_name
    # then the logic around name_quoted = quote_count > 2 obviously
    # doesn't work.  Finally, quotechar needs to be customized
    schema_quoted = schema_name and item.value[0] == '"'
    if schema_name and not schema_quoted:
        schema_name = schema_name.lower()
    quote_count = item.value.count('"')
    name_quoted = quote_count > 2 or (quote_count and not schema_quoted)
    alias_quoted = alias and item.value[-1] == '"'
    if alias_quoted or name_quoted and not alias and name.islower():
        alias = '"' + (alias or name) + '"'
    if name and not name_quoted and not name.islower():
        if not alias:
            alias = name
        name = name.lower()
    return catalog_name, schema_name, name, alias