Example #1
 def _fixed_lookup_fixture(self):
     return [
         (sqltypes.String(), sqltypes.VARCHAR()),
         (sqltypes.String(1), sqltypes.VARCHAR(1)),
         (sqltypes.String(3), sqltypes.VARCHAR(3)),
         (sqltypes.Text(), sqltypes.TEXT()),
         (sqltypes.Unicode(), sqltypes.VARCHAR()),
         (sqltypes.Unicode(1), sqltypes.VARCHAR(1)),
         (sqltypes.UnicodeText(), sqltypes.TEXT()),
         (sqltypes.CHAR(3), sqltypes.CHAR(3)),
         (sqltypes.NUMERIC, sqltypes.NUMERIC()),
         (sqltypes.NUMERIC(10, 2), sqltypes.NUMERIC(10, 2)),
         (sqltypes.Numeric, sqltypes.NUMERIC()),
         (sqltypes.Numeric(10, 2), sqltypes.NUMERIC(10, 2)),
         (sqltypes.DECIMAL, sqltypes.DECIMAL()),
         (sqltypes.DECIMAL(10, 2), sqltypes.DECIMAL(10, 2)),
         (sqltypes.INTEGER, sqltypes.INTEGER()),
         (sqltypes.BIGINT, sqltypes.BIGINT()),
         (sqltypes.Float, sqltypes.FLOAT()),
         (sqltypes.TIMESTAMP, sqltypes.TIMESTAMP()),
         (sqltypes.DATETIME, sqltypes.DATETIME()),
         (sqltypes.DateTime, sqltypes.DATETIME()),
         (sqltypes.DateTime(), sqltypes.DATETIME()),
         (sqltypes.DATE, sqltypes.DATE()),
         (sqltypes.Date, sqltypes.DATE()),
         (sqltypes.TIME, sqltypes.TIME()),
         (sqltypes.Time, sqltypes.TIME()),
         (sqltypes.BOOLEAN, sqltypes.BOOLEAN()),
         (sqltypes.Boolean, sqltypes.BOOLEAN()),
     ]
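A minimal sketch of one way such a fixture can be consumed, assuming the test compares how each pair renders as DDL. The DefaultDialect stand-in and the direct call with None for self are illustrative assumptions, not the suite's actual harness:

from sqlalchemy import types as sqltypes
from sqlalchemy.engine import default

dialect = default.DefaultDialect()
for api_type, ddl_type in _fixed_lookup_fixture(None):
    # bare classes such as sqltypes.NUMERIC are instantiated before compiling
    if isinstance(api_type, type):
        api_type = api_type()
    assert api_type.compile(dialect=dialect) == ddl_type.compile(dialect=dialect)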
Example #2
 def _fixed_lookup_fixture(self):
     return [
         (sqltypes.String(), sqltypes.VARCHAR()),
         (sqltypes.String(1), sqltypes.VARCHAR(1)),
         (sqltypes.String(3), sqltypes.VARCHAR(3)),
         (sqltypes.Text(), sqltypes.TEXT()),
         (sqltypes.Unicode(), sqltypes.VARCHAR()),
         (sqltypes.Unicode(1), sqltypes.VARCHAR(1)),
         (sqltypes.UnicodeText(), sqltypes.TEXT()),
         (sqltypes.CHAR(3), sqltypes.CHAR(3)),
         (sqltypes.NUMERIC, sqltypes.NUMERIC()),
         (sqltypes.NUMERIC(10, 2), sqltypes.NUMERIC(10, 2)),
         (sqltypes.Numeric, sqltypes.NUMERIC()),
         (sqltypes.Numeric(10, 2), sqltypes.NUMERIC(10, 2)),
         (sqltypes.DECIMAL, sqltypes.DECIMAL()),
         (sqltypes.DECIMAL(10, 2), sqltypes.DECIMAL(10, 2)),
         (sqltypes.INTEGER, sqltypes.INTEGER()),
         (sqltypes.BIGINT, sqltypes.BIGINT()),
         (sqltypes.Float, sqltypes.FLOAT()),
         (sqltypes.TIMESTAMP, sqltypes.TIMESTAMP()),
         (sqltypes.DATETIME, sqltypes.DATETIME()),
         (sqltypes.DateTime, sqltypes.DATETIME()),
         (sqltypes.DateTime(), sqltypes.DATETIME()),
         (sqltypes.DATE, sqltypes.DATE()),
         (sqltypes.Date, sqltypes.DATE()),
         (sqltypes.TIME, sqltypes.TIME()),
         (sqltypes.Time, sqltypes.TIME()),
         (sqltypes.BOOLEAN, sqltypes.BOOLEAN()),
         (sqltypes.Boolean, sqltypes.BOOLEAN()),
         (sqlite.DATE(storage_format="%(year)04d%(month)02d%(day)02d", ),
          sqltypes.DATE()),
         (sqlite.TIME(
             storage_format="%(hour)02d%(minute)02d%(second)02d", ),
          sqltypes.TIME()),
         (sqlite.DATETIME(storage_format="%(year)04d%(month)02d%(day)02d"
                          "%(hour)02d%(minute)02d%(second)02d", ),
          sqltypes.DATETIME()),
     ]
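The storage_format arguments above control how SQLite's string-based date types serialize values. A hedged illustration using the type's bind processor:

import datetime
from sqlalchemy.dialects import sqlite

date_type = sqlite.DATE(storage_format="%(year)04d%(month)02d%(day)02d")
to_db = date_type.bind_processor(sqlite.dialect())
assert to_db(datetime.date(2024, 1, 31)) == "20240131"  # compact storage form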
Example #3
def get_spark_type(field, required_type):
    if isinstance(required_type, type(db_types.DATE())):
        return spk_types.StructField(field, spk_types.DateType(), True)
    elif isinstance(required_type, type(db_types.DATETIME())):
        return spk_types.StructField(field, spk_types.TimestampType(), True)
    elif isinstance(required_type, type(db_types.VARCHAR())):
        return spk_types.StructField(field, spk_types.StringType(), True)
    elif isinstance(required_type, type(db_types.INT())):
        # DB types are enforced before Spark types, so the Spark type must be
        # the less restrictive of the two; use LongType instead of IntegerType.
        return spk_types.StructField(field, spk_types.LongType(), True)
    elif isinstance(required_type, type(db_types.FLOAT())):
        return spk_types.StructField(field, spk_types.FloatType(), True)
    elif isinstance(required_type, type(db_types.BOOLEAN())):
        return spk_types.StructField(field, spk_types.BooleanType(), True)
    else:
        raise Exception(
            "Type not recognized, field={}, required_type={}".format(
                field, required_type))
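A usage sketch for the helper above, assuming db_types aliases sqlalchemy.types and spk_types aliases pyspark.sql.types, as the snippet's names imply:

import sqlalchemy.types as db_types
import pyspark.sql.types as spk_types

# build a Spark schema from (field name, SQLAlchemy type) requirements
schema = spk_types.StructType([
    get_spark_type("id", db_types.INT()),
    get_spark_type("created_at", db_types.DATETIME()),
    get_spark_type("name", db_types.VARCHAR()),
])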
Example #4
def cast_value(value, required_type, field_name):
    # TODO: make it less ugly.. or avoid using pandas to not require this.
    try:
        if isinstance(required_type, type(db_types.DATE())):
            if isinstance(value, str):
                return datetime.strptime(
                    value, "%Y-%m-%d").date()  # assuming ISO format
            elif isinstance(value, pd.Timestamp):  # == datetime
                return value.to_pydatetime().date()
            elif isinstance(value, date):
                return value
            elif pd.isnull(value):
                return None
            else:
                return required_type.python_type(value)
        if isinstance(required_type, type(db_types.DATETIME())):
            if isinstance(value, str):
                return datetime.strptime(
                    value, "%Y-%m-%d %H:%M:%S")  # assuming iso format
            elif isinstance(value, pd.Timestamp):
                return value.to_pydatetime()
            elif pd.isnull(value):
                return None
            else:
                return required_type.python_type(value)
        elif isinstance(required_type, type(db_types.VARCHAR())):
            return None if pd.isnull(value) else str(value)
        elif isinstance(required_type, type(db_types.INT())):
            return None if pd.isnull(value) else int(float(value))
        elif isinstance(required_type, type(db_types.BIGINT())):
            return None if pd.isnull(value) else int(value)
        elif isinstance(required_type, type(db_types.FLOAT())):
            return None if pd.isnull(value) else float(value)
        else:
            return required_type.python_type(value)
    except Exception as e:
        logger.error(u"cast_value issue: {}, {}, {}, {}, {}.".format(
            field_name, value, type(value), required_type, str(e)))
        return None
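A hedged sketch of cast_value in use, assuming the snippet's imports (pandas as pd, sqlalchemy.types as db_types, a module-level logger) are in scope:

cast_value("2024-01-31", db_types.DATE(), "day")   # -> datetime.date(2024, 1, 31)
cast_value(float("nan"), db_types.INT(), "count")  # -> None (pandas null)
cast_value("3", db_types.INT(), "count")           # -> 3
cast_value(2.5, db_types.VARCHAR(), "label")       # -> "2.5"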
Example #5
def str_to_sqltype(expr):
    import re
    import sqlalchemy.types as sqltypes
    norm_expr = expr.lower()
    if norm_expr.startswith('integer'):
        match_result = re.match(r'integer\((\d+)\)', norm_expr)
        if match_result is not None:
            return sqltypes.BIGINT() if int(match_result.group(1)) > 11 else sqltypes.INTEGER()
        return sqltypes.BIGINT()
    if norm_expr == 'decimal':
        return sqltypes.DECIMAL()
    if norm_expr == 'date':
        return sqltypes.DATETIME()
    if norm_expr == 'bool' or norm_expr == 'boolean':
        return sqltypes.BOOLEAN()
    if norm_expr.startswith('string'):
        match_result = re.match(r'string\((\d+)\)', norm_expr)
        if match_result is not None:
            maxlen = int(match_result.group(1))
            return sqltypes.VARCHAR(maxlen) if maxlen < 65536 else sqltypes.TEXT()
        return sqltypes.TEXT()
    raise RuntimeError("Unsupported data type [" + expr + "]")
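A quick sketch of the mapping str_to_sqltype implements; every call below follows directly from the branches above:

str_to_sqltype("integer(5)")     # -> sqltypes.INTEGER()
str_to_sqltype("integer(12)")    # -> sqltypes.BIGINT()   (width > 11)
str_to_sqltype("string(255)")    # -> sqltypes.VARCHAR(255)
str_to_sqltype("string(70000)")  # -> sqltypes.TEXT()
str_to_sqltype("date")           # -> sqltypes.DATETIME()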
Example #6
 },
 {
     'name': 'boolean',
     'type': types.Boolean(),
     'nullable': True,
     'default': None
 },
 {
     'name': 'date',
     'type': types.DATE(),
     'nullable': True,
     'default': None
 },
 {
     'name': 'datetime',
     'type': types.DATETIME(),
     'nullable': True,
     'default': None
 },
 {
     'name': 'time',
     'type': types.TIME(),
     'nullable': True,
     'default': None
 },
 {
     'name': 'bytes',
     'type': types.BINARY(),
     'nullable': True,
     'default': None
 },
Example #7
        return self.name


# TODO: break these out into meaningful data types
quoted_types = (
    (
        "CHAR",
        types.CHAR(length=1, ),
    ),
    (
        "DATE",
        types.DATE(),
    ),
    (
        "DATETIME",
        types.DATETIME(),
    ),
    (
        "JSON",
        types.JSON(),
    ),
    (
        "TIME",
        types.TIME(timezone=False),
    ),
    (
        "TIMESTAMP_NTZ",
        types.TIMESTAMP(timezone=False),
    ),
    (
        "TIMESTAMP_TZ",
Example #8
 def _unsupported_args_fixture(self):
     return [(
         "INTEGER(5)",
         sqltypes.INTEGER(),
     ), ("DATETIME(6, 12)", sqltypes.DATETIME())]
Example #9
class PrestoEngineSpec(BaseEngineSpec):  # pylint: disable=too-many-public-methods
    engine = "presto"
    engine_name = "Presto"
    allows_alias_to_source_column = False

    _time_grain_expressions = {
        None:
        "{col}",
        "PT1S":
        "date_trunc('second', CAST({col} AS TIMESTAMP))",
        "PT1M":
        "date_trunc('minute', CAST({col} AS TIMESTAMP))",
        "PT1H":
        "date_trunc('hour', CAST({col} AS TIMESTAMP))",
        "P1D":
        "date_trunc('day', CAST({col} AS TIMESTAMP))",
        "P1W":
        "date_trunc('week', CAST({col} AS TIMESTAMP))",
        "P1M":
        "date_trunc('month', CAST({col} AS TIMESTAMP))",
        "P3M":
        "date_trunc('quarter', CAST({col} AS TIMESTAMP))",
        "P1Y":
        "date_trunc('year', CAST({col} AS TIMESTAMP))",
        "P1W/1970-01-03T00:00:00Z":
        "date_add('day', 5, date_trunc('week', "
        "date_add('day', 1, CAST({col} AS TIMESTAMP))))",
        "1969-12-28T00:00:00Z/P1W":
        "date_add('day', -1, date_trunc('week', "
        "date_add('day', 1, CAST({col} AS TIMESTAMP))))",
    }
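
    # Hedged illustration: each entry is a str.format template, so for the
    # weekly grain "P1W" and a column named "ts",
    #   _time_grain_expressions["P1W"].format(col="ts")
    # renders "date_trunc('week', CAST(ts AS TIMESTAMP))".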

    custom_errors: Dict[Pattern[str], Tuple[str, SupersetErrorType, Dict[
        str, Any]]] = {
            COLUMN_DOES_NOT_EXIST_REGEX: (
                __(
                    'We can\'t seem to resolve the column "%(column_name)s" at '
                    "line %(location)s.", ),
                SupersetErrorType.COLUMN_DOES_NOT_EXIST_ERROR,
                {},
            ),
            TABLE_DOES_NOT_EXIST_REGEX: (
                __(
                    'The table "%(table_name)s" does not exist. '
                    "A valid table must be used to run this query.", ),
                SupersetErrorType.TABLE_DOES_NOT_EXIST_ERROR,
                {},
            ),
            SCHEMA_DOES_NOT_EXIST_REGEX: (
                __(
                    'The schema "%(schema_name)s" does not exist. '
                    "A valid schema must be used to run this query.", ),
                SupersetErrorType.SCHEMA_DOES_NOT_EXIST_ERROR,
                {},
            ),
            CONNECTION_ACCESS_DENIED_REGEX: (
                __('Either the username "%(username)s" or the password is incorrect.'
                   ),
                SupersetErrorType.CONNECTION_ACCESS_DENIED_ERROR,
                {},
            ),
            CONNECTION_INVALID_HOSTNAME_REGEX: (
                __('The hostname "%(hostname)s" cannot be resolved.'),
                SupersetErrorType.CONNECTION_INVALID_HOSTNAME_ERROR,
                {},
            ),
            CONNECTION_HOST_DOWN_REGEX: (
                __('The host "%(hostname)s" might be down, and can\'t be '
                   "reached on port %(port)s."),
                SupersetErrorType.CONNECTION_HOST_DOWN_ERROR,
                {},
            ),
            CONNECTION_PORT_CLOSED_REGEX: (
                __('Port %(port)s on hostname "%(hostname)s" refused the connection.'
                   ),
                SupersetErrorType.CONNECTION_PORT_CLOSED_ERROR,
                {},
            ),
            CONNECTION_UNKNOWN_DATABASE_ERROR: (
                __('Unable to connect to catalog named "%(catalog_name)s".'),
                SupersetErrorType.CONNECTION_UNKNOWN_DATABASE_ERROR,
                {},
            ),
        }

    @classmethod
    def get_allow_cost_estimate(cls, extra: Dict[str, Any]) -> bool:
        version = extra.get("version")
        return version is not None and StrictVersion(version) >= StrictVersion(
            "0.319")

    @classmethod
    def update_impersonation_config(
        cls,
        connect_args: Dict[str, Any],
        uri: str,
        username: Optional[str],
    ) -> None:
        """
        Update a configuration dictionary
        that can set the correct properties for impersonating users
        :param connect_args: config to be updated
        :param uri: URI string
        :param impersonate_user: Flag indicating if impersonation is enabled
        :param username: Effective username
        :return: None
        """
        url = make_url(uri)
        backend_name = url.get_backend_name()

        # Must be Presto connection, enable impersonation, and set optional param
        # auth=LDAP|KERBEROS
        # Set principal_username=$effective_username
        if backend_name == "presto" and username is not None:
            connect_args["principal_username"] = username

    @classmethod
    def get_table_names(cls, database: "Database", inspector: Inspector,
                        schema: Optional[str]) -> List[str]:
        tables = super().get_table_names(database, inspector, schema)
        if not is_feature_enabled("PRESTO_SPLIT_VIEWS_FROM_TABLES"):
            return tables

        views = set(cls.get_view_names(database, inspector, schema))
        actual_tables = set(tables) - views
        return list(actual_tables)

    @classmethod
    def get_view_names(cls, database: "Database", inspector: Inspector,
                       schema: Optional[str]) -> List[str]:
        """Returns an empty list

        get_table_names() function returns all table names and view names,
        and get_view_names() is not implemented in sqlalchemy_presto.py
        https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py
        """
        if not is_feature_enabled("PRESTO_SPLIT_VIEWS_FROM_TABLES"):
            return []

        if schema:
            sql = ("SELECT table_name FROM information_schema.views "
                   "WHERE table_schema=%(schema)s")
            params = {"schema": schema}
        else:
            sql = "SELECT table_name FROM information_schema.views"
            params = {}

        engine = cls.get_engine(database, schema=schema)
        with closing(engine.raw_connection()) as conn:
            cursor = conn.cursor()
            cursor.execute(sql, params)
            results = cursor.fetchall()

        return [row[0] for row in results]

    @classmethod
    def _create_column_info(cls, name: str,
                            data_type: types.TypeEngine) -> Dict[str, Any]:
        """
        Create column info object
        :param name: column name
        :param data_type: column data type
        :return: column info object
        """
        return {"name": name, "type": f"{data_type}"}

    @classmethod
    def _get_full_name(cls, names: List[Tuple[str, str]]) -> str:
        """
        Get the full column name
        :param names: list of all individual column names
        :return: full column name
        """
        return ".".join(column[0] for column in names if column[0])

    @classmethod
    def _has_nested_data_types(cls, component_type: str) -> bool:
        """
        Check if string contains a data type. We determine if there is a data type by
        whitespace or multiple data types by commas
        :param component_type: data type
        :return: boolean
        """
        comma_regex = r",(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
        white_space_regex = r"\s(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
        return (re.search(comma_regex, component_type) is not None
                or re.search(white_space_regex, component_type) is not None)
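
    # Hedged examples: _has_nested_data_types("varchar") is False (no
    # delimiter outside quotes), while _has_nested_data_types("varchar, int")
    # is True; a comma enclosed in quotes is ignored by both regexes.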

    @classmethod
    def _split_data_type(cls, data_type: str, delimiter: str) -> List[str]:
        """
        Split data type based on given delimiter. Do not split the string if the
        delimiter is enclosed in quotes
        :param data_type: data type
        :param delimiter: string separator (i.e. open parenthesis, closed parenthesis,
               comma, whitespace)
        :return: list of strings after breaking it by the delimiter
        """
        return re.split(
            r"{}(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)".format(delimiter), data_type)

    @classmethod
    def _parse_structural_column(  # pylint: disable=too-many-locals
        cls,
        parent_column_name: str,
        parent_data_type: str,
        result: List[Dict[str, Any]],
    ) -> None:
        """
        Parse a row or array column
        :param result: list tracking the results
        """
        formatted_parent_column_name = parent_column_name
        # Quote the column name if there is a space
        if " " in parent_column_name:
            formatted_parent_column_name = f'"{parent_column_name}"'
        full_data_type = f"{formatted_parent_column_name} {parent_data_type}"
        original_result_len = len(result)
        # split on open parenthesis ( to get the structural
        # data type and its component types
        data_types = cls._split_data_type(full_data_type, r"\(")
        stack: List[Tuple[str, str]] = []
        for data_type in data_types:
            # split on closed parenthesis ) to track which component
            # types belong to what structural data type
            inner_types = cls._split_data_type(data_type, r"\)")
            for inner_type in inner_types:
                # We have finished parsing multiple structural data types
                if not inner_type and stack:
                    stack.pop()
                elif cls._has_nested_data_types(inner_type):
                    # split on comma , to get individual data types
                    single_fields = cls._split_data_type(inner_type, ",")
                    for single_field in single_fields:
                        single_field = single_field.strip()
                        # If component type starts with a comma, the first single field
                        # will be an empty string. Disregard this empty string.
                        if not single_field:
                            continue
                        # split on whitespace to get field name and data type
                        field_info = cls._split_data_type(single_field, r"\s")
                        # check if there is a structural data type within
                        # overall structural data type
                        column_spec = cls.get_column_spec(field_info[1])
                        column_type = column_spec.sqla_type if column_spec else None
                        if column_type is None:
                            column_type = types.String()
                            logger.info(
                                "Did not recognize type %s of column %s",
                                field_info[1],
                                field_info[0],
                            )
                        if field_info[1] == "array" or field_info[1] == "row":
                            stack.append((field_info[0], field_info[1]))
                            full_parent_path = cls._get_full_name(stack)
                            result.append(
                                cls._create_column_info(
                                    full_parent_path, column_type))
                        else:  # otherwise this field is a basic data type
                            full_parent_path = cls._get_full_name(stack)
                            column_name = "{}.{}".format(
                                full_parent_path, field_info[0])
                            result.append(
                                cls._create_column_info(
                                    column_name, column_type))
                    # If the component type ends with a structural data type, do not pop
                    # the stack. We have run across a structural data type within the
                    # overall structural data type. Otherwise, we have completely parsed
                    # through the entire structural data type and can move on.
                    if not (inner_type.endswith("array")
                            or inner_type.endswith("row")):
                        stack.pop()
                # We have an array of row objects (i.e. array(row(...)))
                elif inner_type in ("array", "row"):
                    # Push a dummy object to represent the structural data type
                    stack.append(("", inner_type))
                # We have an array of a basic data types(i.e. array(varchar)).
                elif stack:
                    # Because it is an array of a basic data type. We have finished
                    # parsing the structural data type and can move on.
                    stack.pop()
        # Unquote the column name if necessary
        if formatted_parent_column_name != parent_column_name:
            for index in range(original_result_len, len(result)):
                result[index]["name"] = result[index]["name"].replace(
                    formatted_parent_column_name, parent_column_name)

    @classmethod
    def _show_columns(cls, inspector: Inspector, table_name: str,
                      schema: Optional[str]) -> List[RowProxy]:
        """
        Show presto column names
        :param inspector: object that performs database schema inspection
        :param table_name: table name
        :param schema: schema name
        :return: list of column objects
        """
        quote = inspector.engine.dialect.identifier_preparer.quote_identifier
        full_table = quote(table_name)
        if schema:
            full_table = "{}.{}".format(quote(schema), full_table)
        columns = inspector.bind.execute(
            "SHOW COLUMNS FROM {}".format(full_table))
        return columns

    column_type_mappings = (
        (
            re.compile(r"^boolean.*", re.IGNORECASE),
            types.BOOLEAN,
            utils.GenericDataType.BOOLEAN,
        ),
        (
            re.compile(r"^tinyint.*", re.IGNORECASE),
            TinyInteger(),
            utils.GenericDataType.NUMERIC,
        ),
        (
            re.compile(r"^smallint.*", re.IGNORECASE),
            types.SMALLINT(),
            utils.GenericDataType.NUMERIC,
        ),
        (
            re.compile(r"^integer.*", re.IGNORECASE),
            types.INTEGER(),
            utils.GenericDataType.NUMERIC,
        ),
        (
            re.compile(r"^bigint.*", re.IGNORECASE),
            types.BIGINT(),
            utils.GenericDataType.NUMERIC,
        ),
        (
            re.compile(r"^real.*", re.IGNORECASE),
            types.FLOAT(),
            utils.GenericDataType.NUMERIC,
        ),
        (
            re.compile(r"^double.*", re.IGNORECASE),
            types.FLOAT(),
            utils.GenericDataType.NUMERIC,
        ),
        (
            re.compile(r"^decimal.*", re.IGNORECASE),
            types.DECIMAL(),
            utils.GenericDataType.NUMERIC,
        ),
        (
            re.compile(r"^varchar(\((\d+)\))*$", re.IGNORECASE),
            lambda match: types.VARCHAR(int(match[2]))
            if match[2] else types.String(),
            utils.GenericDataType.STRING,
        ),
        (
            re.compile(r"^char(\((\d+)\))*$", re.IGNORECASE),
            lambda match: types.CHAR(int(match[2]))
            if match[2] else types.CHAR(),
            utils.GenericDataType.STRING,
        ),
        (
            re.compile(r"^varbinary.*", re.IGNORECASE),
            types.VARBINARY(),
            utils.GenericDataType.STRING,
        ),
        (
            re.compile(r"^json.*", re.IGNORECASE),
            types.JSON(),
            utils.GenericDataType.STRING,
        ),
        (
            re.compile(r"^date.*", re.IGNORECASE),
            types.DATETIME(),
            utils.GenericDataType.TEMPORAL,
        ),
        (
            re.compile(r"^timestamp.*", re.IGNORECASE),
            types.TIMESTAMP(),
            utils.GenericDataType.TEMPORAL,
        ),
        (
            re.compile(r"^interval.*", re.IGNORECASE),
            Interval(),
            utils.GenericDataType.TEMPORAL,
        ),
        (
            re.compile(r"^time.*", re.IGNORECASE),
            types.Time(),
            utils.GenericDataType.TEMPORAL,
        ),
        (re.compile(r"^array.*",
                    re.IGNORECASE), Array(), utils.GenericDataType.STRING),
        (re.compile(r"^map.*",
                    re.IGNORECASE), Map(), utils.GenericDataType.STRING),
        (re.compile(r"^row.*",
                    re.IGNORECASE), Row(), utils.GenericDataType.STRING),
    )

    @classmethod
    def get_columns(cls, inspector: Inspector, table_name: str,
                    schema: Optional[str]) -> List[Dict[str, Any]]:
        """
        Get columns from a Presto data source. This includes handling row and
        array data types
        :param inspector: object that performs database schema inspection
        :param table_name: table name
        :param schema: schema name
        :return: a list of results that contain column info
                (i.e. column name and data type)
        """
        columns = cls._show_columns(inspector, table_name, schema)
        result: List[Dict[str, Any]] = []
        for column in columns:
            # parse column if it is a row or array
            if is_feature_enabled("PRESTO_EXPAND_DATA") and (
                    "array" in column.Type or "row" in column.Type):
                structural_column_index = len(result)
                cls._parse_structural_column(column.Column, column.Type,
                                             result)
                result[structural_column_index]["nullable"] = getattr(
                    column, "Null", True)
                result[structural_column_index]["default"] = None
                continue

            # otherwise column is a basic data type
            column_spec = cls.get_column_spec(column.Type)
            column_type = column_spec.sqla_type if column_spec else None
            if column_type is None:
                column_type = types.String()
                logger.info(
                    "Did not recognize type %s of column %s",
                    str(column.Type),
                    str(column.Column),
                )
            column_info = cls._create_column_info(column.Column, column_type)
            column_info["nullable"] = getattr(column, "Null", True)
            column_info["default"] = None
            result.append(column_info)
        return result

    @classmethod
    def _is_column_name_quoted(cls, column_name: str) -> bool:
        """
        Check if column name is in quotes
        :param column_name: column name
        :return: boolean
        """
        return column_name.startswith('"') and column_name.endswith('"')

    @classmethod
    def _get_fields(cls, cols: List[Dict[str, Any]]) -> List[ColumnClause]:
        """
        Format column clauses where names are in quotes and labels are specified
        :param cols: columns
        :return: column clauses
        """
        column_clauses = []
        # Column names are separated by periods. This regex will find periods in a
        # string if they are not enclosed in quotes because if a period is enclosed in
        # quotes, then that period is part of a column name.
        dot_pattern = r"""\.                # split on period
                          (?=               # look ahead
                          (?:               # create non-capture group
                          [^\"]*\"[^\"]*\"  # two quotes
                          )*[^\"]*$)        # end regex"""
        dot_regex = re.compile(dot_pattern, re.VERBOSE)
        for col in cols:
            # get individual column names
            col_names = re.split(dot_regex, col["name"])
            # quote each column name if it is not already quoted
            quoted_col_name = ".".join(
                col_name if cls._is_column_name_quoted(col_name)
                else f'"{col_name}"' for col_name in col_names)
            # create column clause in the format "name"."name" AS "name.name"
            column_clause = literal_column(quoted_col_name).label(col["name"])
            column_clauses.append(column_clause)
        return column_clauses
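
    # Hedged example: for a column named 'a."b.c"', _get_fields splits on the
    # unquoted period only, quotes the bare part, and emits the clause
    # "a"."b.c" labeled with the original name 'a."b.c"'.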

    @classmethod
    def select_star(  # pylint: disable=too-many-arguments
        cls,
        database: "Database",
        table_name: str,
        engine: Engine,
        schema: Optional[str] = None,
        limit: int = 100,
        show_cols: bool = False,
        indent: bool = True,
        latest_partition: bool = True,
        cols: Optional[List[Dict[str, Any]]] = None,
    ) -> str:
        """
        Include selecting properties of row objects. We cannot easily break arrays into
        rows, so render the whole array in its own row and skip columns that correspond
        to an array's contents.
        """
        cols = cols or []
        presto_cols = cols
        if is_feature_enabled("PRESTO_EXPAND_DATA") and show_cols:
            dot_regex = r"\.(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
            presto_cols = [
                col for col in presto_cols
                if not re.search(dot_regex, col["name"])
            ]
        return super().select_star(
            database,
            table_name,
            engine,
            schema,
            limit,
            show_cols,
            indent,
            latest_partition,
            presto_cols,
        )

    @classmethod
    def estimate_statement_cost(cls, statement: str,
                                cursor: Any) -> Dict[str, Any]:
        """
        Run a SQL query that estimates the cost of a given statement.

        :param statement: A single SQL statement
        :param database: Database instance
        :param cursor: Cursor instance
        :param username: Effective username
        :return: JSON response from Presto
        """
        sql = f"EXPLAIN (TYPE IO, FORMAT JSON) {statement}"
        cursor.execute(sql)

        # the output from Presto is a single column and a single row containing
        # JSON:
        #
        #   {
        #     ...
        #     "estimate" : {
        #       "outputRowCount" : 8.73265878E8,
        #       "outputSizeInBytes" : 3.41425774958E11,
        #       "cpuCost" : 3.41425774958E11,
        #       "maxMemory" : 0.0,
        #       "networkCost" : 3.41425774958E11
        #     }
        #   }
        result = json.loads(cursor.fetchone()[0])
        return result

    @classmethod
    def query_cost_formatter(
            cls, raw_cost: List[Dict[str, Any]]) -> List[Dict[str, str]]:
        """
        Format cost estimate.

        :param raw_cost: JSON estimate from Presto
        :return: Human readable cost estimate
        """
        def humanize(value: Any, suffix: str) -> str:
            try:
                value = int(value)
            except ValueError:
                return str(value)

            prefixes = ["K", "M", "G", "T", "P", "E", "Z", "Y"]
            prefix = ""
            to_next_prefix = 1000
            while value > to_next_prefix and prefixes:
                prefix = prefixes.pop(0)
                value //= to_next_prefix

            return f"{value} {prefix}{suffix}"

        cost = []
        columns = [
            ("outputRowCount", "Output count", " rows"),
            ("outputSizeInBytes", "Output size", "B"),
            ("cpuCost", "CPU cost", ""),
            ("maxMemory", "Max memory", "B"),
            ("networkCost", "Network cost", ""),
        ]
        for row in raw_cost:
            estimate: Dict[str, float] = row.get("estimate", {})
            statement_cost = {}
            for key, label, suffix in columns:
                if key in estimate:
                    statement_cost[label] = humanize(estimate[key],
                                                     suffix).strip()
            cost.append(statement_cost)

        return cost

    @classmethod
    def adjust_database_uri(cls,
                            uri: URL,
                            selected_schema: Optional[str] = None) -> None:
        database = uri.database
        if selected_schema and database:
            selected_schema = parse.quote(selected_schema, safe="")
            if "/" in database:
                database = database.split("/")[0] + "/" + selected_schema
            else:
                database += "/" + selected_schema
            uri.database = database

    @classmethod
    def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
        tt = target_type.upper()
        if tt == utils.TemporalType.DATE:
            return f"""from_iso8601_date('{dttm.date().isoformat()}')"""
        if tt == utils.TemporalType.TIMESTAMP:
            return f"""from_iso8601_timestamp('{dttm.isoformat(timespec="microseconds")}')"""  # pylint: disable=line-too-long,useless-suppression
        return None
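
    # Hedged example: convert_dttm("DATE", datetime(2024, 1, 31)) returns
    # "from_iso8601_date('2024-01-31')"; unhandled target types return None.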

    @classmethod
    def epoch_to_dttm(cls) -> str:
        return "from_unixtime({col})"

    @classmethod
    def get_all_datasource_names(
            cls, database: "Database",
            datasource_type: str) -> List[utils.DatasourceName]:
        datasource_df = database.get_df(
            "SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S "
            "ORDER BY concat(table_schema, '.', table_name)".format(
                datasource_type.upper()),
            None,
        )
        datasource_names: List[utils.DatasourceName] = []
        for _unused, row in datasource_df.iterrows():
            datasource_names.append(
                utils.DatasourceName(schema=row["table_schema"],
                                     table=row["table_name"]))
        return datasource_names

    @classmethod
    def expand_data(  # pylint: disable=too-many-locals
        cls, columns: List[Dict[Any, Any]],
        data: List[Dict[Any, Any]]) -> Tuple[List[Dict[Any, Any]], List[Dict[
            Any, Any]], List[Dict[Any, Any]]]:
        """
        We do not immediately display rows and arrays clearly in the data grid. This
        method separates out nested fields and data values to help clearly display
        structural columns.

        Example: ColumnA is a row(nested_obj varchar) and ColumnB is an array(int)
        Original data set = [
            {'ColumnA': ['a1'], 'ColumnB': [1, 2]},
            {'ColumnA': ['a2'], 'ColumnB': [3, 4]},
        ]
        Expanded data set = [
            {'ColumnA': ['a1'], 'ColumnA.nested_obj': 'a1', 'ColumnB': 1},
            {'ColumnA': '',     'ColumnA.nested_obj': '',   'ColumnB': 2},
            {'ColumnA': ['a2'], 'ColumnA.nested_obj': 'a2', 'ColumnB': 3},
            {'ColumnA': '',     'ColumnA.nested_obj': '',   'ColumnB': 4},
        ]
        :param columns: columns selected in the query
        :param data: original data set
        :return: list of all columns(selected columns and their nested fields),
                 expanded data set, listed of nested fields
        """
        if not is_feature_enabled("PRESTO_EXPAND_DATA"):
            return columns, data, []

        # process each column, unnesting ARRAY types and
        # expanding ROW types into new columns
        to_process = deque((column, 0) for column in columns)
        all_columns: List[Dict[str, Any]] = []
        expanded_columns = []
        current_array_level = None
        while to_process:
            column, level = to_process.popleft()
            if column["name"] not in [
                    column["name"] for column in all_columns
            ]:
                all_columns.append(column)

            # When unnesting arrays we need to keep track of how many extra rows
            # were added, for each original row. This is necessary when we expand
            # multiple arrays, so that the arrays after the first reuse the rows
            # added by the first. every time we change a level in the nested arrays
            # we reinitialize this.
            if level != current_array_level:
                unnested_rows: Dict[int, int] = defaultdict(int)
                current_array_level = level

            name = column["name"]
            values: Optional[Union[str, List[Any]]]

            if column["type"].startswith("ARRAY("):
                # keep processing array children; we append to the right so that
                # multiple nested arrays are processed breadth-first
                to_process.append((get_children(column)[0], level + 1))

                # unnest array objects data into new rows
                i = 0
                while i < len(data):
                    row = data[i]
                    values = row.get(name)
                    if isinstance(values, str):
                        row[name] = values = destringify(values)
                    if values:
                        # how many extra rows we need to unnest the data?
                        extra_rows = len(values) - 1

                        # how many rows were already added for this row?
                        current_unnested_rows = unnested_rows[i]

                        # add any necessary rows
                        missing = extra_rows - current_unnested_rows
                        for _ in range(missing):
                            data.insert(i + current_unnested_rows + 1, {})
                            unnested_rows[i] += 1

                        # unnest array into rows
                        for j, value in enumerate(values):
                            data[i + j][name] = value

                        # skip newly unnested rows
                        i += unnested_rows[i]

                    i += 1

            if column["type"].startswith("ROW("):
                # expand columns; we append them to the left so they are added
                # immediately after the parent
                expanded = get_children(column)
                to_process.extendleft(
                    (column, level) for column in expanded[::-1])
                expanded_columns.extend(expanded)

                # expand row objects into new columns
                for row in data:
                    values = row.get(name) or []
                    if isinstance(values, str):
                        values = cast(Optional[List[Any]], destringify(values))
                        row[name] = values
                    for value, col in zip(values or [], expanded):
                        row[col["name"]] = value

        data = [{k["name"]: row.get(k["name"], "")
                 for k in all_columns} for row in data]

        return all_columns, data, expanded_columns

    @classmethod
    def extra_table_metadata(cls, database: "Database", table_name: str,
                             schema_name: str) -> Dict[str, Any]:
        metadata = {}

        indexes = database.get_indexes(table_name, schema_name)
        if indexes:
            cols = indexes[0].get("column_names", [])
            full_table_name = table_name
            if schema_name and "." not in table_name:
                full_table_name = "{}.{}".format(schema_name, table_name)
            pql = cls._partition_query(full_table_name, database)
            col_names, latest_parts = cls.latest_partition(table_name,
                                                           schema_name,
                                                           database,
                                                           show_first=True)

            if not latest_parts:
                latest_parts = tuple([None] * len(col_names))
            metadata["partitions"] = {
                "cols": cols,
                "latest": dict(zip(col_names, latest_parts)),
                "partitionQuery": pql,
            }

        # flake8 is not matching `Optional[str]` to `Any` for some reason...
        metadata["view"] = cast(
            Any, cls.get_create_view(database, schema_name, table_name))

        return metadata

    @classmethod
    def get_create_view(cls, database: "Database", schema: str,
                        table: str) -> Optional[str]:
        """
        Return a CREATE VIEW statement, or `None` if not a view.

        :param database: Database instance
        :param schema: Schema name
        :param table: Table (view) name
        """
        # pylint: disable=import-outside-toplevel
        from pyhive.exc import DatabaseError

        engine = cls.get_engine(database, schema)
        with closing(engine.raw_connection()) as conn:
            cursor = conn.cursor()
            sql = f"SHOW CREATE VIEW {schema}.{table}"
            try:
                cls.execute(cursor, sql)
                polled = cursor.poll()

                while polled:
                    time.sleep(0.2)
                    polled = cursor.poll()
            except DatabaseError:  # not a VIEW
                return None
            rows = cls.fetch_data(cursor, 1)
        return rows[0][0]

    @classmethod
    def handle_cursor(cls, cursor: Any, query: Query,
                      session: Session) -> None:
        """Updates progress information"""
        query_id = query.id
        poll_interval = query.database.connect_args.get(
            "poll_interval", current_app.config["PRESTO_POLL_INTERVAL"])
        logger.info("Query %i: Polling the cursor for progress", query_id)
        polled = cursor.poll()
        # poll returns dict -- JSON status information or ``None``
        # if the query is done
        # https://github.com/dropbox/PyHive/blob/
        # b34bdbf51378b3979eaf5eca9e956f06ddc36ca0/pyhive/presto.py#L178
        while polled:
            # Update the object and wait for the kill signal.
            stats = polled.get("stats", {})

            query = session.query(type(query)).filter_by(id=query_id).one()
            if query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]:
                cursor.cancel()
                break

            if stats:
                state = stats.get("state")

                # if already finished, then stop polling
                if state == "FINISHED":
                    break

                completed_splits = float(stats.get("completedSplits") or 0)
                total_splits = float(stats.get("totalSplits") or 0)
                if total_splits and completed_splits:
                    progress = 100 * (completed_splits / total_splits)
                    logger.info("Query {} progress: {} / {} "  # pylint: disable=logging-format-interpolation
                                "splits".format(query_id, completed_splits,
                                                total_splits))
                    if progress > query.progress:
                        query.progress = progress
                    session.commit()
            time.sleep(poll_interval)
            logger.info("Query %i: Polling the cursor for progress", query_id)
            polled = cursor.poll()

    @classmethod
    def _extract_error_message(cls, ex: Exception) -> str:
        if (hasattr(ex, "orig")
                and type(ex.orig).__name__ == "DatabaseError"  # type: ignore
                and isinstance(ex.orig[0], dict)  # type: ignore
            ):
            error_dict = ex.orig[0]  # type: ignore
            return "{} at {}: {}".format(
                error_dict.get("errorName"),
                error_dict.get("errorLocation"),
                error_dict.get("message"),
            )
        if type(ex).__name__ == "DatabaseError" and hasattr(
                ex, "args") and ex.args:
            error_dict = ex.args[0]
            return error_dict.get("message", _("Unknown Presto Error"))
        return utils.error_msg_from_exception(ex)

    @classmethod
    def _partition_query(  # pylint: disable=too-many-arguments,too-many-locals
        cls,
        table_name: str,
        database: "Database",
        limit: int = 0,
        order_by: Optional[List[Tuple[str, bool]]] = None,
        filters: Optional[Dict[Any, Any]] = None,
    ) -> str:
        """Returns a partition query

        :param table_name: the name of the table to get partitions from
        :type table_name: str
        :param limit: the number of partitions to be returned
        :type limit: int
        :param order_by: a list of tuples of field name and a boolean
            that determines if that field should be sorted in descending
            order
        :type order_by: list of (str, bool) tuples
        :param filters: dict of field name and filter value combinations
        """
        limit_clause = "LIMIT {}".format(limit) if limit else ""
        order_by_clause = ""
        if order_by:
            l = []
            for field, desc in order_by:
                l.append(field + (" DESC" if desc else ""))
            order_by_clause = "ORDER BY " + ", ".join(l)

        where_clause = ""
        if filters:
            l = []
            for field, value in filters.items():
                l.append(f"{field} = '{value}'")
            where_clause = "WHERE " + " AND ".join(l)

        presto_version = database.get_extra().get("version")

        # Partition select syntax changed in v0.199, so check here.
        # Default to the new syntax if version is unset.
        partition_select_clause = (
            f'SELECT * FROM "{table_name}$partitions"' if not presto_version
            or StrictVersion(presto_version) >= StrictVersion("0.199") else
            f"SHOW PARTITIONS FROM {table_name}")

        sql = textwrap.dedent(f"""\
            {partition_select_clause}
            {where_clause}
            {order_by_clause}
            {limit_clause}
        """)
        return sql
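
    # Hedged example: _partition_query("logs", database, limit=1,
    # order_by=[("ds", True)]) renders roughly (for Presto >= 0.199 or an
    # unset version):
    #   SELECT * FROM "logs$partitions"
    #   ORDER BY ds DESC
    #   LIMIT 1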

    @classmethod
    def where_latest_partition(  # pylint: disable=too-many-arguments
        cls,
        table_name: str,
        schema: Optional[str],
        database: "Database",
        query: Select,
        columns: Optional[List[Dict[str, str]]] = None,
    ) -> Optional[Select]:
        try:
            col_names, values = cls.latest_partition(table_name,
                                                     schema,
                                                     database,
                                                     show_first=True)
        except Exception:  # pylint: disable=broad-except
            # table is not partitioned
            return None

        if values is None:
            return None

        column_names = {column.get("name") for column in columns or []}
        for col_name, value in zip(col_names, values):
            if col_name in column_names:
                query = query.where(Column(col_name) == value)
        return query

    @classmethod
    def _latest_partition_from_df(cls,
                                  df: pd.DataFrame) -> Optional[List[str]]:
        if not df.empty:
            return df.to_records(index=False)[0].item()
        return None

    @classmethod
    @cache_manager.data_cache.memoize(timeout=60)
    def latest_partition(
        cls,
        table_name: str,
        schema: Optional[str],
        database: "Database",
        show_first: bool = False,
    ) -> Tuple[List[str], Optional[List[str]]]:
        """Returns col name and the latest (max) partition value for a table

        :param table_name: the name of the table
        :param schema: schema / database / namespace
        :param database: database query will be run against
        :type database: models.Database
        :param show_first: displays the value for the first partitioning key
          if there are many partitioning keys
        :type show_first: bool

        >>> latest_partition('foo_table')
        (['ds'], ('2018-01-01',))
        """
        indexes = database.get_indexes(table_name, schema)
        if not indexes:
            raise SupersetTemplateException(
                f"Error getting partition for {schema}.{table_name}. "
                "Verify that this table has a partition.")

        if len(indexes[0]["column_names"]) < 1:
            raise SupersetTemplateException(
                "The table should have one partitioned field")

        if not show_first and len(indexes[0]["column_names"]) > 1:
            raise SupersetTemplateException(
                "The table should have a single partitioned field "
                "to use this function. You may want to use "
                "`presto.latest_sub_partition`")

        column_names = indexes[0]["column_names"]
        part_fields = [(column_name, True) for column_name in column_names]
        sql = cls._partition_query(table_name, database, 1, part_fields)
        df = database.get_df(sql, schema)
        return column_names, cls._latest_partition_from_df(df)

    @classmethod
    def latest_sub_partition(cls, table_name: str, schema: Optional[str],
                             database: "Database", **kwargs: Any) -> Any:
        """Returns the latest (max) partition value for a table

        A filtering criteria should be passed for all fields that are
        partitioned except for the field to be returned. For example,
        if a table is partitioned by (``ds``, ``event_type`` and
        ``event_category``) and you want the latest ``ds``, you'll want
        to provide a filter as keyword arguments for both
        ``event_type`` and ``event_category`` as in
        ``latest_sub_partition('my_table',
            event_category='page', event_type='click')``

        :param table_name: the name of the table, can be just the table
            name or a fully qualified table name as ``schema_name.table_name``
        :type table_name: str
        :param schema: schema / database / namespace
        :type schema: str
        :param database: database query will be run against
        :type database: models.Database

        :param kwargs: keyword arguments define the filtering criteria
            on the partition list. There can be many of these.
        :type kwargs: str
        >>> latest_sub_partition('sub_partition_table', event_type='click')
        '2018-01-01'
        """
        indexes = database.get_indexes(table_name, schema)
        part_fields = indexes[0]["column_names"]
        for k in kwargs.keys():  # pylint: disable=consider-iterating-dictionary
            if k not in part_fields:
                msg = f"Field [{k}] is not part of the partitioning key"
                raise SupersetTemplateException(msg)
        if len(kwargs.keys()) != len(part_fields) - 1:
            msg = ("A filter needs to be specified for {} out of the "
                   "{} fields.").format(
                       len(part_fields) - 1, len(part_fields))
            raise SupersetTemplateException(msg)

        for field in part_fields:
            if field not in kwargs.keys():
                field_to_return = field

        sql = cls._partition_query(table_name, database, 1,
                                   [(field_to_return, True)], kwargs)
        df = database.get_df(sql, schema)
        if df.empty:
            return ""
        return df.to_dict()[field_to_return][0]

    @classmethod
    @cache_manager.data_cache.memoize()
    def get_function_names(cls, database: "Database") -> List[str]:
        """
        Get a list of function names that are able to be called on the database.
        Used for SQL Lab autocomplete.

        :param database: The database to get functions for
        :return: A list of function names useable in the database
        """
        return database.get_df("SHOW FUNCTIONS")["Function"].tolist()

    @classmethod
    def is_readonly_query(cls, parsed_query: ParsedQuery) -> bool:
        """Pessimistic readonly, 100% sure statement won't mutate anything"""
        return super().is_readonly_query(
            parsed_query) or parsed_query.is_show()

    @classmethod
    def get_column_spec(
        cls,
        native_type: Optional[str],
        source: utils.ColumnTypeSource = utils.ColumnTypeSource.GET_TABLE,
        column_type_mappings: Tuple[Tuple[Pattern[str],
                                          Union[TypeEngine,
                                                Callable[[Match[str]],
                                                         TypeEngine]],
                                          GenericDataType, ],
                                    ..., ] = column_type_mappings,
    ) -> Union[ColumnSpec, None]:

        column_spec = super().get_column_spec(
            native_type, column_type_mappings=column_type_mappings)

        if column_spec:
            return column_spec

        return super().get_column_spec(native_type)

    @classmethod
    def has_implicit_cancel(cls) -> bool:
        """
        Return True if the live cursor handles the implicit cancelation of the query,
        False otherwise.

        :return: Whether the live cursor implicitly cancels the query
        :see: handle_cursor
        """

        return True
Example #10
    False,
    datetime.date(2013, 10, 10),
    datetime.datetime(2013, 10, 10, 11, 27, 16),
    datetime.time(11, 27, 16),
    "test_bytes",
]

SAMPLE_COLUMNS = [
    {"name": "integer", "type": types.Integer(), "nullable": True, "default": None},
    {"name": "timestamp", "type": types.TIMESTAMP(), "nullable": True, "default": None},
    {"name": "string", "type": types.String(), "nullable": True, "default": None},
    {"name": "float", "type": types.Float(), "nullable": True, "default": None},
    {"name": "numeric", "type": types.Numeric(), "nullable": True, "default": None},
    {"name": "boolean", "type": types.Boolean(), "nullable": True, "default": None},
    {"name": "date", "type": types.DATE(), "nullable": True, "default": None},
    {"name": "datetime", "type": types.DATETIME(), "nullable": True, "default": None},
    {"name": "time", "type": types.TIME(), "nullable": True, "default": None},
    {"name": "bytes", "type": types.BINARY(), "nullable": True, "default": None},
    {
        "name": "record",
        "type": types.JSON(),
        "nullable": True,
        "default": None,
        "comment": "In Standard SQL this data type is a STRUCT<name STRING, age INT64>.",
    },
    {"name": "record.name", "type": types.String(), "nullable": True, "default": None},
    {"name": "record.age", "type": types.Integer(), "nullable": True, "default": None},
    {"name": "nested_record", "type": types.JSON(), "nullable": True, "default": None},
    {
        "name": "nested_record.record",
        "type": types.JSON(),
Example #11
 },
 {
     "name": "boolean",
     "type": types.Boolean(),
     "nullable": True,
     "default": None
 },
 {
     "name": "date",
     "type": types.DATE(),
     "nullable": True,
     "default": None
 },
 {
     "name": "datetime",
     "type": types.DATETIME(),
     "nullable": True,
     "default": None
 },
 {
     "name": "time",
     "type": types.TIME(),
     "nullable": True,
     "default": None
 },
 {
     "name": "bytes",
     "type": types.BINARY(),
     "nullable": True,
     "default": None
 },
Example #12
class VerticaDialect(PGDialect):
    """ Vertica Dialect using a vertica-python connection and PGDialect """

    name = 'vertica'
    driver = 'vertica_python'

    # UPDATE functionality works with the following option set to False
    supports_sane_rowcount = False

    supports_unicode_statements = True
    supports_unicode_binds = True
    supports_native_decimal = True

    ischema_names = {
        'BINARY': sqltypes.BLOB,
        'VARBINARY': sqltypes.BLOB,
        'LONG VARBINARY': sqltypes.BLOB,
        'BYTEA': sqltypes.BLOB,
        'RAW': sqltypes.BLOB,

        'BOOLEAN': sqltypes.BOOLEAN,

        'CHAR': sqltypes.CHAR,
        'VARCHAR': sqltypes.VARCHAR,
        'LONG VARCHAR': sqltypes.VARCHAR,
        'VARCHAR2': sqltypes.VARCHAR,
        'TEXT': sqltypes.VARCHAR,
        'UUID': sqltypes.VARCHAR,

        'DATE': sqltypes.DATE(),
        'DATETIME': sqltypes.DATETIME(),
        'SMALLDATETIME': sqltypes.DATETIME(),
        'TIME': sqltypes.TIME(),
        'TIMETZ': sqltypes.TIME(timezone=True),
        'TIME WITH TIMEZONE': sqltypes.TIME(timezone=True),
        'TIMESTAMP': sqltypes.TIMESTAMP(),
        'TIMESTAMPTZ': sqltypes.TIMESTAMP(timezone=True),
        'TIMESTAMP WITH TIMEZONE': sqltypes.TIMESTAMP(timezone=True),

        'INTERVAL': INTERVAL,

        # All the same internal representation
        'FLOAT': sqltypes.FLOAT,
        'FLOAT8': sqltypes.FLOAT,
        'DOUBLE': sqltypes.FLOAT,
        'REAL': sqltypes.FLOAT,

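        # Vertica stores every integer flavour as a single 64-bit integer
        # internally, hence the one-to-one mapping below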
        'INT': sqltypes.INTEGER,
        'INTEGER': sqltypes.INTEGER,
        'INT8': sqltypes.INTEGER,
        'BIGINT': sqltypes.INTEGER,
        'SMALLINT': sqltypes.INTEGER,
        'TINYINT': sqltypes.INTEGER,

        'NUMERIC': sqltypes.NUMERIC,
        'DECIMAL': sqltypes.NUMERIC,
        'NUMBER': sqltypes.NUMERIC,
        'MONEY': sqltypes.NUMERIC,
    }

    # Skip all the version-specific stuff in PGDialect's initialize method
    # (Vertica versions don't match PostgreSQL's feature-wise)
    def initialize(self, connection):
        super(PGDialect, self).initialize(connection)
        self.implicit_returning = False

    def is_disconnect(self, e, connection, cursor):
        return (
            isinstance(e, self.dbapi.Error) and
            connection is not None and
            connection.closed()
        )

    @classmethod
    def dbapi(cls):
        vp_module = __import__('vertica_python')

        # sqlalchemy expects to find the base Error class here,
        # so we need to alias it
        vp_module.Error = vp_module.errors.Error

        return vp_module


    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user')
        opts.update(url.query)
        return [[], opts]


    def has_schema(self, connection, schema):
        query = ("SELECT EXISTS (SELECT schema_name FROM v_catalog.schemata "
                 "WHERE schema_name='%s')") % (schema)
        rs = connection.execute(query)
        return bool(rs.scalar())


    def has_table(self, connection, table_name, schema=None):
        if schema is None:
            schema = self._get_default_schema_name(connection)
        query = ("SELECT EXISTS ("
                 "SELECT table_name FROM v_catalog.all_tables "
                 "WHERE schema_name='%s' AND "
                 "table_name='%s'"
                 ")") % (schema, table_name)
        rs = connection.execute(query)
        return bool(rs.scalar())


    def has_sequence(self, connection, sequence_name, schema=None):
        if schema is None:
            schema = self._get_default_schema_name(connection)
        query = ("SELECT EXISTS ("
                 "SELECT sequence_name FROM v_catalog.sequences "
                 "WHERE sequence_schema='%s' AND "
                 "sequence_name='%s'"
                 ")") % (schema, sequence_name)
        rs = connection.execute(query)
        return bool(rs.scalar())


    def has_type(self, connection, type_name, schema=None):
        query = ("SELECT EXISTS ("
                 "SELECT type_name FROM v_catalog.types "
                 "WHERE type_name='%s'"
                 ")") % (type_name)
        rs = connection.execute(query)
        return bool(rs.scalar())


    def _get_server_version_info(self, connection):
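        # e.g. "Vertica Analytic Database v9.2.1-0" -> (9, 2, 1)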
        v = connection.scalar("select version()")
        m = re.match(
            r'.*Vertica Analytic Database '
            r'v(\d+)\.(\d+)\.(\d+).*',
            v)
        if not m:
            raise AssertionError(
                "Could not determine version from string '%s'" % v)
        return tuple([int(x) for x in m.group(1, 2, 3) if x is not None])


    def _get_default_schema_name(self, connection):
        return connection.scalar("select current_schema()")


    @reflection.cache
    def get_schema_names(self, connection, **kw):
        query = "SELECT schema_name FROM v_catalog.schemata ORDER BY schema_name"
        rs = connection.execute(query)
        return [row[0] for row in rs if not row[0].startswith('v_')]


    @reflection.cache
    def get_table_comment(self, connection, table_name, schema=None, **kw):
        schema_conditional = (
            "" if schema is None else "AND object_schema = '{schema}'".format(schema=schema))
        query = """
        SELECT comment FROM v_catalog.comments WHERE object_type = 'TABLE'
        AND object_name = '{table_name}'
        {schema_conditional}
        """.format(table_name=table_name, schema_conditional=schema_conditional)
        rs = connection.execute(query)
        return {"text": rs.scalar()}


    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        s = ["SELECT table_name FROM v_catalog.tables"]
        if schema is not None:
            s.append("WHERE table_schema = '%s'" % (schema,))
        s.append("ORDER BY table_schema, table_name")

        rs = connection.execute(' '.join(s))
        return [row[0] for row in rs]


    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        s = ["SELECT table_name FROM v_catalog.views"]
        if schema is not None:
            s.append("WHERE table_schema = '%s'" % (schema,))
        s.append("ORDER BY table_schema, table_name")

        rs = connection.execute(' '.join(s))
        return [row[0] for row in rs]

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        schema_conditional = (
            "" if schema is None else "AND table_schema = '{schema}'".format(schema=schema))

        pk_column_select = """
        SELECT column_name FROM v_catalog.primary_keys
        WHERE table_name = '{table_name}'
        AND constraint_type = 'p'
        {schema_conditional}
        """.format(table_name=table_name, schema_conditional=schema_conditional)
        primary_key_columns = tuple(row[0] for row in connection.execute(pk_column_select))
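        # Table columns and view columns are UNIONed together; view columns
        # get synthesized default/nullable/identity values in the second leg.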
        column_select = """
        SELECT
          column_name,
          data_type,
          column_default,
          is_nullable,
          is_identity,
          ordinal_position
        FROM v_catalog.columns
        WHERE table_name = '{table_name}'
        {schema_conditional}
        UNION
        SELECT
          column_name,
          data_type,
          '' as column_default,
          true as is_nullable,
          false as is_identity,
          ordinal_position
        FROM v_catalog.view_columns
        WHERE table_name = '{table_name}'
        {schema_conditional}
        ORDER BY ordinal_position ASC
        """.format(table_name=table_name, schema_conditional=schema_conditional)
        colobjs = []
        # Materialize the result once; the per-row sequence lookups below
        # reuse the same connection.
        column_select_results = list(connection.execute(column_select))
        for row in column_select_results:
            sequence_info = connection.execute("""
                SELECT
                sequence_name as name,
                minimum as start,
                increment_by as increment
                FROM v_catalog.sequences
                WHERE identity_table_name = '{table_name}'
                {schema_conditional}
                """.format(
                    table_name=table_name,
                    schema_conditional=(
                        "" if schema is None
                        else "AND sequence_schema = '{schema}'".format(schema=schema)
                    )
                )
            ).first() if row.is_identity else None

            colobj = self._get_column_info(
                row.column_name,
                row.data_type,
                row.is_nullable,
                row.column_default,
                row.is_identity,
                (row.column_name in primary_key_columns),
                sequence_info
            )
            if colobj:
                colobjs.append(colobj)
        return colobjs

    def _get_column_info(self, name, data_type, is_nullable, default, is_identity, is_primary_key, sequence):
        m = re.match(r'(\w[ \w]*\w)(?:\((\d+)(?:,(\d+))?\))?', data_type)
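        # e.g. 'NUMERIC(10,2)' -> ('NUMERIC', '10', '2');
        #      'TIMESTAMP WITH TIMEZONE' -> ('TIMESTAMP WITH TIMEZONE', None, None)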
        if not m:
            raise ValueError("data type string not parseable for type name and optional parameters: %s" % data_type)
        typename = m.group(1).upper()
        typeobj = self.ischema_names[typename]
        typeargs = []
        typekwargs = {}
        for arg_group in (2, 3):
            try:
                param = m.group(arg_group)
                if param:
                    typeargs.append(int(param))
            except (TypeError, ValueError):
                pass

        if any(tz_string in typename for tz_string in ('TIMEZONE', 'TIME ZONE', 'TIMESTAMPTZ')):
            typekwargs['timezone'] = True

        if callable(typeobj):
            typeobj = typeobj(*typeargs, **typekwargs)

        column_info = {
            'name': name,
            'type': typeobj,
            'nullable': is_nullable,
            'default': default,
            'primary_key': (is_primary_key or is_identity)
        }
        if is_identity:
            column_info['autoincrement'] = True
        if sequence:
            column_info['sequence'] = dict(sequence)
        return column_info

    @reflection.cache
    def get_unique_constraints(self, connection, table_name, schema=None, **kw):
        query = ("SELECT constraint_id, constraint_name, column_name "
                 "FROM v_catalog.constraint_columns "
                 "WHERE table_name = '" + table_name + "'")
        if schema is not None:
            query += " AND table_schema = '" + schema + "'"
        query += " AND constraint_type = 'u'"

        # Materialize the rows; a DBAPI cursor can only be iterated once.
        rows = connection.execute(query).fetchall()

        result_dict = {}
        for row in rows:
            result_dict.setdefault(row[1], []).append(row[2])

        return [
            {"name": name, "column_names": columns}
            for name, columns in result_dict.items()
        ]

    @reflection.cache
    def get_check_constraints(self, connection, table_name, schema=None, **kw):
        query = """
        SELECT
            cons.constraint_name as name,
            cons.predicate as src
        FROM
            v_catalog.table_constraints cons
        WHERE
            cons.constraint_type = 'c'
          AND
            cons.table_id = (
                SELECT
                    i.table_id
                FROM
                    v_catalog.tables i
                WHERE
                    i.table_name='{table_name}'
                {schema_clause}
            )
        """.format(table_name=table_name, schema_clause=(
            "" if schema is None else "AND i.table_schema ='{schema}'".format(schema=schema)))

        return [
            {
                'name': name,
                'sqltext': src[1:-1]
            } for name, src in connection.execute(query).fetchall()
        ]

    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        query = ("SELECT constraint_id, constraint_name, column_name "
                 "FROM v_catalog.constraint_columns "
                 "WHERE constraint_type = 'p' AND table_name = '" + table_name + "'")

        if schema is not None:
            query += " AND table_schema = '" + schema + "'"

        cols = set()
        name = None
        for row in connection.execute(query):
            name = row[1] if name is None else name
            cols.add(row[2])

        return {"constrained_columns": list(cols), "name": name}


    # constraints are enforced on selects, but returning nothing for these
    # methods allows table introspection to work
    def get_foreign_keys(self, connection, table_name, schema, **kw):
        return []


    def get_indexes(self, connection, table_name, schema, **kw):
        return []


    # Disable index creation since that's not a thing in Vertica.
    def visit_create_index(self, create):
        return None
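
# Usage sketch (illustrative; not part of the original dialect code). It
# assumes this dialect is registered under the "vertica+vertica_python"
# entry point; the connection URL below is a placeholder.
from sqlalchemy import create_engine, inspect

engine = create_engine(
    "vertica+vertica_python://user:password@localhost:5433/dbname")

# Reflection calls go through the methods defined above (get_schema_names,
# get_columns, etc.); running them requires a reachable Vertica server.
insp = inspect(engine)
print(insp.get_schema_names())
print(insp.get_columns("my_table", schema="public"))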
Exemplo n.º 13
0
def osm_delineation(param):
    """

    """
    osm.op_endpoint = param['osm']['op_endpoint']

    ########################################
    ### Load data

    # run_time_start = pd.Timestamp.today().strftime('%Y-%m-%d %H:%M:%S')
    # print(run_time_start)

    ## Read in source data
    print('--Reading in source data...')

    json_lst = get_json_from_api(param['plan_limits']['api_url'], param['plan_limits']['api_headers'])
    json_lst1 = json_filters(json_lst, only_operative=True, only_reach_points=True)
    gjson1, hydro_units, pts_alt, sg1 = geojson_convert(json_lst1)

    combined_zones1 = [j for j in json_lst if j['id'] == param['other']['combined_zones_id']][0]
    combined_zones2 = [s['id'] for s in combined_zones1['spatialUnit']]

    no_limit1 = [j for j in json_lst if j['id'] == param['other']['no_limit_id']][0]
    no_limit2 = [s['id'] for s in no_limit1['spatialUnit']][0]

    # pts = mssql.rd_sql(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['pts']['table'], [param['gis_waterdata']['pts']['id']], where_in={param['gis_waterdata']['pts']['id']: pts_alt.id.unique().tolist()}, geo_col=True, username=param['gis_waterdata']['username'], password=param['gis_waterdata']['password'], rename_cols=[id_col])
    pts = mssql.rd_sql(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['pts']['table'], [param['gis_waterdata']['pts']['id']], where_in={param['gis_waterdata']['pts']['id']: pts_alt.id.unique().tolist()}, geo_col=True, rename_cols=[id_col])

    ## Point checks
    excluded_points = pts_alt[~pts_alt.id.isin(pts.SpatialUnitId)].copy()
    if not excluded_points.empty:
        print('These points are in the Plan Limits db, but have no GIS data:')
        print(excluded_points)

    bad_geo = pts[pts.geom_type != 'Point']
    if not bad_geo.empty:
        print('These points do not have a "Point" geometry (likely "MultiPoint"):')
        print(bad_geo)
        pts = pts[~pts.SpatialUnitId.isin(bad_geo.SpatialUnitId)].copy()

    cwms1 = mssql.rd_sql(param['gis_prod']['server'], param['gis_prod']['database'], param['gis_prod']['cwms']['table'], param['gis_prod']['cwms']['col_names'], rename_cols=param['gis_prod']['cwms']['rename_cols'], geo_col=True, username=param['gis_prod']['username'], password=param['gis_prod']['password'])

    # zones3 = mssql.rd_sql(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['allo_zones']['table'], [param['gis_waterdata']['allo_zones']['id']], where_in={param['gis_waterdata']['allo_zones']['id']: combined_zones2}, username=param['gis_waterdata']['username'], password=param['gis_waterdata']['password'], geo_col=True, rename_cols=[id_col])
    zones3 = mssql.rd_sql(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['allo_zones']['table'], [param['gis_waterdata']['allo_zones']['id']], where_in={param['gis_waterdata']['allo_zones']['id']: combined_zones2}, geo_col=True, rename_cols=[id_col])

    pts['geometry'] = pts.geometry.simplify(1)

    #######################################
    ### Run query
    print('--Pull out the waterways from OSM')

    pts1, bad_points = osm.get_nearest_waterways(pts, id_col, param['other']['search_distance'], 'all')

    waterways, nodes = osm.get_waterways(pts1, 'all')

    print('--Delineating Reaches from OSM')

    site_delin = osm.waterway_delineation(waterways, True)
    osm_delin = osm.to_osm(site_delin, nodes)
    gdf1 = osm.to_gdf(osm_delin)

    gdf2 = gdf1.to_crs(pts.crs)

    gdf3 = gdf2.merge(pts1.rename(columns={'id': 'start_node'})[['start_node', id_col]], on='start_node')

    print('--Pulling out all of Canterbury...')

    cant2 = osm.get_waterways_within_boundary(cwms1, buffer=0, waterway_type='all')

    combined1, poly1 = vector.pts_poly_join(cant2, zones3, id_col, op='intersects')
    gdf3 = gdf3[~gdf3.way_id.isin(combined1.way_id.unique())].copy()

    all_others1 = cant2[~cant2.way_id.isin(combined1.way_id)]
    all_others2 = all_others1[~all_others1.way_id.isin(gdf3.way_id.unique().tolist())].copy()
    all_others2[id_col] = no_limit2

    print('--Combine all reach data')

    gdf4 = pd.concat([gdf3, combined1, all_others2]).reset_index(drop=True)

    gdf4.rename(columns={'way_id': 'OSMWaterwayId', 'waterway': 'OSMWaterwayType', 'name': 'RiverName', 'start_node': 'StartNode'}, inplace=True)
    gdf4['OSMWaterwayId'] = gdf4['OSMWaterwayId'].astype('int64')

    print('--Compare existing reaches in the database')

    cols = gdf4.columns.drop('geometry').tolist()
    cols.extend(['OBJECTID'])

    # old1 = mssql.rd_sql(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], cols, username=param['gis_waterdata']['username'], password=param['gis_waterdata']['password'], geo_col=True)
    old1 = mssql.rd_sql(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], cols, geo_col=True)

    comp_dict = util.compare_dfs(old1.drop('OBJECTID', axis=1), gdf4, on=['SpatialUnitId', 'OSMWaterwayId'])
    new1 = comp_dict['new'].copy()
    diff1 = comp_dict['diff'].copy()
    rem1 = comp_dict['remove'][['SpatialUnitId', 'OSMWaterwayId']].copy()

    print('--Save to database')

    sql_dtypes = {
        'StartNode': types.BIGINT(),
        'OSMWaterwayId': types.BIGINT(),
        'RiverName': types.NVARCHAR(200),
        'OSMWaterwayType': types.NVARCHAR(30),
        'SpatialUnitId': types.NVARCHAR(8),
        'SHAPE_': types.VARCHAR(),
        'OBJECTID': types.INT(),
        'ModifiedDate': types.DATETIME(),
    }

    if not new1.empty:
        max_id = old1['OBJECTID'].max() + 1

        new1['ModifiedDate'] = today_str
        new1['OBJECTID'] = list(range(max_id, max_id + len(new1)))
        new1.rename(columns={'geometry': 'SHAPE'}, inplace=True)

        # mssql.update_table_rows(new1, param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], on=['SpatialUnitId', 'OSMWaterwayId'], index=False, append=True, username=param['gis_waterdata']['username'], password=param['gis_waterdata']['password'], geo_col='SHAPE', clear_table=False, dtype=sql_dtypes)
        mssql.update_table_rows(new1, param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], on=['SpatialUnitId', 'OSMWaterwayId'], index=False, append=True, geo_col='SHAPE', clear_table=False, dtype=sql_dtypes)

    if not diff1.empty:
        diff2 = pd.merge(diff1, old1[['SpatialUnitId', 'OSMWaterwayId', 'OBJECTID']], on=['SpatialUnitId', 'OSMWaterwayId'])
        diff2['ModifiedDate'] = today_str
        diff2.rename(columns={'geometry': 'SHAPE'}, inplace=True)

        # mssql.update_table_rows(diff2, param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], on=['SpatialUnitId', 'OSMWaterwayId'], index=False, append=True, username=param['gis_waterdata']['username'], password=param['gis_waterdata']['password'], geo_col='SHAPE', clear_table=False, dtype=sql_dtypes)
        mssql.update_table_rows(diff2, param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], on=['SpatialUnitId', 'OSMWaterwayId'], index=False, append=True, geo_col='SHAPE', clear_table=False, dtype=sql_dtypes)

    if not rem1.empty:
        # mssql.del_table_rows(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], pk_df=rem1, username=param['gis_waterdata']['username'], password=param['gis_waterdata']['password'])
        mssql.del_table_rows(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], pk_df=rem1)

    return gdf4, excluded_points, bad_geo, bad_points
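
# A minimal sketch of the `param` structure this function expects, inferred
# from the keys it accesses above; every value below is a placeholder.
example_param = {
    'osm': {'op_endpoint': 'https://overpass.example/api'},
    'plan_limits': {'api_url': 'https://plans.example/api', 'api_headers': {}},
    'other': {
        'combined_zones_id': '...',
        'no_limit_id': '...',
        'search_distance': 1000,
    },
    'gis_waterdata': {
        'server': '...', 'database': '...',
        'pts': {'table': '...', 'id': '...'},
        'allo_zones': {'table': '...', 'id': '...'},
        'reaches': {'table': '...'},
    },
    'gis_prod': {
        'server': '...', 'database': '...',
        'username': '...', 'password': '...',
        'cwms': {'table': '...', 'col_names': [], 'rename_cols': []},
    },
}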
Exemplo n.º 14
0
    False,
    datetime.date(2013, 10, 10),
    datetime.datetime(2013, 10, 10, 11, 27, 16),
    datetime.time(11, 27, 16),
    'test_bytes'
]

SAMPLE_COLUMNS = [
    {'name': 'integer', 'type': types.Integer(), 'nullable': True, 'default': None},
    {'name': 'timestamp', 'type': types.TIMESTAMP(), 'nullable': True, 'default': None},
    {'name': 'string', 'type': types.String(), 'nullable': True, 'default': None},
    {'name': 'float', 'type': types.Float(), 'nullable': True, 'default': None},
    {'name': 'numeric', 'type': types.DECIMAL(), 'nullable': True, 'default': None},
    {'name': 'boolean', 'type': types.Boolean(), 'nullable': True, 'default': None},
    {'name': 'date', 'type': types.DATE(), 'nullable': True, 'default': None},
    {'name': 'datetime', 'type': types.DATETIME(), 'nullable': True, 'default': None},
    {'name': 'time', 'type': types.TIME(), 'nullable': True, 'default': None},
    {'name': 'bytes', 'type': types.BINARY(), 'nullable': True, 'default': None},
    {'name': 'record', 'type': types.JSON(), 'nullable': True, 'default': None},
    {'name': 'record.name', 'type': types.String(), 'nullable': True, 'default': None},
    {'name': 'record.age', 'type': types.Integer(), 'nullable': True, 'default': None},
    {'name': 'nested_record', 'type': types.JSON(), 'nullable': True, 'default': None},
    {'name': 'nested_record.record', 'type': types.JSON(), 'nullable': True, 'default': None},
    {'name': 'nested_record.record.name', 'type': types.String(), 'nullable': True, 'default': None},
    {'name': 'nested_record.record.age', 'type': types.Integer(), 'nullable': True, 'default': None},
    {'name': 'array', 'type': types.ARRAY(types.Integer()), 'nullable': True, 'default': None},
]
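
# Dotted names such as 'record.name' and 'nested_record.record.age' describe
# subfields of a STRUCT/RECORD column flattened into the column list (compare
# the STRUCT comment on "record" in the earlier SAMPLE_COLUMNS example).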


@pytest.fixture(scope='session')
def engine():