# imports assumed by this snippet
import sqlalchemy
from sqlalchemy import types as sqltypes


def write_to_mysqldb(df, result_tb='data_health_examination', user='******',
                     psw='liangzhi123', host='192.168.1.22', db='datamining',
                     if_exists='append',
                     dtype={u'db_name': sqltypes.NVARCHAR(length=255),
                            u'table_name': sqltypes.NVARCHAR(length=255),
                            u'part_date': sqltypes.NVARCHAR(length=255),
                            u'create_date': sqltypes.DateTime(),
                            u'field_name': sqltypes.NVARCHAR(length=255),
                            u'field_type': sqltypes.NVARCHAR(length=255),
                            u'missing_value_num': sqltypes.BigInteger(),
                            u'missing_value_prop': sqltypes.Float(),
                            u'other_missing_value_num': sqltypes.BigInteger(),
                            u'other_missing_value_prop': sqltypes.Float(),
                            u'abnormal_value_index': sqltypes.Text(),
                            u'abnormal_value_num': sqltypes.BigInteger(),
                            u'abnormal_value_prop': sqltypes.Float(),
                            u'if_exist_probability_plot': sqltypes.Integer(),
                            u'probability_plot_result': sqltypes.NVARCHAR(length=255),
                            u'probability_plot_script': sqltypes.Text(),
                            u'if_exist_frequency_plot': sqltypes.Integer(),
                            u'frequency_plot_result': sqltypes.NVARCHAR(length=255),
                            u'frequency_plot_script': sqltypes.Text(),
                            u'if_exist_rules': sqltypes.Integer(),
                            u'show_Chn_rules': sqltypes.NVARCHAR(length=255),
                            u'show_Eng_rules': sqltypes.NVARCHAR(length=255),
                            u'rules_result': sqltypes.NVARCHAR(length=255)}):
    engine = sqlalchemy.create_engine(
        "mysql+mysqldb://%s:%s@%s/%s?%s" % (user, psw, host, db, 'charset=utf8'))
    df.to_sql(result_tb, engine, if_exists=if_exists, index=False, dtype=dtype)
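A hypothetical call of the function above, as a minimal sketch: the DataFrame columns are illustrative (any subset of the dtype keys works), and the connection defaults are the ones baked into the signature.

import pandas as pd

# illustrative single health-check row; column names are assumptions drawn
# from the dtype mapping above
df = pd.DataFrame([{
    u'db_name': 'datamining',
    u'table_name': 'orders',
    u'part_date': '20200101',
    u'field_name': 'amount',
    u'field_type': 'double',
    u'missing_value_num': 0,
    u'missing_value_prop': 0.0,
}])
write_to_mysqldb(df)  # appends to data_health_examination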
class Reply(Base):
    __tablename__ = 'reply'

    id = Column(types.Integer, primary_key=True)
    tweet_id = Column(types.BigInteger())
    reply_text = Column(types.Text)
    src_id = Column(types.BigInteger())
    src_text = Column(types.Text)
    is_analyze = Column(types.SmallInteger, default=False)

    def __repr__(self):
        # note: the last field must be src_text; the class has no `text` attribute
        return "<Reply(tweet_id='%d', reply_text='%s', src_id='%s', src_text='%s')>" % (
            self.tweet_id, self.reply_text, self.src_id, self.src_text)
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    stats = schema.Table(
        'apistats', meta,
        schema.Column('id', types.Integer(), primary_key=True),
        schema.Column('host', types.String(80)),
        schema.Column('request_count', types.BigInteger()),
        schema.Column('error_count', types.BigInteger()),
        schema.Column('average_response_time', types.Float()),
        schema.Column('requests_per_tenant', types.Text()),
        schema.Column('requests_per_second', types.Float()),
        schema.Column('errors_per_second', types.Float()),
        schema.Column('created', types.DateTime, nullable=False),
        schema.Column('updated', types.DateTime, nullable=False))
    stats.create()
class Vehicle(Base):
    __tablename__ = "vehicles"

    id = Column(types.Integer(), Sequence("seq_id"), primary_key=True,
                doc="The primary key")
    name = Column(types.String(length=50), doc="The name of the vehicle")
    type = Column(types.Enum(VehicleType, name="vehicle_type"), nullable=False)
    created_at = Column(types.DateTime())
    paint = Column(types.Enum(*COLORS, name="colors"))
    is_used = Column(types.Boolean)

    @property
    def lower_name(self):
        return self.name.lower()

    _engine_cylinders = Column("engine_cylinders", types.BigInteger())
    _engine_displacement = Column("engine_displacement",
                                  types.Numeric(asdecimal=True, precision=10, scale=2))
    _engine_type = Column("engine_type", types.String(length=25))
    _engine_fuel_type = Column("engine_fuel_type", types.String(length=10))
    engine = orm.composite(Engine, _engine_cylinders, _engine_displacement,
                           _engine_type, _engine_fuel_type)

    _owner_id = Column("owner_id", types.Integer(), ForeignKey(Owner.id))
    owner = orm.relationship(Owner, backref="vehicles")

    def clean_name(self):
        if self.name == "invalid":
            raise ValidationError("invalid vehicle name")
class Vehicle(Base):
    __tablename__ = 'vehicles'

    id = Column(types.Integer(), Sequence('seq_id'), primary_key=True,
                doc='The primary key')
    name = Column(types.String(), doc='The name of the vehicle')
    type = Column(types.Enum(VehicleType), nullable=False)
    created_at = Column(types.DateTime())
    paint = Column(types.Enum(*COLORS))
    is_used = Column(types.Boolean)

    @property
    def lower_name(self):
        return self.name.lower()

    _engine_cylinders = Column('engine_cylinders', types.BigInteger())
    _engine_displacement = Column(
        'engine_displacement',
        types.Numeric(asdecimal=True, precision=10, scale=2))
    _engine_type = Column('engine_type', types.String(length=25))
    _engine_fuel_type = Column('engine_fuel_type', types.String(length=10))
    engine = orm.composite(Engine, _engine_cylinders, _engine_displacement,
                           _engine_type, _engine_fuel_type)

    _owner_id = Column('owner_id', types.Integer(), ForeignKey(Owner.id))
    owner = orm.relationship(Owner, backref='vehicles')
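Both Vehicle examples above pass an Engine class to orm.composite() without showing it. A minimal sketch of the value object SQLAlchemy expects: the attribute names are illustrative, but __composite_values__ must return values in the same order as the mapped columns.

class Engine(object):
    def __init__(self, cylinders, displacement, type_, fuel_type):
        self.cylinders = cylinders
        self.displacement = displacement
        self.type_ = type_
        self.fuel_type = fuel_type

    def __composite_values__(self):
        # order must match the Column list passed to orm.composite()
        return self.cylinders, self.displacement, self.type_, self.fuel_type

    def __eq__(self, other):
        return (isinstance(other, Engine)
                and other.__composite_values__() == self.__composite_values__())

    def __ne__(self, other):
        return not self.__eq__(other)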
class BigInteger(types.TypeDecorator):
    impl = types.BigInteger().with_variant(sqlite.INTEGER(), 'sqlite')

    @property
    def python_type(self):
        return int

    def __repr__(self):
        return 'BigInteger()'
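A minimal usage sketch, assuming the decorator above is in scope; Base, the model, and the table name are illustrative. SQLite sees plain INTEGER (which it needs for autoincrementing rowid primary keys), while every other dialect keeps BIGINT.

from sqlalchemy import Column
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Event(Base):
    __tablename__ = 'event'

    # renders as INTEGER on sqlite, BIGINT elsewhere
    id = Column(BigInteger(), primary_key=True)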
def _get_column(table, column):
    """
    Returns a Column with the appropriate sqlalchemy data type for a column
    from the JSON schema description. Some column definitions are incorrect
    in the JSON data, so this function has some manual overrides.
    """
    if table == 'group_membership_dim' and column['name'] in [u'id', u'canvas_id']:
        # The group_membership_dim.id and group_membership_dim.canvas_id
        # columns are specified as varchars but they should be bigints.
        return Column(
            column['name'],
            types.BigInteger(),
        )
    elif table == 'quiz_question_answer_dim' and column['name'] in [
            u'answer_match_left', u'answer_match_right',
            u'matching_answer_incorrect_matches']:
        # These three columns in the quiz_question_answer_dim table are
        # specified as having a length of 256, but the actual dumps contain
        # longer values. Using the text type instead.
        return Column(column['name'], types.Text())
    elif table == 'quiz_question_dim' and column['name'] == u'name':
        # The quiz_question_dim.name column is specified as having a length
        # of 256, but the actual dumps contain longer values. Using the text
        # type instead.
        return Column(column['name'], types.Text())
    elif column['type'] == 'varchar':
        return Column(
            column['name'],
            types.String(length=column['length']),
        )
    elif column['type'] in TYPE_MAP:
        return Column(
            column['name'],
            TYPE_MAP[column['type']],
        )
    else:
        return None
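Hypothetical calls, assuming column entries shaped like the Canvas Data schema JSON that TYPE_MAP (shown in a later example) is built for; the table and column names here are illustrative.

col = _get_column('requests', {'name': u'user_id', 'type': 'bigint'})
# -> Column('user_id', BigInteger())

url = _get_column('requests', {'name': u'url', 'type': 'varchar', 'length': 256})
# -> Column('url', String(length=256))

unknown = _get_column('requests', {'name': u'blob', 'type': 'mystery'})
# -> None (type not in TYPE_MAP and no manual override applies)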
class AutoIncrementInteger(types.TypeDecorator):
    impl = types.INT
    count = 0

    def process_bind_param(self, value, dialect):
        value = self.count
        self.count += 1
        return value

    def process_result_value(self, value, dialect):
        return value


MagicBigInt = types.BigInteger().with_variant(AutoIncrementInteger, 'sqlite')


class JSONEncodedDict(types.TypeDecorator):
    """Represents an immutable structure as a json-encoded string"""

    impl = types.TEXT

    def process_bind_param(self, value, dialect):
        return json.dumps(value) if value else None

    def process_result_value(self, value, dialect):
        return json.loads(value) if value else None


class CompressedBinary(types.TypeDecorator):
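A minimal sketch of using JSONEncodedDict in a table definition; the table and column names are illustrative. The column is stored as TEXT in the database and exposed as a dict in Python.

from sqlalchemy import Column, Integer, MetaData, Table

metadata = MetaData()
jobs = Table(
    'jobs', metadata,
    Column('id', Integer, primary_key=True),
    # json.dumps() on the way in, json.loads() on the way out
    Column('payload', JSONEncodedDict()),
)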
class Reply(object):
    pass


class Analyze(object):
    pass


metadata = sqlalchemy.MetaData()

status = Table(
    "buffer_20100605", metadata,
    # generic BigInteger takes no display-width argument; the MySQL-specific
    # BIGINT(display_width=20) lives in sqlalchemy.dialects.mysql
    Column('id_autoinc', types.BigInteger(), primary_key=True),
    Column('id', types.BigInteger()),
    Column('user', types.String(20)),
    Column('content', types.Text),
    Column('source', types.Text),
    Column('time', types.DateTime, default=datetime.now),
    # Column('isAnalyze', types.SmallInteger, default=False),
    # Column('isReplyAnalyze', types.SmallInteger, default=0),
    mysql_engine='InnoDB',
    mysql_charset='utf8')

reply = Table(
    "reply", metadata,
    Column('id', types.Integer, primary_key=True),
    Column('tweet_id', types.BigInteger()),
    Column('reply_text', types.Text),
class BaseEngineSpec:  # pylint: disable=too-many-public-methods
    """Abstract class for database engine specific configurations"""

    engine = "base"  # str as defined in sqlalchemy.engine.engine
    engine_aliases: Optional[Tuple[str]] = None
    engine_name: Optional[
        str
    ] = None  # used for user messages, overridden in child classes

    _date_trunc_functions: Dict[str, str] = {}
    _time_grain_expressions: Dict[Optional[str], str] = {}
    column_type_mappings: Tuple[
        Tuple[
            Pattern[str],
            Union[TypeEngine, Callable[[Match[str]], TypeEngine]],
            GenericDataType,
        ],
        ...,
    ] = (
        (
            re.compile(r"^smallint", re.IGNORECASE),
            types.SmallInteger(),
            GenericDataType.NUMERIC,
        ),
        (
            re.compile(r"^int.*", re.IGNORECASE),
            types.Integer(),
            GenericDataType.NUMERIC,
        ),
        (
            re.compile(r"^bigint", re.IGNORECASE),
            types.BigInteger(),
            GenericDataType.NUMERIC,
        ),
        (
            re.compile(r"^decimal", re.IGNORECASE),
            types.Numeric(),
            GenericDataType.NUMERIC,
        ),
        (
            re.compile(r"^numeric", re.IGNORECASE),
            types.Numeric(),
            GenericDataType.NUMERIC,
        ),
        (re.compile(r"^real", re.IGNORECASE), types.REAL, GenericDataType.NUMERIC),
        (
            re.compile(r"^smallserial", re.IGNORECASE),
            types.SmallInteger(),
            GenericDataType.NUMERIC,
        ),
        (
            re.compile(r"^serial", re.IGNORECASE),
            types.Integer(),
            GenericDataType.NUMERIC,
        ),
        (
            re.compile(r"^bigserial", re.IGNORECASE),
            types.BigInteger(),
            GenericDataType.NUMERIC,
        ),
        (
            re.compile(r"^string", re.IGNORECASE),
            types.String(),
            utils.GenericDataType.STRING,
        ),
        (
            re.compile(r"^N((VAR)?CHAR|TEXT)", re.IGNORECASE),
            UnicodeText(),
            utils.GenericDataType.STRING,
        ),
        (
            re.compile(r"^((VAR)?CHAR|TEXT|STRING)", re.IGNORECASE),
            String(),
            utils.GenericDataType.STRING,
        ),
        (
            re.compile(r"^datetime", re.IGNORECASE),
            types.DateTime(),
            GenericDataType.TEMPORAL,
        ),
        (re.compile(r"^date", re.IGNORECASE), types.Date(), GenericDataType.TEMPORAL),
        (
            re.compile(r"^timestamp", re.IGNORECASE),
            types.TIMESTAMP(),
            GenericDataType.TEMPORAL,
        ),
        (
            re.compile(r"^interval", re.IGNORECASE),
            types.Interval(),
            GenericDataType.TEMPORAL,
        ),
        (re.compile(r"^time", re.IGNORECASE), types.Time(), GenericDataType.TEMPORAL),
        (
            re.compile(r"^bool.*", re.IGNORECASE),
            types.Boolean(),
            GenericDataType.BOOLEAN,
        ),
    )

    time_groupby_inline = False
    limit_method = LimitMethod.FORCE_LIMIT
    time_secondary_columns = False
    allows_joins = True
    allows_subqueries = True
    allows_alias_in_select = True
    allows_alias_in_orderby = True
    allows_sql_comments = True
    force_column_alias_quotes = False
    arraysize = 0
    max_column_name_length = 0
    try_remove_schema_from_table_name = True  # pylint: disable=invalid-name
    run_multiple_statements_as_one = False

    @classmethod
    def get_dbapi_exception_mapping(cls) -> Dict[Type[Exception], Type[Exception]]:
        """
        Each engine can implement and converge its own specific exceptions into
        Superset DBAPI exceptions

        Note: On python 3.9 this method can be changed to a classmethod property
        without the need of implementing a metaclass type

        :return: A map of driver specific exception to superset custom exceptions
        """
        return {}

    @classmethod
    def get_dbapi_mapped_exception(cls, exception: Exception) -> Exception:
        """
        Get a superset custom DBAPI exception from the driver specific exception.

        Override if the engine needs to perform extra changes to the exception, for
        example change the exception message or implement custom more complex logic

        :param exception: The driver specific exception
        :return: Superset custom DBAPI exception
        """
        new_exception = cls.get_dbapi_exception_mapping().get(type(exception))
        if not new_exception:
            return exception
        return new_exception(str(exception))

    @classmethod
    def get_allow_cost_estimate(cls, extra: Dict[str, Any]) -> bool:
        return False

    @classmethod
    def get_engine(
        cls,
        database: "Database",
        schema: Optional[str] = None,
        source: Optional[str] = None,
    ) -> Engine:
        user_name = utils.get_username()
        return database.get_sqla_engine(
            schema=schema, nullpool=True, user_name=user_name, source=source
        )

    @classmethod
    def get_timestamp_expr(
        cls,
        col: ColumnClause,
        pdf: Optional[str],
        time_grain: Optional[str],
        type_: Optional[str] = None,
    ) -> TimestampExpression:
        """
        Construct a TimestampExpression to be used in a SQLAlchemy query.

        :param col: Target column for the TimestampExpression
        :param pdf: date format (seconds or milliseconds)
        :param time_grain: time grain, e.g. P1Y for 1 year
        :param type_: the source column type
        :return: TimestampExpression object
        """
        if time_grain:
            time_expr = cls.get_time_grain_expressions().get(time_grain)
            if not time_expr:
                raise NotImplementedError(
                    f"No grain spec for {time_grain} for database {cls.engine}"
                )
            if type_ and "{func}" in time_expr:
                date_trunc_function = cls._date_trunc_functions.get(type_)
                if date_trunc_function:
                    time_expr = time_expr.replace("{func}", date_trunc_function)
            if type_ and "{type}" in time_expr:
                date_trunc_function = cls._date_trunc_functions.get(type_)
                if date_trunc_function:
                    time_expr = time_expr.replace("{type}", type_)
        else:
            time_expr = "{col}"

        # if epoch, translate to DATE using db specific conf
        if pdf == "epoch_s":
            time_expr = time_expr.replace("{col}", cls.epoch_to_dttm())
        elif pdf == "epoch_ms":
            time_expr = time_expr.replace("{col}", cls.epoch_ms_to_dttm())

        return TimestampExpression(time_expr, col, type_=DateTime)

    @classmethod
    def get_time_grains(cls) -> Tuple[TimeGrain, ...]:
        """
        Generate a tuple of supported time grains.

        :return: All time grains supported by the engine
        """
        ret_list = []
        time_grains = builtin_time_grains.copy()
        time_grains.update(config["TIME_GRAIN_ADDONS"])
        for duration, func in cls.get_time_grain_expressions().items():
            if duration in time_grains:
                name = time_grains[duration]
                ret_list.append(TimeGrain(name, _(name), func, duration))
        return tuple(ret_list)

    @classmethod
    def _sort_time_grains(
        cls, val: Tuple[Optional[str], str], index: int
    ) -> Union[float, int, str]:
        """
        Return an ordered time-based value of a portion of a time grain
        for sorting
        Values are expected to be either None or start with P or PT
        Have a numerical value in the middle and end with
        a value for the time interval
        It can also start or end with epoch start time denoting a range
        i.e, week beginning or ending with a day
        """
        pos = {
            "FIRST": 0,
            "SECOND": 1,
            "THIRD": 2,
            "LAST": 3,
        }

        if val[0] is None:
            return pos["FIRST"]

        prog = re.compile(r"(.*\/)?(P|PT)([0-9\.]+)(S|M|H|D|W|M|Y)(\/.*)?")
        result = prog.match(val[0])

        # for any time grains that don't match the format, put them at the end
        if result is None:
            return pos["LAST"]

        second_minute_hour = ["S", "M", "H"]
        day_week_month_year = ["D", "W", "M", "Y"]
        is_less_than_day = result.group(2) == "PT"
        interval = result.group(4)
        epoch_time_start_string = result.group(1) or result.group(5)
        has_starting_or_ending = bool(len(epoch_time_start_string or ""))

        def sort_day_week() -> int:
            if has_starting_or_ending:
                return pos["LAST"]
            if is_less_than_day:
                return pos["SECOND"]
            return pos["THIRD"]

        def sort_interval() -> float:
            if is_less_than_day:
                return second_minute_hour.index(interval)
            return day_week_month_year.index(interval)

        # 0: all "PT" values should come before "P" values (i.e, PT10M)
        # 1: order values within the above arrays ("D" before "W")
        # 2: sort by numeric value (PT10M before PT15M)
        # 3: sort by any week starting/ending values
        plist = {
            0: sort_day_week(),
            1: pos["SECOND"] if is_less_than_day else pos["THIRD"],
            2: sort_interval(),
            3: float(result.group(3)),
        }

        return plist.get(index, 0)

    @classmethod
    def get_time_grain_expressions(cls) -> Dict[Optional[str], str]:
        """
        Return a dict of all supported time grains including any potential added
        grains but excluding any potentially disabled grains in the config file.

        :return: All time grain expressions supported by the engine
        """
        # TODO: use @memoize decorator or similar to avoid recomputation on every call
        time_grain_expressions = cls._time_grain_expressions.copy()
        grain_addon_expressions = config["TIME_GRAIN_ADDON_EXPRESSIONS"]
        time_grain_expressions.update(grain_addon_expressions.get(cls.engine, {}))
        denylist: List[str] = config["TIME_GRAIN_DENYLIST"]
        for key in denylist:
            time_grain_expressions.pop(key)

        return dict(
            sorted(
                time_grain_expressions.items(),
                key=lambda x: (
                    cls._sort_time_grains(x, 0),
                    cls._sort_time_grains(x, 1),
                    cls._sort_time_grains(x, 2),
                    cls._sort_time_grains(x, 3),
                ),
            )
        )

    @classmethod
    def make_select_compatible(
        cls, groupby_exprs: Dict[str, ColumnElement], select_exprs: List[ColumnElement]
    ) -> List[ColumnElement]:
        """
        Some databases will just return the group-by field into the select, but don't
        allow the group-by field to be put into the select list.

        :param groupby_exprs: mapping between column name and column object
        :param select_exprs: all columns in the select clause
        :return: columns to be included in the final select clause
        """
        return select_exprs

    @classmethod
    def fetch_data(
        cls, cursor: Any, limit: Optional[int] = None
    ) -> List[Tuple[Any, ...]]:
        """
        :param cursor: Cursor instance
        :param limit: Maximum number of rows to be returned by the cursor
        :return: Result of query
        """
        if cls.arraysize:
            cursor.arraysize = cls.arraysize
        try:
            if cls.limit_method == LimitMethod.FETCH_MANY and limit:
                return cursor.fetchmany(limit)
            return cursor.fetchall()
        except Exception as ex:
            raise cls.get_dbapi_mapped_exception(ex)

    @classmethod
    def expand_data(
        cls, columns: List[Dict[Any, Any]], data: List[Dict[Any, Any]]
    ) -> Tuple[List[Dict[Any, Any]], List[Dict[Any, Any]], List[Dict[Any, Any]]]:
        """
        Some engines support expanding nested fields. See implementation in Presto
        spec for details.

        :param columns: columns selected in the query
        :param data: original data set
        :return: list of all columns(selected columns and their nested fields),
                 expanded data set, listed of nested fields
        """
        return columns, data, []

    @classmethod
    def alter_new_orm_column(cls, orm_col: "TableColumn") -> None:
        """Allow altering default column attributes when first detected/added

        For instance special column like `__time` for Druid can be set to
        is_dttm=True. Note that this only gets called when new columns are
        detected/created"""
        # TODO: Fix circular import caused by importing TableColumn

    @classmethod
    def epoch_to_dttm(cls) -> str:
        """
        SQL expression that converts epoch (seconds) to datetime that can be used in a
        query. The reference column should be denoted as `{col}` in the return
        expression, e.g. "FROM_UNIXTIME({col})"

        :return: SQL Expression
        """
        raise NotImplementedError()

    @classmethod
    def epoch_ms_to_dttm(cls) -> str:
        """
        SQL expression that converts epoch (milliseconds) to datetime that can be used
        in a query.

        :return: SQL Expression
        """
        return cls.epoch_to_dttm().replace("{col}", "({col}/1000)")

    @classmethod
    def get_datatype(cls, type_code: Any) -> Optional[str]:
        """
        Change column type code from cursor description to string representation.

        :param type_code: Type code from cursor description
        :return: String representation of type code
        """
        if isinstance(type_code, str) and type_code != "":
            return type_code.upper()
        return None

    @classmethod
    def normalize_indexes(cls, indexes: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Normalizes indexes for more consistency across db engines

        noop by default

        :param indexes: Raw indexes as returned by SQLAlchemy
        :return: cleaner, more aligned index definition
        """
        return indexes

    @classmethod
    def extra_table_metadata(
        cls, database: "Database", table_name: str, schema_name: str
    ) -> Dict[str, Any]:
        """
        Returns engine-specific table metadata

        :param database: Database instance
        :param table_name: Table name
        :param schema_name: Schema name
        :return: Engine-specific table metadata
        """
        # TODO: Fix circular import caused by importing Database
        return {}

    @classmethod
    def apply_limit_to_sql(cls, sql: str, limit: int, database: "Database") -> str:
        """
        Alters the SQL statement to apply a LIMIT clause

        :param sql: SQL query
        :param limit: Maximum number of rows to be returned by the query
        :param database: Database instance
        :return: SQL query with limit clause
        """
        # TODO: Fix circular import caused by importing Database
        if cls.limit_method == LimitMethod.WRAP_SQL:
            sql = sql.strip("\t\n ;")
            qry = (
                select("*")
                .select_from(TextAsFrom(text(sql), ["*"]).alias("inner_qry"))
                .limit(limit)
            )
            return database.compile_sqla_query(qry)

        if cls.limit_method == LimitMethod.FORCE_LIMIT:
            parsed_query = sql_parse.ParsedQuery(sql)
            sql = parsed_query.set_or_update_query_limit(limit)

        return sql

    @classmethod
    def get_limit_from_sql(cls, sql: str) -> Optional[int]:
        """
        Extract limit from SQL query

        :param sql: SQL query
        :return: Value of limit clause in query
        """
        parsed_query = sql_parse.ParsedQuery(sql)
        return parsed_query.limit

    @classmethod
    def set_or_update_query_limit(cls, sql: str, limit: int) -> str:
        """
        Create a query based on original query but with new limit clause

        :param sql: SQL query
        :param limit: New limit to insert/replace into query
        :return: Query with new limit
        """
        parsed_query = sql_parse.ParsedQuery(sql)
        return parsed_query.set_or_update_query_limit(limit)

    @staticmethod
    def csv_to_df(**kwargs: Any) -> pd.DataFrame:
        """Read csv into Pandas DataFrame

        :param kwargs: params to be passed to DataFrame.read_csv
        :return: Pandas DataFrame containing data from csv
        """
        kwargs["encoding"] = "utf-8"
        kwargs["iterator"] = True
        chunks = pd.read_csv(**kwargs)
        df = pd.concat(chunk for chunk in chunks)
        return df

    @classmethod
    def df_to_sql(cls, df: pd.DataFrame, **kwargs: Any) -> None:
        """Upload data from a Pandas DataFrame to a database.

        For regular engines this calls the DataFrame.to_sql() method. Can be
        overridden for engines that don't work well with to_sql(), e.g. BigQuery.

        :param df: Dataframe with data to be uploaded
        :param kwargs: kwargs to be passed to to_sql() method
        """
        df.to_sql(**kwargs)

    @classmethod
    def create_table_from_csv(  # pylint: disable=too-many-arguments
        cls,
        filename: str,
        table: Table,
        database: "Database",
        csv_to_df_kwargs: Dict[str, Any],
        df_to_sql_kwargs: Dict[str, Any],
    ) -> None:
        """
        Create table from contents of a csv. Note: this method does not create
        metadata for the table.
        """
        df = cls.csv_to_df(filepath_or_buffer=filename, **csv_to_df_kwargs)
        engine = cls.get_engine(database)
        if table.schema:
            # only add schema when it is preset and non empty
            df_to_sql_kwargs["schema"] = table.schema
        if engine.dialect.supports_multivalues_insert:
            df_to_sql_kwargs["method"] = "multi"
        cls.df_to_sql(df=df, con=engine, **df_to_sql_kwargs)

    @classmethod
    def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
        """
        Convert Python datetime object to a SQL expression

        :param target_type: The target type of expression
        :param dttm: The datetime object
        :return: The SQL expression
        """
        return None

    @classmethod
    def create_table_from_excel(  # pylint: disable=too-many-arguments
        cls,
        filename: str,
        table: Table,
        database: "Database",
        excel_to_df_kwargs: Dict[str, Any],
        df_to_sql_kwargs: Dict[str, Any],
    ) -> None:
        """
        Create table from contents of a excel. Note: this method does not create
        metadata for the table.
        """
        df = pd.read_excel(io=filename, **excel_to_df_kwargs)
        engine = cls.get_engine(database)
        if table.schema:
            # only add schema when it is preset and non empty
            df_to_sql_kwargs["schema"] = table.schema
        if engine.dialect.supports_multivalues_insert:
            df_to_sql_kwargs["method"] = "multi"
        cls.df_to_sql(df=df, con=engine, **df_to_sql_kwargs)

    @classmethod
    def get_all_datasource_names(
        cls, database: "Database", datasource_type: str
    ) -> List[utils.DatasourceName]:
        """Returns a list of all tables or views in database.

        :param database: Database instance
        :param datasource_type: Datasource_type can be 'table' or 'view'
        :return: List of all datasources in database or schema
        """
        # TODO: Fix circular import caused by importing Database
        schemas = database.get_all_schema_names(
            cache=database.schema_cache_enabled,
            cache_timeout=database.schema_cache_timeout,
            force=True,
        )
        all_datasources: List[utils.DatasourceName] = []
        for schema in schemas:
            if datasource_type == "table":
                all_datasources += database.get_all_table_names_in_schema(
                    schema=schema,
                    force=True,
                    cache=database.table_cache_enabled,
                    cache_timeout=database.table_cache_timeout,
                )
            elif datasource_type == "view":
                all_datasources += database.get_all_view_names_in_schema(
                    schema=schema,
                    force=True,
                    cache=database.table_cache_enabled,
                    cache_timeout=database.table_cache_timeout,
                )
            else:
                raise Exception(f"Unsupported datasource_type: {datasource_type}")
        return all_datasources

    @classmethod
    def handle_cursor(cls, cursor: Any, query: Query, session: Session) -> None:
        """Handle a live cursor between the execute and fetchall calls

        The flow works without this method doing anything, but it allows
        for handling the cursor and updating progress information in the
        query object"""
        # TODO: Fix circular import error caused by importing sql_lab.Query

    @classmethod
    def extract_error_message(cls, ex: Exception) -> str:
        return f"{cls.engine} error: {cls._extract_error_message(ex)}"

    @classmethod
    def _extract_error_message(cls, ex: Exception) -> str:
        """Extract error message for queries"""
        return utils.error_msg_from_exception(ex)

    @classmethod
    def extract_errors(cls, ex: Exception) -> List[Dict[str, Any]]:
        return [
            dataclasses.asdict(
                SupersetError(
                    error_type=SupersetErrorType.GENERIC_DB_ENGINE_ERROR,
                    message=cls._extract_error_message(ex),
                    level=ErrorLevel.ERROR,
                    extra={"engine_name": cls.engine_name},
                )
            )
        ]

    @classmethod
    def adjust_database_uri(cls, uri: URL, selected_schema: Optional[str]) -> None:
        """
        Mutate the database component of the SQLAlchemy URI.

        The URI here represents the URI as entered when saving the database,
        ``selected_schema`` is the schema currently active presumably in
        the SQL Lab dropdown. Based on that, for some database engine,
        we can return a new altered URI that connects straight to the
        active schema, meaning the users won't have to prefix the object
        names by the schema name.

        Some databases engines have 2 level of namespacing: database and
        schema (postgres, oracle, mssql, ...)
        For those it's probably better to not alter the database
        component of the URI with the schema name, it won't work.

        Some database drivers like presto accept '{catalog}/{schema}' in
        the database component of the URL, that can be handled here.
        """

    @classmethod
    def patch(cls) -> None:
        """
        TODO: Improve docstring and refactor implementation in Hive
        """

    @classmethod
    def get_schema_names(cls, inspector: Inspector) -> List[str]:
        """
        Get all schemas from database

        :param inspector: SqlAlchemy inspector
        :return: All schemas in the database
        """
        return sorted(inspector.get_schema_names())

    @classmethod
    def get_table_names(
        cls, database: "Database", inspector: Inspector, schema: Optional[str]
    ) -> List[str]:
        """
        Get all tables from schema

        :param inspector: SqlAlchemy inspector
        :param schema: Schema to inspect. If omitted, uses default schema for database
        :return: All tables in schema
        """
        tables = inspector.get_table_names(schema)
        if schema and cls.try_remove_schema_from_table_name:
            tables = [re.sub(f"^{schema}\\.", "", table) for table in tables]
        return sorted(tables)

    @classmethod
    def get_view_names(
        cls, database: "Database", inspector: Inspector, schema: Optional[str]
    ) -> List[str]:
        """
        Get all views from schema

        :param inspector: SqlAlchemy inspector
        :param schema: Schema name. If omitted, uses default schema for database
        :return: All views in schema
        """
        views = inspector.get_view_names(schema)
        if schema and cls.try_remove_schema_from_table_name:
            views = [re.sub(f"^{schema}\\.", "", view) for view in views]
        return sorted(views)

    @classmethod
    def get_table_comment(
        cls, inspector: Inspector, table_name: str, schema: Optional[str]
    ) -> Optional[str]:
        """
        Get comment of table from a given schema and table

        :param inspector: SqlAlchemy Inspector instance
        :param table_name: Table name
        :param schema: Schema name. If omitted, uses default schema for database
        :return: comment of table
        """
        comment = None
        try:
            comment = inspector.get_table_comment(table_name, schema)
            comment = comment.get("text") if isinstance(comment, dict) else None
        except NotImplementedError:
            # It's expected that some dialects don't implement the comment method
            pass
        except Exception as ex:  # pylint: disable=broad-except
            logger.error("Unexpected error while fetching table comment")
            logger.exception(ex)
        return comment

    @classmethod
    def get_columns(
        cls, inspector: Inspector, table_name: str, schema: Optional[str]
    ) -> List[Dict[str, Any]]:
        """
        Get all columns from a given schema and table

        :param inspector: SqlAlchemy Inspector instance
        :param table_name: Table name
        :param schema: Schema name. If omitted, uses default schema for database
        :return: All columns in table
        """
        return inspector.get_columns(table_name, schema)

    @classmethod
    def where_latest_partition(  # pylint: disable=too-many-arguments
        cls,
        table_name: str,
        schema: Optional[str],
        database: "Database",
        query: Select,
        columns: Optional[List[Dict[str, str]]] = None,
    ) -> Optional[Select]:
        """
        Add a where clause to a query to reference only the most recent partition

        :param table_name: Table name
        :param schema: Schema name
        :param database: Database instance
        :param query: SqlAlchemy query
        :param columns: List of TableColumns
        :return: SqlAlchemy query with additional where clause referencing latest
        partition
        """
        # TODO: Fix circular import caused by importing Database, TableColumn
        return None

    @classmethod
    def _get_fields(cls, cols: List[Dict[str, Any]]) -> List[Any]:
        return [column(c["name"]) for c in cols]

    @classmethod
    def select_star(  # pylint: disable=too-many-arguments,too-many-locals
        cls,
        database: "Database",
        table_name: str,
        engine: Engine,
        schema: Optional[str] = None,
        limit: int = 100,
        show_cols: bool = False,
        indent: bool = True,
        latest_partition: bool = True,
        cols: Optional[List[Dict[str, Any]]] = None,
    ) -> str:
        """
        Generate a "SELECT * from [schema.]table_name" query with appropriate limit.

        WARNING: expects only unquoted table and schema names.

        :param database: Database instance
        :param table_name: Table name, unquoted
        :param engine: SqlALchemy Engine instance
        :param schema: Schema, unquoted
        :param limit: limit to impose on query
        :param show_cols: Show columns in query; otherwise use "*"
        :param indent: Add indentation to query
        :param latest_partition: Only query latest partition
        :param cols: Columns to include in query
        :return: SQL query
        """
        fields: Union[str, List[Any]] = "*"
        cols = cols or []
        if (show_cols or latest_partition) and not cols:
            cols = database.get_columns(table_name, schema)

        if show_cols:
            fields = cls._get_fields(cols)
        quote = engine.dialect.identifier_preparer.quote
        if schema:
            full_table_name = quote(schema) + "." + quote(table_name)
        else:
            full_table_name = quote(table_name)

        qry = select(fields).select_from(text(full_table_name))

        if limit:
            qry = qry.limit(limit)
        if latest_partition:
            partition_query = cls.where_latest_partition(
                table_name, schema, database, qry, columns=cols
            )
            if partition_query is not None:
                qry = partition_query
        sql = database.compile_sqla_query(qry)
        if indent:
            sql = sqlparse.format(sql, reindent=True)
        return sql

    @classmethod
    def estimate_statement_cost(cls, statement: str, cursor: Any) -> Dict[str, Any]:
        """
        Generate a SQL query that estimates the cost of a given statement.

        :param statement: A single SQL statement
        :param cursor: Cursor instance
        :return: Dictionary with different costs
        """
        raise Exception("Database does not support cost estimation")

    @classmethod
    def query_cost_formatter(
        cls, raw_cost: List[Dict[str, Any]]
    ) -> List[Dict[str, str]]:
        """
        Format cost estimate.

        :param raw_cost: Raw estimate from `estimate_query_cost`
        :return: Human readable cost estimate
        """
        raise Exception("Database does not support cost estimation")

    @classmethod
    def process_statement(
        cls, statement: str, database: "Database", user_name: str
    ) -> str:
        """
        Process a SQL statement by stripping and mutating it.

        :param statement: A single SQL statement
        :param database: Database instance
        :param user_name: Effective username
        :return: Processed SQL statement
        """
        parsed_query = ParsedQuery(statement)
        sql = parsed_query.stripped()

        sql_query_mutator = config["SQL_QUERY_MUTATOR"]
        if sql_query_mutator:
            sql = sql_query_mutator(sql, user_name, security_manager, database)

        return sql

    @classmethod
    def estimate_query_cost(
        cls, database: "Database", schema: str, sql: str, source: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """
        Estimate the cost of a multiple statement SQL query.

        :param database: Database instance
        :param schema: Database schema
        :param sql: SQL query with possibly multiple statements
        :param source: Source of the query (eg, "sql_lab")
        """
        extra = database.get_extra() or {}
        if not cls.get_allow_cost_estimate(extra):
            raise Exception("Database does not support cost estimation")

        user_name = g.user.username if g.user else None
        parsed_query = sql_parse.ParsedQuery(sql)
        statements = parsed_query.get_statements()

        engine = cls.get_engine(database, schema=schema, source=source)
        costs = []
        with closing(engine.raw_connection()) as conn:
            cursor = conn.cursor()
            for statement in statements:
                processed_statement = cls.process_statement(
                    statement, database, user_name
                )
                costs.append(cls.estimate_statement_cost(processed_statement, cursor))
        return costs

    @classmethod
    def modify_url_for_impersonation(
        cls, url: URL, impersonate_user: bool, username: Optional[str]
    ) -> None:
        """
        Modify the SQL Alchemy URL object with the user to impersonate if applicable.

        :param url: SQLAlchemy URL object
        :param impersonate_user: Flag indicating if impersonation is enabled
        :param username: Effective username
        """
        if impersonate_user and username is not None:
            url.username = username

    @classmethod
    def update_impersonation_config(
        cls, connect_args: Dict[str, Any], uri: str, username: Optional[str],
    ) -> None:
        """
        Update a configuration dictionary that can set the correct properties
        for impersonating users

        :param connect_args: config to be updated
        :param uri: URI
        :param username: Effective username
        :return: None
        """

    @classmethod
    def execute(cls, cursor: Any, query: str, **kwargs: Any) -> None:
        """
        Execute a SQL query

        :param cursor: Cursor instance
        :param query: Query to execute
        :param kwargs: kwargs to be passed to cursor.execute()
        :return:
        """
        if not cls.allows_sql_comments:
            query = sql_parse.strip_comments_from_sql(query)

        if cls.arraysize:
            cursor.arraysize = cls.arraysize
        try:
            cursor.execute(query)
        except Exception as ex:
            raise cls.get_dbapi_mapped_exception(ex)

    @classmethod
    def make_label_compatible(cls, label: str) -> Union[str, quoted_name]:
        """
        Conditionally mutate and/or quote a sqlalchemy expression label. If
        force_column_alias_quotes is set to True, return the label as a
        sqlalchemy.sql.elements.quoted_name object to ensure that the select query
        and query results have same case. Otherwise return the mutated label as a
        regular string. If maximum supported column name length is exceeded,
        generate a truncated label by calling truncate_label().

        :param label: expected expression label/alias
        :return: conditionally mutated label supported by the db engine
        """
        label_mutated = cls._mutate_label(label)
        if (
            cls.max_column_name_length
            and len(label_mutated) > cls.max_column_name_length
        ):
            label_mutated = cls._truncate_label(label)
        if cls.force_column_alias_quotes:
            label_mutated = quoted_name(label_mutated, True)
        return label_mutated

    @classmethod
    def get_sqla_column_type(
        cls,
        column_type: Optional[str],
        column_type_mappings: Tuple[
            Tuple[
                Pattern[str],
                Union[TypeEngine, Callable[[Match[str]], TypeEngine]],
                GenericDataType,
            ],
            ...,
        ] = column_type_mappings,
    ) -> Union[Tuple[TypeEngine, GenericDataType], None]:
        """
        Return a sqlalchemy native column type that corresponds to the column type
        defined in the data source (return None to use default type inferred by
        SQLAlchemy). Override `column_type_mappings` for specific needs
        (see MSSQL for example of NCHAR/NVARCHAR handling).

        :param column_type: Column type returned by inspector
        :return: SqlAlchemy column type
        """
        if not column_type:
            return None
        for regex, sqla_type, generic_type in column_type_mappings:
            match = regex.match(column_type)
            if match:
                if callable(sqla_type):
                    return sqla_type(match), generic_type
                return sqla_type, generic_type
        return None

    @staticmethod
    def _mutate_label(label: str) -> str:
        """
        Most engines support mixed case aliases that can include numbers
        and special characters, like commas, parentheses etc. For engines that
        have restrictions on what types of aliases are supported, this method
        can be overridden to ensure that labels conform to the engine's
        limitations. Mutated labels should be deterministic (input label A always
        yields output label X) and unique (input labels A and B don't yield the same
        output label X).

        :param label: Preferred expression label
        :return: Conditionally mutated label
        """
        return label

    @classmethod
    def _truncate_label(cls, label: str) -> str:
        """
        In the case that a label exceeds the max length supported by the engine,
        this method is used to construct a deterministic and unique label based on
        the original label. By default this returns an md5 hash of the original label,
        conditionally truncated if the length of the hash exceeds the max column length
        of the engine.

        :param label: Expected expression label
        :return: Truncated label
        """
        label = hashlib.md5(label.encode("utf-8")).hexdigest()
        # truncate hash if it exceeds max length
        if cls.max_column_name_length and len(label) > cls.max_column_name_length:
            label = label[: cls.max_column_name_length]
        return label

    @classmethod
    def column_datatype_to_string(
        cls, sqla_column_type: TypeEngine, dialect: Dialect
    ) -> str:
        """
        Convert sqlalchemy column type to string representation.
        By default removes collation and character encoding info to avoid
        unnecessarily long datatypes.

        :param sqla_column_type: SqlAlchemy column type
        :param dialect: Sqlalchemy dialect
        :return: Compiled column type
        """
        sqla_column_type = sqla_column_type.copy()
        if hasattr(sqla_column_type, "collation"):
            sqla_column_type.collation = None
        if hasattr(sqla_column_type, "charset"):
            sqla_column_type.charset = None
        return sqla_column_type.compile(dialect=dialect).upper()

    @classmethod
    def get_function_names(cls, database: "Database") -> List[str]:
        """
        Get a list of function names that are able to be called on the database.
        Used for SQL Lab autocomplete.

        :param database: The database to get functions for
        :return: A list of function names useable in the database
        """
        return []

    @staticmethod
    def pyodbc_rows_to_tuples(data: List[Any]) -> List[Tuple[Any, ...]]:
        """
        Convert pyodbc.Row objects from `fetch_data` to tuples.

        :param data: List of tuples or pyodbc.Row objects
        :return: List of tuples
        """
        if data and type(data[0]).__name__ == "Row":
            data = [tuple(row) for row in data]
        return data

    @staticmethod
    def mutate_db_for_connection_test(database: "Database") -> None:
        """
        Some databases require passing additional parameters for validating database
        connections. This method makes it possible to mutate the database instance
        prior to testing if a connection is ok.

        :param database: instance to be mutated
        """
        return None

    @staticmethod
    def get_extra_params(database: "Database") -> Dict[str, Any]:
        """
        Some databases require adding elements to connection parameters,
        like passing certificates to `extra`. This can be done here.

        :param database: database instance from which to extract extras
        :raises CertificateException: If certificate is not valid/unparseable
        """
        extra: Dict[str, Any] = {}
        if database.extra:
            try:
                extra = json.loads(database.extra)
            except json.JSONDecodeError as ex:
                logger.error(ex)
                raise ex
        return extra

    @classmethod
    def is_readonly_query(cls, parsed_query: ParsedQuery) -> bool:
        """Pessimistic readonly, 100% sure statement won't mutate anything"""
        return (
            parsed_query.is_select()
            or parsed_query.is_explain()
            or parsed_query.is_show()
        )

    @classmethod
    def get_column_spec(
        cls,
        native_type: Optional[str],
        source: utils.ColumnTypeSource = utils.ColumnTypeSource.GET_TABLE,
        column_type_mappings: Tuple[
            Tuple[
                Pattern[str],
                Union[TypeEngine, Callable[[Match[str]], TypeEngine]],
                GenericDataType,
            ],
            ...,
        ] = column_type_mappings,
    ) -> Union[ColumnSpec, None]:
        """
        Converts native database type to sqlalchemy column type.

        :param native_type: Native database type
        :param source: Type coming from the database table or cursor description
        :return: ColumnSpec object
        """
        col_types = cls.get_sqla_column_type(
            native_type, column_type_mappings=column_type_mappings
        )
        if col_types:
            column_type, generic_type = col_types
            # wrap temporal types in custom type that supports literal binding
            # using datetimes
            if generic_type == GenericDataType.TEMPORAL:
                column_type = literal_dttm_type_factory(
                    type(column_type), cls, native_type or ""
                )
            is_dttm = generic_type == GenericDataType.TEMPORAL
            return ColumnSpec(
                sqla_type=column_type, generic_type=generic_type, is_dttm=is_dttm
            )
        return None
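A hedged sketch of how a concrete spec would subclass BaseEngineSpec; ExampleEngineSpec and its expressions are illustrative, not an actual Superset engine spec. Overriding _time_grain_expressions and epoch_to_dttm() is the minimum needed for time-grain rollups and epoch handling.

class ExampleEngineSpec(BaseEngineSpec):
    engine = "example"
    engine_name = "Example DB"

    _time_grain_expressions = {
        None: "{col}",
        "P1D": "DATE_TRUNC('day', {col})",
    }

    @classmethod
    def epoch_to_dttm(cls) -> str:
        # {col} is substituted by the base class when building the expression
        return "FROM_UNIXTIME({col})"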
class cikUIK(al_base, Base):
    __tablename__ = 'cik_uik'

    id = Column(types.Integer(), primary_key=True)
    iz_id = Column(types.BigInteger(), index=True)
    parent_id = Column(types.Integer(),
                       ForeignKey('cik_uik.id', ondelete="SET NULL"), index=True)
    type_ik = Column(types.String(10))
    region = Column(types.String(50))
    url = Column(types.String(255))
    name = Column(types.String(255))
    address = Column(types.Text())
    phone = Column(types.String(100))
    fax = Column(types.String(100))
    email = Column(types.String(100))
    end_date = Column(types.String(100))  # end date of the commission's term of office

    # self.url = 'http://www.%s.vybory.izbirkom.ru/ik/%s' % (region, iz_id or '')

    @classmethod
    def add_or_update(cls, attrs):
        if 'iz_id' not in attrs:
            return None
        uik = Session.query(cls).filter(cls.iz_id == attrs['iz_id']).first()
        if not uik:
            uik = cls(iz_id=attrs['iz_id'])
            Session.add(uik)
        uik.set_attrs(attrs)
        return uik

    @property
    def local_path(self):
        return os.path.join('orig', self.region, self.type_ik, str(self.iz_id))

    def normalize_attrs(self, attrs):
        # map the Russian labels scraped from the site to model attributes
        norm_keys = {
            u'Адрес комиссии': 'address',
            u'Телефон': 'phone',
            u'Факс': 'fax',
            u'Адрес электронной почты': 'email',
            u'Срок окончания полномочий': 'end_date',
        }
        for (k, v) in norm_keys.items():
            if k in attrs:
                attrs[v] = attrs.pop(k)
        return attrs

    def parse(self, update=True, recursion=False):
        logging.info('parse %s', self.name)
        data = down_data(self.url, self.local_path + '.htm').decode('cp1251')
        ehtml = lxml.html.fromstring(data)
        attrs = {}
        div_main = ehtml.xpath(
            '//div[@id="main"]/*/div[@class="center-colm"]')[0]
        try:
            attrs['name'] = div_main.xpath('h2')[0].text
        except Exception:
            pass
        # commission attributes
        for (k, v) in re.findall('<p><strong>(.*?): </strong>(.*?)</p>',
                                 lxml.html.tostring(div_main, encoding=unicode)):
            attrs[k] = v
        attrs = self.normalize_attrs(attrs)
        if update:
            self.set_attrs(attrs)

        # commission members
        people_tbl = ehtml.xpath(
            '//div[@id="main"]/*/div[@class="center-colm"]//table')[0]
        for p in people_tbl.xpath('.//tr'):
            vals = [x.text_content().strip() for x in p.xpath('.//td')]
            if len(vals) > 0:
                people_attrs = dict(
                    zip(('number', 'fio', 'post', 'party'), vals))
                people_attrs['ik_id'] = self.id
                cikPeople.add_or_update(people_attrs)

        if recursion:
            self.search_childs(data, recursion=True)
        Session.commit()
        return attrs

    def search_childs(self, data, recursion=False):
        childs = []
        if self.type_ik == 'ik':
            url = "http://www.vybory.izbirkom.ru/%s/ik_tree/" % (self.region, )
            txt = down_data(url, self.local_path + '_childs.js').decode('cp1251')
            vals = simplejson.loads(txt)[0]
            self.set_attrs({
                'iz_id': vals.get('id', ''),
            })
            for child in vals.get('children', []):
                childs.append({
                    'url': 'http://www.vybory.izbirkom.ru'
                           + child.get('a_attr', {}).get('href', ''),
                    'name': child.get('text', ''),
                    'iz_id': child.get('id', ''),
                    'region': self.region,
                    'parent_id': self.id,
                    'type_ik': 'tik'
                })
        elif self.type_ik == 'tik':
            url = ("http://www.vybory.izbirkom.ru/%s/ik_tree/"
                   "?operation=get_children&id=%s" % (self.region, self.iz_id))
            txt = down_data(url, self.local_path + '_childs.js').decode('cp1251')
            vals = simplejson.loads(txt)
            for child in vals:
                childs.append({
                    'url': 'http://www.vybory.izbirkom.ru'
                           + child.get('a_attr', {}).get('href', ''),
                    'name': child.get('text', ''),
                    'iz_id': child.get('id', ''),
                    'region': self.region,
                    'parent_id': self.id,
                    'type_ik': 'uik'
                })

        for child in childs:
            uik = cikUIK.add_or_update(child)
            if uik and recursion:
                uik.parse(recursion=True)
        logging.debug(childs)
class VehicleOther(Base):
    __tablename__ = "vehicle_other"

    id = Column(types.Integer(), primary_key=True, doc="The primary key")
    advertising_cost = Column(types.BigInteger())
    base_invoice = Column(types.BigInteger())
    base_msrp = Column(types.BigInteger())
    destination_charge = Column(types.BigInteger())
    gas_guzzler_tax = Column(types.BigInteger())
    list_price = Column(types.BigInteger())
    misc_cost = Column(types.BigInteger())
    options_invoice = Column(types.BigInteger())
    options_msrp = Column(types.BigInteger())
    package_discount = Column(types.BigInteger())
    prep_cost = Column(types.BigInteger())
    total_msrp = Column(types.BigInteger())
    vehicle_invoice = Column(types.BigInteger())
    vehicle_msrp = Column(types.BigInteger())

    _vehicle_id = Column(types.Integer(), ForeignKey(Vehicle.id))
    vehicle = orm.relationship(Vehicle,
                               backref=orm.backref("other", uselist=False),
                               uselist=False)
FieldTypeDict = {
    # basic info
    'S_INFO_WINDCODE': st.VARCHAR(40),
    'S_INFO_NAME': st.VARCHAR(40),
    'S_INFO_LISTDATE': st.VARCHAR(8),
    'S_INFO_DELISTDATE': st.VARCHAR(8),
    # share info
    'ANN_DT': st.VARCHAR(8),
    'NEXT_ANN_DT': st.VARCHAR(8),
    'CHANGE_DT': st.VARCHAR(8),
    'TOT_SHR': st.FLOAT,
    'FLOAT_SHR': st.FLOAT,
    'FLOAT_A_SHR': st.FLOAT,
    'S_SHARE_TOTALA': st.FLOAT,
    # holders info
    'TOT_HOLDERS': st.BigInteger(),
    'PERS_HOLDERS': st.BigInteger(),
    'INST_HOLDERS': st.BigInteger(),
    'TOT_QUANTITY': st.BigInteger(),
    'PERS_QUANTITY': st.BigInteger(),
    'INST_QUANTITY': st.BigInteger(),
    # trade info
    'TRADE_DT': st.VARCHAR(8),
    'S_DQ_OPEN': st.FLOAT,
    'S_DQ_HIGH': st.FLOAT,
    'S_DQ_LOW': st.FLOAT,
    'S_DQ_CLOSE': st.FLOAT,
    'S_DQ_VOLUME': st.FLOAT,
    'S_DQ_AMOUNT': st.FLOAT,
    'S_DQ_PCTCHANGE': st.FLOAT,
    'S_DQ_TRADESTATUS': st.INTEGER,
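A minimal sketch of the likely intended use of this mapping (the dict is truncated above, so this assumes it is closed in the full source): passing it as the dtype= argument to pandas DataFrame.to_sql so each field gets an explicit SQL type. The engine URL, table name, and sample values are illustrative.

import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy import types as st

df = pd.DataFrame({
    'S_INFO_WINDCODE': ['000001.SZ'],
    'TRADE_DT': ['20200102'],
    'TOT_HOLDERS': [352000],
})
engine = create_engine('sqlite://')  # any SQLAlchemy engine works here
dtype = {col: FieldTypeDict[col] for col in df.columns if col in FieldTypeDict}
df.to_sql('holder_number', engine, if_exists='append', index=False, dtype=dtype)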
from sqlalchemy import MetaData, Table, Column
from sqlalchemy import types
from sqlalchemy.schema import CreateTable, DropTable

TYPE_MAP = {
    'bigint': types.BigInteger(),
    'boolean': types.Boolean(),
    'date': types.DATE(),
    'timestamp': types.TIMESTAMP(),
    'datetime': types.TIMESTAMP(),
    'double precision': types.FLOAT(),
    'enum': types.String(length=256),
    'guid': types.String(length=256),
    'int': types.Integer(),
    'integer': types.Integer(),
    'text': types.Text(),
}


def ddl_from_json(schema_json):
    """
    This function takes the schema definition in JSON format that's returned
    by the Canvas Data API and returns SQL DDL statements that can be used to
    create all of the tables necessary to hold the archived data.
    """
    metadata = MetaData()
    create_ddl = []
    drop_ddl = []
    for artifact in schema_json:
        table_name = schema_json[artifact]['tableName']
        json_columns = schema_json[artifact]['columns']
class PrestoEngineSpec(BaseEngineSpec): engine = "presto" engine_name = "Presto" _time_grain_expressions = { None: "{col}", "PT1S": "date_trunc('second', CAST({col} AS TIMESTAMP))", "PT1M": "date_trunc('minute', CAST({col} AS TIMESTAMP))", "PT1H": "date_trunc('hour', CAST({col} AS TIMESTAMP))", "P1D": "date_trunc('day', CAST({col} AS TIMESTAMP))", "P1W": "date_trunc('week', CAST({col} AS TIMESTAMP))", "P1M": "date_trunc('month', CAST({col} AS TIMESTAMP))", "P0.25Y": "date_trunc('quarter', CAST({col} AS TIMESTAMP))", "P1Y": "date_trunc('year', CAST({col} AS TIMESTAMP))", "P1W/1970-01-03T00:00:00Z": "date_add('day', 5, date_trunc('week', " "date_add('day', 1, CAST({col} AS TIMESTAMP))))", "1969-12-28T00:00:00Z/P1W": "date_add('day', -1, date_trunc('week', " "date_add('day', 1, CAST({col} AS TIMESTAMP))))", } @classmethod def get_allow_cost_estimate(cls, version: Optional[str] = None) -> bool: return version is not None and StrictVersion(version) >= StrictVersion( "0.319") @classmethod def get_table_names(cls, database: "Database", inspector: Inspector, schema: Optional[str]) -> List[str]: tables = super().get_table_names(database, inspector, schema) if not is_feature_enabled("PRESTO_SPLIT_VIEWS_FROM_TABLES"): return tables views = set(cls.get_view_names(database, inspector, schema)) actual_tables = set(tables) - views return list(actual_tables) @classmethod def get_view_names(cls, database: "Database", inspector: Inspector, schema: Optional[str]) -> List[str]: """Returns an empty list get_table_names() function returns all table names and view names, and get_view_names() is not implemented in sqlalchemy_presto.py https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py """ if not is_feature_enabled("PRESTO_SPLIT_VIEWS_FROM_TABLES"): return [] if schema: sql = ("SELECT table_name FROM information_schema.views " "WHERE table_schema=%(schema)s") params = {"schema": schema} else: sql = "SELECT table_name FROM information_schema.views" params = {} engine = cls.get_engine(database, schema=schema) with closing(engine.raw_connection()) as conn: with closing(conn.cursor()) as cursor: cursor.execute(sql, params) results = cursor.fetchall() return [row[0] for row in results] @classmethod def _create_column_info(cls, name: str, data_type: str) -> Dict[str, Any]: """ Create column info object :param name: column name :param data_type: column data type :return: column info object """ return {"name": name, "type": f"{data_type}"} @classmethod def _get_full_name(cls, names: List[Tuple[str, str]]) -> str: """ Get the full column name :param names: list of all individual column names :return: full column name """ return ".".join(column[0] for column in names if column[0]) @classmethod def _has_nested_data_types(cls, component_type: str) -> bool: """ Check if string contains a data type. We determine if there is a data type by whitespace or multiple data types by commas :param component_type: data type :return: boolean """ comma_regex = r",(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)" white_space_regex = r"\s(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)" return (re.search(comma_regex, component_type) is not None or re.search(white_space_regex, component_type) is not None) @classmethod def _split_data_type(cls, data_type: str, delimiter: str) -> List[str]: """ Split data type based on given delimiter. Do not split the string if the delimiter is enclosed in quotes :param data_type: data type :param delimiter: string separator (i.e. 
open parenthesis, closed parenthesis, comma, whitespace) :return: list of strings after breaking it by the delimiter """ return re.split( r"{}(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)".format(delimiter), data_type) @classmethod def _parse_structural_column( # pylint: disable=too-many-locals,too-many-branches cls, parent_column_name: str, parent_data_type: str, result: List[Dict[str, Any]], ) -> None: """ Parse a row or array column :param result: list tracking the results """ formatted_parent_column_name = parent_column_name # Quote the column name if there is a space if " " in parent_column_name: formatted_parent_column_name = f'"{parent_column_name}"' full_data_type = f"{formatted_parent_column_name} {parent_data_type}" original_result_len = len(result) # split on open parenthesis ( to get the structural # data type and its component types data_types = cls._split_data_type(full_data_type, r"\(") stack: List[Tuple[str, str]] = [] for data_type in data_types: # split on closed parenthesis ) to track which component # types belong to what structural data type inner_types = cls._split_data_type(data_type, r"\)") for inner_type in inner_types: # We have finished parsing multiple structural data types if not inner_type and stack: stack.pop() elif cls._has_nested_data_types(inner_type): # split on comma , to get individual data types single_fields = cls._split_data_type(inner_type, ",") for single_field in single_fields: single_field = single_field.strip() # If component type starts with a comma, the first single field # will be an empty string. Disregard this empty string. if not single_field: continue # split on whitespace to get field name and data type field_info = cls._split_data_type(single_field, r"\s") # check if there is a structural data type within # overall structural data type column_type = cls.get_sqla_column_type(field_info[1]) if column_type is None: raise NotImplementedError( _("Unknown column type: %(col)s", col=field_info[1])) if field_info[1] == "array" or field_info[1] == "row": stack.append((field_info[0], field_info[1])) full_parent_path = cls._get_full_name(stack) result.append( cls._create_column_info( full_parent_path, column_type)) else: # otherwise this field is a basic data type full_parent_path = cls._get_full_name(stack) column_name = "{}.{}".format( full_parent_path, field_info[0]) result.append( cls._create_column_info( column_name, column_type)) # If the component type ends with a structural data type, do not pop # the stack. We have run across a structural data type within the # overall structural data type. Otherwise, we have completely parsed # through the entire structural data type and can move on. if not (inner_type.endswith("array") or inner_type.endswith("row")): stack.pop() # We have an array of row objects (i.e. array(row(...))) elif inner_type in ("array", "row"): # Push a dummy object to represent the structural data type stack.append(("", inner_type)) # We have an array of a basic data types(i.e. array(varchar)). elif stack: # Because it is an array of a basic data type. We have finished # parsing the structural data type and can move on. 
stack.pop() # Unquote the column name if necessary if formatted_parent_column_name != parent_column_name: for index in range(original_result_len, len(result)): result[index]["name"] = result[index]["name"].replace( formatted_parent_column_name, parent_column_name) @classmethod def _show_columns(cls, inspector: Inspector, table_name: str, schema: Optional[str]) -> List[RowProxy]: """ Show presto column names :param inspector: object that performs database schema inspection :param table_name: table name :param schema: schema name :return: list of column objects """ quote = inspector.engine.dialect.identifier_preparer.quote_identifier full_table = quote(table_name) if schema: full_table = "{}.{}".format(quote(schema), full_table) columns = inspector.bind.execute( "SHOW COLUMNS FROM {}".format(full_table)) return columns column_type_mappings = ( (re.compile(r"^boolean.*", re.IGNORECASE), types.Boolean()), (re.compile(r"^tinyint.*", re.IGNORECASE), TinyInteger()), (re.compile(r"^smallint.*", re.IGNORECASE), types.SmallInteger()), (re.compile(r"^integer.*", re.IGNORECASE), types.Integer()), (re.compile(r"^bigint.*", re.IGNORECASE), types.BigInteger()), (re.compile(r"^real.*", re.IGNORECASE), types.Float()), (re.compile(r"^double.*", re.IGNORECASE), types.Float()), (re.compile(r"^decimal.*", re.IGNORECASE), types.DECIMAL()), ( re.compile(r"^varchar(\((\d+)\))*$", re.IGNORECASE), lambda match: types.VARCHAR(int(match[2])) if match[2] else types.String(), ), ( re.compile(r"^char(\((\d+)\))*$", re.IGNORECASE), lambda match: types.CHAR(int(match[2])) if match[2] else types.CHAR(), ), (re.compile(r"^varbinary.*", re.IGNORECASE), types.VARBINARY()), (re.compile(r"^json.*", re.IGNORECASE), types.JSON()), (re.compile(r"^date.*", re.IGNORECASE), types.DATE()), (re.compile(r"^time.*", re.IGNORECASE), types.Time()), (re.compile(r"^timestamp.*", re.IGNORECASE), types.TIMESTAMP()), (re.compile(r"^interval.*", re.IGNORECASE), Interval()), (re.compile(r"^array.*", re.IGNORECASE), Array()), (re.compile(r"^map.*", re.IGNORECASE), Map()), (re.compile(r"^row.*", re.IGNORECASE), Row()), ) @classmethod def get_columns(cls, inspector: Inspector, table_name: str, schema: Optional[str]) -> List[Dict[str, Any]]: """ Get columns from a Presto data source. This includes handling row and array data types :param inspector: object that performs database schema inspection :param table_name: table name :param schema: schema name :return: a list of results that contain column info (i.e. 
column name and data type) """ columns = cls._show_columns(inspector, table_name, schema) result: List[Dict[str, Any]] = [] for column in columns: # parse column if it is a row or array if is_feature_enabled("PRESTO_EXPAND_DATA") and ( "array" in column.Type or "row" in column.Type): structural_column_index = len(result) cls._parse_structural_column(column.Column, column.Type, result) result[structural_column_index]["nullable"] = getattr( column, "Null", True) result[structural_column_index]["default"] = None continue # otherwise column is a basic data type column_type = cls.get_sqla_column_type(column.Type) if column_type is None: raise NotImplementedError( _("Unknown column type: %(col)s", col=column_type)) column_info = cls._create_column_info(column.Column, column_type) column_info["nullable"] = getattr(column, "Null", True) column_info["default"] = None result.append(column_info) return result @classmethod def _is_column_name_quoted(cls, column_name: str) -> bool: """ Check if column name is in quotes :param column_name: column name :return: boolean """ return column_name.startswith('"') and column_name.endswith('"') @classmethod def _get_fields(cls, cols: List[Dict[str, Any]]) -> List[ColumnClause]: """ Format column clauses where names are in quotes and labels are specified :param cols: columns :return: column clauses """ column_clauses = [] # Column names are separated by periods. This regex will find periods in a # string if they are not enclosed in quotes because if a period is enclosed in # quotes, then that period is part of a column name. dot_pattern = r"""\. # split on period (?= # look ahead (?: # create non-capture group [^\"]*\"[^\"]*\" # two quotes )*[^\"]*$) # end regex""" dot_regex = re.compile(dot_pattern, re.VERBOSE) for col in cols: # get individual column names col_names = re.split(dot_regex, col["name"]) # quote each column name if it is not already quoted for index, col_name in enumerate(col_names): if not cls._is_column_name_quoted(col_name): col_names[index] = '"{}"'.format(col_name) quoted_col_name = ".".join( col_name if cls._is_column_name_quoted(col_name ) else f'"{col_name}"' for col_name in col_names) # create column clause in the format "name"."name" AS "name.name" column_clause = literal_column(quoted_col_name).label(col["name"]) column_clauses.append(column_clause) return column_clauses @classmethod def select_star( # pylint: disable=too-many-arguments cls, database: "Database", table_name: str, engine: Engine, schema: Optional[str] = None, limit: int = 100, show_cols: bool = False, indent: bool = True, latest_partition: bool = True, cols: Optional[List[Dict[str, Any]]] = None, ) -> str: """ Include selecting properties of row objects. We cannot easily break arrays into rows, so render the whole array in its own row and skip columns that correspond to an array's contents. """ cols = cols or [] presto_cols = cols if is_feature_enabled("PRESTO_EXPAND_DATA") and show_cols: dot_regex = r"\.(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)" presto_cols = [ col for col in presto_cols if not re.search(dot_regex, col["name"]) ] return super().select_star( database, table_name, engine, schema, limit, show_cols, indent, latest_partition, presto_cols, ) @classmethod def estimate_statement_cost( # pylint: disable=too-many-locals cls, statement: str, database: "Database", cursor: Any, user_name: str) -> Dict[str, Any]: """ Run a SQL query that estimates the cost of a given statement. 
    @classmethod
    def estimate_statement_cost(  # pylint: disable=too-many-locals
        cls, statement: str, database: "Database", cursor: Any, user_name: str
    ) -> Dict[str, Any]:
        """
        Run a SQL query that estimates the cost of a given statement.
        :param statement: A single SQL statement
        :param database: Database instance
        :param cursor: Cursor instance
        :param user_name: Effective username
        :return: JSON response from Presto
        """
        parsed_query = ParsedQuery(statement)
        sql = parsed_query.stripped()

        sql_query_mutator = config["SQL_QUERY_MUTATOR"]
        if sql_query_mutator:
            sql = sql_query_mutator(sql, user_name, security_manager, database)

        sql = f"EXPLAIN (TYPE IO, FORMAT JSON) {sql}"
        cursor.execute(sql)

        # the output from Presto is a single column and a single row containing
        # JSON:
        #
        #   {
        #     ...
        #     "estimate" : {
        #       "outputRowCount" : 8.73265878E8,
        #       "outputSizeInBytes" : 3.41425774958E11,
        #       "cpuCost" : 3.41425774958E11,
        #       "maxMemory" : 0.0,
        #       "networkCost" : 3.41425774958E11
        #     }
        #   }
        result = json.loads(cursor.fetchone()[0])
        return result

    @classmethod
    def query_cost_formatter(
        cls, raw_cost: List[Dict[str, Any]]
    ) -> List[Dict[str, str]]:
        """
        Format cost estimate.
        :param raw_cost: JSON estimate from Presto
        :return: Human readable cost estimate
        """

        def humanize(value: Any, suffix: str) -> str:
            try:
                value = int(value)
            except ValueError:
                return str(value)

            prefixes = ["K", "M", "G", "T", "P", "E", "Z", "Y"]
            prefix = ""
            to_next_prefix = 1000
            while value > to_next_prefix and prefixes:
                prefix = prefixes.pop(0)
                value //= to_next_prefix
            return f"{value} {prefix}{suffix}"

        cost = []
        columns = [
            ("outputRowCount", "Output count", " rows"),
            ("outputSizeInBytes", "Output size", "B"),
            ("cpuCost", "CPU cost", ""),
            ("maxMemory", "Max memory", "B"),
            ("networkCost", "Network cost", ""),
        ]
        for row in raw_cost:
            estimate: Dict[str, float] = row.get("estimate", {})
            statement_cost = {}
            for key, label, suffix in columns:
                if key in estimate:
                    statement_cost[label] = humanize(estimate[key], suffix).strip()
            cost.append(statement_cost)
        return cost
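To make the formatter concrete, this is what it would return for the sample EXPLAIN payload shown in estimate_statement_cost, assuming the enclosing class is importable as PrestoEngineSpec (the name Superset uses for it):

raw_cost = [{
    "estimate": {
        "outputRowCount": 8.73265878e8,
        "outputSizeInBytes": 3.41425774958e11,
        "cpuCost": 3.41425774958e11,
        "maxMemory": 0.0,
        "networkCost": 3.41425774958e11,
    }
}]
print(PrestoEngineSpec.query_cost_formatter(raw_cost))
# [{'Output count': '873 M rows', 'Output size': '341 GB',
#   'CPU cost': '341 G', 'Max memory': '0 B', 'Network cost': '341 G'}]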
    @classmethod
    def adjust_database_uri(
        cls, uri: URL, selected_schema: Optional[str] = None
    ) -> None:
        database = uri.database
        if selected_schema and database:
            selected_schema = parse.quote(selected_schema, safe="")
            if "/" in database:
                database = database.split("/")[0] + "/" + selected_schema
            else:
                database += "/" + selected_schema
            uri.database = database

    @classmethod
    def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
        tt = target_type.upper()
        if tt == utils.TemporalType.DATE:
            return f"""from_iso8601_date('{dttm.date().isoformat()}')"""
        if tt == utils.TemporalType.TIMESTAMP:
            return f"""from_iso8601_timestamp('{dttm.isoformat(timespec="microseconds")}')"""  # pylint: disable=line-too-long
        return None

    @classmethod
    def epoch_to_dttm(cls) -> str:
        return "from_unixtime({col})"

    @classmethod
    def get_all_datasource_names(
        cls, database: "Database", datasource_type: str
    ) -> List[utils.DatasourceName]:
        datasource_df = database.get_df(
            "SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S "
            "ORDER BY concat(table_schema, '.', table_name)".format(
                datasource_type.upper()),
            None,
        )
        datasource_names: List[utils.DatasourceName] = []
        for _unused, row in datasource_df.iterrows():
            datasource_names.append(
                utils.DatasourceName(
                    schema=row["table_schema"], table=row["table_name"]))
        return datasource_names

    @classmethod
    def expand_data(  # pylint: disable=too-many-locals,too-many-branches
        cls, columns: List[Dict[Any, Any]], data: List[Dict[Any, Any]]
    ) -> Tuple[List[Dict[Any, Any]], List[Dict[Any, Any]], List[Dict[Any, Any]]]:
        """
        We do not immediately display rows and arrays clearly in the data grid.
        This method separates out nested fields and data values to help clearly
        display structural columns.

        Example: ColumnA is a row(nested_obj varchar) and ColumnB is an array(int)
        Original data set = [
            {'ColumnA': ['a1'], 'ColumnB': [1, 2]},
            {'ColumnA': ['a2'], 'ColumnB': [3, 4]},
        ]
        Expanded data set = [
            {'ColumnA': ['a1'], 'ColumnA.nested_obj': 'a1', 'ColumnB': 1},
            {'ColumnA': '',     'ColumnA.nested_obj': '',   'ColumnB': 2},
            {'ColumnA': ['a2'], 'ColumnA.nested_obj': 'a2', 'ColumnB': 3},
            {'ColumnA': '',     'ColumnA.nested_obj': '',   'ColumnB': 4},
        ]
        :param columns: columns selected in the query
        :param data: original data set
        :return: list of all columns (selected columns and their nested fields),
                 expanded data set, list of nested fields
        """
        if not is_feature_enabled("PRESTO_EXPAND_DATA"):
            return columns, data, []

        # process each column, unnesting ARRAY types and
        # expanding ROW types into new columns
        to_process = deque((column, 0) for column in columns)
        all_columns: List[Dict[str, Any]] = []
        expanded_columns = []
        current_array_level = None
        while to_process:
            column, level = to_process.popleft()
            if column["name"] not in [column["name"] for column in all_columns]:
                all_columns.append(column)

            # When unnesting arrays we need to keep track of how many extra rows
            # were added, for each original row. This is necessary when we expand
            # multiple arrays, so that the arrays after the first reuse the rows
            # added by the first. Every time we change a level in the nested
            # arrays we reinitialize this.
            if level != current_array_level:
                unnested_rows: Dict[int, int] = defaultdict(int)
                current_array_level = level

            name = column["name"]
            values: Optional[Union[str, List[Any]]]

            if column["type"].startswith("ARRAY("):
                # keep processing array children; we append to the right so that
                # multiple nested arrays are processed breadth-first
                to_process.append((get_children(column)[0], level + 1))

                # unnest array objects data into new rows
                i = 0
                while i < len(data):
                    row = data[i]
                    values = row.get(name)
                    if isinstance(values, str):
                        row[name] = values = destringify(values)
                    if values:
                        # how many extra rows do we need to unnest the data?
                        extra_rows = len(values) - 1
                        # how many rows were already added for this row?
                        current_unnested_rows = unnested_rows[i]
                        # add any necessary rows
                        missing = extra_rows - current_unnested_rows
                        for _ in range(missing):
                            data.insert(i + current_unnested_rows + 1, {})
                            unnested_rows[i] += 1
                        # unnest array into rows
                        for j, value in enumerate(values):
                            data[i + j][name] = value
                        # skip newly unnested rows
                        i += unnested_rows[i]
                    i += 1

            if column["type"].startswith("ROW("):
                # expand columns; we append them to the left so they are added
                # immediately after the parent
                expanded = get_children(column)
                to_process.extendleft((column, level) for column in expanded[::-1])
                expanded_columns.extend(expanded)

                # expand row objects into new columns
                for row in data:
                    values = row.get(name) or []
                    if isinstance(values, str):
                        row[name] = values = cast(List[Any], destringify(values))
                    for value, col in zip(values, expanded):
                        row[col["name"]] = value

        data = [
            {k["name"]: row.get(k["name"], "") for k in all_columns} for row in data
        ]
        return all_columns, data, expanded_columns
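A usage sketch mirroring the docstring example, again assuming the enclosing class is PrestoEngineSpec, the PRESTO_EXPAND_DATA feature flag is enabled, and column dicts follow the {'name': ..., 'type': ...} shape used throughout this class:

columns = [
    {"name": "ColumnA", "type": "ROW(nested_obj VARCHAR)"},
    {"name": "ColumnB", "type": "ARRAY(BIGINT)"},
]
data = [
    {"ColumnA": ["a1"], "ColumnB": [1, 2]},
    {"ColumnA": ["a2"], "ColumnB": [3, 4]},
]
all_cols, expanded_data, nested = PrestoEngineSpec.expand_data(columns, data)
# expanded_data now has one row per array element, with 'ColumnA.nested_obj'
# split out as its own column, as in the docstring example above.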
    @classmethod
    def extra_table_metadata(
        cls, database: "Database", table_name: str, schema_name: str
    ) -> Dict[str, Any]:
        metadata = {}

        indexes = database.get_indexes(table_name, schema_name)
        if indexes:
            cols = indexes[0].get("column_names", [])
            full_table_name = table_name
            if schema_name and "." not in table_name:
                full_table_name = "{}.{}".format(schema_name, table_name)
            pql = cls._partition_query(full_table_name, database)
            col_names, latest_parts = cls.latest_partition(
                table_name, schema_name, database, show_first=True)

            if not latest_parts:
                latest_parts = tuple([None] * len(col_names))  # type: ignore
            metadata["partitions"] = {
                "cols": cols,
                "latest": dict(zip(col_names, latest_parts)),  # type: ignore
                "partitionQuery": pql,
            }

        # flake8 is not matching `Optional[str]` to `Any` for some reason...
        metadata["view"] = cast(
            Any, cls.get_create_view(database, schema_name, table_name))
        return metadata

    @classmethod
    def get_create_view(
        cls, database: "Database", schema: str, table: str
    ) -> Optional[str]:
        """
        Return a CREATE VIEW statement, or `None` if not a view.

        :param database: Database instance
        :param schema: Schema name
        :param table: Table (view) name
        """
        from pyhive.exc import DatabaseError

        engine = cls.get_engine(database, schema)
        with closing(engine.raw_connection()) as conn:
            with closing(conn.cursor()) as cursor:
                sql = f"SHOW CREATE VIEW {schema}.{table}"
                try:
                    cls.execute(cursor, sql)
                    polled = cursor.poll()
                    while polled:
                        time.sleep(0.2)
                        polled = cursor.poll()
                except DatabaseError:  # not a VIEW
                    return None
                rows = cls.fetch_data(cursor, 1)
                return rows[0][0]
    @classmethod
    def handle_cursor(cls, cursor: Any, query: Query, session: Session) -> None:
        """Updates progress information"""
        query_id = query.id
        poll_interval = query.database.connect_args.get(
            "poll_interval", config["PRESTO_POLL_INTERVAL"])
        logger.info("Query %i: Polling the cursor for progress", query_id)
        polled = cursor.poll()
        # poll returns dict -- JSON status information or ``None``
        # if the query is done
        # https://github.com/dropbox/PyHive/blob/
        # b34bdbf51378b3979eaf5eca9e956f06ddc36ca0/pyhive/presto.py#L178
        while polled:
            # Update the object and wait for the kill signal.
            stats = polled.get("stats", {})

            query = session.query(type(query)).filter_by(id=query_id).one()
            if query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]:
                cursor.cancel()
                break

            if stats:
                state = stats.get("state")

                # if already finished, then stop polling
                if state == "FINISHED":
                    break

                completed_splits = float(stats.get("completedSplits"))
                total_splits = float(stats.get("totalSplits"))
                if total_splits and completed_splits:
                    progress = 100 * (completed_splits / total_splits)
                    logger.info(  # pylint: disable=logging-format-interpolation
                        "Query {} progress: {} / {} splits".format(
                            query_id, completed_splits, total_splits))
                    if progress > query.progress:
                        query.progress = progress
                    session.commit()
            time.sleep(poll_interval)
            logger.info("Query %i: Polling the cursor for progress", query_id)
            polled = cursor.poll()

    @classmethod
    def _extract_error_message(cls, ex: Exception) -> str:
        if (
            hasattr(ex, "orig")
            and type(ex.orig).__name__ == "DatabaseError"  # type: ignore
            and isinstance(ex.orig[0], dict)  # type: ignore
        ):
            error_dict = ex.orig[0]  # type: ignore
            return "{} at {}: {}".format(
                error_dict.get("errorName"),
                error_dict.get("errorLocation"),
                error_dict.get("message"),
            )
        if type(ex).__name__ == "DatabaseError" and hasattr(ex, "args") and ex.args:
            error_dict = ex.args[0]
            return error_dict.get("message", _("Unknown Presto Error"))
        return utils.error_msg_from_exception(ex)
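A contrived check of the second branch of _extract_error_message; the exception class below is a stand-in with the class name and args shape the method inspects, not pyhive's actual class:

class DatabaseError(Exception):
    """Stand-in: the method only checks type(ex).__name__ and ex.args."""

ex = DatabaseError({
    "errorName": "SYNTAX_ERROR",
    "errorLocation": {"lineNumber": 1, "columnNumber": 8},
    "message": "line 1:8: Column 'foo' cannot be resolved",
})
# No `orig` attribute, so the first branch is skipped; the second branch
# reads the dict in args[0] and returns its "message" value.
print(PrestoEngineSpec._extract_error_message(ex))
# line 1:8: Column 'foo' cannot be resolved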
    @classmethod
    def _partition_query(  # pylint: disable=too-many-arguments,too-many-locals
        cls,
        table_name: str,
        database: "Database",
        limit: int = 0,
        order_by: Optional[List[Tuple[str, bool]]] = None,
        filters: Optional[Dict[Any, Any]] = None,
    ) -> str:
        """Returns a partition query

        :param table_name: the name of the table to get partitions from
        :type table_name: str
        :param database: the database the query will be run against
        :param limit: the number of partitions to be returned
        :type limit: int
        :param order_by: a list of tuples of field name and a boolean
            that determines if that field should be sorted in descending order
        :type order_by: list of (str, bool) tuples
        :param filters: dict of field name and filter value combinations
        """
        limit_clause = "LIMIT {}".format(limit) if limit else ""
        order_by_clause = ""
        if order_by:
            l = []
            for field, desc in order_by:
                # ascending fields must still name the field; appending an
                # empty string would render invalid "ORDER BY , ..." SQL
                l.append(field + " DESC" if desc else field)
            order_by_clause = "ORDER BY " + ", ".join(l)

        where_clause = ""
        if filters:
            l = []
            for field, value in filters.items():
                l.append(f"{field} = '{value}'")
            where_clause = "WHERE " + " AND ".join(l)

        presto_version = database.get_extra().get("version")

        # Partition select syntax changed in v0.199, so check here.
        # Default to the new syntax if version is unset.
        partition_select_clause = (
            f'SELECT * FROM "{table_name}$partitions"'
            if not presto_version
            or StrictVersion(presto_version) >= StrictVersion("0.199")
            else f"SHOW PARTITIONS FROM {table_name}")

        sql = textwrap.dedent(f"""\
            {partition_select_clause}
            {where_clause}
            {order_by_clause}
            {limit_clause}
        """)
        return sql

    @classmethod
    def where_latest_partition(  # pylint: disable=too-many-arguments
        cls,
        table_name: str,
        schema: Optional[str],
        database: "Database",
        query: Select,
        columns: Optional[List[Dict[str, str]]] = None,
    ) -> Optional[Select]:
        try:
            col_names, values = cls.latest_partition(
                table_name, schema, database, show_first=True)
        except Exception:  # pylint: disable=broad-except
            # table is not partitioned
            return None

        if values is None:
            return None

        column_names = {column.get("name") for column in columns or []}
        for col_name, value in zip(col_names, values):
            if col_name in column_names:
                query = query.where(Column(col_name) == value)
        return query

    @classmethod
    def _latest_partition_from_df(cls, df: pd.DataFrame) -> Optional[List[str]]:
        if not df.empty:
            return df.to_records(index=False)[0].item()
        return None

    @classmethod
    def latest_partition(
        cls,
        table_name: str,
        schema: Optional[str],
        database: "Database",
        show_first: bool = False,
    ) -> Tuple[List[str], Optional[List[str]]]:
        """Returns col name and the latest (max) partition value for a table

        :param table_name: the name of the table
        :param schema: schema / database / namespace
        :param database: database query will be run against
        :type database: models.Database
        :param show_first: displays the value for the first partitioning key
            if there are many partitioning keys
        :type show_first: bool

        >>> latest_partition('foo_table')
        (['ds'], ('2018-01-01',))
        """
        indexes = database.get_indexes(table_name, schema)
        if not indexes:
            raise SupersetTemplateException(
                f"Error getting partition for {schema}.{table_name}. "
                "Verify that this table has a partition.")

        if len(indexes[0]["column_names"]) < 1:
            raise SupersetTemplateException(
                "The table should have one partitioned field")

        if not show_first and len(indexes[0]["column_names"]) > 1:
            raise SupersetTemplateException(
                "The table should have a single partitioned field "
                "to use this function. You may want to use "
                "`presto.latest_sub_partition`")

        column_names = indexes[0]["column_names"]
        part_fields = [(column_name, True) for column_name in column_names]
        sql = cls._partition_query(table_name, database, 1, part_fields)
        df = database.get_df(sql, schema)
        return column_names, cls._latest_partition_from_df(df)
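For a sense of _partition_query's output, the call below (against a hypothetical table partitioned by ds, with `database` pointing at Presto >= 0.199) renders roughly the following SQL:

sql = PrestoEngineSpec._partition_query(
    "logs", database, limit=1, order_by=[("ds", True)])
print(sql)
# SELECT * FROM "logs$partitions"
#
# ORDER BY ds DESC
# LIMIT 1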
    @classmethod
    def latest_sub_partition(
        cls, table_name: str, schema: Optional[str], database: "Database",
        **kwargs: Any
    ) -> Any:
        """Returns the latest (max) partition value for a table

        A filtering criteria should be passed for all fields that are
        partitioned except for the field to be returned. For example,
        if a table is partitioned by (``ds``, ``event_type`` and
        ``event_category``) and you want the latest ``ds``, you'll want
        to provide a filter as keyword arguments for both
        ``event_type`` and ``event_category`` as in
        ``latest_sub_partition('my_table', event_category='page',
        event_type='click')``

        :param table_name: the name of the table, can be just the table
            name or a fully qualified table name as ``schema_name.table_name``
        :type table_name: str
        :param schema: schema / database / namespace
        :type schema: str
        :param database: database query will be run against
        :type database: models.Database
        :param kwargs: keyword arguments define the filtering criteria
            on the partition list. There can be many of these.
        :type kwargs: str

        >>> latest_sub_partition('sub_partition_table', event_type='click')
        '2018-01-01'
        """
        indexes = database.get_indexes(table_name, schema)
        part_fields = indexes[0]["column_names"]
        for k in kwargs:
            if k not in part_fields:
                msg = f"Field [{k}] is not part of the partitioning key"
                raise SupersetTemplateException(msg)
        if len(kwargs) != len(part_fields) - 1:
            msg = (
                "A filter needs to be specified for {} out of the {} fields."
            ).format(len(part_fields) - 1, len(part_fields))
            raise SupersetTemplateException(msg)

        for field in part_fields:
            if field not in kwargs:
                field_to_return = field

        sql = cls._partition_query(
            table_name, database, 1, [(field_to_return, True)], kwargs)
        df = database.get_df(sql, schema)
        if df.empty:
            return ""
        return df.to_dict()[field_to_return][0]

    @classmethod
    @cache.memoize()
    def get_function_names(cls, database: "Database") -> List[str]:
        """
        Get a list of function names that are able to be called on the database.
        Used for SQL Lab autocomplete.

        :param database: The database to get functions for
        :return: A list of function names usable in the database
        """
        return database.get_df("SHOW FUNCTIONS")["Function"].tolist()
def test_should_big_integer_convert_int():
    assert_column_conversion(types.BigInteger(), graphene.Int)
              mysql_charset='utf8')

collocation = Table(
    "collocation", metadata,
    Column('id', types.Integer, primary_key=True),
    Column('a', types.Unicode(32)),
    Column('b', types.Unicode(32)),
    Column('colloc_count', types.Integer, default=1),
    Column('sentence_count', types.Integer, default=1),
    mysql_engine='InnoDB',
    mysql_charset='utf8')

reply = Table(
    "reply", metadata,
    Column('id', types.Integer, primary_key=True),
    # the generic BigInteger type takes no display-width argument;
    # a width such as BIGINT(20) belongs on the MySQL dialect type
    Column('tweet_id', types.BigInteger()),
    Column('reply_text', types.Text),
    Column('src_id', types.BigInteger()),
    Column('src_text', types.Text),
    Column('is_analyze', types.SmallInteger, default=False),
    mysql_engine='InnoDB',
    mysql_charset='utf8')

def startSession(conf):
    global init
    config = {
        "sqlalchemy.url": "mysql://" + conf["dbuser"] + ":" + conf["dbpass"]
                          + "@" + conf["dbhost"] + "/" + conf["db"]
                          + "?charset=utf8",
        "sqlalchemy.echo":
from sqlalchemy import types
from sqlalchemy.dialects import mysql

from abstract_db_interface import AbstractDBInterface

# Mapping from python types to sqlalchemy types. The generic SQLAlchemy
# types have no `unsigned` flag, so the unsigned variants use the MySQL
# dialect types instead.
_type_map = {
    'bool': types.Boolean(),
    'char': mysql.SMALLINT(unsigned=True),
    'int8': types.SmallInteger(),
    'uint8': mysql.SMALLINT(unsigned=True),
    'byte': types.SmallInteger(),
    'int16': types.Integer(),
    'uint16': mysql.INTEGER(unsigned=True),
    'int32': types.Integer(),
    'uint32': mysql.INTEGER(unsigned=True),
    'int64': types.BigInteger(),
    'uint64': mysql.BIGINT(unsigned=True),
    'float32': types.Float(precision=32),
    'float64': types.Float(precision=64),
    'string': types.Text(),
}

_suffix_for_array_table = '_data_'
_suffix_for_builtin = '_value_'
_types_table_name = 'message_types'

# TODO: remove all occurrences of roslib.msgs

def getListItem(l, i):
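A small sketch (helper and table names hypothetical, not part of the original interface) of how such a map might be consulted when building columns for a message table:

from sqlalchemy import Column, MetaData, Table

def column_for_field(field_name, ros_type):
    # Look up the SQLAlchemy type for a ROS-style primitive type name;
    # fall back to Text for anything unmapped.
    sqla_type = _type_map.get(ros_type, types.Text())
    return Column(field_name, sqla_type)

metadata = MetaData()
poses = Table(
    "poses", metadata,
    column_for_field("seq", "uint32"),
    column_for_field("x", "float64"),
)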
"""샘플쿼리 모듈 :filename: - query.py :modified: - 2017.08.24 :note: - 이 모듈에서는 자주사용하는 샘플쿼리를 미리 정의함 """ '''모듈 불러오기''' from sqlalchemy import types #ALCHEMY for engine '''쿼리 인스턴스 임포트''' #기본상품정보조회 #컬럼 데이터타입 basic_col = \ {'ISIN_NO' :types.NVARCHAR(length=50), 'STD_DATE' :types.DateTime(), 'FIRST_AMT' :types.BigInteger(), 'REMAIN_AMT' :types.BigInteger(), 'EFF_DATE' :types.DateTime(), 'MAT_DATE' :types.DateTime(), 'PRSV_RATE' :types.Float()} #쿼리문 basic_sql = \ ( "select ISIN_NO," " to_date(STD_DATE,'yyyymmdd') STD_DATE, " " FIRST_AMT, REMAIN_AMT, " " EFF_DATE, MAT_DATE, PRSV_RATE " "from " "( " " select tblLATEST.ISIN_NO, " #ISIN번호 " greatest(tblLATEST.STND_DATE, nvl(tblREFUND.STND_DATE,0)) STD_DATE, " #처리일자 " tblLATEST.FIRST_AMT/1000000 FIRST_AMT, " #최초발행금액 (백만원)
import sqlalchemy
from sqlalchemy import Column, MetaData, Table, types
from sqlalchemy.orm import scoped_session, sessionmaker, mapper
from datetime import datetime

class Message(object):
    pass

metadata = sqlalchemy.MetaData()
message = Table(
    "message", metadata,
    # display widths such as BIGINT(20) or INT(11) are MySQL-specific;
    # the generic types take no width argument
    Column('id', types.BigInteger(), primary_key=True),
    Column('parent_id', types.BigInteger()),
    Column('incident_id', types.Integer()),
    Column('user_id', types.Integer()),
    Column('reporter_id', types.BigInteger()),
    Column('service_messageid', types.Unicode(100)),
    Column('message_from', types.Unicode(100)),
    Column('message_to', types.Unicode(100)),
    Column('message', types.Unicode),
    Column('message_detail', types.Unicode),
    Column('message_type', types.SmallInteger()),
    Column('message_date', types.DateTime),
    Column('message_level', types.SmallInteger()),
    Column('type', types.Integer()),
    mysql_engine='InnoDB',
    mysql_charset='utf8')
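Given the mapper and sessionmaker imports, the classical-mapping follow-up presumably looks something like this sketch (the connection URL is illustrative):

mapper(Message, message)  # classical mapping: tie the class to the table

engine = sqlalchemy.create_engine(
    "mysql://user:***@localhost/incidents?charset=utf8")
metadata.create_all(engine)
Session = scoped_session(sessionmaker(bind=engine))

msg = Message()
msg.message_from = "reporter@example.org"
msg.message_date = datetime.now()
session = Session()
session.add(msg)
session.commit()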
def test_should_big_integer_convert_float():
    # BigInteger exceeds GraphQL's 32-bit Int, so it converts to Float
    assert get_field(types.BigInteger()).type == graphene.Float
import logging

from sqlalchemy import types
from sqlalchemy.engine import default
from sqlalchemy.sql import compiler
from sqlalchemy.sql import expression
from sqlalchemy.sql import operators
from sqlalchemy.sql.expression import BindParameter

from sqlalchemy_solr.solr_type_compiler import SolrTypeCompiler
from sqlalchemy_solr.solrdbapi.array import ARRAY

logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.ERROR)

# Map Solr field types to SQLAlchemy types; the plural Solr types are
# multi-valued fields and map to ARRAY of the scalar type.
_type_map = {
    "binary": types.LargeBinary(),
    "boolean": types.Boolean(),
    "pdate": types.DateTime(),
    "pint": types.Integer(),
    "plong": types.BigInteger(),
    "pfloat": types.Float(),
    "pdouble": types.REAL(),
    "string": types.VARCHAR(),
    "text_general": types.Text(),
    "booleans": ARRAY(types.BOOLEAN()),
    "pints": ARRAY(types.Integer()),
    "plongs": ARRAY(types.BigInteger()),
    "pfloats": ARRAY(types.Float()),
    "pdoubles": ARRAY(types.REAL()),
    "strings": ARRAY(types.VARCHAR()),
}

class SolrCompiler(compiler.SQLCompiler):
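Finally, a hedged sketch of how a dialect might consult a map like the one above when describing Solr fields; the helper below is illustrative, not sqlalchemy-solr's actual API:

def solr_field_to_column_info(name, solr_type):
    # Fall back to Text for unmapped field types rather than failing.
    return {
        "name": name,
        "type": _type_map.get(solr_type, types.Text()),
        "nullable": True,
    }

print(solr_field_to_column_info("views", "plong"))
# {'name': 'views', 'type': BigInteger(), 'nullable': True}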