def _fixed_lookup_fixture(self):
    return [
        (sqltypes.String(), sqltypes.VARCHAR()),
        (sqltypes.String(1), sqltypes.VARCHAR(1)),
        (sqltypes.String(3), sqltypes.VARCHAR(3)),
        (sqltypes.Text(), sqltypes.TEXT()),
        (sqltypes.Unicode(), sqltypes.VARCHAR()),
        (sqltypes.Unicode(1), sqltypes.VARCHAR(1)),
        (sqltypes.UnicodeText(), sqltypes.TEXT()),
        (sqltypes.CHAR(3), sqltypes.CHAR(3)),
        (sqltypes.NUMERIC, sqltypes.NUMERIC()),
        (sqltypes.NUMERIC(10, 2), sqltypes.NUMERIC(10, 2)),
        (sqltypes.Numeric, sqltypes.NUMERIC()),
        (sqltypes.Numeric(10, 2), sqltypes.NUMERIC(10, 2)),
        (sqltypes.DECIMAL, sqltypes.DECIMAL()),
        (sqltypes.DECIMAL(10, 2), sqltypes.DECIMAL(10, 2)),
        (sqltypes.INTEGER, sqltypes.INTEGER()),
        (sqltypes.BIGINT, sqltypes.BIGINT()),
        (sqltypes.Float, sqltypes.FLOAT()),
        (sqltypes.TIMESTAMP, sqltypes.TIMESTAMP()),
        (sqltypes.DATETIME, sqltypes.DATETIME()),
        (sqltypes.DateTime, sqltypes.DATETIME()),
        (sqltypes.DateTime(), sqltypes.DATETIME()),
        (sqltypes.DATE, sqltypes.DATE()),
        (sqltypes.Date, sqltypes.DATE()),
        (sqltypes.TIME, sqltypes.TIME()),
        (sqltypes.Time, sqltypes.TIME()),
        (sqltypes.BOOLEAN, sqltypes.BOOLEAN()),
        (sqltypes.Boolean, sqltypes.BOOLEAN()),
    ]
class CrawlJob(EntityModel):
    __tablename__ = 'crawl_job'

    flight_date = Column(dbt.DATE, nullable=False)
    airline = Column(dbt.CHAR(32), nullable=False)
    origin = Column(dbt.CHAR(3), nullable=False)
    destination = Column(dbt.CHAR(3), nullable=False)
    period = Column(dbt.Integer, default=24 * 60 * 60)
    next_run_after = Column(dbt.DATETIME, nullable=False, default=0)

    _unique_cols = ("flight_date", "airline", "origin", "destination")
    __table_args__ = (UniqueConstraint(*_unique_cols, name='unique_job'),)

    # Sentinel so get_jobs() can resolve its default at call time.
    _UNSET = object()

    def __str__(self):
        return "%s %s-%s/%s every %ss" % (
            self.flight_date,
            self.origin,
            self.destination,
            self.airline,
            self.period,
        )

    @classmethod
    def upsert(cls, **data):  # TODO testme
        id_ = cls.add(**data)
        if id_ is not None:
            return id_
        filter_cond = {
            k: v for k, v in data.items() if k in set(cls._unique_cols)
        }
        matches = cls.query.filter_by(**filter_cond)
        data.pop("id", None)
        n_affected = matches.update(data)
        assert n_affected == 1
        return matches[0].id

    def to_dict(self):
        return {
            "id": self.id,
            "flight_date": self.flight_date.strftime("%Y-%m-%d"),
            "airline": self.airline,
            "origin": self.origin,
            "destination": self.destination,
            "period": self.period,
            "next_run_after": self.next_run_after.strftime("%Y-%m-%d %H:%M:%S"),
        }

    @classmethod
    def get_jobs(cls, at=_UNSET):
        # A default of `datetime.now()` would be evaluated once at import
        # time, so resolve "now" at call time instead; `None` still means
        # "all jobs".
        if at is cls._UNSET:
            at = datetime.now()
        if at is None:
            return cls.query.all()
        return cls.query.filter(cls.next_run_after <= at).all()
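A hedged usage sketch of the upsert flow above; the field values are illustrative and not from the original project:

from datetime import date, datetime

job_id = CrawlJob.upsert(
    flight_date=date(2018, 1, 1),
    airline="CZ",
    origin="PEK",
    destination="CAN",
    period=3600,
    next_run_after=datetime.now(),
)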
class Web2PyBoolean(types.TypeDecorator):
    impl = types.CHAR(1)
    python_type = bool
    # From the docs
    # (https://docs.sqlalchemy.org/en/14/core/custom_types.html#sqlalchemy.types.TypeDecorator.cache_ok):
    # "The requirements for cacheable elements is that they are hashable and
    # also that they indicate the same SQL rendered for expressions using this
    # type every time for a given cache value."
    cache_ok = True

    def process_bind_param(self, value, dialect):
        if value is None:
            return None
        # Any truthy value is stored as "T", any falsy value as "F".
        return "T" if value else "F"

    def process_result_value(self, value, dialect):
        if value == "T":
            return True
        elif value == "F":
            return False
        elif value is None:
            return None
        else:
            assert False, f"{value} is not T or F"

    def copy(self, **kw):
        return Web2PyBoolean(self.impl.length)
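A minimal usage sketch, assuming a declarative Base that is not part of the snippet; web2py stores booleans as 'T'/'F' in a CHAR(1) column, and the decorator round-trips them to Python bool:

from sqlalchemy import Column, Integer
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Setting(Base):
    __tablename__ = "settings"
    id = Column(Integer, primary_key=True)
    enabled = Column(Web2PyBoolean())  # persisted as 'T' / 'F'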
def load_dialect_impl(self, dialect):
    if dialect.name == 'mysql':
        return dialect.type_descriptor(mysql.MSBinary(16))
    elif dialect.name in ('postgres', 'postgresql'):
        return dialect.type_descriptor(postgresql.PGUuid())
    else:
        return dialect.type_descriptor(types.CHAR(self.impl.length))
def load_dialect_impl(self, dialect):
    if dialect.name == 'mysql':
        return dialect.type_descriptor(mysql.MSBinary)
    elif dialect.name == 'postgresql':
        return dialect.type_descriptor(postgres.UUID())
    else:
        return dialect.type_descriptor(types.CHAR(32))
class TrackOld(Base):
    __tablename__ = 'track_old'

    id = Column("id", Integer, primary_key=True, autoincrement=True)
    date = Column("date", types.TIMESTAMP(timezone=False))
    trkptnum = Column("trkptnum", Integer)
    distance = Column("distance", types.Numeric(11, 4))
    timespan = Column("timespan", types.Interval)
    gencpoly_pts = Column("gencpoly_pts", types.UnicodeText)
    gencpoly_levels = Column("gencpoly_levels", types.UnicodeText)
    color = Column("color", types.CHAR(6), default='FF0000')
    maxlat = Column("maxlat", types.Numeric(9, 7))
    maxlon = Column("maxlon", types.Numeric(10, 7))
    minlat = Column("minlat", types.Numeric(9, 7))
    minlon = Column("minlon", types.Numeric(10, 7))
    json_0002 = Column("json_0002", Text)

    def __init__(self, date, trkptnum, distance, timespan, gencpoly_pts,
                 gencpoly_levels, color, maxlat, maxlon, minlat, minlon,
                 json_0002):
        self.date = date
        self.trkptnum = trkptnum
        self.distance = distance
        self.timespan = timespan
        self.gencpoly_pts = gencpoly_pts
        self.gencpoly_levels = gencpoly_levels
        self.color = color
        self.maxlat = maxlat
        self.maxlon = maxlon
        self.minlat = minlat
        self.minlon = minlon
        self.json_0002 = json_0002
class Web2PyBoolean(types.TypeDecorator):
    impl = types.CHAR(1)

    def process_bind_param(self, value, dialect):
        if value is None:
            return None
        # Any truthy value is stored as 'T', any falsy value as 'F'.
        return 'T' if value else 'F'

    def process_result_value(self, value, dialect):
        if value == 'T':
            return True
        elif value == 'F':
            return False
        elif value is None:
            return None
        else:
            assert False

    def copy(self, **kw):
        return Web2PyBoolean(self.impl.length)
def get_columns(self, connection, table_name, schema=None, **kw):
    if table_name is None:
        return []
    columns = []
    rows = self._get_columns(connection, table_name=table_name,
                             schema=schema, **kw)
    table_name = self.denormalize_name(table_name)
    for row in rows:
        (colname, coltype, length, precision, scale, nullable, default,
         identity, is_distribution_key) = \
            (row[0], row[1], row[2], row[3], row[4], row[5], row[6],
             row[7], row[8])
        # FIXME: Missing type support: INTERVAL DAY [(p)] TO SECOND [(fp)],
        # INTERVAL YEAR [(p)] TO MONTH

        # remove ASCII, UTF8 and spaces from char-like types
        coltype = re.sub(r'ASCII|UTF8| ', '', coltype)
        # remove precision and scale addition from numeric types
        coltype = re.sub(r'\(\d+(,\d+)?\)', '', coltype)
        try:
            if coltype == 'VARCHAR':
                coltype = sqltypes.VARCHAR(length)
            elif coltype == 'CHAR':
                coltype = sqltypes.CHAR(length)
            elif coltype == 'DECIMAL':
                # this dialect forces INTTYPESINRESULTSIFPOSSIBLE=y on the
                # ODBC level, so we need to convert DECIMAL(<=18, 0) back to
                # INTEGER and DECIMAL(36, 0) back to BIGINT
                if scale == 0 and precision <= 18:
                    coltype = sqltypes.INTEGER()
                elif scale == 0 and precision == 36:
                    coltype = sqltypes.BIGINT()
                else:
                    coltype = sqltypes.DECIMAL(precision, scale)
            else:
                coltype = self.ischema_names[coltype]
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (coltype, colname))
            coltype = sqltypes.NULLTYPE

        cdict = {
            'name': self.normalize_name(colname),
            'type': coltype,
            'nullable': nullable,
            'default': default,
            'is_distribution_key': is_distribution_key,
        }
        if identity:
            identity = int(identity)
        # if we have a positive identity value, add a sequence
        if identity is not None and identity >= 0:
            cdict['sequence'] = {'name': ''}
            # TODO: we have the possibility to encode the current identity
            # value count into the column metadata, but the consequence is
            # that it would also be used as the start value in CREATE
            # statements. For now the current value is ignored.
            # Add it by changing the dict to: {'name': '', 'start': int(identity)}
        columns.append(cdict)
    return columns
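A standalone illustration (not part of the dialect) of how the two regex cleanups above normalize a raw char-like type string before the type lookup:

import re

raw = 'VARCHAR(100) UTF8'
cleaned = re.sub(r'ASCII|UTF8| ', '', raw)        # -> 'VARCHAR(100)'
cleaned = re.sub(r'\(\d+(,\d+)?\)', '', cleaned)  # -> 'VARCHAR'
print(cleaned)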
def test_no_convert_unicode(self):
    """test no utf-8 encoding occurs"""
    dialect = sqlite.dialect()
    for t in (
        String(convert_unicode=True),
        sqltypes.CHAR(convert_unicode=True),
        sqltypes.Unicode(),
        sqltypes.UnicodeText(),
    ):
        bindproc = t.dialect_impl(dialect).bind_processor(dialect)
        assert not bindproc or \
            isinstance(bindproc(util.u('some string')), util.text_type)
def load_dialect_impl(self, dialect):
    if dialect.name == 'postgresql':
        # Use the native UUID type.
        return dialect.type_descriptor(postgresql.UUID())
    else:
        # Fall back to either a BINARY or a CHAR.
        kind = self.impl if self.binary else types.CHAR(32)
        return dialect.type_descriptor(kind)
class User(Base):
    __tablename__ = 'users'
    # I've already forgotten what kind of "manual sunset" (needlessly
    # manual busywork) this alchemy of yours is.

    id = Column(Integer, primary_key=True)
    email = Column(String(100))
    first_name = Column(String(50))
    last_name = Column(String(50))
    gender = Column(types.CHAR(1))
    birth_date = Column(Integer)
def load_dialect_impl(self, dialect):
    if self.native and dialect.name in ('postgresql', 'cockroachdb'):
        # Use the native UUID type.
        return dialect.type_descriptor(postgresql.UUID())
    elif self.native and dialect.name == 'mssql':
        # Use the native UNIQUEIDENTIFIER type.
        return dialect.type_descriptor(mssql.UNIQUEIDENTIFIER())
    else:
        # Fall back to either a BINARY or a CHAR.
        kind = self.impl if self.binary else types.CHAR(32)
        return dialect.type_descriptor(kind)
def test_basic_reflection(self): meta = self.metadata users = Table( "engine_users", meta, Column("user_id", types.INT, primary_key=True), Column("user_name", types.VARCHAR(20), nullable=False), Column("test1", types.CHAR(5), nullable=False), Column("test2", types.Float(5), nullable=False), Column("test2.5", types.Float(), nullable=False), Column("test3", types.Text()), Column("test4", types.Numeric, nullable=False), Column("test4.5", types.Numeric(10, 2), nullable=False), Column("test5", types.DateTime), Column( "parent_user_id", types.Integer, ForeignKey("engine_users.user_id"), ), Column("test6", types.DateTime, nullable=False), Column("test7", types.Text()), Column("test8", types.LargeBinary()), Column("test_passivedefault2", types.Integer, server_default="5"), Column("test9", types.BINARY(100)), Column("test_numeric", types.Numeric()), ) addresses = Table( "engine_email_addresses", meta, Column("address_id", types.Integer, primary_key=True), Column("remote_user_id", types.Integer, ForeignKey(users.c.user_id)), Column("email_address", types.String(20)), ) meta.create_all() meta2 = MetaData() reflected_users = Table("engine_users", meta2, autoload=True, autoload_with=testing.db) reflected_addresses = Table( "engine_email_addresses", meta2, autoload=True, autoload_with=testing.db, ) self.assert_tables_equal(users, reflected_users) self.assert_tables_equal(addresses, reflected_addresses)
def _fixed_lookup_fixture(self):
    return [
        (sqltypes.String(), sqltypes.VARCHAR()),
        (sqltypes.String(1), sqltypes.VARCHAR(1)),
        (sqltypes.String(3), sqltypes.VARCHAR(3)),
        (sqltypes.Text(), sqltypes.TEXT()),
        (sqltypes.Unicode(), sqltypes.VARCHAR()),
        (sqltypes.Unicode(1), sqltypes.VARCHAR(1)),
        (sqltypes.UnicodeText(), sqltypes.TEXT()),
        (sqltypes.CHAR(3), sqltypes.CHAR(3)),
        (sqltypes.NUMERIC, sqltypes.NUMERIC()),
        (sqltypes.NUMERIC(10, 2), sqltypes.NUMERIC(10, 2)),
        (sqltypes.Numeric, sqltypes.NUMERIC()),
        (sqltypes.Numeric(10, 2), sqltypes.NUMERIC(10, 2)),
        (sqltypes.DECIMAL, sqltypes.DECIMAL()),
        (sqltypes.DECIMAL(10, 2), sqltypes.DECIMAL(10, 2)),
        (sqltypes.INTEGER, sqltypes.INTEGER()),
        (sqltypes.BIGINT, sqltypes.BIGINT()),
        (sqltypes.Float, sqltypes.FLOAT()),
        (sqltypes.TIMESTAMP, sqltypes.TIMESTAMP()),
        (sqltypes.DATETIME, sqltypes.DATETIME()),
        (sqltypes.DateTime, sqltypes.DATETIME()),
        (sqltypes.DateTime(), sqltypes.DATETIME()),
        (sqltypes.DATE, sqltypes.DATE()),
        (sqltypes.Date, sqltypes.DATE()),
        (sqltypes.TIME, sqltypes.TIME()),
        (sqltypes.Time, sqltypes.TIME()),
        (sqltypes.BOOLEAN, sqltypes.BOOLEAN()),
        (sqltypes.Boolean, sqltypes.BOOLEAN()),
        (
            sqlite.DATE(storage_format="%(year)04d%(month)02d%(day)02d"),
            sqltypes.DATE(),
        ),
        (
            sqlite.TIME(storage_format="%(hour)02d%(minute)02d%(second)02d"),
            sqltypes.TIME(),
        ),
        (
            sqlite.DATETIME(
                storage_format="%(year)04d%(month)02d%(day)02d"
                "%(hour)02d%(minute)02d%(second)02d"
            ),
            sqltypes.DATETIME(),
        ),
    ]
class Enum34(types.TypeDecorator):
    impl = types.CHAR(20)

    def __init__(self, enum_class, *args, **kwargs):
        super(Enum34, self).__init__(*args, **kwargs)
        self.enum_class = enum_class

    def process_bind_param(self, value, dialect):
        if value is None:
            return None
        if value not in self.enum_class:
            raise ValueError("'%s' is not a valid enum value" % repr(value))
        return value.value

    def process_result_value(self, value, dialect):
        if value is not None:
            return self.enum_class(value)
        return None
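A hedged usage sketch; the Color enum and the model are illustrative assumptions, not part of the original snippet:

import enum

from sqlalchemy import Column, Integer
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Color(enum.Enum):
    RED = "red"
    GREEN = "green"

class Pixel(Base):
    __tablename__ = "pixels"
    id = Column(Integer, primary_key=True)
    # Stored as the member's value ("red"/"green") in a CHAR(20) column;
    # rows come back as Color members via process_result_value().
    color = Column(Enum34(Color))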
def test_basic_reflection(self):
    meta = self.metadata

    users = Table(
        'engine_users',
        meta,
        Column('user_id', types.INT, primary_key=True),
        Column('user_name', types.VARCHAR(20), nullable=False),
        Column('test1', types.CHAR(5), nullable=False),
        Column('test2', types.Float(5), nullable=False),
        Column('test3', types.Text()),
        Column('test4', types.Numeric, nullable=False),
        Column('test5', types.DateTime),
        Column('parent_user_id', types.Integer,
               ForeignKey('engine_users.user_id')),
        Column('test6', types.DateTime, nullable=False),
        Column('test7', types.Text()),
        Column('test8', types.LargeBinary()),
        Column('test_passivedefault2', types.Integer, server_default='5'),
        Column('test9', types.BINARY(100)),
        Column('test_numeric', types.Numeric()),
    )

    addresses = Table(
        'engine_email_addresses',
        meta,
        Column('address_id', types.Integer, primary_key=True),
        Column('remote_user_id', types.Integer,
               ForeignKey(users.c.user_id)),
        Column('email_address', types.String(20)),
    )
    meta.create_all()

    meta2 = MetaData()
    reflected_users = Table('engine_users', meta2, autoload=True,
                            autoload_with=testing.db)
    reflected_addresses = Table('engine_email_addresses', meta2,
                                autoload=True, autoload_with=testing.db)
    self.assert_tables_equal(users, reflected_users)
    self.assert_tables_equal(addresses, reflected_addresses)
class User(BaseModel):
    __tablename__ = 'Users'

    id = sqlalchemy.Column(UUID(), primary_key=True, default=uuid.uuid4)
    email = sqlalchemy.Column(types.Text, unique=True, index=True,
                              nullable=False)
    password = sqlalchemy.Column(types.CHAR(256))
    name = sqlalchemy.Column(types.Text)
    surname = sqlalchemy.Column(types.Text)
    permissions = relationship(Permission, backref='user')
    attachments = relationship(UserAttachments)
    tickets = relationship(Ticket, backref='user',
                           foreign_keys=[Ticket.user_id])
    data = sqlalchemy.Column(types.JSON, default={})
    confirmation_id = sqlalchemy.Column(UUID(), index=True,
                                        default=uuid.uuid4)
    confirmed = sqlalchemy.Column(types.BOOLEAN, server_default='f')

    def to_protobuf(self, with_permissions=True,
                    permission_type='') -> user_pb2.User:
        return user_pb2.User(
            name=self.name,
            surname=self.surname,
            email=self.email,
            id=str(self.id),
            data=self.data,
            permissions=[
                permission.to_protobuf()
                for permission in self.permissions
            ] if with_permissions else [],
            permission_type=permission_type,
            confirmed=self.confirmed,
        )
def setUpClass(cls):
    col1 = {
        'name': 'col1',
        'type': types.VARCHAR(length=11),
        'nullable': True,
        'default': None
    }
    col2 = {
        'name': 'col2',
        'type': types.DATE(),
        'nullable': False,
        'default': None
    }
    col3 = {
        'name': 'col3',
        'type': types.INTEGER(),
        'nullable': True,
        'default': None
    }
    col4 = {
        'name': 'col4',
        'type': types.FLOAT(),
        'nullable': True,
        'default': None
    }
    col5 = {
        'name': 'col5',
        'type': types.CHAR(length=10, collation='Latin1_General_CI_AS'),
        'nullable': True,
        'default': None
    }
    cols = [col1, col2, col3, col4, col5]
    cls.info = I2FVG.Info(name='tbl_prova', unique=['pk1', 'pk2'],
                          keys=[], foreign=[], columns=cols)
class UUID(TypeDecorator):
    """Platform-independent GUID type.

    Uses PostgreSQL's UUID type, otherwise uses CHAR(36).
    """
    impl = types.CHAR(36)

    def load_dialect_impl(self, dialect):
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(pUUID())
        else:
            return dialect.type_descriptor(types.CHAR(36))

    def process_bind_param(self, value, dialect):
        if value is None:
            return value
        return str(value).lower()

    def process_result_value(self, value, dialect):
        if value is None:
            return value
        return value.lower()
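A minimal usage sketch, assuming a declarative Base and model that are not part of the snippet:

import uuid

from sqlalchemy import Column
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Document(Base):
    __tablename__ = "documents"
    # Native UUID on PostgreSQL, CHAR(36) everywhere else.
    id = Column(UUID(), primary_key=True, default=lambda: str(uuid.uuid4()))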
'DateTimeField': simple(types.DateTime),
'DecimalField': lambda x: types.Numeric(scale=x.decimal_places,
                                        precision=x.max_digits),
'FileField': varchar,
'FilePathField': varchar,
'FloatField': simple(types.Float),
'IntegerField': simple(types.Integer),
'BigIntegerField': simple(types.BigInteger),
'IPAddressField': lambda field: types.CHAR(length=15),
'NullBooleanField': simple(types.Boolean),
'OneToOneField': foreign_key,
'ForeignKey': foreign_key,
'PositiveIntegerField': simple(types.Integer),
'PositiveSmallIntegerField': simple(types.SmallInteger),
'SlugField': varchar,
'SmallIntegerField': simple(types.SmallInteger),
'TextField': simple(types.Text),
def load_dialect_impl(self, dialect):
    if dialect.name == 'postgresql':
        from sqlalchemy.dialects.postgresql import UUID as _UUID
        return dialect.type_descriptor(_UUID(as_uuid=True))
    else:
        return dialect.type_descriptor(types.CHAR(32))
    - requires_quotes: indicates if the data type should be quoted
    - sqlalchemy_type: an instance of the `sqlalchemy.types.TypeEngine
      <https://docs-sqlalchemy.readthedocs.io/ko/latest/core/type_api.html#sqlalchemy.types.TypeEngine>`__
      lineage.
    """

    name: str
    requires_quotes: Optional[bool] = True
    sqlalchemy_type: Optional[Any] = None

    def __repr__(self) -> str:
        return self.name


# TODO: break these out into meaningful data types
quoted_types = (
    ("CHAR", types.CHAR(length=1)),
    ("DATE", types.DATE()),
    ("DATETIME", types.DATETIME()),
    ("JSON", types.JSON()),
    ("TIME", types.TIME()),
def load_dialect_impl(self, dialect: Any) -> Any:
    if dialect.name == "postgresql":
        return dialect.type_descriptor(postgresql.UUID())
    else:
        return dialect.type_descriptor(types.CHAR(32))
def load_dialect_impl(self, dialect):
    if dialect.name == 'postgresql':
        return dialect.type_descriptor(postgresql.UUID())
    return dialect.type_descriptor(types.CHAR(36))
"BigAutoField": simple(types.BigInteger), "BooleanField": simple(types.Boolean), "CharField": varchar, "CommaSeparatedIntegerField": varchar, "DateField": simple(types.Date), "DateTimeField": simple(types.DateTime), "DecimalField": lambda x: types.Numeric( scale=x.decimal_places, precision=x.max_digits ), "DurationField": simple(types.Interval), "FileField": varchar, "FilePathField": varchar, "FloatField": simple(types.Float), "IntegerField": simple(types.Integer), "BigIntegerField": simple(types.BigInteger), "IPAddressField": lambda field: types.CHAR(length=15), "NullBooleanField": simple(types.Boolean), "OneToOneField": foreign_key, "ForeignKey": foreign_key, "PositiveIntegerField": simple(types.Integer), "PositiveSmallIntegerField": simple(types.SmallInteger), "SlugField": varchar, "SmallIntegerField": simple(types.SmallInteger), "TextField": simple(types.Text), "TimeField": simple(types.Time), } # Update with dialect specific data types DATA_TYPES["ArrayField"] = lambda field: postgres.array_type(DATA_TYPES, field) DATA_TYPES["UUIDField"] = simple(sqlalchemy.dialects.postgresql.UUID)
def load_dialect_impl(self, dialect):
    if dialect.name == 'postgresql':
        assert pg_UUID is not None
        return dialect.type_descriptor(pg_UUID())
    else:
        return dialect.type_descriptor(types.CHAR(32))
class PrestoEngineSpec(BaseEngineSpec):
    engine = "presto"
    engine_name = "Presto"

    _time_grain_expressions = {
        None: "{col}",
        "PT1S": "date_trunc('second', CAST({col} AS TIMESTAMP))",
        "PT1M": "date_trunc('minute', CAST({col} AS TIMESTAMP))",
        "PT1H": "date_trunc('hour', CAST({col} AS TIMESTAMP))",
        "P1D": "date_trunc('day', CAST({col} AS TIMESTAMP))",
        "P1W": "date_trunc('week', CAST({col} AS TIMESTAMP))",
        "P1M": "date_trunc('month', CAST({col} AS TIMESTAMP))",
        "P0.25Y": "date_trunc('quarter', CAST({col} AS TIMESTAMP))",
        "P1Y": "date_trunc('year', CAST({col} AS TIMESTAMP))",
        "P1W/1970-01-03T00:00:00Z": "date_add('day', 5, date_trunc('week', "
        "date_add('day', 1, CAST({col} AS TIMESTAMP))))",
        "1969-12-28T00:00:00Z/P1W": "date_add('day', -1, date_trunc('week', "
        "date_add('day', 1, CAST({col} AS TIMESTAMP))))",
    }

    @classmethod
    def get_allow_cost_estimate(cls, version: Optional[str] = None) -> bool:
        return version is not None and StrictVersion(version) >= StrictVersion(
            "0.319"
        )

    @classmethod
    def get_table_names(
        cls, database: "Database", inspector: Inspector, schema: Optional[str]
    ) -> List[str]:
        tables = super().get_table_names(database, inspector, schema)
        if not is_feature_enabled("PRESTO_SPLIT_VIEWS_FROM_TABLES"):
            return tables

        views = set(cls.get_view_names(database, inspector, schema))
        actual_tables = set(tables) - views
        return list(actual_tables)

    @classmethod
    def get_view_names(
        cls, database: "Database", inspector: Inspector, schema: Optional[str]
    ) -> List[str]:
        """Returns an empty list unless the feature flag is enabled

        get_table_names() returns all table names and view names, and
        get_view_names() is not implemented in sqlalchemy_presto.py
        https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py
        """
        if not is_feature_enabled("PRESTO_SPLIT_VIEWS_FROM_TABLES"):
            return []

        if schema:
            sql = (
                "SELECT table_name FROM information_schema.views "
                "WHERE table_schema=%(schema)s"
            )
            params = {"schema": schema}
        else:
            sql = "SELECT table_name FROM information_schema.views"
            params = {}

        engine = cls.get_engine(database, schema=schema)
        with closing(engine.raw_connection()) as conn:
            with closing(conn.cursor()) as cursor:
                cursor.execute(sql, params)
                results = cursor.fetchall()
        return [row[0] for row in results]

    @classmethod
    def _create_column_info(cls, name: str, data_type: str) -> Dict[str, Any]:
        """
        Create column info object
        :param name: column name
        :param data_type: column data type
        :return: column info object
        """
        return {"name": name, "type": f"{data_type}"}

    @classmethod
    def _get_full_name(cls, names: List[Tuple[str, str]]) -> str:
        """
        Get the full column name
        :param names: list of all individual column names
        :return: full column name
        """
        return ".".join(column[0] for column in names if column[0])

    @classmethod
    def _has_nested_data_types(cls, component_type: str) -> bool:
        """
        Check if string contains a data type. We determine if there is a data
        type by whitespace, or multiple data types by commas
        :param component_type: data type
        :return: boolean
        """
        comma_regex = r",(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
        white_space_regex = r"\s(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
        return (
            re.search(comma_regex, component_type) is not None
            or re.search(white_space_regex, component_type) is not None
        )

    @classmethod
    def _split_data_type(cls, data_type: str, delimiter: str) -> List[str]:
        """
        Split data type based on given delimiter. Do not split the string if
        the delimiter is enclosed in quotes
        :param data_type: data type
        :param delimiter: string separator (i.e. open parenthesis, closed
            parenthesis, comma, whitespace)
        :return: list of strings after breaking it by the delimiter
        """
        return re.split(
            r"{}(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)".format(delimiter), data_type
        )

    @classmethod
    def _parse_structural_column(  # pylint: disable=too-many-locals,too-many-branches
        cls,
        parent_column_name: str,
        parent_data_type: str,
        result: List[Dict[str, Any]],
    ) -> None:
        """
        Parse a row or array column
        :param result: list tracking the results
        """
        formatted_parent_column_name = parent_column_name
        # Quote the column name if there is a space
        if " " in parent_column_name:
            formatted_parent_column_name = f'"{parent_column_name}"'
        full_data_type = f"{formatted_parent_column_name} {parent_data_type}"
        original_result_len = len(result)
        # split on open parenthesis ( to get the structural
        # data type and its component types
        data_types = cls._split_data_type(full_data_type, r"\(")
        stack: List[Tuple[str, str]] = []
        for data_type in data_types:
            # split on closed parenthesis ) to track which component
            # types belong to what structural data type
            inner_types = cls._split_data_type(data_type, r"\)")
            for inner_type in inner_types:
                # We have finished parsing multiple structural data types
                if not inner_type and stack:
                    stack.pop()
                elif cls._has_nested_data_types(inner_type):
                    # split on comma , to get individual data types
                    single_fields = cls._split_data_type(inner_type, ",")
                    for single_field in single_fields:
                        single_field = single_field.strip()
                        # If the component type starts with a comma, the first
                        # single field will be an empty string. Disregard this
                        # empty string.
                        if not single_field:
                            continue
                        # split on whitespace to get field name and data type
                        field_info = cls._split_data_type(single_field, r"\s")
                        # check if there is a structural data type within
                        # the overall structural data type
                        column_type = cls.get_sqla_column_type(field_info[1])
                        if column_type is None:
                            raise NotImplementedError(
                                _("Unknown column type: %(col)s", col=field_info[1])
                            )
                        if field_info[1] == "array" or field_info[1] == "row":
                            stack.append((field_info[0], field_info[1]))
                            full_parent_path = cls._get_full_name(stack)
                            result.append(
                                cls._create_column_info(
                                    full_parent_path, column_type
                                )
                            )
                        else:
                            # otherwise this field is a basic data type
                            full_parent_path = cls._get_full_name(stack)
                            column_name = "{}.{}".format(
                                full_parent_path, field_info[0]
                            )
                            result.append(
                                cls._create_column_info(column_name, column_type)
                            )
                    # If the component type ends with a structural data type,
                    # do not pop the stack. We have run across a structural
                    # data type within the overall structural data type.
                    # Otherwise, we have completely parsed through the entire
                    # structural data type and can move on.
                    if not (
                        inner_type.endswith("array") or inner_type.endswith("row")
                    ):
                        stack.pop()
                elif inner_type in ("array", "row"):
                    # We have an array of row objects (i.e. array(row(...))).
                    # Push a dummy object to represent the structural data type.
                    stack.append(("", inner_type))
                elif stack:
                    # We have an array of a basic data type (i.e. array(varchar)),
                    # so we have finished parsing the structural data type and
                    # can move on.
                    stack.pop()
        # Unquote the column name if necessary
        if formatted_parent_column_name != parent_column_name:
            for index in range(original_result_len, len(result)):
                result[index]["name"] = result[index]["name"].replace(
                    formatted_parent_column_name, parent_column_name
                )

    @classmethod
    def _show_columns(
        cls, inspector: Inspector, table_name: str, schema: Optional[str]
    ) -> List[RowProxy]:
        """
        Show presto column names
        :param inspector: object that performs database schema inspection
        :param table_name: table name
        :param schema: schema name
        :return: list of column objects
        """
        quote = inspector.engine.dialect.identifier_preparer.quote_identifier
        full_table = quote(table_name)
        if schema:
            full_table = "{}.{}".format(quote(schema), full_table)
        columns = inspector.bind.execute(
            "SHOW COLUMNS FROM {}".format(full_table)
        )
        return columns

    column_type_mappings = (
        (re.compile(r"^boolean.*", re.IGNORECASE), types.Boolean()),
        (re.compile(r"^tinyint.*", re.IGNORECASE), TinyInteger()),
        (re.compile(r"^smallint.*", re.IGNORECASE), types.SmallInteger()),
        (re.compile(r"^integer.*", re.IGNORECASE), types.Integer()),
        (re.compile(r"^bigint.*", re.IGNORECASE), types.BigInteger()),
        (re.compile(r"^real.*", re.IGNORECASE), types.Float()),
        (re.compile(r"^double.*", re.IGNORECASE), types.Float()),
        (re.compile(r"^decimal.*", re.IGNORECASE), types.DECIMAL()),
        (
            re.compile(r"^varchar(\((\d+)\))*$", re.IGNORECASE),
            lambda match: types.VARCHAR(int(match[2]))
            if match[2]
            else types.String(),
        ),
        (
            re.compile(r"^char(\((\d+)\))*$", re.IGNORECASE),
            lambda match: types.CHAR(int(match[2])) if match[2] else types.CHAR(),
        ),
        (re.compile(r"^varbinary.*", re.IGNORECASE), types.VARBINARY()),
        (re.compile(r"^json.*", re.IGNORECASE), types.JSON()),
        (re.compile(r"^date.*", re.IGNORECASE), types.DATE()),
        (re.compile(r"^time.*", re.IGNORECASE), types.Time()),
        (re.compile(r"^timestamp.*", re.IGNORECASE), types.TIMESTAMP()),
        (re.compile(r"^interval.*", re.IGNORECASE), Interval()),
        (re.compile(r"^array.*", re.IGNORECASE), Array()),
        (re.compile(r"^map.*", re.IGNORECASE), Map()),
        (re.compile(r"^row.*", re.IGNORECASE), Row()),
    )

    @classmethod
    def get_columns(
        cls, inspector: Inspector, table_name: str, schema: Optional[str]
    ) -> List[Dict[str, Any]]:
        """
        Get columns from a Presto data source. This includes handling row and
        array data types
        :param inspector: object that performs database schema inspection
        :param table_name: table name
        :param schema: schema name
        :return: a list of results that contain column info
            (i.e. column name and data type)
        """
        columns = cls._show_columns(inspector, table_name, schema)
        result: List[Dict[str, Any]] = []
        for column in columns:
            # parse column if it is a row or array
            if is_feature_enabled("PRESTO_EXPAND_DATA") and (
                "array" in column.Type or "row" in column.Type
            ):
                structural_column_index = len(result)
                cls._parse_structural_column(column.Column, column.Type, result)
                result[structural_column_index]["nullable"] = getattr(
                    column, "Null", True
                )
                result[structural_column_index]["default"] = None
                continue

            # otherwise column is a basic data type
            column_type = cls.get_sqla_column_type(column.Type)
            if column_type is None:
                raise NotImplementedError(
                    _("Unknown column type: %(col)s", col=column.Type)
                )
            column_info = cls._create_column_info(column.Column, column_type)
            column_info["nullable"] = getattr(column, "Null", True)
            column_info["default"] = None
            result.append(column_info)
        return result

    @classmethod
    def _is_column_name_quoted(cls, column_name: str) -> bool:
        """
        Check if column name is in quotes
        :param column_name: column name
        :return: boolean
        """
        return column_name.startswith('"') and column_name.endswith('"')

    @classmethod
    def _get_fields(cls, cols: List[Dict[str, Any]]) -> List[ColumnClause]:
        """
        Format column clauses where names are in quotes and labels are specified
        :param cols: columns
        :return: column clauses
        """
        column_clauses = []
        # Column names are separated by periods. This regex will find periods
        # in a string if they are not enclosed in quotes, because if a period
        # is enclosed in quotes, then that period is part of a column name.
        dot_pattern = r"""\.                # split on period
                          (?=               # look ahead
                          (?:               # create non-capture group
                          [^\"]*\"[^\"]*\"  # two quotes
                          )*[^\"]*$)        # end regex"""
        dot_regex = re.compile(dot_pattern, re.VERBOSE)
        for col in cols:
            # get individual column names
            col_names = re.split(dot_regex, col["name"])
            # quote each column name if it is not already quoted
            for index, col_name in enumerate(col_names):
                if not cls._is_column_name_quoted(col_name):
                    col_names[index] = '"{}"'.format(col_name)
            quoted_col_name = ".".join(col_names)
            # create column clause in the format "name"."name" AS "name.name"
            column_clause = literal_column(quoted_col_name).label(col["name"])
            column_clauses.append(column_clause)
        return column_clauses

    @classmethod
    def select_star(  # pylint: disable=too-many-arguments
        cls,
        database: "Database",
        table_name: str,
        engine: Engine,
        schema: Optional[str] = None,
        limit: int = 100,
        show_cols: bool = False,
        indent: bool = True,
        latest_partition: bool = True,
        cols: Optional[List[Dict[str, Any]]] = None,
    ) -> str:
        """
        Include selecting properties of row objects. We cannot easily break
        arrays into rows, so render the whole array in its own row and skip
        columns that correspond to an array's contents.
        """
        cols = cols or []
        presto_cols = cols
        if is_feature_enabled("PRESTO_EXPAND_DATA") and show_cols:
            dot_regex = r"\.(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
            presto_cols = [
                col for col in presto_cols if not re.search(dot_regex, col["name"])
            ]
        return super().select_star(
            database,
            table_name,
            engine,
            schema,
            limit,
            show_cols,
            indent,
            latest_partition,
            presto_cols,
        )

    @classmethod
    def estimate_statement_cost(  # pylint: disable=too-many-locals
        cls, statement: str, database: "Database", cursor: Any, user_name: str
    ) -> Dict[str, Any]:
        """
        Run a SQL query that estimates the cost of a given statement.
        :param statement: A single SQL statement
        :param database: Database instance
        :param cursor: Cursor instance
        :param user_name: Effective username
        :return: JSON response from Presto
        """
        parsed_query = ParsedQuery(statement)
        sql = parsed_query.stripped()

        sql_query_mutator = config["SQL_QUERY_MUTATOR"]
        if sql_query_mutator:
            sql = sql_query_mutator(sql, user_name, security_manager, database)

        sql = f"EXPLAIN (TYPE IO, FORMAT JSON) {sql}"
        cursor.execute(sql)

        # the output from Presto is a single column and a single row containing
        # JSON:
        #
        #   {
        #     ...
        #     "estimate" : {
        #       "outputRowCount" : 8.73265878E8,
        #       "outputSizeInBytes" : 3.41425774958E11,
        #       "cpuCost" : 3.41425774958E11,
        #       "maxMemory" : 0.0,
        #       "networkCost" : 3.41425774958E11
        #     }
        #   }
        result = json.loads(cursor.fetchone()[0])
        return result

    @classmethod
    def query_cost_formatter(
        cls, raw_cost: List[Dict[str, Any]]
    ) -> List[Dict[str, str]]:
        """
        Format cost estimate.
        :param raw_cost: JSON estimate from Presto
        :return: Human readable cost estimate
        """

        def humanize(value: Any, suffix: str) -> str:
            try:
                value = int(value)
            except ValueError:
                return str(value)

            prefixes = ["K", "M", "G", "T", "P", "E", "Z", "Y"]
            prefix = ""
            to_next_prefix = 1000
            while value > to_next_prefix and prefixes:
                prefix = prefixes.pop(0)
                value //= to_next_prefix
            return f"{value} {prefix}{suffix}"

        cost = []
        columns = [
            ("outputRowCount", "Output count", " rows"),
            ("outputSizeInBytes", "Output size", "B"),
            ("cpuCost", "CPU cost", ""),
            ("maxMemory", "Max memory", "B"),
            ("networkCost", "Network cost", ""),
        ]
        for row in raw_cost:
            estimate: Dict[str, float] = row.get("estimate", {})
            statement_cost = {}
            for key, label, suffix in columns:
                if key in estimate:
                    statement_cost[label] = humanize(estimate[key], suffix).strip()
            cost.append(statement_cost)
        return cost

    @classmethod
    def adjust_database_uri(
        cls, uri: URL, selected_schema: Optional[str] = None
    ) -> None:
        database = uri.database
        if selected_schema and database:
            selected_schema = parse.quote(selected_schema, safe="")
            if "/" in database:
                database = database.split("/")[0] + "/" + selected_schema
            else:
                database += "/" + selected_schema
            uri.database = database

    @classmethod
    def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
        tt = target_type.upper()
        if tt == utils.TemporalType.DATE:
            return f"""from_iso8601_date('{dttm.date().isoformat()}')"""
        if tt == utils.TemporalType.TIMESTAMP:
            return f"""from_iso8601_timestamp('{dttm.isoformat(timespec="microseconds")}')"""  # pylint: disable=line-too-long
        return None

    @classmethod
    def epoch_to_dttm(cls) -> str:
        return "from_unixtime({col})"

    @classmethod
    def get_all_datasource_names(
        cls, database: "Database", datasource_type: str
    ) -> List[utils.DatasourceName]:
        datasource_df = database.get_df(
            "SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S "
            "ORDER BY concat(table_schema, '.', table_name)".format(
                datasource_type.upper()
            ),
            None,
        )
        datasource_names: List[utils.DatasourceName] = []
        for _unused, row in datasource_df.iterrows():
            datasource_names.append(
                utils.DatasourceName(
                    schema=row["table_schema"], table=row["table_name"]
                )
            )
        return datasource_names

    @classmethod
    def expand_data(  # pylint: disable=too-many-locals,too-many-branches
        cls, columns: List[Dict[Any, Any]], data: List[Dict[Any, Any]]
    ) -> Tuple[List[Dict[Any, Any]], List[Dict[Any, Any]], List[Dict[Any, Any]]]:
        """
        We do not immediately display rows and arrays clearly in the data grid.
        This method separates out nested fields and data values to help
        clearly display structural columns.

        Example: ColumnA is a row(nested_obj varchar) and ColumnB is an
        array(int)

        Original data set = [
            {'ColumnA': ['a1'], 'ColumnB': [1, 2]},
            {'ColumnA': ['a2'], 'ColumnB': [3, 4]},
        ]
        Expanded data set = [
            {'ColumnA': ['a1'], 'ColumnA.nested_obj': 'a1', 'ColumnB': 1},
            {'ColumnA': '',     'ColumnA.nested_obj': '',   'ColumnB': 2},
            {'ColumnA': ['a2'], 'ColumnA.nested_obj': 'a2', 'ColumnB': 3},
            {'ColumnA': '',     'ColumnA.nested_obj': '',   'ColumnB': 4},
        ]
        :param columns: columns selected in the query
        :param data: original data set
        :return: list of all columns (selected columns and their nested
            fields), expanded data set, list of nested fields
        """
        if not is_feature_enabled("PRESTO_EXPAND_DATA"):
            return columns, data, []

        # process each column, unnesting ARRAY types and
        # expanding ROW types into new columns
        to_process = deque((column, 0) for column in columns)
        all_columns: List[Dict[str, Any]] = []
        expanded_columns = []
        current_array_level = None
        while to_process:
            column, level = to_process.popleft()
            if column["name"] not in [column["name"] for column in all_columns]:
                all_columns.append(column)

            # When unnesting arrays we need to keep track of how many extra
            # rows were added, for each original row. This is necessary when
            # we expand multiple arrays, so that the arrays after the first
            # reuse the rows added by the first. Every time we change a level
            # in the nested arrays we reinitialize this.
            if level != current_array_level:
                unnested_rows: Dict[int, int] = defaultdict(int)
                current_array_level = level

            name = column["name"]
            values: Optional[Union[str, List[Any]]]

            if column["type"].startswith("ARRAY("):
                # keep processing array children; we append to the right so
                # that multiple nested arrays are processed breadth-first
                to_process.append((get_children(column)[0], level + 1))

                # unnest array objects data into new rows
                i = 0
                while i < len(data):
                    row = data[i]
                    values = row.get(name)
                    if isinstance(values, str):
                        row[name] = values = destringify(values)
                    if values:
                        # how many extra rows we need to unnest the data?
                        extra_rows = len(values) - 1

                        # how many rows were already added for this row?
                        current_unnested_rows = unnested_rows[i]

                        # add any necessary rows
                        missing = extra_rows - current_unnested_rows
                        for _ in range(missing):
                            data.insert(i + current_unnested_rows + 1, {})
                            unnested_rows[i] += 1

                        # unnest array into rows
                        for j, value in enumerate(values):
                            data[i + j][name] = value

                        # skip newly unnested rows
                        i += unnested_rows[i]

                    i += 1

            if column["type"].startswith("ROW("):
                # expand columns; we append them to the left so they are added
                # immediately after the parent
                expanded = get_children(column)
                to_process.extendleft((column, level) for column in expanded[::-1])
                expanded_columns.extend(expanded)

                # expand row objects into new columns
                for row in data:
                    values = row.get(name) or []
                    if isinstance(values, str):
                        row[name] = values = cast(List[Any], destringify(values))
                    for value, col in zip(values, expanded):
                        row[col["name"]] = value

        data = [
            {k["name"]: row.get(k["name"], "") for k in all_columns}
            for row in data
        ]
        return all_columns, data, expanded_columns

    @classmethod
    def extra_table_metadata(
        cls, database: "Database", table_name: str, schema_name: str
    ) -> Dict[str, Any]:
        metadata = {}

        indexes = database.get_indexes(table_name, schema_name)
        if indexes:
            cols = indexes[0].get("column_names", [])
            full_table_name = table_name
            if schema_name and "." not in table_name:
                full_table_name = "{}.{}".format(schema_name, table_name)
            pql = cls._partition_query(full_table_name, database)
            col_names, latest_parts = cls.latest_partition(
                table_name, schema_name, database, show_first=True
            )
            if not latest_parts:
                latest_parts = tuple([None] * len(col_names))  # type: ignore
            metadata["partitions"] = {
                "cols": cols,
                "latest": dict(zip(col_names, latest_parts)),  # type: ignore
                "partitionQuery": pql,
            }

        # flake8 is not matching `Optional[str]` to `Any` for some reason...
        metadata["view"] = cast(
            Any, cls.get_create_view(database, schema_name, table_name)
        )
        return metadata

    @classmethod
    def get_create_view(
        cls, database: "Database", schema: str, table: str
    ) -> Optional[str]:
        """
        Return a CREATE VIEW statement, or `None` if not a view.

        :param database: Database instance
        :param schema: Schema name
        :param table: Table (view) name
        """
        from pyhive.exc import DatabaseError

        engine = cls.get_engine(database, schema)
        with closing(engine.raw_connection()) as conn:
            with closing(conn.cursor()) as cursor:
                sql = f"SHOW CREATE VIEW {schema}.{table}"
                try:
                    cls.execute(cursor, sql)
                    polled = cursor.poll()
                    while polled:
                        time.sleep(0.2)
                        polled = cursor.poll()
                except DatabaseError:  # not a VIEW
                    return None
                rows = cls.fetch_data(cursor, 1)
        return rows[0][0]

    @classmethod
    def handle_cursor(cls, cursor: Any, query: Query, session: Session) -> None:
        """Updates progress information"""
        query_id = query.id
        poll_interval = query.database.connect_args.get(
            "poll_interval", config["PRESTO_POLL_INTERVAL"]
        )
        logger.info("Query %i: Polling the cursor for progress", query_id)
        polled = cursor.poll()
        # poll returns dict -- JSON status information or ``None``
        # if the query is done
        # https://github.com/dropbox/PyHive/blob/
        # b34bdbf51378b3979eaf5eca9e956f06ddc36ca0/pyhive/presto.py#L178
        while polled:
            # Update the object and wait for the kill signal.
            stats = polled.get("stats", {})

            query = session.query(type(query)).filter_by(id=query_id).one()
            if query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]:
                cursor.cancel()
                break

            if stats:
                state = stats.get("state")

                # if already finished, then stop polling
                if state == "FINISHED":
                    break

                completed_splits = float(stats.get("completedSplits"))
                total_splits = float(stats.get("totalSplits"))
                if total_splits and completed_splits:
                    progress = 100 * (completed_splits / total_splits)
                    logger.info(
                        "Query {} progress: {} / {} "  # pylint: disable=logging-format-interpolation
                        "splits".format(query_id, completed_splits, total_splits)
                    )
                    if progress > query.progress:
                        query.progress = progress
                    session.commit()
            time.sleep(poll_interval)
            logger.info("Query %i: Polling the cursor for progress", query_id)
            polled = cursor.poll()

    @classmethod
    def _extract_error_message(cls, ex: Exception) -> str:
        if (
            hasattr(ex, "orig")
            and type(ex.orig).__name__ == "DatabaseError"  # type: ignore
            and isinstance(ex.orig[0], dict)  # type: ignore
        ):
            error_dict = ex.orig[0]  # type: ignore
            return "{} at {}: {}".format(
                error_dict.get("errorName"),
                error_dict.get("errorLocation"),
                error_dict.get("message"),
            )
        if type(ex).__name__ == "DatabaseError" and hasattr(ex, "args") and ex.args:
            error_dict = ex.args[0]
            return error_dict.get("message", _("Unknown Presto Error"))
        return utils.error_msg_from_exception(ex)

    @classmethod
    def _partition_query(  # pylint: disable=too-many-arguments,too-many-locals
        cls,
        table_name: str,
        database: "Database",
        limit: int = 0,
        order_by: Optional[List[Tuple[str, bool]]] = None,
        filters: Optional[Dict[Any, Any]] = None,
    ) -> str:
        """Returns a partition query

        :param table_name: the name of the table to get partitions from
        :type table_name: str
        :param limit: the number of partitions to be returned
        :type limit: int
        :param order_by: a list of tuples of field name and a boolean
            that determines if that field should be sorted in descending
            order
        :type order_by: list of (str, bool) tuples
        :param filters: dict of field name and filter value combinations
        """
        limit_clause = "LIMIT {}".format(limit) if limit else ""
        order_by_clause = ""
        if order_by:
            l = []
            for field, desc in order_by:
                # ascending fields keep their bare name
                l.append(field + " DESC" if desc else field)
            order_by_clause = "ORDER BY " + ", ".join(l)

        where_clause = ""
        if filters:
            l = []
            for field, value in filters.items():
                l.append(f"{field} = '{value}'")
            where_clause = "WHERE " + " AND ".join(l)

        presto_version = database.get_extra().get("version")

        # Partition select syntax changed in v0.199, so check here.
        # Default to the new syntax if version is unset.
        partition_select_clause = (
            f'SELECT * FROM "{table_name}$partitions"'
            if not presto_version
            or StrictVersion(presto_version) >= StrictVersion("0.199")
            else f"SHOW PARTITIONS FROM {table_name}"
        )

        sql = textwrap.dedent(
            f"""\
            {partition_select_clause}
            {where_clause}
            {order_by_clause}
            {limit_clause}
            """
        )
        return sql

    @classmethod
    def where_latest_partition(  # pylint: disable=too-many-arguments
        cls,
        table_name: str,
        schema: Optional[str],
        database: "Database",
        query: Select,
        columns: Optional[List[Dict[str, str]]] = None,
    ) -> Optional[Select]:
        try:
            col_names, values = cls.latest_partition(
                table_name, schema, database, show_first=True
            )
        except Exception:  # pylint: disable=broad-except
            # table is not partitioned
            return None

        if values is None:
            return None

        column_names = {column.get("name") for column in columns or []}
        for col_name, value in zip(col_names, values):
            if col_name in column_names:
                query = query.where(Column(col_name) == value)
        return query

    @classmethod
    def _latest_partition_from_df(cls, df: pd.DataFrame) -> Optional[List[str]]:
        if not df.empty:
            return df.to_records(index=False)[0].item()
        return None

    @classmethod
    def latest_partition(
        cls,
        table_name: str,
        schema: Optional[str],
        database: "Database",
        show_first: bool = False,
    ) -> Tuple[List[str], Optional[List[str]]]:
        """Returns col name and the latest (max) partition value for a table

        :param table_name: the name of the table
        :param schema: schema / database / namespace
        :param database: database query will be run against
        :type database: models.Database
        :param show_first: displays the value for the first partitioning key
            if there are many partitioning keys
        :type show_first: bool

        >>> latest_partition('foo_table')
        (['ds'], ('2018-01-01',))
        """
        indexes = database.get_indexes(table_name, schema)
        if not indexes:
            raise SupersetTemplateException(
                f"Error getting partition for {schema}.{table_name}. "
                "Verify that this table has a partition."
            )

        if len(indexes[0]["column_names"]) < 1:
            raise SupersetTemplateException(
                "The table should have one partitioned field"
            )

        if not show_first and len(indexes[0]["column_names"]) > 1:
            raise SupersetTemplateException(
                "The table should have a single partitioned field "
                "to use this function. You may want to use "
                "`presto.latest_sub_partition`"
            )

        column_names = indexes[0]["column_names"]
        part_fields = [(column_name, True) for column_name in column_names]
        sql = cls._partition_query(table_name, database, 1, part_fields)
        df = database.get_df(sql, schema)
        return column_names, cls._latest_partition_from_df(df)

    @classmethod
    def latest_sub_partition(
        cls,
        table_name: str,
        schema: Optional[str],
        database: "Database",
        **kwargs: Any,
    ) -> Any:
        """Returns the latest (max) partition value for a table

        A filtering criteria should be passed for all fields that are
        partitioned except for the field to be returned. For example,
        if a table is partitioned by (``ds``, ``event_type`` and
        ``event_category``) and you want the latest ``ds``, you'll want
        to provide a filter as keyword arguments for both
        ``event_type`` and ``event_category`` as in
        ``latest_sub_partition('my_table', event_category='page',
        event_type='click')``

        :param table_name: the name of the table, can be just the table
            name or a fully qualified table name as ``schema_name.table_name``
        :type table_name: str
        :param schema: schema / database / namespace
        :type schema: str
        :param database: database query will be run against
        :type database: models.Database
        :param kwargs: keyword arguments define the filtering criteria
            on the partition list. There can be many of these.
        :type kwargs: str

        >>> latest_sub_partition('sub_partition_table', event_type='click')
        '2018-01-01'
        """
        indexes = database.get_indexes(table_name, schema)
        part_fields = indexes[0]["column_names"]
        for k in kwargs.keys():  # pylint: disable=consider-iterating-dictionary
            if k not in part_fields:
                msg = f"Field [{k}] is not part of the partitioning key"
                raise SupersetTemplateException(msg)

        if len(kwargs.keys()) != len(part_fields) - 1:
            msg = (
                "A filter needs to be specified for {} out of the {} fields."
            ).format(len(part_fields) - 1, len(part_fields))
            raise SupersetTemplateException(msg)

        for field in part_fields:
            if field not in kwargs.keys():
                field_to_return = field

        sql = cls._partition_query(
            table_name, database, 1, [(field_to_return, True)], kwargs
        )
        df = database.get_df(sql, schema)
        if df.empty:
            return ""
        return df.to_dict()[field_to_return][0]

    @classmethod
    @cache.memoize()
    def get_function_names(cls, database: "Database") -> List[str]:
        """
        Get a list of function names that are able to be called on the
        database. Used for SQL Lab autocomplete.

        :param database: The database to get functions for
        :return: A list of function names useable in the database
        """
        return database.get_df("SHOW FUNCTIONS")["Function"].tolist()
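A minimal sketch (not Superset code) of how a regex-based table like `column_type_mappings` can resolve a Presto type string; the real resolution lives in `get_sqla_column_type`, which is defined on the base class and not shown here:

import re

from sqlalchemy import types

MAPPINGS = (
    (
        re.compile(r"^varchar(\((\d+)\))*$", re.IGNORECASE),
        lambda m: types.VARCHAR(int(m[2])) if m[2] else types.String(),
    ),
    (re.compile(r"^integer.*", re.IGNORECASE), lambda m: types.Integer()),
)

def resolve(type_string):
    # First pattern wins; the match object is handed to the factory so
    # parametrized types (e.g. lengths) can be extracted from groups.
    for pattern, factory in MAPPINGS:
        match = pattern.match(type_string)
        if match:
            return factory(match)
    return None

print(resolve("varchar(42)"))  # VARCHAR(42)
print(resolve("integer"))      # INTEGER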
        date_old = datetime.date(2001, 1, 1)
        return date_old
    sql2 = 'select max(date) from fq_day;'
    date_old = con.execute(sql2).fetchall()[0][0].date()
    if date_old < datetime.date.today() - datetime.timedelta(1):
        return date_old
    else:
        con.close()
        print('Data already fetched today; no need to fetch again')
        os._exit(1)


# Declare a queue holding stock codes, used to fetch adjusted-price details
stock_code_queue = Queue()
for code in stock_basics.index:
    stock_code_queue.put(code)

type_fq_day = {
    'code': types.CHAR(6),
    'open': types.FLOAT,
    'high': types.FLOAT,
    'close': types.FLOAT,
    'low': types.FLOAT,
    'amount': types.FLOAT,
    'factor': types.FLOAT,
}


# Fetch forward-adjusted price data
def process_data(old_date, task_queue):
    # queueLock.acquire()
    while not task_queue.empty():
        data = task_queue.get()
        print("Fetching %s; %s items remaining:" % (data, task_queue.qsize()))
        # queueLock.release()
        date_begin = old_date + datetime.timedelta(1)
        date_end = datetime.date.today()
        try:
            qfq_day = ts.get_h_data(data, start=str(date_begin),
                                    end=str(date_end), autype='qfq',
                                    drop_factor=False)
            qfq_day['code'] = data
            qfq_day.to_sql('fq_day', engine, if_exists='append',
                           dtype=type_fq_day)
        except:
class LowestPriceHistory(EntityModel):
    '''
    CZ3997: xx(a)n(n)(n)(n)(a)
    https://en.wikipedia.org/wiki/Airline_codes
    PEK: aaa
    https://en.wikipedia.org/wiki/International_Air_Transport_Association_airport_code
    '''
    __tablename__ = 'lowest_price_history'

    flight_date = Column(dbt.DATE, nullable=False)
    origin = Column(dbt.CHAR(3), nullable=False)
    destination = Column(dbt.CHAR(3), nullable=False)
    airline = Column(dbt.CHAR(32), nullable=False)
    price_cny = Column(dbt.DECIMAL(8, 2), nullable=False)
    first_seen_at = Column(sa.BigInteger, nullable=False)
    last_seen_at = Column(sa.BigInteger, nullable=False)

    @classmethod
    def add(cls, flight_date, origin, destination, airline, price_cny,
            first_seen_at=None):
        # `time.time()` as a default argument would be evaluated once at
        # import time, so resolve it at call time instead.
        if first_seen_at is None:
            first_seen_at = time.time()
        payload = {
            'flight_date': flight_date,
            'origin': origin,
            'destination': destination,
            'airline': airline,
            'price_cny': price_cny,
            'first_seen_at': first_seen_at,
            'last_seen_at': first_seen_at,
        }
        return super(LowestPriceHistory, cls).add(**payload)

    def to_dict(self):
        return {
            'flight_date': self.flight_date.strftime("%Y-%m-%d"),
            'origin': self.origin,
            'destination': self.destination,
            'airline': self.airline,
            'price_cny': float(self.price_cny),
            'first_seen_at': datetime.fromtimestamp(self.first_seen_at),
            'last_seen_at': datetime.fromtimestamp(self.last_seen_at),
        }

    @classmethod
    def get_latest_price(cls, flight_date, origin, destination):
        return cls.query.filter_by(
            flight_date=flight_date,
            origin=origin,
            destination=destination,
        ).order_by(sa.desc(cls.last_seen_at)).first()

    @classmethod
    def list(cls):
        price_dict = {}
        for rec in cls.query.all():
            price_dict.setdefault(
                (rec.flight_date, rec.origin, rec.destination), []).append(
                    (rec.id, rec.airline, rec.first_seen_at,
                     rec.last_seen_at, rec.price_cny))
        price_list = []
        for unique_record, history in price_dict.items():
            flight_date, origin, destination = unique_record
            price_list.append({
                'flight_date': flight_date.strftime("%Y-%m-%d"),
                'origin': origin,
                'destination': destination,
                'priceline': [{
                    'id': str(id_),
                    'airline': airline,
                    'price_cny': float(price_cny),
                    'first_seen_at': datetime.fromtimestamp(
                        first_seen_at).strftime("%Y-%m-%d %H:%M:%S"),
                    'last_seen_at': datetime.fromtimestamp(
                        last_seen_at).strftime("%Y-%m-%d %H:%M:%S"),
                } for (id_, airline, first_seen_at, last_seen_at, price_cny)
                    in sorted(history, key=lambda x: x[3])],
            })
        return price_list

    @classmethod
    def update_price(cls, flight_date, origin, destination, price, airline,
                     now_ts=None):
        # Resolve "now" at call time, not at import time.
        if now_ts is None:
            now_ts = time.time()
        if not isinstance(price, Decimal):
            raise ValueError("price should be of type decimal.Decimal")
        existed = cls.get_latest_price(flight_date, origin, destination)
        if existed is None:
            return cls.add(flight_date, origin, destination, airline,
                           price, now_ts)
        if abs(existed.price_cny - price) > 0.01:
            on_price_change(flight_date, origin, destination, airline,
                            price, now_ts, existed)
            return cls.add(flight_date, origin, destination, airline,
                           price, now_ts)
        payload = {'last_seen_at': now_ts}
        if existed.airline != airline:
            payload['airline'] = airline
        existed.update(**payload)
        return existed.id
    con.close()
    print('Data already fetched today; no need to fetch again')
    # os._exit(1)


# Declare a queue holding stock codes, used to fetch adjusted-price details
stock_code_queue = Queue.Queue(0)
for code in stock_basics.index:
    stock_code_queue.put(code)
# len(stock_basics.index)
# stock_code_queue.qsize()
# stock_code_queue.queue.clear()

type_fq_day = {
    'code': types.CHAR(6),
    'open': types.FLOAT,
    'high': types.FLOAT,
    'close': types.FLOAT,
    'low': types.FLOAT,
    'amount': types.FLOAT,
    'factor': types.FLOAT
}


def process_data(task_queue):
    """
    :param old_date:
    :param task_queue:
    :return: fetch ex-rights (adjusted) price data