def __init__(self, db, table, ncols=None, ntypes=sa_t.Text(), **kwargs):
    """Initialize a key/value table wrapper and create its backing table.

    Args:
        db: Database handle forwarded to the base class.
        table: Name of the table to create/use.
        ncols: Normal (non key/value) columns; a list of column names.
            Defaults to no extra columns.
        ntypes: SQLAlchemy type(s) for ``ncols`` — either a single type
            applied to every column, or a list parallel to ``ncols``
            (defaults to ``Text``).

    Raises:
        Error: if 'key' or 'value' is used as a normal column name, since
            those names are reserved for the KV columns themselves.

    See :class:`.Database` for the definition of the additional arguments.
    """
    # Avoid the mutable-default-argument pitfall: never share a list
    # instance across calls.
    if ncols is None:
        ncols = []
    # The table to use
    self._TABLE = table
    # Normal (non-kv) columns
    if 'key' in ncols or 'value' in ncols:
        raise Error("'key' or 'value' cant be used as a normal field name")
    self._NCOLS = ncols
    super(KVDb, self).__init__(db, **kwargs)
    # A single type is broadcast to every normal column.
    if not isinstance(ntypes, list):
        ntypes = [ntypes] * len(ncols)
    columns = [Column(n, t) for n, t in zip(ncols, ntypes)]
    self.create_table(self._TABLE,
                      Column('key', sa_t.String(_MAX_STRING_LEN)),
                      Column('value', sa_t.Text()),
                      *columns)
# Default MySQL column types for the health-examination result table.
# Kept at module level so the dict is not a mutable default argument.
_HEALTH_EXAM_DTYPES = {
    u'db_name': sqltypes.NVARCHAR(length=255),
    u'table_name': sqltypes.NVARCHAR(length=255),
    u'part_date': sqltypes.NVARCHAR(length=255),
    u'create_date': sqltypes.DateTime(),
    u'field_name': sqltypes.NVARCHAR(length=255),
    u'field_type': sqltypes.NVARCHAR(length=255),
    u'missing_value_num': sqltypes.BigInteger(),
    u'missing_value_prop': sqltypes.Float(),
    u'other_missing_value_num': sqltypes.BigInteger(),
    u'other_missing_value_prop': sqltypes.Float(),
    u'abnormal_value_index': sqltypes.Text(),
    u'abnormal_value_num': sqltypes.BigInteger(),
    u'abnormal_value_prop': sqltypes.Float(),
    u'if_exist_probability_plot': sqltypes.Integer(),
    u'probability_plot_result': sqltypes.NVARCHAR(length=255),
    u'probability_plot_script': sqltypes.Text(),
    u'if_exist_frequency_plot': sqltypes.Integer(),
    u'frequency_plot_result': sqltypes.NVARCHAR(length=255),
    u'frequency_plot_script': sqltypes.Text(),
    u'if_exist_rules': sqltypes.Integer(),
    u'show_Chn_rules': sqltypes.NVARCHAR(length=255),
    u'show_Eng_rules': sqltypes.NVARCHAR(length=255),
    u'rules_result': sqltypes.NVARCHAR(length=255),
}


# NOTE: function name keeps the original 'wirte_' typo because callers
# elsewhere depend on it.
def wirte_to_mysqldb(df, result_tb='data_health_examination', user='******',
                     psw='liangzhi123', host='192.168.1.22', db='datamining',
                     if_exists='append', dtype=None):
    """Write ``df`` to a MySQL table via pandas ``DataFrame.to_sql``.

    Args:
        df: pandas DataFrame to persist.
        result_tb: Destination table name.
        user, psw, host, db: MySQL connection parameters.
            SECURITY: credentials are hard-coded as defaults here; they
            should be moved to configuration/environment variables.
        if_exists: pandas to_sql policy ('append' by default).
        dtype: Mapping of column name -> SQLAlchemy type; defaults to
            the health-examination schema in ``_HEALTH_EXAM_DTYPES``.
    """
    if dtype is None:
        dtype = _HEALTH_EXAM_DTYPES
    # Build the same connection URL as before, without the redundant
    # str()/concatenation gymnastics.
    engine = sqlalchemy.create_engine(
        "mysql+mysqldb://%s:%s@%s/%s?charset=utf8" % (user, psw, host, db))
    df.to_sql(result_tb, engine, if_exists=if_exists, index=False, dtype=dtype)
def upgrade_1033_to_1034(operations, metadata):
    """Schema migration: add a nullable Text 'type' column to the
    allocations and reservations tables, skipping any table that
    already has it."""
    for table_name in ('allocations', 'reservations'):
        existing = Table(table_name, metadata, autoload=True)
        if 'type' not in existing.columns:
            operations.add_column(
                table_name, Column('type', types.Text(), nullable=True))
class Users(object):
    """Mapping for the 'users' table (mapped classically elsewhere —
    no declarative base is visible in this block)."""
    __tablename__ = 'users'
    # Primary key; uses the users_id_seq sequence on backends that support it.
    id = schema.Column('id', types.Integer, schema.Sequence('users_id_seq'),
                       primary_key=True)
    name = schema.Column('name', types.Text())
    # NOTE(review): stored as plain Text — whether the value is hashed before
    # insert cannot be determined from this block; confirm upstream.
    password = schema.Column('password', types.Text())
    real_name = schema.Column('real_name', types.Text())
def refresh(self):
    """Rebuild this table node's subtree: a FIELDS branch with one
    (name, type) row per column, and an FK branch with one row per
    outgoing foreign key. Errors from the inspector are reported only
    in DEBUG mode."""
    self.deleteChildren()
    self.appendRow(BaseTreeItem('FIELDS'))
    curTableFields = self.lastChild()
    self.appendRow(BaseTreeItem('FK'))
    curTableFK = self.lastChild()
    self.appendRow(BaseTreeItem('FK_REFERENCE'))
    # TODO: incoming foreign keys (FK_REFERENCE) are not populated yet.
    #engine = self.getConnection().data().engine
    #inspector = inspect(engine)
    inspector = self.getConnection().inspector
    table_name = self.text()
    # An empty schema text means "no schema" for the inspector API.
    schema = self.getSchema().text()
    if schema == '':
        schema = None
    try:
        #FIXME self.getRecordCount()
        for column in inspector.get_columns(table_name, schema):
            try:
                name = BaseTreeItem(column['name'])
                # Columns with no reflected type fall back to Text here...
                tipo = BaseTreeItem(
                    typeHandler(column.get('type', types.Text())))
                curTableFields.appendRow((name, tipo))
                # FIXME: performance is intolerable if done for every column.
                #curTableFields.lastChild().getValueSpread()
            except CompileError:
                # ...and columns whose type cannot even be compiled get an
                # explicit Text fallback row instead.
                if config.DEBUG:
                    print('Columna sin tipo', schema, ' ', table_name, ' ',
                          name)
                if name and name != '':
                    tipo = BaseTreeItem(typeHandler(types.Text()))
                    curTableFields.appendRow((name, tipo))
        for fk in inspector.get_foreign_keys(table_name, schema):
            # Unnamed constraints get a synthetic "<table>2<referred>*" label.
            if fk['name'] is None:
                name = BaseTreeItem(table_name + '2' + fk['referred_table'] +
                                    '*')
            else:
                name = BaseTreeItem(fk['name'])
            # Qualify the referred table with its schema when present.
            if fk['referred_schema'] is not None:
                table = BaseTreeItem(fk['referred_schema'] + '.' +
                                     fk['referred_table'])
            else:
                table = BaseTreeItem(fk['referred_table'])
            constrained = BaseTreeItem(
                norm2String(fk['constrained_columns']))
            referred = BaseTreeItem(norm2String(fk['referred_columns']))
            curTableFK.appendRow((name, table, constrained, referred))
    except (OperationalError, ProgrammingError) as e:
        # Reflection failed (permissions, dropped table, ...): log in DEBUG
        # mode; deliberately swallowed otherwise so the tree still renders.
        if config.DEBUG:
            #showConnectionError('Error en {}.{}'.format(schema,table_name),norm2String(e.orig.args))
            print('Error en {}.{}'.format(schema, table_name),
                  norm2String(e.orig.args))
def write_asic_details(driver, engine, linked_id, company_name):
    """Scrape ASIC details for a company and append them to asic.asx.

    Columns are typed by naming convention: date-like names become DATE,
    'former_names' becomes TEXT[], everything else TEXT. If the table
    already exists, any new columns are added via ALTER TABLE first.

    Returns:
        True on success, False on any failure (best-effort semantics).
    """
    try:
        df = extract_asic_details(driver, linked_id, company_name)
        inspector = inspect(engine)
        # Renamed from 'types' to avoid confusion with the sqlalchemy
        # module of the same name.
        col_types = {'linked_id': st.Integer(), 'company_name': st.Text()}
        table_exists = 'asx' in inspector.get_table_names(schema="asic")
        current_cols_sql = """SELECT column_name FROM information_schema.columns WHERE table_schema = 'asic' AND table_name = 'asx' """
        current_cols = pd.read_sql(current_cols_sql,
                                   engine)['column_name'].tolist()
        for col in df.columns:
            # Decide the SQLAlchemy type and matching DDL type once, then
            # run a single ALTER-TABLE branch (was triplicated before).
            if re.search('(^date_|_date$|_date_)', col):
                col_types[col] = st.Date()
                ddl_type = 'DATE'
            elif col == 'former_names':
                col_types[col] = st.ARRAY(st.Text(), dimensions=1)
                ddl_type = 'TEXT[]'
            else:
                col_types[col] = st.Text()
                ddl_type = 'TEXT'
            if col not in current_cols and table_exists:
                # SECURITY NOTE: identifiers cannot be parameterized, so the
                # column name is interpolated into DDL; this trusts that
                # extract_asic_details yields safe column names.
                engine.execute("ALTER TABLE asic.asx ADD COLUMN " + col +
                               " " + ddl_type)
        df.to_sql('asx', engine, schema="asic", if_exists="append",
                  index=False, dtype=col_types)
        return (True)
    except Exception:
        # Best-effort: report failure to the caller instead of raising.
        # (Was a bare except, which also swallowed KeyboardInterrupt.)
        return (False)
def test_basic_reflection(self):
    """Create two related tables, reflect them into a fresh MetaData via
    autoload, and assert the reflected definitions match the originals."""
    meta = self.metadata
    users = Table(
        "engine_users",
        meta,
        Column("user_id", types.INT, primary_key=True),
        Column("user_name", types.VARCHAR(20), nullable=False),
        Column("test1", types.CHAR(5), nullable=False),
        Column("test2", types.Float(5), nullable=False),
        Column("test2.5", types.Float(), nullable=False),
        Column("test3", types.Text()),
        Column("test4", types.Numeric, nullable=False),
        Column("test4.5", types.Numeric(10, 2), nullable=False),
        Column("test5", types.DateTime),
        # Self-referential FK to exercise reflection of self-joins.
        Column(
            "parent_user_id",
            types.Integer,
            ForeignKey("engine_users.user_id"),
        ),
        Column("test6", types.DateTime, nullable=False),
        Column("test7", types.Text()),
        Column("test8", types.LargeBinary()),
        Column("test_passivedefault2", types.Integer, server_default="5"),
        Column("test9", types.BINARY(100)),
        Column("test_numeric", types.Numeric()),
    )
    addresses = Table(
        "engine_email_addresses",
        meta,
        Column("address_id", types.Integer, primary_key=True),
        # FK into users to exercise cross-table reflection.
        Column("remote_user_id", types.Integer, ForeignKey(users.c.user_id)),
        Column("email_address", types.String(20)),
    )
    meta.create_all()
    # Reflect into a brand-new MetaData so nothing is cached.
    meta2 = MetaData()
    reflected_users = Table("engine_users", meta2, autoload=True,
                            autoload_with=testing.db)
    reflected_addresses = Table(
        "engine_email_addresses",
        meta2,
        autoload=True,
        autoload_with=testing.db,
    )
    self.assert_tables_equal(users, reflected_users)
    self.assert_tables_equal(addresses, reflected_addresses)
class Ranking(Base):
    """One row of NBA-style league standings for a team on a given date."""
    __tablename__ = 'ranking'
    id = Column(types.Integer(), primary_key=True)
    team_id = Column(types.Integer(), ForeignKey('teams.team_id'))
    league_id = Column(types.Integer())
    season_id = Column(types.Integer())
    standingsdate = Column(types.Date())
    conference = Column(types.Text())
    team = Column(types.Text())
    g_i = Column(types.SmallInteger())  # games played
    w = Column(types.SmallInteger())    # wins
    # 'l' shadows nothing here but is kept as-is: it maps a DB column.
    l = Column(types.SmallInteger())    # losses
    w_pct = Column(types.Numeric())     # win percentage
    # Records stored as text, presumably "W-L" strings — TODO confirm.
    home_record = Column(types.Text())
    road_record = Column(types.Text())
def init_rev_table(metadata):
    """Define and return the 'revisions' table on the given metadata."""
    columns = [
        Column('id', types.Integer, primary_key=True),
        Column('name', types.Unicode(255), default=''),
        Column('log', types.Text(), default=u'No log message'),
        Column('author', types.Unicode(255), default=u'Unknown Author'),
        Column('date', types.DateTime()),
    ]
    return Table('revisions', metadata, *columns)
def _fixed_lookup_fixture(self):
    """Return (generic type, expected reflected/uppercase type) pairs used
    to verify type lookup: each generic SQLAlchemy type on the left should
    resolve to the dialect-level type instance on the right. Entries given
    as classes (e.g. ``sqltypes.NUMERIC``) check class-to-instance
    resolution with default arguments."""
    return [
        (sqltypes.String(), sqltypes.VARCHAR()),
        (sqltypes.String(1), sqltypes.VARCHAR(1)),
        (sqltypes.String(3), sqltypes.VARCHAR(3)),
        (sqltypes.Text(), sqltypes.TEXT()),
        (sqltypes.Unicode(), sqltypes.VARCHAR()),
        (sqltypes.Unicode(1), sqltypes.VARCHAR(1)),
        (sqltypes.UnicodeText(), sqltypes.TEXT()),
        (sqltypes.CHAR(3), sqltypes.CHAR(3)),
        (sqltypes.NUMERIC, sqltypes.NUMERIC()),
        (sqltypes.NUMERIC(10, 2), sqltypes.NUMERIC(10, 2)),
        (sqltypes.Numeric, sqltypes.NUMERIC()),
        (sqltypes.Numeric(10, 2), sqltypes.NUMERIC(10, 2)),
        (sqltypes.DECIMAL, sqltypes.DECIMAL()),
        (sqltypes.DECIMAL(10, 2), sqltypes.DECIMAL(10, 2)),
        (sqltypes.INTEGER, sqltypes.INTEGER()),
        (sqltypes.BIGINT, sqltypes.BIGINT()),
        (sqltypes.Float, sqltypes.FLOAT()),
        (sqltypes.TIMESTAMP, sqltypes.TIMESTAMP()),
        (sqltypes.DATETIME, sqltypes.DATETIME()),
        (sqltypes.DateTime, sqltypes.DATETIME()),
        (sqltypes.DateTime(), sqltypes.DATETIME()),
        (sqltypes.DATE, sqltypes.DATE()),
        (sqltypes.Date, sqltypes.DATE()),
        (sqltypes.TIME, sqltypes.TIME()),
        (sqltypes.Time, sqltypes.TIME()),
        (sqltypes.BOOLEAN, sqltypes.BOOLEAN()),
        (sqltypes.Boolean, sqltypes.BOOLEAN()),
    ]
class Supportparams(Model):
    """Run/support parameters captured for a processed document.

    Relies on the project's ``Model`` base for the query property used
    in :meth:`exists`.
    """
    supportparams_id = Column(types.Integer, primary_key=True)
    # Filesystem path of the source document; unique per run.
    document_path = Column(types.String(255), nullable=False, unique=True)
    systempid = Column(types.String(255))
    systemos = Column(types.String(255))
    systemperlv = Column(types.String(255))
    systemperlexe = Column(types.String(255))
    idstring = Column(types.String(255))
    program = Column(types.String(255))
    commandline = Column(types.Text)
    sampleconfig_path = Column(types.String(255))
    # MEDIUMTEXT-sized payload (16 MiB) for the full sample config dump.
    sampleconfig = Column(types.Text(16777215))
    time = Column(types.DateTime)

    def __repr__(self):
        # Human-readable identity: class name plus the document path.
        return "{self.__class__.__name__}: {self.document_path}".format(
            self=self)

    @staticmethod
    def exists(document_path: str) -> Optional[int]:
        """Checks if the supportparams entry already exists.

        Returns the existing row's primary key, or None when absent.
        NOTE(review): ``.one()`` would raise MultipleResultsFound on
        duplicates, but document_path is unique so that cannot happen.
        """
        try:
            support_params: Supportparams = Supportparams.query.filter_by(
                document_path=document_path).one()
            return support_params.supportparams_id
        except NoResultFound:
            return None
class Prefix(Base):
    """Mapping for the 'prefix' table: a single free-text prefix per row."""
    __tablename__ = 'prefix'
    # Primary key, backed by the prefix_id_seq sequence where supported.
    id = schema.Column('id', types.Integer, schema.Sequence('prefix_id_seq'),
                       primary_key=True)
    prefix = schema.Column('prefix', types.Text())
class Link(Base):
    """A tracked URL with visit statistics."""
    __tablename__ = 'Link'
    # NOTE(review): string-form mapper order_by is a legacy SQLAlchemy
    # feature (removed in 1.1+) — this code targets an older release.
    __mapper_args__ = dict(order_by='id desc')

    id = sa.Column(types.Integer, primary_key=True)
    url = sa.Column(types.Text())
    visits = sa.Column(types.Integer())       # cumulative visit count
    last_visit = sa.Column(types.DateTime())  # timestamp of the latest visit
def upgrade(migrate_engine):
    """sqlalchemy-migrate upgrade step: add a nullable 'networking' text
    column (default '{}') to the existing 'environment' table."""
    # 'meta' is the module-level MetaData shared by all migration steps.
    meta.bind = migrate_engine
    environment_table = schema.Table('environment', meta, autoload=True)
    networking_column = schema.Column(
        'networking', types.Text(), nullable=True, default='{}')
    networking_column.create(environment_table)
def _get_column(table, column):
    """Return a Column with the appropriate sqlalchemy data type for a
    column from the JSON schema description.

    Some column definitions are incorrect in the JSON data, so this
    function applies manual overrides first, then falls back to the
    generic varchar / TYPE_MAP handling. Returns None for unknown types.
    """
    name = column['name']

    # Override: group_membership_dim ids are declared varchar in the JSON
    # schema but the actual dumps contain bigint values.
    if table == 'group_membership_dim' and name in [u'id', u'canvas_id']:
        return Column(name, types.BigInteger())

    # Override: these columns are declared varchar(256) but real dumps
    # contain longer values, so use unbounded text instead.
    _TEXT_OVERRIDES = {
        'quiz_question_answer_dim': [
            u'answer_match_left',
            u'answer_match_right',
            u'matching_answer_incorrect_matches',
        ],
        'quiz_question_dim': [u'name'],
    }
    if name in _TEXT_OVERRIDES.get(table, []):
        return Column(name, types.Text())

    # Generic handling from the declared JSON type.
    if column['type'] == 'varchar':
        return Column(name, types.String(length=column['length']))
    if column['type'] in TYPE_MAP:
        return Column(name, TYPE_MAP[column['type']])
    return None
class Visit(Base):
    """A single crawled page visit: URL, timestamp, and captured DOM."""
    __tablename__ = 'visits'
    id = Column(Integer, primary_key=True)
    url = Column(String(500))
    time = Column(types.DateTime())
    # Raw HTML/DOM snapshot captured at visit time.
    raw_dom = Column(types.Text())
    # NOTE(review): 'extacted_text' is a typo for 'extracted_text', but it
    # is a persisted column name — renaming requires a DB migration.
    extacted_text = Column(types.Text())
    lang = Column(String(16))

    def __init__(self, url, time, raw_dom):
        # extacted_text and lang are left unset here; presumably filled in
        # by a later processing step — TODO confirm.
        self.url = url
        self.time = time
        self.raw_dom = raw_dom

    def __repr__(self):
        # Truncate the URL so reprs stay readable in logs.
        return '<Visit %s>' % (self.url[:20])
class Hop(Base):
    """A user-owned collection ('hop') of links."""
    __tablename__ = 'Hop'
    # NOTE(review): string-form mapper order_by is a legacy SQLAlchemy
    # feature (removed in 1.1+) — this code targets an older release.
    __mapper_args__ = dict(order_by='id desc')

    id = sa.Column(types.Integer, primary_key=True)
    title = sa.Column(types.Unicode(255))
    # Serialized link collection; exact encoding not visible here —
    # TODO confirm format (e.g. JSON vs delimited ids).
    links = sa.Column(types.Text())
    userid = sa.Column(types.Integer())
    date = sa.Column(types.DateTime())
def load_dialect_impl(self, dialect):
    """Choose the concrete column type for this dialect.

    PostgreSQL >= 9.4 gets native JSONB, >= 9.2 gets native JSON (both
    set ``using_native_json``); anything else falls back to plain Text.
    """
    if dialect.name != 'postgresql':
        return dialect.type_descriptor(types.Text())
    version = dialect.server_version_info
    if version >= (9, 4):
        self.using_native_json = True
        return dialect.type_descriptor(postgresql.JSONB())
    if version >= (9, 2):
        self.using_native_json = True
        return dialect.type_descriptor(postgresql.JSON())
    # PostgreSQL older than 9.2: no native JSON support.
    return dialect.type_descriptor(types.Text())
def __init__(self, enum_class):  # pylint: disable=super-init-not-called
    """Bind this decorator to ``enum_class``, choosing the storage type
    from the first member's value: Text for str values, Integer for int
    values; anything else is rejected."""
    self.enum = enum_class
    sample_value = list(enum_class)[0].value
    if isinstance(sample_value, int):
        self.impl = sqla_types.Integer()
    elif isinstance(sample_value, str):
        self.impl = sqla_types.Text()
    else:
        raise ValueError(f"Unsupported type of value for {enum_class}")
def test_basic_reflection(self):
    """Create two related tables, reflect them into a fresh MetaData via
    autoload, and assert the reflected definitions match the originals."""
    meta = self.metadata
    users = Table(
        'engine_users', meta,
        Column('user_id', types.INT, primary_key=True),
        Column('user_name', types.VARCHAR(20), nullable=False),
        Column('test1', types.CHAR(5), nullable=False),
        Column('test2', types.Float(5), nullable=False),
        Column('test3', types.Text()),
        Column('test4', types.Numeric, nullable=False),
        Column('test5', types.DateTime),
        # Self-referential FK to exercise reflection of self-joins.
        Column('parent_user_id', types.Integer,
               ForeignKey('engine_users.user_id')),
        Column('test6', types.DateTime, nullable=False),
        Column('test7', types.Text()),
        Column('test8', types.LargeBinary()),
        Column('test_passivedefault2', types.Integer, server_default='5'),
        Column('test9', types.BINARY(100)),
        Column('test_numeric', types.Numeric()),
    )
    addresses = Table(
        'engine_email_addresses', meta,
        Column('address_id', types.Integer, primary_key=True),
        # FK into users to exercise cross-table reflection.
        Column('remote_user_id', types.Integer,
               ForeignKey(users.c.user_id)),
        Column('email_address', types.String(20)),
    )
    meta.create_all()
    # Reflect into a brand-new MetaData so nothing is cached.
    meta2 = MetaData()
    reflected_users = Table('engine_users', meta2, autoload=True,
                            autoload_with=testing.db)
    reflected_addresses = Table('engine_email_addresses', meta2,
                                autoload=True, autoload_with=testing.db)
    self.assert_tables_equal(users, reflected_users)
    self.assert_tables_equal(addresses, reflected_addresses)
class Obj(Base):
    pass
class Feed(Base):
    """A syndication feed tied one-to-one to a URL row (the unique index
    on url_id below enforces the one-to-one)."""
    id = Column(types.Integer(), primary_key=True, autoincrement=True)
    # RESTRICT/CASCADE are constants defined elsewhere in this module —
    # presumably the SQL ON DELETE/ON UPDATE action strings; confirm there.
    url_id = Column(ForeignKey('url.id', ondelete=RESTRICT,
                               onupdate=CASCADE),
                    nullable=False)
    title = Column(types.Text(), nullable=False)
    # Marks feeds that no longer resolve — TODO confirm semantics upstream.
    dead = Column(types.Boolean())

    @declared_attr
    def __table_args__(self):
        # Auto-named unique index guaranteeing one feed per URL.
        return (Index(None, 'url_id', unique=True), )
class Comment(Base):
    """A visitor comment attached to a page."""
    __tablename__ = 'comments'
    id = schema.Column(types.Integer,
                       schema.Sequence('comment_seq_id', optional=True),
                       primary_key=True)
    # Owning page; comments cannot exist without a page.
    pageid = schema.Column(types.Integer, schema.ForeignKey('pages.id'),
                           nullable=False)
    content = schema.Column(types.Text(), default=u'')
    name = schema.Column(types.Unicode(255))
    email = schema.Column(types.Unicode(255), nullable=False)
    # 'now' is a callable defined elsewhere in this module, so the default
    # is evaluated per-insert — presumably a datetime factory; confirm.
    created = schema.Column(types.TIMESTAMP(), default=now)
class ObjCM(Model, MixinEmpty, MixinCM, AbstractCM):
    """Test model exercising the classmethod flavor of the project's
    __global_table_args__ / __local_table_args__ merging protocol."""
    text = Column(types.Text())

    @classmethod
    def __global_table_args__(cls):
        # Contributed args shared with subclasses: an index plus a
        # dialect-specific table kwarg.
        return (Index('idx_cm_obj_text', 'text'), {'mysql_foo': 'foo'})

    @classmethod
    def __local_table_args__(cls):
        # Args applied to this class's table only.
        return (Index('idx_cm_obj_text2', 'text'), {
            'mysql_baz': 'baz'
        })
class Games_details(Base):
    """Per-player box-score line for a single game (NBA-style stats)."""
    __tablename__ = 'games_details'
    id = Column(types.Integer(), primary_key=True)
    game_id = Column(types.Integer(), ForeignKey('games.game_id'))
    team_id = Column(types.Integer(), ForeignKey('teams.team_id'))
    team_abbreviation = Column(types.Text())
    team_city = Column(types.Text())
    player_id = Column(types.Integer())
    player_name = Column(types.Text())
    start_position = Column(types.Text())
    comment_t = Column(types.Text())
    # Minutes played, stored as text — presumably "MM:SS"; confirm source.
    min_time = Column(types.Text())
    fgm = Column(types.Numeric())      # field goals made
    fga = Column(types.Numeric())      # field goals attempted
    fg_pct = Column(types.Numeric())   # field goal percentage
    fg3m = Column(types.Numeric())     # three-pointers made
    fg3a = Column(types.Numeric())     # three-pointers attempted
    fg3_pct = Column(types.Numeric())  # three-point percentage
    ftm = Column(types.Numeric())      # free throws made
    fta = Column(types.Numeric())      # free throws attempted
    ft_pct = Column(types.Numeric())   # free throw percentage
    oreb = Column(types.Numeric())     # offensive rebounds
    dreb = Column(types.Numeric())     # defensive rebounds
    reb = Column(types.Numeric())      # total rebounds
    ast = Column(types.Numeric())      # assists
    stl = Column(types.Numeric())      # steals
    blk = Column(types.Numeric())      # blocks
    t_num = Column(types.Numeric())    # turnovers — TODO confirm
    pf = Column(types.Numeric())       # personal fouls
    pts = Column(types.Numeric())      # points
    plus_minus = Column(types.Numeric())

    def __tuple(self):
        # Name-mangled helper returning the team identity pair.
        return (self.team_id, self.team_abbreviation)
class TermType(types.TypeDecorator):
    """Term typology.

    Stores rdflib terms as text: graphs are serialized by their
    identifier, other nodes by their string form.
    """

    # Underlying storage type: plain Text.
    impl = types.Text()

    def process_bind_param(self, value, dialect):
        """Process bound parameters.

        Graphs (including quoted graphs) bind as their identifier string;
        any other rdflib Node binds as its text form; non-Node values
        (e.g. None or already-plain strings) pass through unchanged.
        """
        if isinstance(value, (QuotedGraph, Graph)):
            return text_type(value.identifier)
        elif isinstance(value, Node):
            return text_type(value)
        else:
            return value
class cikPeople(al_base, Base):
    """A member of an election commission (CIK), keyed by commission id
    plus the member's serial number within it."""
    __tablename__ = 'cik_people'
    id = Column(types.Integer(), primary_key=True)
    ik_id = Column(types.Integer(), index=True)        # owning commission id
    number = Column(types.SmallInteger(), index=True)  # serial number within the commission
    fio = Column(types.Text())    # full name
    post = Column(types.Text())   # position held
    party = Column(types.Text())  # who nominated this member to the commission

    @classmethod
    def add_or_update(cls, attrs):
        """Upsert a member by (ik_id, number); returns the row, or None
        when the identifying attributes are missing."""
        if 'ik_id' not in attrs or 'number' not in attrs:
            return None
        people = Session.query(cls).filter(
            cls.ik_id == attrs['ik_id'],
            cls.number == attrs['number']).first()
        if not people:
            # Not found: create and register a new row before applying attrs.
            people = cls(ik_id=attrs['ik_id'], number=attrs['number'])
            Session.add(people)
        # set_attrs comes from the project's al_base mixin — presumably a
        # bulk attribute setter; confirm there.
        people.set_attrs(attrs)
        return people
class Music(Base):
    """A music library entry with display metadata and file location."""
    __tablename__ = 'music'
    id = schema.Column(types.Integer, schema.Sequence('music_seq_id'),
                       primary_key=True)
    title = schema.Column(types.Text(), default="")
    artist = schema.Column(types.Text(), default="")
    # Fallback artwork path used when no album art was found.
    albumart = schema.Column(types.Text(), default="/albumart.jpg")
    name = schema.Column(types.Text())
    # Filesystem path of the audio file.
    path = schema.Column(types.Text())
    summary = schema.Column(types.Text(), default="No info found")
    content = schema.Column(types.Text(), default="No info found")
def test_max_ident_in_varchar_not_present(self, metadata, connection):
    """test [ticket:3504].

    Here we are testing not just that the "max" token comes back as
    None, but also that these types accept "max" as the value of
    "length" on construction, which isn't a directly documented pattern
    however is likely in common use.
    """
    Table(
        "t",
        metadata,
        Column("t1", types.String),
        Column("t2", types.Text("max")),
        Column("t3", types.Text("max")),
        Column("t4", types.LargeBinary("max")),
        Column("t5", types.VARBINARY("max")),
    )
    metadata.create_all(connection)
    for col in inspect(connection).get_columns("t"):
        # Reflected length must be None (unbounded), and the compiled
        # DDL string must still render the MAX keyword.
        is_(col["type"].length, None)
        in_("max", str(col["type"].compile(dialect=connection.dialect)))
class BookmarkNote(Base):
    """A free-text note attached to a bookmark."""
    __tablename__ = 'bookmark_notes'
    __table_args__ = (sa.ForeignKeyConstraint(['bookmark_id'],
                                              [Bookmark.bookmark_id]), )

    note_id = sa.Column(UUIDType(), primary_key=True,
                        default=UUIDType.new_uuid)
    bookmark_id = sa.Column(UUIDType(), nullable=False)
    text = sa.Column(sa_types.Text(), nullable=False)
    author = sa.Column(sa.String(100), nullable=False)
    # BUG FIX: the default must be a callable. The original passed
    # datetime.utcnow().replace(microsecond=0) — evaluated ONCE at class
    # definition time — so every row got the import-time timestamp.
    # Wrapping it in a lambda makes SQLAlchemy evaluate it per insert
    # (second-precision UTC, as before).
    created_on = sa.Column(sa_types.TIMESTAMP(timezone=True),
                           nullable=False,
                           default=lambda: datetime.utcnow().replace(
                               microsecond=0))