def insert_sample_email(self, sample_table_name, email):
    """Insert one Email record into the named sample table.

    Prints an error and returns without inserting when ``email`` is not
    an Email instance or when the target table is this object's own
    main table.
    """
    if not isinstance(email, Email):
        # print() call: the original used a Python 2 print statement,
        # which is a SyntaxError under Python 3 (the very next branch
        # already used print()).
        print('ERROR: input must be of type Email')
        return
    if sample_table_name == self.table_name:
        print('Cannot use the same table name')
        return
    email_table = Table(sample_table_name, self.metadata)
    ins_stmt = email_table.insert()
    conn = self.engine.connect()
    conn.execute(
        ins_stmt,
        date=email.date,
        mime_type=email.mime_type,
        from_addr=email.from_addr,
        to_addr=email.to_addr,
        subject=email.subject,
        raw_body=email.raw_body,
        cleaned_body=email.cleaned_body,
        one_line=email.one_line,
        path=email.path,
        label=email.label,
        prediction=email.prediction,
        probability=email.probability,
    )
def insert_address(self, email_address):
    """Insert one EmailAddress record into the 'email_address' table."""
    # The original tested `type(email) != EmailAddress`, which raised a
    # NameError (`email` is not a parameter of this method) and used a
    # fragile exact-type comparison plus a Python 2 print statement.
    if not isinstance(email_address, EmailAddress):
        print('ERROR: input must be of type EmailAddress')
        return
    email_address_table = Table('email_address', self.metadata)
    ins_stmt = email_address_table.insert()
    conn = self.engine.connect()
    result = conn.execute(ins_stmt,
                          address=email_address.address,
                          name=email_address.name)
def view(name, schema, metadata, selectable):
    """
    Create a view for the given select. A table is returned which can
    be used to query the view.
    """
    # Attach the shadow table to a throwaway MetaData so it is never
    # emitted by create_all() on the real schema.
    shadow = Table(name, MetaData(), schema=schema)
    columns = (Column(col.name, col.type, primary_key=col.primary_key)
               for col in selectable.c)
    for column in columns:
        shadow.append_column(column)
    return shadow
def insert_to_table(self, email, table_name):
    """Insert one Email record into the named table.

    Prints an error and returns without inserting when ``email`` is not
    an Email instance.
    """
    if not isinstance(email, Email):
        # print() call: the original used a Python 2 print statement,
        # a SyntaxError under Python 3.
        print('ERROR: input must be of type Email')
        return
    email_table = Table(table_name, self.metadata)
    ins_stmt = email_table.insert()
    conn = self.engine.connect()
    result = conn.execute(ins_stmt,
                          date=email.date,
                          mime_type=email.mime_type,
                          from_addr=email.from_addr,
                          to_addr=email.to_addr,
                          subject=email.subject,
                          body=email.body,
                          path=email.path,
                          label=email.label)
def add_username(self):
    """Populate the ``username`` column of brushed_email from each path.

    The username is the path component between the second and third
    '/' of each record's path.
    """
    email_table = Table('brushed_email', self.metadata)
    sel_stmt = select([email_table.c.id, email_table.c.path])
    rp = self.engine.execute(sel_stmt)
    conn = self.engine.connect()
    # Raw string + pattern compiled once outside the loop; the original
    # non-raw literal relied on Python passing the unknown '\/' escape
    # through unchanged, and re-parsed the pattern per row.
    pattern = re.compile(r"/[^/]*/([^/]+)")
    for record in rp:
        match = pattern.match(record.path)
        if match:
            username = match.group(1)
            stmt = (email_table.update()
                    .where(email_table.c.id == record.id)
                    .values(username=username))
            conn.execute(stmt)
        else:
            print("Error! " + record.path)
            # NOTE(review): exits with status 0 even though this is an
            # error path — confirm whether callers rely on that code.
            exit(0)
def create_table(self):
    """Create the main email table (self.table_name) in the database."""
    text_fields = ('date', 'mime_type', 'from_addr', 'to_addr', 'subject',
                   'raw_body', 'cleaned_body', 'one_line', 'path')
    columns = [Column('id', Integer, primary_key=True)]
    columns.extend(Column(field, Text) for field in text_fields)
    columns.append(Column('prediction', Integer))
    columns.append(Column('probability', Float))
    email_table = Table(self.table_name, self.metadata, *columns)
    email_table.create(self.engine)
def test_vector_greater_than():
    """Property-test vector_greater_than against a brute-force check.

    Inserts 1000 random people into an in-memory SQLite table, then for
    several column tuples verifies that the SQL predicate selects
    exactly the rows whose value tuple compares greater in Python
    (with None mapped to -inf via none_to_minus_inf).
    """
    metadata = MetaData()
    table = Table('people', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('first_name', String),
                  Column('middle_name', String),
                  Column('last_name', String),
                  Column('blood_type', String))

    def random_person(idx):
        # middle_name is sometimes None so NULL handling is exercised.
        first = names.get_first_name()
        last = names.get_last_name()
        middle = random.choice([names.get_first_name, names.get_last_name,
                                lambda: None])()
        # Weighted so duplicate blood types occur, forcing tie-breaks
        # on the later columns of the comparison tuple.
        blood_type = random.choice(['A', 'A', 'B', 'B', 'O', 'O', 'O', 'O',
                                    'AB'])
        return {'id': idx,
                'first_name': first,
                'middle_name': middle,
                'last_name': last,
                'blood_type': blood_type
                }

    engine = create_engine('sqlite:///:memory:', echo=False)
    metadata.create_all(engine)

    def compare_results(compa, cols, vals):
        # Expected result computed in Python: tuple comparison with
        # None normalized to -inf on both sides.
        vals = tuple(map(none_to_minus_inf, vals))
        res = set([row['id'] for row in
                   engine.execute(select(table.columns).where(compa))])
        all_ = [valmap(none_to_minus_inf, row)
                for row in engine.execute(select(table.columns))]
        cor = set()
        for row in all_:
            if tuple(row[col.name] for col in cols) > vals:
                cor.add(row['id'])
        assert_equal(res, cor)

    for i in range(1000):
        engine.execute(table.insert(random_person(i)))
    # Column tuples of increasing length, including None thresholds in
    # different positions.
    col_tuples = [(table.columns['id'],),
                  (table.columns['blood_type'], table.columns['id']),
                  (table.columns['blood_type'], table.columns['middle_name'],
                   table.columns['id']),
                  (table.columns['blood_type'], table.columns['id'],
                   table.columns['middle_name']),
                  (table.columns['middle_name'], table.columns['blood_type'],
                   table.columns['id']),]
    val_tuples = [(5,),
                  ('AB', 500),
                  ('B', None, 500),
                  ('B', 500, None),
                  (None, 'B', 500)]
    for cols, vals in zip(col_tuples, val_tuples):
        compare_results(vector_greater_than(cols, vals), cols, vals)
def insert_cleaned_email_full(self, email):
    """Insert one cleaned Email record into the 'email_full' table."""
    if not isinstance(email, Email):
        # print() call: the original used a Python 2 print statement,
        # a SyntaxError under Python 3.
        print('ERROR: input must be of type Email')
        return
    email_table = Table("email_full", self.metadata)
    ins_stmt = email_table.insert()
    conn = self.engine.connect()
    conn.execute(ins_stmt,
                 date=email.date,
                 mime_type=email.mime_type,
                 from_addr=email.from_addr,
                 to_addr=email.to_addr,
                 subject=email.subject,
                 raw_body=email.raw_body,
                 body=email.body,
                 all_lines=email.all_lines,
                 one_line=email.one_line,
                 path=email.path)
class Collection(Base):
    """ORM mapping for the existing 'collection' table.

    Columns are reflected from the live database (autoload) rather than
    declared here.
    """
    __table__ = Table('collection', Base.metadata, autoload=True,
                      autoload_with=engine)
    # Inheritance is discriminated on the 'is_calendar' column; rows
    # whose discriminator equals 'collection' map to this class.
    __mapper_args__ = {
        'polymorphic_identity': 'collection',
        'polymorphic_on': 'is_calendar'
    }
def _create_dest_table(self, dst_table: Table) -> None:
    """
    Creates a table in the destination database (at most once per name).
    """
    tablename = dst_table.name
    if tablename in self.tablenames_created:
        # Already created on a previous call; nothing to do.
        return
    # Table creation must go through an engine, not a session (sessions
    # have no '_run_visitor'), and the active session has to be
    # committed first or SQLite raises "database is locked" because a
    # session is also in use.
    self.dst_session.commit()
    dst_table.create(self.dst_engine)
    self.tablenames_created.add(tablename)
def test_reflect_table_include_columns(self, engine, connection):
    """Reflection with include_columns keeps only the listed column."""
    reflected = Table('one_row_complex', MetaData(bind=engine))
    engine.dialect.reflecttable(connection,
                                reflected,
                                include_columns=['col_int'],
                                exclude_columns=[])
    # Exactly one column survives, and the excluded ones are absent.
    self.assertEqual(len(reflected.c), 1)
    self.assertIsNotNone(reflected.c.col_int)
    self.assertRaises(AttributeError, lambda: reflected.c.col_tinyint)
def test_char_length(self, engine, conn):
    """SQL char_length() of col_string agrees with Python's len()."""
    reflected = Table("one_row_complex", MetaData(bind=engine),
                      autoload=True)
    length_query = sqlalchemy.select(
        [sqlalchemy.func.char_length(reflected.c.col_string)])
    result = length_query.execute().scalar()
    self.assertEqual(result, len("a string"))
def __init__(self, config):
    """Initialize the session and declare the telemetry table schema.

    Args:
        config: ApplicationSession configuration forwarded to the base
            class.
    """
    ApplicationSession.__init__(self, config)
    self.count = 0       # running message counter, starts at zero
    # Engine is left unset here; presumably created later during
    # session setup — TODO confirm where it is assigned.
    self.engine = None
    metadata = MetaData()
    # One row per telemetry message. Column names mirror the message
    # field names; units/semantics are not visible from here.
    self.telemetry = Table(
        "telemetry", metadata,
        Column("id", Integer(), primary_key=True),
        Column("MSG_ID", Integer()),
        Column("V_FC", Integer()),
        Column("V_CAP", Integer()),
        Column("A_ENG", Integer()),
        Column("A_CAP", Integer()),
        Column("T_O2_In", Integer()),
        Column("T_O2_Out", Integer()),
        Column("T_FC_H2O_Out", Integer()),
        Column("Water_In", Integer()),
        Column("Water_Out", Integer()),
        Column("Master_SW", Integer()),
        Column("CAP_Down_SW", Integer()),
        Column("Drive_SW", Integer()),
        Column("FC_state", Integer()),
        Column("Mosfet_state", Integer()),
        Column("Safety_state", Integer()),
        Column("Air_Pump_load", Numeric()),
        Column("Mosfet_load", Integer()),
        Column("Water_Pump_load", Integer()),
        Column("Fan_load", Integer()),
        Column("Acc_X", Integer()),
        Column("Acc_Y", Integer()),
        Column("Acc_Z", Integer()),
        Column("AUX", Numeric()),
        Column("GPS_X", Integer()),
        Column("GPS_Y", Integer()),
        Column("GPS_Z", Integer()),
        Column("GPS_Speed", Integer()),
        Column("V_Safety", Integer()),
        Column("H2_Level", Integer()),
        Column("O2_calc", Numeric()),
        Column("lat", Numeric()),
        Column("lng", Numeric()),
    )
def diff_table_data(self, tablename):
    """Compare the data of one table between the two databases.

    Hashes the first and last ``chunk_size`` rows (ordered by primary
    key) on each side with md5 and compares the digests.

    Returns:
        (True, msg) when the sampled data matches,
        (False, msg) when it differs or the table is missing,
        (None, msg) when the table has no primary key.
    """
    try:
        firsttable = Table(tablename, self.firstmeta, autoload=True)
        secondtable = Table(tablename, self.secondmeta, autoload=True)
        pk = ",".join(
            self.firstinspector.get_pk_constraint(tablename)
            ['constrained_columns'])
        if not pk:
            # Without a PK there is no stable ordering to compare on.
            return None, "no primary key(s) on this table." \
                " Comparision is not possible."
    except NoSuchTableError:
        return False, "table is missing"
    # md5/array_agg are PostgreSQL functions — presumably both sides
    # are PostgreSQL; confirm before reusing elsewhere.
    # NOTE(review): tablename/pk are interpolated into the SQL directly;
    # they come from reflection, but verify they are trusted.
    SQL_TEMPLATE_HASH = f"""
    SELECT md5(array_agg(md5((t.*)::varchar))::varchar) hash FROM (
        SELECT * FROM (
            (SELECT * FROM {tablename} ORDER BY {pk} limit :row_limit)
            UNION
            (SELECT * FROM {tablename} ORDER BY {pk} DESC limit :row_limit)
        ) as topbottomselect order by {pk}
    ) AS t;
    """
    position = 0
    # NOTE(review): row_offset is bound but never referenced in the SQL,
    # and position never advances before the comparison below — only the
    # first/last chunk is ever checked.
    firstresult = self.firstsession.execute(SQL_TEMPLATE_HASH, {
        "row_limit": self.chunk_size,
        "row_offset": position
    }).fetchone()
    secondresult = self.secondsession.execute(SQL_TEMPLATE_HASH, {
        "row_limit": self.chunk_size,
        "row_offset": position
    }).fetchone()
    #-- to be able to run long queries
    self.firstsession.close()
    self.secondsession.close()
    if firstresult != secondresult:
        return False, f"data is different - position {position} -" \
            f" {position + self.chunk_size}"
    position += self.chunk_size
    return True, "data and count(implicit) is identical."
def test_reflect_select(self, engine, connection):
    """Reflect one_row_complex, select its single row, and verify both
    the cell values and the reflected column types."""
    one_row_complex = Table('one_row_complex', MetaData(bind=engine),
                            autoload=True)
    self.assertEqual(len(one_row_complex.c), 15)
    self.assertIsInstance(one_row_complex.c.col_string, Column)
    rows = one_row_complex.select().execute().fetchall()
    self.assertEqual(len(rows), 1)
    # Expected values for the 15 columns, in table order.
    self.assertEqual(list(rows[0]), [
        True,
        127,
        32767,
        2147483647,
        9223372036854775807,
        0.5,
        0.25,
        'a string',
        datetime(2017, 1, 1, 0, 0, 0),
        date(2017, 1, 2),
        b'123',
        '[1, 2]',
        '{1=2, 3=4}',
        '{a=1, b=2}',
        Decimal('0.1'),
    ])
    # Reflected SQLAlchemy types per column; array/map/struct come back
    # as the dialect's string type.
    self.assertIsInstance(one_row_complex.c.col_boolean.type, BOOLEAN)
    self.assertIsInstance(one_row_complex.c.col_tinyint.type, INTEGER)
    self.assertIsInstance(one_row_complex.c.col_smallint.type, INTEGER)
    self.assertIsInstance(one_row_complex.c.col_int.type, INTEGER)
    self.assertIsInstance(one_row_complex.c.col_bigint.type, BIGINT)
    self.assertIsInstance(one_row_complex.c.col_float.type, FLOAT)
    self.assertIsInstance(one_row_complex.c.col_double.type, FLOAT)
    self.assertIsInstance(one_row_complex.c.col_string.type,
                          type(STRINGTYPE))
    self.assertIsInstance(one_row_complex.c.col_timestamp.type, TIMESTAMP)
    self.assertIsInstance(one_row_complex.c.col_date.type, DATE)
    self.assertIsInstance(one_row_complex.c.col_binary.type, BINARY)
    self.assertIsInstance(one_row_complex.c.col_array.type,
                          type(STRINGTYPE))
    self.assertIsInstance(one_row_complex.c.col_map.type, type(STRINGTYPE))
    self.assertIsInstance(one_row_complex.c.col_struct.type,
                          type(STRINGTYPE))
    self.assertIsInstance(one_row_complex.c.col_decimal.type, DECIMAL)
class RapidEyeFootPrintsMexicoOld(BASE):
    """Hybrid mapping for the 'rapideye_footprints_mexico_old' table.

    Four columns are declared explicitly; the rest are reflected from
    the database via the autoload table args.
    """
    # TODO: this is just an example of three columns in the table and
    # two columns mapped
    __table__ = Table('rapideye_footprints_mexico_old', BASE.metadata,
                      Column('gid', Integer, primary_key=True),
                      Column('code', Integer),
                      Column('mapgrid', Integer),
                      Column('mapgrid2', Integer))
    __table_args__ = {'autoload': True, 'autoload_with': ENGINE}
    # Direct attribute aliases for the declared columns.
    code = __table__.c.code
    mapgrid = __table__.c.mapgrid
    mapgrid2 = __table__.c.mapgrid2
def budgets_labels_table():
    """
    The association table for linking budgets to labels
    """
    shared_metadata = EntityManager.get_base().metadata
    budget_fk = Column('budget_id', Integer, ForeignKey('budgets.id'))
    label_fk = Column('label_id', Integer, ForeignKey('labels.id'))
    return Table('budgets_labels', shared_metadata, budget_fk, label_fk)
async def get_or_create(
        query_clause: str,
        query_values: dict,
        table: Table,
        insert_values: dict
):
    """取得或建立 (get or create).

    Fetch one row matching the query; when found, return it. Otherwise
    insert ``insert_values`` into ``table`` and return the execute
    result.
    """
    existing = await database.fetch_one(query=query_clause,
                                        values=query_values)
    if existing:
        return existing
    insert_query = table.insert()
    return await database.execute(query=insert_query, values=insert_values)
class NewsContent(Base):
    """ORM mapping for the 'news_content' table of crawled articles."""
    __table__ = Table(
        'news_content', Base.metadata,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('WEBSITE_ID', Integer),
        Column('CRAWL_URL', VARCHAR(100)),
        Column('NEWS_NAME', VARCHAR(100)),
        Column('NEWS_URL', VARCHAR(100)),
        Column('NEWS_IMAGE', VARCHAR(100)),
        Column('NEWS_DESC', TEXT),
        Column('KEYWORDS', VARCHAR(100)),
        Column('PUBLISH_TIME', DATE),
        Column('NEWS_RESOURCE', VARCHAR(50)),
        Column('NEWS_AUTHOR', VARCHAR(50)),
        Column('COMMENT_NUM', Integer),
        Column('READ_NUM', Integer))
class jianshu_blog(Base):
    """ORM mapping for the 'jianshu_blog' table of scraped blog posts."""
    __table__ = Table(
        'jianshu_blog', Base.metadata,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('main_topic', VARCHAR(64)),
        Column('topic_link', VARCHAR(256)),
        Column('blog_name', VARCHAR(256)),
        Column('blog_content', TEXT),
        Column('blog_link', VARCHAR(256)),
        Column('published_time', DATE),
        Column('blog_author', VARCHAR(128)),
        Column('author_link', VARCHAR(256)),
        Column('read_num', Integer),
        Column('comment_num', Integer),
        Column('like_num', Integer),
        Column('support_num', Integer))
def test_basic_create_table(self):
    """Compiled CREATE TABLE DDL matches the expected statement,
    ignoring all whitespace."""
    def squash(sql):
        return re.sub(r'\s+', '', sql.strip())

    engine = create_engine('redshift+psycopg2://')
    table_model = Table("testtable", MetaData(), Column("col1", Integer))
    compiled = str(CreateTable(table_model).compile(engine))
    self.assertEqual(squash("CREATE TABLE testtable (col1 INTEGER)"),
                     squash(compiled))
def create_sample_table(self, sample_table_name):
    """Create a sample email table with the main table's layout plus a
    manual_label column.

    Refuses to create a table whose name equals the main table's name.
    """
    if sample_table_name == self.table_name:
        # Fixed message typo ('Cannon' -> 'Cannot'); now consistent with
        # the wording used by the other methods of this class.
        print('Cannot use the same table name')
        return
    email_table = Table(sample_table_name, self.metadata,
                        Column('id', Integer, primary_key=True),
                        Column('date', Text),
                        Column('mime_type', Text),
                        Column('from_addr', Text),
                        Column('to_addr', Text),
                        Column('subject', Text),
                        Column('raw_body', Text),
                        Column('cleaned_body', Text),
                        Column('one_line', Text),
                        Column('path', Text),
                        Column('prediction', Integer),
                        Column('probability', Float),
                        Column('manual_label', Integer))
    email_table.create(self.engine)
class News(Base):
    """ORM mapping for the '36ke_news' table of scraped news items."""
    __table__ = Table(
        '36ke_news', Base.metadata,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('ke_item', VARCHAR(64)),
        Column('news_lable', VARCHAR(256)),
        Column('news_name', VARCHAR(256)),
        Column('news_content', TEXT),
        Column('news_link', VARCHAR(256)),
        Column('news_resource', VARCHAR(128)),
        Column('news_date', DATE),
        Column('published_time', DATE))
def save_data_to_database(self, df: pd.DataFrame, engine: Engine):
    """Persist the citations of every decision in ``df``.

    Existing citation rows for the affected decisions are deleted first
    and then reinserted, as no upsert command is available.
    """
    # Reflect the table once instead of once per dataframe row.
    t = Table('citation', MetaData(), autoload_with=engine)
    with engine.connect() as conn:
        # The delete predicate already covers the whole dataframe, so a
        # single up-front delete replaces the original per-row deletes
        # that re-deleted the same set on every iteration. Statements
        # now run on the opened connection instead of engine.execute().
        conn.execute(t.delete().where(delete_stmt_decisions_with_df(df)))
        for idx, row in df.iterrows():
            for k, citations in row['citations'].items():
                citation_type_id = CitationType(k).value
                for citation in citations:
                    conn.execute(t.insert().values([{
                        "decision_id": str(row['decision_id']),
                        "citation_type_id": citation_type_id,
                        "url": citation.get("url"),
                        "text": citation["text"],
                    }]))
def test_iso_duration_field(self) -> None:
    """Round-trip Duration values through the ISO-text column type."""
    id_colname = "id"
    duration_colname = "duration_iso"
    id_col = Column(id_colname, Integer, primary_key=True)
    duration_col = Column(duration_colname, PendulumDurationAsIsoTextColType)
    table = Table("testtable", self.meta, id_col, duration_col)
    table.create()
    # Mixed-unit, fractional-second, and negative durations.
    d1 = Duration(years=1, months=3, seconds=3, microseconds=4)
    d2 = Duration(seconds=987.654321)
    d3 = Duration(days=-5)
    table.insert().values([
        {
            id_colname: 1,
            duration_colname: d1
        },
        {
            id_colname: 2,
            duration_colname: d2
        },
        {
            id_colname: 3,
            duration_colname: d3
        },
    ]).execute()
    select_fields = [id_col, duration_col]
    rows = list(
        select(select_fields).select_from(table).order_by(
            id_col).execute())
    # Each stored duration must come back equal to what went in.
    self._assert_duration_equal(rows[0][duration_col], d1)
    self._assert_duration_equal(rows[1][duration_col], d2)
    self._assert_duration_equal(rows[2][duration_col], d3)
def test_semantic_version_field(self) -> None:
    """Round-trip semantic Version values through the column type."""
    id_colname = "id"
    version_colname = "version"
    id_col = Column(id_colname, Integer, primary_key=True)
    version_col = Column(version_colname, SemanticVersionColType)
    table = Table("testtable", self.meta, id_col, version_col)
    table.create()
    # Includes a two-digit major version to exercise non-lexicographic
    # ordering of version strings.
    v1 = Version("1.1.0")
    v2 = Version("2.0.1")
    v3 = Version("14.0.0")
    table.insert().values([
        {
            id_colname: 1,
            version_colname: v1
        },
        {
            id_colname: 2,
            version_colname: v2
        },
        {
            id_colname: 3,
            version_colname: v3
        },
    ]).execute()
    select_fields = [id_col, version_col]
    rows = list(
        select(select_fields).select_from(table).order_by(
            id_col).execute())
    # Each stored version must come back equal to what went in.
    self._assert_version_equal(rows[0][version_col], v1)
    self._assert_version_equal(rows[1][version_col], v2)
    self._assert_version_equal(rows[2][version_col], v3)
class Subdistrict(Entity):
    '''
    Entity for subdistricts.
    '''
    # Table and column names are wrapped in _() — presumably for
    # localization of identifiers; confirm against the _() definition.
    __table__ = Table(
        _('subdistricts'),
        Entity.metadata,
        Column(_('id'), BigInteger, nullable=False, primary_key=True),
        # Foreign keys up the whole territorial hierarchy; use_alter
        # defers FK creation to ALTER TABLE so circular dependencies
        # between the tables can be resolved.
        Column(_('district_id'), Integer,
               ForeignKey(_('districts.id'), use_alter=True),
               nullable=False, index=True),
        Column(_('municipality_id'), Integer,
               ForeignKey(_('municipalities.id'), use_alter=True),
               nullable=False, index=True),
        Column(_('microregion_id'), Integer,
               ForeignKey(_('microregions.id'), use_alter=True),
               nullable=False, index=True),
        Column(_('mesoregion_id'), SmallInteger,
               ForeignKey(_('mesoregions.id'), use_alter=True),
               nullable=False, index=True),
        Column(_('state_id'), SmallInteger,
               ForeignKey(_('states.id'), use_alter=True),
               nullable=False, index=True),
        Column(_('name'), String(64), nullable=False, index=True)
    )

    # Columns mapping: localized column name -> canonical field name.
    __columns__ = {
        _('id'): 'subdistrict_id',
        _('district_id'): 'district_id',
        _('municipality_id'): 'municipality_id',
        _('microregion_id'): 'microregion_id',
        _('mesoregion_id'): 'mesoregion_id',
        _('state_id'): 'state_id',
        _('name'): 'subdistrict_name',
    }
def test_if_not_exists_create_table(self):
    """CreateTable with if_not_exists compiles to
    'CREATE TABLE IF NOT EXISTS ...', ignoring all whitespace."""
    def squash(sql):
        return re.sub(r'\s+', '', sql.strip())

    engine = create_engine('redshift+psycopg2://')
    table_model = Table("testtable", MetaData(), Column("col1", Integer))
    compiled = str(CreateTable(table_model).compile(
        engine, compile_kwargs={"if_not_exists": True}))
    self.assertEqual(
        squash("CREATE TABLE IF NOT EXISTS testtable (col1 INTEGER)"),
        squash(compiled))
def save_the_file_numbers(series: pd.DataFrame) -> pd.DataFrame:
    """
    Saves the file_number for each of the decision ids
    :param series:
    :return:
    """
    # NOTE(review): file_id is interpolated directly into the SQL string;
    # parameterize this if file_id can ever contain quotes or untrusted
    # data.
    query = f"SELECT decision_id FROM decision WHERE file_id = '{series['file_id']}'"
    series['decision_id'] = pd.read_sql(query, engine.connect())["decision_id"][0]
    with engine.connect() as conn:
        t = Table('file_number', MetaData(), autoload_with=engine)
        # Delete and reinsert as no upsert command is available
        stmt = t.delete().where(delete_stmt_decisions_with_df(series))
        conn.execute(stmt)
    series['text'] = series['file_number'].strip(
    )  # .map(lambda x: x.strip())
    save_to_db(series[['decision_id', 'text']], 'file_number')
    # An additional file number, when present and non-empty, is saved as
    # a second row for the same decision.
    if ('file_number_additional' in series and
            series['file_number_additional'] is not None and
            len(series['file_number_additional']) > 0):
        series['text'] = series['file_number_additional'].strip(
        )  # .map(lambda x: x.strip())
        save_to_db(series[['decision_id', 'text']], 'file_number')
    return series
def engine(request, tmpdir_factory):
    """Parametrized engine fixture: sqlite (file or memory), mysql, or
    postgresql.

    Creates the test schema/tables before yielding the engine and drops
    them again afterwards for non-sqlite backends.
    """
    param = request.param
    engine = None
    if param == 'sqlite-file':
        tmpdir = tmpdir_factory.mktemp('asphalt-sqlalchemy')
        engine = create_engine('sqlite:///' + str(tmpdir.join('test.db')))
    elif param == 'sqlite-memory':
        engine = create_engine('sqlite:///:memory:')
    elif param == 'mysql':
        engine = create_engine(request.getfixturevalue('mysql_url'))
    elif param == 'postgresql':
        engine = create_engine(request.getfixturevalue('postgresql_url'))

    is_sqlite = engine.dialect.name == 'sqlite'
    if not is_sqlite:
        engine.execute(CreateSchema('altschema'))

    if param != 'sqlite-memory':
        metadata = MetaData()
        Table('table', metadata,
              Column('column1', Integer, primary_key=True))
        Table('table2', metadata,
              Column('fk_column', ForeignKey('table.column1')))
        if not is_sqlite:
            # Extra table living in the alternate schema.
            Table('table3', metadata, Column('fk_column', Integer),
                  schema='altschema')
        metadata.create_all(engine)

    yield engine

    if not is_sqlite:
        metadata.drop_all(engine)
        engine.execute(DropSchema('altschema'))
def table_override(original, base, overrides=None):
    """Copy ``original`` into ``base.metadata``, substituting columns.

    Args:
        original: the source Table.
        base: object whose ``metadata`` receives the new table.
        overrides: optional mapping of column name -> replacement Column.

    Returns:
        The new Table, with each overriding column renamed to match the
        column it replaces.
    """
    # None sentinel instead of a mutable {} default (a {} default is
    # shared across calls).
    if overrides is None:
        overrides = {}
    result = Table(original.name, base.metadata, schema=original.schema)
    for col in original.columns:
        if col.name not in overrides:
            result.append_column(col.copy())
        else:
            new_col = overrides[col.name].copy()
            new_col.name = col.name
            result.append_column(new_col)
    return result
def to_sqa_table(cls, metadata, name, **kwargs):
    '''
    Create a sqalchemy Table object corresponding to this class. Use
    kwargs to override any desired columns.
    '''
    field_names = set(map(lambda col: col.name, cls.columns))
    bad_kwargs = set(kwargs.keys()) - field_names
    if bad_kwargs:
        raise ValueError(
            'Optional keyword argument names must correspond to field names. The following names are not compliant: %s'
            % str(sorted(bad_kwargs)))
    cols = []
    for col in cls.columns:
        # Use the caller-supplied column when one was given, otherwise
        # convert the field definition.
        if col.name in kwargs:
            cols.append(kwargs[col.name])
        else:
            cols.append(col.to_sqa())
    return Table(name, metadata, *cols)
class State(Entity):
    """Entity for states."""
    __name__ = 'state'
    __table__ = Table(
        'states', Entity.metadata,
        Column('id', SmallInteger, nullable=False, primary_key=True),
        Column('name', String(32), nullable=False, index=True))

    # Relationships: one state has many of each subordinate division.
    mesoregions = Relationship('Mesoregion', 'state')
    microregions = Relationship('Microregion', 'state')
    municipalities = Relationship('Municipality', 'state')
    districts = Relationship('District', 'state')
    subdistricts = Relationship('Subdistrict', 'state')
def athena():
    """Query the Athena 'file' table and return its manufacturerpartid
    values as an HTML-ish string (one row per line)."""
    import contextlib
    from urllib.parse import quote_plus  # PY2: from urllib import quote_plus

    from sqlalchemy.engine import create_engine
    from sqlalchemy.sql.expression import select
    from sqlalchemy.sql.schema import Table, MetaData

    # Build the connection URL once, URL-quoting the credentials and the
    # staging dir. The original first formatted the URL with unquoted
    # values and then called .format() a second time with keyword
    # arguments on a string that had no placeholders left (a no-op), so
    # unquoted credentials reached the driver.
    conn_str = (
        'awsathena+jdbc://{access_key}:{secret_key}'
        '@athena.{region_name}.amazonaws.com:443/{schema_name}'
        '?s3_staging_dir={s3_staging_dir}'
    ).format(
        access_key=quote_plus(current_app.config['S3_KEY']),
        secret_key=quote_plus(current_app.config['S3_SECRET']),
        region_name='us-west-2',
        schema_name='uploadwizard',
        s3_staging_dir=quote_plus('s3://aws-athena-query-results-upw/'))
    engine = create_engine(conn_str)
    res = ""
    try:
        with contextlib.closing(engine.connect()) as conn:
            many_rows = Table('file', MetaData(bind=engine), autoload=True)
            rs = select([many_rows.c.manufacturerpartid]).execute()
            for row in rs:
                res += str(row) + "\n<br/>"
    finally:
        engine.dispose()
    return res
def test_reflect_table_include_columns(self, engine, connection):
    """Reflection with include_columns keeps only the listed column,
    across the SQLAlchemy 1.2 -> 1.3 reflecttable signature change."""
    one_row_complex = Table('one_row_complex', MetaData(bind=engine))
    # Compare (major, minor) as an int tuple. The original converted
    # 'major.minor' to float, which misorders versions such as '1.10'
    # (parsed as 1.1, wrongly <= 1.2).
    version = tuple(
        int(part) for part in
        re.search(r'^(\d+)\.(\d+)', sqlalchemy.__version__).groups())
    if version <= (1, 2):
        engine.dialect.reflecttable(connection, one_row_complex,
                                    include_columns=['col_int'],
                                    exclude_columns=[])
    else:
        # https://docs.sqlalchemy.org/en/13/changelog/changelog_13.html#
        # change-64ac776996da1a5c3e3460b4c0f0b257
        engine.dialect.reflecttable(connection, one_row_complex,
                                    include_columns=['col_int'],
                                    exclude_columns=[],
                                    resolve_fks=True)
    self.assertEqual(len(one_row_complex.c), 1)
    self.assertIsNotNone(one_row_complex.c.col_int)
    self.assertRaises(AttributeError, lambda: one_row_complex.c.col_tinyint)
def _table_definition(metadata: MetaData) -> Table:
    """
    Gets the definition of the comments table.

    Args:
        metadata (MetaData): The database schema metadata.

    Returns:
        Table: Table following the comments table definition.
    """
    repo_col = Column("repo_dir", String(140), primary_key=True)
    issue_col = Column("issue_id", Integer, primary_key=True)
    comment_col = Column("comment_id", Integer, primary_key=True)
    author_col = Column("author", String(39), nullable=False)
    body_col = Column("body", LONGTEXT, nullable=False)
    # Comments are removed together with their parent issue.
    issue_fk = ForeignKeyConstraint(
        ["repo_dir", "issue_id"],
        ["issues.repo_dir", "issues.issue_id"],
        ondelete="CASCADE")
    return Table("comments", metadata, repo_col, issue_col, comment_col,
                 author_col, body_col, issue_fk)
def test_reflect_table_include_columns(self, engine, connection):
    """Reflection with include_columns keeps only the listed column,
    across the SQLAlchemy 1.2 -> 1.3 reflecttable signature change."""
    one_row_complex = Table('one_row_complex', MetaData(bind=engine))
    # Compare (major, minor) as an int tuple. The original converted
    # 'major.minor' to float, which misorders versions such as '1.10'
    # (parsed as 1.1, wrongly <= 1.2).
    version = tuple(
        int(part) for part in
        re.search(r'^(\d+)\.(\d+)', sqlalchemy.__version__).groups())
    if version <= (1, 2):
        engine.dialect.reflecttable(connection, one_row_complex,
                                    include_columns=['col_int'],
                                    exclude_columns=[])
    else:
        # resolve_fks is required from SQLAlchemy 1.3 onwards.
        engine.dialect.reflecttable(connection, one_row_complex,
                                    include_columns=['col_int'],
                                    exclude_columns=[],
                                    resolve_fks=True)
    self.assertEqual(len(one_row_complex.c), 1)
    self.assertIsNotNone(one_row_complex.c.col_int)
    self.assertRaises(AttributeError, lambda: one_row_complex.c.col_tinyint)
def _table_definition(metadata: MetaData) -> Table:
    """
    Gets the definition of the outcome table.

    Args:
        metadata (MetaData): The database schema metadata.

    Returns:
        Table: Table following the outcomes table definition.
    """
    return Table(
        "outcomes", metadata,
        # One outcome per task; removed when the task is deleted.
        Column("task_id", Integer,
               ForeignKey("tasks.task_id", ondelete="CASCADE"),
               primary_key=True),
        Column("repo_dir", String(140),
               ForeignKey("repositories.repo_dir", ondelete="CASCADE")),
        Column("model_type", String(10), nullable=False),
        Column("outcome_data", JSON, nullable=False),
        # Execution time of the task; units not visible from here.
        Column("exec_time", Float, nullable=False))
class Subdistrict(Entity):
    """Entity for subdistricts."""
    __name__ = 'subdistrict'
    __table__ = Table(
        'subdistricts', Entity.metadata,
        Column('id', BigInteger, nullable=False, primary_key=True),
        # Foreign keys up the whole territorial hierarchy; use_alter
        # defers FK creation to ALTER TABLE so circular dependencies
        # between the tables can be resolved.
        Column('district_id', Integer,
               ForeignKey('districts.id', use_alter=True),
               nullable=False, index=True),
        Column('municipality_id', Integer,
               ForeignKey('municipalities.id', use_alter=True),
               nullable=False, index=True),
        Column('microregion_id', Integer,
               ForeignKey('microregions.id', use_alter=True),
               nullable=False, index=True),
        Column('mesoregion_id', SmallInteger,
               ForeignKey('mesoregions.id', use_alter=True),
               nullable=False, index=True),
        Column('state_id', SmallInteger,
               ForeignKey('states.id', use_alter=True),
               nullable=False, index=True),
        Column('name', String(64), nullable=False, index=True))

    # Relationships: one parent of each kind per subdistrict.
    state = Relationship('State', 'subdistricts')
    mesoregion = Relationship('Mesoregion', 'subdistricts')
    microregion = Relationship('Microregion', 'subdistricts')
    municipality = Relationship('Municipality', 'subdistricts')
    district = Relationship('District', 'subdistricts')
def update_brushed_email_is_scheduling(self, email_id, is_scheduling):
    """Set the is_scheduling flag on one brushed_email row by id."""
    table = Table('brushed_email', self.metadata)
    connection = self.engine.connect()
    update_stmt = (table.update()
                   .where(table.c.id == email_id)
                   .values(is_scheduling=is_scheduling))
    connection.execute(update_stmt)
#-*- encoding: utf-8 -*-
'''
Created on 2014-11-5

@author: [email protected]
'''
from sqlalchemy.sql.schema import MetaData, Table

# NOTE(review): credentials and host are hard-coded here — consider
# moving them to configuration.
metadata = MetaData('mysql://%s:%s@%s/%s?charset=utf8' %
                    ('root', 'root', '172.16.109.105:3306', 'itgfz2014'))

if __name__ == '__main__':
    # Reflect the member table and dump the query plus every row.
    mem_tab = Table('itgfz_member', metadata, autoload=True)
    stat = mem_tab.select()
    # print() calls: the original used Python 2 print statements, a
    # SyntaxError under Python 3; the trailing no-op `pass` was dropped.
    print(stat)
    r = stat.execute()
    print([v for v in r.fetchall()])