Example #1
 def test_unicode(self, engine, connection):
     unicode_str = '密林'
     one_row = Table('one_row', MetaData(bind=engine))
     returned_str = sqlalchemy.select(
         [expression.bindparam('あまぞん', unicode_str)],
         from_obj=one_row,
     ).scalar()
     self.assertEqual(returned_str, unicode_str)
Example #2
 def test_char_length(self, engine, connection):
     one_row_complex = Table('one_row_complex',
                             MetaData(bind=engine),
                             autoload=True)
     result = sqlalchemy.select([
         sqlalchemy.func.char_length(one_row_complex.c.col_string)
     ]).execute().scalar()
     self.assertEqual(result, len('a string'))
Example #3
 def test_reserved_words(self, engine, connection):
     """Presto uses double quotes, not backticks"""
     fake_table = Table('select', MetaData(bind=engine), Column('current_timestamp', STRINGTYPE))
     query = str(fake_table.select(fake_table.c.current_timestamp == 'a'))
     self.assertIn('"select"', query)
     self.assertIn('"current_timestamp"', query)
     self.assertNotIn('`select`', query)
     self.assertNotIn('`current_timestamp`', query)
Example #4
    def save_data_to_database(self, df: pd.DataFrame, engine: Engine):
        with engine.connect() as conn:
            t = Table('lower_court', MetaData(), autoload_with=conn)
            # Delete and reinsert as no upsert command is available
            stmt = t.delete().where(delete_stmt_decisions_with_df(df))
            conn.execute(stmt)
            for _, row in df.iterrows():
                if 'lower_court' not in row or row['lower_court'] is None:
                    continue
                lower_court = row["lower_court"]
                res = {}

                if 'court' in lower_court and lower_court['court'] is not None:
                    res['court_id'] = list(
                        self.select(
                            engine, 'court', 'court_id',
                            f"court_string = '{lower_court['court']}'")
                    )[0]['court_id'][0]
                    res['court_id'] = int(
                        res['court_id']
                    ) if res['court_id'] is not None else None
                if 'canton' in lower_court and lower_court['canton'] is not None:
                    res['canton_id'] = list(
                        self.select(engine, 'canton', 'canton_id',
                                    f"short_code = '{lower_court['canton']}'")
                    )[0]['canton_id'][0]
                    res['canton_id'] = int(
                        res['canton_id']
                    ) if res['canton_id'] is not None else None
                if 'chamber' in lower_court and lower_court['chamber'] is not None:
                    res['chamber_id'] = list(
                        self.select(
                            engine, 'chamber', 'chamber_id',
                            f"chamber_string = '{lower_court['chamber']}'")
                    )[0]['chamber_id'][0]
                    res['chamber_id'] = int(
                        res['chamber_id']
                    ) if res['chamber_id'] is not None else None

                stmt = t.insert().values([{
                    "decision_id": str(row['decision_id']),
                    "court_id": res.get('court_id'),
                    "canton_id": res.get('canton_id'),
                    "chamber_id": res.get('chamber_id'),
                    "date": lower_court.get('date'),
                    "file_number": lower_court.get('file_number')
                }])
                conn.execute(stmt)
Example #5
def metadata():
    """Returns shared metadata instance with naming convention."""
    naming_convention = {
        'ix': 'ix_%(column_0_label)s',
        'uq': 'uq_%(table_name)s_%(column_0_name)s',
        'ck': 'ck_%(table_name)s_%(constraint_name)s',
        'fk': 'fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s',
        'pk': 'pk_%(table_name)s'
    }
    return MetaData(naming_convention=naming_convention)
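As a quick illustration of how such a convention resolves (a minimal sketch; the table and column names are invented):

from sqlalchemy import Column, Integer, MetaData, String, Table
from sqlalchemy.schema import CreateTable

md = MetaData(naming_convention={
    'pk': 'pk_%(table_name)s',
    'uq': 'uq_%(table_name)s_%(column_0_name)s',
})
user = Table('user', md,
             Column('id', Integer, primary_key=True),
             Column('email', String(255), unique=True))
# The generated DDL should name the constraints deterministically,
# roughly: CONSTRAINT pk_user PRIMARY KEY (id) and
# CONSTRAINT uq_user_email UNIQUE (email).
print(CreateTable(user))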
Example #6
 def __init__(self,
              firstdb,
              seconddb,
              firstdb_schemas="public",
              seconddb_schemas="public",
              chunk_size=10000,
              count_only=False):
     firstsession, firstengine = make_session(firstdb, firstdb_schemas)
     secondsession, secondengine = make_session(seconddb, seconddb_schemas)
     self.firstsession = firstsession
     self.firstengine = firstengine
     self.secondsession = secondsession
     self.secondengine = secondengine
     self.firstmeta = MetaData(bind=firstengine, schema=firstdb_schemas)
     self.secondmeta = MetaData(bind=secondengine, schema=seconddb_schemas)
     self.firstinspector = inspect(firstengine)
     self.secondinspector = inspect(secondengine)
     self.chunk_size = int(chunk_size)
     self.count_only = count_only
Example #7
 def __init__(self,
              firstdb,
              seconddb,
              chunk_size=10000,
              count_only=False,
              exclude_tables=""):
     firstsession, firstengine = make_session(firstdb)
     secondsession, secondengine = make_session(seconddb)
     self.firstsession = firstsession
     self.firstengine = firstengine
     self.secondsession = secondsession
     self.secondengine = secondengine
     self.firstmeta = MetaData(bind=firstengine)
     self.secondmeta = MetaData(bind=secondengine)
     self.firstinspector = inspect(firstengine)
     self.secondinspector = inspect(secondengine)
     self.chunk_size = int(chunk_size)
     self.count_only = count_only
     self.exclude_tables = exclude_tables.split(',')
Example #8
 def test_reflect_table_include_columns(self, engine, connection):
     one_row_complex = Table('one_row_complex', MetaData(bind=engine))
     engine.dialect.reflecttable(connection,
                                 one_row_complex,
                                 include_columns=['col_int'],
                                 exclude_columns=[])
     self.assertEqual(len(one_row_complex.c), 1)
     self.assertIsNotNone(one_row_complex.c.col_int)
     self.assertRaises(AttributeError,
                       lambda: one_row_complex.c.col_tinyint)
Example #9
 def get_table(self, table_name, engine=None, schema=None):
     if not engine:
         engine = self.engine1
     if not schema:
         schema = self.schema
     return Table(table_name,
                  MetaData(bind=engine),
                  schema=schema or None,
                  autoload=True,
                  autoload_with=engine)
Example #10
def run_with_taskmanager(dsn=None):
    """Example for running PCSE/WOFOST with the task manager.
    
    Runs PyWOFOST for a number of crop types but only in order to 
	simulate phenology. Executing PyWOFOST runs is done through
     the task manager. Depending on the state of the table TSUM
	 in the database the simulations are done for the whole world
	 or at least for a considerable part of it. Output is written 
	 to the database.
    
    Parameters:
    dsn - SQLAlchemy data source name pointing to the database to be used.
    """

    #from pcse.pywofost_ensemble import PyWofostEnsemble
    from pcse.db.pcse import db_input as dbi
    from pcse.taskmanager import TaskManager
    from sqlalchemy.exc import SQLAlchemyError
    from sqlalchemy import engine as sa_engine
    from sqlalchemy.sql.schema import MetaData
    from sqlalchemy import Table
    import socket
    # from sqlalchemy.exceptions import SQLAlchemyError

    # Open database connection and empty output table
    db_engine = sa_engine.create_engine(dsn)
    connection = db_engine.connect()
    metadata = MetaData(db_engine)
    table_tasklist = Table("tasklist", metadata, autoload=True)

    # Initialise task manager
    taskmanager = TaskManager(metadata,
                              connection,
                              dbtype="MySQL",
                              hostname=socket.gethostname())
    # Loop until no tasks are left
    task = taskmanager.get_task()
    while task is not None:
        try:
            print("Running task: %i" % task["task_id"])
            task_runner(db_engine, task)

            # Set status of current task to 'Finished'
            check_connection(connection)
            taskmanager.set_task_finished(task)

        except SQLAlchemyError as inst:
            print("Database error: %s" % inst)
            # Break because of error in the database connection
            break

        except PCSEError as inst:
            print("Error in PCSE: %s" % inst)
            # Set status of current task to 'Error'
            taskmanager.set_task_error(task)

        # Fetch the next task
        task = taskmanager.get_task()
Example #11
    def test_basic_create_table(self):
        expected_result = re.sub(
            r'\s+', '', "CREATE TABLE testtable (col1 INTEGER)").strip()

        engine = create_engine('redshift+psycopg2://')
        table_model = Table("testtable", MetaData(), Column("col1", Integer))
        ddl_statement = re.sub(
            r'\s+', '',
            str(CreateTable(table_model).compile(engine)).strip())

        self.assertEqual(expected_result, ddl_statement)
Example #12
    def test_get_table_names(self, engine, conn):
        meta = MetaData()
        meta.reflect(bind=engine)
        print(meta.tables)
        self.assertIn("one_row", meta.tables)
        self.assertIn("one_row_complex", meta.tables)

        insp = sqlalchemy.inspect(engine)
        self.assertIn(
            "many_rows", insp.get_table_names(schema=SCHEMA),
        )
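When reflecting a whole schema is too slow, MetaData.reflect() also accepts an only= argument; a minimal sketch reusing the table names above:

meta = MetaData()
meta.reflect(bind=engine, only=["one_row", "one_row_complex"])  # reflect just these tables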
Example #13
    def run(self):
        # Source reflection
        source_meta = MetaData()
        source_meta.reflect(bind=self.source_engine)
        source_tables = source_meta.tables

        source_table_names = list(source_tables.keys())

        # Destination Binding
        destination_meta = MetaData(bind=self.destination_engine)
        for name, table in source_tables.items():
            table.metadata = destination_meta
            if name in self.settings.exclude_data.keys():
                table.__mapper_args__ = {'exclude_properties': self.settings.exclude_data[name]}

        # Drop table for testing purposes
        # destination_meta.drop_all(self.destination_engine)
        for table in source_table_names:
            self.sessions.destination.execute('DROP TABLE {table};'.format(table=table))
            self.sessions.destination.commit()
            print('DROPPED TABLE {table}'.format(table=table))

        # Begin migration
        source_meta.create_all(self.destination_engine)

        source_data = {table: self.sessions.source.query(source_tables[table]).all() for table in source_table_names}

        for table in source_table_names:
            print("Migrating:", table)
            # if table in self.settings.exclude_data.keys():
            #     pprint(source_tables[table].__mapper_args__)
            #     exit(1)
            for row in source_data[table]:
                try:
                    self.sessions.destination.execute(source_tables[table].insert(row))
                except StatementError:
                    print('Bad data in table: ', table, 'row data:\n', row[0], 'Error:', sys.exc_info()[0])
            print('Data for:', table, 'added to the queue..')

        self.sessions.destination.commit()
        print('Migration Complete!')
Example #14
def dump_connection_info(engine: Engine, fileobj: TextIO = sys.stdout) -> None:
    """
    Dumps some connection info, as an SQL comment. Obscures passwords.

    Args:
        engine: the SQLAlchemy :class:`Engine` to dump metadata information
            from
        fileobj: the file-like object (default ``sys.stdout``) to write
            information to
    """
    meta = MetaData(bind=engine)
    writeline_nl(fileobj, sql_comment(f'Database info: {meta}'))
Example #15
    def test_to_sql(self, engine, conn):
        # TODO pyathena.error.OperationalError: SYNTAX_ERROR: line 1:305:
        #      Column 'foobar' cannot be resolved.
        #      def _format_bytes(formatter, escaper, val):
        #          return val.decode()
        table_name = "to_sql_{0}".format(str(uuid.uuid4()).replace("-", ""))
        df = pd.DataFrame({
            "col_int": np.int32([1]),
            "col_bigint": np.int64([12345]),
            "col_float": np.float32([1.0]),
            "col_double": np.float64([1.2345]),
            "col_string": ["a"],
            "col_boolean": np.bool_([True]),
            "col_timestamp": [datetime(2020, 1, 1, 0, 0, 0)],
            "col_date": [date(2020, 12, 31)],
            # "col_binary": "foobar".encode(),
        })
        # Explicitly specify column order
        df = df[[
            "col_int",
            "col_bigint",
            "col_float",
            "col_double",
            "col_string",
            "col_boolean",
            "col_timestamp",
            "col_date",
            # "col_binary",
        ]]
        df.to_sql(
            table_name,
            engine,
            schema=SCHEMA,
            index=False,
            if_exists="replace",
            method="multi",
        )

        table = Table(table_name, MetaData(bind=engine), autoload=True)
        self.assertEqual(
            table.select().execute().fetchall(),
            [(
                1,
                12345,
                1.0,
                1.2345,
                "a",
                True,
                datetime(2020, 1, 1, 0, 0, 0),
                date(2020, 12, 31),
                # "foobar".encode(),
            )],
        )
Example #16
    def test_to_sql(self, engine, conn):
        # TODO Add binary column (After dropping support for Python 2.7)
        table_name = "to_sql_{0}".format(str(uuid.uuid4()).replace("-", ""))
        df = pd.DataFrame(
            {
                "col_int": np.int32([1]),
                "col_bigint": np.int64([12345]),
                "col_float": np.float32([1.0]),
                "col_double": np.float64([1.2345]),
                "col_string": ["a"],
                "col_boolean": np.bool_([True]),
                "col_timestamp": [datetime(2020, 1, 1, 0, 0, 0)],
                "col_date": [date(2020, 12, 31)],
            }
        )
        # Explicitly specify column order
        df = df[
            [
                "col_int",
                "col_bigint",
                "col_float",
                "col_double",
                "col_string",
                "col_boolean",
                "col_timestamp",
                "col_date",
            ]
        ]
        df.to_sql(
            table_name,
            engine,
            schema=SCHEMA,
            index=False,
            if_exists="replace",
            method="multi",
        )

        table = Table(table_name, MetaData(bind=engine), autoload=True)
        self.assertEqual(
            table.select().execute().fetchall(),
            [
                (
                    1,
                    12345,
                    1.0,
                    1.2345,
                    "a",
                    True,
                    datetime(2020, 1, 1, 0, 0, 0),
                    date(2020, 12, 31),
                )
            ],
        )
Example #17
class Entity(declarative_base()):
    """Abstract entity class."""

    __abstract__ = True

    naming_convention = {
        'pk': 'pk_%(table_name)s',
        'fk': 'fk_%(table_name)s_%(column_0_name)s',
        'ix': 'ix_%(column_0_label)s',
        'uq': 'uq_%(table_name)s_%(column_0_name)s',
    }

    metadata = MetaData(naming_convention=naming_convention)

    def serialize(self, flatten=False):
        """
        Serializes the entity to an ordered mapping.

        Args:
            flatten (bool): Whether it should flatten column names or not

        Returns:
            geodatabr.core.types.OrderedMap: The entity columns/values pairs
        """
        if not flatten:
            return OrderedMap({
                column.name: getattr(self, column.name)
                for column in self.__table__.columns
            })

        def _flatten(entity):
            flattened = OrderedMap()

            for column in entity.__table__.columns:
                if not column.foreign_keys:
                    flattened[entity.__name__ + '_' + column.name] =\
                        getattr(entity, column.name)

            return flattened

        flattened = OrderedMap()

        for column in reversed(list(self.__table__.columns)):
            for foreign_key in column.foreign_keys:
                for relationship in self.__mapper__.relationships:
                    if relationship.table == foreign_key.column.table:
                        flattened.update(
                            _flatten(getattr(self, relationship.key)))

        flattened.update(_flatten(self))

        return flattened
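A sketch of serialize() on a hypothetical concrete subclass (class and column names invented; no database access is needed since serialize() only reads the instance's column attributes):

from sqlalchemy import Column, Integer, String

class State(Entity):
    __tablename__ = 'states'
    id = Column(Integer, primary_key=True)
    name = Column(String(32))

# State(id=1, name='Acre').serialize() returns roughly
# OrderedMap({'id': 1, 'name': 'Acre'})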
Example #18
def view(name, schema, metadata, selectable):
    """
    Create a view for the given select. A table is returned which can be
    used to query the view.
    """
    # a temporary MetaData object is used to avoid that this table is actually
    # created
    t = Table(name, MetaData(), schema=schema)

    for c in selectable.c:
        t.append_column(Column(c.name, c.type, primary_key=c.primary_key))

    return t
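Note that the returned Table only lets SQLAlchemy query the view; the view itself still has to exist in the database. A self-contained usage sketch (SQLite, invented names, SQLAlchemy 1.x API as in the examples above):

from sqlalchemy import (Column, Integer, MetaData, String, Table,
                        create_engine, select, text)

metadata = MetaData()
users = Table('users', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String))
engine = create_engine('sqlite:///:memory:')
metadata.create_all(engine)

selectable = select([users.c.id, users.c.name])
users_v = view('users_v', None, metadata, selectable)  # the helper above

with engine.connect() as conn:
    # view() only built a Table stand-in; create the actual view here.
    conn.execute(text('CREATE VIEW users_v AS ' + str(selectable)))
    conn.execute(users.insert().values(id=1, name='a'))
    print(conn.execute(select([users_v.c.name])).fetchall())  # [('a',)]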
Example #19
    def test_to_sql(self, engine, conn):
        table_name = 'to_sql_{0}'.format(str(uuid.uuid4()).replace('-', ''))
        df = pd.DataFrame({'a': [1, 2, 3, 4, 5]})
        df.to_sql(table_name,
                  engine,
                  schema=SCHEMA,
                  index=False,
                  if_exists='replace',
                  method='multi')

        table = Table(table_name, MetaData(bind=engine), autoload=True)
        rows = table.select().execute().fetchall()
        self.assertEqual(sorted(rows), [(1, ), (2, ), (3, ), (4, ), (5, )])
Example #20
 def __init__(self,
              firstdb,
              seconddb,
              chunk_size=10000,
              count_only=False,
              full_data=False,
              threads=0,
              thread_number=0):
     firstsession, firstengine = make_session(firstdb)
     secondsession, secondengine = make_session(seconddb)
     self.firstsession = firstsession
     self.firstengine = firstengine
     self.secondsession = secondsession
     self.secondengine = secondengine
     self.firstmeta = MetaData(bind=firstengine)
     self.secondmeta = MetaData(bind=secondengine)
     self.firstinspector = inspect(firstengine)
     self.secondinspector = inspect(secondengine)
     self.chunk_size = int(chunk_size)
     self.count_only = count_only
     self.full_data = full_data
     self.threads = threads
     self.thread_number = thread_number
Example #21
    def reflect_table(self, table_name, bind_key=None):
        with self._reflect_lock:
            if table_name in self._tables:
                return self._tables[table_name]

            engine = self.get_engine(bind_key)
            meta = MetaData(bind=engine)
            meta.reflect(only=[table_name])

            table = meta.tables[table_name]
            table.metadata = None
            self._tables[table_name] = table

            return table
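Assuming this method lives on a database-manager class that also provides get_engine() and the _reflect_lock (not shown here), repeated lookups are served from the cache; hypothetical usage:

table = db.reflect_table('users')            # reflects from the database once
assert db.reflect_table('users') is table    # second call hits the cache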
Example #22
    def test_if_not_exists_create_table(self):
        expected_result = re.sub(
            r'\s+', '',
            "CREATE TABLE IF NOT EXISTS testtable (col1 INTEGER)").strip()

        engine = create_engine('redshift+psycopg2://')
        table_model = Table("testtable", MetaData(), Column("col1", Integer))
        ddl_statement = re.sub(
            r'\s+', '',
            str(
                CreateTable(table_model).compile(
                    engine, compile_kwargs={"if_not_exists": True})).strip())

        self.assertEqual(expected_result, ddl_statement)
Example #23
def nontest_update_from_main_model_instance():
    app = create_test_app()
    update_aux_db_engine_discovery_map(app,
                                       force_update=True,
                                       aux_db_engine_name_prefix='test_aux_')
    engine = get_aux_helpers_engine(app)
    metadata = MetaData()
    metadata.create_all(engine)
    session = get_aux_helpers_session(app)
    create_tables([TxHelperModel], engine, recreate_if_exists=True)
    session.query(TxHelperModel).all()

    sm = AuxSessionManager(app=app)
    seq_num = 1
    bm_session = sm.get(seq_num)
    bm_engine = sm.get_engine(seq_num)
    create_tables([TxModel], bm_engine, recreate_if_exists=True)

    len_bm_first = len(TxModel.query.with_session(bm_session).all())
    assert len_bm_first == 0
    block = Block(b64decode_hashes=True, from_detailed_dict=d3[0])
    BlockModel.update_from_block(block, session=bm_session)
    assert len(
        BlockModel.query.with_session(bm_session).all()) == len_bm_first + 1

    bm_instance = BlockModel.query.with_session(bm_session).all()[0]

    len_first = len(BlockHelperModel.query.with_session(session).all())
    assert len_first == 0
    BlockHelperModel.update_from_block_model_instance(
        bm_instance,
        session=session,
        block_model_session=bm_session,
        seq_num=seq_num)
    num_of_bm_fields = len(BlockModel.__table__.columns)
    assert len(
        BlockHelperModel.query.with_session(session).filter_by(
            seq_num=seq_num).all()) == len_first + num_of_bm_fields - 1

    BlockHelperModel.query.with_session(session).filter_by(
        seq_num=seq_num, block_id=bm_instance.id).delete()
    BlockHelperModel.update_from_block_model_instance(
        bm_instance,
        session=session,
        block_model_session=bm_session,
        seq_num=seq_num)
    assert len(
        BlockHelperModel.query.with_session(session).filter_by(
            seq_num=seq_num).all()) == len_first + num_of_bm_fields - 1
Example #24
def connection(sync_engine):
    with sync_engine.connect() as conn:
        metadata = MetaData()
        Table("table", metadata, Column("column1", Integer, primary_key=True))
        Table("table2", metadata, Column("fk_column", ForeignKey("table.column1")))
        if conn.dialect.name != "sqlite":
            conn.execute(CreateSchema("altschema"))
            Table("table3", metadata, Column("fk_column", Integer), schema="altschema")

        metadata.create_all(conn)

        yield conn

        if conn.dialect.name != "sqlite":
            metadata.drop_all(conn)
            conn.execute(DropSchema("altschema"))
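This is presumably a pytest fixture (the @pytest.fixture decorator is not shown); a test would receive the connection by parameter name, as in this hypothetical check:

from sqlalchemy import inspect

def test_tables_created(connection):
    insp = inspect(connection)
    assert 'table' in insp.get_table_names()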
Example #25
def dump_orm_object_as_insert_sql(engine: Engine, obj: object,
                                  fileobj: TextIO) -> None:
    """
    Takes a SQLAlchemy ORM object, and writes ``INSERT`` SQL to replicate it
    to the output file-like object.

    Args:
        engine: SQLAlchemy :class:`Engine`
        obj: SQLAlchemy ORM object to write
        fileobj: file-like object to write to
    """
    # literal_query = make_literal_query_fn(engine.dialect)
    insp = inspect(obj)
    # insp: an InstanceState
    # http://docs.sqlalchemy.org/en/latest/orm/internals.html#sqlalchemy.orm.state.InstanceState  # noqa
    # insp.mapper: a Mapper
    # http://docs.sqlalchemy.org/en/latest/orm/mapping_api.html#sqlalchemy.orm.mapper.Mapper  # noqa

    # Don't do this:
    #   table = insp.mapper.mapped_table
    # Do this instead. The method above gives you fancy data types like list
    # and Arrow on the Python side. We want the bog-standard datatypes drawn
    # from the database itself.
    meta = MetaData(bind=engine)
    table_name = insp.mapper.mapped_table.name
    # log.debug("table_name: {}", table_name)
    table = Table(table_name, meta, autoload=True)
    # log.debug("table: {}", table)

    # NewRecord = quick_mapper(table)
    # columns = table.columns.keys()
    query = select(table.columns)
    # log.debug("query: {}", query)
    for orm_pkcol in insp.mapper.primary_key:
        core_pkcol = table.columns.get(orm_pkcol.name)
        pkval = getattr(obj, orm_pkcol.name)
        query = query.where(core_pkcol == pkval)
    # log.debug("query: {}", query)
    cursor = engine.execute(query)
    row = cursor.fetchone()  # should only be one...
    row_dict = dict(row)
    # log.debug("obj: {}", obj)
    # log.debug("row_dict: {}", row_dict)
    statement = table.insert(values=row_dict)
    # insert_str = literal_query(statement)
    insert_str = get_literal_query(statement, bind=engine)
    writeline_nl(fileobj, insert_str)
Example #26
def test_vector_greater_than():
    metadata = MetaData()
    table = Table('people', metadata,
                  Column('id', Integer, primary_key=True), 
                  Column('first_name', String),
                  Column('middle_name', String),
                  Column('last_name', String),
                  Column('blood_type', String))
    def random_person(idx):
        first = names.get_first_name()
        last = names.get_last_name()
        middle = random.choice([names.get_first_name, names.get_last_name, lambda: None])()
        blood_type = random.choice(['A', 'A', 'B', 'B', 'O', 'O', 'O', 'O', 'AB'])
        return {'id': idx,
                'first_name': first,
                'middle_name': middle,
                'last_name': last,
                'blood_type': blood_type
                }
    engine = create_engine('sqlite:///:memory:', echo=False)
    metadata.create_all(engine)
    def compare_results(compa, cols, vals):
        vals = tuple(map(none_to_minus_inf, vals))
        res = set([row['id'] for row in engine.execute(select(table.columns).where(compa))])
        all_ = [valmap(none_to_minus_inf, row) for row in engine.execute(select(table.columns))]
        cor = set()
        for row in all_:
            if tuple(row[col.name] for col in cols) > vals:
                cor.add(row['id'])
        assert_equal(res, cor)
    
    for i in range(1000):
        engine.execute(table.insert(random_person(i)))
    
    
    col_tuples = [(table.columns['id'],),
                  (table.columns['blood_type'], table.columns['id']),
                  (table.columns['blood_type'], table.columns['middle_name'], table.columns['id']),
                  (table.columns['blood_type'], table.columns['id'], table.columns['middle_name']),
                  (table.columns['middle_name'], table.columns['blood_type'], table.columns['id']),]
    val_tuples = [(5,),
                  ('AB', 500),
                  ('B', None, 500),
                  ('B', 500, None),
                  (None, 'B', 500)]
    for cols, vals in zip(col_tuples, val_tuples):
        compare_results(vector_greater_than(cols, vals), cols, vals)
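vector_greater_than itself is not shown above; a minimal sketch of the lexicographic ("row value") comparison it presumably builds, ignoring the NULL-as-minus-infinity handling the test exercises:

from sqlalchemy import and_, or_

def vector_greater_than(cols, vals):
    # (c0 > v0) OR (c0 = v0 AND the rest of the tuple is greater)
    head_col, *rest_cols = cols
    head_val, *rest_vals = vals
    if not rest_cols:
        return head_col > head_val
    return or_(head_col > head_val,
               and_(head_col == head_val,
                    vector_greater_than(rest_cols, rest_vals)))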
Example #27
 def test_reflect_select(self, engine, conn):
     one_row_complex = Table("one_row_complex",
                             MetaData(bind=engine),
                             autoload=True)
     self.assertEqual(len(one_row_complex.c), 15)
     self.assertIsInstance(one_row_complex.c.col_string, Column)
     rows = one_row_complex.select().execute().fetchall()
     self.assertEqual(len(rows), 1)
     self.assertEqual(
         list(rows[0]),
         [
             True,
             127,
             32767,
             2147483647,
             9223372036854775807,
             0.5,
             0.25,
             "a string",
             datetime(2017, 1, 1, 0, 0, 0),
             date(2017, 1, 2),
             b"123",
             "[1, 2]",
             "{1=2, 3=4}",
             "{a=1, b=2}",
             Decimal("0.1"),
         ],
     )
     self.assertIsInstance(one_row_complex.c.col_boolean.type, BOOLEAN)
     self.assertIsInstance(one_row_complex.c.col_tinyint.type, INTEGER)
     self.assertIsInstance(one_row_complex.c.col_smallint.type, INTEGER)
     self.assertIsInstance(one_row_complex.c.col_int.type, INTEGER)
     self.assertIsInstance(one_row_complex.c.col_bigint.type, BIGINT)
     self.assertIsInstance(one_row_complex.c.col_float.type, FLOAT)
     self.assertIsInstance(one_row_complex.c.col_double.type, FLOAT)
     self.assertIsInstance(one_row_complex.c.col_string.type,
                           type(STRINGTYPE))
     self.assertIsInstance(one_row_complex.c.col_timestamp.type, TIMESTAMP)
     self.assertIsInstance(one_row_complex.c.col_date.type, DATE)
     self.assertIsInstance(one_row_complex.c.col_binary.type, BINARY)
     self.assertIsInstance(one_row_complex.c.col_array.type,
                           type(STRINGTYPE))
     self.assertIsInstance(one_row_complex.c.col_map.type, type(STRINGTYPE))
     self.assertIsInstance(one_row_complex.c.col_struct.type,
                           type(STRINGTYPE))
     self.assertIsInstance(one_row_complex.c.col_decimal.type, DECIMAL)
Example #28
 def test_reflect_table_include_columns(self, engine, connection):
     one_row_complex = Table('one_row_complex', MetaData(bind=engine))
     version = tuple(int(v) for v in sqlalchemy.__version__.split('.')[:2])
     if version <= (1, 2):
         engine.dialect.reflecttable(connection, one_row_complex,
                                     include_columns=['col_int'],
                                     exclude_columns=[])
     else:
         # https://docs.sqlalchemy.org/en/13/changelog/changelog_13.html#
         # change-64ac776996da1a5c3e3460b4c0f0b257
         engine.dialect.reflecttable(connection, one_row_complex,
                                     include_columns=['col_int'],
                                     exclude_columns=[],
                                     resolve_fks=True)
     self.assertEqual(len(one_row_complex.c), 1)
     self.assertIsNotNone(one_row_complex.c.col_int)
     self.assertRaises(AttributeError, lambda: one_row_complex.c.col_tinyint)
Example #29
def athena():
    # from pyathenajdbc import connect
    # conn_str = 'awsathena+jdbc://{}:{}@athena.{}.amazonaws.com:443/{}?s3_staging_dir={}'.format(
    #     current_app.config['S3_KEY'],
    #     current_app.config['S3_SECRET'],
    #     'us-west-2',
    #     'default',
    #     's3://aws-athena-query-results-upw/')
    # conn = connect(conn_str)
    # try:
    #     with conn.cursor() as cursor:
    #         cursor.execute("""
    #             SELECT * FROM one_row
    #             """)
    #         print(cursor.description)
    #         print(cursor.fetchall())
    # finally:
    #     conn.close()
    res = ""
    import contextlib
    from urllib.parse import quote_plus  # PY2: from urllib import quote_plus
    from sqlalchemy.engine import create_engine
    from sqlalchemy.sql.expression import select
    from sqlalchemy.sql.functions import func
    from sqlalchemy.sql.schema import Table, MetaData

    conn_str = ('awsathena+jdbc://{access_key}:{secret_key}@athena.{region_name}'
                '.amazonaws.com:443/{schema_name}?s3_staging_dir={s3_staging_dir}')
    engine = create_engine(
        conn_str.format(
            access_key=quote_plus(current_app.config['S3_KEY']),
            secret_key=quote_plus(current_app.config['S3_SECRET']),
            region_name='us-west-2',
            schema_name='uploadwizard',
            s3_staging_dir=quote_plus('s3://aws-athena-query-results-upw/')))
    try:
        with contextlib.closing(engine.connect()) as conn:
            many_rows = Table('file', MetaData(bind=engine), autoload=True)
            rs = select([many_rows.c.manufacturerpartid]).execute()
            res = ""
            for row in rs:
                res += str(row) + "\n<br/>"
    finally:
        engine.dispose()
    return res
Example #30
    def __init__(self, engine, db=None, sch=None, vws=False):
        self.eng = engine
        self.path = tb.P(self.eng.url.database)
        self.con = self.eng.connect()
        self.ses = sessionmaker()(bind=self.eng)  # ORM style
        self.db = db
        self.sch = sch
        self.vws = vws

        self.insp = None
        self.meta = MetaData()
        self.schema = None
        self.tables = None
        self.views = None
        self.sch_tab = None
        self.sch_vws = None
        self.refresh()