Example #1
    def test_mol_from_ctab(self):

        rs = engine.execute(select([ func.is_valid_ctab('xyz') ]))
        self.assertFalse(rs.fetchall()[0][0])

        ctab = '''chiral1.mol
  ChemDraw04200416412D

  5  4  0  0  0  0  0  0  0  0999 V2000
   -0.0141    0.0553    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
    0.8109    0.0553    0.0000 F   0  0  0  0  0  0  0  0  0  0  0  0
   -0.4266    0.7697    0.0000 Br  0  0  0  0  0  0  0  0  0  0  0  0
   -0.0141   -0.7697    0.0000 Cl  0  0  0  0  0  0  0  0  0  0  0  0
   -0.8109   -0.1583    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
  1  2  1  0
  1  3  1  0
  1  4  1  1
  1  5  1  0
M  END'''

        rs = engine.execute(select([ func.is_valid_ctab(ctab) ]))
        self.assertTrue(rs.fetchall()[0][0])

        rs = engine.execute(select([ func.mol_from_ctab(ctab) ]))
        self.assertIsInstance(rs.fetchall()[0][0], Chem.Mol)
Example #2
def _queryNewsVolumes(aggStartDatetime, aggStopDatetime):
  """ Query the database for the counts of security releases+headlines for each
  company that were detected during the specified time window.

  :param aggStartDatetime: inclusive start of aggregation interval as
    UTC datetime
  :param aggStopDatetime: non-inclusive upper bound of aggregation interval as
    UTC datetime
  :returns: a sparse sequence of two-tuples: (symbol, count); companies that
    have no detected news in the given aggregation period will be absent from
    the result.
  """
  headlineSel = sql.select(
    [schema.xigniteSecurityHeadline.c.symbol.label("symbol")]
    ).where(
      (schema.xigniteSecurityHeadline.c.discovered_at >= aggStartDatetime) &
      (schema.xigniteSecurityHeadline.c.discovered_at < aggStopDatetime))

  releaseSel = sql.select(
    [schema.xigniteSecurityRelease.c.symbol]
    ).where(
      (schema.xigniteSecurityRelease.c.discovered_at >= aggStartDatetime) &
      (schema.xigniteSecurityRelease.c.discovered_at < aggStopDatetime))

  allNewsUnion = sql.union_all(headlineSel, releaseSel)

  aggSel = sql.select(
    ["symbol", sql.func.count("symbol").label("sym_count")]
    ).select_from(allNewsUnion.alias("union_of_tables")
    ).group_by("symbol")

  return collectorsdb.engineFactory().execute(aggSel).fetchall()
Example #3
 def test_with_transaction(self):
     conn = self.engine.connect()
     trans = conn.begin()
     eq_(conn.execute(select([1])).scalar(), 1)
     assert not conn.closed
     self.engine.test_shutdown()
     _assert_invalidated(conn.execute, select([1]))
     assert not conn.closed
     assert conn.invalidated
     assert trans.is_active
     assert_raises_message(
         tsa.exc.StatementError,
         "Can't reconnect until invalid transaction is rolled back",
         conn.execute, select([1]))
     assert trans.is_active
     assert_raises_message(
         tsa.exc.InvalidRequestError,
         "Can't reconnect until invalid transaction is rolled back",
         trans.commit
     )
     assert trans.is_active
     trans.rollback()
     assert not trans.is_active
     assert conn.invalidated
     eq_(conn.execute(select([1])).scalar(), 1)
     assert not conn.invalidated
Example #4
    def whereInEquipement(self, fullQueryJoin, criteria):
        sensorObj = list(filter(lambda x: 'FK_Sensor' == x['Column'], criteria))[0]
        sensor = sensorObj['Value']

        table = Base.metadata.tables['MonitoredSiteEquipment']
        joinTable = outerjoin(table, Sensor, table.c['FK_Sensor'] == Sensor.ID)

        if sensorObj['Operator'].lower() in ['is', 'is not'] and sensorObj['Value'].lower() == 'null':
            subSelect = select([table.c['FK_MonitoredSite']]
                ).select_from(joinTable).where(
                and_(MonitoredSite.ID == table.c['FK_MonitoredSite'],
                     or_(table.c['EndDate'] >= func.now(), table.c['EndDate'] == None)))
            if sensorObj['Operator'].lower() == 'is':
                fullQueryJoin = fullQueryJoin.where(~exists(subSelect))
            else:
                fullQueryJoin = fullQueryJoin.where(exists(subSelect))
        else:
            subSelect = select([table.c['FK_MonitoredSite']]
                ).select_from(joinTable).where(
                and_(MonitoredSite.ID == table.c['FK_MonitoredSite'],
                     and_(eval_.eval_binary_expr(Sensor.UnicIdentifier, sensorObj['Operator'], sensor),
                          or_(table.c['EndDate'] >= func.now(), table.c['EndDate'] == None))))
            fullQueryJoin = fullQueryJoin.where(exists(subSelect))
        return fullQueryJoin
Example #5
    def test_basic(self):
        for threadlocal in False, True:
            engine = engines.reconnecting_engine(
                options={'pool_threadlocal': threadlocal})

            conn = engine.contextual_connect()
            eq_(conn.execute(select([1])).scalar(), 1)
            conn.close()

            # set the pool recycle down to 1.
            # we aren't doing this inline with the
            # engine create since cx_oracle takes way
            # too long to create the 1st connection and we don't
            # want to build a huge delay into this test.

            engine.pool._recycle = 1

            # kill the DB connection
            engine.test_shutdown()

            # wait until past the recycle period
            time.sleep(2)

            # can connect, no exception
            conn = engine.contextual_connect()
            eq_(conn.execute(select([1])).scalar(), 1)
            conn.close()
Example #6
    def test_conn_reusable(self):
        conn = self.db.connect()

        conn.execute(select([1]))

        eq_(
            self.dbapi.connect.mock_calls,
            [self.mock_connect]
        )

        self.dbapi.shutdown()

        assert_raises(
            tsa.exc.DBAPIError,
            conn.execute, select([1])
        )

        assert not conn.closed
        assert conn.invalidated

        eq_(
            [c.close.mock_calls for c in self.dbapi.connections],
            [[call()]]
        )

        # test reconnects
        conn.execute(select([1]))
        assert not conn.invalidated

        eq_(
            [c.close.mock_calls for c in self.dbapi.connections],
            [[call()], []]
        )
Example #7
    def test_reconnect(self):
        conn = self.engine.connect()

        eq_(conn.execute(select([1])).scalar(), 1)
        assert not conn.closed

        self.engine.test_shutdown()

        _assert_invalidated(conn.execute, select([1]))

        assert not conn.closed
        assert conn.invalidated

        assert conn.invalidated
        eq_(conn.execute(select([1])).scalar(), 1)
        assert not conn.invalidated

        # one more time
        self.engine.test_shutdown()
        _assert_invalidated(conn.execute, select([1]))

        assert conn.invalidated
        eq_(conn.execute(select([1])).scalar(), 1)
        assert not conn.invalidated

        conn.close()
Example #8
def dat_crime(fpath=None):
    # Step Zero: Create dat_crime table
    raw_crime(fpath=fpath)
    dedupe_crime()
    src_crime()
    src_crime_table = Table('src_chicago_crimes_all', Base.metadata, 
        autoload=True, autoload_with=engine, extend_existing=True)
    dat_crime_table = crime_table('dat_chicago_crimes_all', Base.metadata)
    dat_crime_table.append_column(Column('chicago_crimes_all_row_id', Integer, primary_key=True))
    dat_crime_table.append_column(Column('start_date', TIMESTAMP, server_default=text('CURRENT_TIMESTAMP')))
    dat_crime_table.append_column(Column('end_date', TIMESTAMP, server_default=text('NULL')))
    dat_crime_table.append_column(Column('current_flag', Boolean, server_default=text('TRUE')))
    dat_crime_table.append_constraint(UniqueConstraint('id', 'start_date'))
    dat_crime_table.create(bind=engine, checkfirst=True)
    new_cols = ['start_date', 'end_date', 'current_flag', 'chicago_crimes_all_row_id']
    dat_ins = dat_crime_table.insert()\
        .from_select(
            [c for c in dat_crime_table.columns.keys() if c not in new_cols],
            select([c for c in src_crime_table.columns])
        )
    conn = engine.contextual_connect()
    res = conn.execute(dat_ins)
    cols = crime_master_cols(dat_crime_table)
    master_ins = MasterTable.insert()\
        .from_select(
            [c for c in MasterTable.columns.keys() if c != 'master_row_id'],
            select(cols)\
                .select_from(dat_crime_table)
        )
    conn = engine.contextual_connect()
    res = conn.execute(master_ins)
    return 'DAT crime created'
Example #9
        def verify_thd(conn):
            metadata = sa.MetaData()
            metadata.bind = conn

            builders = sautils.Table('builders', metadata, autoload=True)
            q = sa.select([builders])
            num_rows = 0
            for row in conn.execute(q):
                self.assertEqual(
                    row, (1, u'bname', u'description', u'dontcare'))
                num_rows += 1
            self.assertEqual(num_rows, 1)

            tags = sautils.Table('tags', metadata, autoload=True)
            builders_tags = sautils.Table('builders_tags', metadata,
                                          autoload=True)

            q = sa.select([tags.c.id, tags.c.name,
                           tags.c.name_hash])
            self.assertEqual(conn.execute(q).fetchall(), [])

            q = sa.select([builders_tags.c.id,
                           builders_tags.c.builderid,
                           builders_tags.c.tagid])
            self.assertEqual(conn.execute(q).fetchall(), [])
Example #10
 def test_union(self):
     t1 = table(
         't1', column('col1'), column('col2'),
         column('col3'), column('col4'))
     t2 = table(
         't2', column('col1'), column('col2'),
         column('col3'), column('col4'))
     s1, s2 = select(
         [t1.c.col3.label('col3'), t1.c.col4.label('col4')],
         t1.c.col2.in_(['t1col2r1', 't1col2r2'])), \
         select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
                t2.c.col2.in_(['t2col2r2', 't2col2r3']))
     u = union(s1, s2, order_by=['col3', 'col4'])
     self.assert_compile(u,
                         'SELECT t1.col3 AS col3, t1.col4 AS col4 '
                         'FROM t1 WHERE t1.col2 IN (:col2_1, '
                         ':col2_2) UNION SELECT t2.col3 AS col3, '
                         't2.col4 AS col4 FROM t2 WHERE t2.col2 IN '
                         '(:col2_3, :col2_4) ORDER BY col3, col4')
     self.assert_compile(u.alias('bar').select(),
                         'SELECT bar.col3, bar.col4 FROM (SELECT '
                         't1.col3 AS col3, t1.col4 AS col4 FROM t1 '
                         'WHERE t1.col2 IN (:col2_1, :col2_2) UNION '
                         'SELECT t2.col3 AS col3, t2.col4 AS col4 '
                         'FROM t2 WHERE t2.col2 IN (:col2_3, '
                         ':col2_4)) AS bar')
Example #11
    def test_limit_offset_with_correlated_order_by(self):
        t1 = table('t1', column('x', Integer), column('y', Integer))
        t2 = table('t2', column('x', Integer), column('y', Integer))

        order_by = select([t2.c.y]).where(t1.c.x == t2.c.x).as_scalar()
        s = select([t1]).where(t1.c.x == 5).order_by(order_by) \
            .limit(10).offset(20)

        self.assert_compile(
            s,
            "SELECT anon_1.x, anon_1.y "
            "FROM (SELECT t1.x AS x, t1.y AS y, "
            "ROW_NUMBER() OVER (ORDER BY "
            "(SELECT t2.y FROM t2 WHERE t1.x = t2.x)"
            ") AS mssql_rn "
            "FROM t1 "
            "WHERE t1.x = :x_1) AS anon_1 "
            "WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
            checkparams={'param_1': 20, 'param_2': 10, 'x_1': 5}
        )

        c = s.compile(dialect=mssql.MSDialect())
        eq_(len(c._result_columns), 2)
        assert t1.c.x in set(c._create_result_map()['x'][1])
        assert t1.c.y in set(c._create_result_map()['y'][1])
Example #12
def _propagate_new_wf_comments(cycle_task_ids):
  """Special handler for comment propagation."""
  if not cycle_task_ids:
    return
  acl_table = all_models.AccessControlList.__table__.alias("original_acl")
  acr_table = all_models.AccessControlRole.__table__.alias("original_acr")

  ct_propagated_roles = {
      "Admin Mapped",
      "Workflow Member Mapped",
  }

  propagation_acr_ids = sa.select([acr_table.c.id]).where(
      acr_table.c.name.in_(ct_propagated_roles)
  )

  cycle_task_type = all_models.CycleTaskGroupObjectTask.__name__

  cycle_task_acl_ids = sa.select([acl_table.c.id]).where(
      sa.and_(
          acl_table.c.object_id.in_(cycle_task_ids),
          acl_table.c.object_type == cycle_task_type,
          acl_table.c.ac_role_id.in_(propagation_acr_ids),
      )
  )

  _propagate_to_cte(cycle_task_acl_ids)
Example #13
    def test_strict_binds(self):
        """test the 'strict' compiler binds."""

        from sqlalchemy.dialects.mssql.base import MSSQLStrictCompiler
        mxodbc_dialect = mxodbc.dialect()
        mxodbc_dialect.statement_compiler = MSSQLStrictCompiler

        t = table('sometable', column('foo'))

        for expr, compile in [
            (
                select([literal("x"), literal("y")]),
                "SELECT 'x' AS anon_1, 'y' AS anon_2",
            ),
            (
                select([t]).where(t.c.foo.in_(['x', 'y', 'z'])),
                "SELECT sometable.foo FROM sometable WHERE sometable.foo "
                "IN ('x', 'y', 'z')",
            ),
            (
                t.c.foo.in_([None]),
                "sometable.foo IN (NULL)"
            )
        ]:
            self.assert_compile(expr, compile, dialect=mxodbc_dialect)
Example #14
    def verify_thd(self, conn):
        "verify the contents of the db - run in a thread"
        # note that this will all change if we re-create the tarball!
        model = self.db.model

        r = conn.execute(
            sa.select([model.changes], order_by=model.changes.c.changeid))
        ch = r.fetchone()
        self.failUnlessEqual(ch.changeid, 1)
        self.failUnlessEqual(ch.author, u'the snowman <\N{SNOWMAN}@norpole.net>')
        self.failUnlessEqual(ch.comments, u'shooting star or \N{COMET}?')
        self.failUnlessEqual(ch.revision, u'\N{BLACK STAR}-devel')
        self.failUnlessEqual(ch.branch, u'\N{COMET}')
        ch = r.fetchone()
        self.failUnlessEqual(ch.changeid, 2)
        self.failUnlessEqual(ch.author, u"dustin <*****@*****.**>")
        self.failUnlessEqual(ch.comments, u'on-branch change')
        self.failUnlessEqual(ch.revision, u'1234')
        self.failUnlessEqual(ch.branch, u'') # arguably a bug - should be None?

        r = conn.execute(
            sa.select([model.change_files]))
        # use a set to avoid depending on db collation
        filenames = set([ row.filename for row in r ])
        expected = set([
            u'boring/path',
            u'normal/path',
            u'\N{BLACK STAR}/funny_chars/in/a/path',
        ])
        self.failUnlessEqual(filenames, expected)
Example #15
    def verify_thd(self, conn):
        "partially verify the contents of the db - run in a thread"
        model = self.db.model

        # this is a big db, so we only spot-check things -- hopefully any errors
        # will occur on the import
        r = conn.execute(
            sa.select([model.changes],
            whereclause=model.changes.c.changeid == 70))
        ch = r.fetchone()
        self.failUnlessEqual(ch.changeid, 70)
        self.failUnlessEqual(ch.author, u'Jakub Vysoky <*****@*****.**>')
        self.failUnlessEqual(ch.comments, u'some failing tests in check_downgrade and metapackage_version')
        self.failUnlessEqual(ch.revision, u'2ce0c33b7e10cce98e8d9c5b734b8c133ee4d320')
        self.failUnlessEqual(ch.branch, u'master')

        r = conn.execute(
            sa.select([model.change_files.c.filename],
            whereclause=model.change_files.c.changeid == 70))
        self.assertEqual(r.scalar(), 'tests/test_debian.py')

        r = conn.execute(
            sa.select([model.changes],
            whereclause=model.changes.c.changeid == 77))
        ch = r.fetchone()
        self.failUnlessEqual(ch.changeid, 77)
        self.failUnlessEqual(ch.author, u'BuildBot')
        self.failUnlessEqual(ch.comments, u'Dependency changed, sending dummy commit')
        self.failUnlessEqual(ch.revision, u'HEAD')
        self.failUnlessEqual(ch.branch, u'master')

        r = conn.execute(
            sa.select([model.change_files.c.filename],
            whereclause=model.change_files.c.changeid == 77))
        self.assertEqual(r.scalar(), 'CHANGELOG')
Example #16
def merge_data(args):
    engine = create_engine(args.postgres)
    Base.metadata.bind = engine
    Base.metadata.create_all(bind=engine)
    DBSession = sessionmaker()
    DBSession.bind = engine

    s = DBSession()
    ctes = []
    raw_data = RawData.__table__
    for filename in args.filenames:
        cte = sa.select([raw_data.c.data]).where(
            sa.and_(
                raw_data.c.file_name == filename,
                raw_data.c.connector == args.connector,
                raw_data.c.version == args.connector_version
            )
        ).cte(filename)
        ctes.append(cte)
    selected_cols = ctes[0].c.data
    for cte in ctes[1:]:
        selected_cols = selected_cols + cte.c.data
    select_from = ctes[0].join(
        ctes[1], ctes[0].c.data['Country_Code'] == ctes[1].c.data['Country_Code']
    )
    merge_stmt = sa.select([selected_cols]).select_from(select_from)
    rs = s.execute(merge_stmt)
    for rst in rs:
        merged_data = MergedData(
            data=rst[0], connector=args.connector,
            version=args.connector_version
        )
        s.add(merged_data)
    s.commit()
    s.close()
Example #17
    def execute(self, metadata, connection, filter_values):
        try:
            table = metadata.tables[self.table_name]
        except KeyError:
            raise TableNotFoundException("Unable to query table, table not found: %s" % self.table_name)

        asha_table = self.get_asha_table(metadata)

        max_date_query = sqlalchemy.select([
            sqlalchemy.func.max(asha_table.c.date).label('date'),
            asha_table.c.case_id.label('case_id')
        ])

        if self.filters:
            for filter in self.filters:
                max_date_query.append_whereclause(filter.build_expression(table))

        max_date_query.append_group_by(
            asha_table.c.case_id
        )

        max_date_subquery = sqlalchemy.alias(max_date_query, 'max_date')

        checklist_query = sqlalchemy.select()
        for column in self.columns:
            checklist_query.append_column(column.build_column(asha_table))

        checklist_query = checklist_query.where(
            asha_table.c.case_id == max_date_subquery.c.case_id
        ).where(
            asha_table.c.date == max_date_subquery.c.date
        )

        return connection.execute(checklist_query, **filter_values).fetchall()
Example #18
    def test_functions_with_cols(self):
        users = table(
            'users',
            column('id'),
            column('name'),
            column('fullname'))
        calculate = select([column('q'), column('z'), column('r')], from_obj=[
                           func.calculate(
                               bindparam('x', None), bindparam('y', None)
                           )])

        self.assert_compile(select([users], users.c.id > calculate.c.z),
                            "SELECT users.id, users.name, users.fullname "
                            "FROM users, (SELECT q, z, r "
                            "FROM calculate(:x, :y)) "
                            "WHERE users.id > z"
                            )

        s = select([users], users.c.id.between(
            calculate.alias('c1').unique_params(x=17, y=45).c.z,
            calculate.alias('c2').unique_params(x=5, y=12).c.z))

        self.assert_compile(
            s, "SELECT users.id, users.name, users.fullname "
            "FROM users, (SELECT q, z, r "
            "FROM calculate(:x_1, :y_1)) AS c1, (SELECT q, z, r "
            "FROM calculate(:x_2, :y_2)) AS c2 "
            "WHERE users.id BETWEEN c1.z AND c2.z", checkparams={
                'y_1': 45, 'x_1': 17, 'y_2': 12, 'x_2': 5})
Example #19
    def test_joins(self):
        region = self.con.table("tpch_region")
        nation = self.con.table("tpch_nation")

        rt = self._to_sqla(region).alias("t0")
        nt = self._to_sqla(nation).alias("t1")

        ipred = region.r_regionkey == nation.n_regionkey
        spred = rt.c.r_regionkey == nt.c.n_regionkey

        fully_mat_joins = [
            (region.inner_join(nation, ipred), rt.join(nt, spred)),
            (region.left_join(nation, ipred), rt.join(nt, spred, isouter=True)),
            (region.outer_join(nation, ipred), rt.outerjoin(nt, spred)),
        ]
        for ibis_joined, joined_sqla in fully_mat_joins:
            expected = sa.select(["*"]).select_from(joined_sqla)
            self._compare_sqla(ibis_joined, expected)

        subselect_joins = [
            (region.inner_join(nation, ipred).projection(nation), rt.join(nt, spred)),
            (region.left_join(nation, ipred).projection(nation), rt.join(nt, spred, isouter=True)),
            (region.outer_join(nation, ipred).projection(nation), rt.outerjoin(nt, spred)),
        ]
        for ibis_joined, joined_sqla in subselect_joins:
            expected = sa.select([nt]).select_from(joined_sqla)
            self._compare_sqla(ibis_joined, expected)
Example #20
    def _index_query(self, obj):
        """
        Returns the query needed for fetching the index of this record relative
        to version history.
        """
        alias = sa.orm.aliased(obj)

        subquery = (
            sa.select([sa.func.count('1')], from_obj=[alias.__table__])
            .where(
                getattr(alias, tx_column_name(obj))
                <
                getattr(obj, tx_column_name(obj))
            )
            .correlate(alias.__table__)
            .label('position')
        )
        query = (
            sa.select([subquery], from_obj=[obj.__table__])
            .where(sa.and_(*self._pk_correlation_condition(obj, False)))
            .order_by(
                getattr(obj.__class__, tx_column_name(obj))
            )
        )
        return query
Example #21
        def thddeleteOldLogs(conn):
            model = self.db.model
            res = conn.execute(sa.select([sa.func.count(model.logchunks.c.content)]))
            count1 = res.fetchone()[0]
            res.close()

            # mark logs older than the timestamp as type 'd' (deleted);
            # we do this first to avoid a UI discrepancy
            res = conn.execute(
                model.logs.update()
                .where(model.logs.c.stepid.in_(
                    sa.select([model.steps.c.id])
                    .where(model.steps.c.started_at < older_than_timestamp)))
                .values(type='d')
            )
            res.close()

            # query all logs with type 'd' and delete their chunks.
            q = sa.select([model.logs.c.id])
            q = q.select_from(model.logs)
            q = q.where(model.logs.c.type == 'd')

            # delete their logchunks
            res = conn.execute(
                model.logchunks.delete()
                .where(model.logchunks.c.logid.in_(q))
            )
            res.close()
            res = conn.execute(sa.select([sa.func.count(model.logchunks.c.content)]))
            count2 = res.fetchone()[0]
            res.close()
            return count1 - count2
Example #22
    def test_conn_reusable(self):
        conn = db.connect()

        conn.execute(select([1]))

        assert len(dbapi.connections) == 1

        dbapi.shutdown()

        # raises error
        try:
            conn.execute(select([1]))
            assert False
        except tsa.exc.DBAPIError:
            pass

        assert not conn.closed
        assert conn.invalidated

        # ensure all connections closed (pool was recycled)
        gc_collect()
        assert len(dbapi.connections) == 0

        # test reconnects
        conn.execute(select([1]))
        assert not conn.invalidated
        assert len(dbapi.connections) == 1
Example #23
        def verify_thd(conn):
            metadata = sa.MetaData()
            metadata.bind = conn

            buildworkers = sa.Table('buildworkers',
                                    metadata, autoload=True)
            configured_buildworkers = sa.Table('configured_buildworkers',
                                               metadata, autoload=True)
            connected_buildworkers = sa.Table('connected_buildworkers',
                                              metadata, autoload=True)

            q = sa.select([buildworkers])
            self.assertEqual(map(dict, conn.execute(q).fetchall()), [
                # (the info does not get de-JSON'd due to use of autoload)
                {'id': 29, 'name': u'windows', 'info': '{}'}])

            # check that the name column was resized
            self.assertEqual(buildworkers.c.name.type.length, 50)

            q = sa.select([configured_buildworkers.c.buildermasterid,
                           configured_buildworkers.c.buildworkerid])
            self.assertEqual(conn.execute(q).fetchall(), [])

            q = sa.select([connected_buildworkers.c.masterid,
                           connected_buildworkers.c.buildworkerid])
            self.assertEqual(conn.execute(q).fetchall(), [])
Example #24
    def test_insert_from_select(self):
        table = self.tables.manual_pk
        config.db.execute(
                table.insert(),
                [
                    dict(id=1, data="data1"),
                    dict(id=2, data="data2"),
                    dict(id=3, data="data3"),
                ]
        )


        config.db.execute(
                table.insert(inline=True).
                    from_select(
                        ("id", "data",), select([table.c.id + 5, table.c.data]).where(
                                table.c.data.in_(["data2", "data3"]))
                    ),
        )

        eq_(
            config.db.execute(
                select([table.c.data]).order_by(table.c.data)
            ).fetchall(),
            [("data1", ), ("data2", ), ("data2", ),
                ("data3", ), ("data3", )]
        )
Example #25
    def test_insert_from_select_cte_follows_insert_one(self):
        dialect = default.DefaultDialect()
        dialect.cte_follows_insert = True

        table1 = self.tables.mytable

        cte = select([table1.c.name]).where(table1.c.name == "bar").cte()

        sel = select([table1.c.myid, table1.c.name]).where(
            table1.c.name == cte.c.name
        )

        ins = self.tables.myothertable.insert().from_select(
            ("otherid", "othername"), sel
        )
        self.assert_compile(
            ins,
            "INSERT INTO myothertable (otherid, othername) "
            "WITH anon_1 AS "
            "(SELECT mytable.name AS name FROM mytable "
            "WHERE mytable.name = :name_1) "
            "SELECT mytable.myid, mytable.name FROM mytable, anon_1 "
            "WHERE mytable.name = anon_1.name",
            checkparams={"name_1": "bar"},
            dialect=dialect,
        )
Example #26
    def ping_connection(self, connection, branch):
        if branch:
            # "branch" refers to a sub-connection of a connection,
            # we don't want to bother pinging on these.
            return

        try:
            # run a SELECT 1.   use a core select() so that
            # the SELECT of a scalar value without a table is
            # appropriately formatted for the backend
            connection.scalar(select([1]))
        except exc.DBAPIError as err:
            # catch SQLAlchemy's DBAPIError, which is a wrapper
            # for the DBAPI's exception.  It includes a .connection_invalidated
            # attribute which specifies if this connection is a "disconnect"
            # condition, which is based on inspection of the original exception
            # by the dialect in use.
            if err.connection_invalidated:
                # run the same SELECT again - the connection will re-validate
                # itself and establish a new connection.  The disconnect detection
                # here also causes the whole connection pool to be invalidated
                # so that all stale connections are discarded.
                connection.scalar(select([1]))
            else:
                raise
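
This is SQLAlchemy's documented pre-1.2 "pessimistic disconnect" recipe; it is normally registered as an "engine_connect" event listener. A minimal sketch of that wiring, assuming a plain module-level function rather than the method above:

from sqlalchemy import create_engine, event, exc, select

engine = create_engine("sqlite://")

@event.listens_for(engine, "engine_connect")
def ping_connection(connection, branch):
    # same logic as above: skip branched sub-connections, probe with a
    # SELECT 1, and retry once if the error was a disconnect condition
    if branch:
        return
    try:
        connection.scalar(select([1]))
    except exc.DBAPIError as err:
        if err.connection_invalidated:
            connection.scalar(select([1]))
        else:
            raise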
Example #27
def store_initial_soil_water(engine, fname_HDFstore, args):
    """retrieves the INITIAL_SOIL_WATER table from a CGMS12 database.

    If the --crop_no option is used, only the records for the given crop_no
    will be retrieved.
    """
    meta = MetaData(engine)
    tbl_isw = Table("initial_soil_water", meta, autoload=True)

    # retrieve distinct crop types from DB table
    if args.crop_no is not None:
        s = sa.select([tbl_isw.c.crop_no]).distinct()
        crops = [row[0] for row in s.execute()]
        if args.crop_no not in crops:
            print("Crop ID specified with --cropno (%s) not found in INITIAL_SOIL_WATER table! Returning..." % args.crop_no)
            sys.exit()

    # Select distinct years to iterate over
    s = sa.select([tbl_isw.c.year]).distinct()
    years = s.execute()
    dataset_name = "/initial_soil_water"
    with pd.io.pytables.HDFStore(fname_HDFstore) as store:
        for yr, in sorted(years):
            if args.crop_no:
                s = tbl_isw.select().where(sa.and_(tbl_isw.c.year == yr,
                                                   tbl_isw.c.crop_no == args.crop_no))
                print("Storing initial_soil_water for crop %i and year %i" % (args.crop_no, yr))
            else:
                s = tbl_isw.select().where(tbl_isw.c.year == yr)
                print("Storing initial_soil_water for year %i" % yr)
            df_isw = pd.read_sql(s, engine)
            if dataset_name in store:
                store.append(dataset_name, df_isw, data_columns=["grid_no", "stu_no", "crop_no", "year"])
            else:
                store.put(dataset_name, df_isw, format="table", data_columns=["grid_no", "stu_no", "crop_no", "year"])
Example #28
    def test_insert_from_select(self):
        table = self.tables.autoinc_pk
        config.db.execute(
                table.insert(),
                [
                    dict(data="data1"),
                    dict(data="data2"),
                    dict(data="data3"),
                ]
        )


        config.db.execute(
                table.insert().
                    from_select(
                        ("data",), select([table.c.data]).where(
                                table.c.data.in_(["data2", "data3"]))
                    ),
        )

        eq_(
            config.db.execute(
                select([table.c.data]).order_by(table.c.data)
            ).fetchall(),
            [("data1", ), ("data2", ), ("data2", ),
                ("data3", ), ("data3", )]
        )
Example #29
    def apply_default_value(self, column):
        if column.default:
            execute = self.table.migration.conn.execute
            val = column.default.arg
            table = self.table.migration.metadata.tables[self.table.name]
            table.append_column(column)
            cname = getattr(table.c, column.name)
            if column.default.is_callable:
                Table = self.table.migration.metadata.tables['system_model']
                Column = self.table.migration.metadata.tables['system_column']
                j1 = join(Table, Column, Table.c.name == Column.c.model)
                query = select([Column.c.name]).select_from(j1)
                query = query.where(Column.c.primary_key.is_(True))
                query = query.where(Table.c.table == self.table.name)
                columns = [x[0] for x in execute(query).fetchall()]

                query = select([func.count()]).select_from(table)
                query = query.where(cname.is_(None))
                nb_row = self.table.migration.conn.execute(query).fetchone()[0]
                for offset in range(nb_row):
                    query = select(columns).select_from(table)
                    query = query.where(cname.is_(None)).limit(1)
                    res = execute(query).fetchone()
                    where = and_(
                        *[getattr(table.c, x) == res[x] for x in columns])
                    query = update(table).where(where).values(
                        {cname: val(None)})
                    execute(query)

            else:
                query = update(table).where(cname.is_(None)).values(
                    {cname: val})
                execute(query)
Example #30
def upgrade(active_plugins=None, options=None):
    bind = op.get_bind()

    branches = list(bind.execute(
        sa.select(columns=['id', 'name', 'project_id'],
                  from_obj=sa.Table('branches', sa.MetaData()))))

    projects = list(bind.execute(
        sa.select(columns=['id'], from_obj=sa.Table('projects',
                                                    sa.MetaData()))))
    if len(projects) > 0:
        branch_dict = {}

        for branch in branches:
            branch_dict[(branch['project_id'], branch['name'])] = branch['id']

        tasks_table = table(
            'tasks',
            sa.Column('project_id', sa.Integer(), nullable=True),
            sa.Column('branch_id', sa.Integer(), nullable=True),
        )

        bind.execute(tasks_table.update().
                     where(tasks_table.c.project_id.is_(None)).
                     values(project_id=projects[0].id))

        for project in projects:
            bind.execute(
                tasks_table.update().
                where(tasks_table.c.project_id == project['id']).
                values(branch_id=branch_dict[(project['id'], "master")])
            )
Example #31
    sqlalchemy_opentracing.init_tracing(tracer)
    sqlalchemy_opentracing.register_engine(engine)

    span = tracer.start_span('create sample')

    # 1. Create a table
    metadata = MetaData()
    users = Table(
        'users',
        metadata,
        Column('id', Integer, primary_key=True),
        Column('name', String),
    )
    creat = CreateTable(users)
    sqlalchemy_opentracing.set_parent_span(creat, span)
    conn.execute(creat)

    # 2. Insert a single value.
    ins = users.insert().values(name='John Doe', id=1)
    sqlalchemy_opentracing.set_parent_span(ins, span)
    conn.execute(ins)

    # 3. Select the new value.
    sel = select([users])
    sqlalchemy_opentracing.set_parent_span(sel, span)
    print(conn.execute(sel).fetchone())

    span.finish()
    tracer.flush()
Example #32
def get_user_by_uid(uid):
    query = select([users.c.id, users.c.uid, users.c.name]) \
        .where(users.c.uid == uid)
    result = conn.execute(query)
    return result.fetchone()
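
This snippet depends on module-level `users` and `conn` objects that are not shown; a minimal sketch of the assumed setup (column types are guesses inferred from the query, only the names are known):

from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, select

engine = create_engine("sqlite://")
metadata = MetaData()
users = Table(
    'users', metadata,
    Column('id', Integer, primary_key=True),
    Column('uid', String),   # assumed type
    Column('name', String),  # assumed type
)
metadata.create_all(engine)
conn = engine.connect()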
Example #33
    def __init__(self, callback):
        super(GeographyMenu, self).__init__()
        geography_table = Geography.__table__
        geos = select([
            geography_table.c.id, geography_table.c.name,
            geography_table.c.parent_id
        ]).execute().fetchall()
        geos_hash = {}
        # TODO: i think the geo_hash should be calculated in an idle
        # function so that starting the editor isn't delayed while the
        # hash is being built
        for geo_id, name, parent_id in geos:
            try:
                geos_hash[parent_id].append((geo_id, name))
            except KeyError:
                geos_hash[parent_id] = [(geo_id, name)]

        for kids in geos_hash.values():
            kids.sort(key=itemgetter(1))  # sort by name

        def get_kids(pid):
            try:
                return geos_hash[pid]
            except KeyError:
                return []

        def has_kids(pid):
            try:
                return len(geos_hash[pid]) > 0
            except KeyError:
                return False

        def build_menu(geo_id, name):
            item = gtk.MenuItem(name)
            if not has_kids(geo_id):
                if item.get_submenu() is None:
                    item.connect('activate', callback, geo_id)
                    # self.view.connect(item, 'activate',
                    #                   self.on_activate_add_menu_item, geo_id)
                return item

            kids_added = False
            submenu = gtk.Menu()
            # removes two levels of kids with the same name, there must be a
            # better way to do this but i got tired of thinking about it
            kids = get_kids(geo_id)
            if len(kids) > 0:
                kids_added = True
            for kid_id, kid_name in kids:  # get_kids(geo_id):
                submenu.append(build_menu(kid_id, kid_name))

            if kids_added:
                sel_item = gtk.MenuItem(name)
                submenu.insert(sel_item, 0)
                submenu.insert(gtk.SeparatorMenuItem(), 1)
                item.set_submenu(submenu)
                #self.view.connect(sel_item, 'activate',callback, geo_id)
                sel_item.connect('activate', callback, geo_id)
            else:
                item.connect('activate', callback, geo_id)
            return item

        def populate():
            """
            add geography values to the menu; any top-level items that don't
            have any kids are appended to the bottom of the menu
            """
            if not geos_hash:
                # we would get here if the geography table is empty,
                # usually during a unit test
                return
            no_kids = []
            for geo_id, geo_name in geos_hash[None]:
                if geo_id not in geos_hash.keys():
                    no_kids.append((geo_id, geo_name))
                else:
                    self.append(build_menu(geo_id, geo_name))

            for geo_id, geo_name in sorted(no_kids):
                self.append(build_menu(geo_id, geo_name))

            self.show_all()

        import gobject
        gobject.idle_add(populate)
Example #34
 def get_total_row_count(self, db: Session) -> int:
     """Return the total number of rows."""
     return len(db.execute(select(self.model)).all())
Example #35
    async def mutate(cls, root, info, email, props):
        async with info.context['dbpool'].acquire() as conn, conn.begin():
            username = props.username if props.username else email
            data = {
                'username': username,
                'email': email,
                'password': props.password,
                'need_password_change': props.need_password_change,
                'full_name': props.full_name,
                'description': props.description,
                'is_active': props.is_active,
                'domain_name': props.domain_name,
                'role': UserRole(props.role),
            }
            try:
                query = (users.insert().values(data))
                result = await conn.execute(query)
                if result.rowcount > 0:
                    # Read the created user data from DB.
                    checkq = users.select().where(users.c.email == email)
                    result = await conn.execute(checkq)
                    o = User.from_row(await result.first())

                    # Create user's first access_key and secret_key.
                    from .keypair import generate_keypair, keypairs
                    ak, sk = generate_keypair()
                    is_admin = True if data['role'] in [
                        UserRole.SUPERADMIN, UserRole.ADMIN
                    ] else False
                    kp_data = {
                        'user_id': email,
                        'access_key': ak,
                        'secret_key': sk,
                        'is_active': True,
                        'is_admin': is_admin,
                        'resource_policy': 'default',
                        'concurrency_used': 0,
                        'rate_limit': 1000,
                        'num_queries': 0,
                        'user': o.uuid,
                    }
                    query = (keypairs.insert().values(kp_data))
                    await conn.execute(query)

                    # Add user to groups if group_ids parameter is provided.
                    from .group import association_groups_users, groups
                    if props.group_ids:
                        query = (sa.select([
                            groups.c.id
                        ]).select_from(groups).where(
                            groups.c.domain_name == props.domain_name).where(
                                groups.c.id.in_(props.group_ids)))
                        result = await conn.execute(query)
                        grps = await result.fetchall()
                        if grps:
                            values = [{
                                'user_id': o.uuid,
                                'group_id': grp.id
                            } for grp in grps]
                            query = association_groups_users.insert().values(
                                values)
                            await conn.execute(query)
                    return cls(ok=True, msg='success', user=o)
                else:
                    return cls(ok=False,
                               msg='failed to create user',
                               user=None)
            except (pg.IntegrityError, sa.exc.IntegrityError) as e:
                return cls(ok=False, msg=f'integrity error: {e}', user=None)
            except (asyncio.CancelledError, asyncio.TimeoutError):
                raise
            except Exception as e:
                return cls(ok=False, msg=f'unexpected error: {e}', user=None)
Example #36
 def one_by_unique_key(self, db: Session, unique_key_name: str, unique_key_value: Any) -> ModelType:
     """Return exactly one row specified by unique_key or raise an exception."""
     statement = select(self.model).where(getattr(self.model, unique_key_name) == unique_key_value)
     return db.execute(statement).one()[0]
Example #37
from sqlalchemy import create_engine, Table, MetaData
from sqlalchemy import select, or_, and_, not_, desc
from sqlalchemy import func  #use func.sum() and func.count(); don't import sum/count directly, they conflict with Python built-ins

#connect to DB
engine = create_engine("sqlite:///census.sqlite")
connection = engine.connect()

#set up metadata
metadata = MetaData()

#get the table (there is only one in census.sqlite)
census = Table('census', metadata, autoload=True, autoload_with=engine)

#you can use .distinct(), which maps to the SQL DISTINCT keyword, to grab only distinct values
stmt = select([census.columns.state.distinct()])
stmt = stmt.order_by(census.columns.state)
results = connection.execute(stmt).fetchall()
print("All States:" + '*' * 77 + "\n", results)

#some operations are more efficient to do in SQL than in python
#examples: Count, Sum
#these two are called aggregation functions because they collapse multiple records into one
#to use one in an SQLAlchemy select query, do this
stmt = select([func.sum(census.columns.pop2008)])

#execute the query and get the results from a ResultProxy
results = connection.execute(stmt).fetchall()

print("Sum of US population from the census 2008:", results, 30 * '*')
Example #38
    def update_area(self, session, areaid):
        # Select all cells in this area and derive a bounding box for them
        radio, mcc, mnc, lac = decode_cellarea(areaid)
        load_fields = ('cellid', 'lat', 'lon', 'radius', 'region', 'last_seen',
                       'max_lat', 'max_lon', 'min_lat', 'min_lon')

        shard = self.cell_model.shard_model(radio)
        fields = [getattr(shard.__table__.c, f) for f in load_fields]

        cells = session.execute(
            select(fields)
            .where(shard.__table__.c.radio == radio)
            .where(shard.__table__.c.mcc == mcc)
            .where(shard.__table__.c.mnc == mnc)
            .where(shard.__table__.c.lac == lac)
            .where(shard.__table__.c.lat.isnot(None))
            .where(shard.__table__.c.lon.isnot(None))
        ).fetchall()

        if len(cells) == 0:
            # If there are no more underlying cells, delete the area entry
            session.execute(
                delete(self.area_table)
                .where(self.area_table.c.areaid == areaid)
            )
            return

        # Otherwise update the area entry based on all the cells
        area = session.execute(
            select([self.area_table.c.areaid,
                    self.area_table.c.modified,
                    self.area_table.c.lat,
                    self.area_table.c.lon,
                    self.area_table.c.radius,
                    self.area_table.c.region,
                    self.area_table.c.avg_cell_radius,
                    self.area_table.c.num_cells,
                    self.area_table.c.last_seen,
                    ])
            .where(self.area_table.c.areaid == areaid)
        ).fetchone()

        cell_extremes = numpy.array([
            (numpy.nan if cell.max_lat is None else cell.max_lat,
             numpy.nan if cell.max_lon is None else cell.max_lon)
            for cell in cells] + [
            (numpy.nan if cell.min_lat is None else cell.min_lat,
             numpy.nan if cell.min_lon is None else cell.min_lon)
            for cell in cells
        ], dtype=numpy.double)

        max_lat, max_lon = numpy.nanmax(cell_extremes, axis=0)
        min_lat, min_lon = numpy.nanmin(cell_extremes, axis=0)

        ctr_lat, ctr_lon = numpy.array(
            [(c.lat, c.lon) for c in cells],
            dtype=numpy.double).mean(axis=0)
        ctr_lat = float(ctr_lat)
        ctr_lon = float(ctr_lon)

        radius = circle_radius(
            ctr_lat, ctr_lon, max_lat, max_lon, min_lat, min_lon)

        cell_radii = numpy.array([
            (numpy.nan if cell.radius is None else cell.radius)
            for cell in cells
        ], dtype=numpy.int32)
        avg_cell_radius = int(round(numpy.nanmean(cell_radii)))
        num_cells = len(cells)
        region = self.region(ctr_lat, ctr_lon, mcc, cells)

        last_seen = None
        cell_last_seen = set([cell.last_seen for cell in cells
                              if cell.last_seen is not None])
        if cell_last_seen:
            last_seen = max(cell_last_seen)

        if area is None:
            session.execute(
                self.area_table.insert(
                    mysql_on_duplicate='num_cells = num_cells'  # no-op
                ).values(
                    areaid=areaid,
                    radio=radio,
                    mcc=mcc,
                    mnc=mnc,
                    lac=lac,
                    created=self.utcnow,
                    modified=self.utcnow,
                    lat=ctr_lat,
                    lon=ctr_lon,
                    radius=radius,
                    region=region,
                    avg_cell_radius=avg_cell_radius,
                    num_cells=num_cells,
                    last_seen=last_seen,
                )
            )
        else:
            session.execute(
                self.area_table.update()
                .where(self.area_table.c.areaid == areaid)
                .values(
                    modified=self.utcnow,
                    lat=ctr_lat,
                    lon=ctr_lon,
                    radius=radius,
                    region=region,
                    avg_cell_radius=avg_cell_radius,
                    num_cells=num_cells,
                    last_seen=last_seen,
                )
            )
Example #39
 def all(self, db: Session) -> list[ModelType]:
     """Return all objects queried from database."""
     return db.execute(select(self.model)).scalars().all()
Example #40
 def set_taxonomy(cls, taxon_dataframe):
     """
      Set the taxonomy. If a taxonomy already exists, delete it and set the
      new one.
     :param taxon_dataframe: A dataframe containing the taxonomy to set.
     The dataframe must contain at least the following columns:
         taxon_id  -> The id of the taxon (must be the index of the
                      DataFrame).
         parent_id -> The id of the taxon's parent.
         rank      -> The rank of the taxon (must be a value in:
                      'REGNUM', 'PHYLUM', 'CLASSIS', 'ORDO', 'FAMILIA',
                      'GENUS', 'SPECIES', 'INFRASPECIES')
         full_name -> The full name of the taxon.
         rank_name -> The rank name of the taxon.
     The remaining columns will be stored as synonyms, using the column
     name.
     """
     LOGGER.debug("Starting set_taxonomy...")
     required_columns = {'parent_id', 'rank', 'full_name', 'rank_name'}
     cols = set(list(taxon_dataframe.columns))
     inter = cols.intersection(required_columns)
     synonym_cols = cols.difference(required_columns)
     if cls.IDENTITY_SYNONYM_KEY in synonym_cols:
         synonym_cols = synonym_cols.difference({cls.IDENTITY_SYNONYM_KEY})
         m = "The '{}' synonym key is a special key reserved by Niamoto," \
             "this column will be ignored."
         LOGGER.warning(m.format(cls.IDENTITY_SYNONYM_KEY))
     if not inter == required_columns:
         m = "The taxon dataframe does not contains the required " \
             "columns {}, csv has: {}".format(required_columns, cols)
         raise MalformedDataSourceError(m)
     if len(synonym_cols) > 0:
          LOGGER.debug(
              "The following synonym keys have been detected: {}".format(
                  ','.join(synonym_cols)))
         synonyms = taxon_dataframe[list(synonym_cols)].apply(
             lambda x: x.to_json(), axis=1)
     else:
         LOGGER.debug("No synonym keys had been detected.")
         synonyms = '{}'
     taxon_dataframe.drop(synonym_cols, axis=1, inplace=True)
     taxon_dataframe['synonyms'] = synonyms
     mptt = ['mptt_tree_id', 'mptt_depth', 'mptt_left', 'mptt_right']
     for col in mptt:
         taxon_dataframe[col] = 0
     taxon_dataframe = cls.construct_mptt(taxon_dataframe)
     taxon_dataframe['taxon_id'] = taxon_dataframe.index
     taxon_dataframe = taxon_dataframe.astype(object).where(
         pd.notnull(taxon_dataframe), None)
     with Connector.get_connection() as connection:
         current_synonym_keys = set(
             pd.read_sql(select([meta.synonym_key_registry.c.name]),
                         connection)['name'])
         to_add = synonym_cols.difference(current_synonym_keys)
         to_delete = current_synonym_keys.difference(
             synonym_cols).difference({cls.IDENTITY_SYNONYM_KEY})
         to_keep = current_synonym_keys.intersection(synonym_cols)
         if len(to_delete) > 0:
             prov = meta.data_provider
             syno = meta.synonym_key_registry
             providers_to_update = pd.read_sql(
                 select([prov.c.name]).select_from(
                     prov.join(syno,
                               syno.c.id == prov.c.synonym_key_id)).where(
                                   syno.c.name.in_(to_delete)), connection)
             msg = "The following synonym keys will be deleted: {}. " \
                   "The following data providers where depending on those" \
                   " synonym keys: {} Please consider updating " \
                   "them, or updating the taxonomy."
             LOGGER.warning(
                 msg.format(to_delete, set(providers_to_update['name'])))
         with connection.begin():
             connection.execute("SET CONSTRAINTS ALL DEFERRED;")
             # Unregister synonym keys
             cls.unregister_all_synonym_keys(
                 bind=connection,
                 exclude=to_keep,
             )
             # Delete existing taxonomy
             cls.delete_all_taxa(bind=connection)
             # Register synonym cols
             for synonym_key in to_add:
                 cls.register_synonym_key(synonym_key, bind=connection)
             # Insert the data
             LOGGER.debug("Inserting the taxonomy in database...")
             if len(taxon_dataframe) > 0:
                 ins = meta.taxon.insert().values(
                     id=bindparam('taxon_id'),
                     full_name=bindparam('full_name'),
                     rank_name=bindparam('rank_name'),
                     rank=bindparam('rank'),
                     parent_id=bindparam('parent_id'),
                     synonyms=cast(bindparam('synonyms'), JSONB),
                     mptt_left=bindparam('mptt_left'),
                     mptt_right=bindparam('mptt_right'),
                     mptt_tree_id=bindparam('mptt_tree_id'),
                     mptt_depth=bindparam('mptt_depth'),
                 )
                 result = connection.execute(
                     ins,
                     taxon_dataframe.to_dict(orient='records')).rowcount
             else:
                 result = 0
             m = "The taxonomy had been successfully set ({} taxa " \
                 "inserted)!"
             LOGGER.debug(m.format(result))
     return result, synonym_cols
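
A minimal sketch of a taxon dataframe accepted by set_taxonomy, following the docstring above; the values and the extra 'gbif' synonym column are illustrative only:

import pandas as pd

taxon_df = pd.DataFrame(
    {
        'parent_id': [None, 1],
        'rank': ['REGNUM', 'PHYLUM'],
        'full_name': ['Plantae', 'Tracheophyta'],
        'rank_name': ['Plantae', 'Tracheophyta'],
        'gbif': ['6', '7707728'],  # any non-required column is stored as a synonym
    },
    index=pd.Index([1, 2], name='taxon_id'),
)
# the owning class is not shown above; hypothetical call site:
# SomeTaxonModel.set_taxonomy(taxon_df)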
Example #41
 def one(self, db: Session, id_: Any) -> Optional[ModelType]:
     """Return exactly one row or raise an exception."""
     statement = select(self.model).where(self.model.id == id_)
     return db.execute(statement).one()[0]
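
Examples #34, #36, #39 and #41 read like methods of a single repository-style base class built on the SQLAlchemy 1.4+/2.0 select() API; a self-contained sketch under that assumption (CRUDBase, User and the in-memory engine are illustrative names, not from the original code):

from typing import Any, Generic, Type, TypeVar

from sqlalchemy import Column, Integer, String, create_engine, select
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()
ModelType = TypeVar("ModelType")

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    email = Column(String, unique=True)

class CRUDBase(Generic[ModelType]):
    def __init__(self, model: Type[ModelType]) -> None:
        self.model = model  # the mapped ORM class this repository serves

    def one(self, db: Session, id_: Any) -> ModelType:
        """Return exactly one row or raise an exception."""
        return db.execute(select(self.model).where(self.model.id == id_)).one()[0]

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as db:
    db.add(User(id=1, email="alice@example.com"))
    db.commit()
    print(CRUDBase(User).one(db, 1).email)  # -> alice@example.com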
Example #42
def column_reflection_fallback(
    selectable: Select, dialect: Dialect, sqlalchemy_engine: Engine
) -> List[Dict[str, str]]:
    """If we can't reflect the table, use a query to at least get column names."""
    col_info_dict_list: List[Dict[str, str]]
    # noinspection PyUnresolvedReferences
    if dialect.name.lower() == "mssql":
        # Get column names and types from the database
        # Reference: https://dataedo.com/kb/query/sql-server/list-table-columns-in-database
        tables_table_clause: TableClause = sa.table(
            "tables",
            sa.column("object_id"),
            sa.column("schema_id"),
            sa.column("name"),
            schema="sys",
        ).alias("sys_tables_table_clause")
        tables_table_query: Select = (
            sa.select(
                [
                    tables_table_clause.c.object_id.label("object_id"),
                    sa.func.schema_name(tables_table_clause.c.schema_id).label(
                        "schema_name"
                    ),
                    tables_table_clause.c.name.label("table_name"),
                ]
            )
            .select_from(tables_table_clause)
            .alias("sys_tables_table_subquery")
        )
        columns_table_clause: TableClause = sa.table(
            "columns",
            sa.column("object_id"),
            sa.column("user_type_id"),
            sa.column("column_id"),
            sa.column("name"),
            sa.column("max_length"),
            sa.column("precision"),
            schema="sys",
        ).alias("sys_columns_table_clause")
        columns_table_query: Select = (
            sa.select(
                [
                    columns_table_clause.c.object_id.label("object_id"),
                    columns_table_clause.c.user_type_id.label("user_type_id"),
                    columns_table_clause.c.column_id.label("column_id"),
                    columns_table_clause.c.name.label("column_name"),
                    columns_table_clause.c.max_length.label("column_max_length"),
                    columns_table_clause.c.precision.label("column_precision"),
                ]
            )
            .select_from(columns_table_clause)
            .alias("sys_columns_table_subquery")
        )
        types_table_clause: TableClause = sa.table(
            "types",
            sa.column("user_type_id"),
            sa.column("name"),
            schema="sys",
        ).alias("sys_types_table_clause")
        types_table_query: Select = (
            sa.select(
                [
                    types_table_clause.c.user_type_id.label("user_type_id"),
                    types_table_clause.c.name.label("column_data_type"),
                ]
            )
            .select_from(types_table_clause)
            .alias("sys_types_table_subquery")
        )
        inner_join_conditions: BinaryExpression = sa.and_(
            *(tables_table_query.c.object_id == columns_table_query.c.object_id,)
        )
        outer_join_conditions: BinaryExpression = sa.and_(
            *(
                columns_table_query.columns.user_type_id
                == types_table_query.columns.user_type_id,
            )
        )
        col_info_query: Select = (
            sa.select(
                [
                    tables_table_query.c.schema_name,
                    tables_table_query.c.table_name,
                    columns_table_query.c.column_id,
                    columns_table_query.c.column_name,
                    types_table_query.c.column_data_type,
                    columns_table_query.c.column_max_length,
                    columns_table_query.c.column_precision,
                ]
            )
            .select_from(
                tables_table_query.join(
                    right=columns_table_query,
                    onclause=inner_join_conditions,
                    isouter=False,
                ).join(
                    right=types_table_query,
                    onclause=outer_join_conditions,
                    isouter=True,
                )
            )
            .where(tables_table_query.c.table_name == selectable.name)
            .order_by(
                tables_table_query.c.schema_name.asc(),
                tables_table_query.c.table_name.asc(),
                columns_table_query.c.column_id.asc(),
            )
        )
        col_info_tuples_list: List[tuple] = sqlalchemy_engine.execute(
            col_info_query
        ).fetchall()
        # type_module = _get_dialect_type_module(dialect=dialect)
        col_info_dict_list: List[Dict[str, str]] = [
            {
                "name": column_name,
                # "type": getattr(type_module, column_data_type.upper())(),
                "type": column_data_type.upper(),
            }
            for schema_name, table_name, column_id, column_name, column_data_type, column_max_length, column_precision in col_info_tuples_list
        ]
    elif dialect.name.lower() == "trino":
        try:
            table_name = selectable.name
        except AttributeError:
            table_name = selectable

        tables_table: sa.Table = sa.Table(
            "tables",
            sa.MetaData(),
            schema="information_schema",
        )
        tables_table_query: Select = (
            sa.select(
                [
                    sa.column("table_schema").label("schema_name"),
                    sa.column("table_name").label("table_name"),
                ]
            )
            .select_from(tables_table)
            .alias("information_schema_tables_table")
        )
        columns_table: sa.Table = sa.Table(
            "columns",
            sa.MetaData(),
            schema="information_schema",
        )
        columns_table_query: Select = (
            sa.select(
                [
                    sa.column("column_name").label("column_name"),
                    sa.column("table_name").label("table_name"),
                    sa.column("table_schema").label("schema_name"),
                    sa.column("data_type").label("column_data_type"),
                ]
            )
            .select_from(columns_table)
            .alias("information_schema_columns_table")
        )
        conditions = sa.and_(
            *(
                tables_table_query.c.table_name == columns_table_query.c.table_name,
                tables_table_query.c.schema_name == columns_table_query.c.schema_name,
            )
        )
        col_info_query: Select = (
            sa.select(
                [
                    tables_table_query.c.schema_name,
                    tables_table_query.c.table_name,
                    columns_table_query.c.column_name,
                    columns_table_query.c.column_data_type,
                ]
            )
            .select_from(
                tables_table_query.join(
                    right=columns_table_query, onclause=conditions, isouter=False
                )
            )
            .where(tables_table_query.c.table_name == table_name)
            .order_by(
                tables_table_query.c.schema_name.asc(),
                tables_table_query.c.table_name.asc(),
                columns_table_query.c.column_name.asc(),
            )
            .alias("column_info")
        )
        col_info_tuples_list: List[tuple] = sqlalchemy_engine.execute(
            col_info_query
        ).fetchall()
        # type_module = _get_dialect_type_module(dialect=dialect)
        col_info_dict_list: List[Dict[str, str]] = [
            {
                "name": column_name,
                "type": column_data_type.upper(),
            }
            for schema_name, table_name, column_name, column_data_type in col_info_tuples_list
        ]
    else:
        # if a custom query was passed
        if isinstance(selectable, TextClause):
            query: TextClause = selectable
        else:
            query: Select = sa.select([sa.text("*")]).select_from(selectable).limit(1)
        result_object = sqlalchemy_engine.execute(query)
        # noinspection PyProtectedMember
        col_names: List[str] = result_object._metadata.keys
        col_info_dict_list = [{"name": col_name} for col_name in col_names]
    return col_info_dict_list
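A minimal invocation sketch; the connection URL and table name below are placeholders, not from the original code. A SQLite engine exercises the generic fallback branch at the end of the function:

# Illustrative call only; URL and table name are assumed.
engine = sa.create_engine("sqlite:///example.db")
table = sa.table("my_table")
columns = column_reflection_fallback(
    selectable=table, dialect=engine.dialect, sqlalchemy_engine=engine
)
# The MSSQL and Trino branches return [{"name": ..., "type": ...}];
# the generic branch returns [{"name": ...}] only.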
Example #43
0
from db_settings import parse_results, engine
from pywhois import whois
import pywhois
from sqlalchemy import select

# Domains whose WHOIS lookup failed or returned no registrar.
failed_domains = list()

conn = engine.connect()
urls = conn.execute(select([parse_results.c.id, parse_results.c.response_domain])
                    .where(parse_results.c.response_code == 'ClientConnectorError').offset(1000))
urls = list(urls)


for url in urls:
    try:
        w = whois.whois(url[1])
        registrar = w.registrar  # registrar name, not an expiry date
        if not registrar:
            failed_domains.append(url[1])
        print(url[1], registrar)
    except pywhois.whois.parser.PywhoisError:
        failed_domains.append(url[1])
    except Exception as err:
        print(url[1], type(err))

with open('result.txt', 'wt', encoding='utf-8') as file:
    file.write('\n'.join(str(line) for line in failed_domains))
Example #44
0
 def test_scalar_strings(self, scalar_strings, connection):
     fn = func.scalar_strings(5)
     result = connection.execute(select(fn.column_valued())).scalars().all()
     eq_(result, ["some string"] * 5)
Example #45
0
def related_users_query(city_id):
    cols = [user.c.id]
    vtab = subscription.join(user).join(weather_state).join(city)
    cond = city.c.id == city_id
    query = select(cols).select_from(vtab).where(cond)
    return query
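A usage sketch, assuming the ``engine`` and the table objects referenced above are already bound:

# Hypothetical execution of the query built above.
with engine.connect() as conn:
    user_ids = [row[0] for row in conn.execute(related_users_query(city_id=1))]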
Example #46
0
 def test_ip_addrs_vip_name_exists(self):
     result = db.execute(
         sa.select([self.meta.tables['ip_addrs'].c.vip_name]))
     self.assertEqual(result.scalar(), "management")
Example #47
0
 def test_two_strings(self, two_strings, connection):
     fn = func.three_pairs().table_valued("string1", "string2")
     result = connection.execute(select(fn.c.string1, fn.c.string2)).all()
     eq_(result, [("a", "b"), ("c", "d"), ("e", "f")])
Example #48
0
 def test_for_update_expr(self):
     engine = self._fixture(True)
     s1 = select([1], for_update=True)
     result = engine.execute(s1)
     assert result.cursor.name
Example #49
0
    def test_deployment_graph_tasks_creation_success(self):
        deployment_graph_id = self._insert_deployment_graph()
        tasks = [{
            'task_name': 'task1',
            'deployment_graph_id': deployment_graph_id,
            'version': '2.0.0',
            'type': 'puppet',
            'condition': None,
            'requires': ['a', 'b'],
            'required_for': ['c', 'd'],
            'refresh_on': ['r1', 'r2'],
            'cross_depends': jsonutils.dumps([{'name': 'a'}, {'name': 'b'}]),
            'cross_depended_by': jsonutils.dumps([{'name': 'c'}, {'name': 'd'}]),
            'reexecute_on': ["nailgun_event1", "nailgun_event2"],
            'groups': ['group1', 'group2'],
            'roles': ['role1', 'role2'],
            'tasks': ['t1', 't2'],
            'parameters': jsonutils.dumps({'param1': 'val1'}),
            '_custom': jsonutils.dumps({}),
        }, {
            'task_name': 'task2',
            'deployment_graph_id': deployment_graph_id,
            'version': '2.0.0',
            'type': 'puppet',
            'condition': None,
            'requires': ['task1'],
            'required_for': ['c', 'd'],
            'refresh_on': [],
            'cross_depends': jsonutils.dumps([{'name': 'task1'}]),
            'cross_depended_by': jsonutils.dumps([{'name': 'c'}, {'name': 'd'}]),
            'reexecute_on': ["nailgun_event3", "nailgun_event4"],
            'groups': ['group3', 'group4'],
            'roles': ['role3', 'role4'],
            'tasks': [],
            'parameters': jsonutils.dumps({'param2': 'val2'}),
            '_custom': jsonutils.dumps({}),
        }]
        db.execute(self.meta.tables['deployment_graph_tasks'].insert(), tasks)
        db.commit()

        result = db.execute(
            sa.select([self.meta.tables['deployment_graph_tasks']]).where(
                sa.text(
                    'deployment_graph_tasks.deployment_graph_id = {0}'.format(
                        deployment_graph_id))))

        db_tasks = [dict(r) for r in result]
        for d in db_tasks:
            d.pop('id', None)

        self.assertItemsEqual(tasks, db_tasks)
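The string-formatted ``sa.text()`` filter above works in a test, but a bound column comparison is the more idiomatic, injection-safe spelling; a sketch of the equivalent query against the same table:

# Equivalent filter using a bound parameter instead of string formatting.
tasks_table = self.meta.tables['deployment_graph_tasks']
result = db.execute(
    sa.select([tasks_table]).where(
        tasks_table.c.deployment_graph_id == deployment_graph_id))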
Example #50
0
 def test_stmt_option_disabled(self):
     engine = self._fixture(True)
     s = select([1]).execution_options(stream_results=False)
     result = engine.execute(s)
     assert not result.cursor.name
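For contrast, a sketch of the inverse case: enabling server-side cursors per statement on an engine created without them. psycopg2 names its server-side cursors, hence the assertion:

 def test_stmt_option(self):
     engine = self._fixture(False)
     s = select([1]).execution_options(stream_results=True)
     result = engine.execute(s)
     assert result.cursor.name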
Example #51
0
 def email(self):
     return (
         select([Email.email])
         .where((Email.user_id == self.id) & (Email.primary.is_(True)))
         .as_scalar()
     )
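On SQLAlchemy 1.4+, ``.as_scalar()`` is deprecated in favor of ``.scalar_subquery()``; a sketch of the same correlated subquery in the newer spelling, assuming the ``Email`` model above:

 def email(self):
     # Same correlated scalar subquery, SQLAlchemy 1.4+ spelling.
     return (
         select(Email.email)
         .where((Email.user_id == self.id) & (Email.primary.is_(True)))
         .scalar_subquery()
     )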