Example 1
def get_maps(document, lang):
    """Load and return maps that intersect with the document geometry.
    """
    if document.geometry is None:
        return []

    document_geom = select([DocumentGeometry.geom]). \
        where(DocumentGeometry.document_id == document.document_id)
    document_geom_detail = select([DocumentGeometry.geom_detail]). \
        where(DocumentGeometry.document_id == document.document_id)
    topo_maps = DBSession. \
        query(TopoMap). \
        filter(TopoMap.redirects_to.is_(None)). \
        join(
            DocumentGeometry,
            TopoMap.document_id == DocumentGeometry.document_id). \
        options(load_only(
            TopoMap.document_id, TopoMap.editor, TopoMap.code,
            TopoMap.version, TopoMap.protected)). \
        options(joinedload(TopoMap.locales).load_only(
            DocumentLocale.lang, DocumentLocale.title,
            DocumentLocale.version)). \
        filter(
            or_(
                DocumentGeometry.geom_detail.ST_Intersects(
                    document_geom.label('t1')),
                DocumentGeometry.geom_detail.ST_Intersects(
                    document_geom_detail.label('t2'))
            )). \
        all()

    if lang is not None:
        set_best_locale(topo_maps, lang)

    return topo_maps
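
Nearly every snippet on this page uses the legacy SQLAlchemy 1.x calling style, where select() receives a Python list of columns. A minimal sketch with a hypothetical table, assuming SQLAlchemy 1.4, where both spellings are accepted and compile identically:

from sqlalchemy import Column, Integer, MetaData, Table, select

metadata = MetaData()
t = Table("t", metadata, Column("id", Integer, primary_key=True))

legacy = select([t.c.id])  # 1.x list style (emits a deprecation warning on 1.4, removed in 2.0)
modern = select(t.c.id)    # 1.4/2.0 positional style
assert str(legacy) == str(modern)  # both render SELECT t.id FROM t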
Example 2
    def test_within_distance(self):
        """
        Because SDO_WITHIN_DISTANCE requires a spatial index for the geometry used
        as its first parameter, we have to insert our test geometries into tables,
        unlike with the other databases.

        Note that Oracle uses meters as the unit for the tolerance value for
        geodetic coordinate systems (like 4326)!
        """
        # test if SDO_WITHIN_DISTANCE is called correctly
        eq_(session.query(Spot).filter(functions._within_distance(Spot.spot_location, 'POINT(0 0)', 0)).count(), 1)
        eq_(session.query(Spot).filter(functions._within_distance(Spot.spot_location, 'POINT(0 0)', 0.1)).count(), 1)
        eq_(session.query(Spot).filter(functions._within_distance(Spot.spot_location, 'POINT(9 9)', 100000)).count(), 0)

        eq_(session.query(Spot).filter(functions._within_distance(Spot.spot_location,
                                                       'Polygon((-5 -5, 5 -5, 5 5, -5 5, -5 -5))', 0)).count(), 3)

        eq_(session.query(Spot).filter(functions._within_distance(Spot.spot_location,
                                                       'Polygon((-10 -10, 10 -10, 10 10, -10 10, -10 -10))', 0)).count(), 4)

        eq_(session.query(Spot).filter(functions._within_distance(Spot.spot_location,
                                                       'Polygon((-10 -10, 10 -10, 10 10, -10 10, -10 -10))', 200000)).count(), 5)

        # test if SDO_GEOM.WITHIN_DISTANCE is called correctly
        eq_(session.scalar(select([text('1')], from_obj=['dual']).where(
                                                    functions._within_distance('POINT(0 0)', 'POINT(0 0)', 0,
                                                                    {'tol' : 0.00000005}))), 1)
        eq_(session.scalar(select([text('1')], from_obj=['dual']).where(
                                                    functions._within_distance('POINT(0 0)', 'POINT(0 0)', 0,
                                                                    {'dim1' : text(diminfo),
                                                                     'dim2' : text(diminfo)}))), 1)
Example 3
 def constructQuery(self, context):
     session = Session()
     trusted = removeSecurityProxy(context)
     user_id = getattr(trusted, self.value_field, None)
     if user_id:
         query = session.query(domain.User 
                ).filter(domain.User.user_id == 
                     user_id).order_by(domain.User.last_name,
                         domain.User.first_name,
                         domain.User.middle_name)
         return query
     else:
         sitting = trusted.__parent__
         group_id = sitting.group_id
         group_sitting_id = sitting.group_sitting_id
         all_member_ids = sql.select([schema.user_group_memberships.c.user_id], 
                 sql.and_(
                     schema.user_group_memberships.c.group_id == group_id,
                     schema.user_group_memberships.c.active_p == True))
         attended_ids = sql.select([schema.group_sitting_attendance.c.member_id],
                  schema.group_sitting_attendance.c.group_sitting_id == group_sitting_id)
         query = session.query(domain.User).filter(
             sql.and_(domain.User.user_id.in_(all_member_ids),
                 ~ domain.User.user_id.in_(attended_ids))).order_by(
                         domain.User.last_name,
                         domain.User.first_name,
                         domain.User.middle_name)
         return query
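
A caveat on the ~in_() filter above: in SQL, NOT IN (subquery) matches nothing if the subquery returns a NULL. A hedged sketch with a hypothetical attendance table showing the usual guard:

from sqlalchemy import Column, Integer, MetaData, Table, select

metadata = MetaData()
attendance = Table("attendance", metadata, Column("member_id", Integer))

# Filter NULLs out of the subquery so NOT IN behaves as intended.
attended_ids = select([attendance.c.member_id]).where(
    attendance.c.member_id != None)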
Example 4
 def test_nested_in(self):
     from sqlalchemy.sql.expression import select
     from sqlalchemy.schema import Table, MetaData, Column
     from sqlalchemy.types import Integer
     meta = MetaData()
     table = Table(
         u'Foo',
         meta,
         Column(u'a', Integer())
         )
     target = self._makeOne(
         select(
             [u'a'],
             from_obj=table,
             whereclause=table.c.a.in_(
                 self._makeOne(
                     select(
                         [u'a'],
                         from_obj=table,
                         ),
                     comment=u'inner'
                     )
                 )
             ),
         comment=u'test'
         )
     result = target.compile()
     self.assertEqual(unicode(result).replace(u'\n', ''), u'SELECT a FROM "Foo" WHERE "Foo".a IN (SELECT a FROM "Foo") /* test */')
Example 5
def set_leg_waypoints():
    t = time.time()

    dd = db.metadata.tables["device_data"]
    legs = db.metadata.tables["legs"]
    glue = db.metadata.tables["leg_waypoints"]

    legpoints = select(
        [legs.c.id, dd.c.waypoint_id, dd.c.time, dd.c.snapping_time],
        from_obj=dd.join(legs, and_(
            dd.c.device_id == legs.c.device_id,
            dd.c.time.between(legs.c.time_start, legs.c.time_end)))) \
        .alias("legpoints")
    done = select([glue.c.leg], distinct=True)
    nounsnapped = select(
        [legpoints.c.id],
        legpoints.c.id.notin_(done),
        group_by=legpoints.c.id,
        having=func.bool_and(legpoints.c.snapping_time.isnot(None)))
    newitems = select(
        [legpoints.c.id, legpoints.c.waypoint_id, func.min(legpoints.c.time)],
        legpoints.c.id.in_(nounsnapped),
        group_by=[legpoints.c.id, legpoints.c.waypoint_id]).alias("newitems")

    ins = glue.insert().from_select(["leg", "waypoint", "first"], newitems)
    rowcount = db.engine.execute(ins).rowcount
    print("set_leg_waypoints on %d rows in %.2g seconds" % (
        rowcount, time.time() - t))
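
The insert().from_select() call above copies rows server-side without round-tripping them through Python. A minimal sketch with hypothetical source and destination tables:

from sqlalchemy import Column, Integer, MetaData, Table, select

metadata = MetaData()
src = Table("src", metadata, Column("a", Integer), Column("b", Integer))
dst = Table("dst", metadata, Column("a", Integer), Column("b", Integer))

ins = dst.insert().from_select(["a", "b"], select([src.c.a, src.c.b]))
print(ins)  # INSERT INTO dst (a, b) SELECT src.a, src.b FROM src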
Example 6
 def p_is_term(p):
     '''is_term : OP_IS string'''
     #TODO: implement starred, watched, draft
     username = p.parser.username
     if p[2] == 'reviewed':
         filters = []
         filters.append(gertty.db.approval_table.c.change_key == gertty.db.change_table.c.key)
         filters.append(gertty.db.approval_table.c.value != 0)
         s = select([gertty.db.change_table.c.key], correlate=False).where(and_(*filters))
         p[0] = gertty.db.change_table.c.key.in_(s)
     elif p[2] == 'open':
         p[0] = gertty.db.change_table.c.status.notin_(['MERGED', 'ABANDONED'])
     elif p[2] == 'closed':
         p[0] = gertty.db.change_table.c.status.in_(['MERGED', 'ABANDONED'])
     elif p[2] == 'submitted':
         p[0] = gertty.db.change_table.c.status == 'SUBMITTED'
     elif p[2] == 'merged':
         p[0] = gertty.db.change_table.c.status == 'MERGED'
     elif p[2] == 'abandoned':
         p[0] = gertty.db.change_table.c.status == 'ABANDONED'
     elif p[2] == 'owner':
         p[0] = and_(gertty.db.change_table.c.account_key == gertty.db.account_table.c.key,
                     gertty.db.account_table.c.username == username)
     elif p[2] == 'reviewer':
         filters = []
         filters.append(gertty.db.approval_table.c.change_key == gertty.db.change_table.c.key)
         filters.append(gertty.db.approval_table.c.account_key == gertty.db.account_table.c.key)
         filters.append(gertty.db.account_table.c.username == username)
         s = select([gertty.db.change_table.c.key], correlate=False).where(and_(*filters))
         p[0] = gertty.db.change_table.c.key.in_(s)
     else:
         raise gertty.search.SearchSyntaxError('Syntax error: is:%s is not supported' % p[2])
Example 7
    def query(self, parent_ids=None):
        """ Construct a SQL query for this level of the request. """
        if self.parent is None:
            q = select(from_obj=self.join(self.alias))
            q = q.offset(self.get_child_node_value('offset', 0))
            if not self.node.as_list:
                q = q.limit(1)
            else:
                q = q.limit(self.get_child_node_value('limit', 10))
        else:
            q = select(from_obj=self.join(self.parent.alias))
        q = self.filter(q)
        
        if parent_ids is not None:
            q = q.where(self.parent.alias.c.id.in_(parent_ids))
        
        q = self.project(q)
        q = q.distinct()
        #print self, type(self)
        #print q

        ids = []
        rp = db.session.execute(q)
        while True:
            row = rp.fetchone()
            if row is None:
                break
            row = dict(row.items())
            ids.append(row.get(self.pk_id))
            self.collect(row)

        for name, child in self.children.items():
            child.query(parent_ids=ids)
Example 8
 def create_expression(self):
     if self.last_result is None:
         try:
             expr = select(self.table.columns).order_by(*[safe_collate(self.table.columns[nm], None) for nm in self.sort_key])
             if self.starter is not None:
                 expr = expr.where(and_(self.starter, self.filter))
             else:
                 expr = expr.where(self.filter)
         except:
             raise
     else:
         try:
             where_clause = vector_greater_than([self.table.columns[nm] for nm in self.sort_key], \
                                               [self.last_result[n] for n in self.sort_index])
             expr = (select(self.table.columns).order_by(*[safe_collate(self.table.columns[nm], None) for nm in self.sort_key]) \
                    .where(and_(where_clause, self.filter)))
         except:
             raise
     
     if self.limit_per is not None:
         expr = expr.limit(self.limit_per)
         
     if self.stream:
         expr = expr.execution_options(stream_results=True)
     
     return expr
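
vector_greater_than is a project-specific helper, but the row-value comparison it presumably builds for keyset pagination can be spelled portably with an expanded OR. A hedged sketch with a hypothetical two-column sort key:

from sqlalchemy import Column, Integer, MetaData, Table, and_, or_, select

metadata = MetaData()
t = Table("t", metadata, Column("k1", Integer), Column("k2", Integer))

# Resume after the last-seen key (10, 20), i.e. (k1, k2) > (10, 20),
# expanded into a form every backend supports.
after_key = or_(t.c.k1 > 10, and_(t.c.k1 == 10, t.c.k2 > 20))
expr = select([t]).where(after_key).order_by(t.c.k1, t.c.k2)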
Example 9
 def test_in_25(self):
     self.assert_compile(
         select([self.table1.c.myid.in_(
                     select([self.table2.c.otherid]).as_scalar())]),
         "SELECT mytable.myid IN (SELECT myothertable.otherid "
             "FROM myothertable) AS anon_1 FROM mytable"
     )
Example 10
def upgrade(migrate_engine):
    if migrate_engine.name == 'sqlite':
        return
    meta = MetaData(bind=migrate_engine)
    for table_name, ref, child in TABLES:
        table = Table(table_name, meta, autoload=True)

        column_name, ref_table_name, ref_column_name = ref
        column = table.c[column_name]

        ref_table = Table(ref_table_name, meta, autoload=True)
        ref_column = ref_table.c[ref_column_name]

        subq = select([ref_column]).where(ref_column != None)

        if child:
            # Dump and cleanup rows in child table first
            child_table_name, child_column_name, child_ref_column_name = child

            child_table = Table(child_table_name, meta, autoload=True)
            child_column = child_table.c[child_column_name]

            child_ref_column = table.c[child_ref_column_name]

            child_subq = select([child_ref_column]).where(~ column.in_(subq))
            dump_cleanup_rows(migrate_engine, meta, child_table,
                              child_column.in_(child_subq))

        dump_cleanup_rows(migrate_engine, meta, table, ~ column.in_(subq))

        params = {'columns': [column], 'refcolumns': [ref_column]}
        if migrate_engine.name == 'mysql':
            params['name'] = "_".join(('fk', table_name, column_name))
        fkey = ForeignKeyConstraint(**params)
        fkey.create()
Example 11
    def test_migrate_batch_stureg(self):
        batch_guid = '2bb942b9-75cf-4055-a67a-8b9ab53a9dfc'
        batch = {UdlStatsConstants.REC_ID: '6',
                 UdlStatsConstants.BATCH_GUID: batch_guid, UdlStatsConstants.TENANT: self.__tenant,
                 UdlStatsConstants.SCHEMA_NAME: None, Constants.DEACTIVATE: False,
                 UdlStatsConstants.LOAD_TYPE: LoadType.STUDENT_REGISTRATION,
                 UdlStatsConstants.BATCH_OPERATION: 's',
                 UdlStatsConstants.SNAPSHOT_CRITERIA: '{"reg_system_id": "015247bd-058c-48cd-bb4d-f6cffe5b40c1", "academic_year": 2015}'}
        self.insert_into_udl_stats(batch[UdlStatsConstants.REC_ID], batch_guid, self.__tenant, batch[UdlStatsConstants.LOAD_TYPE])

        preprod_conn = EdMigrateSourceConnection(tenant=get_unittest_preprod_tenant_name())
        count_to_source_query = select([func.count()]).select_from(preprod_conn.get_table(Constants.STUDENT_REG))
        count_to_be_inserted = preprod_conn.execute(count_to_source_query).fetchall()[0][0]
        self.assertEqual(10, count_to_be_inserted)

        prod_conn = EdMigrateDestConnection(tenant=get_unittest_preprod_tenant_name())
        student_reg_table = prod_conn.get_table(Constants.STUDENT_REG)
        count_query = select([func.count()]).select_from(student_reg_table)
        count_before = prod_conn.execute(count_query).fetchall()[0][0]
        self.assertEqual(2581, count_before)

        count_snapshot_query = select([func.count()], student_reg_table.c.academic_year == 2015).select_from(student_reg_table)
        count_to_be_deleted = prod_conn.execute(count_snapshot_query).fetchall()[0][0]
        self.assertEqual(1217, count_to_be_deleted)

        rtn = migrate_batch(batch)
        self.assertTrue(rtn)

        expected_count_after = count_before - count_to_be_deleted + count_to_be_inserted
        count_after = prod_conn.execute(count_query).fetchall()[0][0]
        self.assertEqual(expected_count_after, count_after)
Example 12
    def test_migrate_student_reg(self):
        Unittest_with_edcore_sqlite.setUpClass(EdMigrateDestConnection.get_datasource_name(TestMigrate.test_tenant),
                                               use_metadata_from_db=False)
        preprod_conn = EdMigrateSourceConnection(tenant=get_unittest_preprod_tenant_name())
        prod_conn = EdMigrateDestConnection(tenant=get_unittest_prod_tenant_name())
        batch_guid = "0aa942b9-75cf-4055-a67a-8b9ab53a9dfc"
        student_reg_table = preprod_conn.get_table(Constants.STUDENT_REG)
        get_query = select([student_reg_table.c.student_reg_rec_id]).order_by(student_reg_table.c.student_reg_rec_id)
        count_query = select([func.count().label('student_reg_rec_ids')],
                             student_reg_table.c.student_reg_rec_id.in_(range(15541, 15551)))

        rset = preprod_conn.execute(get_query)
        row = rset.fetchall()
        self.assertEqual(10, len(row))
        self.assertListEqual([(15541,), (15542,), (15543,), (15544,), (15545,), (15546,), (15547,), (15548,), (15549,), (15550,)],
                             row)
        rset.close()

        rset = prod_conn.execute(count_query)
        row = rset.fetchone()
        self.assertEqual(0, row['student_reg_rec_ids'])
        rset.close()

        delete_count, insert_count = migrate_table(batch_guid, None, preprod_conn, prod_conn, 'student_reg', False)
        self.assertEqual(0, delete_count)
        self.assertEqual(10, insert_count)

        rset = prod_conn.execute(count_query)
        row = rset.fetchone()
        self.assertEqual(10, row['student_reg_rec_ids'])
        rset.close()
Example 13
def _connect_ping_listener(connection, branch):
    """Ping the server at connection startup.

    Ping the server at transaction begin and transparently reconnect
    if a disconnect exception occurs.
    """
    if branch:
        return

    # turn off "close with result".  This can also be accomplished
    # by branching the connection, however just setting the flag is
    # more performant and also doesn't get involved with some
    # connection-invalidation awkwardness that occurs (see
    # https://bitbucket.org/zzzeek/sqlalchemy/issue/3215/)
    save_should_close_with_result = connection.should_close_with_result
    connection.should_close_with_result = False
    try:
        # run a SELECT 1.   use a core select() so that
        # any details like that needed by Oracle, DB2 etc. are handled.
        connection.scalar(select([1]))
    except exception.DBConnectionError:
        # catch DBConnectionError, which is raised by the filter
        # system.
        # disconnect detected.  The connection is now
        # "invalid", but the pool should be ready to return
        # new connections assuming they are good now.
        # run the select again to re-validate the Connection.
        connection.scalar(select([1]))
    finally:
        connection.should_close_with_result = save_should_close_with_result
Example 14
def get_asmt_rec_id(guid_batch, tenant_name, asmt_rec_id_info):
    '''
    Returns asmt_rec_id from dim_asmt table
    Steps:
    1. Get guid_asmt from integration table INT_SBAC_ASMT
    2. Select asmt_rec_id from dim_asmt by the guid_asmt from step 1. It should have exactly one value
    '''
    source_table_name = asmt_rec_id_info['source_table']
    guid_column_name_in_source = asmt_rec_id_info['guid_column_in_source']
    target_table_name = asmt_rec_id_info['target_table']
    guid_column_name_in_target = asmt_rec_id_info['guid_column_name']
    rec_id_column_name = asmt_rec_id_info['rec_id']

    # connect to integration table, to get the value of guid_asmt
    with get_udl_connection() as udl_conn:
        int_table = udl_conn.get_table(source_table_name)
        query = select([int_table.c[guid_column_name_in_source]], from_obj=int_table, limit=1)
        query = query.where(int_table.c['guid_batch'] == guid_batch)
        results = udl_conn.get_result(query)
        if results:
            guid_column_value = results[0][guid_column_name_in_source]

    # connect to target table, to get the value of asmt_rec_id
    with get_target_connection(tenant_name, guid_batch) as target_conn:
        dim_asmt = target_conn.get_table(target_table_name)
        query = select([dim_asmt.c[rec_id_column_name]], from_obj=dim_asmt, limit=1)
        query = query.where(dim_asmt.c[guid_column_name_in_target] == guid_column_value)
        query = query.where(and_(dim_asmt.c['batch_guid'] == guid_batch))
        results = target_conn.get_result(query)
        if results:
            asmt_rec_id = results[0][rec_id_column_name]

    return asmt_rec_id
Example 15
def generate_create_inheritance_view_statement(class_):
    viewname = cls2tbl(class_)[1:]
    tables = class_.__table__
    cols = {}
    def add_cols(table):
        for col in table.c:
            if col.name not in cols:
                cols[col.name] = col
    add_cols(class_.__table__)
    if class_.__score_db__['inheritance'] is not None:
        parent = class_.__score_db__['parent']
        while parent:
            table = parent.__table__
            tables = tables.join(
                table, onclause=table.c.id == class_.__table__.c.id)
            add_cols(table)
            parent = parent.__score_db__['parent']
    if class_.__score_db__['inheritance'] != 'single-table':
        viewselect = select(cols.values(), from_obj=tables)
    else:
        typecol = getattr(
            class_, class_.__score_db__['type_column'])
        typenames = []
        def add_typenames(cls):
            typenames.append(cls.__score_db__['type_name'])
            for subclass in cls.__subclasses__():
                add_typenames(subclass)
        add_typenames(class_)
        viewselect = select(cols.values(),
                            from_obj=class_.__table__,
                            whereclause=typecol.in_(typenames))
    return CreateView(viewname, viewselect)
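
CreateView here is a project-specific DDL construct; the widely used recipe for such an element looks roughly like this (a hedged sketch, not this project's actual implementation):

from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import DDLElement

class CreateView(DDLElement):
    def __init__(self, name, selectable):
        self.name = name
        self.selectable = selectable

@compiles(CreateView)
def _compile_create_view(element, compiler, **kw):
    # Render the SELECT with inlined literals so the DDL is self-contained.
    body = compiler.sql_compiler.process(element.selectable, literal_binds=True)
    return "CREATE VIEW %s AS %s" % (element.name, body)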
Example 16
 def test_union(self):
     from sqlalchemy.sql.expression import select
     from sqlalchemy.schema import Table, MetaData, Column
     from sqlalchemy.types import Integer
     meta = MetaData()
     table = Table(
         u'Foo',
         meta,
         Column(u'a', Integer())
         )
     target = self._makeOne(
         select(
             [u'a'],
             from_obj=table,
             whereclause=(table.c.a == 1)
             ),
         comment=u'test'
         )
     target = target.union(
         select(
             [u'a'],
             from_obj=table,
             whereclause=(table.c.a == 2)
             )
         )
     result = target.compile()
     self.assertRegexpMatches(unicode(result).replace(u'\n', ''), ur'SELECT a FROM "Foo" WHERE "Foo".a = [^ ]+ UNION SELECT a FROM "Foo" WHERE "Foo".a = [^ ]+ /\* test \*/')
Example 17
    def test_six_pt_five(self):
        x = column("x")
        self.assert_compile(select([x]).where(or_(x == 7, true())),
                "SELECT x WHERE true")

        self.assert_compile(select([x]).where(or_(x == 7, true())),
                "SELECT x WHERE 1 = 1",
                dialect=default.DefaultDialect(supports_native_boolean=False))
Example 18
def load_to_table(data_dict, guid_batch, int_table, tenant_name, udl_schema):
    '''
    Load the data into the proper integration table
    @param data_dict: the dictionary containing the data to be loaded
    @param guid_batch: the id for the batch
    @param int_table: the name of the integration table
    @param tenant_name: name of the tenant
    @param udl_schema: udl schema name
    '''
    # Create sqlalchemy connection and get table information from sqlalchemy
    ref_column_mapping_columns = {}
    with get_udl_connection() as conn:
        data_dict[mk.GUID_BATCH] = guid_batch
        data_dict = fix_empty_strings(data_dict)
        ref_table = conn.get_table('ref_column_mapping')
        s_int_table = conn.get_table(int_table)
        column_mapping_query = select([ref_table.c.target_column,
                                       ref_table.c.stored_proc_name],
                                      from_obj=ref_table).where(and_(ref_table.c.source_table == 'lz_json',
                                                                     ref_table.c.target_table == int_table))
        results = conn.get_result(column_mapping_query)
        for result in results:
            target_column = result['target_column']
            stored_proc_name = result['stored_proc_name']
            value = data_dict.get(target_column)
            if value:
                if stored_proc_name:
                    if stored_proc_name.startswith('sp_'):
                        ref_column_mapping_columns[target_column] = stored_proc_name + '(' + QuotedString(value if type(value) is str else str(value)).getquoted().decode('utf-8') + ')'
                    else:
                        format_value = dict()
                        format_value['value'] = QuotedString(value if type(value) is str
                                                             else str(value)).getquoted().decode('utf-8')
                        if s_int_table.c[target_column].type.python_type is str:
                            format_value['length'] = s_int_table.c[target_column].type.length
                        ref_column_mapping_columns[target_column] = stored_proc_name.format(**format_value)
                    continue
            ref_column_mapping_columns[target_column] = value

        record_sid = 'nextval(\'{schema_name}.{tenant_sequence_name}\')'.\
            format(schema_name=udl_schema, tenant_sequence_name=Constants.TENANT_SEQUENCE_NAME(tenant_name))
        from_select_column_names = ['record_sid']
        from_select_select_values = [record_sid]
        for column in s_int_table.c:
            value = data_dict.get(column.name)
            if value is not None:
                from_select_column_names.append(column.name)
                from_select_select_values.append(
                    ref_column_mapping_columns.get(column.name,
                                                   QuotedString(value if type(value) is str else str(value)).getquoted().decode('utf-8')))
        insert_into_int_table = s_int_table.insert().from_select(from_select_column_names,
                                                                 select(from_select_select_values))
        # create insert statement and execute
        affected_row = db_util.execute_udl_queries(conn, [insert_into_int_table],
                                                   'Exception in loading json data -- ',
                                                   'json_loader', 'load_to_table')

    return affected_row[0]
Example 19
 def get_asmt_and_outcome_result(self, conf):
     with get_udl_connection() as conn:
         asmt_table = conn.get_table(conf.get(mk.ASMT_TABLE))
         asmt_outcome_table = conn.get_table(conf.get(mk.ASMT_OUTCOME_TABLE))
         asmt_result = conn.get_result(select([asmt_table.c.guid_asmt]).
                                       where(asmt_table.c.guid_batch == conf.get(mk.GUID_BATCH)))
         asmt_outcome_result = conn.get_result(select([asmt_outcome_table.c.assessmentguid], distinct=True).
                                               where(asmt_outcome_table.c.guid_batch == conf.get(mk.GUID_BATCH)))
     return asmt_result, asmt_outcome_result
Example 20
def get_udl_record_by_batch_guid(batch_guid, tenant):
    records = []
    with get_prod_connection(tenant=tenant) as connection:
        fact_asmt_outcome = connection.get_table('fact_asmt_outcome')
        fact_block_asmt_outcome = connection.get_table('fact_block_asmt_outcome')
        select_fao = select([fact_asmt_outcome.c.student_id.label(STUDENT_ID), fact_asmt_outcome.c.asmt_guid.label(ASMT_GUID), fact_asmt_outcome.c.date_taken.label(DATE_TAKEN)]).where(and_(fact_asmt_outcome.c.batch_guid == batch_guid, fact_asmt_outcome.c.rec_status == 'C'))
        select_fbao = select([fact_block_asmt_outcome.c.student_id.label(STUDENT_ID), fact_block_asmt_outcome.c.asmt_guid.label(ASMT_GUID), fact_block_asmt_outcome.c.date_taken.label(DATE_TAKEN)]).where(and_(fact_block_asmt_outcome.c.batch_guid == batch_guid, fact_block_asmt_outcome.c.rec_status == 'C'))
        records = connection.get_result(select_fao.union(select_fbao))
    return records
Example 21
 def verify_udl_failure(self, udl_connector, guid_batch_id):
     status = [('FAILURE',)]
     batch_table = udl_connector.get_table(Constants.UDL2_BATCH_TABLE)
     query = select([batch_table.c.udl_phase_step_status], and_(batch_table.c.guid_batch == guid_batch_id, batch_table.c.udl_phase == 'UDL_COMPLETE'))
     query2 = select([batch_table.c.udl_phase_step_status], and_(batch_table.c.guid_batch == guid_batch_id, batch_table.c.udl_phase == 'udl2.W_post_etl.task'))
     batch_table_data = udl_connector.execute(query).fetchall()
     batch_table_post_udl = udl_connector.execute(query2).fetchall()
     self.assertEquals(status, batch_table_data)
     self.assertEquals([('SUCCESS',)], batch_table_post_udl)
Example 22
 def test_in_26(self):
     self.assert_compile(self.table1.c.myid.in_(
         union(
               select([self.table1.c.myid], self.table1.c.myid == 5),
               select([self.table1.c.myid], self.table1.c.myid == 12),
         )
     ), "mytable.myid IN ("\
     "SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1 "\
     "UNION SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_2)")
Example 23
 def compare_results(compa, cols, vals):
     vals = tuple(map(none_to_minus_inf, vals))
     res = set([row['id'] for row in engine.execute(select(table.columns).where(compa))])
     all_ = [valmap(none_to_minus_inf, row) for row in engine.execute(select(table.columns))]
     cor = set()
     for row in all_:
         if tuple(row[col.name] for col in cols) > vals:
             cor.add(row['id'])
     assert_equal(res, cor)
Example 24
def initialize_table_source(db, tblname, schema=None, cols=None):

    tbl = initialize_table(db, tblname, schema)

    if not cols:
        return select([tbl])

    else:
        return select([tbl.c[col] for col in cols])
Example 25
def rebuild_table(table, delete_missing=False):
    from virtuoso.alchemy import AddForeignKey, DropForeignKey
    print "rebuilding", table
    session = get_session_maker()()
    incoming = set(get_incoming_fks(table))
    outgoing = set(table.foreign_keys)
    all_fkeys = incoming | outgoing
    self_ref = incoming & outgoing
    try:
        for fk in all_fkeys:
            if not delete_rows_with_missing_fkey(fk, delete_missing):
                print "There are missing keys, will not rebuild " + table.name
                return
    except Exception as e:
        traceback.print_exc()
        print "Could not delete missing keys"
        raise e
    # Booleans with NULL values
    for col in table.c:
        if isinstance(col.type, Boolean):
            session.execute(table.update().where(col == None).values(**{col.name:0}))
    # Drop all keys
    for fk in all_fkeys:
        try:
            session.execute(DropForeignKey(fk))
        except Exception as e:
            print "Could not drop fkey %s, maybe does not exist." % (fk_as_str(fk),)
            print e
    clone = clone_table(table, table.name+"_temp", False, False)
    clone.create(session.bind)
    column_names = [c.name for c in table.columns]
    sel = select([getattr(table.c, cname) for cname in column_names])
    with transaction.manager:
        session.execute(clone.insert().from_select(column_names, sel))
        mark_changed(session)
    session.execute(DropTable(table))
    # Should we create it without outgoing first?
    table.create(session.bind)
    # self ref will make the insert fail.
    for fk in self_ref:
        try:
            session.execute(DropForeignKey(fk))
        except Exception as e:
            print "Could not drop fkey %s, maybe does not exist." % (fk_as_str(fk),)
            print e
    sel = select([getattr(clone.c, cname) for cname in column_names])
    with transaction.manager:
        session.execute(table.insert().from_select(column_names, sel))
        mark_changed(session)
    session.execute(DropTable(clone))
    if delete_missing:
        # Delete a second time, in case.
        for fk in outgoing:
            assert delete_rows_with_missing_fkey(fk, True), "OUCH"
    for fk in incoming:  # includes self_ref
        session.execute(AddForeignKey(fk))
Example 26
def migrate_datastore_manager(datastores, datastore_versions):
    versions = select([datastore_versions]).execute()
    for ds_v in versions:
        ds = select([datastores]).\
            where(datastores.c.id == ds_v.datastore_id).\
            execute().fetchone()
        datastore_versions.update().\
            where(datastore_versions.c.id == ds_v.id).\
            values(manager=ds.manager).\
            execute()
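
This snippet relies on SQLAlchemy's old implicit execution (.execute() called directly on a statement), which requires bound metadata and was removed in 2.0. A hedged, self-contained equivalent with explicit connection handling (placeholder tables and engine, not the project's real schema):

from sqlalchemy import (Column, Integer, MetaData, String, Table,
                        create_engine, select)

metadata = MetaData()
datastores = Table("datastores", metadata,
                   Column("id", Integer, primary_key=True),
                   Column("manager", String(64)))
datastore_versions = Table("datastore_versions", metadata,
                           Column("id", Integer, primary_key=True),
                           Column("datastore_id", Integer),
                           Column("manager", String(64)))

engine = create_engine("sqlite://")  # placeholder engine for the sketch
metadata.create_all(engine)

with engine.connect() as conn:
    for ds_v in conn.execute(select([datastore_versions])).fetchall():
        ds = conn.execute(
            select([datastores]).where(datastores.c.id == ds_v.datastore_id)
        ).fetchone()
        conn.execute(
            datastore_versions.update()
            .where(datastore_versions.c.id == ds_v.id)
            .values(manager=ds.manager))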
Example 27
        def gen_pagination_with_pk(chunk):
            max_pk_query = select([func.max(pk)])
            min_pk_query = select([func.min(pk)])

            max_pk = from_conn.execute(max_pk_query).scalar() or 0
            min_pk = from_conn.execute(min_pk_query).scalar() or 0
            min_pk = to_conn.execute(max_pk_query).scalar() or (min_pk - 1)

            left_seq = range(min_pk + 1, max_pk, chunk)
            right_seq = range(min_pk + chunk, max_pk + chunk, chunk)
            for min_id, max_id in zip(left_seq, right_seq):
                yield select_query.where(pk.between(min_id, max_id))
Example 28
def _downgrade_bdm_v2(meta, bdm_table):
    # First delete all the image bdms

    # NOTE (ndipanov): This will delete all the image bdms, even the ones
    #                   that were potentially created as part of th normal
    #                   operation, not only the upgrade. We have to do it,
    #                   as we have no way of handling them in the old code.
    bdm_table.delete().where(bdm_table.c.source_type == 'image').execute()

    # NOTE (ndipanov):  Set all NULL device_names (if any) to '' and let the
    #                   Nova code deal with that. This is needed so that the
    #                   return of nullable=True does not break, and should
    #                   happen only if there are instances that are just
    #                   starting up when we do the downgrade
    bdm_table.update().where(
        bdm_table.c.device_name == None
    ).values(device_name='').execute()

    instance = Table('instances', meta, autoload=True)
    instance_shadow = Table('shadow_instances', meta, autoload=True)
    instance_q = select([instance.c.uuid])
    instance_shadow_q = select([instance_shadow.c.uuid])

    for instance_uuid, in itertools.chain(
        instance_q.execute().fetchall(),
            instance_shadow_q.execute().fetchall()):
        # Get all the bdms for an instance
        bdm_q = select(
            [bdm_table.c.id, bdm_table.c.source_type, bdm_table.c.guest_format]
        ).where(
            (bdm_table.c.instance_uuid == instance_uuid) &
            (bdm_table.c.source_type == 'blank')
        ).order_by(bdm_table.c.id.asc())

        blanks = [
            dict(zip(('id', 'source', 'format'), row))
            for row in bdm_q.execute().fetchall()
        ]

        swap = [dev for dev in blanks if dev['format'] == 'swap']
        assert len(swap) < 2
        ephemerals = [dev for dev in blanks if dev not in swap]

        for index, eph in enumerate(ephemerals):
            eph['virtual_name'] = 'ephemeral' + str(index)

        if swap:
            swap[0]['virtual_name'] = 'swap'

        for bdm in swap + ephemerals:
            bdm_table.update().where(
                bdm_table.c.id == bdm['id']
            ).values(**bdm).execute()
Example 29
 def p_comment_term(p):
     '''comment_term : OP_COMMENT string'''
     filters = []
     filters.append(gertty.db.revision_table.c.change_key == gertty.db.change_table.c.key)
     filters.append(gertty.db.revision_table.c.message == p[2])
     revision_select = select([gertty.db.change_table.c.key], correlate=False).where(and_(*filters))
     filters = []
     filters.append(gertty.db.revision_table.c.change_key == gertty.db.change_table.c.key)
     filters.append(gertty.db.comment_table.c.revision_key == gertty.db.revision_table.c.key)
     filters.append(gertty.db.comment_table.c.message == p[2])
     comment_select = select([gertty.db.change_table.c.key], correlate=False).where(and_(*filters))
     p[0] = or_(gertty.db.change_table.c.key.in_(comment_select),
                gertty.db.change_table.c.key.in_(revision_select))
Example 30
def _get_select_waypoints_for_routes():
    waypoint_type = text('\'' + WAYPOINT_TYPE + '\'')
    route_type = text('\'' + ROUTE_TYPE + '\'')

    select_linked_waypoints = \
        select([
            Association.child_document_id.label('route_id'),
            Association.parent_document_id.label('waypoint_id')
        ]). \
        where(
            and_(
                Association.parent_document_type == waypoint_type,
                Association.child_document_type == route_type)). \
        cte('linked_waypoints')

    select_waypoint_parents = \
        select([
            select_linked_waypoints.c.route_id,
            Association.parent_document_id.label('waypoint_id')
        ]). \
        select_from(join(
            select_linked_waypoints,
            Association,
            and_(
                Association.child_document_id ==
                select_linked_waypoints.c.waypoint_id,
                Association.parent_document_type == waypoint_type
            ))). \
        cte('waypoint_parents')

    select_waypoint_grandparents = \
        select([
            select_waypoint_parents.c.route_id,
            Association.parent_document_id.label('waypoint_id')
        ]). \
        select_from(join(
            select_waypoint_parents,
            Association,
            and_(
                Association.child_document_id ==
                select_waypoint_parents.c.waypoint_id,
                Association.parent_document_type == waypoint_type
            ))). \
        cte('waypoint_grandparents')

    return union(
            select_linked_waypoints.select(),
            select_waypoint_parents.select(),
            select_waypoint_grandparents.select()
        ). \
        cte('all_waypoints')
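
The chained CTEs above follow a small, reusable pattern: a select becomes a named CTE, and later selects reference its columns like a table. A stripped-down sketch with a hypothetical association table:

from sqlalchemy import Column, Integer, MetaData, Table, select, union

metadata = MetaData()
assoc = Table("association", metadata,
              Column("parent_id", Integer), Column("child_id", Integer))

linked = select([assoc.c.child_id]).where(assoc.c.parent_id == 1).cte("linked")
parents = select([assoc.c.parent_id]).where(
    assoc.c.child_id == linked.c.child_id).cte("parents")
all_ids = union(linked.select(), parents.select())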
Example 31
def _update_nodes(nodemap, instances, migrations):
    """For each migration and matching instance record, update the node columns
    if the referenced host is single-node.

    Skip updates for multi-node hosts.  In that case, there's no way to
    determine which node on a host the record should be associated with.
    """
    q = select([migrations.c.id, migrations.c.source_compute,
               migrations.c.dest_compute, instances.c.uuid, instances.c.host,
               instances.c.node],

               whereclause=and_(migrations.c.source_compute != None,
                                migrations.c.dest_compute != None,
                                instances.c.deleted == False,
                                migrations.c.status != 'reverted',
                                migrations.c.status != 'error'),

               from_obj=migrations.join(instances,
                   migrations.c.instance_uuid == instances.c.uuid)
    )

    result = q.execute()
    for migration_id, src, dest, uuid, instance_host, instance_node in result:

        values = {}

        nodes = nodemap.get(src, [])

        if len(nodes) == 1:
            # the source host is a single-node, safe to update node
            node = nodes[0]
            values['source_node'] = node

            if src == instance_host and node != instance_node:
                # execute the update; building the statement alone does nothing
                update(instances).where(instances.c.uuid == uuid).\
                        values(node=node).execute()

        nodes = nodemap.get(dest, [])
        if len(nodes) == 1:
            # the dest host is a single-node, safe to update node
            node = nodes[0]
            values['dest_node'] = node

            if dest == instance_host and node != instance_node:
                # execute the update; building the statement alone does nothing
                update(instances).where(instances.c.uuid == uuid).\
                        values(node=node).execute()

        if values:
            q = update(migrations,
                   values=values,
                   whereclause=migrations.c.id == migration_id)
            q.execute()
Example 32
 def test_in_27(self):
     # test that putting a select in an IN clause does not
     # blow away its ORDER BY clause
     self.assert_compile(
         select([self.table1, self.table2],
             self.table2.c.otherid.in_(
                 select([self.table2.c.otherid],
                                 order_by=[self.table2.c.othername],
                                 limit=10, correlate=False)
             ),
             from_obj=[self.table1.join(self.table2,
                         self.table1.c.myid == self.table2.c.otherid)],
             order_by=[self.table1.c.myid]
         ),
         "SELECT mytable.myid, "
         "myothertable.otherid, myothertable.othername FROM mytable "\
         "JOIN myothertable ON mytable.myid = myothertable.otherid "
         "WHERE myothertable.otherid IN (SELECT myothertable.otherid "\
         "FROM myothertable ORDER BY myothertable.othername "
         "LIMIT :param_1) ORDER BY mytable.myid",
         {'param_1': 10}
     )
Example 33
def login(username: str, password: str):
    try:
        _user = []
        for row in create_engine(db_url).connect().execute(
                select(user).where(user.c.username == username)):
            _user.append(dict(row))

        if password == _user[0]['hashpassword']:
            return "login success"
        else:
            return "login fail"
    except:
        return "error"
Example 34
 def test_apply_filter_to_query_with_iep_filters(self):
     with UnittestEdcoreDBConnection() as connection:
         fact_asmt_outcome = connection.get_table(
             Constants.FACT_ASMT_OUTCOME_VW)
         dim_student = connection.get_table(Constants.DIM_STUDENT)
         query = select([fact_asmt_outcome.c.school_id],
                        from_obj=([fact_asmt_outcome]))
         query = apply_filter_to_query(query, fact_asmt_outcome,
                                       dim_student,
                                       {FILTERS_PROGRAM_IEP: [YES]})
         self.assertIsNotNone(query._whereclause)
         self.assertIn("fact_asmt_outcome_vw.dmg_prg_iep",
                       str(query._whereclause))
Example 35
 def test_apply_filter_to_query_with_ethnic_filters(self):
     with UnittestEdcoreDBConnection() as connection:
         fact_asmt_outcome = connection.get_table(
             Constants.FACT_ASMT_OUTCOME_VW)
         dim_student = connection.get_table(Constants.DIM_STUDENT)
         query = select([fact_asmt_outcome.c.school_id],
                        from_obj=([fact_asmt_outcome]))
         query = apply_filter_to_query(
             query, fact_asmt_outcome, dim_student,
             {FILTERS_ETHNICITY: [FILTERS_ETHNICITY_AMERICAN]})
         self.assertIsNotNone(query._whereclause)
         self.assertIn("fact_asmt_outcome_vw.dmg_eth_derived",
                       str(query._whereclause))
Example 36
def get_rating(id):
    rating = db.session.execute(
        select([func.sum(RatingChange.change)
                ]).where(RatingChange.chain_id == id)).scalar()
    allowed_change = 0

    if current_user.is_authenticated and current_user.id:
        user_change = RatingChange.query.filter_by(user_id=current_user.id,
                                                   chain_id=id).first()
        if user_change:
            allowed_change = -user_change.change

    return json.dumps({'num': rating, 'allowedChange': allowed_change})
Example 37
    def get(self, project_slug: str):
        """Get project's info"""

        return Project.query.filter(
            and_(
                Project.slug == project_slug,
                or_(
                    Project.id.in_(
                        select(ProjectUser.project_id).where(
                            current_user.id == ProjectUser.user_id)),
                    Project.is_public,
                ),
            )).first()
Example 38
 def get_question_by_type(self, type_id):
     table = self.table
     stmt = select([table.c.question_id, table.c.content, table.c.choices]) \
         .where(table.c.type_id == type_id) \
         .order_by(table.c.question_id)
     cursor = self.execute(stmt)
     row = cursor.fetchone()
     while row:
         yield {
             'question_id': row.question_id,
             'content': row.content,
             'choices': row.choices
         }
         row = cursor.fetchone()  # advance, otherwise the loop never terminates
Example 39
    def find_one(self, where_clause):
        """
        Finds the most recent version
        """
        stmt = select([self.get_table()]).where(where_clause).order_by(
            desc('version')).limit(1)

        # resp is a ResultProxy object of sqlalchemy which acts something like a DB Cursor
        resp = stmt.execute().fetchone()
        if resp and resp['archived']:  # the row has been deleted
            return None

        return self.get_entity_from_proxy(resp)
Example 40
 def get_names_(objs):
     if not objs:
         return
     ids = set([obj.osmid for obj in objs])
     tbl = PolyTable if typ == 'found' else WaysTable
     res = self.session.execute(
         select([tbl.c.osm_id,
                 tbl.c.name]).where(tbl.c.osm_id.in_(ids)))
     names = res.fetchall()
     names = dict(names)
     for obj in objs:
         obj._name = names[obj.osmid]
         logging.info(names)
Example 41
    def _sub_query(self):
        select_sub = collate.make_sql_clause(""
                                             "officer_id,"
                                             "count(distinct paygrade_raw)  AS count_paygrade,"
                                             "count(distinct police_area_id)  AS count_policearea,"
                                             "date_trunc('month', job_start_date) as job_start_date", ex.text)
        from_sub = collate.make_sql_clause('staging.officer_roles', ex.text)
        group_by_sub = collate.make_sql_clause("officer_id, date_trunc('month', job_start_date) ", ex.text)

        sub_query = ex.select(columns=[select_sub], from_obj=from_sub) \
            .group_by(group_by_sub)

        return sub_query
Example 42
    def get_map(cls, session, urls):
        urls = tuple(urls)
        assert urls
        result_map = {}

        for batch in [urls[i:i + 500] for i in range(0, len(urls), 500)]:
            query = select([URLString])\
                .where(URLString.url.in_(batch))

            for row in session.execute(query):
                result_map[row.url] = row.id

        return result_map
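
The 500-item batching above works around per-statement bind-parameter limits that several databases impose on large IN lists. A hedged generalization of the same trick with a hypothetical urls table:

from sqlalchemy import Column, Integer, MetaData, String, Table, select

metadata = MetaData()
urls = Table("urls", metadata,
             Column("id", Integer, primary_key=True),
             Column("url", String(255)))

def chunked_in_queries(values, size=500):
    """Yield one SELECT per chunk so no statement exceeds `size` binds."""
    values = tuple(values)
    for i in range(0, len(values), size):
        yield select([urls]).where(urls.c.url.in_(values[i:i + size]))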
Example 43
def get_db_time():
    """ Gives the utc time on the db. """
    s = get_session()
    try:
        storage_date_format = None
        if s.bind.dialect.name == 'oracle':
            query = select([text("sys_extract_utc(systimestamp)")])
        elif s.bind.dialect.name == 'mysql':
            query = select([text("utc_timestamp()")])
        elif s.bind.dialect.name == 'sqlite':
            query = select([text("datetime('now', 'utc')")])
            storage_date_format = '%Y-%m-%d  %H:%M:%S'
        else:
            query = select([func.current_date()])

        for now, in s.execute(query):
            if storage_date_format:
                return datetime.strptime(now, storage_date_format)
            return now

    finally:
        s.remove()
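
The dialect dispatch in get_db_time can be exercised in isolation; a minimal sketch assuming an in-memory SQLite engine and the 1.x select signature:

from sqlalchemy import create_engine, select, text

engine = create_engine("sqlite://")
with engine.connect() as conn:
    # Matches the sqlite branch above; returns e.g. '2015-01-01 12:00:00'.
    now = conn.execute(select([text("datetime('now', 'utc')")])).scalar()
    print(now)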
Example 44
def prepare_pre_cache(tenant, state_code, batch_guid):
    '''
    prepare which state and district are pre-cached

    :param string tenant:  name of the tenant
    :param string state_code:  stateCode representing the state
    :param string batch_guid:  batch guid of the load being pre-cached
    :rtype: list
    :return:  list of results containing district guids
    '''
    with EdCoreDBConnection(tenant=tenant) as connector:
        fact_asmt_outcome_vw = connector.get_table(
            Constants.FACT_ASMT_OUTCOME_VW)
        fact_block_asmt_outcome = connector.get_table(
            Constants.FACT_BLOCK_ASMT_OUTCOME)
        query_fao = select([
            distinct(fact_asmt_outcome_vw.c.district_id).label(
                Constants.DISTRICT_ID)
        ],
                           from_obj=[fact_asmt_outcome_vw])
        query_fao = query_fao.where(
            fact_asmt_outcome_vw.c.state_code == state_code)
        query_fao = query_fao.where(
            and_(fact_asmt_outcome_vw.c.batch_guid == batch_guid))
        query_fao = query_fao.where(
            and_(fact_asmt_outcome_vw.c.rec_status == Constants.CURRENT))
        query_fbao = select([
            distinct(fact_block_asmt_outcome.c.district_id).label(
                Constants.DISTRICT_ID)
        ],
                            from_obj=[fact_block_asmt_outcome])
        query_fbao = query_fbao.where(
            fact_block_asmt_outcome.c.state_code == state_code)
        query_fbao = query_fbao.where(
            and_(fact_block_asmt_outcome.c.batch_guid == batch_guid))
        query_fbao = query_fbao.where(
            and_(fact_block_asmt_outcome.c.rec_status == Constants.CURRENT))
        results = connector.get_result(query_fao.union(query_fbao))
        return results
Example 45
def get_rows(altn):
    query = select([
        AlternateTranslatorNames.group, AlternateTranslatorNames.cleanname,
        AlternateTranslatorNames.name
    ],
                   from_obj=[AlternateTranslatorNames
                             ]).where(AlternateTranslatorNames.name == altn)

    results = db.session.execute(query).fetchall()
    ret = []
    for result in results:
        ret.append((result[0], result[1], result[2], 0.1))
    return ret
Example 46
    def select_output_features(self, subquery):
        from sqlalchemy.sql.expression import select, literal

        t_mtof = schema.model_trained_on_output_feature
        tt_mtof = t_mtof.__table__
        t_f = schema.feature

        idtype = tt_mtof.c.idmodel.type
        sq = subquery.subquery('t')
        self.session.execute(
            insert_ignore(t_mtof).from_select(
                [t_mtof.idmodel, t_mtof.idfeature],
                select([literal(self.idmodel, type_=idtype), sq.c.idfeature])))
Example 47
def transactions_all_json():
    lower = request.args.get('after', "")
    upper = request.args.get('before', "")
    filter = request.args.get('filter', "nonuser")
    if filter == "nonuser":
        non_user_transactions = (select(Split.transaction_id).select_from(
            Join(Split,
                 User, (User.account_id == Split.account_id),
                 isouter=True)).group_by(Split.transaction_id).having(
                     func.bool_and(User.id == None)).alias("nut"))

        tid = literal_column("nut.transaction_id")
        transactions = non_user_transactions.join(Transaction,
                                                  Transaction.id == tid)
    else:
        transactions = Transaction.__table__

    q = (select(Transaction.id, Transaction.valid_on, Split.account_id,
                Account.type, Split.amount).select_from(
                    transactions.join(
                        Split, Split.transaction_id == Transaction.id).join(
                            Account, Account.id == Split.account_id)))

    try:
        datetime.strptime(lower, "%Y-%m-%d").date()
    except ValueError:
        not lower or abort(422)  # abort only if a value was supplied but unparsable
    else:
        q = q.where(Transaction.valid_on >= lower)

    try:
        datetime.strptime(upper, "%Y-%m-%d").date()
    except ValueError:
        not upper or abort(422)  # likewise for the upper bound
    else:
        q = q.where(Transaction.valid_on <= upper)

    res = session.execute(json_agg_core(q)).fetchone()[0] or []
    return jsonify(items=res)
Example 48
 def p_recentlyseen_term(p):
     '''recentlyseen_term : OP_RECENTLYSEEN NUMBER string'''
     # A gertty extension
     now = datetime.datetime.utcnow()
     delta = p[2]
     unit = p[3]
     delta = age_to_delta(delta, unit)
     s = select([
         func.datetime(func.max(gertty.db.change_table.c.last_seen),
                       '-%s seconds' % delta)
     ],
                correlate=False)
     p[0] = gertty.db.change_table.c.last_seen >= s
Example 49
def get_udl_record_by_batch_guid(batch_guid, tenant):
    records = []
    with get_prod_connection(tenant=tenant) as connection:
        fact_asmt_outcome = connection.get_table('fact_asmt_outcome')
        fact_block_asmt_outcome = connection.get_table(
            'fact_block_asmt_outcome')
        select_fao = select([
            fact_asmt_outcome.c.student_id.label(STUDENT_ID),
            fact_asmt_outcome.c.asmt_guid.label(ASMT_GUID),
            fact_asmt_outcome.c.date_taken.label(DATE_TAKEN)
        ]).where(
            and_(fact_asmt_outcome.c.batch_guid == batch_guid,
                 fact_asmt_outcome.c.rec_status == 'C'))
        select_fbao = select([
            fact_block_asmt_outcome.c.student_id.label(STUDENT_ID),
            fact_block_asmt_outcome.c.asmt_guid.label(ASMT_GUID),
            fact_block_asmt_outcome.c.date_taken.label(DATE_TAKEN)
        ]).where(
            and_(fact_block_asmt_outcome.c.batch_guid == batch_guid,
                 fact_block_asmt_outcome.c.rec_status == 'C'))
        records = connection.get_result(select_fao.union(select_fbao))
    return records
Example 50
 def _exec_default(self, default, type_):
     if default.is_sequence:
         return self.fire_sequence(default, type_)
     elif default.is_callable:
         return default.arg(self)
     elif default.is_clause_element:
         # TODO: expensive branching here should be
         # pulled into _exec_scalar()
         conn = self.connection
         c = expression.select([default.arg]).compile(bind=conn)
         return conn._execute_compiled(c, (), {}).scalar()
     else:
         return default.arg
Example 51
async def _(graphql_client=graphql_client, db=db, user_factory=user_factory):
    user = await user_factory(
        email="*****@*****.**",
        password="******",
        is_active=True,
        jwt_auth_id=1,
    )

    token = user.create_reset_password_token()

    db_query = select(User).where(User.id == user.id)
    raw_query_user: User = (await db.execute(db_query)).scalar()
    raw_query_user.jwt_auth_id = 2
    repository = UsersRepository(db)
    await repository.save_user(raw_query_user)
    await repository.commit()

    query = """
    mutation($input: ResetPasswordInput!) {
        resetPassword(input: $input) {
            __typename

            ... on ResetPasswordTokenInvalid {
                message
            }
        }
    }
    """

    response = await graphql_client.query(
        query, variables={"input": {"token": token, "newPassword": "******"}}
    )

    assert not response.errors
    assert response.data["resetPassword"]["__typename"] == "ResetPasswordTokenInvalid"

    query = select(User).where(User.email == "*****@*****.**")
    raw_query_user: User = (await db.execute(query)).scalar()
    assert raw_query_user.check_password("hello")
Example 52
def _get_select_users_for_routes_aggregated():
    """ Returns a select which retrieves for every route the ids of
    associated users.
    """
    return \
        select([
            DocumentTag.document_id.label('route_id'),
            func.array_agg(
                DocumentTag.user_id,
                type_=postgresql.ARRAY(Integer)).label('user_ids')
        ]). \
        select_from(DocumentTag). \
        group_by(DocumentTag.document_id)
Example 53
def get_account_verification_token(email=None, username=None):
    email = email and emailer.normalize_address(email)
    username = username and d.get_sysname(username)

    logincreate = d.meta.tables['logincreate']
    statement = select([logincreate.c.token])

    if email:
        statement = statement.where(logincreate.c.email.ilike(email))
    else:
        statement = statement.where(logincreate.c.login_name == username)

    return d.engine.scalar(statement)
Example 54
 def __cache_user_job_count_per_destination(self):
     # Cache the job count if necessary
     if self.user_job_count_per_destination is None and self.app.config.cache_user_job_count:
         self.user_job_count_per_destination = {}
         result = self.sa_session.execute(select([model.Job.table.c.user_id, model.Job.table.c.destination_id, func.count(model.Job.table.c.user_id).label('job_count')]) \
                                         .where(and_(model.Job.table.c.state.in_((model.Job.states.QUEUED, model.Job.states.RUNNING)))) \
                                         .group_by(model.Job.table.c.user_id, model.Job.table.c.destination_id))
         for row in result:
             if row['user_id'] not in self.user_job_count_per_destination:
                 self.user_job_count_per_destination[row['user_id']] = {}
             self.user_job_count_per_destination[row['user_id']][row['destination_id']] = row['job_count']
     elif self.user_job_count_per_destination is None:
         self.user_job_count_per_destination = {}
Example 55
def find_image(service_name):
    image_table = Table('service_images', meta, autoload=True)
    image = select(
        columns=[text("id"),
                 text("image_id"),
                 text("service_name")],
        from_obj=image_table,
        whereclause=text("service_name='%s'" % service_name),
        limit=1).execute().fetchone()

    if image:
        return image.id
    return LEGACY_IMAGE_ID
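
The whereclause above interpolates service_name straight into SQL text, which is injection-prone. A hedged variant, reusing the snippet's image_table and service_name, that binds the value instead:

from sqlalchemy import select, text

image = select(
    columns=[text("id"), text("image_id"), text("service_name")],
    from_obj=image_table,
    whereclause=text("service_name = :name").bindparams(name=service_name),
    limit=1).execute().fetchone()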
Example 56
def store_area_user(connection, galaxy_id, group):
    """
    Store the area users associated with a galaxy
    """
    LOG.info('Storing the area_users')
    count = connection.execute(
        select([func.count(AREA_USER.c.areauser_id)],
               from_obj=AREA_USER.join(AREA)).where(
                   AREA.c.galaxy_id == galaxy_id)).first()[0]
    data = numpy.zeros(count, dtype=data_type_area_user)
    count = 0
    for area_user in connection.execute(
            select([AREA_USER], from_obj=AREA_USER.join(AREA)).where(
                AREA.c.galaxy_id == galaxy_id).order_by(
                    AREA_USER.c.areauser_id)):
        data[count] = (
            area_user[AREA_USER.c.area_id],
            area_user[AREA_USER.c.userid],
            str(area_user[AREA_USER.c.create_time]),
        )
        count += 1
    group.create_dataset('area_user', data=data, compression='gzip')
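
For context, data_type_area_user is not shown above; a hedged reconstruction
consistent with the three fields being written might look like this:

import numpy

# Hypothetical structured dtype; the field names follow the tuple written
# above, the widths are assumptions:
data_type_area_user = numpy.dtype([
    ('area_id', numpy.int64),
    ('userid', numpy.int64),
    ('create_time', 'S26'),  # stringified timestamp
])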
Example n. 57
    def _test_storeCING2db(self):  # Disabled by default: a specific test for services that are not commonly used.
        cingDirTmpTest = os.path.join( cingDirTmp, getCallerName() )
        mkdirs( cingDirTmpTest )
        self.failIf(os.chdir(cingDirTmpTest), msg =
            "Failed to change to test directory for files: " + cingDirTmpTest)

        entry_code = '1brv'
        pdb_id = entry_code
        schema = NRG_DB_NAME
        archive_id = ARCHIVE_NRG_ID
        db_name = PDBJ_DB_NAME
        user_name = PDBJ_DB_USER_NAME

        nTdebug("Starting doStoreCING2db using:")
        nTdebug("entry_code:           %s" % entry_code)
    #    nTdebug("inputDir:             %s" % inputDir)
        nTdebug("archive_id:           %s" % archive_id)
        nTdebug("user_name:            %s" % user_name)
        nTdebug("db_name:              %s" % db_name)
        nTdebug("schema:               %s" % schema)

        csql = CsqlAlchemy(user=user_name, db=db_name, schema=schema)
        self.assertFalse( csql.connect(), "Failed to connect to DB")
        csql.autoload()

        execute = csql.conn.execute
        centry = csql.cingentry

        result = execute(centry.delete().where(centry.c.pdb_id == pdb_id))

        if result.rowcount:
            nTdebug("Removed original entries numbering: %s" % result.rowcount)
            if result.rowcount > 1:
                nTerror("Removed more than the expected ONE entry; this could be serious.")
                return True
        else:
            nTdebug("No original entry present yet.")
        # end if
        datetime_first = datetime.datetime(2011, 4, 7, 11, 12, 26)
        nTdebug("Trying datetime_first %s" % datetime_first)
        result = execute(centry.insert().values(
            pdb_id=pdb_id,
            name=entry_code,
            rev_first=9,
            rev_last=99,
            timestamp_first=datetime_first
#            datetime_last = datetime_last
        ))
        entry_id_list = execute(select([centry.c.entry_id]).where(centry.c.pdb_id==pdb_id)).fetchall()
        self.assertTrue( entry_id_list, "Failed to get the id of the inserted entry but got: %s" % entry_id_list)
        self.assertEqual( len( entry_id_list ), 1, "Failed to get ONE id of the inserted entry but got: %s" % entry_id_list)
Example n. 58
def imdb_query(session):
    import time
    from flexget.plugins.metainfo.imdb_lookup import Movie
    from flexget.plugins.cli.performance import log_query_count
    from sqlalchemy.sql.expression import select
    from progressbar import ProgressBar, Percentage, Bar, ETA
    from sqlalchemy.orm import joinedload_all

    imdb_urls = []

    log.info('Getting imdb_urls ...')
    # query so that we avoid loading whole object (maybe cached?)
    for id, url in session.execute(select([Movie.id, Movie.url])):
        imdb_urls.append(url)
    log.info('Got %i urls from database' % len(imdb_urls))
    if not imdb_urls:
        log.info('so .. aborting')
        return

    # commence testing

    widgets = [
        'Benchmarking - ',
        ETA(), ' ',
        Percentage(), ' ',
        Bar(left='[', right=']')
    ]
    bar = ProgressBar(widgets=widgets, maxval=len(imdb_urls)).start()

    log_query_count('test')
    start_time = time.time()
    for index, url in enumerate(imdb_urls):
        bar.update(index)

        #movie = session.query(Movie).filter(Movie.url == url).first()
        #movie = session.query(Movie).options(subqueryload(Movie.genres)).filter(Movie.url == url).one()

        movie = session.query(Movie).\
            options(joinedload_all(Movie.genres, Movie.languages,
            Movie.actors, Movie.directors)).\
            filter(Movie.url == url).first()

        # access its members so they're loaded
        var = [x.name for x in movie.genres]
        var = [x.name for x in movie.directors]
        var = [x.name for x in movie.actors]
        var = [x.name for x in movie.languages]

    log_query_count('test')
    took = time.time() - start_time
    log.debug('Took %.2f seconds to query %i movies' % (took, len(imdb_urls)))
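
Note that joinedload_all was deprecated and later removed from SQLAlchemy; on
1.4+/2.0 the same eager loading would be spelled with separate joinedload
options, roughly as in this sketch (same relationships assumed):

from sqlalchemy.orm import joinedload

# Separate joinedload options replace the removed joinedload_all:
movie = session.query(Movie).\
    options(joinedload(Movie.genres), joinedload(Movie.languages),
            joinedload(Movie.actors), joinedload(Movie.directors)).\
    filter(Movie.url == url).first()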
Example n. 59
def exists_in_table(session: Session, table_: Table, *criteria: Any) -> bool:
    """
    Implements an efficient way of detecting if a record or records exist;
    should be faster than ``COUNT(*)`` in some circumstances.

    Args:
        session: SQLAlchemy :class:`Session`, :class:`Engine`, or
            :class:`Connection` object
        table_: SQLAlchemy :class:`Table` object
        criteria: optional SQLAlchemy "where" criteria

    Returns:
        a boolean

    Prototypical use:

    .. code-block:: python

        return exists_in_table(session,
                               table,
                               column(fieldname1) == value1,
                               column(fieldname2) == value2)
    """
    exists_clause = exists().select_from(table_)
    # ... EXISTS (SELECT * FROM tablename)
    for criterion in criteria:
        exists_clause = exists_clause.where(criterion)
    # ... EXISTS (SELECT * FROM tablename WHERE ...)

    if session.get_bind().dialect.name == SqlaDialectName.MSSQL:
        query = select([literal(True)]).where(exists_clause)
        # ... SELECT 1 WHERE EXISTS (SELECT * FROM tablename WHERE ...)
    else:
        query = select([exists_clause])
        # ... SELECT EXISTS (SELECT * FROM tablename WHERE ...)

    result = session.execute(query).scalar()
    return bool(result)
Example n. 60
def get_asmt_rec_id(guid_batch, tenant_name, asmt_rec_id_info):
    '''
    Returns asmt_rec_id from the dim_asmt table.
    Steps:
    1. Get guid_asmt from the integration table INT_SBAC_ASMT.
    2. Select asmt_rec_id from dim_asmt by the same guid_asmt obtained in
       step 1. It should have exactly one value.
    '''
    source_table_name = asmt_rec_id_info['source_table']
    guid_column_name_in_source = asmt_rec_id_info['guid_column_in_source']
    target_table_name = asmt_rec_id_info['target_table']
    guid_column_name_in_target = asmt_rec_id_info['guid_column_name']
    rec_id_column_name = asmt_rec_id_info['rec_id']

    # connect to integration table, to get the value of guid_asmt
    with get_udl_connection() as udl_conn:
        int_table = udl_conn.get_table(source_table_name)
        query = select([int_table.c[guid_column_name_in_source]],
                       from_obj=int_table,
                       limit=1)
        query = query.where(int_table.c['guid_batch'] == guid_batch)
        results = udl_conn.get_result(query)
        # Bind the name even when no row matches, to avoid a NameError below.
        guid_column_value = results[0][guid_column_name_in_source] if results else None

    # connect to target table, to get the value of asmt_rec_id
    with get_target_connection(tenant_name, guid_batch) as target_conn:
        dim_asmt = target_conn.get_table(target_table_name)
        query = select([dim_asmt.c[rec_id_column_name]],
                       from_obj=dim_asmt,
                       limit=1)
        query = query.where(
            dim_asmt.c[guid_column_name_in_target] == guid_column_value)
        query = query.where(dim_asmt.c['batch_guid'] == guid_batch)
        results = target_conn.get_result(query)
        # Same guard: return None rather than raising NameError when absent.
        asmt_rec_id = results[0][rec_id_column_name] if results else None

    return asmt_rec_id
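
A hedged example of the asmt_rec_id_info mapping this function expects; the
keys are dictated by the lookups above, while the values are illustrative:

# Hypothetical configuration; only the keys come from the code above:
asmt_rec_id_info = {
    'source_table': 'int_sbac_asmt',
    'guid_column_in_source': 'guid_asmt',
    'target_table': 'dim_asmt',
    'guid_column_name': 'asmt_guid',
    'rec_id': 'asmt_rec_id',
}
rec_id = get_asmt_rec_id(guid_batch, tenant_name, asmt_rec_id_info)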