def monitoredSitesLocality(request):
    req = request.POST
    if 'name_view' in req:
        print('name_view')
        try:
            proto_view_Table = Base.metadata.tables[proto_view_Name]
            join_table = join(proto_view_Table, Station,
                              proto_view_Table.c['TSta_PK_ID'] == Station.id)
        except:
            proto_view_Table = dict_proto[proto_view_Name]()
            join_table = join(proto_view_Table, Station,
                              proto_view_Table.FK_TSta_ID == Station.id)
        slct = select([Station.locality]).distinct().select_from(join_table)
        data = DBSession.execute(slct).fetchall()
        return [row['Place' or 'Locality'] for row in data]
    else:
        if 'Region' in req:
            query = select([Station.locality]).distinct().where(Station.area == req.get('Region'))
        else:
            query = select([Station.locality]).distinct()
        data = DBSession.execute(query).fetchall()
        return [row[0] for row in data]
def determine_fetches(db_session, cred):
    for thread in db_session.query(Thread).filter_by(closed=False):
        update_thread_status(thread, cred)
    db_session.flush()

    incomplete_page_ids = (
        sa.select([ThreadPost.page_id])
        .group_by(ThreadPost.page_id)
        .having(sa.func.count(ThreadPost.id) < 40)
        .as_scalar()
    )
    incomplete_pages = sa.select(
        [ThreadPage.thread_id, ThreadPage.page_num],
        from_obj=sa.join(ThreadPage, Thread)
    ).where(sa.and_(ThreadPage.id.in_(incomplete_page_ids),
                    Thread.closed == sa.false()))

    fetch_status = (
        sa.select(
            [ThreadPage.thread_id.label("thread_id"),
             sa.func.max(ThreadPage.page_num).label("last_fetched_page")]
        )
        .group_by(ThreadPage.thread_id)
        .alias("fetch_status")
    )
    unfetched_pages = sa.select(
        [
            Thread.id.label("thread_id"),
            sa.func.generate_series(
                fetch_status.c.last_fetched_page + 1,
                Thread.page_count).label("page_num"),
        ],
        from_obj=sa.join(Thread, fetch_status,
                         Thread.id == fetch_status.c.thread_id),
    )

    fetched_first_pages = sa.select([ThreadPage.thread_id]).where(
        ThreadPage.page_num == 1).as_scalar()
    unfetched_first_pages = sa.select(
        [Thread.id.label("thread_id"),
         sa.literal(1, sa.Integer).label("page_num")],
        from_obj=Thread
    ).where(Thread.id.notin_(fetched_first_pages))

    q = sa.union(incomplete_pages, unfetched_pages, unfetched_first_pages)
    q = q.order_by(q.c.thread_id.asc(), q.c.page_num.asc())
    return db_session.execute(q).fetchall()
def getIndivEquipment(request):
    session = request.dbsession
    id_indiv = request.matchdict['id']
    table = Base.metadata.tables['IndividualEquipment']
    joinTable = join(table, Sensor, table.c['FK_Sensor'] == Sensor.ID)
    joinTable = join(joinTable, SensorType, Sensor.FK_SensorType == SensorType.ID)

    query = select([table.c['StartDate'],
                    table.c['EndDate'],
                    Sensor.UnicIdentifier,
                    Sensor.ID.label('SensorID'),
                    table.c['FK_Individual'],
                    SensorType.Name.label('Type')]
                   ).select_from(joinTable
                   ).where(table.c['FK_Individual'] == id_indiv
                   ).order_by(desc(table.c['StartDate']))

    result = session.execute(query).fetchall()
    response = []
    for row in result:
        curRow = OrderedDict(row)
        curRow['StartDate'] = curRow['StartDate'].strftime('%Y-%m-%d %H:%M:%S')
        if curRow['EndDate'] is not None:
            curRow['EndDate'] = curRow['EndDate'].strftime('%Y-%m-%d %H:%M:%S')
        else:
            curRow['EndDate'] = ''
        response.append(curRow)
    return response
def monitoredSitesArea(request):
    req = request.POST
    if 'name_view' in req:
        print('name_view')
        try:
            proto_view_Table = Base.metadata.tables[proto_view_Name]
            join_table = join(proto_view_Table, Station,
                              proto_view_Table.c['TSta_PK_ID'] == Station.id)
        except:
            proto_view_Table = dict_proto[proto_view_Name]()
            join_table = join(proto_view_Table, Station,
                              proto_view_Table.FK_TSta_ID == Station.id)
        print(proto_view_Table)
        slct = select([Station.area]).distinct().select_from(join_table)
        data = DBSession.execute(slct).fetchall()
        return [row['Region' or 'Area'] for row in data]
    else:
        table = Base.metadata.tables['geo_CNTRIES_and_RENECO_MGMTAreas']
        slct = select([table.c['Place']]).distinct()
        data = DBSession.execute(slct).fetchall()
        return [row[0] for row in data]
def GetFlatDataList(self, searchInfo=None, getFieldWorkers=True):
    ''' Override parent function to include management of
    Observation/Protocols and fieldWorkers '''
    fullQueryJoinOrdered = self.GetFullQuery(searchInfo)
    result = self.ObjContext.execute(fullQueryJoinOrdered).fetchall()
    data = []

    if getFieldWorkers:
        # listID = list(map(lambda x: x['ID'], result))
        queryCTE = fullQueryJoinOrdered.cte()
        joinFW = join(Station_FieldWorker, User,
                      Station_FieldWorker.FK_FieldWorker == User.id)
        joinTable = join(queryCTE, joinFW,
                         queryCTE.c['ID'] == Station_FieldWorker.FK_Station)
        query = select([Station_FieldWorker.FK_Station, User.Login]
                       ).select_from(joinTable)
        FieldWorkers = self.ObjContext.execute(query).fetchall()

        list_ = {}
        for x, y in FieldWorkers:
            list_.setdefault(x, []).append(y)

        for row in result:
            row = OrderedDict(row)
            try:
                row['FK_FieldWorker_FieldWorkers'] = list_[row['ID']]
            except:
                pass
            data.append(row)
    else:
        for row in result:
            row = OrderedDict(row)
            data.append(row)
    return data
def test_moving_plugin_attributes(self):
    clusters = self.meta.tables['clusters']
    attributes = self.meta.tables['attributes']
    plugins = self.meta.tables['plugins']
    cluster_plugins = self.meta.tables['cluster_plugins']

    query = sa.select([attributes.c.editable])\
        .select_from(
            sa.join(
                attributes, clusters,
                attributes.c.cluster_id == clusters.c.id))
    result = jsonutils.loads(db.execute(query).fetchone()[0])
    self.assertItemsEqual(result, {})

    query = sa.select([cluster_plugins.c.attributes])\
        .select_from(
            sa.join(
                cluster_plugins, plugins,
                cluster_plugins.c.plugin_id == plugins.c.id))\
        .where(plugins.c.name == 'test_plugin_a')
    result = jsonutils.loads(db.execute(query).fetchone()[0])
    self.assertNotIn('metadata', result)
    self.assertItemsEqual(result['attribute'], {
        'value': 'value',
        'type': 'text',
        'description': 'description',
        'weight': 25,
        'label': 'label'
    })
def getSensorEquipment(request):
    session = request.dbsession
    id = request.matchdict['id']
    curSensor = session.query(Sensor).get(id)
    curSensorType = curSensor.GetType().Name

    if ('RFID' in curSensorType.upper()):
        table = Base.metadata.tables['MonitoredSiteEquipment']
        joinTable = join(table, Sensor, table.c['FK_Sensor'] == Sensor.ID)
        joinTable = join(joinTable, MonitoredSite,
                         table.c['FK_MonitoredSite'] == MonitoredSite.ID)
        query = select([table.c['StartDate'],
                        table.c['EndDate'],
                        Sensor.UnicIdentifier,
                        MonitoredSite.Name,
                        MonitoredSite.ID.label('MonitoredSiteID')]
                       ).select_from(joinTable
                       ).where(table.c['FK_Sensor'] == id
                       ).order_by(desc(table.c['StartDate']))
    elif (curSensorType.lower() in ['gsm', 'satellite', 'vhf']):
        table = Base.metadata.tables['IndividualEquipment']
        joinTable = join(table, Sensor, table.c['FK_Sensor'] == Sensor.ID)
        query = select([table.c['StartDate'],
                        table.c['EndDate'],
                        table.c['FK_Individual'],
                        Sensor.UnicIdentifier]
                       ).select_from(joinTable
                       ).where(table.c['FK_Sensor'] == id
                       ).order_by(desc(table.c['StartDate']))
    else:
        return 'bad request'

    result = session.execute(query).fetchall()
    response = []
    for row in result:
        curRow = OrderedDict(row)
        curRow['StartDate'] = curRow['StartDate'].strftime('%Y-%m-%d %H:%M:%S')
        curRow['EndDate'] = curRow['EndDate'].strftime(
            '%Y-%m-%d %H:%M:%S') if curRow['EndDate'] is not None else None
        curRow['format'] = 'YYYY-MM-DD HH:mm:ss'
        response.append(curRow)
    return response
def getEquipment(self):
    id_site = self.objectDB.ID
    table = Base.metadata.tables['MonitoredSiteEquipment']
    joinTable = join(table, Sensor, table.c['FK_Sensor'] == Sensor.ID)
    joinTable = join(joinTable, SensorType, Sensor.FK_SensorType == SensorType.ID)

    query = select([table.c['StartDate'],
                    table.c['EndDate'],
                    Sensor.UnicIdentifier,
                    table.c['FK_MonitoredSite'],
                    SensorType.Name.label('Type')]
                   ).select_from(joinTable
                   ).where(table.c['FK_MonitoredSite'] == id_site
                   ).order_by(desc(table.c['StartDate']))

    result = self.session.execute(query).fetchall()
    response = []
    for row in result:
        curRow = OrderedDict(row)
        curRow['StartDate'] = curRow['StartDate'].strftime('%Y-%m-%d %H:%M:%S')
        if curRow['EndDate'] is not None:
            curRow['EndDate'] = curRow['EndDate'].strftime('%Y-%m-%d %H:%M:%S')
        else:
            curRow['EndDate'] = ''
        response.append(curRow)
    return response
def _rel_child(parent_acl_ids, source=True):
    """Get left side of relationships mappings through source."""
    rel_table = all_models.Relationship.__table__
    acl_table = all_models.AccessControlList.__table__
    parent_acr = all_models.AccessControlRole.__table__.alias(
        "parent_acr_{}".format(source)
    )
    child_acr = all_models.AccessControlRole.__table__.alias(
        "child_acr_{}".format(source)
    )

    if source:
        object_id = rel_table.c.destination_id
        object_type = rel_table.c.destination_type
    else:
        object_id = rel_table.c.source_id
        object_type = rel_table.c.source_type

    acl_link = sa.and_(
        acl_table.c.object_id == rel_table.c.id,
        acl_table.c.object_type == all_models.Relationship.__name__,
    )

    select_statement = sa.select([
        acl_table.c.person_id.label("person_id"),
        child_acr.c.id.label("ac_role_id"),
        object_id.label("object_id"),
        object_type.label("object_type"),
        sa.func.now().label("created_at"),
        sa.literal(login.get_current_user_id()).label("modified_by_id"),
        sa.func.now().label("updated_at"),
        acl_table.c.id.label("parent_id"),
        acl_table.c.id.label("parent_id_nn"),
    ]).select_from(
        sa.join(
            sa.join(
                sa.join(
                    rel_table,
                    acl_table,
                    acl_link
                ),
                parent_acr,
                parent_acr.c.id == acl_table.c.ac_role_id
            ),
            child_acr,
            child_acr.c.parent_id == parent_acr.c.id
        )
    ).where(
        sa.and_(
            acl_table.c.id.in_(parent_acl_ids),
            child_acr.c.object_type == object_type,
        )
    )
    return select_statement
def user_per_month(self, trans, **kwd):
    params = util.Params(kwd)
    message = ''
    email = util.restore_text(params.get('email', ''))
    specs = sorter('date', kwd)
    sort_id = specs.sort_id
    order = specs.order
    arrow = specs.arrow
    _order = specs.exc_order

    q = sa.select((self.select_month(model.Job.table.c.create_time).label('date'),
                   sa.func.count(model.Job.table.c.id).label('total_jobs')),
                  whereclause=model.User.table.c.email == email,
                  from_obj=[sa.join(model.Job.table, model.User.table)],
                  group_by=self.group_by_month(model.Job.table.c.create_time),
                  order_by=[_order])

    all_jobs_per_user = sa.select((model.Job.table.c.create_time.label('date'),
                                   model.Job.table.c.id.label('job_id')),
                                  whereclause=sa.and_(model.User.table.c.email == email),
                                  from_obj=[sa.join(model.Job.table, model.User.table)])

    trends = dict()
    for job in all_jobs_per_user.execute():
        job_day = int(job.date.strftime("%-d")) - 1
        job_month = int(job.date.strftime("%-m"))
        job_month_name = job.date.strftime("%B")
        job_year = job.date.strftime("%Y")
        key = str(job_month_name + job_year)
        try:
            trends[key][job_day] += 1
        except KeyError:
            job_year = int(job_year)
            wday, day_range = calendar.monthrange(job_year, job_month)
            trends[key] = [0] * day_range
            trends[key][job_day] += 1

    jobs = []
    for row in q.execute():
        jobs.append((row.date.strftime("%Y-%m"),
                     row.total_jobs,
                     row.date.strftime("%B"),
                     row.date.strftime("%Y")))

    return trans.fill_template('/webapps/reports/jobs_user_per_month.mako',
                               order=order,
                               arrow=arrow,
                               sort_id=sort_id,
                               id=kwd.get('id'),
                               trends=trends,
                               email=util.sanitize_text(email),
                               jobs=jobs,
                               message=message)
def _propagate_to_wf_children(new_wf_acls, child_class):
    """Propagate newly added roles to workflow objects.

    Args:
      new_wf_acls: list of all newly created acl entries for workflows

    Returns:
      list of newly created acl entries for task groups.
    """
    child_table = child_class.__table__
    acl_table = all_models.AccessControlList.__table__
    acr_table = all_models.AccessControlRole.__table__.alias("parent_acr")
    acr_mapped_table = all_models.AccessControlRole.__table__.alias("mapped")

    current_user_id = login.get_current_user_id()

    select_statement = sa.select([
        acl_table.c.person_id,
        acr_mapped_table.c.id,
        child_table.c.id,
        sa.literal(child_class.__name__),
        sa.func.now(),
        sa.literal(current_user_id),
        sa.func.now(),
        acl_table.c.id.label("parent_id"),
        acl_table.c.id.label("parent_id_nn"),
    ]).select_from(
        sa.join(
            sa.join(
                sa.join(
                    child_table,
                    acl_table,
                    sa.and_(
                        acl_table.c.object_id == child_table.c.workflow_id,
                        acl_table.c.object_type == all_models.Workflow.__name__,
                    )
                ),
                acr_table,
            ),
            acr_mapped_table,
            acr_mapped_table.c.name == sa.func.concat(acr_table.c.name, " Mapped")
        )
    ).where(
        acl_table.c.id.in_(new_wf_acls)
    )

    acl_utils.insert_select_acls(select_statement)
    return _get_child_ids(new_wf_acls, child_class)
def bulk_get(self, queue, message_ids, project):
    if project is None:
        project = ''

    message_ids = [id for id in map(utils.msgid_decode, message_ids)
                   if id is not None]

    statement = sa.sql.select([tables.Messages.c.id,
                               tables.Messages.c.body,
                               tables.Messages.c.ttl,
                               tables.Messages.c.created])

    and_stmt = [tables.Messages.c.id.in_(message_ids),
                tables.Queues.c.name == queue,
                tables.Queues.c.project == project,
                tables.Messages.c.ttl >
                sfunc.now() - tables.Messages.c.created]

    j = sa.join(tables.Messages, tables.Queues,
                tables.Messages.c.qid == tables.Queues.c.id)

    statement = statement.select_from(j).where(sa.and_(*and_stmt))

    now = timeutils.utcnow_ts()
    records = self.driver.run(statement)
    for id, body, ttl, created in records:
        yield {
            'id': utils.msgid_encode(id),
            'ttl': ttl,
            'age': now - calendar.timegm(created.timetuple()),
            'body': json.loads(body),
        }
def get(self, queue, message_id, project, count=False):
    if project is None:
        project = ''

    mid = utils.msgid_decode(message_id)
    if mid is None:
        raise errors.MessageDoesNotExist(message_id, queue, project)

    try:
        j = sa.join(tables.Messages, tables.Queues,
                    tables.Messages.c.qid == tables.Queues.c.id)

        sel = sa.sql.select([tables.Messages.c.body,
                             tables.Messages.c.ttl,
                             tables.Messages.c.created])

        if count:
            sel = sa.sql.select([sfunc.count(tables.Messages.c.id)])

        sel = sel.select_from(j)
        sel = sel.where(sa.and_(tables.Messages.c.id == mid,
                                tables.Queues.c.project == project,
                                tables.Queues.c.name == queue,
                                tables.Messages.c.ttl >
                                sfunc.now() - tables.Messages.c.created))

        return self.driver.get(sel)[0]
    except utils.NoResult:
        raise errors.MessageDoesNotExist(message_id, queue, project)
def getIndivHistory(request):
    session = request.dbsession
    id = request.matchdict['id']
    tableJoin = join(IndividualDynPropValue, IndividualDynProp,
                     IndividualDynPropValue.FK_IndividualDynProp == IndividualDynProp.ID)
    query = select([IndividualDynPropValue, IndividualDynProp.Name]
                   ).select_from(tableJoin
                   ).where(IndividualDynPropValue.FK_Individual == id
                   ).order_by(desc(IndividualDynPropValue.StartDate))

    result = session.execute(query).fetchall()
    response = []
    for row in result:
        curRow = OrderedDict(row)
        dictRow = {}
        for key in curRow:
            if curRow[key] is not None:
                if 'Value' in key:
                    dictRow['value'] = curRow[key]
                elif 'FK' not in key:
                    dictRow[key] = curRow[key]
        dictRow['StartDate'] = curRow['StartDate'].strftime('%Y-%m-%d %H:%M:%S')
        response.append(dictRow)
    return response
def bulk_get(self, queue, message_ids, project):
    if project is None:
        project = ''

    message_ids = [id for id in map(utils.msgid_decode, message_ids)
                   if id is not None]

    statement = sa.sql.select([tables.Messages.c.id,
                               tables.Messages.c.body,
                               tables.Messages.c.ttl,
                               tables.Messages.c.created,
                               tables.Messages.c.cid])

    and_stmt = [tables.Messages.c.id.in_(message_ids)]
    and_stmt.extend(self._and_stmt_with_ttl(queue, project))

    j = sa.join(tables.Messages, tables.Queues,
                tables.Messages.c.qid == tables.Queues.c.id)

    statement = statement.select_from(j).where(sa.and_(*and_stmt))

    now = timeutils.utcnow_ts()
    records = self.driver.run(statement)
    for id, body, ttl, created, cid in records:
        yield {
            'id': utils.msgid_encode(int(id)),
            'ttl': ttl,
            'age': now - calendar.timegm(created.timetuple()),
            'body': utils.json_decode(body),
            'claim_id': utils.cid_encode(cid) if cid else None,
        }
def thd(conn):
    bs_tbl = self.db.model.buildsets
    ch_tbl = self.db.model.changes
    j = sa.join(self.db.model.buildsets, self.db.model.sourcestampsets)
    j = j.join(self.db.model.sourcestamps)
    j = j.join(self.db.model.sourcestamp_changes)
    j = j.join(ch_tbl)
    q = sa.select(columns=[bs_tbl], from_obj=[j], distinct=True)
    q = q.order_by(sa.desc(bs_tbl.c.id))
    q = q.limit(count)

    if complete is not None:
        if complete:
            q = q.where(bs_tbl.c.complete != 0)
        else:
            q = q.where((bs_tbl.c.complete == 0) |
                        (bs_tbl.c.complete == None))
    if branch:
        q = q.where(ch_tbl.c.branch == branch)
    if repository:
        q = q.where(ch_tbl.c.repository == repository)

    res = conn.execute(q)
    return list(reversed([self._row2dict(row) for row in res.fetchall()]))
def _get(self, queue, message_id, project, count=False):
    if project is None:
        project = ''

    mid = utils.msgid_decode(message_id)
    if mid is None:
        raise errors.MessageDoesNotExist(message_id, queue, project)

    try:
        j = sa.join(tables.Messages, tables.Queues,
                    tables.Messages.c.qid == tables.Queues.c.id)

        sel = sa.sql.select([tables.Messages.c.body,
                             tables.Messages.c.ttl,
                             tables.Messages.c.created,
                             tables.Messages.c.cid])

        if count:
            sel = sa.sql.select([sfunc.count(tables.Messages.c.id)])

        sel = sel.select_from(j)
        and_stmt = [tables.Messages.c.id == mid]
        and_stmt.extend(self._and_stmt_with_ttl(queue, project))
        sel = sel.where(sa.and_(*and_stmt))

        return self.driver.get(sel)
    except utils.NoResult:
        raise errors.MessageDoesNotExist(message_id, queue, project)
def job(request, name):
    engine = yield from aiopg.sa.create_engine(DATABASE_URL)
    with (yield from engine) as conn:
        jobs = yield from conn.execute(select(
            [Job.__table__, Maintainer.__table__, ],
            use_labels=True
        ).select_from(join(
            Maintainer.__table__,
            Job.__table__,
            Maintainer.id == Job.maintainer_id
        )).where(Job.name == name).limit(1))
        job = yield from jobs.first()

        runs = yield from conn.execute(
            select([Run.id, Run.failed, Run.start_time, Run.end_time]).
            where(Run.job_id == job.job_id).
            order_by(Run.id.desc())
        )

    return request.render('job.html', {
        "job": job,
        "runs": runs,
        "next_run": humanize.naturaltime(job.job_scheduled),
    })
def location_graph(request):
    session = request.dbsession
    joinTable = join(Individual_Location, Sensor,
                     Individual_Location.FK_Sensor == Sensor.ID)
    data = []
    query = select([Individual_Location.type_,
                    func.count("*").label("nb")]
                   ).group_by(Individual_Location.type_)

    global graphDataDate
    global indivLocationData

    d = datetime.datetime.now() - datetime.timedelta(days=1)
    if graphDataDate["indivLocationData"] is None or graphDataDate["indivLocationData"] < d:
        graphDataDate["indivLocationData"] = datetime.datetime.now()
        for row in session.execute(query).fetchall():
            curRow = OrderedDict(row)
            lab = curRow["type_"].upper()
            if "ARG" in lab:
                try:
                    nbArg = nbArg + curRow["nb"]
                except:
                    nbArg = curRow["nb"]
            else:
                data.append({"value": curRow["nb"], "label": lab})
        data.append({"value": nbArg, "label": "ARGOS"})
        data.sort(key=itemgetter("label"))
        indivLocationData = data
    else:
        print("indiv loc already fetched")
    return indivLocationData
def active_property_groups(cls, when=None):
    return select([PropertyGroup]).select_from(
        join(PropertyGroup, Membership).join(cls)
    ).where(
        Membership.active(when)
    )
def active_traffic_groups(cls, when=None):
    return select([TrafficGroup]).select_from(
        join(TrafficGroup, Membership).join(cls)
    ).where(
        Membership.active(when)
    )
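# Hypothetical usage sketch, not part of the original snippets above: it only
# illustrates how a select() helper such as active_property_groups() or
# active_traffic_groups() might be executed. `session` (a SQLAlchemy session
# bound to the same models) and `user` (an instance of the class exposing the
# helper) are assumed names, not taken from the source.
def current_traffic_group_rows(session, user, when=None):
    stmt = user.active_traffic_groups(when)
    # select([TrafficGroup]) yields plain result rows; fetchall() keeps the
    # sketch compatible with both 1.3-style and newer result APIs.
    return session.execute(stmt).fetchall()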
def top_tags(cls, limit=10, returned_tag_info='object'):  # by package
    assert returned_tag_info in ('name', 'id', 'object')
    tag = table('tag')
    package_tag = table('package_tag')
    package = table('package')
    if returned_tag_info == 'name':
        from_obj = [package_tag.join(tag)]
        tag_column = tag.c.name
    else:
        from_obj = None
        tag_column = package_tag.c.tag_id
    j = join(package_tag, package,
             package_tag.c.package_id == package.c.id)
    s = select([tag_column, func.count(package_tag.c.package_id)],
               from_obj=from_obj).\
        select_from(j).\
        where(and_(package_tag.c.state == 'active',
                   package.c.private == False,
                   package.c.state == 'active'))
    s = s.group_by(tag_column).\
        order_by(func.count(package_tag.c.package_id).desc()).\
        limit(limit)
    res_col = model.Session.execute(s).fetchall()
    if returned_tag_info in ('id', 'name'):
        return res_col
    elif returned_tag_info == 'object':
        res_tags = [(model.Session.query(model.Tag).get(text_type(tag_id)), val)
                    for tag_id, val in res_col]
        return res_tags
def test_nested_joins(self):
    task, Task_Type, Joined, prj, task_type, msg = (self.tables.task,
                                                    self.classes.Task_Type,
                                                    self.classes.Joined,
                                                    self.tables.prj,
                                                    self.tables.task_type,
                                                    self.tables.msg)

    # this is testing some subtle column resolution stuff,
    # concerning corresponding_column() being extremely accurate
    # as well as how mapper sets up its column properties
    mapper(Task_Type, task_type)

    tsk_cnt_join = sa.outerjoin(prj, task, task.c.prj_id == prj.c.id)

    j = sa.outerjoin(task, msg, task.c.id == msg.c.task_id)
    jj = sa.select([task.c.id.label('task_id'),
                    sa.func.count(msg.c.id).label('props_cnt')],
                   from_obj=[j],
                   group_by=[task.c.id]).alias('prop_c_s')
    jjj = sa.join(task, jj, task.c.id == jj.c.task_id)

    mapper(Joined, jjj, properties=dict(
        type=relationship(Task_Type, lazy='joined')))

    session = create_session()

    eq_(session.query(Joined).limit(10).offset(0).one(),
        Joined(id=1, title='task 1', props_cnt=0))
def test_cross_schema_reflection_one(self):
    meta1 = self.metadata

    users = Table('users', meta1,
                  Column('user_id', Integer, primary_key=True),
                  Column('user_name', String(30), nullable=False),
                  schema='test_schema')
    addresses = Table(
        'email_addresses', meta1,
        Column('address_id', Integer, primary_key=True),
        Column('remote_user_id', Integer, ForeignKey(users.c.user_id)),
        Column('email_address', String(20)),
        schema='test_schema')
    meta1.create_all()

    meta2 = MetaData(testing.db)
    addresses = Table('email_addresses', meta2, autoload=True,
                      schema='test_schema')
    users = Table('users', meta2, mustexist=True, schema='test_schema')

    j = join(users, addresses)
    self.assert_((users.c.user_id ==
                  addresses.c.remote_user_id).compare(j.onclause))
def test_composite_fk(self):
    """test reflection of composite foreign keys"""

    meta = MetaData(testing.db)
    multi = Table(
        'multi', meta,
        Column('multi_id', sa.Integer, primary_key=True),
        Column('multi_rev', sa.Integer, primary_key=True),
        Column('multi_hoho', sa.Integer, primary_key=True),
        Column('name', sa.String(50), nullable=False),
        Column('val', sa.String(100)),
        test_needs_fk=True,
    )
    multi2 = Table('multi2', meta,
                   Column('id', sa.Integer, primary_key=True),
                   Column('foo', sa.Integer),
                   Column('bar', sa.Integer),
                   Column('lala', sa.Integer),
                   Column('data', sa.String(50)),
                   sa.ForeignKeyConstraint(['foo', 'bar', 'lala'],
                                           ['multi.multi_id',
                                            'multi.multi_rev',
                                            'multi.multi_hoho']),
                   test_needs_fk=True,
                   )
    meta.create_all()

    try:
        meta2 = MetaData()
        table = Table('multi', meta2, autoload=True,
                      autoload_with=testing.db)
        table2 = Table('multi2', meta2, autoload=True,
                       autoload_with=testing.db)
        self.assert_tables_equal(multi, table)
        self.assert_tables_equal(multi2, table2)

        j = sa.join(table, table2)
        self.assert_(sa.and_(table.c.multi_id == table2.c.foo,
                             table.c.multi_rev == table2.c.bar,
                             table.c.multi_hoho == table2.c.lala).compare(j.onclause))
    finally:
        meta.drop_all()
def apply_default_value(self, column):
    if column.default:
        execute = self.table.migration.conn.execute
        val = column.default.arg
        table = self.table.migration.metadata.tables[self.table.name]
        table.append_column(column)
        cname = getattr(table.c, column.name)

        if column.default.is_callable:
            Table = self.table.migration.metadata.tables['system_model']
            Column = self.table.migration.metadata.tables['system_column']
            j1 = join(Table, Column, Table.c.name == Column.c.model)
            query = select([Column.c.name]).select_from(j1)
            query = query.where(Column.c.primary_key.is_(True))
            query = query.where(Table.c.table == self.table.name)
            columns = [x[0] for x in execute(query).fetchall()]

            query = select([func.count()]).select_from(table)
            query = query.where(cname.is_(None))
            nb_row = self.table.migration.conn.execute(query).fetchone()[0]
            for offset in range(nb_row):
                query = select(columns).select_from(table)
                query = query.where(cname.is_(None)).limit(1)
                res = execute(query).fetchone()
                where = and_(
                    *[getattr(table.c, x) == res[x] for x in columns])
                query = update(table).where(where).values(
                    {cname: val(None)})
                execute(query)
        else:
            query = update(table).where(cname.is_(None)).values(
                {cname: val})
            execute(query)
def test_schema_reflection(self):
    """note: this test requires that the 'test_schema' schema be
    separate and accessible by the test user"""

    meta1 = self.metadata
    users = Table('users', meta1,
                  Column('user_id', Integer, primary_key=True),
                  Column('user_name', String(30), nullable=False),
                  schema='test_schema')
    addresses = Table(
        'email_addresses', meta1,
        Column('address_id', Integer, primary_key=True),
        Column('remote_user_id', Integer, ForeignKey(users.c.user_id)),
        Column('email_address', String(20)),
        schema='test_schema',
    )
    meta1.create_all()

    meta2 = MetaData(testing.db)
    addresses = Table('email_addresses', meta2, autoload=True,
                      schema='test_schema')
    users = Table('users', meta2, mustexist=True, schema='test_schema')

    j = join(users, addresses)
    self.assert_((users.c.user_id ==
                  addresses.c.remote_user_id).compare(j.onclause))
def indiv_details(request):
    params = int(request.matchdict['id'])
    join_table = join(SatTrx, ObjectsCaracValues,
                      SatTrx.ptt == cast(ObjectsCaracValues.value, Integer)
                      ).join(Individual, ObjectsCaracValues.object == Individual.id)
    query = select([ObjectsCaracValues.value.label('id'),
                    Individual.id.label('ind_id'),
                    Individual.survey_type.label('survey_type'),
                    Individual.status.label('status'),
                    Individual.monitoring_status.label('monitoring_status'),
                    Individual.birth_date.label('birth_date'),
                    Individual.ptt.label('ptt'),
                    ObjectsCaracValues.begin_date.label('begin_date'),
                    ObjectsCaracValues.end_date.label('end_date')]
                   ).select_from(join_table
                   ).where(and_(SatTrx.model.like('GSM%'),
                                ObjectsCaracValues.carac_type == 19,
                                ObjectsCaracValues.object_type == 'Individual')
                   ).where(ObjectsCaracValues.value == params
                   ).order_by(desc(ObjectsCaracValues.begin_date))

    data = DBSession.execute(query).first()
    transaction.commit()

    if data['end_date'] == None:
        end_date = datetime.datetime.now()
    else:
        end_date = data['end_date']

    result = dict([(key[0], key[1]) for key in data.items()])
    print(result)
    result['duration'] = (end_date.month - data['begin_date'].month) \
        + (end_date.year - data['begin_date'].year) * 12

    query = select([V_Individuals_LatLonDate.c.date]
                   ).where(V_Individuals_LatLonDate.c.ind_id == result['ind_id']
                   ).order_by(desc(V_Individuals_LatLonDate.c.date)).limit(1)
    lastObs = DBSession.execute(query).fetchone()
    result['last_observation'] = lastObs['date'].strftime('%d/%m/%Y')
    if result['birth_date'] != None:
        result['birth_date'] = result['birth_date'].strftime('%d/%m/%Y')
    del result['begin_date'], result['end_date']
    print(result)
    return result
def user_per_month( self, trans, **kwd ):
    params = util.Params( kwd )
    message = ''
    email = util.restore_text( params.get( 'email', '' ) )
    specs = sorter( 'date', kwd )
    sort_id = specs.sort_id
    order = specs.order
    arrow = specs.arrow
    _order = specs.exc_order

    q = sa.select( ( self.select_month( model.Job.table.c.create_time ).label( 'date' ),
                     sa.func.count( model.Job.table.c.id ).label( 'total_jobs' ) ),
                   whereclause=sa.and_( model.Job.table.c.session_id == model.GalaxySession.table.c.id,
                                        model.GalaxySession.table.c.user_id == model.User.table.c.id,
                                        model.User.table.c.email == email ),
                   from_obj=[ sa.join( model.Job.table, model.User.table ) ],
                   group_by=self.group_by_month( model.Job.table.c.create_time ),
                   order_by=[ _order ] )

    jobs = []
    for row in q.execute():
        jobs.append( ( row.date.strftime( "%Y-%m" ),
                       row.total_jobs,
                       row.date.strftime( "%B" ),
                       row.date.strftime( "%Y" ) ) )
    return trans.fill_template( '/webapps/reports/jobs_user_per_month.mako',
                                order=order,
                                arrow=arrow,
                                sort_id=sort_id,
                                id=kwd.get('id'),
                                email=util.sanitize_text( email ),
                                jobs=jobs,
                                message=message )
def fetch_comments_by_thread_client_id(thread_client_id):
    """Fetch a list of comments for the given thread's client_id from the database."""
    t_comment = tables.comment
    t_thread = tables.thread
    stmt = (
        sa.select(t_comment.c)
        .select_from(sa.join(t_comment, t_thread))
        .where(t_thread.c.client_id == thread_client_id)
        .order_by(sa.asc(t_comment.c.created))
    )
    # Run add_comment_filter_predicate hooks
    stmt = ext.exec_filter_hooks(ext.AddCommentFilterPredicate, stmt)
    result = db.engine.execute(stmt)
    # Very large result sets can cause a lot of memory allocation here
    # that CPython may not give back to the OS, due to a lack of compacting
    # GC. See: http://stackoverflow.com/a/5495318
    # However, assuming a worst case of 50K smallish rows, the memory for the
    # process will only roughly double (to about 100MB).
    # If comments could be sorted in the database, it would not be necessary
    # to fetch all the results and sort in-process. This has the drawback
    # of putting more load on the database server. If memory due to large
    # allocations for results is an issue, it is recommended to reload
    # workers after exceeding a memory limit via uwsgi's `reload-on-rss`.
    comments_seq = [dict(x) for x in result.fetchall()]
    result.close()
    return comments_seq
def get_request_review(self, num):
    """
    Displays one single review request after the user clicks
    on the title when she sees it on mainpage

    accepts an int
    returns a dict
    """
    query = select(self.cols). \
        select_from(
            join(self.structure, User.structure)). \
        where(self.structure.c.reqId == num)
    result = connect_and_get(query).fetchall()
    if result:
        return zip_results(self.cols, result)[0]
    return False
def process_movie_tags(self, document, movieelement, movie):
    # build query for movie tags processing
    tagsquerycolumns = [
        db.metadata.tables['movie_tag'].c.movie_id,
        db.metadata.tables['movie_tag'].c.tag_id,
        db.metadata.tables['tags'].c.name
    ]
    tag_join = join(db.metadata.tables['movie_tag'],
                    db.metadata.tables['tags'],
                    db.metadata.tables['movie_tag'].c.tag_id == db.metadata.tables['tags'].c.tag_id)
    tagsquery = select(
        bind=self.db.session.bind,
        columns=tagsquerycolumns,
        from_obj=[tag_join],
        whereclause=db.metadata.tables['movie_tag'].c.movie_id == movie['movies_movie_id'])
    self.process_tags(document, movieelement, tagsquery)
async def show_julia(conn):
    print("Lookup for Julia:")
    join = sa.join(emails, users, users.c.id == emails.c.user_id)
    query = (
        sa.select([users, emails], use_labels=True)
        .select_from(join)
        .where(users.c.name == "Julia")
    )
    async for row in conn.execute(query):
        print(
            row.users_name,
            row.users_birthday,
            row.emails_email,
            row.emails_private,
        )
    print()
def show_merges(self):
    other = sa.orm.aliased(Entry)
    select_entries = sa.select([
        Entry.hash, Entry.refid, File.name.label('filename'), Entry.bibkey,
    ]).select_from(sa.join(Entry, File))\
        .order_by(Entry.hash, Entry.refid.desc(), File.name, Entry.bibkey)\
        .where(sa.exists().where(other.hash == Entry.hash).where(other.refid != Entry.refid))
    with self.engine.connect() as conn:
        for hash, group in group_first(conn.execute(select_entries)):
            self._print_group(conn, group)
            new = self._merged_entry(self._entrygrp(conn, hash), raw=True)
            cand = [
                (ri, self._merged_entry(self._entrygrp(conn, ri), raw=True))
                for ri in unique(ri for _, ri, _, _ in group)]
            old = min(cand, key=lambda p: distance(new, p[1]))[0]
            print('-> %s\n' % old)
class MeterSample(Base):
    """Helper model.

    It's needed as many of the filters work against Sample data joined with
    Meter data.
    """
    meter = Meter.__table__
    sample = Sample.__table__
    __table__ = join(meter, sample)

    id = column_property(sample.c.id)
    meter_id = column_property(meter.c.id, sample.c.meter_id)
    counter_name = column_property(meter.c.name)
    counter_type = column_property(meter.c.type)
    counter_unit = column_property(meter.c.unit)
    counter_volume = column_property(sample.c.volume)
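# Hypothetical usage sketch, not taken from the original project: it shows how
# a join-mapped helper class such as MeterSample above can be queried like any
# other mapped class. A configured SQLAlchemy `session` is an assumed name.
def sample_volumes_for_meter(session, name):
    # Columns mapped through column_property() on the meter/sample join are
    # regular attributes, so a filter on a Meter column and a read of a Sample
    # column go through the same mapped class.
    query = session.query(MeterSample).filter(MeterSample.counter_name == name)
    return [(row.id, row.counter_volume) for row in query]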
def single_speciality_of_headquarter(cls, *, heq_id, msp_id):
    """Medical specialties of the headquarters heq_id for the specialty msp_id."""
    j = join(
        cls.msp,
        doctors.Doctors.doc,
        cls.msp.c.msp_id == doctors.Doctors.doc.c.msp_id,
    ).join(
        HealthRelations.hre,
        HealthRelations.hre.c.doc_id == doctors.Doctors.doc.c.doc_id,
    )
    query = (select([cls.msp])
             .distinct()
             .select_from(j)
             .where(HealthRelations.hre.c.heq_id == heq_id)
             .where(cls.msp.c.msp_id == msp_id))
    return query
def show_splits(self):
    other = sa.orm.aliased(Entry)
    select_entries = sa.select([
        Entry.refid, Entry.hash, File.name.label('filename'), Entry.bibkey,
    ]).select_from(sa.join(Entry, File))\
        .order_by(Entry.refid, Entry.hash, File.name, Entry.bibkey)\
        .where(sa.exists().where(other.refid == Entry.refid).where(other.hash != Entry.hash))
    with self.engine.connect() as conn:
        for refid, group in group_first(conn.execute(select_entries)):
            self._print_group(conn, group)
            old = self._merged_entry(self._entrygrp(conn, refid), raw=True)
            cand = [
                (hs, self._merged_entry(self._entrygrp(conn, hs), raw=True))
                for hs in unique(hs for _, hs, _, _ in group)]
            new = min(cand, key=lambda p: distance(old, p[1]))[0]
            print('-> %s\n' % new)
def get_vw(self, search_kwargs, search_extras_regexp=None):
    """Get a view for our model.

    Our model will consist of items found in our database + any
    ingredients specified when we were created. In other words, we'll
    list all ingredients that we're told about, whether they're in the
    nutrition aliases table or not.

    search_kwargs are the arguments handed to our database search.
    search_extras_regexp is a regexp used to filter our "extras."
    """
    select = sqlalchemy.select(
        [
            self.rd.nutritionaliases_table.c.ingkey,
            self.rd.nutritionaliases_table.c.density_equivalent,
            self.rd.nutrition_table.c.desc,
            self.rd.nutrition_table.c.ndbno
        ],
        *gourmet.backends.db.make_simple_select_arg(
            search_kwargs, self.rd.nutrition_table,
            self.rd.nutritionaliases_table),
        **{
            'from_obj': [
                sqlalchemy.join(self.rd.nutrition_table,
                                self.rd.nutritionaliases_table)
            ]
        })
    vw = select.execute().fetchall()
    # vw = self.rd.fetch_join(self.rd.nutritionaliases_table, self.rd.nutrition_table,
    #                         'ndbno', 'ndbno', sort_by=[('ingkey', 1)],
    #                         **search_kwargs)
    # We must show ingredients whether we have them or not...
    extras = []
    if self.ingredients:
        ings_to_add = self.ingredients[:]
        if search_extras_regexp:
            ings_to_add = filter(
                lambda i: re.match(search_extras_regexp, i), ings_to_add)
        for row in vw:
            while row.ingkey in ings_to_add:
                ings_to_add.remove(row.ingkey)
        for extra_ing in ings_to_add:
            if extra_ing:
                extras.append(
                    MockObject(ingkey=extra_ing,
                               ndbno=0,
                               desc='Not in database',
                               density_equivalent=None))
    return vw + extras
def safety_stock_controller(uri: str, sku_id: str = None, direction: str = None):
    """ Retrieves safety stock.

    Args:
        uri (str):          Database connection string.
        sku_id (str):       SKU unique identification.
        direction (str):    Indication of sort direction.

    Returns:
        Safety stock value and the SKU id it belongs to.
    """
    meta = MetaData()
    connection = engine(uri)
    inventory_analysis = Table('inventory_analysis', meta, autoload=True,
                               autoload_with=connection)
    msk = Table('master_sku_list', meta, autoload=True, autoload_with=connection)
    j = join(inventory_analysis, msk,
             msk.columns.id == inventory_analysis.columns.sku_id)

    if direction == 'smallest':
        sku_classification = select([
            func.min(inventory_analysis.columns.safety_stock),
            msk.columns.sku_id
        ]).select_from(j)
        rp = connection.execute(sku_classification)
    else:
        sku_classification = select([
            func.max(inventory_analysis.columns.safety_stock),
            msk.columns.sku_id
        ]).select_from(j)
        rp = connection.execute(sku_classification)

    safety_stock = ''
    sku_identification = ''
    for i in rp:
        safety_stock = i[0]
        sku_identification = i[1]

    return safety_stock, sku_identification
def get_data_query(self):
    all_snapshot = db_tables.clean_all_snapshots_table
    student_lookup = all_snapshot.c.student_lookup
    school_year = all_snapshot.c.school_year
    grade = all_snapshot.c.grade
    discipline_incidents = all_snapshot.c.discipline_incidents

    student_years = sql.select([
        student_lookup,
        school_year.label('end_year'),
        grade,
    ]).distinct(student_lookup, school_year, grade).where(grade >= 9).alias('student_years')

    student_discipline = sql.select(
        [student_lookup, school_year, grade, discipline_incidents]).where(
            grade >= features_config.min_grade).alias('student_discipline')

    joined = sql.join(
        left=student_discipline,
        right=student_years,
        onclause=sql.and_(
            student_discipline.c.student_lookup == student_years.c.student_lookup,
            student_discipline.c.school_year <= student_years.c.end_year))

    discipline_incident_rates = sql.select([
        joined.c.student_discipline_student_lookup.label('student_lookup'),
        joined.c.student_years_end_year.label('school_year'),
        joined.c.student_years_grade.label('grade'),
        db_func.avg(
            joined.c.student_discipline_discipline_incidents).label(
                'discipline_incident_rate'),
        db_func.percent_rank().over(
            order_by=db_func.avg(
                joined.c.student_discipline_discipline_incidents),
            partition_by=[
                joined.c.student_years_end_year,
                joined.c.student_years_grade
            ]).label('discipline_incident_rate_perc')
    ]).select_from(joined).group_by(
        joined.c.student_discipline_student_lookup.label('student_lookup'),
        joined.c.student_years_end_year.label('school_year'),
        joined.c.student_years_grade.label('grade'))

    return discipline_incident_rates
def get_release_id(timestamp: Union[str, datetime], process_name: str) -> Tuple[int, datetime]:
    # Prevent duplication
    sleep(random() * 10)

    session = Session()
    try:
        query = session.execute(
            select([
                ReleaseReference.id,
                ReleaseReference.timestamp
            ]).select_from(
                join(ReleaseReference, ReleaseCategory,
                     ReleaseReference.id == ReleaseCategory.release_id)
            ).where(
                and_(
                    func.DATE(ReleaseReference.timestamp) == timestamp.date(),
                    ReleaseCategory.process_name == process_name)))

        result = query.fetchone()
        if result is not None:
            return result
    except Exception as err:
        session.rollback()
        raise err
    finally:
        session.close()

    session = Session(autocommit=True)
    try:
        release = ReleaseReference(timestamp=timestamp)
        session.add(release)
        session.flush()

        category = ReleaseCategory(release_id=release.id, process_name=process_name)
        session.add(category)
        session.flush()
    except Exception as err:
        session.rollback()
        raise err
    finally:
        session.close()

    return get_release_id(timestamp, process_name)
def resolve_num_children(self, args, context, info):
    phase = args.get('identifier', '')
    if phase == 'multiColumns':
        _it = models.Idea.__table__
        _ilt = models.IdeaLink.__table__
        _target_it = models.Idea.__table__.alias()
        j = join(_ilt, _it, _ilt.c.source_id == _it.c.id).join(
            _target_it, _ilt.c.target_id == _target_it.c.id)
        num = select([func.count(_ilt.c.id)]).select_from(j).where(
            (_ilt.c.tombstone_date == None) &  # noqa: E711
            (_it.c.tombstone_date == None) &
            (_it.c.id == self.id) &
            (_target_it.c.message_view_override == 'messageColumns')
        ).correlate_except(_ilt)
        return self.db.execute(num).fetchone()[0]
    return self.num_children
async def get_list_active_notifications():
    try:
        async with create_engine(dsn) as engine:
            async with engine.acquire() as conn:
                join = sa.join(user_notify, user_note,
                               user_notify.c.note_id == user_note.c.id)
                select = sa.select([user_notify, user_note], use_labels=True).select_from(join).\
                    where(user_notify.c.notify_status == NotifyStatus.processing)
                notify_list = []
                async for row in conn.execute(select):
                    notify_list.append(row)
                return notify_list
    except Exception as message:
        logging.warning(message)
def get_data_query(self):
    all_snapshots = db_tables.clean_all_snapshots_table
    student_lookup = all_snapshots.c.student_lookup
    school_year = all_snapshots.c.school_year
    grade = all_snapshots.c.grade
    zipcode = all_snapshots.c.zip

    # get first 5 digits for zipcode
    processed_zipcode = sql.case([(zipcode == None, None)],
                                 else_=sql.func.substr(
                                     sql.cast(zipcode, VARCHAR), 1, 5))

    snapshots = sql.select([
        student_lookup,
        school_year,
        grade,
        processed_zipcode.label('zipcode'),
    ]).\
        distinct(student_lookup, school_year, grade).\
        where(
            student_lookup != None,
        ).\
        cte('acs_temp_a')

    to_join = [snapshots]
    for k, v in db_tables.__dict__.items():
        if k.startswith('acs_'):
            to_join.append(v)

    joined = to_join[0]
    for i in range(1, len(to_join)):
        if i == 1:
            on_clause = (joined.c.zipcode == to_join[i].c.zipcode)
        else:
            on_clause = (joined.c[to_join[0].name + '_zipcode'] == to_join[i].c.zipcode)
        joined = sql.join(left=joined, right=to_join[i],
                          onclause=on_clause, isouter=True)

    cols = [c for c in joined.c if c.name != 'zipcode']
    final = sql.select(cols).select_from(joined)
    return final
def getDatas(self):
    if self.type_ == 'camtrap':
        if self.request.method == 'GET':
            joinTable = join(CamTrap, self.viewTable,
                             CamTrap.pk_id == self.viewTable.c['pk_id'])
            query = select([CamTrap]).select_from(joinTable)
            query = query.where(self.viewTable.c['sessionID'] == self.sessionID)
            query = query.where(or_(self.viewTable.c['checked'] == 0,
                                    self.viewTable.c['checked'] == None))
            query = query.order_by(asc(self.viewTable.c['date_creation']))
        else:
            query = select([self.viewTable]
                           ).where(self.viewTable.c['sessionID'] == self.sessionID
                           ).where(or_(self.viewTable.c['checked'] == 0,
                                       self.viewTable.c['checked'] == None))
        query = self.handleQuery(query)
        data = self.session.execute(query).fetchall()
        return self.handleResult(data)
def select_languoid_endangerment(languoid=Languoid, *,
                                 label: str = 'endangerment',
                                 sort_keys: bool = False,
                                 bib_suffix: str = '_e') -> sa.sql.Select:
    bibitem = aliased(Bibitem, name=f'bibitem{bib_suffix}')
    bibfile = aliased(Bibfile, name=f'bibfile{bib_suffix}')

    return (select(Endangerment.jsonf(EndangermentSource,
                                      bibfile, bibitem,
                                      sort_keys=sort_keys,
                                      label=label))
            .select_from(Endangerment)
            .filter_by(languoid_id=languoid.id)
            .correlate(languoid)
            .join(Endangerment.source)
            .outerjoin(sa.join(bibitem, bibfile))
            .label(label))
class RunJoin(Base):
    reduction_variable_table = Table(
        'reduction_variables_runvariable', metadata,
        Column('variable_ptr_id', Integer,
               ForeignKey('reduction_variables_variable.id'),
               primary_key=True),
        Column('reduction_run_id', Integer,
               ForeignKey('reduction_viewer_reductionrun.id')))

    reduction_variable_join = join(variable_table, reduction_variable_table)

    __table__ = reduction_variable_join

    id = column_property(variable_table.c.id,
                         reduction_variable_table.c.variable_ptr_id)

    reduction_run = relationship('ReductionRun',
                                 foreign_keys='RunJoin.reduction_run_id')
def get_data_query(self):
    # FIXME: Make end year go up to the last year on record
    index_cols_dict = self.index_cols_dict

    student_years = sql.select(
        list(index_cols_dict.values())
    ).distinct(
        *list(index_cols_dict.values())
    ).where(
        index_cols_dict['grade'] >= 9
    ).alias('student_years')

    student_block = sql.select(
        list(index_cols_dict.values()) + [self.blocking_col]
    ).where(
        index_cols_dict['grade'] >= features_config.min_grade
    ).alias('student_block')

    joined = sql.join(
        left=student_block,
        right=student_years,
        onclause=sql.and_(
            student_block.c.student_lookup == student_years.c.student_lookup,
            student_block.c.school_year <= student_years.c.school_year
        )
    )

    value_col = db_func.count() * 1.0 / db_func.count(sql.distinct(joined.c.student_block_school_year))

    inv_rates = sql.select([
        joined.c.student_block_student_lookup.label('student_lookup'),
        joined.c.student_years_school_year.label('school_year'),
        joined.c.student_years_grade,
        joined.c['student_block_' + self.blocking_col.name].label(self.blocking_col.name),
        value_col.label(self.value_col_name),
    ]).select_from(
        joined
    ).group_by(
        joined.c.student_block_student_lookup,
        joined.c.student_years_school_year,
        joined.c.student_years_grade,
        joined.c['student_block_' + self.blocking_col.name],
    )

    return inv_rates
async def batch_load_by_name_user(cls, context, names):
    async with context['dbpool'].acquire() as conn:
        access_key = context['access_key']
        j = sa.join(
            keypairs, keypair_resource_policies,
            keypairs.c.resource_policy == keypair_resource_policies.c.name)
        query = (sa.select([keypair_resource_policies])
                 .select_from(j)
                 .where((keypair_resource_policies.c.name.in_(names)) &
                        (keypairs.c.access_key == access_key))
                 .order_by(keypair_resource_policies.c.name))
        objs_per_key = OrderedDict()
        for k in names:
            objs_per_key[k] = None
        async for row in conn.execute(query):
            o = cls.from_row(context, row)
            objs_per_key[row.name] = o
        return tuple(objs_per_key.values())
def reorder_level_controller(uri: str, sku_id: str = None, direction: str = None):
    """ Retrieves reorder level.

    Args:
        uri (str):          Database connection string.
        sku_id (str):       SKU unique identification.
        direction (str):    Indication of sort direction.

    Returns:
        Reorder level value and the SKU id it belongs to.
    """
    meta = MetaData()
    connection = engine(uri)
    inventory_analysis = Table('inventory_analysis', meta, autoload=True,
                               autoload_with=connection)
    msk = Table('master_sku_list', meta, autoload=True, autoload_with=connection)
    j = join(inventory_analysis, msk,
             msk.columns.id == inventory_analysis.columns.sku_id)

    if direction == 'smallest':
        sku_classification = select([
            func.min(inventory_analysis.columns.reorder_level),
            msk.columns.sku_id
        ]).select_from(j)
        rp = connection.execute(sku_classification)
    else:
        sku_classification = select([
            func.max(inventory_analysis.columns.reorder_level),
            msk.columns.sku_id
        ]).select_from(j)
        rp = connection.execute(sku_classification)

    reorder_level = ''
    sku_identification = ''
    for i in rp:
        reorder_level = i[0]
        sku_identification = i[1]

    return reorder_level, sku_identification
async def list_contacts_in_segment(self, segment_id: int):
    async with self._db_engine.acquire() as conn:
        query = (select(
            [
                contact_table.c.id,
                contact_table.c.name,
                contact_table.c.email,
            ],
            segment_contact_table.c.segment_id == segment_id,
        ).select_from(
            join(
                contact_table,
                segment_contact_table,
                contact_table.c.id == segment_contact_table.c.contact_id,
            )
        ).order_by(contact_table.c.id))
        contacts = await conn.execute(query)
        return list(map(dict, await contacts.fetchall()))
def largest_groups(cls, limit=10):
    member = table('member')
    package = table('package')

    j = join(member, package,
             member.c.table_id == package.c.id)

    s = select([member.c.group_id, func.count(member.c.table_id)]).\
        select_from(j).\
        group_by(member.c.group_id).\
        where(and_(member.c.group_id != None,
                   member.c.table_name == 'package',
                   package.c.private == False,
                   package.c.state == 'active')).\
        order_by(func.count(member.c.table_id).desc()).\
        limit(limit)

    res_ids = model.Session.execute(s).fetchall()
    res_groups = [(model.Session.query(model.Group).get(unicode(group_id)), val)
                  for group_id, val in res_ids]
    return res_groups
def _bulkLoadDispsTask(importGroupHash: str, disps: List):
    """ Import Disps Links

    1) Drop all disps with matching importGroupHash

    2) set the coordSetId

    :param importGroupHash:
    :param disps: An array of disp objects to import
    :return:
    """
    dispTable = DispBase.__table__
    gridKeyIndexTable = GridKeyIndex.__table__
    gridQueueTable = GridKeyCompilerQueue.__table__

    engine = CeleryDbConn.getDbEngine()
    conn = engine.connect()
    transaction = conn.begin()
    try:
        stmt = select([gridKeyIndexTable.c.coordSetId, gridKeyIndexTable.c.gridKey]) \
            .where(dispTable.c.importGroupHash == importGroupHash) \
            .select_from(join(gridKeyIndexTable, dispTable,
                              gridKeyIndexTable.c.dispId == dispTable.c.id)) \
            .distinct()

        ins = gridQueueTable.insert().from_select(['coordSetId', 'gridKey'], stmt)
        conn.execute(ins)

        conn.execute(dispTable.delete().where(
            dispTable.c.importGroupHash == importGroupHash))

        transaction.commit()

        _bulkInsertDisps(engine, disps)
    except Exception:
        transaction.rollback()
        raise
    finally:
        conn.close()
async def batch_load_by_ak(context, access_keys, *, domain_name=None):
    async with context['dbpool'].acquire() as conn:
        from .user import users
        j = sa.join(keypairs, users, keypairs.c.user == users.c.uuid)
        query = (sa.select([keypairs])
                 .select_from(j)
                 .where(keypairs.c.access_key.in_(access_keys)))
        if domain_name is not None:
            query = query.where(users.c.domain_name == domain_name)
        objs_per_key = OrderedDict()
        # For each access key, there is only one keypair.
        # So we don't build lists in objs_per_key variable.
        for k in access_keys:
            objs_per_key[k] = None
        async for row in conn.execute(query):
            o = KeyPair.from_row(row)
            objs_per_key[row.access_key] = o
    return tuple(objs_per_key.values())
def retrieve(self):
    from ..utils.parseValue import formatThesaurus

    propertiesTable = Base.metadata.tables[
        self.parent.objectDB.GetDynPropTable()]
    dynamicValuesTable = Base.metadata.tables[
        self.parent.objectDB.GetDynPropValuesTable()]
    FK_name = self.parent.objectDB.GetSelfFKNameInValueTable()
    FK_property_name = self.parent.objectDB.GetDynPropFKName()

    tableJoin = join(
        dynamicValuesTable,
        propertiesTable,
        dynamicValuesTable.c[FK_property_name] == propertiesTable.c['ID'])
    query = select(
        [dynamicValuesTable, propertiesTable.c['Name']]).select_from(tableJoin).where(
            dynamicValuesTable.c[FK_name] == self.parent.objectDB.ID)
    query = query.where(
        not_(propertiesTable.c['Name'].in_([
            'Release_Comments',
            'Breeding ring kept after release',
            'Box_ID',
            'Date_Sortie',
            'Poids'
        ]))).order_by(desc(dynamicValuesTable.c['StartDate']))

    result = self.session.execute(query).fetchall()
    response = []
    for row in result:
        curRow = OrderedDict(row)
        dictRow = {}
        for key in curRow:
            if curRow[key] is not None:
                if key == 'ValueString' and curRow[key] is not None:
                    try:
                        thesauralValueObj = formatThesaurus(curRow[key])
                        dictRow['value'] = thesauralValueObj['displayValue']
                    except:
                        dictRow['value'] = curRow[key]
                elif 'FK' not in key:
                    dictRow[key] = curRow[key]
        dictRow['StartDate'] = curRow['StartDate'].strftime('%Y-%m-%d %H:%M:%S')
        response.append(dictRow)
    return response
def process_movie_acodecs(self, document, movieelement, movie):
    # build query for audio codec processing
    acodecscolumns = [
        db.metadata.tables['movie_lang'].c.movie_id,
        db.metadata.tables['movie_lang'].c.type,
        db.metadata.tables['movie_lang'].c.acodec_id,
        db.metadata.tables['acodecs'].c.name
    ]
    acodec_join = join(db.metadata.tables['movie_lang'],
                       db.metadata.tables['acodecs'],
                       db.metadata.tables['movie_lang'].c.acodec_id == db.metadata.tables['acodecs'].c.acodec_id)
    acodecsquery = select(
        bind=self.db.session.bind,
        columns=acodecscolumns,
        from_obj=[acodec_join],
        whereclause=db.metadata.tables['movie_lang'].c.movie_id == movie['movies_movie_id'])
    self.process_acodecs(document, movieelement, acodecsquery)
async def load_all_user(cls, context, access_key):
    async with context['dbpool'].acquire() as conn:
        query = (sa.select([keypairs.c.user_id])
                 .select_from(keypairs)
                 .where(keypairs.c.access_key == access_key))
        result = await conn.execute(query)
        row = await result.fetchone()
        user_id = row['user_id']

        j = sa.join(
            keypairs, keypair_resource_policies,
            keypairs.c.resource_policy == keypair_resource_policies.c.name)
        query = (sa.select([keypair_resource_policies])
                 .select_from(j)
                 .where((keypairs.c.user_id == user_id)))
        result = await conn.execute(query)
        rows = await result.fetchall()
        return [cls.from_row(context, r) for r in rows]
def process_movie_subtitles(self, document, movieelement, movie):
    # build query for movie subtitles processing
    languagescolumns = [
        db.metadata.tables['movie_lang'].c.movie_id,
        db.metadata.tables['movie_lang'].c.type,
        db.metadata.tables['movie_lang'].c.lang_id,
        db.metadata.tables['languages'].c.name
    ]
    language_join = join(db.metadata.tables['movie_lang'],
                         db.metadata.tables['languages'],
                         db.metadata.tables['movie_lang'].c.lang_id == db.metadata.tables['languages'].c.lang_id)
    languagesquery = select(
        bind=self.db.session.bind,
        columns=languagescolumns,
        from_obj=[language_join],
        whereclause=and_(db.metadata.tables['movie_lang'].c.movie_id == movie['movies_movie_id'],
                         db.metadata.tables['movie_lang'].c.type == 3))
    self.process_subtitles(document, movieelement, languagesquery)
async def count(conn):
    c1 = (await conn.scalar(users.count()))
    c2 = (await conn.scalar(emails.count()))
    print("Population consists of", c1, "people with", c2, "emails in total")

    join = sa.join(emails, users, users.c.id == emails.c.user_id)
    query = (sa.select([users.c.name])
             .select_from(join)
             .where(emails.c.private == False)  # noqa
             .group_by(users.c.name)
             .having(sa.func.count(emails.c.private) > 0))

    print("Users with public emails:")
    async for row in conn.execute(query):
        print(row.name)
    print()
def _convert(cls, session, id, node_type_cache=None, scan_id_cache=None):
    if node_type_cache is None:
        node_type_cache = dict()
    if scan_id_cache is None:
        scan_id_cache = dict()

    node_table = cls.__table__
    attribs = session.execute(
        node_table.select().where(node_table.c.id == id)).fetchone()

    peak_table = DeconvolutedPeak.__table__
    selector = select([DeconvolutedPeak.__table__]).select_from(
        join(peak_table, ChromatogramTreeNodeToDeconvolutedPeak)).where(
        ChromatogramTreeNodeToDeconvolutedPeak.c.node_id == id)
    members = session.execute(selector).fetchall()

    try:
        scan_id = scan_id_cache[attribs.scan_id]
    except KeyError:
        selector = select([
            MSScan.__table__.c.scan_id
        ]).where(MSScan.__table__.c.id == attribs.scan_id)
        scan_id = session.execute(selector).fetchone()
        scan_id_cache[attribs.scan_id] = scan_id

    members = [make_memory_deconvoluted_peak(m) for m in members]

    try:
        node_type = node_type_cache[attribs.node_type_id]
    except KeyError:
        shift = session.query(CompoundMassShift).get(attribs.node_type_id)
        node_type = shift.convert()
        node_type_cache[attribs.node_type_id] = node_type

    children_ids = session.query(
        ChromatogramTreeNodeBranch.child_id).filter(
        ChromatogramTreeNodeBranch.parent_id == id)
    children = [
        cls._convert(session, i[0], node_type_cache)
        for i in children_ids
    ]

    return MemoryChromatogramTreeNode(attribs.retention_time, scan_id,
                                      children, members, node_type)