def get_user_and_check_password(username, password):
    """Return the active user matching the given credentials, or None.

    Called by the account controller and/or AuthKit valid_password.
    Looks the user up by username first; on no match, falls back to the
    email address, since some users identify themselves that way (the
    username policy prohibits '@' and '.', so the two cannot collide).
    NOTE(review): email looks unindexed here -- confirm performance.
    """
    def _lookup(identity_filter):
        # Shared query: active user whose password-type login token
        # matches the encoded password.
        q = Session.query(User).select_from(
            join(User, UserLogin, User.login_details))
        q = q.filter(identity_filter)
        q = q.filter(User.status == 'active')
        q = q.filter(UserLogin.type == 'password')
        q = q.filter(UserLogin.token == encode_plain_text_password(password))
        return q.one()

    try:
        return _lookup(User.id == make_username(username))
    except NoResultFound:
        # Fallback: some users enter their email address instead.
        try:
            return _lookup(User.email == username)
        except NoResultFound:
            return None
class ListingRent(Base):
    # Mapped onto a derived aggregate SELECT (aliased 'listing_rents')
    # rather than a physical table: one row per RoomListing with its
    # effective price for the listing's ballot season.
    #
    #   price = max(RoomBandPrice.rent for the listing's band/season)
    #           - sum(RoomBandModifierPrice.discount), with 0 when the
    #             listing has no modifiers (hence the outer join + coalesce).
    __table__ = select([
        RoomListing.id
            .label('roomlisting_id'),
        (func.max(RoomBandPrice.rent) -
         func.coalesce(func.sum(RoomBandModifierPrice.discount), 0))
            .label('price')
    ]).select_from(
        # Inner join: listing -> its band's price rows for the same season.
        join(RoomListing,
             join(RoomBand, RoomBandPrice),
             (RoomListing.ballot_season_id == RoomBandPrice.season_id) &
             (RoomListing.band_id == RoomBandPrice.band_id)
        # Outer join: modifier discounts, if the listing has any.
        ).outerjoin(
            join(room_listing_modifiers_assoc,
                 join(RoomBandModifier, RoomBandModifierPrice)),
            (RoomListing.ballot_season_id == RoomBandModifierPrice.season_id) &
            (RoomListing.id == room_listing_modifiers_assoc.c.room_listing_id)
        )
    ).group_by(RoomListing.id).correlate(None).alias(name='listing_rents')
def get_tutors_with_skill(self, user, search_key):
    """Return users holding every skill named in ``search_key``.

    Each result is a dict merging the user's basic data with the
    matching UserSkill's dict (UserSkill values win on key collisions,
    as before).  Returns [] when no skill matches.
    """
    terms = [sk.lower() for sk in search_key]
    name_filters = [
        func.lower(Skill.name).like("%" + sk + "%") for sk in terms
    ]
    skill_ids = [
        skill.id for skill in Skill.query.filter(and_(*name_filters)).all()
    ]
    if not skill_ids:
        return []
    # Build the filter list once instead of duplicating the whole query
    # for the RESTRICT_EXPERTS branch.
    filters = [UserSkill.skill_id.in_(skill_ids)]
    if Ivysaur.Config.RESTRICT_EXPERTS:
        filters.append(User.email.in_(Ivysaur.Config.SEARCHABLE_EXPERTS))
    # UserSkill.user_id != user.id  (intentionally disabled, kept from original)
    userskills_users = self.db.session.query(UserSkill, User).select_from(
        join(UserSkill, User)).filter(*filters).all()
    results = []
    for userskill, matched_user in userskills_users:
        # dict-merge that works on both py2 and py3 (the original used
        # py2-only ``dict(a.items() + b.items())``); update() preserves
        # the original precedence (UserSkill values overwrite).
        merged = dict(matched_user.get_basic_data())
        merged.update(userskill.to_dict())
        results.append(merged)
    return results
def __init__(self, graph_view):
    """Load the synthesis Jinja2 templates and remember the graph view."""
    from os.path import dirname, join
    from jinja2 import Template

    template_root = join(dirname(dirname(__file__)), 'templates')

    def _load(filename):
        # Read and compile one template from the templates directory.
        with open(join(template_root, filename)) as handle:
            return Template(handle.read())

    self.idea_template = _load("idea_in_synthesis.jinja2")
    self.synthesis_template = _load("synthesis.jinja2")
    self.graph_view = graph_view
def dataXml(self, **kw):
    """Collect open/closed/by-requester request lists for the data XML view.

    NOTE(review): the three result sets are assigned to locals but never
    returned or stored -- confirm whether a decorator or template layer
    consumes them, or whether this method is incomplete.
    """
    allOpen = None
    allClosed = None
    openByParent = None
    try:
        # Requests with at least one item not yet dispensed.
        # (``== None`` is the SQLAlchemy IS NULL test -- intentional.)
        allOpen = DBSession().query(Request).select_from(
            join(Request, RequestItem)).filter(
            RequestItem.request_item_dispense_date == None).order_by(
            Request.request_id).all()
        # Requests with an item dispensed within the last 24 hours.
        allClosed = DBSession().query(Request).select_from(
            join(Request, RequestItem)).filter(
            RequestItem.request_item_dispense_date >
            (datetime.now() - timedelta(days=1))).order_by(
            Request.request_id).all()
        # Undisposed requests raised by the currently identified user.
        openByParent = DBSession().query(Request).select_from(
            join(Request, RequestItem)).filter(
            Request.requested_by_id ==
            request.identity['user'].user_id).filter(
            RequestItem.request_item_dispense_date == None).order_by(
            Request.request_id).all()
    except Exception as e:  # fixed py2-only "except Exception, e" syntax
        log.exception(e)
def __init__(self, graph_view):
    """Compile the idea/synthesis Jinja2 templates and store the view."""
    from os.path import dirname, join
    from jinja2 import Template

    base = join(dirname(dirname(__file__)), "templates")
    # Load both templates with one loop instead of two open blocks.
    for attr, fname in (("idea_template", "idea_in_synthesis.jinja2"),
                        ("synthesis_template", "synthesis.jinja2")):
        with open(join(base, fname)) as source:
            setattr(self, attr, Template(source.read()))
    self.graph_view = graph_view
def getDiagnoseByAdmin2(cls, session, hostpitalList=None, doctorName=None,
                        pagger=None):
    """Page through diagnoses awaiting triage, optionally narrowed by a
    hospital-id list and/or a doctor's username.

    ``pagger`` previously defaulted to a shared ``Pagger(1, 20)``
    instance (mutable default argument); it now defaults to a fresh
    instance per call, which is backward compatible for all callers.
    """
    if pagger is None:
        pagger = Pagger(1, 20)
    no_doctor = doctorName is None or doctorName == u''
    if no_doctor and hostpitalList is None:
        return session.query(Diagnose).filter(
            Diagnose.status == DiagnoseStatus.NeedTriage).offset(
            pagger.getOffset()).limit(pagger.getLimitCount()).all()
    if no_doctor:
        return session.query(Diagnose).filter(
            Diagnose.hospitalId.in_(hostpitalList),
            Diagnose.status == DiagnoseStatus.NeedTriage).offset(
            pagger.getOffset()).limit(pagger.getLimitCount()).all()
    # Doctor name given: join through Doctor to filter on the username.
    query = session.query(Diagnose).select_from(
        join(Doctor, Diagnose, Doctor.id == Diagnose.doctorId))
    if hostpitalList:
        query = query.filter(
            Doctor.username == doctorName,
            Diagnose.status == DiagnoseStatus.NeedTriage,
            Diagnose.hospitalId.in_(hostpitalList))
    else:
        query = query.filter(
            Doctor.username == doctorName,
            Diagnose.status == DiagnoseStatus.NeedTriage)
    query = query.offset(pagger.getOffset()).limit(pagger.getLimitCount())
    return query.all()
def getDiagnoseByPatientUser(cls, session, userId, status=None, pagger=None):
    """Page through the diagnoses belonging to a patient user.

    With no ``status``, returns everything except deleted diagnoses;
    otherwise filters on the exact status.  Returns None when ``userId``
    is missing.  ``pagger`` now defaults to a fresh ``Pagger(1, 20)``
    per call (the old default was a shared mutable instance); the dead
    ``query = None`` assignment was removed.
    """
    if userId is None:
        return
    if pagger is None:
        pagger = Pagger(1, 20)
    query = session.query(Diagnose).select_from(
        join(Patient, Diagnose, Patient.id == Diagnose.patientId))
    if status is None or len(status) == 0:
        query = query.filter(Patient.userID == userId,
                             Diagnose.status != DiagnoseStatus.Del)
    else:
        query = query.filter(Patient.userID == userId,
                             Diagnose.status == status)
    query = query.offset(pagger.getOffset()).limit(pagger.getLimitCount())
    return query.all()
def viewDataTable(data_table_id):
    # Render a single data table page: the table's metadata, its tags,
    # source and owner, its columns, and the stored values.
    #1. get data_table
    data_table = DataTable.query.filter_by(data_table_id=data_table_id).first()
    data_table_data_source_id = data_table.data_table_data_source_id
    title=data_table.data_table_name
    caption = data_table.data_table_description
    notes = "Notes"
    #1a. Get tags via the TagMap association table.
    tags = models.db.session.query(Tag).select_from(join(Tag, TagMap)).filter(TagMap.data_table_id==data_table_id).all()
    #2. get data_source
    data_source = DataSource.query.filter_by(data_source_id=data_table_data_source_id).first()
    data_source_owner_user_id = data_source.data_source_owner_user_id
    data_owner =Users.query.filter_by(user_id=data_source_owner_user_id).first()
    #3. get data_columns (lazy query; count() issues the SQL)
    data_columns = DataColumn.query.filter_by(data_column_data_table_id=data_table_id)
    no_of_data_columns = data_columns.count()
    #4. get values_data_table_<data_table_id>
    # NOTE(review): string-built SQL -- safe only if data_table_id is an
    # integer; confirm the route converter coerces it.
    sql="select * from values_data_table_"+str(data_table_id)
    values_data_table = models.db.session.execute(sql)
    return render_template('community/view_data_table.html',title=title, caption=caption, notes=notes, values_data_table=values_data_table, data_table=data_table,data_source=data_source, data_columns=data_columns,no_of_data_columns=no_of_data_columns,data_owner=data_owner,explore_tab="active",data_table_id=data_table_id,tags=tags)
def test_migrate(data):
    """Migrate two tenants, then verify the detail->master join is
    queryable against the destination database."""
    database, destination = data
    adapt_schema(database, destination, 'tenant1')
    migrate(database, destination, ['tenant1', 'tenant2'])

    meta = MetaData()
    meta.reflect(bind=destination)
    # PublicTable = meta.tables['public_table']
    TenantMasterTable = meta.tables['tenant_master_table']
    TenantDetailTable = meta.tables['tenant_detail_table']

    j1 = join(TenantDetailTable, TenantMasterTable)
    # j2 = join(TenantMasterTable, PublicTable)
    stm = select([
        TenantDetailTable.c.name.label('detail_name'),
        TenantMasterTable.c.name.label('master_name'),
        TenantDetailTable.c.country_name.label('detail_country_name'),
        TenantMasterTable.c.country_name.label('master_country_name'),
        TenantMasterTable.c.public_id.label('public_id'),
    ]).select_from(j1)
    conn = destination.connect()
    result = conn.execute(stm)
    # Consume the rows to prove the join executes; the leftover debug
    # print (which was explicitly marked "TODO: remove me") is gone.
    rows = [dict(row) for row in result]
    assert rows is not None
def isInRole2(username, rolename): if rolename == None: print "ERROR: no rolename specified" return False if rolename == '': print "ERROR: no rolename specified" return False # validate rolename: rolerow = sqlalchemysetup.session.query(tableclasses.Role).filter(tableclasses.Role.role_name == rolename ).first() if rolerow == None: print "ERROR: invalid rolename specified" return False rolerow = sqlalchemysetup.session.query(tableclasses.Role).select_from(join(join(tableclasses.Role,tableclasses.RoleMember),tableclasses.Account)).filter(tableclasses.Role.role_name == rolename ).filter(tableclasses.Account.username == username ).first() return ( rolerow != None )
def search_index(self):
    """List resources, optionally scoped to a parent connection and
    filtered by any request parameters matching model attributes."""
    parent_id = self.params.get("parent_id", None)
    # Paging defaults exposed to the template context.
    c.count = 0
    c.resources = None
    limit, offset = self._paging()
    # Assemble the query.
    query = meta.Session.query(self._poly_class_)
    if parent_id:
        query = query.select_from(
            join(Connection, Resource,
                 Connection.child_id == Resource.id)
        ).filter(Connection.parent_id == parent_id)
    if self._classname() != "resources":
        query = query.filter(Resource.resource_type == self._classname())
    # Any request parameter that names a model attribute becomes a filter.
    for key, value in self.params.iteritems():
        if hasattr(self._poly_class_, key):
            query = query.filter(getattr(self._poly_class_, key) == value)
    c.count = query.count()
    if limit:
        query = query.limit(limit)
    if offset:
        query = query.offset(offset)
    c.resources = query.all()
    return c.resources
def test_host_in_two_clusters(): """ create 2 new clusters and add a host to both. check Host.cluster. """ per = sess.query(Personality).select_from(join( Archetype, Personality)).filter( and_(Archetype.name == 'windows', Personality.name == 'generic')).one() for i in xrange(3): ec = EsxCluster(name='%s%s' % (CLUSTER_NAME, i), personality=per) add(sess, ec) commit(sess) c1 = sess.query(EsxCluster).filter_by(name='%s1' % (CLUSTER_NAME)).one() c2 = sess.query(EsxCluster).filter_by(name='%s2' % (CLUSTER_NAME)).one() assert c1 assert c2 print 'clusters in host in 2 cluster test are %s and %s' % (c1, c2) host = h_factory.next() sess.autoflush = False hcm1 = HostClusterMember(host=host, cluster=c1) create(sess, hcm1) assert host in c1.hosts print 'c1 hosts are %s' % (c1.hosts) c2.hosts.append(host) sess.autoflush = True commit(sess)
def hello():
    # POST endpoint: given a JSON body with a "keywords" list, rank each
    # newspaper's best-matching news item by summed tag rank and return
    # the results as JSON, best ranking first.
    data = request.get_json()
    if 'keywords' in data:
        keywords = [x.lower() for x in data.get('keywords', [])]
        tags_ids = list_id(
            models.Tag.query.filter(models.Tag.name.in_(keywords)))
        # One row per newspaper: its best-ranked news item for these tags
        # (DISTINCT ON newspaper_id combined with the rank-desc ordering).
        news_ids = db.session.query(
            models.NewsTag.news_id,
            func.sum(models.NewsTag.rank).label('total'),
        ).\
            select_from(join(models.NewsTag, models.News)).\
            distinct(models.News.newspaper_id).\
            filter(models.NewsTag.tag_id.in_(tags_ids),
                   models.News.content != '').\
            group_by(models.NewsTag.news_id, models.News.newspaper_id).\
            order_by(models.News.newspaper_id,
                     func.sum(models.NewsTag.rank).desc()).all()
        # Map news_id -> total rank.
        news_ids = dict(news_ids)
        response_news = models.News.query.filter(
            models.News.id.in_(news_ids.keys()))
        news_return = []
        for news in response_news:
            news_return.append({
                # NOTE(review): under Python 2 this is integer division if
                # rank totals are ints -- confirm the intended precision.
                'ranking': (news_ids.get(news.id) / len(tags_ids)) * 2,
                'title': news.title,
                'content': news.content,
                # Relative URLs are resolved against the newspaper's base.
                'reference': news.url if '://' in news.url
                             else (news.newspaper.url + '/' + news.url)
            })
        news_return.sort(key=lambda x: x.get('ranking', 0), reverse=True)
        return jsonify({"news": news_return})
    return jsonify({"error": 'need tags parameter'}), 400
def create_link_xml(xml_topology, network_name):
    # Append a <link type='lan'> element (with source and destination
    # <interface_ref> children) to xml_topology for every resolvable
    # link whose source interface belongs to the named network.
    #search link: interface ids of nodes in this network
    subq = join(InterFace, Node, InterFace.idNode == Node.id) \
        .select(Node.network_name == network_name)
    #To specify the column to get.
    subq = subq.with_only_columns([InterFace.id])
    #find a link connection source I/F in the network.
    link_list = Link.query.filter(Link.src_idIF.in_(subq)).group_by(Link.id).all()
    for link in link_list:
        if link.src_if is None or link.dst_if is None:
            # Endpoint could not be resolved -- log and skip this link.
            logger.warn('{0} is invalid link.(src_if:{1}-dst_if{2})'\
                .format(link.id,link.src_idIF,link.dst_idIF))
            continue
        #add <link type='lan'>
        xml_link = SubElement(xml_topology, const.XML_TAG_LINK,
                              {const.XML_ATTR_TYPE:const.TYPE_LINK_LAN})
        #add (source I/F) <interface_ref client_id='xxx'>
        SubElement(xml_link, const.XML_TAG_IF_REF,
                   {const.XML_ATTR_CLIENT_ID:link.src_if.if_name})
        #add (destination I/F) <interface_ref client_id='xxx'>
        SubElement(xml_link, const.XML_TAG_IF_REF,
                   {const.XML_ATTR_CLIENT_ID:link.dst_if.if_name})
    return xml_topology
def test_valid_activation(self):
    """Test that the ``activate`` view properly handles a valid
    activation (in this case, based on the default backend's
    activation window).
    """
    success_redirect = self.url_reverse("registration_activation_complete")

    # Register a fresh account first.
    registration_data = {
        "username": "******",
        "email": "*****@*****.**",
        "password1": "swordfish",
        "password2": "swordfish",
    }
    self.client.post(reverse("registration_register"),
                     data=registration_data)

    profile = (
        self.session.query(RegistrationProfile)
        .select_from(join(RegistrationProfile, User))
        .filter(User.username == u"alice")
        .first()
    )
    self.assertIsNotNone(profile)

    activate_path = reverse(
        "registration_activate",
        kwargs={"activation_key": profile.activation_key},
    )
    self.assertRedirects(self.client.get(activate_path), success_redirect)

    alice = self.session.query(User).filter_by(username=u"alice").first()
    self.assertTrue(alice.is_active)
def test_create_hosts(): br = Branch.get_unique(sess, 'ny-prod', compel=True) dns_dmn = DnsDomain.get_unique(sess, 'one-nyp.ms.com', compel=True) stat = Status.get_unique(sess, 'build', compel=True) os = sess.query(OperatingSystem).filter(Archetype.name == 'vmhost').first() assert os, 'No OS in %s' % func_name() pers = sess.query(Personality).select_from(join( Personality, Archetype)).filter( and_(Archetype.name == 'vmhost', Personality.name == 'generic')).one() sess.autoflush = False for i in xrange(NUM_HOSTS): machine = m_factory.next() vm_host = Host(machine=machine, name='%s%s' % (HOST_NAME, i), dns_domain=dns_dmn, branch=br, personality=pers, status=stat, operating_system=os) add(sess, vm_host) sess.autoflush = True commit(sess) hosts = sess.query(Host).filter(Host.name.like(HOST_NAME + '%')).all() assert len(hosts) is NUM_HOSTS print 'created %s hosts' % len(hosts)
def test_host_in_two_clusters(): """ create 2 new clusters and add a host to both. check Host.cluster. """ per = sess.query(Personality).select_from( join(Archetype, Personality)).filter( and_(Archetype.name=='windows', Personality.name=='generic')).one() for i in xrange(3): ec = EsxCluster(name='%s%s'% (CLUSTER_NAME, i), personality=per) add(sess, ec) commit(sess) c1 = sess.query(EsxCluster).filter_by(name='%s1' % (CLUSTER_NAME)).one() c2 = sess.query(EsxCluster).filter_by(name='%s2' % (CLUSTER_NAME)).one() assert c1 assert c2 print 'clusters in host in 2 cluster test are %s and %s'% (c1, c2) host = h_factory.next() sess.autoflush = False hcm1 = HostClusterMember(host=host, cluster=c1) create(sess, hcm1) assert host in c1.hosts print 'c1 hosts are %s'% (c1.hosts) c2.hosts.append(host) sess.autoflush = True commit(sess)
def create_link_xml(xml_topology, network_name):
    """Append a <link type='lan'> element for each resolvable link in the
    given network, with source/destination <interface_ref> children."""
    # Interface ids for nodes belonging to this network.
    if_subquery = join(InterFace, Node, InterFace.idNode == Node.id) \
        .select(Node.network_name == network_name)
    if_subquery = if_subquery.with_only_columns([InterFace.id])
    # Links whose source interface lives in the network.
    links = Link.query.filter(Link.src_idIF.in_(if_subquery)).group_by(
        Link.id).all()
    for lnk in links:
        if lnk.src_if is None or lnk.dst_if is None:
            # Endpoint could not be resolved -- warn and skip.
            logger.warn('{0} is invalid link.(src_if:{1}-dst_if{2})'
                        .format(lnk.id, lnk.src_idIF, lnk.dst_idIF))
            continue
        lan_link = SubElement(xml_topology, const.XML_TAG_LINK,
                              {const.XML_ATTR_TYPE: const.TYPE_LINK_LAN})
        # Source then destination interface references.
        SubElement(lan_link, const.XML_TAG_IF_REF,
                   {const.XML_ATTR_CLIENT_ID: lnk.src_if.if_name})
        SubElement(lan_link, const.XML_TAG_IF_REF,
                   {const.XML_ATTR_CLIENT_ID: lnk.dst_if.if_name})
    return xml_topology
def test_create_hosts(): br = Branch.get_unique(sess, 'ny-prod', compel=True) dns_dmn = DnsDomain.get_unique(sess, 'one-nyp.ms.com', compel=True) stat = Status.get_unique(sess, 'build', compel=True) os = sess.query(OperatingSystem).filter(Archetype.name == 'vmhost').first() assert os, 'No OS in %s' % func_name() pers = sess.query(Personality).select_from( join(Personality, Archetype)).filter( and_(Archetype.name=='vmhost', Personality.name=='generic')).one() sess.autoflush=False for i in xrange(NUM_HOSTS): machine = m_factory.next() vm_host = Host(machine=machine, name='%s%s' % (HOST_NAME, i), dns_domain=dns_dmn, branch=br, personality=pers, status=stat, operating_system=os) add(sess, vm_host) sess.autoflush=True commit(sess) hosts = sess.query(Host).filter( Host.name.like(HOST_NAME+'%')).all() assert len(hosts) is NUM_HOSTS print 'created %s hosts'% len(hosts)
def list(self, user=None):
    # List published events.  With no user: all users' published events,
    # excluding the 'demo' account.  With a user nickname: that user's
    # events, where unpublished drafts are visible only to the owner.
    # Renders the full page, or a taconite partial for AJAX requests.
    self.is_page_back = True
    query = meta.Session.query(model.Event)
    if user is None:
        # ``!= None`` is the SQLAlchemy IS NOT NULL test -- intentional.
        query = query.select_from(
            join(model.Event, model.User,
                 model.Event.node_user_id == model.User.id)).filter(
            and_(model.Event.published != None,
                 model.User.nickname != 'demo'))
    else:
        user_query = meta.Session.query(model.User)
        try:
            user_node = user_query.filter(
                model.User.nickname == user).one()
        except NoResultFound:
            # Unknown nickname -> 404.
            abort(404)
        query = query.filter(model.Event.node_user_id == user_node.id)
        if user_node.nickname != current_user().nickname:
            # Hide unpublished drafts from everyone but the owner.
            query = query.filter(model.Event.published != None)
    query = query.order_by(model.Event.start.desc())
    c.user = user
    c.nodes = pager_or_404(query)
    if is_ajax():
        result = self.taconite(render('/event/list_partial.html'))
    else:
        result = render('/event/list.html')
    return result
def _get_map_from_user_by_id(self, user, map_id):
    """ Get a mapfile owned by a user from the database by map_id.

    Returns None when no such map exists (or on any query error).
    """
    req = Session.query(Map).select_from(join(Map, User))
    try:
        return req.filter(and_(User.login == user, Map.id == map_id)).one()
    except Exception:  # fixed py2-only "except Exception, e" syntax
        # NOTE(review): this swallows every error (not just NoResultFound)
        # and reports it as "no map" -- consider narrowing the except.
        return None
def _get_map_from_user_by_id(self, user, map_id):
    """ Get a mapfile owned by a user from the database by map_id.

    Returns None when no such map exists (or on any query error).
    """
    req = Session.query(Map).select_from(join(Map, User))
    try:
        return req.filter(and_(User.login == user, Map.id == map_id)).one()
    except Exception:  # fixed py2-only "except Exception, e" syntax
        # NOTE(review): every failure is reported as "no map" -- consider
        # catching only NoResultFound here.
        return None
def untrack(self, query, session):
    """Delete this user's UserTrack row for ``query``.

    Returns True when a row was found and deleted, False when none exists.
    """
    try:
        row = session.query(UserTrack).select_from(
            join(UserTrack, Track)).filter(
            UserTrack.user_id == self.id).filter(
            Track.query == query).one()
        session.delete(row)
        return True
    except exc.NoResultFound:  # fixed py2-only "except ..., e" syntax
        return False
def get_all_target_if(node_name, port):
    """Return the interfaces on node ``node_name`` whose port number
    matches ``port``."""
    # Subquery: interface ids belonging to the named node.
    node_if_ids = join(InterFace, Node, InterFace.idNode == Node.id) \
        .select(Node.node_name == node_name)
    node_if_ids = node_if_ids.with_only_columns([InterFace.id])
    # Require both the node membership and the matching port.
    return InterFace.query.filter(InterFace.port == port).filter(
        InterFace.id.in_(node_if_ids)).all()
def get_all_se_if():
    """Return every interface attached to a node of type 'se'."""
    # Subquery: interface ids whose node has the SE type.
    se_if_ids = join(InterFace, Node, InterFace.idNode == Node.id) \
        .select(Node.type == const.TYPE_NODE_SE)
    se_if_ids = se_if_ids.with_only_columns([InterFace.id])
    return InterFace.query.filter(InterFace.id.in_(se_if_ids)).all()
def groupfinder(authorname, request):
    """Resolve an author name to its list of access-group names.

    BUG FIX: the previous version dereferenced ``author.id`` without
    checking the lookup result, raising AttributeError for unknown
    author names; an unknown author now yields an empty list (matching
    the guarded variant of this function elsewhere in this file).
    """
    dbsession = DBSession()
    author = dbsession.query(Author).filter_by(
        author_name=authorname).first()
    if author is None:
        return []
    groups = dbsession.query(AccessGroup).select_from(
        join(AuthorAccessGroupMap, AccessGroup,
             AuthorAccessGroupMap.group_id == AccessGroup.id)).filter(
        AuthorAccessGroupMap.author_id == author.id).all()
    return [group.group_name for group in groups]
def get_all_target_if(node_name, port):
    """Return interfaces of node ``node_name`` listening on ``port``."""
    # Interface ids scoped to the named node.
    candidate_ids = join(InterFace, Node, InterFace.idNode == Node.id) \
        .select(Node.node_name == node_name)
    candidate_ids = candidate_ids.with_only_columns([InterFace.id])
    matching = InterFace.query.filter(InterFace.port == port)
    return matching.filter(InterFace.id.in_(candidate_ids)).all()
def getNeedDealDiagnoseByHospitalUser(cls, session, uploadUserId,
                                      patientName=None, pagger=None):
    """Page through diagnoses an uploader still needs to deal with
    (NeedTriage or NeedUpdate), optionally narrowed to one patient name.

    Returns None when ``uploadUserId`` is missing.  ``pagger`` now
    defaults to a fresh ``Pagger(1, 20)`` per call instead of sharing a
    single instance across calls (mutable default argument).
    """
    if uploadUserId is None:
        return
    if pagger is None:
        pagger = Pagger(1, 20)
    pending = (DiagnoseStatus.NeedTriage, DiagnoseStatus.NeedUpdate)
    if patientName is None or patientName == u'':
        query = session.query(Diagnose).filter(
            Diagnose.uploadUserId == uploadUserId,
            Diagnose.status.in_(pending)).offset(
            pagger.getOffset()).limit(pagger.getLimitCount())
    else:
        # Join through Patient to filter on the patient's real name.
        query = session.query(Diagnose).select_from(
            join(Patient, Diagnose, Patient.id == Diagnose.patientId)).filter(
            Patient.realname == patientName,
            Diagnose.status.in_(pending),
            Diagnose.uploadUserId == uploadUserId).offset(
            pagger.getOffset()).limit(pagger.getLimitCount())
    return query.all()
def bargraph():
    # Render the bargraph visualization for a data table chosen in the
    # posted form: loads the table's metadata, tags, source and owner,
    # then builds a [[name, value], ...] array for the template.
    print request.form  # debug: dump incoming form parameters
    visual_plugin = Plugin.query.filter_by(plugin_key="bargraph").first()
    ################# DATA RELATED ###########################################
    data_type = request.form.get('data_type', '')
    data_key = request.form.get('data_key', '')
    name_column = request.form.get('name_column', '').strip()
    value_column = request.form.get('value_column', '').strip()
    if data_key != "":
        data_key =int(data_key)
    else:
        data_key = 0
    #only for table as of now
    data_table_id = data_key
    data_table = DataTable.query.filter_by(data_table_id=data_table_id).first()
    data_table_data_source_id = data_table.data_table_data_source_id
    title=data_table.data_table_name
    caption = data_table.data_table_description
    notes = "Notes"
    #1a. Get tags via the TagMap association table.
    tags = models.db.session.query(Tag).select_from(join(Tag, TagMap)).filter(TagMap.data_table_id==data_table_id).all()
    #2. get data_source
    data_source = DataSource.query.filter_by(data_source_id=data_table_data_source_id).first()
    data_source_owner_user_id = data_source.data_source_owner_user_id
    data_owner =Users.query.filter_by(user_id=data_source_owner_user_id).first()
    #3. get data_columns (lazy query; count() issues the SQL)
    data_columns = DataColumn.query.filter_by(data_column_data_table_id=data_table_id)
    no_of_data_columns = data_columns.count()
    #4. get values_data_table_<data_table_id>
    # NOTE(review): string-built SQL; data_table_id is int()-coerced above,
    # which limits injection, but parameterized SQL would still be safer.
    sql="select * from values_data_table_"+str(data_table_id)
    values_data_table = models.db.session.execute(sql)
    # First row of the array is the [name, value] column header pair.
    data_array =[]
    values_array = []
    values_array.append(name_column)
    values_array.append(value_column)
    data_array.append(values_array)
    for row in values_data_table:
        values_array = []
        print unicode(row[name_column])  # debug
        values_array.append(unicode(row[name_column]))
        values_array.append(int(row[value_column]))
        data_array.append(values_array)
    print json.dumps(data_array,ensure_ascii=False)  # debug
    #pass the returned value to template for display
    return render_template("visualization_plugins/bargraph.html",title=request.form['title'],caption="",notes="Please click on save to save the visualization",explore_tab="active",visual_plugin=visual_plugin, values_data_table=values_data_table, data_table=data_table,data_source=data_source, data_columns=data_columns,no_of_data_columns=no_of_data_columns,data_owner=data_owner,data_table_id=data_table_id,tags=tags,user_action="try",selected_columns=request.form,data_array=json.dumps(data_array,ensure_ascii=False) )
def unmatched_ingredients(db):
    """Return a query of (Ingredient, recipe count) rows for ingredients
    lacking an OntologyNode, most-referenced first."""
    num_recipes = func.count(RecipeIngredientAssociation._recipe_id)
    query = (
        db._session.query(Ingredient, num_recipes)
        .select_from(join(Ingredient, RecipeIngredientAssociation))
        # ``== None`` is the SQLAlchemy IS NULL test -- intentional.
        .filter(Ingredient.ontology_node == None)
        .group_by(Ingredient.id)
        .order_by(desc(num_recipes))
    )
    return query
def groupfinder(authorname, request):
    """Resolve an author name to the names of its access groups;
    unknown authors yield an empty list."""
    dbsession = DBSession()
    author = dbsession.query(Author).filter_by(
        author_name=authorname).first()
    # TODO: add handling for invalid author name
    if author is None:
        return []
    group_rows = dbsession.query(AccessGroup).select_from(
        join(AuthorAccessGroupMap, AccessGroup,
             AuthorAccessGroupMap.group_id == AccessGroup.id)).filter(
        AuthorAccessGroupMap.author_id == author.id).all()
    return [row.group_name for row in group_rows]
def xmlrpc_getGroupNameByAccountName(self, queryParams, client_id):
    """Return [groupname] for the account named in queryParams[0], or
    False when the account has no group row.

    Fixed ``!= None`` to ``is not None``, and moved the session/connection
    teardown into a finally block so resources are released even when the
    query raises.
    """
    connection = dbconnect.engines[client_id].connect()
    Session = dbconnect.session(bind=connection)
    try:
        res = Session.query(dbconnect.Groups).select_from(
            join(dbconnect.Groups, dbconnect.Account)).filter(
            dbconnect.Account.accountname == queryParams[0]).first()
    finally:
        Session.close()
        connection.connection.close()
    if res is not None:
        return [res.groupname]
    return False
def get_latest_info(self, session, name):
    """Return latest known identifier in dict (season, episode, name) for series name"""
    latest = (session.query(Episode)
              .select_from(join(Episode, Series))
              # ``!= None`` is SQLAlchemy's IS NOT NULL -- do not change
              # to ``is not None``.
              .filter(Episode.season != None)
              .filter(Series.name == name)
              .order_by(desc(Episode.season))
              .order_by(desc(Episode.number))
              .first())
    if not latest:
        # No identified episode recorded for this series.
        return False
    return {'season': latest.season,
            'episode': latest.number,
            'name': name}
def test_no_host_threshold():
    """ ensure down_hosts_threshold must exist """
    prod_branch = Branch.get_unique(sess, 'ny-prod', compel=True)
    np_building = Building.get_unique(sess, name='np', compel=True)
    windows_generic = sess.query(Personality).select_from(
        join(Archetype, Personality)).filter(
        and_(Archetype.name == 'windows',
             Personality.name == 'generic')).one()
    # Deliberately omit down_hosts_threshold.
    cluster = EsxCluster(name=CLUSTER_NAME,
                         location_constraint=np_building,
                         personality=windows_generic,
                         branch=prod_branch)
    add(sess, cluster)
    commit(sess)
def get_user_from_openid_identifyer(identifyer):
    """ Called by account controller to return a user from our db from an openid identifyer """
    try:
        query = Session.query(User).select_from(
            join(User, UserLogin, User.login_details))
        # No status filter here: the base controller checks for pending
        # status and redirects to the login page accordingly.
        query = query.filter(UserLogin.token == identifyer)
        return query.one()
    except NoResultFound:
        return None
def unmatched_ingredients(db):
    """Return a query yielding ingredients with no OntologyNode match,
    together with how many recipes use each, most-used first."""
    count_col = func.count(RecipeIngredientAssociation._recipe_id)
    q = db._session.query(Ingredient, count_col)
    q = q.select_from(join(Ingredient, RecipeIngredientAssociation))
    q = q.filter(Ingredient.ontology_node == None)  # SQL NULL test
    q = q.group_by(Ingredient.id)
    q = q.order_by(desc(count_col))
    return q
def directAttributeRequest(itemIDs, attrIDs):
    """Fetch (typeID, attributeID, value) rows for the given item and
    attribute ID collections.

    Raises TypeError when any ID in either collection is not an int.
    """
    # BUG FIX: the first loop previously iterated itemIDs (again) while
    # reporting "All attrIDs must be integer", so attrIDs were never
    # actually validated.
    for attrID in attrIDs:
        if not isinstance(attrID, int):
            raise TypeError("All attrIDs must be integer")
    for itemID in itemIDs:
        if not isinstance(itemID, int):
            raise TypeError("All itemIDs must be integer")
    q = select((Item.typeID, Attribute.attributeID, Attribute.value),
               and_(Attribute.attributeID.in_(attrIDs),
                    Item.typeID.in_(itemIDs)),
               from_obj=[join(Attribute, Item)])
    result = sqlAlchemy.gamedata_session.execute(q).fetchall()
    return result
def directAttributeRequest(itemIDs, attrIDs):
    """Fetch (typeID, attributeID, value) rows for the given item and
    attribute ID collections.

    Raises TypeError when any ID in either collection is not an int.
    """
    # BUG FIX: the first loop previously re-checked itemIDs while claiming
    # to validate attrIDs; attrIDs were never checked at all.
    for attrID in attrIDs:
        if not isinstance(attrID, int):
            raise TypeError("All attrIDs must be integer")
    for itemID in itemIDs:
        if not isinstance(itemID, int):
            raise TypeError("All itemIDs must be integer")
    q = select((Item.typeID, Attribute.attributeID, Attribute.value),
               and_(Attribute.attributeID.in_(attrIDs),
                    Item.typeID.in_(itemIDs)),
               from_obj=[join(Attribute, Item)])
    result = gamedata_session.execute(q).fetchall()
    return result
def test_activation_success_url(self):
    """Passing ``success_url`` to the ``activate`` view and successfully
    activating will result in that URL being used for the redirect.
    """
    expected = "http://testserver%s" % reverse(
        "registration_test_custom_success_url")
    # Register, then look up the profile created for alice.
    self.client.post(reverse("registration_register"), data=self.ALICE_DATA)
    profile = (
        self.session.query(RegistrationProfile)
        .select_from(join(RegistrationProfile, User))
        .filter(User.username == u"alice")
        .first()
    )
    activation_path = reverse(
        "registration_test_activate_success_url",
        kwargs={"activation_key": profile.activation_key},
    )
    self.assertRedirects(self.client.get(activation_path), expected)
def Overview(request):
    """Render the invoice overview page: the current user's customer
    invoices, newest first, with their entries eagerly loaded."""
    user = currentUser(request)
    session = meta.Session()
    query = session.query(Invoice)
    query = query.select_from(orm.join(Invoice, Customer))
    query = query.filter(Customer.account == user)
    query = query.order_by(Invoice.sent.desc())
    invoices = query.options(orm.joinedload(Invoice.entries))
    summary = summaryInvoices(invoices)
    return render("invoice_overview.pt", request,
                  section="invoices", **summary)
def remove_all_stickykeys(self, sess, server_adress, load=None):
    '''
    This method removes all the sticky keys for this server and
    optionally update the load for this server

    :param server_adress: the unique server_adress
    :param load: load value that we want to update in datastore for this server
    '''
    try:
        # BUG FIX: the previous query joined Server but never filtered on
        # server_adress, so it collected -- and then deleted -- every
        # server's sticky mappings, not just this server's.
        sticky_mappings = sess.query(self.StickyMapping).select_from(
            join(self.Server, self.StickyMapping)).filter(
            self.Server.unique_key == server_adress).all()
        sess.query(self.StickyMapping).filter(
            self.StickyMapping.mapping_id.in_(
                [sm.mapping_id for sm in sticky_mappings])).delete(
            synchronize_session='fetch')
        # if load:
        #     sess.query(self.Server).filter(self.Server.unique_key == server_adress).update({self.Server.load:load}, synchronize_session=False)
        sess.commit()
    except Exception:
        # Best-effort as before, but roll back so the session stays usable.
        sess.rollback()
def test_no_host_threshold():
    """ ensure down_hosts_threshold must exist """
    branch = Branch.get_unique(sess, 'ny-prod', compel=True)
    building = Building.get_unique(sess, name='np', compel=True)
    personality = sess.query(Personality).select_from(join(
        Archetype, Personality)).filter(and_(
        Archetype.name == 'windows', Personality.name == 'generic')).one()
    # down_hosts_threshold intentionally left out.
    add(sess, EsxCluster(name=CLUSTER_NAME,
                         location_constraint=building,
                         personality=personality,
                         branch=branch))
    commit(sess)
def Overview(request):
    """Render the invoice overview for the current user: their customer
    invoices ordered newest-first, entries eager-loaded."""
    user = currentUser(request)
    session = meta.Session()
    invoices = (
        session.query(Invoice)
        .select_from(orm.join(Invoice, Customer))
        .filter(Customer.account == user)
        .order_by(Invoice.sent.desc())
        .options(orm.joinedload(Invoice.entries))
    )
    summary = summaryInvoices(invoices)
    return render("invoice_overview.pt", request,
                  section="invoices", **summary)
def get_by_permissions(cls, permissions):
    # Return users effectively approved for any of the given permission
    # name(s), evaluated per (user, permission) row of the permissions view:
    #   - an explicit user-level approval (user_approved == 1) wins; else
    #   - with no user-level decision (user_approved IS NULL), the user is
    #     approved when no group denies (group_denied IS NULL or >= 0) and
    #     at least one group approves (group_approved >= 1).
    # NOTE(review): ``group_denied >= 0`` reads oddly for a "denied" flag;
    # confirm the view's encoding before relying on this.
    from compstack.auth.model.queries import query_users_permissions
    vuserperms = query_users_permissions().alias()
    return db.sess.query(cls).select_from(
        saorm.join(cls, vuserperms, cls.id == vuserperms.c.user_id)).filter(
        sasql.or_(
            vuserperms.c.user_approved == 1,
            sasql.and_(
                vuserperms.c.user_approved.is_(None),
                sasql.or_(
                    vuserperms.c.group_denied.is_(None),
                    vuserperms.c.group_denied >= 0,
                ),
                vuserperms.c.group_approved >= 1))).filter(
        vuserperms.c.permission_name.in_(
            tolist(permissions))).all()
def test_create_esx_cluster(): """ tests the creation of an EsxCluster """ np = Building.get_unique(sess, name='np', compel=True) br = Branch.get_unique(sess, 'ny-prod', compel=True) per = sess.query(Personality).select_from( join(Archetype, Personality)).filter( and_(Archetype.name=='windows', Personality.name=='generic')).one() ec = EsxCluster(name=CLUSTER_NAME, location_constraint=np, personality=per, down_hosts_threshold=2, branch=br) create(sess, ec) assert ec print ec assert ec.max_hosts is 8 print 'esx cluster max members = %s' % ec.max_hosts
def test_create_cluster(): # TODO: make this a reusable function in test_cluster and import np = sess.query(Building).filter_by(name="np").one() dmn = sess.query(Domain).first() assert dmn, "No domain found in %s" % func_name() per = ( sess.query(Personality) .select_from(join(Archetype, Personality)) .filter(and_(Archetype.name == "windows", Personality.name == "generic")) .one() ) ec = EsxCluster(name=CLUSTER_NAME, location_constraint=np, personality=per, domain=dmn) create(sess, ec) assert ec, "No EsxCluster created by %s" % func_name() print ec
def getNeedDealDiagnoseByHospitalUser(cls, session, uploadUserId, patientName=None, pagger=None):
    """Return the page of diagnoses a hospital uploader still needs to handle.

    :param session: SQLAlchemy session to query with
    :param uploadUserId: id of the uploading hospital user; returns None when absent
    :param patientName: optional exact patient real name to narrow the search
    :param pagger: pagination helper; defaults to Pagger(1, 20)
    :return: list of Diagnose rows, or None when uploadUserId is None
    """
    if uploadUserId is None:
        return
    # Avoid a shared default instance evaluated once at import time:
    # build a fresh Pagger per call instead.
    if pagger is None:
        pagger = Pagger(1, 20)
    statuses = (DiagnoseStatus.HospitalUserDiagnoseNeedCommit, DiagnoseStatus.NeedUpdate)
    if patientName is None or patientName == u'':
        query = session.query(Diagnose)\
            .filter(Diagnose.uploadUserId == uploadUserId,
                    Diagnose.status.in_(statuses))\
            .offset(pagger.getOffset()).limit(pagger.getLimitCount())
    else:
        query = session.query(Diagnose)\
            .select_from(join(Patient, Diagnose, Patient.id == Diagnose.patientId))\
            .filter(Patient.realname == patientName,
                    Diagnose.status.in_(statuses),
                    Diagnose.uploadUserId == uploadUserId)\
            .offset(pagger.getOffset()).limit(pagger.getLimitCount())
    # BUG FIX: the original had a bare ``return`` with ``query.all()`` as a
    # separate (unreachable) statement, so the function always returned None.
    return query.all()
def xmlrpc_getGroupNameByAccountName(self, queryParams, client_id):
    '''
    xmlrpc_getGroupNameByAccountName
    :purpose: extract the groupname from the groups table by account name
    i/p parameters : accountname (queryParams[0])
    o/p parameters : [groupname], or [] when no match is found
    '''
    connection = dbconnect.engines[client_id].connect()
    Session = dbconnect.session(bind=connection)
    try:
        result = Session.query(dbconnect.Groups).\
            select_from(join(dbconnect.Groups, dbconnect.Account)).\
            filter(and_(dbconnect.Account.accountname == queryParams[0],
                        dbconnect.Groups.groupcode == dbconnect.Account.groupcode)).\
            first()
    finally:
        # BUG FIX: release the session and connection even when the query
        # raises; the original leaked both on error.
        Session.close()
        connection.connection.close()
    # BUG FIX: compare against None with "is not", not "!=".
    if result is not None:
        return [result.groupname]
    return []
def test_create_cluster(): #TODO: make this a reusable function in test_cluster and import np = sess.query(Building).filter_by(name='np').one() dmn = sess.query(Domain).first() assert dmn, 'No domain found in %s' % func_name() per = sess.query(Personality).select_from(join( Archetype, Personality)).filter( and_(Archetype.name == 'windows', Personality.name == 'generic')).one() ec = EsxCluster(name=CLUSTER_NAME, location_constraint=np, personality=per, domain=dmn) create(sess, ec) assert ec, "No EsxCluster created by %s" % func_name() print ec
def xmlrpc_getGroupNameByAccountName(self,queryParams,client_id):
    '''
    xmlrpc_getGroupNameByAccountName
    :purpose: extract the groupname from the groups table by account name
    i/p parameters : accountname (queryParams[0])
    o/p parameters : [groupname], or [] when no match is found
    '''
    connection = dbconnect.engines[client_id].connect()
    Session = dbconnect.session(bind=connection)
    try:
        result = Session.query(dbconnect.Groups).\
            select_from(join(dbconnect.Groups, dbconnect.Account)).\
            filter(and_(dbconnect.Account.accountname == queryParams[0],
                        dbconnect.Groups.groupcode == dbconnect.Account.groupcode)).\
            first()
    finally:
        # BUG FIX: release the session and connection even when the query
        # raises; the original leaked both on error.
        Session.close()
        connection.connection.close()
    # BUG FIX: compare against None with "is not", not "!=".
    if result is not None:
        return [result.groupname]
    return []
def getRequiredFor(itemID, attrMapping):
    """Return (typeID, value) rows pairing skill attributes with level attributes.

    :param itemID: value the skill attribute must hold
    :param attrMapping: mapping of skill attributeID -> level attributeID
    """
    skillAttr = aliased(Attribute)
    levelAttr = aliased(Attribute)
    # One clause per (skill attribute, level attribute) pairing.
    pairClauses = [
        and_(skillAttr.attributeID == attrSkill,
             levelAttr.attributeID == attrLevel)
        for attrSkill, attrLevel in attrMapping.items()
    ]
    query = select(
        (levelAttr.typeID, levelAttr.value),
        and_(skillAttr.value == itemID, or_(*pairClauses)),
        from_obj=[
            # Self-join on typeID so both attributes belong to the same type.
            join(skillAttr, levelAttr, skillAttr.typeID == levelAttr.typeID)
        ])
    return gamedata_session.execute(query).fetchall()
def add_term(message, command, word):
    """Register a new term under an existing command.

    :param message: slackbot.dispatcher.Message
    :param command: the already-registered command (provides .id and .name)
    :param str word: the term to register
    """
    session = Session()
    existing = (session.query(Term)
                .select_from(join(Term, CreateCommand))
                .filter(CreateCommand.id == command.id)
                .filter(Term.word == word)
                .one_or_none())
    name = command.name
    if existing:
        # Already registered: tell the user instead of inserting a duplicate.
        botsend(message, 'コマンド `${}` に「{}」は登録済みです'.format(name, word))
        return
    session.add(Term(create_command=command.id,
                     creator=message.body['user'],
                     word=word))
    session.commit()
    botsend(message, 'コマンド `${}` に「{}」を追加しました'.format(name, word))
def test_create_esx_cluster(): """ tests the creation of an EsxCluster """ np = Building.get_unique(sess, name='np', compel=True) br = Branch.get_unique(sess, 'ny-prod', compel=True) per = sess.query(Personality).select_from(join( Archetype, Personality)).filter( and_(Archetype.name == 'windows', Personality.name == 'generic')).one() ec = EsxCluster(name=CLUSTER_NAME, location_constraint=np, personality=per, down_hosts_threshold=2, branch=br) create(sess, ec) assert ec print ec assert ec.max_hosts is 8 print 'esx cluster max members = %s' % ec.max_hosts
def metrics():
    """Return artists whose metric value crossed the requested threshold.

    Joins the metric table to itself on artist_id and selects rows where the
    current value is >= the requested value while the value exactly 24 hours
    earlier was below it (an upward crossing).

    FIX: dropped the incorrect ``-> List`` annotation — ``jsonify`` returns a
    flask Response, not a list.
    """
    from flask import jsonify
    from sqlalchemy.orm import aliased
    from app.models import Metric

    # NOTE(review): raises TypeError if 'metric_value' is missing — presumably
    # the route guarantees it; confirm, or return a 400 explicitly.
    threshold = int(request.args.get('metric_value'))
    current = Metric.__table__
    previous = aliased(Metric.__table__)
    query = join(
        current, previous,
        current.c.artist_id == previous.c.artist_id).select().where(
            and_(
                current.c.value >= threshold,
                previous.c.value < threshold,
                previous.c.date == current.c.date - text('INTERVAL 24 HOURS')))
    rows = db.session.connection().execute(query).fetchall()
    return jsonify(rows)
def test_create_clusters(): np = sess.query(Building).filter_by(name='np').one() br = Branch.get_unique(sess, 'ny-prod', compel=True) per = sess.query(Personality).select_from( join(Archetype, Personality)).filter( and_(Archetype.name == 'windows', Personality.name == 'generic')).one() for i in xrange(NUM_CLUSTERS): ec = EsxCluster(name='%s%s' % (CLUSTER_NAME, i), location_constraint=np, branch=br, personality=per, down_hosts_threshold=2) add(sess, ec) commit(sess) ecs = sess.query(EsxCluster).all() assert len(ecs) is NUM_CLUSTERS print ecs[0] assert ecs[0].max_hosts is 8 print 'esx cluster max hosts = %s' % (ecs[0].max_hosts)
def _update_queues(self):
    """Promote waiting reservations to concrete current reservations while
    free resource instances remain.

    Loops until no promotable waiting reservation (or no free instance) is
    left. Each successful promotion is committed in its own session so that
    concurrent schedulers can interleave safely; integrity conflicts roll
    back and retry.
    """
    ###########################################################
    # There are reasons why a waiting reservation may not be
    # able to be promoted while the next one is. For instance,
    # if a user is waiting for "pld boards", but only for
    # instances of "pld boards" which have a "ud-binary@Binary
    # experiments" server running. If only a "ud-pld@PLD
    # Experiments" is available, then this user will not be
    # promoted and the another user which is waiting for a
    # "ud-pld@PLD Experiments" can be promoted.
    #
    # Therefore, we have a list of the IDs of the waiting
    # reservations we previously thought that they couldn't be
    # promoted in this iteration. They will have another
    # chance in the next run of _update_queues.
    #
    previously_waiting_reservation_ids = []

    ###########################################################
    # While there are free instances and waiting reservations,
    # take the first waiting reservation and set it to current
    # reservation. Make this repeatedly because we want to
    # commit each change
    #
    while True:
        session = self.session_maker()
        try:
            resource_type = session.query(ResourceType).filter(ResourceType.name == self.resource_type_name).first()

            #
            # Retrieve the first waiting reservation. If there is no one that
            # we haven't tried already, return
            #
            first_waiting_reservations = session.query(WaitingReservation).filter(WaitingReservation.resource_type == resource_type).order_by(WaitingReservation.priority, WaitingReservation.id)[:len(previously_waiting_reservation_ids) + 1]
            first_waiting_reservation = None
            for waiting_reservation in first_waiting_reservations:
                if waiting_reservation.id not in previously_waiting_reservation_ids:
                    first_waiting_reservation = waiting_reservation
                    break

            if first_waiting_reservation is None:
                return # There is no waiting reservation for this resource that we haven't already tried

            previously_waiting_reservation_ids.append(first_waiting_reservation.id)

            #
            # For the current resource_type, let's ask for
            # all the resource instances available (i.e. those
            # who have no SchedulingSchemaIndependentSlotReservation
            # associated)
            #
            free_instances = session.query(CurrentResourceSlot)\
                .select_from(join(CurrentResourceSlot, ResourceInstance))\
                .filter(not_(CurrentResourceSlot.slot_reservations.any()))\
                .filter(ResourceInstance.resource_type == resource_type)\
                .order_by(CurrentResourceSlot.id).all()

            if len(free_instances) == 0:
                # If there is no free instance, just return
                return

            #
            # Select the correct free_instance for the current student among
            # all the free_instances
            #
            if self.randomize_instances:
                randomized_free_instances = [ free_instance for free_instance in free_instances ]
                random.shuffle(randomized_free_instances)
            else:
                randomized_free_instances = free_instances

            for free_instance in randomized_free_instances:
                resource_type = free_instance.resource_instance.resource_type
                if resource_type is None:
                    continue # If suddenly the free_instance is not a free_instance anymore, try with other free_instance

                #
                # IMPORTANT: from here on every "continue" should first revoke the
                # reservations_manager and resources_manager confirmations
                #
                self.reservations_manager.confirm(session, first_waiting_reservation.reservation_id)
                slot_reservation = self.resources_manager.acquire_resource(session, free_instance)
                total_time = first_waiting_reservation.time
                initialization_in_accounting = first_waiting_reservation.initialization_in_accounting
                start_time = self.time_provider.get_time()
                concrete_current_reservation = ConcreteCurrentReservation(slot_reservation, first_waiting_reservation.reservation_id, total_time, start_time, first_waiting_reservation.priority, first_waiting_reservation.initialization_in_accounting)
                concrete_current_reservation.set_timestamp_before(self.time_provider.get_time())
                client_initial_data = first_waiting_reservation.reservation.client_initial_data
                request_info = json.loads(first_waiting_reservation.reservation.request_info)
                username = request_info.get('username')
                username_unique = request_info.get('username_unique')
                locale = request_info.get('locale')
                reservation_id = first_waiting_reservation.reservation_id
                if reservation_id is None:
                    break # If suddenly the waiting_reservation is not a waiting_reservation anymore, so reservation is None, go again to the while True.

                requested_experiment_type = first_waiting_reservation.reservation.experiment_type
                selected_experiment_instance = None
                for experiment_instance in free_instance.resource_instance.experiment_instances:
                    if experiment_instance.experiment_type == requested_experiment_type:
                        selected_experiment_instance = experiment_instance

                if selected_experiment_instance is None:
                    # This resource is not valid for this user, other free_instance should be
                    # selected. Try with other, but first clean the acquired resources
                    self.reservations_manager.downgrade_confirmation(session, first_waiting_reservation.reservation_id)
                    self.resources_manager.release_resource(session, slot_reservation)
                    continue

                experiment_instance_id = ExperimentInstanceId(selected_experiment_instance.experiment_instance_id, requested_experiment_type.exp_name, requested_experiment_type.cat_name)
                laboratory_coord_address = selected_experiment_instance.laboratory_coord_address
                try:
                    # Atomically swap the waiting reservation for the concrete one.
                    session.delete(first_waiting_reservation)
                    session.add(concrete_current_reservation)
                    session.commit()
                except IntegrityError as ie:
                    if DEBUG:
                        print("IntegrityError when adding concrete_current_reservation: ", sys.exc_info())
                    # Other scheduler confirmed the user or booked the reservation, rollback and try again
                    # But log just in case
                    log.log(
                        PriorityQueueScheduler, log.level.Warning,
                        "IntegrityError looping on update_queues: %s" % ie )
                    log.log_exc(PriorityQueueScheduler, log.level.Info)
                    session.rollback()
                    break
                except Exception as e:
                    if DEBUG:
                        print("Other error when adding concrete_current_reservation: ", sys.exc_info())
                    log.log(
                        PriorityQueueScheduler, log.level.Warning,
                        "Exception looping on update_queues: %s" % e )
                    log.log_exc(PriorityQueueScheduler, log.level.Info)
                    session.rollback()
                    break
                else:
                    #
                    # Enqueue the confirmation, since it might take a long time
                    # (for instance, if the laboratory server does not reply because
                    # of any network problem, or it just takes too much in replying),
                    # so this method might take too long. That's why we enqueue these
                    # petitions and run them in other threads.
                    #
                    timezone = time.timezone if (time.localtime().tm_isdst == 0) else time.altzone
                    deserialized_server_initial_data = {
                        'priority.queue.slot.length'                       : '%s' % total_time,
                        'priority.queue.slot.start'                        : '%s' % datetime.datetime.fromtimestamp(start_time),
                        'priority.queue.slot.start.utc'                    : '%s' % datetime.datetime.utcfromtimestamp(start_time),
                        'priority.queue.slot.start.timestamp'              : '%s' % start_time,
                        'priority.queue.slot.start.timezone'               : '%s' % timezone,
                        'priority.queue.slot.initialization_in_accounting' : initialization_in_accounting,
                        'request.experiment_id.experiment_name'            : experiment_instance_id.exp_name,
                        'request.experiment_id.category_name'              : experiment_instance_id.cat_name,
                        'request.username'                                 : username,
                        'request.username.unique'                          : username_unique,
                        'request.full_name'                                : username,
                        'request.locale'                                   : locale,
                        'weblab_reservation_id'                            : first_waiting_reservation.reservation_id,
                    }
                    server_initial_data = json.dumps(deserialized_server_initial_data)
                    # server_initial_data will contain information such as "what was the last experiment used?".
                    # If a single resource was used by a binary experiment, then the next time may not require reprogramming the device
                    self.confirmer.enqueue_confirmation(laboratory_coord_address, reservation_id, experiment_instance_id, client_initial_data, server_initial_data, self.resource_type_name)
                    #
                    # After it, keep in the while True in order to add the next
                    # reservation
                    #
                    break
        except (ConcurrentModificationError, IntegrityError) as ie:
            # Something happened somewhere else, such as the user being confirmed twice, the experiment being reserved twice or so on.
            # Rollback and start again
            if DEBUG:
                print("Other ConcurrentModificationError or IntegrityError in update_queues: ", sys.exc_info())
            log.log(
                PriorityQueueScheduler, log.level.Warning,
                "Exception while updating queues, reverting and trying again: %s" % ie )
            log.log_exc(PriorityQueueScheduler, log.level.Info)
            session.rollback()
        finally:
            session.close()
def search(cls, session, name):
    '''Search for a movie by full-text matching its indexed name.

    Returns a query of Movie rows whose search-table entry matches *name*.
    '''
    search_join = join(
        movie_search_table,
        movie_table,
        movie_search_table.c.movie_id == Movie.movie_id)
    query = session.query(Movie).select_from(search_join)
    return query.filter(movie_search_table.c.name.match(name))