def wide_parameter_definition_tag_sq(self):
    """A subquery of the form:

    .. code-block:: sql

        SELECT
            parameter_definition_id,
            GROUP_CONCAT(parameter_tag_id) AS parameter_tag_id_list,
            GROUP_CONCAT(parameter_tag) AS parameter_tag_list
        FROM (
            SELECT
                pdt.parameter_definition_id,
                pt.id AS parameter_tag_id,
                pt.tag AS parameter_tag
            FROM parameter_definition_tag as pdt, parameter_tag AS pt
            WHERE pdt.parameter_tag_id = pt.id
        )
        GROUP BY parameter_definition_id

    :type: :class:`~sqlalchemy.sql.expression.Alias`
    """
    # Built lazily and cached on the instance; subsequent accesses reuse it.
    if self._wide_parameter_definition_tag_sq is None:
        self._wide_parameter_definition_tag_sq = (self.query(
            self.ext_parameter_definition_tag_sq.c.parameter_definition_id,
            func.group_concat(
                self.ext_parameter_definition_tag_sq.c.parameter_tag_id).
            label("parameter_tag_id_list"),
            func.group_concat(self.ext_parameter_definition_tag_sq.c.
                              parameter_tag).label("parameter_tag_list"),
        ).group_by(self.ext_parameter_definition_tag_sq.c.
                   parameter_definition_id).subquery())
    return self._wide_parameter_definition_tag_sq
def l_servers(page=1):
    """List application servers.

    POST: run a LIKE search on the form-selected column.
    GET: render a paginated listing.

    :param page: 1-based page number for the GET listing.
    """
    form = SearchServerForm()
    # Columns shared by both the search and the paginated listing.
    entities = (
        models.App_servers.server_name,
        models.App_servers.inner_ip,
        models.App_servers.env,
        models.App_servers.location,
        models.App_servers.type,
        models.App_servers.internet_ip,
        models.App_servers.cpu,
        models.App_servers.ram,
        models.App_servers.hdd,
        models.App_servers.status,
        models.App_servers.desc,
        func.group_concat(models.App_nodes.app_name).label('app_list'),
    )
    base_query = (
        models.App_servers.query.with_entities(*entities)
        .filter(models.App_servers.id == models.App_nodes.node_id)
        .group_by(models.App_servers.id)
        .order_by(models.App_servers.inner_ip))
    if request.method == "POST":
        # NOTE(review): the original interpolated the user-supplied search
        # text straight into raw SQL ('<field> like "%...%"') — an SQL
        # injection hole.  Use a bound LIKE expression on the selected
        # column instead.
        column = getattr(models.App_servers, form.s_select.data)
        items = base_query.filter(
            column.like('%' + form.s_content.data + '%')).all()
        total = len(items)
        return render_template('servers/l_servers.html',
                               total=total,
                               object_list=items,
                               form=form,
                               APP_ENV=g.config.get('APP_ENV'),
                               SERVER_LOCATION=g.config.get('SERVER_LOCATION'),
                               SERVER_TYPE=g.config.get('SERVER_TYPE'))
    else:
        paginate = base_query.paginate(
            page, g.config.get('POSTS_PER_PAGE'), False)
        object_list = paginate.items
        total = models.App_servers.query.count()
        pagination = models.App_servers.query.paginate(
            page, per_page=g.config.get('POSTS_PER_PAGE'))
        return render_template('servers/l_servers.html',
                               total=total,
                               pagination=pagination,
                               object_list=object_list,
                               form=form,
                               APP_ENV=g.config.get('APP_ENV'),
                               SERVER_LOCATION=g.config.get('SERVER_LOCATION'),
                               SERVER_TYPE=g.config.get('SERVER_TYPE'))
def __init__(self, search_term):
    """Parse *search_term* into a Whoosh query, validate it, and build the
    SQLAlchemy query selecting the ids of tourney lists that match it.

    :param search_term: free-text search expression; lowercase and/or are
        normalized to the canonical upper-case boolean operators.
    :raises ValueError: if validate_search_term reports the term invalid.
    """
    # Normalize boolean operators before handing the text to the parser.
    search_term = re.sub(and_regex, ' AND ', search_term)
    search_term = re.sub(or_regex, ' OR ', search_term)
    parser = QueryParser("content", schema=None)
    q = parser.parse(search_term)
    invalid = self.validate_search_term(q)
    if invalid:
        raise ValueError(invalid + search_term)
    myapp.db_connector.connect()
    session = myapp.db_connector.get_session()
    # One row per tourney list, with all searchable text columns
    # (venue, pilots, ships, upgrades) aggregated so the parsed query
    # tree can filter on them.
    subq = session.query(
        TourneyList.id.label("tourney_list_id"),
        TourneyVenue.country.label("country_name"),
        TourneyVenue.state.label("state_name"),
        TourneyVenue.city.label("city_name"),
        TourneyVenue.venue.label("venue_name"),
        Tourney.tourney_type.label("tourney_type"),
        func.group_concat(ShipPilot.ship_type.distinct()).label("ship_name"),
        func.group_concat(
            func.concat(Pilot.name, " ", Pilot.canon_name)).label("pilot_name"),
        func.group_concat(
            func.concat(Upgrade.name, " ", Upgrade.canon_name)
        ).label("upgrade_name")). \
        join(Tourney).\
        join(TourneyVenue).\
        join(Ship). \
        join(ShipPilot). \
        join(Pilot). \
        outerjoin(ShipUpgrade). \
        outerjoin(Upgrade).\
        group_by(TourneyList.id).subquery()
    # Translate the parsed query tree into a SQL expression over subq.
    fn = tree_to_expr(q, subq)
    self.query = session.query(subq.c.tourney_list_id).filter(fn)
def get_index_data(only_vulnerable=False, only_in_repo=True):
    """Collect CVE groups with their issues, packages and advisories.

    :param only_vulnerable: restrict to groups whose status is unknown,
        vulnerable or testing.
    :param only_in_repo: restrict to groups whose package currently exists
        in a repository.
    :return: list of dicts with keys 'group', 'pkgs', 'advisories' and
        'issues', sorted by status, then severity, then creation date.
    """
    select = (db.session.query(CVEGroup, CVE,
                               func.group_concat(CVEGroupPackage.pkgname, ' '),
                               func.group_concat(Advisory.id, ' '))
              .join(CVEGroupEntry).join(CVE).join(CVEGroupPackage)
              .outerjoin(Advisory, and_(Advisory.group_package_id == CVEGroupPackage.id,
                                        Advisory.publication == Publication.published)))
    if only_vulnerable:
        select = select.filter(CVEGroup.status.in_([Status.unknown, Status.vulnerable, Status.testing]))
    if only_in_repo:
        select = select.join(Package, Package.name == CVEGroupPackage.pkgname)
    entries = (select.group_by(CVEGroup.id).group_by(CVE.id)
               .order_by(CVEGroup.status.desc())
               .order_by(CVEGroup.created.desc())).all()
    groups = defaultdict(defaultdict)
    for group, cve, pkgs, advisories in entries:
        group_entry = groups.setdefault(group.id, {})
        group_entry['group'] = group
        # GROUP_CONCAT results are space-separated; split them back out.
        group_entry['pkgs'] = list(set(pkgs.split(' ')))
        group_entry['advisories'] = advisories.split(' ') if advisories else []
        group_entry.setdefault('issues', []).append(cve)
    for key, group in groups.items():
        group['issues'] = sorted(group['issues'], key=lambda item: item.id, reverse=True)
    # Later sorts are stable, so the effective order is:
    # status, then severity, then newest-created first.
    groups = groups.values()
    groups = sorted(groups, key=lambda item: item['group'].created, reverse=True)
    groups = sorted(groups, key=lambda item: item['group'].severity)
    groups = sorted(groups, key=lambda item: item['group'].status)
    return groups
def get_samples_query():
    """Build the base query over samples with aggregated analysis data.

    Each row pairs a Sample with one Analysis (grouped by Analysis.id) and
    carries ';'-joined distinct aggregates for variations, maps and
    observed maps plus a distinct error count.

    :return: an un-executed SQLAlchemy query.
    """
    query = Sample.query \
        .with_entities(Sample,
                       Analysis,
                       # "id;tier_label" pairs, distinct per group.
                       func.group_concat(func.CONCAT_WS(';', Variation.id, Variation.tier_label)
                                         .distinct()).label("variations"),
                       ObservedMap.id.label('observed_map_id'),
                       func.count(Error.id.distinct()).label('total_errors'),
                       func.group_concat(func.CONCAT_WS(';', Map.id, Map.construct_id)
                                         .distinct()).label("map"),
                       func.group_concat(func.CONCAT_WS(';', ObservedMap.id, ObservedMap.status)
                                         .distinct()).label("total_maps")
                       ) \
        .outerjoin((Analysis, Sample.id == Analysis.sample_id)) \
        .outerjoin(MapAnalysis) \
        .outerjoin(Map) \
        .outerjoin(ObservedMap) \
        .outerjoin(Error) \
        .outerjoin(Variation) \
        .group_by(Analysis.id)
    return query
def wide_relationship_parameter_definition_list(
        self, relationship_class_id_list=None,
        parameter_definition_id_list=None):
    """Return relationship classes and their parameter definitions in wide format."""
    # Narrow (one row per class/definition pair) selection first.
    narrow = self.query(
        self.RelationshipClass.id.label("relationship_class_id"),
        self.RelationshipClass.name.label("relationship_class_name"),
        self.parameter_definition_sq.c.id.label("parameter_definition_id"),
        self.parameter_definition_sq.c.name.label("parameter_name"),
    ).filter(
        self.RelationshipClass.id
        == self.parameter_definition_sq.c.relationship_class_id)
    if relationship_class_id_list is not None:
        narrow = narrow.filter(
            self.RelationshipClass.id.in_(relationship_class_id_list))
    if parameter_definition_id_list is not None:
        narrow = narrow.filter(
            self.parameter_definition_sq.c.id.in_(parameter_definition_id_list))
    sq = narrow.subquery()
    # Collapse into one row per class with comma-joined id and name lists.
    return self.query(
        sq.c.relationship_class_id,
        sq.c.relationship_class_name,
        func.group_concat(sq.c.parameter_definition_id).label(
            "parameter_definition_id_list"),
        func.group_concat(sq.c.parameter_name).label("parameter_name_list"),
    ).group_by(sq.c.relationship_class_id, sq.c.relationship_class_name)
def wide_object_parameter_definition_list(
        self, object_class_id_list=None, parameter_definition_id_list=None):
    """Return object classes and their parameter definitions in wide format."""
    # Narrow (one row per class/definition pair) selection first.
    narrow = self.query(
        self.object_class_sq.c.id.label("object_class_id"),
        self.object_class_sq.c.name.label("object_class_name"),
        self.parameter_definition_sq.c.id.label("parameter_definition_id"),
        self.parameter_definition_sq.c.name.label("parameter_name"),
    ).filter(
        self.object_class_sq.c.id
        == self.parameter_definition_sq.c.object_class_id)
    if object_class_id_list is not None:
        narrow = narrow.filter(
            self.object_class_sq.c.id.in_(object_class_id_list))
    if parameter_definition_id_list is not None:
        narrow = narrow.filter(
            self.parameter_definition_sq.c.id.in_(parameter_definition_id_list))
    sq = narrow.subquery()
    # Collapse into one row per class with comma-joined id and name lists.
    return self.query(
        sq.c.object_class_id,
        sq.c.object_class_name,
        func.group_concat(sq.c.parameter_definition_id).label(
            "parameter_definition_id_list"),
        func.group_concat(sq.c.parameter_name).label("parameter_name_list"),
    ).group_by(sq.c.object_class_id, sq.c.object_class_name)
def draw_cooccurrence_network(net_type=None, db_path=None, output_path=None, top_n=30):
    """Draw a co-occurrence network from the WoS database.

    :param net_type: 'keyword', 'keyword_plus' or 'author'.
    :param db_path: path to the sqlite database.
    :param output_path: directory the figure is written to.
    :param top_n: keep only the top-n most frequent nodes.
    :raises ValueError: on an unsupported *net_type*.
    """
    assert net_type is not None and output_path is not None and db_path is not None
    engine = get_engine(db_path)
    session = get_session(engine)
    print('正在处理共现数据')  # "processing co-occurrence data"
    graph_data = []
    data = []
    title = None
    if net_type == 'keyword':
        title = 'Author Keyword Co-occurrence Network'
        data = session.query(WosDocument.unique_id,
                             func.group_concat(WosKeyword.keyword, ';'))\
            .join(WosKeyword).group_by(WosDocument.unique_id)
        filter_data = session.query(WosKeyword.keyword, func.count('*').label('num')) \
            .group_by(WosKeyword.keyword).order_by(desc('num'))
    elif net_type == 'keyword_plus':
        title = 'WoS Keyword Co-occurrence Network'
        data = session.query(WosDocument.unique_id,
                             func.group_concat(WosKeywordPlus.keyword_plus, ';'))\
            .join(WosKeywordPlus).group_by(WosDocument.unique_id)
        filter_data = session.query(WosKeywordPlus.keyword_plus, func.count('*').label('num')) \
            .group_by(WosKeywordPlus.keyword_plus).order_by(desc('num'))
    elif net_type == 'author':
        title = 'Author Co-authorship Network'
        data = session.query(WosDocument.unique_id,
                             func.group_concat(WosAuthor.last_name + ',' + WosAuthor.first_name, ';'))\
            .join(WosAuthor).group_by(WosDocument.unique_id)
        filter_data = session.query(WosAuthor.last_name + ',' + WosAuthor.first_name,
                                    func.count('*').label('num')) \
            .group_by(WosAuthor.last_name + ',' + WosAuthor.first_name).order_by(desc('num'))
    else:
        # NOTE(review): the original printed a message and called exit(-1);
        # raising keeps the caller in control of the process.
        raise ValueError('未考虑到的作图情况: %s' % net_type)
    # Pairwise combinations within a document become co-occurrence edges.
    for row in data:
        if not row[1]:
            # Guard against NULL/empty aggregates.
            continue
        row_split = row[1].split(';')
        if len(row_split) > 1:
            graph_data += list(combinations(row_split, 2))
    # network holds all terms/authors of the corpus.
    print('正在生成共现网络')  # "building co-occurrence network"
    network = get_network(graph_data, directed=False)
    # Materialize the frequency ranking BEFORE closing the session — the
    # original iterated the lazy query after session.close().  Also removed
    # the leftover debug dump nx.write_graphml(network, 'test.gml').
    filter_nodes = [i[0] for i in filter_data[top_n:]]
    session.close()
    # Hide everything but the top-n most frequent nodes.
    sub = nx.restricted_view(network, filter_nodes, [])
    draw_net(sub, title=title, output_path=os.path.join(output_path, net_type))
def revised_chain_info(self, ifes):
    """Aggregate chain information for the given IFEs.

    :param ifes: objects whose 'id' property yields IFE ids.
    :return: dict mapping ife_id -> dict of aggregated chain columns
        (summed length, '+'-joined sequences, compound names, species).
    """
    self.logger.debug('ifes: %s' % ifes)
    ife_ids = self.class_property(ifes, 'id')
    with self.session() as session:
        query = session.query(
            mod.IfeInfo.ife_id,
            func.sum(mod.ChainInfo.chain_length).label('Exp Length (CI)'),
            # .op('SEPARATOR')(x) renders MySQL's
            # GROUP_CONCAT(expr SEPARATOR 'x') syntax.
            func.group_concat(mod.ChainInfo.sequence.op('SEPARATOR')('+')).label('Exp Sequence (CI)'),
            func.group_concat(mod.ChainInfo.compound.op('SEPARATOR')(' + ')).label('Nucleic Acid Compound'),
            func.group_concat(mod.SpeciesMapping.species_name.op('SEPARATOR')(' / ')).label('RNA Species'),
        ).\
            join(mod.IfeChains,
                 mod.IfeChains.ife_id == mod.IfeInfo.ife_id).\
            join(mod.ChainInfo,
                 mod.ChainInfo.chain_id == mod.IfeChains.chain_id).\
            join(mod.ChainSpecies,
                 mod.ChainSpecies.chain_id == mod.ChainInfo.chain_id).\
            outerjoin(mod.SpeciesMapping,
                      mod.SpeciesMapping.species_mapping_id == mod.ChainSpecies.species_id).\
            filter(mod.IfeInfo.ife_id.in_(ife_ids)).\
            group_by(mod.IfeInfo.ife_id)
        data = {}
        for result in query:
            entry = row2dict(result)
            ife_id = entry.pop('ife_id')
            data[ife_id] = entry
        return data
def get(self):
    """Collect records that share (name, author) across sources and emit
    them as JSON, one entry per duplicated work with its per-source links."""
    duplicates = self.session.query(
        SpiderRecords.name,
        SpiderRecords.author,
        func.group_concat(SpiderRecords.dsp_id),
        func.group_concat(SpiderRecords.link),
        func.group_concat(SpiderRecords.rank),
        func.count(SpiderRecords._id),
    ).group_by(
        SpiderRecords.name, SpiderRecords.author,
    ).having(
        func.count(SpiderRecords._id) > 1,
    ).all()
    union_list = []
    for name, author, dsp_ids, links, ranks, _count in duplicates:
        # The three comma-joined aggregates line up positionally.
        link_info = [
            {"dsp_id": dsp_id, "link": link, "rank": rank}
            for dsp_id, link, rank in zip(
                dsp_ids.split(','), links.split(','), ranks.split(','))
        ]
        union_list.append(dict(name=name, author=author, link_info=link_info))
    self.write(dict(union_list=union_list))
def get_entries_summed():
    """Return all entries grouped by name, with per-payment history
    aggregated (timestamps, amounts, references) and the summed total,
    as a JSON response."""
    # todo: check if its lower case...
    # NOTE(review): the original had a trailing comma after ``.all()``,
    # making ``entries`` a 1-tuple containing the result list — the loop
    # below then hit an AttributeError on the first iteration.
    entries = db.session.query(
        Entry.name,
        label('team', Entry.team),
        label('last_updated', func.group_concat(Entry.timestamp)),
        label('paid', func.group_concat(Entry.paid)),
        label('payment_references', func.group_concat(Entry.payment_reference)),
        label('total_paid', func.sum(Entry.paid))).group_by(Entry.name).all()
    formatted_entries = []
    for entry in entries:
        payment_references = []
        if entry.payment_references:
            payment_references = entry.payment_references.split(',')
        formatted_entries.append({
            'name': entry.name,
            'team': entry.team,
            # group_concat output is comma-separated.
            'last_updated': entry.last_updated.split(','),
            'paid': entry.paid.split(','),
            'total_paid': entry.total_paid,
            'payment_references': payment_references
        })
    return jsonify(formatted_entries)
def getKeywordsOfPub(id):
    """
    Retrieve all keywords for a publication by ID

    @param id: the database ID of the publication
    @type id: int

    @return: a list of keywords
    @rtype: list(dict)
    """
    row = db.session.query(
        Publications.id,
        func.group_concat(Keywords.id),
        func.group_concat(Keywords.name)) \
        .outerjoin(Keywords_publication, Publications.id == Keywords_publication.publication_id) \
        .outerjoin(Keywords, Keywords.id == Keywords_publication.keyword_id) \
        .filter(Keywords.id == Keywords_publication.keyword_id) \
        .filter(Keywords_publication.publication_id == id) \
        .group_by(Publications.id).first()
    if row is None:
        return []
    # Both aggregates come back as comma-separated strings (or None).
    id_csv, name_csv = row[1], row[2]
    ids = id_csv.split(',') if id_csv is not None else []
    keywords = []
    for pos, kw_id in enumerate(ids):
        keywords.append({
            'id': kw_id,
            'name': name_csv.split(',')[pos] if name_csv is not None else ''
        })
    db.session.close()
    return keywords
def get_data(db_session, maxAuthors = 10000, type = "abstract"):
    """Fetch up to *maxAuthors* authors, each with the concatenated text of
    their articles (abstracts when ``type == "abstract"``, titles otherwise)."""
    if type == "abstract":
        data = db_session.query(
            Author.id, Author.firstName, Author.lastName, Author.category,
            func.group_concat(Article.abstract).label('text')
        ).filter(Article.authors).group_by(Author.id).limit(maxAuthors)
    else:
        data = db_session.query(
            Author.id, Author.firstName, Author.lastName, Author.category,
            func.group_concat(Article.title).label('text')
        ).filter(Author.id, Article.id).filter(
            Article.authors).group_by(Author.id).limit(maxAuthors)
    return data
def tobs():
    """Return temperature observations after 2016-08-23 as JSON, keyed by
    date and then by station."""
    rows = session.query(
        Measurement.date,
        func.group_concat(Measurement.station),
        func.group_concat(Measurement.tobs),
    ).filter(Measurement.date > "2016-08-23").group_by(Measurement.date)
    payload = {}
    for date, stations, temps in rows.all():
        # The two aggregates are comma-joined and line up positionally.
        payload[date] = dict(zip(stations.split(","), temps.split(",")))
    return jsonify(payload)
def prcp():
    """Return precipitation readings after 2016-08-23 as JSON, keyed by
    date and then by station."""
    rows = session.query(
        Measurement.date,
        func.group_concat(Measurement.station),
        func.group_concat(Measurement.prcp),
    ).filter(Measurement.date > "2016-08-23").group_by(Measurement.date)
    payload = {}
    for date, stations, readings in rows.all():
        # The two aggregates are comma-joined and line up positionally.
        payload[date] = dict(zip(stations.split(","), readings.split(",")))
    return jsonify(payload)
def books():
    """List books with aggregated authors and distinct categories,
    optionally filtered by ``published_date`` and/or ``author`` query
    parameters."""
    args = request.args
    published_date = args.get('published_date')
    author = args.get('author')
    # SELECT shared by both branches: one row per book, authors joined
    # with ', ' and distinct categories comma-joined.
    entities = (
        Books.id,
        Books.title,
        Books.published_date,
        func.group_concat(Authors.author, ', ').label('author'),
        func.group_concat(Categories.category.distinct()).label('category'),
    )
    if published_date is None and author is None:
        result = db_session.query(*entities).join(Authors).join(
            Categories).group_by(Books.id).all()
        return {"books": [r._asdict() for r in result]}
    # Only truthy values filter — an empty query parameter is ignored
    # (same effect as the original's build-then-strip-falsy dance).
    published_data_filter = {}
    if published_date:
        published_data_filter['published_date'] = published_date
    author_filter = {}
    if author:
        author_filter['author'] = author
    result = db_session.query(*entities).filter_by(
        **published_data_filter).join(Authors).filter_by(
            **author_filter).join(Categories).group_by(Books.id).all()
    # NOTE(review): removed a leftover debug print of the response.
    return {"books": [r._asdict() for r in result]}
def books_id(book_id):
    """Return a single book (matched by id) with its aggregated authors
    and distinct categories."""
    rows = db_session.query(
        Books.id,
        Books.title,
        Books.published_date,
        func.group_concat(Authors.author, ', ').label('author'),
        func.group_concat(Categories.category.distinct()).label('category'),
    ).filter_by(id=book_id).join(Authors).join(Categories).group_by(
        Books.id).all()
    return {"book": [row._asdict() for row in rows]}
def batch(self, feature_type=None):
    """Geocode every distinct location that has no OSM address yet.

    :param feature_type: optional feature type passed through to geocode().
    """
    logger = get_logger()
    # Default concat function to sqlite
    # NOTE(review): the labels read swapped (group_concat -> "id_array",
    # array_agg -> "id_string"); left as-is in case downstream code
    # accesses the row by label name — TODO confirm and rename.
    concat_func = func.group_concat(
        InitiativeImport.id.distinct()).label("id_array")
    if self.db.session.bind.driver != "pysqlite":
        # Assume postgres
        concat_func = func.array_agg(
            InitiativeImport.id, type_=ARRAY(Integer)).label("id_string")
    location_set = self.db.session.query(
        InitiativeImport.location, concat_func) \
        .filter(InitiativeImport.location.isnot(None)) \
        .filter(InitiativeImport.osm_address.is_(None)) \
        .with_for_update().group_by(InitiativeImport.location).all()
    if not location_set:
        # NOTE(review): the original called exit() here, killing the whole
        # process; an empty batch is not fatal, so just warn and return.
        logger.warning("Ik wil geocoderen, maar er is geen data.")
        return
    for item in location_set:
        self.geocode(item, feature_type)
def get_hackathon_show_list(self, hackathon_id, show_type=None, limit=10):
    """Return the newest team shows of a hackathon, one row per team.

    :param hackathon_id: hackathon to list shows for.
    :param show_type: optional filter on TeamShow.type.
    :param limit: maximum number of rows (note: limit is currently not
        applied to the query below — TODO confirm intended).
    :return: list of dicts; 'uri' holds all of a team's "uri:::type"
        pairs concatenated by group_concat.
    """
    criterion = TeamShow.hackathon_id == hackathon_id
    if show_type:
        criterion = and_(criterion, TeamShow.type == show_type)
    show_list = (
        self.db.session()
        .query(
            TeamShow.id,
            TeamShow.note,
            TeamShow.team_id,
            TeamShow.hackathon_id,
            Team.name,
            Team.description,
            Team.logo,
            func.group_concat(func.concat(TeamShow.uri, ":::", TeamShow.type)).label("uri"),
        )
        .join(Team, Team.id == TeamShow.team_id)
        .filter(criterion)
        .group_by(TeamShow.team_id)
        .order_by(TeamShow.create_time.desc())
        .all()
    )
    return [s._asdict() for s in show_list]
def get_same_genres(self, genres_list, genre_str):
    """Return up to 8 other movies whose distinct genre set, concatenated,
    equals *genre_str* (i.e. movies sharing exactly these genres)."""
    matches = (
        db.session.query(Movies, MoviesGenre)
        .join(Movies, Movies.id == MoviesGenre.media_id)
        .filter(
            MoviesGenre.genre.in_(genres_list),
            MoviesGenre.media_id != self.id,
        )
        .group_by(MoviesGenre.media_id)
        .having(func.group_concat(MoviesGenre.genre.distinct()) == genre_str)
        .limit(8)
        .all()
    )
    return matches
def precipitation():
    """Return per-date precipitation for the last year of data, as JSON."""
    # Calculate the date 1 year ago from the last data point in the database
    last_date = session.query(Measurement.date).order_by(
        Measurement.date.desc()).first()
    print("Last Date: ", last_date.date)
    # Perform a query to retrieve the data and precipitation scores
    query_date = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    print("Query Date: ", query_date)
    # Query data
    results = (session.query(
        Measurement.date,
        func.group_concat(
            Measurement.prcp.distinct()).label("precipitation")).filter(
                Measurement.date >= query_date).group_by(
                    Measurement.date).order_by(Measurement.date).all())
    # Create emtpy dict
    x_result = {}
    # Build dict with distinct set of precipitation values on each given day
    # (for each station).
    # NOTE(review): "precipitation" is group_concat's comma-separated
    # string; wrapping it in a one-element list yields ["v1,v2,..."] —
    # presumably .split(",") was intended.  TODO confirm with the consumer.
    for result in results:
        x_result.update({result.date: [result.precipitation]})
    # Return
    return jsonify(x_result)
def msrun_page(request):
    """Render statistics and metadata for a single MS run.

    :param request: pyramid request; ``matchdict["msrun"]`` is the run id.
    :return: template dict, or a 500 Response on database errors.
    """
    try:
        # Per-run counts: spectrum hits, distinct peptides, distinct proteins.
        query = DBSession.query(func.count(SpectrumHit.spectrum_hit_id).label("count_hits"),
                                func.count(SpectrumHit.sequence.distinct()).label("count_pep"),
                                func.count(Protein.name.distinct()).label("count_prot")
                                )
        query = query.join(MsRun, SpectrumHit.ms_run_ms_run_id == MsRun.ms_run_id)
        query = query.join(t_spectrum_protein_map)
        query = query.join(Protein)
        query = query.filter(MsRun.ms_run_id == request.matchdict["msrun"])
        statistics = json.dumps(query.all())

        # Run metadata joined with its source and HLA typing.
        # .op('order by')(...) renders "hla_string ORDER BY hla_string"
        # inside GROUP_CONCAT, so the typing list comes back sorted.
        query = DBSession.query(MsRun.filename,
                                func.group_concat(
                                    (HlaType.hla_string.distinct().op('order by')(HlaType.hla_string))).label(
                                    'hla_typing'),
                                Source.histology, Source.source_id, Source.patient_id,
                                Source.organ, Source.comment, Source.dignity, Source.celltype,
                                Source.location, Source.metastatis, Source.person,
                                Source.organism, Source.treatment,
                                Source.comment.label("comment"),
                                func.cast(MsRun.ms_run_date, String).label("ms_run_date"),
                                MsRun.used_share, MsRun.comment.label("msrun_comment"),
                                MsRun.sample_mass, MsRun.sample_volume,
                                MsRun.antibody_set, MsRun.antibody_mass)
        query = query.join(Source)
        query = query.join(t_hla_map)
        query = query.join(HlaType)
        query = query.filter(MsRun.ms_run_id == request.matchdict["msrun"])
        metadata = json.dumps(query.all())
    except Exception:
        # NOTE(review): was a bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt; narrowed to Exception.
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {"statistics": statistics, "metadata": metadata,
            "msrun": request.matchdict["msrun"]}
def copy_group(avg):
    """Pre-fill the 'add AVG' form with the data of an existing group.

    :param avg: group identifier of the form 'AVG-<id>'.
    :return: rendered add-group form, or not_found() if the group is missing.
    """
    group_id = avg.replace('AVG-', '')
    group_data = (db.session.query(
        CVEGroup, CVE,
        func.group_concat(
            CVEGroupPackage.pkgname,
            ' ')).filter(CVEGroup.id == group_id).join(CVEGroupEntry).join(
                CVE).join(CVEGroupPackage).group_by(CVEGroup.id).group_by(
                    CVE.id).order_by(CVE.id)).all()
    if not group_data:
        return not_found()
    group = group_data[0][0]
    issues = [cve for (group, cve, pkg) in group_data]
    issue_ids = [cve.id for cve in issues]
    # GROUP_CONCAT joins package names with spaces; split them back out.
    pkgnames = set(
        chain.from_iterable(
            [pkg.split(' ') for (group, cve, pkg) in group_data]))
    form = GroupForm()
    form.advisory_qualified.data = group.advisory_qualified
    form.affected.data = group.affected
    form.bug_ticket.data = group.bug_ticket
    form.cve.data = '\n'.join(issue_ids)
    form.fixed.data = group.fixed
    form.notes.data = group.notes
    form.pkgnames.data = '\n'.join(sorted(pkgnames))
    form.reference.data = group.reference
    form.status.data = status_to_affected(group.status).name
    return render_template('form/group.html',
                           title='Add AVG',
                           form=form,
                           CVEGroup=CVEGroup,
                           action='/avg/add')
def get_channel_category():
    """Return one channel category (looked up by uuid) together with the
    uuids of the channels assigned to it, as JSON."""
    # Request parameter
    uuid = request.args['uuid']
    # Subquery - the channel uuids assigned to each channel category
    query_channel = Channel_category.query \
        .join(categories_channels).join(Channel) \
        .with_entities(Channel_category.uuid) \
        .add_columns(func.group_concat(Channel.uuid).label('channel_uuid')) \
        .group_by(Channel_category.uuid) \
        .subquery()
    # Outer join the category row with its aggregated channel uuids
    query = Channel_category.query \
        .outerjoin(query_channel, Channel_category.uuid == query_channel.c.uuid) \
        .with_entities(Channel_category.uuid, Channel_category.name, Channel_category.cname,
                       Channel_category.tname, Channel_category.sort_id, Channel_category.status,
                       Channel_category.create_time, Channel_category.update_time) \
        .add_columns(query_channel.c.channel_uuid) \
        .filter(Channel_category.uuid == uuid).first()
    # Response payload for the datatable
    data = {
        'uuid': query.uuid,
        'name': query.name,
        'cname': query.cname,
        'tname': query.tname,
        'sort_id': query.sort_id,
        'channel_uuid': query.channel_uuid,
        'status': query.status,
        'create_time': query.create_time,
        'update_time': query.update_time
    }
    return jsonify(data)
def siteinfo(id):
    """Render the detail page of one site: its apps, their deployment
    nodes, and instance/node counts."""
    form = AddSiteForm()
    form_sorce = models.App_sites.query.filter_by(id=int(id))[0]
    if request.method == "GET":
        form.site_name.data = form_sorce.site_name
        # Apps under this site, each with its deployment node IPs aggregated.
        items = models.App_nodes.query.with_entities(
            models.App_nodes.app_name,
            func.group_concat(
                models.App_nodes.node_ip).label('node_list')).filter(
                    models.Apps.id == models.App_nodes.app_id,
                    models.Apps.site == id).group_by(
                        models.App_nodes.app_name).all()
        total_app = len(items)
        # Number of Tomcat instances for the site.
        total_instance = models.App_nodes.query.filter(
            models.Apps.id == models.App_nodes.app_id,
            models.Apps.site == id).count()
        # Number of distinct deployment nodes.
        total_node = models.App_nodes.query.filter(
            models.Apps.id == models.App_nodes.app_id,
            models.Apps.site == id).group_by(models.App_nodes.node_ip).count()
        return render_template('site/siteinfo.html',
                               form=form,
                               object_list=items,
                               total_app=total_app,
                               total_node=total_node,
                               total_instance=total_instance)
def get_searched_books(param_dict, db):
    '''Returns book(s) that match user search.

    Args:
        param_dict (dict): The user-specified search parameters
        db: The flask_sqlalchemy.SQLAlchemy object used to interact with
            the database

    Returns:
        dict: A dict of books matching the user-specified search parameters
            with isbn, title, publication_year, author first_name, author
            middle_name, and author last_name for each book
    '''
    conditions = []
    for field, value in param_dict.items():
        if value == '':
            continue
        # Prefix match: the stored value must start with the submitted
        # text but may continue past it.
        pattern = value + '%'
        if field in ('last_name', 'first_name'):
            conditions.append(getattr(Author, field).like(pattern))
        else:
            conditions.append(getattr(Book, field).like(pattern))
    books = db.session.query(
        Book.isbn,
        Book.title,
        Book.publication_year,
        func.group_concat(Author.full_name).label('authors'),
    ).filter(
        Book.book_id == Book_Author.book_id,
    ).filter(
        Author.author_id == Book_Author.author_id,
    ).filter(
        and_(*conditions),
    ).group_by(Book.book_id)
    return get_dict_list_from_result(books)
def getPurchaserProducts(purchaser_id):
    """Return the purchaser's products grouped by purchase date (JST), as JSON.

    Query parameters ``start_date``/``end_date`` (ISO dates) bound the
    inclusive date range.
    """
    start = request.args.get('start_date')
    end = request.args.get('end_date')
    query = db.session.query(
        func.strftime('%Y-%m-%d', Order.purchase_timestamp),
        func.group_concat(Product.name, ';')
    ).join(Product).filter(
        Order.purchaser_id == purchaser_id
    )
    if start:
        start = datetime.datetime.fromisoformat('%sT00:00:00+09:00' % start)
        query = query.filter(Order.purchase_timestamp >= start)
    if end:
        end = datetime.datetime.fromisoformat('%sT00:00:00+09:00' % end) + \
            datetime.timedelta(days=1)
        # NOTE(review): exclusive upper bound — the original used <=,
        # which wrongly included purchases made at exactly midnight of
        # the day after end_date.
        query = query.filter(Order.purchase_timestamp < end)
    try:
        rows = query.group_by(
            func.strftime('%Y-%m-%d', Order.purchase_timestamp)
        ).all()
        ret = dict()
        for row in rows:
            # row[1] is the ';'-joined product-name aggregate for that day.
            ret[row[0]] = [dict(product=i) for i in row[1].split(';')]
        return jsonify(dict(purchases=ret))
    except exc.SQLAlchemyError as e:
        return DatabaseError(e)
def SelectSingeMenuPrivilege(strUserID, MidList):
    """
    @note Query the privileges of the menu IDs listed in MidList.
    :param strUserID: user id to look privileges up for
    :param MidList: list of menu ids to restrict the query to
    :return: list of menu privilege dicts
    """
    project_dic = CommonSession.SelectProject('ProjectDic')
    menu_list = []
    with GetSession() as db_ses:
        # One row per menu; privileges are packed as "pid;r_priv;w_priv"
        # triples joined by commas via group_concat.
        privilege = db_ses.query(tables.MenuPrivilege.mid, tables.Menu.name, tables.Menu.url,
                                 tables.Menu.preid,
                                 expression.label('privileges',
                                                  func.group_concat(tables.MenuPrivilege.pid, ";",
                                                                    tables.MenuPrivilege.r_priv, ";",
                                                                    tables.MenuPrivilege.w_priv))).join(
            tables.Menu, tables.MenuPrivilege.mid == tables.Menu.mid).filter(
            tables.MenuPrivilege.uid == strUserID,
            tables.MenuPrivilege.mid.in_(MidList)).group_by(
            tables.MenuPrivilege.mid).all()
        for menu in privilege:
            priv_list = []
            # Unpack "pid;r;w,pid;r;w,..." into per-project privilege dicts.
            for prjs in str(menu[4]).split(','):
                priv = prjs.split(';')
                prj_dic = {}
                if priv[0] in project_dic.keys():
                    prj_dic[project_dic[priv[0]]] = {'pid': priv[0], 'r_priv': priv[1], 'w_priv': priv[2]}
                    priv_list.append(prj_dic)
            menu_dic = {'menu_id': menu[0], 'menu_name': menu[1], 'menu_url': menu[2],
                        'menu_preid': menu[3], 'menu_pri': priv_list}
            menu_list.append(menu_dic)
    return menu_list
def add_task(task_id=None):
    """Create or update a scan task from the submitted form/API parameters.

    With a valid ``scan_key`` the request is treated as an API submission;
    otherwise the logged-in user is resolved from the session.
    """
    name = request.values.get('task_name')
    scheme = request.values.get('task_scheme')
    domain = request.values.get('task_domain')
    source_ip = request.values.get('source_ip')
    path = request.values.get('task_path')
    cookie = request.values.get('task_cookie')
    spider_type = request.values.get('spider_type')
    task_policy = request.values.get('task_policy')
    urls = request.values.get('urls')
    target = request.values.get('target')
    multiple_task = True if request.values.get('multiple_task') else False
    run_now = True if request.values.get('run_now') else False
    run_time = request.values.get('run_time')
    rules = request.values.get('rules')
    scan_key = request.values.get('scan_key')
    try:
        # Scan tasks submitted through the API: policy '509' means "full
        # scan", which expands to every non-root rule family.
        if scan_key:
            if not (name and urls and run_time and task_policy):
                raise Exception
            user_id = verify_scan_key(scan_key).id
            if task_policy == '509':
                rules = db.session.query(func.group_concat(WebVulFamily.id)).filter(WebVulFamily.parent_id != 0).first()[0]
                spider_type = 2
        else:
            username = current_user.name
            user_id = db.session.query(User).filter(User.name == username).first().id
    except Exception, e:  # Python 2 except syntax; this module targets py2
        logger.exception(e)
        return jsonify(dict(status=False, desc='添加更新失败'))
def _query_not_won(tiid):
    """Return (user id, ticket id, comma-joined order ids) for every user
    holding still-pending, non-deleted orders on ticket *tiid*."""
    pending = TicketsOrderStatus.pending.value
    return db.session.query(
        TicketsOrder.USid,
        TicketsOrder.TIid,
        func.group_concat(TicketsOrder.TSOid),
    ).filter(
        TicketsOrder.isdelete == false(),
        TicketsOrder.TIid == tiid,
        TicketsOrder.TSOstatus == pending,
    ).group_by(TicketsOrder.USid).all()
def add_task_html(task_id=None):
    """Render the web-task add/edit page.

    With *task_id* the edit template is pre-populated with the task, its
    rule families and its report model; without it the blank add form is
    rendered.  Any error aborts with 404.
    """
    try:
        web_policys = db.session.query(WebVulPolicy)
        web_schemes = db.session.query(TaskWebScheme)
        rule_types = db.session.query(WebVulFamily).filter(WebVulFamily.parent_id != 0)
        rep_models = db.session.query(ReportModel.model_id, ReportModel.model_name)
        if task_id:
            # Comma-joined ids of the rule families bound to this task.
            rule_family_ids = db.session.query(func.group_concat(TaskRuleFamilyRef.rule_family_id)).\
                filter(TaskRuleFamilyRef.task_id == task_id).first()
            task = db.session.query(Task).filter(Task.id == task_id).first()
            task_rep_model = db.session.query(TaskRepModelRef).filter(TaskRepModelRef.task_id == task_id).first()
            if task_rep_model:
                task_rep_model_id = task_rep_model.rep_model_id
            else:
                # Fall back to the vendor's default report template.
                task_rep_model_id = db.session.query(ReportModel).filter(
                    or_(ReportModel.company == '上海云盾信息技术有限公司',
                        ReportModel.model_name == '盾眼默认模板')).first().model_id
            return render_template('web_task_edit.html', task=task, policys=web_policys,
                                   schemes=web_schemes, rep_models=rep_models,
                                   task_rep_model_id=task_rep_model_id,
                                   rule_family_ids=rule_family_ids[0],
                                   level_one='task', level_two='add_task')
        return render_template('web_task_add.html', policys=web_policys, schemes=web_schemes,
                               rule_types=rule_types, rep_models=rep_models,
                               level_one='task', level_two='add_task')
    except Exception as e:
        logger.error(e)
        abort(404)
def wide_parameter_value_list_sq(self):
    """A subquery of the form:

    .. code-block:: sql

        SELECT id, name, GROUP_CONCAT(value) AS value_list
        FROM (
            SELECT id, name, value
            FROM parameter_value_list
            ORDER BY id, value_index
        )
        GROUP BY id

    NOTE(review): the inner ORDER BY shown above is not actually emitted —
    the ``op("ORDER BY")`` variant is commented out below, so the order of
    ``value_list`` depends on the underlying row order; TODO confirm this
    is acceptable on all backends.

    :type: :class:`~sqlalchemy.sql.expression.Alias`
    """
    # Built lazily and cached on the instance.
    if self._wide_parameter_value_list_sq is None:
        self._wide_parameter_value_list_sq = (
            self.query(
                self.parameter_value_list_sq.c.id,
                self.parameter_value_list_sq.c.name,
                func.group_concat(
                    # self.parameter_value_list_sq.c.value.op("ORDER BY")(self.parameter_value_list_sq.c.value_index)
                    self.parameter_value_list_sq.c.value).label(
                        "value_list"),
            ).group_by(self.parameter_value_list_sq.c.id,
                       self.parameter_value_list_sq.c.name)).subquery()
    return self._wide_parameter_value_list_sq
def query(self, *args, **kwargs):
    """Return a sqlalchemy :class:`~sqlalchemy.orm.query.Query` object
    applied to this :class:`.DatabaseMappingBase`.

    To perform custom ``SELECT`` statements, call this method with one or
    more of the class documented
    :class:`~sqlalchemy.sql.expression.Alias` properties. For example, to
    select the object class with ``id`` equal to 1::

        from spinedb_api import DatabaseMapping
        url = 'sqlite:///spine.db'
        ...
        db_map = DatabaseMapping(url)
        db_map.query(db_map.object_class_sq).filter_by(id=1).one_or_none()

    To perform more complex queries, just use this method in combination
    with the SQLAlchemy API. For example, to select all object class names
    and the names of their objects concatenated in a string::

        from sqlalchemy import func
        db_map.query(
            db_map.object_class_sq.c.name,
            func.group_concat(db_map.object_sq.c.name)
        ).filter(
            db_map.object_sq.c.class_id == db_map.object_class_sq.c.id
        ).group_by(db_map.object_class_sq.c.name).all()
    """
    # NOTE(review): the original had a duplicate of the docstring example
    # dangling as live code after this return; at class scope it would
    # raise NameError on import, so it has been removed.
    return self.session.query(*args, **kwargs)
def get_channel_product():
    """Return the detail of a single channel product, including the uuids of
    the channels it is assigned to, as a JSON response.
    """
    # Fetch the request parameter.
    uuid = request.args['uuid']
    # Subquery: the channel uuids assigned to each product package,
    # concatenated into one string per product.
    query_channel = Channel_product.query \
        .join(products_channels).join(Channel) \
        .with_entities(Channel_product.uuid) \
        .add_columns(func.group_concat(Channel.uuid).label('channel_uuid')) \
        .group_by(Channel_product.uuid) \
        .subquery()
    # Joined query: product columns plus the aggregated channel uuids.
    # Outer join so products with no channel still appear (channel_uuid None).
    query = Channel_product.query \
        .outerjoin(query_channel, Channel_product.uuid == query_channel.c.uuid) \
        .with_entities(Channel_product.uuid, Channel_product.cname, Channel_product.description,
                       Channel_product.category, Channel_product.status,
                       Channel_product.create_time, Channel_product.update_time) \
        .add_columns(query_channel.c.channel_uuid) \
        .filter(Channel_product.uuid==uuid).first()
    # Return the datatable payload.
    data = {
        'uuid': query.uuid,
        'cname': query.cname,
        'description': query.description,
        'channel_uuid': query.channel_uuid,
        'category': query.category,
        'status': query.status,
        'create_time': query.create_time,
        'update_time': query.update_time
    }
    return jsonify(data)
def SelectMenuProjectPrivilege(strUserId, strMenuID='None'):
    """Query a user's menu privileges.

    :param strUserId: id of the user to look up.
    :param strMenuID: the literal string 'None' returns the privileges for all
        of the user's menus; any other value restricts the result to that
        single menu id.
    :return: list of dicts with keys 'menu_id', 'menu_name', 'menu_url',
        'menu_preid' and 'menu_pri', where 'menu_pri' is a list of per-project
        {'pid', 'r_priv', 'w_priv'} entries keyed by project name.
    """
    project_dic = CommonSession.SelectProject('ProjectDic')
    menu_list = []
    with GetSession() as db_ses:
        # One "pid;r_priv;w_priv" triplet per project, comma-joined per menu.
        privileges_col = expression.label(
            'privileges',
            func.group_concat(tables.MenuPrivilege.pid, ";",
                              tables.MenuPrivilege.r_priv, ";",
                              tables.MenuPrivilege.w_priv))
        # DEDUPE: the original repeated this entire query twice, differing only
        # in one extra filter; build it once and add the filter conditionally.
        query = db_ses.query(tables.MenuPrivilege.mid, tables.Menu.name,
                             tables.Menu.url, tables.Menu.preid,
                             privileges_col).join(
            tables.Menu, tables.MenuPrivilege.mid == tables.Menu.mid).filter(
            tables.MenuPrivilege.uid == strUserId)
        if strMenuID != 'None':
            query = query.filter(tables.MenuPrivilege.mid == strMenuID)
        privilege = query.group_by(tables.MenuPrivilege.mid).all()
        for menu in privilege:
            priv_list = []
            for prjs in str(menu[4]).split(','):
                priv = prjs.split(';')
                prj_dic = {}
                # Unknown project ids are skipped entirely.
                # NOTE(review): assumes the original appended only inside this
                # branch (collapsed source is ambiguous) — confirm.
                if priv[0] in project_dic:
                    prj_dic[project_dic[priv[0]]] = {'pid': priv[0], 'r_priv': priv[1], 'w_priv': priv[2]}
                    priv_list.append(prj_dic)
            menu_dic = {'menu_id': menu[0], 'menu_name': menu[1], 'menu_url': menu[2],
                        'menu_preid': menu[3], 'menu_pri': priv_list}
            menu_list.append(menu_dic)
    return menu_list
def add_similarity_scores_for_nonprofit_tweets():
    """Calculate similarity scores for every pair of nonprofit tweets and store them in the DB."""
    logger.debug('Inside add_similarity_scores_for_nonprofit_tweets()')
    # One row per nonprofit: all of its tweet texts concatenated by GROUP_CONCAT.
    tweets = DBSession.query(Tweet.twitter_name, func.group_concat(Tweet.text).label('text')).group_by(Tweet.twitter_name).all()
    # similarity_matrix[m][n] is the pairwise score between nonprofits m and n,
    # in the same order as `tweets`.
    similarity_matrix = similarity.get_similarity_scores_all_pairs([tweet.text for tweet in tweets])
    # Rebuild the table from scratch on every run.
    DBSession.query(Nonprofits_Similarity_By_Tweets).delete()
    # Store the upper triangle only: each unordered pair appears once.
    for m in xrange(len(similarity_matrix) - 1):
        for n in xrange(m + 1, len(similarity_matrix)):
            DBSession.add(Nonprofits_Similarity_By_Tweets(tweets[m].twitter_name, tweets[n].twitter_name, similarity_matrix[m][n]))
    DBSession.commit()
def get_permissions_query(session, identifier_s):
    """Build a query yielding one 'domain:actions:resources' permission string
    per (domain, resource) for every user whose identifier matches.

    NULL domain/action/resource names are rendered as the wildcard '*'.

    :type identifier_s: list
    """
    # NULL -> '*' wildcard substitution for each naming column.
    domain_col = case([(Domain.name == None, '*')], else_=Domain.name)
    action_col = case([(Action.name == None, '*')], else_=Action.name)
    resource_col = case([(Resource.name == None, '*')], else_=Resource.name)
    # Distinct actions / resources collapsed into comma separated lists.
    actions = func.group_concat(action_col.distinct())
    resources = func.group_concat(resource_col.distinct())
    permission_string = domain_col + ':' + actions + ':' + resources
    query = session.query(permission_string).select_from(User)
    query = query.join(role_membership, User.pk_id == role_membership.c.user_id)
    query = query.join(role_permission, role_membership.c.role_id == role_permission.c.role_id)
    query = query.join(Permission, role_permission.c.permission_id == Permission.pk_id)
    # Outer joins: a permission may lack any of domain/action/resource.
    query = query.outerjoin(Domain, Permission.domain_id == Domain.pk_id)
    query = query.outerjoin(Action, Permission.action_id == Action.pk_id)
    query = query.outerjoin(Resource, Permission.resource_id == Resource.pk_id)
    query = query.filter(User.identifier.in_(identifier_s))
    return query.group_by(Permission.domain_id, Permission.resource_id)
def print_body_for_user(self, authenticated_user): db_session = DB_Session_Factory.get_db_session() interviewees = [] for interviewee_info in db_session.query(func.group_concat(Interview.technical_score), func.group_concat(Interview.cultural_score), Interview.candidate_name).group_by(Interview.candidate_name).filter(func.date(Interview.start_time) == func.date(self.date)).all(): [tech_scores, cultural_scores, candidate_name] = interviewee_info avg_tech_score = Candidate_List_HTTP_Response_Builder.avg_score(tech_scores) avg_cultural_score= Candidate_List_HTTP_Response_Builder.avg_score(cultural_scores) status = 'unknown' if avg_tech_score > 2.5 and avg_cultural_score > 2.5: status = 'success' elif avg_tech_score is not 0 and avg_cultural_score is not 0: status = 'failure' candidate = db_session.query(Candidate).get(candidate_name) interviewees.append({'candidate_name' : candidate_name, 'status' : status, 'position' : candidate.position}) final_output_dict = {'candidates' : interviewees, 'date' : self.date.strftime("%s")} print json.dumps(final_output_dict)
def get_article(article_id=None, title_path=None, render=True, released=None):
    """Return an article by its ID or title path.

    :param article_id: primary key of the article; takes precedence over
        title_path when both are given.
    :param title_path: URL path of the article title, used when article_id
        is None.
    :param render: forwarded to article_from_row.
    :param released: when not None, restrict to articles with that released
        flag.
    :raises ValueError: if neither article_id nor title_path is given.
    :return: the article object, or None when no row matched.
    """
    if article_id is None and title_path is None:
        raise ValueError("You must specify either an ID or path.")
    # Generate the proper where condition
    if article_id is not None:
        where_cond = (articles.c.id == article_id)
    else:
        where_cond = (articles.c.title_path == title_path)
    # Generate the SQL syntax with SQLAlchemy; tags are left-joined and
    # aggregated into a comma separated 'tag_list' ("" when there are none).
    stmt = select(
        [articles, func.ifnull(func.group_concat(tags.c.tag, ", "), "").label('tag_list')]
    ).select_from(
        articles.outerjoin(
            tag_map, articles.c.id == tag_map.c.article_id
        ).outerjoin(
            tags, tag_map.c.tag_id == tags.c.id
        )
    ).where(
        where_cond
    ).where(
        # NOTE(review): when `released` is None an empty-string clause is
        # appended, which old SQLAlchemy treats as a no-op filter — confirm
        # this is intentional rather than a conditional .where().
        articles.c.released == released if released is not None else ""
    ).group_by(
        tag_map.c.article_id
    )
    # Get our results
    conn = engine.connect()
    result = conn.execute(stmt)
    row = result.fetchone()
    article = article_from_row(row, render=render) if row is not None else None
    conn.close()
    return article
def get_data_for_test(test, title=None):
    """
    Retrieves the data for a single test, with an optional title.

    :param test: The test to retrieve the data for.
    :type test: Test
    :param title: The title to use in the result. If empty, it's set to 'test {id}'
    :type title: str
    :return: A dictionary with the appropriate values.
    :rtype: dict
    """
    if title is None:
        title = 'test {id}'.format(id=test.id)
    populated_categories = g.db.query(regressionTestLinkTable.c.category_id).subquery()
    categories = Category.query.filter(Category.id.in_(populated_categories)).order_by(Category.name.asc()).all()
    hours = 0
    minutes = 0
    queued_tests = 0
    # Estimated time while the test is still queued:
    #   (tests already in queue + 1) * (average time of the platform)
    #   - (time already spent by those tests)
    # expressed below in hours and minutes.
    if len(test.progress) == 0:
        var_average = 'average_time_' + test.platform.value
        queued_kvm = g.db.query(Kvm.test_id).filter(Kvm.test_id < test.id).subquery()
        queued_kvm_entries = g.db.query(Test.id).filter(
            and_(Test.id.in_(queued_kvm), Test.platform == test.platform)
        ).subquery()
        # All timestamps of each queued test, comma-joined per test.
        kvm_test = g.db.query(TestProgress.test_id, label('time', func.group_concat(TestProgress.timestamp))).filter(
            TestProgress.test_id.in_(queued_kvm_entries)
        ).group_by(TestProgress.test_id).all()
        number_kvm_test = g.db.query(Test.id).filter(
            and_(Test.id.in_(queued_kvm), Test.platform == test.platform)
        ).count()
        average_duration = float(GeneralData.query.filter(GeneralData.key == var_average).first().value)
        queued_tests = number_kvm_test
        time_run = 0
        for pr_test in kvm_test:
            # First/last timestamp per queued test bound the time it has run.
            timestamps = pr_test.time.split(',')
            start = datetime.strptime(timestamps[0], '%Y-%m-%d %H:%M:%S')
            end = datetime.strptime(timestamps[-1], '%Y-%m-%d %H:%M:%S')
            time_run += (end - start).total_seconds()
        # subtracting current running tests
        total = (average_duration * (queued_tests + 1)) - time_run
        minutes = (total % 3600) // 60
        hours = total // 3600
    results = [{
        'category': category,
        'tests': [{
            'test': rt,
            'result': next((r for r in test.results if r.regression_test_id == rt.id), None),
            'files': TestResultFile.query.filter(
                and_(TestResultFile.test_id == test.id, TestResultFile.regression_test_id == rt.id)
            ).all()
        } for rt in category.regression_tests if rt.id in test.get_customized_regressiontests()]
    } for category in categories]
    # Run through the categories to see if they should be marked as failed or passed. A category failed if one or more
    # tests in said category failed.
    for category in results:
        error = False
        for category_test in category['tests']:
            test_error = False
            # A test fails if:
            # - Exit code is not what we expected
            # - There are result files but one of them is not identical
            # - There are no result files but there should have been
            result = category_test['result']
            if result is not None and result.exit_code != result.expected_rc:
                test_error = True
            if len(category_test['files']) > 0:
                for result_file in category_test['files']:
                    if result_file.got is not None and result.exit_code == 0:
                        test_error = True
                        break
            else:
                # We need to check if the regression test had any file that shouldn't have been ignored.
                # BUG FIX: the original used "RegressionTestOutput.ignore is False",
                # a Python identity test on the Column object which is always
                # False, so the filter could never match; "== False" lets
                # SQLAlchemy generate the intended SQL comparison.
                outputs = RegressionTestOutput.query.filter(and_(
                    RegressionTestOutput.regression_id == category_test['test'].id,
                    RegressionTestOutput.ignore == False
                )).all()
                got = None
                if len(outputs) > 0:
                    test_error = True
                    got = 'error'
                # Add dummy entry for pass/fail display
                category_test['files'] = [TestResultFile(-1, -1, -1, '', got)]
            # Store test status in error field
            category_test['error'] = test_error
            # Update category error
            error = error or test_error
        category['error'] = error
    results.sort(key=lambda entry: entry['category'].name)
    return {
        'test': test,
        'TestType': TestType,
        'results': results,
        'title': title,
        'next': queued_tests,
        'min': minutes,
        'hr': hours
    }
def _apply_standard_joins(query, model, protein_map):
    """Join the source/run/HLA/protein tables shared by every grouping branch.

    :param model: PeptideRun or SpectrumHit (supplies ms_run_ms_run_id).
    :param protein_map: the peptide- or spectrum-to-protein mapping table.
    """
    query = query.join(Source)
    query = query.join(MsRun, model.ms_run_ms_run_id == MsRun.ms_run_id)
    query = query.join(t_hla_map)
    query = query.join(HlaType)
    query = query.join(protein_map)
    return query.join(Protein)


def _apply_standard_filters(query, params, model, include_antibody=True, length_2_set=False):
    """Apply the request filters shared by every grouping branch.

    :param model: PeptideRun or SpectrumHit (holds sequence/length columns).
    :param include_antibody: the source_psm branch does not filter on antibody.
    :param length_2_set: the source_psm branch passes set=True for length_2.
    """
    query = create_filter(query, "sequence", params, "sequence", model, "sequence_rule", True, set=False)
    query = create_filter(query, "patient_id", params, "patient_id", Source, "patient_id_rule", True, set=False)
    query = create_filter(query, "source_id", params, "source_id", Source, "source_id_rule", True, set=False)
    query = create_filter(query, "run_name", params, "filename", MsRun, "run_name_rule", True, set=False)
    query = create_filter(query, "organ", params, "organ", Source, "organ_rule", False, set=False)
    query = create_filter(query, "histology", params, "histology", Source, "histology_rule", False, set=False)
    query = create_filter(query, "dignity", params, "dignity", Source, "dignity_rule", False, set=False)
    query = create_filter(query, "hla_typing", params, "hla_string", HlaType, "hla_typing_rule", False, set=False)
    # TODO: check if it works without fk, fk=HlaLookup.fk_hla_typess
    query = create_filter(query, "digits", params, "digits", HlaType, None, False, set=False)
    query = create_filter(query, "protein", params, "name", Protein, "protein_rule", False, set=False,
                          fk=model.protein_proteins)
    query = create_filter(query, "length_1", params, "length", model, ">", False, set=False)
    query = create_filter(query, "length_2", params, "length", model, "<", False, set=length_2_set)
    if include_antibody:
        query = create_filter(query, "antibody", params, "antibody_set", MsRun, "antibody_rule", False, set=True)
    query = create_filter(query, "celltype", params, "celltype", Source, "celltype_rule", False, set=False)
    return create_filter(query, "treatment", params, "treatment", Source, "treatment_rule", False, set=False)


def peptide_query_result(request):
    """Run the peptide search and return JSON results for the chosen grouping.

    The 'grouping' request parameter selects one of five aggregation modes
    (peptide / run / source / source_psm / protein). The original repeated the
    same join and filter chains in every branch; they are factored into the
    two module-private helpers above, preserving the per-branch differences
    (source_psm uses SpectrumHit, skips the antibody filter and passes
    set=True for length_2).
    """
    # Check if one of these parameters is set, if not forward to peptide_query page
    params_check_dict = [
        "sequence",
        "source_id",
        "patient_id",
        "run_name",
        "organ",
        "histology",
        "dignity",
        "hla_typing",
        "protein",
        "length_1",
        "length_2",
        "antibody",
        "celltype",
        "treatment",
    ]
    input_check = False
    for param in params_check_dict:
        if param in request.params:
            if len(request.params[param]) > 0:
                input_check = True
    # if there is no input forward to peptide_query
    if not input_check:
        raise HTTPFound(request.route_url("peptide_query"))
    # Group by peptide
    if request.params["grouping"] == "peptide":
        try:
            query = DBSession.query(
                PeptideRun.sequence,
                func.group_concat(Protein.name.distinct().op("order by")(Protein.name)).label("protein"),
                func.group_concat(Source.source_id.distinct().op("order by")(Source.source_id)).label("source_id"),
                func.group_concat(Source.patient_id.distinct().op("order by")(Source.patient_id)).label("patient_id"),
                func.group_concat(Source.dignity.distinct().op("order by")(Source.dignity)).label("dignity"),
                func.group_concat((HlaType.hla_string.distinct().op("order by")(HlaType.hla_string))).label(
                    "hla_typing"
                ),
            )
            query = _apply_standard_joins(query, PeptideRun, t_peptide_protein_map)
            query = _apply_standard_filters(query, request.params, PeptideRun)
            query = query.group_by(PeptideRun.sequence)
            your_json = json.dumps(query.all())
            grouping = "peptide"
        except DBAPIError:
            return Response(conn_err_msg, content_type="text/plain", status_int=500)
    # MS run group by
    elif request.params["grouping"] == "run":
        try:
            query = DBSession.query(
                PeptideRun.peptide_run_id,
                PeptideRun.sequence,
                PeptideRun.minRT,
                PeptideRun.maxRT,
                PeptideRun.minScore,
                PeptideRun.maxScore,
                PeptideRun.minE,
                PeptideRun.maxE,
                PeptideRun.minQ,
                PeptideRun.maxQ,
                PeptideRun.PSM,
                func.group_concat(HlaType.hla_string.distinct().op("order by")(HlaType.hla_string)).label("hla_typing"),
                func.group_concat(Protein.name.distinct().op("order by")(Protein.name)).label("protein"),
                Source.histology,
                Source.patient_id,
                Source.source_id,
                MsRun.filename,
                MsRun.ms_run_id,
            )
            query = _apply_standard_joins(query, PeptideRun, t_peptide_protein_map)
            query = _apply_standard_filters(query, request.params, PeptideRun)
            query = query.group_by(PeptideRun.peptide_run_id)
            your_json = json.dumps(query.all())
            grouping = "run"
        except DBAPIError:
            return Response(conn_err_msg, content_type="text/plain", status_int=500)
    # source without PSM group by
    elif request.params["grouping"] == "source":
        try:
            query = DBSession.query(
                PeptideRun.peptide_run_id,
                PeptideRun.sequence,
                func.min(PeptideRun.minRT).label("minRT"),
                func.max(PeptideRun.maxRT).label("maxRT"),
                func.min(PeptideRun.minScore).label("minScore"),
                func.max(PeptideRun.maxScore).label("maxScore"),
                func.min(PeptideRun.minE).label("minE"),
                func.max(PeptideRun.maxE).label("maxE"),
                func.min(PeptideRun.minQ).label("minQ"),
                func.max(PeptideRun.maxQ).label("maxQ"),
                func.group_concat(HlaType.hla_string.distinct().op("order by")(HlaType.hla_string)).label("hla_typing"),
                func.group_concat(Protein.name.distinct().op("order by")(Protein.name)).label("protein"),
                Source.histology,
                Source.patient_id,
                Source.source_id,
            )
            query = _apply_standard_joins(query, PeptideRun, t_peptide_protein_map)
            query = _apply_standard_filters(query, request.params, PeptideRun)
            query = query.group_by(Source.source_id, PeptideRun.sequence)
            your_json = json.dumps(query.all())
            grouping = "source"
        except DBAPIError:
            return Response(conn_err_msg, content_type="text/plain", status_int=500)
    # source with PSM group by
    elif request.params["grouping"] == "source_psm":
        try:
            query = DBSession.query(
                SpectrumHit.sequence,
                func.min(SpectrumHit.ionscore).label("minScore"),
                func.max(SpectrumHit.ionscore).label("maxScore"),
                func.min(SpectrumHit.e_value).label("minE"),
                func.max(SpectrumHit.e_value).label("maxE"),
                func.min(SpectrumHit.q_value).label("minQ"),
                func.max(SpectrumHit.q_value).label("maxQ"),
                func.count(SpectrumHit.spectrum_hit_id.distinct()).label("PSM"),
                func.group_concat(HlaType.hla_string.distinct().op("order by")(HlaType.hla_string)).label("hla_typing"),
                func.group_concat(Protein.name.distinct().op("order by")(Protein.name)).label("protein"),
                Source.histology,
                Source.patient_id,
                Source.source_id,
            )
            query = _apply_standard_joins(query, SpectrumHit, t_spectrum_protein_map)
            query = _apply_standard_filters(query, request.params, SpectrumHit,
                                            include_antibody=False, length_2_set=True)
            query = query.group_by(Source.source_id, SpectrumHit.sequence)
            your_json = json.dumps(query.all())
            grouping = "source_psm"
        except DBAPIError:
            return Response(conn_err_msg, content_type="text/plain", status_int=500)
    # Group by protein
    elif request.params["grouping"] == "protein":
        # TODO: a whole protein query from kidney take 8 min...
        try:
            query = DBSession.query(
                func.group_concat(PeptideRun.sequence.distinct().op("order by")(PeptideRun.sequence)).label("peptide"),
                Protein.name.label("protein"),
                func.group_concat(Source.patient_id.distinct().op("order by")(Source.patient_id)).label("patient_id"),
                func.group_concat(Source.source_id.distinct().op("order by")(Source.source_id)).label("source_id"),
                func.group_concat(Source.dignity.distinct().op("order by")(Source.dignity)).label("dignity"),
            )
            query = _apply_standard_joins(query, PeptideRun, t_peptide_protein_map)
            query = _apply_standard_filters(query, request.params, PeptideRun)
            query = query.group_by(Protein)
            your_json = json.dumps(query.all())
            grouping = "protein"
        except DBAPIError:
            return Response(conn_err_msg, content_type="text/plain", status_int=500)
    return {"project": your_json, "grouping": grouping}
# Demo / scratch queries exercising assorted SQLAlchemy constructs (Python 2).
# EXISTS rendered as a labelled scalar column, two equivalent spellings.
print db_session.query(exists().where(User.id == 3).label('exist_id')).scalar()
print db_session.query((db_session.query(User.id).filter(User.id == 3).exists().label('exist_id'))).scalar()
# Scalar aggregates and functions.
print db_session.query(func.count('*').label('count_id')).select_from(User).scalar()
print db_session.query(func.now().label('time_now')).scalar()
print db_session.query(User).filter(User.id == 3).scalar()
# INSERT ... SELECT copying columns from an existing row.
print db_session.execute(insert(User).from_select((User.user_name, User.password),
                                                  db_session.query(User.user_name, User.password).filter(User.id == 3)))
db_session.commit()
# INSERT ... SELECT with a literal text column in the SELECT list.
print db_session.execute(insert(User).from_select((User.user_name, User.password),
                                                  db_session.query('"xiaoqigui01"', User.password).filter(
                                                      User.id == 3)))
db_session.commit()
# GROUP_CONCAT with a custom separator via the generic .op('SEPARATOR') hook.
res_users = db_session.query(User.user_name, func.group_concat(User.user_name.op('SEPARATOR')(';'))).group_by(
    User.user_name).all()
print res_users
# Aliased entity inside EXISTS.
user_table = aliased(User, name='user_table')
print db_session.query(exists().where(user_table.id == 3).label('exist_id')).scalar()
print "~~~~~~~~~~~~~~~~~~~~~~~~~"
# Correlating an outer query against a named subquery's columns.
sub_query = db_session.query(User.user_name.label('user_name'), User.id.label('user_id')).filter(User.id == 3).subquery(
    'sub')
# .columns is the same as .c
print db_session.query(User.user_name.label('user_name')).filter(User.id == sub_query.columns.user_id).all()
print "~~~~~~~~~~~~~~~~~~~~~~~~~"
def source_id_page(request):
    """Render the per-source page data: spectrum-hit statistics, aggregated
    source metadata and the MS runs belonging to the source.

    :param request: Pyramid request; the source is taken from
        request.matchdict["source_id"].
    :return: dict with JSON strings under 'statistic', 'metadata' and 'runs',
        plus the raw 'source' id; or a 500 Response on any DB failure.
    """
    try:
        source_id = request.matchdict["source_id"]
        # Catch if there are no peptides!!
        query = DBSession.query(func.count(SpectrumHit.spectrum_hit_id).label("count_hits"),
                                func.count(SpectrumHit.sequence.distinct()).label("count_pep"),
                                func.count(Protein.name.distinct()).label("count_prot")
                                )
        query = query.join(Source)
        query = query.join(t_spectrum_protein_map)
        query = query.join(Protein)
        query = query.filter(Source.source_id == source_id)
        statistics = json.dumps(query.all())
        # Every descriptive attribute of the source, each collapsed into an
        # ordered, distinct GROUP_CONCAT list.
        query = DBSession.query(
            Source.source_id,
            func.group_concat((Source.histology.distinct().op('order by')(Source.histology))).label('histology'),
            func.group_concat((Source.patient_id.distinct().op('order by')(Source.patient_id))).label('patient_id'),
            func.group_concat((Source.organ.distinct().op('order by')(Source.organ))).label('organ'),
            func.group_concat((Source.comment.distinct().op('order by')(Source.comment))).label('comment'),
            func.group_concat((Source.dignity.distinct().op('order by')(Source.dignity))).label('dignity'),
            func.group_concat((Source.celltype.distinct().op('order by')(Source.celltype))).label('celltype'),
            func.group_concat((Source.location.distinct().op('order by')(Source.location))).label('location'),
            func.group_concat((Source.metastatis.distinct().op('order by')(Source.metastatis))).label('metastatis'),
            func.group_concat((Source.person.distinct().op('order by')(Source.person))).label('person'),
            func.group_concat((Source.organism.distinct().op('order by')(Source.organism))).label('organism'),
            func.group_concat((HlaType.hla_string.distinct().op('order by')(HlaType.hla_string))).label('hla_typing')
        )
        query = query.join(t_hla_map)
        query = query.join(HlaType)
        query = query.filter(Source.source_id == source_id)
        query = query.group_by(Source.source_id)
        metadata = json.dumps(query.all())
        query = DBSession.query(MsRun.ms_run_id, MsRun.filename).join(Source).filter(
            Source.source_id == source_id)
        runs = json.dumps(query.all())
    except Exception:
        # BUG FIX: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort error page
        # without masking interpreter-level signals.
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {"statistic": statistics, "metadata": metadata, "runs": runs,
            "source": source_id}
def get_articles(start=None, page_size=config.PAGE_SIZE, with_body=True, with_links=False, released=False,
                 render=True, tag=None, tag_list=False):
    """Return a list of articles.

    :param start: row offset for pagination.
    :param page_size: maximum number of rows returned.
    :param with_body: include the article body column.
    :param with_links: include the title_link / title_alt columns.
    :param released: when not None, restrict to articles with that flag.
    :param render: forwarded to article_from_row.
    :param tag: when a string, only articles carrying that tag are returned.
    :param tag_list: include a comma separated 'tag_list' aggregate column.
    :return: list of article objects (possibly empty).
    """
    # IDIOM FIX: "True if isinstance(...) else False" is a redundant ternary.
    by_tag = isinstance(tag, str)
    # Generate the correct list of columns
    cols = [articles.c.id, articles.c.released, articles.c.title_path, articles.c.title, articles.c.date]
    if with_body:
        cols.append(articles.c.body)
    if with_links:
        cols.append(articles.c.title_link)
        cols.append(articles.c.title_alt)
    if tag_list:
        # Aggregate tags per article; "" when an article has none.
        cols.append(
            func.ifnull(
                func.group_concat(tags.c.tag, ", "), ""
            ).label('tag_list')
        )
    # Build the statement
    stmt = select(cols, offset=start, limit=page_size).where(
        articles.c.released == released if released is not None else ""
    ).order_by(
        articles.c.date.desc()
    )
    # Join the tag map and tag table if either:
    # - we want to return tags
    # - we are returning all articles with a certain tag
    if by_tag or tag_list:
        stmt = stmt.select_from(
            articles.outerjoin(
                tag_map, articles.c.id == tag_map.c.article_id
            ).outerjoin(
                tags, tag_map.c.tag_id == tags.c.id
            )
        ).group_by(articles.c.id)
    # Limit articles by tag
    if by_tag:
        stmt = stmt.where(
            articles.c.id.in_(
                select([tag_map.c.article_id]).select_from(
                    tag_map.outerjoin(
                        tags, tag_map.c.tag_id == tags.c.id
                    )
                ).where(
                    tags.c.tag == tag
                )
            )
        )
    # Execute the statement
    article_list = []
    conn = engine.connect()
    for row in conn.execute(stmt):
        article = article_from_row(row, render=render)
        article_list.append(article)
    conn.close()
    return article_list
'shipping_cost': func.sum(i.shipping_cost), 'shipping': func.sum(oi.shipping / er.rate), 'tax': func.sum(oi.tax / er.rate), 'units': func.sum(oi.qty)} for f in FACTS: FACTS[f] = FACTS[f].label(f) DIMENSIONS = {'activity': act.name, 'activity_code': act.code, 'activity_date': wi.last_updated, 'canceller': cu.username, 'cancel_date': o.cancel_date, 'cancel_reason': can.comments, 'charge_date': pay.payment_date, 'client': sc.name, 'comments': select([func.group_concat(oc.txt)]) .where(o.order_id == oc.order_id) .correlate(o.__table__), 'cover_color': cc.name, 'cover_material': cm.name, 'currency': cur.code, 'customer_name': func.concat_ws(' ', cus.first_name, cus.last_name), 'destination': func.concat_ws(', ', a.city, a.state, a.country), 'email': e.email, 'error_date': err.error_date, 'error_reason': err.error_reason, 'foid': oi.order_item_id, 'gross': oi.gross, 'last_update': func.date(wi.last_updated), 'location': lv.location,