def get_cash_flows():
    """Return monthly cash-flow aggregates, optionally bounded by date range.

    Query-string params (validated by ``DateRangeFilterSchema``):
    ``date_from`` (inclusive) and ``date_to`` (exclusive).

    Returns a dict ``{'objects': [...]}`` of serialized rows, or
    ``({'errors': ...}, 400)`` on validation failure.
    """
    date_range_filter_schema = DateRangeFilterSchema().load(request.args)
    # Old-style marshmallow load() result carries .errors / .data attributes.
    if date_range_filter_schema.errors:
        return {'errors': date_range_filter_schema.errors}, 400
    cash_flow_schema = CashFlowSchema()
    # One row per month: net cash flow, plus income/expense split out via
    # conditional sums (CASE contributes the amount only for matching rows).
    amounts = db.session.query(
        func.sum(Record.amount).label("cash_flow"),
        func.sum(
            case([(Record.record_type == Record.RECORD_TYPE_INCOME,
                   Record.amount)], else_=0)
        ).label('income'),
        func.sum(
            case([(Record.record_type == Record.RECORD_TYPE_EXPENSE,
                   Record.amount)], else_=0)
        ).label('expense'),
        func.date_trunc('month', Record.date).label("date"),
    ).group_by(
        func.date_trunc('month', Record.date)
    ).order_by(
        func.date_trunc('month', Record.date)
    )
    # Apply optional bounds only when the client supplied them.
    if 'date_from' in date_range_filter_schema.data:
        amounts = amounts.filter(
            Record.date >= date_range_filter_schema.data['date_from'])
    if 'date_to' in date_range_filter_schema.data:
        amounts = amounts.filter(
            Record.date < date_range_filter_schema.data['date_to'])
    return {'objects': cash_flow_schema.dump(amounts, many=True).data}
def milestone_list():
    """Serve the milestone roadmap.

    HTML requests get the static SPA shell; JSON requests get uncompleted
    milestones with open/closed ticket counts. Any other format -> 404.
    """
    fmt = get_format(request)
    if fmt == 'html':
        return send_file(root_path('static', 'index.html'),
                         mimetype='text/html')
    user = get_user(request)
    session = db.session
    # Per-milestone ticket counts. Shorthand case(whens, value, else_):
    # compares Ticket.status against u'closed' (CASE <value> WHEN ... form),
    # so "closed" counts matching rows and "open" counts the rest.
    q = session.query(
        Ticket.milestone,
        func.SUM(1).label("total"),
        func.SUM(case([(u'closed', 1)], Ticket.status, 0)).label("closed"),
        func.SUM(case([(u'closed', 0)], Ticket.status, 1)).label("open"),
    ).group_by(Ticket.milestone).subquery()
    # Milestones without a due date (due == 0) sort after dated ones,
    # then by due date, then case-insensitively by name.
    rows = session.query(Milestone, q.c.total, q.c.closed, q.c.open).\
        filter(Milestone.completed == 0).\
        join((q, Milestone.name == q.c.milestone)).\
        order_by(Milestone.due == 0, Milestone.due,
                 func.UPPER(Milestone.name)).\
        all()
    if fmt == 'json':
        return jsonify({
            'template': 'milestone_list',
            'milestones': [orm_dict(r.Milestone, total=r.total,
                                    closed=r.closed, open=r.open)
                           for r in rows],
            'user': user,
            'title': 'Roadmap',
        })
    abort(404)
def get_data_by_date(self, page=0, page_size=DEFAULT_PAGE_SIZE, datestr=None):
    """Return a news item with its comments and sentiment counts for one day.

    :param page: zero-based page of comments to fetch.
    :param page_size: number of comments per page.
    :param datestr: date string understood by ``Helper.get_date``.
    :returns: dict with keys news/comments/date/positive/negative/total/pages;
        on any failure a dict of empty defaults is returned instead.
    """
    session = self.db_helper.Session()
    try:
        news = session.query(News).filter(
            News.news_id == self.news_id).first()
        comment_query = session.query(Comments).filter(
            Comments.news_id == self.news_id).filter(
            Comments.comment_time == Helper.get_date(
                datestr)).order_by(Comments.comment_id.desc())
        comments = self.db_helper.query(comment_query, page=page,
                                        page_size=page_size)
        # Count positive/negative comments in SQL: a comment is positive
        # when its sentiment score reaches DEFAULT_POSITIVE_THRESHOLD.
        sentiment_nums = session.query(
            Comments.comment_time,
            func.sum(
                sql.case([(sql.column('sentiment') >=
                           self.DEFAULT_POSITIVE_THRESHOLD, 1)],
                         else_=0)).label('positive'),
            func.sum(
                sql.case([(sql.column('sentiment') <
                           self.DEFAULT_POSITIVE_THRESHOLD, 1)],
                         else_=0)).label('negative')
        ).filter(Comments.news_id == self.news_id).filter(
            Comments.comment_time == Helper.get_date(datestr)).group_by(
            Comments.comment_time)
        total_comments = int(
            session.query(func.count('*').label('total')).
            filter(Comments.news_id == self.news_id).filter(
                Comments.comment_time == Helper.get_date(datestr))[0][0])
        # BUG FIX: the session used to be closed here, *before* the lazy
        # ``sentiment_nums`` query was executed below. The ``finally``
        # block already closes the session exactly once.
        sentiment_row = sentiment_nums[0]
        # Ceiling division: a partial page still counts as a page.
        pages = (total_comments + page_size - 1) // page_size
        return {
            'news': news.to_dict(),
            'comments': [comment.to_dict() for comment in comments],
            'date': sentiment_row[0].strftime("%Y-%m-%d"),
            'positive': int(sentiment_row[1]),
            'negative': int(sentiment_row[2]),
            'total': int(sentiment_row[1]) + int(sentiment_row[2]),
            'pages': pages
        }
    except Exception as ex:
        # Lazy %-formatting actually renders the exception; the previous
        # call passed ``ex`` as an unused positional arg and dropped it.
        self.logger.error(
            "Exception occurred when getting data by date: %s", ex)
        return {
            'news': {},
            'comments': [],
            'date': '',
            'positive': 0,
            'negative': 0,
            'total': 0,
            'pages': 0
        }
    finally:
        session.close()
def execute(self, request, user, name):
    """Render the history page for an alliance: per-tick member counts,
    size/value/score aggregates, per-tick deltas, top-10/100 planet counts,
    and the alliance's size/value rank among all alliances at each tick.
    """
    alliance = Alliance.load(name)
    if alliance is None:
        return HttpResponseRedirect(reverse("alliance_ranks"))
    ph = aliased(PlanetHistory)
    # Labelled aggregate expressions reused below.
    members = count().label("members")
    size = sum(ph.size).label("size")
    value = sum(ph.value).label("value")
    score = sum(ph.score).label("score")
    avg_size = size.op("/")(members).label("avg_size")
    avg_value = value.op("/")(members).label("avg_value")
    # COUNT over a CASE with NULL else_: counts only matching planets.
    t10v = count(case(whens=((ph.value_rank <= 10 ,1),), else_=None)).label("t10v")
    t100v = count(case(whens=((ph.value_rank <= 100 ,1),), else_=None)).label("t100v")
    # Second alias joined on the previous tick, for delta columns.
    pho = aliased(PlanetHistory)
    sizeo = sum(pho.size).label("sizeo")
    valueo = sum(pho.value).label("valueo")
    scoreo = sum(pho.score).label("scoreo")
    # Subquery ranking every alliance per tick by summed size/value.
    Q = session.query(PlanetHistory.tick.label("tick"),
                      Alliance.id.label("id"),
                      literal_column("rank() OVER (PARTITION BY planet_history.tick ORDER BY sum(planet_history.size) DESC)").label("size_rank"),
                      literal_column("rank() OVER (PARTITION BY planet_history.tick ORDER BY sum(planet_history.value) DESC)").label("value_rank"),
                      )
    Q = Q.filter(PlanetHistory.active == True)
    Q = Q.join(PlanetHistory.current)
    Q = Q.join(Planet.intel)
    Q = Q.join(Intel.alliance)
    Q = Q.group_by(PlanetHistory.tick, Alliance.id)
    ranks = Q.subquery()
    # Main per-tick aggregation for this alliance only.
    Q = session.query(ph.tick, members, size, value, avg_size, avg_value,
                      size-sizeo, value-valueo, score-scoreo,
                      t10v, t100v,
                      )
    Q = Q.filter(ph.active == True)
    Q = Q.join(ph.current)
    Q = Q.join(Planet.intel)
    Q = Q.join(Intel.alliance)
    # Outer join each planet-history row to its own previous tick.
    Q = Q.outerjoin((pho, and_(ph.id==pho.id, ph.tick-1==pho.tick),))
    Q = Q.filter(Intel.alliance == alliance)
    Q = Q.group_by(ph.tick)
    # Wrap the aggregate, then attach this alliance's ranks per tick.
    Q = Q.from_self().add_columns(ranks.c.size_rank, ranks.c.value_rank)
    Q = Q.outerjoin((ranks, and_(ph.tick == ranks.c.tick,
                                 alliance.id == ranks.c.id),))
    Q = Q.order_by(desc(ph.tick))
    history = Q.all()
    return render("ialliancehistory.tpl", request, alliance=alliance,
                  members=alliance.intel_members, history=history)
def get_balance(year, month):
    """Serialize the balance summary for one calendar month.

    :param year: four-digit year.
    :param month: month number (1-12).
    :returns: serialized dict with cash_flow/income/expense/date plus
        start_balance (sum of records strictly before the month) and
        end_balance (sum of records through the month's last day).
    """
    (_, day) = calendar.monthrange(year, month)
    start_date = datetime.date(year, month, 1)
    end_date = datetime.date(year, month, day)
    balance_schema = BalanceSchema()
    # Aggregates for the requested month only.
    amounts = db.session.query(
        func.sum(Record.amount).label("cash_flow"),
        func.sum(
            case([(Record.record_type == Record.RECORD_TYPE_INCOME,
                   Record.amount)], else_=0)
        ).label('income'),
        func.sum(
            case([(Record.record_type == Record.RECORD_TYPE_EXPENSE,
                   Record.amount)], else_=0)
        ).label('expense'),
        func.date_trunc('month', Record.date).label("date"),
    ).filter(
        func.extract('year', Record.date) == year,
        func.extract('month', Record.date) == month,
    ).group_by(
        func.date_trunc('month', Record.date)
    ).first()
    # Running balances: everything before the month vs. everything through
    # its end (single row — aggregate query without GROUP BY).
    current_balance = db.session.query(
        func.sum(
            case([(Record.date < start_date, Record.amount)], else_=0)
        ).label('start_balance'),
        func.sum(Record.amount).label("end_balance")
    ).filter(
        Record.date <= end_date
    ).first()
    if amounts:
        balance = balance_schema.dump({
            'cash_flow': amounts.cash_flow,
            'income': amounts.income,
            'expense': amounts.expense,
            'date': amounts.date,
            'start_balance': current_balance.start_balance,
            'end_balance': current_balance.end_balance,
        }).data
    else:
        # No records in the month: zero flows but balances still reported.
        balance = balance_schema.dump({
            'cash_flow': 0,
            'income': 0,
            'expense': 0,
            'date': end_date,
            'start_balance': current_balance.start_balance,
            'end_balance': current_balance.end_balance,
        }).data
    return balance
def upgrade():
    """Alembic migration: add a denormalized ``payout`` column to ``request``
    and backfill it as
    ``(base_payout + sum(absolute mods)) * (1 + sum(relative mods))``,
    counting only non-voided modifiers and treating missing sums as 0.
    """
    op.add_column('request', sa.Column('payout',
                                       sa.Numeric(precision=15, scale=2),
                                       index=True, nullable=True))
    bind = op.get_bind()
    # Non-voided absolute modifier values, keyed by request.
    absolute = select([abs_table.c.value.label('value'),
                       mod_table.c.request_id.label('request_id')])\
        .select_from(join(abs_table, mod_table,
                          mod_table.c.id == abs_table.c.id))\
        .where(mod_table.c.voided_user_id == None)\
        .alias()
    # Non-voided relative modifier values, keyed by request.
    relative = select([rel_table.c.value.label('value'),
                       mod_table.c.request_id.label('request_id')])\
        .select_from(join(rel_table, mod_table,
                          mod_table.c.id == rel_table.c.id))\
        .where(mod_table.c.voided_user_id == None)\
        .alias()
    # Per-request sums; outer joins keep requests with no modifiers
    # (their sum comes back NULL and is defaulted to 0 below).
    abs_sum = select([request.c.id.label('request_id'),
                      request.c.base_payout.label('base_payout'),
                      func.sum(absolute.c.value).label('sum')])\
        .select_from(outerjoin(request, absolute,
                               request.c.id == absolute.c.request_id))\
        .group_by(request.c.id)\
        .alias()
    rel_sum = select([request.c.id.label('request_id'),
                      func.sum(relative.c.value).label('sum')])\
        .select_from(outerjoin(request, relative,
                               request.c.id == relative.c.request_id))\
        .group_by(request.c.id)\
        .alias()
    total_sum = select([abs_sum.c.request_id.label('request_id'),
                        ((
                            abs_sum.c.base_payout +
                            case([(abs_sum.c.sum == None, Decimal(0))],
                                 else_=abs_sum.c.sum)) *
                         (
                            1 +
                            case([(rel_sum.c.sum == None, Decimal(0))],
                                 else_=rel_sum.c.sum))).label('payout')])\
        .select_from(join(abs_sum, rel_sum,
                          abs_sum.c.request_id == rel_sum.c.request_id))
    payouts = bind.execute(total_sum)
    # Backfill row by row, then lock the column down as NOT NULL.
    for request_id, payout in payouts:
        up = update(request).where(request.c.id == request_id).values(
            payout=payout)
        bind.execute(up)
    op.alter_column('request', 'payout', nullable=False,
                    existing_type=sa.Numeric(precision=15, scale=2))
def message_totals(dbsession, user):
    """Build the per-day message totals query, restricted to *user*.

    One row per date (newest first): total count, virus count, spam count
    (spam but not virus-infected), and summed message size.
    """
    query = dbsession.query(
        Message.date,
        func.count(Message.date).label('mail_total'),
        func.sum(case([(Message.virusinfected > 0, 1)],
                      else_=0)).label('virus_total'),
        # Spam counts exclude virus-infected messages so the buckets
        # don't double-count.
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.spam > 0), 1)],
                      else_=0)).label('spam_total'),
        func.sum(Message.size).label('total_size')
    ).group_by(Message.date).order_by(
        desc(Message.date))
    # Restrict to messages the user is allowed to see.
    uquery = UserFilter(dbsession, user, query)
    query = uquery.filter()
    return query
def createView(self):
    """Compile a DB view statement over seismology event indexes, adding
    GIS helper columns (start/end datetime aliases, a symbol name derived
    from the localisation method, and a WKT point geometry).

    Returns None when none of the wanted indexes exist.
    """
    # filter indexes down to the whitelisted labels
    # (NOTE: the local name ``filter`` shadows the builtin).
    catalog = self.env.catalog.index_catalog
    xmlindex_list = catalog.getIndexes(package_id='seismology',
                                       resourcetype_id='event')
    filter = ['datetime', 'latitude', 'longitude', 'depth', 'magnitude',
              'magnitude_type', 'event_type', 'np1_strike', 'np1_dip',
              'np1_rake', 'mt_mrr', 'mt_mtt', 'mt_mpp', 'mt_mrt', 'mt_mrp',
              'mt_mtp', 'localisation_method']
    xmlindex_list = [x for x in xmlindex_list if x.label in filter]
    if not xmlindex_list:
        return
    # build up query
    query, joins = catalog._createIndexView(xmlindex_list, compact=True)
    options = [
        sql.literal_column("datetime.keyval").label("end_datetime"),
        sql.literal_column("datetime.keyval").label("start_datetime"),
        # Map 'manual' localisation to a circle marker, everything else
        # to a square.
        sql.case(
            value=sql.literal_column("localisation_method.keyval"),
            whens={'manual': 'circle'},
            else_='square').label('gis_localisation_method'),
        # Raw SQL: builds a 'POINT(lon lat)' WKT string with SRID 4326.
        sql.func.GeomFromText(
            sql.text("'POINT(' || longitude.keyval || ' ' || " + \
                     "latitude.keyval || ')', 4326")).label('geom')
    ]
    for option in options:
        query.append_column(option)
    query = query.select_from(joins)
    return util.compileStatement(query)
def query(cls, db, name=None, lang=None, order_by=True):
    """Query ``cls`` records, optionally filtered by name(s) and lang(s).

    ``name`` / ``lang`` accept a scalar or a sequence (normalized by
    ``_listify``). ``order_by=True`` applies the default ordering: langs
    in the caller-supplied order (via a CASE ranking) and then name; any
    other truthy value is passed through to ``order_by(*order_by)``; a
    falsy value leaves the query unordered.
    """
    names = _listify(name)
    langs = _listify(lang)
    query = db.query(cls)

    # One value filters with equality, several with IN().
    if len(names) == 1:
        query = query.filter(cls.name == names[0])
    elif len(names) > 1:
        query = query.filter(cls.name.in_(names))

    if len(langs) == 1:
        query = query.filter(cls.lang == langs[0])
    elif len(langs) > 1:
        query = query.filter(cls.lang.in_(langs))

    if not order_by:
        return query

    if order_by is not True:
        # Caller supplied explicit ordering criteria.
        # noinspection PyArgumentList
        return query.order_by(*order_by)

    # Default ordering.
    if not langs:
        query = query.order_by(cls.lang)
    elif len(langs) > 1:
        # Preserve the caller-supplied language order via CASE ranking.
        ranking = sql.case(
            value=cls.lang,
            whens=[(item, ix) for ix, item in enumerate(langs)],
        )
        query = query.order_by(ranking)
    if len(names) != 1:
        query = query.order_by(cls.name)
    return query
def filter_by_watches(self, user): """Filter the gallery down to only things `user` is watching.""" # XXX make this work for multiple users self.query = self.query.filter(or_( # Check for by/for/of watching # XXX need an index on relationship_type, badly! model.Artwork.id.in_( self.session.query(model.UserArtwork.artwork_id) .join((model.UserWatch, model.UserArtwork.user_id == model.UserWatch.other_user_id)) .filter(model.UserWatch.user_id == user.id) .filter(case( value=model.UserArtwork.relationship_type, whens={ u'by': model.UserWatch.watch_by, u'for': model.UserWatch.watch_for, u'of': model.UserWatch.watch_of, }, )) ), # Check for upload watching model.Artwork.uploader_user_id.in_( self.session.query(model.UserWatch.other_user_id) .filter(model.UserWatch.user_id == user.id) .filter(model.UserWatch.watch_upload == True) # gross ), ))
def tree_stats(request, treedef, tree, parentid):
    """Return JSON stats for the children of one tree node.

    For every child of ``parentid`` in the given tree, counts targets
    (e.g. specimens) attached directly to the child vs. attached anywhere
    in the child's subtree.
    """
    tree_table = datamodel.get_table(tree)
    # 'null' denotes the (virtual) root's children.
    parentid = None if parentid == 'null' else int(parentid)
    node = getattr(models, tree_table.name)
    descendant = aliased(node)
    node_id = getattr(node, node._id)
    descendant_id = getattr(descendant, node._id)
    treedef_col = tree_table.name + "TreeDefID"
    same_tree_p = getattr(descendant, treedef_col) == int(treedef)
    # A row is a descendant when its node number falls inside the node's
    # nested-set interval, within the same tree definition.
    is_descendant_p = sql.and_(
        sql.between(descendant.nodeNumber,
                    node.nodeNumber,
                    node.highestChildNodeNumber),
        same_tree_p)
    target, make_joins = getattr(StatsQuerySpecialization, tree)()
    target_id = getattr(target, target._id)
    # Targets attached to the node itself (descendant == node).
    direct_count = sql.cast(
        sql.func.sum(sql.case([(sql.and_(target_id != None,
                                         descendant_id == node_id), 1)],
                              else_=0)),
        types.Integer)
    # Targets attached anywhere in the subtree.
    all_count = sql.func.count(target_id)
    with models.session_context() as session:
        query = session.query(node_id, direct_count, all_count) \
            .join(descendant, is_descendant_p) \
            .filter(node.ParentID == parentid) \
            .group_by(node_id)
        query = make_joins(request.specify_collection, query, descendant_id)
        results = list(query)
    return HttpResponse(toJson(results), content_type='application/json')
def get_measures(self):
    """Find all data that should be included in the report.

    The data is returned as a list of tuples containing a
    :py:class:`Module <euphorie.client.model.Module>`,
    :py:class:`Risk <euphorie.client.model.Risk>` and
    :py:class:`ActionPlan <euphorie.client.model.ActionPlan>`.
    Each entry in the list will correspond to a row in the generated Excel
    file.

    This implementation differs from Euphorie in its ordering:
    it sorts on risk priority instead of start date.
    """
    query = (
        Session.query(model.Module, model.Risk, model.ActionPlan)
        .filter(sql.and_(model.Module.session == self.session,
                         model.Module.profile_index > -1))
        .filter(sql.not_(model.SKIPPED_PARENTS))
        .filter(sql.or_(model.MODULE_WITH_RISK_OR_TOP5_FILTER,
                        model.RISK_PRESENT_OR_TOP5_FILTER))
        # Join each module to its immediate child risks via materialized
        # path (prefix match plus depth + 1).
        .join(
            (
                model.Risk,
                sql.and_(
                    model.Risk.path.startswith(model.Module.path),
                    model.Risk.depth == model.Module.depth + 1,
                    model.Risk.session == self.session,
                ),
            )
        )
        .join((model.ActionPlan, model.ActionPlan.risk_id == model.Risk.id))
        # Priority ordering: high, medium, then everything else.
        .order_by(sql.case(value=model.Risk.priority,
                           whens={"high": 0, "medium": 1}, else_=2),
                  model.Risk.path)
    )
    return query.all()
def change_weight(self, new_weight: int):
    """ Change the weight of the entity within it's container.

    Shifts every sibling between the old and new position by one (and
    swaps the boundary row), all in a single UPDATE using a CASE
    expression. Returns the number of rows updated, or None on DB error.
    """
    # Lock the row so concurrent reorderings serialize.
    obj = self.dbsession.query(Content).enable_eagerloads(False).\
        with_lockmode('update').get(self.entity.id)
    (min_weight, max_weight) = sorted((new_weight, obj.weight))
    # Do we move downwards or upwards ?
    if new_weight - obj.weight > 0:
        operation = operator.sub
        whens = {min_weight: max_weight}
    else:
        operation = operator.add
        whens = {max_weight: min_weight}
    # Select all the rows between the current weight and the new weight
    filters = sql.and_(Content.container_id == obj.container_id,
                       Content.weight.between(min_weight, max_weight))
    # Swap min_weight/max_weight, or increment/decrement by one depending
    # on whether one moves up or down
    new_weight = sql.case(value=Content.weight, whens=whens,
                          else_=operation(Content.weight, 1))
    try:
        # The UPDATE statement
        updated = self.dbsession.query(Content).enable_eagerloads(False).\
            filter(filters).\
            update({'weight': new_weight}, synchronize_session=False)
        self.dbsession.flush()
        return updated
    except DatabaseError:
        return None
def protocol(cls):
    """CASE expression naming the protocol of whichever endpoint FK is set.

    Yields 'sip', 'sccp' or 'custom'; NULL when no endpoint is configured.
    """
    whens = [
        (cls.endpoint_sip_uuid.isnot(None), 'sip'),
        (cls.endpoint_sccp_id.isnot(None), 'sccp'),
        (cls.endpoint_custom_id.isnot(None), 'custom'),
    ]
    return sql.case(whens, else_=None)
def create_mapper(rack_specs_tbl):
    """Mapper factory for ``RackSpecs``.

    Builds a polymorphic select that derives ``rackspecs_type`` from the
    ``has_movable_subitems`` flag (tube rack vs. plate) and maps the class
    polymorphically on that computed column.
    """
    rs = rack_specs_tbl
    # Discriminator column computed with CASE on has_movable_subitems.
    polymorphic_select = select([
        rs,
        (case([(rs.c.has_movable_subitems,
                literal(RACK_SPECS_TYPES.TUBE_RACK_SPECS))],
              else_=literal(RACK_SPECS_TYPES.PLATE_SPECS))).label(
            'rackspecs_type')
        ],
        ).alias('rackspecs')
    m = mapper(RackSpecs, polymorphic_select,
               id_attribute='rack_specs_id',
               slug_expression=lambda cls: as_slug_expression(cls.name),
               properties=dict(
                   manufacturer=relationship(Organization),
                   shape=relationship(RackShape, uselist=False,
                                      back_populates='specs'),
                   rack_specs_type=
                       column_property(polymorphic_select.c.rackspecs_type),
                   ),
               polymorphic_on=polymorphic_select.c.rackspecs_type,
               polymorphic_identity=RACK_SPECS_TYPES.RACK_SPECS,
               )
    # Friendly alias for the underlying flag column.
    RackSpecs.has_tubes = synonym('has_movable_subitems')
    return m
def status(cls):
    """CASE expression classifying an order as Cancelled, Shipped or Open.

    Not shipped with a date-like ship_method ("%/%/%") means Cancelled;
    a shipped_on timestamp means Shipped; anything else is Open.
    """
    cancelled = and_(cls.shipped_on.is_(None),
                     cls.ship_method.like("%/%/%"))
    shipped = cls.shipped_on.isnot(None)
    return case(
        [(cancelled, "Cancelled"), (shipped, "Shipped")],
        else_="Open",
    )
def is_active(cls):
    """Boolean SQL expression: not date-disabled, verified and enabled.

    Wrapped in a CASE because MSSQL cannot use a bare boolean expression
    in a select column.
    """
    active = sa_sql.and_(
        ~cls.is_disabled_by_date,
        cls.is_verified == sa.true(),
        cls.is_enabled == sa.true(),
    )
    return sa_sql.case([(active, sa.true())], else_=sa.false())
def _sql_rank_over(rank_func, col, partition):
    """Build a windowed rank expression over *col*, NULL for NULL inputs.

    When *partition* is truthy, partitions by ``col IS NOT NULL`` so that
    length-based aggregates are computed over non-NULL rows only.
    """
    # partitioning ensures aggregates that use total length are correct
    # e.g. percent rank, cume_dist and friends
    over_clause = RankOver(rank_func(), order_by=col,
                           partition_by=col.isnot(None) if partition else None)
    # Dict-form CASE: the key is the condition, so NULL values of col fall
    # through to the implicit NULL else-branch.
    return sql.case({col.isnot(None): over_clause})
def __call__(self, column_clause, cuboid=None):
    """Average aggregate: fact-count-weighted when the cuboid carries a
    fact count column, plain AVG otherwise.
    """
    if not (cuboid and cuboid.fact_count_column is not None):
        return func.avg(column_clause)
    total_facts = func.sum(cuboid.fact_count_column)
    weighted_sum = func.sum(column_clause * cuboid.fact_count_column)
    # Guard against division by zero when no facts were aggregated.
    return case([(total_facts == 0, 0)],
                else_=weighted_sum / cast(total_facts, types.Numeric))
def caller_id_name(cls):
    """CASE expression extracting the caller-id display name per endpoint.

    SIP stores it inside the 'callerid' option (quoted name before the
    number, captured by the regex); SCCP has a dedicated 'cid_name' option.
    """
    # Captures the quoted name portion of a callerid like '"Name" <num>'.
    regex = '"([^"]+)"\\s+'
    return sql.case(
        [(cls.endpoint_sip_uuid.isnot(None),
          cls._sip_query_option('callerid', regex_filter=regex)),
         (cls.endpoint_sccp_id.isnot(None),
          cls._sccp_query_option('cid_name'))],
        else_=None)
def prevp(cls):
    """Apparently intended to fetch/detach the record preceding ``cls.pi``.

    NOTE(review): this implementation looks broken — confirm before relying
    on it:
    * ``.first()`` executes the inner query immediately, at expression
      construction time, embedding an ORM instance (or None) in the CASE
      rather than a correlated subquery;
    * ``res`` is a Case expression object, so ``res is not None`` is always
      True and ``sess1.expunge(res)`` is handed a non-instance;
    * ``cls1`` is never used and the function returns nothing.
    """
    sess1 = Session()
    cls1 = aliased(cls)
    cls2 = aliased(cls)
    res = case([
        (cls.pi != 0,
         sess1.query(cls2).join(cls, cls.ab_id==cls2.ab_id).filter(
             cls2.pi == cls.pi - 1).first()),
        ], else_ = None)
    if res is not None:
        sess1.expunge(res)
def caller_id_num(cls):
    """CASE expression extracting the caller-id number per endpoint type.

    SIP stores it inside the 'callerid' option (angle-bracketed number,
    captured by the regex); SCCP has a dedicated 'cid_num' option.
    """
    regex = '<([0-9A-Z]+)?>'
    whens = [
        (cls.endpoint_sip_uuid.isnot(None),
         cls._sip_query_option('callerid', regex_filter=regex)),
        (cls.endpoint_sccp_id.isnot(None),
         cls._sccp_query_option('cid_num')),
    ]
    return sql.case(whens)
def license_approved_names():
    """Select (license_id, short_name), prefixing non-official SPDX names.

    Licenses not in the official SPDX list get the 'LicenseRef-' prefix
    mandated for custom license identifiers.
    """
    lic = db.licenses.alias()
    display_name = case(
        [(lic.c.is_spdx_official == True, lic.c.short_name)],
        else_='LicenseRef-' + lic.c.short_name,
    ).label('short_name')
    return select([lic.c.license_id, display_name])
def has_permission(cls, principal, permission):
    """SQL expression approximating an ACL permission check.

    This will eventually need to emit SQL that walks a sequence of ACEs
    looking for allows or denies; for now it fakes a trivial rule that
    only the row with id == 2 is permitted.
    """
    from sqlalchemy.sql import case
    permitted = cls.id == 2  # placeholder ACE until real ACLs exist
    return case([(permitted, True)], else_=False)
def test_string_literals_not_oldstyle_quoted_in_func():
    """Regression test: the BigQuery dialect must escape string literals.

    Compiles a CASE comparing against a string containing an apostrophe
    with literal_binds; the raw (old-style quoted) literal must not appear
    in the rendered SQL.
    """
    s = str(
        sql.case(
            [(sql.column("WHOA") == "Jason's", 1)], else_=0
        ).compile(
            compile_kwargs={"literal_binds": True},
            dialect=bq.BQDialect()
        )
    )
    assert "Jason's" not in s
def last_observation(self):
    """Return the most recent ObservedMac row for this object's MAC,
    preferring sightings on any port other than port 0.
    """
    session = object_session(self)
    q = session.query(ObservedMac)
    q = q.filter_by(mac_address=self.mac)
    # Group the results into 'any port number but zero' and 'port 0'.
    # This prioritizes any port over the uplink port.
    # Saying that port 0 is an uplink port isn't very elegant, also
    # with real port names it's not even true.
    q = q.order_by(desc(case([(ObservedMac.port == "0", 0)], else_=1)))
    # Within each group, newest sighting first.
    q = q.order_by(desc(ObservedMac.last_seen))
    return q.first()
def accumulator(self, column_name, new_row, agg_row, old_row=None):
    """Build a SQL expression merging new data into a running weighted
    average of *column_name*.

    When ``old_row`` is given, its contribution is first subtracted (the
    new row replaces a previously accumulated one).

    :returns: CASE expression yielding 0 when the combined count is 0,
        otherwise the merged weighted average.
    """
    new_count = new_row.count
    new_total = new_row.c[column_name] * new_row.count
    if old_row is not None:
        # Replacing an existing row: remove its weight and value first.
        new_count = new_count - old_row.count
        new_total = (new_total -
                     (old_row.c[column_name] * old_row.count))
    agg_count = func.coalesce(agg_row.count, 0)
    # BUG FIX: coalesce() needs a fallback argument; with a single
    # argument a NULL aggregate value propagated NULL through the
    # whole average instead of contributing 0.
    agg_value = func.coalesce(agg_row.c[column_name], 0) * agg_count
    total_count = new_count + agg_count
    return case([(total_count == 0, 0)],
                else_=(agg_value + new_total) / total_count)
def __init__(self, dbsession, user):
    """Prepare today's message-category counts for *user*'s timezone.

    Builds (but does not execute) one aggregate query whose CASE buckets
    (clean/virii/infected/spam/lowspam/highspam) are mutually exclusive,
    so their sums partition the total.
    """
    self.dbsession = dbsession
    self.user = user
    self.query = self.dbsession.query(
        func.count(Message.id).label('total'),
        # Clean: no infection flags and no spam flags at all.
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.nameinfected == 0,
                             Message.otherinfected == 0,
                             Message.spam == 0,
                             Message.highspam == 0), 1)],
                      else_=0)).label('clean'),
        func.sum(case([(Message.virusinfected > 0, 1)],
                      else_=0)).label('virii'),
        # Infected by name/other checks but not a virus and not spam.
        func.sum(case([(and_(Message.highspam == 0,
                             Message.spam == 0,
                             Message.virusinfected == 0,
                             or_(Message.nameinfected > 0,
                                 Message.otherinfected > 0)), 1)],
                      else_=0)).label('infected'),
        # Any spam (low or high) without infections.
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.otherinfected == 0,
                             Message.nameinfected == 0,
                             or_(Message.spam > 0,
                                 Message.highspam > 0)), 1)],
                      else_=0)).label('spam'),
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.otherinfected == 0,
                             Message.nameinfected == 0,
                             Message.spam > 0,
                             Message.highspam == 0), 1)],
                      else_=0)).label('lowspam'),
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.otherinfected == 0,
                             Message.nameinfected == 0,
                             Message.highspam > 0), 1)],
                      else_=0)).label('highspam'))\
        .filter(Message.timestamp.between(
            ustartday(self.user.timezone),
            uendday(self.user.timezone)))
def __init__(self, dbsession, user):
    """Prepare message-category counts for the current (server) date.

    Same bucketing as the timezone-aware variant — mutually exclusive
    CASE buckets partitioning the total — but filtered on
    ``Message.date == now().date()`` instead of a timezone-adjusted range.
    """
    self.dbsession = dbsession
    self.user = user
    self.query = self.dbsession.query(
        func.count(Message.id).label('total'),
        # Clean: no infection flags and no spam flags at all.
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.nameinfected == 0,
                             Message.otherinfected == 0,
                             Message.spam == 0,
                             Message.highspam == 0), 1)],
                      else_=0)).label('clean'),
        func.sum(case([(Message.virusinfected > 0, 1)],
                      else_=0)).label('virii'),
        # Infected by name/other checks but not a virus and not spam.
        func.sum(case([(and_(Message.highspam == 0,
                             Message.spam == 0,
                             Message.virusinfected == 0,
                             or_(Message.nameinfected > 0,
                                 Message.otherinfected > 0)), 1)],
                      else_=0)).label('infected'),
        # Any spam (low or high) without infections.
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.otherinfected == 0,
                             Message.nameinfected == 0,
                             or_(Message.spam > 0,
                                 Message.highspam > 0)), 1)],
                      else_=0)).label('spam'),
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.otherinfected == 0,
                             Message.nameinfected == 0,
                             Message.spam > 0,
                             Message.highspam == 0), 1)],
                      else_=0)).label('lowspam'),
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.otherinfected == 0,
                             Message.nameinfected == 0,
                             Message.highspam > 0), 1)],
                      else_=0)).label('highspam'))\
        .filter(Message.date == now().date())
def get_measures(self):
    """Find all data that should be included in the report.

    The data is returned as a list of tuples containing a
    :py:class:`Module <euphorie.client.model.Module>`,
    :py:class:`Risk <euphorie.client.model.Risk>` and
    :py:class:`ActionPlan <euphorie.client.model.ActionPlan>`.
    Each entry in the list will correspond to a row in the generated Excel
    file.

    This implementation differs from Euphorie in its ordering:
    it sorts on risk priority instead of start date.
    """
    query = (Session.query(
        model.Module, model.Risk,
        model.ActionPlan).select_from(model.Module).filter(
            sql.and_(
                model.Module.session == self.session,
                model.Module.profile_index > -1,
            )).filter(sql.not_(model.SKIPPED_PARENTS)).filter(
                sql.or_(
                    model.MODULE_WITH_RISK_OR_TOP5_FILTER,
                    model.RISK_PRESENT_OR_TOP5_FILTER,
                # Risks joined directly by parent id; plans restricted to
                # the two concrete measure types.
                )).join(model.Risk,
                        model.Risk.parent_id == model.Module.id).
             join((
                 model.ActionPlan,
                 sql.and_(
                     model.ActionPlan.risk_id == model.Risk.id,
                     sql.or_(
                         model.ActionPlan.plan_type == "measure_standard",
                         model.ActionPlan.plan_type == "measure_custom",
                     ),
                 ),
             # Priority ordering: high, medium, then everything else.
             )).order_by(
                 sql.case(value=model.Risk.priority,
                          whens={
                              "high": 0,
                              "medium": 1
                          }, else_=2),
                 model.Risk.path,
             ))
    # Keep only rows whose plan has any substance and whose risk is
    # either negatively identified or a top-5 risk.
    return [
        t for t in query.all()
        if ((t[-1].planning_start is not None
             or t[-1].planning_end is not None
             or t[-1].responsible is not None
             or t[-1].requirements is not None
             or t[-1].budget is not None
             or t[-1].action is not None) and (
                 t[1].identification == "no" or t[1].risk_type == "top5"))
    ]
def index(self):
    """Render the games index with per-player avatar-guess stats:
    (user id, nick, correct guesses, total guesses).
    """
    s = select(
        [
            t_users.c.id,
            t_users.c.nick_name,
            # Correct guesses: CASE contributes 1 only for guessed rows.
            func.sum(case([(t_avatar_guesses.c.guessed == True, 1)],
                          else_=0)),
            func.count(t_avatar_guesses.c.guessed),
        ],
        from_obj=[t_avatar_guesses.join(t_users)],
    ).group_by(t_users.c.id, t_users.c.nick_name)
    c.players = Session.execute(s)
    c.title = "igrice"
    return render("/games/index.mako")
def creators():
    """Select (creator_id, creator_text) display strings.

    'Tool' creators render as "Type: Name"; everyone else additionally
    gets their email appended in parentheses.
    """
    cre = db.creators.alias()
    cty = db.creator_types.alias()
    return (select([
        cre.c.creator_id,
        case(
            [[cty.c.name == 'Tool',
              cty.c.name + ': ' + cre.c.name
              ]],
            else_=cty.c.name + ': ' + cre.c.name + ' (' + cre.c.email + ')'
        ).label('creator_text')
        ])
        .select_from(
            cre
            .join(cty, cre.c.creator_type_id == cty.c.creator_type_id)
        )
    )
def create_mapper(container_specs_tbl):
    """Mapper factory for ``ContainerSpecs``.

    Builds a polymorphic select deriving ``containerspecs_type`` from the
    ``has_barcode`` flag (tube vs. well) and maps the class polymorphically
    on that computed column.
    """
    cs = container_specs_tbl.alias('cs')
    # Discriminator column computed with CASE on has_barcode.
    polymorphic_select = select([
        cs,
        (case([(cs.c.has_barcode,
                literal(CONTAINER_SPECS_TYPES.TUBE))],
              else_=literal(CONTAINER_SPECS_TYPES.WELL))).label(
            'containerspecs_type')
        ],
        ).alias('containerspecs')
    m = mapper(ContainerSpecs, polymorphic_select,
               id_attribute='container_specs_id',
               slug_expression=lambda cls: as_slug_expression(cls.name),
               properties=dict(manufacturer=relationship(Organization),
                               ),
               polymorphic_on=polymorphic_select.c.containerspecs_type,
               polymorphic_identity=CONTAINER_SPECS_TYPES.CONTAINER,
               )
    return m
def handle_tree_field(self, node, table, tree_rank, tree_field):
    """Resolve a tree-rank field reference into (query, column).

    Joins the node to an alias per tree rank (one outer join per ancestor
    level, cached per table) and builds a CASE that picks the requested
    field from whichever ancestor matches the requested rank.
    """
    query = self
    assert query.collection is not None  # Not sure it makes sense to query across collections
    logger.info('handling treefield %s rank: %s field: %s',
                table, tree_rank, tree_field)
    treedefitem_column = table.name + 'TreeDefItemID'
    if (table, 'TreeRanks') in query.join_cache:
        # Ancestor aliases were already joined by a previous field on the
        # same table; reuse them.
        logger.debug("using join cache for %r tree ranks.", table)
        ancestors, treedef = query.join_cache[(table, 'TreeRanks')]
    else:
        treedef = get_treedef(query.collection, table.name)
        rank_count = treedef.treedefitems.count()
        # Chain of outer joins: each alias is the parent of the previous.
        ancestors = [node]
        for i in range(rank_count - 1):
            ancestor = orm.aliased(node)
            query = query.outerjoin(
                ancestor,
                ancestors[-1].ParentID == getattr(ancestor, ancestor._id))
            ancestors.append(ancestor)
        logger.debug("adding to join cache for %r tree ranks.", table)
        query = query._replace(join_cache=query.join_cache.copy())
        query.join_cache[(table, 'TreeRanks')] = (ancestors, treedef)
    # Unique bind-param name per use, tracked via param_count.
    query = query._replace(param_count=self.param_count + 1)
    treedefitem_param = sql.bindparam(
        'tdi_%s' % query.param_count,
        value=treedef.treedefitems.get(name=tree_rank).id)
    # Requested column: tree name by default, id for 'ID', otherwise the
    # named field resolved through the datamodel.
    column_name = 'name' if tree_field is None else \
        node._id if tree_field == 'ID' else \
        table.get_field(tree_field.lower()).name
    # Pick the value from the ancestor at the requested rank.
    column = sql.case([(getattr(ancestor, treedefitem_column) ==
                        treedefitem_param,
                        getattr(ancestor, column_name))
                       for ancestor in ancestors])
    return query, column
def _list_user_arguments(session, user_ids):
    """Map user_id -> '&'-joined Asterisk dial strings of their main lines.

    Each line contributes 'PJSIP/<name>', 'SCCP/<name>' or the custom
    interface string, depending on which endpoint FK is populated.
    """
    query = session.query(
        UserFeatures.id.label('user_id'),
        # Aggregate all of a user's line interfaces into one '&'-separated
        # string; CASE selects the right dial prefix per endpoint type.
        sql.func.string_agg(
            sql.case([(LineFeatures.endpoint_sip_uuid.isnot(None),
                       literal_column("'PJSIP/'") + EndpointSIP.name),
                      (LineFeatures.endpoint_sccp_id.isnot(None),
                       literal_column("'SCCP/'") + SCCPLine.name),
                      (LineFeatures.endpoint_custom_id.isnot(None),
                       UserCustom.interface)]),
            literal_column("'&'")).label('argument'),
    ).join(UserLine.userfeatures,
    ).join(UserLine.linefeatures,
    ).outerjoin(
        EndpointSIP,
    ).outerjoin(SCCPLine,
    ).outerjoin(UserCustom,
    ).filter(
        and_(
            UserFeatures.id.in_(user_ids),
            UserLine.main_user.is_(True),
            LineFeatures.commented == 0,
        )).group_by(UserFeatures.id)
    return {row.user_id: row.argument for row in query}
def get_measures(self):
    """Find all data that should be included in the report.

    The data is returned as a list of tuples containing a
    :py:class:`Module <euphorie.client.model.Module>`,
    :py:class:`Risk <euphorie.client.model.Risk>` and
    :py:class:`ActionPlan <euphorie.client.model.ActionPlan>`.
    Each entry in the list will correspond to a row in the generated Excel
    file.

    This implementation differs from Euphorie in its ordering:
    it sorts on risk priority instead of start date.
    """
    query = Session.query(model.Module, model.Risk, model.ActionPlan)\
        .filter(sql.and_(model.Module.session == self.session,
                         model.Module.profile_index > -1))\
        .filter(sql.not_(model.SKIPPED_PARENTS))\
        .filter(sql.or_(model.MODULE_WITH_RISK_OR_TOP5_FILTER,
                        model.RISK_PRESENT_OR_TOP5_FILTER))\
        .join((model.Risk,
               sql.and_(model.Risk.path.startswith(model.Module.path),
                        model.Risk.depth == model.Module.depth+1,
                        model.Risk.session == self.session)))\
        .join((model.ActionPlan,
               model.ActionPlan.risk_id == model.Risk.id))\
        .order_by(
            sql.case(
                value=model.Risk.priority,
                whens={'high': 0, 'medium': 1},
                else_=2),
            model.Risk.path)
    # Keep only rows whose plan has any substance and whose risk is
    # either negatively identified or a top-5 risk.
    return [t for t in query.all()
            if ((
                t[-1].planning_start is not None or
                t[-1].planning_end is not None or
                t[-1].responsible is not None or
                t[-1].prevention_plan is not None or
                t[-1].requirements is not None or
                t[-1].budget is not None or
                t[-1].action_plan is not None) and
                (t[1].identification == 'no' or t[1].risk_type == 'top5')
                )]
def create_mapper(container_mapper, tube_tbl):
    """Mapper factory for ``Tube``, inheriting from the container mapper.

    The slug falls back to 'no-barcode-<id>' for tubes without a barcode.
    """
    m = mapper(Tube, tube_tbl,
               inherits=container_mapper,
               slug_expression=lambda cls: case(
                   [(cls.barcode == None,
                     literal('no-barcode-') + cast(cls.id, String))],
                   else_=cls.barcode),
               properties=
               dict(location=relationship(
                       TubeLocation,
                       uselist=False,
                       back_populates='container',
                       cascade='all,delete,delete-orphan',
                       single_parent=True
                       ),
#                    sample=relationship(StockSample, uselist=False,
#                                        back_populates='container',
#                                        ),
                    ),
               polymorphic_identity=CONTAINER_TYPES.TUBE)
    return m
def get_measures(self):
    """Find all data that should be included in the report.

    The data is returned as a list of tuples containing a
    :py:class:`Module <euphorie.client.model.Module>`,
    :py:class:`Risk <euphorie.client.model.Risk>` and
    :py:class:`ActionPlan <euphorie.client.model.ActionPlan>`.
    Each entry in the list will correspond to a row in the generated Excel
    file.

    This implementation differs from Euphorie in its ordering:
    it sorts on risk priority instead of start date.
    """
    query = Session.query(model.Module, model.Risk, model.ActionPlan)\
        .filter(sql.and_(model.Module.session == self.session,
                         model.Module.profile_index > -1))\
        .filter(sql.not_(model.SKIPPED_PARENTS))\
        .filter(sql.or_(model.MODULE_WITH_RISK_OR_TOP5_FILTER,
                        model.RISK_PRESENT_OR_TOP5_FILTER))\
        .join((model.Risk,
               sql.and_(model.Risk.path.startswith(model.Module.path),
                        model.Risk.depth == model.Module.depth+1,
                        model.Risk.session == self.session)))\
        .join((model.ActionPlan,
               model.ActionPlan.risk_id == model.Risk.id))\
        .order_by(
            sql.case(
                value=model.Risk.priority,
                whens={'high': 0, 'medium': 1},
                else_=2),
            model.Risk.path)
    # Keep only rows whose plan has any substance and whose risk is
    # either negatively identified or a top-5 risk.
    return [
        t for t in query.all()
        if ((t[-1].planning_start is not None
             or t[-1].planning_end is not None
             or t[-1].responsible is not None
             or t[-1].prevention_plan is not None
             or t[-1].requirements is not None
             or t[-1].budget is not None
             or t[-1].action_plan is not None)
            and (t[1].identification == 'no'
                 or t[1].risk_type == 'top5'))
    ]
def change_weight(self, new_weight: int):
    """ Change the weight of the entity within it's container.

    Shifts every sibling between the old and new position by one (and
    swaps the boundary row), all in a single UPDATE using a CASE
    expression. Returns the number of rows updated, or None on DB error.
    """
    # Lock the row so concurrent reorderings serialize.
    obj = self.dbsession.query(Content).enable_eagerloads(False).\
        with_lockmode('update').get(self.entity.id)
    (min_weight, max_weight) = sorted((new_weight, obj.weight))
    # Do we move downwards or upwards ?
    if new_weight - obj.weight > 0:
        operation = operator.sub
        whens = {min_weight: max_weight}
    else:
        operation = operator.add
        whens = {max_weight: min_weight}
    # Select all the rows between the current weight and the new weight
    filters = sql.and_(
        Content.container_id == obj.container_id,
        Content.weight.between(min_weight, max_weight)
    )
    # Swap min_weight/max_weight, or increment/decrement by one depending
    # on whether one moves up or down
    new_weight = sql.case(
        value=Content.weight,
        whens=whens,
        else_=operation(Content.weight, 1)
    )
    try:
        # The UPDATE statement
        updated = self.dbsession.query(Content).enable_eagerloads(False).\
            filter(filters).\
            update({'weight': new_weight}, synchronize_session=False)
        self.dbsession.flush()
        return updated
    except DatabaseError:
        return None
def visit_case(self, clause, **kwargs):
    """
    Adjust case clause to use explicit casts for 'THEN' expressions.

    H2 needs each THEN/ELSE result explicitly typed; untyped (NullType)
    results are rendered as strings.
    """
    def sanitized_cast(elem):
        """
        Cast elements, casting NullType to string.
        """
        if isinstance(elem.type, sqltypes.NullType):
            return cast(elem, sqltypes.String)
        else:
            # Re-cast to its own type to force an explicit CAST in SQL.
            return cast(elem, elem.type)
    whens_with_cast = [(when_, sanitized_cast(result))
                       for when_, result in clause.whens]
    else_with_cast = None
    if clause.else_ is not None:
        else_with_cast = sanitized_cast(clause.else_)
    # Rebuild the CASE with casted branches and delegate rendering.
    case_with_cast = case(
        whens=whens_with_cast,
        else_=else_with_cast,
        value=clause.value
    )
    return super(H2Compiler, self).visit_case(case_with_cast, **kwargs)
def _case_when(__data, cases):
    """Translate an ordered mapping of condition -> value into sql.case().

    Conditions and values may be callables taking ``__data`` (column
    expressions); a literal ``True`` as the *last* condition becomes the
    ELSE branch.
    """
    # TODO: will need listener to enter case statements, to handle when they use windows
    if isinstance(cases, Call):
        cases = cases(__data)
    whens = []
    case_items = list(cases.items())
    n_items = len(case_items)
    else_val = None
    for ii, (expr, val) in enumerate(case_items):
        # handle where val is a column expr
        if callable(val):
            val = val(__data)

        # handle when expressions
        if ii + 1 == n_items and expr is True:
            # Final literal True acts as the ELSE branch.
            else_val = val
        elif callable(expr):
            whens.append((expr(__data), val))
        else:
            whens.append((expr, val))
    return sql.case(whens, else_=else_val)
def query(self):
    """Build the per-query sampled statistics SELECT for one (database, queryid).

    Columns include row/call counts, block I/O throughput in bytes/s,
    shared-buffer hit ratio and average runtime.  When the
    pg_stat_kcache extension is available, system-level metrics
    (physical reads/writes, user/system CPU time and a finer-grained
    hit-ratio split) are joined in as well; otherwise a plain miss
    ratio is added instead.
    """
    query = powa_getstatdata_sample("query")
    # Restrict the sample to the database/query selected by the caller.
    query = query.where(
        (column("datname") == bindparam("database")) &
        (column("queryid") == bindparam("query")))
    query = query.alias()
    c = query.c
    total_blocks = ((c.shared_blks_read + c.shared_blks_hit)
                    .label("total_blocks"))

    def bps(col):
        # Blocks-per-second: block count converted to bytes, divided by
        # the measure interval (floored at one second — avoids div by 0).
        ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
        return (mulblock(col) / ts).label(col.name)

    cols = [to_epoch(c.ts),
            c.rows,
            c.calls,
            # Hit ratio guarded against empty samples (0 total blocks).
            case([(total_blocks == 0, 0)],
                 else_=cast(c.shared_blks_hit, Numeric) * 100 /
                 total_blocks).label("hit_ratio"),
            bps(c.shared_blks_read),
            bps(c.shared_blks_hit),
            bps(c.shared_blks_dirtied),
            bps(c.shared_blks_written),
            bps(c.local_blks_read),
            bps(c.local_blks_hit),
            bps(c.local_blks_dirtied),
            bps(c.local_blks_written),
            bps(c.temp_blks_read),
            bps(c.temp_blks_written),
            c.blk_read_time,
            c.blk_write_time,
            (c.runtime / greatest(c.calls, 1)).label("avg_runtime")]

    from_clause = query
    if self.has_extension("pg_stat_kcache"):
        # Add system metrics from pg_stat_kcache,
        # and detailed hit ratio.
        kcache_query = kcache_getstatdata_sample()
        kc = inner_cc(kcache_query)
        kcache_query = (
            kcache_query
            .where(kc.queryid == bindparam("query"))
            .alias())
        kc = kcache_query.c
        # Blocks reported read by PostgreSQL but not physically read per
        # kcache: they were served by the OS page cache.
        sys_hits = (greatest(mulblock(c.shared_blks_read) - kc.reads, 0)
                    .label("kcache_hitblocks"))
        sys_hitratio = (cast(sys_hits, Numeric) * 100 /
                        mulblock(total_blocks))
        disk_hit_ratio = (kc.reads / mulblock(total_blocks))
        total_time = greatest(c.runtime, 1);
        # Rusage can return values > real time due to sampling bias
        # aligned to kernel ticks. As such, we have to clamp values
        # to 100%
        total_time_percent = lambda x: least(100, (x * 100) / total_time)
        cols.extend([
            kc.reads,
            kc.writes,
            # user/system time are in seconds, runtime in ms → * 1000.
            total_time_percent(kc.user_time * 1000).label("user_time"),
            total_time_percent(kc.system_time * 1000).label("system_time"),
            greatest(total_time_percent(
                c.runtime - (kc.user_time + kc.system_time) * 1000),
                0).label("other_time"),
            case([(total_blocks == 0, 0)],
                 else_=disk_hit_ratio).label("disk_hit_ratio"),
            case([(total_blocks == 0, 0)],
                 else_=sys_hitratio).label("sys_hit_ratio")])
        from_clause = from_clause.join(
            kcache_query,
            kcache_query.c.ts == c.ts)
    else:
        cols.extend([
            case([(total_blocks == 0, 0)],
                 else_=cast(c.shared_blks_read, Numeric) * 100 /
                 total_blocks).label("miss_ratio")
        ])

    return (select(cols)
            .select_from(from_clause)
            .where(c.calls != None)
            .order_by(c.ts)
            .params(samples=100))
def inactive(cls):
    """SQL expression: True when the row is flagged inactive or its
    inactive_date lies in the past, False otherwise."""
    date_expired = sasql.and_(
        cls.inactive_date.isnot(None),
        cls.inactive_date < datetime.now()
    )
    is_inactive = sasql.or_(cls.inactive_flag == sa.true(), date_expired)
    return sasql.case([(is_inactive, True)], else_=False).label('inactive')
def downgrade():
    """Revert the 'sort' musiconhold option to the old boolean 'random'."""
    # 'random' sort mode maps back to yes; any other sort value to no.
    was_random = sql.case(
        [(musiconhold.c.var_val == 'random', 'yes')],
        else_='no')
    op.execute(
        musiconhold.update()
        .where(musiconhold.c.var_name == 'sort')
        .values(var_name='random', var_val=was_random))
def update(self):
    """Prepare the three-month measures overview for rendering.

    Collects every action plan of the current session that is active in
    the current month or the following two, groups it per main module
    and per risk priority, and stores the result on ``self.modules``
    for the template.  Also computes translated month labels and page
    labels in the request language.
    """
    super(MeasuresOverview, self).update()
    lang = getattr(self.request, "LANGUAGE", "en")
    if "-" in lang:
        # Only the primary language subtag is used for translation.
        lang = lang.split("-")[0]
    # Prefer the stored session title unless it matches the context title.
    if self.session is not None and self.session.title != (
        callable(getattr(self.context, "Title", None)) and self.context.Title() or ""
    ):
        self.session_title = self.session.title
    else:
        self.session_title = (
            callable(getattr(self.context, "Title", None)) and self.context.Title() or ""
        )
    today = date.today()
    this_month = date(today.year, today.month, 1)
    self.label_page = translate(
        _(u"label_page", default=u"Page"), target_language=lang
    )
    self.label_page_of = translate(
        _(u"label_page_of", default=u"of"), target_language=lang
    )

    def get_next_month(this_month):
        # First day of the month following *this_month* (handles Dec→Jan).
        month = this_month.month + 1
        year = this_month.year
        if month == 13:
            month = 1
            year = year + 1
        return date(year, month, 1)

    next_month = get_next_month(this_month)
    month_after_next = get_next_month(next_month)
    self.months = []
    self.months.append(today.strftime("%b"))
    self.months.append(next_month.strftime("%b"))
    self.months.append(month_after_next.strftime("%b"))
    # Translated month abbreviations via the plonelocales message ids.
    self.monthstrings = [
        translate(
            PloneLocalesFactory(
                "month_{0}_abbr".format(month.lower()),
                default=month,
            ),
            target_language=lang,
        )
        for month in self.months
    ]
    # All (module, risk, action plan) triples of this session, ordered
    # by priority (high, medium, then rest) and tree position.
    query = (
        Session.query(model.Module, model.Risk, model.ActionPlan)
        .select_from(model.Module)
        .filter(
            sql.and_(
                model.Module.session == self.session,
                model.Module.profile_index > -1,
            )
        )
        .filter(sql.not_(model.SKIPPED_PARENTS))
        .filter(
            sql.or_(
                model.MODULE_WITH_RISK_OR_TOP5_FILTER,
                model.RISK_PRESENT_OR_TOP5_FILTER,
            )
        )
        .join(model.Risk, model.Risk.parent_id == model.Module.id)
        .join(model.ActionPlan, model.ActionPlan.risk_id == model.Risk.id)
        .order_by(
            sql.case(
                value=model.Risk.priority, whens={"high": 0, "medium": 1}, else_=2
            ),
            model.Risk.path,
        )
    )
    # Keep plans that start or end within the 3-month window, or span it
    # entirely, for risks that are actually present and have some
    # substance (responsible/requirements/budget/action filled in).
    measures = [
        t
        for t in query.all()
        if (
            (
                (
                    t[-1].planning_start is not None
                    and t[-1].planning_start.strftime("%b") in self.months
                )
                or (
                    t[-1].planning_end is not None
                    and t[-1].planning_end.strftime("%b") in self.months
                )
                or (
                    t[-1].planning_start is not None
                    and (
                        t[-1].planning_end is None
                        or t[-1].planning_end >= month_after_next
                    )
                    and t[-1].planning_start <= this_month
                )
            )
            and t[1].identification not in ("n/a", "yes")
            and (
                t[-1].responsible is not None
                or t[-1].requirements is not None
                or t[-1].budget is not None
                or t[-1].action is not None
            )
        )
    ]
    modulesdict = defaultdict(lambda: defaultdict(list))
    for module, risk, action in measures:
        if "custom-risks" not in risk.zodb_path:
            # For survey risks use the problem description when available.
            risk_obj = self.survey.restrictedTraverse(risk.zodb_path.split("/"))
            title = risk_obj and risk_obj.problem_description or risk.title
        else:
            title = risk.title
        # Per displayed month: a CSS class describing where this plan is
        # in its lifecycle ("start", "end", "start-end", "ongoing" or None).
        classes = []
        start_month = action.planning_start and date(
            action.planning_start.year, action.planning_start.month, 1
        )
        end_month = action.planning_end and date(
            action.planning_end.year, action.planning_end.month, 1
        )
        for m in [this_month, next_month, month_after_next]:
            cls = None
            if start_month:
                if start_month == m:
                    cls = "start"
                if end_month:
                    if end_month == m:
                        # Same-month start and end collapses to one marker.
                        if end_month == (start_month is not None and start_month):
                            cls = "start-end"
                        else:
                            cls = "end"
                    elif start_month < m and end_month > m:
                        cls = "ongoing"
                elif start_month < m:
                    # Open-ended plan already started.
                    cls = "ongoing"
            elif end_month:
                if end_month == m:
                    cls = "end"
                elif end_month > m:
                    cls = "ongoing"
            classes.append(cls)
        modulesdict[module][risk.priority].append(
            {
                "title": title,
                "description": action.action,
                "months": [
                    (
                        action.planning_start
                        and action.planning_start.month == m.month
                    )
                    or (
                        action.planning_end
                        and action.planning_end.month == m.month
                    )
                    for m in [today, next_month, month_after_next]
                ],
                "classes": classes,
            }
        )
    # Fold modules into their "main" (top-level) module, merging the
    # per-priority risk lists.
    main_modules = {}
    for module, risks in sorted(modulesdict.items(), key=lambda m: m[0].zodb_path):
        module_obj = self.survey.restrictedTraverse(module.zodb_path.split("/"))
        if (
            IProfileQuestion.providedBy(module_obj)
            or ICustomRisksModule.providedBy(module_obj)
            or module.depth >= 3
        ):
            # Profile/custom modules group one level deeper in the tree.
            path = module.path[:6]
        else:
            path = module.path[:3]
        if path in main_modules:
            for prio in risks.keys():
                if prio in main_modules[path]["risks"]:
                    main_modules[path]["risks"][prio].extend(risks[prio])
                else:
                    main_modules[path]["risks"][prio] = risks[prio]
        else:
            title = module.title
            number = module.number
            main_modules[path] = {"name": title, "number": number, "risks": risks}
    # Expose the grouped modules in tree order for the template.
    self.modules = []
    for key in sorted(main_modules.keys()):
        self.modules.append(main_modules[key])
def compile_if_else(test, true_expression, false_expression):
    """Translate an if/else pair into a SQL CASE WHEN expression."""
    whens = [(test, true_expression)]
    return sql.case(whens, else_=false_expression)
def test_string_literals_not_oldstyle_quoted_in_func():
    """A string literal inside a CASE must not be emitted with an
    unescaped (old-style) embedded quote by the BigQuery dialect."""
    expr = sql.case([(sql.column("WHOA") == "Jason's", 1)], else_=0)
    compiled = expr.compile(
        compile_kwargs={"literal_binds": True},
        dialect=bq.BQDialect())
    assert "Jason's" not in str(compiled)
def is_disabled_by_date(cls):
    """SQL expression: true when disabled_utc is set and not in the future."""
    has_disable_date = cls.disabled_utc.isnot(None)
    date_reached = cls.disabled_utc <= arrow.utcnow()
    disabled = sa.sql.and_(has_disable_date, date_reached)
    return sa_sql.case([(disabled, sa.true())], else_=sa.false())
def update(self):
    """Prepare the three-month measures overview for rendering.

    Collects every action plan of the current session that ends within
    the current month or the following two, groups it per main module
    and per risk priority, and stores the result on ``self.modules``
    for the template.  Also computes translated month labels.
    """
    self.verify_view_permission()
    super(MeasuresOverview, self).update()
    lang = getattr(self.request, "LANGUAGE", "en")
    if "-" in lang:
        # Only the primary language subtag is used for translation.
        lang = lang.split("-")[0]
    now = datetime.now()
    # NOTE(review): the year is not incremented on December wrap-around,
    # so e.g. in December "next_month" is January of the SAME year.
    # Only .month and %b are used below, so this looks harmless — but
    # confirm before relying on these values for anything date-ordered.
    next_month = datetime(now.year, (now.month + 1) % 12 or 12, 1)
    month_after_next = datetime(now.year, (now.month + 2) % 12 or 12, 1)
    self.months = []
    self.months.append(now.strftime("%b"))
    self.months.append(next_month.strftime("%b"))
    self.months.append(month_after_next.strftime("%b"))
    # Translated month abbreviations via the plonelocales message ids.
    self.monthstrings = [
        translate(
            PloneLocalesMessageFactory("month_{0}_abbr".format(
                month.lower()), default=month),
            target_language=lang,
        ) for month in self.months
    ]
    # All (module, risk, action plan) triples of this session, ordered
    # by priority (high, medium, then rest) and tree position.
    query = (Session.query(
        Module, Risk, ActionPlan).select_from(Module).filter(
            sql.and_(
                Module.session == self.session,
                Module.profile_index > -1)).filter(sql.not_(SKIPPED_PARENTS)).filter(
                    sql.or_(
                        MODULE_WITH_RISK_OR_TOP5_FILTER,
                        RISK_PRESENT_OR_TOP5_FILTER)).join(
                            Risk, Risk.parent_id == Module.id).join(
                                ActionPlan, ActionPlan.risk_id == Risk.id).order_by(
                                    sql.case(value=Risk.priority, whens={
                                        "high": 0,
                                        "medium": 1
                                    }, else_=2),
                                    Risk.path,
                                ))
    # Keep plans ending inside the 3-month window that carry at least
    # one piece of substance (start/responsible/plan/requirements/...).
    measures = [
        t for t in query.all()
        if ((t[-1].planning_end is not None and
             t[-1].planning_end.strftime("%b") in self.months) and
            (t[-1].planning_start is not None or
             t[-1].responsible is not None or
             t[-1].prevention_plan is not None or
             t[-1].requirements is not None or
             t[-1].budget is not None or
             t[-1].action_plan is not None))
    ]
    modulesdict = defaultdict(lambda: defaultdict(list))
    for module, risk, action in measures:
        if "custom-risks" not in risk.zodb_path:
            # For survey risks use the problem description when available.
            risk_obj = self.context.restrictedTraverse(
                risk.zodb_path.split("/"))
            title = risk_obj and risk_obj.problem_description or risk.title
        else:
            title = risk.title
        modulesdict[module][risk.priority or "low"].append({
            "title": title,
            "description": action.action_plan,
            # One flag per displayed month: does the plan end in it?
            "months": [
                action.planning_end and action.planning_end.month == m.month
                for m in [now, next_month, month_after_next]
            ],
        })
    # Fold modules into their "main" (top-level) module, merging the
    # per-priority risk lists.
    main_modules = {}
    for module, risks in sorted(modulesdict.items(), key=lambda m: m[0].zodb_path):
        module_obj = self.context.restrictedTraverse(
            module.zodb_path.split("/"))
        if (IProfileQuestion.providedBy(module_obj)
                or ICustomRisksModule.providedBy(module_obj)
                or module.depth >= 3):
            # Profile/custom modules group one level deeper in the tree.
            path = module.path[:6]
        else:
            path = module.path[:3]
        if path in main_modules:
            for prio in risks.keys():
                if prio in main_modules[path]["risks"]:
                    main_modules[path]["risks"][prio].extend(risks[prio])
                else:
                    main_modules[path]["risks"][prio] = risks[prio]
        else:
            title = module.title
            number = module.number
            if "custom-risks" in module.zodb_path:
                # Custom-risk modules are numbered with a leading Ω.
                num_elems = number.split(".")
                number = ".".join(["Ω"] + num_elems[1:])
                title = api.portal.translate(_(title))
            main_modules[path] = {
                "name": title,
                "number": number,
                "risks": risks
            }
    # Expose the grouped modules in tree order for the template.
    self.modules = []
    for key in sorted(main_modules.keys()):
        self.modules.append(main_modules[key])
def upgrade():
    """Rename the boolean 'random' musiconhold option to 'sort'.

    'yes' becomes the 'random' sort mode; any other value maps to NULL
    (no else_ branch in the CASE), i.e. the default sort order.
    """
    sort_value = sql.case([(musiconhold.c.var_val == 'yes', 'random')])
    op.execute(
        musiconhold.update()
        .where(musiconhold.c.var_name == 'random')
        .values(var_name='sort', var_val=sort_value))
def _if_else(cond, true_vals, false_vals):
    """Build a SQL CASE expressing ``true_vals if cond else false_vals``."""
    return sql.case([(cond, true_vals)], else_=false_vals)
def name(cls):
    """SQL expression for the full name: first and last name joined by a
    single space, with no stray space when either part is missing."""
    first = sasql.functions.coalesce(cls.name_first, u'')
    last = sasql.functions.coalesce(cls.name_last, u'')
    # Emit a separator only when both halves are non-empty.
    either_empty = sasql.or_(first == u'', last == u'')
    separator = sasql.case([(either_empty, u'')], else_=u' ')
    return (first + separator + last).label('name')