def test_custom_queries(self):
    """Exercise hand-written SQLAlchemy queries against the dummy GTFS feed."""
    dao = Dao(DAO_URL, sql_logging=SQL_LOG)
    dao.load_gtfs(DUMMY_GTFS)

    # A simple custom query: count the number of stops per type (stop/station)
    # SQL equivalent: SELECT stop.location_type, count(stop.location_type) FROM stop GROUP BY stop.location_type
    # `location_type` instead of `type` to avoid shadowing the builtin.
    for location_type, stop_count in dao.session() \
            .query(Stop.location_type, func.count(Stop.location_type)) \
            .group_by(Stop.location_type) \
            .all():
        if location_type == Stop.TYPE_STATION:
            # assertEqual reports the actual value on failure, unlike assertTrue(==).
            self.assertEqual(stop_count, 3)
        if location_type == Stop.TYPE_STOP:
            self.assertTrue(15 < stop_count < 30)

    # A more complex custom query: count the number of trips per calendar date
    # per route on june/july
    from_date = CalendarDate.ymd(2016, 6, 1)
    to_date = CalendarDate.ymd(2016, 7, 31)
    for date, route, trip_count in dao.session() \
            .query(CalendarDate.date, Route, func.count(Trip.trip_id)) \
            .join(Calendar).join(Trip).join(Route) \
            .filter((func.date(CalendarDate.date) >= from_date.date) & (func.date(CalendarDate.date) <= to_date.date)) \
            .group_by(CalendarDate.date, Route.route_short_name) \
            .all():
        self.assertTrue(date >= from_date.as_date())
        self.assertTrue(date <= to_date.as_date())
        self.assertTrue(trip_count > 0)
def payments():
    """Admin view: every payment together with its number of purchases."""
    purchase_count = func.count(Purchase.id).label('purchase_count')
    rows = (Payment.query
            .join(Purchase)
            .with_entities(Payment, purchase_count)
            .group_by(Payment)
            .order_by(Payment.id)
            .all())
    return render_template('admin/payments/payments.html', payments=rows)
def _get_sitemap_index():
    """Build the sitemap index: one entry per sitemap page per document type."""
    rows = DBSession. \
        query(Document.type, func.count().label('count')). \
        join(
            DocumentLocale,
            Document.document_id == DocumentLocale.document_id). \
        filter(Document.type != USERPROFILE_TYPE). \
        group_by(Document.type). \
        all()

    sitemaps = []
    for doc_type, locale_count in rows:
        # Each sitemap page holds at most PAGES_PER_SITEMAP documents.
        for page in range(ceil(locale_count / PAGES_PER_SITEMAP)):
            sitemaps.append({
                'url': '/sitemaps/{}/{}'.format(doc_type, page),
                'doc_type': doc_type,
                'i': page
            })

    return {'sitemaps': sitemaps}
def selectables(cls, bag, agg_spec):
    """ Create a list of statements from spec

    :type bag: mongosql.bag.ModelPropertyBags
    :rtype: list[sqlalchemy.sql.elements.ColumnElement]
    """
    # TODO: calculation expressions for selection: http://docs.mongodb.org/manual/meta/aggregation-quick-reference/
    selectables = []
    for comp_field, comp_expression in agg_spec.items():
        # Column reference
        if isinstance(comp_expression, basestring):
            selectables.append(bag.columns[comp_expression].label(comp_field))
            continue

        # Computed expression
        assert isinstance(comp_expression, dict), 'Aggregate: Expression should be either a column name, or an object'
        assert len(comp_expression) == 1, 'Aggregate: expression can only contain a single operator'
        # BUGFIX: was `comp_expression.popitem()`, which mutates the caller's
        # agg_spec in place and empties it; read the single pair non-destructively.
        operator, expression = next(iter(comp_expression.items()))

        # Expression statement
        if isinstance(expression, int) and operator == '$sum':
            # Special case for count
            expression_stmt = expression
        elif isinstance(expression, basestring):
            # Column name
            expression_stmt = bag.columns[expression]
            # Json column?
            if bag.columns.is_column_json(expression):
                # PostgreSQL always returns text values from it, and for aggregation we usually need numbers :)
                expression_stmt = cast(expression_stmt, Float)
        elif isinstance(expression, dict):
            # Boolean expression
            expression_stmt = MongoCriteria.statement(bag, expression)
            # Need to cast it to int
            expression_stmt = cast(expression_stmt, Integer)
        else:
            raise AssertionError('Aggregate: expression should be either a column name, or an object')

        # Operator
        if operator == '$max':
            comp_stmt = func.max(expression_stmt)
        elif operator == '$min':
            comp_stmt = func.min(expression_stmt)
        elif operator == '$avg':
            comp_stmt = func.avg(expression_stmt)
        elif operator == '$sum':
            if isinstance(expression_stmt, int):
                # Special case for count
                comp_stmt = func.count()
                if expression_stmt != 1:
                    comp_stmt *= expression_stmt
            else:
                comp_stmt = func.sum(expression_stmt)
        else:
            raise AssertionError('Aggregate: unsupported operator "{}"'.format(operator))

        # Append
        selectables.append(comp_stmt.label(comp_field))
    return selectables
def tees():
    """Admin view: per-user, per-product counts of tee purchases."""
    query = (ProductGroup.query
             .filter_by(name='tees')
             .join(Product, Purchase, Purchase.owner)
             .group_by(User.id, Product.id)
             .with_entities(User, Product, func.count(Purchase.id))
             .order_by(User.name))
    return render_template('admin/products/tee-purchases.html', purchases=query)
def invoice(payment_id):
    """Render the invoice page for a user's payment.

    Groups the payment's tickets by type, adds the provider premium (Stripe
    only) and 20% VAT, and sanity-checks the computed total against the
    amount actually charged before rendering.
    """
    # Aborts unless the payment belongs to the current user (admins allowed).
    payment = get_user_payment_or_abort(payment_id, allow_admin=True)

    # One invoice line per ticket type: (TicketType, number of tickets).
    invoice_lines = payment.tickets.join(TicketType). \
        with_entities(TicketType, func.count(Ticket.type_id)). \
        group_by(TicketType).order_by(TicketType.order).all()

    ticket_sum = sum(tt.get_price_ex_vat(payment.currency) * count for tt, count in invoice_lines)
    if payment.provider == 'stripe':
        # Stripe payments carry an extra premium on top of the ticket price.
        premium = payment.__class__.premium(payment.currency, ticket_sum)
    else:
        premium = Decimal(0)

    subtotal = ticket_sum + premium
    # Flat 20% VAT rate.
    vat = subtotal * Decimal('0.2')
    app.logger.debug('Invoice subtotal %s + %s = %s', ticket_sum, premium, subtotal)

    # FIXME: we should use a currency-specific quantization here (or rounder numbers)
    if subtotal + vat - payment.amount > Decimal('0.01'):
        # Recomputed total disagrees with what was charged; refuse to show a wrong invoice.
        app.logger.error('Invoice total mismatch: %s + %s - %s = %s',
                         subtotal, vat, payment.amount, subtotal + vat - payment.amount)
        flash('Your invoice cannot currently be displayed')
        return redirect(url_for('tickets.main'))

    app.logger.debug('Invoice total: %s + %s = %s', subtotal, vat, payment.amount)

    # The invoice is due by the earliest ticket expiry date.
    due_date = min(t.expires for t in payment.tickets)

    return render_template('invoice.html', payment=payment, invoice_lines=invoice_lines,
                           premium=premium, subtotal=subtotal, vat=vat, due_date=due_date)
def getStats(klass):
    """Return a dict mapping assurance name to the row count for *klass*."""
    rows = (klass.query
            .with_entities(Assurance.name)
            .add_column(func.count(klass.name))
            .group_by(klass.name)
            .all())
    return {name: count for name, count in rows}
def storage(page):
    """Paginated stock overview, one row per nomenclature.

    SQL equivalent:
        SELECT goods.nomenclature_id AS goods_nomenclature_id,
               goods.attribute_id AS goods_attribute_id,
               goods.incoming_price AS goods_incoming_price,
               count(*) AS counts,
               attributes.name AS attr_name,
               nomenclatures.internal_code AS nomenclatures_internal_code,
               nomenclatures.name AS nom_name
        FROM goods
        JOIN nomenclatures ON nomenclatures.id = goods.nomenclature_id
        JOIN attributes ON attributes.id = goods.attribute_id
        WHERE goods.outgoing_date IS NULL AND goods.paid = 1
        GROUP BY goods.nomenclature_id, goods.attribute_id, goods.incoming_price
    """
    # In-stock goods only: paid, with no outgoing date; 10 rows per page.
    pagination = Goods.query.\
        with_entities(Goods.nomenclature_id,
                      func.count().label('counts'),
                      Nomenclatures.internal_code,
                      Nomenclatures.name.label('nom_name')).\
        join(Nomenclatures).\
        join(Attributes).\
        filter(func.isnull(Goods.outgoing_date), Goods.paid == True).\
        group_by(Goods.nomenclature_id).order_by(Nomenclatures.internal_code.asc()).paginate(page, 10)
    return render_template('b_acc/storage.html', pagination=pagination)
def payments():
    """Admin view: all payments with earliest ticket expiry and ticket count."""
    first_expires = func.min(Ticket.expires).label('first_expires')
    ticket_count = func.count(Ticket.id).label('ticket_count')
    rows = (Payment.query
            .join(Ticket)
            .with_entities(Payment, first_expires, ticket_count)
            .group_by(Payment)
            .order_by(Payment.id)
            .all())
    return render_template('admin/payments.html', payments=rows)
def tees():
    """Admin view: per-user counts of paid tickets for tee ticket types (fixed ids 14-23)."""
    query = (TicketType.query
             .join(Ticket, User)
             .filter(TicketType.fixed_id.in_(range(14, 24)))
             .filter(Ticket.paid.is_(True))
             .group_by(User, TicketType)
             .with_entities(User, TicketType, func.count(Ticket.id))
             .order_by(User.name, TicketType.order))
    return render_template('admin/tee-tickets.html', tickets=query)
def api_property(context, request):
    """Return the requested Property, annotated with its five most-used review tags.

    Uses ``one()``, so a missing or duplicate id raises the corresponding
    SQLAlchemy error.
    """
    # Renamed from `property` to avoid shadowing the builtin.
    prop = (DBSession.query(Property)
            .filter(Property.id == context.property_id)
            .one())

    # Top 5 tags across this property's reviews, most frequent first.
    tag_counts = (DBSession.query(Tag.text, func.count(Tag.id))
                  .select_from(Review)
                  .join(Review.tags)
                  .filter(Review.property_id == prop.id)
                  .group_by(Tag.id)
                  .order_by(func.count(Tag.id).desc())
                  .limit(5))

    prop.top_tags = {text: count for text, count in tag_counts.all()}
    return prop
def nrfa_qmed_catchments(db_session):
    """Catchments suitable for QMED analysis: located centroid, urbext2000
    below 0.03 (or unknown), and at least 10 AMAX records."""
    query = (db_session.query(Catchment)
             .join(Descriptors)
             .join(Catchment.amax_records)
             .filter(Catchment.is_suitable_for_qmed,
                     Descriptors.centroid_ngr != None,
                     or_(Descriptors.urbext2000 < 0.03,
                         Descriptors.urbext2000 == None))
             .group_by(Catchment)
             .having(func.count(AmaxRecord.catchment_id) >= 10))
    return query.all()
def get_hires():
    """Per-user, per-product, per-state counts of active hire purchases."""
    active_states = ['paid', 'payment-pending', 'receipt-emailed']
    return (ProductGroup.query.filter_by(type='hire')
            .join(Product, Purchase, Purchase.owner)
            .group_by(User.id, Product.id, Purchase.state)
            .filter(Purchase.state.in_(active_states))
            .with_entities(User, Product, Purchase.state, func.count(Purchase.id))
            .order_by(User.name, Product.name))
def query_aggregate(self, ctx, drills, cuts, limit=5000):
    """Run an aggregation query against the backing SQL table.

    Each measure mapping contributes avg and sum columns plus a shared record
    count; each drilled dimension becomes a selected + GROUP BY column; each
    cut becomes an equality filter.

    :param drills: iterable of dimension names to group by.
    :param cuts: mapping of {dimension name: value} equality filters.
    :param limit: NOTE(review): currently ignored — the query below is
        hard-coded to ``limit(5000)``.
    :return: list of result rows.
    """
    mappings = self.sql_mappings(ctx)
    joins = self.sql_joins(ctx, None)  # NOTE(review): computed but not used below
    pk = self.pk(ctx)  # NOTE(review): computed but not used below
    connection = self.sqltable.connection.connection()
    engine = self.sqltable.connection._engine

    # Build query
    Session = sessionmaker()
    Session.configure(bind=engine)
    session = Session()
    q = session.query()
    #q = q.add_columns(self.sqltable.sa_table.columns['is_bot_id'].label("x"))
    #q = q.add_entity(self.sqltable.sa_table)

    # Include measures: one avg and one sum column per measure mapping.
    for measure in [m for m in mappings if isinstance(m.field, Measure)]:
        sa_column = self.sqltable.sa_table.columns[measure.sqlcolumn.name]
        q = q.add_columns(func.avg(sa_column).label(measure.field.name + "_avg"))
        q = q.add_columns(func.sum(sa_column).label(measure.field.name + "_sum"))
    q = q.add_columns(func.count(self.sqltable.sa_table).label("record_count"))

    # Drills
    for dimension in [m for m in mappings if isinstance(m.field, Dimension)]:
        # We should check the dimension-path here, with drills, and use key/lookup for drill
        if dimension.field.name in drills:
            sa_column = None
            try:
                sa_column = self.sqltable.sa_table.columns[dimension.sqlcolumn.name]
            except KeyError as e:
                raise ETLException("Unknown column in backend SQL table (table=%s, column=%s). Columns: %s" % (self.sqltable.sa_table, dimension.sqlcolumn.name, [c.name for c in self.sqltable.sa_table.columns]))
            q = q.add_columns(sa_column)
            q = q.group_by(sa_column)

    # Cuts
    # TODO: Filtering on any dimension attribute, not only the key
    # (ie filter cities with type A or icon B), but then again
    # that could be a different (nested) dimension.
    for dimension in [m for m in mappings if isinstance(m.field, Dimension)]:
        # We should check the dimension-path here, with drills
        if dimension.field.name in cuts.keys():
            sa_column = self.sqltable.sa_table.columns[dimension.sqlcolumn.name]
            cut_value = cuts[dimension.field.name]
            q = q.filter(sa_column==cut_value)

    # Limit
    q = q.limit(5000)

    statement = q.statement
    logger.debug("Statement: %s", str(statement).replace("\n", " "))
    rows = connection.execute(statement).fetchall()
    return rows
def most_similar_catchments(self, subject_catchment, similarity_dist_function, records_limit=500,
                            include_subject_catchment='auto'):
    """
    Return a list of catchments sorted by hydrological similarity defined by `similarity_distance_function`

    :param subject_catchment: subject catchment to find similar catchments for
    :type subject_catchment: :class:`floodestimation.entities.Catchment`
    :param similarity_dist_function: a method returning a similarity distance measure with 2 arguments, both
                                     :class:`floodestimation.entities.Catchment` objects
    :param records_limit: approximate total number of AMAX records to accumulate across the returned
                          catchments (default 500)
    :type records_limit: int
    :param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext < 0.03
                                      - `force`: always include subject catchment having at least 10 years of data
                                      - `exclude`: do not include the subject catchment
    :type include_subject_catchment: str
    :return: list of catchments sorted by similarity
    :type: list of :class:`floodestimation.entities.Catchment`
    """
    if include_subject_catchment not in ['auto', 'force', 'exclude']:
        raise ValueError("Parameter `include_subject_catchment={}` invalid.".format(include_subject_catchment) +
                         "Must be one of `auto`, `force` or `exclude`.")

    # Candidate pool: suitable for pooling, urbext2000 < 0.03 or unknown,
    # only valid (flag == 0) AMAX records, excluding the subject catchment.
    query = (self.db_session.query(Catchment).
             join(Catchment.descriptors).
             join(Catchment.amax_records).
             filter(Catchment.id != subject_catchment.id,
                    Catchment.is_suitable_for_pooling,
                    or_(Descriptors.urbext2000 < 0.03, Descriptors.urbext2000 == None),
                    AmaxRecord.flag == 0).
             group_by(Catchment).
             having(func.count(AmaxRecord.catchment_id) >= 10))  # At least 10 AMAX records
    catchments = query.all()

    # Add subject catchment if required (may not exist in database, so add after querying db)
    if include_subject_catchment == 'force':
        if len(subject_catchment.amax_records) >= 10:  # Never include short-record catchments
            catchments.append(subject_catchment)
    elif include_subject_catchment == 'auto':
        if len(subject_catchment.amax_records) >= 10 and subject_catchment.is_suitable_for_pooling and \
                (subject_catchment.descriptors.urbext2000 < 0.03 or
                 subject_catchment.descriptors.urbext2000 is None):
            catchments.append(subject_catchment)

    # Store the similarity distance as an additional attribute for each catchment
    for catchment in catchments:
        catchment.similarity_dist = similarity_dist_function(subject_catchment, catchment)
    # Then simply sort by this attribute
    catchments.sort(key=attrgetter('similarity_dist'))

    # Limit catchments until total amax_records counts is at least `records_limit`, default 500
    amax_records_count = 0
    catchments_limited = []
    for catchment in catchments:
        catchments_limited.append(catchment)
        amax_records_count += catchment.record_length
        if amax_records_count >= records_limit:
            break
    return catchments_limited
def expiring():
    """Admin view: in-progress bank payments expiring within three days."""
    cutoff = datetime.utcnow() + timedelta(days=3)
    rows = (BankPayment.query
            .join(Purchase)
            .filter(BankPayment.state == 'inprogress',
                    BankPayment.expires < cutoff)
            .with_entities(BankPayment,
                           func.count(Purchase.id).label('purchase_count'))
            .group_by(BankPayment)
            .order_by(BankPayment.expires)
            .all())
    return render_template('admin/payments/payments-expiring.html', expiring=rows)
def count_periods_const(self, session, cost_filter):
    """Count periods whose cost falls into consecutive half-open cost buckets.

    The buckets are [1, cost_filter[0]), [cost_filter[0], cost_filter[1]), ...
    — each bucket's upper bound becomes the next bucket's lower bound.

    :param session: SQLAlchemy session used to run the count queries.
    :param cost_filter: iterable of ascending bucket upper bounds.
    :return: list with one ``query(...).all()`` result per bucket.
    """
    # (Removed the dead `high = 100` initialiser: `high` was always
    # reassigned before use.)
    low = 1
    count = []
    for high in cost_filter:
        count.append(session.query(func.count(Period.pid))
                     .filter(Period.cost >= low, Period.cost < high).all())
        low = high
    return count
def do_run(self, output=sys.stdout): self.out = output # Within the past 24 hours, how many new LicensePools became # available? This represents new registrations coming in. qu = self._db.query( Identifier.type, func.count(func.distinct(LicensePool.id)) ) new_pools = qu.select_from(LicensePool).join(LicensePool.identifier) self.report_the_past( "New LicensePools (~registrations)", new_pools, LicensePool.availability_time ) self.write() # Within the past 24 hours, how many Works were updated? # This represents work being done to achieve coverage. qu = self._db.query(Identifier.type, func.count(func.distinct(Work.id))) updated_works = qu.select_from(Work).join(Work.license_pools).join( LicensePool.identifier ) self.report_the_past( "Updated Works (~coverage)", updated_works, Work.last_update_time ) self.write() # For each catalog, how many Identifiers have Works and how # many don't? This is a rough proxy for 'the basic tasks have # been done and we can improve the data at our leisure.' self.write("Current coverage:") total_done = Counter() total_not_done = Counter() types = set() for collection in self._db.query(Collection).order_by(Collection.id): done, not_done = self.report_backlog(collection) for type, count in done.items(): total_done[type] += count types.add(type) for type, count in not_done.items(): total_not_done[type] += count types.add(type) self.write("\n Totals:") self.report_backlog(None)
def admin_expiring():
    """Admin view: in-progress bank payments whose tickets expire within three days."""
    deadline = datetime.utcnow() + timedelta(days=3)
    rows = (BankPayment.query
            .join(Ticket)
            .filter(BankPayment.state == 'inprogress',
                    Ticket.expires < deadline)
            .with_entities(BankPayment,
                           func.min(Ticket.expires).label('first_expires'),
                           func.count(Ticket.id).label('ticket_count'))
            .group_by(BankPayment)
            .order_by('first_expires')
            .all())
    return render_template('admin/payments-expiring.html', expiring=rows)
def requested_refunds():
    """Admin view: payments awaiting a refund, with their purchase counts."""
    rows = (Payment.query
            .filter_by(state='refund-requested')
            .join(Purchase)
            .with_entities(Payment,
                           func.count(Purchase.id).label('purchase_count'))
            .group_by(Payment)
            .order_by(Payment.id)
            .all())
    return render_template('admin/payments/requested_refunds.html', payments=rows)
def count_articles(_author_id, _catalog_id):
    """Count articles written by *_author_id*, optionally within *_catalog_id*.

    :param _author_id: author to count articles for.
    :param _catalog_id: optional catalog filter; falsy means "all catalogs".
    :return: number of matching articles (int).
    """
    with get_scoped_db_session(False) as _dbss:
        # BUGFIX: the original had a stray trailing backslash after this call,
        # which joined the statement into the following `if` (a SyntaxError).
        _query = _dbss.query(func.count(Article.id))
        if _catalog_id:
            # Join through the catalog relationship to filter on catalog id.
            _query = _query.join(Article.catalogs)\
                .filter(Article.author_id == _author_id)\
                .filter(ArticleCatalog.id == _catalog_id)
        else:
            _query = _query.filter(Article.author_id == _author_id)
        _count = _query.scalar()
        return _count
def storage_attributes(nomenclature_id, page):
    """Paginated per-attribute/per-price stock breakdown for one nomenclature.

    SQL equivalent:
        SELECT goods.nomenclature_id AS goods_nomenclature_id,
               goods.attribute_id AS goods_attribute_id,
               nomenclatures.internal_code AS nomenclatures_internal_code,
               nomenclatures.name AS nom_name,
               count(*) AS counts,
               goods.incoming_price AS goods_incoming_price,
               attributes.name AS attr_name
        FROM goods
        JOIN nomenclatures ON nomenclatures.id = goods.nomenclature_id
        JOIN attributes ON attributes.id = goods.attribute_id
        WHERE isnull(goods.outgoing_date) AND isnull(goods.outgoing_price)
          AND goods.nomenclature_id = :nomenclature_id_1
        GROUP BY goods.attribute_id, goods.incoming_price
        ORDER BY goods.attribute_id, goods.incoming_price
    """
    nomenclature = Nomenclatures.query.filter_by(id=nomenclature_id).first()
    if not nomenclature:
        # Unknown nomenclature id: warn and return to the storage overview.
        flash(u'Аттрибуты и цены: номенклатуры не существует', 'danger')
        return redirect(url_for('b_acc.storage'))

    # In-stock goods only (no outgoing date or price) for this nomenclature,
    # grouped by attribute and incoming price.
    attributes = Goods.query.with_entities(Goods.nomenclature_id,
                                           Goods.attribute_id,
                                           Nomenclatures.internal_code,
                                           Nomenclatures.name.label('nom_name'),
                                           func.count().label('counts'),
                                           Goods.incoming_price,
                                           Attributes.name.label('attr_name')).\
        join(Nomenclatures).\
        join(Attributes).\
        filter(func.isnull(Goods.outgoing_date),
               func.isnull(Goods.outgoing_price),
               Goods.nomenclature_id == nomenclature_id).\
        group_by(Goods.attribute_id, Goods.incoming_price).\
        order_by(Goods.attribute_id, Goods.incoming_price)

    # 10 rows per page.
    pagination = attributes.paginate(page, 10)
    return render_template('b_acc/storage_attributes.html', nomenclature=nomenclature, pagination=pagination)
def invoice(payment_id):
    """Render (or update the company name on) a payment's invoice.

    Handles the company-name edit form, recomputes invoice lines and totals,
    sanity-checks them against the charged amount, and optionally returns a
    PDF rendering of the page.
    """
    payment = get_user_payment_or_abort(payment_id, allow_admin=True)

    form = InvoiceForm()
    if form.validate_on_submit():
        # Valid POST: save the company name and reload the invoice.
        current_user.company = form.company.data
        db.session.commit()
        flash('Company name updated')
        return redirect(url_for('.invoice', payment_id=payment_id))

    if request.method != 'POST':
        # Pre-fill the form on GET.
        form.company.data = current_user.company

    edit_company = bool(request.args.get('edit_company'))

    if request.args.get('js') == '0':
        flash("Please use your browser's print feature or download the PDF")

    # One invoice line per price tier: (PriceTier, number of purchases).
    invoice_lines = Purchase.query.filter_by(payment_id=payment_id).join(PriceTier, Product) \
        .with_entities(PriceTier, func.count(Purchase.price_tier_id)) \
        .group_by(PriceTier, Product.name).order_by(Product.name).all()

    ticket_sum = sum(pt.get_price(payment.currency).value_ex_vat * count for pt, count in invoice_lines)
    if payment.provider == 'stripe':
        # Stripe payments carry an extra premium on top of the ticket price.
        premium = payment.__class__.premium(payment.currency, ticket_sum)
    else:
        premium = Decimal(0)

    subtotal = ticket_sum + premium
    # Flat 20% VAT rate.
    vat = subtotal * Decimal('0.2')
    app.logger.debug('Invoice subtotal %s + %s = %s', ticket_sum, premium, subtotal)

    # FIXME: we should use a currency-specific quantization here (or rounder numbers)
    if subtotal + vat - payment.amount > Decimal('0.01'):
        # Recomputed total disagrees with what was charged; refuse to show a wrong invoice.
        app.logger.error('Invoice total mismatch: %s + %s - %s = %s',
                         subtotal, vat, payment.amount, subtotal + vat - payment.amount)
        flash('Your invoice cannot currently be displayed')
        return redirect(url_for('users.purchases'))

    app.logger.debug('Invoice total: %s + %s = %s', subtotal, vat, payment.amount)

    page = render_template('invoice.html', payment=payment,
                           invoice_lines=invoice_lines, form=form,
                           premium=premium, subtotal=subtotal, vat=vat,
                           edit_company=edit_company)

    if request.args.get('pdf'):
        # Render the same page server-side into a PDF download.
        url = external_url('.invoice', payment_id=payment_id)
        return send_file(render_pdf(url, page), mimetype='application/pdf',
                         cache_timeout=60)

    return page
def ticket_report():
    """Admin report: per-admission-type ticket counts and GBP accounting totals."""
    # This is an admissions-based view, so includes expired tickets
    totals = Ticket.query.outerjoin(Payment).filter(
        Ticket.refund_id.is_(None),
        or_(Ticket.paid == True,  # noqa
            ~Payment.state.in_(['new', 'cancelled', 'refunded']))
    ).join(TicketType).with_entities(
        TicketType.admits,
        func.count(),
    ).group_by(TicketType.admits).all()
    totals = dict(totals)

    # Paid tickets only, GBP prices, for the accounting columns.
    query = db.session.query(TicketType.admits, func.count(), func.sum(TicketPrice.price_int)).\
        select_from(Ticket).join(TicketType).join(TicketPrice).\
        filter(TicketPrice.currency == 'GBP', Ticket.paid == True).group_by(TicketType.admits)  # noqa

    accounting_totals = {}
    for row in query.all():
        accounting_totals[row[0]] = {
            'count': row[1],
            'total': row[2]
        }

    return render_template('admin/ticket-report.html', totals=totals, accounting_totals=accounting_totals)
def _test_list_article():
    # Ad-hoc manual test (Python 2): pick a random author/catalog pair and
    # list the matching articles.
    _author_id = random.randint(1, 20)
    _catalog_id = random.randint(1, 20)
    print '_author_id', _author_id
    print '_catalog_id', _catalog_id
    with get_scoped_db_session(False) as _dbss:
        # NOTE(review): the count ignores _catalog_id while the listing below
        # filters by it, so "articles found" may not match the rows printed.
        _count = _dbss.query(func.count(Article.id)).filter_by(author_id=_author_id).scalar()
        # _articles = _dbss.query(Article).options(joinedload(Article.catalogs)).filter_by(author_id=_author_id).all()
        _articles = _dbss.query(Article).join(Article.catalogs).filter_by(author_id=_author_id)\
            .filter(ArticleCatalog.id==_catalog_id).all()
        # _articles = _dbss.query(Article).filter_by(author_id=_author_id).filter(Article.catalogs.any(id=_catalog_id)).all()  # @UndefinedVariable
        print '%s articles found.' % _count
        for _a in _articles:
            print '[%s] %s' % (_a.id, _a.title)#, [_c.name for _c in _a.catalogs]
def nearest_qmed_catchments(self, subject_catchment, limit=None, dist_limit=500):
    """
    Return a list of catchments sorted by distance to `subject_catchment` **and filtered to only include catchments
    suitable for QMED analyses**.

    :param subject_catchment: catchment object to measure distances to
    :type subject_catchment: :class:`floodestimation.entities.Catchment`
    :param limit: maximum number of catchments to return. Default: `None` (returns all available catchments).
    :type limit: int
    :param dist_limit: maximum distance in km. between subject and donor catchment. Default: 500 km. Increasing the
                       maximum distance will increase computation time!
    :type dist_limit: float or int
    :return: list of catchments sorted by distance
    :rtype: list of :class:`floodestimation.entities.Catchment`
    """
    dist_sq = Catchment.distance_to(subject_catchment).label('dist_sq')  # Distance squared, calculated using SQL
    query = self.db_session.query(Catchment, dist_sq). \
        join(Catchment.amax_records). \
        join(Catchment.descriptors). \
        filter(Catchment.id != subject_catchment.id,       # Exclude subject catchment itself
               Catchment.is_suitable_for_qmed,             # Only catchments suitable for QMED estimation
               Catchment.country == subject_catchment.country,  # SQL dist method does not cover cross-boundary dist
               # Within the distance limit (compare squared values to avoid sqrt in SQL)
               dist_sq <= dist_limit ** 2). \
        group_by(Catchment, Descriptors.centroid_ngr_x, Descriptors.centroid_ngr_y). \
        order_by(dist_sq). \
        having(func.count(AmaxRecord.catchment_id) >= 10)  # At least 10 AMAX records
    if limit:
        rows = query[0:limit]  # Each row is tuple of (catchment, distance squared)
    else:
        rows = query.all()

    # Add real `dist` attribute to catchment list using previously calculated SQL dist squared
    catchments = []
    for row in rows:
        catchment = row[0]
        catchment.dist = sqrt(row[1])
        catchments.append(catchment)
    return catchments
def grid_view(request):
    """Paginated, filterable user grid with tags and company eager-loaded."""
    query = (DBSession.query(User)
             .options(joinedload(User.tags))
             .options(joinedload(User.company))
             .outerjoin(User.company)
             .filter(*get_filters(request)))
    total = (DBSession.query(func.count(User.id))
             .outerjoin(User.company)
             .filter(*get_filters(request))
             .scalar())
    results = add_pagination(request, query).all()
    request.session['query'] = request.params.get('query', '')
    # If the user is not coming from a similar path, reset the select-all checkbox.
    if request.referer and request.path_info not in request.referer:
        request.session['select_all'] = False
    if request.session.get('select_all') is not None:
        select_all = request.session['select_all']
    else:
        select_all = False
    return {'items': results,
            'select_all': select_all,
            'repost': request.params,
            'page': int(request.params.get('p', 1)),
            'page_size': getset_page_size(request),
            'total': total,
            'num_pages': int(ceil(float(total) / getset_page_size(request)))}
def grid_view(request):
    """Paginated, filterable company grid with tags, users, and connection info."""
    query = (DBSession.query(BaseCompany)
             .options(joinedload(BaseCompany.tags))
             .options(joinedload(BaseCompany.users_rel)
                      .joinedload(UserXCompany.user))
             .filter(*get_filters(request)))
    total = (DBSession.query(func.count(BaseCompany.id))
             .filter(*get_filters(request))
             .scalar())
    page_items = add_pagination(request, query).all()
    results = add_connection_info(page_items, request.user)
    request.session['query'] = request.params.get('query', '')
    # If the user is not coming from a similar path, reset the select-all checkbox.
    if request.referer and request.path_info not in request.referer:
        request.session['select_all_comp'] = False
    return {'items': results,
            'select_all': request.session.get('select_all_comp'),
            'repost': request.params,
            'page': int(request.params.get('p', 1)),
            'page_size': PAGE_SIZE,
            'total': total,
            'num_pages': int(ceil(float(total) / PAGE_SIZE))}
def report_backlog(self, collection):
    """Tally catalogued Identifiers with and without an associated Work.

    :param collection: restrict to this Collection, or None for all catalogs.
    :return: (done, not_done) Counters keyed by identifier type; also writes
        a per-type report line for non-empty catalogs.
    """
    # (Removed the dead `clause = LicensePool.work_id==None` assignment:
    # `clause` is always bound by the loop below. Loop variables renamed to
    # avoid shadowing the builtin `type`.)
    done = Counter()
    not_done = Counter()
    types = set()
    for clause, counter in (
        (LicensePool.work_id != None, done),
        (LicensePool.work_id == None, not_done),
    ):
        qu = self._db.query(
            Identifier.type,
            func.count(func.distinct(Identifier.id)),
        ).select_from(
            Collection
        ).join(
            Collection.catalog
        ).outerjoin(
            Identifier.licensed_through
        )
        if collection:
            qu = qu.filter(
                Collection.id == collection.id
            )
        qu = qu.filter(
            clause
        ).group_by(Identifier.type).order_by(Identifier.type)
        for id_type, count in qu:
            counter[id_type] += count
            types.add(id_type)

    if len(done) == 0 and len(not_done) == 0:
        # This catalog is empty.
        return done, not_done

    if collection:
        name, identifier = self.decode_metadata_identifier(collection)
        self.write(' %s/%s' % (name, identifier))
    for id_type in sorted(types):
        self.report_backlog_item(id_type, done, not_done)
    return done, not_done
def get_count_query(self) -> query:
    """Query counting distinct users linked to an offerer."""
    distinct_user_count = func.count(distinct(User.id))
    return (self.session.query(distinct_user_count)
            .select_from(User)
            .join(UserOfferer))
def get_count(q):
    """Count the rows of query *q* without fetching them.

    Wraps the query's SELECT in count(*) and drops the ORDER BY, which is
    irrelevant to a count.
    """
    count_stmt = q.statement.with_only_columns([func.count()]).order_by(None)
    return q.session.execute(count_stmt).scalar()
def predict():
    """POST endpoint: score an uploaded outfit photo, persist the result, and
    return the top-3 style predictions plus ad recommendations.

    NOTE(review): original inline comments were in Korean; translated below.
    """
    # javascript post method mismatch?? (original note)
    global current_user
    style_name = [
        'Business Casual', 'Ethnic', 'Feminine', 'Girlish', '80s Retro',
        'British Mode', 'Casual', 'Street'
    ]
    if request.method == 'POST':
        # Save as .jpeg
        f_ext = '.jpeg'
        pub_time = time.time()
        # Build a unique file name; current_user may be undefined (request
        # made from JS), so fall back to a timestamp-only name.
        try:
            picture_name = str(pub_time) + str(current_user.id) + f_ext
        except:
            picture_name = str(pub_time) + f_ext
        picture_path = os.path.join(file_url, picture_name)
        style = request.json['class']  # style chosen by the user
        img_rows, img_cols, img_channel = 256, 256, 3
        img = base64_to_pil(request.json['image'])
        # First upload the image to S3.
        buffer = BytesIO()
        try:
            img.save(buffer, "JPEG")
        except:
            img = img.convert("RGB")  # JPEG has no alpha channel
            img.save(buffer, "JPEG")
        buffer.seek(0)
        upload_fn(buffer, 'socksclub', picture_name)
        # Then add to the db.
        new_img = img.resize((img_rows, img_cols))
        new_img = np.array(new_img)[np.newaxis, ...]
        if USE_TF:
            layer_output, result = extraction_model.predict(new_img)
            result = result.tolist()
            # mapping feature extraction
            last_weight = new_model.weights[-2]
            layer_output = np.squeeze(layer_output)
            try:
                '''메모리 에러 발생?'''
                # (original note: possible memory error here)
                feature_map = scipy.ndimage.zoom(layer_output, (32, 32, 0.5),
                                                 order=1)
            except Exception as e:
                '''에러 메세지 출력해줘야됨'''
                # (original note: should surface an error message to the user)
                print('error occured', e)
                return jsonify(ok=False)
            pred_class = np.argmax(result)
            ai_style = style_name[pred_class]  # e.g. 'Street'
            pred_class_weight = last_weight[:, pred_class]
            final_output = np.dot(feature_map.reshape((256 * 256, 256)),
                                  pred_class_weight).reshape((256, 256))
            # Render the activation heat-map over the photo, encode as base64 PNG.
            new_buffer = BytesIO()
            fig, axes = plt.subplots(1, 1, figsize=(5, 5))
            axes.imshow(np.asarray(img), alpha=0.5)
            axes.imshow(final_output, cmap='jet', alpha=0.5)
            extent = axes.get_window_extent().transformed(
                fig.dpi_scale_trans.inverted())
            fig.savefig(new_buffer, bbox_inches=extent, format='png')
            img_str = base64.b64encode(new_buffer.getvalue())
            img_str = img_str.decode('utf-8')
            img_str = 'data:image/png;base64,' + img_str
        else:
            # TensorFlow unavailable: random scores, echo the input image back.
            result = np.random.randn(1, len(style_name))
            pred_class = np.argmax(result)
            ai_style = style_name[pred_class]  # e.g. 'Street'
            img_str = request.json['image']
        res = {}
        for keys, values in zip(style_name, result[0]):
            values = round(values, 2) * 100
            res[keys] = values
        else:
            # NOTE(review): this `else` attaches to the `for` loop and always
            # executes (there is no `break`); likely a mis-indent in the original.
            res = dict(sorted(res.items(), key=(lambda x: x[1]), reverse=True))
            top3_keys, top3_values = list(res.keys())[:3], list(
                res.values())[:3]
            if top3_values[0] == 100:
                # Break a 100% score deterministically so three distinct
                # scores can be displayed.
                np.random.seed(0)
                randNum = np.random.rand(2).tolist()
                for i in range(1, len(top3_values)):
                    top3_values[i] += randNum[i - 1] * 10 + 10
                    top3_values[i] = round(top3_values[i], 2)
                top3_values[0] = top3_values[0] - (top3_values[1] + top3_values[2])
            max_score = top3_values[0]
            # Stored as a String in the db => just in case, for later? (original note)
            gender = request.json['gender']
            if current_user.is_authenticated():
                sharing = 1
                rank_post = FashionScore(score=max_score,
                                         ai_style=ai_style,
                                         pub_date=datetime.now(),
                                         img_url=picture_path,
                                         style=style,
                                         author=current_user,
                                         sharing=sharing,
                                         gender=gender)
            else:
                # Anonymous upload: no author, not shared.
                sharing = 0
                rank_post = FashionScore(score=max_score,
                                         ai_style=ai_style,
                                         pub_date=datetime.now(),
                                         img_url=picture_path,
                                         style=style,
                                         author=None,
                                         sharing=sharing,
                                         gender=gender)
            db.session.add(rank_post)
            db.session.commit()
            # Click counts per ad, used to rank recommendations.
            recommend_sbquery = db.session.query(
                RecommendLog.ad_id,
                func.count(RecommendLog.ad_id).label('count')).group_by(
                    RecommendLog.ad_id).subquery()
            recommendation_queries = Recommendation.query.filter(
                Recommendation.gender == gender).join(
                    recommend_sbquery,
                    Recommendation.id == recommend_sbquery.c.ad_id,
                    isouter=True).order_by(
                        recommend_sbquery.c.count.desc().nullslast()).all()
            # 2 most-clicked ads + 2 picked at random
            recommendation_choices = recommendation_queries[:2]
            random_recommendation = random.choices(
                population=recommendation_queries[2:], k=2)
            recommendation_choices.extend(random_recommendation)
            # Convert the SQL objects into plain dicts for jsonify.
            recommedation_list = []
            for i in recommendation_choices:
                test = {
                    c.name: getattr(i, c.name)
                    for c in Recommendation.__table__.columns
                }
                recommedation_list.append(test)
            db.session.close()
            # Load the description text matching the predicted class.
            try:
                text_dir = 'static/text/' + top3_keys[0] + '.txt'
                f = open(text_dir, 'r')
                text = "<\br>".join(f.readlines())
            except:
                text = ""
            return jsonify(top1_class=top3_keys[0],
                           top2_class=top3_keys[1],
                           top3_class=top3_keys[2],
                           top1_value=top3_values[0],
                           top2_value=top3_values[1],
                           top3_value=top3_values[2],
                           feature=img_str,
                           picture_path=picture_path,
                           recommendation=recommedation_list,
                           text=text)
    return None
def count_groups(query, *entities):
    """Group *query* by *entities* and select per-group row counts.

    Adds a ``count`` column, then groups and orders by the given entities.
    """
    grouped = query.with_entities(func.count().label('count'), *entities)
    return grouped.group_by(*entities).order_by(*entities)
def display_restaurant_category(self, category_name):
    """Restaurants offering items in *category_name*, with per-item-group counts."""
    category = str(category_name)
    rows = (self.session.query(Restaurants, Items.category,
                               func.count(Items.category))
            .filter(Items.rest_id == Restaurants.id,
                    Items.category == category)
            .group_by(Items)
            .distinct()
            .all())
    return rows
def get_user_purchases(query):
    """Per-user counts of paid-for purchases, ordered by user id."""
    return (query.join(Purchase)
            .join(Purchase.owner)
            .filter(Purchase.is_paid_for)
            .with_entities(User, func.count('*'))
            .group_by(User)
            .order_by(User.id))
def react_page():
    """Render the project dashboard with per-status counts and chart data.

    Improvement: the project-counting query was copy-pasted five times with
    only the status (and an optional date filter) changing; it is now a
    single private helper. The route's behavior and template context are
    unchanged.
    """
    # check if there is a current user
    if 'user' not in session:
        return redirect("/login")

    # How often (in days) the user wants to update their projects.
    freq = db.session.query(User.update_time).filter(
        User.user_id == session['user']).one()[0]
    NOW = datetime.datetime.now()

    def _status_count(status, *extra_filters):
        # Count the current user's projects in the given status, optionally
        # narrowed by extra SQL filter clauses (e.g. an updated_at cutoff).
        return db.session.query(func.count(Project.project_id)) \
            .join(Status) \
            .filter(Project.user_id == session['user'],
                    Status.status == status,
                    *extra_filters) \
            .first()[0]

    fin_projects = _status_count("Finished")
    hib_projects = _status_count("Hibernating")
    frog_projects = _status_count("Frogged")

    # In-progress projects split by whether they were touched within the
    # user's chosen update frequency window.
    cutoff = NOW - datetime.timedelta(days=freq)
    need_update = _status_count("In progress", Project.updated_at < cutoff)
    updated = _status_count("In progress", Project.updated_at > cutoff)

    counts = {"finished": int(fin_projects),
              "hibernate": int(hib_projects),
              "frogged": int(frog_projects),
              "need update": int(need_update),
              "updated": int(updated)}

    # Chart.js payload: one pie/doughnut dataset over all statuses.
    data_dict = {
        "labels": [k for k in sorted(counts.keys())],
        "datasets": [
            {
                "data": [v for k, v in sorted(counts.items())],
                "backgroundColor": [
                    "#FF6384",
                    "#36A2EB",
                    "red",
                    "blue",
                    "green",
                ],
                "hoverBackgroundColor": [
                    "#FF6384",
                    "#36A2EB",
                ]
            }]
    }

    # Chart.js payload: stacked WIP bar (needs-update vs recently updated).
    wip_dict = {
        "labels": ["WIP"],
        "datasets": [
            {
                "label": "Need Update",
                "data": [counts['need update']],
                "backgroundColor": ["blue"],
                "hoverBackgroundColor": ["#B56357"]
            },
            {
                "label": "Updated",
                "data": [counts['updated']],
                "backgroundColor": ["green"],
                "hoverBackgroundColor": ["#B4DBC0"]
            }]
    }

    return render_template("react-projects.html",
                           finished=fin_projects,
                           hibernate=hib_projects,
                           frogged=frog_projects,
                           needUpdate=need_update,
                           updated=updated,
                           counts=counts,
                           dict=data_dict,
                           wip=wip_dict,
                           freq=freq)
def count(self, stmt: Select, filter_env: bool = True) -> int:
    """Return the number of rows produced by *stmt*.

    Wraps the statement in a subquery and runs ``SELECT count(*)`` over it.
    """
    wrapped = select(func.count()).select_from(stmt.subquery())
    result = self.execute(wrapped)
    return result.scalar_one()
def send_push_message(subject_id,
                      text="",
                      custom_payload=None,
                      title="Gamification-Engine",
                      android_text=None,
                      ios_text=None):
    """Send a push notification to every registered device of *subject_id*.

    iOS devices go through APNs (prod or dev gateway, selected by the
    ``prod_`` / ``dev_`` push-id prefix); Android devices go through GCM.

    Fixes over the previous version:
      * ``custom_payload`` was a mutable default argument (``{}``); it now
        defaults to ``None`` and is normalized inside the function.
      * ``if custom_payload: ... else: ...`` built an identical ``Payload``
        in both branches — collapsed into a single construction.
    """
    if custom_payload is None:
        custom_payload = {}

    # Unread-message count drives the iOS badge number.
    message_count = DBSession.execute(
        select([func.count("*").label("c")],
               from_obj=t_subject_messages).where(
                   and_(t_subject_messages.c.subject_id == subject_id,
                        t_subject_messages.c.is_read == False))).scalar()
    data = dict({"title": title, "badge": message_count}, **custom_payload)
    settings = get_settings()

    if not ios_text:
        ios_text = text
    if not android_text:
        android_text = text

    rows = DBSession.execute(
        select([t_subject_device.c.push_id, t_subject_device.c.device_os],
               from_obj=t_subject_device).distinct().where(
                   t_subject_device.c.subject_id == subject_id)).fetchall()

    for device in rows:
        if "ios" in device.device_os.lower():
            identifier = random.getrandbits(32)
            payload = Payload(alert=ios_text,
                              custom=data,
                              badge=message_count,
                              sound="default")
            log.debug("Sending Push message to User (ID: %s)", subject_id)
            # push_id carries the APNs environment it was issued for.
            if device.push_id.startswith("prod_"):
                get_prod_apns().gateway_server.send_notification(
                    device.push_id[5:], payload, identifier=identifier)
            elif device.push_id.startswith("dev_"):
                get_dev_apns().gateway_server.send_notification(
                    device.push_id[4:], payload, identifier=identifier)
        if "android" in device.device_os.lower():
            log.debug("Sending Push message to User (ID: %s)", subject_id)
            # GCM registration ids have no environment prefix.
            push_id = lstrip_word(device.push_id, "dev_")
            push_id = lstrip_word(push_id, "prod_")
            response = get_gcm().json_request(
                registration_ids=[
                    push_id,
                ],
                data={
                    "message": android_text,
                    "data": data,
                    "title": title
                },
                restricted_package_name=os.environ.get(
                    "GCM_PACKAGE", settings.get("gcm.package", "")),
                priority='high',
                delay_while_idle=False)
            if response:
                gcm_feedback(response)
def _calculate_related_artists_scores(
        session: Session,
        user_id,
        sample_size=None,
        limit=MAX_RELATED_ARTIST_COUNT) -> List[RelatedArtist]:
    """Calculates the scores of related artists to the given user_id by
    querying who followers of the user_id also follow and using the scoring
    algorithm:

    `score = mutual_follower_count * percentage_of_suggested_artist_followers`
    """
    # Get all the followers of the artist
    followers_subquery = aliased(
        Follow,
        session.query(Follow.follower_user_id).filter(
            Follow.followee_user_id == user_id,
            Follow.is_current,
            Follow.is_delete == False,
        ).subquery(name="followers"),
    )
    # Optionally sample the Follow table (TABLESAMPLE SYSTEM_ROWS) to bound
    # query cost for very popular artists; otherwise use the full table.
    if sample_size is None:
        followers_sampled = aliased(Follow)
    else:
        followers_sampled = aliased(
            Follow,
            tablesample(cast(FromClause, Follow),
                        func.system_rows(sample_size)))
    # Find out who the followers are following
    mutual_followers_subquery = (session.query(
        followers_sampled.followee_user_id.label("suggested_artist_id"),
        func.count(followers_subquery.follower_user_id).label(
            "mutual_follower_count"),
    ).select_from(followers_subquery).join(
        followers_sampled,
        followers_subquery.follower_user_id ==
        followers_sampled.follower_user_id,
    ).filter(
        followers_sampled.is_current,
        followers_sampled.is_delete == False,
        # Exclude the seed artist from their own suggestions.
        followers_sampled.followee_user_id != user_id,
    ).group_by(
        followers_sampled.followee_user_id).subquery(name="mutual_followers"))
    # Score the artists gathered from the above queries by:
    #
    # score = mutual_follower_count * percentage_of_suggested_artist_followers
    #
    # Examples:
    #
    # If we're finding related artists to artist A and artist B shares 20
    # followers with artist A, and 50% of artist B's following are followers
    # of artist A, then the score for artist B is 20 * 0.50 = 10.
    #
    # If artists A and C share 1000 followers but C has 100,000 followers
    # total, then that's only 1% of artist C's following. Artist C gets a
    # score of 1,000 * 0.01 = 10
    #
    # Note the expression below computes mfc * mfc / follower_count, which
    # equals mfc * (mfc / follower_count) — i.e. the formula above.
    scoring_query = (
        session.query(
            User.user_id.label("related_artist_user_id"),
            func.round(
                1.0 * column("mutual_follower_count") *
                column("mutual_follower_count") / AggregateUser.follower_count,
                3,
            ).label("score"),
        ).select_from(mutual_followers_subquery).join(
            AggregateUser,
            AggregateUser.user_id == column("suggested_artist_id")).
        join(User, User.user_id == column("suggested_artist_id")).filter(
            User.is_current,
            AggregateUser.track_count > 0,
            # Should never be true, but occasionally this is the mutual artist's
            # first follow and aggregate user hasn't updated yet
            AggregateUser.follower_count > 0,
        ).order_by(desc(column("score")), User.user_id).limit(limit))
    rows = scoring_query.all()
    related_artists = [
        RelatedArtist(
            user_id=user_id,
            related_artist_user_id=row.related_artist_user_id,
            score=row.score,
        ) for row in rows
    ]
    return related_artists
def _count_table_rows_(self, table):
    """Return the total number of rows in *table*."""
    count_stmt = select([func.count()]).select_from(table)
    return self.conn.execute(count_stmt).scalar()
def invoice(payment_id, fmt=None):
    """Render the invoice/receipt page for a payment, or export it as PDF.

    fmt: ``None`` renders HTML; ``"pdf"`` streams a PDF; anything else 404s.
    """
    pdf = False
    if fmt == "pdf":
        pdf = True
    elif fmt:
        # Only the "pdf" format (or none) is supported.
        abort(404)
    payment = get_user_payment_or_abort(payment_id, allow_admin=True)
    form = InvoiceForm()
    if form.validate_on_submit():
        # Saving a company name issues a VAT invoice number for the payment.
        current_user.company = form.company.data
        payment.issue_vat_invoice_number()
        db.session.commit()
        flash("Company name updated")
        return redirect(url_for(".invoice", payment_id=payment_id))
    if request.method != "POST":
        form.company.data = current_user.company
    edit_company = bool(request.args.get("edit_company"))
    if request.args.get("js") == "0":
        flash("Please use your browser's print feature or download the PDF")
    # One invoice line per price tier: (PriceTier, number of purchases).
    invoice_lines = (
        Purchase.query.filter_by(payment_id=payment_id)
        .join(PriceTier, Product)
        .with_entities(PriceTier, func.count(Purchase.price_tier_id))
        .group_by(PriceTier, Product.name)
        .order_by(Product.name)
        .all()
    )
    ticket_sum = sum(
        pt.get_price(payment.currency).value_ex_vat * count
        for pt, count in invoice_lines
    )
    # Stripe payments carry a provider premium on top of the ticket total.
    if payment.provider == "stripe":
        premium = payment.__class__.premium(payment.currency, ticket_sum)
    else:
        premium = Decimal(0)
    subtotal = ticket_sum + premium
    vat = subtotal * Decimal("0.2")
    # FIXME: we should use a currency-specific quantization here (or rounder numbers)
    if subtotal + vat - payment.amount > Decimal("0.01"):
        # Computed total disagrees with what was actually charged — refuse
        # to show a potentially wrong invoice.
        app.logger.error(
            "Invoice total mismatch: %s + %s - %s = %s",
            subtotal,
            vat,
            payment.amount,
            subtotal + vat - payment.amount,
        )
        flash("Your invoice cannot currently be displayed")
        return redirect(url_for("users.purchases"))
    if payment.vat_invoice_number:
        mode = "invoice"
        invoice_number = payment.issue_vat_invoice_number()
    else:
        mode = "receipt"
        invoice_number = None
    page = render_template(
        "payments/invoice.html",
        mode=mode,
        payment=payment,
        account=payment.recommended_destination,
        invoice_lines=invoice_lines,
        form=form,
        premium=premium,
        subtotal=subtotal,
        vat=vat,
        edit_company=edit_company,
        invoice_number=invoice_number,
    )
    url = external_url(".invoice", payment_id=payment_id)
    if pdf:
        return send_file(
            render_pdf(url, page),
            mimetype="application/pdf",
            cache_timeout=60,
            attachment_filename=f"emf_{mode}.pdf",
            as_attachment=True,
        )
    if mode == "invoice":
        # Archive a PDF copy of every issued VAT invoice (best effort:
        # skipped with a warning if the export directory is missing).
        invoice_dir = "/vat_invoices"
        if not os.path.exists(invoice_dir):
            logger.warn(
                "Not exporting VAT invoice as directory (%s) does not exist",
                invoice_dir,
            )
            return page
        with open(os.path.join(invoice_dir, f"{invoice_number}.pdf"), "wb") as f:
            shutil.copyfileobj(render_pdf(url, page), f)
    return page
def stats():
    """Assemble monitoring statistics as a space-separated "name:count" string.

    Fixes over the previous version:
      * ``locals()`` was called inside the list comprehension; on Python 3 a
        comprehension has its own scope, so ``locals()[q]`` would raise
        ``KeyError``. The local namespace is now snapshotted once, before the
        comprehension, which works on both Python 2 and 3.
      * Removed the unused ``parking_paid`` / ``campervan_paid`` locals.

    Returns:
        str: e.g. ``"checked_in:12 badged_up:3 ... full:100 kid:5 ..."``.
    """
    # Don't care about the state of the payment if it's paid for
    paid = Ticket.query.filter_by(paid=True)

    # For new payments, the user hasn't committed to paying yet
    unpaid = Payment.query.filter(
        Payment.state != 'new', Payment.state != 'cancelled'
    ).join(Ticket).filter_by(paid=False)

    expired = unpaid.filter_by(expired=True)
    unexpired = unpaid.filter_by(expired=False)

    # Providers who take a while to clear - don't care about captured Stripe payments
    gocardless_unpaid = unpaid.filter(Payment.provider == 'gocardless',
                                      Payment.state == 'inprogress')
    banktransfer_unpaid = unpaid.filter(Payment.provider == 'banktransfer',
                                        Payment.state == 'inprogress')

    # TODO: remove this if it's not needed
    full_gocardless_unexpired = unexpired \
        .filter(Payment.provider == 'gocardless',
                Payment.state == 'inprogress') \
        .join(TicketType).filter_by(admits='full')
    full_banktransfer_unexpired = unexpired \
        .filter(Payment.provider == 'banktransfer',
                Payment.state == 'inprogress') \
        .join(TicketType).filter_by(admits='full')

    # These are people queries - don't care about cars or campervans being checked in
    checked_in = Ticket.query.join(TicketType) \
        .filter(TicketType.admits.in_(['full', 'kid'])) \
        .join(TicketCheckin).filter_by(checked_in=True)
    badged_up = TicketCheckin.query.filter_by(badged_up=True)

    users = User.query
    proposals = Proposal.query

    # Snapshot the local queries once; the string names below must match the
    # local variable names above exactly.
    query_vars = locals()

    # Simple count queries
    queries = [
        'checked_in', 'badged_up', 'users', 'proposals',
        'gocardless_unpaid', 'banktransfer_unpaid',
        'full_gocardless_unexpired', 'full_banktransfer_unexpired',
    ]
    stats = ['%s:%s' % (q, query_vars[q].count()) for q in queries]

    # Admission types breakdown
    admit_types = ['full', 'kid', 'campervan', 'car']
    admit_totals = dict.fromkeys(admit_types, 0)

    for query in 'paid', 'expired', 'unexpired':
        tickets = query_vars[query].join(TicketType).with_entities(
            TicketType.admits,
            func.count(),
        ).group_by(TicketType.admits).all()
        tickets = dict(tickets)

        for a in admit_types:
            stats.append('%s_%s:%s' % (a, query, tickets.get(a, 0)))
            admit_totals[a] += tickets.get(a, 0)

    # and totals
    for a in admit_types:
        stats.append('%s:%s' % (a, admit_totals[a]))

    return ' '.join(stats)
from urllib.parse import quote_plus  # PY2: from urllib import quote_plus
from sqlalchemy.engine import create_engine
from sqlalchemy.sql.expression import select
from sqlalchemy.sql.functions import func
from sqlalchemy.sql.schema import Table, MetaData

# SQLAlchemy connection URL template for AWS Athena (pyathena REST dialect).
conn_str = (
    'awsathena+rest://{aws_access_key_id}:{aws_secret_access_key}'
    '@athena.{region_name}.amazonaws.com:443/'
    '{schema_name}?s3_staging_dir={s3_staging_dir}'
)

# Credentials and the S3 staging path are URL components, so they must be
# percent-encoded before being substituted into the template.
engine = create_engine(
    conn_str.format(
        aws_access_key_id=quote_plus('YOUR_ACCESS_KEY_ID'),
        aws_secret_access_key=quote_plus('YOUR_SECRET_ACCESS_KEY'),
        region_name='us-west-2',
        schema_name='default',
        s3_staging_dir=quote_plus('s3://YOUR_S3_BUCKET/path/to/'),
    )
)

# Reflect the table from Athena and print its total row count.
many_rows = Table('many_rows', MetaData(bind=engine), autoload=True)
print(select([func.count('*')], from_obj=many_rows).scalar())
def group_by_query(conn):
    """Return (count, pwd) rows: the number of users sharing each password."""
    from sqlalchemy.sql.functions import func
    grouped = conn.query(func.count(User.id), User.pwd).group_by(User.pwd)
    return grouped.all()
def get_count_query(self) -> query:
    """Build a query counting distinct admin users."""
    from pcapi.core.users.models import User
    admin_only = User.isAdmin.is_(True)
    return (
        self.session.query(func.count(distinct(User.id)))
        .select_from(User)
        .filter(admin_only)
    )