def get_entry(year, month, day, entry_slug):
    entry = Entry.query.filter(
        (extract('year', Entry.pub_date) == year) &
        (extract('month', Entry.pub_date) == month) &
        (extract('day', Entry.pub_date) == day)
    ).filter_by(slug=entry_slug).first()
    return render_template('single_entry.html', entry=entry)
def dump_sheet(sheet, options):
    session = init_nobix_db()
    months = int(options.get('months', None))
    upto = options.get('upto', None)
    dates = list(reversed([(upto - relativedelta(months=i)) for i in xrange(months)]))
    vend_names = {}
    for c in config.vendedores.keys():
        vname = config.vendedores.get(c, {}).get('nombre', None)
        vend_names.setdefault(vname, list()).append(c)
    base_query = Documento.query.filter(Documento.tipo.in_(doctypes))
    heads = [d.strftime("%m-%Y") for d in dates]
    row = 0
    sheet.write(row, 0, 'vendedor', xf_map['heading'])
    for c, h in enumerate(heads):
        sheet.write(row, c + 1, h, xf_map['heading'])
    for vend in sorted(vend_names.keys()):
        row += 1
        col = 0
        vcodes = vend_names.get(vend)
        query = base_query.filter(Documento.vendedor.in_(vcodes))
        sheet.write(row, col, vend, xf_map['text'])
        for d in dates:
            col += 1
            q = query.filter(extract('year', Documento.fecha) == d.year)\
                     .filter(extract('month', Documento.fecha) == d.month)
            sheet.write(row, col, q.count(), xf_map['number'])
def plot_date(d):
    """Sum amounts and group by dates."""
    query = db_session.query(
        Entry.date, func.sum(Entry.amount)
    ).filter(
        (extract('year', Entry.date) == d.year) &
        (extract('month', Entry.date) == d.month)
    ).group_by(Entry.date).order_by(Entry.date)
    series = [(str(d), '%.2f' % s) for d, s in query]
    return jsonify(series=series)
def plot_bar(d):
    """Sum amounts and group by tags."""
    query = db_session.query(
        Tag.name, func.sum(Entry.amount)
    ).outerjoin(Entry.tags).filter(
        (extract('year', Entry.date) == d.year) &
        (extract('month', Entry.date) == d.month)
    ).group_by(Tag.name).order_by(Tag.name)
    series = [(t or '(no tag)', '%.2f' % s) for t, s in query]
    return jsonify(series=series)
def query(self):
    bs = block_size.c.block_size
    query = powa_getstatdata_sample("db")
    query = query.alias()
    c = query.c
    return (select([
        extract("epoch", c.ts).label("ts"),
        (sum(c.runtime) / greatest(sum(c.calls), 1)).label("avg_runtime"),
        (sum(c.runtime) / greatest(extract("epoch", c.mesure_interval), 1)).label("load"),
        total_read(c),
        total_hit(c)])
        .where(c.calls != None)
        .group_by(c.ts, bs, c.mesure_interval)
        .order_by(c.ts)
        .params(samples=100))
def getTotalExercisesCurrentMonth(self):
    current_month = DateHelper.current_month()
    return len(db.session.query(Exercise.id)
               .filter(Exercise.user_id == self.id)
               .filter(extract('month', Exercise.date) == current_month)
               .all())
def show_stats():
    # Day of week (Postgres 'dow': 0 = Sunday .. 6 = Saturday)
    dow = extract('dow', Message.timestamp)
    stats = {
        i: session.query(Message).filter(dow == i).count()
        for i in range(7)
    }
    return render_template('stats.html', stats=stats)
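# An equivalent single-query variant of show_stats above: grouping by the
# extracted day of week replaces the seven separate counts. A sketch against
# the same Message model and session (the function name is hypothetical):
def show_stats_grouped():
    rows = (session.query(extract('dow', Message.timestamp).label('dow'),
                          func.count(Message.id))
            .group_by('dow').all())
    stats = {int(dow): count for dow, count in rows}
    return render_template('stats.html', stats=stats)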
def list_user_activities_for_prediction(gym, date):
    # isocalendar() weekday is 1 (Monday) .. 7 (Sunday), while Postgres
    # 'dow' is 0 (Sunday) .. 6 (Saturday); `% 7` maps Sunday 7 -> 0 so the
    # two numbering schemes line up.
    return session.query(UserActivity).join(
        Weather, UserActivity.weather_id == Weather.id).filter(
        extract('dow', UserActivity.start_date) == date.isocalendar()[2] % 7).filter(
        UserActivity.gym_id == gym.id).order_by(
        desc(UserActivity.start_date))
def total_expenses_category():
    """Total expenses per category for the current month.

    :return: the per-category totals for the selected budget when data is
        available, otherwise example data
    """
    total_category = {}
    for cat in category_choice():
        expenses = Expenses.query.with_entities(
            func.sum(Expenses.expense_amount).label('expenses_by_cat')) \
            .filter(Expenses.budget_id == selected_budget()) \
            .filter(Expenses.category == cat[0]) \
            .filter(extract('year', Expenses.transaction_date) == datetime.now().year,
                    extract('month', Expenses.transaction_date) == datetime.now().month) \
            .first()
        if expenses[0]:
            total_category[cat[1]] = expenses[0]
    if len(total_category) > 0:
        return total_category
    else:
        return {"ex1": 5, 'ex2': 10, 'ex3': 3}
def calculate_country_month_mean(country, month):
    stats_from_month = country.statistics.filter(
        extract('month', DailyStatistic.date) == month).all()
    new_cases_per_million = [
        stat.new_cases_per_million for stat in stats_from_month
    ]
    # The year is hardcoded to 2020 when looking up the number of days in
    # the month, and the mean is taken over days in the month rather than
    # over the number of observations.
    days_in_month = monthrange(2020, month)[1]
    return sum(new_cases_per_million) / days_in_month
def make_filter(cls, field, ftype, value):
    filter = None
    if ftype == 'IN':
        filter = field.in_([v for v in value if v])
    elif ftype == 'date_gt':
        filter = field > value
    elif ftype == 'date_gte':
        filter = field >= value
    elif ftype == 'date_gt_now_less':
        qty, granularity = value.split(" ")
        filter = field > func.date_trunc(granularity,
                                         func.now() - cast(value, Interval()))
    elif ftype == 'date_lt_now_less':
        qty, granularity = value.split(" ")
        filter = field < func.date_trunc(granularity,
                                         func.now() - cast(value, Interval()))
    elif ftype == 'date_x_last_n':
        qty, granularity, count_current_period = value.split(" ")
        filter = (field > func.date_trunc(granularity,
                                          func.now() - cast("%s %s" % (qty, granularity), Interval())),
                  field < func.date_trunc(granularity,
                                          func.now() - cast('0', Interval())),)
        if count_current_period == 'on':
            filter = filter[0]
    elif ftype == 'date_month_ne':
        filter = extract('month', field) != value
    elif ftype == 'date_month_gt':
        filter = extract('month', field) > value
    elif ftype == 'date_month_lt':
        filter = extract('month', field) < value
    elif ftype == 'date_month_eq':
        filter = extract('month', field) == value
    elif ftype == 'date_hour_ne':
        filter = extract('hour', field) != value
    elif ftype == 'date_hour_gt':
        filter = extract('hour', field) > value
    elif ftype == 'date_hour_lt':
        filter = extract('hour', field) < value
    elif ftype == 'date_hour_eq':
        filter = extract('hour', field) == value
    elif ftype == 'date_lt':
        filter = field < value
    elif ftype == 'date_lte':
        filter = field <= value
    elif ftype == '=':
        filter = field == value
    elif ftype == '!=':
        filter = field != value
    elif ftype == '>':
        filter = field > value
    elif ftype == '>=':
        filter = field >= value
    elif ftype == '<':
        filter = field < value
    elif ftype == '<=':
        filter = field <= value
    elif ftype == 'like':
        filter = field.ilike(value)
    return filter
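# A minimal usage sketch for make_filter above; the Order model and its
# created_at timestamp column are hypothetical stand-ins for any mapped model:
f = make_filter(Order, Order.created_at, 'date_month_eq', 12)
december_orders = session.query(Order).filter(f).all()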
def showplanfortodayy():
    sessionn = connect()
    current_datetime = datetime.now().date()
    this_year = current_datetime.year
    this_month = current_datetime.month
    this_day = current_datetime.day
    this_id = session.get('this_id', None)
    stmt = sessionn.query(todolist).filter(
        todolist.user_id == this_id,
        todolist.status == 0,
        extract('year', todolist.time_creating) == this_year,
        extract('month', todolist.time_creating) == this_month,
        extract('day', todolist.time_creating) == this_day).order_by(
        todolist.time_creating).all()
    results = []
    for row in stmt:
        results.append([
            row.user_id, row.todolist_name, row.description_of_todo,
            row.time_creating, row.status
        ])
    form = markasdone()
    if form.is_submitted():
        result = request.form
        adddata = result['my_number']
        if 1 <= int(adddata) <= len(results):
            updating_task = results[int(adddata) - 1]
            update_time = updating_task[3]
            sessionn.query(todolist).filter(
                todolist.user_id == this_id,
                todolist.time_creating == update_time).update({'status': 1})
            sessionn.commit()
            return render_template('refresh.html')
        else:
            return render_template('errors.html', error=1)
    check = len(results)
    return render_template('showtaskfortoday.html', result=results, check=check)
def get_claim_type_count(year, month):
    try:
        claim_type = {
            '1': 'Wages',
            '2': 'Medical',
            '3': 'Purchases',
            '4': 'Utilities'
        }
        claimlist = db.session.query(func.sum(Claim.grand_total), Claim.claim_type) \
            .filter(Claim.isactive == 1) \
            .filter(extract('year', Claim.claim_date) == year) \
            .filter(extract('month', Claim.claim_date) == month) \
            .group_by(Claim.claim_type).all()
        send_data = {"Wages": 0, "Medical": 0, "Purchases": 0, "Utilities": 0}
        if claimlist:
            for item in claimlist:
                send_data[claim_type.get(item.claim_type)] = str(item[0])
            response_obj = {"ErrorCode": 9999, "data": send_data}
        else:
            response_obj = {"ErrorCode": 9999, "data": {}}
        return response_obj
    except Exception as e:
        print(e)
def yearly_stats():
    day = datetime.datetime.utcnow().date()
    # FIXME: Use dateutil.relativedelta or something
    last_year = day + datetime.timedelta(-365)
    doy = extract('doy', Message.timestamp)
    messages = session.query(Message.timestamp, func.count(doy))\
        .filter(last_year < Message.timestamp).group_by(doy)
    response = {date.strftime('%Y-%m-%d'): count
                for date, count in messages.all()}
    return jsonify(response)
def _increase_counter(name, ttl, _sessionmaker):
    # TODO: create context manager for usage increase - decrease
    with session_scope(_sessionmaker) as session:
        try:
            _, _ = get_or_create(session, Lock, name=name)
        except exc.IntegrityError:
            session.rollback()
        counter = Counter(name, ttl)
        # NOTE: sqlite does not support SELECT ... FOR UPDATE
        session.query(Lock).filter_by(name=name).with_for_update().one()
        session.add(counter)
        # session.flush() ??
        res = session.query(Counter).filter(
            and_(
                extract('epoch', Counter.created_at) + Counter.ttl >
                extract('epoch', func.now()),
                Counter.name == name)).count()
        session.commit()
        c_id = counter.id
    return res, c_id
def tobs():
    """Return a json list of tobs from the dataset."""
    # Query all tobs
    # TOBS needs to be cast for JSON serialization
    results = session.query(cast(Measures.tobs, String)) \
        .filter(extract('year', Measures.date) == 2017) \
        .order_by(Measures.date.desc()).all()
    # Convert list of tuples into normal list using np.ravel
    rec_list = list(np.ravel(results))
    print(results)
    return jsonify(rec_list)
def campaign_date_calls(campaign_id):
    start = request.values.get('start')
    end = request.values.get('end')
    timespan = request.values.get('timespan', 'day')
    if timespan not in API_TIMESPANS.keys():
        abort(400, 'timespan should be one of %s' % ','.join(API_TIMESPANS))
    else:
        timespan_strf = API_TIMESPANS[timespan]
    campaign = Campaign.query.filter_by(id=campaign_id).first_or_404()
    timespan_extract = extract(timespan, Call.timestamp).label(timespan)
    query = (
        db.session.query(
            func.min(Call.timestamp.label('date')),
            timespan_extract,
            Call.status,
            func.count(distinct(Call.id)).label('calls_count')
        )
        .filter(Call.campaign_id == int(campaign.id))
        .group_by(timespan_extract)
        .order_by(timespan)
        .group_by(Call.status)
    )
    if start:
        try:
            startDate = dateutil.parser.parse(start)
        except ValueError:
            abort(400, 'start should be in isostring format')
        query = query.filter(Call.timestamp >= startDate)
    if end:
        try:
            endDate = dateutil.parser.parse(end)
            if endDate < startDate:
                abort(400, 'end should be after start')
            if endDate == startDate:
                endDate = startDate + timedelta(days=1)
        except ValueError:
            abort(400, 'end should be in isostring format')
        query = query.filter(Call.timestamp <= endDate)
    dates = defaultdict(dict)
    for (date, timespan, call_status, count) in query.all():
        # combine status values by date
        for status in TWILIO_CALL_STATUS:
            if call_status == status:
                date_string = date.strftime(timespan_strf)
                dates[date_string][status] = count
    sorted_dates = OrderedDict(sorted(dates.items()))
    return Response(json.dumps(sorted_dates), mimetype='application/json')
def numerologic():
    try:
        sessionn = connect()
        this_month = str(datetime.now().date().month)
        this_day = str(datetime.now().date().day)
        numer = sessionn.query(numerology_database.numerology_description).filter(
            extract('month', numerology_database.numerology_date) == this_month,
            extract('day', numerology_database.numerology_date) == this_day).all()
        if len(numer) == 0:
            return render_template('notOk.html', text='Database is empty',
                                   redir='numerologic')
    except:
        return render_template('notOk.html', text='Connection error',
                               redir='numerologic')
    return render_template('Page7.html', numer=numer, admin=session['admin'])
def list(**kvargs):
    base = Post.query
    if 'year' in kvargs:
        year = kvargs['year']
        if year:
            base = base.filter(extract('year', Post.pub_date) == year)
    elif 'tag' in kvargs:
        tag = kvargs['tag']
        if tag:
            base = base.filter(Post.tags.any(title=tag))
    base = base.order_by(Post.pub_date.desc())
    return base
def show_chart_percent(user_id: int, pfl_id: int):
    profit = db.session.query(
        extract('year', DepositeItems.datestamp),
        func.sum(DepositeItems.value)).join(Deposite.portfolio).filter(
        Portfolio.user_id == user_id,
        Portfolio.id == pfl_id,
        DepositeItems.is_percent_kapital,
        DepositeItems.deposite_id == Deposite.id).group_by(
        extract('year', DepositeItems.datestamp)).order_by(
        extract('year', DepositeItems.datestamp)).all()
    profit_sum = 0
    labels = []
    values = []
    for portfolio in profit:
        year, value = portfolio
        labels.append(str(year).split('.')[0])
        values.append(value)
        profit_sum += value
    return labels, values, profit_sum
def query(self):
    bs = block_size.c.block_size
    query = powa_getstatdata_sample("db")
    query = query.alias()
    c = query.c
    return (select([
        extract("epoch", c.ts).label("ts"),
        (sum(c.runtime) / greatest(sum(c.calls), 1)).label("avg_runtime"),
        total_read(c),
        total_hit(c)
    ]).where(c.calls != None)
      .group_by(c.ts, bs)
      .order_by(c.ts)
      .params(samples=100))
def build_queryset(repo_id: UUID, stat: str, grouper):
    # TODO(dcramer): put minimum date bounds
    if stat in (
        "builds.aborted",
        "builds.failed",
        "builds.passed",
        "builds.total",
        "builds.duration",
    ):
        if stat == "builds.failed":
            extra_filters = [Build.result == Result.failed]
        elif stat == "builds.passed":
            extra_filters = [Build.result == Result.passed]
        elif stat == "builds.aborted":
            extra_filters = [Build.result == Result.aborted]
        else:
            extra_filters = [Build.status == Status.finished]
        if stat == "builds.duration":
            value = func.avg((extract("epoch", Build.date_finished) -
                              extract("epoch", Build.date_started)) * 1000)
            extra_filters.append(Build.result == Result.passed)
        else:
            value = func.count(Build.id)
        queryset = (db.session.query(
            grouper.label("grouper"), value.label("value")).filter(
            Build.repository_id == repo_id,
            *extra_filters).group_by("grouper"))
    else:
        queryset = (db.session.query(
            grouper.label("grouper"),
            func.avg(ItemStat.value).label("value")).filter(
            ItemStat.item_id == Build.id,
            ItemStat.name == stat,
            Build.repository_id == repo_id,
            Build.result == Result.passed,
        ).group_by("grouper"))
    return queryset
def day_time_facebook(obj):
    if not obj['day']:
        return jsonify({'error': 'enter day of week'})
    competitor_list = [i.id for i in Channel.query.filter_by(
        type=1, influencer=False).all()]
    hours_of_day = (split_day_by_hour(obj['hour_period'])
                    if obj['hour_period'] else split_day_by_hour('2'))
    time_obj_list = []
    for i in hours_of_day:
        filters = ((Post.page_id.in_(competitor_list)),)
        if obj['id']:
            filters += (Post.page_id == obj['id'],)
        if obj['type']:
            filters += (Post.post_type == obj['type'],)
        if obj['date_from'] and obj['date_to']:
            filters += (Post.post_created_time >= get_local_time(int(obj['date_from'])),
                        Post.post_created_time <= get_local_time(int(obj['date_to'])),)
        filters += (extract('dow', Post.post_created_time) == obj['day'],)
        filters += (extract('hour', Post.post_created_time) >= i[0],
                    extract('hour', Post.post_created_time) < i[1])
        sortby = ('post_' + obj['sortby'] + ' desc') if obj['sortby'] else ''
        post_filter = Post.query.filter(*filters).order_by(sortby).all()
        time_obj_list.append(Time(str(i[0]) + '-' + str(i[1]), post_filter))
    object_info = DayTime(obj['day'], time_obj_list)
    result = day_time_schema.dump(object_info)
    return jsonify(result)
def get_analytics(year):
    data = []
    for fin in db.session.query(
            extract('month', Finance.date),
            func.sum(Finance.amount),
            Finance.category,
            Finance.type,
    ).filter(
        extract('year', Finance.date) == year
    ).group_by(
        extract('month', Finance.date),
        Finance.category,
        Finance.type,
    ).order_by(extract('month', Finance.date)).all():
        data.append({
            'month': fin[0],
            'amount': fin[1],
            'category': fin[2],
            'type': fin[3],
        })
    return data
def show_chart_fix_profit(user_id: int, pfl_id: int, pfl_type: str):
    if pfl_type == 'broker':
        cur_moex_profit = db.session.query(
            CurrencyMoexProfit.portfolio_id,
            extract('year', CurrencyMoexProfit.timestamp),
            func.sum(CurrencyMoexProfit.value).label('cur_moex_profit')).join(
            CurrencyMoexProfit.portfolio).filter(
            Portfolio.user_id == user_id,
            Portfolio.id == pfl_id,
        ).group_by(CurrencyMoexProfit.portfolio_id,
                   extract('year', CurrencyMoexProfit.timestamp)).subquery()
        profit = db.session.query(
            extract('year', SecuritiesProfit.timestamp),
            func.sum(SecuritiesProfit.value) +
            func.coalesce(cur_moex_profit.c.cur_moex_profit, 0)).join(
            SecuritiesProfit.portfolio).outerjoin(
            cur_moex_profit,
            Portfolio.id == cur_moex_profit.c.portfolio_id).filter(
            Portfolio.user_id == user_id,
            Portfolio.id == pfl_id,
        ).group_by(extract('year', SecuritiesProfit.timestamp),
                   cur_moex_profit.c.cur_moex_profit).order_by(
            extract('year', SecuritiesProfit.timestamp)).all()
    if pfl_type == 'cash':
        profit = db.session.query(
            extract('year', CurrencyProfit.timestamp),
            func.sum(CurrencyProfit.value)).join(
            CurrencyProfit.portfolio).filter(
            Portfolio.user_id == user_id,
            Portfolio.id == pfl_id,
        ).group_by(extract('year', CurrencyProfit.timestamp)).order_by(
            extract('year', CurrencyProfit.timestamp)).all()
    labels = []
    values = []
    profit_sum = 0
    for portfolio in profit:
        year, value = portfolio
        labels.append(str(year).split('.')[0])
        values.append(value)
        profit_sum += value
    return labels, values, profit_sum
def precipitation_V2():
    # Return a list of daily precipitation information
    # Query all precipitation for last year - 2017
    results = session.query(func.date(Measures.date), Measures.tobs).\
        filter(extract('year', Measures.date) == 2017).\
        order_by(Measures.date.desc()).all()
    rec_list = list(np.ravel(results))
    print(rec_list)
    return jsonify(rec_list)
def campaigns_overall():
    start = request.values.get('start')
    end = request.values.get('end')
    timespan = request.values.get('timespan', 'day')
    if timespan not in API_TIMESPANS.keys():
        abort(400, 'timespan should be one of %s' % ','.join(API_TIMESPANS))
    else:
        timespan_strf = API_TIMESPANS[timespan]
    timespan_extract = extract(timespan, Call.timestamp).label(timespan)
    query = (db.session.query(
        func.min(Call.timestamp.label('date')),
        Call.campaign_id,
        timespan_extract,
        func.count(distinct(Call.id)).label('calls_count'))
        .group_by(Call.campaign_id)
        .group_by(timespan_extract)
        .order_by(timespan))
    completed_query = db.session.query(Call.timestamp, Call.id).filter_by(status='completed')
    if start:
        try:
            startDate = dateutil.parser.parse(start)
        except ValueError:
            abort(400, 'start should be in isostring format')
        query = query.filter(Call.timestamp >= startDate)
        completed_query = completed_query.filter(Call.timestamp >= startDate)
    if end:
        try:
            endDate = dateutil.parser.parse(end)
            if endDate < startDate:
                abort(400, 'end should be after start')
            if endDate == startDate:
                endDate = startDate + timedelta(days=1)
        except ValueError:
            abort(400, 'end should be in isostring format')
        query = query.filter(Call.timestamp <= endDate)
        completed_query = completed_query.filter(Call.timestamp <= endDate)
    dates = defaultdict(dict)
    for (date, campaign_id, timespan, count) in query.all():
        date_string = date.strftime(timespan_strf)
        dates[date_string][int(campaign_id)] = count
    sorted_dates = OrderedDict(sorted(dates.items()))
    meta = {'calls_completed': completed_query.count()}
    return jsonify({'meta': meta, 'objects': sorted_dates})
def make_grouping(cls, logger, grouping_info):
    group_type, group_args, group_name = grouping_info
    grouping = None
    if group_type == "extract":
        subfield, field_name = group_args
        real_field = getattr(cls, field_name, None)
        if real_field:
            if subfield == 'ampm':
                grouping = case(
                    whens=[(cast(extract('hour', real_field), Integer()) > 12, 1)],
                    else_=0).label(group_name)
            else:
                grouping = cast(extract(subfield, real_field),
                                Integer()).label(group_name)
        else:
            logger.error("Invalid grouping %s (%s)", grouping_info, cls)
    elif group_type == "date_trunc":
        subfield, field_name = group_args
        real_field = getattr(cls, field_name, None)
        if real_field:
            grouping = func.date_trunc(subfield, real_field)
            grouping = grouping.label(group_name)
        else:
            logger.error("Invalid grouping %s (%s)", grouping_info, cls)
    elif group_type == "func":
        logger.error("Grouping by func not implemented yet")
    elif group_type == 'coalesce_trim':
        # trim(coalesce(field_name, ''))
        field_name = group_args.get('field', group_name)
        real_field = getattr(cls, field_name, None)
        if real_field:
            grouping = func.coalesce(real_field, group_args.get('coalesce_to', ''))
            if group_args.get('trim', True):
                grouping = func.trim(grouping)
            grouping = grouping.label(group_name)
        else:
            logger.error("Invalid grouping %s (%s)", grouping_info, cls)
    else:
        logger.error("Unknown grouping type %s", group_type)
    return grouping
def getEventsOnDay(self, day):
    """
    Return events on the specified day that have not been processed.
    """
    query = self.session.query(Event).filter(
        extract('year', Event.time) == day.year
    ).filter(
        extract('month', Event.time) == day.month
    ).filter(
        extract('day', Event.time) == day.day
    ).filter(
        Event.tweeted == False
    )
    nextEventDate = None
    nextEvent = self.session.query(Event).filter(Event.time > day).first()
    if nextEvent is not None:
        nextEventDate = nextEvent.time
    message = ('Found %s events to tweet today. There are %s events in my '
               'database, %s of which have already been tweeted. Next event '
               'is on %s') % (
        query.count(),
        self.session.query(Event).count(),
        self.session.query(Event).filter(Event.tweeted == True).count(),
        nextEventDate)
    logging.info(message)
    return [meetupClient.Event.fromJson(json.loads(event.jsonData))
            for event in query.all()]
def total_on_week_by_month():
    form = TotalOnWeekByMonthForm(request.form)
    # set all months as options of the SELECT element on the form
    form.months.choices = get_all_months_as_dictionary()
    # handle the POST action
    if form.validate_on_submit():
        date_selected = DateHelper.generated_id_by_month_year_to_date(form.months.data)
        # get the total exercises the user has done per week in the selected month
        results = db.session.query(extract('week', Exercise.date).label('week'),
                                   func.count(Exercise.date).label('total'))\
            .group_by(extract('week', Exercise.date))\
            .order_by('week')\
            .filter(extract('month', Exercise.date) == date_selected.month)\
            .filter(extract('year', Exercise.date) == date_selected.year)\
            .filter(Exercise.user_id == g.user.id)\
            .all()
        # convert the result list to a dictionary
        data = {('Week %i of the year' % week): str(total)
                for (week, total) in results}
        return render_template('exercises/total_on_week_by_month.html',
                               form=form, data=data)
    return render_template('exercises/total_on_week_by_month.html', form=form)
def get_all_months_as_dictionary():
    # get all months in which the user has done exercises
    all_months = db.session.query(label('year', extract('year', Exercise.date)),
                                  label('month', extract('month', Exercise.date)))\
        .group_by('year', 'month')\
        .order_by('year desc, month desc')\
        .filter(Exercise.user_id == g.user.id)\
        .all()
    # convert the result rows to a list of dates
    all_months_as_date = [DateHelper.string_to_date('%i/%i/1' % (year, month))
                          for (year, month) in all_months]
    # convert the dates to (id, label) choices for the form
    return [(DateHelper.generate_id_by_month_year(item),
             DateHelper.date_to_year_month_string(item))
            for item in all_months_as_date]
def add_to_query(self, query, value=None, op_num=None, negate=False,
                 formatter=None):
    no_filter = op_num is None
    if self.tree_rank is None and self.get_field() is None:
        query, orm_field = query.objectformatter.objformat(
            query, getattr(models, self.root_table.name), None)
        no_filter = True
    elif self.is_relationship():
        # will be formatting or aggregating related objects
        if self.get_field().type == 'many-to-one':
            query, orm_model, table, field = self.build_join(query, self.join_path)
            query, orm_field = query.objectformatter.objformat(
                query, orm_model, formatter)
        else:
            query, orm_model, table, field = self.build_join(
                query, self.join_path[:-1])
            orm_field = query.objectformatter.aggregate(
                query, self.get_field(), orm_model, formatter)
    else:
        query, orm_model, table, field = self.build_join(query, self.join_path)
        if self.tree_rank is not None:
            query, orm_field = query.handle_tree_field(
                orm_model, table, self.tree_rank, self.tree_field)
        else:
            orm_field = getattr(orm_model, self.get_field().name)
            if field.type == "java.sql.Timestamp":
                # Only consider the date portion of timestamp fields.
                # This is to replicate the behavior of Sp6. It might
                # make sense to condition this on whether there is a
                # time component in the input value.
                orm_field = sql.func.DATE(orm_field)
            if field.is_temporal() and self.date_part != "Full Date":
                orm_field = sql.extract(self.date_part, orm_field)
    if not no_filter:
        if isinstance(value, QueryFieldSpec):
            _, other_field = value.add_to_query(query.reset_joinpoint())
            uiformatter = None
            value = other_field
        else:
            uiformatter = field and get_uiformatter(
                query.collection, table.name, field.name)
            value = value
        op = QueryOps(uiformatter).by_op_num(op_num)
        f = op(orm_field, value)
        query = query.filter(sql.not_(f) if negate else f)
    query = query.reset_joinpoint()
    return query, orm_field
def precipitation():
    # Return a list of daily precipitation information
    # Query all precipitation for last year - 2017
    results = session.query(func.date(Measures.date), Measures.tobs).\
        filter(extract('year', Measures.date) == 2017).\
        order_by(Measures.date.desc()).all()
    rec_list = list(np.ravel(results))
    d = []
    for key, value in results:
        d.append({'date': key, 'tobs': value})
    return jsonify(d)
def query(self):
    # Fetch the base query for sample, and filter them on the database
    bs = block_size.c.block_size
    subquery = powa_getstatdata_sample("db")
    # Put the where clause inside the subquery
    subquery = subquery.where(column("datname") == bindparam("database"))
    query = subquery.alias()
    c = query.c
    return (select([
        to_epoch(c.ts),
        (sum(c.runtime) / greatest(sum(c.calls), 1.)).label("avg_runtime"),
        (sum(c.runtime) / greatest(extract("epoch", c.mesure_interval), 1)).label("load"),
        total_read(c),
        total_hit(c)
    ]).where(c.calls != None)
      .group_by(c.ts, bs, c.mesure_interval)
      .order_by(c.ts)
      .params(samples=100))
def get_exercises_by_month(date_search):
    date_selected = DateHelper.generated_id_by_month_year_to_date(date_search)
    user_has_exercises = db.session.query(
        exists().where(Exercise.user_id == g.user.id)).scalar()
    if not user_has_exercises:
        flash('You don\'t have any exercises registered')
        return []
    exercises = db.session.query(Exercise)\
        .order_by(Exercise.date)\
        .filter(
            Exercise.user_id == g.user.id,
            extract('month', Exercise.date) == date_selected.month,
        ).all()
    return exercises
def generate_chart(operation, title, x_axis_label, y_axis_label):
    now = date.today()
    past = now + relativedelta(months=-48)
    data_x = []
    data_y = {}
    traces = []
    # Use months as a common iterator across all the data
    for dt in rrule.rrule(rrule.MONTHLY, dtstart=past, until=now):
        print(dt.month, dt.year)
        # Store which month we are working on
        data_x.append(dt)
        # Then store all the data from that month
        for country in config[operation]:
            # Get all the items for the matching month and year
            q = db.session.query(quandl) \
                .filter(extract('month', quandl.date) == dt.month) \
                .filter(extract('year', quandl.date) == dt.year) \
                .filter(quandl.code == config[operation][country]).all()
            # Initialize the dictionary if it doesn't exist yet and add the data
            for item in q:
                if item.code not in data_y:
                    data_y[item.code] = []
                # Check and see if the data exists, and if it does append it to the list
                if item.value:
                    data_y[item.code].append(item.value)
                else:
                    data_y[item.code].append([])
    # Each "key" is a code (JODI/OIL_CRIMKB_CHN); look up its display name
    # (get_country) and add it to traces
    for key in data_y:
        traces.append(go.Scatter(x=data_x, y=data_y[key],
                                 name=get_country(key, operation)))
    layout = dict(title=title,
                  xaxis=dict(title=x_axis_label),
                  yaxis=dict(title=y_axis_label))
    fig = dict(data=traces, layout=layout)
    py.plot(fig, filename=operation, auto_open=False)
def getEventsBetweenDaysInclusive(self, start, end):
    """
    Return any events between the start and end days inclusive.
    """
    # Filter on the timestamp range directly; comparing year, month and day
    # components independently breaks across month and year boundaries
    # (e.g. Jan 31 .. Feb 2 would match nothing).
    # Assumes `from datetime import timedelta` is available.
    query = self.session.query(Event).filter(
        Event.time >= start
    ).filter(
        Event.time < end + timedelta(days=1)
    )
    return [meetupClient.Event.fromJson(json.loads(event.jsonData))
            for event in query.all()]
def get_dashboard_list(year, month):
    try:
        claimlist = Claim.query.join(User, User.id == Claim.user_id) \
            .add_columns(User.rkp_name) \
            .filter(Claim.isactive == 1) \
            .filter(extract('year', Claim.claim_date) == year) \
            .filter(extract('month', Claim.claim_date) == month,
                    User.isActive == 1).all()
        return_list = []
        if claimlist:
            for obj in claimlist:
                send_data = {}
                send_data['rkp_name'] = obj.rkp_name
                for field in [x for x in dir(obj.Claim)
                              if not x.startswith('_') and x != 'metadata']:
                    if field != 'query' and field != 'query_class':
                        send_data[field] = str(obj.Claim.__getattribute__(field))
                return_list.append(send_data)
            response_obj = {"ErrorCode": 9999, "data": return_list}
        else:
            response_obj = {"ErrorCode": 9999, "data": return_list}
        return response_obj
    except Exception as e:
        print(e)
def filter_check_expires(engine, query):
    '''
    Add an expiration check filter to the query.

    :param sqlalchemy.engine.base.Engine engine: SQLAlchemy engine object
    :param sqlalchemy.orm.query.Query query: SQLAlchemy query object
    :return: the query object with the filter applied

    The query passed as ``query`` is assumed to already SELECT
    ``SessionState`` and JOIN ``SystemData``.
    '''
    if engine.name == 'postgresql':
        query = query.filter(
            extract('epoch', functions.now() - SessionState.access_dt) <
            SystemData.session_expires_minutes * 60)
    elif engine.name == 'mysql':
        query = query.filter(
            func.timestampdiff(text('minute'), SessionState.access_dt,
                               functions.now()) <
            SystemData.session_expires_minutes)
    elif engine.name == 'mssql':
        # memo: does MSSQL even support functions.now()? It probably needs
        # func.getdate() or similar...
        query = query.filter(
            func.dateadd(text('minute'), SystemData.session_expires_minutes,
                         SessionState.access_dt) > functions.now())
    elif engine.name == 'oracle':
        # memo: Oracle likely also needs func.sysdate() or similar rather
        # than functions.now()...
        query = query.filter(
            SessionState.access_dt +
            SystemData.session_expires_minutes / 1440 > functions.now())
    else:
        raise NotImplementedError("{} is not supported".format(engine.name))
    return query
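# A minimal usage sketch for filter_check_expires above, assuming a session
# factory `db_session` bound to `engine` and a base query shaped as the
# docstring requires (both names are stand-ins):
base = db_session.query(SessionState).join(SystemData)
active_sessions = filter_check_expires(engine, base).all()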
def sum_per_sec(col):
    # per-second rate of a summed counter column; `c` (the sampled-subquery
    # columns) comes from the enclosing scope
    ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
    return (sum(col) / ts).label(col.name)
def wps(col):
    # per-second rate of a plain counter column; `c` comes from the
    # enclosing scope
    ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
    return (col / ts).label(col.name)
def bps(col):
    # per-second byte rate; mulblock scales block counts by the block size,
    # and `c` comes from the enclosing scope
    ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
    return (mulblock(col) / ts).label(col.name)
def get_report_by_date(cls, date, municipality):
    return DBSession.query(SiteReport)\
        .filter(and_(
            extract('month', SiteReport.report_date) == date.month,
            extract('year', SiteReport.report_date) == date.year,
            SiteReport.municipality == municipality)).one()
""" Utilities for commonly used SQL constructs. """ import re from sqlalchemy.sql import text, select, func, case, column, extract, cast, bindparam from sqlalchemy.types import Numeric from collections import namedtuple TOTAL_MEASURE_INTERVAL = """ extract( epoch from CASE WHEN min(total_mesure_interval) = '0 second' THEN '1 second'::interval ELSE min(total_mesure_interval) END) """ def format_jumbled_query(sql, params): it = iter(params) try: sql = re.sub("\?", lambda val: next(it), sql) except StopIteration: pass return sql RESOLVE_OPNAME = text(""" SELECT json_object_agg(oid, value) FROM ( SELECT pg_operator.oid, json_build_object( 'name', oprname, 'indexams', array_agg(distinct pg_am.oid),
def getstatdata_sample(request, mode, start, end, dbid=None, queryid=None,
                       userid=None):
    if mode == 'instance':
        base_query = BASE_QUERY_STATDATA_SAMPLE_INSTANCE
    elif mode == "db":
        base_query = BASE_QUERY_STATDATA_SAMPLE_DATABASE
    elif mode == "query":
        base_query = BASE_QUERY_STATDATA_SAMPLE_QUERY
    ts = column('ts')
    biggest = Biggest(ts)
    biggestsum = Biggestsum(ts)
    subquery = (select([
        ts,
        biggest("ts", '0 s', "mesure_interval"),
        biggestsum("calls"),
        biggestsum("total_exec_time", label="runtime"),
        biggestsum("rows"),
        biggestsum("shared_blks_read"),
        biggestsum("shared_blks_hit"),
        biggestsum("shared_blks_dirtied"),
        biggestsum("shared_blks_written"),
        biggestsum("local_blks_read"),
        biggestsum("local_blks_hit"),
        biggestsum("local_blks_dirtied"),
        biggestsum("local_blks_written"),
        biggestsum("temp_blks_read"),
        biggestsum("temp_blks_written"),
        biggestsum("blk_read_time"),
        biggestsum("blk_write_time")
    ]).select_from(base_query).apply_labels().group_by(*([ts])))
    subquery = subquery.alias()
    c = subquery.c
    greatest = func.greatest
    cols = [
        to_epoch(c.ts),
        (old_div(func.sum(c.calls),
                 greatest(extract("epoch", c.mesure_interval), 1))).label("calls"),
        (old_div(func.sum(c.runtime),
                 greatest(func.sum(c.calls), 1.))).label("avg_runtime"),
        (old_div(func.sum(c.runtime),
                 greatest(extract("epoch", c.mesure_interval), 1))).label("load"),
        total_read(c),
        total_hit(c)
    ]
    query = (select(cols).select_from(subquery).where(c.calls != 0)
             .group_by(c.ts, c.mesure_interval).order_by(c.ts))
    params = dict(agent_address=request.instance.agent_address,
                  agent_port=request.instance.agent_port,
                  samples=50, start=start, end=end)
    if mode == 'db' or mode == 'query':
        params['dbid'] = dbid
    if mode == 'query':
        params['queryid'] = queryid
        params['userid'] = userid
    rows = request.db_session.execute(query, params).fetchall()
    return [dict(row) for row in rows]
def month_total(cls, date):
    """Query total amount for a month of a given date."""
    return db_session.query(func.sum(cls.amount)).filter(
        (extract('year', cls.date) == date.year) &
        (extract('month', cls.date) == date.month)
    ).scalar()
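# A minimal usage sketch for month_total above, assuming it is a classmethod
# on the Entry model used in the neighbouring snippets; func.sum yields NULL
# (None) when the month has no rows, hence the `or 0`:
from datetime import date
april_total = Entry.month_total(date(2021, 4, 15)) or 0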
def wrapped_fn(*args, start=None, end=None, dow_start=None, dow_end=None,
               weekend_only=False, weekday_only=False,
               business_hours_only=False, evening_hours_only=False,
               latenight_hours_only=False, ugos_closed_hours=False,
               **kwargs):
    # `fn_being_decorated` and `label` come from the enclosing decorator scope
    print(args)
    print(kwargs)
    print("&" * 100)
    r = fn_being_decorated(*args, **kwargs)
    if start:
        r = r.filter(event.Event.timestamp >= start.replace(tzinfo=None))
    if end:
        r = r.filter(event.Event.timestamp < end.replace(tzinfo=None))
    # n.b. this is a postgres function we're calling here
    # The day of the week (0 - 6; Sunday is 0) (for timestamp values only)
    # n0 m1 t2 w3 h4 f5 s6
    if dow_start:
        r = r.filter(extract('dow', event.Event.timestamp) >= dow_start)
    if dow_end:
        r = r.filter(extract('dow', event.Event.timestamp) < dow_end)
    if weekend_only:
        r = r.filter(or_(
            extract('dow', event.Event.timestamp) == 0,
            extract('dow', event.Event.timestamp) == 6
        ))
    if weekday_only:
        r = r.filter(extract('dow', event.Event.timestamp) > 0)
        r = r.filter(extract('dow', event.Event.timestamp) < 6)
    if business_hours_only:
        r = r.filter(extract('hour', event.Event.timestamp) >= 8)
        r = r.filter(extract('hour', event.Event.timestamp) < 17)
    if evening_hours_only:
        r = r.filter(extract('hour', event.Event.timestamp) >= 17)
    if latenight_hours_only:
        r = r.filter(extract('hour', event.Event.timestamp) < 8)
    if ugos_closed_hours:
        r = r.filter(or_(
            # m-th 8-mid
            and_(
                extract('hour', event.Event.timestamp) < 8,   # 8-mid
                extract('dow', event.Event.timestamp) > 0,    # no sunday
                extract('dow', event.Event.timestamp) < 5,    # no fri/sat
            ),
            # fr 8-8pm
            and_(
                or_(
                    extract('hour', event.Event.timestamp) < 8,    # before open
                    extract('hour', event.Event.timestamp) >= 20,  # after close
                ),
                extract('dow', event.Event.timestamp) == 5,  # friday
            ),
            # sat noon-5pm
            and_(
                or_(
                    extract('hour', event.Event.timestamp) < 12,   # before open
                    extract('hour', event.Event.timestamp) >= 17,  # after close
                ),
                extract('dow', event.Event.timestamp) == 6,  # saturday
            ),
            # sun 3pm-11pm
            and_(
                or_(
                    extract('hour', event.Event.timestamp) < 15,   # before open
                    extract('hour', event.Event.timestamp) >= 23,  # after close
                ),
                extract('dow', event.Event.timestamp) == 0,  # sunday
            ),
        ))
    if label:
        return getattr(r.one(), label) or Decimal(0.0)
    else:
        return r.one() or Decimal(0.0)
def to_epoch(column):
    return extract("epoch", column).label(column.name)
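# to_epoch converts a timestamp column to Unix-epoch seconds while keeping
# the column's name, as used by the query() snippets above. A sketch against
# a hypothetical ad-hoc table:
from sqlalchemy import column, select, table
events = table('events', column('ts'))
stmt = select([to_epoch(events.c.ts)])  # SELECT extract(epoch FROM events.ts) AS ts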
def total_measure_interval(column):
    return extract(
        "epoch",
        case([(min(column) == '0 second', '1 second')],
             else_=min(column)))
def meLog(urlGroup, year, month, day):
    if 'username' not in session:
        return redirect(url_for('Login'))
    # might need to change this to allow headless data posting from external
    # apps (Matlab, ALH etc)
    if request.method == 'POST':
        # TODO: need some error checking on these inputs
        author = request.form['text-author']
        title = request.form['text-title']
        date = request.form['text-date']
        time = request.form['text-time']
        text = request.form['text-edit']
        # Convert the date and time strings into a datetime object
        strDateTime = "%s %s" % (date, time)
        dtDateTime = datetime.strptime(strDateTime, "%Y-%m-%d %H:%M:%S")  # 2014-10-20 21:52:07
        # Need the group_id to update the elog groups table
        tmpGroup = ElogGroupData.query.filter(ElogGroupData.urlName == urlGroup).first_or_404()
        groupNum = tmpGroup.group_id
        # create the db object to write to db
        entry = ElogData(title=title, author=author, created=dtDateTime,
                         text=text, read_only=0)
        db.session.add(entry)
        # flush the session to get the primary key of the yet to be inserted ElogData
        db.session.flush()
        groups = ElogGroups(entry_id=entry.entry_id, group_id=groupNum)
        db.session.add(groups)
        db.session.commit()  # now write the changes to the db
        return redirect("/%s/%s/%s/%s/" % (urlGroup, year, month, day))
    else:
        # required for backwards compatibility with sol2 - Is this needed?
        if request.args.get('y'):
            year = request.args.get('y')
        if request.args.get('m'):
            month = request.args.get('m')
        if request.args.get('d'):
            day = request.args.get('d')
        # get the current date and time
        time = datetime.now().time().strftime("%H:%M:%S")
        date = datetime.now().date()
        defaultDate = False
        defaultYear = False
        defaultMonth = False
        defaultDay = False
        # If year not specified use current
        if not year:
            year = date.year
            defaultDate = True
            defaultYear = True
        # If month not specified use current
        if not month:
            month = date.month
            defaultDate = True
            defaultMonth = True
        # If day not specified use current
        if not day:
            day = date.day
            defaultDate = True
            defaultDay = True
        # use the urlGroup var if specified
        if urlGroup != None:
            tmpGroup = ElogGroupData.query.filter(ElogGroupData.urlName == urlGroup).first_or_404()
            group = tmpGroup.group_title
            groupNum = tmpGroup.group_id
        # else load the default
        else:
            group = session['group']
            tmpGroup = ElogGroupData.query.filter(ElogGroupData.group_title == group).first_or_404()
            urlGroup = tmpGroup.urlName
            groupNum = tmpGroup.group_id
        # Get a list of available groups for select menu display
        groups = ElogGroupData.query.filter(ElogGroupData.private == 0).order_by(ElogGroupData.sort)
        if defaultDate:
            if defaultYear:
                # Get the relevant log entries for the group (if no date is
                # specified, today's date is the default)
                eLogJoin = ElogData.query.join(ElogGroups, (ElogGroups.entry_id == ElogData.entry_id)) \
                    .filter(ElogData.created > date, ElogGroups.group_id == groupNum) \
                    .from_self() \
                    .order_by(ElogData.created.desc())
            elif defaultMonth:
                # use supplied year var and defaults for the rest
                eLogJoin = ElogData.query.join(ElogGroups, (ElogGroups.entry_id == ElogData.entry_id)) \
                    .filter(ElogGroups.group_id == groupNum) \
                    .filter(extract('year', ElogData.created) == year) \
                    .from_self() \
                    .order_by(ElogData.created.desc())
            elif defaultDay:
                # use supplied year and month vars and defaults for the rest
                eLogJoin = ElogData.query.join(ElogGroups, (ElogGroups.entry_id == ElogData.entry_id)) \
                    .filter(ElogGroups.group_id == groupNum) \
                    .filter(extract('year', ElogData.created) == year) \
                    .filter(extract('month', ElogData.created) == month) \
                    .from_self() \
                    .order_by(ElogData.created.desc())
        else:
            # get the entries based on the supplied date information
            eLogJoin = ElogData.query.join(ElogGroups, (ElogGroups.entry_id == ElogData.entry_id)) \
                .filter(ElogGroups.group_id == groupNum) \
                .filter(extract('year', ElogData.created) == year) \
                .filter(extract('month', ElogData.created) == month) \
                .filter(extract('day', ElogData.created) == day) \
                .from_self() \
                .order_by(ElogData.created.desc())
        # Reformat the provided date for navigation display
        tmpDate = "%s %s %s" % (year, month, day)
        strDate = datetime.strptime(tmpDate, "%Y %m %d").strftime("%a %d %b %Y")
        return render_template("index.html", year=year, month=month, day=day,
                               timestamp=time, datestamp=date,
                               formatDate=strDate, groups=groups,
                               default_group=group, url_group=urlGroup,
                               elogEntry=eLogJoin)
def process_update_recommendation_scores(payload):
    text_fields = [
        User.hometown,
        User.occupation,
        User.education,
        User.about_me,
        User.my_travels,
        User.things_i_like,
        User.about_place,
        User.additional_information,
        User.pet_details,
        User.kid_details,
        User.housemate_details,
        User.other_host_info,
        User.sleeping_details,
        User.area,
        User.house_rules,
    ]
    home_fields = [User.about_place, User.other_host_info, User.sleeping_details, User.area, User.house_rules]

    def poor_man_gaussian():
        """
        Produces an approximately std normal random variate
        """
        trials = 5
        return (sum([func.random() for _ in range(trials)]) - trials / 2) / sqrt(trials / 12)

    def int_(stmt):
        return func.coalesce(cast(stmt, Integer), 0)

    def float_(stmt):
        return func.coalesce(cast(stmt, Float), 0.0)

    with session_scope() as session:
        # profile
        profile_text = ""
        for field in text_fields:
            profile_text += func.coalesce(field, "")
        text_length = func.length(profile_text)
        home_text = ""
        for field in home_fields:
            home_text += func.coalesce(field, "")
        home_length = func.length(home_text)

        has_text = int_(text_length > 500)
        long_text = int_(text_length > 2000)
        has_pic = int_(User.avatar_key != None)
        can_host = int_(User.hosting_status == HostingStatus.can_host)
        cant_host = int_(User.hosting_status == HostingStatus.cant_host)
        filled_home = int_(User.last_minute != None) * int_(home_length > 200)

        profile_points = 2 * has_text + 3 * long_text + 2 * has_pic + 3 * can_host + 2 * filled_home - 5 * cant_host

        # references
        left_ref_expr = int_(1).label("left_reference")
        left_refs_subquery = (
            select(Reference.from_user_id.label("user_id"), left_ref_expr).group_by(Reference.from_user_id).subquery()
        )
        left_reference = int_(left_refs_subquery.c.left_reference)
        has_reference_expr = int_(func.count(Reference.id) >= 1).label("has_reference")
        ref_count_expr = int_(func.count(Reference.id)).label("ref_count")
        ref_avg_expr = func.avg(1.4 * (Reference.rating - 0.3)).label("ref_avg")
        has_multiple_types_expr = int_(func.count(distinct(Reference.reference_type)) >= 2).label("has_multiple_types")
        has_bad_ref_expr = int_(func.sum(int_((Reference.rating <= 0.2) | (~Reference.was_appropriate))) >= 1).label(
            "has_bad_ref"
        )
        received_ref_subquery = (
            select(
                Reference.to_user_id.label("user_id"),
                has_reference_expr,
                has_multiple_types_expr,
                has_bad_ref_expr,
                ref_count_expr,
                ref_avg_expr,
            )
            .group_by(Reference.to_user_id)
            .subquery()
        )
        has_multiple_types = int_(received_ref_subquery.c.has_multiple_types)
        has_reference = int_(received_ref_subquery.c.has_reference)
        has_bad_reference = int_(received_ref_subquery.c.has_bad_ref)
        rating_score = float_(
            received_ref_subquery.c.ref_avg
            * (
                2 * func.least(received_ref_subquery.c.ref_count, 5)
                + func.greatest(received_ref_subquery.c.ref_count - 5, 0)
            )
        )

        ref_score = 2 * has_reference + has_multiple_types + left_reference - 5 * has_bad_reference + rating_score

        # activeness
        recently_active = int_(User.last_active >= now() - timedelta(days=180))
        very_recently_active = int_(User.last_active >= now() - timedelta(days=14))
        recently_messaged = int_(func.max(Message.time) > now() - timedelta(days=14))
        messaged_lots = int_(func.count(Message.id) > 5)
        messaging_points_subquery = (recently_messaged + messaged_lots).label("messaging_points")
        messaging_subquery = (
            select(Message.author_id.label("user_id"), messaging_points_subquery)
            .where(Message.message_type == MessageType.text)
            .group_by(Message.author_id)
            .subquery()
        )

        activeness_points = recently_active + 2 * very_recently_active + int_(messaging_subquery.c.messaging_points)

        # verification
        phone_verified = int_(User.phone_is_verified)
        cb_subquery = (
            select(ClusterSubscription.user_id.label("user_id"), func.min(Cluster.parent_node_id).label("min_node_id"))
            .join(Cluster, Cluster.id == ClusterSubscription.cluster_id)
            .where(ClusterSubscription.role == ClusterRole.admin)
            .where(Cluster.is_official_cluster)
            .group_by(ClusterSubscription.user_id)
            .subquery()
        )
        min_node_id = cb_subquery.c.min_node_id
        cb = int_(min_node_id >= 1)
        f = int_(User.id <= 2)
        wcb = int_(min_node_id == 1)

        verification_points = 0.0 + 100 * f + 10 * wcb + 5 * cb

        # response rate
        t = (
            select(Message.conversation_id, Message.time)
            .where(Message.message_type == MessageType.chat_created)
            .subquery()
        )
        s = (
            select(Message.conversation_id, Message.author_id, func.min(Message.time).label("time"))
            .group_by(Message.conversation_id, Message.author_id)
            .subquery()
        )
        hr_subquery = (
            select(
                HostRequest.host_user_id.label("user_id"),
                func.avg(s.c.time - t.c.time).label("avg_response_time"),
                func.count(t.c.time).label("received"),
                func.count(s.c.time).label("responded"),
                float_(
                    extract(
                        "epoch",
                        percentile_disc(0.33).within_group(func.coalesce(s.c.time - t.c.time, timedelta(days=1000))),
                    )
                    / 60.0
                ).label("response_time_33p"),
                float_(
                    extract(
                        "epoch",
                        percentile_disc(0.66).within_group(func.coalesce(s.c.time - t.c.time, timedelta(days=1000))),
                    )
                    / 60.0
                ).label("response_time_66p"),
            )
            .join(t, t.c.conversation_id == HostRequest.conversation_id)
            .outerjoin(
                s, and_(s.c.conversation_id == HostRequest.conversation_id, s.c.author_id == HostRequest.host_user_id)
            )
            .group_by(HostRequest.host_user_id)
            .subquery()
        )
        avg_response_time = hr_subquery.c.avg_response_time
        avg_response_time_hr = float_(extract("epoch", avg_response_time) / 60.0)
        received = hr_subquery.c.received
        responded = hr_subquery.c.responded
        response_time_33p = hr_subquery.c.response_time_33p
        response_time_66p = hr_subquery.c.response_time_66p
        response_rate = float_(responded / (1.0 * func.greatest(received, 1)))  # be careful with nulls

        response_rate_points = -10 * int_(response_time_33p > 60 * 48.0) + 5 * int_(response_time_66p < 60 * 48.0)

        recommendation_score = (
            profile_points
            + ref_score
            + activeness_points
            + verification_points
            + response_rate_points
            + 2 * poor_man_gaussian()
        )

        scores = (
            select(User.id.label("user_id"), recommendation_score.label("score"))
            .outerjoin(messaging_subquery, messaging_subquery.c.user_id == User.id)
            .outerjoin(left_refs_subquery, left_refs_subquery.c.user_id == User.id)
            .outerjoin(received_ref_subquery, received_ref_subquery.c.user_id == User.id)
            .outerjoin(cb_subquery, cb_subquery.c.user_id == User.id)
            .outerjoin(hr_subquery, hr_subquery.c.user_id == User.id)
        ).subquery()

        session.execute(
            User.__table__.update().values(recommendation_score=scores.c.score).where(User.id == scores.c.user_id)
        )

    logger.info("Updated recommendation scores")
def query(self):
    # Fetch the base query for sample, and filter them on the database
    bs = block_size.c.block_size
    subquery = powa_getstatdata_sample("db", bindparam("server"))
    # Put the where clause inside the subquery
    subquery = subquery.where(column("datname") == bindparam("database"))
    query = subquery.alias()
    c = query.c
    cols = [
        c.srvid,
        to_epoch(c.ts),
        (sum(c.calls) / greatest(extract("epoch", c.mesure_interval), 1)).label("calls"),
        (sum(c.runtime) / greatest(sum(c.calls), 1.)).label("avg_runtime"),
        (sum(c.runtime) / greatest(extract("epoch", c.mesure_interval), 1)).label("load"),
        total_read(c),
        total_hit(c)
    ]
    from_clause = query
    if self.has_extension(self.path_args[0], "pg_stat_kcache"):
        # Add system metrics from pg_stat_kcache
        kcache_query = kcache_getstatdata_sample("db")
        kc = inner_cc(kcache_query)
        kcache_query = (kcache_query.where(
            (kc.srvid == bindparam("server")) &
            (kc.datname == bindparam("database"))).alias())
        kc = kcache_query.c

        def sum_per_sec(col):
            ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
            return (sum(col) / ts).label(col.name)

        total_sys_hit = (total_read(c) - sum(kc.reads) /
                         greatest(extract("epoch", c.mesure_interval), 1.)
                         ).label("total_sys_hit")
        total_disk_read = (sum(kc.reads) /
                           greatest(extract("epoch", c.mesure_interval), 1.)
                           ).label("total_disk_read")
        minflts = sum_per_sec(kc.minflts)
        majflts = sum_per_sec(kc.majflts)
        # nswaps = sum_per_sec(kc.nswaps)
        # msgsnds = sum_per_sec(kc.msgsnds)
        # msgrcvs = sum_per_sec(kc.msgrcvs)
        # nsignals = sum_per_sec(kc.nsignals)
        nvcsws = sum_per_sec(kc.nvcsws)
        nivcsws = sum_per_sec(kc.nivcsws)
        cols.extend([
            total_sys_hit, total_disk_read, minflts, majflts,
            # nswaps, msgsnds, msgrcvs, nsignals,
            nvcsws, nivcsws
        ])
        from_clause = from_clause.join(kcache_query,
                                       kcache_query.c.ts == c.ts)
    # `!= None` renders as SQL "IS NOT NULL"; Python's `is not` cannot be
    # overloaded by SQLAlchemy and would always evaluate truthy here.
    return (select(cols).select_from(from_clause)
            .where(c.calls != None)
            .group_by(c.srvid, c.ts, bs, c.mesure_interval)
            .order_by(c.ts).params(samples=100))
def query_month(cls, date):
    """Query all entries for a month of a given date."""
    return cls.query.options(subqueryload(cls.tags)).filter(
        (extract('year', cls.date) == date.year) &
        (extract('month', cls.date) == date.month)
    ).order_by(cls.date)
def partial_extract(column):
    # `time_slice` (e.g. 'hour', 'dow') comes from the enclosing scope
    return extract(time_slice, column)
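# A sketch of how partial_extract might be wired up: time_slice is a free
# variable taken from the enclosing scope (the Event model and session here
# are hypothetical):
time_slice = 'hour'
counts = (session.query(partial_extract(Event.timestamp).label('slice'),
                        func.count(Event.id))
          .group_by('slice').all())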
class DjangoQueryMixin(object):
    """Can be mixed into any Query class of SQLAlchemy and extends it to
    implement more Django-like behavior:

    -   `filter_by` supports implicit joining and subitem accessing with
        double underscores.
    -   `exclude_by` works like `filter_by` except that every expression is
        automatically negated.
    -   `order_by` supports ordering by field name with an optional `-`
        in front.
    """
    _underscore_operators = {
        'gt': operators.gt,
        'lte': operators.le,
        'gte': operators.ge,
        'le': operators.le,
        'contains': operators.contains_op,
        'in': operators.in_op,
        'exact': operators.eq,
        'iexact': operators.ilike_op,
        'startswith': operators.startswith_op,
        'istartswith': lambda c, x: c.ilike(x.replace('%', '%%') + '%'),
        'iendswith': lambda c, x: c.ilike('%' + x.replace('%', '%%')),
        'endswith': operators.endswith_op,
        'isnull': lambda c, x: x and c != None or c == None,
        'range': operators.between_op,
        'year': lambda c, x: extract('year', c) == x,
        'month': lambda c, x: extract('month', c) == x,
        'day': lambda c, x: extract('day', c) == x
    }

    def filter_by(self, **kwargs):
        return self._filter_or_exclude(False, kwargs)

    def exclude_by(self, **kwargs):
        return self._filter_or_exclude(True, kwargs)

    def select_related(self, *columns, **options):
        depth = options.pop('depth', None)
        if options:
            raise TypeError('Unexpected argument %r' % iter(options).next())
        if depth not in (None, 1):
            raise TypeError('Depth can only be 1 or None currently')
        need_all = depth is None
        columns = list(columns)
        for idx, column in enumerate(columns):
            column = column.replace('__', '.')
            if '.' in column:
                need_all = True
            columns[idx] = column
        func = (need_all and joinedload_all or joinedload)
        return self.options(func(*columns))

    def order_by(self, *args):
        args = list(args)
        joins_needed = []
        for idx, arg in enumerate(args):
            q = self
            if not isinstance(arg, basestring):
                continue
            if arg[0] in '+-':
                desc = arg[0] == '-'
                arg = arg[1:]
            else:
                desc = False
            q = self
            column = None
            for token in arg.split('__'):
                column = _entity_descriptor(q._joinpoint_zero(), token)
                if column.impl.uses_objects:
                    q = q.join(column)
                    joins_needed.append(column)
                    column = None
            if column is None:
                raise ValueError('Tried to order by table, column expected')
            if desc:
                column = column.desc()
            args[idx] = column
        q = super(DjangoQueryMixin, self).order_by(*args)
        for join in joins_needed:
            q = q.join(join)
        return q

    def _filter_or_exclude(self, negate, kwargs):
        q = self
        negate_if = lambda expr: expr if not negate else ~expr
        column = None
        for arg, value in kwargs.iteritems():
            for token in arg.split('__'):
                if column is None:
                    column = _entity_descriptor(q._joinpoint_zero(), token)
                    if column.impl.uses_objects:
                        q = q.join(column)
                        column = None
                elif token in self._underscore_operators:
                    op = self._underscore_operators[token]
                    q = q.filter(negate_if(op(column, *to_list(value))))
                    column = None
                else:
                    raise ValueError('No idea what to do with %r' % token)
            if column is not None:
                q = q.filter(negate_if(column == value))
                column = None
        q = q.reset_joinpoint()
        return q
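# A minimal usage sketch for DjangoQueryMixin, assuming a declarative Post
# model with a pub_date column and an already-configured `engine` (both are
# hypothetical stand-ins):
from sqlalchemy.orm import Query, sessionmaker

class DjangoQuery(DjangoQueryMixin, Query):
    pass

Session = sessionmaker(bind=engine, query_cls=DjangoQuery)
posts_2010 = Session().query(Post).filter_by(pub_date__year=2010).all()
recent_first = Session().query(Post).order_by('-pub_date').all()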
class SmartQueryMixin(InspectionMixin, EagerLoadMixin):
    __abstract__ = True

    _operators = {
        'isnull': lambda c, v: (c == None) if v else (c != None),
        'exact': operators.eq,
        'ne': operators.ne,  # not equal or is not (for None)
        'gt': operators.gt,  # greater than, >
        'ge': operators.ge,  # greater than or equal, >=
        'lt': operators.lt,  # lower than, <
        'le': operators.le,  # lower than or equal, <=

        'in': operators.in_op,
        'notin': operators.notin_op,
        'between': lambda c, v: c.between(v[0], v[1]),

        'like': operators.like_op,
        'ilike': operators.ilike_op,
        'startswith': operators.startswith_op,
        'istartswith': lambda c, v: c.ilike(v + '%'),
        'endswith': operators.endswith_op,
        'iendswith': lambda c, v: c.ilike('%' + v),
        'contains': lambda c, v: c.ilike('%{v}%'.format(v=v)),

        'year': lambda c, v: extract('year', c) == v,
        'year_ne': lambda c, v: extract('year', c) != v,
        'year_gt': lambda c, v: extract('year', c) > v,
        'year_ge': lambda c, v: extract('year', c) >= v,
        'year_lt': lambda c, v: extract('year', c) < v,
        'year_le': lambda c, v: extract('year', c) <= v,

        'month': lambda c, v: extract('month', c) == v,
        'month_ne': lambda c, v: extract('month', c) != v,
        'month_gt': lambda c, v: extract('month', c) > v,
        'month_ge': lambda c, v: extract('month', c) >= v,
        'month_lt': lambda c, v: extract('month', c) < v,
        'month_le': lambda c, v: extract('month', c) <= v,

        'day': lambda c, v: extract('day', c) == v,
        'day_ne': lambda c, v: extract('day', c) != v,
        'day_gt': lambda c, v: extract('day', c) > v,
        'day_ge': lambda c, v: extract('day', c) >= v,
        'day_lt': lambda c, v: extract('day', c) < v,
        'day_le': lambda c, v: extract('day', c) <= v,
    }

    @classproperty
    def filterable_attributes(cls):
        return cls.relations + cls.columns + \
               cls.hybrid_properties + cls.hybrid_methods

    @classproperty
    def sortable_attributes(cls):
        return cls.columns + cls.hybrid_properties

    @classmethod
    def filter_expr(cls_or_alias, **filters):
        """
        Forms expressions like [Product.age_from == 5,
                                Product.subject_ids.in_([1, 2])]
        from filters like {'age_from': 5, 'subject_ids__in': [1, 2]}

        Example 1:
            db.query(Product).filter(
                *Product.filter_expr(age_from=5, subject_ids__in=[1, 2]))

        Example 2:
            filters = {'age_from': 5, 'subject_ids__in': [1, 2]}
            db.query(Product).filter(*Product.filter_expr(**filters))

        ### About alias ###
        If we use an alias:
            alias = aliased(Product)  # table name will be product_1
        we can't just write a query like
            db.query(alias).filter(*Product.filter_expr(age_from=5))
        because it will be compiled to
            SELECT * FROM product_1 WHERE product.age_from=5
        which is wrong: we select from 'product_1' but filter on 'product',
        so such a filter will not work.

        We need to obtain
            SELECT * FROM product_1 WHERE product_1.age_from=5
        For such a case, we can call filter_expr ON THE ALIAS:
            alias = aliased(Product)
            db.query(alias).filter(*alias.filter_expr(age_from=5))

        Alias realization details:
          * we allow calling this method either ON AN ALIAS (say,
            alias.filter_expr()) or on the class (Product.filter_expr())
          * when the method is called on an alias, we need to generate SQL
            using the aliased table (say, product_1), but we also need a real
            class to call methods on (say, Product.relations)
          * so, we have 'mapper' that holds the table name and 'cls' that
            holds the real class

            when we call this method ON AN ALIAS, we will have:
                mapper = <product_1 table>
                cls = <Product>
            when we call this method ON THE CLASS, we will simply have:
                mapper = <Product> (or we could write <Product>.__mapper__.
                It doesn't matter because when we call <Product>.getattr,
                SA will magically call <Product>.__mapper__.getattr())
                cls = <Product>
        """
        if isinstance(cls_or_alias, AliasedClass):
            mapper, cls = cls_or_alias, inspect(cls_or_alias).mapper.class_
        else:
            mapper = cls = cls_or_alias

        expressions = []
        valid_attributes = cls.filterable_attributes
        for attr, value in filters.items():
            # if attribute is filtered by method, call this method
            if attr in cls.hybrid_methods:
                method = getattr(cls, attr)
                expressions.append(method(value, mapper=mapper))
            # else just add simple condition (== for scalars or IN for lists)
            else:
                # determine attribute name and operator
                # if they are explicitly set (say, id___between), take them
                if OPERATOR_SPLITTER in attr:
                    attr_name, op_name = attr.rsplit(OPERATOR_SPLITTER, 1)
                    if op_name not in cls._operators:
                        raise KeyError('Expression `{}` has incorrect '
                                       'operator `{}`'.format(attr, op_name))
                    op = cls._operators[op_name]
                # assume equality operator for other cases (say, id=1)
                else:
                    attr_name, op = attr, operators.eq

                if attr_name not in valid_attributes:
                    raise KeyError('Expression `{}` '
                                   'has incorrect attribute `{}`'.format(
                                       attr, attr_name))

                column = getattr(mapper, attr_name)
                expressions.append(op(column, value))

        return expressions

    @classmethod
    def order_expr(cls_or_alias, *columns):
        """
        Forms expressions like [desc(User.first_name), asc(User.phone)]
        from a list like ['-first_name', 'phone']

        Example for 1 column:
            db.query(User).order_by(*User.order_expr('-first_name'))
            # will compile to ORDER BY user.first_name DESC

        Example for multiple columns:
            columns = ['-first_name', 'phone']
            db.query(User).order_by(*User.order_expr(*columns))
            # will compile to ORDER BY user.first_name DESC, user.phone ASC

        About cls_or_alias, mapper, cls: read the filter_expr method
        description.
        """
        if isinstance(cls_or_alias, AliasedClass):
            mapper, cls = cls_or_alias, inspect(cls_or_alias).mapper.class_
        else:
            mapper = cls = cls_or_alias

        expressions = []
        for attr in columns:
            fn, attr = (desc, attr[1:]) if attr.startswith(DESC_PREFIX) \
                else (asc, attr)
            if attr not in cls.sortable_attributes:
                raise KeyError('Cant order {} by {}'.format(cls, attr))

            expr = fn(getattr(mapper, attr))
            expressions.append(expr)
        return expressions

    @classmethod
    def smart_query(cls, filters=None, sort_attrs=None, schema=None):
        """
        Does magic Django-ish joins like post___user___name__startswith='Bob'
        (see https://goo.gl/jAgCyM)
        Does filtering, sorting and eager loading at the same time.
        And if, say, the filters and sorting need the same join, it will be
        done only once. That's why all this stuff is combined in a single
        method.

        :param filters: dict
        :param sort_attrs: List[basestring]
        :param schema: dict
        """
        return smart_query(cls.query, filters, sort_attrs, schema)

    @classmethod
    def where(cls, **filters):
        """
        Shortcut for the smart_query() method.

        Example 1:
            Product.where(subject_ids__in=[1, 2], grade_from_id=2).all()

        Example 2:
            filters = {'subject_ids__in': [1, 2], 'grade_from_id': 2}
            Product.where(**filters).all()

        Example 3 (with joins):
            Post.where(public=True, user___name__startswith='Bi').all()
        """
        return cls.smart_query(filters)

    @classmethod
    def sort(cls, *columns):
        """
        Shortcut for the smart_query() method.

        Example 1:
            User.sort('first_name', '-user_id')
        This is equal to
            db.query(User).order_by(*User.order_expr('first_name', '-user_id'))

        Example 2:
            columns = ['first_name', '-user_id']
            User.sort(*columns)
        This is equal to
            columns = ['first_name', '-user_id']
            db.query(User).order_by(*User.order_expr(*columns))

        Example 3 (with joins):
            Post.sort('comments___rating', 'user___name').all()
        """
        return cls.smart_query({}, columns)