def visit_datetime_op(self, expr):
    """Compile a datetime-extraction expression node into a SQLAlchemy expression.

    Dispatches on the node's class name: names listed in ``DATE_PARTS_DIC``
    map either to MySQL's dedicated extraction functions (``year()``,
    ``month()``, ...) or to PostgreSQL-style ``date_part()``; ``Date`` and
    ``WeekDay`` nodes get their own dialect-specific translations.  The
    resulting expression is registered via ``self._add``.

    :param expr: expression node whose single input has already been
        compiled into ``self._expr_to_sqlalchemy``.
    :raises NotImplementedError: for datetime node types not handled here.
    """
    class_name = type(expr).__name__
    # Renamed from ``input`` to avoid shadowing the builtin.
    sa_input = self._expr_to_sqlalchemy[expr._input]
    if class_name in DATE_PARTS_DIC:
        if self._sa_engine and self._sa_engine.name == 'mysql':
            # MySQL has no date_part(); call the per-part function instead.
            if class_name == 'UnixTimestamp':
                fun = func.unix_timestamp
            else:
                fun = getattr(func, class_name.lower())
            sa_expr = fun(sa_input).cast(types.df_type_to_sqlalchemy_type(expr.dtype))
        else:
            sa_expr = func.date_part(DATE_PARTS_DIC[class_name], sa_input) \
                .cast(types.df_type_to_sqlalchemy_type(expr.dtype))
    elif isinstance(expr, Date):
        if self._sa_engine and self._sa_engine.name == 'mysql':
            sa_expr = func.date(sa_input).cast(types.df_type_to_sqlalchemy_type(expr.dtype))
        else:
            # date_trunc('day', ...) already yields the desired value; no cast
            # was applied in the original either.
            sa_expr = func.date_trunc('day', sa_input)
    elif isinstance(expr, WeekDay):
        # Normalize to the Python convention Monday == 0:
        # MySQL DAYOFWEEK() is 1-based starting at Sunday, hence (+5) % 7;
        # date_part('dow') is 0-based starting at Sunday, hence (+6) % 7.
        if self._sa_engine and self._sa_engine.name == 'mysql':
            sa_expr = (func.dayofweek(sa_input).cast(types.df_type_to_sqlalchemy_type(expr.dtype)) + 5) % 7
        else:
            sa_expr = (func.date_part('dow', sa_input).cast(types.df_type_to_sqlalchemy_type(expr.dtype)) + 6) % 7
    else:
        raise NotImplementedError
    self._add(expr, sa_expr)
def report_tickets_per_weekday():
    """Render a report of ticket counts grouped by the weekday of creation.

    Aggregates tickets with MySQL's DAYOFWEEK() (1 = Sunday ... 7 = Saturday)
    and maps each bucket onto its translated weekday name, defaulting missing
    weekdays to a count of 0.
    """
    tr_weekday = [
        u'Sunday', u'Monday', u'Tuesday', u'Wednesday',
        u'Thursday', u'Friday', u'Saturday'
    ]
    # BUG FIX: zip() returns a one-shot iterator on Python 3, which cannot be
    # index-assigned below (TypeError) nor re-iterated by the template.
    # Materialize the (name, 0) default pairs as a list; this is also valid
    # on Python 2, where zip() already returned a list.
    tickets_per_weekday = list(zip(tr_weekday, [0] * 7))
    res = db.session.query(func.dayofweek(Ticket.created_at), func.count(Ticket.id)) \
        .group_by(func.dayofweek(Ticket.created_at)) \
        .order_by(func.dayofweek(Ticket.created_at))
    total_tickets = 0
    for dayofweek, tcount in res:
        # DAYOFWEEK() is 1-based; the list is 0-based.
        tickets_per_weekday[dayofweek - 1] = (tr_weekday[dayofweek - 1], tcount)
        total_tickets = total_tickets + tcount
    return render_template('report_tickets_per_weekday.html',
                           tickets_per_weekday=tickets_per_weekday,
                           total_tickets=total_tickets)
def print_body_for_user(self, authenticated_user):
    """Print a JSON array of per-interviewer interview statistics.

    One entry per (weekday, hour, interviewer) bucket, averaging technical
    and cultural scores over interviews inside the
    (self.earliest_ts, self.latest_ts) window.  Interviews with no score at
    all, or with both scores equal to -1, are excluded.

    NOTE(review): ``authenticated_user`` is unused here — presumably the
    signature is shared with sibling handlers; confirm before removing.
    """
    stats = []
    # Row layout (positional): 0 = DAYOFWEEK(end_time) (MySQL: 1 = Sunday),
    # 1 = HOUR(end_time), 2 = avg technical score, 3 = avg cultural score,
    # 4 = Interviewer entity, 5 = sample size.
    # NOTE(review): assumes self.days maps MySQL's 1-based weekday numbers to
    # display names — confirm against its definition.
    for stats_for_hour in DB_Session_Factory.get_db_session().query(func.dayofweek(Interview.end_time), func.hour(Interview.end_time), func.avg(Interview.technical_score), func.avg(Interview.cultural_score), Interviewer, func.count(1)).group_by(func.dayofweek(Interview.end_time), func.hour(Interview.end_time), Interview.interviewer_email).join(Interviewer, Interview.interviewer_email == Interviewer.email).filter(Interview.start_time > self.earliest_ts, Interview.end_time < self.latest_ts, or_(Interview.technical_score != None, Interview.cultural_score != None), or_(Interview.technical_score != -1, Interview.cultural_score != -1)):
        stats.append({
            'Day' : self.days[stats_for_hour[0]],
            'Hour' : stats_for_hour[1],
            'Avg_Technical_Score' : stats_for_hour[2],
            'Avg_Cultural_Score' : stats_for_hour[3],
            'Interviewer' : stats_for_hour[4].dict_representation(),
            'Sample_Size' : stats_for_hour[5],
        })
    # Python 2 print statement: emits the JSON body on stdout.
    print json.dumps(stats)
def search(self):
    """Return all displayed opportunities matching the configured filters.

    Each filter (categories, weekday availability, zip-code radius) is
    applied only when the corresponding attribute is not the "all" sentinel.
    """
    matches = Opportunity.query.filter_by(display=1)

    # Category filter — skipped when the first entry is the "all" sentinel.
    if self.categories[0] != "all":
        matches = matches.filter(Opportunity.category_class.in_(self.categories))

    # Weekday filter. Year-2100 start dates also match regardless of day —
    # presumably a sentinel for "any day" events; confirm against the data.
    if self.availability[0] != "all":
        weekday_match = func.dayofweek(Opportunity.startDateTime).in_(self.availability)
        any_day = func.year(Opportunity.startDateTime) == 2100
        matches = matches.filter(weekday_match | any_day)

    # Location filter — restrict to zip codes within the requested distance.
    if self.zipcode != "all" and self.distance != "all":
        included_zips = get_zips_list(self.zipcode, self.distance)
        matches = matches.filter(Opportunity.zipcode.in_(included_zips))

    # Execute the assembled query and return the matching rows.
    return matches.all()
def usage_data(hall_no, year, month, day):
    """Return 30-day average laundry usage for one hall, bucketed by hour.

    Looks back 30 days from (year, month, day), keeps only snapshots taken
    on the same weekday as the requested date (plus the first hours of the
    following day), and averages washer/dryer counts per hour bucket.
    Returns a dict with hall metadata, date range, machine totals and the
    per-hour washer/dryer averages.
    """
    # turn date info into a date object
    # find start range by subtracting 30 days
    now = datetime.date(year, month, day)
    start = now - datetime.timedelta(days=30)
    # get the current day of the week for today and tomorrow
    # python dow is monday = 0, while sql dow is sunday = 0
    dow = (now.weekday() + 1) % 7
    tmw = (dow + 1) % 7
    # some commands are different between mysql and sqlite
    is_mysql = sqldb.engine.name == "mysql"
    # get the laundry information for today based on the day
    # of week (if today is tuesday, get all the tuesdays
    # in the past 30 days), group them by time, and include
    # the first 2 hours of the next day
    # NOTE: the ternaries below are evaluated in Python when the query is
    # built (is_mysql is a plain bool), not inside SQL.
    data = sqldb.session.query(
        LaundrySnapshot.date,
        # LaundrySnapshot.time appears to be minutes past midnight; bucket
        # it into hours (floor on MySQL, integer cast on sqlite).
        (func.floor(LaundrySnapshot.time / 60).label("time")
         if is_mysql
         else cast(LaundrySnapshot.time / 60, Integer).label("time")),
        func.avg(LaundrySnapshot.washers).label("all_washers"),
        func.avg(LaundrySnapshot.dryers).label("all_dryers"),
        func.avg(LaundrySnapshot.total_washers).label("all_total_washers"),
        func.avg(LaundrySnapshot.total_dryers).label("all_total_dryers"),
    ).filter(
        ((LaundrySnapshot.room == hall_no) &
         # MySQL DAYOFWEEK() is 1-based (Sunday == 1); sqlite strftime('%w')
         # yields a 0-based string — hence the two dialect branches.
         ((func.dayofweek(LaundrySnapshot.date) == dow + 1
           if is_mysql
           else func.strftime("%w", LaundrySnapshot.date) == str(dow)) |
          # Spill-over: snapshots strictly before minute 180 of the NEXT day
          # (hour buckets 0-2, remapped to 24-26 below).
          ((LaundrySnapshot.time <= 180 - 1) &
           (func.dayofweek(LaundrySnapshot.date) == tmw + 1
            if is_mysql
            else func.strftime("%w", LaundrySnapshot.date) == str(tmw)))) &
         (LaundrySnapshot.date >= start))) \
        .group_by(LaundrySnapshot.date, "time") \
        .order_by(LaundrySnapshot.date, "time").all()
    data = [x._asdict() for x in data]
    all_dryers = [int(x["all_total_dryers"]) for x in data]
    all_washers = [int(x["all_total_washers"]) for x in data]
    # 27 buckets: hours 0-23 for the target weekday plus 24-26 for the
    # next-day spill-over window.
    washer_points = {k: 0 for k in range(27)}
    dryer_points = {k: 0 for k in range(27)}
    washer_total = {k: 0 for k in range(27)}
    dryer_total = {k: 0 for k in range(27)}
    for x in data:
        hour = int(x["time"])
        # if the value is for tomorrow, add 24 hours
        if x["date"].weekday() != now.weekday():
            hour += 24
        washer_points[hour] += int(x["all_washers"])
        dryer_points[hour] += int(x["all_dryers"])
        washer_total[hour] += 1
        dryer_total[hour] += 1
    dates = [x["date"] for x in data]
    # Fall back to the requested date so min()/max() below never fail on an
    # empty result set.
    if not dates:
        dates = [now]
    return {
        "hall_name": laundry.id_to_hall[hall_no],
        "location": laundry.id_to_location[hall_no],
        "day_of_week": calendar.day_name[now.weekday()],
        "start_date": min(dates).strftime("%Y-%m-%d"),
        "end_date": max(dates).strftime("%Y-%m-%d"),
        # safe_division presumably guards the empty / zero-denominator case.
        "total_number_of_dryers": safe_division(sum(all_dryers), len(all_dryers)),
        "total_number_of_washers": safe_division(sum(all_washers), len(all_washers)),
        "washer_data": {
            x: safe_division(washer_points[x], washer_total[x]) for x in washer_points
        },
        "dryer_data": {
            x: safe_division(dryer_points[x], dryer_total[x]) for x in dryer_points
        }
    }
def run(self):
    """Compute the average number of attackers per kill, per weekday/hour.

    Builds a (day-of-week x 24-hour) matrix counting attacker rows, runs an
    ActivityReport over the same filters to get kill counts per cell, then
    divides cell-by-cell.  Stores the query metadata in self.columns, the
    raw rows in self.results, and the ratio matrix in self.final_result.
    """
    q = self.session.query(Attacker)

    # Subquery: attacker row ids belonging to the requested corporations /
    # alliances, optionally restricted by ship group.
    sqk = self.session.query()
    sqk = sqk.add_column(Attacker.id)
    if self.alliance_ids and self.corporation_ids:
        # NOTE(review): `_or` is used this way elsewhere in this module —
        # presumably an alias of sqlalchemy.or_; confirm the import.
        sqk = sqk.filter(
            _or(Attacker.corporation_id.in_(self.corporation_ids),
                Attacker.alliance_id.in_(self.alliance_ids)))
    elif self.alliance_ids:
        sqk = sqk.filter(Attacker.alliance_id.in_(self.alliance_ids))
    elif self.corporation_ids:
        sqk = sqk.filter(Attacker.corporation_id.in_(self.corporation_ids))
    if self.groupids is not None:
        sqk = sqk.join(EveGroup,
                       and_(EveType.id == Attacker.ship_id,
                            EveGroup.id == EveType.group_id),
                       isouter=True)
        if self.groupidsinvert:
            # BUG FIX: the inverted branch previously applied the same
            # positive filter as the non-inverted one; negate it, matching
            # the ActivityReport implementation of the same option.
            sqk = sqk.filter(not_(EveGroup.id.in_(self.groupids)))
        else:
            sqk = sqk.filter(EveGroup.id.in_(self.groupids))

    # Restrict to those attackers and bring in the killmail for its timestamp.
    q = q.filter(Attacker.id.in_(sqk.subquery())). \
        join(KillMail, KillMail.id == Attacker.killmail_id, isouter=True)

    # One row per weekday (MySQL DAYOFWEEK)...
    q = q.add_column(func.dayofweek(KillMail.timestamp).label('dow')).group_by('dow')
    # ...and one summed column per hour of the day ("00".."23").
    for h in range(0, 24):
        q = q.add_column((func.sum(case([(func.hour(KillMail.timestamp) == h, 1)],
                                        else_=0))).label("%02i" % (h)))
    # Optional time window.
    if self.after:
        q = q.filter(KillMail.timestamp >= self.after)
    if self.before:
        q = q.filter(KillMail.timestamp <= self.before)
    q = q.order_by(desc('dow'))
    self.columns = q.column_descriptions
    self.results = q.all()
    attackers_per_hour = self._arr_result()

    # Kill counts per cell over the same filters, for the denominator.
    ar = ActivityReport(self.session)
    ar.alliance_ids = self.alliance_ids
    ar.corporation_ids = self.corporation_ids
    ar.groupids = self.groupids
    ar.groupidsinvert = self.groupidsinvert
    ar.after = self.after
    ar.before = self.before
    ar.kills = True
    ar.losses = False
    ar.run()
    kills_per_hour = ar.arr_result()

    # Cross-divide the two matrices, copying header row/column through.
    final_result = []
    for r, row in enumerate(attackers_per_hour):
        final_result.append([])
        for c, value in enumerate(row):
            if r == 0 or c == 0:
                # Header cells carry labels, not numbers — copy unchanged.
                final_result[r].append(value)
            else:
                try:
                    final_result[r].append(value / kills_per_hour[r][c])
                except (ZeroDivisionError, InvalidOperation):
                    # No kills in this cell: report the ratio as 0.
                    final_result[r].append(0)
    self.final_result = final_result
def run(self):
    """Build and execute the weekday/hour kill-activity matrix query.

    Populates self.columns and self.results with one row per day of week
    ('dow', MySQL DAYOFWEEK, ordered descending) and 24 summed columns
    "00".."23" counting killmails whose hour matches each column.  Which
    killmails qualify is controlled by self.kills / self.losses plus the
    alliance, corporation and ship-group filters.

    :raises Exception: if neither self.kills nor self.losses is set.
    """
    q = self.session.query()
    # Add day of week
    q = q.add_column(func.dayofweek(KillMail.timestamp).label('dow'))
    # Add an hour for each column.
    for h in range(0, 24):
        q = q.add_column(func.sum(case([(func.hour(KillMail.timestamp) == h, 1)], else_=0)).label("%02i" % (h)))
    # Build a subquery to get killmail ids for attackers or victims in corporations and alliances
    sqk = None  # kill-side subquery (our side appears as Attacker)
    sql = None  # loss-side subquery (our side is the killmail victim)
    if (self.kills):
        sqk = self.session.query()
        sqk = sqk.add_column(Attacker.killmail_id)
        sqk = sqk.join(EveType, EveType.id == Attacker.ship_id)
        if (self.alliance_ids and self.corporation_ids):
            # NOTE(review): `_or` is presumably an alias of sqlalchemy.or_
            # (used as or_ further below) — confirm the import.
            sqk = sqk.filter(
                _or(Attacker.corporation_id.in_(self.corporation_ids), Attacker.alliance_id.in_(self.alliance_ids)))
        elif (self.alliance_ids):
            sqk = sqk.filter(Attacker.alliance_id.in_(self.alliance_ids))
        elif (self.corporation_ids):
            sqk = sqk.filter(Attacker.corporation_id.in_(self.corporation_ids))
        # Optional restriction to ship groups (or, inverted, everything
        # outside those groups).
        if (self.groupids != None and not self.groupidsinvert):
            sqk = sqk.join(EveGroup, and_(EveGroup.id == EveType.group_id, EveType.id == Attacker.ship_id), isouter=True). \
                filter(EveGroup.id.in_(self.groupids))
        elif (self.groupids != None and self.groupidsinvert):
            sqk = sqk.join(EveGroup, and_(EveGroup.id == EveType.group_id, EveType.id == Attacker.ship_id), isouter=True). \
                filter(not_(EveGroup.id.in_(self.groupids)))
    if (self.losses):
        sql = self.session.query()
        # Alias required: KillMail is also the outer query's table.
        km2 = aliased(KillMail)
        sql = sql.add_column(km2.id)
        sql = sql.join(EveType, EveType.id == km2.ship_id)
        if (self.alliance_ids and self.corporation_ids):
            sql = sql.filter(
                _or(km2.corporation_id.in_(self.corporation_ids), km2.alliance_id.in_(self.alliance_ids)))
        elif (self.alliance_ids):
            sql = sql.filter(km2.alliance_id.in_(self.alliance_ids))
        elif (self.corporation_ids):
            sql = sql.filter(km2.corporation_id.in_(self.corporation_ids))
        if (self.groupids != None and not self.groupidsinvert):
            sql = sql.join(EveGroup, and_(EveGroup.id == EveType.group_id, EveType.id == km2.ship_id), isouter=True). \
                filter(EveGroup.id.in_(self.groupids))
        elif (self.groupids != None and self.groupidsinvert):
            sql = sql.join(EveGroup, and_(EveGroup.id == EveType.group_id, EveType.id == km2.ship_id), isouter=True). \
                filter(not_(EveGroup.id.in_(self.groupids)))
    # Restrict the outer query by whichever subqueries were built.
    if (self.kills and self.losses):
        q = q.filter(or_(KillMail.id.in_(sql.subquery()), KillMail.id.in_(sqk.subquery())))
    elif (self.kills):
        q = q.filter(KillMail.id.in_(sqk.subquery()))
    elif (self.losses):
        q = q.filter(KillMail.id.in_(sql.subquery()))
    else:
        raise Exception("Please select kills, losses, or both")
    # Search for a specific target type
    # (only meaningful for a pure kill report, hence the guard).
    if (self.againstgroup_ids and self.kills and not self.losses):
        et2 = aliased(EveType)
        eg2 = aliased(EveGroup)
        q = q.join(et2, et2.id == KillMail.ship_id). \
            join(eg2, eg2.id == et2.group_id). \
            filter(eg2.id.in_(self.againstgroup_ids))
    # Optional time window.
    if (self.after):
        q = q.filter(KillMail.timestamp >= self.after)
    if (self.before):
        q = q.filter(KillMail.timestamp <= self.before)
    q = q.group_by('dow'). \
        order_by(desc('dow'))
    self.columns = q.column_descriptions
    self.results = q.all()
    return