def get_todays_electricity():
    return Electricity.query.with_entities(
        (func.max(Electricity.meter_280) - func.min(Electricity.meter_280)).label('todays_export'),
        (func.max(Electricity.meter_180) - func.min(Electricity.meter_180)).label('todays_import')
    ).filter(
        func.strftime('%Y-%m-%d', Electricity.created_at) == datetime.now().strftime('%Y-%m-%d')
    ).group_by(
        func.strftime('%Y-%m-%d', Electricity.created_at)
    ).first()
def check_status(date_report, session):
    """Find each lead in the db and update its status."""
    query = session.query(func.min(db.Calltouch.datetime),
                          func.min(db.Telephony.datetime),
                          db.Telephony.duration,
                          db.Calltouch.telephone,
                          db.Calltouch.status,
                          db.Calltouch.deadline) \
        .join(db.Telephony, db.Telephony.telephone_to == db.Calltouch.telephone) \
        .filter(func.DATE(db.Calltouch.deadline) == date_report,
                func.DATE(db.Telephony.datetime) == date_report,
                db.Telephony.datetime > db.Calltouch.datetime,
                db.Calltouch.status != 'Doubled') \
        .group_by(db.Calltouch.telephone)
    for lead_date, call_date, duration, telephone, status, deadline in query.all():
        # Note: the <= / >= branches overlap at exactly 40 seconds, so a
        # 40-second call matches the first branch that applies; the final
        # else is unreachable and kept only as a safety net.
        if duration <= 40 and call_date <= deadline:
            status = 'Short call'
        elif duration >= 40 and call_date > deadline:
            status = 'Late call'
        elif duration <= 40 and call_date > deadline:
            status = 'Short and Late call'
        elif duration >= 40 and call_date <= deadline:
            status = 'Good call'
        else:
            status = 'Bad call'
        session.query(db.Calltouch).filter(
            db.Calltouch.datetime == lead_date,
            db.Calltouch.telephone == telephone
        ).update({'status': status}, synchronize_session=False)
def get_new_datasets(pkg_ids=None):
    '''
    Return a list of new pkgs and date when they were created, in format:
        [(id, datetime), ...]
    If pkg_ids list is passed, limit query to just those packages.
    '''
    # Can't filter by time in select because 'min' function has to
    # be 'for all time' else you get first revision in the time period.
    package_revision = table('package_revision')
    revision = table('revision')
    s = select([package_revision.c.id, func.min(revision.c.timestamp)],
               from_obj=[package_revision.join(revision)])
    if pkg_ids:
        s = s.where(and_(package_revision.c.id.in_(pkg_ids),
                         package_revision.c.type == 'dataset'))
    else:
        s = s.where(package_revision.c.type == 'dataset')
    s = s.group_by(package_revision.c.id).\
        order_by(func.min(revision.c.timestamp))
    res = model.Session.execute(s).fetchall()  # [(id, datetime), ...]
    res_pickleable = []
    for pkg_id, created_datetime in res:
        res_pickleable.append((pkg_id, created_datetime))
    return res_pickleable
def get(self):
    date = datetime.datetime.strptime(self.request.GET.get('date'), '%d.%m.%Y')
    start_date = datetime.datetime.combine(date, day_start)
    end_date = datetime.datetime.combine(date, day_end)
    entries_p = self.session.query(User.id, User.name,
                                   func.min(PresenceEntry.ts),
                                   func.max(PresenceEntry.ts))\
        .filter(User.id == PresenceEntry.user_id)\
        .filter((User.location == "poznan") | (User.location == None))\
        .filter(PresenceEntry.ts >= start_date)\
        .filter(PresenceEntry.ts <= end_date)\
        .group_by(User.id, User.name)\
        .order_by(User.name)
    entries_w = self.session.query(User.id, User.name,
                                   func.min(PresenceEntry.ts),
                                   func.max(PresenceEntry.ts))\
        .filter(User.id == PresenceEntry.user_id)\
        .filter(User.location == "wroclaw")\
        .filter(PresenceEntry.ts >= start_date)\
        .filter(PresenceEntry.ts <= end_date)\
        .group_by(User.id, User.name)\
        .order_by(User.name)
    return dict(
        entries_p=((user_id, user_name, start, stop, start.time() > hour_9)
                   for (user_id, user_name, start, stop) in entries_p),
        entries_w=((user_id, user_name, start, stop, start.time() > hour_9)
                   for (user_id, user_name, start, stop) in entries_w),
        date=date,
        prev_date=previous_day(date),
        next_date=next_day(date),
        excuses=excuses.presence(),
        justification=excuses.presence_status(date, self.request.user.id),
    )
def get_extent():
    return DBSession.query(
        func.min(func.ST_XMin(AdminZone.geometry)),
        func.min(func.ST_YMin(AdminZone.geometry)),
        func.max(func.ST_XMax(AdminZone.geometry)),
        func.max(func.ST_YMax(AdminZone.geometry)),
    ).first()
def get_stats():
    result = db.session.query(
        func.min(Stop.stop_lat),
        func.min(Stop.stop_lon),
        func.max(Stop.stop_lat),
        func.max(Stop.stop_lon),
        func.count()
    ).first()
    data = {"minLat": result[0], "minLon": result[1],
            "maxLat": result[2], "maxLon": result[3],
            "numbers": result[4]}
    return jsonify({"stops": data})
def populate_months(session):
    if session.query(Month).count() > 0:
        raise Exception("Months table is already populated.")
    demimonth = datetime.timedelta(days=14)
    first_chat = session.query(func.min(Chat.date)).scalar()
    first_bugevent = session.query(func.min(BugEvent.date)).scalar()
    start_date = max(first_chat, first_bugevent)
    print("First chat is " + str(first_chat))
    print("First bug event is " + str(first_bugevent))
    print("Starting months on " + str(start_date))
    last_chat = session.query(func.max(Chat.date)).scalar()
    last_bugevent = session.query(func.max(BugEvent.date)).scalar()
    end_date = min(last_chat, last_bugevent)
    print("Last chat is " + str(last_chat))
    print("Last bug event is " + str(last_bugevent))
    print("End months on or around " + str(end_date))
    start = start_date
    end = start_date + datetime.timedelta(days=27)  # start + 27 days = 28-day span
    while end < end_date:
        month = Month(first=start, last=end)
        session.add(month)
        start += demimonth
        end += demimonth
    session.commit()
def RecalculatePoints():
    if marketscraper.cookies is None:
        loginScraper(m_user, m_password)
    # aggregate item drop rates with market
    drop_items = RefreshMarketWithMobDrops()
    # recalculate the points for the guild including run credit factors
    d = datetime.now()
    latest_item = MappedMarketResult.query.order_by(MappedMarketResult.date.desc()).all()
    if len(latest_item) > 0:
        d = latest_item[0].date
    market_results = db.session.query(
        MappedMarketResult.itemid, func.min(MappedMarketResult.price)
    ).filter(MappedMarketResult.itemid.in_(drop_items)).filter(
        MappedMarketResult.date >= d
    ).group_by(MappedMarketResult.itemid).all()
    guild_treasure = db.session.query(
        MappedGuildTreasure.itemid, func.min(MappedGuildTreasure.minMarketPrice)
    ).filter(MappedGuildTreasure.itemid.in_(drop_items)).group_by(
        MappedGuildTreasure.itemid
    ).all()
    # guild treasure results take precedence over market results;
    # convert to a dictionary (setdefault replaces the old has_key/double-append
    # logic, which inserted every market price twice)
    market_results_d = {}
    for itemid, price in market_results:
        market_results_d.setdefault(itemid, []).append(price)
    for itemid, price in guild_treasure:
        market_results_d[itemid] = [price]
    market_results = min_values(market_results_d)
    relevant_runs_query = MappedRun.query.filter(MappedRun.success == True).all()
    rcs = [rrq.chars for rrq in relevant_runs_query]
    rcs = [item for sublist in rcs for item in sublist]
    players_not_mapped_characters = [pc for pc in rcs if pc.mappedplayer_id is None]
    player_names = [pc.PlayerName for pc in players_not_mapped_characters]
    player_names = list(set(player_names))
    for pn in player_names:
        # players who have points and unclaimed emails will have their points
        # calculated but they won't be able to use them. Players will need to
        # register. Perhaps these players can get an invite?
        mp_exists = MappedPlayer.query.filter(MappedPlayer.Name == pn)
        mp = None
        if mp_exists.count() == 0:
            # removing placeholder characters for now
            # mp = MappedPlayer(pn, 'NEED_EMAIL')
            continue
        else:
            mp = mp_exists.all()[0]
        db.session.add(mp)
        chars_to_map = [pc for pc in players_not_mapped_characters if pc.PlayerName == pn]
        mp.Chars = chars_to_map
        db.session.commit()
    for run in relevant_runs_query:
        players = [c.mappedplayer_id for c in run.chars]
        players = list(set(players))
        CalculatePoints(run, run.mobs_killed, players, market_results, d)
def index(self):
    # use named variables instead of shadowing the built-in max/min
    lat_max = DBSession.query(func.max(Sensor.lat)).one()[0]
    lat_min = DBSession.query(func.min(Sensor.lat)).one()[0]
    lat = (lat_max + lat_min) / 2
    lng_max = DBSession.query(func.max(Sensor.lng)).one()[0]
    lng_min = DBSession.query(func.min(Sensor.lng)).one()[0]
    lng = (lng_max + lng_min) / 2
    return dict(page='map', lat=lat, lng=lng)
def index(page=1):
    form = DataForm()
    user_data = Data.query.filter_by(user_id=g.user.id)
    # ms = user_data.order_by(Data.systolic_pressure.desc()).first()
    four_weeks_ago = datetime.datetime.now() - datetime.timedelta(weeks=4)

    maxs = db.session.query(func.max(Data.systolic_pressure).label('max_systolic')).\
        filter_by(user_id=g.user.id).one()
    max_systolic = maxs.max_systolic
    mins = db.session.query(func.min(Data.systolic_pressure).label('min_systolic')).\
        filter_by(user_id=g.user.id).one()
    min_systolic = mins.min_systolic
    avgs = db.session.query(func.avg(Data.systolic_pressure).label('avg_systolic')).\
        filter_by(user_id=g.user.id).\
        filter(Data.timestamp > four_weeks_ago).one()
    avg_systolic = avgs.avg_systolic

    maxd = db.session.query(func.max(Data.diastolic_pressure).label('max_diastolic')).\
        filter_by(user_id=g.user.id).one()
    max_diastolic = maxd.max_diastolic
    mind = db.session.query(func.min(Data.diastolic_pressure).label('min_diastolic')).\
        filter_by(user_id=g.user.id).one()
    min_diastolic = mind.min_diastolic
    avgd = db.session.query(func.avg(Data.diastolic_pressure).label('avg_diastolic')).\
        filter_by(user_id=g.user.id).\
        filter(Data.timestamp > four_weeks_ago).one()
    avg_diastolic = avgd.avg_diastolic

    maxc = db.session.query(func.max(Data.cardiac_rate).label('max_rate')).\
        filter_by(user_id=g.user.id).one()
    max_rate = maxc.max_rate
    minc = db.session.query(func.min(Data.cardiac_rate).label('min_rate')).\
        filter_by(user_id=g.user.id).one()
    min_rate = minc.min_rate
    avgc = db.session.query(func.avg(Data.cardiac_rate).label('avg_rate')).\
        filter_by(user_id=g.user.id).\
        filter(Data.timestamp > four_weeks_ago).one()
    avg_rate = avgc.avg_rate

    if form.validate_on_submit():
        data = Data(systolic_pressure=form.systolic_pressure.data,
                    diastolic_pressure=form.diastolic_pressure.data,
                    cardiac_rate=form.cardiac_rate.data,
                    timestamp=datetime.datetime.now(),
                    body=form.note.data,
                    user=g.user)
        db.session.add(data)
        db.session.commit()
        db.session.close()
        flash('Added successfully')
        return redirect(url_for('index'))

    datas = user_data.order_by(Data.timestamp.desc()).paginate(page, DATAS_PER_PAGE, False)
    return render_template('index.html', title='Home', form=form,
                           max_systolic=max_systolic, min_systolic=min_systolic,
                           avg_systolic=avg_systolic,
                           max_diastolic=max_diastolic, min_diastolic=min_diastolic,
                           avg_diastolic=avg_diastolic,
                           max_rate=max_rate, min_rate=min_rate, avg_rate=avg_rate,
                           datas=datas)
def nextjob(self, queue):
    """
    Make the next PENDING job active, where pending jobs are sorted
    by priority. Priority is assigned on the basis of usage and the
    order of submissions.
    """
    session = db.Session()

    # Define a query which returns the lowest job id of the pending jobs
    # with the minimum priority
    _priority = select([func.min(Job.priority)], Job.status == 'PENDING')
    min_id = select([func.min(Job.id)],
                    and_(Job.priority == _priority, Job.status == 'PENDING'))

    for _ in range(10):  # Repeat if conflict over next job
        # Get the next job, if there is one
        try:
            job = session.query(Job).filter(Job.id == min_id).one()
            # print job.id, job.name, job.status, job.date, job.start, job.priority
        except NoResultFound:
            return {'request': None}

        # Mark the job as active and record it in the active queue
        (session.query(Job)
         .filter(Job.id == job.id)
         .update({'status': 'ACTIVE',
                  'start': datetime.utcnow(),
                  }))
        activejob = db.ActiveJob(jobid=job.id, queue=queue)
        session.add(activejob)

        # If the job was already taken, roll back and try again. The
        # first process to record the job in the active list wins, and
        # will change the job status from PENDING to ACTIVE. Since the
        # job is no longer pending, the next iteration will select a
        # different job, so this should not be an infinite loop.
        # Hopefully if the process that is doing the transaction gets
        # killed in the middle then the database will be clever enough
        # to roll back, otherwise we will never get out of this loop.
        try:
            session.commit()
        except IntegrityError:
            session.rollback()
            continue
        break
    else:
        logging.critical('dispatch could not assign job %s' % job.id)
        raise IOError('dispatch could not assign job %s' % job.id)

    request = store.get(job.id, 'request')
    # No reason to include time; email or twitter does that better than
    # we can without client locale information.
    notify.notify(user=job.notify, msg=job.name + " started", level=1)
    return {'id': job.id, 'request': request}
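# A minimal runnable sketch of the nested-min selection used in nextjob()
# above: the inner query finds the minimum priority among PENDING jobs, the
# outer one the lowest id at that priority, so ties on priority are broken by
# submission order. Plain sqlite3 with a made-up jobs table, for illustration
# only, not the scheduler's actual schema.
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE job (id INTEGER, priority INTEGER, status TEXT)')
conn.executemany('INSERT INTO job VALUES (?, ?, ?)', [
    (1, 5, 'ACTIVE'),    # already running: ignored
    (2, 2, 'PENDING'),   # lowest pending priority, earliest id -> picked
    (3, 2, 'PENDING'),   # same priority, later submission
    (4, 9, 'PENDING'),
])
next_id = conn.execute(
    "SELECT min(id) FROM job WHERE status = 'PENDING' AND priority = "
    "(SELECT min(priority) FROM job WHERE status = 'PENDING')"
).fetchone()[0]
print(next_id)  # 2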
def post(self):
    """
    -- BY HOTEL
    select * from availability
    where hotel_id = 2
    and date between '2015-05-04' and '2015-05-06'
    group by hotel_id
    having min(available) = 1

    -- BY CITY <city_id=2> returns hotels <119, 383>
    select * from availability as aa
    join hotel as hh on aa.hotel_id = hh.id
    where hh.city_id=2
    and date between '2015-05-04' and '2015-05-06'
    GROUP BY aa.hotel_id
    HAVING min(aa.available) = 1
    """
    params = self.parser.parse_args()
    kind = params.get('kind')
    _id = params['id']
    start_date = params.get('start_date')
    end_date = params.get('end_date')
    limit = params.get('limit')
    offset = params.get('offset')

    filters = {"available": True}
    date_filter = ''
    if start_date and end_date:
        filters.pop('available')
        date_filter = Availability.date.between(start_date, end_date)

    if kind == 'city':
        query = Availability.query.filter_by(**filters). \
            from_self(). \
            join(Availability.hotel).filter_by(city_id=_id). \
            filter(date_filter). \
            group_by(Availability.hotel_id). \
            having(func.min(Availability.available) == 1). \
            order_by(Hotel.name)
    else:
        filters.update({"hotel_id": _id})
        query = Availability.query.filter_by(**filters). \
            filter(date_filter). \
            group_by(Availability.hotel_id). \
            having(func.min(Availability.available) == 1)

    total = query.count()
    query = query.limit(limit).offset(offset).all()
    serializer = AvailabilitySerializer(query, many=True)
    return {'total': total, 'results': serializer.data}
def mesicni_vypis_alluser(mesic):
    # form = Card.find_by_number(current_user.card_number)
    # form = db.session.query(Card.time).filter_by(card_number=current_user.card_number)
    form = db.session.query(
        func.strftime('%Y-%m-%d', Card.time).label("date"),
        func.max(func.strftime('%H:%M', Card.time)).label("Max"),
        func.min(func.strftime('%H:%M', Card.time)).label("Min"),
        (func.max(Card.time) - func.min(Card.time)).label("Rozdil")
    ).filter(
        # SQLite's strftime does not support '%-m'; '%Y-%m' yields e.g. '2020-05'
        func.strftime('%Y-%m', Card.time) == mesic
    ).group_by(func.strftime('%Y-%m-%d', Card.time))
    # .group_by([func.day(Card.time)])
    return render_template("auth/mesicni_vypisy.tmpl", form=form, user=current_user)
def blacklist_moving_wifis(self, ago=1, offset=0, batch=1000):
    # TODO: this doesn't take into account wifi APs which have
    # permanently moved after a certain date

    # maximum difference of two decimal places, ~5km at equator
    # or ~2km at 67 degrees north
    max_difference = 500000
    day, max_day = daily_task_days(ago)
    # only look at the past 30 days for movement
    max_past_days = day - timedelta(days=30)
    try:
        with self.db_session() as session:
            query = session.query(distinct(WifiMeasure.key)).filter(
                WifiMeasure.created < max_day).filter(
                WifiMeasure.created >= day).order_by(
                WifiMeasure.id).limit(batch).offset(offset)
            new_wifis = [w[0] for w in query.all()]
            if not new_wifis:  # pragma: no cover
                # nothing to be done
                return []
            # check min/max lat/lon
            query = session.query(
                WifiMeasure.key, func.max(WifiMeasure.lat),
                func.min(WifiMeasure.lat), func.max(WifiMeasure.lon),
                func.min(WifiMeasure.lon)).filter(
                WifiMeasure.key.in_(new_wifis)).filter(
                WifiMeasure.created > max_past_days).group_by(WifiMeasure.key)
            results = query.all()
            moving_keys = set()
            for result in results:
                wifi_key, max_lat, min_lat, max_lon, min_lon = result
                diff_lat = abs(max_lat - min_lat)
                diff_lon = abs(max_lon - min_lon)
                if diff_lat >= max_difference or diff_lon >= max_difference:
                    moving_keys.add(wifi_key)
            if moving_keys:
                utcnow = datetime.utcnow()
                query = session.query(WifiBlacklist.key).filter(
                    WifiBlacklist.key.in_(moving_keys))
                already_blocked = set([a[0] for a in query.all()])
                moving_keys = moving_keys - already_blocked
                if not moving_keys:
                    return []
                for key in moving_keys:
                    # TODO: on duplicate key ignore
                    session.add(WifiBlacklist(key=key, created=utcnow))
                remove_wifi.delay(list(moving_keys))
                session.commit()
            return moving_keys
    except IntegrityError as exc:  # pragma: no cover
        logger.exception('error')
        return []
    except Exception as exc:  # pragma: no cover
        raise self.retry(exc=exc)
def _make_stats_query(self, event_filter):
    query = self.session.query(
        func.min(Meter.timestamp).label('tsmin'),
        func.max(Meter.timestamp).label('tsmax'),
        func.avg(Meter.counter_volume).label('avg'),
        func.sum(Meter.counter_volume).label('sum'),
        func.min(Meter.counter_volume).label('min'),
        func.max(Meter.counter_volume).label('max'),
        func.count(Meter.counter_volume).label('count'))
    return make_query_from_filter(query, event_filter)
def _make_stats_query(self, event_filter):
    query = self.session.query(
        func.min(Meter.timestamp).label("tsmin"),
        func.max(Meter.timestamp).label("tsmax"),
        func.avg(Meter.counter_volume).label("avg"),
        func.sum(Meter.counter_volume).label("sum"),
        func.min(Meter.counter_volume).label("min"),
        func.max(Meter.counter_volume).label("max"),
        func.count(Meter.counter_volume).label("count"),
    )
    return make_query_from_filter(query, event_filter)
def parameter_stat(self):
    r = self.session.query(
        func.min(Result.turnout_p).label('turnout_min'),
        func.max(Result.turnout_p).label('turnout_max'),
        func.min(Result.absentee_p).label('absentee_min'),
        func.max(Result.absentee_p).label('absentee_max'),
    ).one()
    return dict(
        turnout=dict(min=r.turnout_min, max=r.turnout_max),
        absentee=dict(min=r.absentee_min, max=r.absentee_max),
    )
def _make_stats_query(sample_filter):
    session = sqlalchemy_session.get_session()
    query = session.query(
        func.min(Meter.timestamp).label('tsmin'),
        func.max(Meter.timestamp).label('tsmax'),
        func.avg(Meter.counter_volume).label('avg'),
        func.sum(Meter.counter_volume).label('sum'),
        func.min(Meter.counter_volume).label('min'),
        func.max(Meter.counter_volume).label('max'),
        func.count(Meter.counter_volume).label('count'))
    return make_query_from_filter(query, sample_filter)
def _make_stats_query(sample_filter):
    session = sqlalchemy_session.get_session()
    query = session.query(
        func.min(Meter.timestamp).label("tsmin"),
        func.max(Meter.timestamp).label("tsmax"),
        func.avg(Meter.counter_volume).label("avg"),
        func.sum(Meter.counter_volume).label("sum"),
        func.min(Meter.counter_volume).label("min"),
        func.max(Meter.counter_volume).label("max"),
        func.count(Meter.counter_volume).label("count"),
    )
    return make_query_from_filter(query, sample_filter)
def get_down_object(batch, current_tile_id, ignore_committed=None, ignore_discarded=None):
    if ignore_committed is None:
        ignore_committed = True
    if ignore_discarded is None:
        ignore_discarded = True
    row_size = db.query(ImportFile.row_size).filter(ImportFile.name == batch).first()[0]
    # Build the shared base query once, then apply the optional filters;
    # this replaces four nearly identical copies of the same query.
    query = (db.query(func.min(SpriteTile.id))
             .join(GameObject)
             .join(ImportFile)
             .filter(SpriteTile.id % row_size == current_tile_id % row_size)
             .filter(SpriteTile.id > current_tile_id)
             .filter(ImportFile.name == batch))
    if ignore_committed:
        query = query.filter(~GameObject.committed)
    if ignore_discarded:
        query = query.filter(~SpriteTile.discard)
    next_sprite_tile_id = query.scalar()
    if not next_sprite_tile_id:
        return None, None
    game_object_data = (db.query(GameObject, SpriteTile)
                        .join(SpriteTile)
                        .filter(SpriteTile.game_object_id == next_sprite_tile_id)
                        ).all()
    return game_object_data, next_sprite_tile_id
def __iter__(self):
    """Iterate over the schedule for the day."""
    if not self.rooms:
        return  # PEP 479: raising StopIteration inside a generator is an error

    def rowspan(start, end):
        """Find the rowspan for an entry in the schedule table.

        This uses a binary search for the given end time from a sorted
        list of start times in order to find the index of the first
        start time that occurs after the given end time. This method is
        used to prevent issues that can occur with overlapping start
        and end times being included in the same list.
        """
        return bisect_left(times, end) - times.index(start)

    times = sorted({slot.start for slot in self.slots})
    # While we typically only care about the start times here, the
    # list is iterated over two items at a time. Without adding a
    # final element, the last time slot would be omitted. Any value
    # could be used here as bisect_left only assumes the list is
    # sorted, but using a meaningful value feels better.
    times.append(self.slots[-1].end)

    slots = db.session.query(
        Slot.id,
        Slot.content_override,
        Slot.kind,
        Slot.start,
        Slot.end,
        func.count(rooms_slots.c.slot_id).label('room_count'),
        func.min(Room.order).label('order'),
    ).join(rooms_slots, Room).filter(Slot.day == self).order_by(
        func.count(rooms_slots.c.slot_id), func.min(Room.order)
    ).group_by(
        Slot.id, Slot.content_override, Slot.kind, Slot.start, Slot.end
    ).all()

    for time, next_time in pairwise(times):
        row = {'time': time, 'slots': []}
        for slot in slots:
            if slot.start == time:
                slot.rowspan = rowspan(slot.start, slot.end)
                slot.colspan = slot.room_count
                if not slot.content_override:
                    slot.presentation = Presentation.query.filter(
                        Presentation.slot_id == slot.id).first()
                row['slots'].append(slot)
        if row['slots'] or next_time is None:
            yield row
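# A minimal, self-contained sketch of the rowspan trick above, using only the
# standard-library bisect module: with sorted start times (plus a final
# sentinel end time), the rowspan of an entry is the number of time rows it
# covers. The times and entries below are made up for illustration.
from bisect import bisect_left

times = [9, 10, 11, 12, 13]  # sorted slot start times + final end sentinel

def rowspan(start, end):
    # index of the first start time at or after `end`, minus the row
    # where the entry begins
    return bisect_left(times, end) - times.index(start)

assert rowspan(9, 10) == 1   # a one-hour slot spans one row
assert rowspan(10, 13) == 3  # a three-hour slot spans three rows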
def get_filtered_operations(self, proj_id, filters, page_start=0, page_end=20, is_count=False):
    """Retrieve Operations for a given project, filtered from UI."""
    try:
        query = self.session.query(func.min(model.Operation.id),
                                   func.max(model.Operation.id),
                                   func.count(model.Operation.id),
                                   func.max(model.Operation.fk_operation_group),
                                   func.min(model.Operation.fk_from_algo),
                                   func.max(model.Operation.method_name),
                                   func.max(model.Operation.fk_launched_by),
                                   func.min(model.Operation.create_date),
                                   func.min(model.Operation.start_date),
                                   func.max(model.Operation.completion_date),
                                   func.min(model.Operation.status),
                                   func.max(model.Operation.additional_info),
                                   func.min(case_([(model.Operation.visible, 1)], else_=0)),
                                   func.min(model.Operation.user_group),
                                   func.min(model.Operation.gid)
                                   ).join(model.Algorithm).join(
                                       model.AlgorithmGroup).join(model.AlgorithmCategory)
        if (filters is None) or (filters.fields is None) or len(filters.fields) == 0:
            query = query.filter(model.Operation.fk_launched_in == proj_id)
        else:
            filter_string = filters.get_sql_filter_equivalent()
            query = query.filter(model.Operation.fk_launched_in == proj_id)
            query = query.filter(eval(filter_string))
        query = query.group_by(case_([(model.Operation.fk_operation_group > 0,
                                       -model.Operation.fk_operation_group)],
                                     else_=model.Operation.id)
                               ).order_by(desc(func.max(model.Operation.id)))
        if is_count:
            result = query.count()
        else:
            result = query.offset(page_start).limit(page_end).all()
    except Exception as excep:
        self.logger.error(excep)
        result = 0 if is_count else None
    return result
def deleted_packages():
    # Can't filter by time in select because 'min' function has to
    # be 'for all time' else you get first revision in the time period.
    package_revision = table('package_revision')
    revision = table('revision')
    s = select([package_revision.c.id, func.min(revision.c.timestamp)],
               from_obj=[package_revision.join(revision)]).\
        where(package_revision.c.state == model.State.DELETED).\
        group_by(package_revision.c.id).\
        order_by(func.min(revision.c.timestamp))
    res = model.Session.execute(s).fetchall()  # [(id, datetime), ...]
    res_pickleable = []
    for pkg_id, deleted_datetime in res:
        res_pickleable.append((pkg_id, deleted_datetime.toordinal()))
    return res_pickleable
def test_apply_min(self):
    """Query.apply_min(col)

    min = session.query(Address).apply_min(Address.bounces)
    """
    session = create_session()

    # 0.5.0
    mins = list(session.query(Address).values(func.min(Address.bounces)))
    min_bounces = mins[0][0]
    assert min_bounces == 0

    min_bounces = session.query(func.min(Address.bounces)).one()[0]
    assert min_bounces == 0
def _get_years():
    # get min/max from db
    db_session = DBSession()
    ucn_proj = db_session.query(Project).filter(Project.keyname == UCN_PROJECT_KEYNAME).one()
    min_start_date = db_session.query(func.min(ConstructObject.start_build_date)) \
        .filter(ConstructObject.project == ucn_proj).scalar()
    max_end_date = db_session.query(func.max(ConstructObject.end_build_date)) \
        .filter(ConstructObject.project == ucn_proj).scalar()

    # if null, set default values
    min_start_year = min_start_date.year if min_start_date else 2015
    max_end_year = max_end_date.year if max_end_date else 2020

    # check min <= max
    if min_start_year > max_end_year:
        min_start_year, max_end_year = max_end_year, min_start_year

    # create range
    years = list(range(min_start_year, max_end_year + 1))

    # current and selected years
    current_year = date.today().year
    selected_year = current_year if current_year in years else years[0]

    years_view_model = [{'year': x, 'selected': x == selected_year} for x in years]
    return years_view_model
def glycan_composition_extents(cls, session, filter_fn=lambda q: q):
    q = session.query(
        cls.GlycanCompositionAssociation.base_type,
        func.min(cls.GlycanCompositionAssociation.count),
        func.max(cls.GlycanCompositionAssociation.count),
    ).group_by(cls.GlycanCompositionAssociation.base_type)
    return filter_fn(q)
def create_vlanids():
    """Prepopulates the vlan_bindings table"""
    LOG.debug("create_vlanids() called")
    session = db.get_session()
    start = CONF.VLANS.vlan_start
    end = CONF.VLANS.vlan_end
    try:
        vlanid = session.query(L2_MODEL.VlanID).one()
    except exc.MultipleResultsFound:
        """
        TODO (Sumit): Salvatore rightly points out that this will not
        handle a change in the VLAN ID range across server reboots. This
        is currently not a supported feature. This logic will need to
        change if this feature has to be supported.
        Per Dan's suggestion we just throw a server exception for now.
        """
        current_start = (
            int(session.query(func.min(L2_MODEL.VlanID.vlan_id)).one()[0]))
        current_end = (
            int(session.query(func.max(L2_MODEL.VlanID.vlan_id)).one()[0]))
        if current_start != start or current_end != end:
            LOG.debug("Old VLAN range %s-%s" % (current_start, current_end))
            LOG.debug("New VLAN range %s-%s" % (start, end))
            raise c_exc.UnableToChangeVlanRange(range_start=current_start,
                                                range_end=current_end)
    except exc.NoResultFound:
        LOG.debug("Setting VLAN range to %s-%s" % (start, end))
        while start <= end:
            vlanid = L2_MODEL.VlanID(start)
            session.add(vlanid)
            start += 1
        session.flush()
    return
def random():
    """A random set of quotes."""
    max_id, min_id = session\
        .query(func.max(Quote.id), func.min(Quote.id))\
        .first()
    # Pick a 100 ids
    ids = [randint(min_id, max_id) for _ in range(100)]
    # Get them and pray 20 are not deleted/pending/non-accepted
    quotes = session\
        .query(Quote)\
        .filter(Quote.status == "accepted")\
        .filter(Quote.id.in_(ids))\
        .limit(20)\
        .all()
    return render_template(
        "listing.html",
        quotes=quotes,
        pagetitle="random"
    )
def build_query_to_populate(self, query, full_table, aggregate_table):
    insert_columns = [aggregate_table.c.upload_min]
    mean = full_table.c.upload_octets / full_table.c.upload_time
    is_safe = and_(full_table.c.upload_time > 0,
                   full_table.c.download_flag == 'f')
    safe_mean = case([(is_safe, mean)], else_=None)
    select_query = query.column(func.min(safe_mean))
    return insert_columns, select_query
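# A runnable sketch of the CASE-guarded mean used above: rows that would
# divide by zero are mapped to NULL, which SQL aggregates like MIN then skip.
# Table and column names here are made up, and the sketch uses the SQLAlchemy
# 1.4+/2.0 case() and select() signatures rather than the legacy ones in the
# snippet; illustration only.
from sqlalchemy import (MetaData, Table, Column, Integer, String,
                        and_, case, create_engine, func, select)

engine = create_engine('sqlite://')
metadata = MetaData()
uploads = Table('uploads', metadata,
                Column('octets', Integer),
                Column('seconds', Integer),
                Column('flag', String))
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(uploads.insert(), [
        {'octets': 100, 'seconds': 0, 'flag': 'f'},  # unsafe: would divide by zero
        {'octets': 100, 'seconds': 4, 'flag': 'f'},  # safe: mean = 25
        {'octets': 900, 'seconds': 3, 'flag': 't'},  # excluded by the flag
    ])
    mean = uploads.c.octets / uploads.c.seconds
    is_safe = and_(uploads.c.seconds > 0, uploads.c.flag == 'f')
    safe_mean = case((is_safe, mean), else_=None)
    # prints 25 (or 25.0, depending on the backend's division semantics)
    print(conn.execute(select(func.min(safe_mean))).scalar())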
def get_pellets_consumption_last_n_days(n=7):
    """
    Returns the pellets consumption for the last n days. Today is included.
    """
    # lower bound of the date filter (was misleadingly named end_date)
    start_date = (datetime.utcnow() - timedelta(days=n)).strftime('%Y-%m-%d')
    return Kessel.query.with_entities(
        (func.max(Kessel.pellets_total) - func.min(Kessel.pellets_total)).label('pellets_consumption')
    ).filter(Kessel.created_at >= start_date).first().pellets_consumption
    db_session.commit()


def init_jijin():
    from database.tbl_jijin import TblJijin
    from datetime import datetime
    dis = TblJijin()
    dis.jid = "test2"
    dis.jdate = '2020-05-05'
    dis.jvalue = '2.13'
    db_session.add(dis)
    db_session.commit()


if __name__ == "__main__":
    # init_admin()
    # init_user()
    # init_setting()
    # init_version()
    from sqlalchemy import func
    from database.tbl_jijin import TblJijin
    tj = db_session.query(TblJijin.jid, TblJijin.jdate,
                          func.min(TblJijin.jvalue)).filter(TblJijin.jid == '1717')
    for t in tj.all():
        print(t)
    # te = tj.delete()
    # db_session.commit()
def calc_start_end_temp(start, end):
    calc = session.query(func.min(Measurement.tobs),
                         func.avg(Measurement.tobs),
                         func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).filter(Measurement.date <= end).all()
    return jsonify(calc[0])
def unemploymentData():
    start_date = request.args.get('start_date')
    end_date = request.args.get('end_date')
    stateparam = request.args.get("state_abbr")

    session = Session(engine)

    if not start_date:
        # query the min of all file_week_ended entries if no date is given;
        # .scalar() unwraps the value (comparing against a bare Query object
        # would embed it unevaluated)
        start_date = session.query(func.min(unemployment.file_week_ended)).scalar()
    if not end_date:
        # query the max of all file_week_ended entries if no date is given
        end_date = session.query(func.max(unemployment.file_week_ended)).scalar()

    if not stateparam:
        results = session.query(unemployment).filter(
            unemployment.file_week_ended >= start_date).filter(
            unemployment.file_week_ended <= end_date)
    if stateparam:
        print("---------------------------")
        print("Whats in State:", stateparam)
        print("Whats it's type:", type(stateparam))
        print("---------------------------")
        stateparam = stateparam.split(',')
        print("Whats in State after split:", stateparam)
        print("What type is it now?", type(stateparam))
        print("---------------------------")
        if isinstance(stateparam, list):
            print("Are you making it to this line?")
            # this makes an array of states valid and handles the single-state case
            results = session.query(unemployment).filter(
                unemployment.file_week_ended >= start_date).filter(
                unemployment.file_week_ended <= end_date).filter(
                unemployment.state_abbr.in_(stateparam)).all()

    session.close()

    data = []
    for result in results:
        data.append({
            "state": result.state,
            "state_abbr": result.state_abbr,
            "file_week_ended": result.file_week_ended,
            "initial_claims": result.initial_claims,
            "reflecting_week_ended": result.reflecting_week_ended,
            "continued_claims": result.continued_claims,
            "covered_employment": result.covered_employment,
            "insured_unemployment_rate": result.insured_unemployment_rate
        })
    return jsonify(data)
def calc_temps(start_date, end_date):
    temps_stats = session.query(func.min(Measurement.tobs),
                                func.avg(Measurement.tobs),
                                func.max(Measurement.tobs)).\
        filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
    # temps_stats is a list like [(tmin, tavg, tmax)]; a list is not callable,
    # so return the jsonified stats for the requested dates instead of
    # calling it with hard-coded dates.
    return jsonify(temps_stats)
def temp_start_end(start, end):
    """Fetch the temp data that matches the path variables supplied by the user, or a 404 if not."""
    # make sure the date format is correct
    try:
        # convert inputs to dates
        # start date
        start_date_convert = dt.datetime.strptime(start, '%Y-%m-%d')
        query_date_start = start_date_convert.date()
        # end date
        end_date_convert = dt.datetime.strptime(end, '%Y-%m-%d')
        query_date_end = end_date_convert.date()
    # if date format is wrong
    except ValueError:
        return jsonify({"error": "Date format incorrect"}), 404

    # Create our session (link) from Python to the DB
    session = Session(engine)

    # find the first date in the database:
    first_date = session.query(Measurement.date).order_by(Measurement.date).first()
    # source: https://stackoverflow.com/questions/23324266/converting-string-to-date-object-without-time-info
    # Create a datetime object and then convert it to a date object
    first_date_convert = dt.datetime.strptime(first_date[0], '%Y-%m-%d')
    first_date_convert = first_date_convert.date()

    # find the last date in the database:
    last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
    # Create a datetime object and then convert it to a date object
    last_date_convert = dt.datetime.strptime(last_date[0], '%Y-%m-%d')
    last_date_convert = last_date_convert.date()

    # determine if the user dates are within the database range
    if (query_date_start < first_date_convert) | (query_date_start > last_date_convert) | (
            query_date_end < first_date_convert) | (query_date_end > last_date_convert):
        # close session
        session.close()
        # return error message
        return jsonify({"error": "Date not found."}), 404
    else:
        # lowest temp recorded in the date range
        lowest_temp = session.query(func.min(Measurement.tobs)).\
            filter(Measurement.date >= query_date_start).\
            filter(Measurement.date <= query_date_end).all()
        # highest temp recorded in the date range
        highest_temp = session.query(func.max(Measurement.tobs)).\
            filter(Measurement.date >= query_date_start).\
            filter(Measurement.date <= query_date_end).all()
        # average temp recorded in the date range
        average_temp = session.query(func.avg(Measurement.tobs)).\
            filter(Measurement.date >= query_date_start).\
            filter(Measurement.date <= query_date_end).all()
        # close session
        session.close()
        # add results to a dictionary
        temp_info_dict = {
            'TMax': highest_temp[0][0],
            'TMin': lowest_temp[0][0],
            'TAvg': round(average_temp[0][0], 2)
        }
        # jsonify dictionary result and return
        return jsonify(temp_info_dict)
QUERY_STATISTICS_SHORT_TERM = [
    StatisticsShortTerm.metadata_id,
    StatisticsShortTerm.start,
    StatisticsShortTerm.mean,
    StatisticsShortTerm.min,
    StatisticsShortTerm.max,
    StatisticsShortTerm.last_reset,
    StatisticsShortTerm.state,
    StatisticsShortTerm.sum,
]

QUERY_STATISTICS_SUMMARY_MEAN = [
    StatisticsShortTerm.metadata_id,
    func.avg(StatisticsShortTerm.mean),
    func.min(StatisticsShortTerm.min),
    func.max(StatisticsShortTerm.max),
]

QUERY_STATISTICS_SUMMARY_SUM = [
    StatisticsShortTerm.metadata_id,
    StatisticsShortTerm.start,
    StatisticsShortTerm.last_reset,
    StatisticsShortTerm.state,
    StatisticsShortTerm.sum,
    func.row_number().over(
        partition_by=StatisticsShortTerm.metadata_id,
        order_by=StatisticsShortTerm.start.desc(),
    ).label("rownum"),
]
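# A self-contained sketch of what the row_number() window above computes:
# numbering rows within each metadata_id partition by start DESC means
# rownum == 1 marks the newest row per series, so an outer query can keep
# just those. Plain sqlite3 (3.25+ for window functions) with made-up data;
# illustration only, not the recorder's actual query.
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE stats (metadata_id INTEGER, start TEXT, total REAL)')
conn.executemany('INSERT INTO stats VALUES (?, ?, ?)', [
    (1, '2021-10-01 10:00', 5.0),
    (1, '2021-10-01 10:05', 7.5),   # newest row for series 1
    (2, '2021-10-01 10:05', 2.0),   # newest (only) row for series 2
])
rows = conn.execute(
    'SELECT metadata_id, start, total FROM ('
    '  SELECT *, row_number() OVER ('
    '    PARTITION BY metadata_id ORDER BY start DESC) AS rownum'
    '  FROM stats)'
    ' WHERE rownum = 1'
).fetchall()
print(rows)  # [(1, '2021-10-01 10:05', 7.5), (2, '2021-10-01 10:05', 2.0)]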
def min_max_years(query):
    year_col = func.extract('year', tables.Event.date)
    query = query.with_entities(func.min(year_col), func.max(year_col))
    first_year, last_year = query.one()
    return first_year, last_year
def startDateOnly():
    day_temp_results = session.query(
        func.min(Measurements.tobs),
        func.avg(Measurements.tobs),
        func.max(Measurements.tobs)).filter(
        Measurements.date >= '2016-08-23').all()
    return jsonify(day_temp_results)
def _state_attrs_exist(attr: int | None) -> Select:
    """Check if a state attributes id exists in the states table."""
    return select(func.min(States.attributes_id)).where(States.attributes_id == attr)
def start_end_temp(start, end):
    """Return TMIN, TAVG, and TMAX for all dates between start and end dates"""
    range_temp_data = session.query(func.min(Measurement.tobs),
                                    func.avg(Measurement.tobs),
                                    func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).\
        filter(Measurement.date <= end).all()
    return jsonify(range_temp_data)
def start(start):
    start = dt.datetime.strptime(start, "%Y-%m-%d")
    startresults = session.query(func.min(Measurement.tobs),
                                 func.avg(Measurement.tobs),
                                 func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).all()
    return jsonify(startresults)
def start_end(start=None, end=None):
    """Return a JSON list of tmin, tmax, tavg for the dates in range of
    start date and end date inclusive"""
    between_dates = session.query(Measurement.date,
                                  func.min(Measurement.tobs),
                                  func.avg(Measurement.tobs),
                                  func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).\
        filter(Measurement.date <= end).\
        group_by(Measurement.date).all()
    between_dates_list = list(between_dates)
    return jsonify(between_dates_list)
def recurse_annotation_up_tree(channel_id):
    bridge = Bridge(app_name=CONTENT_APP_NAME)

    ContentNodeClass = bridge.get_class(ContentNode)
    ContentNodeTable = bridge.get_table(ContentNode)
    connection = bridge.get_connection()

    node_depth = (
        bridge.session.query(func.max(ContentNodeClass.level))
        .filter_by(channel_id=channel_id)
        .scalar()
    )

    logger.info(
        "Annotating ContentNode objects with children for {levels} levels".format(
            levels=node_depth
        )
    )

    child = ContentNodeTable.alias()

    # start a transaction
    trans = connection.begin()
    start = datetime.datetime.now()

    # Update all leaf ContentNodes to set num_coach_contents and
    # on_device_resources to 1 or 0
    connection.execute(
        ContentNodeTable.update()
        .where(
            and_(
                # In this channel
                ContentNodeTable.c.channel_id == channel_id,
                # That are not topics
                ContentNodeTable.c.kind != content_kinds.TOPIC,
            )
        )
        .values(
            num_coach_contents=cast(ContentNodeTable.c.coach_content, Integer()),
            on_device_resources=cast(ContentNodeTable.c.available, Integer()),
        )
    )

    # Before starting, set availability to False on all topics.
    connection.execute(
        ContentNodeTable.update()
        .where(
            and_(
                # In this channel
                ContentNodeTable.c.channel_id == channel_id,
                # That are topics
                ContentNodeTable.c.kind == content_kinds.TOPIC,
            )
        )
        .values(available=False)
    )

    # Expression to capture all available child nodes of a contentnode
    available_nodes = select([child.c.available]).where(
        and_(
            child.c.available == True,  # noqa
            ContentNodeTable.c.id == child.c.parent_id,
        )
    )

    # Expressions for annotation of coach content

    # Expression that will resolve a boolean value for all the available
    # children of a content node: true if they all have coach_content flagged
    # on them, false otherwise.
    # Everything after the select statement should be identical to the
    # available_nodes expression above.
    if bridge.engine.name == "sqlite":
        # Use a min function to simulate an AND.
        coach_content_nodes = select([func.min(child.c.coach_content)]).where(
            and_(
                child.c.available == True,  # noqa
                ContentNodeTable.c.id == child.c.parent_id,
            )
        )
    elif bridge.engine.name == "postgresql":
        # Use the postgres boolean AND operator
        coach_content_nodes = select([func.bool_and(child.c.coach_content)]).where(
            and_(
                child.c.available == True,  # noqa
                ContentNodeTable.c.id == child.c.parent_id,
            )
        )

    # Expression that sums the total number of coach contents for each child
    # node of a contentnode
    coach_content_num = select([func.sum(child.c.num_coach_contents)]).where(
        and_(
            child.c.available == True,  # noqa
            ContentNodeTable.c.id == child.c.parent_id,
        )
    )

    # Expression that sums the total number of on_device_resources for each
    # child node of a contentnode
    on_device_num = select([func.sum(child.c.on_device_resources)]).where(
        and_(
            child.c.available == True,  # noqa
            ContentNodeTable.c.id == child.c.parent_id,
        )
    )

    # Go from the deepest level to the shallowest
    for level in range(node_depth, 0, -1):

        logger.info(
            "Annotating ContentNode objects with children for level {level}".format(
                level=level
            )
        )
        # Only modify topic availability here
        connection.execute(
            ContentNodeTable.update()
            .where(
                and_(
                    ContentNodeTable.c.level == level - 1,
                    ContentNodeTable.c.channel_id == channel_id,
                    ContentNodeTable.c.kind == content_kinds.TOPIC,
                )
            )
            # Because we have set availability to False on all topics as a
            # starting point, we only need to make updates to topics with
            # available children.
            .where(exists(available_nodes))
            .values(
                available=exists(available_nodes),
                coach_content=coach_content_nodes,
                num_coach_contents=coach_content_num,
                on_device_resources=on_device_num,
            )
        )

    # commit the transaction
    trans.commit()

    elapsed = datetime.datetime.now() - start
    logger.debug(
        "Recursive topic tree annotation took {} seconds".format(elapsed.seconds)
    )

    bridge.end()
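# A minimal runnable sketch of the "min as AND" trick used above for SQLite,
# where booleans are stored as 0/1 integers: MIN over a group is 1 exactly
# when every row in the group is 1, i.e. a logical AND (Postgres gets the
# real bool_and aggregate instead). Pure sqlite3 with a made-up table, for
# illustration only.
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE node (parent_id INTEGER, coach_content INTEGER)')
conn.executemany('INSERT INTO node VALUES (?, ?)', [
    (1, 1), (1, 1),   # all children of parent 1 are coach content
    (2, 1), (2, 0),   # one child of parent 2 is not
])
rows = conn.execute(
    'SELECT parent_id, MIN(coach_content) FROM node GROUP BY parent_id'
).fetchall()
print(rows)  # [(1, 1), (2, 0)] -> AND over each parent's children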
def temps_calculations_end():
    temps_calculations_start_end = session.query(func.min(measurement.tobs),
                                                 func.avg(measurement.tobs),
                                                 func.max(measurement.tobs)).\
        filter(measurement.date >= "2015-02-23").filter(measurement.date <= "2015-03-01").all()
    return jsonify(temps_calculations_start_end)
def startend(startdate, enddate):
    session = Session(engine)
    results = session.query(func.min(Measurement.tobs),
                            func.avg(Measurement.tobs),
                            func.max(Measurement.tobs)).\
        filter(Measurement.date >= startdate).\
        filter(Measurement.date <= enddate).all()
    return jsonify(results)
AlbumSongs = \
    Table('album_songs', metadata,
          Column('song_id', Integer,
                 ForeignKey(Songs.c.id, ondelete='CASCADE'),
                 primary_key=True, autoincrement=False, nullable=False),
          Column('album_id', Integer,
                 ForeignKey(Albums.c.id, ondelete='CASCADE'),
                 autoincrement=False, nullable=False),
          Index('album_songs_song_id_idx', 'song_id', unique=True),
          Index('album_songs_album_id_idx', 'album_id'))

album_properties_s = (select(
    [AlbumSongs.c.album_id,
     Properties.c.format,
     func.min(Properties.c.bitrate).label('min_bitrate'),
     func.max(Properties.c.bitrate).label('max_bitrate'),
     func.min(Properties.c.bits_per_sample).label('min_bits_per_sample'),
     func.max(Properties.c.bits_per_sample).label('max_bits_per_sample'),
     func.min(Properties.c.sample_rate).label('min_sample_rate'),
     func.max(Properties.c.sample_rate).label('max_sample_rate'),
     func.min(Properties.c.channels).label('min_channels'),
     func.max(Properties.c.channels).label('max_channels')])
    .where(AlbumSongs.c.song_id == Properties.c.song_id)
    .group_by(AlbumSongs.c.album_id, Properties.c.format))

AlbumProperties = \
    Table('album_properties', metadata,
          Column('album_id', Integer,
                 ForeignKey(Albums.c.id, ondelete='CASCADE'),
                 autoincrement=False, nullable=False, index=True),
# users = session.query(User).filter(User.phone.notlike('137%'))
# SELECT * FROM user WHERE phone NOT LIKE '137%';
# users = session.query(User).filter(~User.phone.like('137%'))
# SELECT * FROM user WHERE phone NOT LIKE '137%';
# users = session.query(User).filter(User.id >= 5).order_by(User.id)
# select * from User where id >= 5 ORDER BY id;
# users = session.query(User).filter(User.id >= 5).order_by(User.id.desc())
# select * from User where id >= 5 ORDER BY id DESC;
# users = session.query(User).filter(User.id >= 5).order_by(User.id.desc()).order_by(User.age)
# select * from User where id >= 5 ORDER BY id DESC, age;
# users = session.query(User).filter(User.id >= 5).order_by(User.id.desc(), User.age.asc())
# select * from User where id >= 5 ORDER BY id DESC, age ASC;
# users = session.query(User).order_by(User.id.asc()).limit(2)
# select * from User ORDER BY id ASC LIMIT 2;
# users = session.query(User).order_by(User.id.asc()).limit(2).offset(3)
# select * from User ORDER BY id ASC LIMIT 3,2;
# showresults(users)

# users = session.query(User).filter(User.id > 5)
# print(users)
# print("Count::::::::::::::: ", users.count())
# print(list(users))
# print(users.all())
# print(users.one())
# print(users.scalar())
# print(users.first())

result = session.query(User.gender,
                       func.count(User.id),
                       func.min(User.age),
                       func.max(User.age),
                       func.avg(User.age),
                       func.sum(User.age)).group_by(User.gender)
# print('rrrrrrrrrrrrr', result)
# print('oooooooooooo', result.one())
# print('ssssssssssssssssss', result.scalar())  # scalar() takes the first item of one()
print(result.all())
from sqlalchemy.sql.expression import cast

import ceilometer
from ceilometer.i18n import _, _LI
from ceilometer import storage
from ceilometer.storage import base
from ceilometer.storage import models as api_models
from ceilometer.storage.sqlalchemy import models
from ceilometer.storage.sqlalchemy import utils as sql_utils
from ceilometer import utils

LOG = log.getLogger(__name__)

STANDARD_AGGREGATES = dict(
    avg=func.avg(models.Sample.volume).label('avg'),
    sum=func.sum(models.Sample.volume).label('sum'),
    min=func.min(models.Sample.volume).label('min'),
    max=func.max(models.Sample.volume).label('max'),
    count=func.count(models.Sample.volume).label('count'))

UNPARAMETERIZED_AGGREGATES = dict(
    stddev=func.stddev_pop(models.Sample.volume).label('stddev'))

PARAMETERIZED_AGGREGATES = dict(
    validate=dict(
        cardinality=lambda p: p in ['resource_id', 'user_id', 'project_id']),
    compute=dict(cardinality=lambda p: func.count(
        distinct(getattr(models.Resource, p))).label('cardinality/%s' % p)))

AVAILABLE_CAPABILITIES = {
    'meters': {
        self.avgDC = 0 if l == 0 else sumDC / l

classReport = {}
projectName = "three.js"
# projectName = "awesome_qr"
globalReport = ProjectCodeSmellReport()
globalReport.projectName = projectName
classKeys = []
sdcMin = 0
sdcMax = 0
sfeMin = 0
sfeMax = 0
sblobMin = 0
sblobMax = 0
# unpack into named variables instead of shadowing the built-in min/max
for (smell_min, smell_max) in session.query(func.min(CodeSmell.intensity),
                                            func.max(CodeSmell.intensity)).filter_by(
        projectName=projectName, algo='cdisp'):
    sdcMin = smell_min
    sdcMax = smell_max
for (smell_min, smell_max) in session.query(func.min(CodeSmell.intensity),
                                            func.max(CodeSmell.intensity)).filter_by(
        projectName=projectName, algo='JDeodorant'):
    sfeMin = smell_min
    sfeMax = smell_max
for (smell_min, smell_max) in session.query(func.min(CodeSmell.intensity),
                                            func.max(CodeSmell.intensity)).filter_by(
        projectName=projectName, algo='DECOR'):
    sblobMin = smell_min
    sblobMax = smell_max
globalReport.init(sdcMin, sdcMax, sfeMin, sfeMax, sblobMin, sblobMax)
for smell in session.query(CodeSmell).filter_by(projectName=projectName):
def getSteps(self, beneficiary_id):
    result = {}
    try:
        engine = db
        # create a Session
        Session = sessionmaker(bind=engine)
        session = Session()
        pilotdateres = session.query(PilotCommencement).first()
        if pilotdateres is None:
            sys.exit()
        else:
            datestarted = pilotdateres.datestarted
        if self.last_date_specified == 1:
            day = self.myjson["Day"]
            if day == "Today":
                day = datetime.date.today()
            res = session.query(
                func.sum(PhysicalActivity.stepscounter).label("sum_steps")
            ).filter(
                PhysicalActivity.beneficiary_id == beneficiary_id).filter(
                PhysicalActivity.datecaptured >= datestarted).filter(
                PhysicalActivity.datecaptured <= day).first()
        else:
            res = session.query(
                func.sum(PhysicalActivity.stepscounter).label("sum_steps")
            ).filter(
                PhysicalActivity.beneficiary_id == beneficiary_id).filter(
                PhysicalActivity.datecaptured >= datestarted).first()
        if res.sum_steps is None:
            sum_steps = 0
        else:
            sum_steps = int(res.sum_steps)
        result["steps"] = sum_steps
        if self.last_date_specified == 1:
            res = session.query(
                func.min(PhysicalActivity.datecaptured).label("min_date")
            ).filter(
                PhysicalActivity.beneficiary_id == beneficiary_id).filter(
                PhysicalActivity.datecaptured <= day).first()
        else:
            res = session.query(
                func.min(PhysicalActivity.datecaptured).label("min_date")
            ).filter(
                PhysicalActivity.beneficiary_id == beneficiary_id).first()
        min_date = res.min_date
        if self.last_date_specified == 1:
            max_date = self.myjson["Day"]
            if max_date == "Today":
                max_date = datetime.date.today()
            else:
                max_date = datetime.datetime.strptime(max_date, '%Y-%m-%d').date()
        else:
            max_date = datetime.date.today()
        if min_date is None:
            dates_difference = 1
        else:
            delta = max_date - min_date
            dates_difference = delta.days + 1
            # guard the comparison inside the else branch: comparing a None
            # min_date against max_date would raise a TypeError
            if min_date > max_date:
                dates_difference = 1
        result["dates_counter"] = dates_difference
    except Exception as e:
        print("Exception thrown in function getSteps(): %s" % e)
        result["steps"] = 0
        result["dates_counter"] = 1
    # self.steps = sum_steps
    session.close()
    engine.dispose()
    dbconn.close()
    return json.JSONEncoder().encode(result)
def get_resources(self, user=None, project=None, source=None,
                  start_timestamp=None, start_timestamp_op=None,
                  end_timestamp=None, end_timestamp_op=None,
                  metaquery=None, resource=None, limit=None):
    """Return an iterable of api_models.Resource instances

    :param user: Optional ID for user that owns the resource.
    :param project: Optional ID for project that owns the resource.
    :param source: Optional source filter.
    :param start_timestamp: Optional modified timestamp start range.
    :param start_timestamp_op: Optional start time operator, like gt, ge.
    :param end_timestamp: Optional modified timestamp end range.
    :param end_timestamp_op: Optional end time operator, like lt, le.
    :param metaquery: Optional dict with metadata to match on.
    :param resource: Optional resource filter.
    :param limit: Maximum number of results to return.
    """
    if limit == 0:
        return
    s_filter = storage.SampleFilter(user=user,
                                    project=project,
                                    source=source,
                                    start_timestamp=start_timestamp,
                                    start_timestamp_op=start_timestamp_op,
                                    end_timestamp=end_timestamp,
                                    end_timestamp_op=end_timestamp_op,
                                    metaquery=metaquery,
                                    resource=resource)

    session = self._engine_facade.get_session()
    # get list of resource_ids
    has_timestamp = start_timestamp or end_timestamp
    # NOTE: When sql_expire_samples_only is enabled, there will be some
    # resources without any sample, in such case we should use inner
    # join on sample table to avoid wrong result.
    if cfg.CONF.sql_expire_samples_only or has_timestamp:
        res_q = session.query(distinct(models.Resource.resource_id)).join(
            models.Sample,
            models.Sample.resource_id == models.Resource.internal_id)
    else:
        res_q = session.query(distinct(models.Resource.resource_id))
    res_q = make_query_from_filter(session, res_q, s_filter,
                                   require_meter=False)
    res_q = res_q.limit(limit) if limit else res_q
    for res_id in res_q.all():
        # get max and min sample timestamp value
        min_max_q = (session.query(
            func.max(models.Sample.timestamp).label('max_timestamp'),
            func.min(models.Sample.timestamp).label('min_timestamp')).join(
                models.Resource,
                models.Resource.internal_id == models.Sample.resource_id
            ).filter(models.Resource.resource_id == res_id[0]))

        min_max_q = make_query_from_filter(session, min_max_q, s_filter,
                                           require_meter=False)

        min_max = min_max_q.first()

        # get resource details for latest sample
        res_q = (session.query(models.Resource.resource_id,
                               models.Resource.user_id,
                               models.Resource.project_id,
                               models.Resource.source_id,
                               models.Resource.resource_metadata).join(
            models.Sample,
            models.Sample.resource_id == models.Resource.internal_id
        ).filter(models.Sample.timestamp == min_max.max_timestamp).filter(
            models.Resource.resource_id == res_id[0]).order_by(
            models.Sample.id.desc()).limit(1))

        res = res_q.first()

        yield api_models.Resource(
            resource_id=res.resource_id,
            project_id=res.project_id,
            first_sample_timestamp=min_max.min_timestamp,
            last_sample_timestamp=min_max.max_timestamp,
            source=res.source_id,
            user_id=res.user_id,
            metadata=res.resource_metadata)
year_ago = dt.datetime(2017, 8, 23) - dt.timedelta(days=365)
# print(year_ago)  # 2016-08-23

# combine both queries--last 12 months of measurement dates and precipitation
last_12_date_prcp = (session.query(Measurement.date, Measurement.prcp).filter(
    Measurement.date >= year_ago).order_by(Measurement.date).all())

# What are the most active stations? (i.e. which stations have the most rows?)
active_stations = (session.query(func.count(Measurement.station),
                                 Measurement.station).group_by(
    Measurement.station).order_by(func.count(Measurement.station).desc()))

# Using the station id from the previous query, calculate the lowest
# temperature recorded, highest temperature recorded, and average temperature
# for the most active station
lo_hi_avg = (session.query(func.min(Measurement.tobs),
                           func.max(Measurement.tobs),
                           func.avg(Measurement.tobs),
                           Measurement.station).group_by(Measurement.station))

#################################################
############# FLASK CODE BELOW ####################

# Create an app; remember to pass __name__.
# __name__ represents the name of the python file.
app = Flask(__name__)

# Create the 'route' in the web server.
# Define a function to execute when people access this route.
def start_date(start, end):
    session = Session(engine)
    results = session.query(func.min(Measurement.tobs),
                            func.avg(Measurement.tobs),
                            func.max(Measurement.tobs)).\
        filter(func.strftime("%Y %m %d", Measurement.date) >= start).\
        filter(func.strftime("%Y %m %d", Measurement.date) <= end)
    return jsonify(results[0])
def start_temp(start):
    """Return TMIN, TAVG, and TMAX for dates >= to start date"""
    temp_data = session.query(func.min(Measurement.tobs),
                              func.avg(Measurement.tobs),
                              func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).all()
    return jsonify(temp_data)
def get_resources(self, user=None, project=None, source=None,
                  start_timestamp=None, start_timestamp_op=None,
                  end_timestamp=None, end_timestamp_op=None,
                  metaquery=None, resource=None, pagination=None):
    """Return an iterable of api_models.Resource instances

    :param user: Optional ID for user that owns the resource.
    :param project: Optional ID for project that owns the resource.
    :param source: Optional source filter.
    :param start_timestamp: Optional modified timestamp start range.
    :param start_timestamp_op: Optional start time operator, like gt, ge.
    :param end_timestamp: Optional modified timestamp end range.
    :param end_timestamp_op: Optional end time operator, like lt, le.
    :param metaquery: Optional dict with metadata to match on.
    :param resource: Optional resource filter.
    :param pagination: Optional pagination query.
    """
    if pagination:
        raise ceilometer.NotImplementedError('Pagination not implemented')

    s_filter = storage.SampleFilter(user=user,
                                    project=project,
                                    source=source,
                                    start_timestamp=start_timestamp,
                                    start_timestamp_op=start_timestamp_op,
                                    end_timestamp=end_timestamp,
                                    end_timestamp_op=end_timestamp_op,
                                    metaquery=metaquery,
                                    resource=resource)

    session = self._engine_facade.get_session()
    # get list of resource_ids
    res_q = session.query(distinct(models.Resource.resource_id)).join(
        models.Sample,
        models.Sample.resource_id == models.Resource.internal_id)
    res_q = make_query_from_filter(session, res_q, s_filter,
                                   require_meter=False)

    for res_id in res_q.all():
        # get max and min sample timestamp value
        min_max_q = (session.query(
            func.max(models.Sample.timestamp).label('max_timestamp'),
            func.min(models.Sample.timestamp).label('min_timestamp')).join(
                models.Resource,
                models.Resource.internal_id == models.Sample.resource_id
            ).filter(models.Resource.resource_id == res_id[0]))

        min_max_q = make_query_from_filter(session, min_max_q, s_filter,
                                           require_meter=False)

        min_max = min_max_q.first()

        # get resource details for latest sample
        res_q = (session.query(models.Resource.resource_id,
                               models.Resource.user_id,
                               models.Resource.project_id,
                               models.Resource.source_id,
                               models.Resource.resource_metadata).join(
            models.Sample,
            models.Sample.resource_id == models.Resource.internal_id
        ).filter(models.Sample.timestamp == min_max.max_timestamp).filter(
            models.Resource.resource_id == res_id[0]).order_by(
            models.Sample.id.desc()).limit(1))

        res = res_q.first()

        yield api_models.Resource(
            resource_id=res.resource_id,
            project_id=res.project_id,
            first_sample_timestamp=min_max.min_timestamp,
            last_sample_timestamp=min_max.max_timestamp,
            source=res.source_id,
            user_id=res.user_id,
            metadata=res.resource_metadata)
def trip_calc_temps(start_date, end_date):
    session = Session(engine)
    trip_start_end = session.query(func.min(Measurement.tobs),
                                   func.avg(Measurement.tobs),
                                   func.max(Measurement.tobs)).\
        filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
    tse = list(np.ravel(trip_start_end))
    return jsonify(tse)
def start(start_date=None):
    session = Session(engine)
    trip_start = session.query(func.min(Measurement.tobs),
                               func.avg(Measurement.tobs),
                               func.max(Measurement.tobs)).\
        filter(Measurement.date >= start_date).all()
    ts = list(np.ravel(trip_start))
    return jsonify(ts)
        temp_dict['station'] = active_station.station
        temp_dict['tobs'] = active_station.tobs
        temp_station.append(temp_dict)

    return jsonify(temp_station)


#########################
## start & end
##########################
@app.route("/api/v1.0/<start>/<end>")
def start_end(start, end):
    start_date = dt.datetime.strptime(start, '%Y-%m-%d')
    end_date = dt.datetime.strptime(end, '%Y-%m-%d')
    trip_result = session.query(func.min(Measurement.tobs),
                                func.avg(Measurement.tobs),
                                func.max(Measurement.tobs)).\
        filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()

    trip_temp = []
    # unpack each result row instead of indexing into the (still empty)
    # output list; the original loop was missing its loop variable and used
    # undefined names for the dict keys
    for tmin, tavg, tmax in trip_result:
        trip_dict = {}
        trip_dict['tmin'] = tmin
        trip_dict['tavg'] = tavg
        trip_dict['tmax'] = tmax
        trip_temp.append(trip_dict)

    return jsonify(trip_temp)

####################
####################
def _event_data_id_exist(data_id: int | None) -> Select:
    """Check if an event data id exists in the events table."""
    return select(func.min(Events.data_id)).where(Events.data_id == data_id)
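# A small runnable sketch of the MIN-as-existence-check pattern used in this
# helper and in _state_attrs_exist above: selecting MIN(id) for a matching
# row returns the id when it exists and NULL (None) when it does not, so the
# query always yields exactly one scalar. Plain sqlite3 with a made-up table,
# for illustration only.
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE events (data_id INTEGER)')
conn.execute('INSERT INTO events VALUES (7)')

def data_id_exists(data_id):
    row = conn.execute(
        'SELECT MIN(data_id) FROM events WHERE data_id = ?', (data_id,)
    ).fetchone()
    return row[0] is not None

print(data_id_exists(7))   # True
print(data_id_exists(8))   # False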
def start(start=None):
    """Return a JSON list of tmin, tmax, tavg for the dates greater than
    or equal to the date provided"""
    from_start = session.query(Measurement.date,
                               func.min(Measurement.tobs),
                               func.avg(Measurement.tobs),
                               func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).group_by(Measurement.date).all()
    from_start_list = list(from_start)
    return jsonify(from_start_list)