Code example #1
File: services.py Project: DataViva/dataviva-site
    def __init__(self, cnae_id, bra_id):
        Industry.__init__(self, cnae_id)
        self.max_year = db.session.query(func.max(Ybi.year)).filter_by(cnae_id=cnae_id)
        self.rais_query = Ybi.query.join(Bra).filter(
                Bra.id == Ybi.bra_id,
                Ybi.cnae_id == self.cnae_id,
                Ybi.bra_id_len == 9,
                Ybi.year == self.max_year,
                )
        self.state_query = Ybi.query.join(Bra).filter(
                Ybi.cnae_id == self.cnae_id,
                Ybi.year == self.max_year,
                Ybi.bra_id_len == 3
                ).order_by(desc(Ybi.num_jobs)).first()

        if bra_id:
            self.bra_id = bra_id
            self.max_year = db.session.query(func.max(Ybi.year)).filter_by(cnae_id=cnae_id)
            self.rais_query = Ybi.query.join(Bra).filter(
                Bra.id == Ybi.bra_id,
                Ybi.cnae_id == self.cnae_id,
                Ybi.bra_id_len == 9,
                Ybi.bra_id.like(self.bra_id+'%'),
                Ybi.year == self.max_year
                )
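
Note: the constructor above compares Ybi.year against a stored Query object. The same "filter by max year" idea can be written as an explicit scalar subquery in a single statement; a minimal sketch, reusing the example's Ybi model and Flask-SQLAlchemy db purely for illustration:

from sqlalchemy import func

# Explicit scalar subquery (.as_scalar() is .scalar_subquery() on SQLAlchemy 1.4+)
max_year = (db.session.query(func.max(Ybi.year))
            .filter(Ybi.cnae_id == cnae_id)
            .as_scalar())
rais_query = Ybi.query.filter(
    Ybi.cnae_id == cnae_id,
    Ybi.bra_id_len == 9,
    Ybi.year == max_year,  # compiles to: year = (SELECT max(year) ...)
)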
Code example #2
def get_current_data():
    """Returns JSON describing the last thing in the system log."""
    
    global ol0
    global wd0
    
    #perform query
    opLog = db.session.query(ol0).filter(ol0.id==db.session.query(ol0).with_entities(func.max(ol0.id)).one()[0])[0]
    wData = db.session.query(wd0).filter(wd0.id==db.session.query(wd0).with_entities(func.max(wd0.id)).one()[0])[0]
    
    mTime  = unix_time(opLog.time)
    inTemp = opLog.indoorTemp
    setPtTemp = opLog.setpointTemp
    state = unicode(thermoStateStr[opLog.state])
    
    extTemp = wData.extTemp
    extTempTime = unix_time(wData.time)
    
    return jsonify({
        u'inTemp'      : inTemp,
        u'inTempTime'  : mTime,
        u'outTemp'     : extTemp,
        u'outTempTime' : extTempTime,
        u'setPtTemp'   : setPtTemp,
        u'opMode'      : state
    })
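
Note: the two id == max(id) probes above each cost an extra round trip. The same "latest row" can be fetched with a single ORDER BY query; a sketch assuming the same ol0/wd0 mapped classes with an integer primary key id:

# Latest row per table, without a separate max(id) subquery
opLog = db.session.query(ol0).order_by(ol0.id.desc()).first()
wData = db.session.query(wd0).order_by(wd0.id.desc()).first()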
Code example #3
File: cruft.py Project: Debian/dak
def newer_version(lowersuite_name, highersuite_name, session, include_equal=False):
    '''
    Finds newer versions in lowersuite_name than in highersuite_name. Returns a
    list of tuples (source, higherversion, lowerversion) where higherversion is
    the newest version from highersuite_name and lowerversion is the newest
    version from lowersuite_name.
    '''

    lowersuite = get_suite(lowersuite_name, session)
    highersuite = get_suite(highersuite_name, session)

    query = session.query(DBSource.source, func.max(DBSource.version)). \
        with_parent(highersuite).group_by(DBSource.source)

    list = []
    for (source, higherversion) in query:
        q = session.query(func.max(DBSource.version)). \
            filter_by(source=source)
        if include_equal:
            q = q.filter(DBSource.version >= higherversion)
        else:
            q = q.filter(DBSource.version > higherversion)
        lowerversion = q.with_parent(lowersuite).group_by(DBSource.source).scalar()
        if lowerversion is not None:
            list.append((source, higherversion, lowerversion))

    list.sort()
    return list
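
A hypothetical call sketch for the helper above; the suite names and the session setup are placeholders, not part of the dak example:

# Report sources whose newest version in 'stable' is newer than in 'unstable'
for source, higher, lower in newer_version('stable', 'unstable', session):
    print('%s: %s (stable) is newer than %s (unstable)' % (source, lower, higher))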
Code example #4
File: cronrunner.py Project: kaze/paasmaker
	def _get_cache_key(self, session):
		updated_cron = session.query(
			func.max(paasmaker.model.ApplicationInstanceTypeCron.updated)
		).scalar()
		deleted_cron = session.query(
			func.max(paasmaker.model.ApplicationInstanceTypeCron.deleted)
		).scalar()
		updated_version = session.query(
			func.max(paasmaker.model.ApplicationVersion.updated)
		).scalar()
		deleted_version = session.query(
			func.max(paasmaker.model.ApplicationVersion.deleted)
		).scalar()

		max_date = self._max_dates(
			updated_cron,
			deleted_cron,
			updated_version,
			deleted_version
		)

		summer = hashlib.md5()
		summer.update(max_date)
		key = summer.hexdigest()

		return key
Code example #5
    def _analyze(self, session):
        newest_timestamp = session.execute(func.max(NodeSample.sampled)).scalar()
        self.nodes = session.query(NodeSample).filter(NodeSample.sampled == newest_timestamp).all()
        newest_timestamp = session.execute(func.max(LinkSample.sampled)).scalar()
        self.links = session.query(LinkSample).filter(LinkSample.sampled == newest_timestamp).all()

        self.samples.add(newest_timestamp)
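
Note: each pair of statements above (a max() probe followed by an equality filter) can be folded into one query with a scalar subquery; a sketch assuming the same NodeSample model:

from sqlalchemy import func

# One round trip: keep rows whose timestamp equals the table-wide maximum
newest = session.query(func.max(NodeSample.sampled)).as_scalar()
nodes = (session.query(NodeSample)
         .filter(NodeSample.sampled == newest)
         .all())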
Code example #6
File: controllers.py Project: ncdesouza/bookworm
def book(bookID):
    form = BidForm()
    bookS = db.session.query(Book).filter(Book.book_id == bookID).scalar()
    print(str(db.session.query(Book).filter(Book.book_id == bookID).as_scalar()))
    aucID = db.session.query(Auction.auc_id).filter(Auction.book_id == bookID).scalar()
    print(str(db.session.query(Auction.auc_id).filter(Auction.book_id == bookID).as_scalar()))
    curPrice = db.session.query(func.max(Bid.bid_price)).filter(Bid.auc_id == aucID).scalar()
    print(str(db.session.query(func.max(Bid.bid_price)).filter(Bid.auc_id == aucID).as_scalar()))
    if request.method == 'POST':
        if not form.validate():
            flash('Bid Unsuccessful')
            return render_template('auction/book.html', form=form, book=bookS, curPrice=curPrice)
        else:
            if 'email' not in session:
                return redirect(url_for('auth.signin'))
            else:
                usrID = db.session.query(User.user_id).filter(User.email == session['email']).scalar()
                highBid = db.session.query(Bid.bid_price).filter(Bid.auc_id == aucID).\
                    filter(Bid.bid_price >= form.bid_price.data).first()
                if highBid:
                    flash('Your bid needs to be higher than the current price')
                    return render_template('auction/book.html', form=form, book=bookS, curPrice=curPrice)
                else:
                    newBid = Bid(auc_id=aucID, user_id=usrID, bid_price=form.bid_price.data)
                    db.session.add(newBid)
                    db.session.commit()
                    flash('Bid Successful')
                return render_template('auction/book.html', form=form, book=bookS, curPrice=form.bid_price.data)
    
    elif request.method == 'GET':
        return render_template('auction/book.html', form=form, book=bookS, curPrice=curPrice)
Code example #7
def compress_smallest_box():
	last_box = session.query(func.max(sqlalchemy.cast(InvCard.box, sqlalchemy.Integer))).first()[0]
	box_capacity = list(metadata.bind.execute("select box,60 - count(*) as c from inv_cards where box not null group by box having c>0 order by c desc;"))
	remove_box = box_capacity[0][0]
	box_capacity = box_capacity[1:]

	cards_in_remove_box = InvCard.query.filter_by(box=str(remove_box)).order_by(InvCard.box_index.desc()).all()

	move_orders = fit_boxes(box_capacity, len(cards_in_remove_box))
	i=0

	print "********** move %d cards from box %s **********" % (60-box_capacity[0][1], remove_box)
	print "\tall boxes: %s" % sorted([int(x) for x in [remove_box] + [b for b,o in move_orders]])
	for box, count in move_orders:
		max_index = session.query(func.max(InvCard.box_index)).filter_by(box=box).one()[0]
		print "======= moving %d cards to box %s ======" % (count, box)
		for card in cards_in_remove_box[i:count+i]:
			print u"move %s to %s/%d" % (card, box, max_index)
			max_index += 1
			card.box = box
			card.box_index = max_index
		i+=count
	
	if remove_box != last_box:
		cards_in_last_box = InvCard.query.filter_by(box=str(last_box)).order_by(InvCard.box_index).all()
		print "********** finally, move all %d cards from %s to %s **********" % (len(cards_in_last_box),last_box, remove_box)
		for card in cards_in_last_box:
			card.box = remove_box
	raw_input()
	session.commit()
Code example #8
File: views.py Project: johnfelipe/bizkaisense
def api_outlimit_stations(request, propid, startdate, enddate, limit):
    session = Session()

    #response = HttpResponse(mimetype='text/csv')
    #response['Content-Disposition'] = 'attachment; filename="obs_' + stid + '_' + propid + '_' + date + '.csv"'

    startdate = datetime(int(startdate.split('-')[0]), int(startdate.split('-')[1]), int(startdate.split('-')[2]))
    enddate = datetime(int(enddate.split('-')[0]), int(enddate.split('-')[1]), int(enddate.split('-')[2]))

    #max = aliased(func.max(Observation.value))
    #currentdate = aliased(func.date(Observation.date))

    observations = session.query(func.max(Observation.value), Station.municipality, Station.lat, Station.lng, func.date(Observation.date)).\
    join(Station).join(Property).\
    filter(Observation.date.between(startdate, enddate), Property.name == propid).\
    group_by(func.date(Observation.date), Station.code).having(func.max(Observation.value) >= float(limit)).all()

    #Observation.value >= limit
    #writer = csv.writer(response)
    #for obs in observations:
    #    writer.writerow([obs.station.lat, obs.station.lng, obs.date, obs.value])

    resp = []
    for obs in observations:
        o = {}
        o['lat'] = obs[2]
        o['lng'] = obs[3]
        o['date'] = obs[4].isoformat()
        o['municipality'] = obs[1]
        o['value'] = obs[0]
        resp.append(o)

    session.close()
    #return response
    return resp
Code example #9
File: SQLSupport.py Project: 0Chuzz/pytorctl
  def _compute_ranks():
    tc_session.clear()
    min_r = select([func.min(BwHistory.rank)],
        BwHistory.table.c.router_idhex
            == RouterStats.table.c.router_idhex).as_scalar()
    avg_r = select([func.avg(BwHistory.rank)],
        BwHistory.table.c.router_idhex
            == RouterStats.table.c.router_idhex).as_scalar()
    max_r = select([func.max(BwHistory.rank)],
        BwHistory.table.c.router_idhex
            == RouterStats.table.c.router_idhex).as_scalar()
    avg_bw = select([func.avg(BwHistory.bw)],
        BwHistory.table.c.router_idhex
            == RouterStats.table.c.router_idhex).as_scalar()
    avg_desc_bw = select([func.avg(BwHistory.desc_bw)],
        BwHistory.table.c.router_idhex
            == RouterStats.table.c.router_idhex).as_scalar()

    RouterStats.table.update(values=
       {RouterStats.table.c.min_rank:min_r,
        RouterStats.table.c.avg_rank:avg_r,
        RouterStats.table.c.max_rank:max_r,
        RouterStats.table.c.avg_bw:avg_bw,
        RouterStats.table.c.avg_desc_bw:avg_desc_bw}).execute()

    #min_avg_rank = select([func.min(RouterStats.avg_rank)]).as_scalar()
    max_avg_rank = select([func.max(RouterStats.avg_rank)]).as_scalar()

    RouterStats.table.update(values=
       {RouterStats.table.c.percentile:
            (100.0*RouterStats.table.c.avg_rank)/max_avg_rank}).execute()
    tc_session.commit()
Code example #10
File: stats.py Project: fabid/gtfseditor
def get_stats():
    result = db.session.query(
        func.min(Stop.stop_lat), func.min(Stop.stop_lon), func.max(Stop.stop_lat), func.max(Stop.stop_lon), func.count()
    ).first()
    data = {"minLat": result[0], "minLon": result[1], "maxLat": result[2], "maxLon": result[3], "numbers": result[4]}

    return jsonify({"stops": data})
Code example #11
File: maps.py Project: kamalhg/nosfinanceslocales
def get_extent():
    return DBSession.query(
        func.min(func.ST_XMin(AdminZone.geometry)),
        func.min(func.ST_YMin(AdminZone.geometry)),
        func.max(func.ST_XMax(AdminZone.geometry)),
        func.max(func.ST_YMax(AdminZone.geometry)),
    ).first()
Code example #12
File: helper.py Project: diegoronal/solarpi
def get_todays_electricity():
    return Electricity.query.with_entities(
        (func.max(Electricity.meter_280) - func.min(Electricity.meter_280)).label(
            'todays_export'), (func.max(Electricity.meter_180) - func.min(Electricity.meter_180)).label(
            'todays_import')).filter(
        func.strftime('%Y-%m-%d', Electricity.created_at) == datetime.now().strftime('%Y-%m-%d')).group_by(
        func.strftime('%Y-%m-%d', Electricity.created_at)).first()
Code example #13
File: trial.py Project: gems-uff/noworkflow
    def last_trial(cls, script=None, parent_required=False,
                   session=None):
        """Return last trial according to start time

        Keyword arguments:
        script -- specify the desired script (default=None)
        parent_required -- valid only if script exists (default=False)
        """
        model = cls.m
        session = session or relational.session
        trial = (
            session.query(model)
            .filter(model.start.in_(
                select([func.max(model.start)])
                .where(model.script == script)
            ))
        ).first()
        if trial or parent_required:
            return trial
        return (
            session.query(model)
            .filter(model.start.in_(
                select([func.max(model.start)])
            ))
        ).first()
Code example #14
File: modbot.py Project: LateNitePie/AutoModerator
def main():
    logging.config.fileConfig(path_to_cfg)
    start_utc = datetime.utcnow()
    start_time = time()

    global r
    try:
        r = reddit.Reddit(user_agent=cfg_file.get('reddit', 'user_agent'))
        logging.info('Logging in as %s', cfg_file.get('reddit', 'username'))
        r.login(cfg_file.get('reddit', 'username'),
            cfg_file.get('reddit', 'password'))

        subreddits = Subreddit.query.filter(Subreddit.enabled == True).all()
        sr_dict = dict()
        for subreddit in subreddits:
            sr_dict[subreddit.name.lower()] = subreddit
        mod_subreddit = r.get_subreddit('mod')
    except Exception as e:
        logging.error('  ERROR: %s', e)

    # check reports
    items = mod_subreddit.get_reports(limit=1000)
    stop_time = datetime.utcnow() - REPORT_BACKLOG_LIMIT
    check_items('report', items, sr_dict, stop_time)

    # check spam
    items = mod_subreddit.get_spam(limit=1000)
    stop_time = (db.session.query(func.max(Subreddit.last_spam))
                 .filter(Subreddit.enabled == True).one()[0])
    check_items('spam', items, sr_dict, stop_time)

    # check new submissions
    items = mod_subreddit.get_new_by_date(limit=1000)
    stop_time = (db.session.query(func.max(Subreddit.last_submission))
                 .filter(Subreddit.enabled == True).one()[0])
    check_items('submission', items, sr_dict, stop_time)

    # check new comments
    comment_multi = '+'.join([s.name for s in subreddits
                              if not s.reported_comments_only])
    if comment_multi:
        comment_multi_sr = r.get_subreddit(comment_multi)
        items = comment_multi_sr.get_comments(limit=1000)
        stop_time = (db.session.query(func.max(Subreddit.last_comment))
                     .filter(Subreddit.enabled == True).one()[0])
        check_items('comment', items, sr_dict, stop_time)

    # respond to modmail
    try:
        respond_to_modmail(r.user.get_modmail(), start_utc)
    except Exception as e:
        logging.error('  ERROR: %s', e)

    # check reports html
    try:
        check_reports_html(sr_dict)
    except Exception as e:
        logging.error('  ERROR: %s', e)

    logging.info('Completed full run in %s', elapsed_since(start_time))
Code example #15
File: stats_views.py Project: wonderpl/dolly-web
    def index_old(self):
        from gviz_data_table import Table
        from rockpack.mainsite.services.user.models import User, UserActivity
        user_count = readonly_session.query(func.count(User.id)).\
            filter(User.refresh_token != '').scalar()
        header = ('user count', 'max lifetime', 'avg lifetime', 'stddev lifetime',
                  'max active days', 'avg active days', 'stddev active days')
        lifetime = func.date_part('days', func.max(UserActivity.date_actioned) -
                                  func.min(UserActivity.date_actioned)).label('lifetime')
        active_days = func.count(func.distinct(func.date(
            UserActivity.date_actioned))).label('active_days')
        activity = readonly_session.query(UserActivity.user, lifetime, active_days).\
            group_by(UserActivity.user)
        ctx = {}
        for key, having_expr in ('all', None), ('1day', lifetime > 1), ('7day', lifetime > 7):
            data = activity.having(having_expr).from_self(
                func.count('*'),
                func.max(lifetime),
                func.avg(lifetime),
                func.stddev_samp(lifetime),
                func.max(active_days),
                func.avg(active_days),
                func.stddev_samp(active_days)
            ).one()
            table = Table([
                dict(id='metric', type=str),
                dict(id='value', type=float),
                dict(id='%', type=str),
            ])
            pdata = ('%d%%' % (data[0] * 100 / user_count),) + ('',) * 6
            table.extend(zip(*(header, map(float, data), pdata)))
            ctx['ret_%s_data' % key] = table.encode()

        return self.render('admin/retention_stats_old.html', **ctx)
Code example #16
File: lease.py Project: bibi21000/janitoo_dhcp
    def new_lease(self, add_ctrl, add_node, options):
        """Get a new lease
        return add_ctrl, add_node, options

        :param add_ctrl: the controller part of the address
        :type add_ctrl: Integer
        :param add_node: the node part of the address. 0 for controller
        :type add_node: Integer
        :returns: A dict with all information
        :rtype: dict()
        """
        #Check for malformed request
        self._new_lease_lock.acquire()
        try:
            if add_ctrl == -1:
                #A new controller wants a HADD.
                #Find and return max(add_ctrl), 0
                max_ctrl = self.dbsession.query(func.max(jntmodel.Lease.add_ctrl)).scalar()
                if max_ctrl < 10:
                    add_ctrl = 10
                else:
                    add_ctrl = max_ctrl + 1
                add_node = 0
            else:
                #A new node wants a HADD
                #check if add_ctrl,0 exists
                #Find and return add_ctrl, max(add_node)
                max_node = self.dbsession.query(func.max(jntmodel.Lease.add_node)).filter(jntmodel.Lease.add_ctrl==add_ctrl).scalar()
                if max_node is None:
                    return None
                add_node = max_node + 1
            return self.repair_lease(add_ctrl, add_node, options)
        finally:
            self._new_lease_lock.release()
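
A hypothetical driver for the allocator above; the dhcp object and the empty options dict are assumptions:

# New controller: add_ctrl == -1 asks for the next free controller id (>= 10)
info = dhcp.new_lease(-1, 0, {})
# New node under controller 12: allocates (12, max(add_node) + 1);
# returns None if controller 12 holds no lease yet
info = dhcp.new_lease(12, 1, {})
if info is None:
    print('controller 12 is unknown; lease refused')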
Code example #17
File: betweenness.py Project: jcaillet/mca
    def createGraph(self):
        # Get the highest node id
        query = self.session.query(func.max(self.edge_data.start_node), func.max(self.edge_data.end_node))
        for q in query:
            counter = max(q) + 1

        self.consoleAppend("Start creating the graph")
        self.virtual_edges = dict()
        for edge in self.query:
            key1 = edge.start_node, edge.end_node
            key2 = edge.end_node, edge.start_node
            # Edge has not been added to the graph yet
            if self.G.get_edge_data(*key1) is None or self.G.get_edge_data(*key2) is None:
                if self.weighted:
                    self.G.add_edge(edge.start_node, edge.end_node, weight=edge.length)
                else:
                    self.G.add_edge(edge.start_node, edge.end_node)
            else:
                # Add a virtual node
                if self.weighted:
                    self.G.add_edge(edge.start_node, counter, weight=edge.length / 2)
                    self.G.add_edge(counter, edge.end_node, weight=edge.length / 2)
                else:
                    self.G.add_edge(edge.start_node, counter)
                    self.G.add_edge(counter, edge.end_node)

                vedge = edge.start_node, counter
                # vedge1 bc value is equal to vedge2 bc value by definition -> only one edge vedge
                self.virtual_edges[key1] = vedge
                self.virtual_edges[key2] = vedge
                counter = counter + 1
        self.consoleAppend("Graph created")
Code example #18
File: months.py Project: colinmorris/moz-graphs
def populate_months(session):
    if session.query(Month).count() > 0:
        raise Exception("Months table is already populated.")

    demimonth = datetime.timedelta(days=14)

    first_chat = session.query(func.min(Chat.date)).scalar()
    first_bugevent = session.query(func.min(BugEvent.date)).scalar()
    start_date = max(first_chat, first_bugevent)
    print "First chat is " + str(first_chat)
    print "First bug event is " + str(first_bugevent)
    print "Starting months on " + str(start_date)

    last_chat = session.query(func.max(Chat.date)).scalar()
    last_bugevent = session.query(func.max(BugEvent.date)).scalar()
    end_date = min(last_chat, last_bugevent)
    print "Last chat is " + str(last_chat)
    print "Last bug event is " + str(last_bugevent)
    print "End months on or around " + str(end_date)


    start = start_date
    end = start_date + datetime.timedelta(days=27) # start + 27 days = 28 day span

    while end < end_date:
        month = Month(first=start, last=end)
        session.add(month)
        start += demimonth
        end += demimonth

    session.commit()
Code example #19
File: api.py Project: andylolz/IATI-Data-Quality
def api_organisation_activities(organisation_code, test_id, hierarchy_id=None):
    if (("offset" in request.args) and (int(request.args['offset'])>=0)):
        offset = int(request.args['offset'])
    else:
        offset = 0
    organisation = Organisation.query.filter(Organisation.organisation_code==organisation_code
                    ).first()

    if (hierarchy_id):
        if (hierarchy_id=="None"): hierarchy_id=None
        """test_count = db.session.query(func.count(Result.result_identifier)
            ).filter(Organisation.organisation_code == organisation_code, 
                     Result.test_id==test_id,
                     Result.result_hierarchy==hierarchy_id
            ).join(Package
            ).join(OrganisationPackage
            ).join(Organisation
            ).all()"""
        test_results = db.session.query(Result.result_identifier, 
                                        Result.result_data,
                                        func.max(Result.runtime_id)
            ).filter(Organisation.organisation_code == organisation_code, 
                     Result.test_id==test_id,
                     Result.result_hierarchy==hierarchy_id
            ).group_by(Result.result_identifier
            ).group_by(Result.result_data
            ).join(Package
            ).join(OrganisationPackage
            ).join(Organisation
            ).limit(50
            ).offset(offset
            ).all()
    else:
        """test_count = db.session.query(func.count(Result.result_identifier)
            ).filter(Organisation.organisation_code == organisation_code, 
                     Result.test_id==test_id
            ).join(Package
            ).join(OrganisationPackage
            ).join(Organisation
            ).all()"""
        test_results = db.session.query(Result.result_identifier, 
                                        Result.result_data,
                                        func.max(Result.runtime_id)
            ).filter(Organisation.organisation_code == organisation_code, 
                     Result.test_id==test_id
            ).group_by(Result.result_identifier
            ).join(Package
            ).join(OrganisationPackage
            ).join(Organisation
            ).limit(50
            ).offset(offset
            ).all()
    
    test_results = dict(map(lambda x: (x[0],x[1]), test_results))

    if ((organisation_code == None) or (test_results==None)):
        abort(404)
    else:
        return jsonify({"results": test_results})
Code example #20
File: map.py Project: tylerwhall/Flimsy
    def index(self):
        max = DBSession.query(func.max(Sensor.lat)).one()[0]
        min = DBSession.query(func.min(Sensor.lat)).one()[0]
        lat = (max + min) / 2
        max = DBSession.query(func.max(Sensor.lng)).one()[0]
        min = DBSession.query(func.min(Sensor.lng)).one()[0]
        lng = (max + min) / 2
        return dict(page='map', lat=lat, lng=lng)
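
Note: the four probes above can be collapsed into a single SELECT; a sketch assuming the same Sensor model and DBSession:

from sqlalchemy import func

# One query returning all four extrema at once
max_lat, min_lat, max_lng, min_lng = DBSession.query(
    func.max(Sensor.lat), func.min(Sensor.lat),
    func.max(Sensor.lng), func.min(Sensor.lng)).one()
lat = (max_lat + min_lat) / 2
lng = (max_lng + min_lng) / 2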
Code example #21
File: views.py Project: lawrencesun/meddata
def index(page = 1):
	form = DataForm()
	user_data = Data.query.filter_by(user_id = g.user.id)
	#ms = user_data.order_by(Data.systolic_pressure.desc()).first()
	four_weeks_ago = datetime.datetime.now() - datetime.timedelta(weeks=4)

	maxs = db.session.query(func.max(Data.systolic_pressure).label('max_systolic')).filter_by(user_id = g.user.id).one()
	max_systolic = maxs.max_systolic
	mins = db.session.query(func.min(Data.systolic_pressure).label('min_systolic')).filter_by(user_id = g.user.id).one()
	min_systolic = mins.min_systolic
	avgs = db.session.query(func.avg(Data.systolic_pressure).label('avg_systolic')).filter_by(user_id = g.user.id).\
			filter(Data.timestamp > four_weeks_ago).one()
	avg_systolic = avgs.avg_systolic

	maxd = db.session.query(func.max(Data.diastolic_pressure).label('max_diastolic')).filter_by(user_id = g.user.id).one()
	max_diastolic = maxd.max_diastolic
	mind = db.session.query(func.min(Data.diastolic_pressure).label('min_diastolic')).filter_by(user_id = g.user.id).one()
	min_diastolic = mind.min_diastolic
	avgd = db.session.query(func.avg(Data.diastolic_pressure).label('avg_diastolic')).filter_by(user_id = g.user.id).\
			filter(Data.timestamp > four_weeks_ago).one()
	avg_diastolic = avgd.avg_diastolic

	maxc = db.session.query(func.max(Data.cardiac_rate).label('max_rate')).filter_by(user_id = g.user.id).one()
	max_rate = maxc.max_rate
	minc = db.session.query(func.min(Data.cardiac_rate).label('min_rate')).filter_by(user_id = g.user.id).one()
	min_rate = minc.min_rate
	avgc = db.session.query(func.avg(Data.cardiac_rate).label('avg_rate')).filter_by(user_id = g.user.id).\
			filter(Data.timestamp > four_weeks_ago).one()
	avg_rate = avgc.avg_rate

	if form.validate_on_submit():
		data = Data(systolic_pressure = form.systolic_pressure.data,
					diastolic_pressure = form.diastolic_pressure.data,
					cardiac_rate = form.cardiac_rate.data,
					timestamp = datetime.datetime.now(),
					body = form.note.data,
					user = g.user)
		db.session.add(data)
		db.session.commit()
		db.session.close()
		flash('Added successfully')
		return redirect(url_for('index'))

	datas = user_data.order_by(Data.timestamp.desc()).paginate(page, DATAS_PER_PAGE, False)

	return render_template('index.html',
		title = 'Home',
		form = form,
		max_systolic = max_systolic,
		min_systolic = min_systolic,
		avg_systolic = avg_systolic,
		max_diastolic = max_diastolic,
		min_diastolic = min_diastolic,
		avg_diastolic = avg_diastolic,
		max_rate = max_rate,
		min_rate = min_rate,
		avg_rate = avg_rate,
		datas = datas)
Code example #22
File: util.py Project: darvid/trine
def get_cmp(table, col_name, value):
    """
    Build a SQL expression for a given column from a representational
    dictionary.
    """
    col = getattr(table.c, col_name)
    original_value = value

    neg = False
    cmp_in = False
    regex = False
    max_ilvl = False

    is_string = isinstance(value, basestring)

    if is_string and value.startswith("!"):
        value = value[1:]
        neg = True

    if is_string and value.endswith("^"):
        value = value[:-1]
        assert str(table) == "item_template"
        # TODO: support other tables where a use case exists for restricting
        # results to the MAX/MIN of a specific column
        max_ilvl = True

    if is_string and value.startswith("/") and value.endswith("/"):
        value = value[1:-1]
        regex = True

    elif is_string and value.startswith("in "):
        value = value[3:]
        cmp_in = True

    flags = get_flags(col_name, value)
    if flags is not None:
        value = col.in_(flags) if cmp_in else (col == flags)
    elif regex:
        value = col.op("rlike")(value)
    elif is_string and "%" in value:
        value = col.like(value)
        if neg:
            value = not_(value)
    elif neg:
        value = not_(value)
    elif flags is None:
        value = col == value

    # TODO: actually handle this monkeypatch in the Model object, and stop using
    # the AWFUL subquery hack below.
    value._max = None
    if max_ilvl:
        value._max = func.max(table.c.ItemLevel)
        subquery = select(["*"]).select_from(select([func.max(table.c.ItemLevel)]).where(value).alias("tmp"))
        value = and_(value, table.c.ItemLevel == subquery)

    return (value, original_value)
Code example #23
File: api1.py Project: e6/activityapi
def history__github():
    grain = _get_grain()
    # Filtered list of github IDs
    repo = request.args.get('repo', None)
    repoFilter = None
    if repo is not None:
        repo = repo.split(',')
        repoFilter = SnapshotOfGithub.repo_name.in_(repo)
    # Date filter
    date_group = func.date_trunc(grain, SnapshotOfGithub.timestamp)
    # Query: Range of dates
    q1 = Session.query()\
            .add_column( func.distinct(date_group).label('d') )\
            .order_by(date_group.desc())
    response = _prepare(q1.count())
    q1 = q1.offset( response['offset'] )\
            .limit( response['per_page'] )
    if q1.count():
        date_column = q1.subquery().columns.d
        (min_date,max_date) = Session.query(func.min(date_column), func.max(date_column)).first()
    else:
        # Impossible date range
        (min_date,max_date) = datetime.now()+timedelta(days=1),datetime.now()
    # Grouped query
    S = SnapshotOfGithub
    q = Session.query()\
            .add_column( func.sum(S.watchers) )\
            .add_column( func.max(S.forks) )\
            .add_column( func.max(S.open_issues) )\
            .add_column( func.max(S.size) )\
            .add_column( date_group )\
            .add_column( S.repo_name )\
            .group_by(date_group)\
            .group_by(S.repo_name)\
            .order_by(date_group.desc())\
            .filter( date_group>=min_date )\
            .filter( date_group<=max_date )\
            .filter( repoFilter )
    results = {}
    # Inner function transforms SELECT tuple into recognizable format
    _dictize = lambda x: {
        'watchers':x[0],
        'forks':x[1],
        'issues':x[2],
        'size':x[3],
        'timestamp':x[4].date().isoformat(),
    }
    for x in q:
        repo_name = x[5]
        results[repo_name] = results.get(repo_name, { 'repo':repo_name, 'data':[] })
        results[repo_name]['data'].append( _dictize(x) )
    response['grain'] = grain
    response['data'] = results
    response['repos'] = repo
    response['min_date'] = min_date.date().isoformat()
    response['max_date'] = max_date.date().isoformat()
    return response
Code example #24
File: audit.py Project: ianmackinnon/mango
def max_id(orm, Entity, Entity_v, key):
    m = None
    # func.max() yields a one-column row even on an empty table; guard the NULL
    value = orm.query(func.max(getattr(Entity, key))).first()
    if value and value[0] is not None:
        m = max(m or 0, value[0])
    value = orm.query(func.max(getattr(Entity_v, key))).first()
    if value and value[0] is not None:
        m = max(m or 0, value[0])
    return m
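
Because it scans both the live table and its version table, the helper above yields ids that never collide with historical rows. A hypothetical usage sketch; Org and Org_v stand in for an entity class and its versioned twin:

# Next free id across live and historical rows (hypothetical models)
next_id = (max_id(orm, Org, Org_v, "org_id") or 0) + 1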
Code example #25
File: series.py Project: ARLahan/Flexget
def display_summary(options):
    """
    Display series summary.
    :param options: argparse options from the CLI
    """
    formatting = ' %-30s %-10s %-10s %-20s'
    console(formatting % ('Name', 'Latest', 'Age', 'Downloaded'))
    console('-' * 79)

    session = Session()
    try:
        query = (session.query(Series).outerjoin(Series.episodes).outerjoin(Episode.releases).
                 outerjoin(Series.in_tasks).group_by(Series.id))
        if options.configured == 'configured':
            query = query.having(func.count(SeriesTask.id) >= 1)
        elif options.configured == 'unconfigured':
            query = query.having(func.count(SeriesTask.id) < 1)
        if options.premieres:
            query = (query.having(func.max(Episode.season) <= 1).having(func.max(Episode.number) <= 2).
                     having(func.count(SeriesTask.id) < 1)).filter(Release.downloaded == True)
        if options.new:
            query = query.having(func.max(Episode.first_seen) > datetime.now() - timedelta(days=options.new))
        if options.stale:
            query = query.having(func.max(Episode.first_seen) < datetime.now() - timedelta(days=options.stale))
        for series in query.order_by(Series.name).yield_per(10):
            series_name = series.name
            if len(series_name) > 30:
                series_name = series_name[:27] + '...'

            new_ep = ' '
            behind = 0
            status = 'N/A'
            age = 'N/A'
            episode_id = 'N/A'
            latest = get_latest_release(series)
            if latest:
                if latest.first_seen > datetime.now() - timedelta(days=2):
                    new_ep = '>'
                behind = new_eps_after(latest)
                status = get_latest_status(latest)
                age = latest.age
                episode_id = latest.identifier

            if behind:
                episode_id += ' +%s' % behind

            console(new_ep + formatting[1:] % (series_name, episode_id, age, status))
            if behind >= 3:
                console(' ! Latest download is %d episodes behind, this may require '
                        'manual intervention' % behind)

        console('-' * 79)
        console(' > = new episode ')
        console(' Use `flexget series show NAME` to get detailed information')
    finally:
        session.close()
Code example #26
File: list.py Project: brunogola/skink
    def _init_position(self):
        s = select(
            [(func.max(position_column)+1).label('value')],
            qualifier_method(self)
        ).union(
            select([literal(1).label('value')])
        )
        a = s.alias()
        #XXX: two func.max?
        setattr(self, position_column_name, select([func.max(a.c.value)]))
Code example #27
File: views.py Project: TeZzo1/MQTT
def mesicni_vypis_alluser(mesic):
    # form=Card.find_by_number(current_user.card_number)
    # form = db.session.query(Card.time).filter_by(card_number=current_user.card_number)
    form = db.session.query(func.strftime('%Y-%m-%d', Card.time).label("date"),
                            func.max(func.strftime('%H:%M', Card.time)).label("Max"), \
                            func.min(func.strftime('%H:%M', Card.time)).label("Min"),
                            (func.max(Card.time) - func.min(Card.time)).label("Rozdil")) \
        .filter(func.strftime('%Y-%-m', Card.time) == mesic).group_by(func.strftime('%Y-%m-%d', Card.time))
    # .group_by([func.day(Card.time)])
    return render_template("auth/mesicni_vypisy.tmpl", form=form, user=current_user)
Code example #28
File: api1.py Project: e6/activityapi
def history__mailman():
    grain = _get_grain()
    # Filtered list of mailman IDs
    lists = request.args.get('list')
    listFilter = None
    if lists is not None:
        lists = lists.split(',') 
        listFilter = SnapshotOfMailman.list_name.in_(lists)
    # Date filter
    date_group = func.date_trunc(grain, SnapshotOfMailman.timestamp)
    # Query: Range of dates
    q1 = Session.query()\
            .add_column( func.distinct(date_group).label('d') )\
            .order_by(date_group.desc())
    response = _prepare(q1.count())
    q1 = q1.offset( response['offset'] )\
            .limit( response['per_page'] )
    if q1.count():
        subquery = q1.subquery()
        (min_date,max_date) = Session.query(func.min(subquery.columns.d), func.max(subquery.columns.d)).first()
    else:
        # Impossible date range
        (min_date,max_date) = datetime.now()+timedelta(days=1),datetime.now()
    # Grouped query
    S = SnapshotOfMailman
    q = Session.query()\
            .add_column( func.sum(S.posts_today) )\
            .add_column( func.max(S.subscribers) )\
            .add_column( date_group )\
            .add_column( S.list_name )\
            .group_by(date_group)\
            .group_by(S.list_name)\
            .order_by(date_group.desc())\
            .filter( date_group>=min_date )\
            .filter( date_group<=max_date )\
            .filter( listFilter )
    results = {}
    # Inner function transforms SELECT tuple into recognizable format
    _dictize = lambda x: {
        'posts':x[0],
        'subscribers':x[1],
        'timestamp':x[2].isoformat(),
    }
    # Build output datastructure from rows
    for x in q:
        list_name = x[3]
        results[list_name] = results.get(list_name, { 'list_name':list_name, 'data':[] })
        results[list_name]['data'].append( _dictize(x) )
    # Write response
    response['grain'] = grain
    response['data'] = results
    response['list'] = lists
    response['min_date'] = min_date.isoformat()
    response['max_date'] = max_date.isoformat()
    return response
Code example #29
File: modul.py Project: ringo-framework/ringo
def get_next_modulid(package, session):
    if package == "ringo":
        id = session.query(
            func.max(ModulItem.id)).filter(ModulItem.id < 1000).one()[0]
    else:
        id = session.query(
            func.max(ModulItem.id)).filter(ModulItem.id > 999).one()[0]
    if id:
        return id + 1
    else:
        return 1000
Code example #30
File: impl_sqlalchemy.py Project: markmc/ceilometer
    def _make_stats_query(self, event_filter):
        query = self.session.query(
            func.min(Meter.timestamp).label('tsmin'),
            func.max(Meter.timestamp).label('tsmax'),
            func.avg(Meter.counter_volume).label('avg'),
            func.sum(Meter.counter_volume).label('sum'),
            func.min(Meter.counter_volume).label('min'),
            func.max(Meter.counter_volume).label('max'),
            func.count(Meter.counter_volume).label('count'))

        return make_query_from_filter(query, event_filter)
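
The labels make the aggregate row addressable by name; a consumption sketch, where stats_q stands for the query returned by _make_stats_query(event_filter) and the filter itself is assumed valid:

stats = stats_q.first()
if stats is not None:
    print(stats.tsmax - stats.tsmin,  # covered time span
          stats.avg, stats.min, stats.max)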
Code example #31
    def calculate_summary(self, product_name: str,
                          time: Range) -> TimePeriodOverview:
        """
        Create a summary of the given product/time range.
        """
        log = self.log.bind(product_name=product_name, time=time)
        log.debug("summary.query")

        begin_time, end_time, where_clause = self._where(product_name, time)
        select_by_srid = (select((
            func.ST_SRID(DATASET_SPATIAL.c.footprint).label("srid"),
            func.count().label("dataset_count"),
            func.ST_Transform(
                func.ST_Union(DATASET_SPATIAL.c.footprint),
                self._target_srid(),
                type_=Geometry(),
            ).label("footprint_geometry"),
            func.sum(DATASET_SPATIAL.c.size_bytes).label("size_bytes"),
            func.max(DATASET_SPATIAL.c.creation_time).label(
                "newest_dataset_creation_time"),
        )).where(where_clause).group_by("srid").alias("srid_summaries"))

        # Union all srid groups into one summary.
        result = self._engine.execute(
            select((
                func.sum(
                    select_by_srid.c.dataset_count).label("dataset_count"),
                func.array_agg(select_by_srid.c.srid).label("srids"),
                func.sum(select_by_srid.c.size_bytes).label("size_bytes"),
                func.ST_Union(
                    select_by_srid.c.footprint_geometry,
                    type_=Geometry(srid=self._target_srid()),
                ).label("footprint_geometry"),
                func.max(select_by_srid.c.newest_dataset_creation_time).label(
                    "newest_dataset_creation_time"),
                func.now().label("summary_gen_time"),
            )))

        rows = result.fetchall()
        log.debug("summary.query.done", srid_rows=len(rows))

        assert len(rows) == 1
        row = dict(rows[0])
        row["dataset_count"] = int(
            row["dataset_count"]) if row["dataset_count"] else 0
        if row["footprint_geometry"] is not None:
            row["footprint_crs"] = self._get_srid_name(
                row["footprint_geometry"].srid)
            row["footprint_geometry"] = geo_shape.to_shape(
                row["footprint_geometry"])
        else:
            row["footprint_crs"] = None
        row["crses"] = None
        if row["srids"] is not None:
            row["crses"] = {self._get_srid_name(s) for s in row["srids"]}
        del row["srids"]

        # Convert from Python Decimal
        if row["size_bytes"] is not None:
            row["size_bytes"] = int(row["size_bytes"])

        has_data = row["dataset_count"] > 0

        log.debug("counter.calc")

        # Initialise all requested days as zero
        day_counts = Counter({
            d.date(): 0
            for d in pd.date_range(begin_time, end_time, closed="left")
        })
        region_counts = Counter()
        if has_data:
            day_counts.update(
                Counter({
                    day.date(): count
                    for day, count in self._engine.execute(
                        select([
                            func.date_trunc(
                                "day",
                                DATASET_SPATIAL.c.center_time.op(
                                    "AT TIME ZONE")(self.grouping_time_zone),
                            ).label("day"),
                            func.count(),
                        ]).where(where_clause).group_by("day"))
                }))
            region_counts = Counter({
                item: count
                for item, count in self._engine.execute(
                    select([
                        DATASET_SPATIAL.c.region_code.label("region_code"),
                        func.count(),
                    ]).where(where_clause).group_by("region_code"))
            })

        summary = TimePeriodOverview(
            **row,
            timeline_period="day",
            time_range=Range(begin_time, end_time),
            timeline_dataset_counts=day_counts,
            region_dataset_counts=region_counts,
            # TODO: filter invalid from the counts?
            footprint_count=row["dataset_count"] or 0,
        )

        log.debug(
            "summary.calc.done",
            dataset_count=summary.dataset_count,
            footprints_missing=summary.dataset_count - summary.footprint_count,
        )
        return summary
Code example #32
def start_date(start):
    """Return a JSON list of the minimum temperature, the average temp, and the max temp for a given start or start-end range"""
    """When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date"""
    """TMIN, TAVG, and TMAX for a list of dates.
    
    Args:
        start_date (string): A date string in the format %Y-%m-%d
        end_date (string): A date string in the format %Y-%m-%d
        
    Returns:
        TMIN, TAVE, and TMAX
    """

    starting_date = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).filter(Measurement.date <= dt.date(2017, 8, 23)).all()

    return jsonify(starting_date)
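
Note: jsonify on the raw list of tuples yields an unlabeled nested array. Unpacking the single aggregate row makes the payload self-describing; a sketch under the same Measurement model, replacing the return above:

# Label the single (min, avg, max) row before serializing
tmin, tavg, tmax = starting_date[0]
return jsonify({"TMIN": tmin, "TAVG": tavg, "TMAX": tmax})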
Code example #33
File: script_dao.py Project: weikman/intelRSD
    def get_last_script_execution_id(database_session):
        max = database_session.query(func.max(ScriptExecutionModel.id).label('last_id')).one()
        return max.last_id
Code example #34
File: intraday_dao.py Project: Asconius/trading-bot
    def read_latest_date() -> List[IntradayEntity]:
        return db.session.query(func.max(IntradayEntity.date),
                                IntradayEntity.ticker).group_by(
                                    IntradayEntity.ticker).all()
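
Each returned row is a (max_date, ticker) pair; a hypothetical consumption sketch (IntradayDAO as the enclosing class is an assumption):

# Latest intraday date per ticker
latest_by_ticker = {ticker: max_date
                    for (max_date, ticker) in IntradayDAO.read_latest_date()}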
Code example #35
def get_states(hass,
               utc_point_in_time,
               entity_ids=None,
               run=None,
               filters=None):
    """Return the states at a specific point in time."""
    from homeassistant.components.recorder.models import States

    if run is None:
        run = recorder.run_information(hass, utc_point_in_time)

        # History did not run before utc_point_in_time
        if run is None:
            return []

    from sqlalchemy import and_, func

    with session_scope(hass=hass) as session:
        if entity_ids and len(entity_ids) == 1:
            # Use an entirely different (and extremely fast) query if we only
            # have a single entity id
            most_recent_state_ids = session.query(
                States.state_id.label('max_state_id')).filter(
                    (States.last_updated < utc_point_in_time)
                    & (States.entity_id.in_(entity_ids))).order_by(
                        States.last_updated.desc())

            most_recent_state_ids = most_recent_state_ids.limit(1)

        else:
            # We have more than one entity to look at (most commonly we want
            # all entities,) so we need to do a search on all states since the
            # last recorder run started.

            most_recent_states_by_date = session.query(
                States.entity_id.label('max_entity_id'),
                func.max(
                    States.last_updated).label('max_last_updated')).filter(
                        (States.last_updated >= run.start)
                        & (States.last_updated < utc_point_in_time))

            if entity_ids:
                # filter() returns a new Query; the result must be reassigned
                most_recent_states_by_date = most_recent_states_by_date.filter(
                    States.entity_id.in_(entity_ids))

            most_recent_states_by_date = most_recent_states_by_date.group_by(
                States.entity_id)

            most_recent_states_by_date = most_recent_states_by_date.subquery()

            most_recent_state_ids = session.query(
                func.max(States.state_id).label('max_state_id')).join(
                    most_recent_states_by_date,
                    and_(
                        States.entity_id ==
                        most_recent_states_by_date.c.max_entity_id,
                        States.last_updated ==
                        most_recent_states_by_date.c.max_last_updated))

            most_recent_state_ids = most_recent_state_ids.group_by(
                States.entity_id)

        most_recent_state_ids = most_recent_state_ids.subquery()

        query = session.query(States).join(
            most_recent_state_ids,
            States.state_id == most_recent_state_ids.c.max_state_id).filter(
                (~States.domain.in_(IGNORE_DOMAINS)))

        if filters:
            query = filters.apply(query, entity_ids)

        return [
            state for state in execute(query)
            if not state.attributes.get(ATTR_HIDDEN, False)
        ]
Code example #36
File: impl_sqlalchemy.py Project: xianms/ceilometer
    def get_resources(self, user=None, project=None, source=None,
                      start_timestamp=None, start_timestamp_op=None,
                      end_timestamp=None, end_timestamp_op=None,
                      metaquery=None, resource=None, limit=None):
        """Return an iterable of api_models.Resource instances

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param source: Optional source filter.
        :param start_timestamp: Optional modified timestamp start range.
        :param start_timestamp_op: Optional start time operator, like gt, ge.
        :param end_timestamp: Optional modified timestamp end range.
        :param end_timestamp_op: Optional end time operator, like lt, le.
        :param metaquery: Optional dict with metadata to match on.
        :param resource: Optional resource filter.
        :param limit: Maximum number of results to return.
        """
        if limit == 0:
            return
        s_filter = storage.SampleFilter(user=user,
                                        project=project,
                                        source=source,
                                        start_timestamp=start_timestamp,
                                        start_timestamp_op=start_timestamp_op,
                                        end_timestamp=end_timestamp,
                                        end_timestamp_op=end_timestamp_op,
                                        metaquery=metaquery,
                                        resource=resource)

        session = self._engine_facade.get_session()
        # get list of resource_ids
        has_timestamp = start_timestamp or end_timestamp
        # NOTE: When sql_expire_samples_only is enabled, there will be some
        #       resources without any sample, in such case we should use inner
        #       join on sample table to avoid wrong result.
        if cfg.CONF.sql_expire_samples_only or has_timestamp:
            res_q = session.query(distinct(models.Resource.resource_id)).join(
                models.Sample,
                models.Sample.resource_id == models.Resource.internal_id)
        else:
            res_q = session.query(distinct(models.Resource.resource_id))
        res_q = make_query_from_filter(session, res_q, s_filter,
                                       require_meter=False)
        res_q = res_q.limit(limit) if limit else res_q
        for res_id in res_q.all():

            # get max and min sample timestamp value
            min_max_q = (session.query(func.max(models.Sample.timestamp)
                                       .label('max_timestamp'),
                                       func.min(models.Sample.timestamp)
                                       .label('min_timestamp'))
                                .join(models.Resource,
                                      models.Resource.internal_id ==
                                      models.Sample.resource_id)
                                .filter(models.Resource.resource_id ==
                                        res_id[0]))

            min_max_q = make_query_from_filter(session, min_max_q, s_filter,
                                               require_meter=False)

            min_max = min_max_q.first()

            # get resource details for latest sample
            res_q = (session.query(models.Resource.resource_id,
                                   models.Resource.user_id,
                                   models.Resource.project_id,
                                   models.Resource.source_id,
                                   models.Resource.resource_metadata)
                            .join(models.Sample,
                                  models.Sample.resource_id ==
                                  models.Resource.internal_id)
                            .filter(models.Sample.timestamp ==
                                    min_max.max_timestamp)
                            .filter(models.Resource.resource_id ==
                                    res_id[0])
                            .order_by(models.Sample.id.desc()).limit(1))

            res = res_q.first()

            yield api_models.Resource(
                resource_id=res.resource_id,
                project_id=res.project_id,
                first_sample_timestamp=min_max.min_timestamp,
                last_sample_timestamp=min_max.max_timestamp,
                source=res.source_id,
                user_id=res.user_id,
                metadata=res.resource_metadata
            )
Code example #37
def populate_user_metadata(session, user_ids, users, current_user_id):
    # build dict of user id --> track count
    track_counts = (session.query(
        Track.owner_id, func.count(Track.owner_id)).filter(
            Track.is_current == True, Track.is_delete == False,
            Track.owner_id.in_(user_ids)).group_by(Track.owner_id).all())
    track_count_dict = {
        user_id: track_count
        for (user_id, track_count) in track_counts
    }

    # build dict of user id --> playlist count
    playlist_counts = (session.query(
        Playlist.playlist_owner_id,
        func.count(Playlist.playlist_owner_id)).filter(
            Playlist.is_current == True, Playlist.is_album == False,
            Playlist.is_private == False, Playlist.is_delete == False,
            Playlist.playlist_owner_id.in_(user_ids)).group_by(
                Playlist.playlist_owner_id).all())
    playlist_count_dict = {
        user_id: playlist_count
        for (user_id, playlist_count) in playlist_counts
    }

    # build dict of user id --> album count
    album_counts = (session.query(
        Playlist.playlist_owner_id,
        func.count(Playlist.playlist_owner_id)).filter(
            Playlist.is_current == True, Playlist.is_album == True,
            Playlist.is_private == False, Playlist.is_delete == False,
            Playlist.playlist_owner_id.in_(user_ids)).group_by(
                Playlist.playlist_owner_id).all())
    album_count_dict = {
        user_id: album_count
        for (user_id, album_count) in album_counts
    }

    # build dict of user id --> follower count
    follower_counts = (session.query(
        Follow.followee_user_id, func.count(Follow.followee_user_id)).filter(
            Follow.is_current == True, Follow.is_delete == False,
            Follow.followee_user_id.in_(user_ids)).group_by(
                Follow.followee_user_id).all())
    follower_count_dict = {
        user_id: follower_count
        for (user_id, follower_count) in follower_counts
    }

    # build dict of user id --> followee count
    followee_counts = (session.query(
        Follow.follower_user_id, func.count(Follow.follower_user_id)).filter(
            Follow.is_current == True, Follow.is_delete == False,
            Follow.follower_user_id.in_(user_ids)).group_by(
                Follow.follower_user_id).all())
    followee_count_dict = {
        user_id: followee_count
        for (user_id, followee_count) in followee_counts
    }

    # build dict of user id --> repost count
    repost_counts = (session.query(
        Repost.user_id, func.count(Repost.user_id)).filter(
            Repost.is_current == True, Repost.is_delete == False,
            Repost.user_id.in_(user_ids)).group_by(Repost.user_id).all())
    repost_count_dict = {
        user_id: repost_count
        for (user_id, repost_count) in repost_counts
    }

    # build dict of user id --> track blocknumber
    track_blocknumbers = (session.query(
        Track.owner_id, func.max(Track.blocknumber)).filter(
            Track.is_current == True, Track.is_delete == False,
            Track.owner_id.in_(user_ids)).group_by(Track.owner_id).all())
    track_blocknumber_dict = {
        user_id: track_blocknumber
        for (user_id, track_blocknumber) in track_blocknumbers
    }

    current_user_followed_user_ids = {}
    current_user_followee_follow_count_dict = {}
    if current_user_id:
        # does current user follow any of requested user ids
        current_user_followed_user_ids = (session.query(
            Follow.followee_user_id).filter(
                Follow.is_current == True, Follow.is_delete == False,
                Follow.followee_user_id.in_(user_ids),
                Follow.follower_user_id == current_user_id).all())
        current_user_followed_user_ids = {
            r[0]: True
            for r in current_user_followed_user_ids
        }

        # build dict of user id --> followee follow count
        current_user_followees = (session.query(
            Follow.followee_user_id).filter(
                Follow.is_current == True, Follow.is_delete == False,
                Follow.follower_user_id == current_user_id))
        current_user_followees = {r[0]: True for r in current_user_followees}

        current_user_followee_follow_counts = (session.query(
            Follow.followee_user_id,
            func.count(Follow.followee_user_id)).filter(
                Follow.is_current == True, Follow.is_delete == False,
                Follow.follower_user_id.in_(current_user_followees),
                Follow.followee_user_id.in_(user_ids)).group_by(
                    Follow.followee_user_id).all())
        current_user_followee_follow_count_dict = {
            user_id: followee_follow_count
            for (user_id,
                 followee_follow_count) in current_user_followee_follow_counts
        }

    for user in users:
        user_id = user["user_id"]
        user[response_name_constants.track_count] = track_count_dict.get(
            user_id, 0)
        user[response_name_constants.playlist_count] = playlist_count_dict.get(
            user_id, 0)
        user[response_name_constants.album_count] = album_count_dict.get(
            user_id, 0)
        user[response_name_constants.follower_count] = follower_count_dict.get(
            user_id, 0)
        user[response_name_constants.followee_count] = followee_count_dict.get(
            user_id, 0)
        user[response_name_constants.repost_count] = repost_count_dict.get(
            user_id, 0)
        user[response_name_constants.
             track_blocknumber] = track_blocknumber_dict.get(user_id, -1)
        # current user specific
        user[response_name_constants.
             does_current_user_follow] = current_user_followed_user_ids.get(
                 user_id, False)
        user[
            response_name_constants.
            current_user_followee_follow_count] = current_user_followee_follow_count_dict.get(
                user_id, 0)

    return users
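Aside: the {r[0]: True for r in ...} dictionaries above, read back with .get(user_id, False), are effectively set-membership tests; a set comprehension states that intent directly. A minimal sketch, reusing the model and variable names from the snippet above:

# Equivalent set-based membership test (sketch; names as in the snippet above).
followed_ids = {
    r[0]
    for r in session.query(Follow.followee_user_id).filter(
        Follow.is_current == True, Follow.is_delete == False,
        Follow.followee_user_id.in_(user_ids),
        Follow.follower_user_id == current_user_id)
}
# Then, per user: `user_id in followed_ids` replaces dict.get(user_id, False).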
Code Example #38
0
Base.prepare(engine, reflect=True)

# We can view all of the classes that automap found

Base.classes.keys()

# Save references to each table

measurement = Base.classes.measurement

station = Base.classes.station

# Design a query to retrieve the last 12 months of precipitation data and plot the results
# Calculate the date 1 year ago from the last data point in the database

max_date = session.query(func.max(measurement.date)).scalar()
pprint(max_date)

one_year = dt.timedelta(days=365)

min_date = dt.date(2017, 8, 23) - one_year

min_date

# Perform a query to retrieve the data and precipitation scores

last_year_prcp = session.query(
    measurement.date,
    measurement.prcp).filter(measurement.date >= min_date).all()
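A possible refinement, so the one-year window does not rely on the hardcoded dt.date(2017, 8, 23): derive the cutoff from the queried maximum. A minimal sketch, assuming measurement.date stores ISO-formatted 'YYYY-MM-DD' strings as in the classic hawaii.sqlite dataset:

# Sketch: compute the one-year cutoff from the latest date in the table.
last_date_str = session.query(func.max(measurement.date)).scalar()
last_date = dt.datetime.strptime(last_date_str, "%Y-%m-%d").date()
min_date = last_date - dt.timedelta(days=365)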
Code Example #39
0
File: hawaii.py Project: SujiKap/Assignment_10
def tmin_tmax_tavg1():
    # Calculate TMIN, TAVG, and TMAX for dates between the start and end date, inclusive.
    # Note: dates are stored as text, so they must be zero-padded to compare correctly.
    session = Session(engine)
    between_date_temp = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date.between('2016-08-23', '2017-08-23')).all()

    between_list = {'min': between_date_temp[0][0],
                    'avg': between_date_temp[0][1],
                    'max': between_date_temp[0][2]}

    return jsonify(between_list)
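The zero-padding fixed above matters because this dataset stores dates as text, and text compares lexicographically, character by character. A two-line demonstration:

# String dates compare lexicographically, not chronologically.
assert '2016-8-23' > '2016-10-01'    # unpadded month: '8' > '1', wrong order
assert '2016-08-23' < '2016-10-01'   # zero-padded form orders correctly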
Code Example #40
0
File: hawaii.py Project: SujiKap/Assignment_10
def tmin_tmax_tavg():
    # Return a JSON list of the minimum, average, and maximum temperature for a
    # given start or start-end range. When given the start only, calculate TMIN,
    # TAVG, and TMAX for all dates greater than or equal to the start date.
    session = Session(engine)
    start_date_temp = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= datetime.datetime(2017, 8, 16)).all()

    start_temp_list = {'min': start_date_temp[0][0],
                       'avg': start_date_temp[0][1],
                       'max': start_date_temp[0][2]}

    return jsonify(start_temp_list)
Code Example #41
0
File: app.py Project: wkhan786/sqlalchemy-challenge
def start_end_day(start, end):
    start_end_day = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).\
        filter(Measurement.date <= end).\
        group_by(Measurement.date).all()
    # Convert the list of tuples into a normal list
    start_end_day_list = list(start_end_day)
    # Return a JSON list of min, avg, and max temperature for the given start-end range
    return jsonify(start_end_day_list)
Code Example #42
0
def start_end(start, end):
    results1 = session.query(func.min(measurement.tobs), func.avg(measurement.tobs),func.max(measurement.tobs)).\
    filter(measurement.date >= start, measurement.date <= end).order_by(measurement.date.desc()).all()
    return jsonify(results1)
Code Example #43
0
def temp_start(start):
    results = session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\
    filter(measurement.date >= start).order_by(measurement.date.desc()).all()
    return jsonify(results)
Code Example #44
0
File: impl_sqlalchemy.py Project: xianms/ceilometer
    def get_meters(self, user=None, project=None, resource=None, source=None,
                   metaquery=None, limit=None, unique=False):
        """Return an iterable of api_models.Meter instances

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param resource: Optional ID of the resource.
        :param source: Optional source filter.
        :param metaquery: Optional dict with metadata to match on.
        :param limit: Maximum number of results to return.
        :param unique: If set to true, return only unique meter information.
        """
        if limit == 0:
            return
        s_filter = storage.SampleFilter(user=user,
                                        project=project,
                                        source=source,
                                        metaquery=metaquery,
                                        resource=resource)

        # NOTE(gordc): get latest sample of each meter/resource. we do not
        #              filter here as we want to filter only on latest record.
        session = self._engine_facade.get_session()

        subq = session.query(func.max(models.Sample.id).label('id')).join(
            models.Resource,
            models.Resource.internal_id == models.Sample.resource_id)

        if unique:
            subq = subq.group_by(models.Sample.meter_id)
        else:
            subq = subq.group_by(models.Sample.meter_id,
                                 models.Resource.resource_id)

        if resource:
            subq = subq.filter(models.Resource.resource_id == resource)
        subq = subq.subquery()

        # get meter details for samples.
        query_sample = (session.query(models.Sample.meter_id,
                                      models.Meter.name, models.Meter.type,
                                      models.Meter.unit,
                                      models.Resource.resource_id,
                                      models.Resource.project_id,
                                      models.Resource.source_id,
                                      models.Resource.user_id).join(
            subq, subq.c.id == models.Sample.id)
            .join(models.Meter, models.Meter.id == models.Sample.meter_id)
            .join(models.Resource,
                  models.Resource.internal_id == models.Sample.resource_id))
        query_sample = make_query_from_filter(session, query_sample, s_filter,
                                              require_meter=False)

        query_sample = query_sample.limit(limit) if limit else query_sample

        if unique:
            for row in query_sample.all():
                yield api_models.Meter(
                    name=row.name,
                    type=row.type,
                    unit=row.unit,
                    resource_id=None,
                    project_id=None,
                    source=None,
                    user_id=None)
        else:
            for row in query_sample.all():
                yield api_models.Meter(
                    name=row.name,
                    type=row.type,
                    unit=row.unit,
                    resource_id=row.resource_id,
                    project_id=row.project_id,
                    source=row.source_id,
                    user_id=row.user_id)
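The subquery-and-join shape in get_meters is the standard latest-row-per-group idiom: select MAX(id) per group in a subquery, then join back on id to recover the full rows. A minimal sketch of the idiom in isolation, reusing the models.Sample names from the snippet:

# Sketch: newest Sample per meter_id (assumes ids increase monotonically).
latest_ids = (session.query(func.max(models.Sample.id).label('id'))
              .group_by(models.Sample.meter_id)
              .subquery())
latest_samples = (session.query(models.Sample)
                  .join(latest_ids, models.Sample.id == latest_ids.c.id)
                  .all())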
Code Example #45
0
def determine_temps_for_date_range(start, end):
    """Return min temperature, the average temp, and the max temp for a given range."""

    #Start and End.
    if end is not None:
        result = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
            filter(Measurement.date >= start).filter(
            Measurement.date <= end).all()
    #Start only.
    else:
        result = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
            filter(Measurement.date >= start).all()

    # Convert to list.
    temp_list = []
    no_data = False
    for min_temp, avg_temp, max_temp in result:
        if min_temp is None or avg_temp is None or max_temp is None:
            no_data = True
        temp_list.append(min_temp)
        temp_list.append(avg_temp)
        temp_list.append(max_temp)
    # Return JSON.
    if no_data:
        return "No Data in Date Range"
    else:
        return jsonify(temp_list)
Code Example #46
0
def get_distribution(
    summary_table,
    run_table,
    machine_table,
    commit_table,
    commit,
    summary,
    machine_hash,
    limit,
):
    commits_up = (
        get_commits_up(commit_table, commit, limit).subquery().alias("commits_up")
    )
    return (
        select(
            func.text(summary.case_id).label("case_id"),
            func.text(summary.context_id).label("context_id"),
            func.text(commit.id).label("commit_id"),
            func.concat(
                machine_table.c.name,
                "-",
                machine_table.c.gpu_count,
                "-",
                machine_table.c.cpu_core_count,
                "-",
                machine_table.c.cpu_thread_count,
                "-",
                machine_table.c.memory_bytes,
            ).label("hash"),
            func.max(summary_table.c.unit).label("unit"),
            func.avg(summary_table.c.mean).label("mean_mean"),
            func.stddev(summary_table.c.mean).label("mean_sd"),
            func.avg(summary_table.c.min).label("min_mean"),
            func.stddev(summary_table.c.min).label("min_sd"),
            func.avg(summary_table.c.max).label("max_mean"),
            func.stddev(summary_table.c.max).label("max_sd"),
            func.avg(summary_table.c.median).label("median_mean"),
            func.stddev(summary_table.c.median).label("median_sd"),
            func.min(commits_up.c.timestamp).label("first_timestamp"),
            func.max(commits_up.c.timestamp).label("last_timestamp"),
            func.count(summary_table.c.mean).label("observations"),
        )
        .group_by(
            summary_table.c.case_id,
            summary_table.c.context_id,
            machine_table.c.name,
            machine_table.c.gpu_count,
            machine_table.c.cpu_core_count,
            machine_table.c.cpu_thread_count,
            machine_table.c.memory_bytes,
        )
        .join(run_table, run_table.c.id == summary_table.c.run_id)
        .join(machine_table, machine_table.c.id == run_table.c.machine_id)
        .join(commits_up, commits_up.c.id == run_table.c.commit_id)
        .filter(
            run_table.c.name.like("commit: %"),
            summary_table.c.case_id == summary.case_id,
            summary_table.c.context_id == summary.context_id,
            func.concat(
                machine_table.c.name,
                "-",
                machine_table.c.gpu_count,
                "-",
                machine_table.c.cpu_core_count,
                "-",
                machine_table.c.cpu_thread_count,
                "-",
                machine_table.c.memory_bytes,
            )
            == machine_hash,
        )
    )
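Callers of get_distribution must pass machine_hash in exactly the format the func.concat above produces. A hypothetical helper that mirrors it on the Python side (assuming none of the five fields is NULL, since SQL concat and Python str() handle missing values differently):

def machine_hash_of(machine):
    # Hypothetical mirror of the SQL concat: name-gpu-cores-threads-memory.
    return "-".join(str(part) for part in (
        machine.name,
        machine.gpu_count,
        machine.cpu_core_count,
        machine.cpu_thread_count,
        machine.memory_bytes,
    ))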
Code Example #47
0
def weather(start_date):
    session = Session(engine)
    results = session.query(Measurement.date, func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
            filter(Measurement.date >= start_date).group_by(Measurement.date).all()
    session.close()
    return jsonify(results)
Code Example #48
0
File: app.py Project: lmbejaran/sqlalchemy-challenge
def calc_temps_start_end(start_date, end_date):
    tobs_stats = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
    tobs_stats_all = []
    for tmin, tavg, tmax in tobs_stats:
        # Avoid shadowing the built-ins min, avg, max, and dict.
        stats = {'min': tmin, 'avg': tavg, 'max': tmax}
        tobs_stats_all.append(stats)
    return jsonify(tobs_stats_all)
Code Example #49
0
    
    stations_total = list(np.ravel(station_results))
    
    return jsonify(stations_total)


@app.route("/api/v1.0/tobs")
def tobs():
    tobs_results = session.query(Measurement.tobs).filter(Measurement.date > '2016-08-22').all()
    
    tobs_total = list(np.ravel(tobs_results))
    
    return jsonify(tobs_total)


@app.route("/api/v1.0/<start>")
def calc_temp1(start):
    results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).all()
    return jsonify(list(np.ravel(results)))


@app.route("/api/v1.0/<start>/<end>")
def calc_temp2(start, end):
    results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).filter(Measurement.date <= end).all()
    return jsonify(list(np.ravel(results)))

if __name__ == "__main__":
    app.run(debug=True)
Code Example #50
0
File: app.py Project: nt1983/sqlalchemy-challenge
def start_date(start=None):
    date_result=session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs),func.max(Measurement.tobs)).filter(Measurement.date>=start).all()
    d_result=list(np.ravel(date_result))
    session.close()
    return jsonify(d_result)
Code Example #51
0
File: impl_sqlalchemy.py Project: xianms/ceilometer
from ceilometer.i18n import _, _LI
from ceilometer import storage
from ceilometer.storage import base
from ceilometer.storage import models as api_models
from ceilometer.storage.sqlalchemy import models
from ceilometer.storage.sqlalchemy import utils as sql_utils
from ceilometer import utils

LOG = log.getLogger(__name__)


STANDARD_AGGREGATES = dict(
    avg=func.avg(models.Sample.volume).label('avg'),
    sum=func.sum(models.Sample.volume).label('sum'),
    min=func.min(models.Sample.volume).label('min'),
    max=func.max(models.Sample.volume).label('max'),
    count=func.count(models.Sample.volume).label('count')
)

UNPARAMETERIZED_AGGREGATES = dict(
    stddev=func.stddev_pop(models.Sample.volume).label('stddev')
)

PARAMETERIZED_AGGREGATES = dict(
    validate=dict(
        cardinality=lambda p: p in ['resource_id', 'user_id', 'project_id']
    ),
    compute=dict(
        cardinality=lambda p: func.count(
            distinct(getattr(models.Resource, p))
        ).label('cardinality/%s' % p)
    )
)
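This excerpt ends before showing how the aggregate tables are consumed; a hedged sketch of the intended shape, selecting a few of the expressions above (assumes a session, and that models.Resource is joined in for the cardinality aggregate):

# Illustrative only: build a SELECT from the aggregate expressions above.
selected = [
    STANDARD_AGGREGATES['avg'],
    STANDARD_AGGREGATES['max'],
    PARAMETERIZED_AGGREGATES['compute']['cardinality']('resource_id'),
]
query = session.query(*selected)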
Code Example #52
0
def temps():
    # Returns the min, max, and avg temperature for one station, not just the lowest.
    temp_stats = session.query(Measurements.station, func.min(Measurements.tobs), func.max(Measurements.tobs), func.avg(Measurements.tobs)).\
                            filter(Measurements.station == "USC00519281").all()
    session.close()
    return jsonify(temp_stats)
Code Example #53
0
    def run_task(self):
        date_insertion = datetime.now()
        logger.info("extracting %s ", self.input_filename)
        # this pattern matches the first date
        # e.g. '20200803ExtractApp'
        # will match 20200803
        date_string = self.input_filename.split('/')[-1][0:8] 
        try:
            self.last_historical_data_date_in_file = datetime.strptime(date_string, "%Y%m%d")
        except ValueError:
            raise Exception("couldn't find a date pattern in filename. filename should be \
                like 20200803ExtractApp.csv")

        count = 0
        statements = []
        something_new = False
        query = """
            INSERT into %s(
                siret,
                hiring_date,
                contract_type,
                departement,
                contract_duration,
                iiann,
                tranche_age,
                handicap_label,
                duree_pec,
                date_insertion
                )
            values(%%s, %%s, %%s, %%s, %%s, %%s, %%s, %%s, %%s, %%s)
        """ % settings.HIRING_TABLE
        imported_alternance_contracts = 0
        imported_alternance_contracts_distribution = {}
        not_imported_alternance_contracts = 0

        last_historical_data_date_in_db = db_session.query(func.max(Hiring.hiring_date))\
                                                            .filter(Hiring.contract_type == self.contract_type).first()[0]

        logger.info("will now extract all alternance contracts with hiring_date between %s and %s",
                    last_historical_data_date_in_db, self.last_historical_data_date_in_file)

        with import_util.get_reader(self.input_filename) as myfile:
            con, cur = import_util.create_cursor()
            header_line = myfile.readline().strip()   # FIXME detect column positions from header
            
            if b"SIRET" not in header_line:
                logger.debug(header_line)
                raise Exception("wrong header line")

            for line in myfile:
                line = line.decode()
                count += 1
                if not count % 10000:
                    logger.debug("reading line %i", count)
                    try:
                        try:
                            cur.executemany(query, statements)
                        except OperationalError:  # retry once in case of deadlock error
                            time.sleep(10)
                            cur.executemany(query, statements)
                        statements = []
                        con.commit()
                        something_new = True
                    except Exception:
                        logger.error("error in executing statement into hirings table: %s", sys.exc_info()[1])
                        statements = []
                        raise
                try:
                    siret, hiring_date, departement = parse_alternance_line(line)
                except InvalidRowException:
                    logger.info("invalid_row met at row: %i", count)
                    self.invalid_row_errors += 1
                    continue
                except InvalidSiretException:
                    error_message = traceback.format_exc()
                    logger.info("invalid siret met at row: %i", count)
                    logger.info(error_message)
                    self.invalid_siret_errors += 1
                    continue
                except InvalidZipCodeException:
                    logger.info("invalid zip code met at row: %i", count)
                    self.invalid_zipcode_errors += 1
                    continue
                
                # This check is deliberately disabled: the source data contains
                # many late contract entries, so we have to insert ALL the
                # contracts regardless of date.

                #  alternance_contract_should_be_imported = (
                #      hiring_date > last_historical_data_date_in_db 
                #      and hiring_date <= self.last_historical_data_date_in_file
                #)

                if hiring_date <= self.last_historical_data_date_in_file:
                    statement = (
                        siret,
                        hiring_date,
                        self.contract_type,
                        departement,
                        None, #contract_duration
                        None, #iiann
                        None, #tranche_age
                        None, #handicap_label
                        None,  #duree_pec
                        date_insertion

                    )
                    statements.append(statement)
                    imported_alternance_contracts += 1

                    if hiring_date.year not in imported_alternance_contracts_distribution:
                        imported_alternance_contracts_distribution[hiring_date.year] = {}
                    if hiring_date.month not in imported_alternance_contracts_distribution[hiring_date.year]:
                        imported_alternance_contracts_distribution[hiring_date.year][hiring_date.month] = {}
                    if hiring_date.day not in imported_alternance_contracts_distribution[hiring_date.year][hiring_date.month]:
                        imported_alternance_contracts_distribution[hiring_date.year][hiring_date.month][hiring_date.day] = 0
                    imported_alternance_contracts_distribution[hiring_date.year][hiring_date.month][hiring_date.day] += 1

        # run remaining statements
        try:
            cur.executemany(query, statements)
            something_new = True
        except Exception:
            logger.error("error in executing statement into hirings table: %s", sys.exc_info()[1])
            raise

        logger.info(f"Types de contrats à importer : {self.contract_name}")
        logger.info(f"processed {count} lba_contracts...")
        logger.info(f"imported lba_contracts: {imported_alternance_contracts}")
        logger.info(f"not imported lba_contracts: {not_imported_alternance_contracts}")
        logger.info(f"zipcode errors: {self.invalid_zipcode_errors}")
        logger.info(f"invalid_row errors: {self.invalid_row_errors}")
        logger.info(f"invalid siret errors: {self.invalid_siret_errors}")
#        if self.zipcode_errors > settings.MAXIMUM_ZIPCODE_ERRORS:
#            raise IOError('too many zipcode errors')
#        if self.invalid_row_errors > settings.MAXIMUM_INVALID_ROWS:
#            raise IOError('too many invalid_row errors')

        con.commit()
        cur.close()
        con.close()

        try:
            statistics = DpaeStatistics(
                last_import=datetime.now(),
                most_recent_data_date=self.last_historical_data_date_in_file,
                file_type=self.file_type
            )
            db_session.add(statistics)
            db_session.commit()
            logger.info("First way to insert DPAE statistics in DB : OK")
        except OperationalError:
            # For an obscure reason, inserting via the DpaeStatistics model does
            # not work on the bonaparte server, so we insert directly with a raw
            # SQL query. This job had been broken for more than a year, and this
            # is the only fix that worked:
            db_session.rollback()
            last_import_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            most_recent_date = self.last_historical_data_date_in_file.strftime('%Y-%m-%d %H:%M:%S')
            query = f"insert into dpae_statistics (last_import, most_recent_data_date, file_type) values ('{last_import_date}','{most_recent_date}','{self.file_type}')"
            con, cur = import_util.create_cursor()
            cur.execute(query)
            con.commit()
            cur.close()
            con.close()
            logger.info("Second way to insert DPAE statistics in DB : OK")


        logger.info("finished importing dpae...")
        return something_new
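The filename convention described in the comments near the top of run_task (the first eight characters are a %Y%m%d date stamp) can be checked in isolation:

# Sketch: extract the leading date stamp from an extract filename.
from datetime import datetime
name = "20200803ExtractApp.csv"
stamp = datetime.strptime(name.split('/')[-1][0:8], "%Y%m%d")
assert stamp == datetime(2020, 8, 3)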
Code Example #54
0
File: app.py Project: azreallpw/sqlalchemy
def calc_temps(start_date, end_date):
    return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
Code Example #55
0
def changelog_last_serial(request):
    return request.db.query(func.max(JournalEntry.id)).scalar()
Code Example #56
0
def start_end(start,end):
    session = Session(engine)
    temperatures = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).filter(Measurement.date <= end).all()
    session.close()
    temperature_stats = ['Minimum Temp','Average Temp','Maximum Temp']
    temps = []
    for minimum, average, maximum in temperatures:
        temps.append(minimum)
        temps.append(average)
        temps.append(maximum)
    temp_dict = dict(zip(temperature_stats,temps))
    return jsonify(temp_dict)
Code Example #57
0
    @classmethod
    def find_conversation_addressees(cls, login):
        # Fold each message row to "the other participant" relative to login.
        addressee = case([(cls.receiver == login, cls.sender)], else_=cls.receiver)

        query_base = (cls.query
                      .filter(or_(cls.receiver == login, cls.sender == login))
                      .with_entities(addressee.label("receiver"), func.max(cls.sentAt))
                      .group_by(addressee)
                      .order_by(func.max(cls.sentAt).desc())
                      .all())

        return query_base
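The case() expression maps every row to "the other participant" relative to login, so the grouped query returns one row per conversation partner together with the most recent sentAt, newest first. A hypothetical usage, assuming the method lives on a Message model:

# Hypothetical caller: list conversation partners by most recent activity.
for addressee, last_sent_at in Message.find_conversation_addressees("alice"):
    print(addressee, last_sent_at)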
Code Example #58
0
File: app.py Project: RyanHuang1995/07-Advanced-SQL
def temperature(start=None, end=None):
    tmin, tavg, tmax = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).filter(Measurement.date <= end).all()[0]
    results = {
        "Max": tmax,
        "Avg": tavg,
        "Min": tmin}
    return jsonify(results)
Code Example #59
0
File: files_service.py Project: aboellinger/zou
def get_last_output_files_for_instance(
    asset_instance_id,
    temporal_entity_id,
    task_type_id=None,
    output_type_id=None,
    name=None,
    representation=None,
    file_status_id=None,
):
    """
    Get last output files for given entity grouped by output type and name.
    """
    # Query maximum revision for each possible arguments
    # Only group and filter by file status if specified. Otherwise this could
    # lead to different groups of files sharing the same history but with
    # different status.
    # This could be very misleading when the user would want to get the last.
    if file_status_id:
        query = OutputFile.query.with_entities(
            OutputFile.temporal_entity_id,
            OutputFile.task_type_id,
            OutputFile.output_type_id,
            OutputFile.name,
            OutputFile.representation,
            OutputFile.file_status_id,
            func.max(OutputFile.revision).label("MAX"),
        ).group_by(
            OutputFile.temporal_entity_id,
            OutputFile.task_type_id,
            OutputFile.output_type_id,
            OutputFile.name,
            OutputFile.representation,
            OutputFile.file_status_id,
        )
    else:
        query = OutputFile.query.with_entities(
            OutputFile.temporal_entity_id,
            OutputFile.task_type_id,
            OutputFile.output_type_id,
            OutputFile.name,
            OutputFile.representation,
            func.max(OutputFile.revision).label("MAX"),
        ).group_by(
            OutputFile.temporal_entity_id,
            OutputFile.task_type_id,
            OutputFile.output_type_id,
            OutputFile.name,
            OutputFile.representation,
        )
    query = query.filter(OutputFile.asset_instance_id == asset_instance_id)
    query = query.filter(OutputFile.temporal_entity_id == temporal_entity_id)
    if file_status_id:
        query = query.filter(OutputFile.file_status_id == file_status_id)
    statement = query.subquery()

    # Create a join query to retrieve maximum revision
    query = OutputFile.query.join(
        statement,
        and_(
            OutputFile.temporal_entity_id == statement.c.temporal_entity_id,
            OutputFile.task_type_id == statement.c.task_type_id,
            OutputFile.output_type_id == statement.c.output_type_id,
            OutputFile.name == statement.c.name,
            OutputFile.representation == statement.c.representation,
            OutputFile.revision == statement.c.MAX,
        ),
    )

    # Filter by specified arguments
    query = query.filter(OutputFile.asset_instance_id == asset_instance_id)
    query = query.filter(OutputFile.temporal_entity_id == temporal_entity_id)
    if task_type_id:
        query = query.filter(OutputFile.task_type_id == task_type_id)
    if output_type_id:
        query = query.filter(OutputFile.output_type_id == output_type_id)
    if name:
        query = query.filter(OutputFile.name == name)
    if representation:
        query = query.filter(OutputFile.representation == representation)
    if file_status_id:
        query = query.filter(OutputFile.file_status_id == file_status_id)

    output_files = query.all()
    return fields.serialize_models(output_files)
Code Example #60
0
def calc_temps(start, end):
    """TMIN, TAVG, and TMAX for a list of dates.
    
    Args:
        start_date (string): A date string in the format %Y-%m-%d
        end_date (string): A date string in the format %Y-%m-%d
        
    Returns:
        TMIN, TAVE, and TMAX
    """

    start_end = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).filter(Measurement.date <= end).all()

    return jsonify(start_end)