Example #1
    def index_old(self):
        from gviz_data_table import Table
        from rockpack.mainsite.services.user.models import User, UserActivity
        user_count = readonly_session.query(func.count(User.id)).\
            filter(User.refresh_token != '').scalar()
        header = ('user count', 'max lifetime', 'avg lifetime', 'stddev lifetime',
                  'max active days', 'avg active days', 'stddev active days')
        lifetime = func.date_part('days', func.max(UserActivity.date_actioned) -
                                  func.min(UserActivity.date_actioned)).label('lifetime')
        active_days = func.count(func.distinct(func.date(
            UserActivity.date_actioned))).label('active_days')
        activity = readonly_session.query(UserActivity.user, lifetime, active_days).\
            group_by(UserActivity.user)
        ctx = {}
        for key, having_expr in ('all', None), ('1day', lifetime > 1), ('7day', lifetime > 7):
            data = activity.having(having_expr).from_self(
                func.count('*'),
                func.max(lifetime),
                func.avg(lifetime),
                func.stddev_samp(lifetime),
                func.max(active_days),
                func.avg(active_days),
                func.stddev_samp(active_days)
            ).one()
            table = Table([
                dict(id='metric', type=str),
                dict(id='value', type=float),
                dict(id='%', type=str),
            ])
            pdata = ('%d%%' % (data[0] * 100 / user_count),) + ('',) * 6
            table.extend(zip(*(header, map(float, data), pdata)))
            ctx['ret_%s_data' % key] = table.encode()

        return self.render('admin/retention_stats_old.html', **ctx)
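Note that Query.from_self() used above was removed in SQLAlchemy 2.0; a minimal sketch of the same aggregate-over-a-grouped-query pattern with an explicit subquery, reusing names that mirror the snippet above (the model and readonly_session are assumptions, not the original code):

# Minimal sketch, not the original code: the "aggregate of aggregates" pattern
# without Query.from_self(), assuming the UserActivity model and
# readonly_session from the snippet above.
from sqlalchemy import func

lifetime = func.date_part(
    'days',
    func.max(UserActivity.date_actioned) - func.min(UserActivity.date_actioned)
).label('lifetime')

per_user = (
    readonly_session.query(UserActivity.user, lifetime)
    .group_by(UserActivity.user)
    .having(lifetime > 7)
    .subquery()  # derived table with one row per user
)

stats = readonly_session.query(
    func.count(per_user.c.user),           # users kept by the HAVING clause
    func.avg(per_user.c.lifetime),         # average lifetime across those users
    func.stddev_samp(per_user.c.lifetime),
).one()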
Example #2
def read_rst(rst_id):
    # Fetch the restaurant's detail information
    session = Session()
    s = (
        session.query(
            Tabelog.Rcd.label("rst_id"),
            Tabelog.RestaurantName,
            Tabelog.TabelogMobileUrl,
            Tabelog.TotalScore,
            Tabelog.Situation,
            Tabelog.DinnerPrice,
            Tabelog.LunchPrice,
            Tabelog.Category,
            Tabelog.Station,
            Tabelog.Address,
            Tabelog.Tel,
            Tabelog.BusinessHours,
            Tabelog.Holiday,
            gf.x(Tabelog.LatLng).label("lat"),
            gf.y(Tabelog.LatLng).label("lng"),
            func.round(func.avg(UserPost.difficulty)).label("difficulty"),
            func.avg(UserPost.difficulty).label("raw_difficulty"),
        )
        .filter(Tabelog.Rcd == rst_id)
        .filter(UserPost.rst_id == Tabelog.Rcd)
        .group_by(UserPost.id)
        .first()
    )
    # gf.wkt(Tabelog.LatLng).label('Point'))\
    #         .filter('Rcd = :rcd').params(rcd=rst_id).first()
    session.commit()
    session.close()
    return s
Example #3
  def _compute_ratios(stats_clause):
    tc_session.expunge_all()
    avg_from_rate = select([alias(
        select([func.avg(RouterStats.circ_from_rate)],
                           stats_clause)
        )]).as_scalar()
    avg_to_rate = select([alias(
        select([func.avg(RouterStats.circ_to_rate)],
                           stats_clause)
        )]).as_scalar()
    avg_bi_rate = select([alias(
        select([func.avg(RouterStats.circ_bi_rate)],
                           stats_clause)
        )]).as_scalar()
    avg_ext = select([alias(
        select([func.avg(RouterStats.avg_first_ext)],
                           stats_clause)
        )]).as_scalar()
    avg_sbw = select([alias(
        select([func.avg(RouterStats.sbw)],
                           stats_clause)
        )]).as_scalar()

    RouterStats.table.update(stats_clause, values=
       {RouterStats.table.c.circ_from_ratio:
         (1-RouterStats.table.c.circ_from_rate)/(1-avg_from_rate),
        RouterStats.table.c.circ_to_ratio:
         (1-RouterStats.table.c.circ_to_rate)/(1-avg_to_rate),
        RouterStats.table.c.circ_bi_ratio:
         (1-RouterStats.table.c.circ_bi_rate)/(1-avg_bi_rate),
        RouterStats.table.c.ext_ratio:
         avg_ext/RouterStats.table.c.avg_first_ext,
        RouterStats.table.c.sbw_ratio:
         RouterStats.table.c.sbw/avg_sbw}).execute()
    tc_session.commit()
Example #4
  def _compute_ranks():
    tc_session.clear()
    min_r = select([func.min(BwHistory.rank)],
        BwHistory.table.c.router_idhex
            == RouterStats.table.c.router_idhex).as_scalar()
    avg_r = select([func.avg(BwHistory.rank)],
        BwHistory.table.c.router_idhex
            == RouterStats.table.c.router_idhex).as_scalar()
    max_r = select([func.max(BwHistory.rank)],
        BwHistory.table.c.router_idhex
            == RouterStats.table.c.router_idhex).as_scalar()
    avg_bw = select([func.avg(BwHistory.bw)],
        BwHistory.table.c.router_idhex
            == RouterStats.table.c.router_idhex).as_scalar()
    avg_desc_bw = select([func.avg(BwHistory.desc_bw)],
        BwHistory.table.c.router_idhex
            == RouterStats.table.c.router_idhex).as_scalar()

    RouterStats.table.update(values=
       {RouterStats.table.c.min_rank:min_r,
        RouterStats.table.c.avg_rank:avg_r,
        RouterStats.table.c.max_rank:max_r,
        RouterStats.table.c.avg_bw:avg_bw,
        RouterStats.table.c.avg_desc_bw:avg_desc_bw}).execute()

    #min_avg_rank = select([func.min(RouterStats.avg_rank)]).as_scalar()
    max_avg_rank = select([func.max(RouterStats.avg_rank)]).as_scalar()

    RouterStats.table.update(values=
       {RouterStats.table.c.percentile:
            (100.0*RouterStats.table.c.avg_rank)/max_avg_rank}).execute()
    tc_session.commit()
Example #5
  def superblock_stats(self, sample_id):
    """Calculate superblock level metrics to annotate genes.

    Requires all related blocks to already be properly annotated.

    What's happening is that we are simply taking the average of the
    metrics on the transcript level and applying that as gene metrics.
    This gives a decent, albeit not perfect, representation of gene
    level metrics.

    .. note::

      Annotation of transcripts needs to be accomplished before
      annotating genes!

    Args:
      sample_id (str): sample Id to match with coverage annotations

    Returns:
      list: list of tuples: ``(<gene_id>, <coverage>, <completeness>)``
    """
    return self.query(
      Block.superblock_id,
      func.avg(BlockData.coverage),
      func.avg(BlockData.completeness)
    ).join(BlockData, Block.id==BlockData.parent_id)\
     .filter(BlockData.sample_id == sample_id)\
     .group_by(Block.superblock_id)
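superblock_stats() returns a lazy Query rather than rows; a hypothetical usage sketch (the adapter instance and sample id are placeholders):

# Hypothetical usage: iterate the Query (or call .all()) to materialize the
# (superblock_id, avg coverage, avg completeness) tuples described above.
for superblock_id, coverage, completeness in adapter.superblock_stats('sample_1'):
    print(superblock_id, round(coverage, 2), round(completeness, 2))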
Example #6
  def write_bws(f, pct_low=0, pct_high=100, order_by=None, recompute=False, stat_clause=None, filter_clause=None, disp_clause=None):
    if not order_by:
      order_by=RouterStats.avg_first_ext

    if recompute:
      RouterStats.compute(pct_low, pct_high, stat_clause, filter_clause)

    pct_clause = and_(RouterStats.percentile >= pct_low, 
                         RouterStats.percentile < pct_high)

    # This is Fail City and sqlalchemy is running for mayor.
    if sqlalchemy.__version__ < "0.5.0":
      sbw = RouterStats.query.filter(pct_clause).filter(stat_clause).avg(RouterStats.sbw)
      filt_sbw = RouterStats.query.filter(pct_clause).filter(stat_clause).avg(RouterStats.filt_sbw)
    else:
      sbw = tc_session.query(func.avg(RouterStats.sbw)).filter(pct_clause).filter(stat_clause).scalar()
      filt_sbw = tc_session.query(func.avg(RouterStats.filt_sbw)).filter(pct_clause).filter(stat_clause).scalar()

    f.write(str(int(time.time()))+"\n")

    def cvt(a,b,c=1):
      if type(a) == float: return int(round(a/c,b))
      elif type(a) == int: return a
      elif type(a) == type(None): return "None"
      else: return type(a)

    for s in RouterStats.query.filter(pct_clause).filter(stat_clause).\
           filter(disp_clause).order_by(order_by).all():
      f.write("node_id=$"+s.router.idhex+" nick="+s.router.nickname)
      f.write(" strm_bw="+str(cvt(s.sbw,0)))
      f.write(" filt_bw="+str(cvt(s.filt_sbw,0)))
      f.write(" desc_bw="+str(int(cvt(s.avg_desc_bw,0))))
      f.write(" ns_bw="+str(int(cvt(s.avg_bw,0)))+"\n")

    f.flush()
Example #7
def politicoultimarodada(rodada_id=None,candidato_id=None):
    enviar = request.args.get('enviar')
    inicio = request.args.get('inicio')
    fim = request.args.get('fim')
    if enviar is None:
        enviar='True'
    rodada=politicaServices.getRodada(rodada_id)
    if rodada is None:
        return
    rodada_id=rodada.id
    total=0
      
    dominio=""
    #dominio="http://localhost:8084"
    politicos_pontos = politicaServices.topPoliticosRodada(tamanho=5)
    politicos_presenca = politicaServices.topPoliticosPresenca(rodada_id, tamanho=5)
    politicos_votacao = politicaServices.topPoliticosVotacoes(rodada_id, tamanho=5)
    politicos_proposicao = politicaServices.topPoliticosProposicao(rodada_id, tamanho=5)
    politicos_despesa = politicaServices.topPoliticosDespesa(rodada_id, tamanho=5)
    
    media_pontos = Candidatura.query.with_entities(func.avg(Candidatura.pontuacao_ultima).label("media"))
    media_presenca= Pontuacao.query.with_entities(func.avg(Pontuacao.presenca)).filter(Pontuacao.rodada==rodada_id)[0][0]
    media_votacao= Pontuacao.query.with_entities(func.avg(Pontuacao.votacao)).filter(Pontuacao.rodada==rodada_id)[0][0]
    media_proposicao= Pontuacao.query.with_entities(func.avg(Pontuacao.proposicao)).filter(Pontuacao.rodada==rodada_id)[0][0]
    media_despesa= Pontuacao.query.with_entities(func.avg(Pontuacao.despesa)).filter(Pontuacao.rodada==rodada_id)[0][0]
    
    rodada_atual = db.session.merge(session['rodada_atual'])
    titulo="Veja sua pontuacao da rodada"
    log=""
    if candidato_id is not None and candidato_id != "all":
        candidato = politicaServices.pontuacaoByCandidato(candidato_id, rodada_id)
        nome_politico = Candidatura.query.filter_by(id=candidato_id).first()
        #log = log + enviaPoliticoUltimaRodada(candidato,rodada_id,titulo,rodada,rodada_atual,enviar, nome_politico) #politicos,
        enviaPoliticoUltimaRodada(candidato,rodada_id,titulo,rodada,rodada_atual,enviar, nome_politico, politicos_pontos,
                                  politicos_presenca, politicos_votacao, politicos_proposicao, politicos_despesa,
                                  media_presenca, media_votacao, media_proposicao, media_despesa)
        total = 0
        total = total +1
        log = log + ", " + str(nome_politico.email) #+ str(p[0]) 
        #return render_template("comunicado/politicoultimarodada.html",candidato, dominio=dominio,rodada=rodada,
         #                      rodada_atual=rodada_atual, nome_politico=nome_politico) #,politicos=politicos rodadaPontos=rodadaPontos,
    else:
        candidatos = Candidatura.query.all()
        total=0
        for politico in candidatos:
            if total<inicio or total>fim:
                continue
                
            candidato = politicaServices.pontuacaoByCandidato(candidato=politico.id, rodada_numero=rodada_id)
            nome_politico = Candidatura.query.filter_by(id=politico.id).first()
            total=total+1
            #log = log + enviaPoliticoUltimaRodada(candidato,rodada_id,titulo,rodada,politicos,rodada_atual,enviar)
            try:                
                enviaPoliticoUltimaRodada(candidato,rodada_id,titulo,rodada,rodada_atual,enviar, nome_politico, politicos_pontos,
                                  politicos_presenca, politicos_votacao, politicos_proposicao, politicos_despesa,
                                  media_presenca, media_votacao, media_proposicao, media_despesa)
            except:
                print "Email Error"
            log = log + ", " + str(nome_politico.email)
    return render_template("comunicado/statuscomunicado.html",dominio="",titulo=titulo,total=total,log=log) 
Example #8
def index(page = 1):
	form = DataForm()
	user_data = Data.query.filter_by(user_id = g.user.id)
	#ms = user_data.order_by(Data.systolic_pressure.desc()).first()
	four_weeks_ago = datetime.datetime.now() - datetime.timedelta(weeks=4)

	maxs = db.session.query(func.max(Data.systolic_pressure).label('max_systolic')).filter_by(user_id = g.user.id).one()
	max_systolic = maxs.max_systolic
	mins = db.session.query(func.min(Data.systolic_pressure).label('min_systolic')).filter_by(user_id = g.user.id).one()
	min_systolic = mins.min_systolic
	avgs = db.session.query(func.avg(Data.systolic_pressure).label('avg_systolic')).filter_by(user_id = g.user.id).\
			filter(Data.timestamp > four_weeks_ago).one()
	avg_systolic = avgs.avg_systolic

	maxd = db.session.query(func.max(Data.diastolic_pressure).label('max_diastolic')).filter_by(user_id = g.user.id).one()
	max_diastolic = maxd.max_diastolic
	mind = db.session.query(func.min(Data.diastolic_pressure).label('min_diastolic')).filter_by(user_id = g.user.id).one()
	min_diastolic = mind.min_diastolic
	avgd = db.session.query(func.avg(Data.diastolic_pressure).label('avg_diastolic')).filter_by(user_id = g.user.id).\
			filter(Data.timestamp > four_weeks_ago).one()
	avg_diastolic = avgd.avg_diastolic

	maxc = db.session.query(func.max(Data.cardiac_rate).label('max_rate')).filter_by(user_id = g.user.id).one()
	max_rate = maxc.max_rate
	minc = db.session.query(func.min(Data.cardiac_rate).label('min_rate')).filter_by(user_id = g.user.id).one()
	min_rate = minc.min_rate
	avgc = db.session.query(func.avg(Data.cardiac_rate).label('avg_rate')).filter_by(user_id = g.user.id).\
			filter(Data.timestamp > four_weeks_ago).one()
	avg_rate = avgc.avg_rate

	if form.validate_on_submit():
		data = Data(systolic_pressure = form.systolic_pressure.data,
					diastolic_pressure = form.diastolic_pressure.data,
					cardiac_rate = form.cardiac_rate.data,
					timestamp = datetime.datetime.now(),
					body = form.note.data,
					user = g.user)
		db.session.add(data)
		db.session.commit()
		db.session.close()
		flash('Added successfully')
		return redirect(url_for('index'))

	datas = user_data.order_by(Data.timestamp.desc()).paginate(page, DATAS_PER_PAGE, False)

	return render_template('index.html',
		title = 'Home',
		form = form,
		max_systolic = max_systolic,
		min_systolic = min_systolic,
		avg_systolic = avg_systolic,
		max_diastolic = max_diastolic,
		min_diastolic = min_diastolic,
		avg_diastolic = avg_diastolic,
		max_rate = max_rate,
		min_rate = min_rate,
		avg_rate = avg_rate,
		datas = datas)
Example #9
def home():
	highestRated = session.query(Product.id, Product.name, Product.price, 
		Product.imageName, func.avg(Review.rating).label('averageRating')) \
		.join(Review, Product.id == Review.product_id).group_by(Product.id) \
		.order_by(desc(func.avg(Review.rating))).limit(5).all()
	newlyAdded = session.query(Product).order_by(desc(Product.dateAdded)) \
		.limit(5).all()
	currentUser = getCurrentUser()
	return render_template('home.html', highestRated = highestRated, 
		newlyAdded = newlyAdded)
Example #10
 def fetch_top_rated_packages(limit=10):
     package = table('package')
     rating = table('rating')
     sql = select([package.c.id, func.avg(rating.c.rating), func.count(rating.c.rating)],
                  from_obj=[package.join(rating)]). \
         where(package.c.private == 'f'). \
         group_by(package.c.id). \
         order_by(func.avg(rating.c.rating).desc(), func.count(rating.c.rating).desc()). \
         limit(limit)
     res_ids = model.Session.execute(sql).fetchall()
     return [(model.Session.query(model.Package).get(unicode(pkg_id)), avg, num) for pkg_id, avg, num in res_ids]
Example #11
 def get_players_stats(self, team, ruolo):
     '''Get the average vote for a given Serie A team and ruolo.'''
     query = self.session.query(Giocatore).filter(and_(
                         Giocatore.squadra == team,
                         Giocatore.ruolo == ruolo)).subquery()
     adalias = aliased(Giocatore, query)
     query_p = self.session.query(adalias.nome, func.avg(Voto.voto)).join(
                         adalias, Voto.giocatore_id).filter(Voto.pres > 0)\
                         .group_by(adalias).order_by(
                                     desc(func.avg(Voto.voto))).all()
     return query_p
Example #12
def get_query(db, model_map):
    Group = model_map['Group']
    User = model_map['User']
    return db.session.query(Group.id.label(u'国家编号'),
                            Group.name.label(u'国家'),
                            func.sum(User.impact).label(u'总影响力'),
                            func.avg(User.impact).label(u'平均影响力'),
                            func.sum(User.intelligience).label(u'总智力'),
                            func.avg(User.intelligience).label(u'平均智力'),
                            func.sum(User.strength).label(u'总武力'),
                            func.avg(User.strength).label(u'平均武力')).group_by(User.group_id).join(User)
Example #13
def get_average_rates():
    """Gets average rates for all period."""
    sub = Rate.query\
        .group_by('bank_id')\
        .order_by('update_time desc')\
        .subquery()

    return db.session.query(sub.c.update_time,
                            func.avg(sub.c.usd_buying),
                            func.avg(sub.c.usd_selling),
                            func.avg(sub.c.eur_buying),
                            func.avg(sub.c.eur_selling)).one()
Example #14
 def top_rated_packages(cls, limit=10):
     # NB Not using sqlalchemy as sqla 0.4 doesn't work using both group_by
     # and apply_avg
     package = table('package')
     rating = table('rating')
     sql = select([package.c.id, func.avg(rating.c.rating), func.count(rating.c.rating)], from_obj=[package.join(rating)]).\
           group_by(package.c.id).\
           order_by(func.avg(rating.c.rating).desc(), func.count(rating.c.rating).desc()).\
           limit(limit)
     res_ids = model.Session.execute(sql).fetchall()
     res_pkgs = [(model.Session.query(model.Package).get(unicode(pkg_id)), avg, num) for pkg_id, avg, num in res_ids]
     return res_pkgs
Example #15
def query_average_by_group():
    print "test query_average_by_group"
    avg_by_name = session.query(Person.name, func.avg(Person.score))\
            .group_by(Person.name).all()
    print avg_by_name

    avg_by_name = session.query(Person.name, func.avg(Person.score))\
            .filter_by(is_man=False).group_by(Person.name).all()
    print avg_by_name

    avg_by_name = session.query(Person.name, func.sum(Person.score), func.count(Person.score))\
            .group_by(Person.name).all()
    print avg_by_name
Example #16
def sample_queries():
    print('\nWhat is average rating by age group, gender, and occupation of the reviewers?')
    q = session.query(User.age, User.gender, User.occupation, func.avg(Rating.rating)).join(Rating)\
        .group_by(User.age, User.gender, User.occupation)
    print('{:^5s}|{:^9s}|{:^15s}|{:^8s}'.format('Age', 'Gender', 'Occupation', 'Rating'))
    print('-'*37)
    for li in q:
        print('{:^5d} {:^9s} {:^15s} {:^8.3f}'.format(li[0], li[1], li[2], li[3]))
    print('\nWhat is average rating by movie genre?')
    print('{:^15s}|{:^8s}'.format('Genre', 'Rating'))
    print('-'*24)
    for r in session.query(Genre.genre, func.avg(Rating.rating)).join(Rating).group_by(Genre.genre).all():
        print('{:^15s} {:^8.3f}'.format(r[0], r[1]))
Example #17
def get_average_rates_from_date(date_from):
    """Gets average rates counted from selected date."""
    sub = Rate.query\
        .group_by('bank_id')\
        .filter(Rate.update_time >= date_from)\
        .order_by('update_time desc')\
        .subquery()

    return db.session.query(sub.c.update_time,
                            func.avg(sub.c.usd_buying),
                            func.avg(sub.c.usd_selling),
                            func.avg(sub.c.eur_buying),
                            func.avg(sub.c.eur_selling)).one()
Example #18
def get_average_rate_for_date_range(date_from, date_to):
    """Gets average rates for date range."""
    sub = Rate.query\
        .group_by('bank_id')\
        .filter(and_(Rate.update_time >= date_from, Rate.update_time <= date_to))\
        .order_by('update_time desc')\
        .subquery()

    return db.session.query(sub.c.update_time,
                            func.avg(sub.c.usd_buying),
                            func.avg(sub.c.usd_selling),
                            func.avg(sub.c.eur_buying),
                            func.avg(sub.c.eur_selling)).one()
Example #19
    def _calculate_clone_stats(self, sample_id, min_cdr3, max_cdr3,
                               include_outliers, only_full_reads):
        clone_statistics = {}
        for name, stat in _clone_contexts.items():
            clone_statistics[name] = CloneContextStats(seqs=None, **stat)

        # TODO: This should be automatically generated from _dist_fields
        query = self._session.query(
            Sequence.clone_id,
            func.round(func.avg(Sequence.v_match)).label('v_match'),
            func.round(func.avg(Sequence.j_match)).label('j_match'),
            func.round(func.avg(Sequence.j_length)).label('j_length'),
            Sequence.v_gene,
            Sequence.j_gene,
            func.count(Sequence.seq_id).label('copy_number'),
            func.round(
                func.avg(Sequence.v_length + Sequence.num_gaps)
            ).label('v_length'),
            func.round(
                func.avg(100 * Sequence.v_match / Sequence.v_length)
            ).label('v_identity'),
            Sequence.cdr3_num_nts.label('cdr3_length'),
            SelectionPressure.sigma_fwr.label('sp_fwr'),
            SelectionPressure.sigma_cdr.label('sp_cdr'),
        ).join(
            SelectionPressure,
            and_(
                SelectionPressure.clone_id == Sequence.clone_id,
                SelectionPressure.sample_id == Sequence.sample_id
            ),
            isouter=True
        ).filter(
            Sequence.sample_id == sample_id,
            ~Sequence.clone_id.is_(None)
        )

        if only_full_reads:
            query = query.filter(Sequence.partial == 0)
        query = query.group_by(Sequence.clone_id)

        for clone in query:
            clone_info = self._session.query(Clone.cdr3_nt).filter(
                Clone.id == clone.clone_id).first()
            in_frame = len(clone_info.cdr3_nt) % 3 == 0
            stop = '*' in lookups.aas_from_nts(clone_info.cdr3_nt)
            functional = in_frame and not stop
            for name, stat in clone_statistics.items():
                stat.add_if_match(clone, in_frame, stop, functional)

        self._add_stat(clone_statistics, sample_id, include_outliers,
                       only_full_reads)
Example #20
    def test_apply_avg(self):
        """Query.apply_avg(col)

        avg = session.query(Address).apply_avg(Address.bounces)

        """
        session = create_session()

        avgs = list(session.query(Address).values(func.avg(Address.bounces)))
        avg = avgs[0][0]
        assert avg > 0 and avg < 10

        avg = session.query(func.avg(Address.bounces)).one()[0]
        assert avg > 0 and avg < 10
Example #21
def album_list():
	ltype, size, offset = map(request.args.get, [ 'type', 'size', 'offset' ])
	try:
		size = int(size) if size else 10
		offset = int(offset) if offset else 0
	except:
		return request.error_formatter(0, 'Invalid parameter format')

	query = session.query(Folder).filter(Folder.tracks.any())
	if ltype == 'random':
		albums = []
		count = query.count()

		if not count:
			return request.formatter({ 'albumList': {} })

		for _ in xrange(size):
			x = random.choice(xrange(count))
			albums.append(query.offset(x).limit(1).one())

		return request.formatter({
			'albumList': {
				'album': [ a.as_subsonic_child(request.user) for a in albums ]
			}
		})
	elif ltype == 'newest':
		query = query.order_by(desc(Folder.created))
	elif ltype == 'highest':
		query = query.join(RatingFolder).group_by(Folder.id).order_by(desc(func.avg(RatingFolder.rating)))
	elif ltype == 'frequent':
		query = query.join(Track, Folder.tracks).group_by(Folder.id).order_by(desc(func.avg(Track.play_count)))
	elif ltype == 'recent':
		query = query.join(Track, Folder.tracks).group_by(Folder.id).order_by(desc(func.max(Track.last_play)))
	elif ltype == 'starred':
		query = query.join(StarredFolder).join(User).filter(User.name == request.username)
	elif ltype == 'alphabeticalByName':
		query = query.order_by(Folder.name)
	elif ltype == 'alphabeticalByArtist':
		# this is a mess because who knows how your file structure is set up
		# with the database changes it's more difficult to get the parent of a dir
		parent = aliased(Folder)
		query = query.join(parent, Folder.parent).order_by(parent.name).order_by(Folder.name)
	else:
		return request.error_formatter(0, 'Unknown search type')

	return request.formatter({
		'albumList': {
			'album': [ f.as_subsonic_child(request.user) for f in query.limit(size).offset(offset) ]
		}
	})
Example #22
 def average_donation(self, **kwargs):
     """Average amount of money (€) given by a donor"""
     filters = list(self.get_filters(**kwargs))
     filters.append(self.project_id == Project.id)
     filters.append(Project.status.in_(Project.PUBLISHED_PROJECTS))
     filters.append(self.status > self.STATUS_PENDING)
     sub = db.session.query(func.avg(self.amount).label('amount')) \
                     .join(Project) \
                     .filter(*filters) \
                     .group_by(self.user_id) \
                     .subquery()
     total = db.session.query(func.avg(sub.c.amount)).scalar()
     total = 0 if total is None else round(total, 2)
     return total
Example #23
def area_average(station_data_ids, year_step=True, time='All'):
    query = session.query(TemperatureModel.station_data_id,
                          TemperatureModel.year,
                          TemperatureModel.month,
                          func.avg(TemperatureModel.data).label('average_temperature')
                          ).filter(TemperatureModel.station_data_id.in_(station_data_ids),
                                   TemperatureModel.data.isnot(None),
                                   TemperatureModel.time == time).group_by(TemperatureModel.year,
                                                                           TemperatureModel.month)
    if year_step:
        query = session.query('station_data_id', 'year',
                              func.avg('average_temperature').label('average_temperature')
                              ).select_from(query.subquery()).group_by('year')
    temperatures = query.all()
    pprint(temperatures)
Example #24
def get_trapData(start_date, end_date, location):
    data = {}
    try:
        quadrants = DBSession.query(
                                Quadrant.name
                            ).filter(
                                func.lower(Quadrant.location)==func.lower(location)
                            ).all()

        quadrants = [i[0] for i in quadrants]
        for i in quadrants:
            regions = DBSession.query(
                                Region.name
                            ).filter(
                                func.lower(Region.location)==func.lower(location),
                                func.lower(Region.quadrant)==func.lower(i),
                            )
            traps = DBSession.query(
                                Trap.date,
                                func.avg(Trap.count),
                            ).filter(
                                Trap.date.between(start_date, end_date),
                                Trap.region.in_(regions)
                            ).group_by(Trap.date).all()

            data[i] = [[str(j[0]), int(j[1])] for j in traps]
    except DBAPIError as e:
        print e
        return {}

    return data
Example #25
def get_rainData(start_date, end_date, location):
    data = {}
    try:
        readings = DBSession.query(
                                Gauge.quadrant,
                                RainReading.date,
                                func.avg(RainReading.amount),
                            ).filter(
                                Gauge.gauge_id==RainReading.gauge_id,
                                func.lower(Gauge.location)==func.lower(location),
                                func.lower(RainReading.location)==
                                    func.lower(location),
                                RainReading.date.between(start_date, end_date),
                            ).group_by(
                                Gauge.quadrant,
                                RainReading.date,
                            ).all()
    except DBAPIError as e:
        print e
        return {}

    for i in readings:
        new_item = [str(i[1]), int(i[2])]
        if i[0] in data:
            data[i[0]].append(new_item)
        else:
            data[i[0]] = [new_item]

    return data
Example #26
def compute_vote_score(vote, old_vote=None, commit=True):
    """Compute actual score for a vote based on some factors, and update relevant models."""
    post = vote.post
    user = vote.user

    post_user = post.user

    if vote.computed_weight and old_vote:
        # if an existing vote is being updated, just use the already computed weight
        actual_vote = float(vote.vote) - float(old_vote)

        weighted_score = float(vote.computed_weight) * actual_vote
    else:
        user_weight = calculate_user_score_weight(user)
        vote_weight = calculate_vote_weight(vote, post)

        computed_weight = user_weight * vote_weight
        vote.computed_weight = computed_weight
        vote.save(commit=False)

        weighted_score = computed_weight * float(vote.vote)

    post.score = float(post.score) + weighted_score
    post.save(commit=commit)

    new_user_score = db.session.query(
        func.avg(ForumPost.score)
    ).filter(
        ForumPost.score != 0,
        ForumPost.user_id == post_user.id
    ).scalar()

    post_user.score = new_user_score
    post_user.save(commit=commit)
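For the branch that reuses an existing computed_weight, only the vote delta gets weighted; a tiny numeric sketch of that arithmetic (values are illustrative only):

# Illustrative numbers only: a voter with a stored computed_weight of 0.8 who
# flips a vote from +1 to -1 produces a delta of -2, so the post loses 1.6.
computed_weight = 0.8
old_vote, new_vote = 1.0, -1.0

actual_vote = new_vote - old_vote                 # -2.0
weighted_score = computed_weight * actual_vote    # -1.6

post_score = 5.0
post_score += weighted_score                      # 3.4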
Example #27
def get_stat_last_period(days = 30):
    users = config.get('WITHINGS', 'users').split(',')
    stat_list_header = ['Day']
    for user in users:
        stat_list_header += ['', user[0] + ' weight', user[0] + ' avg weight', user[0] + ' weight loss']
    stat_list_header = [stat_list_header]
    stat_list = []
    last_weights = {}
    last_avg_weights = {}
    min_avg_weight = {}
    min_weights = {}
    for user in users:
        last_weights[user] = []
        last_avg_weights[user] = []
        min_avg_weight[user] = 100
        min_weights[user] = 100 # TODO ??? better ???
    min_date = datetime.datetime.now() - datetime.timedelta(days=days+20)
    min_date = datetime.date(min_date.year, min_date.month, min_date.day)
    with get_session_manager() as session_manager:
        while min_date <= datetime.date.today():
            stat_row = [str(min_date)]
            for user in users:
                weight, avg_weight, weight_loss = None, None, None
                (weight,) = session_manager.query(func.avg(Weights.weight)).\
                    filter(and_(Weights.user == user,
                                Weights.date == str(min_date))).first()
                if weight:
                    last_weights[user].append(weight)
                else:
                    if last_weights[user]:
                        last_weights[user].append(last_weights[user][-1])
                        weight = last_weights[user][-1]
                if last_weights[user]:
                    if not last_avg_weights[user] or len(last_avg_weights[user])<5:
                        avg_weight = last_weights[user][-1]
                        weight_loss = 0
                        last_avg_weights[user].append(avg_weight)
                    else:
                        avg_weight = last_avg_weights[user][-1] + (weight - last_avg_weights[user][-5])/10
                        weight_loss = (last_avg_weights[user][-5]-avg_weight)/5
                        last_avg_weights[user].append(avg_weight)
                last_weights[user] = last_weights[user][-10:]
                last_avg_weights[user] = last_avg_weights[user][-10:]
                if weight and avg_weight and weight_loss:
                    if avg_weight < min_avg_weight[user]:
                        min_avg_weight[user] = avg_weight
                        avg_weight = ('background:#CC9999', avg_weight)
                    if weight < min_weights[user]:
                        min_weights[user] = weight
                        weight = ('background:#CC9999', weight)
                    stat_row += ['', weight, avg_weight, max(weight_loss,0)]
                else:
                    stat_row += ['', '', '', '']
            #print stat_row
            stat_list += [stat_row]
            min_date += datetime.timedelta(days=1)
    stat_list = sorted(stat_list, reverse=True)
    stat_list = stat_list[:days]
    stat_list = stat_list_header + stat_list
    return stat_list
Example #28
 def average_total(self, **kwargs):
     """Average money raised (€) for projects"""
     filters = self.get_filters(**kwargs)
     total = db.session.query(func.avg(self.amount)) \
                       .filter(*filters).scalar()
     total = 0 if total is None else round(total, 2)
     return total
Example #29
 def percent_pledged(self, **kwargs):
     """Percentage of money raised over the minimum on projects """
     filters = self.get_filters(**kwargs)
     total = db.session.query(func.avg(self.amount / self.minimum * 100)) \
                       .filter(*filters).scalar()
     total = 0 if total is None else round(total, 2)
     return total
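A quick numeric reading of the percent_pledged expression above (illustrative values only; in the real query self.minimum is taken per row):

# Illustrative values only: each row contributes amount / minimum * 100 and
# func.avg takes the mean of those percentages over the filtered rows.
amounts = [50.0, 150.0, 300.0]
minimum = 200.0
percents = [a / minimum * 100 for a in amounts]            # [25.0, 75.0, 150.0]
percent_pledged = round(sum(percents) / len(percents), 2)  # 83.33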
Example #30
def search(request):
    session = request.db_session
    if ('q' in request.GET) and request.GET['q'].strip():
        query_string = request.GET['q']

        entry_query = get_query(query_string)

        items = session.query(Item, func.avg(ItemRating.Rating)).outerjoin(ItemRating).filter(
            Item.Name.like('%' + entry_query[0] + '%')).group_by(Item.ID).all()

        results = []
        for entry in items:
            item = entry[0]
            direct_sale = [listing for listing in item.listings if type(listing) is DirectSaleListing]
            auction = [listing for listing in item.listings if type(listing) is AuctionListing]
            rating = entry[1] or 0
            results.append({
                "item": item,
                "available": any(auction) or any(direct_sale),
                "auctions": auction,
                "direct_sales": direct_sale,
                "min_direct_sale": min([listing.Price for listing in direct_sale]) if direct_sale else None,
                "link": reverse("view_item", kwargs={"item_id": item.ID}),
                "rating": rating
            })

        # for words in entry_query:
        # objects = request.db_session.query(Item).\
        # filter(Item.Name.like('%' +words+ '%'))
        #
        # for object in objects:
        #     found_entries.append(object)

        return render(request, 'item/search.html',
                      {'query_string': query_string, 'items': results})
Example #31
def start_date(start):
    # Create session from Python to the DB
    session = Session(engine)

    # Note: Date input format = 8-5-2017
    start_date = datetime.strptime(start, "%m-%d-%Y").date()

    temp_calc = session.query (func.min (Measurement.tobs), func.max (Measurement.tobs), func.avg (Measurement.tobs)).\
        filter(Measurement.date >= start_date).all()

    # Note: Flatten and put into dictionary format
    temp_results = list(np.ravel(temp_calc))

    min_temp = temp_results[0]
    max_temp = temp_results[1]
    avg_temp = temp_results[2]

    temp_data = []
    temp_dict = [{
        "Start Date": start_date
    }, {
        "Minimum Temperature for this date": min_temp
    }, {
        "Maximum Temperature for this date": max_temp
    }, {
        "Average Temperature for this date": avg_temp
    }]

    temp_data.append(temp_dict)

    # Note: To simplify code, remove above dictionary code and just use jsonify to display data results.
    session.close()

    return jsonify(temp_dict)
Example #32
precip_l12 = session.query(Measurement.date,
    Measurement.prcp).filter(Measurement.date >= year_date).all()
precip_df = pd.DataFrame(precip_l12).set_index('date').sort_values(
    'date', ascending=True)

session = Session(engine)
station_activity = session.query(Measurement.station, func.count(
    Measurement.id)).group_by(Measurement.station).order_by(
        func.count(Measurement.id).desc()).all()

session = Session(engine)
top_station = station_activity[0][0]
top_station_temps = session.query(
    Measurement.tobs).filter(Measurement.station == top_station)
low_temp = min(top_station_temps)
high_temp = max(top_station_temps)
avg_temp = session.query(func.avg(
    Measurement.tobs)).filter(Measurement.station == top_station).all()


def calc_temps(start_date, end_date):
    return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()


year_date2 = str(year_date.date())
calc_temp2 = calc_temps(year_date2, last_date2)

total_rain_query = engine.execute(
    'SELECT station, sum(prcp), station.name, station.latitude, station.longitude, station.elevation \
                            FROM measurement JOIN station USING (station) \
                             GROUP BY station ORDER BY sum(prcp) desc'
).fetchall()
Example #33
def start_end(start, end):
    # Create our session (link) from Python to the DB
    session = Session(engine)
    """Query the minimum, the average and the max temperature for a start-end range date"""
    # Query temperature observations
    results = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs),         func.avg(Measurement.tobs)).\
                filter(Measurement.date >= start).\
                filter(Measurement.date <= end).all()

    session.close()

    # Convert query results to a dictionary
    temp_data = []
    for min_temp, max_temp, avg_temp in results:
        temp_dict = {}
        temp_dict["min_temp"] = min_temp
        temp_dict["max_temp"] = max_temp
        temp_dict["avg_temp"] = avg_temp
        temp_data.append(temp_dict)

    return jsonify(temp_data)
Example #34
    def calc_temps(start_date):

        return g.session.query(func.min(g.Measurement.tobs), func.avg(g.Measurement.tobs), func.max(g.Measurement.tobs)).\
            filter(g.Measurement.date >= start_date).all()
Example #35
def start_date(start):
    results = session.query(Measurement.date, func.avg(Measurement.tobs), func.max(Measurement.tobs),func.min(Measurement.tobs)).\
        filter(Measurement.date >= start).all()
    start_json = list(np.ravel(results))
    return jsonify(start_json)
Example #36
def get_user_average(id):
    average = Ratings.query.with_entities(
        func.avg(Ratings.value).label('average')).filter(
            Ratings.to_user == id).scalar()
    return average
Example #37
def start_date(start):
    #"""Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range."""
    starting_date = dt.datetime.strptime(start, "%Y-%m-%d")
    # Query Min, Max, and Avg based on date
    summary_stats = session.query(func.min(Measurement.tobs),func.max(Measurement.tobs),func.round(func.avg(Measurement.tobs))).\
    filter(Measurement.date >= starting_date).all()
    session.close()

    summary_list = list(np.ravel(summary_stats))

    return jsonify(summary_list)
Example #38
 def averageRatingFor(self, user, language):
     """ Return the rating for the given user """
     return func.avg(
         self.entry_model.ratingFor(user)).over(partition_by=self.id)
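averageRatingFor() builds a window-function expression meant to be selected alongside other columns; a minimal sketch of that pattern, with a hypothetical Entry model standing in for the models above:

# Minimal sketch, assuming a hypothetical Entry model: select each row's rating
# next to the per-language average computed as an AVG window function.
rows = session.query(
    Entry.id,
    Entry.rating,
    func.avg(Entry.rating).over(partition_by=Entry.language_id).label('language_avg'),
).all()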
Example #39
def start_end_date(start, end):
    #First to set up an if statement to make sure the end date comes after the start date
    if start >= end:
        return ("please set your end date to be after the beginning date")

    #Second, to set the string of the return from some functions to variables
    first_date_str = str(first_date())
    last_date_str = str(last_date())

    #Next is an if statement to check to see if the date passed is between the appropriate dates
    if (start < first_date_str) | (end > last_date_str):
        return (
            "please choose a set of dates between 2010-01-02 and 2017-08-23")

    # Create our session (link) from Python to the DB
    session = Session(engine)

    #Query to get the desired data
    temperature_readings = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.round(func.avg(Measurement.tobs),1)).\
                                     filter(Measurement.date >= start).\
                                     filter(Measurement.date <= end).all()

    session.close()

    qry_result = list(np.ravel(temperature_readings))

    return jsonify(qry_result)
Example #40
def startenddate(start1, end1):
    query_stdate1 = dt.datetime.strptime(start1, '%Y-%m-%d')
    query_enddate = dt.datetime.strptime(end1, '%Y-%m-%d')
    session = Session(engine)
    results_temp_st_end=session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
                filter(Measurement.date>=query_stdate1).\
                filter(Measurement.date<=query_enddate).all()

    return jsonify(results_temp_st_end)
Example #41
]

QUERY_STATISTICS_SHORT_TERM = [
    StatisticsShortTerm.metadata_id,
    StatisticsShortTerm.start,
    StatisticsShortTerm.mean,
    StatisticsShortTerm.min,
    StatisticsShortTerm.max,
    StatisticsShortTerm.last_reset,
    StatisticsShortTerm.state,
    StatisticsShortTerm.sum,
]

QUERY_STATISTICS_SUMMARY_MEAN = [
    StatisticsShortTerm.metadata_id,
    func.avg(StatisticsShortTerm.mean),
    func.min(StatisticsShortTerm.min),
    func.max(StatisticsShortTerm.max),
]

QUERY_STATISTICS_SUMMARY_SUM = [
    StatisticsShortTerm.metadata_id,
    StatisticsShortTerm.start,
    StatisticsShortTerm.last_reset,
    StatisticsShortTerm.state,
    StatisticsShortTerm.sum,
    func.row_number().over(
        partition_by=StatisticsShortTerm.metadata_id,
        order_by=StatisticsShortTerm.start.desc(),
    ).label("rownum"),
]
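These column lists are meant to be unpacked into a query; a hypothetical consumer of QUERY_STATISTICS_SUMMARY_MEAN (the session, filter, and grouping here are assumptions, not the project's actual call site):

# Hypothetical usage: unpack the column list and group by metadata_id so that
# func.avg/min/max aggregate down to one summary row per statistic.
summary_rows = (
    session.query(*QUERY_STATISTICS_SUMMARY_MEAN)
    .filter(StatisticsShortTerm.start >= start_time)   # start_time is assumed
    .group_by(StatisticsShortTerm.metadata_id)
    .all()
)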
Example #42
def start(start_date):
    session = Session(engine)
    #When given the start only, calculate `TMIN`, `TAVG`, and `TMAX` for all dates greater than and equal to the start date.

    return session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\
        filter(measurement.date >= start_date).all()
Example #43
def start(date):

    date = datetime.strptime(date, '%Y%m%d')
    date_ = date.date()

    min_max_avg_temp = session.query((Measurement.date), func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
        filter(Measurement.date >= date_).group_by(Measurement.date).all()

    data = list(np.ravel(min_max_avg_temp))

    return jsonify(data)
Example #44
def duration(start, end):

    Start_Date = dt.datetime.strptime(start, "%Y-%m-%d")
    End_Date = dt.datetime.strptime(end, "%Y-%m-%d")

    summary_stats = session.query(func.min(Measurement.tobs),func.max(Measurement.tobs),func.round(func.avg(Measurement.tobs))).\
    filter(Measurement.date.between(Start_Date,End_Date)).all()

    session.close()

    summary = list(np.ravel(summary_stats))

    return jsonify(summary)
Example #45
        temperature_dict = {}
        temperature_dict["date"] = date
        temperature_dict["tobs"] = temperature_observations
        dates_and_temperature_values.append(temperature_dict)

    return jsonify(dates_and_temperature_values)


# Create our session (link) from Python to the DB
session = Session(engine)

# Query all temperature parameters
temperature_parameters = [
    Measurement.date,
    func.min(Measurement.tobs),
    func.avg(Measurement.tobs),
    func.max(Measurement.tobs)
]
results = session.query(*temperature_parameters).\
                        group_by(Measurement.date).\
                        order_by(Measurement.date).all()

session.close()

temperature_parameters_list = []
for date, temperature_minimum, temperature_average, temperature_maximum in results:
    temperature_parameters_dict = {}
    temperature_parameters_dict["date"] = date
    temperature_parameters_dict["minimum temperature"] = temperature_minimum
    temperature_parameters_dict["average temperature"] = temperature_average
    temperature_parameters_dict["maximum temperature"] = temperature_maximum
Example #46
    CNO = Column(CHAR(10), primary_key=True, nullable=False)
    CNAME = Column(VARCHAR(30), nullable=False)
    CCREDIT = Column(SMALLINT)
    sss = Column(SMALLINT)
    PERIOD = Column(SMALLINT)
    x = Column(Boolean)


# Base.metadata.create_all()  # Maps the definitions above into the database, i.e. creates the tables; changing a field after the mapping is created will not update it.

# Insert data
session = sessionmaker(engine)()

# for i in range(5,10):
#     course=Course(CNO='JO%s'%i,CNAME='呵呵%s'%i,CCREDIT=i,sss=i+1,PERIOD=2*i,x=(i%2==0))
#     session.add(course)
#     session.commit()

courses = session.query(Course).all()
for i in courses:
    print(i)

courses = session.query(Course.CNO, Course.CNAME).all()
for i in courses:
    print(i)

re = session.query(func.count(Course.CNAME)).first()
print(re)

re = session.query(func.avg(Course.sss)).first()
print(re)
Example #47
def temps(start):
    start= dt.datetime.strptime(start, '%Y-%m-%d')
    temp_data = session.query(func.min(Measurements.tobs), func.avg(Measurements.tobs), func.max(Measurements.tobs)).filter(Measurements.date >= start).all()
    temps = list(np.ravel(temp_data))
    return jsonify(temps)
Example #48
def calc_datetemps(start_date, end_date):
    """TMIN, TAVG, and TMAX for a list of dates.
    
    Args:
        start_date (string): A date string in the format %Y-%m-%d
        end_date (string): A date string in the format %Y-%m-%d
        
    Returns:
        TMIN, TAVG, and TMAX
    """
    session = Session(engine)
    results_calc_datetemps = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        group_by(Measurement.date).filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()

    # Create a dictionary from the row data and append to a list of all_calcs
    all_calcs = []
    for calc in results_calc_datetemps:
#        print(calc)

        calc_dict = {}
        calc_dict["date"] = calc[0]
        calc_dict["min"] = calc[1]
        calc_dict["avg"] = calc[2]
        calc_dict["max"] = calc[3]
        all_calcs.append(calc_dict)

#    print(calc_temps('2012-02-28', '2012-03-05'))
#    all_calc_datetemps = list(np.ravel(results_calc_datetemps))
    session.close()
    return jsonify(all_calcs)
Example #49
def start_end(start, end):
    results = session.query(func.avg(Measurement.tobs), func.max(Measurement.tobs), func.min(Measurement.tobs)).\
        filter(Measurement.date >= start, Measurement.date <= end).all()
    end_json = list(np.ravel(results))
    return jsonify(end_json)
Example #50
def start_end_search(start, end):

    session = Session(engine)

    start_end_search = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
        filter(func.strftime("%Y-%m-%d",Measurement.date) >= start).\
        filter(func.strftime("%Y-%m-%d",Measurement.date) <= end).\
        group_by(Measurement.date).all()

    # create dict from data and append to a list of search_data2
    search_data2 = []
    for a, b, c in start_end_search:
        search_dict = {}
        search_dict["Min Temp"] = a
        search_dict["Max Temp"] = b
        search_dict["Avg Temp"] = c
        search_data2.append(search_dict)

    print("Server received request for 'start_end_search' page")
    return jsonify(search_data2)
Example #51
def getStatsSensors(db, SensorsDatabase):
    maxValue = db.session.query(func.max(SensorsDatabase.temperature)).scalar()
    minValue = db.session.query(func.min(SensorsDatabase.temperature)).scalar()
    meanValue = db.session.query(func.avg(
        SensorsDatabase.temperature)).scalar()
    return maxValue, minValue, meanValue
Example #52
def test_avg_age_rating_10():
	assert session.query(func.avg(Sailors.age).label('average')).filter(Sailors.rating == 10).scalar() == 35
Example #53
def start(start):

    start_date = session.query(Measurement.date,func.avg(Measurement.tobs),func.min(Measurement.tobs),func.max(Measurement.tobs)) \
             .filter(Measurement.date >= start).group_by(Measurement.date).all()

    return jsonify(start_date)
Example #54
def startend(start, end):
    session = Session(engine)
    temp_stationQ=session.query(Measurement.station, func.min(Measurement.tobs),func.max(Measurement.tobs),func.avg(Measurement.tobs)).\
    filter(Measurement.date>=start).filter(Measurement.date<=end).all()
    return jsonify(temp_stationQ)
Example #55
def calc_temps(start_date, end_date):
    return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
Example #56
#stmt = select([students]).where(or_(students.c.name == 'Ravi', students.c.id == 1))
rows = con.execute(stmt)
print(rows.fetchall())

stmt = select([students]).order_by(asc(students.c.id))
stmt = select([students]).order_by(desc(students.c.id))
stmt = select([students]).where(between(students.c.id, 2, 3))
print(con.execute(stmt).fetchall())

result = con.execute(select([func.now()]))
print(result.fetchone())

result = con.execute(select([func.count(students.c.id)]))
result = con.execute(select([func.max(students.c.id)]))
result = con.execute(select([func.min(students.c.id)]))
result = con.execute(select([func.avg(students.c.id).label('avg')]))
print(result.fetchone())

from sqlalchemy import union, union_all, except_, intersect
# u = union(addresses.select().where(addresses.c.email_add.like('%@gmail.com addresses.select().where(addresses.c.email_add.like('*****@*****.**'))))
u = union_all(
    addresses.select().where(addresses.c.email_add.like('*****@*****.**')),
    addresses.select().where(addresses.c.email_add.like('*****@*****.**')))
u = except_(
    addresses.select().where(addresses.c.email_add.like('*****@*****.**')),
    addresses.select().where(addresses.c.postal_add.like('%Pune')))
u = intersect(
    addresses.select().where(addresses.c.email_add.like('*****@*****.**')),
    addresses.select().where(addresses.c.postal_add.like('%Pune')))
print(con.execute(u).fetchall())
Example #57
def start_end(start_date, end_date):

    T_info2 = session.query(func.min(Measurement.tobs),func.avg(Measurement.tobs),func.max(Measurement.tobs)).\
                                  filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()

    return jsonify(T_info2)
Example #58
def start_end_date(start, end):

    #Create session to link database to Python
    session = Session(engine)

    #Design query to return min, max, and avg temperatures for all applicable start and end date range entered
    temp_date_range = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
    filter(Measurement.date >= start, Measurement.date <= end).all()

    #Retrieve min, max, and avg temp values from query results, create dictionary, and append to empty list
    temp_values_date_range = []
    for min_temp, max_temp, avg_temp in temp_date_range:
        temp_value_range_dict = {}
        temp_value_range_dict["min temperature"] = min_temp
        temp_value_range_dict["max temperature"] = max_temp
        temp_value_range_dict["avg temperature"] = avg_temp
        temp_values_date_range.append(temp_value_range_dict)

    #Render temp values in json format thru app
    return jsonify(temp_values_date_range)
Example #59
def start_date(start, end):

    start = datetime.strptime(start, '%Y%m%d')
    start_date = start.date()

    end = datetime.strptime(end, '%Y%m%d')
    end_date = end.date()

    min_max_avg_temp = session.query((Measurement.date), func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
        filter(Measurement.date >= start_date).group_by(Measurement.date).\
        filter(Measurement.date <= end_date).all()

    data = list(np.ravel(min_max_avg_temp))

    return jsonify(data)
Example #60
def calc_temps(start_date, end_date):
    session = Session(engine)

    return session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\
        filter(measurement.date >= start_date).filter(measurement.date <= end_date).all()