Example #1
File: turnovers.py Project: alishir/tcr
 def __init__(self, context):
     ResourcesQueryBuilder.__init__(self, context)
     self._cashflows_from_sub = (
         DBSession.query(
             func.sum(Cashflow.sum).label('expenses'),
             Cashflow.account_item_id.label('account_item_id'),
         )
         .join(Subaccount, Cashflow.subaccount_from)
         .group_by(Cashflow.account_item_id)
     )
     self._cashflows_to_sub = (
         DBSession.query(
             func.sum(Cashflow.sum).label('revenue'),
             Cashflow.account_item_id.label('account_item_id'),
         )
         .join(Subaccount, Cashflow.subaccount_to)
         .group_by(Cashflow.account_item_id)
     )
     self._fields = {
         'id': AccountItem.id,
         '_id': AccountItem.id,
         'name': AccountItem.name,
         'text': AccountItem.name,
         'parent_id': AccountItem.parent_id.label('parent_id')
     }
     self.build_query()
Example #2
File: rank.py Project: xxguo/leopard
def weekList():
    if not redis.llen('rank:week'):
        rows = db_session.query(
            User.id,
            User.username,
            label('number', func.count(Investment.amount)),
            label('total_amount', func.sum(Investment.amount))
        ).filter(
            Investment.user_id == User.id,
            cast(Investment.added_at, Date) <= datetime.datetime.today(),
            cast(Investment.added_at, Date) >= datetime.datetime.today() -
            datetime.timedelta(weeks=1)
        ).group_by(User.id).order_by(
            func.sum(Investment.amount).desc()
        ).limit(15).all()

        rank_list = []

        for i in rows:
            i = dict(zip(i.keys(), i))
            data = {
                'id': i['id'],
                'username': i['username'],
                'total_amount': float(i['total_amount']),
                'number': i['number']
            }
            rank_list.append(data)
            redis.rpush('rank:week', json.dumps(data))
        redis.expire('rank:week', 3600)
    else:
        rank_list = [json.loads(i.decode()) for i in redis.lrange('rank:week', 0, -1)]

    return rank_list
Example #3
File: admin.py Project: opmuse/opmuse
    def dashboard(self):
        library_path = cherrypy.request.app.config.get('opmuse').get('library.path')

        stat = os.statvfs(os.path.realpath(library_path))

        disk = {
            'path': library_path,
            'free': stat.f_frsize * stat.f_bavail,
            'total': stat.f_frsize * stat.f_blocks
        }

        formats = (get_database().query(Track.format, func.sum(Track.duration),
                                        func.sum(Track.size), func.count(Track.format)).group_by(Track.format).all())

        stats = {
            'tracks': library_dao.get_track_count(),
            'invalid': library_dao.get_invalid_track_count(),
            'albums': library_dao.get_album_count(),
            'artists': library_dao.get_artist_count(),
            'track_paths': library_dao.get_track_path_count(),
            'duration': library_dao.get_track_duration(),
            'size': library_dao.get_track_size(),
            'scanning': cherrypy.request.library.scanning,
            'processed': cherrypy.request.library.processed,
            'files_found': cherrypy.request.library.files_found
        }

        return {
            'cache_size': cache.storage.size(),
            'disk': disk,
            'stats': stats,
            'formats': formats
        }
Example #4
def get_sales_by_date_and_user(dbapi, start_date, end_date):
    # Restrict to the requested date range and aggregate per (day, seller, user).
    query = dbapi.db_session.query(
            func.DATE(NSale.timestamp), NSale.seller_codename,
            NSale.user_id,
            func.sum(NSale.pretax_amount_usd),
            func.sum(NSale.tax_usd)) \
        .filter(NSale.timestamp >= start_date,
                NSale.timestamp <= end_date) \
        .group_by(func.DATE(NSale.timestamp), NSale.seller_codename,
                  NSale.user_id)
    for date, codename, user, pretax, tax in query:
        yield date, codename, user, (pretax or 0) + (tax or 0)
Example #5
File: rank.py Project: xxguo/leopard
def totalList():
    if not redis.llen('rank:total'):
        total_row = db_session.query(
            User.id,
            User.username,
            label('number', func.count(Investment.amount)),
            label('total_amount', func.sum(Investment.amount))
        ).filter(
            Investment.user_id == User.id,
        ).group_by(User.id).order_by(
            func.sum(Investment.amount).desc()
        ).limit(15).all()

        total_list = []

        for i in total_row:
            i = dict(zip(i.keys(), i))
            data = {
                'id': i['id'],
                'username': i['username'],
                'total_amount': float(i['total_amount']),
                'number': i['number']
            }
            total_list.append(data)
            redis.rpush('rank:total', json.dumps(data))
        redis.expire('rank:total', 3600)
    else:
        total_list = [json.loads(i.decode()) for i in redis.lrange('rank:total', 0, -1)]

    return total_list
Example #6
    def _ops_for_date_range(self, balance_uids, start_date, end_date, tags=None, change_categories=None):
        model = self.get_sa_model()
        db = self.get_sa_session()

        conditions = [or_(*[model.BalanceChange.balance_uid == balance_uid for balance_uid in balance_uids])]
        if start_date:
            conditions.append(model.BalanceChange.occurred_on >= start_date)

        if end_date:
            conditions.append(model.BalanceChange.occurred_on <= end_date)

        if isinstance(tags, list) and len(tags) > 0:
            conditions.extend([model.BalanceChange.tags.any(tag=tag.strip().lower()) for tag in tags if tag is not None and tag.strip() != ''])

        if isinstance(change_categories, list) and len(change_categories) > 0:
            conditions.extend([model.BalanceChange.change_category_uid == value.strip() for value in change_categories if value is not None and value.strip() != ''])

        try:
            summary = db.execute(select([
                    func.coalesce(func.sum(
                        case([[model.BalanceChange.amount < 0, model.BalanceChange.amount]], else_=0)), 0).label("expenses"),
                    func.coalesce(func.sum(
                        case([[model.BalanceChange.amount > 0, model.BalanceChange.amount]], else_=0)), 0).label("incomes")
                ],
                and_(*conditions),
                from_obj=[model.balance_changes_table])).fetchone()

            return {
                "expenses": summary[0],
                "incomes": summary[1],
            }
        except Exception:
            log.error(_("Can't get summary"), exc_info=1)
            return 0
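For reference, the same expenses/incomes split under SQLAlchemy 1.4+/2.0, where case() takes positional (condition, value) tuples, might look roughly like this (a sketch; BalanceChange and session stand in for the mapped class and session used above):

from sqlalchemy import case, func, select

# Conditional aggregation in one pass over the table: negative amounts
# summed as expenses, positive ones as incomes, NULL sums coalesced to 0.
stmt = select(
    func.coalesce(func.sum(
        case((BalanceChange.amount < 0, BalanceChange.amount), else_=0)), 0
    ).label("expenses"),
    func.coalesce(func.sum(
        case((BalanceChange.amount > 0, BalanceChange.amount), else_=0)), 0
    ).label("incomes"),
).where(*conditions)

expenses, incomes = session.execute(stmt).one()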
Example #7
File: stats.py Project: awoland/ichnaea
def leaders_weekly(session, batch=20):
    result = {'new_cell': [], 'new_wifi': []}
    today = util.utcnow().date()
    one_week = today - timedelta(7)

    score_rows = {}
    userids = set()
    for name in ('new_cell', 'new_wifi'):
        score_rows[name] = session.query(
            Score.userid, func.sum(Score.value)).filter(
            Score.key == ScoreKey[name]).filter(
            Score.time >= one_week).order_by(
            func.sum(Score.value).desc()).group_by(
            Score.userid).limit(batch).all()
        userids.update(set([s[0] for s in score_rows[name]]))

    if not userids:  # pragma: no cover
        return result

    user_rows = session.query(User.id, User.nickname).filter(
        User.id.in_(userids)).all()
    users = dict(user_rows)

    for name, rows in score_rows.items():
        for userid, value in rows:
            nickname = users.get(userid, 'anonymous')
            if len(nickname) > 24:  # pragma: no cover
                nickname = nickname[:24] + u'...'
            result[name].append(
                {'nickname': nickname, 'num': int(value)})

    return result
Example #8
File: models.py Project: doyousoft/pyvac
    def get_previsions(cls, session, end_date=None):
        """ Retrieve future validated requests per user """

        # Searching for requests with a timeframe
        #         [NOW()] ---------- ([end_date])?
        # examples:
        #      <f --r1---- t>
        #                 <f --r2-- t>
        #                       <f ------r3-------- t>
        #      <f ----------- r4 -------------------- t>
        # => Matching periods are periods ending after NOW(),
        #    and, if an end_date is specified, starting before it:

        if end_date:
            future_requests = session.query(cls.user_id, func.sum(cls.days)).\
                                filter(cls.date_to >= func.current_timestamp(),
                                       cls.date_from < end_date,
                                       cls.vacation_type_id == 1,
                                       cls.status == 'APPROVED_ADMIN').\
                                group_by(cls.user_id).\
                                order_by(cls.user_id)
        else:
            future_requests = session.query(cls.user_id, func.sum(cls.days)).\
                                filter(cls.date_to >= func.current_timestamp(),
                                       cls.vacation_type_id == 1,
                                       cls.status == 'APPROVED_ADMIN').\
                                group_by(cls.user_id).\
                                order_by(cls.user_id)

        ret = {}
        for user_id, total in future_requests:
            ret[user_id] = total

        return ret
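The interval logic in the comment block boils down to two comparisons; the same predicate in plain Python (a standalone sketch, names hypothetical):

def overlaps(date_from, date_to, now, end_date=None):
    # A request [date_from, date_to] matches when it ends after now
    # and, when an end_date is given, starts before it.
    return date_to >= now and (end_date is None or date_from < end_date)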
Example #9
def prepareSuppliedValueProportionGraphData():
    data = request.form['data'] if request.form['data'] else None
    date = datetime(int(data[:4]), int(data[4:]), 1)
    begin_date = datetime(date.year, date.month, 1)
    today = datetime.utcnow() + timedelta(hours=9)
    if date.year == today.year and date.month == today.month:
        last_day = today.day
    else:
        last_day = calendar.monthrange(date.year, date.month)[1]
    end_date = datetime(date.year, date.month, last_day) + timedelta(days=1)
    products = SuppliedProducts.query\
        .join(Product)\
        .join(Maker)\
        .join(Supply)\
        .with_entities(Maker.name,
                       func.sum(Product.price_retail * SuppliedProducts.quantity))\
        .filter(Supply.created_dt >= begin_date)\
        .filter(Supply.created_dt <= end_date)\
        .group_by(Product.maker_id)\
        .order_by(func.sum(Product.price_retail * SuppliedProducts.quantity).desc())\
        .all()
    data = []
    for p in products:
        item = {}
        item['maker'] = p[0]
        item['total'] = str(p[1])
        data.append(item)
    return json.dumps(data)
Example #10
def get_dag_duration_info():
    '''get duration of currently running DagRuns
    :return dag_info
    '''
    driver = Session.bind.driver
    durations = {
        'pysqlite': func.sum(
            (func.julianday(func.current_timestamp()) - func.julianday(DagRun.start_date)) * 86400.0
        ),
        'mysqldb': func.sum(func.timestampdiff(text('second'), DagRun.start_date, func.now())),
        'default': func.sum(func.now() - DagRun.start_date)
    }
    duration = durations.get(driver, durations['default'])

    with session_scope(Session) as session:
        return session.query(
            DagRun.dag_id,
            DagRun.run_id,
            duration.label('duration')
        ).group_by(
            DagRun.dag_id,
            DagRun.run_id
        ).filter(
            DagRun.state == State.RUNNING
        ).all()
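The driver dispatch exists because interval arithmetic is dialect-specific: SQLite has no native date subtraction (hence julianday), and MySQL needs TIMESTAMPDIFF. A sketch of consuming the rows, e.g. feeding a hypothetical Prometheus gauge:

from prometheus_client import Gauge

# Hypothetical metric; one sample per currently running DagRun.
DAG_DURATION = Gauge('airflow_dag_run_duration_seconds',
                     'Duration of currently running DagRuns',
                     ['dag_id', 'run_id'])

for dag_id, run_id, duration in get_dag_duration_info():
    # Postgres returns a timedelta for NOW() - start_date; the other
    # branches already yield a number of seconds.
    seconds = duration.total_seconds() if hasattr(duration, 'total_seconds') else float(duration)
    DAG_DURATION.labels(dag_id=dag_id, run_id=run_id).set(seconds)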
Example #11
File: routes.py Project: Kozea/ymci
    def get(self, project_id, width=None, height=None):
        config = graph_config(width, height)
        config.logarithmic = True
        svg = pygal.Line(config)
        builds = (
            self.db.query(
                Build.build_id,
                func.sum(Coverage.lines).label('lines'),
                func.sum(Coverage.cls).label('cls'))
            .select_from(Build)
            .join(Coverage, Build.coverages)
            .filter(Build.project_id == project_id)
            .group_by(Build.build_id)
            .order_by(Build.build_id)
            .all())

        svg.add('Lines', [{
            'xlink': self.reverse_url('ProjectLog', project_id, b.build_id),
            'value': b.lines or None
        } for b in builds])

        svg.add('Classes', [{
            'xlink': self.reverse_url('ProjectLog', project_id, b.build_id),
            'value': b.cls or None
        } for b in builds])

        if width and height:
            svg.x_labels = ['#%d' % b.build_id for b in builds]

        svg.value_formatter = lambda x: '%d' % (x or 0)
        svg.title = 'Source metric'
        self.set_header("Content-Type", "image/svg+xml")
        self.write(svg.render())
Example #12
File: api.py Project: adamngray/Magic
def getSetStatistics(id):

	results = db.session.query(
		Entity, Set,
		func.sum(Statistics.match_wins).label("total_match_wins"),
		func.sum(Statistics.match_losses).label("total_match_losses"),
		func.sum(Statistics.game_wins).label("total_game_wins"),
		func.sum(Statistics.game_losses).label("total_game_losses")) \
		.join(Statistics).join(Tournament).join(TournamentType).join(Set) \
		.filter(TournamentType.description == 'Normal') \
		.filter(Set.id == id) \
		.group_by(Entity.id, Set.id).all()

	tournaments = db.session.query(Entity, Tournament.id) \
		.join(Statistics).join(Tournament).join(TournamentType).join(Set) \
		.filter(Statistics.position == 1) \
		.filter(Statistics.matches_unfinished == 0) \
		.filter(TournamentType.description == 'Normal') \
		.filter(Set.id == id).all()

	statistics = []
	for row in results:

		wins = 0
		for tournament in tournaments:
			if row.Entity == tournament[0] and not unfinishedMatchesInTournament(tournament[1]):
				wins += 1

		rowDictionary = {'total_match_wins':row.total_match_wins,
						 'total_match_losses':row.total_match_losses,
						 'total_game_wins':row.total_game_wins,
						 'total_game_losses':row.total_game_losses,		
			 			 'match_win_percentage':row.total_match_wins/(row.total_match_wins + row.total_match_losses) * 100 if (row.total_match_wins + row.total_match_losses) > 0 else 0.0,
						 'game_win_percentage':row.total_game_wins/(row.total_game_wins + row.total_game_losses) * 100 if (row.total_game_wins + row.total_game_losses) > 0 else 0.0,
						 'total_matches_played':row.total_match_wins + row.total_match_losses,
						 'player':row.Entity.participants[0].player.name,
						 'set':row.Set.name,
						 'tournament_wins':wins}
		statistics.append(rowDictionary)	

	return addPositions(statistics)	
Example #13
    def get(self):
        i = request.args
        today = datetime.datetime.now().strftime('%Y%m%d')
        date = i.get('date', today).strip()
        type_ = i.get('type', 'total').strip()  # 收入类型:card/cash/total

        if type_ in ('card', 'total'):
            if date == today:
                TransTable = aliased(Trans)
            else:
                TransTable = aliased(HistoryTrans)

            q_card = db_session.query(func.sum(TransTable.amount).label('amount')) \
                .filter(TransTable.trans_date == date) \
                .filter(TransTable.trans_code == '000010') \
                .filter(TransTable.status == '0')

            if session['user_level'] == 'unit':
                q_card = q_card.filter(TransTable.unit_no == session['unit_no'])
            if session['user_level'] == 'shop':
                q_card = q_card.filter(TransTable.shop_no == session['shop_no'])

        if type_ in ('cash', 'total'):
            q_cash = db_session.query(func.sum(SaleOrderInfo.cash_pay_amount).label('amount')) \
                .filter(SaleOrderInfo.is_paid == True) \
                .filter(SaleOrderInfo.pay_time == '20000000')

        if type_ == 'card':
            return jsonify(success=True, total=q_card.one().amount)
        if type_ == 'cash':
            return jsonify(success=True, total=q_cash.one().amount)
        # type 'total': combine both sums; either side may be NULL, hence `or 0`
        return jsonify(success=True,
                       total=(q_card.one().amount or 0) + (q_cash.one().amount or 0))
Example #14
def main(argv=None):

  fig = plt.figure(figsize=(3,2.5))
  
  #fig.suptitle("Residential Growth by Year", fontsize=18, weight='bold')
  s = Session()
  
  region = s.query(Context).get(3)
  q = s.query(TCADSegment).join(TCADSegment.improvement).filter(TCADImprovement.description.in_(['FOURPLEX','APARTMENT 100+','CONDO (STACKED)','APARTMENT 50-100', '1 FAM DWELLING', '2 FAM DWELLING', '1/2 DUPLEX', 'APARTMENT 5-25', 'APARTMENT 26-49']))

  X = range(1980, 2010)
  All = array( [ q.filter( TCADSegment.year_built == x ).value(func.sum(TCADSegment.area)) for x in X ], dtype=float )
  All_norm = array( [ q.filter( TCADSegment.year_built <= x ).value(func.sum(TCADSegment.area)) for x in X ], dtype=float )
  East = array( [ q.join(TCADImprovement.parcel).filter(TCAD_2010.the_geom.within(region.geom)).filter(TCADSegment.year_built == x).value(func.sum(TCADSegment.area)) for x in X ], dtype=float )
  East_norm = array( [ q.join(TCADImprovement.parcel).filter(TCAD_2010.the_geom.within(region.geom)).filter(TCADSegment.year_built <= x).value(func.sum(TCADSegment.area)) for x in X ], dtype=float )
             
  ax = plt.subplot(111)
  p2=ax.plot(X,100 * All/All_norm, color='.75')
  p1=ax.plot(X,100 * East/East_norm, color='k', lw=2)
  
  
  ax.set_ylabel("Growth in Square Feet (%)")
  ax.set_xlabel("Year")
  ax.grid(True)
  ax.axis([1980,2010,0,5])
  ax.legend([p2,p1],['All Austin','East Side'], loc='upper left')
    
  plt.subplots_adjust(right=.93, top=.95, bottom=.15, left=.15)
  
  show()
Example #15
def top_spenders(limit=10, start_date=None, end_date=None):
    # Base query: total amount dropped per user, largest first.
    query = DBSession.query(
                MoneyLog.username,
                func.sum(func.abs(MoneyLog.amount))
            ).filter(
                MoneyLog.reason == 'drop',
                MoneyLog.username != 'openhouse'
            )
    # Restrict to the date range only when a valid one is given.
    if start_date and end_date and start_date <= end_date:
        query = query.filter(
            MoneyLog.time >= start_date,
            MoneyLog.time <= end_date
        )
    return query.group_by(
                MoneyLog.username
            ).order_by(
                'sum_1 desc'
            ).limit(
                limit
            ).all()
Example #16
def member_payments():
    q = Session.query(
            Member.id, Member.nick,
            func.sum(Payment.amount) / func.sum(Payment.months)
        ).join((Payment, Member.id == Payment.member_id)) \
        .filter(Member.active == True) \
        .group_by(Member.id).order_by(Member.id)

    return q
Example #17
File: user.py Project: imclab/skylines
def _get_last_year_statistics():
    query = db.session.query(func.count('*').label('flights'),
                             func.sum(Flight.olc_classic_distance).label('distance'),
                             func.sum(Flight.duration).label('duration')) \
                      .filter(Flight.pilot == g.user) \
                      .filter(Flight.date_local > (date.today() - timedelta(days=365))) \
                      .first()

    last_year_statistics = dict(flights=0,
                                distance=0,
                                duration=timedelta(0),
                                speed=0)

    if query and query.flights > 0:
        duration_seconds = query.duration.days * 24 * 3600 + query.duration.seconds

        if duration_seconds > 0:
            last_year_statistics['speed'] = float(query.distance) / duration_seconds

        last_year_statistics['flights'] = query.flights
        last_year_statistics['distance'] = query.distance
        last_year_statistics['duration'] = query.duration

        last_year_statistics['average_distance'] = query.distance / query.flights
        last_year_statistics['average_duration'] = query.duration / query.flights

    return last_year_statistics
Example #18
    def get(self):
        args = grouped_revenue_parser.parse_args()
        years = args['years']
        levels = args['levels']

        # Columns for the requested levels
        levels_columns = [ revenue_levels[l] for l in levels ]
        # Sum Outcome and predicted values
        complete_query = levels_columns + [ func.sum(Revenue.monthly_predicted).label('Total predicted'),
                                            func.sum(Revenue.monthly_outcome).label('Total outcome') ]

        # Create the query
        revenue_query_base = db.session.query(*complete_query)

        if not years:
            years = ['all']

        revenue_grouped = {}
        for year in years:
            year = str(year)
            if year != 'all':
                revenue_data = (revenue_query_base.filter(extract('year', Revenue.date) == year)
                                                  .group_by(*levels_columns))
            else:
                revenue_data = revenue_query_base.group_by(*levels_columns)

            revenue_grouped[year] = [{'category_code': rev[:len(levels)],
                                      'total_predicted': str(rev[len(levels)]),
                                      'total_outcome': str(rev[len(levels) + 1])} for rev in revenue_data.all()]

        return revenue_grouped
Example #19
File: utils.py Project: SwoJa/ruman
def get_time_count(topic, start_ts, end_ts, unit=MinInterval):  # counts per sentiment type over time
    count = {}
    if (end_ts - start_ts < unit):
        upbound = long(math.ceil(end_ts / (unit * 1.0)) * unit)
        items = db.session.query(PropagateCount.mtype,func.sum(PropagateCount.dcount)).filter(PropagateCount.end==upbound, \
                                                       PropagateCount.topic==topic).group_by(PropagateCount.mtype).all()
        count[end_ts]={}
        for item in items:
            try:
                count[end_ts][item[0]] += item[1]
            except KeyError:
                count[end_ts][item[0]] = item[1]
    else:
        upbound = long(math.ceil(end_ts / (unit * 1.0)) * unit)
        lowbound = long((start_ts / unit) * unit)
        interval = (upbound-lowbound)/unit
        for i in range(interval, 0, -1):
            begin_ts = upbound - unit * i
            end_ts = begin_ts + unit
            items = []
            try:
                items = db.session.query(PropagateCount.mtype,func.sum(PropagateCount.dcount)).filter(PropagateCount.end>begin_ts, \
                                                         PropagateCount.end<=end_ts, \
                                                         PropagateCount.topic==topic).group_by(PropagateCount.mtype).all()
            except Exception:
                db.session.rollback()
            count[end_ts] = {}
            for item in items:
                try:
                    count[end_ts][item[0]] += item[1]
                except KeyError:
                    count[end_ts][item[0]] = item[1]
    return count
Example #20
File: models.py Project: yeehanchan/noi2
    def nearest_neighbors(self, limit=10):
        '''
        Returns a list of (user, score) tuples with the closest matching
        skills.  If they haven't answered the equivalent skill question, we
        consider that a very big difference (12).

        Order is closest to least close, which is an ascending score.
        '''
        my_skills = aliased(UserSkill, name='my_skills', adapt_on_names=True)
        their_skills = aliased(UserSkill, name='their_skills', adapt_on_names=True)

        # difference we assume for user that has not answered question
        unanswered_difference = (LEVELS['LEVEL_I_CAN_DO_IT']['score'] -
                                 LEVELS['LEVEL_I_WANT_TO_LEARN']['score']) * 2

        return User.query_in_deployment().\
                add_column(((len(self.skills) - func.count(func.distinct(their_skills.id))) *
                            unanswered_difference) + \
                       func.sum(func.abs(their_skills.level - my_skills.level))).\
                filter(their_skills.user_id != my_skills.user_id).\
                filter(User.id == their_skills.user_id).\
                filter(their_skills.name == my_skills.name).\
                filter(my_skills.user_id == self.id).\
                group_by(User).\
                order_by(((len(self.skills) - func.count(func.distinct(their_skills.id)))
                          * unanswered_difference) + \
                     func.sum(func.abs(their_skills.level - my_skills.level))).\
                limit(limit)
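Usage is then a matter of iterating the (user, score) pairs, ordered closest first (a sketch; `me` is any User instance):

# Show the five closest matches and their skill-difference scores.
for user, score in me.nearest_neighbors(limit=5):
    print(user.id, score)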
Example #21
    def get(self):
        # Extract the arguments from the GET request
        args = revenue_code_parser.parse_args()
        codes = args['code']

        series = {}
        for code in codes:
            # revenues_results = db.session.query(
            #     Revenue.date,
            #     Revenue.monthly_predicted,
            #     Revenue.monthly_outcome)\
            #     .filter(Revenue.code.like(code+'%')).all()
            try:
                formatted_code = RevenueCode.format_code(code)
            except Exception:
                formatted_code = code
            code_levels = formatted_code.split('.')
            args = [revenue_levels[l] == v for l, v in enumerate(code_levels)]
            q = db.session.query(
                Revenue.date,
                func.sum(Revenue.monthly_predicted),
                func.sum(Revenue.monthly_outcome)
            ).filter(and_(*args)).group_by(Revenue.date)
            revenues_results = q.all()

            series[code] = {'date': [r[0].isoformat() for r in revenues_results],
                            'predicted': [str(r[1]) for r in revenues_results],
                            'outcome': [str(r[2]) for r in revenues_results]}

        return series
Example #22
 def at_snapshot(snapshot):
     p_by_user = db.query(func.sum(Job.processors), Job.user).group_by(Job.user).filter_by(cluster=cluster, snapshot=snapshot,
         status='R').all()
     n_free_procs = db.query(func.sum(Node.n_procs)).filter_by(cluster=cluster,
         snapshot=snapshot, state='free').first()[0]
     p_by_user.append((n_free_procs, u'free'))
     return p_by_user, snapshot.time.isoformat()
Example #23
File: seed.py Project: magshi/golem
def load_globalcounts(list_of_wordcounts):
    """
    Adds wordcounts for all unique words. There should only be one row per unique word.
    """
    # i = 0

    for localcount_dict in list_of_wordcounts:
        # if i < 5:
        for word, count in localcount_dict.iteritems():
            item = session.query(GlobalCount).filter(GlobalCount.term == word).first()
            if item:
                print "%r is already in globalcounts. Updating count..." % word
                # update the global count for this word, because we have added new songs with more occurrences of this word
                q = session.query(LocalCount.term, func.sum(LocalCount.count))
                q = q.group_by(LocalCount.term)
                q = q.filter(LocalCount.term == word)
                results = q.all()

                # print "Current count for %r is %d" % (item.term, item.count)
                item.count = results[0][1]
                print "Updating %r's count to %d" % (item.term, item.count)
                session.commit()

            else:
                print "%r not in globalcounts table, creating new row" % word
                q = session.query(LocalCount.term, func.sum(LocalCount.count))
                q = q.group_by(LocalCount.term)
                q = q.filter(LocalCount.term == word)
                results = q.all()

                total = results[0][1]
                new_row = GlobalCount(term = word, count = total)
                session.add(new_row)
                # you must commit before you query the same word/item again!
                session.commit()
Example #24
def test_proportion_overlap(loader):
    # Calculate proportion of each parcel overlapped by water.
    parcels = loader.tables.sample.heather_farms
    water = loader.tables.sample.hf_water
    assert not hasattr(parcels, 'proportion_water')
    spatialtoolz.proportion_overlap(parcels, water, 'proportion_water')
    assert hasattr(parcels, 'proportion_water')

    # Build DataFrame from columns of parcels table.
    columns = [parcels.parcel_id, parcels.geom.ST_Area(),
               parcels.proportion_water]
    parcels_df = db_to_df(columns, index_col='parcel_id')

    # Assert that proportion overlap values are between 0 and 1.
    assert parcels_df.proportion_water.dtype == float
    assert not (parcels_df.proportion_water < 0).any()
    assert not (parcels_df.proportion_water > 1).any()

    # Assert that sum of overlapped parcel area is <= total water area.
    with loader.database.session() as sess:
        overlapped_area = sess.query(
            func.sum(parcels.proportion_water * parcels.geom.ST_Area())
        ).scalar()
        water_area = sess.query(func.sum(water.geom.ST_Area())).scalar()
    assert overlapped_area <= water_area
Example #25
 def get_dop(cls, p_kode, p_tahun):
     pkey = FixLength(NOP)
     pkey.set_raw(p_kode)
     query = pbb_DBSession.query( func.concat(cls.kd_propinsi, '.').concat(cls.kd_dati2).concat('-').\
                concat(cls.kd_kecamatan).concat('.').concat(cls.kd_kelurahan).concat('-').\
                concat(cls.kd_blok).concat('.').concat(cls.no_urut).concat('-').\
                concat(cls.kd_jns_op).label('nop'),
           cls.thn_pajak_sppt, cls.luas_bumi_sppt, cls.njop_bumi_sppt, 
           cls.luas_bng_sppt, cls.njop_bng_sppt, cls.nm_wp_sppt,
           cls.pbb_yg_harus_dibayar_sppt, cls.status_pembayaran_sppt,
           DatObjekPajak.jalan_op, DatObjekPajak.blok_kav_no_op, 
           DatObjekPajak.rt_op, DatObjekPajak.rw_op,
           func.coalesce(SpptOpBersama.luas_bumi_beban_sppt,0).label('luas_bumi_beban'), 
           func.coalesce(SpptOpBersama.luas_bng_beban_sppt,0).label('luas_bng_beban'), 
           func.coalesce(SpptOpBersama.njop_bumi_beban_sppt,0).label('njop_bumi_beban'), 
           func.coalesce(SpptOpBersama.njop_bng_beban_sppt,0).label('njop_bng_beban'),
           Kelurahan.nm_kelurahan, Kecamatan.nm_kecamatan, Dati2.nm_dati2,
           func.max(PembayaranSppt.tgl_pembayaran_sppt).label('tgl_bayar'),
           func.sum(func.coalesce(PembayaranSppt.jml_sppt_yg_dibayar,0)).label('jml_sppt_yg_dibayar'),
           func.sum(func.coalesce(PembayaranSppt.denda_sppt,0)).label('denda_sppt'),).\
           outerjoin(DatObjekPajak).\
           outerjoin(SpptOpBersama).\
           outerjoin(PembayaranSppt,and_(
               cls.kd_propinsi==PembayaranSppt.kd_propinsi,
               cls.kd_dati2==PembayaranSppt.kd_dati2,
               cls.kd_kecamatan==PembayaranSppt.kd_kecamatan,
               cls.kd_kelurahan==PembayaranSppt.kd_kelurahan,
               cls.kd_blok==PembayaranSppt.kd_blok,
               cls.no_urut==PembayaranSppt.no_urut,
               cls.kd_jns_op==PembayaranSppt.kd_jns_op,
               cls.thn_pajak_sppt==PembayaranSppt.thn_pajak_sppt
               )).\
           filter(cls.kd_propinsi == Kelurahan.kd_propinsi, 
                 cls.kd_dati2 == Kelurahan.kd_dati2, 
                 cls.kd_kecamatan == Kelurahan.kd_kecamatan, 
                 cls.kd_kelurahan == Kelurahan.kd_kelurahan,).\
           filter(cls.kd_propinsi == Kecamatan.kd_propinsi, 
                 cls.kd_dati2 == Kecamatan.kd_dati2, 
                 cls.kd_kecamatan == Kecamatan.kd_kecamatan,).\
           filter(cls.kd_propinsi == Dati2.kd_propinsi, 
                 cls.kd_dati2 == Dati2.kd_dati2,).\
           group_by(cls.kd_propinsi, cls.kd_dati2, cls.kd_kecamatan, cls.kd_kelurahan, cls.kd_blok,
                 cls.no_urut, cls.kd_jns_op, cls.thn_pajak_sppt, cls.luas_bumi_sppt, cls.njop_bumi_sppt, 
                 cls.luas_bng_sppt, cls.njop_bng_sppt, cls.pbb_yg_harus_dibayar_sppt, 
                 cls.status_pembayaran_sppt, DatObjekPajak.jalan_op, DatObjekPajak.blok_kav_no_op, 
                 DatObjekPajak.rt_op, DatObjekPajak.rw_op,
                 SpptOpBersama.luas_bumi_beban_sppt, 
                 SpptOpBersama.luas_bng_beban_sppt, 
                 SpptOpBersama.njop_bumi_beban_sppt, 
                 SpptOpBersama.njop_bng_beban_sppt,
                 Kelurahan.nm_kelurahan, Kecamatan.nm_kecamatan, Dati2.nm_dati2,)
     return query.filter(
                         cls.kd_propinsi == pkey['kd_propinsi'], 
                         cls.kd_dati2 == pkey['kd_dati2'], 
                         cls.kd_kecamatan == pkey['kd_kecamatan'], 
                         cls.kd_kelurahan == pkey['kd_kelurahan'], 
                         cls.kd_blok == pkey['kd_blok'], 
                         cls.no_urut == pkey['no_urut'], 
                         cls.kd_jns_op == pkey['kd_jns_op'],
                         cls.thn_pajak_sppt==p_tahun)
Example #26
    def _total(self, balance_uid):
        model = request.environ["sqlalchemy.model"]
        db = request.environ["sqlalchemy.session"]

        incomes = db.execute(
            select(
                [func.coalesce(func.sum(model.BalanceChange.c.amount), 0)],
                and_(model.BalanceChange.c.balance_uid == balance_uid, model.BalanceChange.c.is_income == True),
                from_obj=[model.balance_changes_table],
            )
        ).fetchone()[0]

        expenses = db.execute(
            select(
                [func.coalesce(func.sum(model.BalanceChange.c.amount), 0)],
                and_(model.BalanceChange.c.balance_uid == balance_uid, model.BalanceChange.c.is_income == False),
                from_obj=[model.balance_changes_table],
            )
        ).fetchone()[0]

        try:
            return incomes - expenses
        except Exception:
            log.error("", exc_info=1)
            return 0
Example #27
    def GetInvoiceTotals(self, tenantId, param=None):
        """
			Calculates invoice totals, amounts, due, etc.,
		"""
        if tenantId:
            query = DBSession.query(
                func.count(Order.Id).label("Count"),
                func.ifnull(func.sum(Order.OrderAmount), 0).label("TotalAmount"),
                func.ifnull(
                    func.sum(func.IF(Order.PaidAmount >= Order.OrderAmount, Order.OrderAmount, Order.PaidAmount)), 0
                ).label("PaidAmount"),
            )

            query = query.filter(Order.TenantId == tenantId, Order.Status == True)

            if param:
                query = self.applySearchParam(query, param)

            totals = query.first()

            if totals:
                oq = query.filter((Order.OrderAmount - Order.PaidAmount) > 0.5, Order.DueDate < func.now()).subquery()
                totals.Overdues = DBSession.query(
                    oq.c.Count, (oq.c.TotalAmount - oq.c.PaidAmount).label("OverdueAmount")
                ).first()

            return totals
        return None
Example #28
    def get(self):
        i = request.args
        unit_no = session['unit_no']
        user_level = session['user_level']
        page, limit = int(i.get('page', 1)), int(i.get('limit', 10))

        q = db_session.query(
            func.sum(CardInfo.amount).label('amount'),
            func.sum(CardInfo.points).label('points'),
            func.count(CardInfo.card_no).label('count'),
            UnitInfo.unit_no,
            UnitInfo.unit_name
        )
        q = q.outerjoin(UnitInfo, CardInfo.unit_no == UnitInfo.unit_no)
        q = q.group_by(UnitInfo.unit_no, UnitInfo.unit_name)

        if user_level == 'unit':
            q = q.filter(UnitInfo.unit_no == unit_no)

        if user_level in ['shop', 'operator']:
            return jsonify(success=False, msg=rspmsg.PERMISSION_DENIDE)

        total = q.count()
        balances = q.limit(limit).offset((page - 1) * limit).all()
        return jsonify(success=True, total=total, page=page, limit=limit,
                       data=[{'amount': b.amount, 'points': b.points, 'count': b.count,
                              'unit_no': b.unit_no, 'unit_name': b.unit_name} for b in balances])
Example #29
File: reader.py Project: xybydy/kirilim
def create_summary_accs():
    query = db.session.query(db.Hesaplar.ana_hesap,
                             db.Hesaplar.lead_code,
                             func.sum(db.Hesaplar.py1).label('py1'),
                             func.sum(db.Hesaplar.py2).label('py2'),
                             func.sum(db.Hesaplar.cy).label('cy'),
                             ).group_by('ana_hesap')

    for k in query.all():
        unmapped = None

        if k.lead_code == 'Unmapped':
            unmapped = db.session.query(db.Hesaplar).filter_by(len=3,
                                                               ana_hesap=k.ana_hesap).first() or db.session.query(
                db.Hesaplar).filter_by(ana_hesap=k.ana_hesap).first()

        source = db.session.query(db.Lead).filter_by(account=k.ana_hesap).first()

        main_source = unmapped or source
        name = main_source.name

        db.session.add(
            db.Hesaplar(number=k.ana_hesap, ana_hesap=k.ana_hesap, name=name, lead_code=main_source.lead_code, cy=k.cy,
                        py1=k.py1, py2=k.py2, len=3))

    db.session.commit()
Example #30
    def _get_dep_statuses(self, ti, session, dep_context):
        TI = airflow.models.TaskInstance
        TR = airflow.models.TriggerRule

        # Checking that all upstream dependencies have succeeded
        if not ti.task.upstream_list:
            yield self._passing_status(
                reason="The task instance did not have any upstream tasks.")
            return

        if ti.task.trigger_rule == TR.DUMMY:
            yield self._passing_status(reason="The task had a dummy trigger rule set.")
            return

        # TODO(unknown): this query becomes quite expensive with dags that have many
        # tasks. It should be refactored to let the task report to the dag run and get the
        # aggregates from there.
        qry = (
            session
            .query(
                func.coalesce(func.sum(
                    case([(TI.state == State.SUCCESS, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.EXCLUDED, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.SKIPPED, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.FAILED, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.UPSTREAM_FAILED, 1)], else_=0)), 0),
                func.count(TI.task_id),
            )
            .filter(
                TI.dag_id == ti.dag_id,
                TI.task_id.in_(ti.task.upstream_task_ids),
                TI.execution_date == ti.execution_date,
                TI.state.in_([
                    State.SUCCESS, State.FAILED, State.EXCLUDED,
                    State.UPSTREAM_FAILED, State.SKIPPED]),
            )
        )

        successes, excluded, skipped, failed, upstream_failed, done = qry.first()

        # Add excluded tasks into successful tasks as they are equivalent for
        # dependency purposes. This is done in this way, not using the
        # state_for_dependents function, due to the constraints of SQLAlchemy
        # queries.
        successes = successes + excluded

        for dep_status in self._evaluate_trigger_rule(
                ti=ti,
                successes=successes,
                skipped=skipped,
                failed=failed,
                upstream_failed=upstream_failed,
                done=done,
                flag_upstream_failed=dep_context.flag_upstream_failed,
                session=session):
            yield dep_status
Example #31
from sqlalchemy.orm import aliased

import ceilometer
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log
from ceilometer import storage
from ceilometer.storage import base
from ceilometer.storage import models as api_models
from ceilometer.storage.sqlalchemy import models
from ceilometer.storage.sqlalchemy import utils as sql_utils
from ceilometer import utils

LOG = log.getLogger(__name__)

STANDARD_AGGREGATES = dict(avg=func.avg(models.Sample.volume).label('avg'),
                           sum=func.sum(models.Sample.volume).label('sum'),
                           min=func.min(models.Sample.volume).label('min'),
                           max=func.max(models.Sample.volume).label('max'),
                           count=func.count(
                               models.Sample.volume).label('count'))

UNPARAMETERIZED_AGGREGATES = dict(
    stddev=func.stddev_pop(models.Sample.volume).label('stddev'))

PARAMETERIZED_AGGREGATES = dict(
    validate=dict(
        cardinality=lambda p: p in ['resource_id', 'user_id', 'project_id']),
    compute=dict(cardinality=lambda p: func.count(
        distinct(getattr(models.Resource, p))).label('cardinality/%s' % p)))

AVAILABLE_CAPABILITIES = {
Example #32
# Import func
from sqlalchemy import func

# Build an expression to calculate the sum of pop2008 labeled as population
pop2008_sum = func.sum(census.columns.pop2008).label('population')

# Build a query to select the state and sum of pop2008: stmt
stmt = select([census.columns.state, pop2008_sum])

# Group stmt by state
stmt = stmt.group_by(census.columns.state)

# Execute the statement and store all the records: results
results = connection.execute(stmt).fetchall()

# Print results
print(results)

# Print the keys/column names of the results returned
print(results[0].keys())
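To inspect the SQL this Core expression compiles to before executing it, the statement can simply be printed (the exact rendering is dialect-dependent):

# Roughly: SELECT census.state, sum(census.pop2008) AS population
#          FROM census GROUP BY census.state
print(stmt)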
Example #33
def Sum_profit():
    # scalar() returns None when the table is empty; fall back to 0
    sum_pay_query = int(
        db.session.query(func.sum(Artist.gross_worth)).scalar() or 0)
    sum_pay = {"Sum of all artists": f"${sum_pay_query}"}
    return sum_pay
Example #34
File: warns_sql.py Project: farihyuta/Kaga
def num_warns():
    try:
        return SESSION.query(func.sum(Warns.num_warns)).scalar() or 0
    finally:
        SESSION.close()
Example #35
 def get_today_pay(restaurant_id):
     num = db.session.query(func.sum(Order.pay_price)).filter(
         Order.rid == restaurant_id, Order.status == 1,
         func.date(Order.pay_time) == date.today()).scalar()
     return num
Example #36
 def get_month_pay(restaurant_id):
     num = db.session.query(func.sum(Order.pay_price)).filter(
         Order.rid == restaurant_id, Order.status == 1,
         extract('month',
                 Order.pay_time) == datetime.datetime.now().month).scalar()
     return num
Example #37
 def online(self):
     return func.IF(func.sum(Contract.online) > 0, true(), false())
Example #38
 def online_size(cls):
     return func.sum(func.IF(Contract.online, File.__table__.c.size, 0))
Example #39
 def online_count(cls):
     return func.sum(func.IF(Contract.online, 1, 0))
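func.IF renders MySQL's IF(), so examples #37-#39 are MySQL-only. Under SQLAlchemy 1.4+ the same conditional sums can be written portably with case() (a sketch, reusing the columns above):

from sqlalchemy import case, func

# Portable equivalents of the MySQL-only func.IF expressions above.
online_size = func.sum(case((Contract.online, File.__table__.c.size), else_=0))
online_count = func.sum(case((Contract.online, 1), else_=0))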
Example #40
def anonymous_tip_total(receiver_id):
    return db.session.query(func.sum(Tip.amount))\
                     .filter_by(receiver_id=receiver_id, sender=None)\
                     .filter(Tip.amount.isnot(None))\
                     .first()
Example #41
def statistics(city_id, nbh_id):
    print("inside")
    print(nbh_id)
    # retrieve overview and insights data
    if nbh_id != "0":
        results =  db.session.query(Nbh_Overview.nbh_id.label("nbh_id"), \
                            func.sum(Nbh_Overview.airbnb_count).label("airbnb_count"),\
                            func.sum(Nbh_Overview.other_count).label("other_count"),\
                            func.avg(Nbh_Overview.avg_occupancy).label("avg_occupancy"),\
                            func.avg(Nbh_Overview.median_price).label("median_price"),\
                            func.avg(Nbh_Overview.sqft_price).label("sqft_price"),\
                            func.avg(Nbh_Insights.rental_income).label("rental_income"),\
                            func.avg(Nbh_Insights.rental_income_change_pct).label("rental_income_change_pct"),\
                            func.avg(Nbh_Insights.occupancy).label("occupancy"),\
                            func.avg(Nbh_Insights.occupancy_change_pct).label("occupancy_change_pct"),\
                            func.avg(Nbh_Insights.reviews_count_slope).label("reviews_count_slope"),\
                            func.avg(Nbh_Insights.reviews_count_rsquare).label("reviews_count_rsquare"),\
                            func.avg(Listing_Info.night_price).label("night_price"),\
                            func.avg(Listing_Info.cleaning_fee).label("cleaning_fee"),\
                            func.avg(Listing_Info.nights_booked).label("nights_booked"),\
                            func.avg(Listing_Info.rental_income).label("rental_income"),
                            func.avg(Listing_Info.total_reviews).label("total_reviews"),\
                            func.count(Listing_Info.night_price).label("listing_count"))\
                    .join(Nbh_Insights, Nbh_Overview.nbh_id==Nbh_Insights.nbh_id)  \
                    .join(Listing_Info, Nbh_Overview.nbh_id==Listing_Info.nbh_id)  \
                    .filter(Nbh_Overview.nbh_id == int(nbh_id))\
                    .group_by(Nbh_Overview.nbh_id)
    else:
        results =  db.session.query(City_Nbh.city_id.label("city_id"),
                            func.sum(Nbh_Overview.airbnb_count).label("airbnb_count"),\
                            func.sum(Nbh_Overview.other_count).label("other_count"),\
                            func.avg(Nbh_Overview.avg_occupancy).label("avg_occupancy"),\
                            func.avg(Nbh_Overview.median_price).label("median_price"),\
                            func.avg(Nbh_Overview.sqft_price).label("sqft_price"),\
                            func.avg(Nbh_Insights.rental_income).label("rental_income"),\
                            func.avg(Nbh_Insights.rental_income_change_pct).label("rental_income_change_pct"),\
                            func.avg(Nbh_Insights.occupancy).label("occupancy"),\
                            func.avg(Nbh_Insights.occupancy_change_pct).label("occupancy_change_pct"),\
                            func.avg(Nbh_Insights.reviews_count_slope).label("reviews_count_slope"),\
                            func.avg(Nbh_Insights.reviews_count_rsquare).label("reviews_count_rsquare"),\
                            func.avg(Listing_Info.night_price).label("night_price"),\
                            func.avg(Listing_Info.cleaning_fee).label("cleaning_fee"),\
                            func.avg(Listing_Info.nights_booked).label("nights_booked"),\
                            func.avg(Listing_Info.rental_income).label("rental_income"),
                            func.avg(Listing_Info.total_reviews).label("total_reviews"),\
                            func.count(Listing_Info.night_price).label("listing_count"))\
                    .join(Nbh_Insights, Nbh_Overview.nbh_id==Nbh_Insights.nbh_id)  \
                    .join(Listing_Info, Nbh_Overview.nbh_id==Listing_Info.nbh_id)  \
                    .join(City_Nbh, City_Nbh.nbh_id ==  Nbh_Overview.nbh_id)\
                    .filter(City_Nbh.city_id == int(city_id))\
                    .group_by(City_Nbh.city_id)

    statList = []

    for result in results:
        # ADR is calculated as rentalIncome *12 /nights_booked
        daily_rate = (result[15] * 12) / result[14]
        StatData = {
            "statinfo": {
                "city_id": city_id,
                "nbh_id": nbh_id,
                "airbnb_count": str(result[1]),
                "other_count": str(result[2]),
                "avg_occupancy": str("{0:.2f}".format(result[3])),
                "median_price": str("{0:.2f}".format(result[4])),
                "sqft_price": str("{0:.2f}".format(result[5])),
                "rental_income": str("{0:.2f}".format(result[6])),
                "rental_income_change_pct": str("{0:.2f}".format(result[7])),
                "occupancy": str("{0:.2f}".format(result[8])),
                "occupancy_change_pct": str("{0:.2f}".format(result[9])),
                "reviews_count_slope": str("{0:.2f}".format(result[10])),
                "reviews_count_rsquare": str("{0:.2f}".format(result[11])),
                "average_daily_price": str("{0:.2f}".format(daily_rate)),
                "night_price": str("{0:.2f}".format(result[12])),
                "cleaning_fee": str("{0:.2f}".format(result[13])),
                "nights_booked": str("{0:.2f}".format(result[14])),
                "rental_income": str("{0:.2f}".format(result[15])),
                "review_count": str("{0:.2f}".format(result[16])),
                "listing_count": str("{0:.2f}".format(result[17]))
            }
        }
        statList.append(StatData)

    return jsonify(statList)
Example #42
    def get_monthly_steps(self, startdate, enddate):
        errorcode = {}
        key1 = "R"
        key2 = "F"
        first_posn = 0
        second_posn = 0
        weekly_activity_tuple = {}
        activity_tuples = {}

        engine = db
        #create a Session
        Session = sessionmaker(bind=engine)
        session = Session()

        print startdate, enddate

        try:

            day_of_week = startdate.weekday()
            week_start_date = startdate
            #week_end_date=(startdate-datetime.timedelta(days=day_of_week))+datetime.timedelta(days=6)
            week_end_date = startdate + datetime.timedelta(days=6)

            #key1="R"
            #key2="F"
            #first_posn=0
            #second_posn=0

            days_iterator = 0
            weeks_iterator = 1

            total_weekly_steps = 0

            #activity_tuples={}
            found_data = 0

            if (
                    week_end_date > enddate
            ):  # make sure week end date is not greater than the current end date
                week_end_date = enddate

            daily_activity_tuple = None
            weekly_activity_tuple = {}

            engine = db
            #create a Session
            Session = sessionmaker(bind=engine)
            session = Session()

            while ((week_start_date >= startdate)
                   and (week_start_date <= enddate)):

                weekly_activity_tuple = {}
                daily_activity_tuple = None
                total_weekly_steps = 0

                if week_end_date < enddate:
                    days_iterator = 7
                else:
                    diff = week_end_date - week_start_date
                    days_iterator = diff.days + 1

            # daily_activity_tuple=json.loads(self.get_weekly_steps(week_start_date,week_end_date))
            #steps=0
                res = session.query(
                    func.sum(PhysicalActivity.stepscounter).label("sum_steps")
                ).filter(PhysicalActivity.beneficiary_id == self.b_id).filter(
                    PhysicalActivity.datecaptured >= week_start_date).filter(
                        PhysicalActivity.datecaptured <= week_end_date
                    ).order_by(PhysicalActivity.datecaptured).first()
                if res.sum_steps == None:
                    pass
                else:
                    total_weekly_steps = int(res.sum_steps)

                if total_weekly_steps > 0:
                    found_data = found_data + 1

                #weekly_activity_tuple[key2+"%d"%second_posn]="Week %s"%weeks_iterator
                weekly_activity_tuple[key2 + "%d" % second_posn] = "%s-%s" % (
                    week_start_date.strftime("%d"),
                    week_end_date.strftime("%d"))
                second_posn = second_posn + 1
                weeks_iterator = weeks_iterator + 1

                if (days_iterator > 0):
                    weekly_activity_tuple[
                        key2 + "%d" %
                        second_posn] = total_weekly_steps / days_iterator
                    second_posn = 0
                else:
                    weekly_activity_tuple[key2 + "%d" % second_posn] = 0
                    second_posn = 0

                if first_posn < 10:
                    key1 = "R0"
                else:
                    key1 = "R"

                activity_tuples[key1 +
                                "%d" % first_posn] = weekly_activity_tuple

                first_posn = first_posn + 1

                week_start_date = week_end_date + datetime.timedelta(
                    days=1)  # move to next monday
                week_end_date = week_start_date + datetime.timedelta(
                    days=6)  # move to next sunday

                if (week_end_date > enddate):
                    week_end_date = enddate  #for the next iteration

            session.close()
            engine.dispose()
            dbconn.close()

            if (found_data > 0):
                return (json.JSONEncoder().encode(
                    OrderedDict(
                        sorted(activity_tuples.items(), key=lambda t: t[0]))))

                #errorcode["error"]=-4
            activity_tuples = {}
            first_posn = 0
            weekly_activity_tuple[key2 + "%d" % second_posn] = "No week"
            second_posn = second_posn + 1

            weekly_activity_tuple[key2 + "%d" % second_posn] = -4
            second_posn = 0
            if first_posn < 10:
                key1 = "R0"
            else:
                key1 = "R"

            activity_tuples[key1 + "%d" % first_posn] = weekly_activity_tuple

            first_posn = first_posn + 1
            activity_tuple = {}
            return (json.JSONEncoder().encode(
                OrderedDict(sorted(activity_tuples.items(),
                                   key=lambda t: t[0]))))

        except Exception as e:
            session.close()
            engine.dispose()
            dbconn.close()

            second_posn = 0
            weekly_activity_tuple[key2 + "%d" % second_posn] = "%s" % e
            second_posn = second_posn + 1

            weekly_activity_tuple[key2 + "%d" % second_posn] = -1
            second_posn = 0
            if first_posn < 10:
                key1 = "R0"
            else:
                key1 = "R"

            activity_tuples[key1 + "%d" % first_posn] = weekly_activity_tuple

            first_posn = first_posn + 1
            weekly_activity_tuple = {}
            return (json.JSONEncoder().encode(activity_tuples))
Example #43
    is_primary_split = Column(Boolean, default=0)

    transaction = relationship('Transaction', lazy=False)
    account = relationship('Account')

    __table_args__ = (UniqueConstraint(
        'transaction_id',
        'account_id',
        name='transaction__account__uix',
    ), )


# The following creates a unique constraint on the 'splits' table for the
# combination of (transaction_id, is_primary_split), but only where
# is_primary_split is not 0. This allows us to ensure only 1 split per
# transaction is marked as the primary split. This takes advantage of the
# "partial index" feature provided by sqlite and postgresql. MySQL does not
# have this feature.
primary_split_uix_where = Split.is_primary_split != 0
Index(
    'transaction__primary_split__uix',
    Split.transaction_id,
    Split.is_primary_split,
    unique=True,
    postgresql_where=primary_split_uix_where,
    sqlite_where=primary_split_uix_where,
)

Account.balance = column_property(
    select([func.sum(Split.amount)]).where(Account.id == Split.account_id))
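A minimal sketch of what the partial index enforces, assuming the model above and a session bound to SQLite or PostgreSQL (other required columns omitted):

# A second primary split for the same transaction violates the partial
# unique index; any number of non-primary splits is still allowed.
session.add_all([
    Split(transaction_id=1, account_id=1, is_primary_split=True),
    Split(transaction_id=1, account_id=2, is_primary_split=True),
])
session.commit()  # raises IntegrityError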
Example #44
    def index(self):

        # Get the month details by fetching distinct values and determining the
        # month names from the values.
        c.months, c.day = _month_details(GA_Stat)

        # Work out which month to show, based on query params of the first item
        c.month_desc = 'all months'
        c.month = request.params.get('month', '')
        if c.month:
            c.month_desc = ''.join([m[1] for m in c.months if m[0] == c.month])

        q = model.Session.query(GA_Stat).\
            filter(GA_Stat.stat_name=='Totals')
        if c.month:
            q = q.filter(GA_Stat.period_name == c.month)
        entries = q.order_by('ga_stat.key').all()

        def clean_key(key, val):
            if key in [
                    'Average time on site', 'Pages per visit', 'New visits',
                    'Bounce rate (home page)'
            ]:
                val = "%.2f" % round(float(val), 2)
                if key == 'Average time on site':
                    mins, secs = divmod(float(val), 60)
                    hours, mins = divmod(mins, 60)
                    val = '%02d:%02d:%02d (%s seconds) ' % (hours, mins, secs,
                                                            val)
                if key in ['New visits', 'Bounce rate (home page)']:
                    val = "%s%%" % val
            if key in ['Total page views', 'Total visits']:
                val = int(val)

            return key, val
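
        # Worked example (illustrative): a raw 'Average time on site' value
        # of 3750.25 seconds becomes
        #     mins, secs = divmod(3750.25, 60)   # (62.0, 30.25)
        #     hours, mins = divmod(62.0, 60)     # (1.0, 2.0)
        # and renders as '01:02:30 (3750.25 seconds)'.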

        # Query historic values for sparkline rendering
        sparkline_query = model.Session.query(GA_Stat)\
                .filter(GA_Stat.stat_name=='Totals')\
                .order_by(GA_Stat.period_name)
        sparkline_data = {}
        for x in sparkline_query:
            sparkline_data[x.key] = sparkline_data.get(x.key, [])
            key, val = clean_key(x.key, float(x.value))
            tooltip = '%s: %s' % (_get_month_name(x.period_name), val)
            sparkline_data[x.key].append((tooltip, x.value))
        # Trim the latest month, as it looks like a huge dropoff
        for key in sparkline_data:
            sparkline_data[key] = sparkline_data[key][:-1]

        c.global_totals = []
        if c.month:
            for e in entries:
                key, val = clean_key(e.key, e.value)
                sparkline = sparkline_data[e.key]
                c.global_totals.append((key, val, sparkline))
        else:
            d = collections.defaultdict(list)
            for e in entries:
                d[e.key].append(float(e.value))
            for k, v in d.iteritems():
                if k in ['Total page views', 'Total visits']:
                    v = sum(v)
                else:
                    v = float(sum(v)) / float(len(v))
                sparkline = sparkline_data[k]
                key, val = clean_key(k, v)

                c.global_totals.append((key, val, sparkline))
        # Sort the global totals into a more pleasant order
        def sort_func(x):
            key = x[0]
            total_order = [
                'Total page views', 'Total visits', 'Pages per visit'
            ]
            if key in total_order:
                return total_order.index(key)
            return 999

        c.global_totals = sorted(c.global_totals, key=sort_func)

        keys = {
            'Browser versions': 'browser_versions',
            'Browsers': 'browsers',
            'Operating Systems versions': 'os_versions',
            'Operating Systems': 'os',
            'Social sources': 'social_networks',
            'Languages': 'languages',
            'Country': 'country'
        }

        def shorten_name(name, length=60):
            return (name[:length] + '..') if len(name) > length else name

        def fill_out_url(url):
            import urlparse
            return urlparse.urljoin(config.get('ckan.site_url'), url)

        c.social_referrer_totals, c.social_referrers = [], []
        q = model.Session.query(GA_ReferralStat)
        q = q.filter(GA_ReferralStat.period_name == c.month) if c.month else q
        q = q.order_by('ga_referrer.count::int desc')
        for entry in q.all():
            c.social_referrers.append(
                (shorten_name(entry.url), fill_out_url(entry.url),
                 entry.source, entry.count))

        q = model.Session.query(GA_ReferralStat.url,
                                func.sum(GA_ReferralStat.count).label('count'))
        q = q.filter(GA_ReferralStat.period_name == c.month) if c.month else q
        q = q.order_by('count desc').group_by(GA_ReferralStat.url)
        for entry in q.all():
            c.social_referrer_totals.append(
                (shorten_name(entry[0]), fill_out_url(entry[0]), '', entry[1]))

        for k, v in keys.iteritems():
            q = model.Session.query(GA_Stat).\
                filter(GA_Stat.stat_name==k).\
                order_by(GA_Stat.period_name)
            # Buffer the tabular data
            if c.month:
                q = q.filter(GA_Stat.period_name==c.month).\
                          order_by('ga_stat.value::int desc')
            d = collections.defaultdict(int)
            for e in q.all():
                d[e.key] += int(e.value)
            entries = []
            for key, val in d.iteritems():
                entries.append((
                    key,
                    val,
                ))
            entries = sorted(entries, key=operator.itemgetter(1), reverse=True)

            # Run a query on all months to gather graph data
            graph_query = model.Session.query(GA_Stat).\
                filter(GA_Stat.stat_name==k).\
                order_by(GA_Stat.period_name)
            graph_dict = {}
            for stat in graph_query:
                graph_dict[stat.key] = graph_dict.get(stat.key, {
                    'name': stat.key,
                    'raw': {}
                })
                graph_dict[stat.key]['raw'][stat.period_name] = float(
                    stat.value)
            stats_in_table = [x[0] for x in entries]
            stats_not_in_table = set(graph_dict.keys()) - set(stats_in_table)
            stats = stats_in_table + sorted(list(stats_not_in_table))
            graph = [graph_dict[x] for x in stats]
            setattr(c, v + '_graph',
                    json.dumps(_to_rickshaw(graph, percentageMode=True)))

            # Get the total for each set of values and then set the value as
            # a percentage of the total
            if k == 'Social sources':
                total = sum([
                    x for n, x, graph in c.global_totals if n == 'Total visits'
                ])
            else:
                total = sum([num for _, num in entries])
            setattr(c, v, [(key, _percent(val, total)) for key, val in entries])

        return render('ga_report/site/index.html')
예제 #45
0
 def abs_other(self):
     return func.sum(HybridPerson.other)
예제 #46
0
파일: models.py 프로젝트: raphaelm/muesli
 def getLectureResultsByCategory(self, *args, **kwargs):
     session = Session.object_session(self)
     results = self.getLectureResults(*args, **kwargs).subquery()
     return session.query(func.sum(results.c.points).label('points'), results.c.student_id, results.c.category)\
      .group_by(results.c.category, results.c.student_id)
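
# Hypothetical usage (names assumed, not from muesli itself):
#
#     for points, student_id, category in lecture.getLectureResultsByCategory():
#         print(student_id, category, points)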
예제 #47
0
파일: routes.py 프로젝트: digrigor/PQuery
def data():
    """Return server side data."""
    # Getting html datatables parameters
    params = request.args.to_dict()

    # Getting samples selected
    lsams = session["dt_samples"]

    # Get the whole table as a subquery (so it can be used, for example,
    # in SELECT * FROM this_subquery)
    all_info_query = db.session.query(Var).subquery()

    # Build the initial query:
    ## 1. Query all variants first -> db.session.query(Var.variant_id)
    ## 2. Then pass to the builder the user-selected coords -> session["dt_coords"]
    ## 3. Build the query
    filter_info_query_in = initial_query_builder(
        db.session.query(Var.variant_id), session["dt_coords"], Var)

    # Do the same but as a subquery (so it can be used in
    # SELECT * FROM this_subquery)
    filter_info_subquery_in = initial_query_builder(
        db.session.query(Var.variant_id), session["dt_coords"],
        Var).subquery()

    # Query the table with the genotypes (Alleles model)
    ## select_from(Alleles): the base table
    ##
    ## filter(Alleles.variant_id.in_(filter_info_subquery_in)): filter the
    ## very long Alleles table down to variants that are also in the
    ## info table queried above.
    ##
    ## Alleles.sample.in_(lsams): filter the Alleles table down to rows for
    ## the samples that the user has selected.
    ##
    ## having(func.sum(Alleles.gt) > 0): only keep variants carried by at
    ## least one of the selected samples in the given coordinates
    geno_query = db.session.query().select_from(Alleles).filter(
        Alleles.variant_id.in_(filter_info_subquery_in),
        Alleles.sample.in_(lsams)).group_by(
            Alleles.variant_id).having(func.sum(Alleles.gt) > 0)
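
    # Roughly the SQL this builds (a sketch; actual output is
    # backend-dependent):
    #   SELECT ... FROM alleles
    #   WHERE variant_id IN (SELECT variant_id FROM ...) AND sample IN (...)
    #   GROUP BY variant_id HAVING sum(gt) > 0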

    nondynamics_state = False

    # If the user has applied a filter on the var_table:
    if session['fpane_clean_dict'] != '':
        fpd = session['fpane_clean_dict']
        nondynamics_state = (len([
            y for y in ['dynamic_ac', 'dynamic_af', 'dynamic_an']
            for x in list(fpd.keys()) if y not in x
        ]) > 0)
        # True if AC, AN and AF should be calculated on the fly for the
        # selected samples, False if not. (In practice it is always True.)

        # Filter the initial query according to the user-defined filters:
        filter_info_query_in_filtered = fpane_query_builder_var(
            filter_info_query_in, session['fpane_clean_dict'], lsams, Var,
            field_types)
        filter_info_subquery_in = filter_info_query_in_filtered.subquery()

        # Redo the geno_query as above, but now filtered to the same
        # variants as the filtered filter_info_query_in
        geno_query = db.session.query().select_from(Alleles).filter(
            Alleles.variant_id.in_(filter_info_subquery_in),
            Alleles.sample.in_(lsams)).group_by(
                Alleles.variant_id).having(func.sum(Alleles.gt) > 0)
        # Filter the sample columns (hom, het, etc.) and the dynamic_af,
        # dynamic_an and dynamic_ac columns:
        geno_query_filtered = fpane_query_builder_having(
            geno_query, session['fpane_clean_dict'], lsams, Alleles,
            field_types)
        geno_query = geno_query_filtered

    #Join varquery and geno query
    query = geno_query.join(
        all_info_query,
        Alleles.variant_id == all_info_query.columns["variant_id"])

    # Here we control which columns of the above-built queries will be returned in the final table.
    ## If queried samples <= cohort_cutoff, PQuery will return all info
    ## columns and the individual genotypes per sample.
    if len(lsams) <= cohort_cutoff:
        cols = column_dt_builder(info_cols_ordered, lsams, Alleles,
                                 all_info_query, "samples")
    ## If queried samples > cohort_cutoff, then PQuery will run in cohort mode without showing individual genotypes.
    elif len(lsams) > cohort_cutoff:
        cols = column_dt_builder(info_cols_ordered, lsams, Alleles,
                                 all_info_query)

    # Here we specify the number of columns returned and building the final object which will be returned
    # on the var_table page:
    if session["dt_coords"] != "all" or nondynamics_state == False:
        # HERE IS A BUG:
        # We just count the total rows before the filtering as otherwise it
        # would be super slow:
        rowcount = filter_info_query_in.count()
        rowTable = DataTables(params, query, cols, rowcount)
        return jsonify(rowTable.output_result())

    # If the user selects to return whole exome we rebuild the queries to deal with this huge task:
    else:
        print("2exome")
        geno_query = db.session.query(Alleles).filter(
            Alleles.sample.in_(lsams)).subquery()
        infoquery = db.session.query().select_from(Var).group_by(
            geno_query.columns.variant_id).having(
                func.sum(geno_query.columns.gt) > 0)
        query = infoquery.join(geno_query,
                               geno_query.columns.variant_id == Var.variant_id)
        if len(lsams) <= cohort_cutoff:
            cols = column_dt_builder_all(info_cols_ordered,
                                         lsams,
                                         geno_query,
                                         Var,
                                         mode="samples")
        elif len(lsams) > cohort_cutoff:
            cols = column_dt_builder_all(info_cols_ordered, lsams, geno_query,
                                         Var)
        rowcount_query = db.session.query(Alleles.variant_id).filter(
            Alleles.sample.in_(lsams)).group_by(
                Alleles.variant_id).having(func.sum(Alleles.gt) > 0)
        rowcount = rowcount_query.count()
        rowTable = DataTables(params, query, cols, rowcount)
        return jsonify(rowTable.output_result())


#PREVIOUS CHUNKS OF CODE, MIGHT BE USEFUL
#     try:
#         if session['fpane_clean_dict']!='':
#             print(session['fpane_clean_dict'])
#             fpd = session['fpane_clean_dict']
#             dynamics_state = len([y for y in ['dynamic_ac', 'dynamic_af', 'dynamic_an'] for x in list(fpd.keys()) if y in x])>0
#             nondynamics_state = len([y for y in ['dynamic_ac', 'dynamic_af', 'dynamic_an'] for x in list(fpd.keys()) if y not in x])>0
#
# def final_query_builder(db, Var, Alleles, coords, filt, insams, fpd, params, dynamics_state, nondynamics_state):
#
# general_genoquery = db.session.query(A)
# genoquery_pre = db.session.query(Alleles.variant_id, func.sum(Alleles.gt).label('dynamic_AC'),
#                                  (func.count(Alleles.gt) * 2).label('dynamic_AN'),
#                                  (func.sum(Alleles.gt) / (func.count(Alleles.gt) * 2)).label('dynamic_AF')).filter(
#     Alleles.sample.in_(lsams)).group_by(
#     Alleles.variant_id)
#     if dynamics_state == False & nondynamics_state == False:
#         dt_columns = dt_columns_builder()
#         varquery = db.session.query().select_from(Var)
#         genoquery_final = genoquery_pre.limit(params.get('length')).offset(params.get('start')).subquery()
#         varquery_final = varquery
#         varquery_final2 = initial_query_builder(varquery_final, session["dt_coords"], session["dt_filt"], Var)
#         varquery_final3 = fpane_query_builder_var(varquery_final2, fpd, lsams, Var, field_types)
#         final_query = varquery_final3.join(genoquery_final, genoquery_final.columns.variant_id == Var.variant_id)
#         row_count = varquery_final3.add_columns(*[c.sqla_expr for c in [ColumnDT(Var.variant_id)]]).count()
#     elif dynamics_state == False & nondynamics_state == True:
#         dt_columns = dt_columns_builder()
#         genoquery_final = genoquery_pre.subquery()
#         varquery_sq1 = db.session.query(Var)
#         varquery_sq1 = initial_query_builder(varquery_sq1, session["dt_coords"], session["dt_filt"], Var)
#         varquery_sq1 = fpane_query_builder_var(varquery_sq1, fpd, lsams, Var, field_types)
#         varquery_sq1 = varquery_sq1.limit(params.get('length')).offset(params.get('start')).subquery()
#         varquery_sq2 = db.session.query().select_from(varquery_sq1).add_columns(*[c.sqla_expr for c in dt_columns])
#
# main_geno = db.session.query(Alleles)
#
# main_geno = db.session.query(mc.variant_id, func.sum(mc.gt).label('AC'),
#                  (func.count(mc.gt)*2).label('AN'),
#                  (func.sum(mc.gt)/(func.count(mc.gt)*2)).label('AF'),
#                  func.group_concat(case([(mc.sample == "S2201",  mc.gt+":"+mc.dp+":"+mc.gq+":"+mc.pgt+":"+mc.pid+":"+mc.sample)])).label('S2201'),
#                  func.group_concat(case([(mc.sample == "S2202",  mc.gt+":"+mc.dp+":"+mc.gq+":"+mc.pgt+":"+mc.pid+":"+mc.sample)])).label('S2202'),
#                  func.group_concat(case([(mc.sample == "S2203",  mc.gt+":"+mc.dp+":"+mc.gq+":"+mc.pgt+":"+mc.pid+":"+mc.sample)])).label('S2203')).group_by(main_sub.columns.variant_id)
#
#         final_query = varquery_sq2.join
#
#
#
#             if dynamics_state == True:
#                 if nondynamics_state == True:
#                     print('case1')
#                     genoquery_fpane_filt_pre = fpane_query_builder_geno(genoquery_pre, fpd, lsams, Alleles, field_types)
#                     row_count = genoquery_fpane_filt_pre.count()
#                     genoquery_final = genoquery_fpane_filt_pre.subquery()
#                     varquery_final = varquery
#                     final_query = varquery_final.join(genoquery_final, genoquery_final.columns.variant_id == Var.variant_id)
#                     final_query_infilt = initial_query_builder(final_query, session["dt_coords"], session["dt_filt"], Var)
#                     final_query_infilt_fpanefilt = fpane_query_builder_var(final_query_infilt, fpd, lsams, Var, field_types).limit(params.get('length')).offset(params.get('start'))
#                 elif nondynamics_state == False:
#                     print('case2')
#                     genoquery_fpane_filt_pre = fpane_query_builder_geno(genoquery_pre, fpd, lsams, Alleles, field_types)
#                     row_count = genoquery_fpane_filt_pre.count()
#                     genoquery_final = genoquery_fpane_filt_pre.limit(params.get('length')).offset(params.get('start')).subquery()
#                     varquery_final = varquery
#                     final_query = varquery_final.join(genoquery_final,genoquery_final.columns.variant_id == Var.variant_id)
#                     final_query_infilt = initial_query_builder(final_query, session["dt_coords"], session["dt_filt"],Var)
#                     final_query_infilt_fpanefilt = final_query_infilt
#                 session['fpane_clean_dict'] = ''
#             else:
#                 print('case3')
#                 varquery_final = varquery
#                 varquery_final2 = initial_query_builder(varquery_final, session["dt_coords"], session["dt_filt"], Var)
#                 varquery_final3 = fpane_query_builder_var(varquery_final2, fpd, lsams, Var, field_types)
#                 varquery_sq = varquery_final3.add_columns(*[c.sqla_expr for c in [ColumnDT(Var.variant_id)]]).subquery()
#                 genoquery_fpane_filt_pre = genoquery_pre
#                 genoquery_final = genoquery_fpane_filt_pre.join(varquery_sq,  = ).limit(params.get('length')).offset(params.get('start')).subquery()
#                 final_query = varquery_final3.join(genoquery_final, genoquery_final.columns.variant_id == Var.variant_id)
#                 final_query_infilt_fpanefilt = fpane_query_builder_var(final_query, fpd, lsams, Var, field_types)
#                 row_count = varquery_final3.add_columns(*[c.sqla_expr for c in [ColumnDT(Var.variant_id)]]).count()
#                 session['fpane_clean_dict'] = ''
#         else:
#             print('case4')
#             #print("fpane_query_else")
#             #print(len(session['fpane_clean_dict']))
#             #print(session['fpane_clean_dict'])
#             dynamics_state = False
#             genoquery_final = genoquery_pre.limit(params.get('length')).offset(params.get('start')).subquery()
#             varquery_final = varquery
#             final_query = varquery_final.join(genoquery_final, genoquery_final.columns.variant_id == Var.variant_id)
#             final_query_infilt = initial_query_builder(final_query, session["dt_coords"], session["dt_filt"], Var)
#             final_query_infilt_fpanefilt = final_query_infilt
#             row_count = varquery_final.add_columns(*[c.sqla_expr for c in [ColumnDT(Var.variant_id)]]).count()
#             session['fpane_clean_dict'] = ''
#     except KeyError:
#         print('case5')
#         genoquery_final = genoquery_pre.limit(params.get('length')).offset(params.get('start')).subquery()
#         varquery_final = varquery
#         final_query = varquery_final.join(genoquery_final, genoquery_final.columns.variant_id == Var.variant_id)
#         final_query_infilt = initial_query_builder(final_query, session["dt_coords"], session["dt_filt"], Var)
#         final_query_infilt_fpanefilt = final_query_infilt
#         row_count = final_query_infilt_fpanefilt.add_columns(*[c.sqla_expr for c in [ColumnDT(Var.variant_id)]]).count()
# dt_columns = []
# for x in info_cols_ordered+lsams:
#     if x in lsams: dt_columns.append(ColumnDT(Var.__table__.c[x]))
#     else:
#         if field_types[x]['db_status'] == 'in_db': dt_columns.append(ColumnDT(Var.__table__.c[x]))
#         elif field_types[x]['db_status'] == 'out_db':
#             if '_ac' in x: dt_columns.append(ColumnDT(func.sum(Alleles.gt)))
#             elif '_an' in x: dt_columns.append(ColumnDT(func.count(Alleles.gt)*2))
#             elif '_af' in x: dt_columns.append(ColumnDT(func.sum(Alleles.gt)/(func.count(Alleles.gt)*2)))
#     #print(fquery)
#     rowTable = DataTables(params, final_query_infilt_fpanefilt, dt_columns, row_count=row_count)
#     #print(rowTable)
#     return jsonify(rowTable.output_result())
예제 #48
0
    def get_weekly_steps(self, startdate, enddate):

        daily_activity_tuple = {}
        first_posn = 0
        second_posn = 0
        key1 = "R"
        key2 = "F"
        activity_tuples = {}

        engine = db
        #create a Session
        Session = sessionmaker(bind=engine)
        session = Session()

        try:

            found_data = 0
            day = ""
            weekdays_clusters = [
                'Mon', 'Tue', 'Wed', 'Thur', 'Frid', 'Sat', 'Sun'
            ]

            daily_activity_tuple = {}
            while (startdate <= enddate):

                day_of_week = startdate.weekday()

                day = weekdays_clusters[day_of_week]

                daily_activity_tuple = {}
                total_daily_steps = 0

                res = session.query(
                    func.sum(PhysicalActivity.stepscounter).label("sum_steps")
                ).filter(PhysicalActivity.beneficiary_id == self.b_id).filter(
                    PhysicalActivity.datecaptured == startdate).order_by(
                        PhysicalActivity.datecaptured).first()
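                # Note: func.coalesce(func.sum(...), 0) could replace the
                # None check below (a suggestion, not in the original).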
                if res.sum_steps is not None:
                    total_daily_steps = int(res.sum_steps)

                startdate = startdate + datetime.timedelta(
                    days=1)  #move to the next day

                if total_daily_steps > 0:
                    found_data = found_data + 1

                daily_activity_tuple[key2 + "%d" % second_posn] = day
                second_posn = second_posn + 1

                daily_activity_tuple[key2 +
                                     "%d" % second_posn] = total_daily_steps
                second_posn = 0

                if first_posn < 10:
                    key1 = "R0"
                else:
                    key1 = "R"

                activity_tuples[key1 +
                                "%d" % first_posn] = daily_activity_tuple

                first_posn = first_posn + 1
                total_daily_steps = 0

            session.close()
            engine.dispose()
            dbconn.close()

            if (found_data > 0):
                return (json.JSONEncoder().encode(
                    OrderedDict(
                        sorted(activity_tuples.items(), key=lambda t: t[0]))))
            else:
                #errorcode["error"]=-4
                activity_tuples = {}
                first_posn = 0
                daily_activity_tuple[key2 + "%d" % second_posn] = "No day"
                second_posn = second_posn + 1

                daily_activity_tuple[key2 + "%d" % second_posn] = -4
                second_posn = 0
                if first_posn < 10:
                    key1 = "R0"
                else:
                    key1 = "R"

                activity_tuples[key1 +
                                "%d" % first_posn] = daily_activity_tuple

                first_posn = first_posn + 1
                daily_activity_tuple = {}
                return (json.JSONEncoder().encode(
                    OrderedDict(
                        sorted(activity_tuples.items(), key=lambda t: t[0]))))

        except Exception as e:
            session.close()
            engine.dispose()
            dbconn.close()
            second_posn = 0

            daily_activity_tuple[key2 + "%d" % second_posn] = "%s" % e
            second_posn = second_posn + 1

            daily_activity_tuple[key2 + "%d" % second_posn] = -1
            second_posn = 0
            if first_posn < 10:
                key1 = "R0"
            else:
                key1 = "R"

            activity_tuples[key1 + "%d" % first_posn] = daily_activity_tuple

            first_posn = first_posn + 1
            daily_activity_tuple = {}
            return (json.JSONEncoder().encode(activity_tuples))
예제 #49
0
def get_channel_annotation_stats(channel_id, checksums=None):
    bridge = Bridge(app_name=CONTENT_APP_NAME)

    ContentNodeTable = bridge.get_table(ContentNode)
    FileTable = bridge.get_table(File)
    LocalFileTable = bridge.get_table(LocalFile)
    if checksums is not None:
        file_table = FileTable.join(
            LocalFileTable,
            and_(
                FileTable.c.local_file_id == LocalFileTable.c.id,
                or_(
                    # checksums are not uuids and have been got from
                    # get_channel_stats_from_disk, so no need to validate them:
                    filter_by_uuids(LocalFileTable.c.id, checksums, validate=False),
                    LocalFileTable.c.available == True,  # noqa
                ),
            ),
        )
    else:
        file_table = FileTable.join(
            LocalFileTable, FileTable.c.local_file_id == LocalFileTable.c.id
        )

    contentnode_statement = (
        select([FileTable.c.contentnode_id])
        .select_from(file_table)
        .where(FileTable.c.supplementary == False)  # noqa
        .where(
            or_(*(FileTable.c.preset == preset for preset in renderable_files_presets))
        )
        .where(ContentNodeTable.c.id == FileTable.c.contentnode_id)
    )
    connection = bridge.get_connection()

    # start a transaction

    trans = connection.begin()

    connection.execute(
        ContentNodeTable.update()
        .where(
            and_(
                ContentNodeTable.c.kind != content_kinds.TOPIC,
                ContentNodeTable.c.channel_id == channel_id,
            )
        )
        .values(available=exists(contentnode_statement))
    )

    ContentNodeClass = bridge.get_class(ContentNode)

    node_depth = (
        bridge.session.query(func.max(ContentNodeClass.level))
        .filter_by(channel_id=channel_id)
        .scalar()
    )

    child = ContentNodeTable.alias()

    # Update all leaf ContentNodes to have num_coach_content to 1 or 0
    # Update all leaf ContentNodes to have on_device_resources to 1 or 0
    connection.execute(
        ContentNodeTable.update()
        .where(
            and_(
                # In this channel
                ContentNodeTable.c.channel_id == channel_id,
                # That are not topics
                ContentNodeTable.c.kind != content_kinds.TOPIC,
            )
        )
        .values(
            num_coach_contents=cast(ContentNodeTable.c.coach_content, Integer()),
            on_device_resources=cast(ContentNodeTable.c.available, Integer()),
        )
    )

    # Before starting set availability to False on all topics.
    connection.execute(
        ContentNodeTable.update()
        .where(
            and_(
                # In this channel
                ContentNodeTable.c.channel_id == channel_id,
                # That are topics
                ContentNodeTable.c.kind == content_kinds.TOPIC,
            )
        )
        .values(available=False)
    )

    # Expression to capture all available child nodes of a contentnode
    available_nodes = select([child.c.available]).where(
        and_(
            child.c.available == True,  # noqa
            ContentNodeTable.c.id == child.c.parent_id,
        )
    )

    # Expressions for annotation of coach content

    # Expression that will resolve a boolean value for all the available children
    # of a content node, whereby if they all have coach_content flagged on them, it will be true,
    # but otherwise false.
    # Everything after the select statement should be identical to the available_nodes expression above.
    if bridge.engine.name == "sqlite":
        # Use a min function to simulate an AND.
        coach_content_nodes = select([func.min(child.c.coach_content)]).where(
            and_(
                child.c.available == True,  # noqa
                ContentNodeTable.c.id == child.c.parent_id,
            )
        )
    elif bridge.engine.name == "postgresql":
        # Use the postgres boolean AND operator
        coach_content_nodes = select([func.bool_and(child.c.coach_content)]).where(
            and_(
                child.c.available == True,  # noqa
                ContentNodeTable.c.id == child.c.parent_id,
            )
        )
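    # Note: only the sqlite and postgresql engines are handled here; any
    # other backend would leave coach_content_nodes undefined.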

    # Expression that sums the total number of coach contents for each child node
    # of a contentnode
    coach_content_num = select([func.sum(child.c.num_coach_contents)]).where(
        and_(
            child.c.available == True,  # noqa
            ContentNodeTable.c.id == child.c.parent_id,
        )
    )

    # Expression that sums the total number of on_device_resources for each child node
    # of a contentnode
    on_device_num = select([func.sum(child.c.on_device_resources)]).where(
        and_(
            child.c.available == True,  # noqa
            ContentNodeTable.c.id == child.c.parent_id,
        )
    )

    stats = {}

    # Go from the deepest level to the shallowest
    for level in range(node_depth, 0, -1):

        # Only modify topic availability here
        connection.execute(
            ContentNodeTable.update()
            .where(
                and_(
                    ContentNodeTable.c.level == level - 1,
                    ContentNodeTable.c.channel_id == channel_id,
                    ContentNodeTable.c.kind == content_kinds.TOPIC,
                )
            )
            # Because we have set availability to False on all topics as a starting point
            # we only need to make updates to topics with available children.
            .where(exists(available_nodes))
            .values(
                available=exists(available_nodes),
                coach_content=coach_content_nodes,
                num_coach_contents=coach_content_num,
                on_device_resources=on_device_num,
            )
        )

        level_stats = connection.execute(
            select(
                [
                    ContentNodeTable.c.id,
                    ContentNodeTable.c.coach_content,
                    ContentNodeTable.c.num_coach_contents,
                    ContentNodeTable.c.on_device_resources,
                ]
            ).where(
                and_(
                    ContentNodeTable.c.level == level,
                    ContentNodeTable.c.channel_id == channel_id,
                    ContentNodeTable.c.available == True,  # noqa
                )
            )
        )

        for stat in level_stats:
            stats[stat[0]] = {
                "coach_content": bool(stat[1]),
                "num_coach_contents": stat[2] or 0,
                "total_resources": stat[3] or 0,
            }

    root_node_stats = connection.execute(
        select(
            [
                ContentNodeTable.c.id,
                ContentNodeTable.c.coach_content,
                ContentNodeTable.c.num_coach_contents,
                ContentNodeTable.c.on_device_resources,
            ]
        ).where(
            and_(
                ContentNodeTable.c.level == 0,
                ContentNodeTable.c.channel_id == channel_id,
            )
        )
    ).fetchone()

    stats[root_node_stats[0]] = {
        "coach_content": root_node_stats[1],
        "num_coach_contents": root_node_stats[2],
        "total_resources": root_node_stats[3],
    }

    # rollback the transaction to undo the temporary annotation
    trans.rollback()

    bridge.end()

    return stats
예제 #50
0
 def actual_amount(self):
     return (select([func.sum(SalesOrderLine.unit_price * SalesOrderLine.quantity)])
             .where(self.id == SalesOrderLine.sales_order_id).label('actual_amount'))
예제 #51
0
 def original_amount(self):
     return (select([func.sum(SalesOrderLine.original_amount)])
             .where(self.id == SalesOrderLine.sales_order_id)
             .label('original_amount'))
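
# Note: both properties use the legacy select([...]) call style; on
# SQLAlchemy 1.4+ the rough equivalent (a sketch) would be
#     (select(func.sum(SalesOrderLine.original_amount))
#      .where(self.id == SalesOrderLine.sales_order_id)
#      .scalar_subquery().label('original_amount'))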
예제 #52
0
    def get(self):
        ''''''
        parser = RequestParser(trim=True)
        parser.add_argument('page', type=int, default=DEFAULT_PAGE)
        parser.add_argument('pageSize', type=int, default=DEFAULT_PAGE_SIZE)
        parser.add_argument('timeLower', type=int)
        parser.add_argument('timeUpper', type=int)
        args = parser.parse_args(strict=True)

        result_BlastBets_set = set()
        result_BlastBets_set.add(BlastBets.state == 2)
        result_BlastBetsCredit_set = set()
        result_BlastBetsCredit_set.add(BlastBetsCredit.state == 2)
        result_EntertainmentCityBetsDetail_set = set()
        result_EntertainmentCityBetsDetail_set.add(
            EntertainmentCityBetsDetail.Flag == 1)

        if args['timeLower']:
            result_BlastBets_set.add(BlastBets.actionTime >= args['timeLower'])
            result_BlastBetsCredit_set.add(
                BlastBetsCredit.betTime >= args['timeLower'])
            result_EntertainmentCityBetsDetail_set.add(
                EntertainmentCityBetsDetail.BetTime >= args['timeLower'])
        if args['timeUpper']:
            result_BlastBets_set.add(
                BlastBets.actionTime <= args['timeUpper'] + SECONDS_PER_DAY)
            result_BlastBetsCredit_set.add(
                BlastBetsCredit.betTime <= args['timeUpper'] + SECONDS_PER_DAY)
            result_EntertainmentCityBetsDetail_set.add(
                EntertainmentCityBetsDetail.BetTime <= args['timeUpper'] +
                SECONDS_PER_DAY)
        '''Query the blast_bet table'''
        result_BlastBets = db.session.query(
            BlastBets.username.label('username'),
            func.sum(BlastBets.mode * BlastBets.beiShu *
                     BlastBets.actionNum).label('betAmount'),
            BlastBets.state.label('state')).group_by(
                BlastBets.username).filter(*result_BlastBets_set).subquery()
        '''Query the tb_bets_credit table'''
        result_BlastBetsCredit = db.session.query(
            BlastBetsCredit.memberUsername.label('username'),
            func.sum(BlastBetsCredit.betAmount).label('betAmount'),
            BlastBetsCredit.state.label('state')).group_by(
                BlastBetsCredit.memberUsername).filter(
                    *result_BlastBetsCredit_set).subquery()
        '''Query the tb_entertainment_city_bets_detail table'''
        result_EntertainmentCityBetsDetail = db.session.query(
            EntertainmentCityBetsDetail.PlayerName.label('username'),
            EntertainmentCityBetsDetail.ECCode.label('ECCode'),
            EntertainmentCityBetsDetail.childType.label('childType'),
            func.sum(EntertainmentCityBetsDetail.BetAmount).label('betAmount'),
            EntertainmentCityBetsDetail.Flag.label('state'),
        ).group_by(
            EntertainmentCityBetsDetail.PlayerName,
            EntertainmentCityBetsDetail.ECCode,
            EntertainmentCityBetsDetail.childType,
        ).filter(*result_EntertainmentCityBetsDetail_set).all()
        '''Combined query over blast_bet and tb_bets_credit'''
        result_BB_left_l = db.session.query(
            result_BlastBets.c.username.label('result_BlastBets_username'),
            result_BlastBets.c.betAmount.label('result_BlastBets_betAmount'),
            result_BlastBets.c.state.label('result_BlastBets_state'),
            result_BlastBetsCredit.c.username.label(
                'result_BlastBetsCredit_username'),
            result_BlastBetsCredit.c.betAmount.label(
                'result_BlastBetsCredit_betAmount'),
            result_BlastBetsCredit.c.state.label(
                'result_BlastBetsCredit_state'),
        )
        result_BB_left_l = result_BB_left_l.outerjoin(
            result_BlastBetsCredit,
            result_BlastBetsCredit.c.username == result_BlastBets.c.username)

        result_BBC_right_l = db.session.query(
            result_BlastBets.c.username.label('result_BlastBets_username'),
            result_BlastBets.c.betAmount.label('result_BlastBets_betAmount'),
            result_BlastBets.c.state.label('result_BlastBets_state'),
            result_BlastBetsCredit.c.username.label(
                'result_BlastBetsCredit_username'),
            result_BlastBetsCredit.c.betAmount.label(
                'result_BlastBetsCredit_betAmount'),
            result_BlastBetsCredit.c.state.label(
                'result_BlastBetsCredit_state'))
        result_BBC_right_l = result_BBC_right_l.outerjoin(
            result_BlastBets,
            result_BlastBets.c.username == result_BlastBetsCredit.c.username)

        result_all_1 = union(result_BB_left_l, result_BBC_right_l)
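        # The union of the two outer joins above emulates a FULL OUTER JOIN
        # (which MySQL does not support natively).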
        user_alias = aliased(result_all_1, name='user_alias')
        user_alias = db.session.query(user_alias).order_by().all()
예제 #53
0
def create(tallySheetId):
    tallySheet, tallySheetVersion = TallySheet.create_latest_version(
        tallySheetId=tallySheetId, tallySheetCode=TallySheetCodeEnum.PRE_34_PD)

    query = db.session.query(
        Election.Model.electionId,
        Area.Model.areaId,
        ElectionCandidate.Model.candidateId,
        func.sum(
            func.IF(TallySheetVersionRow_PRE_30_PD.Model.count == None, 0,
                    TallySheetVersionRow_PRE_30_PD.Model.count)).label(
                        "firstPreferenceCount"),
        func.sum(
            func.IF(
                and_(TallySheetVersionRow_PRE_34_preference.Model.
                     preferenceNumber == 2),
                TallySheetVersionRow_PRE_34_preference.Model.preferenceCount,
                0)).label("secondPreferenceCount"),
        func.sum(
            func.IF(
                and_(TallySheetVersionRow_PRE_34_preference.Model.
                     preferenceNumber == 3),
                TallySheetVersionRow_PRE_34_preference.Model.preferenceCount,
                0)).label("thirdPreferenceCount"),
    ).join(
        Submission.Model, Submission.Model.areaId == Area.Model.areaId).join(
            Election.Model,
            Election.Model.electionId == Area.Model.electionId).join(
                ElectionCandidate.Model,
                or_(
                    ElectionCandidate.Model.electionId ==
                    Election.Model.electionId,
                    ElectionCandidate.Model.electionId ==
                    Election.Model.parentElectionId)).join(
                        TallySheet.Model,
                        and_(
                            TallySheet.Model.tallySheetId ==
                            Submission.Model.submissionId,
                            TallySheet.Model.tallySheetCode.in_([
                                TallySheetCodeEnum.PRE_30_PD,
                                TallySheetCodeEnum.PRE_34_I_RO
                            ]))
                    ).join(
                        TallySheetVersionRow_PRE_30_PD.Model,
                        and_(
                            TallySheetVersionRow_PRE_30_PD.Model.
                            tallySheetVersionId == Submission.Model.
                            lockedVersionId,
                            TallySheetVersionRow_PRE_30_PD.Model.candidateId ==
                            ElectionCandidate.Model.candidateId),
                        isouter=True).join(
                            TallySheetVersionRow_PRE_34_preference.Model,
                            and_(
                                TallySheetVersionRow_PRE_34_preference.Model.
                                tallySheetVersionId ==
                                Submission.Model.lockedVersionId,
                                TallySheetVersionRow_PRE_34_preference.Model.
                                candidateId ==
                                ElectionCandidate.Model.candidateId),
                            isouter=True).filter(
                                Area.Model.areaId ==
                                tallySheet.submission.areaId,
                                ElectionCandidate.Model.qualifiedForPreferences
                                == True).group_by(
                                    ElectionCandidate.Model.candidateId,
                                    Submission.Model.areaId).order_by(
                                        ElectionCandidate.Model.candidateId,
                                        Submission.Model.areaId).all()
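
    # Note: func.IF renders MySQL's IF(expr, a, b); a portable alternative
    # would be SQLAlchemy's case() construct (not used here).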

    is_complete = True  # TODO:Change other reports to validate like this
    for row in query:
        if (row.candidateId is not None
                and row.firstPreferenceCount is not None
                and row.secondPreferenceCount is not None
                and row.thirdPreferenceCount is not None):
            tallySheetVersion.add_row(electionId=row.electionId,
                                      candidateId=row.candidateId,
                                      preferenceNumber=1,
                                      preferenceCount=row.firstPreferenceCount)
            tallySheetVersion.add_row(
                electionId=row.electionId,
                candidateId=row.candidateId,
                preferenceNumber=2,
                preferenceCount=row.secondPreferenceCount)
            tallySheetVersion.add_row(electionId=row.electionId,
                                      candidateId=row.candidateId,
                                      preferenceNumber=3,
                                      preferenceCount=row.thirdPreferenceCount)
        else:
            is_complete = False

    if is_complete:
        tallySheetVersion.set_complete()

    db.session.commit()

    return TallySheetVersionSchema().dump(tallySheetVersion).data
예제 #54
0
#imports used in this example
import pandas as pd
import matplotlib.pyplot as plt
from sqlalchemy import create_engine, MetaData, Table, select, func

#connect to DB
engine = create_engine("sqlite:///census.sqlite")
connection = engine.connect()

#set up metadata
metadata = MetaData()

#get the table (there is only one in census.sqlite)
census = Table('census', metadata, autoload=True, autoload_with=engine)

#A pandas DataFrame can take a SQLAlchemy ResultSet as an argument
#but you will need to supply the column names separately
#let's take a query we did in census3.py
stmt = select([
    census.columns.sex,
    func.sum(census.columns.pop2008).label('pop2008_sum')
])
stmt = stmt.group_by(census.columns.sex)
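
#print(stmt) at this point would show roughly:
#  SELECT census.sex, sum(census.pop2008) AS pop2008_sum
#  FROM census GROUP BY census.sex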
results = connection.execute(stmt).fetchall()

#now we just use the ResultSet as the argument for pandas.DataFrame()
df = pd.DataFrame(results)
print(df)

#NOTE: pandas won't automatically import the column names at this time, so you must do it manually
#usually with just the keys of the first result, that's the easiest way to ensure all columns are met
df.columns = results[0].keys()
print(df)

df.plot.bar()
plt.xticks(df.index, df.sex)
plt.show()
예제 #55
0
 def aggregate(cls, value):
     return {
         'weight': func.sum(cls.weight),
         'value': func.sum(cls.value),
     }[value]
예제 #56
0
파일: reserve.py 프로젝트: gkoller/SuPA
    def _port_resources_in_use(
            self, session: orm.Session) -> Dict[str, PortResources]:
        """Calculate port resources in use for active reservations that overlap with ours.

        Active reservations being those that:

        - are currently being held
        - have been committed and not yet been terminated.

        Overlap as in: their start times and end times overlap with ours.

        The bandwidth in use is calculated per port.
        Eg, if a port is used in two active reservations,
        (one reservation for a connection with a bandwidth of 100 Mbps
        and another with a bandwidth of 400 Mbps)
        the bandwidth in use for the port will be:
        100 + 400 = 500 Mbps.

        Similarly for the VLANs in use.
        Given the same port used in two active reservations
        (one reservation where the port has a VLAN of 100
        and another one where the port has a VLAN of 105),
        the VLANs in use for the port will be:
        VlanRanges([100, 105])

        Args:
            session: A SQLAlchemy session to construct and run the DB query

        Returns:
            A dict mapping port (names) to their port resources.

        """
        # To calculate the active overlapping reservation we need to perform a self-join.
        # One part of the join is for our (current) reservation.
        # The other part is for joining the overlapping ones with our (current) reservation.
        CurrentReservation = aliased(Reservation, name="cr")
        overlap_active = (
            # The other part
            session.query(Reservation).join((
                CurrentReservation,
                # Do they overlap?
                and_(
                    CurrentReservation.start_time < Reservation.end_time,
                    CurrentReservation.end_time > Reservation.start_time,
                ),
            )).filter(
                # Only select active reservations
                or_(
                    and_(
                        Reservation.reservation_state ==
                        ReservationStateMachine.ReserveStart.name,
                        Reservation.provisioning_state.isnot(None),
                        Reservation.lifecycle_state ==
                        LifecycleStateMachine.Created.name,
                    ),
                    Reservation.reservation_state ==
                    ReservationStateMachine.ReserveHeld.name,
                ))
            # And only those that overlap with our reservation.
            .filter(CurrentReservation.connection_id == self.connection_id
                    )).subquery()
        OverlappingActiveReservation = aliased(Reservation,
                                               overlap_active,
                                               name="oar")

        # To map ports to resources (bandwidth and vlan) in use
        # we need to unpivot the two pair of port columns from the reservations table into separate rows.
        # Eg, from:
        #
        # row 1:  connection_id, ..., src_port, src_selected_vlan, dst_port, dst_selected_vlan, ...
        #
        # to:
        #
        # row 1: connection_id, port, vlan  <-- former src_port, src_selected_vlan
        # row 2: connection_id, port, vlan  <-- former dst_port, dst_selected_vlan
        src_port = session.query(
            Reservation.connection_id.label("connection_id"),
            Reservation.src_port.label("port"),
            Reservation.src_selected_vlan.label("vlan"),
        )
        dst_port = session.query(
            Reservation.connection_id,
            Reservation.dst_port.label("port"),
            Reservation.dst_selected_vlan.label("vlan"),
        )
        ports = src_port.union(dst_port).subquery()

        # With the 'hard' work done for us in two subqueries,
        # calculating the port resources (bandwidth, VLANs) in use is now relatively straightforward.
        port_resources_in_use = (
            session.query(
                ports.c.port,
                func.sum(
                    OverlappingActiveReservation.bandwidth).label("bandwidth"),
                func.group_concat(ports.c.vlan,
                                  ",").label("vlans"),  # yes, plural!
            ).select_from(OverlappingActiveReservation).join(
                ports, OverlappingActiveReservation.connection_id ==
                ports.c.connection_id).filter(
                    ports.c.port.in_((
                        OverlappingActiveReservation.src_port,
                        OverlappingActiveReservation.dst_port,
                    ))).group_by(ports.c.port).all())

        return {
            rec.port: PortResources(bandwidth=rec.bandwidth,
                                    vlans=VlanRanges(rec.vlans))
            for rec in port_resources_in_use
        }
예제 #57
0
 def get_category_type_items_quantity(cls, categoryTypeID):
     return db.session.query(func.sum(
         cls.quantity).label("summ")).filter_by(
             categoryTypeID=categoryTypeID).first().summ
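
 # Note: .summ is None when no rows match; func.coalesce(func.sum(...), 0)
 # would give 0 instead (a suggestion, not in the original).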
예제 #58
0
def return_industry_and_its_total_EV():
    # alpha-ordered industry list with each industry's total EV
    return session.query(Company.industry, func.sum(
        Company.enterprise_value)).group_by(Company.industry).all()
예제 #59
0
 def get_total_pay(restaurant_id):
     num = db.session.query(func.sum(Order.pay_price)).filter(
         Order.rid == restaurant_id, Order.status == 1).scalar()
     return num
예제 #60
0
# * roster:
#     - TEAGE - respondent age
#     - TESEX - respondent sex

activity = tables['activity']
print(activity.columns)

# We can use Python instead of SQL code to build up a query, and it will
# automatically be translated to match the SQL dialect used by the database
# we're querying.

from sqlalchemy import select, literal_column, func

sleep_sums_q = select([
    activity.c.TRCODE,
    func.sum(activity.c.TUACTDUR24).label('SLEEP')
]). \
    where(activity.c.TRCODE == 10101). \
    group_by(activity.c.TUCASEID)

print(sleep_sums_q)
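
# The printed statement looks roughly like this (uppercase identifiers are
# quoted by SQLAlchemy):
#   SELECT activity."TRCODE", sum(activity."TUACTDUR24") AS "SLEEP"
#   FROM activity
#   WHERE activity."TRCODE" = :TRCODE_1 GROUP BY activity."TUCASEID"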

# To query the database, we have to set up a connection.

conn = engine.connect()

# Now we can execute our query; the result is an iterator over rows.

result = conn.execute(sleep_sums_q)

for row in result: