Code Example #1
File: views.py Project: ruipacheco/fruitshow
def archive(year=0, month=0):
    """
    Shows archived threads, meaning all threads sorted by year and month.
    If no year is passed, then a list of all the years for which we have archived topics is displayed.
    If a year is passed, a list of all the months for which there are archived topics is displayed.
    If a month is passed, we show all archived topics for that month.

    @todo Need to pass the timezone to the extract() function.

    :param year:
    :type year: int
    :param month:
    :type month: int
    """
    if year > 0 and month > 0:
        elements = Topic.query.filter(func.extract('YEAR', Topic.date_created) == year,
                                      func.extract('MONTH', Topic.date_created) == month).all()
    else:
        if year > 0 and month == 0:
            results = db.session.query(distinct(func.extract('MONTH', Topic.date_created))) \
                .filter(func.extract('YEAR', Topic.date_created) == year)
        else:
            # no year given: list the years (this also guards against an
            # unbound `results` when only a month is passed)
            results = db.session.query(distinct(func.extract('YEAR', Topic.date_created)))
        elements = []
        for result in results.all():
            elements.append(int(result[0]))
    return render_template('archive.html', elements=elements, year=year, month=month)
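For reference, a quick way to see what these EXTRACT filters compile to (a minimal sketch, assuming a PostgreSQL dialect; the standalone column is illustrative, and func.extract(), as used throughout these examples, resolves to the same construct):

from sqlalchemy import Column, DateTime, extract
from sqlalchemy.dialects import postgresql

date_created = Column('date_created', DateTime)
expr = extract('YEAR', date_created) == 2023
print(expr.compile(dialect=postgresql.dialect()))
# -> EXTRACT(YEAR FROM date_created) = %(param_1)s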
Code Example #2
    def get(self):
        """
        Help find the daily count of vouchers created within a range of month
        """
        args = monthQuery_parser.parse_args()
        authentication_header_parser(args['Authorization'])

        str_year = args['year']
        try:
            year = int(str_year)
        except ValueError:
            return '', http.client.BAD_REQUEST

        result = {}

        if year < 2020:
            return '', http.client.BAD_REQUEST

        for month in range(1, 13):
            vouchers = (db.session.query(func.count(VoucherModel.id)).filter(
                func.extract('year', VoucherModel.timestamp) == year).filter(
                    func.extract('month', VoucherModel.timestamp) ==
                    month).all())

            result[f'{month}'] = vouchers[0][0]

        return result
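As an aside, each per-month fetch above can be collapsed with scalar(), which returns the first column of the first row directly; a minimal sketch under the same models:

count = db.session.query(func.count(VoucherModel.id)).filter(
    func.extract('year', VoucherModel.timestamp) == year,
    func.extract('month', VoucherModel.timestamp) == month).scalar()
result[f'{month}'] = count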
Code Example #3
def precipitations():
    """Return a list of past 12 months precip"""
    # Create our session (link) from Python to the DB
    session = Session(engine)

    # Find the most recent measurement date, then query the precip
    # for the 12 months preceding it
    start_date = session.query(func.max(Measurement.date)).scalar()

    sub_months = dt.datetime.strptime(start_date,
                                      '%Y-%m-%d') + relativedelta(days=-366)

    last_12 = pd.read_sql(
        session.query(Measurement.date, Measurement.prcp,
                      func.extract('year', Measurement.date).label('year'),
                      func.extract('week', Measurement.date).label('week'))
               .filter(Measurement.date >= sub_months)
               .order_by(Measurement.date.asc())
               .statement,
        engine)

    session.close()

    # Convert list of tuples into normal list
    all_precip = list(np.ravel(last_12))

    return jsonify(all_precip)
Code Example #4
File: test_lesson_rules.py Project: zhmkof/Dryvo
def test_init_hours(student, teacher, meetup, dropoff):
    # TODO improve test logic
    initial_hours = LessonRule.hours
    date = datetime.utcnow().replace(hour=6, minute=0) + timedelta(days=2)
    Appointment.create(
        teacher=teacher,
        student=student,
        creator=teacher.user,
        duration=teacher.lesson_duration,
        date=date,
        meetup_place=meetup,
        dropoff_place=dropoff,
        is_approved=True,
    )
    query = Appointment.query.filter(
        func.extract("day", Appointment.date) == date.day).filter(
            func.extract("month", Appointment.date) == date.month)
    new_hours = LessonRule.init_hours(
        date,
        student,
        teacher.work_hours_for_date(date),
        teacher.taken_appointments_tuples(query, only_approved=True),
    )
    assert initial_hours != new_hours
    # we want to fill the gap after 6, so hours 7 and 8 should be really cold
    hour_8 = new_hours[1].score
    hour_7 = new_hours[0].score
    old_hour_7 = initial_hours[0].score
    old_hour_8 = initial_hours[1].score
    assert hour_7 < old_hour_7
    assert hour_8 < old_hour_8
Code Example #5
    def birth_time(cls):
        hour = cast(func.extract("hour", cls.birth_datetime), String)
        minute = cast(func.extract("minute", cls.birth_datetime), String)

        hour = case([(func.length(hour) == 1, "0" + hour)], else_=hour)
        minute = case([(func.length(minute) == 1, "0" + minute)], else_=minute)
        return hour + ":" + minute
Code Example #6
 def round_time(time_expr, ceil=False):
     round_func = func.ceil if ceil else func.trunc
     step_epoch = func.extract('epoch', literal_column('arg_step'))
     return cast(
         func.to_timestamp(
             round_func(func.extract('epoch', time_expr) / step_epoch) *
             step_epoch), timestamptz)
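The literal_column('arg_step') above presumably refers to a parameter of an enclosing SQL function. A self-contained sketch of the same epoch-bucketing idea with a fixed step (assumptions: PostgreSQL, 5-minute buckets, timezone-aware timestamps):

from datetime import timedelta
from sqlalchemy import cast, func, literal
from sqlalchemy.dialects.postgresql import TIMESTAMP

def round_time_fixed(time_expr, step=timedelta(minutes=5), ceil=False):
    # trunc/ceil the epoch to a whole number of steps, then convert back
    round_func = func.ceil if ceil else func.trunc
    step_seconds = literal(step.total_seconds())
    return cast(
        func.to_timestamp(
            round_func(func.extract('epoch', time_expr) / step_seconds)
            * step_seconds),
        TIMESTAMP(timezone=True))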
Code Example #7
def dbtest(bot, update, args, chat_data):
    chat_id = update.message.chat_id
    unix_time = int(time.time())
    t = datetime.date.today()
    print(type(t.year))

    try:
        year = args[0]
        month = args[1]
        if not (year.isdigit() and month.isdigit()):
            update.message.reply_text('Invalid Month')
            return
        else:
            #req_date = datetime.datetime(int(year), int(month), 1)
            req_date = datetime.date.today()
            #print(req_date, req_date.year, req_date.month)
            query_result = session.query(MiningHistory) \
                .filter(func.extract('year', MiningHistory.timestamp) == req_date.year) \
                .filter(func.extract('month', MiningHistory.timestamp) == req_date.month)
            #print(query_result)
            msg = 'Requested Date : {}-{}\n'.format(year, month)
            msg += 'Time, MinedCoin, MinedAmount, BTC\n'
            total_amount = 0.0
            total_amount_btc = 0.0
            for query in query_result:
                total_amount += float(query.amount)
                total_amount_btc += float(query.amount_btc)
                msg += '{}, {}, {}, {}\n'.format(query.timestamp,
                                                 query.currency, query.amount,
                                                 query.amount_btc)
            msg += 'Averaged mined coin = {}, {}\n'.format(
                total_amount / float(req_date.day),
                total_amount_btc / float(req_date.day))

            update.message.reply_text(msg)

    except (IndexError):
        today = datetime.date.today()
        query_result = session.query(MiningHistory) \
            .filter(func.extract('year', MiningHistory.timestamp) == today.year) \
            .filter(func.extract('month', MiningHistory.timestamp) == today.month)

        total_amount = 0.0
        total_amount_btc = 0.0
        msg = 'Requested Date : {}-{}\n'.format(today.year, today.month)
        msg += 'Time, MinedCoin, MinedAmount, BTC\n'
        for query in query_result:
            total_amount += float(query.amount)
            total_amount_btc += float(query.amount_btc)
            msg += '{}, {}, {:.6f}, {:.6f}\n'.format(query.timestamp,
                                                     query.currency,
                                                     query.amount,
                                                     query.amount_btc)

        msg += '\nDaily Averaged = {:.6f}, {:.6f}\n'.format(
            total_amount / float(today.day),
            total_amount_btc / float(today.day))

        update.message.reply_text(msg)
Code Example #8
def csv_update_backlog(session, csvdate):
    query = session.query(
        func.count(Data.vaers_id).label('events'),
        func.extract('month', Data.recvdate).label('month'))
    query = query.filter(Data.csvdate == csvdate)
    query = query.group_by(func.extract('month', Data.recvdate))
    query = query.order_by('month')
    return query
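Hypothetical consumption of the query above; each row pairs a month number with the count of events received in that month for the given CSV snapshot:

for row in csv_update_backlog(session, csvdate).all():
    print(int(row.month), row.events)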
Code Example #9
def generate_daily_reports(date):
    # Need to pass app context around because of how flask works
    # can take a single argument date as follows
    # flask generate_daily_reports --date 2017/01/31 will compute the billings for jan 2017, up to the 31st day of
    # January

    try:
        timeend = datetime.strptime(date, '%Y/%m/%d').replace(tzinfo=pytz.UTC).replace(minute=0, second=0, hour=0, microsecond=0)
    except (TypeError, ValueError):  # no/invalid date given; fall back to now
        timeend = datetime.utcnow().replace(tzinfo=pytz.UTC).replace(minute=0, second=0, hour=0, microsecond=0)


    # HANDLE CLOSING OUT BILLINGS at end of month
    if timeend.day == 1:
        projects = get_projects_list()
        for project in projects:
            # note: must use == False (or .is_(False)); "is False" would test
            # the Column object's identity in Python, not build SQL
            bill = Billing.query().filter(func.extract('month', Billing.end_date) == getLastMonth(timeend.month)) \
                .filter(Billing.closed_out == False).filter(Billing.project == project).first()
            if bill:
                bill.update(end_date=timeend, closed_out=True)


    monthstart = timeend.replace(day=1)
    projects = get_projects_list()
    seconds_into_month = (timeend-monthstart).total_seconds()
    daysinmonth = calendar.monthrange(timeend.year, timeend.month)[1]
    portion_of_month = Decimal(seconds_into_month)/Decimal(daysinmonth*3600*24)

    for project in projects:
        print(project)
        file_size = get_previous_file_sizes(monthstart, project=project)
        this_months_files = get_months_uploads(project, monthstart, timeend)
        compute_cost_search =  make_search_filter_query(monthstart,timeend,project)
        compute_costs = get_compute_costs(compute_cost_search)
        analysis_compute_json = create_analysis_costs_json(compute_cost_search['hits']['hits'], monthstart, timeend)

        all_proj_files = get_previous_file_sizes(timeend, project)['hits']['hits']
        analysis_storage_json = create_storage_costs_json(all_proj_files, monthstart, timeend, daysinmonth*3600*24)
        storage_costs = get_storage_costs( file_size, portion_of_month,
                                            this_months_files, timeend, daysinmonth*3600*24)


        bill = Billing.query().filter(Billing.project == project).filter(func.extract('month', Billing.start_date) == monthstart.month).first()
        itemized_costs = {
            "itemized_compute_costs": analysis_compute_json,
            "itemized_storage_costs": analysis_storage_json
        }
        try:
            if bill:
                bill.update(compute_cost=compute_costs, storage_cost=storage_costs, end_date=timeend,
                            cost_by_analysis=itemized_costs)
            else:
                Billing.create(compute_cost=compute_costs, storage_cost=storage_costs, start_date=monthstart, \
                                end_date=timeend, project=project, closed_out=False,
                                cost_by_analysis=itemized_costs)
        except Exception:
            print("IT'S GONE FAR SOUTH")
Code Example #10
 def __init__(self, date, student, hours, places):
     super().__init__(date, student, hours)
     self.meetup_place_id = places[0]
     self.dropoff_place_id = places[1]
     self.today_lessons = self.student.teacher.lessons.filter(
         Appointment.approved_filter(
             func.extract("day", Appointment.date) == self.date.day,
             func.extract("month", Appointment.date) == self.date.month,
         )).all()
Code Example #11
File: birthday.py Project: NoOne-dev/RobotEgg
 def _check_today(self):
     date = datetime.datetime.now().date()
     try:
         return session.query(Birthday_Table)\
             .filter(func.extract('day', Birthday_Table.birthday) == date.day)\
             .filter(func.extract('month', Birthday_Table.birthday) == date.month)\
             .all()
     except Exception as e:
         print(e)
         return False
Code Example #12
    def serialize_participant_phones(self, participant_set, zip_file):
        query = participant_set.participants.join(PhoneContact).with_entities(
            cast(Participant.uuid, String), PhoneContact.number,
            func.extract('epoch', PhoneContact.created),
            func.extract('epoch', PhoneContact.updated), PhoneContact.verified)

        with zip_file.open('phone-contacts.ndjson', 'w') as f:
            for record in query:
                line = f'{json.dumps(record)}\n'
                f.write(line.encode('utf-8'))
Code Example #13
File: models.py Project: ddkaty/flask-blog
 def filter_by_date(self, year=None, month=None, day=None):
     """Return posts by specified date."""
     terms = []
     if year:
         terms.append(func.extract('year', Post.published) == year)
     if month:
         terms.append(func.extract('month', Post.published) == month)
     if day:
         terms.append(func.extract('day', Post.published) == day)
     return Post.query.filter(and_(*terms))
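One caveat worth noting: wrapping the column in EXTRACT generally prevents a plain b-tree index on Post.published from being used. An index-friendly sketch for the year+month case (assuming Post.published is a datetime column):

from datetime import datetime

def filter_by_month_range(year, month):
    # half-open range [start, end) covering the requested month
    start = datetime(year, month, 1)
    end = datetime(year + 1, 1, 1) if month == 12 else datetime(year, month + 1, 1)
    return Post.query.filter(Post.published >= start, Post.published < end)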
Code Example #14
File: models.py Project: ddkaty/flask-blog
 def date_archive(self):
     """Return archive where every post grouped by year and month."""
     year = func.extract('year', Post.published)
     month = func.extract('month', Post.published)
     count = func.count()
     return db.session.query(year.label('year'),
                             month.label('month'),
                             count.label('entries')) \
                      .order_by(year.desc(), month.desc()) \
                      .group_by(year, month) \
                      .all()
Code Example #15
File: test_lesson_rules.py Project: zhmkof/Dryvo
def hours(student, teacher):
    date = datetime.utcnow() + timedelta(days=2)
    query = Appointment.query.filter(
        func.extract("day", Appointment.date) == date.day).filter(
            func.extract("month", Appointment.date) == date.month)
    return LessonRule.init_hours(
        date,
        student,
        teacher.work_hours_for_date(date),
        teacher.taken_appointments_tuples(query, only_approved=True),
    )
Code Example #16
def show_kintai_history_csv(message, time=None):
    """Return the attendance records for the given month in CSV format

    :param message: class holding the various slackbot parameters
    :param str time: year and month separated by `/` (e.g. 2016/1)
    """
    user_id = message.body['user']
    if time:
        year_str, month_str = time.split('/')
    else:
        now = datetime.datetime.now()
        year_str, month_str = now.strftime('%Y'), now.strftime('%m')
    year, month = int(year_str), int(month_str)

    if not 1 <= month <= 12:
        message.send('The specified month does not exist')
        return

    s = Session()
    qs = (s.query(KintaiHistory).filter(
        KintaiHistory.user_id == user_id).filter(
            func.extract('year', KintaiHistory.registered_at) == year).filter(
                func.extract('month', KintaiHistory.registered_at) == month))

    kintai = defaultdict(list)
    for q in qs:
        registered_at = q.registered_at.strftime('%Y-%m-%d')
        kintai[registered_at].append(
            (q.is_workon, '{:%H:%M:%S}'.format(q.registered_at)))  # 24-hour clock (%I would drop AM/PM)

    rows = []
    for day in range(1, monthrange(year, month)[1] + 1):
        aligned_date = '{}-{:02d}-{:02d}'.format(year, month, day)
        workon, workoff = '', ''
        for d in sorted(kintai[aligned_date]):
            if d[0]:
                workon = d[1]
            else:
                workoff = d[1]
        rows.append([aligned_date, workon, workoff])

    output = StringIO()
    w = csv.writer(output)
    w.writerows(rows)

    param = {
        'token': settings.API_TOKEN,
        'channels': message.body['channel'],
        'title': 'Attendance records'
    }
    requests.post(settings.FILE_UPLOAD_URL,
                  params=param,
                  files={'file': output.getvalue()})
Code Example #17
File: dal.py Project: enkore/potstats2
def daily_statistics_agg(session):
    """
    Aggregate statistics for each day in each year.

    Result columns:
    - day_of_year, year
    - bid
    - post_count, edit_count, posts_length, threads_created, active_users
    - active_threads: list of dicts of the most active threads (w.r.t. post count) of the day.
      Each dict consists of json_thread_columns (tid, [sub]title) plus "thread_post_count".
    """
    year = func.extract('year', Post.timestamp).label('year')
    cte = aggregate_stats_segregated_by_time(session, func.extract('doy', Post.timestamp), 'day_of_year').subquery()

    json_thread_columns = (Thread.tid, Thread.title, Thread.subtitle)

    threads_active_during_time = (
        session
            .query(*json_thread_columns,
                   func.count(Post.pid).label('thread_post_count'),
                   func.extract('doy', Post.timestamp).label('doy'),
                   year,
                   Thread.bid,
                   func.row_number().over(
                       partition_by=tuple_(year, Thread.bid, func.extract('doy', Post.timestamp)),
                       order_by=tuple_(desc(func.count(Post.pid)), Thread.tid)
                   ).label('rank'))
            .join(Post.thread)
            .group_by(*json_thread_columns, 'doy', Thread.bid, year)
        ).subquery('tadt')

    active_threads = (
        session
        .query(threads_active_during_time.c.doy,
               threads_active_during_time.c.year,
               threads_active_during_time.c.bid,
               func.json_agg(column('tadt')).label('active_threads'))
        .select_from(threads_active_during_time)
        .filter(threads_active_during_time.c.rank <= 5)
        .group_by('doy', 'bid', 'year')
        .subquery()
    )

    return (
        session
        .query(
            *cte.c,
            active_threads.c.active_threads)
        .join(active_threads, and_(active_threads.c.doy == cte.c.day_of_year,
                                   active_threads.c.year == cte.c.year,
                                   active_threads.c.bid == cte.c.bid))
    )
Code Example #18
    def run_query(
        self, 
        operator_col, 
        activity_name, 
        athlete_id, 
        start_date, 
        end_date, 
        group_by):

        year_extract  = func.extract("year" , Activity.date).label('year')
        month_extract = func.extract("month", Activity.date).label('month')
        day_extract   = func.extract("day"  , Activity.date).label('day')

        #if a group by has been provided for period, perform groupby
        if group_by is not None:
            if (group_by == "year"):
                all_things = database.session\
                    .query(operator_col, year_extract)\
                    .group_by(year_extract)

            elif (group_by == "month"):
                all_things = database.session\
                    .query(operator_col, year_extract, month_extract)\
                    .group_by(year_extract, month_extract)

            elif (group_by == "day"):
                all_things = database.session\
                    .query(operator_col, year_extract, month_extract, day_extract)\
                    .group_by(year_extract, month_extract, day_extract)

            else:
                # raising a bare string is invalid in Python 3
                raise ValueError("Incorrect group_by passed to StatisticsEngine")
        else:
            all_things = database.session.query(operator_col)

        # if an activity has been provided, filter by activity
        if activity_name is not None:
            all_things = all_things.filter(Activity.type == activity_name)

        # if an athlete_id has been provided, filter by athlete_id
        if athlete_id is not None:
            all_things = all_things.filter(Activity.athlete_id == athlete_id)

        # if a starting date has been provided, filter dates earlier than start
        if start_date is not None:
            all_things = all_things.filter(Activity.date >= start_date)

        # if an ending date has been provided, filter dates later than end        
        if end_date is not None:
            all_things = all_things.filter(Activity.date <= end_date)
        
        return all_things.all()
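A hypothetical call against run_query, summing a distance column per month for one athlete (the engine instance, the Activity.distance column, and the ids are illustrative):

rows = stats_engine.run_query(
    operator_col=func.sum(Activity.distance).label('total_distance'),
    activity_name='Run',
    athlete_id=42,
    start_date=None,
    end_date=None,
    group_by='month')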
Code Example #19
File: callbacks.py Project: AdaptationSongs/earth-hz
 def update_graph(query, selected_label_id, min_prob, max_prob, start_hour,
                  end_hour, selected_iteration_id):
     try:
         if selected_label_id is None:
             raise ValueError('no label selected')  # caught below; renders the placeholder figure
         project_id = get_project_id(query)
         with dashapp.server.app_context():
             per_file = db.session.query(
                 ModelOutput.file_name,
                 func.count(ModelOutput.id).label('count')).filter(
                     ModelOutput.iteration_id == selected_iteration_id,
                     ModelOutput.label_id == selected_label_id,
                     ModelOutput.probability >= float(min_prob),
                     ModelOutput.probability <= float(max_prob)).group_by(
                         ModelOutput.file_name).subquery()
             per_day = db.session.query(
                 func.min(MonitoringStation.id).label('station_id'),
                 MonitoringStation.name.label('station'),
                 func.sum(per_file.columns.count).label('count'),
                 func.date(AudioFile.timestamp).label('date')).join(
                     AudioFile, per_file.columns.file_name == AudioFile.name
                 ).join(AudioFile.monitoring_station).filter(
                     MonitoringStation.project_id == project_id,
                     func.extract('hour', AudioFile.timestamp) >=
                     start_hour,
                     func.extract('hour', AudioFile.timestamp) <= end_hour,
                 ).group_by('station').group_by('date').order_by('date')
             df = pd.read_sql(per_day.statement, db.session.bind)
             station_colors = get_station_colors(project_id)
             label = Label.query.get(selected_label_id)
         fig = px.line(
             df,
             x='date',
             y='count',
             custom_data=['station_id', 'station'],
             color='station',
             color_discrete_map=station_colors,
             title='Daily count for {label} {start}:00-{end}:59'.format(
                 label=label, start=start_hour, end=end_hour))
         fig.update_traces(mode='markers')
     except Exception:
         fig = go.Figure()
         fig.update_layout(title='Daily count',
                           annotations=[{
                               'text': 'Please select a label',
                               'showarrow': False,
                               'font': {
                                   'size': 28
                               }
                           }])
     return fig
Code Example #20
    def available_hours(
        self,
        requested_date: datetime,
        student: "Student" = None,
        duration: int = None,
        only_approved: bool = False,
        places: Tuple[Optional[str]] = (None, None),
    ) -> Iterable[Tuple[datetime, datetime]]:
        """
        1. calculate available hours - decrease existing lessons times from work hours
        2. calculate lesson hours from available hours by default lesson duration
        MUST BE 24-hour format. 09:00, not 9:00
        """
        if not requested_date:
            return []

        todays_appointments = self.appointments.filter(
            func.extract("day", Appointment.date) == requested_date.day
        ).filter(
            func.extract("month", Appointment.date) == requested_date.month)
        work_hours = self.work_hours_for_date(requested_date, student=student)
        taken_appointments = self.taken_appointments_tuples(
            todays_appointments, only_approved)
        blacklist_hours = {"start_hour": set(), "end_hour": set()}
        if student and work_hours:
            approved_taken_appointments = self.taken_appointments_tuples(
                todays_appointments, only_approved=True)
            hours = LessonRule.init_hours(requested_date, student, work_hours,
                                          approved_taken_appointments)
            for rule_class in rules_registry:
                rule_instance: LessonRule = rule_class(requested_date, student,
                                                       hours, places)
                blacklisted = rule_instance.blacklisted()
                for key in blacklist_hours.keys():
                    blacklist_hours[key].update(blacklisted[key])

        work_hours.sort(key=lambda x: x.from_hour)  # sort from early to late
        for slot in work_hours:
            hours = (
                requested_date.replace(hour=slot.from_hour,
                                       minute=slot.from_minutes),
                requested_date.replace(hour=slot.to_hour,
                                       minute=slot.to_minutes),
            )
            yield from get_slots(
                hours,
                taken_appointments,
                timedelta(minutes=duration or self.lesson_duration),
                force_future=True,
                blacklist=blacklist_hours,
            )
Code Example #21
File: views.py Project: millsja/Klompenflasken
def getAOTByCreator(userId, days):

	# stringify our date into the form "YYYY-MM-DD"
	# because this is the most logical way to look
	# at dates...
	def strDate( date ):
		return str(date.year) + "-" + \
			str(date.month) + "-" +\
			str(date.day)

	# search awards for str date, and set corresponding
	# count to the new count value
	def addAtDate( awards, strDate, count ):
		for q in awards:
			if q['day'] == strDate:
				q['count'] = count
				break	
		return awards
	

	# pull our by-date data from the database
	query = session.query( award.creatorID, 
		award.awardDate,
		func.count(award.id).label('count')).\
		filter( award.creatorID == userId ).\
		group_by( award.creatorID, award.awardDate,
			func.extract('year', award.awardDate), 
			func.extract('month', award.awardDate), 
			func.extract('day', award.awardDate)).\
		order_by( desc(award.awardDate) )

	# now add a date entry for each day from today
	# back to start - days i.e. only go back 7, 30, 90
	# days and make sure that even if there were no
	# awards for a day that there is still an entry
	awards = []
	startDate = datetime.today() - timedelta(days=days)
	endDate = datetime.now()
	while endDate >= startDate:
		awards.append( { "day" : strDate(endDate),
				 "count" : 0 } )
		endDate = endDate - timedelta(days=1)

	for q in query:
		errorPrint(str(q[1]) + " processed...")
		if q[1] < startDate:
			break
		awards = addAtDate( awards, strDate(q[1]), q[2])

	return awards
Code Example #22
def get_user_activity(session):
    """Create a plot showing the user statistics."""
    # Create a subquery to ensure that the user fired a inline query
    # Group the new users by date
    creation_date = cast(User.created_at, Date).label('creation_date')
    all_users_subquery = session.query(creation_date, func.count(User.id).label('count')) \
        .filter(User.inline_queries.any()) \
        .group_by(creation_date) \
        .subquery()

    # Create a running window which sums all users up to this point for the current millennium ;P
    all_users = session.query(
        all_users_subquery.c.creation_date,
        cast(func.sum(all_users_subquery.c.count).over(
            partition_by=func.extract('millennium', all_users_subquery.c.creation_date),
            order_by=all_users_subquery.c.creation_date.asc(),
        ), Integer).label('running_total'),
    ) \
        .order_by(all_users_subquery.c.creation_date) \
        .all()
    all_users = [('all', q[0], q[1]) for q in all_users]

    # Combine the results in a single dataframe and name the columns
    user_statistics = all_users
    dataframe = pandas.DataFrame(user_statistics,
                                 columns=['type', 'date', 'users'])

    # Plot each result set
    fig, ax = plt.subplots(figsize=(30, 15), dpi=120)
    for key, group in dataframe.groupby(['type']):
        ax = group.plot(ax=ax, kind='line', x='date', y='users', label=key)

    image = image_from_figure(fig)
    image.name = 'user_statistics.png'
    return image
Code Example #23
def years_with_events(series_slug):
    query = db.session.query(tables.Event)
    query = query.filter(tables.Event.series_slug == series_slug)
    year_col = func.extract('year', tables.Event.date)
    query = query.with_entities(year_col)
    query = query.order_by(year_col)
    return [x[0] for x in query.distinct()]
Code Example #24
    def get_totals_by_hour(self):
        hourly_charts_data = dict()
        days_in_search = (self.end_date - self.start_date).days

        cases = []
        for i in range(24):
            cases.append(
                func.count(case([(func.extract('hour',
                                               Sample.date) == i, 1)])))

        q = db.session.query(Sample.location, Sample.station,
                             *cases).group_by(Sample.location, Sample.station)

        q = add_sample_search_filters(q, self.filters)
        for result in q:
            location, station = result[0], result[1]
            if location not in hourly_charts_data:
                hourly_charts_data[location] = dict()
            # Shift the hourly counts from UTC to the local time zone
            # (apparently UTC-6) by moving the first six results from the
            # start of the list to the end.
            offset = 6
            counts = result[2:]
            counts = counts[offset:] + counts[:offset]
            hourly_charts_data[location][station] = [
                round(i / days_in_search + .4) for i in counts
            ]
        return hourly_charts_data
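If the six-hour rotation above is meant to localise UTC timestamps, an alternative is to convert in the database before extracting the hour, which also respects daylight saving time (a sketch, assuming PostgreSQL; the zone name is illustrative):

local_hour = func.extract('hour', func.timezone('America/Chicago', Sample.date))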
Code Example #25
File: context.py Project: soralis-nem/cryptowelder
    def delete_metrics(self, cutoff_time, *, exclude_minutes=None):

        session = self.__session()

        try:

            filters = [Metric.mc_time < cutoff_time]

            if exclude_minutes is not None and len(exclude_minutes) > 0:
                filters.append(
                    cast(func.extract('minute', Metric.mc_time),
                         Integer).notin_(exclude_minutes))

            count = session.query(Metric).filter(*filters).delete(
                synchronize_session='fetch')

            if count > 0:
                session.commit()

        except BaseException as e:

            self.__logger.error('Delete - %s : %s', type(e), e.args)

            session.rollback()

            raise e

        finally:

            session.close()

        return count
Code Example #26
    def filter_date(self, query, appstruct):
        """
        filter the query and restrict it to the given year
        """
        year = appstruct.get('year')
        date_range_start = appstruct.get('date_range_start')
        date_range_end = appstruct.get('date_range_end')

        if date_range_start not in (None, colander.null):
            query = query.filter(
                func.date(Activity.datetime) >= date_range_start
            )

        if date_range_end not in (None, colander.null):
            query = query.filter(
                func.date(Activity.datetime) <= date_range_end
            )

        if (
            year not in (None, colander.null, -1) and
            date_range_start in (None, colander.null) and
            date_range_end in (None, colander.null)
        ):
            query = query.filter(
                func.extract('YEAR', Activity.datetime) == year
            )
        return query
Code Example #27
File: dal.py Project: enkore/potstats2
def social_graph_agg(session, count_cutoff=10):
    """
    Retrieve social graph (based on how users quote each other).

    Result columns:

    year, bid, quoter_uid, quoted_uid, count

    An intensity can be derived as count relative to the maximum count in
    the current result (so intensity lies between zero and one).
    """

    quoter = aliased(User, name='quoter')
    quoted = aliased(User, name='quoted')
    quoted_post = aliased(Post, name='quoted_post')

    count = func.sum(PostQuotes.count).label('count')

    return (
        session
        .query(
            func.extract('year', Post.timestamp).label('year'), Thread.bid,
            quoter.uid.label('quoter_uid'), quoted.uid.label('quoted_uid'), count)
        .join(Post, PostQuotes.post)
        .join(Post.thread)
        .join(quoted_post, PostQuotes.quoted_post)
        .join(quoter, Post.poster)
        .join(quoted, quoted_post.poster)
        .group_by('year', Thread.bid, quoter.uid, quoted.uid)
        .having(count > count_cutoff)
    )
Code Example #28
    def get_totals_by_weekday(self):
        weekday_charts_data = dict()
        dow_counts = dow_count(self.start_date,
                               self.end_date - dt.timedelta(1))
        cases = []
        for i in range(7):
            cases.append(
                func.count(
                    case([(func.extract('isodow', Sample.date) == i + 1, 1)])))

        q = db.session.query(Sample.location, Sample.station,
                             *cases).group_by(Sample.location, Sample.station)
        q = add_sample_search_filters(q, self.filters)
        for result in q:
            location, station = result[0], result[1]
            if location not in weekday_charts_data:
                weekday_charts_data[location] = dict()
            weekday_charts_data[location][station] = []
            for dow, total in zip(range(7), result[2:]):
                if dow_counts[dow] > 0:
                    weekday_charts_data[location][station].append(
                        round(total / dow_counts[dow] + .4))
                else:
                    weekday_charts_data[location][station].append(total)
        return weekday_charts_data
Code Example #29
    def get_waiting_time(cls, unit):
        unit_time = 86400 if unit == UnitTime.DAY.value else 60 * 60 if unit == UnitTime.HR.value else 60
        median_waiting_time = db.session.query(
            func.percentile_cont(0.5).within_group((func.extract('epoch', Event.eventDate) -
                                                    func.extract('epoch', Request.submittedDate)) / unit_time).label(
                'examinationTime')). \
            join(Request, and_(Event.nrId == Request.id)). \
            filter(Event.action == EventAction.PATCH.value,
                   Event.stateCd.in_(
                       [EventState.APPROVED.value, EventState.REJECTED.value,
                        EventState.CONDITIONAL.value, EventState.CANCELLED.value]),
                   Event.userId != EventUserId.SERVICE_ACCOUNT.value,
                   Event.eventDate.cast(Date) >= (func.now() - timedelta(days=1)).cast(Date)
                   )

        return median_waiting_time
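For reference, percentile_cont(0.5) here is an ordered-set aggregate: it takes the median of the expression handed to within_group. A minimal sketch with a hypothetical precomputed column:

median = func.percentile_cont(0.5).within_group(Request.waiting_seconds.asc())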
Code Example #30
    def weekday(self):
        """Returns the weekday for the startdate ranging from 1 to 7.

        Where 1 is Monday and 7 is Sunday. The reason it ranges from 1 to 7
        is to match already existing implementations in the project.
        """

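        # NB: on PostgreSQL, extract('dow') runs 0 (Sunday) through 6
        # (Saturday), so this expression yields 1 for Sunday, not Monday;
        # extract('isodow') already returns 1 (Monday) to 7 (Sunday).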
        return func.extract('dow', self.start_date) + 1
Code Example #31
    def _apply_age_filter(
        query: sqlalchemy.orm.query.Query,
        age_beg: Optional[int] = None,
        age_end: Optional[int] = None,
    ) -> sqlalchemy.orm.query.Query:

        # Convert the ages in years to seconds.
        age_beg_sec = age_beg * 31536000 if age_beg else None
        age_end_sec = age_end * 31536000 if age_end else None

        # Define the function to convert the minimum eligible age of each
        # study to seconds.
        func_age_beg_sec = sqlalchemy_func.cast(
            sqlalchemy_func.extract(
                "EPOCH",
                sqlalchemy_func.cast(
                    ModelEligibility.minimum_age,
                    postgresql.INTERVAL,
                ),
            ),
            postgresql.BIGINT,
        )

        # Define the function to convert the maximum eligible age of each
        # study to seconds.
        func_age_end_sec = sqlalchemy_func.cast(
            sqlalchemy_func.extract(
                "EPOCH",
                sqlalchemy_func.cast(
                    ModelEligibility.maximum_age,
                    postgresql.INTERVAL,
                ),
            ),
            postgresql.BIGINT,
        )

        # Define INT8RANGE ranges on the requested and study ages.
        range_age_user = sqlalchemy_func.int8range(age_beg_sec, age_end_sec)
        range_age_studies = sqlalchemy_func.int8range(func_age_beg_sec,
                                                      func_age_end_sec)

        # Filter studies with an eligibility age range overlapping the
        # user-defined age-range.
        query = query.filter(range_age_user.op("&&")(range_age_studies))

        return query
Code Example #32
def fetch_isins_not_updated_financials(Model: Type[Union[IncomeStatement,
                                                          BalanceSheetStatement,
                                                          CashFlowStatement]]) -> List[Tuple]:
    session = Session()
    res: List[Tuple] = session.query(Stock.isin, Stock.yahoo_ticker).filter(~Stock.isin.in_(
        # pass the query itself so it runs as an IN subquery instead of
        # materialising the id list client-side with .all()
        session.query(Model.isin).filter(func.extract('year', Model.report_date) == get_last_year().year)
    )).group_by(Stock.isin, Stock.yahoo_ticker).all()
    return res
Code Example #33
    def build_query_to_report(self, query, aggregate_table, params):
        assert params in self._known_units
        res = params

        truncated_time = func.date_trunc(res, aggregate_table.c.time_step)
        return (query
                .column(label("time_slice", func.extract("epoch", truncated_time)))
                .group_by(truncated_time))
Code Example #34
File: mastery.py Project: cloew/VocabTester
 def stalenessRating(self):
     """ Return the Queryable staleness rating of the mastery """
     return func.coalesce(
         cast(
             func.floor(
                 func.extract('epoch',
                              func.now() - self.lastCorrectAnswer) / 86400),
             db.Integer) / StalenessPeriod.days, 0)
Code Example #35
def getsharenowdata():
    date_param = request.args['date']
    try:
        date = datetime.datetime.strptime(date_param, "%Y-%m-%d")
    except ValueError:
        return "Check date format. %Y-%m-%d is required"

    if 'hour' not in request.args:
        results = SampleData.query.filter(
            func.extract('year', SampleData.time_2_end) == date.year).filter(
                func.extract('month', SampleData.time_2_end) == date.month
            ).filter(
                func.extract('day', SampleData.time_2_end) == date.day).all()
    else:
        time_param = request.args['hour']
        try:
            time = datetime.datetime.strptime(time_param, '%H').time()
        except ValueError:
            return "Check time format. %H is required"
        results = SampleData.query.filter(
            func.extract('year', SampleData.time_2_end) == date.year).filter(
                func.extract('month', SampleData.time_2_end) == date.month
            ).filter(
                func.extract('day', SampleData.time_2_end) == date.day).filter(
                    func.extract('hour', SampleData.time_2_end) ==
                    time.hour).all()
    rs = jsonify(
        {'sharenow_data': [r.serialize_location_only() for r in results]})
    return rs
Code Example #36
File: municipality.py Project: OneGov/onegov.wtfs
    def apply_model(self, model):
        start = model.pickup_dates.filter(
            func.extract('year', PickupDate.date) ==
            date.today().year).first()
        self.start.data = start.date if start else None

        end = model.pickup_dates.order_by(None)
        end = end.order_by(PickupDate.date.desc()).first()
        self.end.data = end.date if end else None
Code Example #37
File: util.py Project: retroherna/rhinventory
def figure_counter(session, field_identifier, field_date, filter, groups, to_x,
                   count_total, title):
    """Makes a bar figure with time as x, and something counted as y.

    `field_identifier` is what is counted (e.g. Action.id)
    `field_date` is how it is counted (eg. Action.date_created)
    `groups` is how to group the dates (e.g. ['year', 'month'])
    `to_x` is a function that will create a datetime from a row
      (e.g. lambda x: datetime.date(int(x.year), (x.month), 1))
    `title` is the title of the plot
    """
    groups = [func.extract(x, field_date).label(x) for x in groups]
    q = (session.query(
        func.count(field_identifier).label('count'),
        *groups).filter(filter).group_by(*groups).order_by(*groups).all())
    x = []
    y = []
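    # NOTE: the reconstruction below assumes `groups` contained 'year',
    # 'month' and 'day'; with coarser grouping (as in the docstring
    # example) the rows would lack a .day attribute.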
    date = datetime.datetime(int(q[0].year), int(q[0].month), int(q[0].day))
    today = datetime.date.today()
    today = datetime.datetime(today.year, today.month, today.day)
    delta = datetime.timedelta(days=1)
    it = iter(q)
    total = 0
    el = next(it)
    el_date = datetime.datetime(int(el.year), int(el.month), int(el.day))
    while date <= today:
        x.append(date)

        #print(date, el_date)

        if not el or el_date > date:
            y.append(total)
        elif el:
            y.append(total)
            total += el.count

            try:
                el = next(it)
                el_date = datetime.datetime(int(el.year), int(el.month),
                                            int(el.day))
            except StopIteration:
                el = None
        date += delta

    #print(len(x), len(y))

    width = (x[-1] - x[0]).total_seconds() / len(x) * 900

    p = figure(plot_height=400,
               plot_width=900,
               title=title,
               x_axis_type='datetime')
    #p.vbar(x=x, width=width, bottom=0, top=y)
    p.line(x=x, y=y, line_width=6)

    return bokeh.embed.components(p)
Code Example #38
def get_rain_info(date_info, session_mgr):
    logger.debug("checking for rain info: " + date_info.strftime('%d/%m/%y'))
    rain = 0.0
    weather_info = list()
    is_raining_status = True
    session = session_mgr()
    info = session.query(HALDB.WeatherHistory).filter(
        and_(func.extract('day', HALDB.WeatherHistory.created_on) == date_info.day,
             func.extract('month', HALDB.WeatherHistory.created_on) == date_info.month)) \
        .order_by(desc(HALDB.WeatherHistory.created_on)).first()
    if info is not None:
        rain = info.value
    else:
        logger.debug('No history value')
    logger.debug('rain = ' + str(rain))
    if rain == 0:
        is_raining_status = False
    session.close()
    return [is_raining_status, rain, weather_info]
Code Example #39
File: wsgi.py Project: opentechinstitute/piecewise
def retrieve_extra_data():
    if request.args.get("limit"):
        limit = int(request.args.get("limit"))
    else:
        limit = 50

    if request.args.get("page"):
        offset = (int(request.args.get("page")) - 1) * limit
    else:
        offset = 0

    record_count = int(db_session.query(ExtraData).count())

    query = db_session.query(ExtraData, func.extract("epoch", ExtraData.timestamp).label("epoch"))

    query = query.outerjoin(Maxmind, Maxmind.ip_range.contains(ExtraData.client_ip))
    query = query.add_columns(Maxmind.label)

    for aggregation in aggregations:
        query = query.outerjoin(
            aggregation["orm"],
            ST_Intersects(ExtraData.location, eval('aggregation["orm"].%s' % aggregation["geometry_column"])),
        )
        query = query.add_columns(eval('aggregation["orm"].%s' % aggregation["key"]))

    results = []
    try:
        results = query.limit(limit).offset(offset).all()
        db_session.commit()
    except Exception:
        db_session.rollback()

    records = []
    for row in results:
        record = {}
        record["id"] = row.ExtraData.id
        record["date_pretty"] = row.ExtraData.timestamp
        record["timestamp"] = int(row.epoch)
        record["client_ip"] = row.ExtraData.client_ip
        record["min_rtt"] = row.ExtraData.min_rtt
        record["advertised_download"] = row.ExtraData.advertised_download
        record["actual_download"] = row.ExtraData.actual_download
        record["advertised_upload"] = row.ExtraData.advertised_upload
        record["actual_upload"] = row.ExtraData.actual_upload
        record["isp_user"] = row.ExtraData.isp_user
        record["connection_type"] = row.ExtraData.connection_type
        record["cost_of_service"] = row.ExtraData.cost_of_service
        record["isp"] = rewrite_isp(row.label)
        for aggregation in aggregations:
            record[aggregation["table"]] = eval("row.%s" % aggregation["key"])
        records.append(record)

    if len(records):
        return (jsonify(record_count=record_count, records=records), 200, {})
    else:
        return ("", 500, {})
Code Example #40
File: wsgi.py Project: elf11/piecewise-1
def retrieve_extra_data():
    if request.args.get('limit'):
        limit = int(request.args.get('limit'))
    else:
        limit = 50

    if request.args.get('page'):
        offset = (int(request.args.get('page')) - 1) * limit
    else:
        offset = 0

    record_count = int(db_session.query(ExtraData).count())

    query = db_session.query(ExtraData, func.extract('epoch', ExtraData.timestamp).label('epoch'))

    query = query.outerjoin(Maxmind, Maxmind.ip_range.contains(ExtraData.client_ip))
    query = query.add_columns(Maxmind.label)

    for aggregation in aggregations:
        query = query.outerjoin(aggregation['orm'], ST_Intersects(ExtraData.location, eval('aggregation["orm"].%s' % aggregation['geometry_column'])))
        query = query.add_columns(eval('aggregation["orm"].%s' % aggregation['key']))

    results = []
    try:
        results = query.limit(limit).offset(offset).all()
        db_session.commit()
    except Exception:
        db_session.rollback()

    records = []
    for row in results:
        record = {}
        record['id'] = row.ExtraData.id
        record['date_pretty'] = row.ExtraData.timestamp
        record['timestamp'] = int(row.epoch)
        record['client_ip'] = row.ExtraData.client_ip
        record['min_rtt'] = row.ExtraData.min_rtt
        record['advertised_download'] = row.ExtraData.advertised_download
        record['actual_download'] = row.ExtraData.actual_download
        record['advertised_upload'] = row.ExtraData.advertised_upload
        record['actual_upload'] = row.ExtraData.actual_upload
        record['location_type'] = row.ExtraData.location_type
        record['connection_type'] = row.ExtraData.connection_type
        record['cost_of_service'] = row.ExtraData.cost_of_service
        record['isp'] = rewrite_isp(row.label)
        for aggregation in aggregations:
            record[aggregation['table']] = eval('row.%s' % aggregation['key'])
        records.append(record)

    if len(records):
        return (jsonify(record_count=record_count, records=records), 200, {})
    else:
        return ('', 500, {})
Code Example #41
def create_years_list():
    """
    init app.years field, with last 10 years that used in db
    """
    while True:
        try:
            year_col = db.session.query(distinct(func.extract("year", Marker.created)))
            app.years = sorted([int(year[0]) for year in year_col], reverse=True)[:10]
            break
        except OperationalError as err:
            logging.warning(err)
            time.sleep(1)
    logging.info("Years for date selection: " + ", ".join(map(str, app.years)))
Code Example #42
File: main.py Project: agamaloni/anyway
def create_years_list():
    """
    Edits 'years.js', a years structure ready to be presented in app.js
    as user's last-4-years filter choices.
    """
    year_col = db.session.query(distinct(func.extract("year", Marker.created)))
    years = OrderedDict([("שנת" + " %d" % year, year_range(year))  # "שנת" is Hebrew for "year of"
                         for year in sorted((y[0] for y in year_col), reverse=True)[:4]])
    years_file = os.path.join(app.static_folder, 'js/years.js')
    with open(years_file, 'w') as outfile:
        outfile.write("var ACCYEARS = ")
        json.dump(years, outfile, encoding='utf-8')
        outfile.write(";\n")
    logging.debug("wrote '%s'" % years_file)
    logging.debug("\n".join("\t{0}: {1}".format(k, str(v)) for k, v in years.items()))
Code Example #43
    def filter_date(self, query, appstruct):
        date = appstruct.get('date')
        year = appstruct.get('year')
        if date is not None:
            query = query.filter(
                models.Workshop.timeslots.any(
                    func.date(models.Timeslot.start_time) == date
                )
            )
        # Only filter by year if no date filter is set
        elif year is not None:
            query = query.filter(
                models.Workshop.timeslots.any(
                    func.extract('YEAR', models.Timeslot.start_time) == year
                )
            )

        return query
Code Example #44
File: views.py Project: mianos/etemp
def sensord(id, start, end, aft_name):
    functions = aft[aft_name]
    table = inspect(DataTable).mapped_table

    fields = list()
    for agg_func in functions:
        agg_func_name = str(agg_func()).replace('()', '')
        fields.append(func.cast(agg_func(DataTable.value), types.Integer).label(agg_func_name))

    per_seconds = (end - start).total_seconds() / 100
    ots = func.to_timestamp(func.round(func.extract('epoch', DataTable.timestamp) / per_seconds) * per_seconds).label('timestamp')

    if id == 0:
        qry = g.session.query(ots, *fields) \
                       .filter(DataTable.probe == 1)    # TODO: get probe 1
    else:
        qry = g.session.query(ots, *fields) \
                       .filter(DataTable.probe == id)

    qry = qry.filter(table.c.timestamp >= start, table.c.timestamp <= end) \
             .group_by(ots) \
             .order_by(ots)
    return qry
Code Example #45
    def filter_date(self, query, appstruct):
        """
        filter the query and restrict it to the given year
        """
        year = appstruct.get('year')
        date_range_start = appstruct.get('date_range_start')
        date_range_end = appstruct.get('date_range_end')

        if date_range_start is not None:
            query = query.filter(
                func.date(Activity.datetime) >= date_range_start
            )

        if date_range_end is not None:
            query = query.filter(
                func.date(Activity.datetime) <= date_range_end
            )

        if year is not None and date_range_start is None and \
                date_range_end is None:
            query = query.filter(
                func.extract('YEAR', Activity.datetime) == year
            )
        return query
Code Example #46
File: models.py Project: wgalloway/burddy
 def popularity(self, gravity=2):
     seconds = func.extract('epoch', self.timestamp - func.now())
     hours = seconds / 3600
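     # NB: self.timestamp - func.now() is negative for past posts; the even
     # default gravity cancels the sign inside func.power, but an odd
     # gravity would flip the score. func.now() - self.timestamp is safer.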
     return (self.views - 1) / func.power((hours + 2), gravity)
Code Example #47
File: SpecWeather.py Project: tkinst/spec-cal
    def get(self, location):
        precip_dates = db.session.query(
            func.sum(WeatherUpdate.precipitation), WeatherUpdate.date
        ).filter_by(location=location).group_by(
            func.extract('day', WeatherUpdate.date),
            func.extract('month', WeatherUpdate.date)).all()

        counter = 1
        the_array = []
        the_dict = {}

        for precip, eachDate in precip_dates:
            y = {'id': counter,
                 'title': str(location) + ": " + str(precip) + "in",
                 'allDay': True,
                 'start': eachDate.date().isoformat(),
                 # 'end': '',
                 'url': '',
                 'color': ''}

            if precip > 0.1:
                y['color'] = '#FF0000'
            elif precip > 0:
                y['color'] = '#ff912f'

            the_array.append(y)

            counter += 1

        # the_dict['events'] = the_array

        # return jsonify(the_dict)
        return make_response(json.dumps(the_array))
Code Example #48
File: helpers.py Project: wonderpl/dolly-web
    def import_channel_share(self, automatic_flush=True):
        from rockpack.mainsite.services.share.models import ShareLink
        from rockpack.mainsite.services.user.models import UserActivity, User
        from rockpack.mainsite.services.video.models import VideoInstance, Channel

        total = 0
        missing = 0
        start_time = time.time()

        def _normalised(val, max_val, min_val):
            try:
                return (val - min_val) / (abs(max_val) - abs(min_val))
            except (ZeroDivisionError, decimal.DivisionByZero, decimal.InvalidOperation):
                return 0

        def _update_channel_id(id, val, max_val, min_val):
            channel_dict[id] = channel_dict.setdefault(id, 0) + _normalised(val, max_val, min_val)

        # The strength of actions decay until any older than zulu have no effect
        zulu = datetime.now() - timedelta(days=app.config.get('CHANNEL_RANK_ZULU', 1))
        time_since_zulu = (datetime.utcnow() - zulu).total_seconds()
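        # NB: zulu comes from datetime.now() but is subtracted from
        # datetime.utcnow(); on a server not running UTC this skews
        # time_since_zulu by the local offset.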

        for locale in ['en-gb', 'en-us']:
            app.logger.debug('starting for %s', locale)
            channel_dict = {}
            channel_shares = {}

            summation = func.sum(
                (time_since_zulu - (func.extract('epoch', datetime.utcnow()) - func.extract('epoch', UserActivity.date_actioned))) / time_since_zulu
            )

            # activity for channels from videos
            query = readonly_session.query(
                distinct(Channel.id).label('channel_id'),
                summation.label('summed')
            ).join(
                VideoInstance, VideoInstance.channel == Channel.id
            ).join(
                UserActivity, UserActivity.object_id == VideoInstance.id
            ).join(
                User, User.id == UserActivity.user
            ).filter(
                UserActivity.action == 'star',
                UserActivity.object_type == 'video_instance',
                UserActivity.date_actioned > zulu,
                User.locale == locale
            ).group_by(Channel.id)

            summed = query.subquery().columns.summed
            q_max, q_min = UserActivity.query.session.query(func.max(summed), func.min(summed)).one()

            for id, count in query.yield_per(6000):
                channel_dict.setdefault(id, {})
                channel_dict[id]['user_activity'] = [count, _normalised(count, q_max, q_min)]
                channel_dict[id]['norm_user_activity'] = _normalised(count, q_max, q_min)

            app.logger.debug('user activity done')

            summation = func.sum(
                (time_since_zulu - (func.extract('epoch', datetime.utcnow()) - func.extract('epoch', ShareLink.date_created))) / time_since_zulu
            )

            # activity for channel shares
            query = readonly_session.query(
                distinct(Channel.id).label('channel_id'),
                summation.label('summed')
            ).join(
                ShareLink,
                ShareLink.object_id == Channel.id
            ).join(
                User, User.id == ShareLink.user
            ).filter(
                Channel.deleted == False,
                Channel.public == True,
                ShareLink.object_type == 'channel',
                ShareLink.date_created > zulu,
                ShareLink.click_count > 0,
                User.locale == locale
            ).group_by(Channel.id)

            summed = query.subquery().columns.summed

            q_max, q_min = ShareLink.query.session.query(func.max(summed), func.min(summed)).one()
            channel_share_vals = (q_max, q_min)

            for id, count in query.yield_per(6000):
                channel_dict.setdefault(id, {})
                channel_shares[id] = count
                channel_dict[id]['share_link_channel'] = [count, _normalised(count, q_max, q_min)]

            app.logger.debug('channel shares done')
            # activity for videos shares of channels
            query = readonly_session.query(
                distinct(Channel.id).label('channel_id'),
                summation.label('summed')
            ).join(
                VideoInstance,
                VideoInstance.channel == Channel.id
            ).join(
                ShareLink,
                ShareLink.object_id == VideoInstance.id
            ).join(
                User, User.id == ShareLink.user
            ).filter(
                Channel.deleted == False,
                Channel.public == True,
                ShareLink.object_type == 'video_instance',
                ShareLink.date_created > zulu,
                ShareLink.click_count > 0,
                User.locale == locale
            ).group_by(Channel.id)

            summed = query.subquery().columns.summed

            q_max, q_min = ShareLink.query.session.query(func.max(summed), func.min(summed)).one()

            for id, count in query.yield_per(6000):
                channel_dict.setdefault(id, {})
                channel_dict[id]['share_link_video'] = [count, _normalised(count, q_max, q_min)]
                val = channel_shares.get(id, 0)
                # We may get None returned in the data
                if None in channel_share_vals:
                    channel_share_vals = [0, 0]
                channel_dict[id]['norm_share_link_channel'] = (
                    channel_dict[id].setdefault('norm_share_link_channel', 0)
                    + _normalised(count + val,
                                  q_max + channel_share_vals[0],
                                  q_min + channel_share_vals[1]))

            app.logger.debug('video shares done')

            app.logger.debug('... updating elasticsearch for %s ...', locale)

            done = 1
            i_total = len(channel_dict)
            ec = ESChannel.updater(bulk=True)
            for id, _dict in channel_dict.iteritems():
                try:
                    count = 0
                    for k, v in _dict.iteritems():
                        if k.startswith('norm'):
                            count += v

                    if count == 0:
                        continue

                    ec.set_document_id(id)
                    ec.add_field('normalised_rank[\'%s\']' % locale, float(count))
                    ec.update()

                except exceptions.DocumentMissingException:
                    missing += 1
                finally:
                    ec.reset()
                total += 1
                if app.logger.isEnabledFor(logging.DEBUG):
                    self.print_percent_complete(done, i_total)
                done += 1

            if automatic_flush:
                ESChannel.flush()

        app.logger.debug('%s total updates in two passes. finished in %s seconds (%s channels not in es)', total, time.time() - start_time, missing)
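The ranking pass above leans on a _normalised(count, q_max, q_min) helper defined elsewhere in the module. A minimal min-max scaling sketch consistent with how it is called here (the name and the None handling are assumptions, not the project's actual implementation):

def _normalised(value, q_max, q_min):
    # Treat NULL aggregates coming back from the database as zero activity.
    if value is None:
        return 0.0
    q_max = q_max or 0
    q_min = q_min or 0
    # Degenerate range: every channel scored the same.
    if q_max == q_min:
        return 0.0
    return (value - q_min) / float(q_max - q_min)

print(_normalised(5, 10, 0))  # -> 0.5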
Code example #50
File: views.py  Project: VitorPizzuto/gastos_abertos
    def get(self):
        args = _filter_parser.parse_args()
        group_by = args['group_by'].split(',')
        group_by_fields = []

        # Always return a count
        query_args = [func.count(Contrato.id).label('count')]
        keys = []
        temporary_keys = []
        partial_fields = []
        # Tuples with SQLAlchemy function and args to get parts of values.
        # This allows to group by years or months for example.
        parts = {
            'year': (
                lambda field: [func.extract('year', field)],
                lambda values: list(values)[0]
                ),
            'month': (
                lambda field: [
                    func.extract('year', field),
                    func.extract('month', field),
                    ],
                lambda values: '-'.join([format(v, '02') for v in values])
                ),
            'day': (
                lambda field: [
                    func.extract('year', field),
                    func.extract('month', field),
                    func.extract('day', field),
                    ],
                lambda values: '-'.join([format(v, '02') for v in values])
                ),
        }

        for field_name in group_by:
            part = None
            if field_name.endswith(
                    tuple(map(lambda a: '__{}'.format(a), parts.keys()))):
                # User asked to group using only part of value.
                # Get the original field name and which part we should use.
                # "?group_by=data_publicacao__year" results in
                # field_name = 'data_publicacao'
                # part = 'year'
                field_name, part = field_name.split('__', 1)
            if field_name in contratos_fields:
                group_by_field = [getattr(Contrato, field_name)]
                if part:
                    # Apply the "part" function
                    group_by_field = parts[part][0](group_by_field[0])
                    temporary_keys.extend(['{}__{}'.format(field_name, i)
                                           for i in range(len(group_by_field))
                                           ])
                    partial_fields.append({
                        'field_name': field_name,
                        'count': len(group_by_field),
                        'part_name': part,
                    })
                else:
                    keys.append(field_name)
                    temporary_keys.append(field_name)
                group_by_fields.extend(group_by_field)
                query_args.extend(group_by_field)

        query_args.append(func.sum(Contrato.valor).label('valor'))
        keys.append('valor')
        temporary_keys.append('valor')

        contratos_data = db.session.query(*query_args)
        if group_by_fields:
            contratos_data = contratos_data.group_by(*group_by_fields)

        contratos_data = self.order(contratos_data)
        contratos_data = self.filter(contratos_data)

        total_count = contratos_data.count()

        contratos_data = self.paginate(contratos_data)

        headers = {
            # Add 'Access-Control-Expose-Headers' header here is a workaround
            # until Flask-Restful adds support to it.
            'Access-Control-Expose-Headers': 'X-Total-Count',
            'X-Total-Count': total_count
        }

        # Create the dictionary used to marshal
        fields_ = {'count': fields.Integer()}
        fields_.update({key: contratos_fields.get(key, fields.String())
                        for key in keys})

        # Create a list of dictionaries
        result = map(lambda a: dict(zip(['count'] + temporary_keys, a)),
                     contratos_data.all())

        # Set partial dates type to string
        for f in partial_fields:
            fields_[f['field_name']] = fields.String()
            for item in result:
                item[f['field_name']] = parts[f['part_name']][1](
                    (item.pop('{}__{}'.format(f['field_name'], i))
                     for i in range(f['count'])))

        return restful.marshal(result, fields_), 200, headers
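The `parts` table above is what lets a single query string such as ?group_by=data_publicacao__month group on (year, month) pairs and then fold the pair back into one label. A self-contained sketch of that recombination step (plain ints standing in for what func.extract returns):

recombine_month = lambda values: '-'.join([format(v, '02') for v in values])
print(recombine_month([2015, 7]))    # -> '2015-07'

recombine_year = lambda values: list(values)[0]
print(recombine_year(iter([2015])))  # -> 2015 (the real code passes a generator)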
Code example #51
 def filter_query_to_report(self, query, aggregate_table, params):
     if isinstance(params, basestring):
         params = map(int, params.split(","))
     after, before = params
     time = func.extract("epoch", aggregate_table.c.time_step)
     return query.where(between(time, after, before))
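A hedged sketch of the parsing half of filter_query_to_report: `params` may arrive either as a two-item sequence or as a comma-separated string of epoch seconds (the py2 original checks basestring; str is the py3 stand-in here):

params = "1420070400,1451606400"  # e.g. 2015-01-01 .. 2016-01-01 UTC
if isinstance(params, str):
    params = map(int, params.split(","))
after, before = params
print(after, before)  # -> 1420070400 1451606400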
Code example #52
File: query.py  Project: anthonyrgreen/work_records
def getLogs(startTime, endTime,
            count='numLoads',
            dataAggregation=['module'],
            timeAggregation='month',
            filters={},
            sortBy='module',
            sortOrder='desc'):
  """ 
  Get logs between 'startTime' and 'endTime', selected according key-value args.
  
  'startTime' and 'endTime' are DateTime objects.

  'count' must be in ['numLoads', 'numUsers', 'none']
  
  'sortBy' must be set to an option which is being aggregated.
  
  'dataAggregation' must be a subset of ['module', 'version', 'user'], where
  'version' is present only if 'module' is as well.
  
  'timeAggregation' must be in ['timespan', 'year', 'month', 'day'], 
  
  'sortBy' must be in ['count', 'module', 'version', 'user'], and must be a
  feature listed in either dataAggregation, or 'count' if the value of count
  is not 'none'
  
  'sortOrder' must be in ['asc', 'desc']
  
  'filters' should be a dictionary. Keys represent properties to filter over, and
  their associated values are lists of valid matches. list-values are OR'd together,
  while key-values are AND'd together.
  For example:
  getLog(Datetime(1,1,1), DateTime(1999, 1, 1), 
         dataAggregation=['module', version']
         filters={ 'module' : ['R', 'openmpi'] })
  would return triples (count, module, version) between 1/1/1 and 1/1/1999,
  where module was either 'R' or 'openmpi'.
  Another example:
  getLog(Datetime(1,1,1), DateTime(1999, 1, 1), 
         dataAggregation=['module', version']
         filters={ 'module' : ['R', 'openmpi'], 'user' : ['grundoon'] })
  gives the same as above, except counts only those records generated by user
  'grundoon'.
  An empty filter dictionary means no filter.
  'filters' keys must be in ['module',
                             'version',
                             'user',
                             'lessThan',
                             'greaterThan']
                                                                                          
  non-existent or non-sensicle options will be overriden
  """
  # First, we sanitize the input options
  count = cleanCount(count)
  dataAggregation = cleanDataAggregation(dataAggregation)
  timeAggregation = cleanTimeAggregation(timeAggregation)
  filters = cleanFilters(filters, count)
  sortBy = cleanSortBy(sortBy, dataAggregation, count)
  sortOrder = cleanSortOrder(sortOrder)
  
  # second, we construct the columns and filters
  if count == 'numLoads':
    countColumn = [func.count(getattr(ModuleLoadRecord, dataAggregation[-1])).label('count')]
  elif count == 'numUsers':
    countColumn = [func.count(ModuleLoadRecord.user.distinct()).label('count')]
  else:
    countColumn = []
  if 'timespan' not in timeAggregation:
    timeColumns = [func.extract(opt, ModuleLoadRecord.loadDate).label(opt) 
                   for opt in timeAggregation]
  else: 
    timeAggregation = []
    timeColumns = []
  dataColumns = [getattr(ModuleLoadRecord, opt) 
                 for opt in dataAggregation]
  sortOrder = asc if sortOrder == 'asc' else desc
  sortByColumn = map(asc, timeColumns) + [sortOrder(sortBy)]

  timeFilters = [ModuleLoadRecord.loadDate >= startTime, 
                 ModuleLoadRecord.loadDate < endTime]
  dataFilters = []
  countFilters = [sqlalchemy.sql.true()]
  # We go through each key in filters, ORing the filters for every individual key
  for key in filters:
    if key in ['greaterThan', 'lessThan']:
      if key == 'greaterThan':
        criterion = 'count > ' + str(int(filters[key]))
      else:
        criterion = 'count < ' + str(int(filters[key]))
      countFilters = [sqlalchemy.sql.text(criterion)]
    else:
      # This is a place-holder that we need to begin building up the query.
      # It won't wreck our query since it's going to be OR'd with extra criteria
      criterion = sqlalchemy.sql.false()
      for val in filters[key]:
        criterion = criterion | (getattr(ModuleLoadRecord, key) == val)
      # Deal with the edge case that a filter list was empty
      if criterion != sqlalchemy.sql.false():
        dataFilters.append(criterion)

  # Now, the actual querying
  with cliQuery() as session:
    results = session.query(*(timeColumns + dataColumns + countColumn)) \
            .filter(*(timeFilters + dataFilters)) \
            .group_by(*(timeColumns + dataColumns)) \
            .order_by(*sortByColumn) \
            .having(*countFilters) \
            .all()
  if count == 'none':
    return (timeAggregation + dataAggregation, results)
  elif count == 'numUsers':
    return (timeAggregation + dataAggregation + ['uniqueUsers'], results)
  else:
    return (timeAggregation + dataAggregation + ['numLoads'], results)
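A hedged call sketch for getLogs, assuming the clean* helpers accept these values as-is (the exact header list depends on how cleanTimeAggregation expands 'month'):

from datetime import datetime

headers, rows = getLogs(datetime(2015, 1, 1), datetime(2016, 1, 1),
                        count='numUsers',
                        dataAggregation=['module', 'version'],
                        timeAggregation='month',
                        filters={'module': ['R', 'openmpi']},
                        sortBy='count',
                        sortOrder='desc')
# headers might look like ['year', 'month', 'module', 'version', 'uniqueUsers']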
Code example #53
#! coding: utf-8
from flask import Blueprint, render_template
from sqlalchemy import func as sql_func
from decimal import Decimal
from io import BytesIO
from base64 import b64encode
from datetime import date

from cebulany.models import db, Transaction, Payment, PaymentType

report_page = Blueprint('report_page', 'report', template_folder='../templates')
month_field = sql_func.extract('month', Transaction.date)
year_field = sql_func.extract('year', Transaction.date)


def accumulate_sum(iterable):  # hand-rolled running sum; itertools.accumulate needs py3
    values = []
    s = 0.0
    for i in iterable:
        s += float(i)
        values.append(s)
    return values
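# e.g. accumulate_sum([1, 2, 3]) -> [1.0, 3.0, 6.0]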


def save_plot(fig):
    bio = BytesIO()
    fig.savefig(bio, format='png')
    return b64encode(bio.getvalue())


class ReportMonth(object):
Code example #54
File: models.py  Project: maze88/anyway
    def bounding_box_query(is_thin=False, yield_per=None, **kwargs):

        # example:
        # ne_lat=32.36292402647484&ne_lng=35.08873443603511&sw_lat=32.29257266524761&sw_lng=34.88445739746089
        # >>>  m = Marker.bounding_box_query(32.36, 35.088, 32.292, 34.884)
        # >>> m.count()
        # 250

        approx = kwargs.get('approx', True)
        accurate = kwargs.get('accurate', True)

        if not kwargs.get('show_markers', True):
            return Marker.query.filter(sql.false())
        markers = Marker.query \
            .filter(Marker.longitude <= kwargs['ne_lng']) \
            .filter(Marker.longitude >= kwargs['sw_lng']) \
            .filter(Marker.latitude <= kwargs['ne_lat']) \
            .filter(Marker.latitude >= kwargs['sw_lat']) \
            .filter(Marker.created >= kwargs['start_date']) \
            .filter(Marker.created < kwargs['end_date']) \
            .order_by(desc(Marker.created))
        if yield_per:
            markers = markers.yield_per(yield_per)
        if accurate and not approx:
            markers = markers.filter(Marker.locationAccuracy == 1)
        elif approx and not accurate:
            markers = markers.filter(Marker.locationAccuracy != 1)
        elif not accurate and not approx:
            return Marker.query.filter(sql.false())
        if not kwargs.get('show_fatal', True):
            markers = markers.filter(Marker.severity != 1)
        if not kwargs.get('show_severe', True):
            markers = markers.filter(Marker.severity != 2)
        if not kwargs.get('show_light', True):
            markers = markers.filter(Marker.severity != 3)
        if kwargs.get('show_urban', 3) != 3:
            if kwargs['show_urban'] == 2:
                markers = markers.filter(Marker.roadType >= 1).filter(Marker.roadType <= 2)
            elif kwargs['show_urban'] == 1:
                markers = markers.filter(Marker.roadType >= 3).filter(Marker.roadType <= 4)
            else:
                return Marker.query.filter(sql.false())
        if kwargs.get('show_intersection', 3) != 3:
            if kwargs['show_intersection'] == 2:
                markers = markers.filter(Marker.roadType != 2).filter(Marker.roadType != 4)
            elif kwargs['show_intersection'] == 1:
                markers = markers.filter(Marker.roadType != 1).filter(Marker.roadType != 3)
            else:
                return Marker.query.filter(sql.false())
        if kwargs.get('show_lane', 3) != 3:
            if kwargs['show_lane'] == 2:
                markers = markers.filter(Marker.one_lane >= 2).filter(Marker.one_lane <= 3)
            elif kwargs['show_lane'] == 1:
                markers = markers.filter(Marker.one_lane == 1)
            else:
                return Marker.query.filter(sql.false())

        if kwargs.get('show_day', 7) != 7:
            markers = markers.filter(func.extract("dow", Marker.created) == kwargs['show_day'])
        if kwargs.get('show_holiday', 0) != 0:
            markers = markers.filter(Marker.dayType == kwargs['show_holiday'])

        if kwargs.get('show_time', 24) != 24:
            if kwargs['show_time'] == 25:     # Daylight (6-18)
                markers = markers.filter(func.extract("hour", Marker.created) >= 6)\
                                 .filter(func.extract("hour", Marker.created) < 18)
            elif kwargs['show_time'] == 26:   # Darktime (18-6)
                markers = markers.filter((func.extract("hour", Marker.created) >= 18) |
                                         (func.extract("hour", Marker.created) < 6))
            else:
                markers = markers.filter(func.extract("hour", Marker.created) >= kwargs['show_time'])\
                                 .filter(func.extract("hour", Marker.created) < kwargs['show_time']+6)
        elif kwargs['start_time'] != 25 and kwargs['end_time'] != 25:
            markers = markers.filter(func.extract("hour", Marker.created) >= kwargs['start_time'])\
                             .filter(func.extract("hour", Marker.created) < kwargs['end_time'])
        if kwargs.get('weather', 0) != 0:
            markers = markers.filter(Marker.weather == kwargs['weather'])
        if kwargs.get('road', 0) != 0:
            markers = markers.filter(Marker.roadShape == kwargs['road'])
        if kwargs.get('separation', 0) != 0:
            markers = markers.filter(Marker.multi_lane == kwargs['separation'])
        if kwargs.get('surface', 0) != 0:
            markers = markers.filter(Marker.road_surface == kwargs['surface'])
        if kwargs.get('acctype', 0) != 0:
            markers = markers.filter(Marker.subtype == kwargs['acctype'])
        if kwargs.get('controlmeasure', 0) != 0:
            markers = markers.filter(Marker.road_control == kwargs['controlmeasure'])
        if kwargs.get('district', 0) != 0:
            markers = markers.filter(Marker.unit == kwargs['district'])

        if kwargs.get('case_type', 0) != 0:
            markers = markers.filter(Marker.provider_code == kwargs['case_type'])

        if is_thin:
            markers = markers.options(load_only("id", "longitude", "latitude"))
        return markers
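A hedged usage sketch mirroring the comment at the top of the method. Note that start_date/end_date are mandatory keys, and that start_time/end_time must be supplied (25 disables the hour filter) whenever show_time is left at its default of 24:

from datetime import datetime

m = Marker.bounding_box_query(ne_lat=32.36, ne_lng=35.088,
                              sw_lat=32.292, sw_lng=34.884,
                              start_date=datetime(2013, 1, 1),
                              end_date=datetime(2014, 1, 1),
                              start_time=25, end_time=25)
print(m.count())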
Code example #55
File: traffic.py  Project: agdsn/pycroft
 def round_time(time_expr, ceil=False):
     """Snap a timestamp expression to a multiple of the arg_step interval."""
     round_func = func.ceil if ceil else func.trunc
     step_epoch = func.extract('epoch', literal_column('arg_step'))
     rounded_epoch = round_func(func.extract('epoch', time_expr) / step_epoch) * step_epoch
     return cast(func.to_timestamp(rounded_epoch), timestamptz)
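A pure-Python analogue of what round_time builds in SQL, under the assumption that arg_step is an interval argument of the enclosing database function: epoch seconds are divided by the step, truncated (or ceiled), and scaled back up.

import math

def round_time_py(ts_epoch, step_seconds, ceil=False):
    round_func = math.ceil if ceil else math.trunc
    return round_func(ts_epoch / float(step_seconds)) * step_seconds

print(round_time_py(1609459345, 60))             # -> 1609459320 (floor to the minute)
print(round_time_py(1609459345, 60, ceil=True))  # -> 1609459380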
Code example #56
File: test_functions.py  Project: cpcloud/sqlalchemy
    def test_non_functions(self):
        expr = func.cast("foo", Integer)
        self.assert_compile(expr, "CAST(:param_1 AS INTEGER)")

        expr = func.extract("year", datetime.date(2010, 12, 5))
        self.assert_compile(expr, "EXTRACT(year FROM :param_1)")
Code example #57
File: SpecWeather.py  Project: tkinst/spec-cal
    def get(self):
        # Sum precipitation per location per calendar day (note: grouping by
        # day and month only, so the same date across years is merged).
        precip_dates = (
            db.session.query(func.sum(WeatherUpdate.precipitation), WeatherUpdate)
            .group_by(WeatherUpdate.location,
                      func.extract('day', WeatherUpdate.date),
                      func.extract('month', WeatherUpdate.date))
            .all())

        counter = 1
        the_array = []

        for precip, eachUpdate in precip_dates:
            url = "http://spec-cal.tomkinstrey.com/single/%s/%s/%s/%s/" % (eachUpdate.location,eachUpdate.date.year,eachUpdate.date.month,eachUpdate.date.day)


            y = {'id': counter,
            'title': str(eachUpdate.location) + ": " + str(precip) + "in",
            'allDay': True,
            'start' :eachUpdate.date.date().isoformat(),
            # 'end':'',
            'url': url,
            'color': ''}

            if precip > 0.1:
                y['color'] = '#FF0000'  # heavier precipitation in red
            elif precip > 0:
                y['color'] = '#ff912f'  # light precipitation in orange

            the_array.append(y)

            counter += 1

        return make_response(json.dumps(the_array))
Code example #58
    def trajectSpeed(self):
        """traject speed by distance between previous and current location divided by current date_time - previous date_time

        round(CAST
        (
            ST_Length_Spheroid(
                ST_MakeLine(location, lag(location) over (order by device_info_serial, date_time)),
                'SPHEROID[\"WGS 84\",6378137,298.257223563]'
            )
        /
        EXTRACT(
            EPOCH FROM (date_time - lag(date_time) over (order by device_info_serial, date_time))
        )
        ) AS NUMERIC, 4)
        """
        order_by = (self.device_info_serial, self.date_time,)
        spheroid = 'SPHEROID["WGS 84",6378137,298.257223563]'
        line = func.ST_MakeLine(self.rawlocation,
                                func.lag(self.rawlocation).over(order_by=order_by))
        distance = func.ST_Length_Spheroid(line, spheroid)
        duration = func.extract('epoch',
                                self.date_time - func.lag(self.date_time).over(order_by=order_by))
        return func.round(cast(distance / duration, Numeric), 4).label('tspeed')
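Since ST_Length_Spheroid returns metres and the epoch difference is in seconds, the resulting tspeed label is in metres per second, rounded to four decimal places.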