Esempio n. 1
0
def find_close_pose_record(open_record):
    '''
    Find the closing volatility_storage record matching ``open_record``.

    open_record = volatility_storage object.
    Searches for a record with the same name on the same day and month,
    starting at the module-level ``time_close`` and stepping back one minute
    at a time until a record is found or the search time drops below
    ``hour_close_min``.

    Returns the matching Volatility_storage object, or None if not found.

    NOTE(review): only day and month are matched, not year — presumably the
    table holds at most one year of data; confirm.
    '''
    search_date = open_record.timestamp.date()
    search_time = time_close
    while True:
        close_record = query_records.filter(and_(
            Volatility_storage.name == open_record.name,
            extract('day', Volatility_storage.timestamp) == search_date.day,
            extract('month', Volatility_storage.timestamp) == search_date.month,
            extract('hour', Volatility_storage.timestamp) == search_time.hour,
            extract('minute', Volatility_storage.timestamp) == search_time.minute)).first()
        if close_record:
            return close_record
        # Step the search time back one minute, wrapping across the hour.
        curr_min = search_time.minute
        curr_hour = search_time.hour
        if curr_min >= 1:
            curr_min -= 1
        else:
            curr_min = 59
            curr_hour -= 1
        # BUG FIX: the original passed ``minute=search_time.minute - 1`` here,
        # ignoring the wrapped ``curr_min`` computed above — when the minute
        # wrapped past zero this raised ValueError (minute=-1).
        search_time = search_time.replace(hour=curr_hour, minute=curr_min)
        if search_time < hour_close_min:
            # No closing record found within the allowed window.
            return None
Esempio n. 2
0
def update_archives(arc_month=LAST_MONTH):
    """Archive one month's worth of packages (defaults to last month)."""
    columns = db_session.query(SubPac.pkg_name, SubPac.pkg_version,
                               SubPac.pkg_release, SubPac.pkg_arch,
                               SubPac.vendor_name, SubPac.pkg_status,
                               Sub.distro_name, Sub.distro_version,
                               func.min(SubPac.sub_date),
                               func.count('*').label('count')).join(Sub)

    # Restrict to the requested month/year, grouped by package identity.
    rows = (columns
            .filter(extract('month', SubPac.sub_date) == arc_month.month,
                    extract('year', SubPac.sub_date) == arc_month.year)
            .group_by(extract('month', SubPac.sub_date),
                      extract('year', SubPac.sub_date),
                      SubPac.pkg_name, SubPac.pkg_version,
                      SubPac.pkg_release, SubPac.pkg_arch,
                      SubPac.vendor_name,
                      SubPac.pkg_status, Sub.distro_name,
                      Sub.distro_version)
            .all())

    archives = []
    for row in rows:
        archive = PackageArchive(*row)
        # Normalize the archive month to the first day of that month.
        archive.month = archive.month.replace(day=1)
        archives.append(archive)

    db_session.add_all(archives)
    db_session.commit()
Esempio n. 3
0
def get_count_item_for_last_days(trailcam_id, days=7):
    """Return per-day item counts for a trailcam over the last ``days`` days.

    :param trailcam_id: id of the trailcam to count items for
    :param days: number of days to look back (default 7)
    :return: list of (iso-datetime string, count) tuples, most recent first;
             days with no items contribute a count of 0
    """
    date_now = datetime.now()
    date_7_days_ago = date_now - timedelta(days=days)

    # BUG FIX: SQL EXTRACT takes singular field names ('month'/'day');
    # the plural 'months'/'days' are rejected by PostgreSQL and other
    # standard-compliant backends.
    time_group = DBSession.query(extract('month', TrailcamItem.date_original).label('m'),
                                 extract('day', TrailcamItem.date_original).label('d'),
                                 sa.func.count(TrailcamItem.id)) \
        .filter(TrailcamItem.date_original <= date_now) \
        .filter(TrailcamItem.date_original >= date_7_days_ago) \
        .filter(TrailcamItem.trailcam_id == trailcam_id) \
        .group_by('d') \
        .group_by('m') \
        .order_by(sa.desc('m')) \
        .order_by(sa.desc('d')) \
        .all()

    # NOTE(review): extract() may return Decimal/float on some backends,
    # which would format as e.g. '5.0' and miss the int-based key built
    # below — confirm against the database in use.
    count_by_days = dict(
        ('{0}-{1}'.format(m, d), count) for m, d, count in time_group)
    count_for_last_days = []

    for i in range(days):
        date_i_days_ago = date_now - timedelta(days=i)
        current_key = '{d.month}-{d.day}'.format(d=date_i_days_ago)
        # Days with no rows in the query result count as zero.
        count_for_last_days.append(
            (date_i_days_ago.isoformat(),
             count_by_days.get(current_key, 0)))

    return count_for_last_days
Esempio n. 4
0
def checkCurMonthOrders(userId):
    """Return the number of orders placed by ``userId`` in the current month.

    :param userId: account id whose orders are counted
    :return: int order count
    """
    session = db.session()
    now = datetime.now()
    # Count in the database instead of fetching every row with .all() and
    # measuring the list length in Python (the original materialized all rows).
    return session.query(Orders).filter_by(AccountId=userId) \
                                .filter(extract('year', Orders.OrderDate) == now.year) \
                                .filter(extract('month', Orders.OrderDate) == now.month) \
                                .count()
Esempio n. 5
0
    def event_birthday_calendar(self, out, session):
        """Write a CSV calendar of attendee birthdays falling within the event.

        :param out: csv writer the header and one row per attendee are written to
        :param session: database session used to query attendees
        """
        out.writerow([
            'Subject', 'Start Date', 'Start Time', 'End Date', 'End Time',
            'All Day Event', 'Description', 'Location', 'Private'])

        is_multiyear = c.EPOCH.year != c.ESCHATON.year
        is_multimonth = c.EPOCH.month != c.ESCHATON.month
        query = session.query(Attendee).filter(Attendee.birthdate != None)  # noqa: E711
        birth_month = extract('month', Attendee.birthdate)
        birth_day = extract('day', Attendee.birthdate)
        if is_multiyear:
            # The event starts in one year and ends in another
            query = query.filter(or_(
                or_(
                    birth_month > c.EPOCH.month,
                    birth_month < c.ESCHATON.month),
                and_(
                    birth_month == c.EPOCH.month,
                    birth_day >= c.EPOCH.day),
                and_(
                    birth_month == c.ESCHATON.month,
                    birth_day <= c.ESCHATON.day)))
        elif is_multimonth:
            # The event starts in one month and ends in another
            query = query.filter(or_(
                and_(
                    birth_month > c.EPOCH.month,
                    birth_month < c.ESCHATON.month),
                and_(
                    birth_month == c.EPOCH.month,
                    birth_day >= c.EPOCH.day),
                and_(
                    birth_month == c.ESCHATON.month,
                    birth_day <= c.ESCHATON.day)))
        else:
            # The event happens entirely within a single month
            query = query.filter(and_(
                birth_month == c.EPOCH.month,
                birth_day >= c.EPOCH.day,
                birth_day <= c.ESCHATON.day))

        for person in query.all():
            subject = "%s's Birthday" % person.full_name

            year_of_birthday = c.ESCHATON.year
            if is_multiyear:
                birth_month = person.birthdate.month
                birth_day = person.birthdate.day
                # BUG FIX: the original tested
                # ``birth_month >= c.EPOCH.month and birth_day >= c.EPOCH.day``,
                # which mis-assigns birthdays whose month is later than the
                # start month but whose day-of-month is earlier (e.g. an event
                # starting Nov 15 and a birthday on Dec 1 fell into the
                # ESCHATON year). Lexicographic (month, day) comparison gives
                # the intended "on or after the event start" test.
                if (birth_month, birth_day) >= (c.EPOCH.month, c.EPOCH.day):
                    year_of_birthday = c.EPOCH.year

            delta_years = year_of_birthday - person.birthdate.year
            start_date = person.birthdate + relativedelta(years=delta_years)
            end_date = start_date
            all_day = True
            private = False
            out.writerow([
                subject, start_date, '', end_date, '', all_day, '', '', private
            ])
Esempio n. 6
0
def total_tickets(account, year=None, month=None):
    """Return the total number of tickets sold by ``account``.

    :param account: account whose tickets are counted
    :param year: optional year restriction on ``Ticket.created_at``
    :param month: optional month restriction on ``Ticket.created_at``
    :return: int ticket count
    """
    query = Session.query(Ticket.id)
    # Apply optional temporal restrictions before joining to the account.
    if year is not None:
        query = query.filter(extract('year', Ticket.created_at) == year)
    if month is not None:
        query = query.filter(extract('month', Ticket.created_at) == month)
    query = query.join(TicketOrder, Order)
    query = query.filter(Order.account_id == account.id)
    return query.count()
Esempio n. 7
0
def parse_recent_games(season, game_type,
                       daysold=None, month=None, day=None, year=None,
                       game_id=None, limit=None):
    global session, Base, Classes
    
    """
    season: the NHL season
    game_type: the NHL game type (2: regular, 3: playoffs)
    daysold: only process games within N of das
    month: all games for the month specified
    day: all games on the day of the month specified
    year: all games during the year specified
    game_id: process a specific game
    limit: only process N number of games
    """

    logger = logging.getLogger("nhlcom")

    games = Classes.games
    filters = [games.season == season, games.game_type == game_type]

    if month:
        filters.append(extract('month', games.game_date) == month)
    if day:
        filters.append(extract('day', games.game_date) == day)
    if year:
        filters.append(extract('year', games.game_date) == year)
    if daysold:
        then = datetime.today() - timedelta(days=daysold)
        filters.append(games.game_date >= then)
    if game_id:
        filters.append(games.game_id == game_id)

    query = session.query(games). \
        filter(and_(*filters)). \
        order_by(games.game_date.asc()). \
        limit(limit)
    
    count = query.count()
    
    logger.log(logging.INFO, 'parsing %s games for season %s (game type %s)' % \
        (count, season, game_type))

    for i, game in enumerate(query):
        logger.log(logging.INFO, 'parsing game %s on date %s (%s/%s)' % \
            (game.game_id, game.game_date, i+1, count))
        save_pbp(reports.PlayByPlay(game.season, game.game_id))
        save_faceoffs(reports.Faceoffs(game.season, game.game_id))
        save_toi(reports.TimeOnIce(game.season, game.game_id))
        save_roster(reports.Rosters(game.season, game.game_id))
        save_box(reports.Boxscore(game.season, game.game_id))
        save_events(reports.Events(game.season, game.game_id))
Esempio n. 8
0
def find_high_low_pos(open_record):
    """Return the (highest, lowest) Volatility_storage records by asset_price
    for the same instrument name on the same day/month as ``open_record``.

    NOTE(review): each aggregate is selected alongside a bare ``id`` column
    without GROUP BY; pairing the aggregate with the matching row's id is
    non-standard SQL (SQLite/MySQL behavior) — confirm the backend in use.
    NOTE(review): only day and month are matched, not year — presumably the
    table holds at most one year of data; verify.
    """
    search_date = open_record.timestamp.date()
    search_name = open_record.name  # unused below; kept to avoid code changes
    highest_record_id = sql_session.query(func.max(Volatility_storage.asset_price),
                        Volatility_storage.id).filter(and_(Volatility_storage.name == open_record.name,
                        extract('day', Volatility_storage.timestamp) == search_date.day, 
                        extract('month', Volatility_storage.timestamp) == search_date.month)).one()
    # print type(highest_record_id[1]), highest_record_id[1]
    # Re-fetch the full row for the id paired with the max price.
    highest_record = query_records.filter(Volatility_storage.id == highest_record_id[1]).one()
    lowest_record_id = sql_session.query(func.min(Volatility_storage.asset_price), Volatility_storage.id).\
                        filter(and_(Volatility_storage.name == open_record.name,
                        extract('day', Volatility_storage.timestamp) == search_date.day, 
                        extract('month', Volatility_storage.timestamp) == search_date.month)).one()
    lowest_record = query_records.filter(Volatility_storage.id == lowest_record_id[1]).one()
    return highest_record, lowest_record
Esempio n. 9
0
    def current_priority_expression(cls, collection, last_build):
        """
        Return computed value for packages priority or None if package is not
        schedulable.

        :param: collection package's collection.
                           It should either be the Collection class object,
                           or it should be a concrete collection object if all packages
                           have the same collection. This is done as an optimization to
                           avoid adding join on collection table when the collection is
                           known.
        :param: last_build package's last complete build.
                           As with the previous argument, should be either Build class
                           object or particular last complete build object.

        :returns: SQLA expression that, when evaluated in the DB, returns the priority
        """
        # if last_build is concrete object, it may be None
        # packages with no last build should have no priority
        # (they cannot be scheduled)
        if not last_build:
            return null()

        dynamic_priority = cls.dependency_priority + cls.build_priority

        # compute time priority: grows logarithmically with hours since the
        # last build started. NOTE(review): extract('EPOCH', ...) on an
        # interval is PostgreSQL-specific — confirm backend.
        seconds = extract('EPOCH', func.clock_timestamp() - last_build.started)
        a, b = TIME_PRIORITY.inputs
        # avoid zero/negative values, when time difference too small
        log_arg = func.greatest(0.000001, seconds / 3600)
        dynamic_priority += func.greatest(a * func.log(log_arg) + b, -30)

        # dynamic priority is affected by coefficient
        dynamic_priority *= collection.priority_coefficient

        # manual and static priority are not affected by coefficient
        current_priority = cls.manual_priority + cls.static_priority + dynamic_priority

        # list-form case([...]) is the SQLAlchemy 1.x calling convention
        return case(
            [
                # handle unschedulable packages
                (
                    # WHEN blocked OR untracked
                    cls.blocked | ~cls.tracked |
                    # OR has running build
                    (cls.last_complete_build_id != cls.last_build_id) |
                    # OR is unresolved
                    (cls.resolved == False) |
                    # OR resolution is not yet done
                    ((cls.resolved == None) & ~cls.skip_resolution) |
                    # OR the collection's buildroot is broken
                    (collection.latest_repo_resolved == False) |
                    # OR the collection's buildroot wasn't resolved yet
                    (collection.latest_repo_resolved == None),
                    # THEN return NULL
                    None,
                )
            ],
            # ELSE return the computed priority
            else_=current_priority)
Esempio n. 10
0
def crimes_per_day_per_year():
  """Plot, for each year 2003-2015, crime counts grouped by day of week.

  Draws one line per year; the x axis is the day of week
  (Sunday = 1, ... Saturday = 7).
  """
  weekdays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
              'Thursday', 'Friday', 'Saturday']
  crime_count_year = []
  for year in range(2003, 2016):
    # One count query per (weekday, year) pair, replacing the seven
    # copy-pasted statements of the original.
    crime_count_year.append([
        session.query(Train).filter(
            Train.day == day_name,
            extract('year', Train.datetime) == year).count()
        for day_name in weekdays])

  plt.figure()
  # Sunday = 1, .... Saturday = 7
  x = range(1, 8)
  # These are the colors that will be used in the plot
  color_sequence = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',
                    '#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5',
                    '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f',
                    '#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5']

  # NOTE(review): the original passed both the 'r--' format string and an
  # explicit color; the color keyword wins, so 'r--' only supplies the
  # dashed line style. Preserved as-is.
  for i, cc in enumerate(crime_count_year):
    plt.plot(x, cc, 'r--', linewidth=1, color=color_sequence[i])
  plt.xlabel('Day')
  plt.ylabel('Count')
  plt.title(r'Number of Crimes per day')
  # Tweak spacing to prevent clipping of ylabel
  plt.subplots_adjust(left=0.15)
  plt.show()
 def get_years_tweets_counts(self, year):
     """
     Return the number of tweets created in the given year.

     :param year: year value matched against the ``created_at`` column
     :return: int tweet count
     """
     model = self.get_new_model()
     # extract('year', ...) compares only the year component of created_at.
     tweets_count = self.session.query(model).filter(year == extract('year', model.created_at)).count()
     return tweets_count
Esempio n. 12
0
 def get_years_tweets_counts(self, year):
     """Count tweets whose ``created_at`` year equals ``year``.

     :param year: year value to match
     :return: int tweet count
     """
     model = self.get_new_model()
     query = self.session.query(model)
     query = query.filter(year == extract('year', model.created_at))
     return query.count()
Esempio n. 13
0
    def get(self, userid: Optional[int] = None, categoryid: Optional[int] = None,
            month: Optional[int] = None, year: Optional[int] = None ):
        """
        Get a list of all bills, optionally filtered by user, category,
        year and month (read from the request's query string).

        :raises BadRequest: when ``month`` is given without ``year``
        """
        query = Bill.query
        args = request.args
        if "userid" in args:
            query = query.filter_by(user_id=args["userid"])
        if "categoryid" in args:
            query = query.filter_by(category_id=args["categoryid"])

        if "month" in args and "year" not in args:
            # BUG FIX: the original *returned* the BadRequest exception
            # instance instead of raising it, so clients got a 200 response
            # carrying a serialized exception object rather than a 400.
            raise BadRequest("year not set")

        if "year" in args:
            query = query.filter(extract("year", Bill.date) == args["year"])
            if "month" in args:
                query = query.filter(extract("month", Bill.date) == args["month"])
        return [bil.json() for bil in query.all()]
Esempio n. 14
0
File: run.py Progetto: dngl0/py-nhl
def parse_recent_games(season, game_type,
                       daysold=None, month=None, day=None, year=None,
                       game_id=None, limit=None):
    """Parse and persist recently played NHL games matching the filters."""
    global session, Base, Classes

    log = logging.getLogger("nhlcom")

    games = Classes.games

    # Mandatory predicates first; date-part filters only when supplied.
    conditions = [games.season == season, games.game_type == game_type]
    for part, value in (('month', month), ('day', day), ('year', year)):
        if value:
            conditions.append(extract(part, games.game_date) == value)
    if daysold:
        cutoff = datetime.today() - timedelta(days=daysold)
        conditions.append(games.game_date >= cutoff)
    if game_id:
        conditions.append(games.game_id == game_id)

    query = session.query(games) \
        .filter(and_(*conditions)) \
        .order_by(games.game_date.asc()) \
        .limit(limit)

    count = query.count()

    log.log(logging.INFO, 'parsing %s games for season %s (game type %s)'
            % (count, season, game_type))

    # Persist every report type for each matching game, oldest first.
    for index, game in enumerate(query):
        log.log(logging.INFO, 'parsing game %s on date %s (%s/%s)'
                % (game.game_id, game.game_date, index + 1, count))
        for report_cls, saver in ((reports.PlayByPlay, save_pbp),
                                  (reports.Faceoffs, save_faceoffs),
                                  (reports.TimeOnIce, save_toi),
                                  (reports.Rosters, save_roster),
                                  (reports.Boxscore, save_box),
                                  (reports.Events, save_events)):
            saver(report_cls(game.season, game.game_id))
Esempio n. 15
0
 def in_rotation_window(cls):
     """
     Build a SQL CASE expression that is True when the certificate's
     remaining days until ``not_after`` fall within its rotation policy's
     day window, False otherwise.
     :return: SQLAlchemy case expression
     """
     days_remaining = extract('day', cls.not_after - func.now())
     return case(
         [(days_remaining <= RotationPolicy.days, True)],
         else_=False,
     )
Esempio n. 16
0
    def add_to_query(self, query, objformatter, value=None, op_num=None, negate=False,
                     collection=None, join_cache=None):
        """Add this field spec's joins (and, optionally, a filter) to ``query``.

        :param query: the SQLAlchemy query being built
        :param objformatter: formatter used to render/aggregate related objects
        :param value: filter operand, or another QueryFieldSpec to compare against
        :param op_num: index of the filter operation; ``None`` means no filter
        :param negate: when True, wrap the resulting filter in NOT
        :param collection: collection context for tree fields and UI formatters
        :param join_cache: cache of already-built joins, shared across fields
        :returns: (query, orm_field) tuple
        """
        # When op_num is None we only build joins/formatting, no WHERE clause.
        no_filter = op_num is None

        if self.tree_rank is None and self.get_field() is None:
            # Bare table reference: format the whole record.
            query, orm_field = objformatter.objformat(query, getattr(models, self.root_table.name), None, join_cache)
            no_filter = True
        elif self.is_relationship():
            # will be formatting or aggregating related objects
            if self.get_field().type == 'many-to-one':
                query, orm_model, table, field = self.build_join(query, self.join_path, join_cache)
                query, orm_field = objformatter.objformat(query, orm_model, None, join_cache)
            else:
                # To-many: aggregate the related rows; the join stops one
                # step short of the final path element.
                query, orm_model, table, field = self.build_join(query, self.join_path[:-1], join_cache)
                orm_field = objformatter.aggregate(query, self.get_field(), orm_model, None)
        else:
            query, orm_model, table, field = self.build_join(query, self.join_path, join_cache)

            if self.tree_rank is not None:
                query, orm_field = handle_tree_field(query, orm_model, table, self.tree_rank,
                                                     self.tree_field, collection, join_cache)
            else:
                orm_field = getattr(orm_model, self.get_field().name)

                if field.type == "java.sql.Timestamp":
                    # Only consider the date portion of timestamp fields.
                    # This is to replicate the behavior of Sp6. It might
                    # make sense to condition this on whether there is a
                    # time component in the input value.
                    orm_field = sql.func.DATE(orm_field)

                if field.is_temporal() and self.date_part != "Full Date":
                    # Compare only the requested date part (year/month/day).
                    orm_field = extract(self.date_part, orm_field)

        if not no_filter:
            if isinstance(value, QueryFieldSpec):
                # Field-to-field comparison: build the other field's joins on
                # the same query and compare against its column.
                _, other_field = value.add_to_query(query.reset_joinpoint(),
                                                    objformatter,
                                                    join_cache=join_cache)
                uiformatter = None
                value = other_field
            else:
                uiformatter = field and get_uiformatter(collection, table.name, field.name)
                value = value  # NOTE(review): no-op self-assignment; safe to drop

            op = QueryOps(uiformatter).by_op_num(op_num)
            f = op(orm_field, value)
            query = query.filter(sql.not_(f) if negate else f)

        # Reset so subsequent fields build their joins from the query root.
        query = query.reset_joinpoint()

        return query, orm_field
Esempio n. 17
0
def fetch_records(resources, year, month):
    """Return the reservation records used for the dataset.

    :param resources: mapping whose keys identify the resources to include
    :param year: year to filter on, or 'all' for no year filter
    :param month: month to filter on, or 'all' for no month filter
    :return: list of Reservation rows in a stable export order
    """
    resource_keys = resources.keys()
    if not resource_keys:
        # Nothing requested, nothing to fetch.
        return []

    reservations = Session().query(Reservation).filter(
        Reservation.resource.in_(resources.keys()))

    if year != 'all':
        reservations = reservations.filter(
            extract('year', Reservation.start) == int(year))
    if month != 'all':
        reservations = reservations.filter(
            extract('month', Reservation.start) == int(month))

    # Deterministic ordering: resource, status, start, email, token.
    return reservations.order_by(
        Reservation.resource,
        Reservation.status,
        Reservation.start,
        Reservation.email,
        Reservation.token,
    ).all()
Esempio n. 18
0
def start_game():
    """Render the take-meds page for today's routine.

    Fetches (or creates) the routine created today, flags whether the day's
    window is over (past 22:30), and passes missed/taken counts to the
    template.
    """
    today = datetime.datetime.now()
    routine = session.query(Routine).filter(
        extract('year', Routine.created_at) == today.year,
        extract('month', Routine.created_at) == today.month,
        extract('day', Routine.created_at) == today.day).first()
    if not routine:
        routine = create_routine()
    # The day is considered over after 22:30 local time.
    # (Fixed: removed the unused local ``msg`` and the flag/if dance.)
    is_over = today >= datetime.datetime(today.year, today.month, today.day, 22, 30)

    list1 = compute_takens(routine)
    is_missed_num = list1[0]
    is_taken_num = list1[1]
    return render_template('take_meds.html',
                           routine=routine,
                           today=today,
                           is_over=is_over,
                           is_missed_num=is_missed_num,
                           is_taken_num=is_taken_num)
Esempio n. 19
0
def start_game():
    """Render the take-meds page for today's routine (creating it if absent)."""
    today = datetime.datetime.now()
    routine = session.query(Routine).filter(extract('year', Routine.created_at) == today.year, extract('month', Routine.created_at) == today.month, extract('day', Routine.created_at) == today.day).first()
    msg = ""  # NOTE(review): unused; kept pending confirmation nothing reads it
    if not routine:
        routine = create_routine()
    is_over = False
    # BUG FIX: the body of this ``if`` was indented with a tab while the rest
    # of the function uses spaces — a TabError under Python 3.
    if today >= datetime.datetime(today.year, today.month, today.day, 22, 30):
        is_over = True
    list1 = compute_takens(routine)
    is_missed_num = list1[0]
    is_taken_num = list1[1]
    return render_template('take_meds.html', routine=routine, today=today, is_over=is_over, is_missed_num = is_missed_num, is_taken_num = is_taken_num)
Esempio n. 20
0
def fetch_records(resources, year, month):
    """ Returns the records used for the dataset.

    :param resources: mapping whose keys identify the resources to include
    :param year: year to filter on, or 'all' for no year filter
    :param month: month to filter on, or 'all' for no month filter
    :return: list of Reservation rows in a stable export order
    """
    # No resources requested -> nothing to fetch.
    if not resources.keys():
        return []

    query = Session().query(Reservation)
    query = query.filter(Reservation.resource.in_(resources.keys()))

    if year != 'all':
        query = query.filter(extract('year', Reservation.start) == int(year))

    if month != 'all':
        query = query.filter(extract('month', Reservation.start) == int(month))

    # Deterministic ordering: resource, status, start, email, token.
    query = query.order_by(
        Reservation.resource,
        Reservation.status,
        Reservation.start,
        Reservation.email,
        Reservation.token,
    )

    return query.all()
Esempio n. 21
0
def leaderboard_query(session, start_date, until_date):
    """
    Build the leaderboard query for instances active between ``start_date``
    and ``until_date``.

    This is, admittedly, a really ugly sql query. Query optimization has not
    been performed, but it shouldn't be anything more complicated than a few
    indices. Good luck.

    :returns: a query yielding (user_id, uptime, cpu_seconds, instance_count,
              username, is_staff, name) rows, grouped per user and ordered by
              cpu_seconds descending.
    """
    subq = session\
        .query(
            Instance,
            InstanceType,
            User,
            case([(Instance.end_date != None, Instance.end_date)], else_=now()).label('stop_date'))\
        .join(Instance.user)\
        .join(Instance.type)\
        .subquery()

    # Seconds each instance was up inside the window; 0 when it never
    # overlaps [start_date, until_date].
    uptime_column = case(
        [
            (subq.c.created_date > until_date, 0),
            (subq.c.stop_date < start_date, 0)
        ],
        else_=extract('epoch',
            func.LEAST(subq.c.stop_date, cast(until_date, DateTime)) -
            func.GREATEST(subq.c.created_date, cast(start_date, DateTime))
        )
    )

    # BUG FIX: removed a stray Python 2 debug statement (``print subq.c``)
    # that spammed stdout and made this module a SyntaxError on Python 3.
    # NOTE(review): ``sum`` below must be the SQL sum function imported
    # elsewhere in this module, not the builtin — confirm the import.
    subq2 = session.query(
        subq.c.user_id,
        sum(case([(uptime_column == 0, 0)], else_=1)).label('instance_count'),
        sum(uptime_column).label('uptime'),
        sum(uptime_column * subq.c.cpu).label('cpu_seconds')
    ).group_by(subq.c.user_id).order_by(desc('cpu_seconds')).subquery()

    q = session.query(
        subq2.c.user_id,
        subq2.c.uptime,
        subq2.c.cpu_seconds,
        subq2.c.instance_count,
        User.username,
        User.is_staff,
        User.name
    ).join(User)

    return q
Esempio n. 22
0
def _resource_consumption_stats_view():
    """Build a SELECT of per-(package, arch) Koji task time consumption.

    Columns: package name, task arch, summed task duration ('time'), and
    that duration as a fraction of the total across all tasks
    ('time_percentage').
    """
    # Sum of task durations (an interval); EPOCH converts it to seconds.
    time_difference_expr = func.sum(KojiTask.finished - KojiTask.started)
    time_difference = extract('EPOCH', time_difference_expr)
    # Scalar subquery: total seconds across all tasks (the denominator).
    time_difference_all = select([time_difference]).select_from(KojiTask)
    return (select([
        Package.name,
        KojiTask.arch,
        time_difference_expr.label('time'),
        cast(time_difference / time_difference_all,
             Float).label('time_percentage'),
    ]).select_from(
        join(
            join(Package, Build, Package.id == Build.package_id),
            KojiTask,
        )).group_by(Package.name, KojiTask.arch))
Esempio n. 23
0
    def _filter(self, **terms):
        """Build an AND-ed SQLAlchemy filter from Django-style
        ``field__comparison=value`` keyword terms.

        :param terms: keyword arguments; keys are column names, optionally
                      suffixed with ``__<comparison>`` (default: exact match)
        :returns: an ``and_(...)`` clause combining all term filters
        :raises KeyError: on an unknown column or comparison suffix
        """
        # Built once per call — the original rebuilt this dict on every
        # loop iteration.
        lookups = {
            "exact": lambda c, v: c == v,
            "day": lambda c, v: extract('day', c) == v,
            "month": lambda c, v: extract('month', c) == v,
            "year": lambda c, v: extract('year', c) == v,
            "week_date": lambda c, v: extract('dow', c) == v,
            "in": lambda c, v: c.in_(v),
            "contains": lambda c, v: c.contains(v),
            "icontains": lambda c, v: c.ilike("%" + v + "%"),
            "startswith": lambda c, v: c.startswith(v),
            "istartswith": lambda c, v: c.ilike(v + "%"),
            "endswith": lambda c, v: c.endswith(v),
            "iendswith": lambda c, v: c.ilike("%" + v),
            "range": lambda c, v: between(c, v[0], v[1]),
            "isnull": lambda c, v: c == None if v else c != None,
            }

        filters = []
        for k, v in terms.items():
            if "__" not in k:
                comparison = "exact"
            else:
                k, comparison = k.split("__")

            try:
                column = getattr(self.model.__table__.c, k)
            except AttributeError:
                raise KeyError("Invalid key '%s'" % k)

            if comparison not in lookups:
                raise KeyError("Invalid comparison type %s" % comparison)

            filters.append(lookups[comparison](column, v))

        return and_(*filters)
Esempio n. 24
0
    def get_basequery(self):
        """Filter the archive's base query down to the year/month/day found
        in the URL kwargs or the request's query string.

        Year is mandatory; month and day progressively narrow the result.

        :returns: the filtered base query
        :raises ValueError: when no year is supplied
        """
        basequery = super(ArchiveView, self).get_basequery()
        day_kwarg = self.get_day_kwarg()
        month_kwarg = self.get_month_kwarg()
        year_kwarg = self.get_year_kwarg()

        day = self.kwargs.get(day_kwarg) or self.request.args.get(day_kwarg)
        month = self.kwargs.get(month_kwarg) or self.request.args.get(
            month_kwarg)
        year = self.kwargs.get(year_kwarg) or self.request.args.get(year_kwarg)

        if year is None:
            # BUG FIX: ``raise ValueError, 'feel bad'`` is Python 2-only
            # syntax and a SyntaxError under Python 3.
            raise ValueError('feel bad')
        basequery = basequery.filter(
            extract('year', self.model.create_time) == int(year))
        if month is None:
            return basequery
        basequery = basequery.filter(
            extract('month', self.model.create_time) == int(month))
        if day is None:
            return basequery
        basequery = basequery.filter(
            extract('day', self.model.create_time) == int(day))
        return basequery
Esempio n. 25
0
    def current_priority_expression(cls, collection, last_build):
        """
        Return computed value for packages priority or None if package is not
        schedulable.

        :param: collection package's collection — either the Collection class
                           or a concrete collection object shared by all
                           packages being queried.
        :param: last_build package's last complete build — either the Build
                           class or a concrete build object; a falsy concrete
                           value means the package has no priority.
        :returns: SQLA expression that, when evaluated in the DB, returns the
                  priority (or ``null()`` when there is no last build)
        """
        # if last_build is concrete object, it may be None
        # packages with no last build should have no priority
        # (they cannot be scheduled)
        if not last_build:
            return null()

        dynamic_priority = cls.dependency_priority + cls.build_priority

        # compute time priority: grows logarithmically with hours since the
        # last build started. NOTE(review): extract('EPOCH', ...) on an
        # interval is PostgreSQL-specific — confirm backend.
        seconds = extract('EPOCH', func.clock_timestamp() - last_build.started)
        a, b = TIME_PRIORITY.inputs
        # avoid zero/negative values, when time difference too small
        log_arg = func.greatest(0.000001, seconds / 3600)
        dynamic_priority += func.greatest(a * func.log(log_arg) + b, -30)

        # dynamic priority is affected by coefficient
        dynamic_priority *= collection.priority_coefficient

        # manual and static priority are not affected by coefficient
        current_priority = cls.manual_priority + cls.static_priority + dynamic_priority

        # list-form case([...]) is the SQLAlchemy 1.x calling convention
        return case(
            [
                # handle unschedulable packages
                (
                    # WHEN blocked OR untracked
                    cls.blocked | ~cls.tracked |
                    # OR has running build
                    (cls.last_complete_build_id != cls.last_build_id) |
                    # OR is unresolved
                    (cls.resolved == False) |
                    # OR resolution is not yet done
                    ((cls.resolved == None) & ~cls.skip_resolution) |
                    # OR the collection's buildroot is broken
                    (collection.latest_repo_resolved == False) |
                    # OR the collection's buildroot wasn't resolved yet
                    (collection.latest_repo_resolved == None),
                    # THEN return NULL
                    None,
                )
            ],
            # ELSE return the computed priority
            else_=current_priority)
    def get_tweets_for_year(self, year):
        """
        Return tweets created in the given year, oldest first.

        :param year: string year, or 'ALL' to fetch every tweet
        :return: list of tweets matching the criteria; capped at
                 ALE_EACH_YEAR_TWEET_LIMIT_COUNT when a specific year is given
        """

        model = self.get_new_model()
        # Idiom fix: replaced the awkward ``if not 'ALL' == year`` test.
        if year != 'ALL':
            tweets = self.session.query(model).filter(
                year == extract('year', model.created_at)).order_by(
                model.created_at).limit(ALE_EACH_YEAR_TWEET_LIMIT_COUNT).all()
        else:
            # 'ALL': no year filter and no row limit.
            tweets = self.session.query(model).order_by(model.created_at).all()

        print("Retrieved " + str(len(tweets)) + " rows from database for year:" + str(year) + ".")
        return tweets
Esempio n. 27
0
    def sanitize_build_year(cls) -> sase.Update:
        """
        Query to update build_year with the year in firstuse if build_year is lower than 1940 or higher than 2020.
        A quick scan of the data showed that 1940 is approximately the lowest build_year found that looks reasonable
        compared with firstuse. Somewhere it showed that the data files were created in 2018, therefore 2020 is a bit
        optimistic.
        All "years" that fall outside of this range are overwritten with the year of firstuse. If firstuse has a
        diverging year that is not further remedied because there is not anything to quickly test or check against.

        :return: A sql statement to update the build_year column with the firstuse year
        """

        # NOTE(review): prefix_with("IGNORE") looks like MySQL's UPDATE IGNORE
        # (skip rows that would error) — confirm the dialect in use.
        stmt = sase.update(cls).prefix_with("IGNORE").where(sase.or_(
            sase.cast(cls.build_year, Integer) < constants.MIN_YEAR,
            sase.cast(cls.build_year, Integer) > constants.MAX_YEAR)
        ).values(
            build_year=sase.cast(sase.extract('year', sase.cast(cls.firstuse, Date)), String)
        )
        return stmt
Esempio n. 28
0
def _resource_consumption_stats_view():
    """Select per-(package, arch) Koji task runtime and its share of the total."""
    # Summed wall-clock duration of the tasks in each group.
    task_duration_sum = func.sum(KojiTask.finished - KojiTask.started)
    duration_seconds = extract('EPOCH', task_duration_sum)
    # Grand total over every task — the denominator of the percentage column.
    total_seconds = select([duration_seconds]).select_from(KojiTask)
    packages_with_tasks = join(
        join(Package, Build, Package.id == Build.package_id),
        KojiTask,
    )
    return (
        select([
            Package.name,
            KojiTask.arch,
            task_duration_sum.label('time'),
            cast(duration_seconds / total_seconds, Float).label('time_percentage'),
        ])
        .select_from(packages_with_tasks)
        .group_by(Package.name, KojiTask.arch)
    )
Esempio n. 29
0
    def get_tweets_for_year(self, year):
        """
        Returns tweets created in the given year ('ALL' fetches every tweet).

        :param year: string, year
        :return: list, tweets matching criteria
        """

        model = self.get_new_model()
        if 'ALL' == year:
            tweets = self.session.query(model).order_by(model.created_at).all()
        else:
            query = self.session.query(model)
            query = query.filter(year == extract('year', model.created_at))
            query = query.order_by(model.created_at)
            tweets = query.limit(ALE_EACH_YEAR_TWEET_LIMIT_COUNT).all()

        print("Retrieved " + str(len(tweets)) +
              " rows from database for year:" + str(year) + ".")
        return tweets
    def period_expression(self, period: PeriodType, column: any) -> any:
        """Builds a period expression from a provided period type and for a given column.

        :param period: the period type (DAY, MONTH, QUARTER or YEAR)
        :param column: the date column the expression is derived from
        :return: the period expression, or None for an unknown period type
        """
        # Every period representation starts with the four-digit year.
        year_part = cast(extract('year', column), String)
        if period == PeriodType.DAY:
            return year_part \
                   + '-' \
                   + self.__entity_manager.query_adapter.pad(extract('month', column), '00') \
                   + '-' \
                   + self.__entity_manager.query_adapter.pad(extract('day', column), '00')
        if period == PeriodType.MONTH:
            return year_part \
                   + '-' \
                   + self.__entity_manager.query_adapter.pad(extract('month', column), '00')
        if period == PeriodType.QUARTER:
            # BUG FIX: the quarter was previously computed from
            # TransactionDbo.date_value instead of the supplied column,
            # so quarter periods ignored the caller's column entirely.
            # Months 1-3 -> Q1, 4-6 -> Q2, etc.
            return year_part \
                   + '-Q' \
                   + cast(cast(cast(extract('month', column) - 1, Integer) / 3 + 1, Integer), String)
        if period == PeriodType.YEAR:
            return year_part
Esempio n. 31
0
def crime_by_time(category):
  """Plot the hourly distribution of crimes in the given category.

  Counts Train rows matching *category* for each hour of the day (0-23)
  and shows the result as a red scatter plot.

  :param category: crime category to filter on
  """
  # One count per hour 0-23. (The original comment claiming "Sunday = 1,
  # ... Saturday = 7" was wrong — the x axis is hours, not weekdays.)
  counts = []
  for hour in range(24):
    counts.append(
        session.query(Train.datetime)
        .filter(Train.category == category,
                extract('hour', Train.datetime) == hour)
        .count())

  plt.figure()
  hours = range(24)

  # The original defined a 24-entry color palette and an index i=0 but only
  # ever used color_sequence[0]; the dead palette has been removed.
  plt.plot(hours, counts, 'ro', linewidth=1, color='#1f77b4')
  plt.xlabel('Hour')
  plt.ylabel('Count')
  plt.title(r'Number of Crimes per Hour')
  # Tweak spacing to prevent clipping of ylabel
  plt.subplots_adjust(left=0.15)
  plt.show()
Esempio n. 32
0
 def get_events(self, country, year):
     """Return GeoJSON Feature dicts for every event in *country* during *year*."""
     rows = self.session.query(
         Event.longitud, Event.latitud, Event.event_id_cnty,
         Event.fatalities, Event.notes) \
         .filter(extract('year', Event.event_date) == year) \
         .filter(Event.country_id == country)
     features = []
     for row in rows:
         features.append({
             "type": "Feature",
             "geometry": {"type": "Point",
                          "coordinates": [row.longitud, row.latitud]},
             "properties": {
                 "title": row.event_id_cnty,
                 "description": "%s. Fatalities: %s" % (row.notes, row.fatalities),
                 "marker-symbol": "hospital",
                 "marker-color": "#ff4136",
             },
         })
     return features
Esempio n. 33
0
 def func_extract(self, property, tree):
     """Build a SQL EXTRACT expression for *property* over the selectable of *tree*."""
     selectable = self.get_selectable(property.property, tree)
     return expression.extract(property.field, selectable)
Esempio n. 34
0
 def get_events_center(self, country, year):
     """Return [longitude, latitude] of the mean event position, or a fixed default."""
     query = self.session.query(
         func.avg(Event.longitud).label('longitud'),
         func.avg(Event.latitud).label('latitud')) \
         .filter(extract('year', Event.event_date) == year) \
         .filter(Event.country_id == country)
     coordinates = []
     for row in query:
         coordinates.append(row.longitud)
         coordinates.append(row.latitud)
     if not coordinates:
         # No rows at all: fall back to a hard-coded default center.
         return [23.992075732837595, 4.755856763705008]
     return coordinates
Esempio n. 35
0
def insert_headlines(session: Session, dir_nikkei_headline: Path,
                     train_span: Span, valid_span: Span, test_span: Span,
                     logger: Logger) -> None:
    """Bulk-insert Nikkei headline CSV rows into the Headline table.

    Reads every ``*.csv`` / ``*.csv.gz`` file under *dir_nikkei_headline*,
    parses each row's timestamp, tags the row with a Train/Valid/Test phase
    depending on which span the timestamp falls into, and inserts all rows of
    a file with a single bulk INSERT followed by a commit.
    """

    dests = list(dir_nikkei_headline.glob('*.csv.gz')) + list(
        dir_nikkei_headline.glob('*.csv'))
    for dest in dests:
        # Open transparently whether the CSV is gzip-compressed or plain text.
        with gzip.open(str(dest),
                       mode='rt') if dest.suffix == '.gz' else dest.open(
                           mode='r') as f:

            # Number of data rows (the header line is excluded).
            N = sum(1 for _ in f) - 1
            f.seek(0)
            reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_ALL)
            next(reader)  # skip the header row
            fields = next(reader)
            t = fields[1]
            # NOTE(review): a UTC offset is appended when 'Z' OR '+' is
            # missing, so a value ending in 'Z' but lacking '+' still gets
            # '+0000' appended — confirm this matches NIKKEI_DATETIME_FORMAT.
            if 'Z' not in t or '+' not in t:
                t = t + '+0000'
            t = datetime.strptime(t, NIKKEI_DATETIME_FORMAT).astimezone(JST)
            # Use the first row's year to decide whether this file's data is
            # already present in the table.
            first = session \
                .query(Headline) \
                .filter(extract('year', in_jst(Headline.t)) == t.year) \
                .first()
            if first is not None:
                # NOTE(review): 'return' aborts the whole import, skipping any
                # remaining files too — 'continue' may have been intended.
                return

            logger.info('start {}'.format(f.name))

            f.seek(0)
            next(reader)  # skip the header again after rewinding
            headlines = []
            for _ in tqdm(range(N)):
                fields = next(reader)
                t = fields[1]
                if 'Z' not in t or '+' not in t:
                    t = t + '+0000'
                # Column layout (per this code): 5 = article id, 6 = headline
                # text; the colon-separated list columns below may be empty.
                article_id = fields[5]
                headline = fields[6]
                isins = None if fields[25] == '' else fields[25].split(':')
                countries = None if fields[36] == '' else fields[36].split(':')
                categories = None if fields[37] == '' else fields[37].split(
                    ':')
                keywords_headline = None if fields[-2] == '' else fields[
                    -2].split(':')
                keywords_article = None if fields[-1] == '' else fields[
                    -1].split(':')
                try:
                    t = datetime.strptime(t, NIKKEI_DATETIME_FORMAT)
                except ValueError:
                    # Unparseable timestamp: log the row and skip it.
                    message = 'ValueError: {}, {}, {}'
                    logger.info(message.format(t, article_id, headline))
                    continue

                # Tag the row with the dataset split its timestamp falls into;
                # timestamps outside all three spans get phase None.
                if train_span.start <= t and t < train_span.end:
                    phase = Phase.Train.value
                elif valid_span.start <= t and t < valid_span.end:
                    phase = Phase.Valid.value
                elif test_span.start <= t and t < test_span.end:
                    phase = Phase.Test.value
                else:
                    phase = None

                headlines.append({
                    'article_id': article_id,
                    't': t,
                    'headline': headline,
                    'isins': isins,
                    'countries': countries,
                    'categories': categories,
                    'keywords_headline': keywords_headline,
                    'keywords_article': keywords_article,
                    'is_used': None,
                    'phase': phase
                })

            # One bulk INSERT per file keeps the import fast.
            session.execute(Headline.__table__.insert(), headlines)
            session.commit()
Esempio n. 36
0
# Derived balance used by the *_bal indicator columns below.
acct_balance = tdAccounts.starting_balance + tdAccounts.ending_balance
# Add one-hot account-type flags (checking/savings/credit-card) and the
# corresponding per-type balance columns to the accounts DataFrame.
acct = tdAccounts.assign(
    ck_acct=case([(tdAccounts.acct_type == "CK", 1)], else_=0),
    sv_acct=case([(tdAccounts.acct_type == "SV", 1)], else_=0),
    cc_acct=case([(tdAccounts.acct_type == "CC", 1)], else_=0),
    ck_bal=case([(tdAccounts.acct_type == "CK", acct_balance.expression)],
                else_=0),
    sv_bal=case([(tdAccounts.acct_type == "SV", acct_balance.expression)],
                else_=0),
    cc_bal=case([(tdAccounts.acct_type == "CC", acct_balance.expression)],
                else_=0))
acct.to_pandas().head(10)

# Next: Get the transaction information required for the aggregation. Pull out
# the quarter the transaction was made.
acct_mon = extract('month', tdTransactions.tran_date.expression).expression
# NOTE(review): the month expression is compared against string literals
# ("1".."12") — presumably the Teradata EXTRACT result coerces to a character
# type here; confirm against the target database.
trans = tdTransactions.assign(
    q1_trans=case([(acct_mon == "1", 1), (acct_mon == "2", 1),
                   (acct_mon == "3", 1)],
                  else_=0),
    q2_trans=case([(acct_mon == "4", 1), (acct_mon == "5", 1),
                   (acct_mon == "6", 1)],
                  else_=0),
    q3_trans=case([(acct_mon == "7", 1), (acct_mon == "8", 1),
                   (acct_mon == "9", 1)],
                  else_=0),
    q4_trans=case([(acct_mon == "10", 1), (acct_mon == "11", 1),
                   (acct_mon == "12", 1)],
                  else_=0),
)
trans.to_pandas().head(10)
Esempio n. 37
0
 def year(cls):
     """Return a SQL expression extracting the year component of the date column."""
     year_component = extract('year', cls.date)
     return year_component
Esempio n. 38
0
 def on_get(self, req, resp, year, month):
     """Respond with all schedules for the given year/month as a JSON list."""
     year_matches = extract("year", ms.Schedule.date) == year
     month_matches = extract("month", ms.Schedule.date) == month
     schedules = sess.query(ms.Schedule).filter(year_matches, month_matches)
     resp.status = falcon.HTTP_200
     resp.body = json.dumps([schedule.to_dict() for schedule in schedules])
Esempio n. 39
0
def get_periodic_stats_quarter_hour(session, start, end):
    """Aggregate queue-call stats into 15-minute buckets between start and end."""
    # Truncate to the hour, then add 0/15/30/45 minutes from the minute part.
    quarter_index = cast(extract('minute', StatCallOnQueue.time), Integer) / 15
    bucket = func.date_trunc(literal('hour'), StatCallOnQueue.time) + \
        quarter_index * timedelta(minutes=15)
    return _get_periodic_stat_by_step(session, start, end, bucket)
Esempio n. 40
0
    def get_restaurant_people(restaurant_id: int):
        """
        Given the id of the restaurant return the number of people at lunch and dinner.

        :param restaurant_id: id of the restaurant
        :return: [lunch reservation count, dinner reservation count,
                  currently checked-in reservation count]
        """
        openings = (
            db.session.query(OpeningHours)
            .filter(
                OpeningHours.week_day == datetime.today().weekday(),
                OpeningHours.restaurant_id == restaurant_id,
            )
            .first()
        )
        if openings is None:
            # Restaurant is closed today: nothing can be booked.
            return [0, 0, 0]

        tables = (
            db.session.query(RestaurantTable)
            .filter_by(restaurant_id=restaurant_id)
            .all()
        )
        tables_id = [table.id for table in tables]

        # Filters shared by the lunch and dinner counts: a reservation made
        # for today at one of this restaurant's tables.
        base_filters = [
            Reservation.table_id.in_(tables_id),
            extract("day", Reservation.reservation_date)
            == extract("day", datetime.today()),
            extract("month", Reservation.reservation_date)
            == extract("month", datetime.today()),
            extract("year", Reservation.reservation_date)
            == extract("year", datetime.today()),
        ]

        def _count_in_window(open_time, close_time):
            # Count today's reservations whose hour falls inside the window.
            return (
                db.session.query(Reservation)
                .filter(
                    *base_filters,
                    extract("hour", Reservation.reservation_date)
                    >= extract("hour", open_time),
                    extract("hour", Reservation.reservation_date)
                    <= extract("hour", close_time),
                )
                .count()
            )

        lunch_count = _count_in_window(openings.open_lunch, openings.close_lunch)
        dinner_count = _count_in_window(openings.open_dinner, openings.close_dinner)

        # BUG FIX: the original used "Reservation.checkin is True", which
        # Python evaluates immediately as an identity test on the Column
        # object (always False), so the intended SQL comparison never ran.
        reservations_now = (
            db.session.query(Reservation)
            .filter(
                Reservation.checkin.is_(True),
                Reservation.reservation_date <= datetime.now(),
                Reservation.reservation_end >= datetime.now(),
            )
            .all()
        )

        return [lunch_count, dinner_count, len(reservations_now)]
Esempio n. 41
0
def get_educational_year_beginning_at_given_year(year: int) -> EducationalYear:
    """Fetch the educational year whose beginningDate falls in *year*.

    :raises EducationalYearNotFound: when no matching educational year exists
    """
    year_matches = extract("year", EducationalYear.beginningDate) == year
    educational_year = EducationalYear.query.filter(year_matches).one_or_none()
    if educational_year is None:
        raise EducationalYearNotFound()
    return educational_year
Esempio n. 42
0
    def add_to_query(self,
                     query,
                     objformatter,
                     value=None,
                     op_num=None,
                     negate=False,
                     collection=None,
                     join_cache=None):
        """Add this field spec to *query*: build the joins needed to reach the
        field, derive the ORM expression for it, and (unless no operator was
        given) apply the comparison as a filter.

        :param query: the SQLAlchemy query being built
        :param objformatter: formatter used to format/aggregate related objects
        :param value: literal to compare against, or another QueryFieldSpec
                      for field-to-field comparison
        :param op_num: index of the comparison operator; None means no filter
        :param negate: when True, the filter is wrapped in NOT
        :param collection: collection context for tree fields and formatters
        :param join_cache: cache of already-built joins shared across fields
        :return: (query, orm_field) tuple
        """
        no_filter = op_num is None

        if self.tree_rank is None and self.get_field() is None:
            # No concrete field: format the root table object itself.
            query, orm_field = objformatter.objformat(
                query, getattr(models, self.root_table.name), None, join_cache)
            no_filter = True
        elif self.is_relationship():
            # will be formatting or aggregating related objects
            if self.get_field().type == 'many-to-one':
                query, orm_model, table, field = self.build_join(
                    query, self.join_path, join_cache)
                query, orm_field = objformatter.objformat(
                    query, orm_model, None, join_cache)
            else:
                query, orm_model, table, field = self.build_join(
                    query, self.join_path[:-1], join_cache)
                orm_field = objformatter.aggregate(query, self.get_field(),
                                                   orm_model, None)
        else:
            query, orm_model, table, field = self.build_join(
                query, self.join_path, join_cache)

            if self.tree_rank is not None:
                query, orm_field = handle_tree_field(query, orm_model, table,
                                                     self.tree_rank,
                                                     self.tree_field,
                                                     collection, join_cache)
            else:
                orm_field = getattr(orm_model, self.get_field().name)

                if field.type == "java.sql.Timestamp":
                    # Only consider the date portion of timestamp fields.
                    # This is to replicate the behavior of Sp6. It might
                    # make sense to condition this on whether there is a
                    # time component in the input value.
                    orm_field = sql.func.DATE(orm_field)

                if field.is_temporal() and self.date_part != "Full Date":
                    orm_field = extract(self.date_part, orm_field)

        if not no_filter:
            if isinstance(value, QueryFieldSpec):
                # Field-to-field comparison: resolve the other spec's column
                # and compare against it instead of a literal.
                _, other_field = value.add_to_query(query.reset_joinpoint(),
                                                    objformatter,
                                                    join_cache=join_cache)
                uiformatter = None
                value = other_field
            else:
                # (Removed a no-op "value = value" statement here.)
                uiformatter = field and get_uiformatter(
                    collection, table.name, field.name)

            op = QueryOps(uiformatter).by_op_num(op_num)
            f = op(orm_field, value)
            query = query.filter(sql.not_(f) if negate else f)

        query = query.reset_joinpoint()

        return query, orm_field
Esempio n. 43
0
    def current_priority_expression(cls, collection, last_build):
        """
        Return computed value for packages priority or None if package is not
        schedulable.

        :param: collection package's collection.
                           It should either be the Collection class object,
                           or it should be a concrete collection object if all packages
                           have the same collection. This is done as an optimization to
                           avoid adding join on collection table when the collection is
                           known.
        :param: last_build package's last complete build.
                           As with the previous argument, should be either Build class
                           object or particular last complete build object.

        :returns: SQLA expression that, when evaluated in the DB, returns the priority
        """
        # if last_build is concrete object, it may be None
        # packages with no last build should have no priority
        # (they cannot be scheduled)
        if not last_build:
            return null()

        dynamic_priority = cls.dependency_priority + cls.build_priority

        # compute time priority
        # EPOCH extraction yields the interval length in seconds.
        seconds = extract('EPOCH', func.clock_timestamp() - last_build.started)
        a, b = TIME_PRIORITY.inputs
        # avoid zero/negative values, when time difference too small
        log_arg = func.greatest(0.000001, seconds / 3600)
        # Logarithmic growth in hours since the last build, floored at -30.
        dynamic_priority += func.greatest(a * func.log(log_arg) + b, -30)

        # dynamic priority is affected by coefficient
        dynamic_priority *= collection.priority_coefficient

        # manual and static priority are not affected by coefficient
        current_priority = cls.manual_priority + cls.static_priority + dynamic_priority

        # NOTE: the "== False" / "== None" comparisons below are intentional —
        # they build SQL expressions, not Python boolean tests.
        return case(
            [
                # handle unschedulable packages
                (
                    # WHEN blocked OR untracked
                    cls.blocked | ~cls.tracked |
                    # OR has running build
                    (cls.last_complete_build_id != cls.last_build_id) |
                    # OR is unresolved
                    (cls.resolved == False) |
                    # OR resolution is not yet done
                    ((cls.resolved == None) & ~cls.skip_resolution) |
                    # OR the collection's buildroot is broken
                    (collection.latest_repo_resolved == False) |
                    # OR the collection's buildroot wasn't resolved yet
                    (collection.latest_repo_resolved == None),
                    # THEN return NULL
                    None,
                )
            ],
            # ELSE return the computed priority
            else_=current_priority
        )
                                                   tdAccounts.acct_type.expression == "CC", acct_balance.expression
                                               )
                                           ], else_=0).expression.label("cc_bal")
                                       ]
acct = DataFrame.from_query(str(select(acct_select_query_column_projection).compile(compile_kwargs={"literal_binds": True})))
# Using to_pandas() for a cleaner display format:
acct.to_pandas().head(10)

# Next: Get the transaction information required for the aggregation. Pull out
# the quarter the transaction was made.
trans_select_query_column_projection = [tdTransactions.acct_nbr.expression,
                                        tdTransactions.principal_amt.expression,
                                        tdTransactions.interest_amt.expression,
                                        tdTransactions.tran_id.expression,
                                        tdTransactions.tran_date.expression,
                                        extract('month', tdTransactions.tran_date.expression).expression.label("acct_mon"),
                                        case_when(
                                            [
                                                (
                                                    or_(
                                                        text("acct_mon = '1'"), text("acct_mon = '2'"), text("acct_mon = '3'"),
                                                    ), 1
                                                )
                                            ], else_=0).expression.label("q1_trans"),
                                        case_when(
                                            [
                                                (
                                                    or_(
                                                        text("acct_mon = '4'"), text("acct_mon = '5'"), text("acct_mon = '6'"),
                                                    ), 1
                                                )
Esempio n. 45
0
def inbox():
    """Return the user's paginated mail inbox as JSON.

    Query-string parameters: is_star / is_unread toggle filters, q is a
    free-text search expression, member_id selects one member (0 = everyone),
    page is the 1-based page number (20 mails per page).
    """
    is_information = request.args.get("is_information")
    #drop group_id; not used in IZ*ONE PM
    is_star = request.args.get("is_star", "0")
    is_unread = request.args.get("is_unread", "0")
    q = request.args.get("q", "")
    member_id = request.args.get("member_id", "0")  # 0 for everyone
    user = get_user()
    page = request.args.get("page", "0")

    # Clamp page and member_id to valid ranges on any bad input.
    try:
        page = int(page)
        if page < 1:
            page = 1
    except ValueError:
        page = 1
    try:
        member_id = int(member_id)
        if member_id < 0 or 13 < member_id:
            member_id = 0
    except ValueError:
        member_id = 0

    mails = []

    # Search
    query = user.mails

    if member_id == 0:
        query = query.filter(Mail.member_id < 13)
    else:
        query = query.filter(Mail.member_id == member_id)

    if q != "":
        search_query = parse_search_query(q)
        if "begin" in search_query:
            query = query.filter(Mail.datetime >= search_query["begin"])
        if "end" in search_query:
            query = query.filter(Mail.datetime <= search_query["end"])
        if "q" in search_query:
            query = query.filter(
                or_(Mail.subject.contains(search_query["q"]),
                    Mail.content.contains(search_query["q"])))

        if "today" in search_query:
            # "On this day": match month/day regardless of year.
            today = datetime.now()
            query = query.filter(
                and_(
                    extract("month", Mail.datetime) == today.month,
                    extract("day", Mail.datetime) == today.day))
        if "reverse" in search_query and search_query["reverse"] == True:
            query = query.order_by(Mail.id.desc())
    else:
        # "pasttoday" config prepends mails from this month/day of past years.
        pasttoday = user.get_config("pasttoday")
        pasttoday = pasttoday.value if pasttoday else "0"
        if pasttoday == "1":
            today = datetime.now()
            query2 = query.filter(
                and_(
                    extract("month", Mail.datetime) == today.month,
                    extract("day", Mail.datetime) == today.day))
            mails = query2.all()

    mails += query.all()

    if is_star != "0" and is_star != "false":
        mails = [m for m in mails if user.is_star(m.id)]
    # BUG FIX: the second condition previously tested is_star instead of
    # is_unread, so passing is_unread=false with a star filter active still
    # dropped read mails.
    if is_unread != "0" and is_unread != "false":
        mails = [m for m in mails if not user.is_read(m.id)]

    total = len(mails)
    mails = mails[(page - 1) * 20:page * 20]

    result_obj = {
        "mail_count": len(mails),
        "page": page,
        "has_next_page": page * 20 < total,
        "unread_count": user.m_unreads[member_id] if member_id < 13 else 0,
        "star_count": user.m_stars[member_id] if member_id < 13 else 0,
        "mails": generate_mails(mails)
    }

    # Optionally inject the "random PM" pseudo-mail at the top of page 1.
    randompm = user.get_config("randompm")
    randompm = randompm.value if randompm else "0"
    if member_id == 0 and randompm == "1" and q == "" and (
            is_star == "0"
            or is_star == "false") and (is_unread == "0"
                                        or is_unread == "false") and page == 1:
        result_obj["mail_count"] += 1
        result_obj["mails"].insert(0, RANDOM_MAIL)

    return generate_json(result_obj)
Esempio n. 46
0
    def create_date_filter(self, column):
        """Return a SQLAlchemy filter on *column* for the parsed term, or None."""
        if self.maybe_year:
            # Bare-year terms match on the year component only.
            return extract('year', column) == int(self.term)
        if self.as_date:
            # Full dates compare for exact equality.
            return column == self.as_date
        return None
Esempio n. 47
0
 def aligned_notification_time(cls, date):
     """Shift *date* by the hour and minute of the configured notification time."""
     hours = extract('hour', cls.notification_time)
     minutes = extract('minute', cls.notification_time)
     hour_modifier = func.printf('+%d hours', hours)
     minute_modifier = func.printf('+%d minutes', minutes)
     return func.datetime(date, hour_modifier, minute_modifier)
Esempio n. 48
0
def fetch_monthly_template_usage_for_service(start_date, end_date, service_id):
    """Return per-template, per-month notification counts for a service.

    Aggregates the FactNotificationStatus fact table between *start_date* and
    *end_date*; when today falls inside the range, counts from the live
    Notification table are unioned in (today's rows are not yet in the fact
    table) and the union is re-aggregated.
    """
    # services_dao.replaces dao_fetch_monthly_historical_usage_by_template_for_service
    stats = db.session.query(
        FactNotificationStatus.template_id.label('template_id'),
        Template.name.label('name'),
        Template.template_type.label('template_type'),
        Template.is_precompiled_letter.label('is_precompiled_letter'),
        extract('month', FactNotificationStatus.bst_date).label('month'),
        extract('year', FactNotificationStatus.bst_date).label('year'),
        func.sum(
            FactNotificationStatus.notification_count).label('count')).join(
                Template,
                FactNotificationStatus.template_id == Template.id).filter(
                    FactNotificationStatus.service_id == service_id,
                    FactNotificationStatus.bst_date >= start_date,
                    FactNotificationStatus.bst_date <= end_date,
                    # Test-key and cancelled notifications are excluded.
                    FactNotificationStatus.key_type != KEY_TYPE_TEST,
                    FactNotificationStatus.notification_status !=
                    NOTIFICATION_CANCELLED,
                ).group_by(
                    FactNotificationStatus.template_id,
                    Template.name,
                    Template.template_type,
                    Template.is_precompiled_letter,
                    extract('month',
                            FactNotificationStatus.bst_date).label('month'),
                    extract('year',
                            FactNotificationStatus.bst_date).label('year'),
                ).order_by(extract('year', FactNotificationStatus.bst_date),
                           extract('month', FactNotificationStatus.bst_date),
                           Template.name)

    if start_date <= datetime.utcnow() <= end_date:
        # Today's notifications are not yet rolled into the fact table, so
        # count them from the live Notification table and union them in.
        today = get_london_midnight_in_utc(datetime.utcnow())
        month = get_london_month_from_utc_column(Notification.created_at)

        stats_for_today = db.session.query(
            Notification.template_id.label('template_id'),
            Template.name.label('name'),
            Template.template_type.label('template_type'),
            Template.is_precompiled_letter.label('is_precompiled_letter'),
            extract('month', month).label('month'),
            extract('year', month).label('year'),
            func.count().label('count')).join(
                Template,
                Notification.template_id == Template.id,
            ).filter(Notification.created_at >= today,
                     Notification.service_id == service_id,
                     Notification.key_type != KEY_TYPE_TEST,
                     Notification.status != NOTIFICATION_CANCELLED).group_by(
                         Notification.template_id, Template.hidden,
                         Template.name, Template.template_type, month)

        # Re-aggregate the union so a month split between the fact table and
        # today's live rows collapses into a single summed row.
        all_stats_table = stats.union_all(stats_for_today).subquery()
        query = db.session.query(
            all_stats_table.c.template_id,
            all_stats_table.c.name,
            all_stats_table.c.is_precompiled_letter,
            all_stats_table.c.template_type,
            func.cast(all_stats_table.c.month, Integer).label('month'),
            func.cast(all_stats_table.c.year, Integer).label('year'),
            func.cast(func.sum(all_stats_table.c.count),
                      Integer).label('count'),
        ).group_by(
            all_stats_table.c.template_id,
            all_stats_table.c.name,
            all_stats_table.c.is_precompiled_letter,
            all_stats_table.c.template_type,
            all_stats_table.c.month,
            all_stats_table.c.year,
        ).order_by(all_stats_table.c.year, all_stats_table.c.month,
                   all_stats_table.c.name)
    else:
        query = stats
    return query.all()
Esempio n. 49
0
    # Command-line options select exactly one account-month of data to rebuild.
    parser = ArgumentParser()
    parser.add_argument("-y", "--year", help="Year to process data for.")
    parser.add_argument("-m", "--month", help="Month to process data for.")
    parser.add_argument("-a",
                        "--account_id",
                        help="Account ID to process data for.")
    args = parser.parse_args()

    Session = sessionmaker(bind=get_db_engine())
    session = Session()

    print(
        f"Clearing out data - account: {args.account_id}, year: {args.year}, month: {args.month}"
    )
    # Delete the month's existing transactions first so the re-import below
    # is idempotent.
    session.query(Transaction).\
        filter(extract('year', Transaction.ds) == args.year).\
        filter(extract('month', Transaction.ds) == args.month).\
        filter(Transaction.account_id == args.account_id).\
        delete(synchronize_session=False)

    # Account ids look like "<bank>_..._..."; the bank prefix picks the
    # CSV handler class.
    bank = re.search(r'(.*?)_(.*?)_(.*?)', args.account_id).group(1)

    print(f"Loading: {args.account_id}")
    csv_handler = get_csv_handler(bank=bank)(account_id=args.account_id,
                                             data_dir=os.path.join(
                                                 SPENDY_DATA, args.year,
                                                 args.month))

    # NOTE(review): rows are added to the session but no commit is visible in
    # this chunk — confirm a commit happens after this block.
    for transaction in csv_handler.process_csv():
        session.add(transaction)
Esempio n. 50
0
def xtract(label, expr):
    """Return the EPOCH (seconds) extraction of *expr*, labelled *label*."""
    epoch_seconds = extract('EPOCH', expr)
    return epoch_seconds.label(label)
Esempio n. 51
0
def get_contacts_by_birthday_day(address_book_id, day):
    """Return parsed contacts in the address book whose birthday falls on *day* of the month."""
    contacts = (
        Session().query(Contact)
        .join(AddressBook)
        .filter(AddressBook.id == address_book_id)
        .filter(extract('day', Contact.birthday) == day)
        .all()
    )
    return ResponseUtil.parse_collection(contacts)