Example #1
def detail(year, month, day, slug):
    year = int(year)
    month = int(month)
    day = int(day)
    with session_context() as session:
        post = session.query(Post).options(
            subqueryload(Post.tags)).filter(Post.slug == slug).filter(
                extract('year', Post.create_at) == year,
                extract('month', Post.create_at) == month,
                extract('day', Post.create_at) == day)
        try:
            if post.count() < 1:
                redirect('/blog')
        except Exception:
            print("error occurred")
            redirect('/blog')
        post = post.first()
        post.view_count += 1
        # get related posts
        # don't forget to filter out the post itself
        tag_ids = [_t.id for _t in post.tags]
        related_posts = session.query(Post).filter(
            Post.tags.any(Tag.id.in_(tag_ids))
        ).filter(
            Post.id != post.id
        ).order_by(Post.view_count.desc())

        session.commit()
    return {'post': post, 'title': post.title, 'related_posts': related_posts}
Example #2
    def get_committee(self, args, page_num, per_page, committee_id, candidate_id):

        if committee_id is not None:
            committees = CommitteeDetail.query
            committees = committees.filter_by(**{'committee_id': committee_id})

        if candidate_id is not None:
            committees = CommitteeDetail.query.join(CandidateCommitteeLink).filter(CandidateCommitteeLink.candidate_id==candidate_id)

        for argname in ['designation', 'organization_type', 'committee_type']:
            if args.get(argname):
                if ',' in args[argname]:
                    committees = committees.filter(getattr(Committee, argname).in_(args[argname].split(',')))
                else:
                    committees = committees.filter(getattr(Committee, argname)==args[argname])

        # default year filtering
        if args.get('year') is None:
            earliest_year = int(sorted(default_year().split(','))[0])
            # still going or expired after the earliest year we are looking for
            committees = committees.filter(or_(extract('year', CommitteeDetail.expire_date) >= earliest_year, CommitteeDetail.expire_date == None))

        # Should this handle a list of years to make it consistent with /candidate ?
        elif args.get('year') and args['year'] != '*':
            # before expiration
            committees = committees.filter(or_(extract('year', CommitteeDetail.expire_date) >= int(args['year']), CommitteeDetail.expire_date == None))
            # after origination
            committees = committees.filter(extract('year', CommitteeDetail.original_registration_date) <= int(args['year']))

        count = committees.count()

        return count, committees.order_by(CommitteeDetail.name).paginate(page_num, per_page, False).items
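
The year filter in Example #2 combines extract('year', ...) with an IS NULL test so that committees without an expire date are kept. A minimal standalone sketch of how that predicate renders; the table and column names here are invented, not taken from the project above:

import sqlalchemy as sa

# stand-in for CommitteeDetail.expire_date
expire_date = sa.column("expire_date", sa.Date)

active_in_year = sa.or_(
    sa.extract("year", expire_date) >= 2014,
    expire_date.is_(None),
)
# renders roughly as:
#   EXTRACT(year FROM expire_date) >= :param OR expire_date IS NULL
print(active_in_year)
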
Example #3
def agg_week(crime, year):
    week_days = {
        0: 'Sun',
        1: 'Mon',
        2: 'Tue',
        3: 'Wed',
        4: 'Thu',
        5: 'Fri',
        6: 'Sat'
    }

    data = Crimes.query.with_entities(
        extract('dow', Crimes.datetime).label('day'),
        func.count(Crimes.cat)
        ).filter(Crimes.cat == crime
        ).filter(extract('year', Crimes.datetime) == year
        ).group_by('day'
        ).order_by('day'
        ).all()

    return jsonify({
        'crime': crime,
        'aggregates': [ {'day': week_days[day], 'occurrences': occurrences}
            for day, occurrences in data ]
    })
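
Example #3 relies on the day-of-week numbering of extract('dow', ...): on PostgreSQL, dow runs from 0 (Sunday) to 6 (Saturday), which is why week_days maps 0 to 'Sun'. A self-contained sketch of the same grouped count, using an invented table and the SQLAlchemy 1.4+ select() calling style:

import sqlalchemy as sa

crimes = sa.table("crimes", sa.column("cat"), sa.column("datetime", sa.DateTime))
day = sa.extract("dow", crimes.c.datetime).label("day")

stmt = (
    sa.select(day, sa.func.count(crimes.c.cat))
    .where(crimes.c.cat == "burglary")
    .group_by(day)
    .order_by(day)
)
# SELECT EXTRACT(dow FROM crimes.datetime) AS day, count(crimes.cat) AS count_1 FROM crimes ...
print(stmt)
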
Example #4
def get_model_changes(
    entity_type, year=None, month=None, day=None, hour=None, since=None
):
    # type: (Text, int, int, int, int, datetime) -> Query
    """Get models modified at the given date with the Audit service.

    :param entity_type: string like "extranet_medicen.apps.crm.models.Compte".
      Beware of typos: a misspelled entity_type matches nothing and no warning is raised.
    :param since: datetime
    :param year: int
    :param month: int
    :param day: int
    :param hour: int

    :returns: a query object
    """
    query = AuditEntry.query

    if since:
        query = query.filter(AuditEntry.happened_at >= since)

    if year:
        query = query.filter(extract("year", AuditEntry.happened_at) == year)
    if month:
        query = query.filter(extract("month", AuditEntry.happened_at) == month)
    if day:
        query = query.filter(extract("day", AuditEntry.happened_at) == day)
    if hour:
        query = query.filter(extract("hour", AuditEntry.happened_at) == hour)

    query = query.filter(AuditEntry.entity_type.like(entity_type)).order_by(
        AuditEntry.happened_at
    )

    return query
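
The docstring above documents each date part as optional. One subtlety worth noting: truthiness checks such as "if hour:" silently skip a legitimate hour of 0 (midnight). A standalone sketch of the same optional date-part pattern using "is not None", against an invented table rather than AuditEntry:

import sqlalchemy as sa

audit = sa.table("audit_entry", sa.column("happened_at", sa.DateTime))

def happened_at_parts(year=None, month=None, day=None, hour=None):
    criteria = [sa.true()]  # neutral start so and_() always receives arguments
    for field, value in (("year", year), ("month", month),
                         ("day", day), ("hour", hour)):
        if value is not None:  # hour=0 (midnight) is a valid filter value
            criteria.append(sa.extract(field, audit.c.happened_at) == value)
    return sa.and_(*criteria)

print(happened_at_parts(year=2020, hour=0))
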
Example #5
 def getPostByMonth(self, year, month):
     posts = web.ctx.orm.query(Post).\
             filter(and_(Post.content_type=='post', Post.status=='publish')).\
             filter(extract('year', Post.created)==int(year)).\
             filter(extract('month', Post.created)==int(month)).\
             order_by('posts.created DESC')
     return posts
Example #6
def send_profile_visitor_email(**kwargs):
    template = email_template_env.get_template('profile_visitors.html')
    senders = AccountUser.query.filter(
        extract('dow', AccountUser.date_added) == extract('dow', func.now()),
        extract('hour', AccountUser.date_added) == extract('hour', func.now()),
    ).join(
        AccountUserVisit,
        (AccountUserVisit.profile_user_id == AccountUser.id) &
        (AccountUserVisit.notified == False)
    )

    for sender in senders:
        visitors = AccountUserVisit.get_all_visits_in_last_7_days(sender, 5)

        visitors = [AccountUserVisit.visit_item(*v) for v in visitors.all()]
        if visitors:
            body = template.render(
                sender=sender,
                visitors=visitors,
                **kwargs
            )
            send_email(sender.username, body)

            try:
                db.session.query(AccountUserVisit).filter_by(profile_user_id=sender.id).update({"notified": True})
                db.session.commit()
            except:
                db.session.rollback()
                raise
Example #7
    def get_booked(self, date: datetime, is_after: bool, is_now: bool = False):
        order_func = None
        compare_op = None
        if is_after:
            compare_op = operator.ge
            if not is_now:
                date += timedelta(days=1)
            order_func = stub_func
        else:
            compare_op = operator.lt
            order_func = desc

        date = date.replace(hour=0, minute=0, second=0, microsecond=0)
        start_date = dataentities.BookedRange.start_date
        next_date = session.query(dataentities.BookedRange) \
            .filter(dataentities.BookedRange.start_date > datetime.now()) \
            .filter(compare_op(dataentities.BookedRange.start_date, date)) \
            .order_by(order_func(dataentities.BookedRange.start_date)) \
            .limit(1).first()

        if not next_date:
            return None

        return session.query(dataentities.BookedRange) \
            .filter(extract('day', start_date) == next_date.start_date.day) \
            .filter(extract('month', start_date) == next_date.start_date.month) \
            .filter(extract('year', start_date) == next_date.start_date.year) \
            .order_by(dataentities.BookedRange.start_date).all()
Example #8
    def get_candidate(self, args, page_num, per_page, candidate_id, committee_id):
        if candidate_id is not None:
            candidates = CandidateDetail.query
            candidates = candidates.filter_by(**{'candidate_id': candidate_id})

        if committee_id is not None:
            candidates = CandidateDetail.query.join(CandidateCommitteeLink).filter(CandidateCommitteeLink.committee_id==committee_id)

        for argname in ['candidate_id', 'candidate_status', 'district', 'incumbent_challenge', 'office', 'party', 'state']:
            if args.get(argname):
                # this is not working and doesn't look like it would work for _short
                if ',' in args[argname]:
                    candidates = candidates.filter(getattr(CandidateDetail, argname).in_(args[argname].split(',')))
                else:
                    candidates = candidates.filter_by(**{argname: args[argname]})

        if args.get('year') and args['year'] != '*':
            # before expiration
            candidates = candidates.filter(or_(extract('year', CandidateDetail.expire_date) >= int(args['year']), CandidateDetail.expire_date == None))
            # after origination
            candidates = candidates.filter(extract('year', CandidateDetail.load_date) <= int(args['year']))

        count = candidates.count()

        return count, candidates.order_by(CandidateDetail.expire_date.desc()).paginate(page_num, per_page, False).items
Example #9
def write_states_dataset(fobj, source, slug,
                         start_year=None, end_year=None,
                         adjusted=True, delta=False):
    """
    Writes a geographic csv of the series to the open file-like object passed
    in as `fobj`. Each row is an individual state, and each column is the
    period for the series. You may also specify seasonally adjusted with the
    `adjusted` boolean.

    The source can be either "LAUS" or "CESSM". The slug should be the URL-safe
    slug that groups similar datasets by state/category.

    This will write in place to the fobj that is passed to it.
    """

    fields = ["fips", "State"]

    start_year = start_year or int(elmr.app.config['STARTYEAR'])
    end_year   = end_year or int(elmr.app.config['ENDYEAR'])

    ## TODO: Somehow get this from the database, not hardcoded logic.
    for year in xrange(start_year, end_year + 1):
        for month in xrange(1, 13):
            if year == 2015 and month > 3:
                break
            fields.append(date(year, month, 1).strftime("%b %Y"))

    # Create the CSV writer
    writer = csv.DictWriter(fobj, fieldnames=fields)
    writer.writeheader()

    # Create the database query - note, there is no checking
    for state in USAState.query.order_by('name'):
        ss = state.series.filter_by(adjusted=adjusted, source=source, slug=slug)

        ss = ss.first()  # TODO: Check to make sure this returns a single result
        if ss is None:
            # temporary fix: fall back to the series without the adjusted flag
            ss = state.series.filter_by(source=source, slug=slug).first()
        if ss is None:
            continue

        series = ss.series.delta if delta else ss.series

        row = {
            "fips": state.fips,
            "State": state.name,
        }

        field   = SeriesRecord.period
        records = series.records

        records = records.filter(extract('year', field) >= start_year)
        records = records.filter(extract('year', field) <= end_year)
        for record in records:
            row[record.period.strftime("%b %Y")] = record.value

        writer.writerow(row)
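
The docstring of write_states_dataset describes one "Mon YYYY" column per month of the requested range; that header construction can be checked on its own with plain Python (no database needed):

from datetime import date

start_year, end_year = 2014, 2015
fields = ["fips", "State"]
for year in range(start_year, end_year + 1):
    for month in range(1, 13):
        fields.append(date(year, month, 1).strftime("%b %Y"))

print(fields[:4])  # ['fips', 'State', 'Jan 2014', 'Feb 2014'] on an English locale
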
Example #10
def view_post(year, month, day, slug):
    post = Post.query.filter(
        (extract('year', Post.pub_date) == year) &
        (extract('month', Post.pub_date) == month) &
        (extract('day', Post.pub_date) == day) &
        (Post.status == PostStatus.published) &
        (Post.slug == slug)).first_or_404()
    return render_template('blog/post_view.html', post=post)
Example #11
 def get_birthday(cls):
     today = date.today()
     tomorrow = today + timedelta(days=1)
     return cls.query.filter(
         or_(
             and_(extract('month', User.birth_date) == today.month, extract('day', User.birth_date) == today.day),
             and_(extract('month', User.birth_date) == tomorrow.month, extract('day', User.birth_date) == tomorrow.day)
         ), or_(User.status != User.STATUS_DELETED, User.status==None)).order_by(User.birth_date.desc(), User.full_name).all()
Example #12
def archive():
    archive = (db_session.query(extract("year", Post.pub_date).label("year"),
                                extract("month", Post.pub_date).label("month"))
                         .group_by("year", "month")
                         .order_by("-year", "-month"))

    return render_template(env.get_template("archive.html"),
                           archive=archive)
Example #13
    def group_join_leave_events(self, group_id):
        aet = self.auditEventTable
#        SELECT EXTRACT(year FROM event_date) AS year,
#          EXTRACT(month FROM event_date) AS month,
#          subsystem, event_date, instance_user_id, user_id
#        FROM audit_event
#        WHERE
#          ((subsystem = 'gs.group.member.join' AND event_code = '1')
#           OR
#           (subsystem = 'gs.group.member.leave' AND event_code = '1'))
#          AND group_id = 'example_group';
        s = sa.select([
            sa.extract('year', aet.c.event_date).label('year'),
            sa.extract('month', aet.c.event_date).label('month'),
            aet.c.subsystem,
            aet.c.event_date,
            aet.c.instance_user_id,
            aet.c.user_id
        ])
        joinClauses = ((aet.c.subsystem == JOIN_SUBSYSTEM)
                       & (aet.c.event_code == JOIN))
        leaveClauses = ((aet.c.subsystem == LEAVE_SUBSYSTEM)
                        & (aet.c.event_code == LEAVE))
        s.append_whereclause(joinClauses | leaveClauses)
        s.append_whereclause(aet.c.group_id == group_id)

        session = getSession()
        r = session.execute(s)
        rows = []
        if r.rowcount:
            rows = [{
                'year': int(row['year']),
                'month': int(row['month']),
                'date': row['event_date'],
                'subsystem': row['subsystem'],
                'user_id': row['instance_user_id'],
                'admin_id': row['user_id']
            } for row in r]
        years = {}
        for row in rows:
            if row['year'] not in years:
                years[row['year']] = {}
        for row in rows:
            if row['month'] not in years[row['year']]:
                years[row['year']][row['month']] = {
                    JOIN_SUBSYSTEM: [],
                    LEAVE_SUBSYSTEM: []
                }
        for row in rows:
            years[row['year']][row['month']][row['subsystem']].append({
                'date': row['date'],
                'user_id': row['user_id'],
                'admin_id': row['admin_id']
            })
        retval = years
        assert type(retval) == dict
        return retval
Example #14
def birthdays():
    today = datetime.datetime.now()
    session = DBSession()

    bday_members = session.query(Person).filter(
        extract('day', Person.dob) == today.day).filter(
        extract('month', Person.dob) == today.month).all()
    session.close()
    return bday_members
Example #15
def index():
    filter_query = []
    hoje = date.today()
    session['tela'] = "relatorio"

    form = PesForm()

    categoria = []
    categoria.append(('0','Todos'))
    for h in Categoria.query.filter(Categoria.empresa_id == session['empresa']).all():
        categoria.append((h.id,str(h.id) + " " + h.titulo))

    form.categoria_id.choices = categoria

    conta =  []
    conta.append(('0', 'Todos'))
    for h in Conta.query.filter(Conta.empresa_id == session['empresa']).all():
        conta.append((h.id,h.tipo + '-' + h.conta))

    form.conta_id.choices = conta
   
        
    if request.method == "POST":
        if form.categoria_id.data != "0":
            filter_query.append(Movimentacao.categoria_id == form.categoria_id.data)
        
        if form.conta_id.data != "0":
            filter_query.append(Movimentacao.conta_id == form.conta_id.data)    

        if form.data.data:
            (data_inicio,data_fim) = form.data.data.replace(" ","").split("-")

            data_inicio  = datetime.strptime(data_inicio, '%m/%d/%Y') + timedelta(days=-1)
            data_fim     = datetime.strptime(data_fim, '%m/%d/%Y')

            filter_query.append(Movimentacao.data_v >= data_inicio)
            filter_query.append(Movimentacao.data_v <= data_fim   ) 

        else:
            filter_query.append(extract('month', Movimentacao.data_v) == hoje.month)
            filter_query.append(extract('year' , Movimentacao.data_v) == hoje.year )        


        todos = Movimentacao.query.filter(
                Movimentacao.empresa_id == session['empresa'],
                *filter_query,
                ).order_by('data_v').all() 
    
    else:
        todos = Movimentacao.query.filter(
                Movimentacao.empresa_id == session['empresa'],
                extract('month', Movimentacao.data_v) == hoje.month,
                extract('year', Movimentacao.data_v) == hoje.year).order_by('data_v').all() 

    credito = sum([item.valor  for item in todos if item.categoria.status == 0])
    debito = sum([item.valor  for item in todos if item.categoria.status == 1])
    return render_template('relatorio/index.html',title='Relatório de Contas',form=form, todos=todos,credito=credito,debito=debito)
Example #16
    def get_busy_on_date(self, day, month, year):
        start_date = dataentities.BookedRange.start_date
        on_day = session.query(dataentities.BookedRange) \
            .filter(extract('day', start_date) == day) \
            .filter(extract('month', start_date) == month) \
            .filter(extract('year', start_date) == year) \
            .all()

        return list(map(lambda x: [x.start_date.hour, x.end_date.hour], on_day))
Example #17
def csr_table(filter_val):

    _csr_type = sort_tuple(csr_types)
    _status = sort_tuple(csr_status)

    # Joins CSR and VENDORS


    if filter_val is not None:

        results = Csr.query.join(Vendor).filter(Csr.status == 1)

        _v = filter_val.split("-")
        if _v[0] == 'date':
            results = results.filter(extract('year', Csr.report_date) == int(_v[1]))
            results = results.filter(extract('month', Csr.report_date) == int(_v[2]))
        elif _v[0] == 'vendor':
            results = results.filter(Vendor.short_hand==_v[1])
        elif _v[0] == 'duration':
            date_max = dt.now() - timedelta(int(_v[1]))
            date_min = dt.now() - timedelta(int(_v[2]))
            results = results.filter(Csr.report_date.between(date_min, date_max))

    else:
        results = Csr.query.join(Vendor)
    # Specify Column Data
    results = results.values(Vendor.name,
                       Csr.id,
                       Csr.csr_ref,
                       Csr.contract_no,
                       Csr.description,
                       Csr.csr_type,
                       Csr.status,
                       Csr.incident_date,
                       Csr.report_date,
                       Csr.close_date,
                       Csr.remarks,
                       )

    resultlist = []
    for v_name, csr_id, ref, contract_no, description, csr_type,\
            status, incident, report, close, remarks in results:

        resultlist.append({ 'vendor_name': v_name,
                            'id': csr_id,
                            'csr_reference' : ref,
                            'contract_no': contract_no,
                            'description': description,
                            'csr_type': _csr_type[csr_type-1],
                            'status': _status[status-1],
                            'incident_date': get_date(incident),
                            'report_date': get_date(report),
                            'close_date': get_date(close),
                            'remarks': remarks,
                           })
    return resultlist
Example #18
def archive(year, month):
    entries = Posts.query.filter(
        (extract('month', Posts.created) == month) &
        (extract('year', Posts.created) == year)
    ).all()
    if not entries:
        abort(404)
    for entry in entries:
        entry.content = markdown(entry.content)
    return render_template('archive.html', entries=entries)
Example #19
def review_kpi(filename=None):
    """
    Review Details of the KPI per section by month and year
    """

    section_id = current_user.get_id()
    section = User.query.filter(User.id == section_id).first_or_404()
    specs = SpecSectionKpi.query.filter(SpecSectionKpi.filename == filename).first()
    if specs is None:
        return render_template(
            "section_kpi_mgt/coming-soon.html", msg="Specification for {} doesn't exist yet".format(filename)
        )

    spec = specs.doc[section.full_name]
    classes_description = spec["_classes_description"]
    targets = spec["_target"]
    data = {}

    record = SectionRawScore.query.filter(
        and_(
            SectionRawScore.section_id == section_id,
            extract("month", SectionRawScore.score_date) == filename.split("-")[1],
            extract("year", SectionRawScore.score_date) == filename.split("-")[0],
        )
    ).all()

    if not record:
        return render_template("section_kpi_mgt/coming-soon.html")

    df = query_to_df(record)
    df = df.set_index("score_class")

    temp_list = []
    for i in ["a11", "a12", "a13", "a2", "a3"]:
        temp_list.append(
            {"class": classes_description[i], "target": targets[i], "actual": float(df.loc[i, "raw_score"])}
        )
    data["capex"] = temp_list

    temp_list = []
    for i in ["b1", "b2"]:
        temp_list.append(
            {"class": classes_description[i], "target": targets[i], "actual": float(df.loc[i, "raw_score"])}
        )
    data["opex"] = temp_list

    temp_list = []
    for i in ["c1", "c2"]:
        temp_list.append(
            {"class": classes_description[i], "target": targets[i], "actual": float(df.loc[i, "raw_score"])}
        )
    data["initiatives"] = temp_list

    _template = "section_kpi_mgt/detailed-dashboard.html"
    return render_template(_template, data=data)
Example #20
def _contents():
    categories = Category.query.all()
    query = db.session.query(Tag, db.func.count(Post.id)).join(Post.tags).group_by(Tag.id).order_by(
        db.desc(db.func.count(Post.id)))
    tags = query.all()
    year_func = extract('year', Post.publish_date)
    month_func = extract('month', Post.publish_date)
    archives = db.session.query(year_func, month_func, func.count(Post.id)).group_by(year_func).group_by(
        month_func).all()
    recent_posts = Post.query.order_by(Post.publish_date.desc()).limit(5).all()

    return {'categories': categories, 'tags': tags, 'archives': archives, 'recent_posts': recent_posts}
Example #21
def update_kpi_dashboard(section_id=None, date=None):
    mn = date.month
    yr = date.year
    if section_id is None:
        return (False, 'Section ID must not be None')

    record = SectionWeightedScore.query.\
        filter(and_(SectionWeightedScore.section_id==section_id,
                    extract('month', SectionWeightedScore.weighted_score_date) == mn,
                    extract('year', SectionWeightedScore.weighted_score_date) == yr
                    )).\
        first()

    if record is not None:
        return (False, "Can't update, an updated record already exists")

    else:
        section = 'BP&TE'
        scores = SectionRawScore.query.\
        filter(and_(SectionRawScore.section_id==section_id,
                    extract('month', SectionRawScore.score_date) == mn,
                    extract('year', SectionRawScore.score_date) == yr
                    )).\
        all()
        df_score = query_to_df(scores)
        df_score = df_score[['score_class', 'raw_score']]
        score_dict = df_score.set_index('score_class').to_dict()

        score_from_raw = cal_raw_scores(raw_scores=score_dict['raw_score'],
                                        targets=targets[section],
                                        classes=classes
                                        )
        score_eqvs = get_score_equivalent(score_from_raw, criteria)

        weighted_each_class = get_weighted_each_class(score_eqvs,
                                                      weightage_class[section],
                                                      classes)
        weighted_each_category = get_weighted_each_category(weighted_each_class,
                                                            weightage_category[section],
                                                            categories)

        for k, v in weighted_each_category.items():

            record = SectionWeightedScore(
                section_id=section_id,
                weighted_score_date=scores[0].score_date,
                weighted_score=v,
                category=k
            )
            db.session.add(record)
        db.session.commit()

        return (True, 'Success')
Example #22
    def test_extract(self):
        t = sql.table("t", sql.column("col1"))

        for field in "year", "month", "day":
            self.assert_compile(
                select([extract(field, t.c.col1)]), "SELECT EXTRACT(%s FROM t.col1) AS anon_1 FROM t" % field
            )

        # plural "milliseconds" is rendered as singular "millisecond"
        self.assert_compile(
            select([extract("milliseconds", t.c.col1)]), "SELECT EXTRACT(millisecond FROM t.col1) AS anon_1 FROM t"
        )
Example #23
    def test_extract(self):
        t = sql.table('t', sql.column('col1'))

        for field in 'year', 'month', 'day':
            self.assert_compile(
                select([extract(field, t.c.col1)]),
                "SELECT EXTRACT(%s FROM t.col1) AS anon_1 FROM t" % field)

        # plural 'milliseconds' is rendered as singular 'millisecond'
        self.assert_compile(
            select([extract('milliseconds', t.c.col1)]),
            "SELECT EXTRACT(millisecond FROM t.col1) AS anon_1 FROM t")
Example #24
def filter_year(model, query, years):
    return query.filter(
        sa.or_(*[
            sa.and_(
                sa.or_(
                    sa.extract('year', model.last_file_date) >= year,
                    model.last_file_date == None,
                ),
                sa.extract('year', model.first_file_date) <= year,
            )
            for year in years
        ])
    )  # noqa
Example #25
    def with_category_paid(cls, cid, year, month, total_only=False):
        base = DBSession.query(Invoice)

        if total_only:
            base = DBSession.query(func.sum(Invoice.amount).label('a_sum'))

        base = base.filter(and_(Invoice.category_id == cid,
                                Invoice.archived == False,
                                Invoice.paid != None))\
                   .filter(extract('year', Invoice.due) == year)\
                   .filter(extract('month', Invoice.due) == month)\
                   .all()
        return base
Example #26
 def get_query(self, content_type, id, date, lang):
     d = date.split('-')
     date = datetime.date(int(d[0]), int(d[1]), int(d[2]))
     
     if content_type == "bill":
         return self.session.query(Bill).filter(Bill.registry_number==id).\
                                         filter(Bill.publication_date==date).\
                                         filter(Bill.language==lang)
     return self.session.query(Doc).filter(Doc.registry_number==id).\
                                                  filter(Doc.type==content_type).\
                                                  filter(Doc.language==lang).\
                                                  filter(extract('year',Doc.status_date)==date.year).\
                                                  filter(extract('month',Doc.status_date)==date.month).\
                                                  filter(extract('day',Doc.status_date)==date.day)
Example #27
    def cvtTimeField(self, obj, time_type):
        if 'year'       == time_type:
            tc  =   extract('year', obj)
        elif 'month'    == time_type:
            tc  =   extract('month', obj)
        elif 'day'      == time_type:
            tc  =   extract('day', obj)
        elif 'hour'     == time_type:
            tc  =   extract('hour', obj)
        else:
            logger.info(sys.exc_info())
            tc = obj

        return tc
Example #28
def build_months_dict():
    month_year_to_string_mapping = {}
    #always add today's month as part of the months dict
    today = datetime.datetime.now()
    today_year = int(today.strftime("%Y"))
    today_month = int(today.strftime("%m"))
    month_year_to_string_mapping[today_month, today_year] = convert_to_month_string(today_month, today_year)

    #now insert all the months that have menus
    distinct_months_years = DBSession.query(extract('month', Menu.date), extract('year', Menu.date)).distinct().all()
    for distinct_month_year in distinct_months_years:
        month = int(distinct_month_year[0])
        year = int(distinct_month_year[1])
        month_year_to_string_mapping[month, year] = convert_to_month_string(month, year)
    return month_year_to_string_mapping
Example #29
def view_by_date(year=None, month=None, day=None):
    with session_scope() as session:
        if not (year or (month and day)):
            return redirect(url_for('.index'))

        js = session.query(db.Journal).order_by(db.Journal.timestamp)

        if year:
            js = js.filter(extract('year', db.Journal.timestamp) == year)
        if month:
            js = js.filter(extract('month', db.Journal.timestamp) == month)
        if day:
            js = js.filter(extract('day', db.Journal.timestamp) == day)

        return render_template('journal/view_by_date.html', journals=js.all())
Example #30
    def test_extract(self):
        t = table('t', column('col1'))

        for field in 'day', 'month', 'year':
            self.assert_compile(
                select([extract(field, t.c.col1)]),
                'SELECT DATEPART(%s, t.col1) AS anon_1 FROM t' % field)
Example #31
    def _query(cls, invoice):
        from autonomie.models.task import Task

        q = super(MonthInvoiceSequence, cls)._query(invoice)
        q = q.filter(extract('month', Task.date) == invoice.date.month)
        return q
Example #32
def get_logs_by_habit_in_month(habit_id: int, logyear: int,
                               logmonth: int) -> List[Log]:
    """Return logs for the given habit_id in the given month"""
    return (query_logs_by_habit_id(habit_id).filter(
        extract("year", Log.date) == logyear).filter(
            extract("month", Log.date) == logmonth).all())
Example #33
 def execute(field):
     return testing.db.execute(select([extract(field, date)])).scalar()
Example #34
 def expr(cls):
     return extract('year', getattr(cls, date_attr))
Example #35
def dao_fetch_monthly_historical_usage_by_template_for_service(service_id, year):

    results = dao_get_template_usage_stats_by_service(service_id, year)

    stats = []
    for result in results:
        stat = type("", (), {})()
        stat.template_id = result.template_id
        stat.template_type = result.template_type
        stat.name = str(result.name)
        stat.month = result.month
        stat.year = result.year
        stat.count = result.count
        stat.is_precompiled_letter = result.is_precompiled_letter
        stats.append(stat)

    month = get_london_month_from_utc_column(Notification.created_at)
    year_func = func.date_trunc("year", Notification.created_at)
    start_date = datetime.combine(date.today(), time.min)

    fy_start, fy_end = get_financial_year(year)

    if fy_start < datetime.now() < fy_end:
        today_results = db.session.query(
            Notification.template_id,
            Template.is_precompiled_letter,
            Template.name,
            Template.template_type,
            extract('month', month).label('month'),
            extract('year', year_func).label('year'),
            func.count().label('count')
        ).join(
            Template, Notification.template_id == Template.id,
        ).filter(
            Notification.created_at >= start_date,
            Notification.service_id == service_id,
            # we don't want to include test keys
            Notification.key_type != KEY_TYPE_TEST
        ).group_by(
            Notification.template_id,
            Template.hidden,
            Template.name,
            Template.template_type,
            month,
            year_func
        ).order_by(
            Notification.template_id
        ).all()

        for today_result in today_results:
            add_to_stats = True
            for stat in stats:
                if today_result.template_id == stat.template_id and today_result.month == stat.month \
                        and today_result.year == stat.year:
                    stat.count = stat.count + today_result.count
                    add_to_stats = False

            if add_to_stats:
                new_stat = type("StatsTemplateUsageByMonth", (), {})()
                new_stat.template_id = today_result.template_id
                new_stat.template_type = today_result.template_type
                new_stat.name = today_result.name
                new_stat.month = int(today_result.month)
                new_stat.year = int(today_result.year)
                new_stat.count = today_result.count
                new_stat.is_precompiled_letter = today_result.is_precompiled_letter
                stats.append(new_stat)

    return stats
Example #36
def _day_of_week_index(t, expr):
    sa_arg, = map(t.translate, expr.op().args)
    return sa.cast(
        sa.cast(sa.extract('dow', sa_arg) + 6, sa.SMALLINT) % 7,
        sa.SMALLINT
    )
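
The translation in Example #36 remaps PostgreSQL's extract('dow', ...), where Sunday is 0, onto a Monday-based index. The arithmetic can be verified with plain Python:

# (dow + 6) % 7 turns Sunday=0..Saturday=6 into Monday=0..Sunday=6
for dow, name in enumerate(["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]):
    print(name, (dow + 6) % 7)  # Sun 6, Mon 0, Tue 1, ..., Sat 5
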
Example #37
def _second(t, expr):
    # extracting the second gives us the fractional part as well, so smash that
    # with a cast to SMALLINT
    sa_arg, = map(t.translate, expr.op().args)
    return sa.cast(sa.func.FLOOR(sa.extract('second', sa_arg)), sa.SMALLINT)
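
As the comment in Example #37 says, EXTRACT(second ...) returns the fractional part as well, hence the floor and the cast. A standalone sketch of the resulting expression (the column name is invented):

import sqlalchemy as sa

ts = sa.column("ts", sa.DateTime)
seconds = sa.cast(sa.func.floor(sa.extract("second", ts)), sa.SMALLINT)
print(seconds)  # CAST(floor(EXTRACT(second FROM ts)) AS SMALLINT)
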
Example #38
def main():
    yelp_api = YelpAPI(login_data['yelp_consumer_key'],
                       login_data['yelp_consumer_secret'],
                       login_data['yelp_token'],
                       login_data['yelp_token_secret'])

    zip_codes = [row.zip_code for row in session.query(ZipCode).all()]

    current_month = datetime.date.today().month
    current_rows = session.query(YelpAPIDb).filter(
        extract('month', YelpAPIDb.date_created) == current_month).all()
    current_rows = [row.as_dict() for row in current_rows]
    existing_zip_codes = [row['zip_code'] for row in current_rows]
    remaining_zip_codes = [
        zip_code for zip_code in zip_codes
        if zip_code not in existing_zip_codes
    ]

    category_list = [
        "cafes", "newamerican", "indpak", "italian", "japanese", "thai"
    ]

    for i, zip_code in enumerate(remaining_zip_codes):
        zip_code_results = []
        for category in category_list:
            offset = 0
            total_count = 21
            results_per_query_limit = 20
            business_counter = 1
            remaining_count = 1

            LOGGER.info(
                "Extracting {} restaurants from zip code {} ({} out of {})".
                format(category, zip_code, i, len(remaining_zip_codes)))
            while remaining_count > 0:
                try:

                    search_results = yelp_api.search_query(
                        location=zip_code,
                        category_filter=category,
                        sort=0,
                        limit=20,
                        offset=offset)
                    total_count = search_results['total']
                except YelpAPI.YelpAPIError as e:
                    print(e)
                    break
                if search_results['total'] == 0:
                    session.merge(
                        YelpAPIDb(zip_code=zip_code,
                                  date_created=datetime.date.today(),
                                  avg_rating=None,
                                  business_count=0))
                    session.commit()
                    break
                for business in search_results['businesses']:
                    if is_business_valid(business, zip_code):
                        print("{} out of {} businesses".format(
                            business_counter, total_count))
                        zip_code_results.append({
                            "zip_code":
                            zip_code,
                            "rating":
                            business['rating'],
                            "review_count":
                            business["review_count"]
                        })
                    business_counter += 1

                remaining_count = total_count - business_counter
                offset += results_per_query_limit

        if zip_code_results:
            total_review_count = sum(
                [business['review_count'] for business in zip_code_results])
            zip_code_avg_rating = sum([
                business['rating'] * business['review_count']
                for business in zip_code_results
            ]) / total_review_count
            row = YelpAPIDb(zip_code=zip_code,
                            date_created=datetime.date.today(),
                            avg_rating=zip_code_avg_rating,
                            business_count=len(zip_code_results))
            session.merge(row)
            session.commit()
        else:
            session.merge(
                YelpAPIDb(zip_code=zip_code,
                          date_created=datetime.date.today(),
                          avg_rating=None,
                          business_count=0))
            session.commit()
    session.close()
Example #39
 def execute(field):
     return connection.execute(select([extract(field, date)])).scalar()
Example #40
class CoreFixtures(object):
    # lambdas which return a tuple of ColumnElement objects.
    # must return at least two objects that should compare differently.
    # to test more varieties of "difference" additional objects can be added.
    fixtures = [
        lambda: (
            column("q"),
            column("x"),
            column("q", Integer),
            column("q", String),
        ),
        lambda: (~column("q", Boolean), ~column("p", Boolean)),
        lambda: (
            table_a.c.a.label("foo"),
            table_a.c.a.label("bar"),
            table_a.c.b.label("foo"),
        ),
        lambda: (
            _label_reference(table_a.c.a.desc()),
            _label_reference(table_a.c.a.asc()),
        ),
        lambda: (_textual_label_reference("a"), _textual_label_reference("b")),
        lambda: (
            text("select a, b from table").columns(a=Integer, b=String),
            text("select a, b, c from table").columns(
                a=Integer, b=String, c=Integer
            ),
            text("select a, b, c from table where foo=:bar").bindparams(
                bindparam("bar", type_=Integer)
            ),
            text("select a, b, c from table where foo=:foo").bindparams(
                bindparam("foo", type_=Integer)
            ),
            text("select a, b, c from table where foo=:bar").bindparams(
                bindparam("bar", type_=String)
            ),
        ),
        lambda: (
            column("q") == column("x"),
            column("q") == column("y"),
            column("z") == column("x"),
            column("z") + column("x"),
            column("z") - column("x"),
            column("x") - column("z"),
            column("z") > column("x"),
            column("x").in_([5, 7]),
            column("x").in_([10, 7, 8]),
            # note these two are mathematically equivalent but for now they
            # are considered to be different
            column("z") >= column("x"),
            column("x") <= column("z"),
            column("q").between(5, 6),
            column("q").between(5, 6, symmetric=True),
            column("q").like("somstr"),
            column("q").like("somstr", escape="\\"),
            column("q").like("somstr", escape="X"),
        ),
        lambda: (
            table_a.c.a,
            table_a.c.a._annotate({"orm": True}),
            table_a.c.a._annotate({"orm": True})._annotate({"bar": False}),
            table_a.c.a._annotate(
                {"orm": True, "parententity": MyEntity("a", table_a)}
            ),
            table_a.c.a._annotate(
                {"orm": True, "parententity": MyEntity("b", table_a)}
            ),
            table_a.c.a._annotate(
                {"orm": True, "parententity": MyEntity("b", select([table_a]))}
            ),
        ),
        lambda: (
            cast(column("q"), Integer),
            cast(column("q"), Float),
            cast(column("p"), Integer),
        ),
        lambda: (
            bindparam("x"),
            bindparam("y"),
            bindparam("x", type_=Integer),
            bindparam("x", type_=String),
            bindparam(None),
        ),
        lambda: (_OffsetLimitParam("x"), _OffsetLimitParam("y")),
        lambda: (func.foo(), func.foo(5), func.bar()),
        lambda: (func.current_date(), func.current_time()),
        lambda: (
            func.next_value(Sequence("q")),
            func.next_value(Sequence("p")),
        ),
        lambda: (True_(), False_()),
        lambda: (Null(),),
        lambda: (ReturnTypeFromArgs("foo"), ReturnTypeFromArgs(5)),
        lambda: (FunctionElement(5), FunctionElement(5, 6)),
        lambda: (func.count(), func.not_count()),
        lambda: (func.char_length("abc"), func.char_length("def")),
        lambda: (GenericFunction("a", "b"), GenericFunction("a")),
        lambda: (CollationClause("foobar"), CollationClause("batbar")),
        lambda: (
            type_coerce(column("q", Integer), String),
            type_coerce(column("q", Integer), Float),
            type_coerce(column("z", Integer), Float),
        ),
        lambda: (table_a.c.a, table_b.c.a),
        lambda: (tuple_(1, 2), tuple_(3, 4)),
        lambda: (func.array_agg([1, 2]), func.array_agg([3, 4])),
        lambda: (
            func.percentile_cont(0.5).within_group(table_a.c.a),
            func.percentile_cont(0.5).within_group(table_a.c.b),
            func.percentile_cont(0.5).within_group(table_a.c.a, table_a.c.b),
            func.percentile_cont(0.5).within_group(
                table_a.c.a, table_a.c.b, column("q")
            ),
        ),
        lambda: (
            func.is_equal("a", "b").as_comparison(1, 2),
            func.is_equal("a", "c").as_comparison(1, 2),
            func.is_equal("a", "b").as_comparison(2, 1),
            func.is_equal("a", "b", "c").as_comparison(1, 2),
            func.foobar("a", "b").as_comparison(1, 2),
        ),
        lambda: (
            func.row_number().over(order_by=table_a.c.a),
            func.row_number().over(order_by=table_a.c.a, range_=(0, 10)),
            func.row_number().over(order_by=table_a.c.a, range_=(None, 10)),
            func.row_number().over(order_by=table_a.c.a, rows=(None, 20)),
            func.row_number().over(order_by=table_a.c.b),
            func.row_number().over(
                order_by=table_a.c.a, partition_by=table_a.c.b
            ),
        ),
        lambda: (
            func.count(1).filter(table_a.c.a == 5),
            func.count(1).filter(table_a.c.a == 10),
            func.foob(1).filter(table_a.c.a == 10),
        ),
        lambda: (
            and_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            and_(table_a.c.a == 5, table_a.c.a == table_b.c.a),
            or_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_a.c.a),
        ),
        lambda: (
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 18, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.b == 10, 20)]),
            case(
                whens=[
                    (table_a.c.a == 5, 10),
                    (table_a.c.b == 10, 20),
                    (table_a.c.a == 9, 12),
                ]
            ),
            case(
                whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)],
                else_=30,
            ),
            case({"wendy": "W", "jack": "J"}, value=table_a.c.a, else_="E"),
            case({"wendy": "W", "jack": "J"}, value=table_a.c.b, else_="E"),
            case({"wendy_w": "W", "jack": "J"}, value=table_a.c.a, else_="E"),
        ),
        lambda: (
            extract("foo", table_a.c.a),
            extract("foo", table_a.c.b),
            extract("bar", table_a.c.a),
        ),
        lambda: (
            Slice(1, 2, 5),
            Slice(1, 5, 5),
            Slice(1, 5, 10),
            Slice(2, 10, 15),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a, table_a.c.b]),
            select([table_a.c.b, table_a.c.a]),
            select([table_a.c.a]).where(table_a.c.b == 5),
            select([table_a.c.a])
            .where(table_a.c.b == 5)
            .where(table_a.c.a == 10),
            select([table_a.c.a]).where(table_a.c.b == 5).with_for_update(),
            select([table_a.c.a])
            .where(table_a.c.b == 5)
            .with_for_update(nowait=True),
            select([table_a.c.a]).where(table_a.c.b == 5).correlate(table_b),
            select([table_a.c.a])
            .where(table_a.c.b == 5)
            .correlate_except(table_b),
        ),
        lambda: (
            select([table_a.c.a]).cte(),
            select([table_a.c.a]).cte(recursive=True),
            select([table_a.c.a]).cte(name="some_cte", recursive=True),
            select([table_a.c.a]).cte(name="some_cte"),
            select([table_a.c.a]).cte(name="some_cte").alias("other_cte"),
            select([table_a.c.a])
            .cte(name="some_cte")
            .union_all(select([table_a.c.a])),
            select([table_a.c.a])
            .cte(name="some_cte")
            .union_all(select([table_a.c.b])),
            select([table_a.c.a]).lateral(),
            select([table_a.c.a]).lateral(name="bar"),
            table_a.tablesample(func.bernoulli(1)),
            table_a.tablesample(func.bernoulli(1), seed=func.random()),
            table_a.tablesample(func.bernoulli(1), seed=func.other_random()),
            table_a.tablesample(func.hoho(1)),
            table_a.tablesample(func.bernoulli(1), name="bar"),
            table_a.tablesample(
                func.bernoulli(1), name="bar", seed=func.random()
            ),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a]).prefix_with("foo"),
            select([table_a.c.a]).prefix_with("foo", dialect="mysql"),
            select([table_a.c.a]).prefix_with("foo", dialect="postgresql"),
            select([table_a.c.a]).prefix_with("bar"),
            select([table_a.c.a]).suffix_with("bar"),
        ),
        lambda: (
            select([table_a_2.c.a]),
            select([table_a_2_fs.c.a]),
            select([table_a_2_bs.c.a]),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a]).with_hint(None, "some hint"),
            select([table_a.c.a]).with_hint(None, "some other hint"),
            select([table_a.c.a]).with_hint(table_a, "some hint"),
            select([table_a.c.a])
            .with_hint(table_a, "some hint")
            .with_hint(None, "some other hint"),
            select([table_a.c.a]).with_hint(table_a, "some other hint"),
            select([table_a.c.a]).with_hint(
                table_a, "some hint", dialect_name="mysql"
            ),
            select([table_a.c.a]).with_hint(
                table_a, "some hint", dialect_name="postgresql"
            ),
        ),
        lambda: (
            table_a.join(table_b, table_a.c.a == table_b.c.a),
            table_a.join(
                table_b, and_(table_a.c.a == table_b.c.a, table_a.c.b == 1)
            ),
            table_a.outerjoin(table_b, table_a.c.a == table_b.c.a),
        ),
        lambda: (
            table_a.alias("a"),
            table_a.alias("b"),
            table_a.alias(),
            table_b.alias("a"),
            select([table_a.c.a]).alias("a"),
        ),
        lambda: (
            FromGrouping(table_a.alias("a")),
            FromGrouping(table_a.alias("b")),
        ),
        lambda: (
            SelectStatementGrouping(select([table_a])),
            SelectStatementGrouping(select([table_b])),
        ),
        lambda: (
            select([table_a.c.a]).scalar_subquery(),
            select([table_a.c.a]).where(table_a.c.b == 5).scalar_subquery(),
        ),
        lambda: (
            exists().where(table_a.c.a == 5),
            exists().where(table_a.c.b == 5),
        ),
        lambda: (
            union(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a]), select([table_a.c.b])).order_by("a"),
            union_all(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a])),
            union(
                select([table_a.c.a]),
                select([table_a.c.b]).where(table_a.c.b > 5),
            ),
        ),
        lambda: (
            table("a", column("x"), column("y")),
            table("a", column("y"), column("x")),
            table("b", column("x"), column("y")),
            table("a", column("x"), column("y"), column("z")),
            table("a", column("x"), column("y", Integer)),
            table("a", column("q"), column("y", Integer)),
        ),
        lambda: (table_a, table_b),
    ]

    dont_compare_values_fixtures = [
        lambda: (
            # note the in_(...) all have different column names because
            # otherwise all IN expressions would compare as equivalent
            column("x").in_(random_choices(range(10), k=3)),
            column("y").in_(
                bindparam(
                    "q",
                    random_choices(range(10), k=random.randint(0, 7)),
                    expanding=True,
                )
            ),
            column("z").in_(random_choices(range(10), k=random.randint(0, 7))),
            column("x") == random.randint(1, 10),
        )
    ]

    def _complex_fixtures():
        def one():
            a1 = table_a.alias()
            a2 = table_b_like_a.alias()

            stmt = (
                select([table_a.c.a, a1.c.b, a2.c.b])
                .where(table_a.c.b == a1.c.b)
                .where(a1.c.b == a2.c.b)
                .where(a1.c.a == 5)
            )

            return stmt

        def one_diff():
            a1 = table_b_like_a.alias()
            a2 = table_a.alias()

            stmt = (
                select([table_a.c.a, a1.c.b, a2.c.b])
                .where(table_a.c.b == a1.c.b)
                .where(a1.c.b == a2.c.b)
                .where(a1.c.a == 5)
            )

            return stmt

        def two():
            inner = one().subquery()

            stmt = select([table_b.c.a, inner.c.a, inner.c.b]).select_from(
                table_b.join(inner, table_b.c.b == inner.c.b)
            )

            return stmt

        def three():

            a1 = table_a.alias()
            a2 = table_a.alias()
            ex = exists().where(table_b.c.b == a1.c.a)

            stmt = (
                select([a1.c.a, a2.c.a])
                .select_from(a1.join(a2, a1.c.b == a2.c.b))
                .where(ex)
            )
            return stmt

        return [one(), one_diff(), two(), three()]

    fixtures.append(_complex_fixtures)
Example #41
    def collect(self):
        debt_schema = PartnerDebtSchema(many=True, only=['id', 'amount'])
        debts, error = debt_schema.load(self.data['debts'])

        if not error and debts:
            year_list = list(set([debt.fecha_vencimiento.year for debt in debts]))

            # Validate that installments are not being paid out of order
            for year in year_list:
                debt_list = [debt for debt in debts if debt.fecha_vencimiento.year == year]
                debt_id_list = [debt.id for debt in debts if debt.fecha_vencimiento.year == year]
                for dbt in debt_list:
                    query = PartnerDebt.query.filter(PartnerDebt.id_cliente == dbt.id_cliente,
                                                     extract('year', PartnerDebt.fecha_vencimiento) == year,
                                                     PartnerDebt.id < dbt.id,
                                                     PartnerDebt.estado == "Pendiente",
                                                     not_(PartnerDebt.id.in_(debt_id_list)))\
                        .order_by(PartnerDebt.fecha_vencimiento)
                    fetched = query.all()
                    if fetched:
                        return response_with(resp.DISORDERED_FEE_PAYMENT_422)

            # Validate that the amount to pay is greater than zero, that the installment is not
            # already settled, and that the amount paid does not exceed the installment balance
            band = False
            total_amount = 0
            for debt in debts:
                if debt.amount > 0:
                    total_amount += debt.amount
                else:
                    return response_with(resp.FEE_BAD_REQUEST_400)

                if debt.estado == 'Cobrado' or debt.saldo == 0:
                    return response_with(resp.FEE_CANCELED_REQUEST_422)

                if debt.amount > debt.saldo:
                    return response_with(resp.FEE_AMOUNT_EXCEEDED_422)

            if total_amount > 0 and not band:
                payment_provider = PaymentProvider.query \
                    .filter_by(name=self.data['payment_provider_data']['name'], active=True).one_or_none()

                if payment_provider:
                    payment_provider_type = payment_provider.get_config_by_name('TYPE')
                    if payment_provider_type == 'RED_COBRANZA':
                        # Validate that the collection network does not send a duplicate voucher
                        coll_trx = CollectionTransaction.query.filter(
                            CollectionTransaction.payment_provider_id == payment_provider.id,
                            CollectionTransaction.payment_provider_voucher ==
                            self.data['payment_provider_data']['voucher']).one_or_none()
                        if coll_trx:
                            return response_with(resp.VOUCHER_EXISTENT_422)

                    partner_id = debts[0].id_cliente
                    collection_transaction = CollectionTransaction(
                        context_id='partner_fee',
                        collection_entity_id=g.entity.id,
                        payment_provider_id=payment_provider.id,
                        amount=total_amount,
                        data=json.dumps(self.data),
                        partner_id=partner_id,
                        status='pending'
                    ).create()

                    if payment_provider.name == 'tigo_money':
                        tm_manager = TigoMoneyManager()
                        return tm_manager.payment_request(collection_transaction)

                    elif payment_provider.name == 'bancard_vpos':
                        vpos_manager = BancardVposManager()
                        return vpos_manager.payment_request(collection_transaction)

                    elif payment_provider_type == 'RED_COBRANZA':
                        red_cobranza_manager = RedCobranzaManager()
                        return red_cobranza_manager.payment_request(debts, collection_transaction)
                    else:
                        return response_with(resp.INVALID_PAYMENT_PROVIDER_NAME_400)
                else:
                    return response_with(resp.INVALID_PAYMENT_PROVIDER_400)
            else:
                return response_with(resp.FEE_AMOUNT_INVALID_400)
        else:
            return response_with(resp.FEE_BAD_REQUEST_400)
Example #42
def compute_up(expr, data, **kwargs):
    if expr.attr == 'date':
        return sa.func.date(data).label(expr._name)

    return sa.extract(expr.attr, data).label(expr._name)
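
The dispatch in Example #42 special-cases 'date', presumably because a whole date is not a standard EXTRACT field, and routes every other attribute through sa.extract(). A quick standalone illustration of the two branches (invented column name):

import sqlalchemy as sa

ts = sa.column("ts", sa.DateTime)
print(sa.func.date(ts))         # date(ts)
print(sa.extract("month", ts))  # EXTRACT(month FROM ts)
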
Example #43
 def exists(self):
     return db.session.query(exists().where(and_(Price.item_id == self.item_id,
                                                 func.DATE(Price.date) == datetime.now().date(),
                                                 extract('hour', Price.date) == datetime.now().hour))).scalar()
Example #44
def attacks():
    if request.method == 'POST' and request.form['date'] != '':
        d = request.form['date'].split(',')
        y = datetime.datetime(int(d[2]),int(d[0]),int(d[1]))
        next = y + datetime.timedelta(days = 1)
        returns = Target.query.filter(Target.tstamp >= y).filter(Target.tstamp <= next)
    else:
        y = datetime.date.today() - datetime.timedelta(days=1)
        returns = Target.query.filter(Target.tstamp >= y)
    retme = ''
    for x in returns:
        retme += f'at {x.time}::{x.data}<br>'
    options = db.session.query(extract('month', Target.tstamp),
                               extract('day', Target.tstamp),
                               extract('year', Target.tstamp)).distinct()
    return render_template('attacks.html', body=retme, title='Recent attacks!',
                           current=y, options=options)
Exemple #45
0
 def get_all_by_datetime(self, date: datetime):
     return self.db.query(MatchModel).filter(
         extract('month', MatchModel.played_at) == date.month,
         extract('year', MatchModel.played_at) == date.year,
         extract('day', MatchModel.played_at) == date.day).order_by(
             desc(MatchModel.created_at)).all()
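
A possible variation, shown only as a sketch with an assumed declarative model standing in for MatchModel: comparing on func.date() collapses the three extract() filters into a single comparison (whether either form can use an index depends on the backend and its indexes):

from datetime import datetime

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session

Base = declarative_base()

class MatchModel(Base):  # hypothetical stand-in for the model used above
    __tablename__ = "matches"
    id = sa.Column(sa.Integer, primary_key=True)
    played_at = sa.Column(sa.DateTime)
    created_at = sa.Column(sa.DateTime)

def get_all_by_date(session: Session, date: datetime):
    # one comparison on the DATE part instead of three extract() filters
    return (
        session.query(MatchModel)
        .filter(sa.func.date(MatchModel.played_at) == date.date())
        .order_by(MatchModel.created_at.desc())
        .all()
    )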
Exemple #46
0
def index():
    # mesAtual = Dado.query.filter(extract('month', Dado.dataHora) == datetime.datetime.utcnow().month).all()

    minhaMeta = Meta.query.first()

    minhaMeta = {
         "valor": {
             "m³": minhaMeta.valor
            ,"L":  minhaMeta.valor*1000
            ,"R$": minhaMeta.valor*PM
        }
        ,"unidadeDoValor": minhaMeta.unidadeDoValor
        ,"inicio": minhaMeta.inicio.isoformat()+'Z'
        ,"fim": (minhaMeta.inicio + minhaMeta.intervalo).isoformat()+'Z'
    }

    eu = Usuario.query.first()
    eu = {
         "nome": eu.nome
        ,"email": eu.email
        ,"senha": eu.senhaHash
    }


    # 21 readings so we can compute the 20 most recent flow rates
    dados_ultimos21 = Medicao.query.all()[-21:]
    vazao_ultimos20 = []
    dataHora_ultimos20 = []
    tempTxt = ''
    temp = 0
    for i in range(1,len(dados_ultimos21)):
        '''
        The datetime data will be used on the client side together with the moment.js library,
        which converts the date and time according to the user's locale and settings. What happens
        in the client's JavaScript is:
        moment("2012-12-31T23:55:13Z").format('LLLL');
        For that, instead of the datetime object a string built with isoformat() must be sent, e.g.:
        obj.isoformat();
        which puts a 'T' between the date and the time; a 'Z' is then appended to the end of the string
        so that moment.js can recognize it
        '''
        tempTxt = dados_ultimos21[i].dataHora.isoformat()+'Z'
        dataHora_ultimos20.append(tempTxt)
        '''
        (60 s/min)*(1/1000 L/mL)*(cte mL/pulse)*(consumptionDelta pulses)/(timeDelta s)
            = 0.06*cte*consumptionDelta/timeDelta L/min
        (see the worked example after this snippet)
        '''
        temp = (0.06*cte)*(dados_ultimos21[i].valor - dados_ultimos21[i-1].valor)/\
               (dados_ultimos21[i].dataHora - dados_ultimos21[i-1].dataHora).total_seconds() # L/min
        vazao_ultimos20.append(temp)
    
    '''        ACCUMULATED CONSUMPTION FOR THE DAY AND THE MONTH         '''
    # monthly consumption in m³
    dado_ultimo = dados_ultimos21[-1]
    consumoMes = dado_ultimo.valor*cte/1000000 # m³
    # daily consumption in m³
    # First take the last reading of the previous day. We use '<=' here in case the meter
    # recorded no consumption on the previous day
    agora = datetime.utcnow()
    ultimaMedicaoOntem = \
        Medicao.query.filter(Medicao.dataHora <= datetime(agora.year, agora.month, agora.day)).order_by(Medicao.id.desc()).first()
    consumoDia = ultimaMedicaoOntem.valor if (ultimaMedicaoOntem is not None) else 0 # in case there is no consumption on previous days
    consumoDia = consumoMes - consumoDia*cte/1000000 # m³

    # consumption in m³, L and R$
    consumoMes = {
         "m³": consumoMes
        ,"L":  consumoMes*1000
        ,"R$": consumoMes*PM
    }
    consumoDia = {
         "m³": consumoDia
        ,"L":  consumoDia*1000
        ,"R$": consumoDia*PM
    }

    # final dictionary
    consumoDiaMes = {
         "dia": consumoDia
        ,"mês": consumoMes
    }


    consumoAcumuladoTotal = {
         "valor": Medicao.query.order_by(Medicao.id.desc()).first().valor # pegando o último valor
        ,"dataHora": Medicao.query.first().dataHora.isoformat()+'Z'
    }
    consumoAcumuladoTotal['valor'] = cte*consumoAcumuladoTotal['valor']/1000000 # m³

    historico_1mes = {
         "valor": {
             "m³": []
            ,"L":  []
            ,"R$": []
        }
        ,"dataHora": []
    }
    # assuming a month with 31 days
    temp = []
    for i in range(32,0,-1): # [32, 31, ..., 2, 1]
        # last reading of the i-th previous day
        temp2 = Medicao.query.filter(extract('day', Medicao.dataHora) == datetime.utcnow().day-i).order_by(Medicao.id.desc()).first()
        if temp2 is not None:
            temp.append(temp2)
        if len(temp) > 1:
            consumoDoDia = (temp[-1].valor - temp[-2].valor)*cte/1000000 # m³
            historico_1mes["valor"]["m³"].append(consumoDoDia)
            historico_1mes["valor"]["L"].append(consumoDoDia*1000)
            historico_1mes["valor"]["R$"].append(consumoDoDia*PM)
            # dataHora format for the plotly.js library
            historico_1mes["dataHora"].append("%d-%d-%d" %(temp[-1].dataHora.year, temp[-1].dataHora.month, temp[-1].dataHora.day))
    
    
    return render_template('painelDeControle.html', async_mode=socketio.async_mode, cte=cte, PM=PM,
                                         vazao_ultimos20=vazao_ultimos20, dataHora_ultimos20=dataHora_ultimos20, 
                                         consumoDiaMes=consumoDiaMes, minhaMeta=minhaMeta, eu=eu, 
                                         consumoAcumuladoTotal=consumoAcumuladoTotal, historico_1mes=historico_1mes)
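
A minimal worked example of the flow-rate formula quoted in the docstring above; the pulse constant cte used here is an assumed value, the real one comes from the flow sensor:

from datetime import datetime

cte = 2.25  # assumed mL per pulse

r_prev = (1200, datetime(2023, 1, 1, 12, 0, 0))   # (pulse count, timestamp)
r_curr = (1500, datetime(2023, 1, 1, 12, 0, 30))

delta_pulses = r_curr[0] - r_prev[0]
delta_seconds = (r_curr[1] - r_prev[1]).total_seconds()

# (60 s/min) * (1/1000 L/mL) * (cte mL/pulse) * pulses / seconds = L/min
flow_l_per_min = 0.06 * cte * delta_pulses / delta_seconds
print(round(flow_l_per_min, 3))  # 1.35 L/min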
Exemple #47
0
def _reduce_tokens(tokens, arg):
    # current list of tokens
    curtokens = []

    # reduced list of tokens that accounts for blacklisted values
    reduced = []

    non_special_tokens = (
        frozenset(_strftime_to_postgresql_rules) - _strftime_blacklist
    )

    # TODO: how much of a hack is this?
    for token in tokens:

        # we are a non-special token %A, %d, etc.
        if token in non_special_tokens:
            curtokens.append(_strftime_to_postgresql_rules[token])

        # we have a string like DD, to escape this we
        # surround it with double quotes
        elif token in _lexicon_values:
            curtokens.append('"%s"' % token)

        # we have a token that needs special treatment
        elif token in _strftime_blacklist:
            if token == '%w':
                value = sa.extract('dow', arg)  # 0 based day of week
            elif token == '%U':
                value = sa.cast(sa.func.to_char(arg, 'WW'), sa.SMALLINT) - 1
            elif token == '%c' or token == '%x' or token == '%X':
                # re scan and tokenize this pattern
                try:
                    new_pattern = _strftime_to_postgresql_rules[token]
                except KeyError:
                    raise ValueError(
                        'locale specific date formats (%%c, %%x, %%X) are '
                        'not yet implemented for %s' % platform.system()
                    )

                new_tokens, _ = _scanner.scan(new_pattern)
                value = functools.reduce(
                    sa.sql.ColumnElement.concat,
                    _reduce_tokens(new_tokens, arg)
                )
            elif token == '%e':
                # pad with spaces instead of zeros
                value = sa.func.replace(sa.func.to_char(arg, 'DD'), '0', ' ')

            reduced += [
                sa.func.to_char(arg, ''.join(curtokens)),
                sa.cast(value, sa.TEXT)
            ]

            # empty current token list in case there are more tokens
            del curtokens[:]

        # uninteresting text
        else:
            curtokens.append(token)
    else:
        # append the result to `reduced` if we had more tokens or if we have no
        # blacklisted tokens
        if curtokens:
            reduced.append(sa.func.to_char(arg, ''.join(curtokens)))
    return reduced
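
A stripped-down sketch of the reduction idea in the example above, with an assumed three-entry strftime-to-to_char mapping: ordinary tokens accumulate into a to_char() pattern, while '%w' (which has no direct to_char equivalent) is emitted as a separate EXTRACT(dow ...) piece:

import sqlalchemy as sa

RULES = {"%Y": "YYYY", "%m": "MM", "%d": "DD"}  # assumed subset of the real mapping

def strftime_pieces(tokens, arg):
    pieces, buf = [], []
    for tok in tokens:
        if tok == "%w":
            # flush any accumulated to_char() pattern first
            if buf:
                pieces.append(sa.func.to_char(arg, "".join(buf)))
                buf = []
            # 0-based day of week, rendered outside to_char()
            pieces.append(sa.cast(sa.extract("dow", arg), sa.TEXT))
        else:
            # plain tokens (and literal text) accumulate into one pattern
            buf.append(RULES.get(tok, tok))
    if buf:
        pieces.append(sa.func.to_char(arg, "".join(buf)))
    return pieces

ts = sa.column("ts", sa.DateTime())
for piece in strftime_pieces(["%Y", "-", "%m", " ", "%w"], ts):
    print(piece)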
Exemple #48
0
def compute_up(expr, data, **kwargs):
    return sa.extract(expr.attr, data).label(expr._name)
Exemple #49
0
 def translator(t, expr):
     arg, = expr.op().args
     sa_arg = t.translate(arg)
     return sa.cast(sa.extract(fmt, sa_arg), sa.SMALLINT)
Exemple #50
0
class CoreFixtures(object):
    # lambdas which return a tuple of ColumnElement objects.
    # must return at least two objects that should compare differently.
    # to test more varieties of "difference" additional objects can be added.
    fixtures = [
        lambda: (
            column("q"),
            column("x"),
            column("q", Integer),
            column("q", String),
        ),
        lambda: (~column("q", Boolean), ~column("p", Boolean)),
        lambda: (
            table_a.c.a.label("foo"),
            table_a.c.a.label("bar"),
            table_a.c.b.label("foo"),
        ),
        lambda: (
            _label_reference(table_a.c.a.desc()),
            _label_reference(table_a.c.a.asc()),
        ),
        lambda: (_textual_label_reference("a"), _textual_label_reference("b")),
        lambda: (
            text("select a, b from table").columns(a=Integer, b=String),
            text("select a, b, c from table").columns(
                a=Integer, b=String, c=Integer),
            text("select a, b, c from table where foo=:bar").bindparams(
                bindparam("bar", type_=Integer)),
            text("select a, b, c from table where foo=:foo").bindparams(
                bindparam("foo", type_=Integer)),
            text("select a, b, c from table where foo=:bar").bindparams(
                bindparam("bar", type_=String)),
        ),
        lambda: (
            column("q") == column("x"),
            column("q") == column("y"),
            column("z") == column("x"),
            column("z") + column("x"),
            column("z") - column("x"),
            column("x") - column("z"),
            column("z") > column("x"),
            column("x").in_([5, 7]),
            column("x").in_([10, 7, 8]),
            # note these two are mathematically equivalent but for now they
            # are considered to be different
            column("z") >= column("x"),
            column("x") <= column("z"),
            column("q").between(5, 6),
            column("q").between(5, 6, symmetric=True),
            column("q").like("somstr"),
            column("q").like("somstr", escape="\\"),
            column("q").like("somstr", escape="X"),
        ),
        lambda: (
            table_a.c.a,
            table_a.c.a._annotate({"orm": True}),
            table_a.c.a._annotate({
                "orm": True
            })._annotate({"bar": False}),
            table_a.c.a._annotate({
                "orm": True,
                "parententity": MyEntity("a", table_a)
            }),
            table_a.c.a._annotate({
                "orm": True,
                "parententity": MyEntity("b", table_a)
            }),
            table_a.c.a._annotate(
                {
                    "orm": True,
                    "parententity": MyEntity("b", select([table_a]))
                }),
            table_a.c.a._annotate({
                "orm":
                True,
                "parententity":
                MyEntity("b",
                         select([table_a]).where(table_a.c.a == 5)),
            }),
        ),
        lambda: (
            table_a,
            table_a._annotate({"orm": True}),
            table_a._annotate({
                "orm": True
            })._annotate({"bar": False}),
            table_a._annotate({
                "orm": True,
                "parententity": MyEntity("a", table_a)
            }),
            table_a._annotate({
                "orm": True,
                "parententity": MyEntity("b", table_a)
            }),
            table_a._annotate({
                "orm": True,
                "parententity": MyEntity("b", select([table_a]))
            }),
        ),
        lambda: (
            table("a", column("x"), column("y")),
            table("a", column("x"), column("y"))._annotate({"orm": True}),
            table("b", column("x"), column("y"))._annotate({"orm": True}),
        ),
        lambda: (
            cast(column("q"), Integer),
            cast(column("q"), Float),
            cast(column("p"), Integer),
        ),
        lambda: (
            bindparam("x"),
            bindparam("y"),
            bindparam("x", type_=Integer),
            bindparam("x", type_=String),
            bindparam(None),
        ),
        lambda: (_OffsetLimitParam("x"), _OffsetLimitParam("y")),
        lambda: (func.foo(), func.foo(5), func.bar()),
        lambda: (func.current_date(), func.current_time()),
        lambda: (
            func.next_value(Sequence("q")),
            func.next_value(Sequence("p")),
        ),
        lambda: (True_(), False_()),
        lambda: (Null(), ),
        lambda: (ReturnTypeFromArgs("foo"), ReturnTypeFromArgs(5)),
        lambda: (FunctionElement(5), FunctionElement(5, 6)),
        lambda: (func.count(), func.not_count()),
        lambda: (func.char_length("abc"), func.char_length("def")),
        lambda: (GenericFunction("a", "b"), GenericFunction("a")),
        lambda: (CollationClause("foobar"), CollationClause("batbar")),
        lambda: (
            type_coerce(column("q", Integer), String),
            type_coerce(column("q", Integer), Float),
            type_coerce(column("z", Integer), Float),
        ),
        lambda: (table_a.c.a, table_b.c.a),
        lambda: (tuple_(1, 2), tuple_(3, 4)),
        lambda: (func.array_agg([1, 2]), func.array_agg([3, 4])),
        lambda: (
            func.percentile_cont(0.5).within_group(table_a.c.a),
            func.percentile_cont(0.5).within_group(table_a.c.b),
            func.percentile_cont(0.5).within_group(table_a.c.a, table_a.c.b),
            func.percentile_cont(0.5).within_group(table_a.c.a, table_a.c.b,
                                                   column("q")),
        ),
        lambda: (
            func.is_equal("a", "b").as_comparison(1, 2),
            func.is_equal("a", "c").as_comparison(1, 2),
            func.is_equal("a", "b").as_comparison(2, 1),
            func.is_equal("a", "b", "c").as_comparison(1, 2),
            func.foobar("a", "b").as_comparison(1, 2),
        ),
        lambda: (
            func.row_number().over(order_by=table_a.c.a),
            func.row_number().over(order_by=table_a.c.a, range_=(0, 10)),
            func.row_number().over(order_by=table_a.c.a, range_=(None, 10)),
            func.row_number().over(order_by=table_a.c.a, rows=(None, 20)),
            func.row_number().over(order_by=table_a.c.b),
            func.row_number().over(order_by=table_a.c.a,
                                   partition_by=table_a.c.b),
        ),
        lambda: (
            func.count(1).filter(table_a.c.a == 5),
            func.count(1).filter(table_a.c.a == 10),
            func.foob(1).filter(table_a.c.a == 10),
        ),
        lambda: (
            and_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            and_(table_a.c.a == 5, table_a.c.a == table_b.c.a),
            or_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_a.c.a),
        ),
        lambda: (
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 18, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.b == 10, 20)]),
            case(whens=[
                (table_a.c.a == 5, 10),
                (table_a.c.b == 10, 20),
                (table_a.c.a == 9, 12),
            ]),
            case(
                whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)],
                else_=30,
            ),
            case({
                "wendy": "W",
                "jack": "J"
            }, value=table_a.c.a, else_="E"),
            case({
                "wendy": "W",
                "jack": "J"
            }, value=table_a.c.b, else_="E"),
            case({
                "wendy_w": "W",
                "jack": "J"
            }, value=table_a.c.a, else_="E"),
        ),
        lambda: (
            extract("foo", table_a.c.a),
            extract("foo", table_a.c.b),
            extract("bar", table_a.c.a),
        ),
        lambda: (
            Slice(1, 2, 5),
            Slice(1, 5, 5),
            Slice(1, 5, 10),
            Slice(2, 10, 15),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a, table_a.c.b]),
            select([table_a.c.b, table_a.c.a]),
            select([table_a.c.b, table_a.c.a]).apply_labels(),
            select([table_a.c.a]).where(table_a.c.b == 5),
            select([table_a.c.a]).where(table_a.c.b == 5).where(table_a.c.a ==
                                                                10),
            select([table_a.c.a]).where(table_a.c.b == 5).with_for_update(),
            select([table_a.c.a]).where(table_a.c.b == 5).with_for_update(
                nowait=True),
            select([table_a.c.a]).where(table_a.c.b == 5).correlate(table_b),
            select([table_a.c.a]).where(table_a.c.b == 5).correlate_except(
                table_b),
        ),
        lambda: (
            future_select(table_a.c.a),
            future_select(table_a.c.a).join(table_b, table_a.c.a == table_b.c.a
                                            ),
            future_select(table_a.c.a).join_from(table_a, table_b, table_a.c.a
                                                 == table_b.c.a),
            future_select(table_a.c.a).join_from(table_a, table_b),
            future_select(table_a.c.a).join_from(table_c, table_b),
            future_select(table_a.c.a).join(
                table_b, table_a.c.a == table_b.c.a).join(
                    table_c, table_b.c.b == table_c.c.x),
            future_select(table_a.c.a).join(table_b),
            future_select(table_a.c.a).join(table_c),
            future_select(table_a.c.a).join(table_b, table_a.c.a == table_b.c.b
                                            ),
            future_select(table_a.c.a).join(table_c, table_a.c.a == table_c.c.x
                                            ),
        ),
        lambda: (
            select([table_a.c.a]).cte(),
            select([table_a.c.a]).cte(recursive=True),
            select([table_a.c.a]).cte(name="some_cte", recursive=True),
            select([table_a.c.a]).cte(name="some_cte"),
            select([table_a.c.a]).cte(name="some_cte").alias("other_cte"),
            select([table_a.c.a]).cte(name="some_cte").union_all(
                select([table_a.c.a])),
            select([table_a.c.a]).cte(name="some_cte").union_all(
                select([table_a.c.b])),
            select([table_a.c.a]).lateral(),
            select([table_a.c.a]).lateral(name="bar"),
            table_a.tablesample(func.bernoulli(1)),
            table_a.tablesample(func.bernoulli(1), seed=func.random()),
            table_a.tablesample(func.bernoulli(1), seed=func.other_random()),
            table_a.tablesample(func.hoho(1)),
            table_a.tablesample(func.bernoulli(1), name="bar"),
            table_a.tablesample(
                func.bernoulli(1), name="bar", seed=func.random()),
        ),
        lambda: (
            table_a.insert(),
            table_a.insert().values({})._annotate({"nocache": True}),
            table_b.insert(),
            table_b.insert().with_dialect_options(sqlite_foo="some value"),
            table_b.insert().from_select(["a", "b"], select([table_a])),
            table_b.insert().from_select(["a", "b"],
                                         select([table_a]).where(table_a.c.a >
                                                                 5)),
            table_b.insert().from_select(["a", "b"], select([table_b])),
            table_b.insert().from_select(["c", "d"], select([table_a])),
            table_b.insert().returning(table_b.c.a),
            table_b.insert().returning(table_b.c.a, table_b.c.b),
            table_b.insert().inline(),
            table_b.insert().prefix_with("foo"),
            table_b.insert().with_hint("RUNFAST"),
            table_b.insert().values(a=5, b=10),
            table_b.insert().values(a=5),
            table_b.insert().values({
                table_b.c.a: 5,
                "b": 10
            })._annotate({"nocache": True}),
            table_b.insert().values(a=7, b=10),
            table_b.insert().values(a=5, b=10).inline(),
            table_b.insert().values([{
                "a": 5,
                "b": 10
            }, {
                "a": 8,
                "b": 12
            }])._annotate({"nocache": True}),
            table_b.insert().values([{
                "a": 9,
                "b": 10
            }, {
                "a": 8,
                "b": 7
            }])._annotate({"nocache": True}),
            table_b.insert().values([(5, 10),
                                     (8, 12)])._annotate({"nocache": True}),
            table_b.insert().values([(5, 9),
                                     (5, 12)])._annotate({"nocache": True}),
        ),
        lambda: (
            table_b.update(),
            table_b.update().where(table_b.c.a == 5),
            table_b.update().where(table_b.c.b == 5),
            table_b.update().where(table_b.c.b == 5).with_dialect_options(
                mysql_limit=10),
            table_b.update().where(table_b.c.b == 5).with_dialect_options(
                mysql_limit=10, sqlite_foo="some value"),
            table_b.update().where(table_b.c.a == 5).values(a=5, b=10),
            table_b.update().where(table_b.c.a == 5).values(a=5, b=10, c=12),
            table_b.update().where(table_b.c.b == 5).values(a=5, b=10).
            _annotate({"nocache": True}),
            table_b.update().values(a=5, b=10),
            table_b.update().values({
                "a": 5,
                table_b.c.b: 10
            })._annotate({"nocache": True}),
            table_b.update().values(a=7, b=10),
            table_b.update().ordered_values(("a", 5), ("b", 10)),
            table_b.update().ordered_values(("b", 10), ("a", 5)),
            table_b.update().ordered_values((table_b.c.a, 5), ("b", 10)),
        ),
        lambda: (
            table_b.delete(),
            table_b.delete().with_dialect_options(sqlite_foo="some value"),
            table_b.delete().where(table_b.c.a == 5),
            table_b.delete().where(table_b.c.b == 5),
        ),
        lambda: (
            values(
                column("mykey", Integer),
                column("mytext", String),
                column("myint", Integer),
                name="myvalues",
            ).data([(1, "textA", 99),
                    (2, "textB", 88)])._annotate({"nocache": True}),
            values(
                column("mykey", Integer),
                column("mytext", String),
                column("myint", Integer),
                name="myothervalues",
            ).data([(1, "textA", 99),
                    (2, "textB", 88)])._annotate({"nocache": True}),
            values(
                column("mykey", Integer),
                column("mytext", String),
                column("myint", Integer),
                name="myvalues",
            ).data([(1, "textA", 89),
                    (2, "textG", 88)])._annotate({"nocache": True}),
            values(
                column("mykey", Integer),
                column("mynottext", String),
                column("myint", Integer),
                name="myvalues",
            ).data([(1, "textA", 99),
                    (2, "textB", 88)])._annotate({"nocache": True}),
            # TODO: difference in type
            # values(
            #    [
            #        column("mykey", Integer),
            #        column("mytext", Text),
            #        column("myint", Integer),
            #    ],
            #    (1, "textA", 99),
            #    (2, "textB", 88),
            #    alias_name="myvalues",
            # ),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a]).prefix_with("foo"),
            select([table_a.c.a]).prefix_with("foo", dialect="mysql"),
            select([table_a.c.a]).prefix_with("foo", dialect="postgresql"),
            select([table_a.c.a]).prefix_with("bar"),
            select([table_a.c.a]).suffix_with("bar"),
        ),
        lambda: (
            select([table_a_2.c.a]),
            select([table_a_2_fs.c.a]),
            select([table_a_2_bs.c.a]),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a]).with_hint(None, "some hint"),
            select([table_a.c.a]).with_hint(None, "some other hint"),
            select([table_a.c.a]).with_hint(table_a, "some hint"),
            select([table_a.c.a]).with_hint(table_a, "some hint").with_hint(
                None, "some other hint"),
            select([table_a.c.a]).with_hint(table_a, "some other hint"),
            select([table_a.c.a]).with_hint(
                table_a, "some hint", dialect_name="mysql"),
            select([table_a.c.a]).with_hint(
                table_a, "some hint", dialect_name="postgresql"),
        ),
        lambda: (
            table_a.join(table_b, table_a.c.a == table_b.c.a),
            table_a.join(table_b,
                         and_(table_a.c.a == table_b.c.a, table_a.c.b == 1)),
            table_a.outerjoin(table_b, table_a.c.a == table_b.c.a),
        ),
        lambda: (
            table_a.alias("a"),
            table_a.alias("b"),
            table_a.alias(),
            table_b.alias("a"),
            select([table_a.c.a]).alias("a"),
        ),
        lambda: (
            FromGrouping(table_a.alias("a")),
            FromGrouping(table_a.alias("b")),
        ),
        lambda: (
            SelectStatementGrouping(select([table_a])),
            SelectStatementGrouping(select([table_b])),
        ),
        lambda: (
            select([table_a.c.a]).scalar_subquery(),
            select([table_a.c.a]).where(table_a.c.b == 5).scalar_subquery(),
        ),
        lambda: (
            exists().where(table_a.c.a == 5),
            exists().where(table_a.c.b == 5),
        ),
        lambda: (
            union(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a]), select([table_a.c.b])).order_by("a"),
            union_all(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a])),
            union(
                select([table_a.c.a]),
                select([table_a.c.b]).where(table_a.c.b > 5),
            ),
        ),
        lambda: (
            table("a", column("x"), column("y")),
            table("a", column("y"), column("x")),
            table("b", column("x"), column("y")),
            table("a", column("x"), column("y"), column("z")),
            table("a", column("x"), column("y", Integer)),
            table("a", column("q"), column("y", Integer)),
        ),
        lambda: (table_a, table_b),
    ]

    dont_compare_values_fixtures = [
        lambda: (
            # note the in_(...) all have different column names because
            # otherwise all IN expressions would compare as equivalent
            column("x").in_(random_choices(range(10), k=3)),
            column("y").in_(
                bindparam(
                    "q",
                    random_choices(range(10), k=random.randint(0, 7)),
                    expanding=True,
                )),
            column("z").in_(random_choices(range(10), k=random.randint(0, 7))),
            column("x") == random.randint(1, 10),
        )
    ]

    def _complex_fixtures():
        def one():
            a1 = table_a.alias()
            a2 = table_b_like_a.alias()

            stmt = (select([table_a.c.a, a1.c.b,
                            a2.c.b]).where(table_a.c.b == a1.c.b).where(
                                a1.c.b == a2.c.b).where(a1.c.a == 5))

            return stmt

        def one_diff():
            a1 = table_b_like_a.alias()
            a2 = table_a.alias()

            stmt = (select([table_a.c.a, a1.c.b,
                            a2.c.b]).where(table_a.c.b == a1.c.b).where(
                                a1.c.b == a2.c.b).where(a1.c.a == 5))

            return stmt

        def two():
            inner = one().subquery()

            stmt = select([table_b.c.a, inner.c.a, inner.c.b]).select_from(
                table_b.join(inner, table_b.c.b == inner.c.b))

            return stmt

        def three():

            a1 = table_a.alias()
            a2 = table_a.alias()
            ex = exists().where(table_b.c.b == a1.c.a)

            stmt = (select([a1.c.a, a2.c.a]).select_from(
                a1.join(a2, a1.c.b == a2.c.b)).where(ex))
            return stmt

        return [one(), one_diff(), two(), three()]

    fixtures.append(_complex_fixtures)

    def _statements_w_context_options_fixtures():

        return [
            select([table_a])._add_context_option(opt1, True),
            select([table_a])._add_context_option(opt1, 5),
            select([table_a])._add_context_option(opt1,
                                                  True)._add_context_option(
                                                      opt2, True),
            select([table_a
                    ])._add_context_option(opt1,
                                           True)._add_context_option(opt2, 5),
            select([table_a])._add_context_option(opt3, True),
        ]

    fixtures.append(_statements_w_context_options_fixtures)

    def _statements_w_anonymous_col_names():
        def one():
            c = column("q")

            l = c.label(None)

            # new case as of Id810f485c5f7ed971529489b84694e02a3356d6d
            subq = select([l]).subquery()

            # this creates a ColumnClause as a proxy to the Label() that has
            # an anonymous name, so the column has one too.
            anon_col = subq.c[0]

            # then when BindParameter is created, it checks the label
            # and doesn't double up on the anonymous name which is uncachable
            return anon_col > 5

        def two():
            c = column("p")

            l = c.label(None)

            # new case as of Id810f485c5f7ed971529489b84694e02a3356d6d
            subq = select([l]).subquery()

            # this creates a ColumnClause as a proxy to the Label() that has
            # an anonymous name, so the column has one too.
            anon_col = subq.c[0]

            # then when BindParameter is created, it checks the label
            # and doesn't double up on the anonymous name which is uncachable
            return anon_col > 5

        def three():

            l1, l2 = table_a.c.a.label(None), table_a.c.b.label(None)

            stmt = select([table_a.c.a, table_a.c.b, l1, l2])

            subq = stmt.subquery()
            return select([subq]).where(subq.c[2] == 10)

        return (
            one(),
            two(),
            three(),
        )

    fixtures.append(_statements_w_anonymous_col_names)
Exemple #51
0
def _millisecond(t, expr):
    # we get total number of milliseconds including seconds with extract so we
    # mod 1000
    sa_arg, = map(t.translate, expr.op().args)
    return sa.cast(sa.extract('millisecond', sa_arg), sa.SMALLINT) % 1000
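
A quick standalone look at the expression this builds (column name assumed): on PostgreSQL, EXTRACT(millisecond ...) includes the seconds component, which is why the result is taken modulo 1000:

import sqlalchemy as sa

ts = sa.column("ts", sa.DateTime())
millis = sa.cast(sa.extract("millisecond", ts), sa.SMALLINT) % 1000
print(millis)  # roughly: CAST(EXTRACT(millisecond FROM ts) AS SMALLINT) % :param_1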
Exemple #52
0
# -*- coding: utf-8 -*-
import logging

from sqlalchemy import extract
from cables.models import TEquipementsPoteauxErdf, TEquipementsTronconsErdf

from numpy import unique, concatenate

log = logging.getLogger(__name__)

year_extract_p = extract('year', TEquipementsPoteauxErdf.date_equipement)
year_extract_t = extract('year',
                         TEquipementsTronconsErdf.date_equipement_troncon)
years_p = ()
years_t = ()

R_HIG = u'Risque élevé'
R_SEC = u'Risque secondaire'
R_LOW = u'Peu ou pas de risque'


def to_int(x):
    return int(x[0]) if x[0] is not None else 0


def add_header_row(entries, name, years_p, years_t):
    labels_years_p = tuple(
            u'Nb poteaux équipés en %s' % year for year in years_p)
    labels_years_t = tuple(
            u'Longueur troncons équipés en %s' % year for year in years_t)
    entries.insert(0, (name if isinstance(name, tuple) else (name,)) + (
Exemple #53
0
class CompareAndCopyTest(fixtures.TestBase):

    # lambdas which return a tuple of ColumnElement objects.
    # must return at least two objects that should compare differently.
    # to test more varieties of "difference" additional objects can be added.
    fixtures = [
        lambda: (
            column("q"),
            column("x"),
            column("q", Integer),
            column("q", String),
        ),
        lambda: (~column("q", Boolean), ~column("p", Boolean)),
        lambda: (
            table_a.c.a.label("foo"),
            table_a.c.a.label("bar"),
            table_a.c.b.label("foo"),
        ),
        lambda: (
            _label_reference(table_a.c.a.desc()),
            _label_reference(table_a.c.a.asc()),
        ),
        lambda: (_textual_label_reference("a"), _textual_label_reference("b")),
        lambda: (
            text("select a, b from table").columns(a=Integer, b=String),
            text("select a, b, c from table").columns(
                a=Integer, b=String, c=Integer),
        ),
        lambda: (
            column("q") == column("x"),
            column("q") == column("y"),
            column("z") == column("x"),
        ),
        lambda: (
            cast(column("q"), Integer),
            cast(column("q"), Float),
            cast(column("p"), Integer),
        ),
        lambda: (
            bindparam("x"),
            bindparam("y"),
            bindparam("x", type_=Integer),
            bindparam("x", type_=String),
            bindparam(None),
        ),
        lambda: (_OffsetLimitParam("x"), _OffsetLimitParam("y")),
        lambda: (func.foo(), func.foo(5), func.bar()),
        lambda: (func.current_date(), func.current_time()),
        lambda: (
            func.next_value(Sequence("q")),
            func.next_value(Sequence("p")),
        ),
        lambda: (True_(), False_()),
        lambda: (Null(), ),
        lambda: (ReturnTypeFromArgs("foo"), ReturnTypeFromArgs(5)),
        lambda: (FunctionElement(5), FunctionElement(5, 6)),
        lambda: (func.count(), func.not_count()),
        lambda: (func.char_length("abc"), func.char_length("def")),
        lambda: (GenericFunction("a", "b"), GenericFunction("a")),
        lambda: (CollationClause("foobar"), CollationClause("batbar")),
        lambda: (
            type_coerce(column("q", Integer), String),
            type_coerce(column("q", Integer), Float),
            type_coerce(column("z", Integer), Float),
        ),
        lambda: (table_a.c.a, table_b.c.a),
        lambda: (tuple_([1, 2]), tuple_([3, 4])),
        lambda: (func.array_agg([1, 2]), func.array_agg([3, 4])),
        lambda: (
            func.percentile_cont(0.5).within_group(table_a.c.a),
            func.percentile_cont(0.5).within_group(table_a.c.b),
            func.percentile_cont(0.5).within_group(table_a.c.a, table_a.c.b),
            func.percentile_cont(0.5).within_group(table_a.c.a, table_a.c.b,
                                                   column("q")),
        ),
        lambda: (
            func.is_equal("a", "b").as_comparison(1, 2),
            func.is_equal("a", "c").as_comparison(1, 2),
            func.is_equal("a", "b").as_comparison(2, 1),
            func.is_equal("a", "b", "c").as_comparison(1, 2),
            func.foobar("a", "b").as_comparison(1, 2),
        ),
        lambda: (
            func.row_number().over(order_by=table_a.c.a),
            func.row_number().over(order_by=table_a.c.a, range_=(0, 10)),
            func.row_number().over(order_by=table_a.c.a, range_=(None, 10)),
            func.row_number().over(order_by=table_a.c.a, rows=(None, 20)),
            func.row_number().over(order_by=table_a.c.b),
            func.row_number().over(order_by=table_a.c.a,
                                   partition_by=table_a.c.b),
        ),
        lambda: (
            func.count(1).filter(table_a.c.a == 5),
            func.count(1).filter(table_a.c.a == 10),
            func.foob(1).filter(table_a.c.a == 10),
        ),
        lambda: (
            and_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            and_(table_a.c.a == 5, table_a.c.a == table_b.c.a),
            or_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_a.c.a),
        ),
        lambda: (
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 18, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.b == 10, 20)]),
            case(whens=[
                (table_a.c.a == 5, 10),
                (table_a.c.b == 10, 20),
                (table_a.c.a == 9, 12),
            ]),
            case(
                whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)],
                else_=30,
            ),
            case({
                "wendy": "W",
                "jack": "J"
            }, value=table_a.c.a, else_="E"),
            case({
                "wendy": "W",
                "jack": "J"
            }, value=table_a.c.b, else_="E"),
            case({
                "wendy_w": "W",
                "jack": "J"
            }, value=table_a.c.a, else_="E"),
        ),
        lambda: (
            extract("foo", table_a.c.a),
            extract("foo", table_a.c.b),
            extract("bar", table_a.c.a),
        ),
        lambda: (
            Slice(1, 2, 5),
            Slice(1, 5, 5),
            Slice(1, 5, 10),
            Slice(2, 10, 15),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a, table_a.c.b]),
            select([table_a.c.b, table_a.c.a]),
            select([table_a.c.a]).where(table_a.c.b == 5),
            select([table_a.c.a]).where(table_a.c.b == 5).where(table_a.c.a ==
                                                                10),
            select([table_a.c.a]).where(table_a.c.b == 5).with_for_update(),
            select([table_a.c.a]).where(table_a.c.b == 5).with_for_update(
                nowait=True),
            select([table_a.c.a]).where(table_a.c.b == 5).correlate(table_b),
            select([table_a.c.a]).where(table_a.c.b == 5).correlate_except(
                table_b),
        ),
        lambda: (
            table_a.join(table_b, table_a.c.a == table_b.c.a),
            table_a.join(table_b,
                         and_(table_a.c.a == table_b.c.a, table_a.c.b == 1)),
            table_a.outerjoin(table_b, table_a.c.a == table_b.c.a),
        ),
        lambda: (
            table_a.alias("a"),
            table_a.alias("b"),
            table_a.alias(),
            table_b.alias("a"),
            select([table_a.c.a]).alias("a"),
        ),
        lambda: (
            FromGrouping(table_a.alias("a")),
            FromGrouping(table_a.alias("b")),
        ),
        lambda: (
            select([table_a.c.a]).as_scalar(),
            select([table_a.c.a]).where(table_a.c.b == 5).as_scalar(),
        ),
        lambda: (
            exists().where(table_a.c.a == 5),
            exists().where(table_a.c.b == 5),
        ),
        lambda: (
            union(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a]), select([table_a.c.b])).order_by("a"),
            union_all(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a])),
            union(
                select([table_a.c.a]),
                select([table_a.c.b]).where(table_a.c.b > 5),
            ),
        ),
        lambda: (
            table("a", column("x"), column("y")),
            table("a", column("y"), column("x")),
            table("b", column("x"), column("y")),
            table("a", column("x"), column("y"), column("z")),
            table("a", column("x"), column("y", Integer)),
            table("a", column("q"), column("y", Integer)),
        ),
        lambda: (
            Table("a", MetaData(), Column("q", Integer), Column("b", String)),
            Table("b", MetaData(), Column("q", Integer), Column("b", String)),
        ),
    ]

    @classmethod
    def setup_class(cls):
        # TODO: we need to get dialects here somehow, perhaps in test_suite?
        [
            importlib.import_module("sqlalchemy.dialects.%s" % d)
            for d in dialects.__all__ if not d.startswith("_")
        ]

    def test_all_present(self):
        need = set(
            cls for cls in class_hierarchy(ClauseElement)
            if issubclass(cls, (ColumnElement, Selectable)) and "__init__" in
            cls.__dict__ and not issubclass(cls, (Annotated))
            and "orm" not in cls.__module__ and "crud" not in cls.__module__
            and "dialects" not in cls.__module__  # TODO: dialects?
        ).difference({ColumnElement, UnaryExpression})
        for fixture in self.fixtures:
            case_a = fixture()
            for elem in case_a:
                for mro in type(elem).__mro__:
                    need.discard(mro)

        is_false(bool(need), "%d Remaining classes: %r" % (len(need), need))

    def test_compare(self):
        for fixture in self.fixtures:
            case_a = fixture()
            case_b = fixture()

            for a, b in itertools.combinations_with_replacement(
                    range(len(case_a)), 2):
                if a == b:
                    is_true(
                        case_a[a].compare(case_b[b],
                                          arbitrary_expression=True),
                        "%r != %r" % (case_a[a], case_b[b]),
                    )

                else:
                    is_false(
                        case_a[a].compare(case_b[b],
                                          arbitrary_expression=True),
                        "%r == %r" % (case_a[a], case_b[b]),
                    )

    def test_cache_key(self):
        def assert_params_append(assert_params):
            def append(param):
                if param._value_required_for_cache:
                    assert_params.append(param)
                else:
                    is_(param.value, None)

            return append

        for fixture in self.fixtures:
            case_a = fixture()
            case_b = fixture()

            for a, b in itertools.combinations_with_replacement(
                    range(len(case_a)), 2):

                assert_a_params = []
                assert_b_params = []

                visitors.traverse_depthfirst(
                    case_a[a],
                    {},
                    {"bindparam": assert_params_append(assert_a_params)},
                )
                visitors.traverse_depthfirst(
                    case_b[b],
                    {},
                    {"bindparam": assert_params_append(assert_b_params)},
                )
                if assert_a_params:
                    assert_raises_message(
                        NotImplementedError,
                        "bindparams collection argument required ",
                        case_a[a]._cache_key,
                    )
                if assert_b_params:
                    assert_raises_message(
                        NotImplementedError,
                        "bindparams collection argument required ",
                        case_b[b]._cache_key,
                    )

                if not assert_a_params and not assert_b_params:
                    if a == b:
                        eq_(case_a[a]._cache_key(), case_b[b]._cache_key())
                    else:
                        ne_(case_a[a]._cache_key(), case_b[b]._cache_key())

    def test_cache_key_gather_bindparams(self):
        for fixture in self.fixtures:
            case_a = fixture()
            case_b = fixture()

            # in the "bindparams" case, the cache keys for bound parameters
            # with only different values will be the same, but the params
            # themselves are gathered into a collection.
            for a, b in itertools.combinations_with_replacement(
                    range(len(case_a)), 2):
                a_params = {"bindparams": []}
                b_params = {"bindparams": []}
                if a == b:
                    a_key = case_a[a]._cache_key(**a_params)
                    b_key = case_b[b]._cache_key(**b_params)
                    eq_(a_key, b_key)

                    if a_params["bindparams"]:
                        for a_param, b_param in zip(a_params["bindparams"],
                                                    b_params["bindparams"]):
                            assert a_param.compare(b_param)
                else:
                    a_key = case_a[a]._cache_key(**a_params)
                    b_key = case_b[b]._cache_key(**b_params)

                    if a_key == b_key:
                        for a_param, b_param in zip(a_params["bindparams"],
                                                    b_params["bindparams"]):
                            if not a_param.compare(b_param):
                                break
                        else:
                            assert False, "Bound parameters are all the same"
                    else:
                        ne_(a_key, b_key)

                assert_a_params = []
                assert_b_params = []
                visitors.traverse_depthfirst(
                    case_a[a], {}, {"bindparam": assert_a_params.append})
                visitors.traverse_depthfirst(
                    case_b[b], {}, {"bindparam": assert_b_params.append})

                # note we're asserting the order of the params as well as
                # if there are dupes or not.  ordering has to be deterministic
                # and matches what a traversal would provide.
                eq_(a_params["bindparams"], assert_a_params)
                eq_(b_params["bindparams"], assert_b_params)

    def test_compare_col_identity(self):
        stmt1 = (select([table_a.c.a, table_b.c.b
                         ]).where(table_a.c.a == table_b.c.b).alias())
        stmt1_c = (select([table_a.c.a, table_b.c.b
                           ]).where(table_a.c.a == table_b.c.b).alias())

        stmt2 = union(select([table_a]), select([table_b]))

        stmt3 = select([table_b])

        equivalents = {table_a.c.a: [table_b.c.a]}

        is_false(
            stmt1.compare(stmt2, use_proxies=True, equivalents=equivalents))

        is_true(
            stmt1.compare(stmt1_c, use_proxies=True, equivalents=equivalents))
        is_true((table_a.c.a == table_b.c.b).compare(
            stmt1.c.a == stmt1.c.b,
            use_proxies=True,
            equivalents=equivalents,
        ))

    def test_copy_internals(self):
        for fixture in self.fixtures:
            case_a = fixture()
            case_b = fixture()

            assert case_a[0].compare(case_b[0])

            clone = case_a[0]._clone()
            clone._copy_internals()

            assert clone.compare(case_b[0])

            stack = [clone]
            seen = {clone}
            found_elements = False
            while stack:
                obj = stack.pop(0)

                items = [
                    subelem for key, elem in clone.__dict__.items()
                    if key != "_is_clone_of" and elem is not None
                    for subelem in util.to_list(elem)
                    if (isinstance(subelem, (ColumnElement, ClauseList))
                        and subelem not in seen and not isinstance(
                            subelem, Immutable) and subelem is not case_a[0])
                ]
                stack.extend(items)
                seen.update(items)

                if obj is not clone:
                    found_elements = True
                    # ensure the element will not compare as true
                    obj.compare = lambda other, **kw: False
                    obj.__visit_name__ = "dont_match"

            if found_elements:
                assert not clone.compare(case_b[0])
            assert case_a[0].compare(case_b[0])
Exemple #54
0
def overall_stats():
    if current_user.admin == 'Y':
        q = Users.query.filter_by(admin='N').all()
    else:
        q = Users.query.filter_by(manager_id=current_user.id).all()
    ################
    skills_table = LookupTable.query.filter_by(field="skill").all()
    users = Users.query.filter_by(admin='N').all()
    final_res = []
    for i in users:
        user_res = []
        print("---------new---------")
        for j in skills_table:
            a = db.session.query(db.func.max(Skills.skill_id)).group_by(Skills.skill, Skills.employee_id).filter(
                Skills.employee_id == i.id, Skills.skill == j.value).first()
            if a is not None:
                skill1 = Skills.query.filter_by(skill_id=a[0]).first()
                print(skill1)
                if skill1.manager_rating is not None:
                    user_res.append(
                        (round(0.4 * skill1.emp_rating + 0.6 * skill1.manager_rating, 2), skill1.skill_interest, 0))
                else:
                    user_res.append((skill1.emp_rating, skill1.skill_interest, 1))
            else:
                user_res.append(None)
        final_res.append(user_res)
        print(final_res)
    ################
    x = db.session.query(Skills.skill, db.func.count(Skills.employee_id.distinct())).group_by(Skills.skill).all()
    y = db.session.query(Users.location, db.func.count(Users.id)).group_by(Users.location).all()
    z = db.session.query(Users.practice, db.func.count(Users.id)).group_by(Users.practice).all()
    print(x, y, z)
    skills = [['Tech', 'No. of ppl']]
    loc = [['Location', 'No. of ppl']]
    prac = [['Practice', 'No. of ppl']]
    for i in x:
        skills.append([i[0], i[1]])
    for i in y:
        if i[0] is not None:
            loc.append([i[0], i[1]])
    for i in z:
        if i[0] is not None:
            prac.append([i[0], i[1]])
    print(skills, loc, prac)
    s = LookupTable.query.filter_by(field="skill").all()
    t = db.session.query(extract('year', Skills.timestamp), extract('month', Skills.timestamp),
                         extract('day', Skills.timestamp)).order_by(Skills.skill_id).all()
    res = [['time']]
    for i in s:
        res[0].append(i.value)
    print(res)
    dates = list(dict.fromkeys(t))
    print(s)
    print(dates)
    for i in range(0, len(dates)):
        res.append([dates[i]])
        for j in s:
            sk = db.session.query(Skills.employee_id.distinct()).filter(extract('year', Skills.timestamp)<=dates[i][0], extract('month', Skills.timestamp)<=dates[i][1],
                             extract('day', Skills.timestamp)<=dates[i][2], Skills.skill==j.value).all()
            res[i+1].append(len(sk))
            print(res)
            print(sk)
    print(res)
    if request.method == 'POST':
        id = request.form.get('choose_employee')
        return redirect(url_for('emp_stats', id=id))
    return render_template('overall_stats.html', skills=json.dumps(skills), loc=json.dumps(loc), data=json.dumps(res),
                           prac=json.dumps(prac), emp=q, skills_table=skills_table, skills_len=len(skills_table), res=final_res, users=users, len=len(users))
Exemple #55
0
def dashboard(id_role):
    # REQUETES
    if "form_data" in session:
        session.pop("form_data")
    nb_constat_by_dep = (DB.session.query(
        LAreas.area_name,
        func.sum(Constats.nb_victimes_mort),
        func.sum(Constats.nb_indemnises),
        func.count(Constats.id_constat),
    ).select_from(LAreas).filter(LAreas.id_type == 26))
    if current_app.config.get("CODE_DEPARTMENT", None):
        nb_constat_by_dep = nb_constat_by_dep.filter(
            LAreas.area_code.in_(current_app.config["CODE_DEPARTMENT"]))
    nb_constat_by_dep = nb_constat_by_dep.group_by(LAreas.area_name)

    nb_constat_by_sect = (DB.session.query(
        LAreas.area_name,
        func.sum(Constats.nb_victimes_mort),
        func.sum(Constats.nb_indemnises),
        func.count(Constats.id_constat),
        func.sum(Constats.nb_jour_agent),
    ).select_from(LAreas).filter(LAreas.id_type == 30).group_by(
        LAreas.area_name))

    query_constat_by_sect_by_animal_type = (DB.session.query(
        LAreas.area_name, func.sum(Constats.nb_indemnises),
        bib_type_animaux.nom).select_from(LAreas).filter(LAreas.id_type == 30))
    filter_query = request.args.to_dict()
    current_year = datetime.now().year
    form = FilterForm(date=current_year)
    form.date.choices.insert(0, (0, ""))

    # we must add the filter to the join condition, not the WHERE clause, if we want the outer join to keep all secteurs/departements (see the sketch after this example)
    filters = []
    if "date" in filter_query and filter_query["date"] != "0":
        filters.append(
            extract("year", Constats.date_constat) == int(
                filter_query["date"]))
    if "animaux" in filter_query:
        if filter_query["animaux"] != "__None":
            filters.append(Constats.type_animaux == filter_query["animaux"])
    if "statut" in filter_query and filter_query["statut"] != "__None":
        filters.append(Constats.statut == filter_query["statut"])
    if "localisation" in filter_query:
        if filter_query["localisation"] == "1":
            filters.append(Constats.dans_coeur == True)
        elif filter_query["localisation"] == "2":
            filters.append(Constats.dans_aa == True)
        elif filter_query["localisation"] == "3":
            filters.append(Constats.dans_aa == False
                           and Constats.dans_coeur == False)
    if "type_constat" in filter_query and filter_query[
            "type_constat"] != "None":
        try:
            is_declaratif = bool(int(filter_query["type_constat"]))
        except Exception as e:
            raise BadRequest(str(e))
        filters.append(Constats.declaratif == is_declaratif)

    nb_constat_by_dep = (nb_constat_by_dep.outerjoin(
        Constats, and_(Constats.id_departement == LAreas.id_area,
                       *filters)).group_by(LAreas.area_name).all())
    nb_constat_by_sect = (nb_constat_by_sect.outerjoin(
        Constats, and_(Constats.id_secteur == LAreas.id_area,
                       *filters)).group_by(LAreas.area_name).all())

    data = (query_constat_by_sect_by_animal_type.outerjoin(
        Constats,
        and_(Constats.id_secteur == LAreas.id_area, *filters)).outerjoin(
            bib_type_animaux,
            bib_type_animaux.id == Constats.type_animaux).group_by(
                LAreas.area_name, bib_type_animaux.nom))

    data = data.all()
    constat_by_sect_by_animal_type = {}
    for item in data:
        constat_by_sect_by_animal_type.setdefault(item[0], {"total": 0})
        constat_by_sect_by_animal_type[item[0]]["total"] += item[1] or 0
        constat_by_sect_by_animal_type[item[0]][item[2]] = item[1]

    return render_template(
        "dashboard.html",
        nb_constat_by_dep=nb_constat_by_dep,
        nb_constat_by_sect=nb_constat_by_sect,
        constat_by_sect_by_animal_type=constat_by_sect_by_animal_type,
        form=form,
    )
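
A minimal Core sketch of the point made in the comment above (table and column names are assumed): with an outer join, a filter placed inside the join condition keeps every area row, while the same filter in the WHERE clause silently turns the query back into an inner join:

import sqlalchemy as sa

md = sa.MetaData()
areas = sa.Table("areas", md,
                 sa.Column("id_area", sa.Integer, primary_key=True),
                 sa.Column("area_name", sa.String))
constats = sa.Table("constats", md,
                    sa.Column("id_constat", sa.Integer, primary_key=True),
                    sa.Column("id_secteur", sa.Integer),
                    sa.Column("date_constat", sa.Date))

year_filter = sa.extract("year", constats.c.date_constat) == 2020

# keeps all areas; unmatched ones simply get a count of 0
keeps_all_areas = (
    sa.select([areas.c.area_name, sa.func.count(constats.c.id_constat)])
    .select_from(areas.outerjoin(
        constats,
        sa.and_(constats.c.id_secteur == areas.c.id_area, year_filter)))
    .group_by(areas.c.area_name)
)

# drops areas with no matching constat, because WHERE rejects the NULL-extended rows
drops_unmatched_areas = (
    sa.select([areas.c.area_name, sa.func.count(constats.c.id_constat)])
    .select_from(areas.outerjoin(
        constats, constats.c.id_secteur == areas.c.id_area))
    .where(year_filter)
    .group_by(areas.c.area_name)
)

print(keeps_all_areas)
print(drops_unmatched_areas)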
Exemple #56
0
 def translator(t, expr):
     (arg,) = expr.op().args
     sa_arg = t.translate(arg)
     if fmt == 'millisecond':
         return sa.extract('microsecond', sa_arg) % 1000
     return sa.extract(fmt, sa_arg)
Exemple #57
0
 def translator(t, expr, output_type=output_type):
     (arg, ) = expr.op().args
     sa_arg = t.translate(arg)
     return sa.cast(sa.extract(fmt, sa_arg), output_type)
Exemple #58
0
def get_played_tracks(workout_intensity='all',
                      sport='all',
                      pop_time_period='all'):
    '''
    :param workout_intensity: (Optional) filter the Spotify tracks by the intensity of the workout they were played during
    :param sport: (Optional) filter by workout/sport type
    :param pop_time_period: (Optional) time window ('all', 'ytd', 'l90d', 'l6w', 'l30d') used to split tracks into 'Current' and 'Previous' periods
    :return: df of Spotify tracks that were played during a workout
    '''

    # Query tracks
    if pop_time_period == 'all':
        df_tracks = pd.read_sql(
            sql=app.session.query(spotifyPlayHistory).statement, con=engine)
        df_tracks['Period'] = 'Current'

    elif pop_time_period == 'ytd':
        # Pull this year and last year, then label last year's plays 'Previous'
        df_tracks = pd.read_sql(
            sql=app.session.query(spotifyPlayHistory).filter(
                extract('year', spotifyPlayHistory.timestamp_utc) >= (
                    datetime.utcnow().year - 1)).statement,
            con=engine)

        df_tracks['Period'] = 'Current'
        # Boolean-mask assignment needs .loc (.at is for scalar access)
        df_tracks.loc[df_tracks['timestamp_utc'].dt.year == (
            datetime.utcnow().year - 1), 'Period'] = 'Previous'

    elif pop_time_period in ['l90d', 'l6w', 'l30d']:
        # Pull twice the requested window so the older half can be labeled 'Previous'
        days = {'l90d': 180, 'l6w': 84, 'l30d': 60}
        df_tracks = pd.read_sql(
            sql=app.session.query(spotifyPlayHistory).filter(
                spotifyPlayHistory.timestamp_utc >= (
                    datetime.utcnow().date() -
                    timedelta(days=days[pop_time_period]))).statement,
            con=engine)
        df_tracks['Period'] = 'Current'
        # Boolean-mask assignment needs .loc (.at is for scalar access)
        df_tracks.loc[df_tracks['timestamp_utc'].dt.date <=
                      (datetime.utcnow().date() -
                       timedelta(days=days[pop_time_period] / 2)),
                      'Period'] = 'Previous'

    # Query workouts
    df_summary = pd.read_sql(sql=app.session.query(
        stravaSummary.start_date_utc, stravaSummary.activity_id,
        stravaSummary.name, stravaSummary.elapsed_time, stravaSummary.type,
        stravaSummary.workout_intensity).statement,
                             con=engine)
    df_summary[
        'end_date_utc'] = df_summary['start_date_utc'] + pd.to_timedelta(
            df_summary['elapsed_time'], 's')
    df_summary.drop(columns=['elapsed_time'], inplace=True)

    # Full Cross Join
    df_tracks = df_tracks.assign(join_key=1)
    df_summary = df_summary.assign(join_key=1)
    df_merge = pd.merge(df_summary, df_tracks, on='join_key').drop('join_key',
                                                                   axis=1)
    # Filter only on tracks performed during workout times
    df_merge = df_merge.query(
        'timestamp_utc >= start_date_utc and timestamp_utc <= end_date_utc')
    # Join back to the full track list (keeps non-workout plays) and drop the cross-join key
    df = df_tracks.merge(df_merge, on=['timestamp_utc'],
                         how='left').fillna('').drop('join_key', axis=1)
    # Days with no workout_intensity are rest days
    df.loc[df['start_date_utc'] == '', 'workout_intensity'] = 'rest'
    # Clean up the resulting df: drop the duplicated merge columns and suffixes
    df = df[[c for c in df.columns if not c.endswith('_y')]]
    df.columns = [c[:-2] if c.endswith('_x') else c for c in df.columns]
    df = df.rename(columns={'type': 'workout_type', 'name': 'workout_name'})
    # Filter on workout intensity/rest day
    if workout_intensity == 'workout':
        df = df[df['workout_intensity'] != 'rest']
    elif workout_intensity != 'all':
        df = df[df['workout_intensity'] == workout_intensity]
    # Filter on workout type
    if sport != 'all':
        df = df[df['workout_type'] == sport]

    df.drop(columns=['start_date_utc', 'end_date_utc'], inplace=True)

    df.set_index('timestamp_utc', inplace=True)

    return df
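Note (separate from the function above): matching tracks to workout windows is an interval join done with a dummy cross-join key. A tiny pandas sketch of the technique with made-up data (recent pandas can also use merge(..., how='cross') and skip the key):

import pandas as pd

tracks = pd.DataFrame({'timestamp_utc': pd.to_datetime(['2024-01-01 10:05',
                                                        '2024-01-01 18:00'])})
workouts = pd.DataFrame({'start_date_utc': pd.to_datetime(['2024-01-01 10:00']),
                         'end_date_utc': pd.to_datetime(['2024-01-01 11:00']),
                         'workout_name': ['Morning run']})

# Cross join via a constant key, then keep only the tracks played inside a workout window
matched = (tracks.assign(join_key=1)
           .merge(workouts.assign(join_key=1), on='join_key')
           .drop(columns='join_key')
           .query('timestamp_utc >= start_date_utc and timestamp_utc <= end_date_utc'))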
Exemple #59
0
def admin_driver_records(did):
    driver = model.User.query.get(did)
    assert driver and driver.acct_type == model.User.DRIVER
    
    records = []
    date = datetime.date.today()
    if 'date' in request.args:
        try:
            date = datetime.datetime.strptime(request.args['date'], '%Y-%m')
        except ValueError:
            flash('Bad date format.', 'error')
    
    if request.method == 'POST' and 'add_day' in request.form and request.form['add_day'] != 'None':
        try:
            add_date = datetime.datetime.strptime(request.form['add_day'], '%Y-%m-%d')
            count = model.DriverDailyRoute.query.filter(model.DriverDailyRoute.driver_id == did,
                                                extract('month', model.DriverDailyRoute.date) == add_date.month,
                                                extract('year', model.DriverDailyRoute.date) == add_date.year, 
                                                extract('day', model.DriverDailyRoute.date) == add_date.day).count()
            if not count:                                   
                r = model.DriverDailyRoute()
                r.driver_id = did
                r.date = add_date
                model.db.session.add(r)
                model.db.session.commit()
            else:
                flash('Record already exists.', 'error')
            return redirect(request.url)
        except Exception:
            model.db.session.rollback()
            flash('Error adding record!', 'error')
            log_except()
        
    
    display = None
    if 'display' in request.args:
        display = model.DriverDailyRoute.query.get(int(request.args['display']))
    
    if display and request.method == 'POST' and 'add_stop' in request.form and request.form['add_stop'] != 'None':
        a = model.Agency.query.get(int(request.form['add_stop']))
        assert a
        try:
            s = model.DriverStop()
            s.agency_id = a.id
            s.position_in_day = len(display.stops)
            s.time = None
            display.stops.append(s)
            model.db.session.commit()
            return redirect(request.url)
        except Exception:
            model.db.session.rollback()
            flash('Error adding record!', 'error')
            log_except()
    
    if display and request.method == 'POST' and 'delete_stop' in request.form:
        try:
            r = model.DriverStop.query.get(int(request.form['delete_stop']))
            assert r and r.schedule_id == display.id
            model.db.session.delete(r)
            model.db.session.commit()
            return redirect(request.url)
        except Exception:
            model.db.session.rollback()
            flash('Error removing record!', 'error')
            log_except()
    records = model.DriverDailyRoute.query.filter(model.DriverDailyRoute.driver_id == did, 
                                                  extract('month', model.DriverDailyRoute.date) == date.month,
                                                  extract('year', model.DriverDailyRoute.date) == date.year).order_by(model.DriverDailyRoute.date).all()
    day_used = [False] * 32
    for r in records:
        day_used[r.date.day] = True                                        
    date_counter = datetime.datetime(date.year, date.month, 1)
    unused_dates = []
    while date_counter.month == date.month:
        if not day_used[date_counter.day]:
            unused_dates.append(date_counter)
        date_counter += datetime.timedelta(days=1)
    
    agencies = (model.Agency.query.options(
        load_only(model.Agency.name, model.Agency.id)).all()
        if display else None)
    return render_template('/admin/driver_records.html',
                           records=records,
                           date=date,
                           display_record=display,
                           agencies=agencies,
                           driver=driver,
                           unused_days=unused_dates,
                           page_title=driver.last_name + ', ' + driver.first_name + ' Records')
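Note (not from the view above): pinning a DateTime column to a single calendar day with three extract() calls, as done here, is portable; casting to Date is an equivalent, more compact check. A minimal sketch with a hypothetical visits table clause:

import datetime
import sqlalchemy as sa

visits = sa.table('visits', sa.column('created_at', sa.DateTime()))  # hypothetical table
target = datetime.date(2024, 5, 17)

# Option 1: the pattern used above -- one extract() per date part
by_parts = sa.and_(
    sa.extract('year', visits.c.created_at) == target.year,
    sa.extract('month', visits.c.created_at) == target.month,
    sa.extract('day', visits.c.created_at) == target.day,
)

# Option 2: cast the DateTime column to Date and compare it directly
by_cast = sa.cast(visits.c.created_at, sa.Date) == target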
Exemple #60
0
def time_archives():
    # func.count() renders COUNT(*); func.count("*") would just bind the literal string '*'
    times = db.session.query(
        extract('year', Blog.created_at).label('y'),
        extract('month', Blog.created_at).label('m'),
        func.count()).group_by('y', 'm')
    return times.all()
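Note (not from the snippet above): group_by('y', 'm') works because SQLAlchemy resolves those strings against the labels in the columns clause; the same label references can drive ordering. A minimal sketch with a hypothetical blog table clause, newest first, assuming SQLAlchemy 1.4+:

import sqlalchemy as sa

blog = sa.table('blog', sa.column('created_at', sa.DateTime()))  # hypothetical table clause
y = sa.extract('year', blog.c.created_at).label('y')
m = sa.extract('month', blog.c.created_at).label('m')
# group_by()/order_by() accept the label names as strings, as in the example above
stmt = (sa.select(y, m, sa.func.count())
        .group_by('y', 'm')
        .order_by(sa.desc('y'), sa.desc('m')))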