Example #1
def count_for_payment_sales(curr_user):
    try:
        transdate = request.args.get('transdate')
        for_payment = db.session.query(SalesHeader).filter(
            and_(
                func.cast(SalesHeader.transdate, DATE) == transdate,
                or_(
                    and_(SalesHeader.confirm == True,
                         SalesHeader.transtype != 'CASH'),
                    and_(SalesHeader.confirm != True,
                         SalesHeader.transtype == 'CASH')),
                SalesHeader.docstatus == 'O')).count()
        for_confirmation = db.session.query(SalesHeader).filter(
            and_(
                func.cast(SalesHeader.transdate, DATE) == transdate,
                SalesHeader.confirm != True, SalesHeader.transtype != 'CASH',
                SalesHeader.docstatus == 'O')).count()

        return ResponseMessage(True,
                               data={
                                   "for_payment": for_payment,
                                   "for_confirmation": for_confirmation
                               }).resp()
    except (pyodbc.IntegrityError, exc.IntegrityError) as err:
        return ResponseMessage(False, message=f"{err}").resp(), 500
    except Exception as err:
        return ResponseMessage(False, message=f"{err}").resp(), 500
    finally:
        db.session.close()
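The pattern worth noting here is casting a DATETIME column down to DATE so a whole day can be matched against a plain 'YYYY-MM-DD' string. A minimal sketch of the same expression, using SQLAlchemy's top-level cast() construct and an invented column name:

from sqlalchemy import Date, DateTime, cast
from sqlalchemy.sql import column

transdate = column('transdate', DateTime)  # hypothetical column
expr = cast(transdate, Date) == '2021-06-01'
print(expr)  # CAST(transdate AS DATE) = :param_1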
Example #2
    def direct_run_single_job_task(self, execute_date, job_list, session=None):
        """
        Directly run the given jobs, with no dependency conditions.
        :param execute_date: YYYY-MM-DD
        :param job_list: list of jobs to create task instances for
        :param session:
        :return: list of refreshed TaskInstance rows
        """
        scheduler_time = datetime.now()
        task_ids = []
        instance_list = []
        for job in job_list:
            instance = TaskInstance(
                etl_day=execute_date,
                task_id=job.task_id,
                sub_task_id='0',
                name=job.task_name,
                task_type=State.TASK_JOB,
                module="bi",
                status=State.QUEUED,
                scheduler_time=scheduler_time,
                scheduler_retry=0,
                worker_retry=0,
            )
            task_ids.append(instance.task_id)
            instance_list.append(instance)
        session.add_all(instance_list)
        session.commit()

        # refresh task_instance
        task_instance = session.query(TaskInstance).filter(TaskInstance.task_id.in_(task_ids))\
            .filter(func.cast(TaskInstance.scheduler_time, DateTime) == func.cast(scheduler_time, DateTime)) \
            .filter(TaskInstance.etl_day == execute_date, TaskInstance.status == State.QUEUED).all()
        return task_instance
Example #3
    def create_cron_task_instance(self, execute_date, cron_list, session=None):
        """
        Create a task instance for each cron_conf (extract tasks excluded).
        :param execute_date: YYYY-MM-DD
        :param cron_list: list of cron configurations
        :param session:
        :return: task instance list
        """
        scheduler_time = datetime.now()
        task_ids = []
        instance_list = []
        for cron_conf in cron_list:
            instance = TaskInstance(
                etl_day=execute_date,
                task_id=cron_conf.task_id,
                name=cron_conf.name,
                task_type=State.TASK_CRON,
                module="bi",
                status=State.QUEUED,
                scheduler_time=scheduler_time,
                scheduler_retry=0,
                worker_retry=0,
            )
            task_ids.append(instance.task_id)
            instance_list.append(instance)
        session.add_all(instance_list)
        session.commit()

        # refresh
        task_instance = session.query(TaskInstance).filter(TaskInstance.task_id.in_(task_ids)) \
            .filter(TaskInstance.etl_day == execute_date) \
            .filter(func.cast(TaskInstance.scheduler_time, DateTime) == func.cast(scheduler_time, DateTime)) \
            .all()
        return task_instance
Example #4
 def daily_amount_select():
     return select([
         func.cast(
             func.sum(SalesOrderLine.unit_price * SalesOrderLine.quantity) /
             func.greatest(
                 func.cast(
                     func.date_part(
                         'DAY',
                         func.current_date() - Product.create_date),
                     Integer), 1), Numeric)
     ]).as_scalar()
Example #5
 def test_filter_ref_case(self, Student):
     assert len(Student.filter(
         case([(
             ref('created') <=
             func.now() - func.cast('2 YEARS', INTERVAL),
             'senior'
         ), (
             ref('created') <=
             func.now() - func.cast('1 YEAR', INTERVAL),
             'junior'
         )], else_='freshman') == 'freshman'
     ).all()) == 3
Example #6
def weekday_stats(session, year=None, bid=None):
    query = (_daily_stats_agg_query(session).add_column(
        func.to_char(
            func.cast(func.concat(DailyStats.year, '-01-01'), Date) +
            func.cast(func.concat(DailyStats.day_of_year - 1, ' days'),
                      Interval),
            'ID').label('weekday')).group_by('weekday').order_by('weekday'))
    if year:
        query = query.filter(DailyStats.year == year)
    if bid:
        query = query.filter(DailyStats.bid == bid)
    return query
Example #7
 def get(self):
     session = DBSession()
     try:
         dep_station = urllib.parse.unquote(request.args.get('dep_station'))
         arv_station = urllib.parse.unquote(request.args.get('arv_station'))
         dg_only = urllib.parse.unquote(
             request.args.get('DG_only')).lower() == 'true'
         dep_train_info = session.query(Interval.train_id, Interval.dep_station) \
             .join(Station, Interval.dep_station == Station.station_id) \
             .filter(Station.station_name == dep_station) \
             .subquery()
         arv_train_info = session.query(Interval.train_id, Interval.arv_station) \
             .join(Station, Interval.arv_station == Station.station_id) \
             .filter(Station.station_name == arv_station) \
             .subquery()
         raw_train_info = session.query(Interval.train_id, Train.train_name,
                                        func.min(Interval.interval_id).label('first_interval'),
                                        func.max(Interval.interval_id).label('last_interval')) \
             .join(Train, Train.train_id == Interval.train_id) \
             .join(dep_train_info, Interval.train_id == dep_train_info.c.train_id) \
             .join(arv_train_info, Interval.train_id == arv_train_info.c.train_id) \
             .filter(or_(Interval.dep_station == dep_train_info.c.dep_station,
                         Interval.arv_station == arv_train_info.c.arv_station)) \
             .group_by(Interval.train_id, Train.train_name) \
             .subquery()
         dep_i = aliased(Interval, name='dep_i')
         arv_i = aliased(Interval, name='arv_i')
         dep_s = aliased(Station, name='dep_s')
         arv_s = aliased(Station, name='arv_s')
         train_info_list = session.query(raw_train_info.c.train_name,
                                         raw_train_info.c.first_interval, raw_train_info.c.last_interval,
                                         dep_s.station_name.label('dep_station'),
                                         func.cast(dep_i.dep_datetime, String).label('dep_time'),
                                         arv_s.station_name.label('arv_station'),
                                         func.cast(arv_i.arv_datetime, String).label('arv_time')) \
             .join(dep_i, dep_i.interval_id == raw_train_info.c.first_interval) \
             .join(arv_i, arv_i.interval_id == raw_train_info.c.last_interval) \
             .join(dep_s, dep_s.station_id == dep_i.dep_station) \
             .join(arv_s, arv_s.station_id == arv_i.arv_station) \
             .filter(dep_s.station_name == dep_station, arv_s.station_name == arv_station) \
             .order_by(dep_i.dep_datetime) \
             .all()
         train_info_list = list(
             filter(
                 lambda x: x['train_name'][0] in 'DG' if dg_only else True,
                 map(lambda x: dict(zip(x.keys(), x)), train_info_list)))
         return jsonify(result=train_info_list, code=0)
     finally:
         session.close()
Example #8
def user_list():
    """
    User list.
    :return:
    """
    id = request.args.get("id")
    if id:
        return get_user(id)

    q = db.session.query(User.id, User.department_key,
                         func.max(User.name).label("name"),
                         func.max(User.loginid).label("loginid"),
                         func.max(User.telephone).label("telephone"),
                         func.max(User.address).label("address"),
                         func.string_agg(func.cast(Role.id, Text), ',').label("roles")) \
        .outerjoin(UserRole, UserRole.user_id == User.id) \
        .outerjoin(Role, Role.id == UserRole.role_id) \
        .group_by(User.id)

    name = request.args.get("name")
    if name is not None:
        q = q.filter(User.name.like("%" + name.split(".")[-1] + "%"))
    # q = q.order_by(User.name.desc())
    offset = int(request.args.get('offset'))
    limit = int(request.args.get('limit'))
    sort = request.args.get('sort')
    if sort is None:
        sort = "-id"
    res, total = sql_tool.model_page(q, limit, offset, sort)
    return JsonResult.res_page(res, total)
Example #9
 def getCronTask(self, task_id=None, fetch_time=None, session=None):
     """
     Query cron configurations by task ID.
     :param task_id: task ID
     :param fetch_time:
     :param session:
     :return: CronConfig
     """
     query = session.query(CronConf)
     if task_id is not None:
         query = query.filter(CronConf.task_id == task_id)
     if fetch_time is not None:
         query = query.filter(
             func.cast(CronConf.modify_time, DateTime)
             >= func.cast(fetch_time, DateTime))
     return query.all()
Example #10
 def add_area_order_by(self, query, column):
     if self.submission.election.voteType is VoteTypeEnum.Postal:
         # Because the counting centre names are not numbers, e.g. "PV 41", "PV 42"
         return query.order_by(column)
     elif self.submission.election.voteType is VoteTypeEnum.NonPostal:
         # Counting centre names are numbers, e.g. "34", "35", "36"
         return query.order_by(func.cast(column, db.Integer))
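The cast is what makes '9' sort before '34'; a plain string sort is lexicographic. A small sketch of the two orderings, with an invented column name:

from sqlalchemy import Integer, cast
from sqlalchemy.sql import column

centre_name = column('centre_name')  # hypothetical
print(centre_name.asc())                 # centre_name ASC -> '100' < '34' < '9'
print(cast(centre_name, Integer).asc())  # CAST(centre_name AS INTEGER) ASC -> 9 < 34 < 100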
Example #11
    def advanced_search(cls, params):
        query = cls.query
        columns = cls.__table__.columns.keys()
        for attr in params:
            if params[attr] != "" and attr in columns:
                query = query.filter(getattr(cls, attr) == params[attr])
            elif params[attr] != "":
                if attr == 'product_part_number_query':
                    formatted_query = format_match_query(params['product_part_number_query_type'], params[attr])
                    query = query.filter(cls.part.has(Part.design.has(Design.design_number.ilike(formatted_query))))
                elif attr == 'product_serial_number_query':
                    formatted_query = format_match_query(params['product_serial_number_query_type'], params[attr])
                    query = query.filter(func.cast(cls.serial_number, types.Text).ilike(formatted_query))
                elif attr == 'text_fields_query':
                    formatted_query = format_match_query('includes', params['text_fields_query'])
                    query = query.filter(cls.summary.ilike(formatted_query) | cls.notes.ilike(formatted_query))
                elif attr == 'open_discrepancies':
                    query = query.filter(cls.discrepancies.any(Discrepancy.state.in_(['Open'])))
                elif attr == 'created_on_start':
                    query = query.filter(cls.created_at >= params['created_on_start'])
                elif attr == 'created_on_end':
                    query = query.filter(cls.created_at <= params['created_on_end'])
                elif attr == 'in_open_state':
                    query = query.filter(cls.state.in_(cls.workflow.open_states))
                elif attr =='exclude_obsolete':
                    query = query.filter(cls.state != cls.workflow.obsolete_state)
                elif attr == 'material_id':
                    query = query.filter(cls.part.has(Part.material_id == params[attr]))

        return query.all()
Example #12
    def areaWiseSummary(self):
        area_wise_valid_vote_count_subquery = self.area_wise_valid_vote_count(
        ).subquery()
        area_wise_rejected_vote_count_subquery = self.area_wise_rejected_vote_count(
        ).subquery()

        return db.session.query(
            Area.Model.areaId, Area.Model.areaName,
            func.sum(
                area_wise_valid_vote_count_subquery.c.validVoteCount).label(
                    "validVoteCount"),
            func.sum(area_wise_rejected_vote_count_subquery.c.rejectedVoteCount
                     ).label("rejectedVoteCount"),
            func.sum(area_wise_valid_vote_count_subquery.c.validVoteCount +
                     area_wise_rejected_vote_count_subquery.c.rejectedVoteCount
                     ).label("totalVoteCount")
        ).join(
            area_wise_valid_vote_count_subquery,
            area_wise_valid_vote_count_subquery.c.areaId == Area.Model.areaId,
            isouter=True).join(
                area_wise_rejected_vote_count_subquery,
                area_wise_rejected_vote_count_subquery.c.areaId ==
                Area.Model.areaId,
                isouter=True).group_by(Area.Model.areaId).order_by(
                    func.cast(Area.Model.areaName, db.Integer)).filter(
                        Area.Model.areaId.in_([
                            area.areaId for area in self.countingCentres
                        ])).all()
Example #13
def search_around(timestamp: T.number):
    get_logger().info('/search_around %s', timestamp)
    utc_timestamp = timestamp  # old 'timestamp' name is legacy

    # TODO meh. use count/pagination instead?
    delta_back = timedelta(hours=3).total_seconds()
    delta_front = timedelta(minutes=2).total_seconds()
    # TODO not sure about delta_front, but it also serves as a quick hack to accommodate all the truncations etc.

    return search_common(
        url='http://dummy.org',  # NOTE: not used in the where query (below); perhaps this should be removed
        where=lambda table, url: between(
            func.strftime(
                '%s',  # NOTE: it's tz aware, e.g. would distinguish +05:00 vs -03:00
                # this is a bit fragile, relies on cachew internal timestamp format, e.g.
                # 2020-11-10T06:13:03.196376+00:00 Europe/London
                func.substr(
                    table.c.dt,
                    1,  # substr is 1-indexed
                    # instr finds the first match, but if not found it defaults to 0.. which we hack by concatting with ' '
                    func.instr(
                        func.cast(table.c.dt, types.Unicode).op('||')
                        (' '), ' ') - 1,
                    # the cast seems necessary; otherwise ' ' gets treated as a datetime
                )) - literal(utc_timestamp),
            literal(-delta_back),
            literal(delta_front),
        ),
    )
Example #14
    def refresh_from_db(self, session=None):
        """
        Reloads the current dagrun from the database
        :param session: database session
        """
        DR = DagRun

        exec_date = func.cast(self.execution_date, DateTime)

        dr = session.query(DR).filter(
            DR.dag_id == self.dag_id,
            func.cast(DR.execution_date, DateTime) == exec_date,
            DR.run_id == self.run_id).one()

        self.id = dr.id
        self.state = dr.state
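Casting both sides of the equality normalizes them to the same type and precision before comparing, which can matter on backends such as MySQL where a microsecond-precision bind parameter would otherwise never equal a stored second-precision DATETIME. A minimal sketch of the comparison, with an invented column:

from datetime import datetime
from sqlalchemy import Column, DateTime, cast

execution_date = Column('execution_date', DateTime)  # hypothetical
expr = cast(execution_date, DateTime) == cast(datetime.now(), DateTime)
print(expr)  # CAST(execution_date AS DATETIME) = CAST(:param_1 AS DATETIME)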
Example #15
class Reporter(Base):
    __tablename__ = "reporters"

    id = Column(Integer(), primary_key=True)
    first_name = Column(String(30), doc="First name")
    last_name = Column(String(30), doc="Last name")
    email = Column(String(), doc="Email")
    favorite_pet_kind = Column(PetKind)
    pets = relationship("Pet",
                        secondary=association_table,
                        backref="reporters",
                        order_by="Pet.id")
    articles = relationship("Article", backref="reporter")
    favorite_article = relationship("Article", uselist=False)

    @hybrid_property
    def hybrid_prop(self):
        return self.first_name

    column_prop = column_property(select([func.cast(func.count(id), Integer)]),
                                  doc="Column property")

    composite_prop = composite(CompositeFullName,
                               first_name,
                               last_name,
                               doc="Composite")
Example #16
def get_admin_monthly_overview() -> List:
    monthly_stats = {month: 0 for month in range(1, 13)}

    month = func.date_trunc('month', func.cast(PhishingEmail.created_at, Date))

    # Returns a list of phishing emails across all email addresses
    # detected in the current year
    mails_detected_yearly = db.session.query(PhishingEmail) \
        .filter(PhishingEmail.receiver_id == EmailAddress.email_id,
                PhishingEmail.created_at_year == datetime.now().year) \
        .order_by(month).all()

    for pe in mails_detected_yearly:
        month_num = pe.get_created_month()
        monthly_stats[month_num] = monthly_stats.get(month_num, 0) + 1
    monthly_stats = list(monthly_stats.values())
    return monthly_stats
Example #17
def msrun_page(request):
    try:
        query = DBSession.query(func.count(SpectrumHit.spectrum_hit_id).label("count_hits"),
                                func.count(SpectrumHit.sequence.distinct()).label("count_pep"),
                                func.count(Protein.name.distinct()).label("count_prot")
        )

        query = query.join(MsRun, SpectrumHit.ms_run_ms_run_id == MsRun.ms_run_id)
        query = query.join(t_spectrum_protein_map)
        query = query.join(Protein)
        query = query.filter(MsRun.ms_run_id == request.matchdict["msrun"])
        statistics = json.dumps(query.all())

        query = DBSession.query(MsRun.filename,
                                func.group_concat(
                                    (HlaType.hla_string.distinct().op('order by')(HlaType.hla_string))).label(
                                    'hla_typing'),
                                Source.histology, Source.source_id, Source.patient_id, Source.organ,
                                Source.comment, Source.dignity, Source.celltype, Source.location,
                                Source.metastatis, Source.person, Source.organism, Source.treatment, Source.comment.label("comment"),
                                func.cast(MsRun.ms_run_date, String).label("ms_run_date"), MsRun.used_share,
                                MsRun.comment.label("msrun_comment"),
                                MsRun.sample_mass, MsRun.sample_volume, MsRun.antibody_set,
                                MsRun.antibody_mass)
        query = query.join(Source)
        query = query.join(t_hla_map)
        query = query.join(HlaType)
        query = query.filter(MsRun.ms_run_id == request.matchdict["msrun"])
        metadata = json.dumps(query.all())

    except:
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {"statistics": statistics, "metadata": metadata, "msrun": request.matchdict["msrun"]}
Example #18
    def count_rejected(self):
        """ Returns the number of rejected notices by user.

        Returns a tuple ``(user name, number of rejections)``
        for each user. Does not filter by the state of the collection.

        """

        query = self.session.query(GazetteNoticeChange.channel_id,
                                   GazetteNoticeChange.meta['event'],
                                   GazetteNoticeChange.owner,
                                   GazetteNoticeChange.meta['user_name'])
        query = query.filter(
            or_(GazetteNoticeChange.meta['event'] == 'rejected',
                GazetteNoticeChange.meta['event'] == 'submitted'))
        query = query.order_by(GazetteNoticeChange.channel_id,
                               GazetteNoticeChange.created.desc())

        users = dict(
            self.session.query(func.cast(User.id, String),
                               User.realname).all())

        result = {}
        for id_, changes in groupby(query, lambda x: x[0]):
            marker = False
            for notice, state, user_id, user_name in changes:
                if state == 'submitted':
                    name = users.get(user_id) or user_name
                    if marker and name:
                        result.setdefault(name, 0)
                        result[name] = result[name] + 1
                marker = state == 'rejected'
        return sorted(list(result.items()), key=lambda x: x[1], reverse=True)
Example #19
def build_topio_id_column_property(
        owner_namespace: ColumnProperty,
        asset_id: int,
        asset_type: str):

    return 'topio.' + owner_namespace.expression + '.' + \
        func.cast(asset_id, String) + '.' + asset_type
Example #20
 def custom_time(self, stop, start):
     stat_date = func.cast(Stat.time, db.Date)
     return db.session.query(stat_date, func.sum(Stat.ammount)). \
         group_by(stat_date). \
         filter(and_(Stat.activity_id == self.id,
             stat_date >= str(start), stat_date <= str(stop))). \
         order_by(stat_date).all()
Example #21
def _summary_to_row(summary: TimePeriodOverview) -> dict:
    day_values, day_counts = _counter_key_vals(summary.timeline_dataset_counts)
    region_values, region_counts = _counter_key_vals(
        summary.region_dataset_counts)

    begin, end = summary.time_range if summary.time_range else (None, None)

    if summary.footprint_geometry and summary.footprint_srid is None:
        raise ValueError("Geometry without srid", summary)

    return dict(
        dataset_count=summary.dataset_count,
        timeline_dataset_start_days=day_values,
        timeline_dataset_counts=day_counts,
        # TODO: SQLAlchemy needs a bit of type help for some reason. Possible PgGridCell bug?
        regions=func.cast(region_values, type_=postgres.ARRAY(String)),
        region_dataset_counts=region_counts,
        timeline_period=summary.timeline_period,
        time_earliest=begin,
        time_latest=end,
        size_bytes=summary.size_bytes,
        footprint_geometry=(None if summary.footprint_geometry is None else
                            geo_shape.from_shape(summary.footprint_geometry,
                                                 summary.footprint_srid)),
        footprint_count=summary.footprint_count,
        generation_time=func.now(),
        newest_dataset_creation_time=summary.newest_dataset_creation_time,
        crses=summary.crses,
    )
Example #22
 def unjustified_score(self):
     return db.session.query(func.cast(func.sum(
         ItemAudit.score), Integer)).filter(
             ItemAudit.item_id == self.id, ItemAudit.justified == False,
             ItemAudit.fixed == False, ItemAudit.auditor_setting_id
             == AuditorSettings.id, AuditorSettings.disabled
             == False).one()[0] or 0
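The trailing `one()[0] or 0` handles SUM() returning NULL when no rows match. The same fallback can be pushed into SQL with COALESCE so the query itself returns 0; a sketch with an invented column:

from sqlalchemy import Integer, cast, func
from sqlalchemy.sql import column

score = column('score')  # hypothetical
# COALESCE converts the NULL that SUM() yields on an empty set into 0
expr = func.coalesce(cast(func.sum(score), Integer), 0)
print(expr)  # coalesce(CAST(sum(score) AS INTEGER), :coalesce_1)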
Example #23
def computeHostPercentageGraph(fromDate=None, packageId=None):
    date = None
    jobsGraphData = FcWorkunit.query

    if fromDate:
        try:
            date = datetime.datetime.strptime(fromDate, "%Y-%m-%d %H:%M:%S")
        except ValueError:
            abort(500, 'Wrong date format')
        jobsGraphData = jobsGraphData.filter(FcWorkunit.time >= date)

    # TODO: incorporate the date filter

    jobsGraphData = db.session.query(
        Host.domain_name.label('label'),
        func.cast(func.sum(
            FcWorkunit.hc_keyspace), Integer()).label('value')).filter(
                FcWorkunit.job_id == packageId).group_by(
                    FcWorkunit.boinc_host_id).join(FcWorkunit.host).all()

    data = []
    for tup in jobsGraphData:
        data.append({'label': tup[0], 'value': tup[1]})

    return {
        'type': 'pie',
        'data': data,
        'dateFrom': fromDate,
    }
Example #24
def update_channel_stats(date_from=None, date_to=None):
    """Update frequency stats columns on channel."""
    interval_weeks = app.config.get('UPDATE_FREQUENCY_INTERVAL_WEEKS', 4)
    updates = dict(
        (stats_field,
         # Select the average number of items per week
         src_model.query.with_entities(func.count() / float(interval_weeks)).
         # Look at only last N weeks and only if added a day after the channel was created
         filter(
             src_model.channel == Channel.id,
             date_added > func.now() - text("interval '%d weeks'" % interval_weeks),
             date_added > Channel.date_added + text("interval '1 day'")
         ).as_scalar())
        for stats_field, src_model, date_added in (
            (Channel.update_frequency, VideoInstance, VideoInstance.date_added),
            (Channel.subscriber_frequency, Subscription, Subscription.date_created),
        )
    )
    updates[Channel.date_updated] = Channel.date_updated  # override column onupdate

    channels = Channel.query
    if date_from and date_to:
        # Update only those channels created within the time window.
        # All channels should be updated within a 24 hour period.
        channels = channels.filter(func.cast(Channel.date_added, TIME).between(
            date_from.time(), date_to.time()))
    channels.update(updates, False)
Example #25
def get_inline_query_performance_statistics(session):
    """Plot statistics regarding performance of inline query requests."""
    creation_date = func.cast(InlineQueryRequest.created_at,
                              Date).label("creation_date")
    # Group requests by creation date and average their duration
    strict_search_subquery = (session.query(
        creation_date,
        func.avg(InlineQueryRequest.duration).label("count")).group_by(
            creation_date).order_by(creation_date).all())
    strict_queries = [("strict", q[0], q[1]) for q in strict_search_subquery]

    # Combine the results in a single dataframe and name the columns
    request_statistics = strict_queries
    dataframe = pandas.DataFrame(request_statistics,
                                 columns=["type", "date", "duration"])

    months = mdates.MonthLocator()  # every month
    months_fmt = mdates.DateFormatter("%Y-%m")

    # Plot each result set
    fig, ax = plt.subplots(figsize=(30, 15), dpi=120)
    for key, group in dataframe.groupby(["type"]):
        ax = group.plot(ax=ax, kind="bar", x="date", y="duration", label=key)
        ax.xaxis.set_major_locator(months)
        ax.xaxis.set_major_formatter(months_fmt)

    image = image_from_figure(fig)
    image.name = "request_duration_statistics.png"
    return image
Example #26
def individual(person):
    personal_query = db.session.query(Stats).filter(Stats.author == person)
    post_count = personal_query.count()
    sum_query = db.session.query(Stats.author,
                                 func.sum(Stats.hit_count).label("Total")).group_by(Stats.author). \
        filter(Stats.author == person).first()

    query_chart = db.session.query(func.cast(Stats.date, Date).label("Date"),
                                   func.sum(Stats.hit_count).label("Total"),
                                   Stats.author).group_by(func.cast(Stats.date, Date),
                                                          Stats.author).filter(Stats.author == person)
    script, div = person_plot(query_chart, person, query_chart.count())


    return render_template("person.html", stats=personal_query, script=script, div=div, person_total=sum_query,
                           post_count=post_count)
Example #27
    class Student(db.Model):
        __tablename__ = 'student'

        id = Column(Integer, primary_key=True)
        contextual_id = column_property(
            func.md5(
                bindparam('context', value='', type_=String) +
                func.cast(id, String)))
        name = Column(String(64), index=True, nullable=False)
        address = Column(String(128), index=False, nullable=True)
        phone = Column(String(35), nullable=True)
        home_phone = Column(String(35), nullable=True)
        email = Column(CIText(64, True), nullable=True)

        created = Column(DateTime, server_default='now()')

        guardian_number = MapColumn('home_phone')
        phone_numbers = MapColumn(['phone', 'home_phone'])

        contact_info = MapColumn({
            'phone': 'phone',
            'home_phone': 'home_phone',
            'email': 'email'
        })

        teachers = relationship(
            "Teacher",
            secondary='teacher_to_student',
            primaryjoin="Student.id == teacher_to_student.c.student_id",
            secondaryjoin="teacher_to_student.c.teacher_id == Teacher.id")

        first_name = column_property(func.split_part(func.trim(name), " ", 1))
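The contextual_id property above hashes a caller-supplied salt together with the row id; the bindparam() defaults to an empty string and can be rebound per query (e.g. via Query.params(context='...')). A sketch of just the expression, with an invented column:

from sqlalchemy import Integer, String, bindparam, cast, func
from sqlalchemy.sql import column

row_id = column('id', Integer)  # stands in for Student.id
expr = func.md5(bindparam('context', value='', type_=String) + cast(row_id, String))
print(expr)  # md5(:context || CAST(id AS VARCHAR))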
Example #28
    def get(self):
        """
        Train information query API

        **argument**:
        - `train_name`: `str`

        **return**: A JSON dictionary with values:
        - `code`: `int`, always equals to 0
        - `result`: `list` of dictionaries of passing station information:
            - `id`: `int`
            - `district`: `str`
            - `station`: `str`
            - `time`: `str`
        """
        session = DBSession()
        try:
            train_name = request.args.get('train_name')
            successive_train_rec = get_interval_list(train_name, session)
            interval_list = session.query(successive_train_rec.c.interval_no.label('id'),
                                          City.city_name.concat(',').concat(District.district_name).label('district'),
                                          Station.station_name.label('station'),
                                          func.cast(successive_train_rec.c.dep_datetime, String).label('time')) \
                .join(Station, Station.station_id == successive_train_rec.c.dep_station) \
                .join(District, Station.district_id == District.district_id) \
                .join(City, District.city_id == City.city_id) \
                .order_by(successive_train_rec.c.interval_id, Station.available == True) \
                .all()

            last_no = interval_list[-1].id
            resp = list(map(lambda x: dict(zip(x.keys(), x)), interval_list))
            last_station = session.query(func.cast(successive_train_rec.c.arv_datetime, String).label('time'),
                                         City.city_name.concat(',').concat(District.district_name).label('district'),
                                         Station.station_name.label('station'),
                                         literal(last_no + 1).label('id')) \
                .join(Station, Station.station_id == successive_train_rec.c.arv_station) \
                .join(District, Station.district_id == District.district_id) \
                .join(City, District.city_id == City.city_id) \
                .filter(successive_train_rec.c.interval_no == last_no, Station.available == True) \
                .first()
            if last_station:
                resp.append(dict(zip(last_station.keys(), last_station)))
            return jsonify(result=resp, code=0)
        except:
            return jsonify(code=10, error='route not found')
        finally:
            session.close()
Example #29
    def reddit_score(self):
        s = self.upvotes - self.downvotes
        order = func.log(10, func.greatest(func.abs(s), 1))
        sign = func.sign(s)
        seconds = func.date_part('epoch', self.timestamp) - 1134028003

        return func.round(func.cast(sign * order + seconds / 45000, Numeric),
                          7)
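This is the classic Reddit "hot" ranking: the sign of the net vote count times the log of its magnitude, plus roughly one extra point per 45000 seconds (12.5 hours) of age relative to the epoch offset 1134028003. A plain-Python equivalent, handy for sanity-checking the SQL expression (assumes a UTC timestamp):

import math
from datetime import datetime, timezone

def reddit_score(upvotes: int, downvotes: int, timestamp: datetime) -> float:
    s = upvotes - downvotes
    order = math.log10(max(abs(s), 1))
    sign = (s > 0) - (s < 0)
    # mirrors func.date_part('epoch', timestamp) - 1134028003
    seconds = timestamp.replace(tzinfo=timezone.utc).timestamp() - 1134028003
    return round(sign * order + seconds / 45000, 7)

print(reddit_score(100, 10, datetime(2021, 1, 1)))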
Example #30
    def refresh_from_db(self, session=None):
        """
        Reloads the current dagrun from the database
        :param session: database session
        """
        DR = DagRun

        exec_date = func.cast(self.execution_date, DateTime)

        dr = session.query(DR).filter(
            DR.dag_id == self.dag_id,
            func.cast(DR.execution_date, DateTime) == exec_date,
            DR.run_id == self.run_id
        ).one()

        self.id = dr.id
        self.state = dr.state
Example #31
def fuzzy_query(dep_place, arv_place, dg_only, session):
    dep_train_info = session.query(Interval.train_id, Interval.dep_station) \
        .join(Station, Interval.dep_station == Station.station_id) \
        .filter(Station.station_name.like(dep_place),
                Station.available == True,
                Interval.available == True) \
        .subquery()
    arv_train_info = session.query(Interval.train_id, Interval.arv_station) \
        .join(Station, Interval.arv_station == Station.station_id) \
        .filter(Station.station_name.like(arv_place),
                Station.available == True,
                Interval.available == True) \
        .subquery()
    raw_train_info = session.query(Interval.train_id, Train.train_name,
                                   func.min(Interval.interval_id).label('first_interval'),
                                   func.max(Interval.interval_id).label('last_interval')) \
        .join(Train, Train.train_id == Interval.train_id) \
        .join(dep_train_info, Interval.train_id == dep_train_info.c.train_id) \
        .join(arv_train_info, Interval.train_id == arv_train_info.c.train_id) \
        .filter(or_(Interval.dep_station == dep_train_info.c.dep_station,
                    Interval.arv_station == arv_train_info.c.arv_station),
                Train.available == True) \
        .group_by(Interval.train_id, Train.train_name) \
        .subquery()
    dep_i = aliased(Interval, name='dep_i')
    arv_i = aliased(Interval, name='arv_i')
    dep_s = aliased(Station, name='dep_s')
    arv_s = aliased(Station, name='arv_s')
    train_info_list = session.query(raw_train_info.c.train_name,
                                    raw_train_info.c.first_interval, raw_train_info.c.last_interval,
                                    dep_s.station_name.label('dep_station'),
                                    func.cast(dep_i.dep_datetime, String).label('dep_time'),
                                    arv_s.station_name.label('arv_station'),
                                    func.cast(arv_i.arv_datetime, String).label('arv_time')) \
        .join(dep_i, dep_i.interval_id == raw_train_info.c.first_interval) \
        .join(arv_i, arv_i.interval_id == raw_train_info.c.last_interval) \
        .join(dep_s, dep_s.station_id == dep_i.dep_station) \
        .join(arv_s, arv_s.station_id == arv_i.arv_station) \
        .filter(dep_s.station_name.like(dep_place), arv_s.station_name.like(arv_place),
                dep_s.available == True, arv_s.available == True,
                dep_i.available == True, arv_i.available == True) \
        .order_by(dep_i.dep_datetime) \
        .all()
    return list(
        filter(lambda x: x['train_name'][0] in 'DG' if dg_only else True,
               map(lambda x: dict(zip(x.keys(), x)), train_info_list)))
Example #32
def test():
    session = DBSession()
    try:
        dep_place = '%南京%'
        arv_place = '%西安%'
        dg_only = True
        dep_train_info = session.query(Interval.train_id, Interval.dep_station) \
            .join(Station, Interval.dep_station == Station.station_id) \
            .filter(Station.station_name.like(dep_place)) \
            .subquery()
        arv_train_info = session.query(Interval.train_id, Interval.arv_station) \
            .join(Station, Interval.arv_station == Station.station_id) \
            .filter(Station.station_name.like(arv_place)) \
            .subquery()
        raw_train_info = session.query(Interval.train_id, Train.train_name,
                                       func.min(Interval.interval_id).label('first_interval'),
                                       func.max(Interval.interval_id).label('last_interval')) \
            .join(Train, Train.train_id == Interval.train_id) \
            .join(dep_train_info, Interval.train_id == dep_train_info.c.train_id) \
            .join(arv_train_info, Interval.train_id == arv_train_info.c.train_id) \
            .filter(or_(Interval.dep_station == dep_train_info.c.dep_station,
                        Interval.arv_station == arv_train_info.c.arv_station)) \
            .group_by(Interval.train_id, Train.train_name) \
            .subquery()
        dep_i = aliased(Interval, name='dep_i')
        arv_i = aliased(Interval, name='arv_i')
        dep_s = aliased(Station, name='dep_s')
        arv_s = aliased(Station, name='arv_s')
        train_info_list = session.query(raw_train_info.c.train_name,
                                        raw_train_info.c.first_interval, raw_train_info.c.last_interval,
                                        dep_s.station_name.label('dep_station'),
                                        func.cast(dep_i.dep_datetime, String).label('dep_time'),
                                        arv_s.station_name.label('arv_station'),
                                        func.cast(arv_i.arv_datetime, String).label('arv_time')) \
            .join(dep_i, dep_i.interval_id == raw_train_info.c.first_interval) \
            .join(arv_i, arv_i.interval_id == raw_train_info.c.last_interval) \
            .join(dep_s, dep_s.station_id == dep_i.dep_station) \
            .join(arv_s, arv_s.station_id == arv_i.arv_station) \
            .filter(dep_s.station_name.like(dep_place), arv_s.station_name.like(arv_place)) \
            .order_by(dep_i.dep_datetime) \
            .all()
        train_info_list = list(filter(lambda x: x['train_name'][0] in 'DG' if dg_only else True,
                                      map(lambda x: dict(zip(x.keys(), x)), train_info_list)))
        return dict(result=train_info_list, code=0)
    finally:
        session.close()
Example #33
 def score(self):
     return db.session.query(
         func.cast(
             func.sum(ItemAudit.score),
             Integer)
     ).filter(
         ItemAudit.item_id == self.id,
         ItemAudit.auditor_setting_id == AuditorSettings.id,
         AuditorSettings.disabled == False).one()[0] or 0
Example #34
def update_metadata_source(request):
    if "id" in request.params:
        result_dict = dict()
        result_dict["id"] = request.params['id']
        query = DBSession.query(Source.patient_id, Source.organ,
                                Source.organism, Source.histology,
                                Source.dignity, Source.celltype,
                                Source.location, Source.metastatis,
                                Source.treatment, Source.person,
                                func.cast(Source.prep_date, String).label("prep_date")
        ).filter(Source.source_id == request.params["id"])

        source = json.dumps(query.all())
        result_dict['source'] = source
        query = DBSession.query(Source.source_id, HlaType.hla_string).join(t_hla_map).join(HlaType).filter(Source.source_id==request.params["id"])
        hla = json.dumps(query.all())
        result_dict['hla'] = hla


        # getting autocomplete items
        allowed_elements = {"patient_id": Source.patient_id, "organ": Source.organ,
                            "organism": Source.organism, "histology": Source.histology,
                            "dignity": Source.dignity, "celltype": Source.celltype,
                            "location": Source.location, "metastatis": Source.metastatis,
                            "treatment": Source.treatment, "person": Source.person,
                            "comment" : Source.comment, "typing": HlaType.hla_string}
        for k, v in allowed_elements.iteritems():
            query = DBSession.query(v)
            query = query.group_by(v)
            query_result = js_list_creator(query.all())
            result_dict[k] = query_result
        #result_dict['original'] = source

        return result_dict

    else:
        try:
            # query data for autocomplete
            result_dict = dict()
            allowed_elements = {"source_id": Source.source_id}

            for k, v in allowed_elements.iteritems():
                query = DBSession.query(v)
                query = query.group_by(v)
                query_result = js_list_creator(query.all())
                result_dict[k] = query_result
            # setting a different renderer
            result = render('../templates/upload_templates/update_metadata_source.pt',
                            result_dict,
                            request=request)
            response = Response(result)
            return response
        except:
            return Response(conn_err_msg, content_type='text/plain', status_int=500)
Example #35
def technical_rec_process(request, obj):
    if request.method == 'GET':
        hazard_types = DBSession.query(HazardType).order_by(HazardType.order)
        hazard_levels = []
        for level in [u'HIG', u'MED', u'LOW', u'VLO']:
            hazard_levels.append(HazardLevel.get(level))
        if obj.id is None:
            action = request.route_url('admin_technical_rec_new')
        else:
            action = request.route_url('admin_technical_rec_edit', id=obj.id)
        return {
            'obj': obj,
            'action': action,
            'hazard_types': hazard_types,
            'hazard_levels': hazard_levels,
        }

    if request.method == 'POST':
        obj.text = request.POST.get('text')
        obj.detail = request.POST.get('detail')
        if inspect(obj).transient:
            DBSession.add(obj)

        associations = request.POST.getall('associations')
        records = obj.hazardcategory_associations

        # Remove unchecked ones
        for record in records:
            if record.hazardcategory.name() not in associations:
                DBSession.delete(record)

        # Add new ones
        for association in associations:
            hazardtype, hazardlevel = association.split(' - ')
            if not obj.has_association(hazardtype, hazardlevel):
                hazardcategory = HazardCategory.get(hazardtype, hazardlevel)
                order = DBSession.query(
                        func.coalesce(
                            func.cast(
                                func.max(HcTr.order),
                                Integer),
                            0)) \
                    .select_from(HcTr) \
                    .filter(HcTr.hazardcategory_id == hazardcategory.id) \
                    .first()[0] + 1

                record = HcTr(
                    hazardcategory=hazardcategory,
                    order=order)
                obj.hazardcategory_associations.append(record)

        DBSession.flush()
        return HTTPFound(request.route_url('admin_technical_rec'))
Example #36
def aggregate(group_by=[], order_by=[('count', 'desc'),], **filters):
    contract_alias = contracts_table.table.alias('contract')
    document_alias = documents_table.table.alias('document')
    _tables = [contract_alias, document_alias]

    def field(name):
        table, column = name.split('_', 1)
        for alias in _tables:
            if alias.name == table:
               return alias.c[column]

    _fields = [
        func.count(func.distinct(contract_alias.c.id)).label('count'),
        func.sum(func.cast(contract_alias.c.total_value_cost, FLOAT)).label('contract_total_value_cost_eur'),
        func.sum(func.cast(contract_alias.c.initial_value_cost, FLOAT)).label('contract_initial_value_cost_eur'),
        func.sum(func.cast(contract_alias.c.contract_value_cost, FLOAT)).label('contract_contract_value_cost_eur')
        ]

    _filters = contract_alias.c.doc_no == document_alias.c.doc_no
    if len(filters):
        _filters = [_filters]
        for f, value in filters.items():
            _filters.append(field(f)==value)
        _filters = and_(*_filters)

    _group_by = []
    for group in group_by:
        f = field(group)
        _group_by.append(f)
        _fields.append(f)

    _order_by = []
    for field in _fields:
        for name, direction in order_by:
            if field._label == name:
                _order_by.append(field.desc() if direction == 'desc' else field.asc())

    q = select(_fields, _filters, _tables, use_labels=True,
        group_by=_group_by, order_by=_order_by)
    return engine.query(q)
Example #37
def map_csv(session):
    # use a logarithmic scale to give lesser used regions a chance
    query = select(
        columns=(MapStat.lat, MapStat.lon,
                 func.cast(func.ceil(func.log10(MapStat.value)), Integer)),
        whereclause=MapStat.value >= 2)
    result = session.execute(query).fetchall()
    rows = StringIO()
    csvwriter = csv.writer(rows)
    csvwriter.writerow(('lat', 'lon', 'value'))
    for lat, lon, value in result:
        csvwriter.writerow((lat / 1000.0, lon / 1000.0, value))
    return rows.getvalue()
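CAST(CEIL(LOG10(value)) AS INTEGER) buckets raw counts into orders of magnitude, so heavily used regions don't completely drown out the rest. The same bucketing in plain Python:

import math

def intensity(value: int) -> int:
    # mirrors CAST(CEIL(LOG10(value)) AS INTEGER); the query filters value >= 2
    return int(math.ceil(math.log10(value)))

print([(v, intensity(v)) for v in (2, 10, 99, 1000)])  # [(2, 1), (10, 1), (99, 2), (1000, 3)]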
Example #38
    def create_extract_task_instance(self, execute_date, rule_list, session=None):
        """
        create_extract_task_instance for etl.extract_rule
        :param execute_date: YYYY-MM-DD
        :param rule_list: extract rule list
        :param session:
        :return: task instance list
        """
        scheduler_time = datetime.now()
        task_ids = []
        instance_list = []
        for rule in rule_list:
            instance = TaskInstance(
                etl_day=execute_date,
                task_id=rule.dest_table,
                sub_task_id="{}".format(rule.rule_id),
                name="{}.{}".format(rule.src_db, rule.src_table),
                task_type=State.TASK_EXTRACT,
                module="bi",
                status=State.QUEUED,
                scheduler_time=scheduler_time,
                scheduler_retry=0,
                worker_retry=0
            )
            task_ids.append(instance.task_id)
            instance_list.append(instance)

        session.add_all(instance_list)
        session.commit()
        if len(task_ids) == 0:
            return instance_list

        # refresh
        task_instance = session.query(TaskInstance).filter(TaskInstance.task_id.in_(task_ids)) \
            .filter(TaskInstance.etl_day == execute_date) \
            .filter(func.cast(TaskInstance.scheduler_time, DateTime) == func.cast(scheduler_time, DateTime)) \
            .all()
        return task_instance
Example #39
    def create_retry_instance(self, timer_out=-1, max_schedule=0, session=None):
        """
        When a task instance needs to be re-scheduled, create a new instance for it.
        :param timer_out: worker execution timeout in seconds; -1 means never time out
        :param max_schedule: maximum number of retry schedules; 0 disables retries
        :param session:
        :return:
        """
        task_instance = []
        # retry scheduling is disabled; nothing to do
        if timer_out < 0 or max_schedule <= 0:
            return task_instance

        # only recreate instances whose worker-node heartbeat failed; other issues are handled by the worker itself
        now = datetime.now()

        # only re-schedule instances from the last 24 hours
        max_schedule_at = now - timedelta(hours=24)

        # heartbeat time
        need_heartbeat_at = now - timedelta(seconds=timer_out)
        instance_list = session.query(TaskInstance).filter(TaskInstance.status.in_([State.RUNNING, State.PREPAREED])) \
            .filter(func.cast(TaskInstance.heartbeat, DateTime) < func.cast(need_heartbeat_at, DateTime)) \
            .filter(func.cast(TaskInstance.scheduler_time, DateTime) > func.cast(max_schedule_at, DateTime)) \
            .all()

        task_ids = []
        for instance in instance_list:
            if (instance.scheduler_retry + 1) > max_schedule:
                log.logger.info("{} reached the max retry schedule limit of {}; stopping".format(instance, max_schedule))
                continue
            retry_instance = TaskInstance(
                etl_day=instance.etl_day,
                task_id=instance.task_id,
                sub_task_id=instance.sub_task_id,
                name=instance.name,
                task_type=instance.task_type,
                module=instance.module,
                status=State.QUEUED,
                scheduler_time=now,
                scheduler_retry=instance.scheduler_retry + 1,
                worker_retry=instance.worker_retry,
            )
            session.add(retry_instance)
            task_ids.append(retry_instance.task_id)
            session.query(TaskInstance).filter(TaskInstance.id == instance.id)\
                .update({TaskInstance.status: State.ABANDONNED})

        session.commit()

        # refresh
        if len(task_ids) > 0:
            task_instance = session.query(TaskInstance).filter(TaskInstance.task_id.in_(task_ids)) \
                .filter(func.cast(TaskInstance.scheduler_time, DateTime) == func.cast(now, DateTime)) \
                .all()
        return task_instance
Example #40
    def __enter__(self):
        """
        Add a table (prefixed with n_) to the database
        with one record for each record found in the staging table
        with a hash not present in the existing table.
        If there are no such records, do nothing.
        """

        # create n_table with point_date, geom, and id columns
        s = self.staging
        e = self.existing
        d = self.dataset

        derived_dates = func.cast(s.c[d.date], TIMESTAMP).label('point_date')
        derived_geoms = self._geom_col()
        cols_to_insert = [s.c['hash'], derived_dates, derived_geoms]

        # Select the hash and the columns we're deriving from the staging table.
        sel = select(cols_to_insert)
        # And limit our results to records
        # whose hashes aren't already present in the existing table.
        sel = sel.select_from(s.outerjoin(e, s.c['hash'] == e.c['hash'])).\
            where(e.c['hash'] == None)

        # Drop the table first out of healthy paranoia
        self._drop()
        try:
            self.table.create(bind=postgres_engine)
        except Exception as e:
            raise PlenarioETLError(repr(e) +
                                   '\nCould not create table n_' + d.name)

        ins = self.table.insert().from_select(cols_to_insert, sel)
        # Populate it with records from our select statement.
        try:
            postgres_engine.execute(ins)
        except Exception as e:
            raise PlenarioETLError(repr(e) + '\n' + str(sel))
        else:
            # Would be nice to check if we have new records or not right here.
            return self
Example #41
def update_metadata_msrun(request):
    if "filename" in request.params:
        result_dict = dict()
        result_dict["filename"] = request.params['filename']
        query = DBSession.query(MsRun.filename, MsRun.used_share,
                                MsRun.comment, MsRun.source_source_id,
                                MsRun.sample_mass, MsRun.sample_volume,
                                MsRun.antibody_set, MsRun.antibody_mass,
                                func.cast(MsRun.ms_run_date, String).label("ms_run_date")
        ).filter(MsRun.filename==request.params["filename"])

        ms_run = json.dumps(query.all())
        result_dict['ms_run'] = ms_run

        # getting autocomplete items
        allowed_elements = {"used_share": MsRun.used_share, "comment": MsRun.comment,
                            "sample_mass": MsRun.sample_mass, "antibody_set": MsRun.antibody_set,
                            "antibody_mass": MsRun.antibody_mass, "sample_volume": MsRun.sample_volume,
                            "source_source_id": Source.source_id}
        for k, v in allowed_elements.iteritems():
            query = DBSession.query(v)
            query = query.group_by(v)
            query_result = js_list_creator(query.all())
            result_dict[k] = query_result
        #result_dict['original'] = source

        return result_dict

    else:
        try:
            # query data for autocomplete
            result_dict = dict()
            result_dict["filename"] = js_list_creator(DBSession.query(MsRun.filename).filter(MsRun.flag_trash==0).group_by(MsRun.filename).all())

            # setting a different renderer
            result = render('../templates/upload_templates/update_metadata_msrun.pt',
                            result_dict,
                            request=request)
            response = Response(result)
            return response
        except:
            return Response(conn_err_msg, content_type='text/plain', status_int=500)
Example #42
    def get_query(self, date_from, date_to):
        from rockpack.mainsite.services.video.models import Video, VideoInstance
        from rockpack.mainsite.services.user.models import UserActivity
        from rockpack.mainsite.services.share.models import ShareLink

        activity = readonly_session.query(
            UserActivity.object_id.label('video_instance'),
            UserActivity.action.label('action'),
            literal(0).label('click_count')
        ).filter(
            UserActivity.object_type == 'video_instance',
            UserActivity.date_actioned.between(date_from, date_to)
        )
        shares = readonly_session.query(
            ShareLink.object_id.label('video_instance'),
            literal('share').label('action'),
            ShareLink.click_count.label('click_count')
        ).filter(
            ShareLink.object_type == 'video_instance',
            ShareLink.date_created.between(date_from, date_to)
        )
        activity = activity.union_all(shares).subquery()

        query = readonly_session.query(
            Video.id,
            Video.title,
            func.count(activity.c.video_instance),
            func.count(VideoInstance.id.distinct()),
            *([func.sum(func.cast(activity.c.action == c, Integer)) for c in self.counts[2:-1]] +
              [func.sum(activity.c.click_count)])
        ).join(
            VideoInstance, VideoInstance.video == Video.id
        ).join(
            activity, activity.c.video_instance == VideoInstance.id
        ).group_by(
            Video.id
        )

        return query
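func.sum(func.cast(activity.c.action == c, Integer)) is a compact conditional count: each boolean comparison is cast to 0/1 and summed, producing one counter per action type. A sketch of the expression and a portable CASE-based equivalent (invented column; the positional case() form is SQLAlchemy 1.4+):

from sqlalchemy import Integer, case, cast, func
from sqlalchemy.sql import column

action = column('action')  # hypothetical
# boolean cast to 0/1, then summed (works where booleans cast to integers):
print(func.sum(cast(action == 'view', Integer)))
# portable equivalent using CASE:
print(func.sum(case((action == 'view', 1), else_=0)))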
Example #43
def sensord(id, start, end, aft_name):
    functions = aft[aft_name]
    table = inspect(DataTable).mapped_table

    fields = list()
    for agg_func in functions:
        agg_func_name = str(agg_func()).replace('()', '')
        fields.append(func.cast(agg_func(DataTable.value), types.Integer).label(agg_func_name))

    per_seconds = (end - start).total_seconds() / 100
    ots = func.to_timestamp(func.round(func.extract('epoch', DataTable.timestamp) / per_seconds) * per_seconds).label('timestamp')

    if id == 0:
        qry = g.session.query(ots, *fields) \
                       .filter(DataTable.probe == 1)    # TODO: get probe 1
    else:
        qry = g.session.query(ots, *fields) \
                       .filter(DataTable.probe == id)

    qry = qry.filter(table.c.timestamp >= start, table.c.timestamp <= end) \
             .group_by(ots) \
             .order_by(ots)
    return qry
Example #44
def source_overview(request):
    try:
        # TODO: Update
        # query Sources
        your_json = json.dumps(
            DBSession.query(
                Source.source_id,
                Source.patient_id,
                Source.organ,
                Source.dignity,
                Source.celltype,
                Source.histology,
                Source.location,
                Source.metastatis,
                Source.organism,
                Source.treatment,
                Source.person,
                func.cast(Source.prep_date, String).label("prep_date"),
            ).all()
        )
    except DBAPIError:
        return Response(conn_err_msg, content_type="text/plain", status_int=500)
    return {"project": your_json}
Example #45
def run_overview(request):
    try:
        # query MS runs. Important: run_date has to be cast to String, otherwise json.dumps cannot serialize it
        your_json = json.dumps(
            DBSession.query(
                MsRun.ms_run_id,
                MsRun.filename,
                Source.patient_id,
                Source.organ,
                Source.dignity,
                func.cast(MsRun.ms_run_date, String).label("ms_run_date"),
                MsRun.antibody_set,
                MsRun.used_share,
                MsRun.sample_mass,
            )
            .join(Source)
            .group_by(MsRun.ms_run_id)
            .all()
        )

    except DBAPIError:
        return Response(conn_err_msg, content_type="text/plain", status_int=500)
    return {"project": your_json}
Example No. 46
def aggregate(group_by=[], order_by=[('total_value_cost_eur', 'desc'), ], _filters=[],
              limit=PAGE_SIZE, offset=0):
    _filters = list(_filters)

    _fields = [
        func.count(func.distinct(contract_alias.c.id)).label('count'),
        func.sum(func.cast(contract_alias.c.total_value_cost_eur, FLOAT)).label('total_value_cost_eur'),
        #func.sum(func.cast(contract_alias.c.initial_value_cost_eur, FLOAT)).label('initial_value_cost_eur'),
        #func.sum(func.cast(contract_alias.c.contract_value_cost_eur, FLOAT)).label('contract_value_cost_eur')
        ]

    _filters.append(contract_alias.c.doc_no == document_alias.c.doc_no)
    _filters.append(contract_alias.c.total_value_cost_eur != None)
    _filters.append(contract_alias.c.total_value_currency == 'EUR')
    _group_by = []
    for group in group_by:
        f = name_to_field(group)
        if f is not None:
            _group_by.append(f)
            _fields.append(f)
            _filters.append(f != None)  # SQLAlchemy renders != None as IS NOT NULL

    # Combine all filters only after the group-by fields have added theirs
    _filters = and_(*_filters)

    _order_by = []
    for field in _fields:
        for name, direction in order_by:
            if field._label == name:
                _order_by.append(field.desc().nullslast() if direction == 'desc' else field.asc())

    q = select(_group_by, _filters, _tables, use_labels=True,
               group_by=_group_by).alias('foo')
    count = list(engine.query(q.count())).pop().values().pop()
    q = select(_fields, _filters, _tables, use_labels=True,
               group_by=_group_by, order_by=_order_by,
               limit=limit, offset=offset)
    return count, engine.query(q)
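
The func.cast(..., FLOAT) calls exist because the contract values are evidently stored as text; casting inside SQL lets the database do the summation. A hedged sketch of the pattern on a throwaway table (all names are hypothetical):

# Sketch: SUM over a text-typed column by casting it to FLOAT in SQL first.
from sqlalchemy import FLOAT, Column, Integer, MetaData, String, Table, create_engine, func, select

engine = create_engine('sqlite://')
metadata = MetaData()
contracts = Table('contracts', metadata,  # hypothetical table
                  Column('id', Integer, primary_key=True),
                  Column('total_value', String))  # numeric value stored as text
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(contracts.insert(), [{'total_value': '10.5'}, {'total_value': '4.5'}])
    total = conn.execute(select(func.sum(func.cast(contracts.c.total_value, FLOAT)))).scalar()
    print(total)  # 15.0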
Example No. 47
def derive_office_data(obj, office_dict, sess):
    """ Deriving office data

        Args:
            obj: a dictionary containing the details we need to derive from and to
            office_dict: a dictionary containing all the data for Office objects keyed by office code
            sess: the current DB session
    """
    # If we don't have an awarding or funding office code, we need to copy it from the earliest transaction of that award
    if not obj['awarding_office_code'] or not obj['funding_office_code']:
        first_transaction = None
        pafa = PublishedAwardFinancialAssistance
        if obj['record_type'] == 1:
            # Get the minimum action date for this uri/AwardingSubTierCode combo
            min_action_date = sess.query(func.min(pafa.action_date).label("min_date")). \
                filter(pafa.uri == obj['uri'], pafa.awarding_sub_tier_agency_c == obj['awarding_sub_tier_agency_c'],
                       pafa.is_active.is_(True), pafa.record_type == 1).one()
            # If we have a minimum action date, get the office codes for the first entry that matches it
            if min_action_date.min_date:
                first_transaction = sess.query(pafa.awarding_office_code, pafa.funding_office_code,
                                               pafa.award_modification_amendme).\
                    filter(pafa.uri == obj['uri'], pafa.is_active.is_(True),
                           pafa.awarding_sub_tier_agency_c == obj['awarding_sub_tier_agency_c'],
                           # cast_as_date is not a SQLAlchemy builtin; func.* emits any SQL
                           # function by name, so this appears to call a database-defined cast_as_date()
                           func.cast_as_date(pafa.action_date) == min_action_date.min_date,
                           pafa.record_type == 1).first()
        else:
            # Get the minimum action date for this fain/AwardingSubTierCode combo
            min_action_date = sess.query(func.min(func.cast(pafa.action_date, DATE)).label("min_date")).\
                filter(pafa.fain == obj['fain'], pafa.awarding_sub_tier_agency_c == obj['awarding_sub_tier_agency_c'],
                       pafa.is_active.is_(True), pafa.record_type != 1).one()
            # If we have a minimum action date, get the office codes for the first entry that matches it
            if min_action_date.min_date:
                first_transaction = sess.query(pafa.awarding_office_code, pafa.funding_office_code,
                                               pafa.award_modification_amendme).\
                    filter(pafa.fain == obj['fain'], pafa.is_active.is_(True),
                           pafa.awarding_sub_tier_agency_c == obj['awarding_sub_tier_agency_c'],
                           func.cast_as_date(pafa.action_date) == min_action_date.min_date,
                           pafa.record_type != 1).first()

        # If we managed to find a transaction, copy the office codes into it. Don't copy if the mod is the same because
        # we don't want to auto-fill the base record for an award.
        if first_transaction and first_transaction.award_modification_amendme != obj['award_modification_amendme']:
            # No need to copy if new code isn't blank or old code is
            if not obj['awarding_office_code'] and first_transaction.awarding_office_code:
                # Make sure the code we're copying is a valid awarding office code
                award_office = office_dict.get(first_transaction.awarding_office_code)
                if award_office and award_office['grant_office']:
                    obj['awarding_office_code'] = first_transaction.awarding_office_code
            if not obj['funding_office_code'] and first_transaction.funding_office_code:
                # Make sure the code we're copying is a valid funding office code
                fund_office = office_dict.get(first_transaction.funding_office_code)
                if fund_office and fund_office['funding_office']:
                    obj['funding_office_code'] = first_transaction.funding_office_code

    # Deriving awarding_office_name based off awarding_office_code
    awarding_office_data = office_dict.get(obj['awarding_office_code'])
    if awarding_office_data:
        obj['awarding_office_name'] = awarding_office_data['office_name']
    else:
        obj['awarding_office_name'] = None

    # Deriving funding_office_name based off funding_office_code
    funding_office_data = office_dict.get(obj['funding_office_code'])
    if funding_office_data:
        obj['funding_office_name'] = funding_office_data['office_name']
    else:
        obj['funding_office_name'] = None
Example No. 48
def daily_profit_select():
    return select([
        func.cast(
            func.sum((SalesOrderLine.unit_price - Product.purchase_price) * SalesOrderLine.quantity)
            / func.greatest(func.cast(func.date_part('DAY', func.current_date() - Product.create_date), Integer), 1),
            Numeric)
    ]).as_scalar()
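
The func.greatest(..., 1) wrapper guards the division: for a product created today the day count is 0, and clamping the divisor to at least 1 avoids a division-by-zero error. The same guard reappears in Example No. 51 below. A standalone sketch of the guard, compiled against the PostgreSQL dialect (the products table is hypothetical):

# Sketch: clamp a day-count divisor to >= 1 with GREATEST to avoid division by zero.
from sqlalchemy import Column, Date, Integer, MetaData, Table, func, select
from sqlalchemy.dialects import postgresql

metadata = MetaData()
products = Table('products', metadata,  # hypothetical table
                 Column('id', Integer, primary_key=True),
                 Column('create_date', Date))

days_alive = func.greatest(
    func.cast(func.date_part('DAY', func.current_date() - products.c.create_date), Integer),
    1)
stmt = select(func.count() / days_alive).select_from(products)
print(stmt.compile(dialect=postgresql.dialect()))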
Example No. 49
    def test_non_functions(self):
        expr = func.cast("foo", Integer)
        self.assert_compile(expr, "CAST(:param_1 AS INTEGER)")

        expr = func.extract("year", datetime.date(2010, 12, 5))
        self.assert_compile(expr, "EXTRACT(year FROM :param_1)")
Example No. 50
def fraction(cls):
    return func.IF(func.ABS(cls.total_time) > 0,
                   func.cast(cls.online_time, Float) /
                   func.cast(cls.total_time, Float), 0)
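
Here func.IF emits MySQL's IF() function to return 0 instead of dividing by a zero total_time. On backends without IF(), the same guard can be written portably with a CASE expression; a hedged sketch (the arguments stand in for cls.online_time and cls.total_time):

# Sketch: portable CASE-based equivalent of the MySQL IF() division guard above.
from sqlalchemy import Float, case, func

def safe_fraction(online_time, total_time):
    # online_time / total_time, or 0 when total_time is 0
    return case(
        (func.abs(total_time) > 0,
         func.cast(online_time, Float) / func.cast(total_time, Float)),
        else_=0,
    )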
Example No. 51
def daily_amount_select():
    return select([
        func.cast(
            func.sum(SalesOrderLine.unit_price * SalesOrderLine.quantity)
            / func.greatest(func.cast(func.date_part('DAY', func.current_date() - Supplier.create_date), Integer), 1),
            Integer)
    ]).as_scalar()
Example No. 52
reports_mapper = mapper(Report, reports_table)

# Create session
Session = sessionmaker(bind=db)
session = Session()

# How many reports
layout_count = session.query(Layout).count()
report_count = session.query(Report).count()
incident_count = session.query(Incident).count()
print "Layout rows: %s" % layout_count
print "Report rows: %s" % report_count
print "Incident rows: %s" % incident_count

# Year selection (strftime is SQLite's date-formatting function; its text result is cast to an integer)
year = func.cast(func.strftime("%Y", Incident.Date_Inc), Integer).label('year')
# Common filter to get specific data
common_filter = year >= 2000
# Some shared values.  This one gets individual incidents and key fields.
common_query_desc = session.query(
  Incident.Rpt_Num,
  Incident.Date_Inc, Incident.Time_Inc,
  Incident.Inc_City, Incident.Inc_St, Incident.Inc_Zip,
  Incident.Orig_City, Incident.Orig_State, Incident.Orig_Zip,
  Incident.Dest_City, Incident.Dest_State, Incident.Dest_Zip,
  Incident.grouped_C_R_Name, Incident.grouped_Ship_Name,
  Incident.Mode_Transpo,
  Incident.What_Failed_Desc, Incident.How_Failed_Desc,
  Incident.grouped_Commod_Long_Name, Incident.Quant_Released, Incident.Unit_of_Measure,
  Incident.Explosion_Result_Ind, Incident.Spill_Result_Ind,
  Incident.Fire_Result_Ind, Incident.Gas_Dispersion_Result_Ind,
Example No. 53
            table.extend(
                readonly_session.query(AppDownloadRecord).filter_by(action=action).
                group_by('1').order_by('1').
                values(AppDownloadRecord.date, func.sum(AppDownloadRecord.count))
            )
            ctx['%s_data' % action] = table.encode()
        table = Table([dict(id='date', type=date)] +
                      [dict(id=i, type=long) for i in ('Total', 'US', 'UK', 'Other')])
        table.extend(
            readonly_session.query(User).filter(
                User.date_joined > LAUNCHDATE, User.refresh_token != None).
            group_by('1').order_by('1').
            values(
                func.date(User.date_joined),
                func.count('*'),
                func.sum(func.cast(User.locale == 'en-us', Integer)),
                func.sum(func.cast(User.locale == 'en-gb', Integer)),
                func.sum(func.cast(User.locale.notin_(('en-us', 'en-gb')), Integer)),
            )
        )
        ctx['reg_data'] = table.encode()
        return self.render('admin/app_stats.html', **ctx)


class ActivityStatsView(StatsView):
    @expose('/')
    def index(self):
        from gviz_data_table import Table
        from rockpack.mainsite.services.user.models import UserActivity
        table = Table([dict(id='date', type=date)] +
                      [dict(id=i, type=long) for i in
Example No. 54
def _cols(self, Channel, User, activity):
    return ([User.id, User.username, func.count()] +
            [func.sum(func.cast(activity.c.action == c, Integer)) for c in self.counts[1:-1]] +
            [func.sum(activity.c.click_count)])