def get_grade_info(self, all_snapshots):
        grade = all_snapshots.c.grade
        graduation_date = all_snapshots.c.graduation_date
        school_year = all_snapshots.c.school_year
        status = all_snapshots.c.status
        student_lookup = all_snapshots.c.student_lookup
        withdraw_reason = all_snapshots.c.withdraw_reason

        end_grade = sql.case([(db_func.max(grade) > 12, 12)],
                             else_=db_func.max(grade))
        start_grade = sql.case([(db_func.min(grade) > 12, 12)],
                               else_=db_func.min(grade))

        return \
            sql.select([
                student_lookup,
                (end_grade - start_grade).label('num_grades'),
                (db_func.max(school_year) - db_func.min(school_year)).label('num_hs_years'),
                start_grade.label('start_grade'),
                end_grade.label('end_grade'),
                db_func.min(school_year).label('start_year'),
                db_func.max(school_year).label('end_year'),
                db_func.min(graduation_date).label('graduation_date'),
                db_func.array_agg(sql.distinct(status)).label('statuses'),
                db_func.array_agg(sql.distinct(withdraw_reason)).label('withdraw_reasons'),
                db_func.array_agg(sql.distinct(sql.func.substr(withdraw_reason, 1, 7))).label('withdraw_reasons_short'),
            ]).\
            where(
                grade >= 9
            ).\
            group_by(
                student_lookup
            )
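
The snippet above appears to assume `sql` is SQLAlchemy's expression module and `db_func` its function namespace. A minimal, self-contained sketch of the same clamp-to-12 pattern against a hypothetical table (all names below are illustrative, not from the source):

from sqlalchemy import Column, Integer, MetaData, Table, case, func, select

metadata = MetaData()
snapshots = Table(
    "snapshots", metadata,
    Column("student_lookup", Integer),
    Column("grade", Integer),
)

grade = snapshots.c.grade
# The CASE caps the aggregated grade at 12, so post-12 codes collapse to senior year.
end_grade = case([(func.max(grade) > 12, 12)], else_=func.max(grade))
query = (
    select([snapshots.c.student_lookup, end_grade.label("end_grade")])
    .group_by(snapshots.c.student_lookup)
)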
Example #2
File: maps.py Project: munin/merlin
    def fleet_overview(self):
        if self.scantype not in ("J",):
            return

        from sqlalchemy.sql.functions import min, sum

        f = aliased(FleetScan)
        a = aliased(FleetScan)
        d = aliased(FleetScan)

        Q = session.query(
            f.landing_tick,
            f.landing_tick - min(Scan.tick),
            count(a.id),
            coalesce(sum(a.fleet_size), 0),
            count(d.id),
            coalesce(sum(d.fleet_size), 0),
        )
        Q = Q.join(f.scan)
        Q = Q.filter(f.scan == self)

        Q = Q.outerjoin((a, and_(a.id == f.id, a.mission.ilike("Attack"))))
        Q = Q.outerjoin((d, and_(d.id == f.id, d.mission.ilike("Defend"))))

        Q = Q.group_by(f.landing_tick)
        Q = Q.order_by(asc(f.landing_tick))
        return Q.all()
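
Plausible imports for the snippet above, inferred from usage (the original file presumably defines `session`, `Scan`, `FleetScan`, and `count` elsewhere; these exact locations are an assumption):

from sqlalchemy import and_, asc
from sqlalchemy.orm import aliased
from sqlalchemy.sql.functions import coalesce, count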
Example #3
def testing_function_3(query_module):
    models_module = sqlalchemy_models
    query = query_module.get_query(min(models_module.Author.id))

    rows = query.all()
    result = list(map(extract_row, rows))  # materialize so str() shows the rows, not a map object
    return str(result)
Example #4
def powa_getstatdata_db():
    base_query = powa_base_statdata_db()
    diffs = get_diffs_forstatdata()
    return (select([column("dbid")] + diffs)
            .select_from(base_query)
            .group_by(column("dbid"))
            .having(max(column("calls")) - min(column("calls")) > 0))
Example #5
def report_root_ids(connection, root_ids):
    rep = []

    subq = sa.alias(find_children(root_ids))
    q = (sa.select([
        model.paths.c.uid.label('uid'),
        model.paths.c.gid.label('gid'),
        safunc.count().label('inodes'),
        safunc.coalesce(safunc.sum(model.paths.c.size), 0).label('size'),
        safunc.min(model.paths.c.last_seen).label('last seen'),
    ]).select_from(
        model.paths.join(subq, subq.c.id == model.paths.c.id)).group_by(
            model.paths.c.uid, model.paths.c.gid).order_by(sa.desc('size')))

    for u in connection.execute(q):
        u = dict(u)
        u['user'] = pwd.getpwuid(u['uid']).pw_name
        u['cn'] = pwd.getpwuid(u['uid']).pw_gecos
        u['group'] = grp.getgrgid(u['gid']).gr_name
        if u['last seen'] is not None:
            u['last seen'] = datetime.fromtimestamp(u['last seen'])

        rep.append(u)

    return rep
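
Plausible imports for the snippet above, inferred from usage (`model`, `find_children`, and the `paths` table are assumed to come from the surrounding project):

import grp
import pwd
from datetime import datetime

import sqlalchemy as sa
from sqlalchemy import func as safunc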
Example #6
    def _get_active_pending_versions(self, mappedClass, uid):
        """
        Returns the current active version and the pending version to review
        """
        # TODO: Is this still needed?

        def _check_mandatory_keys():
            mandatory_keys = get_mandatory_keys(self.request, 'a')
            log.debug(mandatory_keys)

        # Get the current active version number
        av = Session.query(
            mappedClass.version
        ).\
            filter(mappedClass.identifier == uid).\
            filter(mappedClass.fk_status == 2).\
            first()
        active_version = av.version if av is not None else None

        # Get the lowest pending version
        pv = Session.query(min(mappedClass.version)).\
            filter(mappedClass.identifier == uid).\
            filter(mappedClass.fk_status == 1).\
            first()
        pending_version = pv[0] if pv is not None else None  # row from query(min(...)) has no .version attribute

        # Some logging
        log.debug("active version: %s" % active_version)
        log.debug("pending version: %s" % pending_version)

        return active_version, pending_version
Example #7
File: maps.py Project: munin/merlin
 def caprate(self, attacker=None):
     maxcap = PA.getfloat("roids", "maxcap")
     mincap = PA.getfloat("roids", "mincap")
     if not attacker or not self.value:
         return maxcap
     modifier = (float(self.value) / float(attacker.value)) ** 0.5
     return max(mincap, min(maxcap * modifier, maxcap))
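
A worked example of the clamping arithmetic, with made-up numbers (purely illustrative, not from the source):

# maxcap = 0.25, mincap = 0.05, self.value = 100_000, attacker.value = 400_000
# modifier = (100_000 / 400_000) ** 0.5 = 0.5
# caprate  = max(0.05, min(0.25 * 0.5, 0.25)) = 0.125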
Example #8
async def service_list(request):
    company = request['company']
    pagination, offset = get_pagination(request)

    where = ser_c.company == company.id, apt_c.start > _today()
    q1 = (
        select([ser_c.id, ser_c.name, ser_c.colour, ser_c.extra_attributes, sql_f.min(apt_c.start).label('min_start')])
        .select_from(sa_appointments.join(sa_services))
        .where(and_(*where))
        .group_by(ser_c.id)
        .alias('q1')
    )

    conn = await request['conn_manager'].get_connection()
    results = [
        dict(row)
        async for row in conn.execute(
            select([q1.c.id, q1.c.name, q1.c.colour, q1.c.extra_attributes])
            .select_from(q1)
            .order_by(q1.c.min_start)
            .offset(offset)
            .limit(pagination)
        )
    ]

    cur_count = await conn.execute(
        select([sql_f.count(distinct(ser_c.id))]).select_from(sa_appointments.join(sa_services)).where(and_(*where))
    )

    return json_response(
        request,
        results=results,
        count=(await cur_count.first())[0],
    )
Example #9
    def fetch_transactions(self, start_time, end_time):

        session = self.__session()

        try:

            transactions = session.query(
                Transaction.tx_site,
                Transaction.tx_code,
                functions.count(Transaction.tx_time).label('tx_size'),
                functions.min(Transaction.tx_time).label('tx_time_min'),
                functions.max(Transaction.tx_time).label('tx_time_max'),
                functions.sum(Transaction.tx_inst).label('tx_net_inst'),
                functions.sum(Transaction.tx_fund).label('tx_net_fund'),
                functions.sum(func.abs(
                    Transaction.tx_inst)).label('tx_grs_inst'),
                functions.sum(func.abs(
                    Transaction.tx_fund)).label('tx_grs_fund'),
            ).filter(Transaction.tx_time >= start_time,
                     Transaction.tx_time < end_time).group_by(
                         Transaction.tx_site,
                         Transaction.tx_code,
                     ).subquery()

            inst = aliased(Evaluation, name='ev_inst')
            fund = aliased(Evaluation, name='ev_fund')

            results = session.query(transactions, Product, inst, fund).join(
                Product,
                and_(
                    Product.pr_site == transactions.c.tx_site,
                    Product.pr_code == transactions.c.tx_code,
                )).outerjoin(
                    inst,
                    and_(
                        inst.ev_site == Product.pr_site,
                        inst.ev_unit == Product.pr_inst,
                    )).outerjoin(
                        fund,
                        and_(
                            fund.ev_site == Product.pr_site,
                            fund.ev_unit == Product.pr_fund,
                        )).all()

        finally:

            session.close()

        dto = namedtuple(
            'TransactionDto',
            ('tx_site', 'tx_code', 'tx_size', 'tx_time_min', 'tx_time_max',
             'tx_net_inst', 'tx_net_fund', 'tx_grs_inst', 'tx_grs_fund',
             'product', 'ev_inst', 'ev_fund'))

        return [dto(*r) for r in results]
Example #10
 def stats(self):
     """
     select host, count(pk), min(created), max(created) from responses group by host;
     """
     q = select([
         responses.c.host.label('host'),
         functions.count(responses.c.pk).label('amount'),
         functions.min(responses.c.created),
         functions.max(responses.c.created),
     ]).group_by('host').order_by(desc('amount'))
     return self.db.execute(q).fetchall()
Example #11
    def on_get(self, req, resp):

        dates = (self.db_session.query(
            functions.min(ActivityLog.local_time),
            functions.max(ActivityLog.local_time),
        ).all())

        resp.body = WorkdayPublicSchema().dumps({
            'min_date': dates[0][0],
            'max_date': dates[0][1],
        })
Example #12
def powa_getwaitdata_db():
    base_query = powa_base_waitdata_db()

    return (select([
        column("dbid"),
        column("event_type"),
        column("event"),
        diff("count")
    ])
        .select_from(base_query)
        .group_by(column("dbid"), column("event_type"), column("event"))
        .having(max(column("count")) - min(column("count")) > 0))
Example #13
    def handle(self, *args, **options):
        # set up
        config = get_config()
        if config is None:
            raise CommandError(
                'Unable to process configuration file p_to_p.yml')

        connection = get_connection(config)
        pedsnet_session = init_pedsnet(connection)
        init_pcornet(connection)
        init_vocab(connection)

        pedsnet_pcornet_valueset_map = aliased(ValueSetMap)

        # extract the data from the death table
        death_cause = pedsnet_session.query(DeathPedsnet.person_id,
                                            func.left(DeathPedsnet.cause_source_value, 8),
                                            coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT'),
                                            bindparam("death_cause_type", "NI"),
                                            bindparam("death_cause_source", "L"),
                                            bindparam("death_cause_confidence", None),
                                            min(DeathPedsnet.site)
                                            ) \
            .join(Demographic, Demographic.patid == cast(DeathPedsnet.person_id, String(256)), ) \
            .join(VocabularyConcept, VocabularyConcept.concept_id == DeathPedsnet.cause_concept_id) \
            .outerjoin(pedsnet_pcornet_valueset_map,
                       and_(pedsnet_pcornet_valueset_map.source_concept_class == 'death cause code',
                            cast(VocabularyConcept.vocabulary_id, String(200)) ==
                            pedsnet_pcornet_valueset_map.source_concept_id)) \
            .filter(and_(DeathPedsnet.cause_source_value != None,
                         DeathPedsnet.cause_source_concept_id != 44814650)) \
            .group_by(DeathPedsnet.person_id, func.left(DeathPedsnet.cause_source_value, 8),
                      coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT')) \
            .all()

        # transform data to pcornet names and types
        # load to demographic table
        odo(death_cause,
            DeathCause.__table__,
            dshape=
            'var * {patid: string, death_cause: string, death_cause_code: string,'
            'death_cause_type: string, death_cause_source:string, '
            'death_cause_confidence: string, site: string}')

        # close session
        pedsnet_session.close()

        # output result
        self.stdout.ending = ''
        print('Death Cause ETL completed successfully',
              end='',
              file=self.stdout)
Example #14
    def __init__(self):
        """Find the date range and instantiate the data dictionary."""
        connect_to_db(app)

        self.min_year = db.session.query(
            cast(func.min(extract('year', MedicalCall.received_dttm)), Integer)
        ).scalar()

        self.max_year = db.session.query(
            cast(func.max(extract('year', MedicalCall.received_dttm)), Integer)
        ).scalar()

        self.data = {}
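
Plausible imports for the snippet above, inferred from usage (`app`, `db`, `connect_to_db`, and `MedicalCall` are assumed to come from the surrounding Flask project):

from sqlalchemy import Integer, cast, extract, func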
Example #15
def powa_getstatdata_detailed_db(srvid):
    base_query = powa_base_statdata_detailed_db()
    diffs = get_diffs_forstatdata()
    return (select([
        column("srvid"),
        column("queryid"),
        column("dbid"),
        column("userid"),
        column("datname"),
    ] + diffs).select_from(base_query).where(
        column("srvid") == srvid).group_by(
            column("srvid"), column("queryid"), column("dbid"),
            column("userid"), column("datname")).having(
                max(column("calls")) - min(column("calls")) > 0))
Example #16
    def handle(self, *args, **options):
        # set up
        config = get_config()
        if config is None:
            raise CommandError('Unable to process configuration file p_to_p.yml')

        connection = get_connection(config)
        pedsnet_session = init_pedsnet(connection)
        init_pcornet(connection)
        init_vocab(connection)

        pedsnet_pcornet_valueset_map = aliased(ValueSetMap)

        # extract the data from the death table
        death_cause = pedsnet_session.query(DeathPedsnet.person_id,
                                            func.left(DeathPedsnet.cause_source_value, 8),
                                            coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT'),
                                            bindparam("death_cause_type", "NI"),
                                            bindparam("death_cause_source", "L"),
                                            bindparam("death_cause_confidence", None),
                                            min(DeathPedsnet.site)
                                            ) \
            .join(Demographic, Demographic.patid == cast(DeathPedsnet.person_id, String(256)), ) \
            .join(VocabularyConcept, VocabularyConcept.concept_id == DeathPedsnet.cause_concept_id) \
            .outerjoin(pedsnet_pcornet_valueset_map,
                       and_(pedsnet_pcornet_valueset_map.source_concept_class == 'death cause code',
                            cast(VocabularyConcept.vocabulary_id, String(200)) ==
                            pedsnet_pcornet_valueset_map.source_concept_id)) \
            .filter(and_(DeathPedsnet.cause_source_value != None,
                         DeathPedsnet.cause_source_concept_id != 44814650)) \
            .group_by(DeathPedsnet.person_id, func.left(DeathPedsnet.cause_source_value, 8),
                      coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT')) \
            .all()

        # transform data to pcornet names and types
        # load to demographic table
        odo(death_cause, DeathCause.__table__,
            dshape='var * {patid: string, death_cause: string, death_cause_code: string,'
                   'death_cause_type: string, death_cause_source:string, '
                   'death_cause_confidence: string, site: string}'
            )

        # close session
        pedsnet_session.close()

        # output result
        self.stdout.ending = ''
        print('Death Cause ETL completed successfully', end='', file=self.stdout)
Example #17
def nearby_earlier(s, ajournal, constraint=None, threshold=0.3):
    ajlo = aliased(ActivityJournal)
    ajhi = aliased(ActivityJournal)
    q = s.query(ActivitySimilarity). \
                join(ajhi, ActivitySimilarity.activity_journal_hi_id == ajhi.id). \
                join(ajlo, ActivitySimilarity.activity_journal_lo_id == ajlo.id). \
                filter(or_(ActivitySimilarity.activity_journal_hi_id == ajournal.id,
                           ActivitySimilarity.activity_journal_lo_id == ajournal.id),
                       or_(ajhi.id == ajournal.id, ajhi.start < ajournal.start),
                       or_(ajhi.id == ajournal.id, ajlo.start < ajournal.start),
                       ActivitySimilarity.similarity > threshold). \
                order_by(desc(min(ajlo.start, ajhi.start)))
    if constraint:
        q = q.filter(ActivitySimilarity.constraint == constraint)
    return [(asm.activity_journal_lo if asm.activity_journal_lo != ajournal else asm.activity_journal_hi, asm)
            for asm in q.all()]
Example #18
    def handle(self, *args, **options):
        # set up
        config = get_config()
        if config is None:
            raise CommandError(
                'Unable to process configuration file p_to_p.yml')

        connection = get_connection(config)
        pedsnet_session = init_pedsnet(connection)
        init_pcornet(connection)

        pedsnet_pcornet_valueset_map = aliased(ValueSetMap)

        # extract the data from the death table
        death_pedsnet = pedsnet_session.query(DeathPedsnet.death_date,
                                              coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT'),
                                              bindparam("death_match_confidence", None),
                                              bindparam("death_source", "L"),
                                              DeathPedsnet.person_id,
                                              min(DeathPedsnet.site)
                                              ). \
            outerjoin(pedsnet_pcornet_valueset_map,
                      and_(pedsnet_pcornet_valueset_map.source_concept_class == 'Death date impute',
                           cast(DeathPedsnet.death_impute_concept_id, String(200)) ==
                           pedsnet_pcornet_valueset_map.source_concept_id)) \
            .filter(and_(exists().where(DeathPedsnet.person_id == PersonVisit.person_id),
                         DeathPedsnet.death_type_concept_id == 38003569)) \
            .group_by(DeathPedsnet.person_id, DeathPedsnet.death_date,
                      coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT')) \
            .all()

        # transform data to pcornet names and types
        # load to demographic table
        odo(death_pedsnet,
            DeathPcornet.__table__,
            dshape=
            'var * {death_date: date, death_date_impute: string, death_match_confidence: string,'
            'death_source: string, patid:string, site: string}')

        # close session
        pedsnet_session.close()

        # output result
        self.stdout.ending = ''
        print('Death ETL completed successfully', end='', file=self.stdout)
Example #19
    def handle(self, *args, **options):
        # set up
        config = get_config()
        if config is None:
            raise CommandError('Unable to process configuration file p_to_p.yml')

        connection = get_connection(config)
        pedsnet_session = init_pedsnet(connection)
        init_pcornet(connection)

        pedsnet_pcornet_valueset_map = aliased(ValueSetMap)

        # extract the data from the death table
        death_pedsnet = pedsnet_session.query(DeathPedsnet.death_date,
                                              coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT'),
                                              bindparam("death_match_confidence", None),
                                              bindparam("death_source", "L"),
                                              DeathPedsnet.person_id,
                                              min(DeathPedsnet.site)
                                              ). \
            outerjoin(pedsnet_pcornet_valueset_map,
                      and_(pedsnet_pcornet_valueset_map.source_concept_class == 'Death date impute',
                           cast(DeathPedsnet.death_impute_concept_id, String(200)) ==
                           pedsnet_pcornet_valueset_map.source_concept_id)) \
            .filter(and_(exists().where(DeathPedsnet.person_id == PersonVisit.person_id),
                         DeathPedsnet.death_type_concept_id == 38003569)) \
            .group_by(DeathPedsnet.person_id, DeathPedsnet.death_date,
                      coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT')) \
            .all()

        # transform data to pcornet names and types
        # load to demographic table
        odo(death_pedsnet, DeathPcornet.__table__,
            dshape='var * {death_date: date, death_date_impute: string, death_match_confidence: string,'
                   'death_source: string, patid:string, site: string}'
            )

        # close session
        pedsnet_session.close()

        # output result
        self.stdout.ending = ''
        print('Death ETL completed successfully', end='', file=self.stdout)
Example #20
def summarise_tag(conn, tag, config):
    """
    Provide a summary of a single tag
    """
    path_ids = [get_path_id(p, conn) for p in config.get('paths', [])]
    q = find_children(path_ids)
    q = q.with_only_columns([
        safunc.count().label('inodes'),
        safunc.coalesce(safunc.sum(model.paths.c.size), 0).label('size'),
        safunc.min(model.paths.c.last_seen).label('last seen'),
    ])

    r = conn.execute(q).first()
    r = dict(r)

    if r['last seen'] is not None:
        r['last seen'] = datetime.fromtimestamp(r['last seen'])

    return tag, r
Example #21
 def heappop(self):
     """Pop the smallest item off the heap, maintaining the heap invariant."""
     row = self.conn.execute(
         select([
             self.heapq.c.id,
             min(self.heapq.c.cost).label("min_cost"), self.heapq.c.count,
             self.heapq.c.data
         ]).where(self.heapq.c.instance == self.instance).group_by(
             self.heapq.c.id, self.heapq.c.count,
             self.heapq.c.data)).fetchone()
     if row is not None:
         self.conn.execute(
             delete(self.heapq).where(self.heapq.c.id == row.id))
         logger_memory.info(
             f"HeapSQL: heappop min_cost: {row.min_cost} count: {row.count} "
         )
         return row.min_cost, row.count, pickle.loads(row.data)
     else:
         return None, None, None
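
Plausible imports for the heap snippet above, inferred from usage (`self.conn`, `self.heapq`, `self.instance`, and `logger_memory` are assumed to be set up elsewhere in the class):

import pickle
from sqlalchemy import delete, select
from sqlalchemy.sql.functions import min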
Example #22
    def __init__(self,
                 tract_id=None,
                 min_date=None,
                 max_date=None):
        connect_to_db(app)

        if not min_date:
            min_date = db.session.query(
                func.min(MedicalCall.received_dttm)
            ).first()[0]

        if not max_date:
            max_date = db.session.query(
                func.max(MedicalCall.received_dttm)
            ).first()[0]

        self.min_date = min_date
        self.max_date = max_date

        if tract_id:
            self.tract = tract_id
Example #23
    def get_states(
        self,
        *,
        size: Optional[int] = None,
        offset: int = 0,
        selected_actors: Optional[List[str]] = None,
        selected_statuses: Optional[List[str]] = None,
        selected_message_ids: Optional[List[str]] = None,
        selected_composition_ids: Optional[List[str]] = None,
        start_datetime: Optional[datetime.datetime] = None,
        end_datetime: Optional[datetime.datetime] = None,
        sort_column: Optional[str] = None,
        sort_direction: Optional[str] = None,
    ):
        sort_column = sort_column or "enqueued_datetime"
        sort_direction = sort_direction or "desc"

        with self.client.begin() as session:
            query = session.query(StoredState)
            query = filter_query(
                query=query,
                selected_actors=selected_actors,
                selected_statuses=selected_statuses,
                selected_message_ids=selected_message_ids,
                selected_composition_ids=selected_composition_ids,
                start_datetime=start_datetime,
                end_datetime=end_datetime,
            )
            if size is not None:
                query = query.subquery()
                query_group = (session.query(
                    max(query.c.composition_id).label(
                        "grouped_composition_id"),
                    max(query.c.message_id).label("grouped_message_id"),
                    max(query.c.status).label("grouped_status"),
                    max(query.c.actor_name).label("grouped_actor_name"),
                    max(query.c.priority).label("grouped_priority"),
                    func.avg(query.c.progress).label("grouped_progress"),
                    min(query.c.enqueued_datetime).label(
                        "grouped_enqueued_datetime"),
                    min(query.c.started_datetime).label(
                        "grouped_started_datetime"),
                    max(query.c.end_datetime).label("grouped_end_datetime"),
                    max(query.c.queue_name).label("grouped_queue_name"),
                ).group_by(coalesce(
                    query.c.composition_id, query.c.message_id)).order_by(
                        text(f"grouped_{sort_column} {sort_direction}")))
                query_group = query_group.offset(offset).limit(size).subquery()
                query = (
                    session.query(StoredState).select_from(StoredState).join(
                        query_group,
                        or_(
                            StoredState.message_id ==
                            query_group.c.grouped_message_id,
                            StoredState.composition_id ==
                            query_group.c.grouped_composition_id,
                        ),
                    ))
            query = query.order_by(text(f"{sort_column} {sort_direction}"))
            return [
                state_model.as_state(self.encoder) for state_model in query
            ]
Example #24
def diff(var):
    return (max(column(var)) - min(column(var))).label(var)
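
A hypothetical usage of diff(), mirroring the powa snippets above; the table name below is illustrative, not from the source:

from sqlalchemy import column, select, table

# Per-group delta of a cumulative "calls" counter between the oldest and
# newest snapshot rows.
query = (
    select([column("dbid"), diff("calls")])
    .select_from(table("statements_history"))
    .group_by(column("dbid"))
)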
Example #25
def total_measure_interval(column):
    return extract(
        "epoch",
        case([(min(column) == '0 second', '1 second')], else_=min(column)))
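
One reading of the CASE guard above (an interpretation, not stated in the source):

# If the smallest measured interval in the group is the literal '0 second',
# substitute '1 second' before extracting epoch seconds, so that callers
# dividing a counter delta by this interval never divide by zero.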
Example #26
File: maps.py Project: munin/merlin
 def bravery(self, target):
     bravery = (
         max(0, (min(2, float(target.value) / self.value) - 0.1) * (min(2, float(target.score) / self.score) - 0.2))
         * 10
     )
     return bravery
Example #27
def get_first_time(session):
    res = session.query(cast(min(QueueLog.time), TIMESTAMP)).first()[0]
    if res is None:
        raise LookupError('Table is empty')
    return res
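
Plausible imports for the snippet above, inferred from usage (`QueueLog` is assumed to be a mapped model from the surrounding project):

from sqlalchemy import TIMESTAMP, cast
from sqlalchemy.sql.functions import min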
Example #28
def set_score():
    if not is_request_valid(request):
        abort(400)

    commands = [
        "help",
        "score",
        "past_scores",
        "compare_scores",
        "today",
        "my_best",
        "top10",
        "leaderboard",
    ]
    user_id = request.form["user_id"]
    user_name = request.form["user_name"]
    text_input = request.form["text"]

    if text_input:
        params = text_input.split()
    else:
        return jsonify(
            response_type="ephemeral",
            type="section",
            text=
            "I didn't catch that! Type 'help' for a list of appropriate commands",
        )

    if params[0] not in commands:
        return jsonify(
            response_type="ephemeral",
            type="section",
            text=
            "I didn't catch that! Type 'help' for a list of appropriate commands",
        )

    if params[0] == "help":
        return jsonify(
            response_type="ephemeral",
            text=
            "Here are some commands you can try:\n *score* [set score format including backticks and hours]\n *past_scores*\n *compare_scores* [slack username]\n *today*",
        )

    if not User.query.filter_by(slack_userid=user_id).first():
        user = User(slack_userid=user_id, slack_username=user_name)
        db.session.add(user)
        db.session.commit()

    else:
        user = User.query.filter_by(slack_userid=user_id).first()

    if params[0] == "score":
        input_value = text_input.split(" `")
        try:
            value = arrow.Arrow.strptime(
                input_value[1],
                "%H hours %M minutes and %S.%f seconds`").time()
        except Exception as e:
            return jsonify(
                response_type="ephemeral",
                text=
                "*Uh oh*, I didn't catch that! Please input your score in a code block using backticks, in set score form ex: `0 hours 00 minutes and 0.00 seconds`",
            )
        score = Score(orig_input=f"`{input_value[1]}", user=user, value=value)
        db.session.add(score)
        db.session.commit()
        congrats = [
            "Good job!",
            "Well done!",
            "Very impressive.",
            "Something something blind dog sunshine something.",
        ]
        return jsonify(
            response_type="in_channel",
            text=f"Thanks *{user.slack_username}*! {random.choice(congrats)}",
        )

    if params[0] == "past_scores":
        return_text = ""
        try:
            scores = user.set_scores.all()
            print(scores)
        except Exception:
            return jsonify(
                response_type="ephemeral",
                text=
                "You don't have any scores yet! Add one by using `*/set_score*`!",
            )

        for val in scores:
            print(val.value)
            return_text += f'{val.value.strftime("`%H hours %M minutes and %S.%f seconds`")} on *{val.timestamp.strftime("%c")}*\n'

            print(
                f'{val.value.strftime("`%H hours %M minutes and %S.%f seconds`")} on *{val.timestamp.strftime("%c")}*\n'
            )

        return jsonify(
            type="section",
            response_type="ephemeral",
            text=f"Here are your past scores!\n{return_text}",
        )

    if params[0] == "compare_scores":
        if not params[1]:
            return jsonify(
                type="section",
                response_type="ephemeral",
                text=
                "*Oops!* Looks like you didn't tell me who you'd like to compare yourself to. Try `compare_scores` followed by `<slack username>`",
            )
        compare_username = params[1]
        compare_user = User.query.filter_by(
            slack_username=compare_username).first()
        if not compare_user:
            return jsonify(
                type="section",
                response_type="ephemeral",
                text=
                "*Oh no!* Either that's not a valid username, or that user hasn't played yet! Try again.",
            )
        compare_user_scores = compare_user.set_scores.all()
        # user_df = pd.read_sql('SELECT * FROM User', db.session.bind)
        # scores_df = pd.read_sql('SELECT * FROM Score', db.session.bind)
        # merge_df = pd.merge(df, df1, left_on='id', right_on='user_id')
        # print(compare_user_scores)
        return jsonify(
            type="section",
            response_type="in_channel",
            text=f"{compare_user.slack_userid, compare_user.slack_username}",
        )

    if params[0] == "my_best":
        scores = user.set_scores.order_by(Score.value).all()
        if scores:
            best_time = scores.pop(0)
            return_text = f"*{best_time.timestamp.strftime('%c')}* - {best_time.orig_input} \U0001F451 \n"
            for s in scores[1:5]:
                return_text += f"*{s.timestamp.strftime('%c')}* - {s.orig_input}\n"
            return jsonify(
                type="section",
                response_type="ephemeral",
                text=f"Your best scores at set are ~ \n\n {return_text}",
            )
        else:
            return jsonify(
                type="section",
                response_type="in_channel",
                text=
                "No scores found for you. Input your score using `/set score`.",
            )

    if params[0] == "top10":
        scores = Score.query.order_by(Score.value).all()
        if scores:
            best_time = scores.pop(0)
            return_text = f"*{best_time.timestamp.strftime('%c')}* - {best_time.user.slack_username} - {best_time.orig_input} \U0001F451 \n"
            for s in scores[0:9]:
                return_text += f"*{s.timestamp.strftime('%c')}* - {s.user.slack_username} - {s.orig_input}\n"
            return jsonify(
                type="section",
                response_type="ephemeral",
                text=f"Top 10 overall scores are ~ \n\n {return_text}",
            )
        else:
            return jsonify(
                type="section",
                response_type="ephemeral",
                text=
                "No scores found yet. Input your score using `/set score`.",
            )

    if params[0] == "leaderboard":
        scores = (Score.query.group_by(Score.user_id).having(
            min(Score.value) > 0).order_by(Score.value).all())
        if scores:
            best_time = scores.pop(0)
            return_text = f"*{best_time.timestamp.strftime('%c')}* - {best_time.user.slack_username} - {best_time.orig_input} \U0001F451 \n"
            for s in scores[0:9]:
                return_text += f"*{s.timestamp.strftime('%c')}* - {s.user.slack_username} - {s.orig_input}\n"
            return jsonify(
                type="section",
                response_type="ephemeral",
                text=f"Top 10 Personal Bests are ~ \n\n {return_text}",
            )
        else:
            return jsonify(
                type="section",
                response_type="ephemeral",
                text=
                "No scores found yet. Input your score using `/set score`.",
            )

    if params[0] == "today":
        todays_datetime = (arrow.now("US/Pacific").replace(
            hour=0, minute=0, second=0, microsecond=0).datetime)
        scores = (Score.query.filter(
            Score.timestamp >= todays_datetime).order_by(Score.value).all())
        if scores:
            winner = scores.pop(0)
            return_text = (
                f"*{winner.user.slack_username}:*   {winner.orig_input} \U0001F451 \n"
            )
            print(return_text)
            for s in scores:
                return_text += f"*{s.user.slack_username}:*   {s.orig_input}\n"
            return jsonify(
                type="section",
                response_type="in_channel",
                text=f"So far today the scores are ~ \n\n {return_text}",
            )
        else:
            return jsonify(
                type="ephemeral",
                response_type="in_channel",
                text=
                "No scores have been recorded yet today! Input your score using `/set score`.",
            )
Example #29
    def get_hint(self, flag_name: str, user_id: int):

        now = datetime.datetime.utcnow()

        if flag_name is None:
            logging.error("FLASK : get_hint provided with None flag_name.")
            return None

        session = Session()

        awake, mins = self.is_awake()
        if not awake:
            return f"The hintbot will be activated in {mins} minutes."

        # get the desired flag
        flag = session.query(Solution).filter_by(flag_name=flag_name).first()
        if flag is None:
            return f"{flag_name} is not a flag."

        # how long has it been since they last asked for a hint with this flag?
        query = session.query(sqlfunc.min(hintreqs.columns.time))
        query = query.filter(hintreqs.columns.solu_id == flag.solu_id,
                             hintreqs.columns.user_id == user_id)
        first_req_time = query.first()[0]

        # getting all our hints for the flag
        query = session.query(Hint).filter_by(solu_id=flag.solu_id)
        flag_hints = query.order_by(Hint.strength.asc()).all()

        # hint calculation
        hint = None
        time_to_next = None
        # skip if there are no hints for the flag
        if len(flag_hints) > 0:
            # if they haven't asked before, or asked before activation,
            # give them the lowest strength
            if (first_req_time is None or
                (first_req_time - self.start).seconds < self.sec_til_live):
                hint = flag_hints[0]
            else:
                # calculate where the user is in the hint wait cycle
                i = 0
                time = 0
                while hint is None and i < len(flag_hints):
                    time = time + flag_hints[i].minutes_live
                    mins_since_first = ((now - first_req_time).seconds // 60)
                    if mins_since_first < time:
                        hint = flag_hints[i]
                        if i != len(flag_hints) - 1:
                            time_to_next = time - mins_since_first
                    i = i + 1
                # if hint is still none, the user is past the end of the cycle,
                # so give them the final hint
                if hint is None:
                    hint = flag_hints[-1]
                    time_to_next = None

        # stripping the hint down to text so we can close the session
        if hint is not None:
            hint = hint.hint

        session.close()

        # no hints available for the flag
        if hint is None:
            return f"No hints available for this flag."

        # add the time to next hint, if there is one
        if time_to_next is not None:
            suffix = ""
            if time_to_next > 1:
                suffix = "s"
            return (hint + " The next hint will be available " +
                    f"in {time_to_next} minute{suffix}.")
        else:
            return hint
Example #30
def hints():
    awake, mins = hintbot.is_awake()
    if not awake:
        session = Session()
        query = session.query(Chattext).filter_by(usage="c_u_ltr")
        text = query.order_by(sqlfunc.random()).first()
        session.close()
        if text is None:
            return Response("Sorry, I have no response.",
                            status=503,
                            mimetype="text/plain")
        else:
            return text.ctext.format(minutes=mins)

    if request.content_length < 1:
        app.logger.info("-- received empty request or non-text mime type")
        return Response("nope.", status=400, mimetype="text/plain")

    # get user uuid from query string
    try:
        user_uuid = str(uuid.UUID(request.args.get("uuid")))
    except ValueError:
        return Response("Not a user.", status=400, mimetype="text/plain")

    # get user's "chat"
    try:
        usrmsg = request.data.decode("utf-8")
    except Exception:
        return Response("I only speak ascii.",
                        status=400,
                        mimetype="text/plain")

    # get current time
    now = datetime.datetime.utcnow()

    session = Session()

    # check that user exists
    user = session.query(User).filter_by(uuid=user_uuid).first()
    if user is None:
        session.close()
        return Response("That user does not exist.",
                        status=400,
                        mimetype="text/plain")

    # scan user chat for the name of a flag
    flag = None
    sols = session.query(Solution).all()
    for sol in sols:
        if usrmsg.find(sol.flag_name) >= 0:
            if flag is None or len(sol.flag_name) > len(flag.flag_name):
                flag = sol

    if flag is None:
        chatmsg = None
        info = None
        prev_reqs = session.query(Hintreq).filter_by(
            user_id=user.user_id).all()
        if len(prev_reqs) == 0:
            query = session.query(Chattext).filter_by(usage="hello_start")
            text1 = query.order_by(sqlfunc.random()).first()
            query = session.query(Chattext).filter_by(usage="hello_end")
            text2 = query.order_by(sqlfunc.random()).first()
            if text1 is None or text2 is None:
                chatmsg = None
            else:
                chatmsg = text1.ctext + text2.ctext
            info = [user.uname]
        else:
            query = session.query(Chattext).filter_by(usage="convo")
            text = query.order_by(sqlfunc.random()).first()
            if text is None or random.random() >= 0.5:
                fortune = subprocess.run(["/usr/games/fortune", "-s"],
                                         capture_output=True)
                chatmsg = fortune.stdout.decode("utf-8").strip()
            else:
                chatmsg = text.ctext
            # get total points
            query = session.query(sqlfunc.sum(solutions.columns.point_val))
            query = query.filter(
                correct.columns.user_id == user.user_id,
                correct.columns.solu_id == solutions.columns.solu_id)
            total_points = query.first()[0]
            if total_points is None:
                total_points = 0
            # get first hint time
            query = session.query(sqlfunc.min(hintreqs.columns.time))
            first_hint = query.filter(
                hintreqs.columns.user_id == user.user_id).first()[0]
            # provide info for conversation
            info = [user.uname, total_points, (now - first_hint).seconds // 60]

        ureq = Hintreq(user_id=user.user_id, time=now, solu_id=None)
        session.add(ureq)
        session.commit()
        session.close()

        if chatmsg is None:
            return Response("Sorry, I have no response.",
                            status=503,
                            mimetype="text/plain")

        return chatmsg.format(info=info)

    # if asked about a flag, link the hint request entry with it
    ureq = Hintreq(user_id=user.user_id, time=now, solu_id=flag.solu_id)
    session.add(ureq)
    session.commit()

    hint = hintbot.get_hint(flag.flag_name, user.user_id)

    # record request on the mqtt server
    if mclient is not None:
        mclient.publish(
            "hints",
            payload=
            f"{user.uname} has asked for a hint on flag {flag.flag_name}.",
            qos=0,
            retain=False)

    session.close()

    if hint is None:
        return "Sorry, no hints are available for this flag."

    return hint
Example #32
 def sql(self):
     return sql(compile_expr([
         saf.min(self.expr)
     ]))
Example #33
File: maps.py Project: munin/merlin
 def resources_per_agent(self, target):
     return min(10000, (target.value * 2000) / self.value)
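
A worked example of the cap, with made-up numbers (purely illustrative):

# target.value = 600_000, self.value = 200_000
# -> min(10000, (600_000 * 2000) / 200_000) = min(10000, 6000) = 6000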
Example #34
def reconstruct_idea_history(db):
    from assembl.models import (
        Idea, IdeaLink, Discussion, SynthesisPost, Synthesis,
        SubGraphIdeaLinkAssociation)
    start_by_discussion = dict(db.query(Discussion.id, sqlfunc.min(Idea.creation_date)).join(Idea).filter(Idea.sqla_type != 'assembl:RootIdea').all())
    ideas = db.query(Idea).all()
    idea_links = db.query(IdeaLink).all()
    synthesis_dates = dict(db.query(IdeaLink.id, SynthesisPost.creation_date).join(SubGraphIdeaLinkAssociation).join(Synthesis).join(SynthesisPost).all())
    link_by_id = {l.id: l for l in idea_links}
    link_ids = link_by_id.keys()
    link_ids.sort()
    end = datetime.datetime.now()
    min_creation = {
        l.id: max(l.source_ts.creation_date, l.target_ts.creation_date) for l in idea_links
    }
    max_end = {id:end for id in link_ids}

    for id, date in synthesis_dates.iteritems():
        min_creation[id] = date
        max_end[id] = date
        link_by_id[id].tombstone_date = date

    # monotonicity of start
    last_date = min_creation[link_ids[0]]
    for id in link_ids:
        last_date = max(last_date, min_creation[id])
        min_creation[id] = last_date
        # arbitrary increment
        if last_date < max_end[id]:
            last_date = min(last_date+datetime.timedelta(seconds=10),
                            max_end[id])

    # child is target. So no two links with same target can coexist.
    by_target = defaultdict(list)
    ends_before = {}
    for l in idea_links:
        by_target[l.target_id].append(l.id)

    live_ids = {}
    # if same target, one replaces the other.
    for l in by_target.itervalues():
        l.sort()
        live_id=[id for id in l if not link_by_id[id].is_tombstone]
        assert len(live_id) < 2
        if len(live_id):
            live_id = live_id[0] if live_id else l[-1]
            non_synth = [id for id in l if id not in synthesis_dates]
            if not non_synth[-1] == live_id:
                import pdb; pdb.set_trace()
        else:
            live_id = l[-1]
        live_ids.update({id:live_id for id in l})
        last_id = None
        for id in l:
            link = link_by_id[id]
            link.base_id = live_id
            if last_id:
                ends_before[last_id] = id
            if id in synthesis_dates:
                continue
            last_id = id
            max_end[last_id] = min(min_creation[id], max_end[last_id])
        assert last_id == live_id

    for link in idea_links:
        if link.is_tombstone:
            link.tombstone_date = max_end[link.id]

    # for id in link_ids:
    #     print id, min_creation[id] != max_end[id]

    # for id,date in max_end.iteritems():
    #     print id, id in synthesis_dates, date if link_by_id[id].is_tombstone else None

    dead_ideas=[idea for idea in ideas if idea.is_tombstone]
    for idea in dead_ideas:
        links = db.query(IdeaLink.id).filter((IdeaLink.source_id == idea.id) | (IdeaLink.target_id == idea.id)).all()
        idea.tombstone_date = max((max_end[id] for (id,) in links))
Example #35
    def get_all_study_summaries(self) -> List[StudySummary]:

        with _create_scoped_session(self.scoped_session) as session:
            summarized_trial = (
                session.query(
                    models.TrialModel.study_id,
                    functions.min(models.TrialModel.datetime_start).label("datetime_start"),
                    functions.count(models.TrialModel.trial_id).label("n_trial"),
                )
                .group_by(models.TrialModel.study_id)
                .with_labels()
                .subquery()
            )
            study_summary_stmt = session.query(
                models.StudyModel.study_id,
                models.StudyModel.study_name,
                summarized_trial.c.datetime_start,
                functions.coalesce(summarized_trial.c.n_trial, 0).label("n_trial"),
            ).select_from(orm.outerjoin(models.StudyModel, summarized_trial))

            study_summary = study_summary_stmt.all()

            _directions = defaultdict(list)
            for d in session.query(models.StudyDirectionModel).all():
                _directions[d.study_id].append(d.direction)

            _user_attrs = defaultdict(list)
            for a in session.query(models.StudyUserAttributeModel).all():
                _user_attrs[a.study_id].append(a)

            _system_attrs = defaultdict(list)
            for a in session.query(models.StudySystemAttributeModel).all():
                _system_attrs[a.study_id].append(a)

            study_summaries = []
            for study in study_summary:
                directions = _directions[study.study_id]
                best_trial: Optional[models.TrialModel] = None
                try:
                    if len(directions) > 1:
                        raise ValueError
                    elif directions[0] == StudyDirection.MAXIMIZE:
                        best_trial = models.TrialModel.find_max_value_trial(
                            study.study_id, 0, session
                        )
                    else:
                        best_trial = models.TrialModel.find_min_value_trial(
                            study.study_id, 0, session
                        )
                except ValueError:
                    best_trial_frozen: Optional[FrozenTrial] = None
                if best_trial:
                    value = models.TrialValueModel.find_by_trial_and_objective(
                        best_trial, 0, session
                    )
                    assert value
                    params = (
                        session.query(
                            models.TrialParamModel.param_name,
                            models.TrialParamModel.param_value,
                            models.TrialParamModel.distribution_json,
                        )
                        .filter(models.TrialParamModel.trial_id == best_trial.trial_id)
                        .all()
                    )
                    param_dict = {}
                    param_distributions = {}
                    for param in params:
                        distribution = distributions.json_to_distribution(param.distribution_json)
                        param_dict[param.param_name] = distribution.to_external_repr(
                            param.param_value
                        )
                        param_distributions[param.param_name] = distribution
                    user_attrs = models.TrialUserAttributeModel.where_trial_id(
                        best_trial.trial_id, session
                    )
                    system_attrs = models.TrialSystemAttributeModel.where_trial_id(
                        best_trial.trial_id, session
                    )
                    intermediate = models.TrialIntermediateValueModel.where_trial_id(
                        best_trial.trial_id, session
                    )
                    best_trial_frozen = FrozenTrial(
                        best_trial.number,
                        TrialState.COMPLETE,
                        value.value,
                        best_trial.datetime_start,
                        best_trial.datetime_complete,
                        param_dict,
                        param_distributions,
                        {i.key: json.loads(i.value_json) for i in user_attrs},
                        {i.key: json.loads(i.value_json) for i in system_attrs},
                        {value.step: value.intermediate_value for value in intermediate},
                        best_trial.trial_id,
                    )
                user_attrs = _user_attrs.get(study.study_id, [])
                system_attrs = _system_attrs.get(study.study_id, [])
                study_summaries.append(
                    StudySummary(
                        study_name=study.study_name,
                        direction=None,
                        directions=directions,
                        best_trial=best_trial_frozen,
                        user_attrs={i.key: json.loads(i.value_json) for i in user_attrs},
                        system_attrs={i.key: json.loads(i.value_json) for i in system_attrs},
                        n_trials=study.n_trial,
                        datetime_start=study.datetime_start,
                        study_id=study.study_id,
                    )
                )

        return study_summaries
Example #37
    def fromCollections(cls,
                        registry,
                        datasetType,
                        collections,
                        addResultColumns=True):
        """Construct a builder that searches a multiple collections for
        datasets single dataset type.

        Parameters
        ----------
        registry : `SqlRegistry`
            Registry instance the query is being run against.
        datasetType : `DatasetType`
            `DatasetType` of the datasets this query searches for.
        collections : `list` of `str`
            List of collections to search, ordered from highest-priority to
            lowest.
        addResultColumns : `bool`
            If `True` (default), add result columns to ``self.resultColumns``
            for the dataset ID and dimension links used to identify this
        `DatasetType`.

        Returns
        -------
        builder : `SingleDatasetQueryBuilder`
            New query builder instance initialized with a
            `~QueryBuilder.fromClause` that either directly includes the
            dataset table or includes a subquery equivalent.

        Notes
        -----
        If ``len(collections)==1``, this method simply calls
        `fromSingleCollection`.

        If there are multiple collections, then there can be multiple matching
        Datasets for the same DataId. In that case we need only one Dataset
        record, which comes from the earliest collection (in the user-provided
        order). Here things become complicated; we have to:
        - replace collection names with their order in input list
        - select all combinations of rows from dataset and dataset_collection
          which match collection names and dataset type name
        - from those only select rows with lowest collection position if
          there are multiple collections for the same DataId

        Replacing collection names with positions is easy:

            SELECT dataset_id,
                CASE collection
                    WHEN 'collection1' THEN 0
                    WHEN 'collection2' THEN 1
                    ...
                END AS collorder
            FROM dataset_collection

        Combined query will look like (CASE ... END is as above):

            SELECT dataset.dataset_id AS dataset_id,
                CASE dataset_collection.collection ... END AS collorder,
                dataset.link1,
                ...
                dataset.linkN
            FROM dataset JOIN dataset_collection
                ON dataset.dataset_id = dataset_collection.dataset_id
            WHERE dataset.dataset_type_name = <dsType.name>
                AND dataset_collection.collection IN (<collections>)

        Filtering is complicated; it would be simpler to use Common Table
        Expressions (WITH clause) but not all databases support CTEs, so we
        will have to do with the repeating sub-queries. We use GROUP BY for
        the data ID (link columns) and MIN(collorder) to find ``collorder``
        for a particular DataId, then join it with previous combined selection:

            SELECT
                DS.dataset_id AS dataset_id,
                DS.link1 AS link1,
                ...
                DS.linkN AS linkN
            FROM (
                SELECT dataset.dataset_id AS dataset_id,
                    CASE ... END AS collorder,
                    dataset.link1,
                    ...
                    dataset.linkN
                FROM dataset JOIN dataset_collection
                    ON dataset.dataset_id = dataset_collection.dataset_id
                WHERE dataset.dataset_type_name = <dsType.name>
                    AND dataset_collection.collection IN (<collections>)
                ) DS
            INNER JOIN (
                SELECT
                    MIN(CASE ... END) AS collorder,
                    dataset.link1,
                    ...
                    dataset.linkN
                FROM dataset JOIN dataset_collection
                    ON dataset.dataset_id = dataset_collection.dataset_id
                WHERE dataset.dataset_type_name = <dsType.name>
                   AND dataset_collection.collection IN (<collections>)
                GROUP BY (
                    dataset.link1,
                    ...
                    dataset.linkN
                    )
                ) DSG
            ON (DS.collorder = DSG.collorder
                    AND
                DS.link1 = DSG.link1
                    AND
                ...
                    AND
                DS.linkN = DSG.linkN)
        """
        if len(collections) == 1:
            return cls.fromSingleCollection(registry,
                                            datasetType,
                                            collections[0],
                                            addResultColumns=addResultColumns)

        # helper method
        def _columns(selectable, names):
            """Return list of columns for given column names"""
            return [selectable.columns[name].label(name) for name in names]

        datasetTable = registry._schema.tables["dataset"]
        datasetCollectionTable = registry._schema.tables["dataset_collection"]

        # full set of link names for this DatasetType
        links = list(datasetType.dimensions.links())

        # Starting point for both subqueries below: a join of dataset to
        # dataset_collection
        subJoin = datasetTable.join(
            datasetCollectionTable, datasetTable.columns.dataset_id ==
            datasetCollectionTable.columns.dataset_id)
        subWhere = and_(
            datasetTable.columns.dataset_type_name == datasetType.name,
            datasetCollectionTable.columns.collection.in_(collections))

        # CASE clause that transforms collection name to position in the given
        # list of collections
        collorder = case([(datasetCollectionTable.columns.collection == coll,
                           pos) for pos, coll in enumerate(collections)])

        # first GROUP BY sub-query, find minimum `collorder` for each DataId
        columns = [functions.min(collorder).label("collorder")] + _columns(
            datasetTable, links)
        groupSubq = select(columns).select_from(subJoin).where(subWhere)
        groupSubq = groupSubq.group_by(*links)
        groupSubq = groupSubq.alias("sub1" + datasetType.name)

        # next combined sub-query
        columns = [collorder.label("collorder")] + _columns(
            datasetTable, ["dataset_id"] + links)
        combined = select(columns).select_from(subJoin).where(subWhere)
        combined = combined.alias("sub2" + datasetType.name)

        # now join these two
        joinsOn = [groupSubq.columns.collorder == combined.columns.collorder] + \
                  [groupSubq.columns[colName] == combined.columns[colName] for colName in links]

        return cls(registry,
                   fromClause=combined.join(groupSubq, and_(*joinsOn)),
                   datasetType=datasetType,
                   selectableForDataset=combined,
                   addResultColumns=addResultColumns)
Example #38
    def get_all_study_summaries(self) -> List[StudySummary]:

        session = self.scoped_session()

        summarized_trial = (session.query(
            models.TrialModel.study_id,
            functions.min(
                models.TrialModel.datetime_start).label("datetime_start"),
            functions.count(models.TrialModel.trial_id).label("n_trial"),
        ).group_by(models.TrialModel.study_id).with_labels().subquery())
        study_summary_stmt = session.query(
            models.StudyModel.study_id,
            models.StudyModel.study_name,
            models.StudyModel.direction,
            summarized_trial.c.datetime_start,
            functions.coalesce(summarized_trial.c.n_trial, 0).label("n_trial"),
        ).select_from(orm.outerjoin(models.StudyModel, summarized_trial))

        study_summary = study_summary_stmt.all()
        study_summaries = []
        for study in study_summary:
            best_trial: Optional[models.TrialModel] = None
            try:
                if study.direction == StudyDirection.MAXIMIZE:
                    best_trial = models.TrialModel.find_max_value_trial(
                        study.study_id, session)
                else:
                    best_trial = models.TrialModel.find_min_value_trial(
                        study.study_id, session)
            except ValueError:
                best_trial_frozen: Optional[FrozenTrial] = None
            if best_trial:
                params = (session.query(
                    models.TrialParamModel.param_name,
                    models.TrialParamModel.param_value,
                    models.TrialParamModel.distribution_json,
                ).filter(models.TrialParamModel.trial_id ==
                         best_trial.trial_id).all())
                param_dict = {}
                param_distributions = {}
                for param in params:
                    distribution = distributions.json_to_distribution(
                        param.distribution_json)
                    param_dict[
                        param.param_name] = distribution.to_external_repr(
                            param.param_value)
                    param_distributions[param.param_name] = distribution
                user_attrs = session.query(
                    models.TrialUserAttributeModel).filter(
                        models.TrialUserAttributeModel.trial_id ==
                        best_trial.trial_id)
                system_attrs = session.query(
                    models.TrialSystemAttributeModel).filter(
                        models.TrialSystemAttributeModel.trial_id ==
                        best_trial.trial_id)
                intermediate = session.query(models.TrialValueModel).filter(
                    models.TrialValueModel.trial_id == best_trial.trial_id)
                best_trial_frozen = FrozenTrial(
                    best_trial.number,
                    TrialState.COMPLETE,
                    best_trial.value,
                    best_trial.datetime_start,
                    best_trial.datetime_complete,
                    param_dict,
                    param_distributions,
                    {i.key: json.loads(i.value_json)
                     for i in user_attrs},
                    {i.key: json.loads(i.value_json)
                     for i in system_attrs},
                    {value.step: value.value
                     for value in intermediate},
                    best_trial.trial_id,
                )
            user_attrs = session.query(models.StudyUserAttributeModel).filter(
                models.StudyUserAttributeModel.study_id == study.study_id)
            system_attrs = session.query(
                models.StudySystemAttributeModel).filter(
                    models.StudySystemAttributeModel.study_id ==
                    study.study_id)
            study_summaries.append(
                StudySummary(
                    study_name=study.study_name,
                    direction=study.direction,
                    best_trial=best_trial_frozen,
                    user_attrs={
                        i.key: json.loads(i.value_json)
                        for i in user_attrs
                    },
                    system_attrs={
                        i.key: json.loads(i.value_json)
                        for i in system_attrs
                    },
                    n_trials=study.n_trial,
                    datetime_start=study.datetime_start,
                    study_id=study.study_id,
                ))

        # Terminate transaction explicitly to avoid connection timeout during transaction.
        self._commit(session)

        return study_summaries