Code Example #1
def get_temps(st_dt = "", end_dt = ""):
    """TMIN, TAVG, and TMAX for a list of dates.
    
    Args:
        st_dt (string): A date string in the format %Y-%m-%d
        end_dt (string): A date string in the format %Y-%m-%d
        
    Returns:
        TMIN, TAVG, and TMAX
    """
    session = Session(engine)
    
    if(st_dt == ""):
        end_dt, st_dt = get_year_past()
    
    if(end_dt == "" or end_dt is None):
        res = session.query(coalesce(func.min(M.tobs),0), coalesce(func.avg(M.tobs),0), coalesce(func.max(M.tobs),0)).\
                    filter(M.date >= st_dt).one()
    else:
        res = session.query(coalesce(func.min(M.tobs),0), coalesce(func.avg(M.tobs),0), coalesce(func.max(M.tobs),0)).\
                    filter(M.date.between(st_dt, end_dt)).one()
    
    session.close()
    
    return res
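
A brief usage sketch (hypothetical calls; it assumes the `engine`, `Session`, `M` model and `get_year_past()` helper referenced in the snippet above are importable):

# Explicit range: coalesce(..., 0) guarantees numeric zeros instead of None
# when no observations fall inside the range.
tmin, tavg, tmax = get_temps("2017-01-01", "2017-12-31")
print(f"TMIN={tmin} TAVG={tavg:.1f} TMAX={tmax}")

# No arguments: the function falls back to the most recent year of data.
tmin, tavg, tmax = get_temps()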
Code Example #2
File: maps.py Project: munin/merlin
    def fleet_overview(self):
        if self.scantype not in ("J",):
            return

        from sqlalchemy.sql.functions import min, sum

        f = aliased(FleetScan)
        a = aliased(FleetScan)
        d = aliased(FleetScan)

        Q = session.query(
            f.landing_tick,
            f.landing_tick - min(Scan.tick),
            count(a.id),
            coalesce(sum(a.fleet_size), 0),
            count(d.id),
            coalesce(sum(d.fleet_size), 0),
        )
        Q = Q.join(f.scan)
        Q = Q.filter(f.scan == self)

        Q = Q.outerjoin((a, and_(a.id == f.id, a.mission.ilike("Attack"))))
        Q = Q.outerjoin((d, and_(d.id == f.id, d.mission.ilike("Defend"))))

        Q = Q.group_by(f.landing_tick)
        Q = Q.order_by(asc(f.landing_tick))
        return Q.all()
Code Example #3
def featurelist(platform):
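    # Count tables/registers per feature in subqueries; coalesce(...) below turns
    # the NULL produced by the outer joins into 0 for features with no rows.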
    table_counts = db.session.query(
                Table.feature_id,
                func.count(Table.id).label('count')
            )\
            .group_by(Table.feature_id).subquery()
    register_counts = db.session.query(
                Register.feature_id,
                func.count(Register.id).label('count')
            )\
            .group_by(Register.feature_id).subquery()
    query = db.session.query(
            Feature.name.label('name'),
            label('table_count', coalesce(table_counts.c.count, 0)),
            label('register_count', coalesce(register_counts.c.count, 0))
        )\
        .outerjoin(table_counts, table_counts.c.feature_id == Feature.id)\
        .outerjoin(register_counts, register_counts.c.feature_id == Feature.id)\
        .join(Feature.family)\
        .filter(Family.name == platform)\
        .order_by(Feature.name)

    if query.count() == 0:
        abort(404)

    return render_template('featurelist.html',
                           platform=platform,
                           features=query.all())
Code Example #4
async def get_mail_ratios(db: Session, scope: dict, since: datetime):

    query = db.query(func.count(MailChimpCampaign.id).label('nbCampagnes'), \
            coalesce(func.round(func.sum(MailChimpCampaignReport.open_unique) / func.sum(MailChimpCampaignReport.email_sent), 4), 0).label('txOuverture'), \
            coalesce(func.round(func.sum(MailChimpCampaignReport.click_unique) / func.sum(MailChimpCampaignReport.email_sent), 4), 0).label('txClique'), \
            coalesce(func.round(func.sum(MailChimpCampaignReport.unsubscribed) / func.sum(MailChimpCampaignReport.email_sent), 4), 0).label('txDesabonnement')) \
        .select_from(MailChimpCampaignReport) \
        .join(MailChimpCampaignReport.mailchimp_campaign) \
        .join(MailChimpCampaign.message.and_( \
            AdherentMessages.type == scope['code'], \
            AdherentMessages.sent_at >= since)) \
        .join(AdherentMessages.author)

    res = filter_role(db, query, scope['zones'], scope['code']).first()

    nat = db.query(
            func.round(func.sum(MailChimpCampaignReport.open_unique) / func.sum(MailChimpCampaignReport.email_sent), 4).label('txOuverture'), \
            func.round(func.sum(MailChimpCampaignReport.click_unique) / func.sum(MailChimpCampaignReport.email_sent), 4).label('txClique'), \
            func.round(func.sum(MailChimpCampaignReport.unsubscribed) / func.sum(MailChimpCampaignReport.email_sent), 4).label('txDesabonnement')) \
        .select_from(MailChimpCampaignReport) \
        .join(MailChimpCampaignReport.mailchimp_campaign) \
        .join(MailChimpCampaign.message.and_( \
            AdherentMessages.type == scope['code'])) \
        .first()

    return {'local': res, 'national': nat}
Code Example #5
File: search.py Project: rockonedege/calibre-web
def adv_search_read_status(read_status):
    if not config.config_read_column:
        if read_status == "True":
            db_filter = and_(
                ub.ReadBook.user_id == int(current_user.id),
                ub.ReadBook.read_status == ub.ReadBook.STATUS_FINISHED)
        else:
            db_filter = coalesce(ub.ReadBook.read_status,
                                 0) != ub.ReadBook.STATUS_FINISHED
    else:
        try:
            if read_status == "True":
                db_filter = db.cc_classes[
                    config.config_read_column].value == True
            else:
                db_filter = coalesce(
                    db.cc_classes[config.config_read_column].value,
                    False) != True
        except (KeyError, AttributeError, IndexError):
            log.error("Custom Column No.{} does not exist in calibre database".
                      format(config.config_read_column))
            flash(_(
                "Custom Column No.%(column)d does not exist in calibre database",
                column=config.config_read_column),
                  category="error")
            return true()
    return db_filter
Code Example #6
File: database.py Project: dingens/zwl
    def lookup(cls, traintype, loc, track=None):
        """
        Find the minimum stopping time for a train of type `traintype` at
        `loc` and (optionally) `track`.

        Important: There must be a fallback (None, None, None) entry in the
        database, else incorrect results will be returned by `lookup()`.

        :param traintype: Train object, TrainType object, or TrainType id
        """
        if isinstance(traintype, Train):
            traintype = traintype.type_id
        elif isinstance(traintype, TrainType):
            traintype = traintype.id
        if track is not None and loc is None:
            raise ValueError('loc cannot be None when track is not')

        # Rank lines by how well they fit. If the field we look at is NULL,
        # its line is ranked between matching (1) and contradicting (0) lines
        # by using an ordering value of 0.5. That way default entries can be
        # defined by setting some or all columns to NULL.
        q = db.session.query(cls.minimum_stop_time)
        if track is not None:
            q = q.order_by(
                coalesce((cls.loc == loc) & (cls.track == track), 0.5).desc())
        q = q.order_by(
            coalesce((cls.loc == loc) & cls.track.is_(None), 0.5).desc())
        q = q.order_by(coalesce(cls.traintype_id == traintype, 0.5).desc())

        result = db.session.execute(q).scalar()
        if result is None:
            raise ValueError('No minimum stop time defined')
        return result
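
The NULL-as-wildcard ranking used above can be shown in isolation. The following is a self-contained sketch against a hypothetical `rules` table (it is not part of the zwl project):

from sqlalchemy import Column, Integer, String, create_engine, select
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.sql.functions import coalesce

Base = declarative_base()

class Rule(Base):
    __tablename__ = 'rules'
    id = Column(Integer, primary_key=True)
    loc = Column(String, nullable=True)  # NULL acts as "any location"
    value = Column(Integer)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([Rule(loc='A', value=10),
                     Rule(loc=None, value=5),
                     Rule(loc='B', value=7)])
    session.commit()
    # (Rule.loc == 'A') is 1 for a match, 0 for a mismatch and NULL for the
    # wildcard row; coalesce(..., 0.5) therefore orders match > wildcard > mismatch.
    stmt = select(Rule.value).order_by(coalesce(Rule.loc == 'A', 0.5).desc())
    print(session.execute(stmt).scalars().all())  # [10, 5, 7]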
Code Example #7
def adv_search_read_status(q, read_status):
    if read_status:
        if config.config_read_column:
            try:
                if read_status == "True":
                    q = q.join(db.cc_classes[config.config_read_column], isouter=True) \
                        .filter(db.cc_classes[config.config_read_column].value == True)
                else:
                    q = q.join(db.cc_classes[config.config_read_column], isouter=True) \
                        .filter(coalesce(db.cc_classes[config.config_read_column].value, False) != True)
            except (KeyError, AttributeError):
                log.error(
                    u"Custom Column No.%d is not existing in calibre database",
                    config.config_read_column)
                flash(_(
                    "Custom Column No.%(column)d is not existing in calibre database",
                    column=config.config_read_column),
                      category="error")
                return q
        else:
            if read_status == "True":
                q = q.join(ub.ReadBook, db.Books.id == ub.ReadBook.book_id, isouter=True) \
                    .filter(ub.ReadBook.user_id == int(current_user.id),
                            ub.ReadBook.read_status == ub.ReadBook.STATUS_FINISHED)
            else:
                q = q.join(ub.ReadBook, db.Books.id == ub.ReadBook.book_id, isouter=True) \
                    .filter(ub.ReadBook.user_id == int(current_user.id),
                            coalesce(ub.ReadBook.read_status, 0) != ub.ReadBook.STATUS_FINISHED)
    return q
Code Example #8
def show_prodchart_json(user_id, month_num, attr_list):
    """show sale profit chart data as a json"""

    firstday_month = month_num.replace('-', '') + "01"

    set_date = datetime.strptime(firstday_month, "%Y%m%d").date() + relativedelta(months=1)

    purch = db.session.query(Purchase.prd_id,
                             db.func.round(db.func.sum(coalesce(Purchase.quantities, 0))).label("purch_qty"),
                             db.func.sum(coalesce(db.func.round(Purchase.quantities) * Purchase.purchase_price, 0)).label("purch_price_sum"))\
                      .filter(Purchase.purchase_at < set_date)\
                      .group_by(Purchase.prd_id).subquery()

    sale = db.session.query(Sale.prd_id,
                            db.func.round(db.func.sum(coalesce(Sale.quantities, 0))).label("sale_qty"),
                            db.func.sum(coalesce(db.func.round(Sale.quantities) * Sale.transc_price, 0)).label("sale_price_sum"))\
                     .filter(Sale.transc_at < set_date)\
                     .group_by(Sale.prd_id).subquery()

    # prod = db.session.query(Product.prd_id,
    #                         Product.cg_id, Category.cg_name)\
    #                  .join(Category).join(Product.prddetail)\
    #                  .filter(CategoryDetailValue.attr_val.in_(attr_list), Product.user_id == user_id)\
    #                  .group_by(Product.prd_id, Product.cg_id, Category.cg_name).subquery()

    product_sum = db.session.query(db.func.sum(sale.c.sale_qty).label("sale_qty_sum"),
                                   db.func.sum(purch.c.purch_qty - sale.c.sale_qty).label("purch_onhand_qty"))\
                            .join(purch, sale.c.prd_id == purch.c.prd_id).all()

    return sql_to_pichartejson(product_sum, "Sales Chart")
Code Example #9
File: es_initial.py Project: uktrade/data-hub-korben
def get_remote_name_select(cols):
    'Get either the `name` column or `first_name` ++ `last_name`'
    if hasattr(cols, 'name'):
        return cols.name
    if all(map(functools.partial(hasattr, cols), ('first_name', 'last_name'))):
        return (sqla_func.coalesce(getattr(cols, 'first_name'), '') +
                sqla_func.coalesce(getattr(cols, 'last_name'), ''))
Code Example #10
File: guess.py Project: JDD/merlin
    def list_fleets(self, message, user, params):
        # Check the planet exists
        planet = Planet.load(*params.group(1,3,5))
        if planet is None:
            message.alert("No planet with coords %s:%s:%s" % params.group(1,3,5))
            return

        # Find all fleets with a known alliance who have defended this planet
        OQ = session.query(coalesce(FleetScan.launch_tick, FleetScan.landing_tick), literal_column("'From'").label("dir"), Planet.x, Planet.y, Planet.z, Alliance.name).select_from(FleetScan)
        OQ = OQ.filter(FleetScan.target_id == planet.id, FleetScan.in_galaxy==False, FleetScan.mission=="Defend")
        OQ = OQ.join(Intel, FleetScan.owner_id == Intel.planet_id).filter(Intel.alliance_id != None)
        OQ = OQ.join(Alliance, Intel.alliance_id == Alliance.id).join(Planet, FleetScan.owner_id == Planet.id)

        # Find all fleets with a known alliance who have been defended by this planet
        TQ = session.query(coalesce(FleetScan.launch_tick, FleetScan.landing_tick), literal_column("'To  '").label("dir"), Planet.x, Planet.y, Planet.z, Alliance.name).select_from(FleetScan)
        TQ = TQ.filter(FleetScan.owner_id == planet.id, FleetScan.in_galaxy==False, FleetScan.mission=="Defend")
        TQ = TQ.join(Intel, FleetScan.target_id == Intel.planet_id).filter(Intel.alliance_id != None)
        TQ = TQ.join(Alliance, Intel.alliance_id == Alliance.id).join(Planet, FleetScan.target_id == Planet.id)

        # Combine the results into one sorted list
        results = sorted(OQ.all()+TQ.all(), reverse=True)

        # Quit now if there are no results
        if len(results) == 0:
            message.reply("No suggestions found")
            return

        # Reply to the user
        message.reply("Tick  Dir   Planet     Alliance")
        limit = int(params.group(6) or 5)
        for r in results[:limit]:
            message.reply("%4s  %s  %-9s  %s" % (r[0], r[1], "%s:%s:%s" % (r[2], r[3], r[4]), r[5]))
        if len(results) > limit:
            message.reply("%s results not shown (%s total)" % (len(results)-limit, len(results)))
Code Example #11
def show_product():
    """show products"""

    user_id = session.get("user_id")

    purch = db.session.query(
        Product.prd_id, Product.user_id, Product.prd_name, Product.cg_id,
        Category.cg_name, Product.sale_price, Product.description,
        db.func.sum(coalesce(Purchase.quantities, 0)).label("purch_qty"),
        db.func.sum(coalesce(
            Purchase.quantities * Purchase.purchase_price,
            0)).label("purch_price_sum")).outerjoin(Purchase).outerjoin(
                Category).filter(Product.user_id == user_id).group_by(
                    Product.prd_id, Product.user_id, Product.prd_name,
                    Product.cg_id, Category.cg_name, Product.sale_price,
                    Product.description).order_by(Product.prd_id).subquery()

    products = db.session.query(
        purch.c.prd_id, purch.c.user_id, purch.c.prd_name, purch.c.cg_id,
        purch.c.cg_name, purch.c.sale_price, purch.c.description,
        purch.c.purch_qty, purch.c.purch_price_sum,
        db.func.sum(coalesce(Sale.quantities, 0)).label("sale_qty"),
        db.func.sum(coalesce(
            Sale.quantities * Sale.transc_price,
            0)).label("sale_price_sum")).outerjoin(
                Sale, purch.c.prd_id == Sale.prd_id).group_by(
                    purch.c.prd_id, purch.c.user_id, purch.c.prd_name,
                    purch.c.cg_id, purch.c.cg_name, purch.c.sale_price,
                    purch.c.description, purch.c.purch_qty,
                    purch.c.purch_price_sum).order_by(purch.c.prd_id).all()

    return render_template("product.html", products=products)
Code Example #12
def demographic_etl(config):
    # set up
    connection = get_connection(config)
    pedsnet_session = init_pedsnet(connection)
    init_pcornet(connection)

    # multiple aliases for pedsnet_pcornet_valueset_map
    # to allow the three named joins
    gender_value_map = aliased(ValueSetMap)
    ethnicity_value_map = aliased(ValueSetMap)
    race_value_map = aliased(ValueSetMap)

    # extract the data from the person table
    person = pedsnet_session.query(Person.person_id,
                                   Person.birth_date,
                                   Person.birth_time,
                                   coalesce(gender_value_map.target_concept, 'OT'),
                                   coalesce(ethnicity_value_map.target_concept, 'OT'),
                                   coalesce(race_value_map.target_concept, 'OT'),
                                   bindparam("biobank_flag", "N"),
                                   Person.gender_source_value,
                                   Person.ethnicity_source_value,
                                   Person.race_source_value,
                                   Person.site,
                                   bindparam("gender_identity", None),
                                   bindparam("raw_gender_identity", None),
                                   bindparam("sexual_orientation", None),
                                   bindparam("raw_sexual_orientation", None)
                                   ). \
        outerjoin(gender_value_map,
                  and_(gender_value_map.source_concept_class == 'Gender',
                       case([(and_(Person.gender_concept_id == None,
                                   gender_value_map.source_concept_id == None), True)],
                            else_=cast(Person.gender_concept_id, String(200)) ==
                                  gender_value_map.source_concept_id))). \
        outerjoin(ethnicity_value_map,
                  and_(ethnicity_value_map.source_concept_class == 'Hispanic',
                       case([(and_(Person.ethnicity_concept_id == None,
                                   ethnicity_value_map.source_concept_id == None), True)],
                            else_=cast(Person.ethnicity_concept_id, String(200)) ==
                                  ethnicity_value_map.source_concept_id))). \
        outerjoin(race_value_map,
                  and_(race_value_map.source_concept_class == 'Race',
                       case([(and_(Person.race_concept_id == None,
                                   race_value_map.source_concept_id == None), True)],
                            else_=cast(Person.race_concept_id, String(200)) ==
                                  race_value_map.source_concept_id))).all()

    # transform data to pcornet names and types
    # load to demographic table
    odo(person, Demographic.__table__,
        dshape='var * {patid: string, birth_date: date, birth_time: string, sex: string,'
               'hispanic: string, race: string, biobank_flag: string, raw_sex: string,'
               'raw_hispanic: string, raw_race:string, site: string, gender_identity: string,'
               'raw_gender_identity: string, sexual_orientation: string, raw_sexual_orientation: string}'
        )
    # close session

    pedsnet_session.close()
Code Example #13
def user_question_view():
    questions_likes = db.session.query(
            Evaluation.id,
            Evaluation.evaluation_category,
            Evaluation.evaluation_question,
            cast(coalesce(func.avg(Evaluation_Likes.like), 0), sqlalchemy.Integer).label('Likes'),
            cast(coalesce(func.avg(Evaluation_Difficulty.difficulty), 0), sqlalchemy.Integer).label('Difficulty')) \
        .outerjoin(Evaluation_Likes).outerjoin(Evaluation_Difficulty) \
        .group_by(Evaluation.id) \
        .filter(Evaluation.user_id == current_user.id) \
        .order_by(desc('Likes')).all()
    return render_template('user_question_view.html',
                           questions_likes=questions_likes)
Code Example #14
File: models.py Project: esquonk/test_flights
    def price(cls, adult=1, child=0, infant=0):
        expr = cls.price_adult * adult
        if child:
            expr += coalesce(cls.price_child, cls.price_adult) * child
        if infant:
            expr += coalesce(cls.price_infant, cls.price_adult) * infant

        return expr
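
As a worked illustration with hypothetical fares (price_adult = 100, price_child = NULL, price_infant = 10), the expression returned by price(adult=2, child=1, infant=1) evaluates per row as:

# price_adult * 2 + coalesce(price_child, price_adult) * 1 + coalesce(price_infant, price_adult) * 1
# = 100 * 2 + 100 * 1 + 10 * 1
# = 310   (the missing child fare falls back to the adult fare)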
Code Example #15
File: models.py Project: ktt-ol/poisk
    def query_ordered(cls):
        # order by most recent last_seen OR key transaction
        newest_date = functions.max(
            functions.coalesce(User.last_seen, 0),
            functions.coalesce(KeyTransaction.start, 0)
        )
        query = Key.query.outerjoin(Key.holder).outerjoin(Key.current_transaction)
        return query.order_by(db.desc(newest_date))
Code Example #16
File: functions.py Project: jetavator/jetavator
def hash_record(table: Table, deleted_ind_name: str,
                column_names: Iterable[str]) -> FunctionElement:
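    # Build a single string to hash: the deleted indicator (defaulting to FALSE)
    # followed by every listed column cast to text, with NULLs replaced by ''
    # so the resulting hash is stable.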
    value_to_hash = cast(
        coalesce(table.c[deleted_ind_name], literal_column("FALSE")), String())
    for column_name in column_names:
        value_to_hash = value_to_hash.concat(
            coalesce(cast(table.c[column_name], String()),
                     literal_column("''")))
    return hash_value(value_to_hash)
Code Example #17
    def handle(self, *args, **options):
        # set up
        config = get_config()
        if config is None:
            raise CommandError(
                'Unable to process configuration file p_to_p.yml')

        connection = get_connection(config)
        pedsnet_session = init_pedsnet(connection)
        init_pcornet(connection)
        init_vocab(connection)

        pedsnet_pcornet_valueset_map = aliased(ValueSetMap)

        # extract the data from the death table
        death_cause = pedsnet_session.query(DeathPedsnet.person_id,
                                            func.left(DeathPedsnet.cause_source_value, 8),
                                            coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT'),
                                            bindparam("death_cause_type", "NI"),
                                            bindparam("death_cause_source", "L"),
                                            bindparam("death_cause_confidence", None),
                                            min(DeathPedsnet.site)
                                            ) \
            .join(Demographic, Demographic.patid == cast(DeathPedsnet.person_id, String(256)), ) \
            .join(VocabularyConcept, VocabularyConcept.concept_id == DeathPedsnet.cause_concept_id) \
            .outerjoin(pedsnet_pcornet_valueset_map,
                       and_(pedsnet_pcornet_valueset_map.source_concept_class == 'death cause code',
                            cast(VocabularyConcept.vocabulary_id, String(200)) ==
                            pedsnet_pcornet_valueset_map.source_concept_id)) \
            .filter(and_(DeathPedsnet.cause_source_value != None,
                         DeathPedsnet.cause_source_concept_id != 44814650)) \
            .group_by(DeathPedsnet.person_id, func.left(DeathPedsnet.cause_source_value, 8),
                      coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT')) \
            .all()

        # transform data to pcornet names and types
        # load to demographic table
        odo(death_cause,
            DeathCause.__table__,
            dshape=
            'var * {patid: string, death_cause: string, death_cause_code: string,'
            'death_cause_type: string, death_cause_source:string, '
            'death_cause_confidence: string, site: string}')

        # close session
        pedsnet_session.close()

        # output result
        self.stdout.ending = ''
        print('Death Cause ETL completed successfully',
              end='',
              file=self.stdout)
Code Example #18
    def list_fleets(self, message, user, params):
        # Check the planet exists
        planet = Planet.load(*params.group(1, 3, 5))
        if planet is None:
            message.alert("No planet with coords %s:%s:%s" %
                          params.group(1, 3, 5))
            return

        # Find all fleets with a known alliance who have defended this planet
        OQ = session.query(
            coalesce(FleetScan.launch_tick, FleetScan.landing_tick),
            literal_column("'From'").label("dir"), Planet.x, Planet.y,
            Planet.z, Alliance.name).select_from(FleetScan)
        OQ = OQ.filter(FleetScan.target_id == planet.id,
                       FleetScan.in_galaxy == False,
                       FleetScan.mission == "Defend")
        OQ = OQ.join(Intel, FleetScan.owner_id == Intel.planet_id).filter(
            Intel.alliance_id != None)
        OQ = OQ.join(Alliance, Intel.alliance_id == Alliance.id).join(
            Planet, FleetScan.owner_id == Planet.id)

        # Find all fleets with a known alliance who have been defended by this planet
        TQ = session.query(
            coalesce(FleetScan.launch_tick, FleetScan.landing_tick),
            literal_column("'To  '").label("dir"), Planet.x, Planet.y,
            Planet.z, Alliance.name).select_from(FleetScan)
        TQ = TQ.filter(FleetScan.owner_id == planet.id,
                       FleetScan.in_galaxy == False,
                       FleetScan.mission == "Defend")
        TQ = TQ.join(Intel, FleetScan.target_id == Intel.planet_id).filter(
            Intel.alliance_id != None)
        TQ = TQ.join(Alliance, Intel.alliance_id == Alliance.id).join(
            Planet, FleetScan.target_id == Planet.id)

        # Combine the results into one sorted list
        results = sorted(OQ.all() + TQ.all(), reverse=True)

        # Quit now if there are no results
        if len(results) == 0:
            message.reply("No suggestions found")
            return

        # Reply to the user
        message.reply("Tick  Dir   Planet     Alliance")
        limit = int(params.group(6) or 5)
        for r in results[:limit]:
            message.reply("%4s  %s  %-9s  %s" % (r[0], r[1], "%s:%s:%s" %
                                                 (r[2], r[3], r[4]), r[5]))
        if len(results) > limit:
            message.reply("%s results not shown (%s total)" %
                          (len(results) - limit, len(results)))
Code Example #19
    def get_user_contributions(project_id: int) -> ProjectContributionsDTO:
        """ Get all user contributions on a project"""

        mapped_stmt = (Task.query.with_entities(
            Task.mapped_by,
            func.count(Task.mapped_by).label("count"),
            func.array_agg(Task.id).label("task_ids"),
        ).filter(Task.project_id == project_id).group_by(
            Task.mapped_by).subquery())
        validated_stmt = (Task.query.with_entities(
            Task.validated_by,
            func.count(Task.validated_by).label("count"),
            func.array_agg(Task.id).label("task_ids"),
        ).filter(Task.project_id == project_id).group_by(
            Task.validated_by).subquery())

        results = (db.session.query(
            User.id,
            User.username,
            User.name,
            User.mapping_level,
            User.picture_url,
            coalesce(mapped_stmt.c.count, 0).label("mapped"),
            coalesce(validated_stmt.c.count, 0).label("validated"),
            (coalesce(mapped_stmt.c.count, 0) +
             coalesce(validated_stmt.c.count, 0)).label("total"),
            (mapped_stmt.c.task_ids +
             validated_stmt.c.task_ids).label("task_ids"),
        ).outerjoin(
            validated_stmt,
            mapped_stmt.c.mapped_by == validated_stmt.c.validated_by).join(
                User, User.id == mapped_stmt.c.mapped_by).order_by(
                    desc("total")).all())

        contrib_dto = ProjectContributionsDTO()
        user_contributions = [
            UserContribution(
                dict(
                    username=r.username,
                    name=r.name,
                    mapping_level=MappingLevel(r.mapping_level).name,
                    picture_url=r.picture_url,
                    mapped=r.mapped,
                    validated=r.validated,
                    total=r.total,
                    task_ids=r.task_ids,
                )) for r in results
        ]
        contrib_dto.user_contributions = user_contributions

        return contrib_dto
Code Example #20
    def get_registered_voters_count(self, vote_type=None):
        polling_stations_subquery = get_associated_areas_query(
            areas=[self], areaType=AreaTypeEnum.PollingStation).subquery()

        _registeredVotersCount = db.Column(db.Integer(), nullable=True)
        _registeredPostalVotersCount = db.Column(db.Integer(), nullable=True)
        _registeredQuarantineVotersCount = db.Column(db.Integer(),
                                                     nullable=True)
        _registeredDisplacedVotersCount = db.Column(db.Integer(),
                                                    nullable=True)

        if vote_type == NonPostal:
            registered_voters_column = coalesce(
                polling_stations_subquery.c._registeredVotersCount, 0)
        elif vote_type == Postal:
            registered_voters_column = coalesce(
                polling_stations_subquery.c._registeredPostalVotersCount, 0)
        elif vote_type == Quarantine:
            registered_voters_column = coalesce(
                polling_stations_subquery.c._registeredQuarantineVotersCount,
                0)
        elif vote_type == Displaced:  # Displaced is assumed to exist alongside NonPostal/Postal/Quarantine
            registered_voters_column = coalesce(
                polling_stations_subquery.c._registeredDisplacedVotersCount, 0)
        else:
            registered_voters_column = coalesce(polling_stations_subquery.c._registeredVotersCount, 0) \
                                       + coalesce(polling_stations_subquery.c._registeredPostalVotersCount, 0) \
                                       + coalesce(polling_stations_subquery.c._registeredQuarantineVotersCount, 0) \
                                       + coalesce(polling_stations_subquery.c._registeredDisplacedVotersCount, 0)

        total_registered_voters_count = db.session.query(
            func.sum(registered_voters_column)).scalar()

        return float(total_registered_voters_count)
Code Example #21
File: __init__.py Project: pombredanne/feedback-main
def import_keywords():
    Article.__ts_vector__ = create_tsvector(
        cast(coalesce(Article.title, ''), TEXT),
        cast(coalesce(Article.summary, ''), TEXT),
    )
    Article.__table_args__ = (Index('idx_article_fts',
                                    Article.__ts_vector__,
                                    postgresql_using='gin'), )

    Review.__ts_vector__ = create_tsvector(
        cast(coalesce(Review.comment, ''), TEXT), )
    Review.__table_args__ = (Index('idx_review_fts',
                                   Review.__ts_vector__,
                                   postgresql_using='gin'), )

    Tag.__ts_vector__ = create_tsvector(cast(coalesce(Tag.text, ''), TEXT), )
    Tag.__table_args__ = (Index('idx_tag_fts',
                                Tag.__ts_vector__,
                                postgresql_using='gin'), )

    User.__ts_vector__ = create_tsvector(
        cast(coalesce(User.email, ''), TEXT),
        cast(coalesce(User.firstName, ''), TEXT),
        cast(coalesce(User.lastName, ''), TEXT),
    )
    User.__table_args__ = (Index('idx_event_fts',
                                 User.__ts_vector__,
                                 postgresql_using='gin'), )

    Verdict.__ts_vector__ = create_tsvector(
        cast(coalesce(Verdict.comment, ''), TEXT), )
    Verdict.__table_args__ = (Index('idx_verdict_fts',
                                    Verdict.__ts_vector__,
                                    postgresql_using='gin'), )
Code Example #22
    def handle(self, *args, **options):
        # set up
        config = get_config()
        if config is None:
            raise CommandError('Unable to process configuration file p_to_p.yml')

        connection = get_connection(config)
        pedsnet_session = init_pedsnet(connection)
        init_pcornet(connection)
        init_vocab(connection)

        pedsnet_pcornet_valueset_map = aliased(ValueSetMap)

        # extract the data from the death table
        death_cause = pedsnet_session.query(DeathPedsnet.person_id,
                                            func.left(DeathPedsnet.cause_source_value, 8),
                                            coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT'),
                                            bindparam("death_cause_type", "NI"),
                                            bindparam("death_cause_source", "L"),
                                            bindparam("death_cause_confidence", None),
                                            min(DeathPedsnet.site)
                                            ) \
            .join(Demographic, Demographic.patid == cast(DeathPedsnet.person_id, String(256)), ) \
            .join(VocabularyConcept, VocabularyConcept.concept_id == DeathPedsnet.cause_concept_id) \
            .outerjoin(pedsnet_pcornet_valueset_map,
                       and_(pedsnet_pcornet_valueset_map.source_concept_class == 'death cause code',
                            cast(VocabularyConcept.vocabulary_id, String(200)) ==
                            pedsnet_pcornet_valueset_map.source_concept_id)) \
            .filter(and_(DeathPedsnet.cause_source_value != None,
                         DeathPedsnet.cause_source_concept_id != 44814650)) \
            .group_by(DeathPedsnet.person_id, func.left(DeathPedsnet.cause_source_value, 8),
                      coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT')) \
            .all()

        # transform data to pcornet names and types
        # load to demographic table
        odo(death_cause, DeathCause.__table__,
            dshape='var * {patid: string, death_cause: string, death_cause_code: string,'
                   'death_cause_type: string, death_cause_source:string, '
                   'death_cause_confidence: string, site: string}'
            )

        # close session
        pedsnet_session.close()

        # output result
        self.stdout.ending = ''
        print('Death Cause ETL completed successfully', end='', file=self.stdout)
Code Example #23
    def __init__(self, b, table_exists=True):
        """table_exists flag is used when we do not need to ask table as it is not created yet"""
        self.address = b.address #will be needed for form generating
        self.tablename = b.tablename
        self.name = b.name
        self.fullname = b.fullname
        self.description = b.description
        self.pictures = b.pictures
        self.delete_posts = bool(b.bool_settings & 1)
        self.delete_threads = bool(b.bool_settings & 2) #will be redone to get values from sql
        self.bumplimit = b.bumplimit
        self.maxthreads = b.maxthreads
        self.post_form_type = 'lxml' #or html
        self.post_form = self._lxml_form_generator() #will be added the form generating, or reading from file

        self.generate_sql_class()
        
        self.threads = array.array('L') #we do this because we need a list of integers, not ordered tuples
        if table_exists:
            #here we need to get threads ordered by last post time and depending on bumplimit
            #a_alias = self.post_class
            subq2 = sess.query(
                coalesce(self.post_class.op_post, self.post_class.id).label('coalesce'),
                self.post_class.id
            ).filter().subquery()
            subq = sess.query(
                self.post_class.id,
                subq2.c.id.label('threadid'),
                sqla.func.count(subq2.c.coalesce).label('posts_before')
            ).outerjoin(
                subq2, subq2.c.coalesce == coalesce(self.post_class.op_post, self.post_class.id)
            ).filter(
                subq2.c.id <= self.post_class.id
            ).having(
                sqla.func.count(subq2.c.coalesce) <= self.bumplimit + 1
            ).group_by(
                self.post_class.id, subq2.c.coalesce
            ).order_by(self.post_class.id.desc()).subquery()
            threads_tuples = sess.query(subq.c.threadid).filter().distinct().all() #getting threads list
            for thread in threads_tuples:
                self.threads.append(thread[0])
            #self.threads.reverse()
            print(self.threads)
        self.posts_dict = {}
        for thread in self.threads:
            self.posts_dict[thread] = array.array('L') #array.array is faster
        if table_exists:
            posts = sess.query(self.post_class.id, self.post_class.op_post).filter(self.post_class.op_post != None).all()
            for each_post in posts:
                self.posts_dict[each_post.op_post].append(each_post.id)
Code Example #24
File: format.py Project: dohoit2016/specify7
    def aggregate(self, query, field, rel_table, aggregator_name):
        logger.info('aggregating field %s on %s using %s', field, rel_table,
                    aggregator_name)
        specify_model = datamodel.get_table(field.relatedModelName,
                                            strict=True)
        aggregatorNode = self.getAggregatorDef(specify_model, aggregator_name)
        if aggregatorNode is None:
            logger.warn("aggregator is not defined")
            return literal("<Aggregator not defined.>")
        logger.debug("using aggregator: %s",
                     ElementTree.tostring(aggregatorNode))
        formatter_name = aggregatorNode.attrib.get('format', None)
        separator = aggregatorNode.attrib.get('separator', None)
        order_by = aggregatorNode.attrib.get('orderfieldname', None)

        orm_table = getattr(models, field.relatedModelName)
        if order_by is not None and order_by != '':
            order_by = getattr(orm_table, order_by)

        join_column = list(
            inspect(getattr(orm_table,
                            field.otherSideName)).property.local_columns)[0]
        subquery = orm.Query([]).select_from(orm_table) \
                             .filter(join_column == getattr(rel_table, rel_table._id)) \
                             .correlate(rel_table)
        subquery, formatted = self.objformat(subquery, orm_table,
                                             formatter_name, {})
        aggregated = coalesce(group_concat(formatted, separator, order_by), '')
        return subquery.add_column(aggregated).as_scalar()
Code Example #25
File: report.py Project: coecms/dusqlite
def report_root_ids(connection, root_ids):
    rep = []

    subq = sa.alias(find_children(root_ids))
    q = (sa.select([
        model.paths.c.uid.label('uid'),
        model.paths.c.gid.label('gid'),
        safunc.count().label('inodes'),
        safunc.coalesce(safunc.sum(model.paths.c.size), 0).label('size'),
        safunc.min(model.paths.c.last_seen).label('last seen'),
    ]).select_from(
        model.paths.join(subq, subq.c.id == model.paths.c.id)).group_by(
            model.paths.c.uid, model.paths.c.gid).order_by(sa.desc('size')))

    for u in connection.execute(q):
        u = dict(u)
        u['user'] = pwd.getpwuid(u['uid']).pw_name
        u['cn'] = pwd.getpwuid(u['uid']).pw_gecos
        u['group'] = grp.getgrgid(u['gid']).gr_name
        if u['last seen'] is not None:
            u['last seen'] = datetime.fromtimestamp(u['last seen'])

        rep.append(u)

    return rep
Code Example #26
def get_top_trends_for_location(a_woeid):
    # Query to obtain all trends in the 'trends' table
    # REVISED FOR GeoTweet+: Needs to account for retention of trends over time

    # Create a subquery to find the most recent "updated_at" record per woeid
    trend_subq = db.session.query(Trend.woeid, func.max(Trend.updated_at).label("max_updated_at")) \
                                .group_by(Trend.woeid).subquery()

    results = db.session.query(Trend) \
                            .filter( and_( \
                                Trend.woeid == a_woeid, \
                                Trend.woeid == trend_subq.c.woeid, \
                                Trend.updated_at == trend_subq.c.max_updated_at \
                            )).order_by( coalesce(Trend.twitter_tweet_volume, -9999).desc() ).limit(10).all()

    trend_list = []
    for r in results:
        trend_info = {
            'updated_at': r.updated_at,
            'woeid': r.woeid,
            'twitter_as_of': r.twitter_as_of,
            'twitter_created_at': r.twitter_created_at,
            'twitter_name': r.twitter_name,
            'twitter_tweet_name': r.twitter_tweet_name,
            'twitter_tweet_promoted_content': r.twitter_tweet_promoted_content,
            'twitter_tweet_query': r.twitter_tweet_query,
            'twitter_tweet_url': r.twitter_tweet_url,
            'twitter_tweet_volume': r.twitter_tweet_volume
        }

        trend_list.append(trend_info)

    return jsonify(trend_list)
Code Example #27
File: groups.py Project: SevenLines/university-map
    def get_data(self):
        groups = Raspis.query \
            .filter(Kontgrp.kont_id == request.args.get('kont_id')) \
            .filter((Raspis.day - 1) % 7 + 1 == request.args.get('day')) \
            .outerjoin(Auditory, Auditory.id == Raspis.aud_id) \
            .outerjoin(Raspnagr, Raspnagr.id == Raspis.raspnagr_id) \
            .outerjoin(Kontgrp, Kontgrp.id == Raspnagr.kontgrp_id) \
            .outerjoin(Kontkurs, Kontkurs.id == Raspnagr.kontkurs_id) \
            .outerjoin(Potoklist, Potoklist.op == Raspnagr.op) \
            .with_entities(
            Kontgrp.kont_id,
            Raspis.day,
            Raspis.para,
            Auditory.id.label("auditory_id"),
            func.rtrim(Auditory.title).label("auditory"),
            func.rtrim(coalesce(Potoklist.title, Kontgrp.title, Kontkurs.title)).label("group")
        ) \
            .order_by(Raspis.para)

        result = [{
            'kont_id': t.kont_id,
            'day': t.day,
            'para': t.para,
            'auditory_id': t.auditory_id,
            'auditory': t.auditory,
            'group': t.group.strip()
        } for t in groups]

        return result
Code Example #28
def order_by_name(query, table, language=None, *extra_languages, **kwargs):
    """Order a query by name.

    query: The query to order
    table: Table of the named objects
    language: The language to order names by. If None, use the
        connection default.
    extra_languages: Extra languages to order by, should the translations for
        `language` be incomplete (or ambiguous).

    name_attribute (keyword argument): the attribute to use; defaults to 'name'

    Uses the identifier as a fallback ordering.
    """
    name_attribute = kwargs.pop('name', 'name')
    if kwargs:
        raise ValueError('Unexpected keyword arguments: %s' % kwargs.keys())
    order_columns = []
    if language is None:
        query = query.outerjoin(table.names_local)
        order_columns.append(func.lower(getattr(table.names_table, name_attribute)))
    else:
        extra_languages = (language, ) + extra_languages
    for language in extra_languages:
        names_table = aliased(table.names_table)
        query = query.outerjoin(names_table)
        query = query.filter(names_table.foreign_id == table.id)
        query = query.filter(names_table.local_language_id == language.id)
        order_columns.append(func.lower(getattr(names_table, name_attribute)))
    order_columns.append(table.identifier)
    query = query.order_by(coalesce(*order_columns))
    return query
Code Example #29
File: util.py Project: aidan-fitz/pokedex3
def order_by_name(query, table, language=None, *extra_languages, **kwargs):
    """Order a query by name.

    query: The query to order
    table: Table of the named objects
    language: The language to order names by. If None, use the
        connection default.
    extra_languages: Extra languages to order by, should the translations for
        `language` be incomplete (or ambiguous).

    name_attribute (keyword argument): the attribute to use; defaults to 'name'

    Uses the identifier as a fallback ordering.
    """
    name_attribute = kwargs.pop('name', 'name')
    if kwargs:
        raise ValueError('Unexpected keyword arguments: %s' % list(kwargs.keys()))
    order_columns = []
    if language is None:
        query = query.outerjoin(table.names_local)
        order_columns.append(func.lower(getattr(table.names_table, name_attribute)))
    else:
        extra_languages = (language, ) + extra_languages
    for language in extra_languages:
        names_table = aliased(table.names_table)
        query = query.outerjoin(names_table)
        query = query.filter(names_table.foreign_id == table.id)
        query = query.filter(names_table.local_language_id == language.id)
        order_columns.append(func.lower(getattr(names_table, name_attribute)))
    order_columns.append(table.identifier)
    query = query.order_by(coalesce(*order_columns))
    return query
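
A compact, self-contained illustration of the same coalesce() fallback ordering, using a hypothetical table rather than the pokedex schema:

from sqlalchemy import Column, Integer, String, create_engine, func, select
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.sql.functions import coalesce

Base = declarative_base()

class Item(Base):
    __tablename__ = 'items'
    id = Column(Integer, primary_key=True)
    identifier = Column(String)    # always present
    local_name = Column(String)    # translated name, may be missing

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([
        Item(identifier='bulbasaur', local_name='Bisasam'),
        Item(identifier='charmander', local_name=None),  # no translation yet
        Item(identifier='squirtle', local_name='Schiggy'),
    ])
    session.commit()
    # Rows sort by the lower-cased translated name when it exists and fall back
    # to the identifier otherwise -- the role coalesce() plays in order_by_name().
    stmt = select(Item.identifier).order_by(
        coalesce(func.lower(Item.local_name), Item.identifier))
    print(session.execute(stmt).scalars().all())
    # ['bulbasaur', 'charmander', 'squirtle']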
Code Example #30
File: admin.py Project: molecul/qa-training-frontend
def admin_stats():
    users_registered = db.session.query(db.func.count(Users.id)).first()[0]
    wrong_count = db.session.query(db.func.count(WrongKeys.id)).first()[0]
    solve_count = db.session.query(db.func.count(Solves.id)).first()[0]
    challenge_count = db.session.query(db.func.count(Challenges.id)).first()[0]

    solves_raw = db.func.count(Solves.chalid).label('solves_raw')
    solves_sub = db.session.query(Solves.chalid, solves_raw) \
        .group_by(Solves.chalid).subquery()
    solves_cnt = coalesce(solves_sub.columns.solves_raw, 0).label('solves_cnt')
    most_solved_chal = Challenges.query.add_columns(solves_cnt) \
        .outerjoin(solves_sub, solves_sub.columns.chalid == Challenges.id) \
        .order_by(solves_cnt.desc()).first()
    least_solved_chal = Challenges.query.add_columns(solves_cnt) \
        .outerjoin(solves_sub, solves_sub.columns.chalid == Challenges.id) \
        .order_by(solves_cnt.asc()).first()

    db.session.expunge_all()
    db.session.commit()
    db.session.close()

    return render_template('admin/statistics.html',
                           user_count=users_registered,
                           wrong_count=wrong_count,
                           solve_count=solve_count,
                           challenge_count=challenge_count,
                           most_solved=most_solved_chal,
                           least_solved=least_solved_chal)
Code Example #31
    def add_calculation(self, proposal_id, calculator_id, mode, version,
                        input_, output, calc_version, title):
        with self._transaction() as conn:
            calculation_alias = calculation.alias()

            result = conn.execute(calculation.insert().values({
                calculation.c.proposal_id:
                proposal_id,
                calculation.c.sort_order:
                select([
                    coalesce(max_(calculation_alias.c.sort_order), 0) + 1
                ]).where(calculation_alias.c.proposal_id == proposal_id),
                calculation.c.calculator_id:
                calculator_id,
                calculation.c.mode:
                mode,
                calculation.c.version:
                version,
                calculation.c.input:
                input_,
                calculation.c.output:
                output,
                calculation.c.date_run:
                datetime.utcnow(),
                calculation.c.calc_version:
                calc_version,
                calculation.c.title:
                title,
            }))

            return result.inserted_primary_key[0]
Code Example #32
def get_permission_options():
    perm_cls = flask.current_app.auth_manager.entity_registry.permission_cls
    query = perm_cls.query.with_entities(
        perm_cls.id,
        coalesce(perm_cls.description,
                 perm_cls.token).label('desc')).order_by('desc').all()
    return [(str(perm.id), perm.desc) for perm in query]
Code Example #33
File: admin.py Project: molecul/qa-training-frontend
def admin_stats():
    users_registered = db.session.query(db.func.count(Users.id)).first()[0]
    wrong_count = db.session.query(db.func.count(WrongKeys.id)).first()[0]
    solve_count = db.session.query(db.func.count(Solves.id)).first()[0]
    challenge_count = db.session.query(db.func.count(Challenges.id)).first()[0]
    
    solves_raw = db.func.count(Solves.chalid).label('solves_raw')
    solves_sub = db.session.query(Solves.chalid, solves_raw) \
        .group_by(Solves.chalid).subquery()
    solves_cnt = coalesce(solves_sub.columns.solves_raw, 0).label('solves_cnt')
    most_solved_chal = Challenges.query.add_columns(solves_cnt) \
        .outerjoin(solves_sub, solves_sub.columns.chalid == Challenges.id) \
        .order_by(solves_cnt.desc()).first()
    least_solved_chal = Challenges.query.add_columns(solves_cnt) \
        .outerjoin(solves_sub, solves_sub.columns.chalid == Challenges.id) \
        .order_by(solves_cnt.asc()).first()
        
    db.session.expunge_all()
    db.session.commit()
    db.session.close()

    return render_template('admin/statistics.html', user_count=users_registered,
        wrong_count=wrong_count,
        solve_count=solve_count,
        challenge_count=challenge_count,
        most_solved=most_solved_chal,
        least_solved=least_solved_chal
        )
Code Example #34
    def test_filter_by_group_and_day(self):
        """
            SELECT para, a.obozn as aud,coalesce(pl.konts, kg.obozn, kk.obozn) as kont, a.id_60
            FROM raspis r
              LEFT JOIN auditories a ON r.aud = a.id_60
              LEFT JOIN raspnagr rn ON rn.id_51 = r.raspnagr
              LEFT JOIN kontgrp kg ON kg.id_7 = rn.kontid
              LEFT JOIN kontkurs kk ON kk.id_1 = rn.kont
              LEFT JOIN potoklist pl ON pl.op = rn.op
              WHERE (day-1)%7+1 = 5 AND kg.kont =22979
            ORDER BY para
            """
        schedule = Raspis.query \
            .filter(Kontgrp.kont_id == 22979) \
            .filter((Raspis.day - 1) % 7 + 1 == 1) \
            .filter(
            (Raspis.para == 5) | (Raspis.para == 4)) \
            .outerjoin(Auditory, Auditory.id == Raspis.aud_id) \
            .outerjoin(Raspnagr, Raspnagr.id == Raspis.raspnagr_id) \
            .outerjoin(Kontgrp, Kontgrp.id == Raspnagr.kontgrp_id) \
            .outerjoin(Kontkurs, Kontkurs.id == Raspnagr.kontkurs_id) \
            .outerjoin(Potoklist, Potoklist.op == Raspnagr.op) \
            .with_entities(
            Raspis.para,
            Auditory.id.label("auditory_id"),
            func.rtrim(Auditory.title).label("auditory"),
            func.rtrim(coalesce(Potoklist.title, Kontgrp.title, Kontkurs.title)).label("group")
        ) \
            .order_by(Raspis.para)
        print(schedule)

        for item in schedule:
            print(
                f"Пара: {item.para}  Аудитория: {item.auditory}  Аудитория_id: {item.auditory_id} Группа: {item.group} "
            )
Code Example #35
File: analysis.py Project: coredamage/DRAT
    def fetch_modified_rpm_details(self) -> ResultProxy:

        rd: RpmDetail = aliased(RpmDetail)
        fd: FileDetail = aliased(FileDetail)
        rdl: RpmFileDetailLink = aliased(RpmFileDetailLink)
        s: System = aliased(System)

        query = self._session.query(rd).join(
            s,
            (s.system_id == rd.system_id),
        ).join(
            rdl,
            (rdl.rpm_detail_id == rd.rpm_detail_id),
        ).outerjoin(
            fd, (fd.file_type == "F") &
            (rdl.file_detail_id == fd.file_detail_id) & (rd.digest == case(
                {
                    32: fd.md5_digest,
                    64: fd.sha256_digest,
                },
                value=func.length(coalesce(rd.digest, "")),
                else_=None,
            ))).filter((s.system_id == self.system.system_id)
                       & (fd.file_detail_id == None)
                       & (func.coalesce(rd.file_info, "") != "directory")
                       & (~func.coalesce(rd.file_info, "").startswith(
                           "symbolic link"))).distinct()

        result: ResultProxy = query.all()

        return result
Code Example #36
File: student.py Project: zhmkof/Dryvo
    def total_lessons_price(cls):
        q = (select([coalesce(func.sum(Appointment.price), 0)]).where(
            Appointment.approved_lessons_filter(
                Appointment.date < datetime.utcnow(),
                Appointment.student_id == cls.id,
            )).label("total_lessons_price"))
        return q + cls.number_of_old_lessons * cls.price
Code Example #37
File: scheduler.py Project: jplesnik/koschei
    def get_dependency_priority_query(self):
        update_weight = self.priority_conf['package_update']
        distance = coalesce(DependencyChange.distance, 8)
        return self.db.query(DependencyChange.package_id.label('pkg_id'),
                             (update_weight / distance)
                             .label('priority'))\
                      .filter_by(applied_in_id=None)
Code Example #38
    def objformat(self, query, orm_table, formatter_name, join_cache=None):
        logger.info('formatting %s using %s', orm_table, formatter_name)
        specify_model = datamodel.get_table(inspect(orm_table).class_.__name__, strict=True)
        formatterNode = self.getFormatterDef(specify_model, formatter_name)
        if formatterNode is None:
            logger.warn("no dataobjformatter for %s", specify_model)
            return query, literal("<Formatter not defined.>")
        logger.debug("using dataobjformatter: %s", ElementTree.tostring(formatterNode))

        def case_value_convert(value): return value

        switchNode = formatterNode.find('switch')
        single = switchNode.attrib.get('single', 'true') == 'true'
        if not single:
            sp_control_field = specify_model.get_field(switchNode.attrib['field'])
            if sp_control_field.type == 'java.lang.Boolean':
                def case_value_convert(value): return value == 'true'

        def make_expr(query, fieldNode):
            path = fieldNode.text.split('.')
            query, table, model, specify_field = build_join(query, specify_model, orm_table, path, join_cache)
            if specify_field.is_relationship:
                formatter_name = fieldNode.attrib.get('formatter', None)
                query, expr = self.objformat(query, table, formatter_name, join_cache)
            else:
                expr = self._fieldformat(specify_field, getattr(table, specify_field.name))

            if 'format' in fieldNode.attrib:
                expr = self.pseudo_sprintf(fieldNode.attrib['format'], expr)

            if 'sep' in fieldNode.attrib:
                expr = concat(fieldNode.attrib['sep'], expr)

            return query, coalesce(expr, '')

        def make_case(query, caseNode):
            field_exprs = []
            for node in caseNode.findall('field'):
                query, expr = make_expr(query, node)
                field_exprs.append(expr)

            expr = concat(*field_exprs) if len(field_exprs) > 1 else field_exprs[0]
            return query, case_value_convert(caseNode.attrib.get('value', None)), expr

        cases = []
        for caseNode in switchNode.findall('fields'):
            query, value, expr = make_case(query, caseNode)
            cases.append((value, expr))

        if single:
            value, expr = cases[0]
        else:
            control_field = getattr(orm_table, switchNode.attrib['field'])
            expr = case(cases, control_field)

        return query, coalesce(expr, '')
Code Example #39
    def handle(self, *args, **options):
        # set up
        config = get_config()
        if config is None:
            raise CommandError('Unable to process configuration file p_to_p.yml')

        connection = get_connection(config)
        pedsnet_session = init_pedsnet(connection)
        init_pcornet(connection)

        pedsnet_pcornet_valueset_map = aliased(ValueSetMap)

        # extract the data from the death table
        death_pedsnet = pedsnet_session.query(DeathPedsnet.death_date,
                                              coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT'),
                                              bindparam("death_match_confidence", None),
                                              bindparam("death_source", "L"),
                                              DeathPedsnet.person_id,
                                              min(DeathPedsnet.site)
                                              ). \
            outerjoin(pedsnet_pcornet_valueset_map,
                      and_(pedsnet_pcornet_valueset_map.source_concept_class == 'Death date impute',
                           cast(DeathPedsnet.death_impute_concept_id, String(200)) ==
                           pedsnet_pcornet_valueset_map.source_concept_id)) \
            .filter(and_(exists().where(DeathPedsnet.person_id == PersonVisit.person_id),
                         DeathPedsnet.death_type_concept_id == 38003569)) \
            .group_by(DeathPedsnet.person_id, DeathPedsnet.death_date,
                      coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT')) \
            .all()

        # transform data to pcornet names and types
        # load to demographic table
        odo(death_pedsnet, DeathPcornet.__table__,
            dshape='var * {death_date: date, death_date_impute: string, death_match_confidence: string,'
                   'death_source: string, patid:string, site: string}'
            )

        # close session
        pedsnet_session.close()

        # output result
        self.stdout.ending = ''
        print('Death ETL completed successfully', end='', file=self.stdout)
Code Example #40
File: album.py Project: ErinCall/catsnap
    def images_for_album_id(cls, album_id):
        from catsnap.table.image import Image

        session = Client().session()
        return (
            session.query(Image)
            .filter(Image.album_id == album_id)
            .order_by(coalesce(Image.photographed_at, Image.created_at))
            .all()
        )
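The coalesce in the order_by sorts an album's images by photographed_at and quietly falls back to created_at for images without an EXIF timestamp. In isolation the expression compiles to a plain COALESCE call; a tiny sketch with stand-in columns (not the real catsnap schema):

# Stand-in columns; only meant to show what the ORDER BY expression renders as.
from sqlalchemy import Column, DateTime, MetaData, Table
from sqlalchemy.sql.functions import coalesce

image = Table('image', MetaData(),
              Column('photographed_at', DateTime),
              Column('created_at', DateTime))

print(coalesce(image.c.photographed_at, image.c.created_at))
# coalesce(image.photographed_at, image.created_at)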
Code Example #41
File: scheduler.py (Project: W3SS/koschei)
    def get_dependency_priority_query(self):
        update_weight = get_config('priorities.package_update')
        # pylint: disable=E1120
        distance = coalesce(UnappliedChange.distance, 8)
        # inner join with package last build to get rid of outdated dependency changes
        return self.db.query(UnappliedChange.package_id.label('pkg_id'),
                             (update_weight / distance)
                             .label('priority'))\
                      .join(Package,
                            Package.last_build_id == UnappliedChange.prev_build_id)
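Here coalesce(UnappliedChange.distance, 8) gives dependency changes whose distance could not be computed a default of 8, so such a change contributes update_weight / 8 to the package's priority. Plugging in a hypothetical priorities.package_update value of 20 (the real value comes from Koschei's configuration) shows the effect:

# Hypothetical numbers, purely to illustrate how the default distance behaves.
update_weight = 20        # assumed value of the priorities.package_update setting
known_distance = 2        # a dependency close to the package
default_distance = 8      # what coalesce(UnappliedChange.distance, 8) supplies

print(update_weight / known_distance)    # 10.0 -> nearby change, higher priority
print(update_weight / default_distance)  # 2.5  -> unknown distance, lower priority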
Code Example #42
File: model.py (Project: jessedhillon/roxy)
    def order_by(self, asc=None, desc=None):
        if asc:
            if not isinstance(asc, list):
                asc = [asc]
        else:
            asc = []

        if desc:
            if not isinstance(desc, list):
                desc = [desc]
        else:
            desc = []

        sorts = []
        for c in asc + desc:
            if self.model.__mapper__.has_property(c):
                column = getattr(self.model, c)
                sorts.append(column)
            else:
                aliased_assoc = aliased(self.assoc_table)
                aliased_property = aliased(Property, name=c)
                fk_name = '_'.join(self.assoc_table.name.split('_')[:-1] + ['key'])
                fk = getattr(aliased_assoc.c, fk_name)
                self.query = self.query.\
                        outerjoin(aliased_assoc,
                                  fk == self.model.key).\
                        outerjoin(aliased_property,
                                  and_(aliased_assoc.c.property_key == aliased_property.key,  # join on the alias, not Property
                                       aliased_property.name == c))

                sorts.append((c, aliased_property))

        for s in sorts:
            if isinstance(s, tuple):
                c, alias = s
                if c in asc:
                    f = ascending
                if c in desc:
                    f = descending

                ordering = f(coalesce(
                                alias.bool_value,
                                alias.int_value,
                                alias.float_value,
                                alias.date_value,
                                alias.datetime_value,
                                alias.str_value))
                self.query = self.query.order_by(ordering)
            else:
                if s.key in asc:
                    self.query = self.query.order_by(s.asc())
                if s.key in desc:
                    self.query = self.query.order_by(s.desc())

        return self
Code Example #43
File: utils.py (Project: margus-parnsalu/Lifecycle)
def sqla_dyn_filters(filter_dict, query_object, validation_class):
    """SqlAlchemy query object modification with dynamic filters"""
    for attr, value in filter_dict.items():
        if value == '':
            value = '%'
        try:
            query_object = (query_object.filter(coalesce(getattr(validation_class, attr), '').
                                                ilike(value)))
        except:
            pass#When model object does not have dictionary value do nothing
    return query_object
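Because empty filter values are widened to '%' and each column is wrapped in coalesce(column, ''), rows whose column is NULL still match the wildcard instead of being silently dropped. A hypothetical usage sketch (the Employee model, the data, and the SQLite session are stand-ins, not part of the original project; SQLAlchemy 1.4+ assumed):

# Hypothetical usage of sqla_dyn_filters defined above; model and data are made up.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Employee(Base):
    __tablename__ = 'employee'
    id = Column(Integer, primary_key=True)
    department = Column(String(50))   # may be NULL
    location = Column(String(50))     # may be NULL

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([Employee(department='IT-support', location=None),
                     Employee(department='HR', location='Tallinn')])
    session.commit()

    filters = {'department': 'IT%', 'location': ''}   # '' is widened to '%'
    query = sqla_dyn_filters(filters, session.query(Employee), Employee)
    print([e.department for e in query.all()])   # ['IT-support']: the NULL location still matches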
Code Example #44
File: question.py (Project: juniorsilver/dokomoforms)
def get_free_sequence_number(connection: Connection, survey_id: str) -> int:
    """
    Return the highest existing sequence number + 1 (or 1 if there aren't
    any) associated with the given survey_id.

    :param connection: a SQLAlchemy Connection
    :param survey_id: the UUID of the survey
    :return: the free sequence number
    """
    sequence_number = question_table.c.sequence_number
    return connection.execute(select(
        [coalesce(sqlmax(sequence_number, type_=Integer), 0)])).scalar() + 1
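The coalesce around the aggregate is what makes this return 1 for a survey with no questions yet: MAX over zero rows yields NULL, coalesce turns that into 0, and the +1 produces the first free number. The same pattern in isolation, against a throwaway SQLite table (hypothetical, and assuming SQLAlchemy 1.4+):

# Self-contained illustration of coalesce(max(...), 0) + 1 over an empty table.
from sqlalchemy import Column, Integer, MetaData, Table, create_engine, select
from sqlalchemy.sql.functions import coalesce
from sqlalchemy.sql.functions import max as sqlmax

metadata = MetaData()
questions = Table('questions', metadata, Column('sequence_number', Integer))

engine = create_engine('sqlite://')
metadata.create_all(engine)

with engine.connect() as conn:
    next_number = conn.execute(
        select(coalesce(sqlmax(questions.c.sequence_number), 0) + 1)
    ).scalar()
    print(next_number)  # 1, because MAX over an empty table is NULL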
Code Example #45
File: format.py (Project: Colombia1819/specify7)
        def make_expr(query, fieldNode):
            path = fieldNode.text.split('.')
            query, table, model, specify_field = build_join(query, specify_model, orm_table, path, join_cache)
            if specify_field.is_relationship:
                formatter_name = fieldNode.attrib.get('formatter', None)
                query, expr = self.objformat(query, table, formatter_name, join_cache)
            else:
                expr = self._fieldformat(specify_field, getattr(table, specify_field.name))

            if 'sep' in fieldNode.attrib:
                expr = concat(fieldNode.attrib['sep'], expr)

            return query, coalesce(expr, '')
Code Example #46
File: grid.py (Project: RaHus/portal)
def add_connection_info(results, by_user):
    if by_user.primary_type == 'customer':
        return results
    empl_ids = map(attrgetter('id'), chain.from_iterable(map(attrgetter('all_users'), results)))
    if empl_ids:
        last_conns = DBSession.query(Connection.user_id, Connection.accepted,
                                     Connection.accept_at, Connection.sent_at). \
            filter(Connection.by_user == by_user, Connection.user_id.in_(empl_ids)) \
            .order_by(coalesce(Connection.accept_at, Connection.sent_at)).all()
        conn_data_by_user_id = dict(zip(map(itemgetter(0), last_conns), last_conns))
        for comp in results:
            for employee in comp.all_users:
                setattr(employee, 'connection', conn_data_by_user_id.get(employee.id))
    return results
Code Example #47
File: sample.py (Project: helixyte/TheLMA)
def create_mapper(sample_tbl, sample_molecule_tbl, molecule_tbl,
                  molecule_design_pool_tbl):
    "Mapper factory."
    s = sample_tbl
    sm = sample_molecule_tbl
    m = molecule_tbl
    mdp = molecule_design_pool_tbl
    s1 = sample_tbl.alias()
    # FIXME: The following construct introduces a dependency on string_agg
    #        in the SQL engine. Consider a materialized view instead.
    mds_sel = select(
        [mdp.c.molecule_design_set_id],
        mdp.c.member_hash ==
          select([func.md5(
                    string_agg(cast(m.c.molecule_design_id, String),
                               literal(';'),
                               order_by=m.c.molecule_design_id))
                  ],
                 from_obj=[s1.join(sm,
                                   and_(sm.c.sample_id == s1.c.sample_id,
                                        s1.c.sample_id == s.c.sample_id))
                           .join(m,
                                 m.c.molecule_id == sm.c.molecule_id)
                           ]) \
                .group_by(sm.c.sample_id))
    m = mapper(Sample, sample_tbl,
            id_attribute='sample_id',
            properties=dict(
                container=relationship(Container,
                                       uselist=False,
                                       back_populates='sample'),
                sample_molecules=
                        relationship(SampleMolecule,
                                     back_populates='sample',
                                     cascade='all,delete,delete-orphan',
                                     ),
                molecule_design_pool_id=
                    column_property(coalesce(mds_sel.as_scalar(), null()),
                                    deferred=True),
                ),
            polymorphic_on=sample_tbl.c.sample_type,
            polymorphic_identity=SAMPLE_TYPES.BASIC
            )
    # Listen to changes to the sample_type attribute.
    event.listen(Sample.sample_type, "set", check_set_sample_type) # pylint: disable=E1101
    return m
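The coalesce(mds_sel.as_scalar(), null()) above substitutes NULL when the correlated subquery finds no matching design set, and deferring the column_property keeps the expensive string_agg/md5 subquery out of ordinary loads. A much smaller sketch of the same idea, a coalesce-wrapped correlated scalar subquery mapped as a deferred column_property, follows; the Parent/Child models are hypothetical (not TheLMA's schema), the fallback is 0 rather than null() purely to make the effect visible, and SQLAlchemy 1.4+ is assumed.

# Hypothetical models showing a coalesce-wrapped correlated subquery as a
# deferred column_property.
from sqlalchemy import Column, ForeignKey, Integer, create_engine, func, select
from sqlalchemy.orm import Session, column_property, declarative_base
from sqlalchemy.sql.functions import coalesce

Base = declarative_base()

class Child(Base):
    __tablename__ = 'child'
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey('parent.id'))

class Parent(Base):
    __tablename__ = 'parent'
    id = Column(Integer, primary_key=True)
    # Highest child id, or 0 when there are no children; loaded only on access.
    latest_child_id = column_property(
        coalesce(
            select(func.max(Child.id))
            .where(Child.parent_id == id)
            .correlate_except(Child)
            .scalar_subquery(),
            0,
        ),
        deferred=True,
    )

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Parent())
    session.commit()
    print(session.query(Parent).first().latest_child_id)  # 0, not NULL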
Code Example #48
File: filter.py (Project: paulfitz/catsql)
    def _add_grep(self, table, query, sequence, case_sensitive):
        # functions.concat would be neater, but doesn't seem to translate
        # correctly on sqlite
        parts = ''
        for idx, column in enumerate(table.columns):
            if not self.ok_column(column.name):
                continue
            if parts != '':
                parts = parts + ' // '
            part = functions.coalesce(expression.cast(column,
                                                      types.Unicode),
                                      '')
            parts = parts + part
        if case_sensitive:
            query = query.filter(parts.contains(sequence))
        else:
            query = query.filter(parts.ilike('%%' + sequence + '%%'))
        return query
Code Example #49
File: format.py (Project: Colombia1819/specify7)
    def objformat(self, query, orm_table, formatter_name, join_cache=None):
        logger.info('formatting %s using %s', orm_table, formatter_name)
        specify_model = datamodel.get_table(inspect(orm_table).class_.__name__, strict=True)
        formatterNode = self.getFormatterDef(specify_model, formatter_name)
        logger.debug("using dataobjformater: %s", ElementTree.tostring(formatterNode))

        switchNode = formatterNode.find('switch')

        def make_expr(query, fieldNode):
            path = fieldNode.text.split('.')
            query, table, model, specify_field = build_join(query, specify_model, orm_table, path, join_cache)
            if specify_field.is_relationship:
                formatter_name = fieldNode.attrib.get('formatter', None)
                query, expr = self.objformat(query, table, formatter_name, join_cache)
            else:
                expr = self._fieldformat(specify_field, getattr(table, specify_field.name))

            if 'sep' in fieldNode.attrib:
                expr = concat(fieldNode.attrib['sep'], expr)

            return query, coalesce(expr, '')

        def make_case(query, caseNode):
            field_exprs = []
            for node in caseNode.findall('field'):
                query, expr = make_expr(query, node)
                field_exprs.append(expr)

            expr = concat(*field_exprs) if len(field_exprs) > 1 else field_exprs[0]
            return query, caseNode.attrib.get('value', None), expr

        cases = []
        for caseNode in switchNode.findall('fields'):
            query, value, expr = make_case(query, caseNode)
            cases.append((value, expr))

        if switchNode.attrib.get('single', 'true') == 'true':
            value, expr = cases[0]
        else:
            control_field = getattr(orm_table, switchNode.attrib['field'])
            expr = case(cases, control_field)

        return query, coalesce(expr, '')
Code Example #50
File: calculator.py (Project: eaobservatory/hedwig)
    def add_calculation(self, proposal_id, calculator_id,
                        mode, version, input_, output, calc_version, title):
        with self._transaction() as conn:
            calculation_alias = calculation.alias()

            result = conn.execute(calculation.insert().values({
                calculation.c.proposal_id: proposal_id,
                calculation.c.sort_order: select(
                    [coalesce(max_(calculation_alias.c.sort_order), 0) + 1]
                    ).where(calculation_alias.c.proposal_id == proposal_id),
                calculation.c.calculator_id: calculator_id,
                calculation.c.mode: mode,
                calculation.c.version: version,
                calculation.c.input: input_,
                calculation.c.output: output,
                calculation.c.date_run: datetime.utcnow(),
                calculation.c.calc_version: calc_version,
                calculation.c.title: title,
            }))

            return result.inserted_primary_key[0]
Code Example #51
File: xivo_db.py (Project: Eyepea/xivo-confgen)
    def all(self, *args, **kwargs):
        # get all supervised bsfilters
        (_u, _p, _e, _l) = [getattr(self.db, options)._table for options in
                ('userfeatures', 'phonefunckey', 'extenumbers', 'linefeatures')]

        _l2 = alias(_l)

        conds = [
            _l.c.iduserfeatures == _p.c.iduserfeatures,
            _u.c.id == _l.c.iduserfeatures,
            _p.c.typeextenumbers == 'extenfeatures',
            _p.c.typevalextenumbers == 'bsfilter',
            _p.c.typeextenumbersright == 'user',
            _p.c.supervision == 1,
            cast(_p.c.typeextenumbersright, VARCHAR(255)) == cast(_e.c.type, VARCHAR(255)),  # 'user'
            _p.c.typevalextenumbersright == cast(_l2.c.iduserfeatures, VARCHAR(255)),
            _e.c.typeval == cast(_l2.c.id, VARCHAR(255)),
            coalesce(_l.c.number, '') != ''
        ]
        if 'context' in kwargs:
            conds.append(_l.c.context == kwargs['context'])
        q = select([_e.c.exten, _l.c.number, _u.c.bsfilter], and_(*conds))
        return self.execute(q).fetchall()
Code Example #52
File: format.py (Project: Colombia1819/specify7)
    def aggregate(self, query, field, rel_table, aggregator_name):
        logger.info('aggregating field %s on %s using %s', field, rel_table, aggregator_name)
        specify_model = datamodel.get_table(field.relatedModelName, strict=True)
        aggregatorNode = self.getAggregatorDef(specify_model, aggregator_name)
        if aggregatorNode is None:
            logger.warn("aggregator is not defined")
            return literal("<Aggregator not defined.>")
        logger.debug("using aggregator: %s", ElementTree.tostring(aggregatorNode))
        formatter_name = aggregatorNode.attrib.get('format', None)
        separator = aggregatorNode.attrib.get('separator', None)
        order_by = aggregatorNode.attrib.get('orderfieldname', None)

        orm_table = getattr(models, field.relatedModelName)
        if order_by is not None and order_by != '':
            order_by = getattr(orm_table, order_by)

        join_column = list(inspect(getattr(orm_table, field.otherSideName)).property.local_columns)[0]
        subquery = orm.Query([]).select_from(orm_table) \
                             .filter(join_column == getattr(rel_table, rel_table._id)) \
                             .correlate(rel_table)
        subquery, formatted = self.objformat(subquery, orm_table, formatter_name, {})
        aggregated = coalesce(group_concat(formatted, separator, order_by), '')
        return subquery.add_column(aggregated).as_scalar()
Code Example #53
File: stockinfo.py (Project: helixyte/TheLMA)
def create_view(metadata, molecule_design_pool_tbl, stock_sample_tbl, sample_tbl, container_tbl):
    """
    stock_info_view factory.
    """
    mdp = molecule_design_pool_tbl
    ss = stock_sample_tbl
    c = container_tbl
    s = sample_tbl
    stock = (
        select(
            [
                (
                    literal("mdp")
                    + cast(mdp.c.molecule_design_set_id, String)
                    + literal("c")
                    + cast(coalesce(ss.c.concentration * 1e6, 0), String)
                ).label("stock_info_id"),
                mdp.c.molecule_design_set_id,
                # We need to set the label explicitly here because
                # mdp.c.molecule_type_id is really mdp.c.molecule_type.
                mdp.c.molecule_type_id.label("molecule_type_id"),
                # pylint: disable=E1101
                coalesce(ss.c.concentration, 0).label("concentration"),
                coalesce(func.count(c.c.container_id), 0).label("total_tubes"),
                coalesce(func.sum(s.c.volume), 0).label("total_volume"),
                coalesce(func.min(s.c.volume), 0).label("minimum_volume"),
                coalesce(func.max(s.c.volume), 0).label("maximum_volume")
                # pylint: enable=E1101
            ],
            from_obj=mdp.outerjoin(ss, ss.c.molecule_design_set_id == mdp.c.molecule_design_set_id)
            .outerjoin(s, s.c.sample_id == ss.c.sample_id)
            .outerjoin(c, and_(c.c.container_id == s.c.container_id, c.c.item_status == _STOCK_CONTAINER_ITEM_STATUS)),
        )
        .group_by(mdp.c.molecule_design_set_id, ss.c.concentration)
        .alias("ssi")
    )
    fkey_mds = ForeignKey(mdp.c.molecule_design_set_id)
    fkey_mds.parent = stock.c.molecule_design_set_id
    stock.c.molecule_design_set_id.foreign_keys.add(fkey_mds)
    fkey_mt = ForeignKey(mdp.c.molecule_type_id)
    fkey_mt.parent = stock.c.molecule_type_id
    stock.c.molecule_type_id.foreign_keys.add(fkey_mt)
    return view_factory(VIEW_NAME, metadata, stock)
Code Example #54
File: 08_Printer.py (Project: mrasu/SqlPuzzles)
from sqlalchemy import Column, String, case, func
from sqlalchemy.orm import Query
from sqlalchemy.sql.functions import coalesce
from util import Base, session


class PrinterControl(Base):
    __tablename__ = "printercontrol"

    user_id = Column(String(10))
    printer_name = Column(String(4), nullable=False, primary_key=True)
    printer_description = Column(String(40), nullable=False)

user_name = "leea"
user_printer = Query([PrinterControl]).filter(PrinterControl.user_id == user_name)
s = session.query(PrinterControl).filter(case(
    [(user_printer.exists(), PrinterControl.user_id == user_name)],
    else_=(PrinterControl.user_id == None))
)

[print(s.printer_name) for s in s]


# Exploit the fact that an aggregate function returns NULL when no rows match:
# if the user has no printer, MIN(...) is NULL and coalesce falls back to the anonymous printer.
anonymous_printer = Query([func.min(PrinterControl.printer_name)])\
    .filter(PrinterControl.user_id == None).as_scalar()

s = session.query(coalesce(func.min(PrinterControl.printer_name), anonymous_printer))\
    .filter(PrinterControl.user_id == user_name)

[print(s) for s in s]
Code Example #55
File: nlp.py (Project: assembl/assembl)
    def english_id_calc(self):
        return coalesce(self.english_id, self.id)
Code Example #56
File: nigredo.py (Project: RedGlow/supplycrate)
 def __init__(self, session):
     # prepare aliases
     self._output_item_alias = aliased(models.Item, name="output_item")
     self._ingredient_1_item_alias = aliased(models.Item, name="ingredient_1_item")
     self._ingredient_1_vendor_data_alias = aliased(models.VendorData, name="ingredient_1_vendor_data")
     self._ingredient_2_item_alias = aliased(models.Item, name="ingredient_2_item")
     self._ingredient_2_vendor_data_alias = aliased(models.VendorData, name="ingredient_2_vendor_data")
     self._ingredient_3_item_alias = aliased(models.Item, name="ingredient_3_item")
     self._ingredient_3_vendor_data_alias = aliased(models.VendorData, name="ingredient_3_vendor_data")
     self._ingredient_4_item_alias = aliased(models.Item, name="ingredient_4_item")
     self._ingredient_4_vendor_data_alias = aliased(models.VendorData, name="ingredient_4_vendor_data")
     self._ingredient_item_aliases = [
         self._ingredient_1_item_alias,
         self._ingredient_2_item_alias,
         self._ingredient_3_item_alias,
         self._ingredient_4_item_alias,
     ]
     self._ingredient_vendor_data_aliases = [
         self._ingredient_1_vendor_data_alias,
         self._ingredient_2_vendor_data_alias,
         self._ingredient_3_vendor_data_alias,
         self._ingredient_4_vendor_data_alias,
     ]
     # produce the labeled columns
     sum_func = lambda t1, t2: t1 + t2
     skill_point_cost = self._fold(
         lambda i: functions.coalesce(self._ingredient_vendor_data_aliases[i].skill_point_cost, literal_column("0")),
         sum_func,
     ).label("skill_point_cost")
     ingredients_are_sold = and_(
         *self._map(
             lambda i: or_(
                 self._ingredient_vendor_data_aliases[i].skill_point_cost != None,
                 self._ingredient_vendor_data_aliases[i].copper_cost != None,
                 self._ingredient_item_aliases[i].sell_count > literal_column("0"),
             )
         )
     ).label("ingredients_are_sold")
     self.__ingredients_are_sold = ingredients_are_sold
     output_is_bought = (self._output_item_alias.buy_count > literal_column("0")).label("output_is_bought")
     self.__output_is_bought = output_is_bought
     cost_bo, cost_bi = self._buy_o(
         lambda buy, o: self._fold(
             lambda i: self._if(
                 self._ingredient_vendor_data_aliases[i].copper_cost == None,
                 self._get_price(self._ingredient_item_aliases[i], buy),
                 self._least(
                     self._ingredient_vendor_data_aliases[i].copper_cost,
                     self._get_price(self._ingredient_item_aliases[i], buy),
                 ),
             )
             * self._get_ingredient_count(i),
             sum_func,
         ).label("cost_b" + o)
     )
     cost_b = {"o": cost_bo, "i": cost_bi}
     profit_so, profit_si = self._buy_i(
         lambda buy, i: (
             self._get_price(self._output_item_alias, buy)
             * models.SkillPointRecipe.output_count
             * literal_column("85")
             / literal_column("100")
         ).label("profit_s" + i)
     )
     profit_s = {"o": profit_so, "i": profit_si}
     net_profit_bo_so_per_sp, net_profit_bo_si_per_sp, net_profit_bi_so_per_sp, net_profit_bi_si_per_sp = self._b_s(
         lambda b, s, buy, sell: (
             self._round(
                 (profit_s[s] - cost_b[b])
                 / self._fold(
                     lambda i: functions.coalesce(
                         self._ingredient_vendor_data_aliases[i].skill_point_cost, literal_column("0")
                     ),
                     sum_func,
                 )
             )
         ).label("net_profit_b" + b + "_s" + s + "_per_sp")
     )
     # produce the query
     queryset = (
         session.query(
             models.SkillPointRecipe,
             self._output_item_alias,
             self._ingredient_1_item_alias,
             self._ingredient_1_vendor_data_alias,
             self._ingredient_2_item_alias,
             self._ingredient_2_vendor_data_alias,
             self._ingredient_3_item_alias,
             self._ingredient_3_vendor_data_alias,
             self._ingredient_4_item_alias,
             self._ingredient_4_vendor_data_alias,
             skill_point_cost,
             ingredients_are_sold,
             output_is_bought,
             cost_bo,
             cost_bi,
             profit_so,
             profit_si,
             net_profit_bo_so_per_sp,
             net_profit_bo_si_per_sp,
             net_profit_bi_so_per_sp,
             net_profit_bi_si_per_sp,
         )
         .join(self._output_item_alias, models.SkillPointRecipe.output_item)
         .outerjoin(
             (self._ingredient_1_item_alias, models.SkillPointRecipe.ingredient_1_item),
             (
                 self._ingredient_1_vendor_data_alias,
                 self._ingredient_1_vendor_data_alias.item_id == self._ingredient_1_item_alias.data_id,
             ),
         )
         .outerjoin(
             (self._ingredient_2_item_alias, models.SkillPointRecipe.ingredient_2_item),
             (
                 self._ingredient_2_vendor_data_alias,
                 self._ingredient_2_vendor_data_alias.item_id == self._ingredient_2_item_alias.data_id,
             ),
         )
         .outerjoin(
             (self._ingredient_3_item_alias, models.SkillPointRecipe.ingredient_3_item),
             (
                 self._ingredient_3_vendor_data_alias,
                 self._ingredient_3_vendor_data_alias.item_id == self._ingredient_3_item_alias.data_id,
             ),
         )
         .outerjoin(
             (self._ingredient_4_item_alias, models.SkillPointRecipe.ingredient_4_item),
             (
                 self._ingredient_4_vendor_data_alias,
                 self._ingredient_4_vendor_data_alias.item_id == self._ingredient_4_item_alias.data_id,
             ),
         )
     )
     # create column definitions
     column_descriptions = [
         tmodels.ColumnDescription("Item", True, self._output_item_alias.name, name="item"),
         tmodels.ColumnDescription("Ingredients", True, None, name="ingredients"),
         tmodels.ColumnDescription("Skill point cost", True, skill_point_cost, name="skillpointcost"),
         tmodels.ColumnDescription("Gold cost", True, [[cost_bo, cost_bo], [cost_bi, cost_bi]], name="goldcost"),
         tmodels.ColumnDescription("Profit", True, [[profit_so, profit_si], [profit_so, profit_si]], name="profit"),
         tmodels.ColumnDescription(
             "Net profit per skill point",
             True,
             [
                 [net_profit_bo_so_per_sp, net_profit_bo_si_per_sp],
                 [net_profit_bi_so_per_sp, net_profit_bi_si_per_sp],
             ],
             name="netprofit",
         ),
     ]
     # call super constructor
     tmodels.Table.__init__(self, column_descriptions, queryset)
Code Example #57
File: gooddeed.py (Project: RedGlow/supplycrate)
def _update_masterwork(session):
    # empty the rtlp table
    session.query(RTLPMemoryTable).delete()
    # fill the rtlp table with average prices per masterwork and rares
    select_query = session. \
        query(models.Item.rarity.label("rarity"),
              models.Item.weapon_type.label("weapon_type"),
              models.Item.level.label("level"),
              tmodels.Table._round(func.avg(models.Item.buy_price) * 0.85).label("price_i"),
              tmodels.Table._round(func.avg(models.Item.sell_price) * 0.85).label("price_o"),
              func.min(models.Item.last_tp_update).label("last_update")). \
        filter(models.Item.type == models.Item.WEAPON). \
        filter(or_(models.Item.rarity == models.Item.MASTERWORK, models.Item.rarity == models.Item.RARE)). \
        group_by(models.Item.rarity, models.Item.weapon_type, models.Item.level)
    insert_query = insert(RTLPMemoryTable).from_select(
        ["rarity", "weapon_type", "level", "price_i", "price_o", "last_update"],
        select_query
    )
    insert_query.execute()
    # clear the profit table
    session.query(Profit).delete()
    # fill the profit table with the average selling prices
    rtlp_m = aliased(models.RTLPMemoryTable, name="rtlp_m")
    rtlp_r = aliased(models.RTLPMemoryTable, name="rtlp_r")
    lvl = rtlp_m.level.label("level")
    select_query = session. \
        query(lvl,
              rtlp_m.weapon_type.label("weapon_type"),
              tmodels.Table._if(rtlp_r.price_o == None,
                                rtlp_m.price_o,
                                rtlp_m.price_o * 0.8 + rtlp_r.price_o * 0.2).label("avg_price_o"),
              tmodels.Table._if(rtlp_r.price_i == None,
                                rtlp_m.price_i,
                                rtlp_m.price_i * 0.8 + rtlp_r.price_i * 0.2).label("avg_price_i"),
              tmodels.Table._least(rtlp_r.last_update, rtlp_m.last_update).label("last_update")). \
        join(rtlp_r, and_(rtlp_m.weapon_type == rtlp_r.weapon_type, rtlp_m.level == rtlp_r.level)). \
        filter(rtlp_m.rarity == models.Item.MASTERWORK,
               rtlp_r.rarity == models.Item.RARE)
    insert_query = insert(Profit).from_select(
        ["level", "weapon_type", "avg_price_o", "avg_price_i", "last_update"],
        select_query
    )
    insert_query.execute()
    # create the rg_raw table
    session.query(RG_Raw).delete()
    # compute the average selling prices considering a uniform level-up probability between +5 and +12
    r = range(5, 13)
    p = {i: aliased(Profit, name="p_%d" % i) for i in r}
    avg_profit_so, avg_profit_si = tmodels.Table._buy_o(
        lambda l, s: (
            sum(coalesce(getattr(p[i], 'avg_price_'+s), 0) for i in r) /
            (8 - sum(tmodels.Table._if(getattr(p[i], "avg_price_" + s) == None, 1, 0) for i in r))
        ).label("avg_profit_s" + s)
    )
    last_update = tmodels.Table._least(*(
        coalesce(p[i].last_update, func.now())
        for i
        in r
    )).label("last_update")
    join_query = reduce(
        lambda prev, i: prev.outerjoin(p[i],
                                        and_(KarmaWeaponsData.weapon_type == p[i].weapon_type,
                                             KarmaWeaponsData.level + i == p[i].level)),
        r,
        session.query(KarmaWeaponsData.weapon_type,
                      KarmaWeaponsData.level,
                      KarmaWeaponsData.karma_cost,
                      avg_profit_so,
                      avg_profit_si,
                      last_update))
    condition1, condition2 = tmodels.Table._buy_o(
        lambda l, s: or_(*(getattr(p[i], "avg_price_" + s) != None for i in r))
    )
    filtered_query = join_query.filter(condition1).filter(condition2)
    insert_query = insert(RG_Raw).from_select(
        ['weapon_type', 'level', 'karma_cost', 'avg_profit_so', 'avg_profit_si', 'last_update'],
        filtered_query
    )
    insert_query.execute()
Code Example #58
File: receiver.py (Project: kerel-fs/ogn-python)
def update_receivers():
    """Update the receiver table."""
    # get the timestamp of last update
    last_update_query = app.session.query(
        coalesce(func.max(Receiver.lastseen), "2015-01-01 00:00:00").label("last_entry")
    )
    last_update = last_update_query.one().last_entry

    # get last receiver beacons since last update
    last_receiver_beacon_sq = (
        app.session.query(ReceiverBeacon.name, func.max(ReceiverBeacon.timestamp).label("lastseen"))
        .filter(ReceiverBeacon.timestamp >= last_update)
        .group_by(ReceiverBeacon.name)
        .subquery()
    )

    receivers_to_update = (
        app.session.query(
            ReceiverBeacon.name,
            ReceiverBeacon.latitude,
            ReceiverBeacon.longitude,
            ReceiverBeacon.altitude,
            last_receiver_beacon_sq.columns.lastseen,
            ReceiverBeacon.version,
            ReceiverBeacon.platform,
        )
        .filter(
            and_(
                ReceiverBeacon.name == last_receiver_beacon_sq.columns.name,
                ReceiverBeacon.timestamp == last_receiver_beacon_sq.columns.lastseen,
            )
        )
        .subquery()
    )

    # set country code to None if lat or lon changed
    count = (
        app.session.query(Receiver)
        .filter(
            and_(
                Receiver.name == receivers_to_update.columns.name,
                or_(
                    Receiver.latitude != receivers_to_update.columns.latitude,
                    Receiver.longitude != receivers_to_update.columns.longitude,
                ),
            )
        )
        .update(
            {
                "latitude": receivers_to_update.columns.latitude,
                "longitude": receivers_to_update.columns.longitude,
                "country_code": null(),
            }
        )
    )

    logger.info("Count of receivers who changed lat or lon: {}".format(count))

    # update lastseen of known receivers
    count = (
        app.session.query(Receiver)
        .filter(Receiver.name == receivers_to_update.columns.name)
        .update(
            {
                "altitude": receivers_to_update.columns.altitude,
                "lastseen": receivers_to_update.columns.lastseen,
                "version": receivers_to_update.columns.version,
                "platform": receivers_to_update.columns.platform,
            }
        )
    )

    logger.info("Count of receivers who where updated: {}".format(count))

    # add new receivers
    empty_sq = (
        app.session.query(
            ReceiverBeacon.name,
            ReceiverBeacon.latitude,
            ReceiverBeacon.longitude,
            ReceiverBeacon.altitude,
            last_receiver_beacon_sq.columns.lastseen,
            ReceiverBeacon.version,
            ReceiverBeacon.platform,
        )
        .filter(
            and_(
                ReceiverBeacon.name == last_receiver_beacon_sq.columns.name,
                ReceiverBeacon.timestamp == last_receiver_beacon_sq.columns.lastseen,
            )
        )
        .outerjoin(Receiver, Receiver.name == ReceiverBeacon.name)
        .filter(Receiver.name == null())
        .order_by(ReceiverBeacon.name)
    )

    for receiver_beacon in empty_sq.all():
        receiver = Receiver()
        receiver.name = receiver_beacon.name
        receiver.latitude = receiver_beacon.latitude
        receiver.longitude = receiver_beacon.longitude
        receiver.altitude = receiver_beacon.altitude
        receiver.firstseen = None
        receiver.lastseen = receiver_beacon.lastseen
        receiver.version = receiver_beacon.version
        receiver.platform = receiver_beacon.platform

        app.session.add(receiver)
        logger.info("{} added".format(receiver.name))

    # update firstseen if None
    firstseen_null_query = (
        app.session.query(Receiver.name, func.min(ReceiverBeacon.timestamp).label("firstseen"))
        .filter(Receiver.firstseen == null())
        .join(ReceiverBeacon, Receiver.name == ReceiverBeacon.name)
        .group_by(Receiver.name)
        .subquery()
    )

    count = (
        app.session.query(Receiver)
        .filter(Receiver.name == firstseen_null_query.columns.name)
        .update({"firstseen": firstseen_null_query.columns.firstseen})
    )
    logger.info("Total: {} receivers added".format(count))

    # update country code if None
    unknown_country_query = app.session.query(Receiver).filter(Receiver.country_code == null()).order_by(Receiver.name)

    for receiver in unknown_country_query.all():
        receiver.country_code = get_country_code(receiver.latitude, receiver.longitude)
        if receiver.country_code is not None:
            logger.info("Updated country_code for {} to {}".format(receiver.name, receiver.country_code))

    app.session.commit()
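The coalesce(func.max(Receiver.lastseen), "2015-01-01 00:00:00") at the top of this function is the incremental-update trick: on an empty receiver table the MAX is NULL, so the fallback date makes the very first run consider all beacons. A stripped-down sketch of just that query, with a stand-in model rather than the ogn-python one (SQLAlchemy 1.4+ and SQLite assumed):

# Stand-in Receiver model; shows only the default "last update" timestamp pattern.
from sqlalchemy import Column, DateTime, Integer, create_engine, func
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.sql.functions import coalesce

Base = declarative_base()

class Receiver(Base):
    __tablename__ = 'receivers'
    id = Column(Integer, primary_key=True)
    lastseen = Column(DateTime)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    last_update = session.query(
        coalesce(func.max(Receiver.lastseen), "2015-01-01 00:00:00").label("last_entry")
    ).one().last_entry
    print(last_update)  # 2015-01-01 00:00:00 while the table is still empty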