Example #1
def venues():
    # replace NULL city/state with 'N/A'; each (city, state) pair forms one group
    city_venues = db.session.query(func.coalesce(Venue.city, 'N/A'),
                                   func.coalesce(Venue.state, 'N/A'),
                                   func.array_agg(Venue.id),
                                   func.array_agg(Venue.name)).group_by(
                                       Venue.city, Venue.state).all()
    data = []
    for venue_data_tuple in city_venues:
        city_venue_dict = {}
        city_venue_dict["city"] = venue_data_tuple[0]
        city_venue_dict["state"] = venue_data_tuple[1]
        city_venue_dict["venues"] = []
        for i in range(len(venue_data_tuple[2])):
            venue_dict = {}
            venue_dict["id"] = venue_data_tuple[2][i]
            venue_dict["name"] = venue_data_tuple[3][i]
            # count the number of shows linked to Venue that have start_time after now
            venue_dict["num_upcoming_shows"] = sum([
                x.start_time > datetime.now(pytz.utc)
                for x in Venue.query.get(venue_data_tuple[2][i]).venue_shows
            ])
            city_venue_dict["venues"].append(venue_dict)
        data.append(city_venue_dict)
    return render_template('pages/venues.html', areas=data)
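
A minimal, self-contained Core sketch of the same grouped array_agg pattern, with an assumed venue table and SQLAlchemy 1.4+, compiled against the PostgreSQL dialect so the generated SQL can be inspected:

from sqlalchemy import Column, Integer, MetaData, String, Table, func, select
from sqlalchemy.dialects import postgresql

metadata = MetaData()
# hypothetical stand-in for the Venue model used above
venue = Table(
    "venue", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String),
    Column("city", String),
    Column("state", String),
)

stmt = (
    select(
        func.coalesce(venue.c.city, "N/A"),
        func.coalesce(venue.c.state, "N/A"),
        func.array_agg(venue.c.id),
        func.array_agg(venue.c.name),
    )
    # rows with NULL city/state already form a single group;
    # the COALESCE in the select list only relabels that group as 'N/A'
    .group_by(venue.c.city, venue.c.state)
)
print(stmt.compile(dialect=postgresql.dialect()))

Counting upcoming shows inside the same grouped statement (for example with a filtered count over a joined shows table) would avoid the one-query-per-venue lookup performed in the loop above.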
Example #2
 def query(self):
     pq = qualstat_getstatdata(column("eval_type") == "f")
     base = alias(pq)
     query = (select([
         func.array_agg(column("queryid")).label("queryids"),
         "qualid",
         cast(column("quals"), JSONB).label('quals'),
         "occurences",
         "execution_count",
         func.array_agg(column("query")).label("queries"),
         "avg_filter",
         "filter_ratio"
     ]).select_from(
         join(base, powa_databases,
              onclause=(
                  powa_databases.c.oid == literal_column("dbid"))))
         .where(powa_databases.c.datname == bindparam("database"))
         .where(column("avg_filter") > 1000)
         .where(column("filter_ratio") > 0.3)
         .group_by(column("qualid"), column("execution_count"),
                   column("occurences"),
                   cast(column("quals"), JSONB),
                  column("avg_filter"), column("filter_ratio"))
         .order_by(column("occurences").desc())
         .limit(200))
     return query
Example #3
    def _build_subquery(self):
        if not self.kwargs.get("tad_set_uuid"):
            return None

        set_pk = TadSet.objects.get(sodar_uuid=self.kwargs["tad_set_uuid"]).pk

        if self.kwargs["database_select"] == "refseq":
            term_join_gene_id = GeneInterval.sa.gene_id == Hgnc.sa.entrez_id
        else:  # kwargs["database_select"] == "ensembl"
            term_join_gene_id = GeneInterval.sa.gene_id == Hgnc.sa.ensembl_gene_id

        query = (select([
            func.array_agg(
                GeneInterval.sa.gene_id).label("itv_shared_gene_ids"),
            func.array_agg(Hgnc.sa.symbol).label("itv_shared_gene_symbols"),
        ]).select_from(
            GeneInterval.sa.table.outerjoin(
                Hgnc.sa.table, term_join_gene_id).outerjoin(
                    TadInterval.sa.table,
                    and_(
                        TadInterval.sa.tad_set_id == set_pk,
                        overlaps(TadInterval, StructuralVariant),
                    ),
                )).where(
                    and_(
                        GeneInterval.sa.database ==
                        self.kwargs["database_select"],
                        overlaps(TadInterval, GeneInterval),
                    )).alias("subquery_genes_intervals_inner")
                 ).lateral("subquery_genes_intervals_outer")

        return query
Example #4
def attribute_pictures(language_code='en'):
    """
    Retrieve attribute pictures in a given language. If no language code
    parameter is supplied, the default language is English.
    
    :param language_code: language code, in lower case two letter format. Example: 'fr' for French
    :type language_code: string
    
    
    **Example requests**:
     
     .. sourcecode:: html
         
         # get attribute pictures in default language (English)
         GET {0}/api/v0.2/attributes/pictures
         
         # get attribute pictures in French
         GET {0}/api/v0.2/attributes/pictures/fr  
    """
    
    result = tree()
    
    picpath = conf.ATTRIBUTES_PICTURES_SITE_PATH
   
    query = db.session.query(
        Attribute.name,
        Category.name.label("category_name"),
        func.array_agg(
            picpath + '/' + language_code + '/' + AttributePicture.file_name
        ).label("file_names"))\
        .join(AttributePicture, Attribute.id == AttributePicture.attribute_id)\
        .join(Category, Category.id == Attribute.category_id)\
        .filter(AttributePicture.language_code == language_code)\
        .group_by(Category.name, Attribute.name)

    for a in query:
        result[a.category_name][a.name] = a.file_names
    
    return jsonify(result)
Example #5
def sql_jsonb_agg(table: Table) -> Any:
    """Equivalent to `func.JSONB_AGG` but removes empty elements from the output"""
    return func.to_jsonb(
        func.array_remove(
            func.array_agg(table.table_valued().distinct()),
            None,
        )).label(table.name)
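
A hedged sketch of what sql_jsonb_agg() emits for a small assumed table; Table.table_valued() requires SQLAlchemy 1.4+, and the array_remove(..., NULL) step is what drops rows contributed by unmatched outer joins:

from sqlalchemy import Column, Integer, MetaData, String, Table, func, select
from sqlalchemy.dialects import postgresql

metadata = MetaData()
tags = Table("tags", metadata, Column("id", Integer), Column("name", String))

# same shape as sql_jsonb_agg(tags):
# to_jsonb(array_remove(array_agg(DISTINCT tags), NULL)) AS tags
agg = func.to_jsonb(
    func.array_remove(func.array_agg(tags.table_valued().distinct()), None)
).label(tags.name)

print(select(agg).compile(dialect=postgresql.dialect()))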
Example #6
def query_parts():
    """Query Product objects."""
    data = request.get_json().get("query")

    try:
        return_data = QuerySchema(["search"]).load(data)
        # Surface any errors that load() collected silently
        if len(return_data.errors) > 0:
            raise ValidationError(next(iter(return_data.errors.values()))[0])
        data = return_data.data
    except ValidationError as e:
        return e.messages[0], 400

    search = data["search"]
    sort_by = data["sort_by"]
    limit = data["limit"]
    offset = data["offset"]

    query = db.session.query(
        Product.id,
        Product.name,
        Product.supplier_id,
        Supplier.name.label("supplier_name"),
        Product.units_in_stock,
        Product.unit_price,
        Product.part_number,
        Product.image_name,
        Product.image,
        func.array_agg(ProductCategory.category_id).label("product_category_list")
    ).join(
        Supplier,
        Supplier.id == Product.supplier_id,
        isouter=True
    ).join(
        ProductCategory,
        ProductCategory.product_id == Product.id,
        isouter=True
    ).filter(
        or_(
            Product.name.ilike(search + "%"),
            Product.name.ilike("% " + search + "%")
        )
    ).group_by(
        Product.id,
        Supplier.name
    )

    if sort_by == "id":
        query = query.order_by(Product.id)
    elif sort_by == "alpha":
        query = query.order_by(Product.name)

    query = query.offset(offset * limit).limit(limit + 1)

    query = query.all()

    query = process_result(query, ["name"])

    return jsonify(result=populate_return(query, limit, offset))
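
The paging arithmetic above (OFFSET offset * limit, LIMIT limit + 1) fetches one surplus row purely to signal that a further page exists; populate_return() is not shown here, so the following is only a hedged sketch of that convention:

def page_window(rows, limit):
    """Split up to limit+1 fetched rows into (rows_for_this_page, has_next)."""
    return rows[:limit], len(rows) > limit

# 11 rows fetched with limit=10: 10 rows returned and a next page exists
assert page_window(list(range(11)), 10) == (list(range(10)), True)
# 7 rows fetched with limit=10: all 7 returned, no next page
assert page_window(list(range(7)), 10) == (list(range(7)), False)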
Example #7
    def test_cloned_alias(self):
        entity = table(
            "entity", column("id"), column("employer_id"), column("name")
        )
        tag = table("tag", column("tag"), column("entity_id"))

        tags = (
            select([tag.c.entity_id, func.array_agg(tag.c.tag).label("tags")])
            .group_by(tag.c.entity_id)
            .cte("unaliased_tags")
        )

        entity_tags = tags.alias(name="entity_tags")
        employer_tags = tags.alias(name="employer_tags")

        q = (
            select([entity.c.name])
            .select_from(
                entity.outerjoin(
                    entity_tags, tags.c.entity_id == entity.c.id
                ).outerjoin(
                    employer_tags, tags.c.entity_id == entity.c.employer_id
                )
            )
            .where(entity_tags.c.tags.op("@>")(bindparam("tags")))
            .where(employer_tags.c.tags.op("@>")(bindparam("tags")))
        )

        self.assert_compile(
            q,
            "WITH unaliased_tags AS "
            "(SELECT tag.entity_id AS entity_id, array_agg(tag.tag) AS tags "
            "FROM tag GROUP BY tag.entity_id)"
            " SELECT entity.name "
            "FROM entity "
            "LEFT OUTER JOIN unaliased_tags AS entity_tags ON "
            "unaliased_tags.entity_id = entity.id "
            "LEFT OUTER JOIN unaliased_tags AS employer_tags ON "
            "unaliased_tags.entity_id = entity.employer_id "
            "WHERE (entity_tags.tags @> :tags) AND "
            "(employer_tags.tags @> :tags)",
        )

        cloned = q.params(tags=["tag1", "tag2"])
        self.assert_compile(
            cloned,
            "WITH unaliased_tags AS "
            "(SELECT tag.entity_id AS entity_id, array_agg(tag.tag) AS tags "
            "FROM tag GROUP BY tag.entity_id)"
            " SELECT entity.name "
            "FROM entity "
            "LEFT OUTER JOIN unaliased_tags AS entity_tags ON "
            "unaliased_tags.entity_id = entity.id "
            "LEFT OUTER JOIN unaliased_tags AS employer_tags ON "
            "unaliased_tags.entity_id = entity.employer_id "
            "WHERE (entity_tags.tags @> :tags) AND "
            "(employer_tags.tags @> :tags)",
        )
Example #9
    def test_cloned_alias(self):
        entity = table(
            'entity', column('id'), column('employer_id'), column('name'))
        tag = table('tag', column('tag'), column('entity_id'))

        tags = select([
            tag.c.entity_id,
            func.array_agg(tag.c.tag).label('tags'),
        ]).group_by(tag.c.entity_id).cte('unaliased_tags')

        entity_tags = tags.alias(name='entity_tags')
        employer_tags = tags.alias(name='employer_tags')

        q = (
            select([entity.c.name])
            .select_from(
                entity
                .outerjoin(entity_tags, tags.c.entity_id == entity.c.id)
                .outerjoin(employer_tags,
                           tags.c.entity_id == entity.c.employer_id)
            )
            .where(entity_tags.c.tags.op('@>')(bindparam('tags')))
            .where(employer_tags.c.tags.op('@>')(bindparam('tags')))
        )

        self.assert_compile(
            q,
            'WITH unaliased_tags AS '
            '(SELECT tag.entity_id AS entity_id, array_agg(tag.tag) AS tags '
            'FROM tag GROUP BY tag.entity_id)'
            ' SELECT entity.name '
            'FROM entity '
            'LEFT OUTER JOIN unaliased_tags AS entity_tags ON '
            'unaliased_tags.entity_id = entity.id '
            'LEFT OUTER JOIN unaliased_tags AS employer_tags ON '
            'unaliased_tags.entity_id = entity.employer_id '
            'WHERE (entity_tags.tags @> :tags) AND '
            '(employer_tags.tags @> :tags)'
        )

        cloned = q.params(tags=['tag1', 'tag2'])
        self.assert_compile(
            cloned,
            'WITH unaliased_tags AS '
            '(SELECT tag.entity_id AS entity_id, array_agg(tag.tag) AS tags '
            'FROM tag GROUP BY tag.entity_id)'
            ' SELECT entity.name '
            'FROM entity '
            'LEFT OUTER JOIN unaliased_tags AS entity_tags ON '
            'unaliased_tags.entity_id = entity.id '
            'LEFT OUTER JOIN unaliased_tags AS employer_tags ON '
            'unaliased_tags.entity_id = entity.employer_id '
            'WHERE (entity_tags.tags @> :tags) AND '
            '(employer_tags.tags @> :tags)')
Example #10
def available_translations():
    """
    Retrieve language codes of available translations.
    """
    result = tree()

    subquery = db.session.query(Translation.language_code).group_by(
        Translation.language_code).subquery()
    available_languages = db.session.query(
        func.array_agg(subquery.c.language_code)).first()
    result["languages"] = available_languages[0]

    return jsonify_utf8(result)
Example #11
File: dictionary.py (project: lopalo/drill)
 def on_get(self, req, resp, phrase_id):
     columns = []
     columns.extend(phrase.c)
     columns.append(
         f.coalesce(
             select([f.array_agg(gs_phrase.c.section_id)]).
             where(gs_phrase.c.phrase_id == phrase_id).
             as_scalar(),
             []
         ).
         label("grammar_sections")
     )
     columns.append(
         f.coalesce(
             select([f.array_agg(theme_phrase.c.theme_id)]).
             where(theme_phrase.c.phrase_id == phrase_id).
             as_scalar(),
             []
         ).
         label("themes")
     )
     sel = select(columns).where(phrase.c.id == phrase_id)
     resp.body = phrase_editor_view(self.db.execute(sel).fetchone())
Example #12
def available_translations():
    """
    Retrieve language codes of available translations.
    """
    result = tree()

    subquery = db.session.query(Translation.language_code).group_by(
        Translation.language_code).subquery()
    available_languages = db.session.query(
        func.array_agg(subquery.c.language_code)).first()
    # the aggregated array is the only column of the single result row
    result["languages"] = available_languages[0]

    return Response(json.dumps(result, indent=3),
                    mimetype='application/json;charset=utf-8')
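
For reference, a hedged Core sketch (assumed translation table, SQLAlchemy 1.4+): the subquery plus GROUP BY above only de-duplicates language codes, which array_agg(DISTINCT ...) can express in a single statement:

from sqlalchemy import Column, Integer, MetaData, String, Table, distinct, func, select
from sqlalchemy.dialects import postgresql

metadata = MetaData()
translation = Table(
    "translation", metadata,
    Column("id", Integer, primary_key=True),
    Column("language_code", String(2)),
)

# SELECT array_agg(DISTINCT translation.language_code) FROM translation
stmt = select(func.array_agg(distinct(translation.c.language_code)))
print(stmt.compile(dialect=postgresql.dialect()))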
Example #13
 def query(self):
     pq = qualstat_getstatdata(bindparam("server"),
                               column("eval_type") == "f")
     base = alias(pq)
     query = (
         select([
             # queryid in pg11+ is int64, so the value can exceed javascript's
             # Number.MAX_SAFE_INTEGER, which means that the value can get
             # truncated by the browser, leading to looking up a nonexistent
             # queryid when processing this data.  To avoid that, simply cast
             # the value to text.
             func.array_agg(cast(column("queryid"),
                                 TEXT)).label("queryids"),
             column("qualid"),
             cast(column("quals"), JSONB).label('quals'),
             column("occurences"),
             column("execution_count"),
             func.array_agg(column("query")).label("queries"),
             column("avg_filter"),
             column("filter_ratio")
         ]).select_from(
             join(base,
                  powa_databases,
                  onclause=and_(
                      powa_databases.c.oid == literal_column("dbid"),
                      powa_databases.c.srvid == literal_column("srvid")))).
         where(powa_databases.c.datname == bindparam("database")).where(
             powa_databases.c.srvid == bindparam("server")).where(
                 column("avg_filter") > 1000).where(
                     column("filter_ratio") > 0.3).group_by(
                         column("qualid"), column("execution_count"),
                         column("occurences"), cast(column("quals"), JSONB),
                         column("avg_filter"),
                         column("filter_ratio")).order_by(
                             column("occurences").desc()).limit(200))
     return query
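
Python's `and` cannot combine two SQL expressions (it either raises or keeps only one operand), which is why the ON clause above is built with and_(). A hedged standalone sketch of that composite join condition, using stand-in tables and SQLAlchemy 1.4+:

from sqlalchemy import and_, column, join, literal_column, select, table
from sqlalchemy.dialects import postgresql

# stand-ins for the aliased qualstat data and the powa_databases table
base = table("base", column("dbid"), column("srvid"), column("queryid"))
powa_databases = table("powa_databases", column("oid"), column("srvid"),
                       column("datname"))

onclause = and_(
    powa_databases.c.oid == literal_column("dbid"),
    powa_databases.c.srvid == literal_column("srvid"),
)
stmt = select(base.c.queryid).select_from(join(base, powa_databases, onclause))
# ... ON powa_databases.oid = dbid AND powa_databases.srvid = srvid
print(stmt.compile(dialect=postgresql.dialect()))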
Example #14
    def test_cloned_alias(self):
        entity = table('entity', column('id'), column('employer_id'),
                       column('name'))
        tag = table('tag', column('tag'), column('entity_id'))

        tags = select([
            tag.c.entity_id,
            func.array_agg(tag.c.tag).label('tags'),
        ]).group_by(tag.c.entity_id).cte('unaliased_tags')

        entity_tags = tags.alias(name='entity_tags')
        employer_tags = tags.alias(name='employer_tags')

        q = (select([entity.c.name]).select_from(
            entity.outerjoin(
                entity_tags, tags.c.entity_id == entity.c.id).outerjoin(
                    employer_tags,
                    tags.c.entity_id == entity.c.employer_id)).where(
                        entity_tags.c.tags.op('@>')(bindparam('tags'))).where(
                            employer_tags.c.tags.op('@>')(bindparam('tags'))))

        self.assert_compile(
            q, 'WITH unaliased_tags AS '
            '(SELECT tag.entity_id AS entity_id, array_agg(tag.tag) AS tags '
            'FROM tag GROUP BY tag.entity_id)'
            ' SELECT entity.name '
            'FROM entity '
            'LEFT OUTER JOIN unaliased_tags AS entity_tags ON '
            'unaliased_tags.entity_id = entity.id '
            'LEFT OUTER JOIN unaliased_tags AS employer_tags ON '
            'unaliased_tags.entity_id = entity.employer_id '
            'WHERE (entity_tags.tags @> :tags) AND '
            '(employer_tags.tags @> :tags)')

        cloned = q.params(tags=['tag1', 'tag2'])
        self.assert_compile(
            cloned, 'WITH unaliased_tags AS '
            '(SELECT tag.entity_id AS entity_id, array_agg(tag.tag) AS tags '
            'FROM tag GROUP BY tag.entity_id)'
            ' SELECT entity.name '
            'FROM entity '
            'LEFT OUTER JOIN unaliased_tags AS entity_tags ON '
            'unaliased_tags.entity_id = entity.id '
            'LEFT OUTER JOIN unaliased_tags AS employer_tags ON '
            'unaliased_tags.entity_id = entity.employer_id '
            'WHERE (entity_tags.tags @> :tags) AND '
            '(employer_tags.tags @> :tags)')
Example #15
def unified_package_view(template, query_fn=None, **template_args):
    untracked = request.args.get('untracked') == '1'
    order_name = request.args.get('order_by', 'running,failing,name')
    # pylint: disable=E1101
    subq = db.query(Package.name,
                    func.array_agg(func.row(Package.collection_id,
                                            Package.tracked,
                                            Package.resolved,
                                            Package.last_complete_build_state))
                    .label("states"),
                    func.bool_or(Package.resolved).label("resolved"),
                    func.bool_or(Package.last_complete_build_state == Build.FAILED)
                    .label("failing"),
                    func.bool_or(Package.last_complete_build_id != Package.last_build_id)
                    .label("has_running_build"))\
        .filter(Package.blocked == False)\
        .group_by(Package.name)
    if not untracked:
        subq = subq.filter(Package.tracked == True)
    if query_fn:
        subq = query_fn(subq)
    subq = subq.subquery()

    order_map = {
        'name': [subq.c.name],
        'failing': [subq.c.resolved, Reversed(subq.c.failing)],
        'running': [NullsLastOrder(subq.c.has_running_build)],
    }
    order_names, order = get_order(order_map, order_name)

    package_query = db.query(subq.c.name, subq.c.states,
                             subq.c.has_running_build)
    page = package_query.order_by(*order).paginate(packages_per_page)
    page.items = map(UnifiedPackage, page.items)
    populate_package_groups(page.items)
    return render_template(template,
                           packages=page.items,
                           page=page,
                           order=order_names,
                           collection=None,
                           **template_args)
Example #16
File: tile.py (project: mozilla/splice)
def get_tile_ids_by_group(group_by, filters=None):
    from splice.environment import Environment
    env = Environment.instance()

    group_by_field = {
        'category': AdgroupCategory.category,
        'account_id': Account.id,
        'campaign_id': Campaign.id,
        'adgroup_id': Adgroup.id
    }.get(group_by)

    rows = (env.db.session.query(
        group_by_field.label(group_by),
        func.array_agg(Tile.id).label('tile_ids')).select_from(Tile).group_by(
            group_by_field))

    rows = add_joins_for_group_by(query=rows, group_by=group_by)
    rows = add_filters(rows, filters, group_by)
    rows = rows.all()

    return [tuple_to_dict(r) for r in rows] if rows else None
Example #17
def attribute_pictures(language_code='en'):
    """
    Retrieve attribute pictures.

    :param language_code: language code
    :type language_code: string
    """

    result = tree()

    picpath = conf.ATTRIBUTES_PICTURES_SITE_PATH

    query = db.session.query(
        Attribute.name,
        Category.name.label("category_name"),
        func.array_agg(
            picpath + '/' + language_code + '/' + AttributePicture.file_name
        ).label("file_names"))\
      .join(AttributePicture, Attribute.id == AttributePicture.attribute_id)\
      .join(Category, Category.id == Attribute.category_id)\
      .filter(AttributePicture.language_code == language_code)\
      .group_by(Category.name, Attribute.name)

    for a in query:
        result[a.category_name][a.name] = a.file_names

    return jsonify(result)
Example #18
def counthistory(page):
    '''
    Count history for all shelters for the past 14 days.
    Used for chart showing counts over time.
    Supports pagination with page in path
    '''
    tz = Prefs['timezone']

    pagesize = 14  # days
    daysback = int(page) * pagesize + pagesize - 1

    today = pendulum.today(tz).subtract(days=(int(page) * pagesize))
    backthen = pendulum.today(tz).subtract(days=daysback)

    date_list = func.generate_series(cast(backthen.to_date_string(), Date),
                                     cast(today.to_date_string(), Date),
                                     '1 day').alias('gen_day')

    time_series = db.session.query(Shelter.name.label('label'), func.array_agg(Count.personcount).label('data'))\
        .join(date_list, true())\
        .outerjoin(Count, (Count.day == column('gen_day')) & (Count.shelter_id == Shelter.id))

    if 'user' not in g or set(['admin', 'visitor']).isdisjoint(
            set([role.name for role in g.user.roles])):
        time_series = time_series.filter(Shelter.visible == True,
                                         Shelter.public)
    else:
        time_series = time_series.filter(Shelter.visible == True)

    time_series = time_series\
        .group_by(Shelter.name)\
        .order_by(Shelter.name)

    results = {
        "dates": [d.to_date_string() for d in (today - backthen)],
        "shelters": [row._asdict() for row in time_series]
    }
    return jsonify(results)
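
A hedged Core sketch (assumed count table, SQLAlchemy 1.4+) of the calendar pattern used above: generate_series() yields one row per day and the LEFT OUTER JOIN keeps days that have no matching count row:

from sqlalchemy import (Column, Date, Integer, MetaData, Table, cast, column,
                        func, select)
from sqlalchemy.dialects import postgresql

metadata = MetaData()
counts = Table("count", metadata, Column("day", Date),
               Column("personcount", Integer))

# one row per day in the window; the dates here are placeholders
days = func.generate_series(
    cast("2024-01-01", Date), cast("2024-01-14", Date), "1 day"
).alias("gen_day")

stmt = select(func.array_agg(counts.c.personcount)).select_from(
    days.join(counts, counts.c.day == column("gen_day"), isouter=True)
)
print(stmt.compile(dialect=postgresql.dialect()))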
Example #19
File: views.py (project: W3SS/koschei)
def unified_package_view(template, query_fn=None, **template_args):
    untracked = request.args.get('untracked') == '1'
    order_name = request.args.get('order_by', 'running,failing,name')
    # pylint: disable=E1101
    subq = db.query(Package.name,
                    func.array_agg(func.row(Package.collection_id,
                                            Package.tracked,
                                            Package.resolved,
                                            Package.last_complete_build_state))
                    .label("states"),
                    func.bool_or(Package.resolved).label("resolved"),
                    func.bool_or(Package.last_complete_build_state == Build.FAILED)
                    .label("failing"),
                    func.bool_or(Package.last_complete_build_id != Package.last_build_id)
                    .label("has_running_build"))\
        .filter(Package.blocked == False)\
        .group_by(Package.name)
    if not untracked:
        subq = subq.filter(Package.tracked == True)
    if query_fn:
        subq = query_fn(subq)
    subq = subq.subquery()

    order_map = {
        'name': [subq.c.name],
        'failing': [subq.c.resolved, Reversed(subq.c.failing)],
        'running': [NullsLastOrder(subq.c.has_running_build)],
    }
    order_names, order = get_order(order_map, order_name)

    package_query = db.query(subq.c.name, subq.c.states, subq.c.has_running_build)
    page = package_query.order_by(*order).paginate(packages_per_page)
    page.items = map(UnifiedPackage, page.items)
    populate_package_groups(page.items)
    return render_template(template, packages=page.items, page=page,
                           order=order_names, collection=None, **template_args)
Example #20
 def build_sqlalchemy_columns(
         self, fields, base_query_tables=None, custom_columns=None):
     '''
     Returns an ordered dict of sqlalchemy.sql.schema.Column objects, associated 
     with the sqlalchemy.sql.schema.Table definitions, which are bound to 
     the sqlalchemy.engine.Engine: 
     "Connects a Pool and Dialect together to provide a source of database 
     connectivity and behavior."
     
      @param fields - field definitions, from the resource schema
      @param base_query_tables - if specified, the fields for these tables
      will be available as part of the base query, so the column definitions
      become simpler, and do not need to be joined in.
      @param custom_columns - columns to include even if the field
      visibility is not set
     '''
     DEBUG_BUILD_COLUMNS = False or logger.isEnabledFor(logging.DEBUG)
     base_query_tables = base_query_tables or []
     custom_columns = custom_columns or []
     
     try:
         columns = OrderedDict()
         for field in fields:
             key = field['key']
             if key in custom_columns:
                 if DEBUG_BUILD_COLUMNS: 
                     logger.info(
                         'custom field: %r, %r', key,custom_columns[key])
                 columns[key] = custom_columns[key].label(key)
                 continue
             
             if DEBUG_BUILD_COLUMNS: 
                 logger.info('build column: %r, %r', field['key'], field)
             field_name = field.get('field', None)
             if not field_name:
                 field_name = field['key']
             
             field_table = field.get('table', None)
             
             if not field_table and DEBUG_BUILD_COLUMNS:
                 logger.info(
                     'field: %r, val: %r, skipping field because there is no '
                     '"field_table" value set',key,field)
                 continue
             if DEBUG_BUILD_COLUMNS: 
                 logger.info(
                     'field: %r, field_table: %r', field['key'], field_table )
             
             if field_table in base_query_tables:
                 # simple case: table.fields already selected in the base query:
                 # just need to specify them
                 if field_name in get_tables()[field_table].c:
                     col = get_tables()[field_table].c[field_name]
                 else:
                     raise Exception(
                         'field: %r, not found in table: %r'
                         % (field_name, field_table))
                 col = col.label(key)
                 columns[key] = col
                 
             elif field.get('linked_field_value_field', None):
                 link_table = field['table']
                 link_table_def = get_tables()[link_table]
                 linked_field_parent = field['linked_field_parent']
                 link_field = linked_field_parent + '_id'
                 join_args = { 
                     'link_table': link_table, 'link_field': link_field,
                     'parent_table': linked_field_parent
                     }
                 
                 if field['linked_field_type'] != 'fields.ListField':
                     join_stmt = select([link_table_def.c[field_name]]).\
                         where(text('{link_table}.{link_field}='
                                 '{parent_table}.{link_field}'.format(**join_args)))
                     if field.get('linked_field_meta_field', None):
                         # TODO: test - the linked meta field is the "datacolumn type"
                         linked_field_meta_field = field['linked_field_meta_field']
                         meta_field_obj = MetaHash.objects.get(
                             key=field['key'], scope=field['scope'])
                         meta_table_def = get_tables()['metahash']
                         join_stmt.join(meta_table_def, 
                             link_table_def.c[linked_field_meta_field]==
                                 getattr(meta_field_obj,'pk') )
                     join_stmt = join_stmt.label(key)
                     columns[key] = join_stmt
                 elif field['linked_field_type'] == 'fields.ListField':
                     join_stmt = select([link_table_def.c[field_name]]).\
                         where(text('{link_table}.{link_field}='
                                 '{parent_table}.{link_field}'.format(**join_args)))
 
                     if field.get('linked_field_meta_field', None):
                         # TODO: test - the linked meta field is the "datacolumn type"
                         linked_field_meta_field = field['linked_field_meta_field']
                         meta_field_obj = MetaHash.objects.get(
                             key=field['key'], scope=field['scope'])
                         meta_table_def = get_tables()['metahash']
                         join_stmt.join(meta_table_def, 
                             link_table_def.c[linked_field_meta_field]==
                                 getattr(meta_field_obj,'pk') )
                     
                     ordinal_field = field.get('ordinal_field', None)
                     if ordinal_field:
                         join_stmt = join_stmt.order_by(link_table_def.c[ordinal_field])
                     join_stmt = join_stmt.alias('a')
                     stmt2 = select([func.array_to_string(
                                     func.array_agg(column(field_name)),
                                                    LIST_DELIMITER_SQL_ARRAY)])
                     stmt2 = stmt2.select_from(join_stmt).label(key)
                     columns[key] = stmt2
             else:
                 if DEBUG_BUILD_COLUMNS:        
                     logger.info(
                         'field is not in the base tables %r, nor in a linked field, '
                         'and is not custom: %s', base_query_tables, key)
         if DEBUG_BUILD_COLUMNS: 
             logger.info('columns: %r', columns.keys())
         return columns
      except Exception as e:
          logger.exception('on build sqlalchemy columns')
          raise e
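
A hedged, standalone sketch of the fields.ListField branch above (assumed link table, SQLAlchemy 1.4+, ';' standing in for LIST_DELIMITER_SQL_ARRAY, and the correlation back to the parent row omitted): an ordered subquery is aggregated with array_agg() and flattened into one delimited string:

from sqlalchemy import Column, Integer, MetaData, String, Table, func, select
from sqlalchemy.dialects import postgresql

metadata = MetaData()
link = Table(
    "link", metadata,
    Column("parent_id", Integer),
    Column("value", String),
    Column("ordinal", Integer),
)

# ordered inner select, aliased like join_stmt.alias('a') above
inner = select(link.c.value).order_by(link.c.ordinal).subquery("a")

stmt = select(
    func.array_to_string(func.array_agg(inner.c.value), ";")
).select_from(inner)
print(stmt.compile(dialect=postgresql.dialect()))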
Example #21
class CoreFixtures(object):
    # lambdas which return a tuple of ColumnElement objects.
    # must return at least two objects that should compare differently.
    # to test more varieties of "difference" additional objects can be added.
    fixtures = [
        lambda: (
            column("q"),
            column("x"),
            column("q", Integer),
            column("q", String),
        ),
        lambda: (~column("q", Boolean), ~column("p", Boolean)),
        lambda: (
            table_a.c.a.label("foo"),
            table_a.c.a.label("bar"),
            table_a.c.b.label("foo"),
        ),
        lambda: (
            _label_reference(table_a.c.a.desc()),
            _label_reference(table_a.c.a.asc()),
        ),
        lambda: (_textual_label_reference("a"), _textual_label_reference("b")),
        lambda: (
            text("select a, b from table").columns(a=Integer, b=String),
            text("select a, b, c from table").columns(
                a=Integer, b=String, c=Integer),
            text("select a, b, c from table where foo=:bar").bindparams(
                bindparam("bar", type_=Integer)),
            text("select a, b, c from table where foo=:foo").bindparams(
                bindparam("foo", type_=Integer)),
            text("select a, b, c from table where foo=:bar").bindparams(
                bindparam("bar", type_=String)),
        ),
        lambda: (
            column("q") == column("x"),
            column("q") == column("y"),
            column("z") == column("x"),
            column("z") + column("x"),
            column("z") - column("x"),
            column("x") - column("z"),
            column("z") > column("x"),
            column("x").in_([5, 7]),
            column("x").in_([10, 7, 8]),
            # note these two are mathematically equivalent but for now they
            # are considered to be different
            column("z") >= column("x"),
            column("x") <= column("z"),
            column("q").between(5, 6),
            column("q").between(5, 6, symmetric=True),
            column("q").like("somstr"),
            column("q").like("somstr", escape="\\"),
            column("q").like("somstr", escape="X"),
        ),
        lambda: (
            table_a.c.a,
            table_a.c.a._annotate({"orm": True}),
            table_a.c.a._annotate({
                "orm": True
            })._annotate({"bar": False}),
            table_a.c.a._annotate({
                "orm": True,
                "parententity": MyEntity("a", table_a)
            }),
            table_a.c.a._annotate({
                "orm": True,
                "parententity": MyEntity("b", table_a)
            }),
            table_a.c.a._annotate(
                {
                    "orm": True,
                    "parententity": MyEntity("b", select([table_a]))
                }),
            table_a.c.a._annotate({
                "orm":
                True,
                "parententity":
                MyEntity("b",
                         select([table_a]).where(table_a.c.a == 5)),
            }),
        ),
        lambda: (
            table_a,
            table_a._annotate({"orm": True}),
            table_a._annotate({
                "orm": True
            })._annotate({"bar": False}),
            table_a._annotate({
                "orm": True,
                "parententity": MyEntity("a", table_a)
            }),
            table_a._annotate({
                "orm": True,
                "parententity": MyEntity("b", table_a)
            }),
            table_a._annotate({
                "orm": True,
                "parententity": MyEntity("b", select([table_a]))
            }),
        ),
        lambda: (
            table("a", column("x"), column("y")),
            table("a", column("x"), column("y"))._annotate({"orm": True}),
            table("b", column("x"), column("y"))._annotate({"orm": True}),
        ),
        lambda: (
            cast(column("q"), Integer),
            cast(column("q"), Float),
            cast(column("p"), Integer),
        ),
        lambda: (
            bindparam("x"),
            bindparam("y"),
            bindparam("x", type_=Integer),
            bindparam("x", type_=String),
            bindparam(None),
        ),
        lambda: (_OffsetLimitParam("x"), _OffsetLimitParam("y")),
        lambda: (func.foo(), func.foo(5), func.bar()),
        lambda: (func.current_date(), func.current_time()),
        lambda: (
            func.next_value(Sequence("q")),
            func.next_value(Sequence("p")),
        ),
        lambda: (True_(), False_()),
        lambda: (Null(), ),
        lambda: (ReturnTypeFromArgs("foo"), ReturnTypeFromArgs(5)),
        lambda: (FunctionElement(5), FunctionElement(5, 6)),
        lambda: (func.count(), func.not_count()),
        lambda: (func.char_length("abc"), func.char_length("def")),
        lambda: (GenericFunction("a", "b"), GenericFunction("a")),
        lambda: (CollationClause("foobar"), CollationClause("batbar")),
        lambda: (
            type_coerce(column("q", Integer), String),
            type_coerce(column("q", Integer), Float),
            type_coerce(column("z", Integer), Float),
        ),
        lambda: (table_a.c.a, table_b.c.a),
        lambda: (tuple_(1, 2), tuple_(3, 4)),
        lambda: (func.array_agg([1, 2]), func.array_agg([3, 4])),
        lambda: (
            func.percentile_cont(0.5).within_group(table_a.c.a),
            func.percentile_cont(0.5).within_group(table_a.c.b),
            func.percentile_cont(0.5).within_group(table_a.c.a, table_a.c.b),
            func.percentile_cont(0.5).within_group(table_a.c.a, table_a.c.b,
                                                   column("q")),
        ),
        lambda: (
            func.is_equal("a", "b").as_comparison(1, 2),
            func.is_equal("a", "c").as_comparison(1, 2),
            func.is_equal("a", "b").as_comparison(2, 1),
            func.is_equal("a", "b", "c").as_comparison(1, 2),
            func.foobar("a", "b").as_comparison(1, 2),
        ),
        lambda: (
            func.row_number().over(order_by=table_a.c.a),
            func.row_number().over(order_by=table_a.c.a, range_=(0, 10)),
            func.row_number().over(order_by=table_a.c.a, range_=(None, 10)),
            func.row_number().over(order_by=table_a.c.a, rows=(None, 20)),
            func.row_number().over(order_by=table_a.c.b),
            func.row_number().over(order_by=table_a.c.a,
                                   partition_by=table_a.c.b),
        ),
        lambda: (
            func.count(1).filter(table_a.c.a == 5),
            func.count(1).filter(table_a.c.a == 10),
            func.foob(1).filter(table_a.c.a == 10),
        ),
        lambda: (
            and_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            and_(table_a.c.a == 5, table_a.c.a == table_b.c.a),
            or_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_a.c.a),
        ),
        lambda: (
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 18, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.b == 10, 20)]),
            case(whens=[
                (table_a.c.a == 5, 10),
                (table_a.c.b == 10, 20),
                (table_a.c.a == 9, 12),
            ]),
            case(
                whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)],
                else_=30,
            ),
            case({
                "wendy": "W",
                "jack": "J"
            }, value=table_a.c.a, else_="E"),
            case({
                "wendy": "W",
                "jack": "J"
            }, value=table_a.c.b, else_="E"),
            case({
                "wendy_w": "W",
                "jack": "J"
            }, value=table_a.c.a, else_="E"),
        ),
        lambda: (
            extract("foo", table_a.c.a),
            extract("foo", table_a.c.b),
            extract("bar", table_a.c.a),
        ),
        lambda: (
            Slice(1, 2, 5),
            Slice(1, 5, 5),
            Slice(1, 5, 10),
            Slice(2, 10, 15),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a, table_a.c.b]),
            select([table_a.c.b, table_a.c.a]),
            select([table_a.c.b, table_a.c.a]).apply_labels(),
            select([table_a.c.a]).where(table_a.c.b == 5),
            select([table_a.c.a]).where(table_a.c.b == 5).where(table_a.c.a ==
                                                                10),
            select([table_a.c.a]).where(table_a.c.b == 5).with_for_update(),
            select([table_a.c.a]).where(table_a.c.b == 5).with_for_update(
                nowait=True),
            select([table_a.c.a]).where(table_a.c.b == 5).correlate(table_b),
            select([table_a.c.a]).where(table_a.c.b == 5).correlate_except(
                table_b),
        ),
        lambda: (
            future_select(table_a.c.a),
            future_select(table_a.c.a).join(table_b, table_a.c.a == table_b.c.a
                                            ),
            future_select(table_a.c.a).join_from(table_a, table_b, table_a.c.a
                                                 == table_b.c.a),
            future_select(table_a.c.a).join_from(table_a, table_b),
            future_select(table_a.c.a).join_from(table_c, table_b),
            future_select(table_a.c.a).join(
                table_b, table_a.c.a == table_b.c.a).join(
                    table_c, table_b.c.b == table_c.c.x),
            future_select(table_a.c.a).join(table_b),
            future_select(table_a.c.a).join(table_c),
            future_select(table_a.c.a).join(table_b, table_a.c.a == table_b.c.b
                                            ),
            future_select(table_a.c.a).join(table_c, table_a.c.a == table_c.c.x
                                            ),
        ),
        lambda: (
            select([table_a.c.a]).cte(),
            select([table_a.c.a]).cte(recursive=True),
            select([table_a.c.a]).cte(name="some_cte", recursive=True),
            select([table_a.c.a]).cte(name="some_cte"),
            select([table_a.c.a]).cte(name="some_cte").alias("other_cte"),
            select([table_a.c.a]).cte(name="some_cte").union_all(
                select([table_a.c.a])),
            select([table_a.c.a]).cte(name="some_cte").union_all(
                select([table_a.c.b])),
            select([table_a.c.a]).lateral(),
            select([table_a.c.a]).lateral(name="bar"),
            table_a.tablesample(func.bernoulli(1)),
            table_a.tablesample(func.bernoulli(1), seed=func.random()),
            table_a.tablesample(func.bernoulli(1), seed=func.other_random()),
            table_a.tablesample(func.hoho(1)),
            table_a.tablesample(func.bernoulli(1), name="bar"),
            table_a.tablesample(
                func.bernoulli(1), name="bar", seed=func.random()),
        ),
        lambda: (
            table_a.insert(),
            table_a.insert().values({})._annotate({"nocache": True}),
            table_b.insert(),
            table_b.insert().with_dialect_options(sqlite_foo="some value"),
            table_b.insert().from_select(["a", "b"], select([table_a])),
            table_b.insert().from_select(["a", "b"],
                                         select([table_a]).where(table_a.c.a >
                                                                 5)),
            table_b.insert().from_select(["a", "b"], select([table_b])),
            table_b.insert().from_select(["c", "d"], select([table_a])),
            table_b.insert().returning(table_b.c.a),
            table_b.insert().returning(table_b.c.a, table_b.c.b),
            table_b.insert().inline(),
            table_b.insert().prefix_with("foo"),
            table_b.insert().with_hint("RUNFAST"),
            table_b.insert().values(a=5, b=10),
            table_b.insert().values(a=5),
            table_b.insert().values({
                table_b.c.a: 5,
                "b": 10
            })._annotate({"nocache": True}),
            table_b.insert().values(a=7, b=10),
            table_b.insert().values(a=5, b=10).inline(),
            table_b.insert().values([{
                "a": 5,
                "b": 10
            }, {
                "a": 8,
                "b": 12
            }])._annotate({"nocache": True}),
            table_b.insert().values([{
                "a": 9,
                "b": 10
            }, {
                "a": 8,
                "b": 7
            }])._annotate({"nocache": True}),
            table_b.insert().values([(5, 10),
                                     (8, 12)])._annotate({"nocache": True}),
            table_b.insert().values([(5, 9),
                                     (5, 12)])._annotate({"nocache": True}),
        ),
        lambda: (
            table_b.update(),
            table_b.update().where(table_b.c.a == 5),
            table_b.update().where(table_b.c.b == 5),
            table_b.update().where(table_b.c.b == 5).with_dialect_options(
                mysql_limit=10),
            table_b.update().where(table_b.c.b == 5).with_dialect_options(
                mysql_limit=10, sqlite_foo="some value"),
            table_b.update().where(table_b.c.a == 5).values(a=5, b=10),
            table_b.update().where(table_b.c.a == 5).values(a=5, b=10, c=12),
            table_b.update().where(table_b.c.b == 5).values(a=5, b=10).
            _annotate({"nocache": True}),
            table_b.update().values(a=5, b=10),
            table_b.update().values({
                "a": 5,
                table_b.c.b: 10
            })._annotate({"nocache": True}),
            table_b.update().values(a=7, b=10),
            table_b.update().ordered_values(("a", 5), ("b", 10)),
            table_b.update().ordered_values(("b", 10), ("a", 5)),
            table_b.update().ordered_values((table_b.c.a, 5), ("b", 10)),
        ),
        lambda: (
            table_b.delete(),
            table_b.delete().with_dialect_options(sqlite_foo="some value"),
            table_b.delete().where(table_b.c.a == 5),
            table_b.delete().where(table_b.c.b == 5),
        ),
        lambda: (
            values(
                column("mykey", Integer),
                column("mytext", String),
                column("myint", Integer),
                name="myvalues",
            ).data([(1, "textA", 99),
                    (2, "textB", 88)])._annotate({"nocache": True}),
            values(
                column("mykey", Integer),
                column("mytext", String),
                column("myint", Integer),
                name="myothervalues",
            ).data([(1, "textA", 99),
                    (2, "textB", 88)])._annotate({"nocache": True}),
            values(
                column("mykey", Integer),
                column("mytext", String),
                column("myint", Integer),
                name="myvalues",
            ).data([(1, "textA", 89),
                    (2, "textG", 88)])._annotate({"nocache": True}),
            values(
                column("mykey", Integer),
                column("mynottext", String),
                column("myint", Integer),
                name="myvalues",
            ).data([(1, "textA", 99),
                    (2, "textB", 88)])._annotate({"nocache": True}),
            # TODO: difference in type
            # values(
            #    [
            #        column("mykey", Integer),
            #        column("mytext", Text),
            #        column("myint", Integer),
            #    ],
            #    (1, "textA", 99),
            #    (2, "textB", 88),
            #    alias_name="myvalues",
            # ),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a]).prefix_with("foo"),
            select([table_a.c.a]).prefix_with("foo", dialect="mysql"),
            select([table_a.c.a]).prefix_with("foo", dialect="postgresql"),
            select([table_a.c.a]).prefix_with("bar"),
            select([table_a.c.a]).suffix_with("bar"),
        ),
        lambda: (
            select([table_a_2.c.a]),
            select([table_a_2_fs.c.a]),
            select([table_a_2_bs.c.a]),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a]).with_hint(None, "some hint"),
            select([table_a.c.a]).with_hint(None, "some other hint"),
            select([table_a.c.a]).with_hint(table_a, "some hint"),
            select([table_a.c.a]).with_hint(table_a, "some hint").with_hint(
                None, "some other hint"),
            select([table_a.c.a]).with_hint(table_a, "some other hint"),
            select([table_a.c.a]).with_hint(
                table_a, "some hint", dialect_name="mysql"),
            select([table_a.c.a]).with_hint(
                table_a, "some hint", dialect_name="postgresql"),
        ),
        lambda: (
            table_a.join(table_b, table_a.c.a == table_b.c.a),
            table_a.join(table_b,
                         and_(table_a.c.a == table_b.c.a, table_a.c.b == 1)),
            table_a.outerjoin(table_b, table_a.c.a == table_b.c.a),
        ),
        lambda: (
            table_a.alias("a"),
            table_a.alias("b"),
            table_a.alias(),
            table_b.alias("a"),
            select([table_a.c.a]).alias("a"),
        ),
        lambda: (
            FromGrouping(table_a.alias("a")),
            FromGrouping(table_a.alias("b")),
        ),
        lambda: (
            SelectStatementGrouping(select([table_a])),
            SelectStatementGrouping(select([table_b])),
        ),
        lambda: (
            select([table_a.c.a]).scalar_subquery(),
            select([table_a.c.a]).where(table_a.c.b == 5).scalar_subquery(),
        ),
        lambda: (
            exists().where(table_a.c.a == 5),
            exists().where(table_a.c.b == 5),
        ),
        lambda: (
            union(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a]), select([table_a.c.b])).order_by("a"),
            union_all(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a])),
            union(
                select([table_a.c.a]),
                select([table_a.c.b]).where(table_a.c.b > 5),
            ),
        ),
        lambda: (
            table("a", column("x"), column("y")),
            table("a", column("y"), column("x")),
            table("b", column("x"), column("y")),
            table("a", column("x"), column("y"), column("z")),
            table("a", column("x"), column("y", Integer)),
            table("a", column("q"), column("y", Integer)),
        ),
        lambda: (table_a, table_b),
    ]

    dont_compare_values_fixtures = [
        lambda: (
            # note the in_(...) all have different column names because
            # otherwise all IN expressions would compare as equivalent
            column("x").in_(random_choices(range(10), k=3)),
            column("y").in_(
                bindparam(
                    "q",
                    random_choices(range(10), k=random.randint(0, 7)),
                    expanding=True,
                )),
            column("z").in_(random_choices(range(10), k=random.randint(0, 7))),
            column("x") == random.randint(1, 10),
        )
    ]

    def _complex_fixtures():
        def one():
            a1 = table_a.alias()
            a2 = table_b_like_a.alias()

            stmt = (select([table_a.c.a, a1.c.b,
                            a2.c.b]).where(table_a.c.b == a1.c.b).where(
                                a1.c.b == a2.c.b).where(a1.c.a == 5))

            return stmt

        def one_diff():
            a1 = table_b_like_a.alias()
            a2 = table_a.alias()

            stmt = (select([table_a.c.a, a1.c.b,
                            a2.c.b]).where(table_a.c.b == a1.c.b).where(
                                a1.c.b == a2.c.b).where(a1.c.a == 5))

            return stmt

        def two():
            inner = one().subquery()

            stmt = select([table_b.c.a, inner.c.a, inner.c.b]).select_from(
                table_b.join(inner, table_b.c.b == inner.c.b))

            return stmt

        def three():

            a1 = table_a.alias()
            a2 = table_a.alias()
            ex = exists().where(table_b.c.b == a1.c.a)

            stmt = (select([a1.c.a, a2.c.a]).select_from(
                a1.join(a2, a1.c.b == a2.c.b)).where(ex))
            return stmt

        return [one(), one_diff(), two(), three()]

    fixtures.append(_complex_fixtures)

    def _statements_w_context_options_fixtures():

        return [
            select([table_a])._add_context_option(opt1, True),
            select([table_a])._add_context_option(opt1, 5),
            select([table_a])._add_context_option(opt1,
                                                  True)._add_context_option(
                                                      opt2, True),
            select([table_a
                    ])._add_context_option(opt1,
                                           True)._add_context_option(opt2, 5),
            select([table_a])._add_context_option(opt3, True),
        ]

    fixtures.append(_statements_w_context_options_fixtures)

    def _statements_w_anonymous_col_names():
        def one():
            c = column("q")

            l = c.label(None)

            # new case as of Id810f485c5f7ed971529489b84694e02a3356d6d
            subq = select([l]).subquery()

            # this creates a ColumnClause as a proxy to the Label() that has
            # an anonymous name, so the column has one too.
            anon_col = subq.c[0]

            # then when BindParameter is created, it checks the label
            # and doesn't double up on the anonymous name which is uncachable
            return anon_col > 5

        def two():
            c = column("p")

            l = c.label(None)

            # new case as of Id810f485c5f7ed971529489b84694e02a3356d6d
            subq = select([l]).subquery()

            # this creates a ColumnClause as a proxy to the Label() that has
            # an anonymous name, so the column has one too.
            anon_col = subq.c[0]

            # then when BindParameter is created, it checks the label
            # and doesn't double up on the anonymous name which is uncachable
            return anon_col > 5

        def three():

            l1, l2 = table_a.c.a.label(None), table_a.c.b.label(None)

            stmt = select([table_a.c.a, table_a.c.b, l1, l2])

            subq = stmt.subquery()
            return select([subq]).where(subq.c[2] == 10)

        return (
            one(),
            two(),
            three(),
        )

    fixtures.append(_statements_w_anonymous_col_names)
Example #22
if __name__ == "__main__":
    engine = create_engine(PSQL_CONNECTION_STRING, echo=False)
    Session = sessionmaker(bind=engine)
    session = Session()

    # query get average score of ratings for each year
    averageScores = session.query(func.avg(
        Review.score), Review.pub_year).group_by(Review.pub_year).all()

    # query: best-new-music scores vs. scores for songs that weren't best new music
    bestNewMusicScores = session.query(
        Review.score).filter(Review.best_new_music == 1).all()
    bestNewMusicOrNotScores = (session.query(
        Review.best_new_music,
        func.array_agg(Review.score)).group_by(Review.best_new_music).all())
    bestNewMusicByYear = (session.query(
        Review.pub_year, func.array_agg(Review.score)).group_by(
            Review.pub_year).filter(Review.best_new_music == 1).all())
    # Join query for Genre and Review to see reviews by genre
    averageReviewsScoreByGenre = (session.query(
        Genre.genre, func.avg(Review.score)).filter(
            Genre.genre != None, Review.best_new_music == 1).join(
                Review,
                Genre.reviewid == Review.reviewid).group_by(Genre.genre).all())

    # Lists of scores by genre
    allScoresByGenre = (session.query(Genre.genre, func.array_agg(
        Review.score)).filter(Genre.genre != None).join(
            Review,
            Genre.reviewid == Review.reviewid).group_by(Genre.genre).all())
Example #23
class CoreFixtures(object):
    # lambdas which return a tuple of ColumnElement objects.
    # must return at least two objects that should compare differently.
    # to test more varieties of "difference" additional objects can be added.
    fixtures = [
        lambda: (
            column("q"),
            column("x"),
            column("q", Integer),
            column("q", String),
        ),
        lambda: (~column("q", Boolean), ~column("p", Boolean)),
        lambda: (
            table_a.c.a.label("foo"),
            table_a.c.a.label("bar"),
            table_a.c.b.label("foo"),
        ),
        lambda: (
            _label_reference(table_a.c.a.desc()),
            _label_reference(table_a.c.a.asc()),
        ),
        lambda: (_textual_label_reference("a"), _textual_label_reference("b")),
        lambda: (
            text("select a, b from table").columns(a=Integer, b=String),
            text("select a, b, c from table").columns(
                a=Integer, b=String, c=Integer
            ),
            text("select a, b, c from table where foo=:bar").bindparams(
                bindparam("bar", type_=Integer)
            ),
            text("select a, b, c from table where foo=:foo").bindparams(
                bindparam("foo", type_=Integer)
            ),
            text("select a, b, c from table where foo=:bar").bindparams(
                bindparam("bar", type_=String)
            ),
        ),
        lambda: (
            column("q") == column("x"),
            column("q") == column("y"),
            column("z") == column("x"),
            column("z") + column("x"),
            column("z") - column("x"),
            column("x") - column("z"),
            column("z") > column("x"),
            column("x").in_([5, 7]),
            column("x").in_([10, 7, 8]),
            # note these two are mathematically equivalent but for now they
            # are considered to be different
            column("z") >= column("x"),
            column("x") <= column("z"),
            column("q").between(5, 6),
            column("q").between(5, 6, symmetric=True),
            column("q").like("somstr"),
            column("q").like("somstr", escape="\\"),
            column("q").like("somstr", escape="X"),
        ),
        lambda: (
            table_a.c.a,
            table_a.c.a._annotate({"orm": True}),
            table_a.c.a._annotate({"orm": True})._annotate({"bar": False}),
            table_a.c.a._annotate(
                {"orm": True, "parententity": MyEntity("a", table_a)}
            ),
            table_a.c.a._annotate(
                {"orm": True, "parententity": MyEntity("b", table_a)}
            ),
            table_a.c.a._annotate(
                {"orm": True, "parententity": MyEntity("b", select([table_a]))}
            ),
        ),
        lambda: (
            cast(column("q"), Integer),
            cast(column("q"), Float),
            cast(column("p"), Integer),
        ),
        lambda: (
            bindparam("x"),
            bindparam("y"),
            bindparam("x", type_=Integer),
            bindparam("x", type_=String),
            bindparam(None),
        ),
        lambda: (_OffsetLimitParam("x"), _OffsetLimitParam("y")),
        lambda: (func.foo(), func.foo(5), func.bar()),
        lambda: (func.current_date(), func.current_time()),
        lambda: (
            func.next_value(Sequence("q")),
            func.next_value(Sequence("p")),
        ),
        lambda: (True_(), False_()),
        lambda: (Null(),),
        lambda: (ReturnTypeFromArgs("foo"), ReturnTypeFromArgs(5)),
        lambda: (FunctionElement(5), FunctionElement(5, 6)),
        lambda: (func.count(), func.not_count()),
        lambda: (func.char_length("abc"), func.char_length("def")),
        lambda: (GenericFunction("a", "b"), GenericFunction("a")),
        lambda: (CollationClause("foobar"), CollationClause("batbar")),
        lambda: (
            type_coerce(column("q", Integer), String),
            type_coerce(column("q", Integer), Float),
            type_coerce(column("z", Integer), Float),
        ),
        lambda: (table_a.c.a, table_b.c.a),
        lambda: (tuple_(1, 2), tuple_(3, 4)),
        lambda: (func.array_agg([1, 2]), func.array_agg([3, 4])),
        lambda: (
            func.percentile_cont(0.5).within_group(table_a.c.a),
            func.percentile_cont(0.5).within_group(table_a.c.b),
            func.percentile_cont(0.5).within_group(table_a.c.a, table_a.c.b),
            func.percentile_cont(0.5).within_group(
                table_a.c.a, table_a.c.b, column("q")
            ),
        ),
        lambda: (
            func.is_equal("a", "b").as_comparison(1, 2),
            func.is_equal("a", "c").as_comparison(1, 2),
            func.is_equal("a", "b").as_comparison(2, 1),
            func.is_equal("a", "b", "c").as_comparison(1, 2),
            func.foobar("a", "b").as_comparison(1, 2),
        ),
        lambda: (
            func.row_number().over(order_by=table_a.c.a),
            func.row_number().over(order_by=table_a.c.a, range_=(0, 10)),
            func.row_number().over(order_by=table_a.c.a, range_=(None, 10)),
            func.row_number().over(order_by=table_a.c.a, rows=(None, 20)),
            func.row_number().over(order_by=table_a.c.b),
            func.row_number().over(
                order_by=table_a.c.a, partition_by=table_a.c.b
            ),
        ),
        lambda: (
            func.count(1).filter(table_a.c.a == 5),
            func.count(1).filter(table_a.c.a == 10),
            func.foob(1).filter(table_a.c.a == 10),
        ),
        lambda: (
            and_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            and_(table_a.c.a == 5, table_a.c.a == table_b.c.a),
            or_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_a.c.a),
        ),
        lambda: (
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 18, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.b == 10, 20)]),
            case(
                whens=[
                    (table_a.c.a == 5, 10),
                    (table_a.c.b == 10, 20),
                    (table_a.c.a == 9, 12),
                ]
            ),
            case(
                whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)],
                else_=30,
            ),
            case({"wendy": "W", "jack": "J"}, value=table_a.c.a, else_="E"),
            case({"wendy": "W", "jack": "J"}, value=table_a.c.b, else_="E"),
            case({"wendy_w": "W", "jack": "J"}, value=table_a.c.a, else_="E"),
        ),
        lambda: (
            extract("foo", table_a.c.a),
            extract("foo", table_a.c.b),
            extract("bar", table_a.c.a),
        ),
        lambda: (
            Slice(1, 2, 5),
            Slice(1, 5, 5),
            Slice(1, 5, 10),
            Slice(2, 10, 15),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a, table_a.c.b]),
            select([table_a.c.b, table_a.c.a]),
            select([table_a.c.a]).where(table_a.c.b == 5),
            select([table_a.c.a])
            .where(table_a.c.b == 5)
            .where(table_a.c.a == 10),
            select([table_a.c.a]).where(table_a.c.b == 5).with_for_update(),
            select([table_a.c.a])
            .where(table_a.c.b == 5)
            .with_for_update(nowait=True),
            select([table_a.c.a]).where(table_a.c.b == 5).correlate(table_b),
            select([table_a.c.a])
            .where(table_a.c.b == 5)
            .correlate_except(table_b),
        ),
        lambda: (
            select([table_a.c.a]).cte(),
            select([table_a.c.a]).cte(recursive=True),
            select([table_a.c.a]).cte(name="some_cte", recursive=True),
            select([table_a.c.a]).cte(name="some_cte"),
            select([table_a.c.a]).cte(name="some_cte").alias("other_cte"),
            select([table_a.c.a])
            .cte(name="some_cte")
            .union_all(select([table_a.c.a])),
            select([table_a.c.a])
            .cte(name="some_cte")
            .union_all(select([table_a.c.b])),
            select([table_a.c.a]).lateral(),
            select([table_a.c.a]).lateral(name="bar"),
            table_a.tablesample(func.bernoulli(1)),
            table_a.tablesample(func.bernoulli(1), seed=func.random()),
            table_a.tablesample(func.bernoulli(1), seed=func.other_random()),
            table_a.tablesample(func.hoho(1)),
            table_a.tablesample(func.bernoulli(1), name="bar"),
            table_a.tablesample(
                func.bernoulli(1), name="bar", seed=func.random()
            ),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a]).prefix_with("foo"),
            select([table_a.c.a]).prefix_with("foo", dialect="mysql"),
            select([table_a.c.a]).prefix_with("foo", dialect="postgresql"),
            select([table_a.c.a]).prefix_with("bar"),
            select([table_a.c.a]).suffix_with("bar"),
        ),
        lambda: (
            select([table_a_2.c.a]),
            select([table_a_2_fs.c.a]),
            select([table_a_2_bs.c.a]),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a]).with_hint(None, "some hint"),
            select([table_a.c.a]).with_hint(None, "some other hint"),
            select([table_a.c.a]).with_hint(table_a, "some hint"),
            select([table_a.c.a])
            .with_hint(table_a, "some hint")
            .with_hint(None, "some other hint"),
            select([table_a.c.a]).with_hint(table_a, "some other hint"),
            select([table_a.c.a]).with_hint(
                table_a, "some hint", dialect_name="mysql"
            ),
            select([table_a.c.a]).with_hint(
                table_a, "some hint", dialect_name="postgresql"
            ),
        ),
        lambda: (
            table_a.join(table_b, table_a.c.a == table_b.c.a),
            table_a.join(
                table_b, and_(table_a.c.a == table_b.c.a, table_a.c.b == 1)
            ),
            table_a.outerjoin(table_b, table_a.c.a == table_b.c.a),
        ),
        lambda: (
            table_a.alias("a"),
            table_a.alias("b"),
            table_a.alias(),
            table_b.alias("a"),
            select([table_a.c.a]).alias("a"),
        ),
        lambda: (
            FromGrouping(table_a.alias("a")),
            FromGrouping(table_a.alias("b")),
        ),
        lambda: (
            SelectStatementGrouping(select([table_a])),
            SelectStatementGrouping(select([table_b])),
        ),
        lambda: (
            select([table_a.c.a]).scalar_subquery(),
            select([table_a.c.a]).where(table_a.c.b == 5).scalar_subquery(),
        ),
        lambda: (
            exists().where(table_a.c.a == 5),
            exists().where(table_a.c.b == 5),
        ),
        lambda: (
            union(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a]), select([table_a.c.b])).order_by("a"),
            union_all(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a])),
            union(
                select([table_a.c.a]),
                select([table_a.c.b]).where(table_a.c.b > 5),
            ),
        ),
        lambda: (
            table("a", column("x"), column("y")),
            table("a", column("y"), column("x")),
            table("b", column("x"), column("y")),
            table("a", column("x"), column("y"), column("z")),
            table("a", column("x"), column("y", Integer)),
            table("a", column("q"), column("y", Integer)),
        ),
        lambda: (table_a, table_b),
    ]

    dont_compare_values_fixtures = [
        lambda: (
            # note the in_(...) all have different column names because
            # otherwise all IN expressions would compare as equivalent
            column("x").in_(random_choices(range(10), k=3)),
            column("y").in_(
                bindparam(
                    "q",
                    random_choices(range(10), k=random.randint(0, 7)),
                    expanding=True,
                )
            ),
            column("z").in_(random_choices(range(10), k=random.randint(0, 7))),
            column("x") == random.randint(1, 10),
        )
    ]

    def _complex_fixtures():
        def one():
            a1 = table_a.alias()
            a2 = table_b_like_a.alias()

            stmt = (
                select([table_a.c.a, a1.c.b, a2.c.b])
                .where(table_a.c.b == a1.c.b)
                .where(a1.c.b == a2.c.b)
                .where(a1.c.a == 5)
            )

            return stmt

        def one_diff():
            a1 = table_b_like_a.alias()
            a2 = table_a.alias()

            stmt = (
                select([table_a.c.a, a1.c.b, a2.c.b])
                .where(table_a.c.b == a1.c.b)
                .where(a1.c.b == a2.c.b)
                .where(a1.c.a == 5)
            )

            return stmt

        def two():
            inner = one().subquery()

            stmt = select([table_b.c.a, inner.c.a, inner.c.b]).select_from(
                table_b.join(inner, table_b.c.b == inner.c.b)
            )

            return stmt

        def three():

            a1 = table_a.alias()
            a2 = table_a.alias()
            ex = exists().where(table_b.c.b == a1.c.a)

            stmt = (
                select([a1.c.a, a2.c.a])
                .select_from(a1.join(a2, a1.c.b == a2.c.b))
                .where(ex)
            )
            return stmt

        return [one(), one_diff(), two(), three()]

    fixtures.append(_complex_fixtures)
예제 #24
0
class CompareAndCopyTest(fixtures.TestBase):

    # lambdas which return a tuple of ColumnElement objects.
    # must return at least two objects that should compare differently.
    # to test more varieties of "difference" additional objects can be added.
    fixtures = [
        lambda: (
            column("q"),
            column("x"),
            column("q", Integer),
            column("q", String),
        ),
        lambda: (~column("q", Boolean), ~column("p", Boolean)),
        lambda: (
            table_a.c.a.label("foo"),
            table_a.c.a.label("bar"),
            table_a.c.b.label("foo"),
        ),
        lambda: (
            _label_reference(table_a.c.a.desc()),
            _label_reference(table_a.c.a.asc()),
        ),
        lambda: (_textual_label_reference("a"), _textual_label_reference("b")),
        lambda: (
            text("select a, b from table").columns(a=Integer, b=String),
            text("select a, b, c from table").columns(
                a=Integer, b=String, c=Integer),
        ),
        lambda: (
            column("q") == column("x"),
            column("q") == column("y"),
            column("z") == column("x"),
        ),
        lambda: (
            cast(column("q"), Integer),
            cast(column("q"), Float),
            cast(column("p"), Integer),
        ),
        lambda: (
            bindparam("x"),
            bindparam("y"),
            bindparam("x", type_=Integer),
            bindparam("x", type_=String),
            bindparam(None),
        ),
        lambda: (_OffsetLimitParam("x"), _OffsetLimitParam("y")),
        lambda: (func.foo(), func.foo(5), func.bar()),
        lambda: (func.current_date(), func.current_time()),
        lambda: (
            func.next_value(Sequence("q")),
            func.next_value(Sequence("p")),
        ),
        lambda: (True_(), False_()),
        lambda: (Null(), ),
        lambda: (ReturnTypeFromArgs("foo"), ReturnTypeFromArgs(5)),
        lambda: (FunctionElement(5), FunctionElement(5, 6)),
        lambda: (func.count(), func.not_count()),
        lambda: (func.char_length("abc"), func.char_length("def")),
        lambda: (GenericFunction("a", "b"), GenericFunction("a")),
        lambda: (CollationClause("foobar"), CollationClause("batbar")),
        lambda: (
            type_coerce(column("q", Integer), String),
            type_coerce(column("q", Integer), Float),
            type_coerce(column("z", Integer), Float),
        ),
        lambda: (table_a.c.a, table_b.c.a),
        lambda: (tuple_([1, 2]), tuple_([3, 4])),
        lambda: (func.array_agg([1, 2]), func.array_agg([3, 4])),
        lambda: (
            func.percentile_cont(0.5).within_group(table_a.c.a),
            func.percentile_cont(0.5).within_group(table_a.c.b),
            func.percentile_cont(0.5).within_group(table_a.c.a, table_a.c.b),
            func.percentile_cont(0.5).within_group(table_a.c.a, table_a.c.b,
                                                   column("q")),
        ),
        lambda: (
            func.is_equal("a", "b").as_comparison(1, 2),
            func.is_equal("a", "c").as_comparison(1, 2),
            func.is_equal("a", "b").as_comparison(2, 1),
            func.is_equal("a", "b", "c").as_comparison(1, 2),
            func.foobar("a", "b").as_comparison(1, 2),
        ),
        lambda: (
            func.row_number().over(order_by=table_a.c.a),
            func.row_number().over(order_by=table_a.c.a, range_=(0, 10)),
            func.row_number().over(order_by=table_a.c.a, range_=(None, 10)),
            func.row_number().over(order_by=table_a.c.a, rows=(None, 20)),
            func.row_number().over(order_by=table_a.c.b),
            func.row_number().over(order_by=table_a.c.a,
                                   partition_by=table_a.c.b),
        ),
        lambda: (
            func.count(1).filter(table_a.c.a == 5),
            func.count(1).filter(table_a.c.a == 10),
            func.foob(1).filter(table_a.c.a == 10),
        ),
        lambda: (
            and_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            and_(table_a.c.a == 5, table_a.c.a == table_b.c.a),
            or_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_a.c.a),
        ),
        lambda: (
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 18, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.b == 10, 20)]),
            case(whens=[
                (table_a.c.a == 5, 10),
                (table_a.c.b == 10, 20),
                (table_a.c.a == 9, 12),
            ]),
            case(
                whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)],
                else_=30,
            ),
            case({
                "wendy": "W",
                "jack": "J"
            }, value=table_a.c.a, else_="E"),
            case({
                "wendy": "W",
                "jack": "J"
            }, value=table_a.c.b, else_="E"),
            case({
                "wendy_w": "W",
                "jack": "J"
            }, value=table_a.c.a, else_="E"),
        ),
        lambda: (
            extract("foo", table_a.c.a),
            extract("foo", table_a.c.b),
            extract("bar", table_a.c.a),
        ),
        lambda: (
            Slice(1, 2, 5),
            Slice(1, 5, 5),
            Slice(1, 5, 10),
            Slice(2, 10, 15),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a, table_a.c.b]),
            select([table_a.c.b, table_a.c.a]),
            select([table_a.c.a]).where(table_a.c.b == 5),
            select([table_a.c.a]).where(table_a.c.b == 5).where(table_a.c.a ==
                                                                10),
            select([table_a.c.a]).where(table_a.c.b == 5).with_for_update(),
            select([table_a.c.a]).where(table_a.c.b == 5).with_for_update(
                nowait=True),
            select([table_a.c.a]).where(table_a.c.b == 5).correlate(table_b),
            select([table_a.c.a]).where(table_a.c.b == 5).correlate_except(
                table_b),
        ),
        lambda: (
            table_a.join(table_b, table_a.c.a == table_b.c.a),
            table_a.join(table_b,
                         and_(table_a.c.a == table_b.c.a, table_a.c.b == 1)),
            table_a.outerjoin(table_b, table_a.c.a == table_b.c.a),
        ),
        lambda: (
            table_a.alias("a"),
            table_a.alias("b"),
            table_a.alias(),
            table_b.alias("a"),
            select([table_a.c.a]).alias("a"),
        ),
        lambda: (
            FromGrouping(table_a.alias("a")),
            FromGrouping(table_a.alias("b")),
        ),
        lambda: (
            select([table_a.c.a]).as_scalar(),
            select([table_a.c.a]).where(table_a.c.b == 5).as_scalar(),
        ),
        lambda: (
            exists().where(table_a.c.a == 5),
            exists().where(table_a.c.b == 5),
        ),
        lambda: (
            union(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a]), select([table_a.c.b])).order_by("a"),
            union_all(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a])),
            union(
                select([table_a.c.a]),
                select([table_a.c.b]).where(table_a.c.b > 5),
            ),
        ),
        lambda: (
            table("a", column("x"), column("y")),
            table("a", column("y"), column("x")),
            table("b", column("x"), column("y")),
            table("a", column("x"), column("y"), column("z")),
            table("a", column("x"), column("y", Integer)),
            table("a", column("q"), column("y", Integer)),
        ),
        lambda: (
            Table("a", MetaData(), Column("q", Integer), Column("b", String)),
            Table("b", MetaData(), Column("q", Integer), Column("b", String)),
        ),
    ]

    @classmethod
    def setup_class(cls):
        # TODO: we need to get dialects here somehow, perhaps in test_suite?
        [
            importlib.import_module("sqlalchemy.dialects.%s" % d)
            for d in dialects.__all__ if not d.startswith("_")
        ]

    def test_all_present(self):
        need = set(
            cls for cls in class_hierarchy(ClauseElement)
            if issubclass(cls, (ColumnElement, Selectable)) and "__init__" in
            cls.__dict__ and not issubclass(cls, (Annotated))
            and "orm" not in cls.__module__ and "crud" not in cls.__module__
            and "dialects" not in cls.__module__  # TODO: dialects?
        ).difference({ColumnElement, UnaryExpression})
        for fixture in self.fixtures:
            case_a = fixture()
            for elem in case_a:
                for mro in type(elem).__mro__:
                    need.discard(mro)

        is_false(bool(need), "%d Remaining classes: %r" % (len(need), need))

    def test_compare(self):
        for fixture in self.fixtures:
            case_a = fixture()
            case_b = fixture()

            for a, b in itertools.combinations_with_replacement(
                    range(len(case_a)), 2):
                if a == b:
                    is_true(
                        case_a[a].compare(case_b[b],
                                          arbitrary_expression=True),
                        "%r != %r" % (case_a[a], case_b[b]),
                    )

                else:
                    is_false(
                        case_a[a].compare(case_b[b],
                                          arbitrary_expression=True),
                        "%r == %r" % (case_a[a], case_b[b]),
                    )

    def test_cache_key(self):
        def assert_params_append(assert_params):
            def append(param):
                if param._value_required_for_cache:
                    assert_params.append(param)
                else:
                    is_(param.value, None)

            return append

        for fixture in self.fixtures:
            case_a = fixture()
            case_b = fixture()

            for a, b in itertools.combinations_with_replacement(
                    range(len(case_a)), 2):

                assert_a_params = []
                assert_b_params = []

                visitors.traverse_depthfirst(
                    case_a[a],
                    {},
                    {"bindparam": assert_params_append(assert_a_params)},
                )
                visitors.traverse_depthfirst(
                    case_b[b],
                    {},
                    {"bindparam": assert_params_append(assert_b_params)},
                )
                if assert_a_params:
                    assert_raises_message(
                        NotImplementedError,
                        "bindparams collection argument required ",
                        case_a[a]._cache_key,
                    )
                if assert_b_params:
                    assert_raises_message(
                        NotImplementedError,
                        "bindparams collection argument required ",
                        case_b[b]._cache_key,
                    )

                if not assert_a_params and not assert_b_params:
                    if a == b:
                        eq_(case_a[a]._cache_key(), case_b[b]._cache_key())
                    else:
                        ne_(case_a[a]._cache_key(), case_b[b]._cache_key())

    def test_cache_key_gather_bindparams(self):
        for fixture in self.fixtures:
            case_a = fixture()
            case_b = fixture()

            # in the "bindparams" case, the cache keys for bound parameters
            # with only different values will be the same, but the params
            # themselves are gathered into a collection.
            for a, b in itertools.combinations_with_replacement(
                    range(len(case_a)), 2):
                a_params = {"bindparams": []}
                b_params = {"bindparams": []}
                if a == b:
                    a_key = case_a[a]._cache_key(**a_params)
                    b_key = case_b[b]._cache_key(**b_params)
                    eq_(a_key, b_key)

                    if a_params["bindparams"]:
                        for a_param, b_param in zip(a_params["bindparams"],
                                                    b_params["bindparams"]):
                            assert a_param.compare(b_param)
                else:
                    a_key = case_a[a]._cache_key(**a_params)
                    b_key = case_b[b]._cache_key(**b_params)

                    if a_key == b_key:
                        for a_param, b_param in zip(a_params["bindparams"],
                                                    b_params["bindparams"]):
                            if not a_param.compare(b_param):
                                break
                        else:
                            assert False, "Bound parameters are all the same"
                    else:
                        ne_(a_key, b_key)

                assert_a_params = []
                assert_b_params = []
                visitors.traverse_depthfirst(
                    case_a[a], {}, {"bindparam": assert_a_params.append})
                visitors.traverse_depthfirst(
                    case_b[b], {}, {"bindparam": assert_b_params.append})

                # note we're asserting the order of the params as well as
                # if there are dupes or not.  ordering has to be deterministic
                # and matches what a traversal would provide.
                eq_(a_params["bindparams"], assert_a_params)
                eq_(b_params["bindparams"], assert_b_params)

    def test_compare_col_identity(self):
        stmt1 = (select([table_a.c.a, table_b.c.b
                         ]).where(table_a.c.a == table_b.c.b).alias())
        stmt1_c = (select([table_a.c.a, table_b.c.b
                           ]).where(table_a.c.a == table_b.c.b).alias())

        stmt2 = union(select([table_a]), select([table_b]))

        stmt3 = select([table_b])

        equivalents = {table_a.c.a: [table_b.c.a]}

        is_false(
            stmt1.compare(stmt2, use_proxies=True, equivalents=equivalents))

        is_true(
            stmt1.compare(stmt1_c, use_proxies=True, equivalents=equivalents))
        is_true((table_a.c.a == table_b.c.b).compare(
            stmt1.c.a == stmt1.c.b,
            use_proxies=True,
            equivalents=equivalents,
        ))

    def test_copy_internals(self):
        for fixture in self.fixtures:
            case_a = fixture()
            case_b = fixture()

            assert case_a[0].compare(case_b[0])

            clone = case_a[0]._clone()
            clone._copy_internals()

            assert clone.compare(case_b[0])

            stack = [clone]
            seen = {clone}
            found_elements = False
            while stack:
                obj = stack.pop(0)

                items = [
                    subelem for key, elem in clone.__dict__.items()
                    if key != "_is_clone_of" and elem is not None
                    for subelem in util.to_list(elem)
                    if (isinstance(subelem, (ColumnElement, ClauseList))
                        and subelem not in seen and not isinstance(
                            subelem, Immutable) and subelem is not case_a[0])
                ]
                stack.extend(items)
                seen.update(items)

                if obj is not clone:
                    found_elements = True
                    # ensure the element will not compare as true
                    obj.compare = lambda other, **kw: False
                    obj.__visit_name__ = "dont_match"

            if found_elements:
                assert not clone.compare(case_b[0])
            assert case_a[0].compare(case_b[0])
예제 #25
0
def query_work_day_stats(
    company_id,
    start_date=None,
    end_date=None,
    first=None,
    after=None,
    tzname="Europe/Paris",
):
    tz = gettz(tzname)
    if after:
        max_time, user_id_ = parse_datetime_plus_id_cursor(after)
        max_date = max_time.date()
        end_date = min(max_date, end_date) if end_date else max_date

    query = (Activity.query.join(Mission).join(
        Expenditure,
        and_(
            Activity.user_id == Expenditure.user_id,
            Activity.mission_id == Expenditure.mission_id,
        ),
        isouter=True,
    ).with_entities(
        Activity.id,
        Activity.user_id,
        Activity.mission_id,
        Mission.name,
        Activity.start_time,
        Activity.end_time,
        Activity.type,
        Expenditure.id.label("expenditure_id"),
        Expenditure.type.label("expenditure_type"),
        func.generate_series(
            func.date_trunc(
                "day",
                func.timezone(
                    tzname,
                    func.timezone("UTC", Activity.start_time),
                ),
            ),
            func.timezone(
                tzname,
                func.coalesce(
                    func.timezone("UTC", Activity.end_time),
                    func.now(),
                ),
            ),
            "1 day",
        ).label("day"),
    ).filter(
        Mission.company_id == company_id,
        ~Activity.is_dismissed,
        Activity.start_time != Activity.end_time,
    ))

    query = _apply_time_range_filters(
        query,
        to_datetime(start_date, tz_for_date=tz),
        to_datetime(end_date,
                    tz_for_date=tz,
                    convert_dates_to_end_of_day_times=True),
    )

    has_next_page = False
    if first:
        activity_first = max(first * 5, 200)
        query = query.order_by(desc("day"), desc(
            Activity.user_id)).limit(activity_first + 1)
        has_next_page = query.count() > activity_first

    query = query.subquery()

    query = (db.session.query(query).group_by(
        query.c.user_id, query.c.day, query.c.mission_id,
        query.c.name).with_entities(
            query.c.user_id.label("user_id"),
            query.c.day,
            func.timezone("UTC",
                          func.timezone(tzname,
                                        query.c.day)).label("utc_day_start"),
            query.c.mission_id.label("mission_id"),
            query.c.name.label("mission_name"),
            func.min(
                func.greatest(
                    query.c.start_time,
                    func.timezone("UTC", func.timezone(tzname, query.c.day)),
                )).label("start_time"),
            func.max(
                func.least(
                    func.timezone(
                        "UTC",
                        func.timezone(
                            tzname,
                            query.c.day + func.cast("1 day", Interval)),
                    ),
                    func.coalesce(query.c.end_time, func.now()),
                )).label("end_time"),
            func.bool_or(
                and_(
                    query.c.end_time.is_(None),
                    query.c.day == func.current_date(),
                )).label("is_running"),
            *[
                func.sum(
                    case(
                        [(
                            query.c.type == a_type.value,
                            extract(
                                "epoch",
                                func.least(
                                    func.timezone(
                                        "UTC",
                                        func.timezone(
                                            tzname,
                                            query.c.day +
                                            func.cast("1 day", Interval),
                                        ),
                                    ),
                                    func.coalesce(query.c.end_time,
                                                  func.now()),
                                ) - func.greatest(
                                    query.c.start_time,
                                    func.timezone(
                                        "UTC",
                                        func.timezone(tzname, query.c.day),
                                    ),
                                ),
                            ),
                        )],
                        else_=0,
                    )).label(f"{a_type.value}_duration")
                for a_type in ActivityType
            ],
            func.greatest(func.count(distinct(query.c.expenditure_id)),
                          1).label("n_exp_dups"),
            func.count(distinct(query.c.id)).label("n_act_dups"),
            *[
                func.sum(
                    case(
                        [(query.c.expenditure_type == e_type.value, 1)],
                        else_=0,
                    )).label(f"n_{e_type.value}_expenditures")
                for e_type in ExpenditureType
            ],
        ).subquery())

    query = (db.session.query(query).group_by(
        query.c.user_id, query.c.day).with_entities(
            query.c.user_id.label("user_id"),
            query.c.day,
            func.array_agg(distinct(
                query.c.mission_name)).label("mission_names"),
            func.min(query.c.start_time).label("start_time"),
            func.max(query.c.end_time).label("end_time"),
            func.bool_or(query.c.is_running).label("is_running"),
            *[
                func.sum(
                    getattr(query.c, f"{a_type.value}_duration") /
                    query.c.n_exp_dups).cast(Integer).label(
                        f"{a_type.value}_duration") for a_type in ActivityType
            ],
            *[
                func.sum(
                    getattr(query.c, f"n_{e_type.value}_expenditures") /
                    query.c.n_act_dups).cast(Integer).label(
                        f"n_{e_type.value}_expenditures")
                for e_type in ExpenditureType
            ],
        ).order_by(desc("day"), desc("user_id")).subquery())

    query = db.session.query(query).with_entities(
        *query.c,
        extract("epoch", query.c.end_time -
                query.c.start_time).label("service_duration"),
        reduce(
            lambda a, b: a + b,
            [
                getattr(query.c, f"{a_type.value}_duration")
                for a_type in ActivityType
            ],
        ).label("total_work_duration"),
    )

    results = query.all()
    if after:
        results = [
            r for r in results if r.day.date() < max_date or (
                r.day.date() == max_date and r.user_id < user_id_)
        ]

    if first:
        if has_next_page:
            # The last work day may be incomplete because we didn't fetch all the activities => remove it
            results = results[:-1]
        if len(results) > first:
            results = results[:first]
            has_next_page = True

    return results, has_next_page
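# A minimal usage sketch, not part of the original module; the company_id and the
# date range below are placeholders. It fetches the first page of per-user work-day
# rows built by query_work_day_stats() above.
from datetime import date

example_work_days, example_has_next_page = query_work_day_stats(
    company_id=1,
    start_date=date(2021, 1, 1),
    end_date=date(2021, 1, 31),
    first=25,
)
for row in example_work_days:
    print(row.user_id, row.day, row.mission_names, row.total_work_duration)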
예제 #26
0
    def analytics_query(self, start, end, locations=None, library=None):
        """Build a database query that fetches rows of analytics data.

        This method uses low-level SQLAlchemy code to do all
        calculations and data conversations in the database. It's
        modeled after Work.to_search_documents, which generates a
        large JSON document entirely in the database.

        :return: An iterator of results, each of which can be written
            directly to a CSV file.
        """

        clauses = [
            CirculationEvent.start >= start,
            CirculationEvent.start < end,
        ]

        if locations:
            event_types = [
                CirculationEvent.CM_CHECKOUT,
                CirculationEvent.CM_FULFILL,
                CirculationEvent.OPEN_BOOK,
            ]
            locations = locations.strip().split(",")

            clauses += [
                CirculationEvent.type.in_(event_types),
                CirculationEvent.location.in_(locations),
            ]

        if library:
            clauses += [CirculationEvent.library == library]

        # Build the primary query. This is a query against the
        # CirculationEvent table and a few other tables joined against
        # it. This makes up the bulk of the data.
        events_alias = (
            select(
                [
                    func.to_char(CirculationEvent.start, "YYYY-MM-DD HH24:MI:SS").label(
                        "start"
                    ),
                    CirculationEvent.type.label("event_type"),
                    Identifier.identifier,
                    Identifier.type.label("identifier_type"),
                    Edition.sort_title,
                    Edition.sort_author,
                    case(
                        [(Work.fiction == True, literal_column("'fiction'"))],
                        else_=literal_column("'nonfiction'"),
                    ).label("fiction"),
                    Work.id.label("work_id"),
                    Work.audience,
                    Edition.publisher,
                    Edition.imprint,
                    Edition.language,
                    CirculationEvent.location,
                ],
            )
            .select_from(
                join(
                    CirculationEvent,
                    LicensePool,
                    CirculationEvent.license_pool_id == LicensePool.id,
                )
                .join(Identifier, LicensePool.identifier_id == Identifier.id)
                .join(Work, Work.id == LicensePool.work_id)
                .join(Edition, Work.presentation_edition_id == Edition.id)
            )
            .where(and_(*clauses))
            .order_by(CirculationEvent.start.asc())
            .alias("events_alias")
        )

        # A subquery can hook into the main query by referencing its
        # 'work_id' field in its WHERE clause.
        work_id_column = literal_column(
            events_alias.name + "." + events_alias.c.work_id.name
        )

        # This subquery gets the names of a Work's genres as a single
        # comma-separated string.
        #

        # This Alias selects some number of rows, each containing one
        # string column (Genre.name). Genres with higher affinities with
        # this work go first.
        genres_alias = (
            select([Genre.name.label("genre_name")])
            .select_from(join(WorkGenre, Genre, WorkGenre.genre_id == Genre.id))
            .where(WorkGenre.work_id == work_id_column)
            .order_by(WorkGenre.affinity.desc(), Genre.name)
            .alias("genres_subquery")
        )

        # Use array_agg() to consolidate the rows into one row -- this
        # gives us a single value, an array of strings, for each
        # Work. Then use array_to_string to convert the array into a
        # single comma-separated string.
        genres = select(
            [func.array_to_string(func.array_agg(genres_alias.c.genre_name), ",")]
        ).select_from(genres_alias)

        # This subquery gets a Work's target age as a single string.
        #

        # This Alias selects two fields: the lower and upper bounds of
        # the Work's target age. This reuses code originally written
        # for Work.to_search_documents().
        target_age = Work.target_age_query(work_id_column).alias("target_age_subquery")

        # Concatenate the lower and upper bounds with a dash in the
        # middle. If both lower and upper bound are empty, just give
        # the empty string. This simulates the behavior of
        # Work.target_age_string.
        target_age_string = select(
            [
                case(
                    [
                        (
                            or_(target_age.c.lower != None, target_age.c.upper != None),
                            func.concat(target_age.c.lower, "-", target_age.c.upper),
                        )
                    ],
                    else_=literal_column("''"),
                )
            ]
        ).select_from(target_age)

        # Build the main query out of the subqueries.
        events = events_alias.c
        query = select(
            [
                events.start,
                events.event_type,
                events.identifier,
                events.identifier_type,
                events.sort_title,
                events.sort_author,
                events.fiction,
                events.audience,
                events.publisher,
                events.imprint,
                events.language,
                target_age_string.label("target_age"),
                genres.label("genres"),
                events.location,
            ]
        ).select_from(events_alias)
        return query
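# A small consumption sketch, separate from the class above; `exporter` stands in
# for whatever object exposes analytics_query(), and `engine` for a SQLAlchemy
# engine -- both are assumptions. It shows how each result row can be written
# straight to a CSV file, as the docstring suggests.
import csv

def write_analytics_csv(exporter, engine, start, end, path):
    query = exporter.analytics_query(start, end)
    with engine.connect() as conn, open(path, "w", newline="") as out:
        writer = csv.writer(out)
        for row in conn.execute(query):
            writer.writerow(row)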
예제 #27
0
def prediction():
    data = json.loads(request.data.decode("utf-8"))
    users = data["userIds"]

    # Getting previous attendance rates for each attendee at the upcoming event
    attendanceHistoryForUsersAtUpcomingEvent = (
        Attendance.query
        .filter(
            Attendance.meetup_user_id.in_(users)
        )
        .with_entities(
            Attendance.meetup_user_id,
            func.count(case([((Attendance.did_attend == True), 1)],
                            else_=literal_column("NULL"))).label('count_did_attend'),
            func.count(Attendance.did_rsvp).label('count_did_rsvp'))
        .group_by(Attendance.meetup_user_id)
        .all()
    )

    # For attendees whose first recorded attendance this is (they may have attended before, just not since we started recording),
    # let's just use the average attendance rate of those who've attended exactly one meetup
    attendanceForThoseWhoAttendedOneMeetup = (
        Attendance.query
        .with_entities(
            Attendance.meetup_user_id,
            func.count(case([(Attendance.did_attend == True, Attendance.did_attend)],
                            else_=literal_column("NULL"))).label('count_did_attend'),
            func.count(Attendance.did_rsvp).label('count_did_rsvp')
        )
        .filter(
            # Filter out those attendees that attended, but did not RSVP (there's no way for us to predict them)
            Attendance.meetup_user_id != None
        )
        .group_by(Attendance.meetup_user_id).having(func.count(Attendance.did_rsvp) == 1).all()
    )

    singleAttendanceCountAndRSVPs = getAttendanceRateForThoseAttendingSingleMeetup(
        attendanceForThoseWhoAttendedOneMeetup)
    singleAttendanceRate = (singleAttendanceCountAndRSVPs["attended"] /
                            singleAttendanceCountAndRSVPs["rsvped"])

    # Linear Regression - Get important data for each event
    events = (
        Attendance.query
        .with_entities(
            Attendance.event_id,
            Events.event_name,
            Events.event_date,
            func.count(case([(Attendance.did_attend, 1)],
                            else_=literal_column("NULL"))).label('count_did_attend'),

            func.count(case([(Attendance.did_rsvp, 1)],
                            else_=literal_column("NULL"))).label('count_did_rsvp'),
            # Right now this includes rows without user_ids (i.e. None)
            func.array_agg(Attendance.meetup_user_id).label('meetup_user_ids')
        )
        .join(Events)
        .filter(
            # Filter out those attendees that attended, but did not RSVP (there's no way for us to predict them)
            Attendance.meetup_user_id != None
        )
        .group_by(Attendance.event_id, Events.event_name, Events.event_date)
        # Order by date for the sake of pandas, so training data can be taken from the middle
        .order_by(Events.event_date.asc())
        .all()
    )

    # Loop through events to get each attendee's attendance rate prior to that event
    events_with_attendees = []
    for event in events:
        (event_id, event_name, event_date,
         attendees_who_rsvped_count, rsvp_count, attendeeIds) = event

        # Getting previous attendance rates for each attendee at the event
        attendanceHistoryForUsersAtEvent = (
            Attendance.query
            .filter(
                Attendance.meetup_user_id.in_(attendeeIds),
                # We only care about events that were before the date of the event (not the overall history of the attendee up to today)
                Events.event_date < event_date
            )
            .with_entities(
                Attendance.meetup_user_id,
                func.count(case([((Attendance.did_attend == True), 1)],
                                else_=literal_column("NULL"))).label('count_did_attend'),
                func.count(Attendance.did_rsvp).label('count_did_rsvp'))
            .group_by(Attendance.meetup_user_id)
            .join(Events)
            .all()
        )

        count_previous_attendees = len(attendanceHistoryForUsersAtEvent)
        events_with_attendees.append({
            "event_id": event_id,
            "event_name": event_name,
            "event_date": event_date,
            "attendees_who_rsvped_count": attendees_who_rsvped_count,
            "rsvp_count": rsvp_count,
            "previous_attendance_rates_sum": getPredictedAttendeesOfMembers(attendanceHistoryForUsersAtEvent),
            "count_previous_attendees": count_previous_attendees,
            "old_attendees": count_previous_attendees,
            "new_attendees": (rsvp_count - count_previous_attendees) * singleAttendanceRate
        })

    rsvps = len(users)
    oldAttendees = len(attendanceHistoryForUsersAtUpcomingEvent)
    new_event = {
        "rsvp_count": rsvps,
        "old_attendees": oldAttendees,
        "previous_attendance_rates_sum": getPredictedAttendeesOfMembers(attendanceHistoryForUsersAtEvent),
        "new_attendees": (rsvps - oldAttendees) * singleAttendanceRate
    }

    regressionPrediction = getLinearRegressionPrediction(
        regressionInput=events_with_attendees, newEvent=[new_event])

    print(regressionPrediction)

    # format data we have for front end consumption
    attendanceHistory = []
    for attendee in attendanceHistoryForUsersAtUpcomingEvent:
        (meetupUserId, eventsAttendedCount, eventsRSVPedCount) = attendee
        attendanceHistory.append({
            "meetupUserId": meetupUserId,
            "attended": eventsAttendedCount,
            "rsvped": eventsRSVPedCount
        })

    return jsonify(data={
        "memberAttendanceHistory": attendanceHistory,
        "singleAttendanceCountAndRSVPs": singleAttendanceCountAndRSVPs,
        "regressionPrediction": regressionPrediction
    })
예제 #28
0
def array_agg_row(*arg):
    return func.array_agg(func.row(*arg), type_=ArrayOfRecord(arg))
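# A minimal, illustrative use of array_agg_row(); the `children` table is made up
# for this sketch, and ArrayOfRecord is assumed to be importable from the same
# module as the helper above.
from sqlalchemy import column, func, select, table

children = table("children", column("parent_id"), column("id"), column("name"))
stmt = (
    select([children.c.parent_id,
            array_agg_row(children.c.id, children.c.name).label("children")])
    .group_by(children.c.parent_id)
)
# str(stmt) renders roughly:
# SELECT children.parent_id, array_agg(row(children.id, children.name)) AS children
# FROM children GROUP BY children.parent_id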
예제 #29
0
    def get_query_column(self, entity):
        return func.array_agg(self.resolve_entity_column(entity))
예제 #30
0
    def _process_events(klass,
                        events,
                        page,
                        query=None,
                        cities=None,
                        user=None,
                        flags=None,
                        selected_tags=None,
                        selected_categories=None,
                        future_only=None):
        if future_only:
            events = events.filter(
                or_(Event.start_time >= datetime.datetime.now(),
                    Event.end_time >= datetime.datetime.now()))

        events = events.filter(Event.status != Event.STATUS_CLOSED_PERM)

        events, selected_tags = klass._filter_events(
            events,
            query=query,
            categories=selected_categories,
            tags=selected_tags,
            flags=flags)

        event_cities = klass._cities_for_events(events)
        if cities:
            events = events.filter(Event.city.in_(cities))
            for city in event_cities:
                city['selected'] = city['chip_name'] in cities

        tags, categories = klass._tags_for_events(
            events=events,
            selected_categories=selected_categories,
            selected_tags=selected_tags)

        event_user_ids = None
        if user:
            event_ids = {e[0].event_id for e in events if e[1]}

            following_user_ids = alias(
                db_session.query(func.distinct(Follow.follow_id)).filter(
                    and_(UserEvent.event_id.in_(event_ids),
                         UserEvent.user_id == Follow.follow_id,
                         Follow.user_id == user.user_id,
                         Follow.follow_id != user.user_id,
                         Follow.active == True)), "following_user_ids")

            event_users = {
                str(u.user_id): {
                    'user_id': u.user_id,
                    'username': u.username,
                    'image_url': u.image_url
                }
                for u in User.query.filter(User.user_id.in_(
                    following_user_ids))
            }

            events_with_following_counts = db_session.query(
                UserEvent.event_id,
                func.array_agg(func.distinct(
                    User.user_id)).label('user_ids')).filter(
                        and_(Follow.user_id == user.user_id,
                             UserEvent.user_id == Follow.follow_id,
                             UserEvent.event_id.in_(event_ids),
                             Follow.follow_id != user.user_id)).group_by(
                                 UserEvent.event_id)
            event_user_ids = {
                row[0]: set(str(follower_id) for follower_id in row[1])
                for row in events_with_following_counts
            }

        events = klass._order_events(events)
        events = events.limit(klass.PAGE_SIZE).offset(
            (page - 1) * klass.PAGE_SIZE)

        results = []
        for event, user_count in events:
            event.card_user_count = user_count
            if event_user_ids and event.event_id in event_user_ids:
                event.card_event_users = [
                    event_users[x] for x in event_user_ids[event.event_id]
                    if x in event_users
                ]
            results.append(event)

        return results, categories, tags, event_cities, events
예제 #31
0
    # query: get the average best new music score for each year
    averageBestNewMusicScores = (session.query(
        func.avg(Review.score),
        Review.pub_year).filter(Review.best_new_music == 1).group_by(
            Review.pub_year).all())

    # Join query for Genre and Review to see reviews by genre
    averageReviewsScoreByGenre = (session.query(
        Genre.genre, func.avg(Review.score)).filter(
            Genre.genre != None, Review.best_new_music == 1).join(
                Review,
                Genre.reviewid == Review.reviewid).group_by(Genre.genre).all())

    # Lists of scores by genre
    allScoresByGenre = (session.query(Genre.genre, func.array_agg(
        Review.score)).filter(Genre.genre != None).join(
            Review,
            Genre.reviewid == Review.reviewid).group_by(Genre.genre).all())

    # Graphing score averages
    fig1, (ax1, ax2) = plt.subplots(1, 2)
    fig2, (ax3, ax4) = plt.subplots(1, 2)

    averageScores = sorted(averageScores, key=lambda x: (-x[1], x[0]))
    scores = [i[0] for i in averageScores]
    years = [i[1] for i in averageScores]
    ax1.plot(years, scores)
    ax1.set_xticks(years)
    ax1.set_title("Music Score Average by Year")
    ax1.set_ylabel("Average Score")
    ax1.set_xlabel("Year")
예제 #32
0
def get_taxa_list(id_area):
    """

    :param type:
    :return:
    """
    try:
        reproduction_id = (
            (
                DB.session.query(TNomenclatures.id_nomenclature)
                .join(
                    BibNomenclaturesTypes,
                    TNomenclatures.id_type == BibNomenclaturesTypes.id_type,
                )
                .filter(
                    and_(
                        BibNomenclaturesTypes.mnemonique.like("STATUT_BIO"),
                        TNomenclatures.cd_nomenclature.like("3"),
                    )
                )
            )
            .first()
            .id_nomenclature
        )

        print("reproduction_id", reproduction_id)
        query_territory = (
            DB.session.query(
                Taxref.cd_ref.label("id"),
                LAreas.id_area,
                LAreas.area_code,
                Taxref.cd_ref,
                func.split_part(Taxref.nom_vern, ",", 1).label("nom_vern"),
                Taxref.nom_valide,
                Taxref.group1_inpn,
                Taxref.group2_inpn,
                func.count(distinct(Synthese.id_synthese)).label("count_occtax"),
                func.count(distinct(Synthese.observers)).label("count_observer"),
                func.count(distinct(Synthese.date_min)).label("count_date"),
                func.count(distinct(Synthese.id_dataset)).label("count_dataset"),
                func.max(distinct(func.extract("year", Synthese.date_min))).label(
                    "last_year"
                ),
                func.array_agg(
                    aggregate_order_by(
                        distinct(func.extract("year", Synthese.date_min)),
                        func.extract("year", Synthese.date_min).desc(),
                    )
                ).label("list_years"),
                func.array_agg(
                    aggregate_order_by(
                        distinct(func.extract("month", Synthese.date_min)),
                        func.extract("month", Synthese.date_min).asc(),
                    )
                ).label("list_months"),
                func.bool_or(
                    Synthese.id_nomenclature_bio_status == reproduction_id
                ).label("reproduction"),
                func.max(distinct(func.extract("year", Synthese.date_min)))
                .filter(Synthese.id_nomenclature_bio_status == reproduction_id)
                .label("last_year_reproduction"),
                func.array_agg(distinct(Synthese.id_nomenclature_bio_status)).label(
                    "bio_status_id"
                ),
                case(
                    [(func.count(TaxrefProtectionEspeces.cd_nom) > 0, True)],
                    else_=False,
                ).label("protection"),
            )
            .select_from(CorAreaSynthese)
            .join(Synthese, Synthese.id_synthese == CorAreaSynthese.id_synthese)
            .join(Taxref, Synthese.cd_nom == Taxref.cd_nom)
            .join(LAreas, LAreas.id_area == CorAreaSynthese.id_area)
            .outerjoin(TaxrefLR, TaxrefLR.cd_nom == Taxref.cd_ref)
            .outerjoin(
                TaxrefProtectionEspeces, TaxrefProtectionEspeces.cd_nom == Taxref.cd_nom
            )
            .filter(LAreas.id_area == id_area)
            .group_by(
                LAreas.id_area,
                LAreas.area_code,
                Taxref.cd_ref,
                Taxref.nom_vern,
                Taxref.nom_valide,
                Taxref.group1_inpn,
                Taxref.group2_inpn,
            )
            .order_by(
                func.count(distinct(Synthese.id_synthese)).desc(),
                Taxref.group1_inpn,
                Taxref.group2_inpn,
                Taxref.nom_valide,
            )
        )
        print("query_territory", query_territory)
        result = query_territory.all()
        count = len(result)
        data = []
        for r in result:
            row = r._asdict()
            bio_status = [get_nomenclature(s) for s in r.bio_status_id]
            row["bio_status"] = bio_status
            redlist = get_redlist_status(r.cd_ref)
            row["redlist"] = redlist
            data.append(row)

        redlistless_data = list(filter(redlist_list_is_null, data))
        print("redlistless_data", len(redlistless_data))
        redlist_data = list(filter(redlist_is_not_null, data))
        print("redlist_data", len(redlist_data))
        redlist_sorted_data = sorted(
            redlist_data,
            key=lambda k: (
                k["redlist"][0]["priority_order"],
                k["redlist"][0]["threatened"],
            ),
        )
        sorted_data = redlist_sorted_data + list(redlistless_data)
        return jsonify({"count": count, "data": sorted_data}), 200

    except Exception as e:
        error = "<get_taxa_list> ERROR: {}".format(e)
        current_app.logger.error(error)
        return {"Error": error}, 400