def build_query(self, qualified_bundles):
    """Create a query from the qualified bundles."""
    stmts = []
    for tup in qualified_bundles:
        (
            transfer_id,
            date,
            delta,
            movement_ids,
        ) = tup
        if not stmts:
            # Apply column types and labels to the first row.
            stmts.append(
                select([
                    cast(literal(transfer_id), String).label('transfer_id'),
                    cast(literal(date), Date).label('date'),
                    cast(literal(delta), Numeric).label('delta'),
                    array(movement_ids, type_=BigInteger)
                    .label('movement_ids'),
                ]))
        else:
            # The column types and labels in the remaining rows are
            # inferred from the first row.
            stmts.append(
                select([
                    literal(transfer_id),
                    literal(date),
                    literal(delta),
                    array(movement_ids),
                ]))
    query = union_all(*stmts)
    return query

def query_recursive_tree():
    structure_tree = (
        DBSession.query(
            Structure.id,
            Structure.name,
            Structure.parent_id,
            cast(1, Integer()).label('depth'),
            array([cast(Structure.name, Text)]).label('name_path'),
            array([Structure.id]).label('path'),
        )
        .filter(Structure.condition_root_level())
        .cte(name='structure_tree', recursive=True)
    )
    st = aliased(structure_tree, name='st')
    s = aliased(Structure, name='s')
    structure_tree = structure_tree.union_all(
        DBSession.query(
            s.id,
            s.name,
            s.parent_id,
            (st.c.depth + 1).label('depth'),
            func.array_append(
                st.c.name_path, cast(s.name, Text)
            ).label('name_path'),
            func.array_append(st.c.path, s.id).label('path'),
        )
        .filter(s.parent_id == st.c.id)
    )
    return DBSession.query(structure_tree)

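# A minimal standalone sketch (not from the corpus above) of the same
# recursive-CTE-plus-array-path pattern, written in SQLAlchemy Core so it can
# be compiled without a session; the `node` table here is hypothetical.
from sqlalchemy import Column, Integer, MetaData, Table, Text, select
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import array

metadata = MetaData()
node = Table('node', metadata,
             Column('id', Integer, primary_key=True),
             Column('parent_id', Integer),
             Column('name', Text))

# Seed the CTE with the roots, carrying the id path as a one-element array.
tree = (select([node.c.id, array([node.c.id]).label('path')])
        .where(node.c.parent_id.is_(None))
        .cte('tree', recursive=True))
# Recursive term: append each child's id to the accumulated path with ||.
tree = tree.union_all(
    select([node.c.id, tree.c.path.op('||')(node.c.id)])
    .where(node.c.parent_id == tree.c.id))
print(select([tree]).compile(dialect=postgresql.dialect()))
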
def test_cols_hstore_pair_array(self):
    self._test_cols(
        hstore(array(['1', '2']), array(['3', None]))['1'],
        ("hstore(ARRAY[%(param_1)s, %(param_2)s], "
         "ARRAY[%(param_3)s, NULL]) -> %(hstore_1)s AS anon_1"),
        False
    )

async def _get_osm_objects_with_issues(self, changes):
    # The column must be compared with isnot(None) to emit
    # `date_closed IS NOT NULL`; a plain Python `is not None` on a column
    # object is always True and silently drops the filter.
    node_issues_set = {Issue(**i) for i in (await (await self.conn.execute(
        Issue.__table__.select().where(and_(
            Issue.__table__.c.handle == self.handle,
            Issue.__table__.c.date_closed.isnot(None),
            Issue.__table__.c.affected_nodes.overlap(
                cast(array(changes.affected_nodes), ARRAY(BigInteger))),
        ))
    )).fetchall())}
    way_issues_set = {Issue(**i) for i in (await (await self.conn.execute(
        Issue.__table__.select().where(and_(
            Issue.__table__.c.handle == self.handle,
            Issue.__table__.c.date_closed.isnot(None),
            Issue.__table__.c.affected_ways.overlap(
                cast(array(changes.affected_ways), ARRAY(BigInteger))),
        ))
    )).fetchall())}
    rel_issues_set = {Issue(**i) for i in (await (await self.conn.execute(
        Issue.__table__.select().where(and_(
            Issue.__table__.c.handle == self.handle,
            Issue.__table__.c.date_closed.isnot(None),
            Issue.__table__.c.affected_rels.overlap(
                cast(array(changes.affected_rels), ARRAY(BigInteger))),
        ))
    )).fetchall())}
    node_issues = {n: i for i in node_issues_set for n in i.affected_nodes}
    way_issues = {w: i for i in way_issues_set for w in i.affected_ways}
    rel_issues = {r: i for i in rel_issues_set for r in i.affected_rels}
    return node_issues, way_issues, rel_issues

def qualstat_get_figures(conn, srvid, database, tsfrom, tsto,
                         queries=None, quals=None):
    condition = text("""datname = :database AND coalesce_range &&
        tstzrange(:from, :to)""")
    if queries is not None:
        condition = and_(
            condition,
            array([int(q) for q in queries])
            .any(literal_column("s.queryid")))
    if quals is not None:
        condition = and_(
            condition,
            array([int(q) for q in quals])
            .any(literal_column("qnc.qualid")))
    sql = (select([
        text('most_filtering.quals'),
        text('most_filtering.query'),
        text('to_json(most_filtering) as "most filtering"'),
        text('to_json(least_filtering) as "least filtering"'),
        text('to_json(most_executed) as "most executed"'),
        text('to_json(most_used) as "most used"')
    ]).select_from(
        qual_constants(srvid, "most_filtering", tsfrom, tsto, condition)
        .alias("most_filtering")
        .join(
            qual_constants(srvid, "least_filtering", tsfrom, tsto, condition)
            .alias("least_filtering"),
            text("most_filtering.rownumber = "
                 "least_filtering.rownumber"))
        .join(
            qual_constants(srvid, "most_executed", tsfrom, tsto, condition)
            .alias("most_executed"),
            text("most_executed.rownumber = "
                 "least_filtering.rownumber"))
        .join(
            qual_constants(srvid, "most_used", tsfrom, tsto, condition)
            .alias("most_used"),
            text("most_used.rownumber = "
                 "least_filtering.rownumber"))))
    params = {
        "server": srvid,
        "database": database,
        "from": tsfrom,
        "to": tsto,
        "queryids": queries
    }
    quals = conn.execute(sql, params=params)
    if quals.rowcount == 0:
        return None
    row = quals.first()
    return row

def _get_nodes(node_id: int, id_col: IA, pid_col: IA, to_root: bool,
               *other_col):
    """
    Get all child nodes or all parent nodes for a given node.

    node_id: the start node's id value;
    id_col: the id column of a SQLAlchemy class, usually `id`;
    pid_col: the parent id column of a SQLAlchemy class, usually `parent_id`;
    to_root: walk towards the root node or towards the child nodes;
    other_col: other columns to select when querying the nodes;
    """
    class_model = id_col.class_
    other_col_names = [col.name for col in other_col]
    hierarchy = (
        select(
            [
                id_col.label('id'),
                pid_col.label('parent_id'),
                *other_col,
                literal(0).label('level'),
                array((id_col,)).label('path'),  # array needs a tuple
                literal(False).label('cycle'),
            ]
        )
        .where(id_col == node_id)
        .cte(name='hierarchy', recursive=True)
    )
    next_alias = aliased(class_model, name='next_level')
    alias_id_col = getattr(next_alias, id_col.name)
    alias_pid_col = getattr(next_alias, pid_col.name)
    alias_other_col = [getattr(next_alias, col.name) for col in other_col]
    if to_root is True:
        # The parent_id of the current level is the id of the next level.
        join_condition = hierarchy.c.parent_id == alias_id_col
    else:
        # The id of the current level is the parent_id of the next level.
        join_condition = hierarchy.c.id == alias_pid_col
    hierarchy = hierarchy.union_all(
        select(
            [
                alias_id_col.label('id'),
                alias_pid_col.label('parent_id'),
                *alias_other_col,
                (hierarchy.c.level + 1).label('level'),
                (hierarchy.c.path + array((alias_id_col,))).label('path'),
                (alias_id_col == any_(hierarchy.c.path)).label('cycle'),
            ]
        )
        .where(hierarchy.c.cycle.is_(False))
        .select_from(hierarchy.join(next_alias, join_condition,
                                    isouter=False))
    )
    q = sa.select(
        [column('id'), column('parent_id'),
         *[column(name) for name in other_col_names]]
    ).select_from(hierarchy)
    return session.execute(q)

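# Standalone sketch of the cycle check used in _get_nodes() above: comparing
# a candidate id against the accumulated path array via any_() renders as
# `id = ANY (ARRAY[...])` on PostgreSQL. The `n` table here is hypothetical.
from sqlalchemy import Column, Integer, MetaData, Table, any_
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import array

metadata = MetaData()
n = Table('n', metadata, Column('id', Integer), Column('parent_id', Integer))
expr = n.c.id == any_(array([n.c.id, n.c.parent_id]))
print(expr.compile(dialect=postgresql.dialect()))
# n.id = ANY (ARRAY[n.id, n.parent_id])
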
def test_array_literal_type(self):
    assert isinstance(postgresql.array([1, 2]).type, postgresql.ARRAY)
    is_(postgresql.array([1, 2]).type.item_type._type_affinity, Integer)
    is_(
        postgresql.array([1, 2],
                         type_=String).type.item_type._type_affinity,
        String)

def test_array_literal(self):
    self.assert_compile(
        func.array_dims(postgresql.array([1, 2]) +
                        postgresql.array([3, 4, 5])),
        "array_dims(ARRAY[%(param_1)s, %(param_2)s] || "
        "ARRAY[%(param_3)s, %(param_4)s, %(param_5)s])",
        checkparams={'param_5': 5, 'param_4': 4, 'param_1': 1,
                     'param_3': 3, 'param_2': 2}
    )

def test_unnest_with_ordinality(self, connection):
    array_val = postgresql.array(
        [postgresql.array([14, 41, 7]), postgresql.array([54, 9, 49])])
    stmt = select("*").select_from(
        func.unnest(array_val)
        .table_valued("elts", with_ordinality="num")
        .render_derived()
        .alias("t"))
    eq_(
        connection.execute(stmt).all(),
        [(14, 1), (41, 2), (7, 3), (54, 4), (9, 5), (49, 6)],
    )

def filter_request(self, query, args):
    # By default we filter contracts
    con_type = (args['type'] if 'type' in args and args['type'] is not None
                else self.default_type)
    query = query.filter(Release.type == con_type)
    if 'q' in args and args['q'] is not None:
        # Full-text search; bind the user-supplied term as a parameter
        # instead of concatenating it into the SQL string.
        query = query.filter(
            text("to_tsvector(:fts_lang, releases.concat) "
                 "@@ plainto_tsquery(:fts_lang, :q)")
            .bindparams(fts_lang=app.config["FTS_LANG"], q=args['q']))
    if 'value_gt' in args and args['value_gt'] is not None:
        query = query.filter(Release.value >= args['value_gt'])
    if 'value_lt' in args and args['value_lt'] is not None:
        query = query.filter(Release.value <= args['value_lt'])
    if 'date_gt' in args and args['date_gt'] is not None:
        query = query.filter(Release.date >= args['date_gt'])
    if 'date_lt' in args and args['date_lt'] is not None:
        query = query.filter(Release.date <= args['date_lt'])
    if 'buyer' in args and args['buyer'] is not None:
        if not self.buyer_joined:
            query = query.join(Buyer)
            self.buyer_joined = True
        query = query.filter(array(args['buyer'].split(';')).any(Buyer.slug))
    if 'activity' in args and args['activity'] is not None:
        query = query.filter(
            Release.activities.overlap(args['activity'].split(';')))
    if 'procuring_entity' in args and args['procuring_entity'] is not None:
        query = query.filter(
            array(args['procuring_entity'].split(';'))
            .any(Release.procuring_entity_slug))
    if (('supplier' in args and args['supplier'] is not None) or
            ('supplier_size' in args and args['supplier_size'] is not None)):
        if not self.supplier_joined:
            query = query.join(Supplier)
            self.supplier_joined = True
        if 'supplier' in args and args['supplier'] is not None:
            query = query.filter(
                array(args['supplier'].split(';')).any(Supplier.slug))
        if 'supplier_size' in args and args['supplier_size'] is not None:
            integered = [int(item)
                         for item in args['supplier_size'].split(';')]
            query = query.filter(array(integered).any(Supplier.size))
    return query

def _get_isotype_col_expression(label: str = "isotypes"):
    expression = case(
        [
            (and_(DashboardSource.isotype_igg == 'true',
                  DashboardSource.isotype_igm == 'true',
                  DashboardSource.isotype_iga == 'true'),
             array(["IgG", "IgM", "IgA"])),
            (and_(DashboardSource.isotype_igg == 'true',
                  DashboardSource.isotype_igm == 'false',
                  DashboardSource.isotype_iga == 'true'),
             array(["IgG", "IgA"])),
            (and_(DashboardSource.isotype_igg == 'true',
                  DashboardSource.isotype_igm == 'true',
                  DashboardSource.isotype_iga == 'false'),
             array(["IgG", "IgM"])),
            (and_(DashboardSource.isotype_igg == 'false',
                  DashboardSource.isotype_igm == 'true',
                  DashboardSource.isotype_iga == 'true'),
             array(["IgM", "IgA"])),
            (and_(DashboardSource.isotype_igg == 'true',
                  DashboardSource.isotype_igm == 'false',
                  DashboardSource.isotype_iga == 'false'),
             array(["IgG"])),
            (and_(DashboardSource.isotype_igg == 'false',
                  DashboardSource.isotype_igm == 'false',
                  DashboardSource.isotype_iga == 'true'),
             array(["IgA"])),
            (and_(DashboardSource.isotype_igg == 'false',
                  DashboardSource.isotype_igm == 'true',
                  DashboardSource.isotype_iga == 'false'),
             array(["IgM"]))
        ],
        else_=cast(array([]), ARRAY(String))).label(label)
    return expression

def downgrade():
    op.execute("REVOKE SELECT ON id_map FROM analyst")
    op.execute("REVOKE SELECT ON program_surveygroup FROM analyst")
    op.execute("REVOKE SELECT ON user_surveygroup FROM analyst")
    op.execute("REVOKE SELECT ON organisation_surveygroup FROM analyst")
    op.execute("REVOKE SELECT ON activity_surveygroup FROM analyst")

    op.drop_table('id_map')
    op.drop_table('program_surveygroup')
    op.drop_table('user_surveygroup')
    op.drop_table('organisation_surveygroup')
    op.drop_table('activity_surveygroup')
    op.drop_table('surveygroup')

    op.execute('''
        DELETE FROM activity
        WHERE ob_type = 'surveygroup'
    ''')
    op.execute('''
        DELETE FROM subscription
        WHERE ob_type = 'surveygroup'
    ''')

    ob_types = array([
        'organisation', 'user',
        'program', 'survey', 'qnode', 'measure', 'response_type',
        'submission', 'rnode', 'response',
        'custom_query'
    ], type_=TEXT)
    op.drop_constraint('activity_ob_type_check', 'activity', type_='check')
    op.create_check_constraint(
        'activity_ob_type_check', 'activity',
        cast(column('ob_type'), TEXT) == func.any(ob_types))
    op.drop_constraint('subscription_ob_type_check', 'subscription',
                       type_='check')
    op.create_check_constraint(
        'subscription_ob_type_check', 'subscription',
        cast(column('ob_type'), TEXT) == func.any(ob_types))

    roles = array(
        ['admin', 'author', 'authority', 'consultant', 'org_admin', 'clerk'])
    op.execute("""
        UPDATE appuser
        SET role = 'admin'
        WHERE role = 'super_admin'
    """)
    op.drop_constraint('appuser_role_check', 'appuser', type_='check')
    op.create_check_constraint(
        'appuser_role_check', 'appuser',
        cast(column('role'), TEXT) == func.any(roles))

def filter(self, query, value, **kwargs):
    group_tags = self.formobj.get_group_tags(self.group['name'])
    if value == '1':
        # Partial
        if group_tags:
            constraint = and_(
                ~models.Submission.data.has_all(array(group_tags)),
                models.Submission.data.has_any(array(group_tags)))
        else:
            constraint = false()
    elif value == '2':
        # Missing
        if group_tags:
            constraint = or_(
                ~models.Submission.data.has_any(array(group_tags)),
                models.Submission.data == None  # noqa
            )
        else:
            constraint = true()
    elif value == '3':
        # Complete
        if group_tags:
            constraint = models.Submission.data.has_all(array(group_tags))
        else:
            constraint = false()
    elif value == '4':
        # Conflict
        if group_tags:
            query_params = [
                models.Submission.conflicts.has_key(tag)  # noqa
                for tag in group_tags
            ]
            constraint = or_(*query_params)
        else:
            constraint = false()
    else:
        constraint = None

    if constraint is None:
        return (None, None)
    else:
        form_ = kwargs['form']
        if form_.data and form_.data.get('conjunction') is True:
            # OR conjunction
            return (None, constraint)
        else:
            # AND conjunction
            return (constraint, None)

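# Standalone sketch of the hstore/array operators the filter above relies on:
# has_all() compiles to `?&` and has_any() to `?|`. The `submission` table
# and tag values here are hypothetical.
from sqlalchemy import Column, MetaData, Table
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import HSTORE, array

metadata = MetaData()
submission = Table('submission', metadata, Column('data', HSTORE))
tags = array(['AA', 'AB'])
print(submission.c.data.has_all(tags).compile(dialect=postgresql.dialect()))
# submission.data ?& ARRAY[%(param_1)s, %(param_2)s]
print(submission.c.data.has_any(tags).compile(dialect=postgresql.dialect()))
# submission.data ?| ARRAY[%(param_1)s, %(param_2)s]
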
def get(self):
    """
    Gathers all countries from the database with their data

    return a json object representing the countries
    """
    session = db.loadSession()

    # Make the sql query
    result = session.query(
        # What to select
        # outerjoin defaults to a LEFT outer join, NOT full outer join
        db.Country.id,
        db.Country.name,
        func.array_agg_cust(array([cast(db.Olympics.id, String),
                                   cast(db.Olympics.year, String),
                                   db.Olympics.season,
                                   db.City.name]))
    )\
    .select_from(db.Country)\
    .outerjoin(db.City)\
    .outerjoin(db.Olympics)\
    .group_by(db.Country.id, db.Country.name)\
    .all()  # Actually executes the query and returns a list of tuples

    session.close()

    keys = ('id', 'name',
            ('olympics-hosted', ('id', 'year', 'season', 'city')))
    all_countries_dict = list_of_dict_to_dict_of_dict(
        add_keys(keys, row) for row in result)
    return jsonify(all_countries_dict)

def get(self):
    """
    Gathers all events from the database with their data

    return a json object representing the events
    """
    session = db.loadSession()

    # Make the sql query
    result = session.query(
        # What to select
        # distinct because of multiple medals per event
        distinct(db.Event.id),
        db.Event.name,
        db.Sport.name,
        func.array_agg_cust(distinct(array([cast(db.Olympics.id, String),
                                            cast(db.Olympics.year, String),
                                            db.Olympics.season])))
    )\
    .select_from(db.Event)\
    .join(db.Sport)\
    .join(db.Medal)\
    .join(db.Olympics)\
    .group_by(db.Event.id, db.Event.name, db.Sport.name)\
    .all()  # Actually executes the query and returns a list of tuples

    session.close()

    keys = ('id', 'name', 'sport', ('olympics', ('id', 'year', 'season')))
    all_events_dict = list_of_dict_to_dict_of_dict(
        add_keys(keys, row) for row in result)
    return jsonify(all_events_dict)

def build_single_movement_query(dbsession, owner, period):
    """Build a query that lists the unreconciled movements in open periods.

    Return a query providing these columns:

    - transfer_id
    - date
    - delta
    - movement_ids
    """
    movement_date_c = func.date(
        func.timezone(get_tzname(owner),
                      func.timezone('UTC', FileMovement.ts)))
    return (dbsession.query(
        TransferRecord.transfer_id,
        movement_date_c.label('date'),
        file_movement_delta.label('delta'),
        array([FileMovement.movement_id]).label('movement_ids'),
    ).select_from(FileMovement).join(
        TransferRecord,
        TransferRecord.id == FileMovement.transfer_record_id).join(
            Period, Period.id == FileMovement.period_id).filter(
                FileMovement.owner_id == owner.id,
                FileMovement.file_id == period.file_id,
                FileMovement.reco_id == null(),
                file_movement_delta != 0,
                ~Period.closed,
            ))

def get(self, country_id):
    """
    Gather specified country from the database with its data

    country_id a non-zero, positive int
    return a json object representing the country
    """
    session = db.loadSession()

    assert type(country_id) == int

    # Make the sql query
    result = session.query(
        # What to select
        # outerjoin defaults to a LEFT outer join, NOT full outer join
        db.Country.id,
        db.Country.name,
        func.array_agg_cust(array([cast(db.Olympics.id, String),
                                   cast(db.Olympics.year, String),
                                   db.Olympics.season,
                                   db.City.name]))
    )\
    .select_from(db.Country)\
    .outerjoin(db.City)\
    .outerjoin(db.Olympics)\
    .filter(
        # What to filter by (where clause)
        db.Country.id == country_id)\
    .group_by(db.Country.id, db.Country.name)\
    .first()  # Actually executes the query and returns a tuple

    session.close()

    keys = ('id', 'name',
            ('olympics-hosted', ('id', 'year', 'season', 'city')))
    country_dict = add_keys(keys, result)
    return jsonify(country_dict)

def test_array_literal_insert(self):
    m = MetaData()
    t = Table("t", m, Column("data", postgresql.ARRAY(Integer)))
    self.assert_compile(
        t.insert().values(data=array([1, 2, 3])),
        "INSERT INTO t (data) VALUES (ARRAY[%(param_1)s, "
        "%(param_2)s, %(param_3)s])",
    )

def get_tree_cte(cls, col='id'):
    """Create a CTE for the category tree.

    The CTE contains the following columns:

    - ``id`` -- the category id
    - ``path`` -- an array containing the path from the root to
      the category itself
    - ``is_deleted`` -- whether the category is deleted

    :param col: The name of the column to use in the path or a
                callable receiving the category alias that must
                return the expression used for the 'path'
                retrieved by the CTE.
    """
    cat_alias = db.aliased(cls)
    if callable(col):
        path_column = col(cat_alias)
    else:
        path_column = getattr(cat_alias, col)
    cte_query = (select([cat_alias.id,
                         array([path_column]).label('path'),
                         cat_alias.is_deleted])
                 .where(cat_alias.parent_id.is_(None))
                 .cte(recursive=True))
    rec_query = (select([cat_alias.id,
                         cte_query.c.path.op('||')(path_column),
                         cte_query.c.is_deleted | cat_alias.is_deleted])
                 .where(cat_alias.parent_id == cte_query.c.id))
    return cte_query.union_all(rec_query)

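# Hypothetical call site for get_tree_cte() above: join the CTE back to the
# category table to fetch each live category together with its root-to-leaf
# path. `Category` and `db.session` are assumed names, so the usage is shown
# commented out rather than as executable code.
#
# cte = Category.get_tree_cte()
# query = (db.session.query(Category, cte.c.path)
#          .join(cte, Category.id == cte.c.id)
#          .filter(~cte.c.is_deleted))
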
def test_cols_hstore_single_array(self):
    self._test_cols(
        hstore(array(['1', '2', '3', None]))['3'],
        ("hstore(ARRAY[%(param_1)s, %(param_2)s, %(param_3)s, NULL]) "
         "-> %(hstore_1)s AS anon_1"),
        False
    )

def search_query(cls, tokens, weight_func=None, include_misses=False,
                 ordered=True):
    # Read the searchable columns from the table (strings)
    columns = cls.__searchable_columns__
    # Convert the columns from strings into column objects
    columns = [getattr(cls, c) for c in columns]
    # The model name that can be used to match search result to model
    cls_name = literal_column("'{}'".format(cls.__name__))

    # Filter out id: tokens for later
    ids, tokens = process_id_option(tokens)

    # If there are still tokens left after id: token filtering
    if tokens:
        # Generate the search weight expression from the
        # searchable columns, tokens and patterns
        if not weight_func:
            weight_func = weight_expression
        weight = weight_func(columns, tokens)
    # If the search expression only included "special" tokens like id:
    else:
        weight = literal_column(str(1))

    # Create an array of stringified detail columns
    details = getattr(cls, "__search_detail_columns__", None)
    if details:
        details = [cast(getattr(cls, d), Unicode) for d in details]
    else:
        details = [literal_column("NULL")]

    # Create a query object
    query = db.session.query(
        cls_name.label("model"),
        cls.id.label("id"),
        cls.name.label("name"),
        array(details).label("details"),
        weight.label("weight"),
    )

    # Filter out specific ids (optional)
    if ids:
        query = query.filter(cls.id.in_(ids))

    # Filter out results that don't match the patterns at all (optional)
    if not include_misses:
        query = query.filter(weight > 0)

    # Order by weight (optional)
    if ordered:
        query = query.order_by(desc(weight))

    return query

def test_cols_slice(self):
    self._test_cols(
        self.hashcol.slice(array(['1', '2'])),
        ("slice(test_table.hash, ARRAY[%(param_1)s, %(param_2)s]) "
         "AS slice_1"),
        True
    )

def test_cols_delete_array_of_keys(self):
    self._test_cols(
        self.hashcol.delete(array(['foo', 'bar'])),
        ("delete(test_table.hash, ARRAY[%(param_1)s, %(param_2)s]) "
         "AS delete_1"),
        True
    )

def preprocess_column_and_value(self):
    """Preprocess the column and the value.

    Certain operations will only work if the types are cast correctly.
    This is where it happens.
    """
    col, val = self.column, self.value

    # Case 1. Both column and value are arrays
    if self.is_column_array() and self.is_value_array():
        # Cast the value to ARRAY[] with the same type that the column has.
        # Only in this case will Postgres be able to handle them both.
        val = cast(pg.array(val), pg.ARRAY(col.type.item_type))

    # Case 2. JSON column
    if self.is_column_json():
        # This is the type to which the JSON column is coerced: same as
        # `value`.
        # Doc: "Suggest a type for a `coerced` Python value in an expression."
        # HACKY: use sqlalchemy type coercion
        coerce_type = col.type.coerce_compared_value('=', val)
        # Now, replace the `col` used in operations with this new coerced
        # expression.
        col = cast(col, coerce_type)

    # Done
    self.column_expression = col
    self.value_expression = val

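# Standalone sketch of why preprocess_column_and_value() casts array values:
# a typed ARRAY[...] literal lets Postgres compare both sides directly,
# whereas a bare Python list would be bound as plain scalar parameters.
# The `t` table here is hypothetical.
from sqlalchemy import Column, Integer, MetaData, Table, cast
from sqlalchemy.dialects import postgresql as pg

metadata = MetaData()
t = Table('t', metadata, Column('tags', pg.ARRAY(Integer)))
expr = t.c.tags == cast(pg.array([1, 2]), pg.ARRAY(Integer))
print(expr.compile(dialect=pg.dialect()))
# t.tags = CAST(ARRAY[%(param_1)s, %(param_2)s] AS INTEGER[])
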
def store_bounding_values(self, points, values,
                          prediction_model_run: PredictionModelRunTimestamp,
                          grib_info: ModelRunInfo):
    """ Store the values around the area of interest.
    """
    # Convert points to geographic coordinates:
    geographic_points = []
    for point in points:
        geographic_points.append(
            calculate_geographic_coordinate(point, self.origin, self.pixel))
    # Get the grid subset, i.e. the relevant bounding area for this
    # particular model.
    grid_subset = get_or_create_grid_subset(
        self.session, self.prediction_model, geographic_points)

    # Load the record if it exists.
    # pylint: disable=no-member
    prediction = self.session.query(ModelRunGridSubsetPrediction).\
        filter(ModelRunGridSubsetPrediction.prediction_model_run_timestamp_id ==
               prediction_model_run.id).\
        filter(ModelRunGridSubsetPrediction.prediction_timestamp ==
               grib_info.prediction_timestamp).\
        filter(ModelRunGridSubsetPrediction.prediction_model_grid_subset_id ==
               grid_subset.id).first()
    if not prediction:
        # Record doesn't exist, so we create it.
        prediction = ModelRunGridSubsetPrediction()
        prediction.prediction_model_run_timestamp_id = prediction_model_run.id
        prediction.prediction_timestamp = grib_info.prediction_timestamp
        prediction.prediction_model_grid_subset_id = grid_subset.id

    setattr(prediction, grib_info.variable_name.lower(), array(values))
    self.session.add(prediction)
    self.session.commit()

async def subtree(request):
    """
    ---
    tags:
    - tree
    summary: Get subtree
    produces:
    - application/json
    parameters:
    - in: path
      name: id
      required: true
      type: string
    responses:
        "200":
            description:
    """
    item_id = int(request.match_info.get('id'))
    item = await get_node(node_id=item_id, db=request.app['db'])
    if not item:
        return web.HTTPNotFound()
    async with request.app['db'].acquire() as conn:
        data = [
            dict(row.items())
            async for row in conn.execute(tree_table.select().where(
                and_(
                    tree_table.c.id != item_id,
                    array([tree_table.c.id]).overlap(
                        cast(item.path, ARRAY(INTEGER())))))
                .order_by(tree_table.c.path))
        ]
    return web.HTTPOk(text=json.dumps(data), content_type='application/json')

def _bounds_polygon(doc, projection_offset):
    geo_ref_points_offset = projection_offset + ['geo_ref_points']
    return func.ST_MakePolygon(
        func.ST_MakeLine(
            postgres.array(
                tuple(
                    _gis_point(doc, geo_ref_points_offset + [key])
                    for key in ('ll', 'ul', 'ur', 'lr', 'll')))),
        type_=Geometry)

def compile_array_agg(element, compiler, **kw):
    compiled = "%s(%s)" % (element.name, compiler.process(element.clauses))
    if element.default is None:
        return compiled
    return str(sa.func.coalesce(
        sa.text(compiled),
        sa.cast(postgresql.array(element.default), element.type)
    ).compile(compiler))

def qualstat_get_figures(conn, database, tsfrom, tsto,
                         queries=None, quals=None):
    condition = text("""datname = :database AND coalesce_range &&
        tstzrange(:from, :to)""")
    if queries is not None:
        condition = and_(condition,
                         array([int(q) for q in queries])
                         .any(literal_column("s.queryid")))
    if quals is not None:
        condition = and_(condition,
                         array([int(q) for q in quals])
                         .any(literal_column("qnc.qualid")))
    sql = (select([
        text('most_filtering.quals'),
        text('most_filtering.query'),
        text('to_json(most_filtering) as "most filtering"'),
        text('to_json(least_filtering) as "least filtering"'),
        text('to_json(most_executed) as "most executed"'),
        text('to_json(most_used) as "most used"')])
        .select_from(
            qual_constants("most_filtering", condition)
            .alias("most_filtering")
            .join(
                qual_constants("least_filtering", condition)
                .alias("least_filtering"),
                text("most_filtering.rownumber = "
                     "least_filtering.rownumber"))
            .join(qual_constants("most_executed", condition)
                  .alias("most_executed"),
                  text("most_executed.rownumber = "
                       "least_filtering.rownumber"))
            .join(qual_constants("most_used", condition)
                  .alias("most_used"),
                  text("most_used.rownumber = "
                       "least_filtering.rownumber"))))
    params = {"database": database, "from": tsfrom, "to": tsto}
    quals = conn.execute(sql, params=params)
    if quals.rowcount == 0:
        return None
    row = quals.first()
    return row

def test_array_literal_compare(self):
    self.assert_compile(
        postgresql.array([1, 2]) == [3, 4, 5],
        "ARRAY[%(param_1)s, %(param_2)s] = "
        "ARRAY[%(param_3)s, %(param_4)s, %(param_5)s]",
        checkparams={'param_5': 5, 'param_4': 4, 'param_1': 1,
                     'param_3': 3, 'param_2': 2}
    )

def test_literal_binds_pgarray(self):
    from sqlalchemy.dialects.postgresql import ARRAY, array
    m = MetaData()
    t = Table('t', m, Column('x', ARRAY(Integer),
                             server_default=array([1, 2, 3])))
    self.assert_compile(
        CreateTable(t),
        "CREATE TABLE t (x INTEGER[] DEFAULT ARRAY[1, 2, 3])",
        dialect='postgresql')

def _find_in_set(t, expr):
    # postgresql 9.5 has array_position, but the code below works on any
    # version of postgres with generate_subscripts
    # TODO: could make it even more generic by using generate_series
    # TODO: this works with *any* type, not just strings. should the
    #       operation itself also have this property?
    needle, haystack = expr.op().args
    return array_search(t.translate(needle),
                        pg.array(list(map(t.translate, haystack))))

def test_plain_old_unnest(self, connection):
    fn = func.unnest(
        postgresql.array(["one", "two", "three", "four"])).column_valued()
    stmt = select(fn)
    eq_(
        connection.execute(stmt).all(),
        [("one",), ("two",), ("three",), ("four",)],
    )

def get_user_unread_messages(request: Request) -> dict:
    """Show the logged-in user's unread message conversations."""
    conversations = (
        request.query(MessageConversation)
        .filter(MessageConversation.unread_user_ids.contains(  # type: ignore
            array([request.user.user_id])))
        .all())

    conversations.sort(key=lambda c: c.last_activity_time, reverse=True)

    return {"conversations": conversations}

def search():
    keywords = request.args.get('keywords')
    sort = request.args.get('sort')
    client = SphinxClient()
    client.SetServer(SEARCH_HOST, SEARCH_PORT)

    # Sorting mode
    if sort == 'newest':
        client.SetSortMode(SPH_SORT_ATTR_DESC, 'date_added')
    elif sort == 'oldest':
        client.SetSortMode(SPH_SORT_ATTR_ASC, 'date_added')
    elif sort == 'highest_cost':
        client.SetSortMode(SPH_SORT_ATTR_DESC, 'cost')
    elif sort == 'lowest_cost':
        client.SetSortMode(SPH_SORT_ATTR_ASC, 'cost')

    # Filter by category
    category = request.args.get('category')
    try:
        category = int(category)
    except (ValueError, TypeError):
        category = None
    if category:
        client.SetFilter('category', [category])

    # Paging (the 'per_page' query-arg name is an assumption; the original
    # never assigned per_page before converting it)
    per_page = request.args.get('per_page', default=20)
    try:
        per_page = int(per_page)
    except ValueError:
        per_page = 20
    page = request.args.get('page', default=1)
    try:
        page = int(page)
    except ValueError:
        page = 1

    # Use our SphinxSearch query to construct our page
    client.SetLimits(per_page * (page - 1), per_page)

    # Handle the query
    q = client.Query(keywords)
    if not q:
        return 'Could not complete search', 400
    ids = [res['id'] for res in q['matches']]
    if not ids:
        return jsonify(data=[], num_pages=0), 200

    # First construct the subquery
    s_ids = db.session.query(
        func.unnest(array(ids)).label('id')).subquery('s_ids')
    query = Postings.query.join(s_ids, Postings.id == s_ids.c.id)

    # Sphinx already applied the paging via SetLimits, so the subquery holds
    # exactly one page of ids; derive the page count from Sphinx's total.
    num_pages = -(-q.get('total_found', 0) // per_page)

    # Return the JSON
    return jsonify(data=[to_dict(r) for r in query.all()],
                   num_pages=num_pages), 200

def get_plots_bounding_box_as_json(self):
    positions = []
    if self.role.name == Role._ADMIN_ROLE:
        plots = Plot().queryObject().all()
    else:
        plots = self.plots
    for plot in plots:
        positions.append(plot.geom)
    if len(positions) > 0:
        return self.session.scalar(func.ST_AsGeoJson(
            func.ST_Envelope(func.ST_Union(array(positions)))))
    return None

def get_bounding_box(self):
    positions = []
    for position in self.positions:
        positions.append(position.geom)
        # We collect at most the maximum number of positions plus one, so
        # the caller can detect that there are more positions than the cap.
        if len(positions) == (max_positions + 1):
            break
    if len(positions) > 0:
        return self.session.scalar(func.ST_Envelope(
            func.ST_MakeLine(array(positions))))
    return None

def preprocess_value_and_column(cls, column, value):
    value_array = is_array(value)

    # Coerce operand
    if column.is_array and value_array:
        value = cast(pg.array(value),
                     pg.ARRAY(column.sql_col.type.item_type))

    if column.is_json:
        # HACKY: use sqlalchemy type coercion
        coerce_type = column.sql_col.type.coerce_compared_value('=', value)
        column.sql_col = cast(column.sql_col, coerce_type)

    return column, value

def test_array_agg_array_literal_explicit_type(self):
    from sqlalchemy.dialects.postgresql import array

    expr = array([column("data", Integer), column("d2", Integer)])

    agg_expr = func.array_agg(expr, type_=ARRAY(Integer))
    is_(agg_expr.type._type_affinity, ARRAY)
    is_(agg_expr.type.item_type._type_affinity, Integer)

    self.assert_compile(
        agg_expr, "array_agg(ARRAY[data, d2])", dialect="postgresql"
    )

def get_animals_bounding_box(self):
    positions = []
    if self.role.name == Role._ADMIN_ROLE:
        animals = Animal().queryObject().all()
    else:
        animals = self.animals
    for animal in animals:
        if animal.n_positions > 0:
            positions.append(animal.positions[0].geom)
    if len(positions) > 0:
        return self.session.scalar(func.ST_Envelope(
            func.ST_MakeLine(array(positions))))
    return None

def test_array_agg_array_literal_implicit_type(self):
    from sqlalchemy.dialects.postgresql import array, ARRAY as PG_ARRAY

    expr = array([column('data', Integer), column('d2', Integer)])
    assert isinstance(expr.type, PG_ARRAY)

    agg_expr = func.array_agg(expr)
    assert isinstance(agg_expr.type, PG_ARRAY)
    is_(agg_expr.type._type_affinity, ARRAY)
    is_(agg_expr.type.item_type._type_affinity, Integer)

    self.assert_compile(
        agg_expr, "array_agg(ARRAY[data, d2])", dialect="postgresql"
    )

def get_approx_position_as_geojson(self, time=None, filter_charging=True):
    # Default `time` to "now" at call time; an eager datetime.utcnow()
    # default argument would be frozen at import time.
    if time is None:
        time = datetime.utcnow()
    positions = []
    if self.id is not None:
        query = Position().queryObject().filter(
            Position.animal_id == self.id)
        if filter_charging:
            query = query.filter(Position.charging == False)  # noqa: E712
        query = query.filter(func.abs(
            func.date_part('hour', Position.date - time)) <= 2)
        aux = query.order_by(Position.date.desc()).limit(50)
        for position in aux:
            positions.append(position.geom)
    if len(positions) > 1:
        return self.session.scalar(func.ST_AsGeoJson(
            func.ST_MinimumBoundingCircle(
                func.ST_Collect(array(positions)))))
    return None

def __init__(self, meta, name, osmdata, subset=None):
    self.data = Table(name, meta,
                      Column('parent', BigInteger, index=True),
                      Column('child', BigInteger, index=True),
                      Column('depth', Integer))
    if subset is None:
        m = osmdata.member.data.alias()
        self.subset = select(
            [func.unnest(array([m.c.relation_id,
                                m.c.member_id])).label('id')],
            distinct=True).where(m.c.member_type == 'R')
    else:
        self.subset = subset
    self.osmdata = osmdata

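# Standalone sketch of the unnest(array([...])) trick used in __init__ above:
# it folds two id columns into a single deduplicated column. The `member`
# table here is hypothetical.
from sqlalchemy import BigInteger, Column, MetaData, Table, func, select
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import array

metadata = MetaData()
member = Table('member', metadata,
               Column('relation_id', BigInteger),
               Column('member_id', BigInteger))
ids = select([func.unnest(array([member.c.relation_id,
                                 member.c.member_id])).label('id')],
             distinct=True)
print(ids.compile(dialect=postgresql.dialect()))
# SELECT DISTINCT unnest(ARRAY[member.relation_id, member.member_id]) AS id
# FROM member
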
def get(self, olympic_id):
    """
    Gather specified olympics from the database with its data

    olympic_id a non-zero, positive int
    return a json object representing the olympic games
    """
    session = db.loadSession()

    assert type(olympic_id) == int

    # Make the sql query
    result = session.query(
        # What to select
        # distinct (because of multiple medals per event) has to go on the
        # first element, though we want distinct event ids
        distinct(db.Olympics.id),
        db.Olympics.year,
        db.Olympics.season,
        db.City.name,
        db.Country.name,
        # array_agg_cust so that each row will be an INDIVIDUAL olympic games
        func.array_agg_cust(distinct(array([cast(db.Event.id, String),
                                            db.Event.name,
                                            db.Sport.name])))
    )\
    .select_from(db.Olympics)\
    .join(db.City)\
    .join(db.Country)\
    .join(db.Medal, db.Medal.olympic_id == db.Olympics.id)\
    .join(db.Event)\
    .join(db.Sport)\
    .filter(
        # What to filter by (where clause)
        db.Olympics.id == olympic_id)\
    .group_by(db.Olympics.id,
              db.Olympics.year,
              db.Olympics.season,
              db.City.name,
              db.Country.name)\
    .first()  # Actually executes the query and returns a tuple

    session.close()

    keys = ('id', 'year', 'season', 'city', 'country',
            ('events', ('id', 'name', 'sport')))
    olympics_dict = add_keys(keys, result)
    return jsonify(olympics_dict)

def validate_update(cls, data, instance):
    d = cls.validate_json(data)
    cls._validate_common(d, instance)

    if db().query(models.Release).filter_by(
        name=d.get("name", instance.name),
        version=d.get("version", instance.version)
    ).filter(
        sa.not_(models.Release.id == instance.id)
    ).first():
        raise errors.AlreadyExists(
            "Release with the same name "
            "and version already exists",
            log_message=True
        )

    if 'roles_metadata' in d:
        deleted_roles = (set(instance.roles_metadata) -
                         set(d['roles_metadata']))
        clusters_ids = (cluster.id for cluster in instance.clusters)

        deleted_roles_array = sa.cast(
            psql.array(deleted_roles),
            psql.ARRAY(sa.String(consts.ROLE_NAME_MAX_SIZE)))

        node = db().query(models.Node).filter(
            models.Node.cluster_id.in_(clusters_ids)
        ).filter(sa.or_(
            models.Node.roles.overlap(deleted_roles_array),
            models.Node.pending_roles.overlap(deleted_roles_array)
        )).first()

        if node:
            used_role = set(node.roles + node.pending_roles)
            used_role = used_role.intersection(deleted_roles)

            raise errors.CannotDelete(
                "The following roles: {0} cannot be deleted "
                "since they are already assigned "
                "to nodes.".format(','.join(used_role))
            )

    return d

def get_node_groups(cls, instance, noderoles):
    """Returns node groups for given node roles.

    :param instance: a Cluster instance
    :param noderoles: a list of node roles
    :returns: a query for a list of NodeGroup instances
    """
    psql_noderoles = sa.cast(
        psql.array(noderoles),
        psql.ARRAY(sa.String(consts.ROLE_NAME_MAX_SIZE)))

    nodegroups = db().query(models.NodeGroup).join(models.Node).filter(
        models.Node.cluster_id == instance.id,
        models.Node.pending_deletion.is_(False)
    ).filter(sa.or_(
        models.Node.roles.overlap(psql_noderoles),
        models.Node.pending_roles.overlap(psql_noderoles)
    ))

    return nodegroups

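# Standalone sketch of the overlap pattern used by get_node_groups() above:
# casting the array literal to the column's exact element type makes the
# `&&` (overlap) operator unambiguous. The `node` table and role names here
# are hypothetical.
from sqlalchemy import Column, MetaData, String, Table, cast
from sqlalchemy.dialects import postgresql as psql

metadata = MetaData()
node = Table('node', metadata, Column('roles', psql.ARRAY(String(64))))
roles = cast(psql.array(['controller', 'compute']), psql.ARRAY(String(64)))
print(node.c.roles.overlap(roles).compile(dialect=psql.dialect()))
# node.roles && CAST(ARRAY[%(param_1)s, %(param_2)s] AS VARCHAR(64)[])
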