def quota_destroy_all_by_project(context, project_id):
    session = get_session()
    with session.begin():
        model_query(context, models.Quota, session=session,
                    read_deleted="no").\
            filter_by(project_id=project_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')},
                   synchronize_session=False)

        model_query(context, models.ProjectUserQuota, session=session,
                    read_deleted="no").\
            filter_by(project_id=project_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')},
                   synchronize_session=False)

        model_query(context, models.QuotaUsage, session=session,
                    read_deleted="no").\
            filter_by(project_id=project_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')},
                   synchronize_session=False)

        model_query(context, models.Reservation, session=session,
                    read_deleted="no").\
            filter_by(project_id=project_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')},
                   synchronize_session=False)
def construct_search(self, field_name, op=None):
    if op == '^':
        return literal_column(field_name).startswith
    elif op == '=':
        return literal_column(field_name).op('=')
    else:
        return literal_column(field_name).contains
def construct_search(self, field_name, op=None):
    if op == "^":
        return literal_column(field_name).startswith
    elif op == "=":
        return literal_column(field_name).op("=")
    else:
        return literal_column(field_name).ilike
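Both construct_search variants above return a callable (a bound column operator or an .op() wrapper), not a finished expression; the caller applies it to the search term. A minimal usage sketch, with the column name and the surrounding query assumed for illustration:

# pick the operator from a user-supplied prefix, then apply the term
build = self.construct_search('name', op='^')  # -> literal_column('name').startswith
criterion = build('Al')                        # renders roughly: name LIKE 'Al' || '%'
# query = query.filter(criterion)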
def list_fleets(self, message, user, params):
    # Check the planet exists
    planet = Planet.load(*params.group(1, 3, 5))
    if planet is None:
        message.alert("No planet with coords %s:%s:%s" % params.group(1, 3, 5))
        return

    # Find all fleets with a known alliance who have defended this planet
    OQ = session.query(coalesce(FleetScan.launch_tick, FleetScan.landing_tick),
                       literal_column("'From'").label("dir"),
                       Planet.x, Planet.y, Planet.z,
                       Alliance.name).select_from(FleetScan)
    OQ = OQ.filter(FleetScan.target_id == planet.id,
                   FleetScan.in_galaxy == False,
                   FleetScan.mission == "Defend")
    OQ = OQ.join(Intel, FleetScan.owner_id == Intel.planet_id).filter(Intel.alliance_id != None)
    OQ = OQ.join(Alliance, Intel.alliance_id == Alliance.id).join(Planet, FleetScan.owner_id == Planet.id)

    # Find all fleets with a known alliance who have been defended by this planet
    TQ = session.query(coalesce(FleetScan.launch_tick, FleetScan.landing_tick),
                       literal_column("'To '").label("dir"),
                       Planet.x, Planet.y, Planet.z,
                       Alliance.name).select_from(FleetScan)
    TQ = TQ.filter(FleetScan.owner_id == planet.id,
                   FleetScan.in_galaxy == False,
                   FleetScan.mission == "Defend")
    TQ = TQ.join(Intel, FleetScan.target_id == Intel.planet_id).filter(Intel.alliance_id != None)
    TQ = TQ.join(Alliance, Intel.alliance_id == Alliance.id).join(Planet, FleetScan.target_id == Planet.id)

    # Combine the results into one sorted list
    results = sorted(OQ.all() + TQ.all(), reverse=True)

    # Quit now if there are no results
    if len(results) == 0:
        message.reply("No suggestions found")
        return

    # Reply to the user
    message.reply("Tick Dir Planet Alliance")
    limit = int(params.group(6) or 5)
    for r in results[:limit]:
        message.reply("%4s %s %-9s %s" % (r[0], r[1],
                                          "%s:%s:%s" % (r[2], r[3], r[4]),
                                          r[5]))
    if len(results) > limit:
        message.reply("%s results not shown (%s total)" % (len(results) - limit,
                                                           len(results)))
def _run_query(start_date, end_date):
    created_filters = [DBInstance.created < end_date,
                       DBInstance.deleted == 0]
    created_columns = [DBInstance.created.label('timestamp'),
                       literal_column("0").label('deleted'),
                       DBDatastoreVersion.id.label('dsvid')]
    deleted_filters = [DBInstance.created < end_date,
                       DBInstance.deleted_at >= start_date,
                       DBInstance.deleted == 1]
    deleted_columns = [DBInstance.deleted_at.label('timestamp'),
                       literal_column("1").label('deleted'),
                       DBDatastoreVersion.id.label('dsvid')]

    query1 = DBInstance.query().\
        join(DBDatastoreVersion).\
        add_columns(*created_columns)
    query1 = query1.filter(*created_filters)

    query2 = DBInstance.query().\
        join(DBDatastoreVersion).\
        add_columns(*created_columns)
    query2 = query2.filter(*deleted_filters)

    query3 = DBInstance.query().\
        join(DBDatastoreVersion).\
        add_columns(*deleted_columns)
    query3 = query3.filter(*deleted_filters)

    union_query = query1.union(query2, query3).\
        order_by(text('anon_1.timestamp'))

    return union_query.all()
def visit_hierarchy(element, compiler, **kw):
    """visit compilation idiom for oracle"""
    if compiler.dialect.server_version_info < supported_db['oracle']:
        raise HierarchyLesserError(compiler.dialect.name,
                                   supported_db['oracle'])
    else:
        sel = element.select
        sel.append_column(literal_column('level', type_=Integer))
        sel.append_column(literal_column('CONNECT_BY_ISLEAF',
                                         type_=Boolean).label('is_leaf'))
        sel.append_column(literal_column(
            "LTRIM(SYS_CONNECT_BY_PATH (%s,','),',')" % (element.child),
            type_=String).label('connect_path'))
        qry = "%s" % (compiler.process(sel))
        if hasattr(element, 'starting_node') and \
                getattr(element, 'starting_node') is not False:
            if (element.starting_node == "a" and element.fk_type == String) or \
                    (element.starting_node == "0" and element.fk_type == Integer):
                qry += " start with %s is null" % (element.parent)
            elif getattr(element, 'starting_node') is False:
                pass
            else:
                qry += " start with %s=%s" % (element.parent,
                                              element.starting_node)
        qry += " connect by prior %s=%s" % (element.child, element.parent)
        if kw.get('asfrom', False):
            qry = '(%s)' % qry
        return qry
def soft_delete(self, synchronize_session="evaluate"):
    return self.update(
        {
            "deleted": literal_column("id"),
            "updated_at": literal_column("updated_at"),
            "deleted_at": timeutils.utcnow(),
        },
        synchronize_session=synchronize_session,
    )
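A recurring idiom in these snippets is passing literal_column('updated_at') as the new value of updated_at: the UPDATE sets the column to itself, so a soft delete does not bump the row's update timestamp. Here deleted is likewise set to the row's own id, which keeps unique constraints usable among live rows (deleted == 0). A minimal self-contained sketch of the generated SQL, using a hypothetical Core table:

from datetime import datetime
from sqlalchemy import (Column, DateTime, Integer, MetaData, Table,
                        literal_column, update)

metadata = MetaData()
# hypothetical table, for illustration only
items = Table('items', metadata,
              Column('id', Integer, primary_key=True),
              Column('deleted', Integer),
              Column('deleted_at', DateTime),
              Column('updated_at', DateTime))

stmt = update(items).values(
    deleted=literal_column('id'),             # deleted = id (unique per row)
    updated_at=literal_column('updated_at'),  # updated_at = updated_at (untouched)
    deleted_at=datetime.utcnow())
# str(stmt) renders roughly:
# UPDATE items SET deleted=id, deleted_at=:deleted_at, updated_at=updated_at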
def initialize(cls, url, initialize_data=True, initialize_schema=True):
    """Initialize the database.

    This includes the schema, the materialized views, the custom
    functions, and the initial content.
    """
    if url in cls.engine_for_url:
        engine = cls.engine_for_url[url]
        return engine, engine.connect()

    engine = cls.engine(url)
    if initialize_schema:
        cls.initialize_schema(engine)
    connection = engine.connect()

    # Check if the recursive equivalents function exists already.
    query = select(
        [literal_column('proname')]
    ).select_from(
        table('pg_proc')
    ).where(
        literal_column('proname') == 'fn_recursive_equivalents'
    )
    result = connection.execute(query)
    result = list(result)

    # If it doesn't, create it.
    if not result and initialize_data:
        resource_file = os.path.join(
            cls.resource_directory(), cls.RECURSIVE_EQUIVALENTS_FUNCTION)
        if not os.path.exists(resource_file):
            raise IOError("Could not load recursive equivalents function "
                          "from %s: file does not exist." % resource_file)
        sql = open(resource_file).read()
        connection.execute(sql)

    if initialize_data:
        session = Session(connection)
        cls.initialize_data(session)

    if connection:
        connection.close()

    if initialize_schema and initialize_data:
        # Only cache the engine if all initialization has been performed.
        #
        # Some pieces of code (e.g. the script that runs migrations) have
        # a legitimate need to bypass some of the initialization, but
        # normal operation of the site requires that everything be
        # initialized.
        #
        # Until someone tells this method to initialize everything, we
        # can't short-circuit this method with a cache.
        cls.engine_for_url[url] = engine
    return engine, engine.connect()
def actions_query(post_id):
    dbq = db.session

    comments = dbq.query(Comment.id, Comment.time_posted.label("time"),
                         literal_column("'Comment'", Unicode).label("t"))\
        .filter(Comment.post_id == post_id)
    rels = dbq.query(Relation.id, Relation.time_linked.label("time"),
                     literal_column("'Relation'", Unicode).label("t"))\
        .filter(Relation.parent_id == post_id)

    q = comments.union(rels)
    return q
def create_stm(self, columns=list(), filters=None, ordering=None, limit=None,
               start=None, url_filters=None, associations=None):
    self.set_filters(filters)
    self.set_query_columns(columns)
    self.set_url_filters(url_filters)
    self.limit = limit
    self.start = start
    self.ordering = ordering

    stm = select(self.query_columns).select_from(self.table)

    filters = list()
    coordinates_filter = ""
    for condition in self.filters:
        if condition.get("op") == "coordinates":
            coordinates_filter = self.get_condition_square(
                condition.get("lowerleft"),
                condition.get("upperright"),
                condition.get("property_ra"),
                condition.get("property_dec"))
        else:
            filters.append(condition)

    base_filters = and_(*self.do_filter(self.table, filters))
    stm = stm.where(and_(base_filters, coordinates_filter))

    # Ordering
    if self.ordering is not None:
        asc = True
        property = self.ordering.lower()
        if self.ordering[0] == '-':
            asc = False
            property = self.ordering[1:].lower()
        if asc:
            stm = stm.order_by(property)
        else:
            stm = stm.order_by(desc(property))

    # Pagination
    if self.limit:
        stm = stm.limit(literal_column(str(self.limit)))
    if self.start:
        stm = stm.offset(literal_column(str(self.start)))

    print(str(stm))
    return stm
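A side note on the pagination above: literal_column(str(self.limit)) splices the value into the SQL text verbatim, so it must never come straight from user input. SQLAlchemy's limit()/offset() also accept plain integers and render them as bound parameters, as in this sketch (table name hypothetical):

from sqlalchemy import Column, Integer, MetaData, Table, select

metadata = MetaData()
catalog = Table('catalog', metadata,  # hypothetical table
                Column('id', Integer, primary_key=True))

def paged(limit, start):
    stm = select([catalog])
    # ints become bound parameters (LIMIT :param_1 OFFSET :param_2),
    # avoiding raw interpolation of the values into the statement text
    if limit:
        stm = stm.limit(int(limit))
    if start:
        stm = stm.offset(int(start))
    return stm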
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted` if use_soft_delete is True)
    old duplicate rows from the table with name `table_name`.

    :param migrate_engine:  Sqlalchemy engine
    :param table_name:      Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
                       "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            delete_statement = (
                table.update()
                .where(delete_condition)
                .values({
                    "deleted": literal_column("id"),
                    "updated_at": literal_column("updated_at"),
                    "deleted_at": timeutils.utcnow(),
                })
            )
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
def migration_destroy(context, migration_id):
    session = get_session()
    now = timeutils.utcnow()
    with session.begin():
        model_query(context, models.Migration, session=session).\
            filter_by(id=migration_id).\
            update({'deleted': True,
                    'deleted_at': now,
                    'updated_at': literal_column('updated_at')})
        model_query(context, models.MigrationProperties, session=session).\
            filter_by(migration_id=migration_id).\
            update({'deleted': True,
                    'deleted_at': now,
                    'updated_at': literal_column('updated_at')})
def report_json(request):

    try:
        division_code = request.matchdict.get('divisioncode')
    except:
        raise HTTPBadRequest(detail='incorrect value for parameter '
                                    '"divisioncode"')

    try:
        resolution = float(request.params.get('resolution'))
    except:
        raise HTTPBadRequest(detail='invalid value for parameter "resolution"')

    hazard_type = request.matchdict.get('hazardtype', None)

    _filter = or_(AdministrativeDivision.code == division_code,
                  AdministrativeDivision.parent_code == division_code)

    simplify = func.ST_Simplify(
        func.ST_Transform(AdministrativeDivision.geom, 3857),
        resolution / 2)

    if hazard_type is not None:
        divisions = DBSession.query(AdministrativeDivision) \
            .add_columns(simplify, HazardLevel.mnemonic, HazardLevel.title) \
            .outerjoin(AdministrativeDivision.hazardcategories) \
            .join(HazardCategory) \
            .join(HazardType) \
            .join(HazardLevel) \
            .filter(and_(_filter, HazardType.mnemonic == hazard_type))
    else:
        divisions = DBSession.query(AdministrativeDivision) \
            .add_columns(simplify,
                         literal_column("'None'"),
                         literal_column("'blah'")) \
            .filter(_filter)

    return [{
        'type': 'Feature',
        'geometry': to_shape(geom_simplified),
        'properties': {
            'name': division.name,
            'code': division.code,
            'url': request.route_url(
                'report' if hazard_type else 'report_overview',
                division=division, hazardtype=hazard_type),
            'hazardLevelMnemonic': hazardlevel_mnemonic,
            'hazardLevelTitle': hazardlevel_title
        }
    } for division, geom_simplified, hazardlevel_mnemonic, hazardlevel_title
      in divisions]
def plan_destroy(context, plan_id):
    session = get_session()
    now = timeutils.utcnow()
    with session.begin():
        model_query(context, models.Plan, session=session).\
            filter_by(id=plan_id).\
            update({'deleted': True,
                    'deleted_at': now,
                    'updated_at': literal_column('updated_at')})
        model_query(context, models.Resource, session=session).\
            filter_by(plan_id=plan_id).\
            update({'deleted': True,
                    'deleted_at': now,
                    'updated_at': literal_column('updated_at')})
def _literal_as_column(element):
    if isinstance(element, Visitable):
        return element
    elif hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    else:
        return literal_column(str(element))
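A quick sketch of what this coercion helper accepts, assuming Visitable is sqlalchemy.sql.visitors.Visitable as the isinstance check suggests:

from sqlalchemy import column

_literal_as_column(column('price'))  # already a ClauseElement -> returned as-is
_literal_as_column(42)               # plain value -> literal_column('42')
# ORM attributes expose __clause_element__() and are unwrapped to their
# underlying column clause before literal_column() is ever reached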
def recu_query(cls):
    """Init select condition of index.

    :return: the query of db.session.
    """
    recursive_t = db.session.query(
        Index.parent.label("pid"),
        Index.id.label("cid"),
        func.cast(Index.id, db.Text).label("path"),
        Index.index_name.label("name"),
        literal_column("1", db.Integer).label("lev")).filter(
        Index.parent == 0,
        Index.is_delete == False). \
        cte(name="recursive_t", recursive=True)

    rec_alias = aliased(recursive_t, name="rec")
    test_alias = aliased(Index, name="t")
    recursive_t = recursive_t.union_all(
        db.session.query(
            test_alias.parent,
            test_alias.id,
            rec_alias.c.path + '/' + func.cast(test_alias.id, db.Text),
            rec_alias.c.name + '/' + test_alias.index_name,
            rec_alias.c.lev + 1).filter(
            test_alias.parent == rec_alias.c.cid,
            test_alias.is_delete == False))

    return recursive_t
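Here literal_column("1", db.Integer) seeds the recursion depth at 1, and each recursive step adds one. A hedged sketch of consuming the finished CTE (names taken from the snippet above; the exact row shape depends on the surrounding models):

# ordering by the accumulated path yields a depth-first tree listing
rows = db.session.query(recursive_t).order_by(recursive_t.c.path).all()
for pid, cid, path, name, lev in rows:
    print('  ' * (lev - 1) + name.split('/')[-1])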
def bm_deployment_destroy(context, dep_id, session=None):
    model_query(context, baremetal_models.BareMetalDeployment,
                session=session).\
        filter_by(id=dep_id).\
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})
def bm_interface_destroy(context, if_id, session=None):
    model_query(context, baremetal_models.BareMetalInterface,
                read_deleted="no", session=session).\
        filter_by(id=if_id).\
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})
def get_columns(self, when=None, prefix=None, format_kwargs=None):
    """
    Args:
        when: used in a case statement to filter the rows going into the
            aggregation function
        prefix: prefix for column names
        format_kwargs: kwargs to pass to format the aggregate quantity

    Returns: collection of SQLAlchemy columns
    """
    if prefix is None:
        prefix = ""
    if format_kwargs is None:
        format_kwargs = {}

    name_template = "{prefix}{quantity_name}_{function}"
    coltype_template = ""
    column_template = ("{function}({distinct}{args})"
                       "{order_clause}{filter}{coltype_cast}")
    arg_template = "{quantity}"
    order_template = ""
    filter_template = ""

    if self.orders != [None]:
        order_template += " WITHIN GROUP (ORDER BY {order})"
    if when:
        filter_template = " FILTER (WHERE {when})"
    if self.coltype is not None:
        coltype_template = "::{coltype}"

    for function, (quantity_name, quantity), order in product(
            self.functions, self.quantities.items(), self.orders):
        distinct, quantity = split_distinct(quantity)
        args = str.join(", ", (arg_template.format(quantity=q)
                               for q in quantity))
        order_clause = order_template.format(order=order)
        filter = filter_template.format(when=when)
        coltype_cast = coltype_template.format(coltype=self.coltype)

        if order is not None:
            if len(quantity_name) > 0:
                quantity_name += "_"
            quantity_name += to_sql_name(order)

        kwargs = dict(function=function, args=args, prefix=prefix,
                      distinct=distinct, order_clause=order_clause,
                      quantity_name=quantity_name, filter=filter,
                      coltype_cast=coltype_cast, **format_kwargs)

        column = column_template.format(**kwargs).format(**format_kwargs)
        name = name_template.format(**kwargs)

        yield ex.literal_column(column).label(to_sql_name(name))
def bm_deployment_destroy(context, dep_id, session=None):
    model_query(context, models.BareMetalDeployment,
                session=session).\
        filter_by(id=dep_id).\
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})
def get_selects(self):
    """
    Constructs select queries for this aggregation

    Returns: a dictionary of group : queries pairs where
        group are the same keys as groups
        queries is a list of Select queries, one for each date in dates
    """
    queries = {}

    for group, groupby in self.groups.items():
        intervals = self.intervals[group]
        queries[group] = []
        for date in self.dates:
            columns = [
                groupby,
                ex.literal_column("'%s'::date" % date).label(
                    self.output_date_column)
            ]
            columns += list(chain(*[
                self._get_aggregates_sql(i, date, group)
                for i in intervals]))

            gb_clause = make_sql_clause(groupby, ex.literal_column)
            query = ex.select(columns=columns, from_obj=self.from_obj)\
                .group_by(gb_clause)
            query = query.where(self.where(date, intervals))

            queries[group].append(query)

    return queries
def get_elevation(fixes):
    shortener = int(max(1, len(fixes) / 1000))

    coordinates = [(fix[2]["longitude"], fix[2]["latitude"]) for fix in fixes]
    points = MultiPoint(coordinates[::shortener])
    locations = from_shape(points, srid=4326)
    location = locations.ST_DumpPoints()

    cte = db.session.query(
        location.label("location"),
        locations.ST_Envelope().label("locations")).cte()

    location_id = literal_column("(location).path[1]")
    elevation = Elevation.rast.ST_Value(cte.c.location.geom)

    # Prepare main query
    q = (
        db.session.query(location_id.label("location_id"),
                         elevation.label("elevation"))
        .filter(
            and_(
                cte.c.locations.intersects(Elevation.rast),
                cte.c.location.geom.intersects(Elevation.rast),
            )
        )
        .all()
    )

    fixes_copy = [list(fix) for fix in fixes]

    # No elevations found at all...
    if not len(q):
        return fixes_copy

    start_idx = 0
    while start_idx < len(q) - 1 and q[start_idx].elevation is None:
        start_idx += 1

    prev = q[start_idx]

    for i in _xrange(start_idx + 1, len(q)):
        if q[i].elevation is None:
            continue

        current = q[i]

        for j in range((prev.location_id - 1) * shortener,
                       (current.location_id - 1) * shortener):
            elev = prev.elevation + (current.elevation - prev.elevation) / (
                (current.location_id - prev.location_id) * shortener
            ) * (j - (prev.location_id - 1) * shortener)
            fixes_copy[j][11] = elev

        prev = current

    if len(q) and q[-1].elevation:
        fixes_copy[-1][11] = q[-1].elevation

    return fixes_copy
def load_balancer_update_state(context, load_balancer_uuid, state):
    with context.session.begin():
        context.session.query(models.LoadBalancer).filter_by(
            uuid=load_balancer_uuid).update(
            {'state': state,
             'updated_at': literal_column('updated_at')})
def _get_idea_query(post, levels=None):
    """Return a query that includes the post and its following thread.

    Beware: we use a recursive query via a CTE and the PostgreSQL-specific
    ARRAY type. Blame this guy for that choice:
    http://explainextended.com/2009/09/24/adjacency-list-vs-nested-sets-postgresql/

    Also, that other guy provided insight into using CTE queries:
    http://stackoverflow.com/questions/11994092/how-can-i-perform-this-recursive-common-table-expression-in-sqlalchemy

    A literal column and an op complement nicely all this craziness.

    All I can say is SQLAlchemy kicks ass, and so does PostgreSQL.
    """
    level = literal_column('ARRAY[id]', type_=ARRAY(Integer))
    post = Post.db.query(post.__class__) \
        .add_columns(level.label('level')) \
        .filter(post.__class__.id == post.id) \
        .cte(name='thread', recursive=True)
    post_alias = aliased(post, name='post')
    replies_alias = aliased(post.__class__, name='replies')
    cumul_level = post_alias.c.level.op('||')(replies_alias.id)
    parent_link = replies_alias.parent_id == post_alias.c.id
    children = Post.db.query(replies_alias).add_columns(cumul_level) \
        .filter(parent_link)
    if levels:
        level_limit = func.array_upper(post_alias.c.level, 1) < levels
        children = children.filter(level_limit)
    return Post.db.query(post.union_all(children)).order_by(post.c.level)
def user_destroy(user_id):
    db_session.query(models.User).\
        filter_by(id=user_id).\
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})
    db_session.commit()
def search(self):
    if "q" not in self.request.params:
        return {"success": False, "msg": "Parameter 'q' is missing."}
    q = self.request.params["q"]

    if "epsg" in self.request.params:
        epsg = int(self.request.params["epsg"])
        geometry = functions.transform(Geonames.wkb_geometry, epsg)
    else:
        geometry = Geonames.wkb_geometry

    filters = [
        func.lower(Geonames.name).like(func.lower(q + "%")),
        func.lower(Geonames.asciiname).like(func.lower(q + "%")),
        func.lower(Geonames.alternatenames).like(func.lower(q + "%"))]

    rows = []
    for geojson, name, fcode, country in Session.query(
            pg_functions.geojson(geometry),
            Geonames.name, Geonames.fcode, Geonames.country).\
            filter(or_(*filters)).order_by(
                literal_column('bit_length("name")').label("word_length")).\
            all():
        rows.append({
            "name": "%s, %s, %s" % (name, fcode, country),
            "geometry": json.loads(geojson)})
    return {"success": True, "data": rows}
def select(self):
    sub_op = list(self.intermediate_tables.values())[0]

    # load tables.
    t_sub_op = Table(self.intermediate_tables['last_table']['table'],
                     self.dal.meta, autoload=True,
                     schema=self.intermediate_tables['last_table']['schema'])

    _where = []

    # bitmask
    alias_table = None
    t_coadd_molygon = Table(
        self.input_params['molygon_coadds']['table'],
        self.dal.meta, autoload=True,
        schema=self.input_params['molygon_coadds']['schema']
    ).alias('molygon_coadds')
    t_molygon = Table(
        self.input_params['molygon']['table'],
        self.dal.meta, autoload=True,
        schema=self.input_params['molygon']['schema']
    ).alias('molygon')

    stm_join = t_sub_op
    stm_join = stm_join.join(
        t_coadd_molygon,
        t_sub_op.c.coadd_objects_id == t_coadd_molygon.c.coadd_objects_id)

    for band in self.input_params['mangle_bitmask']:
        # give the str column and retrieve the attribute.
        alias_table = t_molygon.alias('molygon_%s' % band)
        col = getattr(t_coadd_molygon.c, 'molygon_id_%s' % band)
        stm_join = stm_join.join(alias_table, col == alias_table.c.id)
        _where.append(alias_table.c.hole_bitmask != literal_column('1'))

    stm = select([t_sub_op.c.coadd_objects_id]).\
        select_from(stm_join).where(and_(*_where))

    return stm
def _wall_events_query(self):
    """WallMixin implementation."""
    public_event_types = [
        'group_created',
        'subject_created',
        'subject_modified',
        'page_created',
        'page_modified',
    ]

    from ututi.lib.wall import generic_events_query
    t_evt = meta.metadata.tables['events']

    evts_generic = generic_events_query()

    if hasattr(c, 'teacher'):
        user_id = c.teacher.id
    else:
        user_id = c.user_info.id

    query = evts_generic\
        .where(t_evt.c.author_id == user_id)

    # XXX using literal_column, this is because I don't know how
    # to refer to the column in the query directly
    query = query.where(
        or_(t_evt.c.event_type.in_(public_event_types),
            and_(t_evt.c.event_type == 'file_uploaded',
                 literal_column('context_ci.content_type') == 'subject')))

    return query
def volume_type_destroy(context, name):
    session = get_session()
    with session.begin():
        volume_type_ref = volume_type_get_by_name(context, name,
                                                  session=session)
        volume_type_id = volume_type_ref['id']
        session.query(models.VolumeTypes).\
            filter_by(id=volume_type_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
        session.query(models.VolumeTypeExtraSpecs).\
            filter_by(volume_type_id=volume_type_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
def subscription_extend(context, subscription_id, datetime_to):
    session = get_session()
    with session.begin():
        session.query(models.Subscription).\
            filter_by(id=subscription_id).\
            update({'expires_at': datetime_to,
                    'updated_at': literal_column('updated_at')})
def storage_backend_capability_specs_destroy(context, values, session=None,
                                             inactive=False):
    read_deleted = "yes" if inactive else "no"
    _storage_backend = _find_storage_backend(context, values, True, session,
                                             inactive=inactive)
    if not _storage_backend.get('capability_specs_id'):
        return

    filter_dict = dict(storage_id=_storage_backend['capability_specs_id'])
    if values.get('spec_id'):
        filter_dict['id'] = values.get('spec_id')
    if values.get('skey'):
        filter_dict['skey'] = values.get('skey')
    if values.get('svalue'):
        filter_dict['svalue'] = values.get('svalue')

    model_query(context, models.StorageExtraSpecs, session=session).\
        filter_by(**filter_dict).\
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})
def getRunData(self):
    session = self.__session
    results = []
    try:
        # count the reports subquery
        stmt = session.query(Report.run_id,
                             func.count(literal_column('*')).label(
                                 'report_count')) \
            .filter(Report.suppressed == False) \
            .group_by(Report.run_id) \
            .subquery()

        q = session.query(Run, stmt.c.report_count) \
            .outerjoin(stmt, Run.id == stmt.c.run_id) \
            .order_by(Run.date)

        for instance, reportCount in q:
            if reportCount is None:
                reportCount = 0

            results.append(RunData(instance.id, str(instance.date),
                                   instance.name, instance.duration,
                                   reportCount, instance.command))
        return results

    except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
        msg = str(alchemy_ex)
        LOG.error(msg)
        raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
                                          msg)
def _storage_tier_destroy_in_session(context, filters, session):
    storage_tier_capability_specs_destroy(context, filters, session)
    model_query(context, models.StorageBackendTiers, session=session).\
        filter_by(**filters).\
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})
async def output_file(user_id: int, project_id: str,
                      postgres_engine: Engine) -> Iterable[FileMetaData]:
    node_id = "fd6f9737-1988-341b-b4ac-0614b646fa82"

    # pylint: disable=no-value-for-parameter
    file = FileMetaData()
    file.simcore_from_uuid(f"{project_id}/{node_id}/filename.txt",
                           bucket_name="master-simcore")
    file.entity_tag = "df9d868b94e53d18009066ca5cd90e9f"
    file.user_name = "test"
    file.user_id = str(user_id)

    async with postgres_engine.acquire() as conn:
        stmt = (file_meta_data.insert()
                .values(**attr.asdict(file))
                .returning(literal_column("*")))
        result = await conn.execute(stmt)
        row = await result.fetchone()

        # hacks defect
        file.user_id = str(user_id)
        file.location_id = str(file.location_id)
        # --
        assert file == FileMetaData(**dict(row))  # type: ignore

        yield file

        result = await conn.execute(file_meta_data.delete().where(
            file_meta_data.c.file_uuid == row.file_uuid))
def get_elevation(fixes):
    shortener = int(max(1, len(fixes) / 1000))

    coordinates = [(fix[2]['longitude'], fix[2]['latitude']) for fix in fixes]
    points = MultiPoint(coordinates[::shortener])
    locations = from_shape(points, srid=4326).ST_DumpPoints()
    locations_id = extract_array_item(locations.path, 1)

    subq = db.session.query(locations_id.label('location_id'),
                            locations.geom.label('location')).subquery()

    elevation = Elevation.rast.ST_Value(subq.c.location)

    # Prepare main query
    q = db.session.query(literal_column('location_id'),
                         elevation.label('elevation')) \
        .filter(and_(subq.c.location.ST_Intersects(Elevation.rast),
                     elevation != None)).all()

    fixes_copy = [list(fix) for fix in fixes]

    for i in xrange(1, len(q)):
        prev = q[i - 1].location_id - 1
        current = q[i].location_id - 1

        for j in range(prev * shortener, current * shortener):
            # linear interpolation between the two sampled elevations
            elev = q[i - 1].elevation + \
                (q[i].elevation - q[i - 1].elevation) / \
                ((current - prev) * shortener) * (j - prev * shortener)
            fixes_copy[j][11] = elev

    return fixes_copy
def _find_statistics_meta_duplicates(session: Session) -> list[int]:
    """Find duplicated statistics_meta."""
    subquery = (
        session.query(
            StatisticsMeta.statistic_id,
            literal_column("1").label("is_duplicate"),
        )
        .group_by(StatisticsMeta.statistic_id)
        .having(func.count() > 1)
        .subquery()
    )
    query = (
        session.query(StatisticsMeta)
        .outerjoin(
            subquery,
            (subquery.c.statistic_id == StatisticsMeta.statistic_id),
        )
        .filter(subquery.c.is_duplicate == 1)
        .order_by(StatisticsMeta.statistic_id, StatisticsMeta.id.desc())
        .limit(1000 * MAX_ROWS_TO_PURGE)
    )
    duplicates = execute(query)
    statistic_id = None
    duplicate_ids: list[int] = []

    if not duplicates:
        return duplicate_ids

    for duplicate in duplicates:
        if statistic_id != duplicate.statistic_id:
            statistic_id = duplicate.statistic_id
            continue
        duplicate_ids.append(duplicate.id)

    return duplicate_ids
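The literal_column("1").label("is_duplicate") above is just a marker column on the grouped subquery; joining back to it keeps every row whose key occurs more than once, ordered so the newest row per key comes first. A minimal standalone sketch of the same pattern in Core, with a hypothetical table:

from sqlalchemy import (Column, Integer, MetaData, String, Table, func,
                        literal_column, select)

metadata = MetaData()
stats = Table('stats', metadata,  # hypothetical table
              Column('id', Integer, primary_key=True),
              Column('key', String))

dupes = (select([stats.c.key,
                 literal_column("1").label("is_duplicate")])
         .group_by(stats.c.key)
         .having(func.count() > 1)
         .alias("dupes"))

# every row whose key appears more than once, newest first per key
q = (select([stats])
     .select_from(stats.outerjoin(dupes, dupes.c.key == stats.c.key))
     .where(dupes.c.is_duplicate == 1)
     .order_by(stats.c.key, stats.c.id.desc()))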
def repl(element):
    if isinstance(element, expression._BindParamClause):
        return expression.literal_column(_quote_ddl_expr(element.value))
    elif isinstance(element, expression.ColumnClause) and \
            element.table is not None:
        return expression.column(element.name)
    else:
        return None
def volume_destroy(context, volume_id):
    session = get_session()
    with session.begin():
        session.query(models.Volume).\
            filter_by(id=volume_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
        session.query(models.IscsiTarget).\
            filter_by(volume_id=volume_id).\
            update({'volume_id': None})
        session.query(models.VolumeMetadata).\
            filter_by(volume_id=volume_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
def subscription_error(context, subscription_id):
    session = get_session()
    with session.begin():
        session.query(models.Subscription).\
            filter_by(id=subscription_id).\
            update({'status': 'error',
                    'updated_at': literal_column('updated_at')})
def update_both_tables(column_number, input_string, Provided_IP):
    # This function will update both current and historic tables for a
    # given column
    columns = ["IP", "Location", "Date", "Score", "Category",
               "registrar_name", "registrar_organization"]
    columner1 = str(columns[column_number])

    input_current = session.query(IP_Current).filter(
        IP_Current.IP == Provided_IP).one()
    # Update current table with new information
    setattr(input_current, str(literal_column(str(columner1))),
            str(input_string))
    session.commit()

    if str(columner1) == 'Location':
        print('Domain Name: ' + input_string)
    elif str(columner1) == 'Category':
        print('Current Categorizations: ' + input_string)
    else:
        print(str(columner1) + ": " + input_string)

    input_historic = session.query(IP_History).filter(
        IP_History.IP == Provided_IP).one()
    # Update historic table with new information
    setattr(input_historic, str(literal_column(str(columner1))),
            str(input_string))
    session.commit()
def recs_query(cls, pid=0):
    """Init select condition of index.

    :return: the query of db.session.
    """
    recursive_t = db.session.query(
        Index.parent.label("pid"),
        Index.id.label("cid"),
        func.cast(Index.id, db.Text).label("path"),
        Index.index_name.label("name"),
        # add by ryuu at 1108 start
        Index.index_name_english.label("name_en"),
        # add by ryuu at 1108 end
        literal_column("1", db.Integer).label("lev")).filter(
        Index.parent == pid). \
        cte(name="recursive_t", recursive=True)

    rec_alias = aliased(recursive_t, name="rec")
    test_alias = aliased(Index, name="t")
    recursive_t = recursive_t.union_all(
        db.session.query(
            test_alias.parent,
            test_alias.id,
            rec_alias.c.path + '/' + func.cast(test_alias.id, db.Text),
            rec_alias.c.name + '/' + test_alias.index_name,
            # add by ryuu at 1108 start
            rec_alias.c.name_en + '/' + test_alias.index_name_english,
            # add by ryuu at 1108 end
            rec_alias.c.lev + 1).filter(
            test_alias.parent == rec_alias.c.cid))

    return recursive_t
async def create_soft_link(self, user_id: int, target_uuid: str,
                           link_uuid: str) -> FileMetaDataEx:
    # validate link_uuid
    async with self.engine.acquire() as conn:
        # TODO: select exists(select 1 from file_metadat where file_uuid=12)
        found = await conn.scalar(
            sa.select([file_meta_data.c.file_uuid])
            .where(file_meta_data.c.file_uuid == link_uuid))
        if found:
            raise ValueError(f"Invalid link {link_uuid}. Link already exists")

    # validate target_uuid
    target = await self.list_file(str(user_id), SIMCORE_S3_STR, target_uuid)
    if not target:
        raise ValueError(
            f"Invalid target '{target_uuid}'. File does not exists for this user"
        )

    # duplicate target and change the following columns:
    target.fmd.file_uuid = link_uuid
    target.fmd.file_id = link_uuid  # NOTE: api-server relies on this id
    target.fmd.is_soft_link = True

    async with self.engine.acquire() as conn:
        stmt = (file_meta_data.insert()
                .values(**attr.asdict(target.fmd))
                .returning(literal_column("*")))
        result = await conn.execute(stmt)
        link = to_meta_data_extended(await result.first())
        return link
def storage_pool_delete(context, values):
    session = get_session()
    with session.begin():
        try:
            for pool in values:
                pool_list = storage_pool_get(context, pool, session)
                for pool_info in pool_list:
                    if pool.get('services'):
                        pool_info['services'] = delete_services(
                            pool_info.get('services'), pool.get('services'))
                    else:
                        pool_info['services'] = None
                    filters = {}
                    for attr in ['id', 'pool', 'backend_name',
                                 'storage_backend_id', 'storage_tier_id']:
                        if pool.get(attr):
                            filters[attr] = pool.get(attr)
                    # delete record if it doesn't have any entries in
                    # services, otherwise just change services
                    if pool_info.get('services') and \
                            pool_info['services'] != "":
                        model_query(context, models.StoragePools,
                                    session=session).\
                            filter_by(**filters). \
                            update({'services': pool_info['services']})
                    else:
                        model_query(context, models.StoragePools,
                                    session=session).\
                            filter_by(**filters). \
                            update({'deleted': True,
                                    'deleted_at': timeutils.utcnow(),
                                    'updated_at':
                                        literal_column('updated_at')})
        except Exception as e:
            raise db_exc.DBError(e)
def artists():
    # If the user did not explicitly set force to 'false' (the default),
    # the attempt will be treated as if force were 'true'.  E.g. force=yes,
    # force=1 and even force=fasle (a mistyped value) are treated as
    # force=true.
    force = request.args.get('force')
    force = True if force is not None and force != 'false' else False

    # Get and set each parameter's value
    asked = {}
    for parameter, getter in PARAMETERS.items():
        try:
            asked[parameter] = getter(request.args.get(parameter), force)
        except ParamError as error:
            return error.jsonify()

    # Store values used more than once locally
    count = asked['count']
    start = (asked['start'] - START) * count
    weights = asked['sort']
    distance = Artist.distance(asked['latitude'],
                               asked['longitude']).label('distance')

    # Do the query
    query = session.query(Artist, distance).filter(
        true() if asked['gender'] == 'both'
        else Artist.gender == asked['gender'],
        Artist.rate <= asked['rate'],
        Artist.age.between(asked['youngest'], asked['oldest']),
        literal_column(distance.name) <= asked['radius']).order_by(
            (asc if asked['order'] == 'asc' else desc)(
                (weights['age'] * Artist.age) +
                (weights['gender'] * Artist.gender) +
                (weights['rate'] * Artist.rate) +
                (weights['distance'] * literal_column(distance.name)))).slice(
                    start, start + count)

    # If debugging mode is on print the compiled SQL(ite) query
    if app.debug:
        print('\n', str(query.statement.compile(dialect=sqlite.dialect())),
              'parameters:', asked, sep='\n', end='\n\n')

    # Return serialised and jsonified result
    return jsonify([artist.serialise(distance) for artist, distance in query])
def _get_column_query(self, doGroupBy, colnames=None, aggregate=func.min):
    # Build the sql query - including adding all column names, if columns
    # were None.
    if colnames is None:
        colnames = [k for k in self.columnMap.keys()]
    try:
        vals = [self.columnMap[k] for k in colnames]
    except KeyError:
        for c in colnames:
            if c in self.columnMap.keys():
                continue
            else:
                print("%s not in columnMap" % (c))
        raise ValueError('entries in colnames must be in self.columnMap',
                         self.columnMap)

    # Get the first query
    idColName = self.columnMap[self.idColKey]
    if idColName in vals:
        idLabel = self.idColKey
    else:
        idLabel = idColName

    # SQL server requires an aggregate on all columns if a group by clause
    # is used.  Modify the columnMap to take care of this.  The default is
    # MIN, but it shouldn't matter since the entries are almost identical
    # (except for proposalId).
    # Added double-quotes to handle column names that start with a number.
    if doGroupBy:
        query = self.connection.session.query(
            aggregate(self.table.c[idColName]).label(idLabel))
    else:
        query = self.connection.session.query(
            self.table.c[idColName].label(idLabel))

    for col, val in zip(colnames, vals):
        if val is idColName:
            continue
        # Check if this is a default column.
        if val == col:
            # If so, use the column in the table to take care of DB
            # specific column naming conventions (capitalization,
            # spaces, etc.)
            if doGroupBy:
                query = query.add_column(
                    aggregate(self.table.c[col]).label(col))
            else:
                query = query.add_column(self.table.c[col].label(col))
        else:
            # If not, assume that the user has specified the column
            # correctly
            if doGroupBy:
                query = query.add_column(
                    aggregate(expression.literal_column(val)).label(col))
            else:
                query = query.add_column(
                    expression.literal_column(val).label(col))
    return query
def _find_duplicates(
    session: scoped_session, table: type[Statistics | StatisticsShortTerm]
) -> tuple[list[int], list[dict]]:
    """Find duplicated statistics."""
    subquery = (
        session.query(
            table.start,
            table.metadata_id,
            literal_column("1").label("is_duplicate"),
        )
        .group_by(table.metadata_id, table.start)
        .having(func.count() > 1)
        .subquery()
    )
    query = (
        session.query(table)
        .outerjoin(
            subquery,
            (subquery.c.metadata_id == table.metadata_id)
            & (subquery.c.start == table.start),
        )
        .filter(subquery.c.is_duplicate == 1)
        .order_by(table.metadata_id, table.start, table.id.desc())
        .limit(1000 * MAX_ROWS_TO_PURGE)
    )
    duplicates = execute(query)
    original_as_dict = {}
    start = None
    metadata_id = None
    duplicate_ids: list[int] = []
    non_identical_duplicates_as_dict: list[dict] = []

    if not duplicates:
        return (duplicate_ids, non_identical_duplicates_as_dict)

    def columns_to_dict(
        duplicate: type[Statistics | StatisticsShortTerm],
    ) -> dict:
        """Convert a SQLAlchemy row to dict."""
        dict_ = {}
        for key in duplicate.__mapper__.c.keys():
            dict_[key] = getattr(duplicate, key)
        return dict_

    def compare_statistic_rows(row1: dict, row2: dict) -> bool:
        """Compare two statistics rows, ignoring id and created."""
        ignore_keys = ["id", "created"]
        keys1 = set(row1).difference(ignore_keys)
        keys2 = set(row2).difference(ignore_keys)
        return keys1 == keys2 and all(row1[k] == row2[k] for k in keys1)

    for duplicate in duplicates:
        if start != duplicate.start or metadata_id != duplicate.metadata_id:
            original_as_dict = columns_to_dict(duplicate)
            start = duplicate.start
            metadata_id = duplicate.metadata_id
            continue
        duplicate_as_dict = columns_to_dict(duplicate)
        duplicate_ids.append(duplicate.id)
        if not compare_statistic_rows(original_as_dict, duplicate_as_dict):
            non_identical_duplicates_as_dict.append(
                {"duplicate": duplicate_as_dict, "original": original_as_dict}
            )

    return (duplicate_ids, non_identical_duplicates_as_dict)
def volume_type_delete(context, id, session):
    """delete a volume_type by id.

    :param context: The request context, for access checks.
    :param id: The id of volume type to be deleted.
    """
    model_query(context, models.VolumeTypes, session=session,
                read_deleted="no").\
        filter_by(id=id). \
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})
    model_query(context, models.VolumeTypeExtraSpecs, session=session,
                read_deleted="no"). \
        filter_by(volume_type_id=id). \
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})
def report_json(request):

    try:
        division_code = request.matchdict.get('divisioncode')
    except:
        raise HTTPBadRequest(detail='incorrect value for parameter '
                                    '"divisioncode"')

    try:
        resolution = float(request.params.get('resolution'))
    except:
        raise HTTPBadRequest(detail='invalid value for parameter "resolution"')

    hazard_type = request.matchdict.get('hazardtype', None)

    _filter = or_(AdministrativeDivision.code == division_code,
                  AdministrativeDivision.parent_code == division_code)

    simplify = func.ST_Simplify(
        func.ST_Transform(AdministrativeDivision.geom, 3857),
        resolution / 2)

    if hazard_type is not None:
        divisions = DBSession.query(AdministrativeDivision) \
            .add_columns(simplify, HazardLevel.mnemonic, HazardLevel.title) \
            .outerjoin(AdministrativeDivision.hazardcategories) \
            .join(HazardCategory) \
            .outerjoin(HazardType) \
            .outerjoin(HazardLevel) \
            .filter(and_(_filter, HazardType.mnemonic == hazard_type))
    else:
        divisions = DBSession.query(AdministrativeDivision) \
            .add_columns(simplify,
                         literal_column("'None'"),
                         literal_column("'blah'")) \
            .filter(_filter)

    return [{
        'type': 'Feature',
        'geometry': to_shape(geom_simplified),
        'properties': {
            'name': division.name,
            'code': division.code,
            'hazardLevelMnemonic': hazardlevel_mnemonic,
            'hazardLevelTitle': hazardlevel_title
        }
    } for division, geom_simplified, hazardlevel_mnemonic, hazardlevel_title
      in divisions]
def item_type_destroy(context, item_type_id):
    session = get_session()
    with session.begin():
        session.query(models.ItemType).\
            filter_by(id=item_type_id).\
            update({'deleted': True,
                    'deleted_at': utils.utcnow(),
                    'updated_at': literal_column('updated_at')})
def expiry_date_expression(self) -> Function:
    return coalesce(
        max(self.table.c.sat_load_dt).over(
            partition_by=self.columns_in_table(self.table,
                                               self.parent.key_columns),
            order_by=self.table.c.sat_load_dt,
            rows=(1, 1)),
        literal_column("CAST('9999-12-31 00:00' AS DATE)"))
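The literal_column here embeds a raw CAST so the '9999-12-31' sentinel reaches the database verbatim. A roughly equivalent, dialect-aware sketch would let SQLAlchemy render the cast itself (assuming a DateTime target type):

from datetime import datetime
from sqlalchemy import DateTime, cast, literal

# the open-ended sentinel as a bound value with an explicit SQL cast;
# SQLAlchemy emits the dialect's cast syntax instead of raw SQL text
end_of_time = cast(literal(datetime(9999, 12, 31)), DateTime)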
def rbd_cache_config_delete_by_rbd_id(context, rbd_id):
    session = get_session()
    with session.begin():
        session.query(models.RbdCacheConfig).\
            filter_by(rbd_id=rbd_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
def hs_instance_delete(context, hs_instance_id):
    session = get_session()
    with session.begin():
        session.query(models.HsInstance).\
            filter_by(id=hs_instance_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
def purchase_destroy(context, purchase_id):
    session = get_session()
    with session.begin():
        session.query(models.Purchase).\
            filter_by(id=purchase_id).\
            update({'deleted': True,
                    'deleted_at': utils.utcnow(),
                    'updated_at': literal_column('updated_at')})
def select(self):
    table = Table(self.input_params['table'], self.dal.meta, autoload=True,
                  schema=self.input_params['schema'])

    stm = select([table]).where(
        se.BitwiseAnd(cast(table.c.signal, Integer),
                      sum(self.input_params['filters'])) > literal_column('0'))

    return stm
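se.BitwiseAnd appears to be a project-local compiled element; plain SQLAlchemy can express the same bitmask test with the built-in & operator via op(), as in this sketch over a hypothetical table:

from sqlalchemy import Column, Integer, MetaData, Table, select

metadata = MetaData()
flags = Table('flags', metadata,  # hypothetical table
              Column('id', Integer, primary_key=True),
              Column('signal', Integer))

mask = 0b0110  # bits to test
# renders roughly: SELECT ... WHERE (flags.signal & :mask) > 0
stm = select([flags]).where(flags.c.signal.op('&')(mask) > 0)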
def region_destroy(context, region_id):
    session = get_session()
    with session.begin():
        session.query(models.Region).\
            filter_by(id=region_id).\
            update({'deleted': True,
                    'deleted_at': utils.utcnow(),
                    'updated_at': literal_column('updated_at')})