def do_commands(self, arg):
    'Maintain commands and aliases'
    if arg.add:
        a = arg.add[0]
        c = arg.add[1]
        ca = self.db.query(CommandAlias).filter(
            CommandAlias.alias == a).first()
        if not ca:
            ca = CommandAlias(alias=a)
        ca.cmd = c
        if arg.pipe_to:
            ca.pipe_to = arg.pipe_to
        self.db.add(ca)
        self.db.commit()
    if arg.list:
        q = self.db.query(CommandAlias.alias, CommandAlias.cmd,
                          CommandAlias.pipe_to).filter(CommandAlias.enabled)
        self.print_table(q)
    if arg.results:
        q = self.db.query(
            Command.guid, Command.host_address, Command.host_port,
            Command.username, Command.cmd, Command.exit_status,
            ("STDOUT: " + func.substr(Command.stdout, 0, 50) + os.linesep +
             "STDERR: " + func.substr(Command.stderr, 0, 50) + os.linesep +
             "EXC: " + func.substr(Command.exception, 0, 50)).label('output'),
            Command.updated)
        self.print_table(q)
    if arg.save:
        r = self.db.query(
            func.coalesce(Command.stdout, Command.stderr,
                          Command.exception).label('output')).filter(
            Command.guid == arg.save[0]).scalar()
        with open(arg.save[0], 'wt') as f:
            f.write(r)

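# Hedged aside: SQL substr() is 1-based in SQLite, PostgreSQL, and MySQL, so
# the substr(col, 0, 50) calls above are unusual -- with a start of 0 those
# backends silently return one character fewer than requested. A minimal
# sketch of the conventional 1-based form, using an illustrative model that
# is not from the original code:
from sqlalchemy import Column, Integer, Text, func, select
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Cmd(Base):  # hypothetical stand-in for Command
    __tablename__ = 'cmd'
    id = Column(Integer, primary_key=True)
    stdout = Column(Text)

stmt = select(func.substr(Cmd.stdout, 1, 50).label('stdout_head'))
# str(stmt) renders roughly:
#   SELECT substr(cmd.stdout, :substr_1, :substr_2) AS stdout_head FROM cmd
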
def get_data(self):
    logging.debug("These queries will take a few mins to run.")
    budget_query = self.session.query(
        self.models.MovieInfo.movie_id,
        func.max(
            cast(
                func.replace(
                    func.replace(self.models.MovieInfo.info, ",", ""),
                    "$", ""),
                types.Numeric)).label('budget')
    ).filter(
        self.models.MovieInfo.movie_id.in_(self.movie_ids),
        self.models.MovieInfo.info_type_id.in_([BUDGET_TYPE_ID]),
        self.models.MovieInfo.info.like('$%'),
    ).group_by(self.models.MovieInfo.movie_id).subquery()

    year_query = self.session.query(
        self.models.MovieInfo.movie_id,
        func.min(
            func.substr(self.models.MovieInfo.info, -4)
        ).label('release_year')
    ).filter(
        self.models.MovieInfo.movie_id.in_(self.movie_ids),
        self.models.MovieInfo.info_type_id.in_([RELEASE_DATES_ID]),
        self.models.MovieInfo.info.like('USA:%')
    ).group_by(self.models.MovieInfo.movie_id).subquery()

    budget_alias = aliased(self.models.MovieInfo, budget_query)
    year_alias = aliased(self.models.MovieInfo, year_query)

    return self.session.query(
        budget_query.columns.movie_id,
        budget_query.columns.budget,
        year_query.columns.release_year
    ).join(
        year_alias, year_alias.movie_id == budget_alias.movie_id
    ).distinct(self.models.MovieInfo.movie_id).filter(
        self.models.MovieInfo.movie_id.in_(self.movie_ids),
    )

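# Hedged aside: substr() with a negative start counts from the right in
# SQLite and MySQL, so substr(info, -4) above takes the last four characters
# -- the year at the end of a release-date string. (PostgreSQL does not
# interpret a negative start this way, so the query is dialect-sensitive.)
# The same extraction in plain Python:
assert 'USA:12 May 2005'[-4:] == '2005'
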
def analyze_day(day="2014-08-22"):
    hour = func.cast(func.substr(trips.c.joreohitusaika_time, 1, 2), sa.Integer)
    cols = [trips.c.tulopysakki, hour,
            func.avg(trips.c.ohitusaika_ero).label('delay_avg')]
    # Copy the shared condition list so repeated calls do not accumulate one
    # day-filter per invocation.
    new_conds = list(conditions)
    new_conds.append(trips.c.tapahtumapaiva == day)
    conds = and_(*new_conds)
    groupcols = [trips.c.tulopysakki, hour]
    ts = run(cols, conds, groupcols, n_limit=None)
    save_multi_json(ts, "../site/hourly_stop_delays_%s.json" % day)

def get_tiles_by_quadkey(prediction_id: int, quadkeys: tuple, zoom: int):
    return db.session.query(
        func.substr(PredictionTile.quadkey, 1, zoom).label('quadkey'),
        func.avg(
            cast(
                cast(PredictionTile.predictions['ml_prediction'],
                     sqlalchemy.String),
                sqlalchemy.Float)).label('ml_prediction'),
        func.avg(
            cast(
                cast(PredictionTile.predictions['osm_building_area'],
                     sqlalchemy.String),
                sqlalchemy.Float)).label('osm_building_area')
    ).filter(
        PredictionTile.prediction_id == prediction_id
    ).filter(
        func.substr(PredictionTile.quadkey, 1, zoom).in_(quadkeys)
    ).group_by(func.substr(PredictionTile.quadkey, 1, zoom)).all()

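# Hedged aside: the first z characters of a quadkey name its ancestor tile at
# zoom z, so substr(quadkey, 1, zoom) is a valid GROUP BY key for rolling
# child tiles up to a coarser zoom. A tiny pure-Python illustration of the
# same rollup, with made-up quadkeys and prediction values:
from collections import defaultdict
from statistics import mean

tiles = {'120210233': 0.5, '120210231': 0.75, '120211000': 0.25}
rollup = defaultdict(list)
for quadkey, prediction in tiles.items():
    rollup[quadkey[:7]].append(prediction)  # prefix of length 7 == zoom-7 parent
parents = {qk: mean(vals) for qk, vals in rollup.items()}
assert parents == {'1202102': 0.625, '1202110': 0.25}
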
def get_data_query(self):
    all_inv = db_tables.clean_intervention_table
    student_lookup = all_inv.c.student_lookup
    school_year = sql.cast(
        db_func.substr(all_inv.c.school_year,
                       db_func.length(all_inv.c.school_year) - 3, 4),
        sql.INT).label('school_year')
    grade = all_inv.c.grade
    inv_group = all_inv.c.inv_group
    description = all_inv.c.description

    # FIXME: Make end year go up to the last year on record
    student_years = sql.select([
        student_lookup,
        school_year.label('end_year'),
        grade,
    ]).distinct(student_lookup, school_year, grade).where(
        grade >= 9).alias('student_years')

    student_invs = sql.select([
        student_lookup, school_year, grade, inv_group, description
    ]).where(grade >= features_config.min_grade).alias('student_invs')

    joined = sql.join(
        left=student_invs,
        right=student_years,
        onclause=sql.and_(
            student_invs.c.student_lookup == student_years.c.student_lookup,
            student_invs.c.school_year <= student_years.c.end_year))

    rate_col = db_func.count() * 1.0 / db_func.count(
        sql.distinct(joined.c.student_invs_school_year))

    inv_rates = sql.select([
        joined.c.student_invs_student_lookup.label('student_lookup'),
        joined.c.student_years_end_year.label('school_year'),
        joined.c.student_years_grade,
        joined.c.student_invs_inv_group.label('pivot_inv_group'),
        joined.c.student_invs_description.label('description'),
        rate_col.label('pivot_class1'),
        rate_col.label('pivot_class2'),
        rate_col.label('pivot_class3'),
    ]).select_from(joined).group_by(
        joined.c.student_invs_student_lookup,
        joined.c.student_years_end_year,
        joined.c.student_invs_inv_group,
        joined.c.student_invs_description,
        joined.c.student_years_grade,
    )

    return inv_rates

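# Hedged aside: substr(s, length(s) - 3, 4) is the SQL "last four characters"
# idiom; here it pulls the end year out of school-year strings that
# presumably look like '2006-2007'. The same slice for one Python value:
school_year = '2006-2007'  # illustrative value, not from the dataset
assert int(school_year[-4:]) == 2007
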
def get_treatment_percentage(self):
    code = db.session.query(
        func.substr(PrescribingData.BNF_code, 1, 4).label('Code'),
        PrescribingData.id).subquery()
    r = db.session.query(func.sum(PrescribingData.items)) \
        .outerjoin(code, PrescribingData.id == code.c.id) \
        .filter(PrescribingData.BNF_code.like("05%")) \
        .group_by(code.c.Code)
    treatment_total = int(self.get_treatment_total())
    ret = []
    for i in range(5):
        ret.append(round(int(r[i][0]) / treatment_total * 100, 2))
    return ret

def _run_directory_match_update(self):
    rd: RpmDetail = aliased(RpmDetail)
    fd: FileDetail = aliased(FileDetail)
    lk: RpmFileDetailLink = aliased(RpmFileDetailLink)

    query = State.get_db_session().query(
        rd.rpm_detail_id, fd.file_detail_id
    ).join(
        ResolvedSymlinks,
        (rd.system_id == ResolvedSymlinks.system_id)
        & (ResolvedSymlinks.target_type == "D")
        & (func.length(rd.file_location)
           > func.length(ResolvedSymlinks.file_location))
        & (ResolvedSymlinks.file_location == func.substr(
            rd.file_location, 1,
            func.length(ResolvedSymlinks.file_location)))
    ).join(
        fd,
        (fd.system_id == ResolvedSymlinks.system_id)
        & (fd.file_location == (
            ResolvedSymlinks.resolved_location + func.substr(
                rd.file_location,
                func.length(ResolvedSymlinks.file_location) + 1)))
    ).outerjoin(
        lk,
        (lk.file_detail_id == fd.file_detail_id)
        & (lk.rpm_detail_id == rd.rpm_detail_id)
    ).filter(
        (rd.system_id == self.system_id)
        & (lk.rpm_file_detail_link_id == None)  # SQLAlchemy: renders IS NULL
        & (func.coalesce(fd.file_type, "") != "S")
    ).distinct()

    insert_dml = insert(RpmFileDetailLink).from_select(
        [rd.rpm_detail_id, fd.file_detail_id], query)
    result = State.get_db_session().execute(insert_dml)
    State.get_db_session().flush()
    State.get_db_session().commit()
    self.analyze_database()
    return result.rowcount

def get(self):
    visibility = self.parameters.get("visibility", None)
    cache_key = "country-tag-%s-%s" % (self.tag_name or "home", visibility)
    value = self.cache.get(cache_key)
    if value:
        self.set_header(
            "Content-Type", "application/json; charset=UTF-8")
        self.write(value)
        self.finish()
        return

    q1 = self.orm.query(Org.org_id.label("org_id"))
    q1 = self.filter_visibility(q1, Org, visibility)
    if self.tag_name:
        q1 = q1 \
            .join(org_orgtag) \
            .join(Orgtag) \
            .filter(Orgtag.base_short == self.tag_name)
    q1 = q1.subquery()

    q2 = self.orm.query(func.substr(Orgtag.base, 30), Orgtag.base_short) \
        .join(org_orgtag, Orgtag.orgtag_id == org_orgtag.c.orgtag_id) \
        .join(q1, q1.c.org_id == org_orgtag.c.org_id) \
        .add_columns(func.count(q1.c.org_id)) \
        .filter(
            Orgtag.path_short == "market",
            Orgtag.base_short.startswith("military-export-applicant-to-%"),
            ~Orgtag.base_short.startswith(
                "military-export-applicant-to-%-in-____"),
        ) \
        .group_by(Orgtag.orgtag_id) \
        .order_by(Orgtag.base)

    results = q2.all()

    data = {
        "year": self.year,
        "tagName": self.tag_name,
        "countries": results,
    }

    self.cache.set(cache_key, json.dumps(data))
    self.write_json(data)

def get_directory_contents(path_spec: str) -> List[FileDetail]:
    system: System = State.get_system()
    session: Session = State.get_db_session()

    lookup = path_spec
    if not lookup.endswith(os.path.sep):
        lookup += os.path.sep
    lookup += "%"

    file_details: List[FileDetail] = session.query(FileDetail).filter(
        (FileDetail.system_id == system.system_id)
        & (FileDetail.file_location.like(lookup))
        & (func.strpos(
            func.substr(FileDetail.file_location, len(lookup)),
            os.path.sep,
        ) == 0)).all()

    return file_details

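# Hedged aside: the strpos(substr(location, len(lookup)), sep) == 0 clause
# keeps only direct children -- whatever follows the directory prefix must
# contain no further path separator. (func.strpos is PostgreSQL's strpos();
# SQLite and MySQL would use instr() instead.) The same filter in plain
# Python, as an illustrative helper that is not part of the original module:
def direct_children(paths, directory, sep='/'):
    prefix = directory if directory.endswith(sep) else directory + sep
    return [p for p in paths
            if p.startswith(prefix) and sep not in p[len(prefix):]]

assert direct_children(['/etc/hosts', '/etc/ssh/sshd_config'], '/etc') == ['/etc/hosts']
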
def get_stop_time_matrix():
    weekhour = func.strftime('%w', trips.c.tapahtumapaiva) * 24 + \
        func.cast(func.substr(trips.c.tuloaika_time, 1, 2), sa.Integer)
    cols = [trips.c.tulopysakki, weekhour,
            func.sum(trips.c.pysakkiaika).label('stop_sum')]
    conds = and_(*conditions)
    groupcols = [trips.c.tulopysakki, weekhour]
    ts = run(cols, conds, groupcols, n_limit=None)

    # Write to a csv file
    stops = list(set([val[0] for val in ts]))
    stop_map = {v: idx for (idx, v) in enumerate(stops)}
    mat = np.zeros((168, len(stops)))
    for (stop, wh, val) in ts:
        mat[wh, stop_map[stop]] = val
    with open('../site/stop_time_matrix.csv', 'w') as f:
        f.write(",".join(map(str, stops)) + '\n')
        for i in range(mat.shape[0]):
            f.write(",".join(map(str, mat[i, :])) + '\n')

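# Hedged aside: strftime('%w', ...) is SQLite's day-of-week, '0'-'6' with
# Sunday as 0, so weekhour ranges over 0..167 and indexes the 168-row matrix
# directly. The same computation for a single timestamp in plain Python:
from datetime import datetime

dt = datetime(2014, 8, 22, 14, 30)        # a Friday
weekhour = int(dt.strftime('%w')) * 24 + dt.hour
assert weekhour == 5 * 24 + 14            # Friday == 5 under %w
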
def upgrade():
    # re-size existing data if necessary
    identifier_map = table('cisco_csr_identifier_map',
                           column('ipsec_site_conn_id', sa.String(36)))
    ipsec_site_conn_id = identifier_map.columns['ipsec_site_conn_id']
    op.execute(identifier_map.update(values={
        ipsec_site_conn_id: expr.case(
            [(func.length(ipsec_site_conn_id) > 36,
              func.substr(ipsec_site_conn_id, 1, 36))],
            else_=ipsec_site_conn_id)}))

    # Need to drop foreign key constraint before mysql will allow changes
    with migration.remove_fks_from_table('cisco_csr_identifier_map'):
        op.alter_column(table_name='cisco_csr_identifier_map',
                        column_name='ipsec_site_conn_id',
                        type_=sa.String(36),
                        existing_nullable=False)

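# Hedged sketch of the same "truncate over-long values in place" pattern
# against a generic table (names here are illustrative, not from the
# migration). The case() guards the substr() so rows already within the
# limit keep their original value; note this uses the positional case()
# signature of SQLAlchemy 1.4+, whereas the migration above uses the older
# list form.
import sqlalchemy as sa
from sqlalchemy import table, column, func, case

items = table('items', column('name', sa.String(36)))
stmt = items.update().values(
    name=case(
        (func.length(items.c.name) > 36, func.substr(items.c.name, 1, 36)),
        else_=items.c.name,
    )
)
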
def ham_country_grid_coords(call):
    if "sqlite" in db.engine.driver:
        q = (
            DxccPrefixes.query.filter(
                DxccPrefixes.call
                == func.substr(call, 1, func.length(DxccPrefixes.call)))
            .order_by(func.length(DxccPrefixes.call).asc())
            .limit(1)
        )
    else:
        q = (
            DxccPrefixes.query.filter(
                DxccPrefixes.call
                == func.substring(call, 1, func.length(DxccPrefixes.call)))
            .order_by(func.length(DxccPrefixes.call).asc())
            .limit(1)
        )
    if q.count() <= 0:
        return None
    qth = coords_to_qth(q[0].lat, q[0].long, 6)
    return {"qth": qth["qth"], "latitude": q[0].lat, "longitude": q[0].long}

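# Hedged aside: func is generic -- SQLAlchemy renders func.<name>(...) as-is
# -- so the branch above only chooses between the spellings substr and
# substring. Since substr() is accepted by SQLite, PostgreSQL, and MySQL
# alike, a single unbranched query would likely behave identically (an
# untested assumption, reusing the names from the example above):
q = (
    DxccPrefixes.query.filter(
        DxccPrefixes.call
        == func.substr(call, 1, func.length(DxccPrefixes.call)))
    .order_by(func.length(DxccPrefixes.call).asc())
    .limit(1)
)
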
def upgrade():
    # re-size existing data if necessary
    identifier_map = table('cisco_csr_identifier_map',
                           column('ipsec_site_conn_id', sa.String(36)))
    ipsec_site_conn_id = identifier_map.columns['ipsec_site_conn_id']
    op.execute(
        identifier_map.update(values={
            ipsec_site_conn_id: expr.case(
                [(func.length(ipsec_site_conn_id) > 36,
                  func.substr(ipsec_site_conn_id, 1, 36))],
                else_=ipsec_site_conn_id)}))
    op.alter_column(table_name='cisco_csr_identifier_map',
                    column_name='ipsec_site_conn_id',
                    type_=sa.String(36),
                    existing_nullable=True)

def animes_agrupados_por_letra():
    animes = {}
    session = Session()
    for letra, anime in session.query(
            func.substr(Anime.nombre, 1, 1), Anime).all():
        letra = unidecode(letra.lower())
        if letra in [str(i) for i in range(0, 10)]:
            if '0-9' not in animes:
                animes['0-9'] = []
            animes['0-9'].append(anime.to_json())
            continue
        if letra not in animes:
            animes[letra] = []
        animes[letra].append(anime.to_json())
    return animes

def __init__(self):
    all_inv = db_tables.clean_intervention_table
    index_cols_dict = {
        'student_lookup': all_inv.c.student_lookup,
        'school_year': sql.cast(
            db_func.substr(all_inv.c.school_year,
                           db_func.length(all_inv.c.school_year) - 3, 4),
            sql.INT).label('school_year'),
        'grade': all_inv.c.grade
    }
    super(InvFeatures, self).__init__(
        table_name=inflection.underscore(InvFeatures.__name__),
        categorical_cols=inv_features_config.categorical_columns,
        post_features_processor=CompositeFeatureProcessor(
            [ImputeNullProcessor(fill_unspecified=0)]),
        data_table=all_inv,
        blocking_col=all_inv.c.inv_group,
        index_cols_dict=index_cols_dict)

def delete_category():
    key = request.args.get('key', None)
    if key == '6666666':
        try:
            query = db.session.query(
                distinct(func.substr(Goods.goods_category, 1, 3)))
            categories = query.all()
            cates = [str(d[0]) for d in categories]
            app.logger.debug(cates)
            db.session.execute(
                'update tq_goods_category set category_level = 0 '
                'where category_level = 1')
            db.session.execute(
                'update tq_goods_category set category_level = 1 '
                'where category_level = 0 and category_id in :ids',
                {'ids': cates})
            db.session.commit()
            return render_template('error.html', error_msg=cates)
        except Exception as e:
            db.session.rollback()
            print(e)
            traceback.print_exc()
            # report the failure instead of falling through with no response
            return render_template('error.html', error_msg=str(e))
    else:
        return render_template('error.html', error_msg='key error')

for row in result:
    print("ID:", row.id, "Name: ", row.name,
          "Address:", row.address, "Email:", row.email)

result = session.query(Customers).filter(Customers.name.like('Ra%'))
for row in result:
    print("ID:", row.id, "Name: ", row.name,
          "Address:", row.address, "Email:", row.email)

from sqlalchemy import literal
search_string = "asasd"  # ['ed', 'wendy', 'jack']
result = session.query(Customers).filter(
    literal(search_string).contains(Customers.name))

from sqlalchemy.sql import func
exif_conditions = [func.substr("asdasdasdasd", 7, 4) == b'Exif']

result = session.query(Customers).filter(Customers.id.in_([1, 3]))
for row in result:
    print("ID:", row.id, "Name: ", row.name,
          "Address:", row.address, "Email:", row.email)

from sqlalchemy import or_
result = session.query(Customers).filter(
    or_(Customers.id > 2, Customers.name.like('Ra%')))
for row in result:
    print("ID:", row.id, "Name: ", row.name,
          "Address:", row.address, "Email:", row.email)

session.query(Customers).filter(Customers.id == 3).scalar()
# session.query(Customers).one()  # fails as more than 1 row

def _get_paid_not_approved_query(cls, query_type, start_date, end_date):
    """
    Gets the query for paid but not approved shares between and including
    start and end date.

    Args:
        query_type: The type of the query to be built. 'data' for
            retrieving rows and 'shares_count' for an aggregate count
            query.
        start_date: The first date of which paid and not approved shares
            are considered.
        end_date: The last date of which paid and not approved shares are
            considered.

    Returns:
        A query according to the specified query_type.

        For 'data' the query is built to retrieve rows with attributes
        'id' for member id, 'lastname' for the member's lastname,
        'firstname' for the member's firstname, 'shares_count' for the
        number of shares and 'payment_received_date' for the date on
        which the payment was received.

        For 'shares_count' an aggregate count query is returned to
        retrieve the number of shares of all relevant shares packages.
    """
    # Shares which as of the day of the request have not been approved are
    # not yet stored in Shares but only available on the C3sMember.
    shares_count = expression.case(
        # "== None" for SQLAlchemy instead of Python "is None"
        # pylint: disable=singleton-comparison
        [(Shares.id == None, C3sMember.num_shares)],
        else_=Shares.number
    )
    payment_received_date = expression.case(
        [(
            # "== None" for SQLAlchemy instead of Python "is None"
            # pylint: disable=singleton-comparison
            Shares.id == None,
            # C3sMember.payment_received_date has the data type DateTime
            # but Date is required as it is used in
            # Shares.payment_received_date. As CAST on DateTime
            # '2017-01-02 12:23:34.456789' returns '2017' in SQLite and
            # therefore cannot be used, substring is used instead and then
            # SQLAlchemy is forced by type_coerce to parse it as a Date
            # column.
            expression.type_coerce(
                func.substr(C3sMember.payment_received_date, 1, 10),
                Date)
        )],
        else_=Shares.payment_received_date
    )
    # SQLAlchemy equality to None must be written "== None" rather than
    # Python's "is None".
    date_of_acquisition = expression.case(
        # pylint: disable=singleton-comparison
        [(Shares.id == None, C3sMember.membership_date)],
        else_=Shares.date_of_acquisition
    )

    if query_type == 'data':
        # pylint: disable=no-member
        query = DBSession.query(
            C3sMember.id,
            C3sMember.lastname,
            C3sMember.firstname,
            shares_count.label('shares_count'),
            payment_received_date.label('payment_received_date'),
        )
    if query_type == 'shares_count':
        # pylint: disable=no-member
        query = DBSession.query(
            func.sum(shares_count)
        )

    # Use outer joins as Shares do not have to exist yet.
    return query.select_from(C3sMember) \
        .outerjoin(members_shares) \
        .outerjoin(Shares) \
        .filter(
            expression.and_(
                # membership not approved in time period
                expression.or_(
                    # membership or share approved later than end date
                    date_of_acquisition > end_date,
                    # or membership or share not approved yet (default
                    # date)
                    date_of_acquisition == date(1970, 1, 1),
                ),
                # payment received in time period
                payment_received_date >= start_date,
                payment_received_date <= end_date,
            )
        )

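# Hedged, minimal reproduction of the substr() + type_coerce() trick above:
# take the first ten characters of a stored DateTime ('YYYY-MM-DD') and make
# SQLAlchemy treat the result as a Date column. Model names are illustrative,
# not from the original code.
from sqlalchemy import Column, DateTime, Integer, Date, func, select
from sqlalchemy.orm import declarative_base
from sqlalchemy.sql import expression

Base = declarative_base()

class Payment(Base):  # hypothetical stand-in for C3sMember
    __tablename__ = 'payment'
    id = Column(Integer, primary_key=True)
    received = Column(DateTime)

received_date = expression.type_coerce(
    func.substr(Payment.received, 1, 10), Date)
stmt = select(received_date.label('received_date'))
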
def __init__(self):
    self.SUPPORTED_FUNCS.update({
        func.slice: lambda col, slicefun: sqlfunctions.substr(
            col,
            slicefun.range.start,
            slicefun.range.stop - slicefun.range.start + 1)})

def func_slice(self, slicefun, tree):
    return sqlfunctions.substr(
        self.get_selectable(slicefun.property, tree),
        slicefun.range.start,
        slicefun.range.stop - slicefun.range.start + 1)

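# Hedged aside: SQL substr() takes (string, 1-based start, length), so an
# inclusive range [start, stop] translates to length = stop - start + 1,
# which is exactly what both slice adapters above compute. A quick sanity
# check of that arithmetic in plain Python:
def sql_substr(s, start, length):
    # mimic SQL substr(): 1-based start, explicit length
    return s[start - 1:start - 1 + length]

assert sql_substr('abcdef', 2, 3) == 'bcd'   # inclusive range [2, 4]
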
def get_monthly_stats(cls, year):
    """
    Gets monthly statistics for the specified year.

    Args:
        year (int): The year to which the invoice number belongs, e.g.
            2019.

    Returns:
        Sums of the normal and reversal invoices per calendar month based
        on the invoice date.
    """
    year_class = cls._get_year_class(year)
    if year_class is None:
        return None

    db_session = DBSession()
    result = []

    # SQLite specific: substr() is used because SQLite does not support
    # date_trunc:
    #
    #     invoice_date_month = func.date_trunc('month', invoice_date)
    paid_date = cls._PAYMENT_FIELDS[year]['paid_date']
    amount_paid = cls._PAYMENT_FIELDS[year]['amount_paid']
    invoice_date_month = func.substr(year_class.invoice_date, 1, 7)
    payment_date_month = func.substr(paid_date, 1, 7)

    # collect the invoice amounts per month
    invoice_amounts_query = db_session.query(
        invoice_date_month.label('month'),
        func.sum(expression.case(
            [(
                expression.not_(year_class.is_reversal),
                year_class.invoice_amount)],
            else_=Decimal('0.0'))).label('amount_invoiced_normal'),
        func.sum(expression.case(
            [(
                year_class.is_reversal,
                year_class.invoice_amount)],
            else_=Decimal('0.0'))).label('amount_invoiced_reversal'),
        expression.literal_column(
            '\'0.0\'', DatabaseDecimal).label('amount_paid')
    ).group_by(invoice_date_month)

    # collect the payments per month
    member_payments_query = db_session.query(
        payment_date_month.label('month'),
        expression.literal_column(
            '\'0.0\'', DatabaseDecimal).label('amount_invoiced_normal'),
        expression.literal_column(
            '\'0.0\'', DatabaseDecimal).label('amount_invoiced_reversal'),
        func.sum(amount_paid).label('amount_paid')
    ).filter(paid_date.isnot(None)) \
        .group_by(payment_date_month)

    # union invoice amounts and payments
    union_all_query = expression.union_all(
        member_payments_query, invoice_amounts_query)

    # aggregate invoice amounts and payments by month
    result_query = db_session.query(
        union_all_query.c.month.label('month'),
        func.sum(union_all_query.c.amount_invoiced_normal).label(
            'amount_invoiced_normal'),
        func.sum(union_all_query.c.amount_invoiced_reversal).label(
            'amount_invoiced_reversal'),
        func.sum(union_all_query.c.amount_paid).label('amount_paid')
    ) \
        .group_by(union_all_query.c.month) \
        .order_by(union_all_query.c.month)

    for month_stat in result_query.all():
        result.append({
            'month': datetime(
                int(month_stat[0][0:4]), int(month_stat[0][5:7]), 1),
            'amount_invoiced_normal': month_stat[1],
            'amount_invoiced_reversal': month_stat[2],
            'amount_paid': month_stat[3],
        })
    return result

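# Hedged aside: on ISO 'YYYY-MM-DD...' strings, substr(col, 1, 7) yields
# 'YYYY-MM', a SQLite-friendly stand-in for date_trunc('month', ...). The
# result loop above parses that key back into a datetime; the same parsing
# for one value:
from datetime import datetime

month_key = '2019-03-15 12:34:56'[0:7]    # 'YYYY-MM'
assert datetime(int(month_key[0:4]), int(month_key[5:7]), 1) == datetime(2019, 3, 1)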