def concatKK2KById(cls, id_, k1, k2, k_value, session, **kwargs):
    # Store concat(k1, k2) into the k_value column of the row with the given id.
    res = session.query(cls).filter(cls.id == id_).update({
        getattr(cls, k_value): func.concat(getattr(cls, k1), getattr(cls, k2))
    })
    session.commit()
    return res
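# Minimal usage sketch for the helper above. The `User` model, its columns,
# and `session` are hypothetical stand-ins, not from the original source;
# on MySQL/PostgreSQL this issues a single UPDATE using SQL-side CONCAT:
#
#   concatKK2KById(User, 1, 'first_name', 'last_name', 'full_name', session)
#
# afterwards the full_name column of row id 1 holds concat(first_name, last_name).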
def get_trilateration_points(self):
    points = []
    cells = {}
    enbys = Tower.query.group_by(func.concat(Tower.mcc, Tower.mnc, Tower.cid))
    for enb in enbys:
        enbid = enb.enodeb_id
        if enbid not in cells:
            cells[enbid] = []
        towers = Tower.query.filter(Tower.mnc == enb.mnc) \
            .filter(Tower.mcc == enb.mcc) \
            .filter(Tower.cid == enb.cid)
        towers = towers.group_by(func.concat(func.round(Tower.lat, 3), Tower.lon))
        if towers.count() > 3:
            res = self.trilaterate_enodeb_location(towers)
            cells[enbid].append(SimpleNamespace(
                lat=res[0], lon=res[1], est_dist=50,
                sus_pct=self.get_suspicious_percentage_by_enodeb(towers[0])))
    for i in cells:
        if len(cells[i]) > 0:
            res = self.trilaterate_enodeb_location(cells[i], False)
            points.append({
                'trilat': (res[0], res[1]),
                'enodeb_id': i,
                'max_suspiciousness': cells[i][0].sus_pct,
                'closest_tower': self.closest_known_tower(res[0], res[1]),
                'unique_cells': 'NA',  # self.get_cells_count_for_enodebid(cells[i]),
                'sightings': 'NA',  # self.get_sightings_for_enodeb(cells[i]).count(),
                'first_seen': 'NA',  # str(self.get_min_column_by_enodeb(cells[i], 'timestamp')),
                'last_seen': 'NA'  # str(self.get_max_column_by_enodeb(cells[i], 'timestamp'))
            })
    return points
def get_info_op(cls, p_kode):
    pkey = FixLength(NOP)
    pkey.set_raw(p_kode)
    query = pbbDBSession.query(
        func.concat(cls.kd_propinsi, '.').concat(cls.kd_dati2).concat('-').
            concat(cls.kd_kecamatan).concat('.').concat(cls.kd_kelurahan).concat('-').
            concat(cls.kd_blok).concat('.').concat(cls.no_urut).concat('-').
            concat(cls.kd_jns_op).label('nop'),
        cls.thn_pajak_sppt,
        cls.nm_wp_sppt.label('nm_wp'),
        func.concat(cls.jln_wp_sppt, ', ').concat(cls.blok_kav_no_wp_sppt).label('alamat_wp'),
        func.concat(cls.rt_wp_sppt, ' / ').concat(cls.rw_wp_sppt).label('rt_rw_wp'),
        cls.kelurahan_wp_sppt.label('kelurahan_wp'),
        cls.kota_wp_sppt.label('kota_wp'),
        cls.luas_bumi_sppt.label('luas_tanah'),
        cls.njop_bumi_sppt.label('njop_tanah'),
        cls.luas_bng_sppt.label('luas_bng'),
        cls.njop_bng_sppt.label('njop_bng'),
        cls.pbb_yg_harus_dibayar_sppt.label('ketetapan'),
        cls.status_pembayaran_sppt.label('status_bayar'),
        func.concat(DatObjekPajak.jalan_op, ', ').concat(DatObjekPajak.blok_kav_no_op).label('alamat_op'),
        func.concat(DatObjekPajak.rt_op, ' / ').concat(DatObjekPajak.rw_op).label('rt_rw_op'),
    ).filter(cls.kd_propinsi == DatObjekPajak.kd_propinsi,
             cls.kd_dati2 == DatObjekPajak.kd_dati2,
             cls.kd_kecamatan == DatObjekPajak.kd_kecamatan,
             cls.kd_kelurahan == DatObjekPajak.kd_kelurahan,
             cls.kd_blok == DatObjekPajak.kd_blok,
             cls.no_urut == DatObjekPajak.no_urut,
             cls.kd_jns_op == DatObjekPajak.kd_jns_op)
    return query.filter(
        cls.kd_propinsi == pkey['kd_propinsi'],
        cls.kd_dati2 == pkey['kd_dati2'],
        cls.kd_kecamatan == pkey['kd_kecamatan'],
        cls.kd_kelurahan == pkey['kd_kelurahan'],
        cls.kd_blok == pkey['kd_blok'],
        cls.no_urut == pkey['no_urut'],
        cls.kd_jns_op == pkey['kd_jns_op'],
    )
def __init__(self, search_term):
    search_term = re.sub(and_regex, ' AND ', search_term)
    search_term = re.sub(or_regex, ' OR ', search_term)
    parser = QueryParser("content", schema=None)
    q = parser.parse(search_term)
    invalid = self.validate_search_term(q)
    if invalid:
        raise ValueError(invalid + search_term)
    myapp.db_connector.connect()
    session = myapp.db_connector.get_session()
    subq = session.query(
        TourneyList.id.label("tourney_list_id"),
        TourneyVenue.country.label("country_name"),
        TourneyVenue.state.label("state_name"),
        TourneyVenue.city.label("city_name"),
        TourneyVenue.venue.label("venue_name"),
        Tourney.tourney_type.label("tourney_type"),
        func.group_concat(ShipPilot.ship_type.distinct()).label("ship_name"),
        func.group_concat(
            func.concat(Pilot.name, " ", Pilot.canon_name)).label("pilot_name"),
        func.group_concat(
            func.concat(Upgrade.name, " ", Upgrade.canon_name)).label("upgrade_name")). \
        join(Tourney). \
        join(TourneyVenue). \
        join(Ship). \
        join(ShipPilot). \
        join(Pilot). \
        outerjoin(ShipUpgrade). \
        outerjoin(Upgrade). \
        group_by(TourneyList.id).subquery()
    fn = tree_to_expr(q, subq)
    self.query = session.query(subq.c.tourney_list_id).filter(fn)
async def warehouses_get(
    page: int = 0,
    per_page: int = 100,
    address: str = None,
    current_user: UserSchema = Depends(get_current_user),
):
    if current_user.role not in (
        UserRoleEnum.ADMIN,
        UserRoleEnum.WAREHOUSE,
        UserRoleEnum.DELIVERY,
    ):
        raise HTTPException(status_code=403, detail="Access forbidden")
    sel = sa.select([Warehouses]).order_by(Warehouses.c.id)
    sel_cnt = sa.select([func.count(Warehouses.c.id)])
    if address is not None:
        # used concat to prevent sql injection
        sel = sel.where(
            Warehouses.c.address.like(func.concat(address, "%")),
        )
        sel_cnt = sel_cnt.where(
            Warehouses.c.address.like(func.concat(address, "%")),
        )
    return await pagination_view_builder(
        sel,
        sel_cnt,
        lambda id_, addr: WarehouseSchema(id=id_, address=addr),
        WarehousesSchema,
        page,
        per_page,
    )
async def delivery_companies_get(
    page: int = 0,
    per_page: int = 100,
    name: str = None,
    current_user: UserSchema = Depends(get_current_user),
):
    if current_user.role not in (UserRoleEnum.ADMIN, UserRoleEnum.CLIENT):
        raise HTTPException(status_code=403, detail="Access forbidden")
    sel = sa.select([DeliveryCompanies]).order_by(DeliveryCompanies.c.id)
    sel_cnt = sa.select([func.count(DeliveryCompanies.c.id)])
    if name is not None:
        # used concat to prevent sql injection
        sel = sel.where(
            DeliveryCompanies.c.name.like(func.concat(name, "%")),
        )
        sel_cnt = sel_cnt.where(
            DeliveryCompanies.c.name.like(func.concat(name, "%")),
        )
    return await pagination_view_builder(
        sel,
        sel_cnt,
        lambda id_, name_, price: DeliveryCompanySchema(
            id=id_, name=name_, price=price),
        DeliveryCompaniesSchema,
        page,
        per_page,
    )
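# The two handlers above share a prefix-search pattern worth noting: the user
# string goes to func.concat() as a bound parameter and the '%' wildcard is
# appended inside SQL, so the input never becomes part of the SQL text itself
# (LIKE wildcards typed by the user still act as wildcards, though). A minimal
# self-contained sketch of the same idea; the table and column names here are
# illustrative, not from the original source:
import sqlalchemy as sa
from sqlalchemy import func

metadata = sa.MetaData()
companies = sa.Table(
    'companies', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('name', sa.String(100)),
)

def name_prefix_filter(user_input: str):
    # Renders as: companies.name LIKE concat(:param, '%')
    return companies.c.name.like(func.concat(user_input, '%'))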
def get_columns():
    columns = []
    columns.append(ColumnDT('id', mData='id'))
    columns.append(ColumnDT('nop', mData='nop'))
    columns.append(ColumnDT('thn_pajak_sppt', mData='thn_pajak_sppt'))
    columns.append(ColumnDT('pembayaran_sppt_ke', mData='pembayaran_sppt_ke'))
    columns.append(ColumnDT('denda_sppt', mData='denda_sppt', filter=_DTnumber))
    columns.append(
        ColumnDT('jml_sppt_yg_dibayar', mData='jml_sppt_yg_dibayar',
                 filter=_DTnumber))
    columns.append(
        ColumnDT('tgl_pembayaran_sppt', mData='tgl_pembayaran_sppt',
                 filter=_DTdate))
    query = PosPbbDBSession.query(
        func.concat(PosPembayaranSppt.kd_propinsi, PosPembayaranSppt.kd_dati2,
                    PosPembayaranSppt.kd_kecamatan, PosPembayaranSppt.kd_kelurahan,
                    PosPembayaranSppt.kd_blok, PosPembayaranSppt.no_urut,
                    PosPembayaranSppt.kd_jns_op, PosPembayaranSppt.thn_pajak_sppt,
                    PosPembayaranSppt.pembayaran_sppt_ke).label('id'),
        func.concat(PosPembayaranSppt.kd_propinsi, PosPembayaranSppt.kd_dati2,
                    PosPembayaranSppt.kd_kecamatan, PosPembayaranSppt.kd_kelurahan,
                    PosPembayaranSppt.kd_blok, PosPembayaranSppt.no_urut,
                    PosPembayaranSppt.kd_jns_op).label('nop'),
        PosPembayaranSppt.thn_pajak_sppt,
        PosPembayaranSppt.pembayaran_sppt_ke,
        PosPembayaranSppt.denda_sppt,
        PosPembayaranSppt.jml_sppt_yg_dibayar,
        PosPembayaranSppt.tgl_pembayaran_sppt)
    return columns, query
def get_columns():
    columns = [
        ColumnDT(func.concat(PosSppt.kd_propinsi, PosSppt.kd_dati2,
                             PosSppt.kd_kecamatan, PosSppt.kd_kelurahan,
                             PosSppt.kd_blok, PosSppt.no_urut,
                             PosSppt.kd_jns_op, PosSppt.thn_pajak_sppt),
                 mData='id'),
        ColumnDT(func.concat(PosSppt.kd_propinsi, PosSppt.kd_dati2,
                             PosSppt.kd_kecamatan, PosSppt.kd_kelurahan,
                             PosSppt.kd_blok, PosSppt.no_urut,
                             PosSppt.kd_jns_op),
                 mData='nop'),
        ColumnDT(PosSppt.thn_pajak_sppt, mData='thn_pajak_sppt'),
        ColumnDT(PosSppt.pbb_yg_harus_dibayar_sppt,
                 mData='pbb_yg_harus_dibayar_sppt'),
        ColumnDT(func.to_char(PosSppt.tgl_terbit_sppt, 'DD-MM-YYYY'),
                 mData='tgl_terbit_sppt'),
        ColumnDT(func.to_char(PosSppt.tgl_cetak_sppt, 'DD-MM-YYYY'),
                 mData='tgl_cetak_sppt'),
        ColumnDT(PosSppt.status_pembayaran_sppt, mData='status_pembayaran_sppt'),
    ]
    query = PosPbbDBSession.query().select_from(PosSppt)
    return columns, query
def get_info_op(cls, p_kode):
    pkey = FixLength(NOP)
    pkey.set_raw(p_kode)
    query = pbb_DBSession.query(
        func.concat(cls.kd_propinsi, '.').concat(cls.kd_dati2).concat('-').
            concat(cls.kd_kecamatan).concat('.').concat(cls.kd_kelurahan).concat('-').
            concat(cls.kd_blok).concat('.').concat(cls.no_urut).concat('-').
            concat(cls.kd_jns_op).label('nop'),
        cls.thn_pajak_sppt,
        cls.nm_wp_sppt.label('nm_wp'),
        func.concat(cls.jln_wp_sppt, ', ').concat(cls.blok_kav_no_wp_sppt).label('alamat_wp'),
        func.concat(cls.rt_wp_sppt, ' / ').concat(cls.rw_wp_sppt).label('rt_rw_wp'),
        cls.kelurahan_wp_sppt.label('kelurahan_wp'),
        cls.kota_wp_sppt.label('kota_wp'),
        cls.luas_bumi_sppt.label('luas_tanah'),
        cls.njop_bumi_sppt.label('njop_tanah'),
        cls.luas_bng_sppt.label('luas_bng'),
        cls.njop_bng_sppt.label('njop_bng'),
        cls.pbb_yg_harus_dibayar_sppt.label('ketetapan'),
        cls.status_pembayaran_sppt.label('status_bayar'),
        func.concat(DatObjekPajak.jalan_op, ', ').concat(DatObjekPajak.blok_kav_no_op).label('alamat_op'),
        func.concat(DatObjekPajak.rt_op, ' / ').concat(DatObjekPajak.rw_op).label('rt_rw_op'),
    ).filter(cls.kd_propinsi == DatObjekPajak.kd_propinsi,
             cls.kd_dati2 == DatObjekPajak.kd_dati2,
             cls.kd_kecamatan == DatObjekPajak.kd_kecamatan,
             cls.kd_kelurahan == DatObjekPajak.kd_kelurahan,
             cls.kd_blok == DatObjekPajak.kd_blok,
             cls.no_urut == DatObjekPajak.no_urut,
             cls.kd_jns_op == DatObjekPajak.kd_jns_op)
    return query.filter(cls.kd_propinsi == pkey['kd_propinsi'],
                        cls.kd_dati2 == pkey['kd_dati2'],
                        cls.kd_kecamatan == pkey['kd_kecamatan'],
                        cls.kd_kelurahan == pkey['kd_kelurahan'],
                        cls.kd_blok == pkey['kd_blok'],
                        cls.no_urut == pkey['no_urut'],
                        cls.kd_jns_op == pkey['kd_jns_op'])
def _order_by(self, query, joins, sort_joins, sort_field, sort_desc):
    query, joins = super(AffiliationView, self)._order_by(
        query, joins, sort_joins, sort_field, sort_desc)
    if sort_field.name == 'code':
        # sort by the code field, which has entries like:
        #   1
        #   1.1
        #   1.2
        #   1.10
        #   1.11
        #   10.1
        #
        # these are hard to sort, because they don't sort correctly
        # numerically or lexicographically.  Instead, we treat them
        # as parts of dotted-quad IP addresses and use mysql's inet_aton
        # to sort them.
        sort_field = func.inet_aton(
            func.if_(
                func.instr(sort_field, '.') > 0,
                func.concat(sort_field, '.0.0'),     # eg. 10.2
                func.concat(sort_field, '.0.0.0')))  # eg. 10
        if sort_desc:
            sort_field = desc(sort_field)
        query = query.order_by(None).order_by(sort_field)
    return query, joins
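# Worked example of the padding trick above (values computed by hand with
# MySQL's INET_ATON, which maps a.b.c.d to a*256**3 + b*256**2 + c*256 + d):
#   '1.2'  -> '1.2.0.0'  ->  1*2**24 +  2*2**16 =  16908288
#   '1.10' -> '1.10.0.0' ->  1*2**24 + 10*2**16 =  17432576
#   '10.1' -> '10.1.0.0' -> 10*2**24 +  1*2**16 = 167837696
# giving 1.2 < 1.10 < 10.1, where plain string sorting would put '1.10'
# before '1.2'.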
def get(self, args):
    jwt_data = get_jwt_identity()
    if 'limit' not in args or args['limit'] > 50:
        args['limit'] = 50
    # strip any characters that would cause matching issues
    q = args['q'].replace(',', '')
    courses = Course.query.join(Course.department).filter(
        Course.department.has(
            Department.school.has(
                School.university_id == jwt_data['university_id']))).filter(
        func.concat(Department.abbreviation, ' ', Course.number, ' ',
                    Course.title).ilike('%{}%'.format(q))).limit(args['limit'])
    professors = Professor.query.filter(
        func.concat(Professor.last_name, ' ', Professor.first_name).ilike(
            '%{}%'.format(q)) |
        func.concat(Professor.first_name, ' ', Professor.last_name).ilike(
            '%{}%'.format(q))).limit(args['limit'])
    return {
        'courses': [course.to_dict() for course in courses.all()],
        'professors': [professor.to_dict() for professor in professors.all()]
    }
def get_columns():
    columns = []
    columns.append(ColumnDT('id', mData='id'))
    columns.append(ColumnDT('nop', mData='nop'))
    columns.append(ColumnDT('thn_pajak_sppt', mData='thn_pajak_sppt'))
    columns.append(
        ColumnDT('pbb_yg_harus_dibayar_sppt', mData='pbb_yg_harus_dibayar_sppt',
                 filter=_DTnumber))
    columns.append(
        ColumnDT('tgl_cetak_sppt', mData='tgl_cetak_sppt', filter=_DTdate))
    columns.append(
        ColumnDT('status_pembayaran_sppt', mData='status_pembayaran_sppt'))
    query = PosPbbDBSession.query(
        func.concat(PosSppt.kd_propinsi, PosSppt.kd_dati2, PosSppt.kd_kecamatan,
                    PosSppt.kd_kelurahan, PosSppt.kd_blok, PosSppt.no_urut,
                    PosSppt.kd_jns_op, PosSppt.thn_pajak_sppt).label('id'),
        func.concat(PosSppt.kd_propinsi, PosSppt.kd_dati2, PosSppt.kd_kecamatan,
                    PosSppt.kd_kelurahan, PosSppt.kd_blok, PosSppt.no_urut,
                    PosSppt.kd_jns_op).label('nop'),
        PosSppt.thn_pajak_sppt,
        PosSppt.pbb_yg_harus_dibayar_sppt,
        PosSppt.tgl_cetak_sppt,
        PosSppt.status_pembayaran_sppt)
    return columns, query
def id_string(cls):
    """
    IN SQL:
    SELECT CONCAT(CONCAT(CONCAT(LEFT((SELECT card_types.type_name
        FROM card_types WHERE card_types.id = cards.card_type_id), 1),
        letter), RIGHT(CONCAT('000000', cards.id), 6)), "C") as nid
    FROM cards;
    """
    return func.concat(
        func.concat(
            func.concat(
                func.left(
                    db.select([Card_Type.type_name])
                      .where(Card_Type.id == cls.card_type_id)
                      .limit(1).as_scalar(),
                    1),
                cls.letter),
            func.right(func.concat('000000', cls.id), 6)),
        "C")
def start_mappers():
    mapper(LocationEntity, locationTable)
    mapper(PositionGroupTypeEntity, position_group_type_table)
    mapper(UserPositionEntity, user_position_table, properties={
        'group': relationship(GroupEntity,
                              foreign_keys=user_position_table.c.group_id),
        'position': relationship(PositionGroupTypeEntity,
                                 foreign_keys=user_position_table.c.position_id)
    })
    mapper(GroupEntity, groupTable, properties={
        'location': relationship(LocationEntity, backref='groups')
    })
    mapper(UserEntity, usersTable, properties={
        'position': relationship(UserPositionEntity, backref='users'),
        'contacts': relationship(ContactEntity, backref='users',
                                 secondary=users_favorites_contact_table,
                                 lazy='noload'),
        'full_name': column_property(
            func.concat(usersTable.c.first_name, ' ', usersTable.c.last_name))
    })
    mapper(ContactEntity, contactTable, properties={
        'position': relationship(UserPositionEntity, backref='contacts'),
        'full_name': column_property(
            func.concat(contactTable.c.first_name, ' ', contactTable.c.last_name))
    })
    mapper(InvitationEntity, invitation_table, properties={
        'creator': relationship(UserEntity, backref='invitations',
                                foreign_keys=invitation_table.c.creator_id,
                                lazy='noload')
    })
def rename_directory(db, user_id, old_api_path, new_api_path):
    """
    Rename a directory.
    """
    old_db_path = from_api_dirname(old_api_path)
    new_db_path = from_api_dirname(new_api_path)
    if old_db_path == '/':
        raise RenameRoot('Renaming the root directory is not permitted.')

    # Overwriting existing directories is disallowed.
    if _dir_exists(db, user_id, new_db_path):
        raise DirectoryExists(new_api_path)

    # Set this foreign key constraint to deferred so it's not violated
    # when we run the first statement to update the name of the directory.
    db.execute('SET CONSTRAINTS '
               'pgcontents.directories_parent_user_id_fkey DEFERRED')

    # Update name column for the directory that's being renamed
    db.execute(
        directories.update().where(
            and_(
                directories.c.user_id == user_id,
                directories.c.name == old_db_path,
            )
        ).values(
            name=new_db_path,
        )
    )

    # Update the name and parent_name of any descendant directories.  Do
    # this in a single statement so the non-deferrable check constraint
    # is satisfied.
    db.execute(
        directories.update().where(
            and_(
                directories.c.user_id == user_id,
                directories.c.name.startswith(old_db_path),
                directories.c.parent_name.startswith(old_db_path),
            )
        ).values(
            name=func.concat(
                new_db_path,
                func.right(directories.c.name, -func.length(old_db_path))
            ),
            parent_name=func.concat(
                new_db_path,
                func.right(
                    directories.c.parent_name, -func.length(old_db_path)
                )
            ),
        )
    )
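# Worked example of the prefix-rewrite expression used above (PostgreSQL
# semantics; paths are illustrative, not from the source):
#   name = '/old/dir/sub', old_db_path = '/old/dir'
#   right(name, -length(old_db_path)) -> '/sub'
#     (RIGHT with a negative count drops that many characters from the left)
#   concat(new_db_path, '/sub')       -> '/new/dir/sub'
# so every descendant keeps its suffix while the prefix is swapped in one
# UPDATE statement.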
def rename_directory(db, user_id, old_api_path, new_api_path):
    """
    Rename a directory.
    """
    old_db_path = from_api_dirname(old_api_path)
    new_db_path = from_api_dirname(new_api_path)
    if old_db_path == '/':
        raise RenameRoot('Renaming the root directory is not permitted.')

    # Overwriting existing directories is disallowed.
    if _dir_exists(db, user_id, new_db_path):
        raise DirectoryExists(new_api_path)

    # Set this foreign key constraint to deferred so it's not violated
    # when we run the first statement to update the name of the directory.
    db.execute('SET CONSTRAINTS '
               'pgcontents.directories_parent_user_id_fkey DEFERRED')

    new_api_dir, new_name = split_api_filepath(new_api_path)
    new_db_dir = from_api_dirname(new_api_dir)

    # Update the name and parent_name columns for the directory that is being
    # renamed. The parent_name column will not change for a simple rename, but
    # will if the directory is moving.
    db.execute(directories.update().where(
        and_(
            directories.c.user_id == user_id,
            directories.c.name == old_db_path,
        )).values(
            name=new_db_path,
            parent_name=new_db_dir,
        ))

    # Update the name and parent_name of any descendant directories. Do this in
    # a single statement so the non-deferrable check constraint is satisfied.
    db.execute(directories.update().where(
        and_(
            directories.c.user_id == user_id,
            directories.c.name.startswith(old_db_path),
            directories.c.parent_name.startswith(old_db_path),
        ),
    ).values(
        name=func.concat(
            new_db_path,
            func.right(directories.c.name, -func.length(old_db_path)),
        ),
        parent_name=func.concat(
            new_db_path,
            func.right(
                directories.c.parent_name,
                -func.length(old_db_path),
            ),
        ),
    ))
def weekday_stats(session, year=None, bid=None):
    query = (_daily_stats_agg_query(session)
             .add_column(
                 func.to_char(
                     func.cast(func.concat(DailyStats.year, '-01-01'), Date) +
                     func.cast(func.concat(DailyStats.day_of_year - 1, ' days'),
                               Interval),
                     'ID').label('weekday'))
             .group_by('weekday')
             .order_by('weekday'))
    if year:
        query = query.filter(DailyStats.year == year)
    if bid:
        query = query.filter(DailyStats.bid == bid)
    return query
def mapper_details(concept_id):
    # subquery for searching for mapped concepts and returning concept details
    concept_info = db.session.query(VConcepts) \
        .filter(VConcepts.concept_id == concept_id) \
        .subquery()

    # mapping history
    hist = db.session.query(VMapping) \
        .filter((VMapping.source_concept_id == concept_id) &
                (VMapping.valid == True)) \
        .subquery()
    concept_history = db.session.query(hist) \
        .with_entities(hist.c.valid,
                       func.concat(hist.c.last_name, ', ',
                                   func.substr(hist.c.first_name, 1, 1),
                                   ' (', hist.c.organisation_name,
                                   ')').label('mapper'),
                       hist.c.destination_code_text,
                       hist.c.destination_term_text,
                       hist.c.event_type_name,
                       func.coalesce(hist.c.comment, '').label('comment'),
                       func.to_char(hist.c.insert_ts,
                                    'YYYY-mm-dd HH24:MI').label('insert_ts')) \
        .order_by(hist.c.insert_ts.desc()) \
        .all()

    # other source concepts to same destination concept
    other_concepts = db.session.query(VMapping) \
        .filter((VMapping.destination_concept_id == hist.c.destination_concept_id) &
                (VMapping.valid == True)) \
        .with_entities(VMapping.valid,
                       func.concat(VMapping.last_name, ', ',
                                   func.substr(VMapping.first_name, 1, 1),
                                   ' (', VMapping.organisation_name,
                                   ')').label('mapper'),
                       VMapping.source_code_text,
                       VMapping.source_term_text,
                       VMapping.event_type_name,
                       func.coalesce(VMapping.comment, '').label('comment'),
                       func.to_char(VMapping.insert_ts,
                                    'YYYY-mm-dd HH24:MI').label('insert_ts')) \
        .order_by(VMapping.insert_ts.desc()) \
        .all()

    # concept details to front-end (details)
    concept_decoded = db.session.query(concept_info) \
        .filter(VConcepts.concept_id == concept_id) \
        .with_entities(VConcepts.code_text, VConcepts.term_text,
                       VConcepts.obs_number) \
        .all()

    return render_template('home/details.html',
                           history=concept_history,
                           samedest=other_concepts,
                           info=concept_decoded,
                           target=session['target_system'],
                           user_org_name=session['user_organisation_name'])
def run(cls, filters):
    fd = filters['FromDate'].split('-')
    td = filters['ToDate'].split('-')
    FromDate = date(int(fd[0]), int(fd[1]), int(fd[2]))
    ToDate = date(int(td[0]), int(td[1]), int(td[2]))
    session = Session()
    records = session.query(
        func.concat(func.year(User.CreatedDate), '-', func.month(User.CreatedDate)),
        func.count(User.CreatedDate).label('CNT')) \
        .filter(User.CreatedDate.between(FromDate, ToDate)) \
        .group_by(func.concat(func.year(User.CreatedDate), '-',
                              func.month(User.CreatedDate))) \
        .order_by(func.concat(func.year(User.CreatedDate), '-',
                              func.month(User.CreatedDate)).desc()) \
        .all()
    session.close()
    columns = ['Mes', 'Cantidad']
    return {'Columns': columns, 'Rows': records}
def selectTableRowsUsingFormForDataAnalysis(form):
    """
    Builds an sql query from form parameters, executes it, and returns the
    results as a list. Used for Data Analysis Module.

    :param form: Form containing user selected dimensions to aggregate data by.
    :return: List of rows returned by the query. First row is table header.
    """
    selectFields = []

    # Populate selectFields from form
    # Patient and Test Type
    if form.patient.data:
        selectFields.append(models.Record.patient_id.label(form.patient.label.text))
    if form.test_type.data:
        selectFields.append(models.Record.test_type.label(form.test_type.label.text))

    # Test Date Period
    # Hierarchy is Year, Year > Month, Year > Week
    if form.test_date.data != form.NONE_LABEL:
        # just year
        if form.test_date.data == form.YEAR_LABEL:
            selectFields.append(
                func.year(models.Record.test_date).label(form.YEAR_LABEL))
        # month
        if form.test_date.data == form.MONTH_LABEL:
            selectFields.append(
                func.concat(func.monthname(models.Record.test_date), ', ',
                            func.year(models.Record.test_date)).label(form.MONTH_LABEL))
        # week
        if form.test_date.data == form.WEEK_LABEL:
            selectFields.append(
                func.concat(func.weekofyear(models.Record.test_date), ', ',
                            func.year(models.Record.test_date)).label(form.WEEK_LABEL))

    # construct query
    query = db.session.query(models.Record).join(models.Image).group_by(
        *["`" + c.name + "`" for c in selectFields])

    # Count results as number of images
    selectFields.append(func.count('*').label("# of Images"))

    # Execute the query with the selected fields
    results = query.values(*selectFields)

    # Reformat as list of rows with a header
    # Construct header
    resultsList = [[c.name for c in selectFields]]
    # Add results from query
    resultsList += list(results)
    return resultsList
def get_hackathon_show_list(self, hackathon_id, show_type=None, limit=10):
    criterion = TeamShow.hackathon_id == hackathon_id
    if show_type:
        criterion = and_(criterion, TeamShow.type == show_type)
    # show_list = TeamShow.query.filter(criterion).order_by(TeamShow.create_time.desc()).limit(limit)
    show_list = (
        self.db.session()
        .query(
            TeamShow.id,
            TeamShow.note,
            TeamShow.team_id,
            TeamShow.hackathon_id,
            Team.name,
            Team.description,
            Team.logo,
            func.group_concat(
                func.concat(TeamShow.uri, ":::", TeamShow.type)).label("uri"),
        )
        .join(Team, Team.id == TeamShow.team_id)
        .filter(criterion)
        .group_by(TeamShow.team_id)
        .order_by(TeamShow.create_time.desc())
        .all()
    )
    return [s._asdict() for s in show_list]
def admin_search_candidates(self):
    params = self.request.params
    status = params.get('status')
    order = params.get('order')
    q = params.get('q')
    tags = split_strip(params.get('tags'))

    basequery = DBSession.query(SearchResultCandidate) \
        .options(joinedload_all('languages.language'),
                 joinedload_all('languages.proficiency'),
                 joinedload_all('skills.skill'),
                 joinedload_all('skills.level'),
                 joinedload('preferred_locations'),
                 joinedload('target_position'))
    if status:
        status = get_by_name_or_raise(CandidateStatus, status)
        basequery = basequery.filter(Candidate.status == status)
    if q:
        q = q.lower()
        basequery = basequery.filter(
            or_(func.lower(Candidate.first_name).startswith(q),
                func.lower(Candidate.last_name).startswith(q),
                func.lower(func.concat(Candidate.first_name, " ",
                                       Candidate.last_name)).startswith(q),
                func.lower(Candidate.email).startswith(q)))
    if tags:
        basequery = basequery.outerjoin(CandidateSkill).join(Skill).filter(
            Skill.name.in_(tags))
    if order:
        basequery = add_sorting(basequery, order, CANDIDATE_SORTABLES)
    return run_paginated_query(
        self.request, basequery,
        counter=distinct_counter(SearchResultCandidate.id))
def search(searchq):
    # Will only fail if some f*cker names their book after their own ISBN.
    results = BookRecord.base_assembler_query()
    if ISBN_REGEX.match(searchq):
        alt_isbn = make_equivalent_isbn(searchq)
        results = results.filter(
            or_(Book.isbn == searchq, Book.isbn == alt_isbn)).all()
    else:
        contrib_query = (db.session.query(Book.id)
                         .filter(BookContribution.book_id == Book.id)
                         .filter(BookContribution.contributor_id == Contributor.id)
                         .filter(func.concat(Contributor.firstname, ' ',
                                             Contributor.lastname)
                                 .ilike("".join(("%", searchq, "%"))))
                         .all())
        contribooks = [bid for bid, in contrib_query]
        results = (BookRecord.base_assembler_query().filter(
            or_(Book.title.ilike("".join(("%", searchq, "%"))),
                and_(Book.publisher_id == BookCompany.id,
                     BookCompany.name.ilike("".join(("%", searchq, "%")))),
                Book.id.in_(contribooks) if contribooks else False)).all())
    book_listing = BookRecord.assembler(results, as_obj=False)
    return book_listing
def genotypes_for_records(vcf_id, query):
    """Return all genotypes which would appear on a row in a VCF (determined
    by CHROM/POS/REF/ALT) if just one genotype on that row passes the
    selections in `query'.

    This is used to generate the list of genotypes to be transformed into
    vcf.model._Records and then written to a VCF file.
    """
    query = _annotate_query_with_types(query, spec(vcf_id))
    with tables(db.engine, 'genotypes') as (con, gt):
        keyfunc = func.concat(
            gt.c.contig, ':', cast(gt.c.position, types.Unicode),
            '::', gt.c.reference, '->', gt.c.alternates)
        filtered_gts_q = select([keyfunc]).where(gt.c.vcf_id == vcf_id)
        filtered_gts_q = _add_filters(filtered_gts_q, gt, query.get('filters'))
        filtered_gts_q = _add_range(filtered_gts_q, gt, query.get('range'))
        filtered_gts_q = filtered_gts_q.cte('filtered_gts')

        records_q = select([gt]).where(
            keyfunc.in_(select([filtered_gts_q]))).where(gt.c.vcf_id == vcf_id)
        records_q = records_q.order_by(asc(func.length(gt.c.contig)),
                                       asc(gt.c.contig),
                                       asc(gt.c.position),
                                       asc(gt.c.reference),
                                       asc(gt.c.alternates),
                                       asc(gt.c.sample_name))
        genotypes = [dict(g) for g in con.execute(records_q).fetchall()]
    return genotypes
def get_campaigns_handler():
    last_id = request.args.get('last_id')
    if last_id and not last_id.isnumeric():
        abort(400, {'last_id': 'Invalid value'})
    path = 'campaign'
    items_model = get_model_by_path(path)
    filter_clause = None
    page_len = 20
    search_val = request.args.get('q')
    args = request.args
    if args:
        kwargs = {k: v for k, v in args.items() if hasattr(items_model, k)}
        if search_val:
            filter_clause = func.concat(
                items_model.date_created, ' ',
                items_model.description).like('%{}%'.format(search_val))
        if last_id:
            filter_clause = and_(filter_clause, items_model.id > last_id)
        query = db.session.query(items_model)
        if filter_clause is not None:
            query = query.filter(filter_clause)
        total_count = query.filter_by(**kwargs).count()
        return return_json(
            'result', {
                'items': [
                    item.serialize() for item in query.filter_by(**kwargs)
                    .order_by(items_model.date_created.asc()).limit(page_len)
                ],
                'total_count': total_count
            })
    return return_json('result', get_items(path, args))
def get_period_field(period, model):
    """
    Returns the SQLAlchemy field to use based on the type of aggregate
    (month vs. year).

    :param period: 'month' or 'year'
    :param model: model with a `date` column
    :return: SQLAlchemy label object
    """
    fields = []
    fields.append(func.date_part('year', model.date))
    if period == 'month':
        fields.append(
            func.lpad(
                expression.cast(
                    func.date_part('month', model.date),
                    types.String
                ),
                3, '-0'
            )
        )
    else:
        fields.append('-01')
    fields.append('-01')
    return func.concat(*fields).label('date')
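# Illustrative rendering of get_period_field on PostgreSQL, assuming
# model.date = DATE '2020-05-17' (example values, not from the source):
#   period == 'month' -> concat(date_part('year', date),
#                               lpad(cast(date_part('month', date) AS VARCHAR),
#                                    3, '-0'),
#                               '-01')   => '2020-05-01'
#   otherwise         -> concat(date_part('year', date), '-01', '-01')
#                                         => '2020-01-01'
# i.e. rows aggregate onto the first day of their month or year.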
def get_dop(cls, p_kode, p_tahun):
    pkey = FixLength(NOP)
    pkey.set_raw(p_kode)
    query = pbb_DBSession.query(
        func.concat(cls.kd_propinsi, '.').concat(cls.kd_dati2).concat('-').
            concat(cls.kd_kecamatan).concat('.').concat(cls.kd_kelurahan).concat('-').
            concat(cls.kd_blok).concat('.').concat(cls.no_urut).concat('-').
            concat(cls.kd_jns_op).label('nop'),
        cls.thn_pajak_sppt,
        cls.luas_bumi_sppt,
        cls.njop_bumi_sppt,
        cls.luas_bng_sppt,
        cls.njop_bng_sppt,
        cls.nm_wp_sppt,
        cls.pbb_yg_harus_dibayar_sppt,
        cls.status_pembayaran_sppt,
        DatObjekPajak.jalan_op,
        DatObjekPajak.blok_kav_no_op,
        DatObjekPajak.rt_op,
        DatObjekPajak.rw_op,
        func.coalesce(SpptOpBersama.luas_bumi_beban_sppt, 0).label('luas_bumi_beban'),
        func.coalesce(SpptOpBersama.luas_bng_beban_sppt, 0).label('luas_bng_beban'),
        func.coalesce(SpptOpBersama.njop_bumi_beban_sppt, 0).label('njop_bumi_beban'),
        func.coalesce(SpptOpBersama.njop_bng_beban_sppt, 0).label('njop_bng_beban'),
        Kelurahan.nm_kelurahan,
        Kecamatan.nm_kecamatan,
        Dati2.nm_dati2,
        func.max(PembayaranSppt.tgl_pembayaran_sppt).label('tgl_bayar'),
        func.sum(func.coalesce(PembayaranSppt.jml_sppt_yg_dibayar, 0)).label('jml_sppt_yg_dibayar'),
        func.sum(func.coalesce(PembayaranSppt.denda_sppt, 0)).label('denda_sppt'),
    ). \
        outerjoin(DatObjekPajak). \
        outerjoin(SpptOpBersama). \
        outerjoin(PembayaranSppt, and_(
            cls.kd_propinsi == PembayaranSppt.kd_propinsi,
            cls.kd_dati2 == PembayaranSppt.kd_dati2,
            cls.kd_kecamatan == PembayaranSppt.kd_kecamatan,
            cls.kd_kelurahan == PembayaranSppt.kd_kelurahan,
            cls.kd_blok == PembayaranSppt.kd_blok,
            cls.no_urut == PembayaranSppt.no_urut,
            cls.kd_jns_op == PembayaranSppt.kd_jns_op,
            cls.thn_pajak_sppt == PembayaranSppt.thn_pajak_sppt)). \
        filter(cls.kd_propinsi == Kelurahan.kd_propinsi,
               cls.kd_dati2 == Kelurahan.kd_dati2,
               cls.kd_kecamatan == Kelurahan.kd_kecamatan,
               cls.kd_kelurahan == Kelurahan.kd_kelurahan). \
        filter(cls.kd_propinsi == Kecamatan.kd_propinsi,
               cls.kd_dati2 == Kecamatan.kd_dati2,
               cls.kd_kecamatan == Kecamatan.kd_kecamatan). \
        filter(cls.kd_propinsi == Dati2.kd_propinsi,
               cls.kd_dati2 == Dati2.kd_dati2). \
        group_by(cls.kd_propinsi, cls.kd_dati2, cls.kd_kecamatan,
                 cls.kd_kelurahan, cls.kd_blok, cls.no_urut, cls.kd_jns_op,
                 cls.thn_pajak_sppt, cls.luas_bumi_sppt, cls.njop_bumi_sppt,
                 cls.luas_bng_sppt, cls.njop_bng_sppt,
                 cls.pbb_yg_harus_dibayar_sppt, cls.status_pembayaran_sppt,
                 DatObjekPajak.jalan_op, DatObjekPajak.blok_kav_no_op,
                 DatObjekPajak.rt_op, DatObjekPajak.rw_op,
                 SpptOpBersama.luas_bumi_beban_sppt,
                 SpptOpBersama.luas_bng_beban_sppt,
                 SpptOpBersama.njop_bumi_beban_sppt,
                 SpptOpBersama.njop_bng_beban_sppt,
                 Kelurahan.nm_kelurahan, Kecamatan.nm_kecamatan, Dati2.nm_dati2)
    return query.filter(
        cls.kd_propinsi == pkey['kd_propinsi'],
        cls.kd_dati2 == pkey['kd_dati2'],
        cls.kd_kecamatan == pkey['kd_kecamatan'],
        cls.kd_kelurahan == pkey['kd_kelurahan'],
        cls.kd_blok == pkey['kd_blok'],
        cls.no_urut == pkey['no_urut'],
        cls.kd_jns_op == pkey['kd_jns_op'],
        cls.thn_pajak_sppt == p_tahun)
def alchemy_expression(self):
    dt = self.product
    grid_spec = self.product.grid_spec
    doc = _jsonb_doc_expression(dt.metadata_type)
    projection_offset = _projection_doc_offset(dt.metadata_type)

    # Calculate tile refs
    geo_ref_points_offset = projection_offset + ['geo_ref_points']
    center_point = func.ST_Centroid(
        func.ST_Collect(
            _gis_point(doc, geo_ref_points_offset + ['ll']),
            _gis_point(doc, geo_ref_points_offset + ['ur']),
        ))

    # todo: look at grid_spec crs. Use it for defaults, conversion.
    size_x, size_y = (grid_spec.tile_size or (1000.0, 1000.0))
    origin_x, origin_y = grid_spec.origin
    return func.concat(
        func.floor((func.ST_X(center_point) - origin_x) / size_x).cast(String),
        '_',
        func.floor((func.ST_Y(center_point) - origin_y) / size_y).cast(String),
    )
def datasets_min_one_comment_follower(context, data_dict):
    """
    Number of Datasets with at least one comment follower
    :param context:
    :param data_dict:
    :return:
    """
    org_id = data_dict.get('org_id', None)
    utc_start_date = data_dict.get('utc_start_date', None)
    utc_end_date = data_dict.get('utc_end_date', None)

    check_org_access(org_id)

    try:
        return (
            _session_.query(func.count(distinct(Package.id)))
            .filter(
                _and_(CommentThread.url.like(DATASET_LIKE),
                      Comment.state == ACTIVE_STATE,
                      Comment.creation_date >= utc_start_date,
                      Comment.creation_date < utc_end_date,
                      Package.owner_org == org_id))
            .join(CommentThread,
                  CommentThread.url == func.concat(DATASET_PREFIX, Package.name))
            .join(CommentNotificationRecipient,
                  CommentNotificationRecipient.thread_id == CommentThread.id)
            # Don't need JOIN ON clause - `comment` table has `comment_thread`.`id` FK
            .join(Comment)
        ).scalar()
    except Exception as e:
        log.error(str(e))
def __init__(self, symbol_list=None):
    super().__init__()
    self.symbol_list = symbol_list
    self._mutex = threading.Lock()
    self._last_check_datetime = datetime.now() - timedelta(minutes=1)
    self.interval_timedelta = timedelta(seconds=15)
    self.symbol_target_position_dic = {}
    # Parameter type accepted by the event-driven handler for this period
    self._on_period_event_dic[PeriodType.Tick].param_type = dict
    # Time of the most recent executed operation, per symbol
    self.symbol_last_deal_datetime = {}
    # Time of the most recent buy/sell order request, per instrument
    self.instrument_lastest_order_datetime_dic = {}
    # Trading is asynchronous; until every order is tracked, too short an
    # interval can leave positions and requests out of sync and cause
    # over-ordering
    self.timedelta_between_deal = timedelta(seconds=3)
    self.min_order_vol = 0.1
    self.symbol_latest_price_dic = defaultdict(float)
    self.weight = 1 if not DEBUG else 0.2  # default position weight
    self.stop_loss_rate = -0.03
    # Initialize basic symbol information
    with with_db_session(engine_md) as session:
        symbol_info_list = session.query(SymbolPair).filter(
            func.concat(SymbolPair.base_currency,
                        SymbolPair.quote_currency).in_(symbol_list)).all()
        self.symbol_info_dic = {
            symbol.base_currency + symbol.quote_currency: symbol
            for symbol in symbol_info_list}
    self.logger.info('Order feedback file directory: %s', self._folder_path)
    self.load_feedback_file()
def construct_geo_name(geo_feature_obj, geo_name_obj):
    try:
        # filter geographic data by features and feature types
        geo_data = session.query(geo_feature_obj) \
            .filter(geo_feature_obj.geo_feature.in_(DEFAULT['geo_features'])) \
            .filter(~geo_feature_obj.feature_type.in_(DEFAULT['exempt_types'])) \
            .subquery()
        geo_name = session.query(
            func.concat(geo_data.c.geo_feature, '_',
                        geo_data.c.feature_type).label('name'),
            geo_data.c.geo_feature,
            geo_data.c.feature_type).distinct().order_by('name').all()
        obj_results = [
            geo_name_obj(name=item[0], geo_feature=item[1], feature_type=item[2])
            for item in geo_name
        ]
        # session.add_all(obj_results)
        # session.commit()
        print('Generated {} Geo Names.'.format(len(geo_name)))
        return
    except Exception as e:
        print(e)
        exit(-1)
def get(self):
    search_query = func.concat(Books.title, " ", SearchWordsDBModel.keyword)
    # TODO: reimplement this query so that no extra loop is needed to extract data
    result = (db.session.query(search_query.label("searchQuery"))
              .select_from(Books, SearchWordsDBModel)
              .order_by(Books.id).all())
    return [{'searchQuery': row.searchQuery} for row in result]
def datarequests_min_one_comment_follower(context, data_dict):
    """
    Number of Data Requests across an organisation with at least one comment
    follower
    :param context:
    :param data_dict:
    :return:
    """
    org_id = data_dict.get('org_id', None)
    utc_start_date = data_dict.get('utc_start_date', None)
    utc_end_date = data_dict.get('utc_end_date', None)

    check_org_access(org_id)

    try:
        db.init_db(model)
        return (
            _session_.query(func.count(distinct(db.DataRequest.id)))
            .filter(
                _and_(CommentThread.url.like(DATAREQUEST_LIKE),
                      Comment.state == ACTIVE_STATE,
                      Comment.creation_date >= utc_start_date,
                      Comment.creation_date < utc_end_date,
                      db.DataRequest.organization_id == org_id))
            .join(CommentThread,
                  CommentThread.url == func.concat(DATAREQUEST_PREFIX,
                                                   db.DataRequest.id))
            .join(CommentNotificationRecipient,
                  CommentNotificationRecipient.thread_id == CommentThread.id)
            .join(Comment, Comment.thread_id == CommentThread.id)
        ).scalar()
    except Exception as e:
        log.error(str(e))
def handle_detail(activity):
    raw = json.loads(str(activity['raw']).encode('utf-8'))
    activity['raw'] = raw
    # Detail requires the keys document_id, title, detail and icon_name
    if activity['doc_type'] == ActivityType.FOLLOW or activity['doc_type'] == ActivityType.UNFOLLOW:
        # Translate register_type to Indonesian
        register_type = case([
            (UserPrincipal.register_type == 'STUDENT', 'Siswa')
        ], else_='Guru').label('type')
        # Build the query using upper, concat and case expressions
        user_dict = UserPrincipal.query.with_entities(
            UserPrincipal.fullname.label('title'),
            func.coalesce(UserPrincipal.document_id, -1).label('document_id'),
            func.upper(
                func.concat(
                    register_type, ' ',
                    func.coalesce(SchoolClass.name, ''), ' ',
                    SchoolLevel.name
                )
            ).label('detail')
        ).outerjoin(SchoolClass, SchoolClass.id == UserPrincipal.class_id) \
            .join(SchoolLevel, SchoolLevel.id == UserPrincipal.level_id) \
            .filter(UserPrincipal.id == raw['friend_id']).first()._asdict()
        activity['detail'] = user_dict
    elif activity['doc_type'] == ActivityType.LESSON or activity['doc_type'] == ActivityType.LESSON_VIEWER:
        detail = LessonViewer.query.filter_by(lesson_id=raw['id']).count()
        entities = (Lesson.document_id, Lesson.title)
        lesson_dict = Lesson.query.with_entities(*entities).filter(
            Lesson.id == raw['id']).first()._asdict()
        lesson_dict['detail'] = ' ' + str(detail)
        activity['detail'] = lesson_dict
    else:
        activity['detail'] = None
    return activity
def test_return_type_detection(self):
    for fn in [func.coalesce, func.max, func.min, func.sum]:
        for args, type_ in [
            (
                (datetime.date(2007, 10, 5), datetime.date(2005, 10, 15)),
                sqltypes.Date,
            ),
            ((3, 5), sqltypes.Integer),
            ((decimal.Decimal(3), decimal.Decimal(5)), sqltypes.Numeric),
            (("foo", "bar"), sqltypes.String),
            (
                (
                    datetime.datetime(2007, 10, 5, 8, 3, 34),
                    datetime.datetime(2005, 10, 15, 14, 45, 33),
                ),
                sqltypes.DateTime,
            ),
        ]:
            assert isinstance(fn(*args).type, type_), "%s / %r != %s" % (
                fn(),
                fn(*args).type,
                type_,
            )

    assert isinstance(func.concat("foo", "bar").type, sqltypes.String)
def tas_gtas_combo(session, period, year):
    """ Creates a combined list of TAS and GTAS data filtered by the given
        period/year

        Args:
            session: DB session
            period: The period for which to get GTAS data
            year: The year for which to get GTAS data

        Returns:
            A WITH clause to use with other queries
    """
    query = session.query(
        gtas_model.allocation_transfer_agency.label('allocation_transfer_agency'),
        gtas_model.agency_identifier.label('agency_identifier'),
        gtas_model.beginning_period_of_availa.label('beginning_period_of_availa'),
        gtas_model.ending_period_of_availabil.label('ending_period_of_availabil'),
        gtas_model.availability_type_code.label('availability_type_code'),
        gtas_model.main_account_code.label('main_account_code'),
        gtas_model.sub_account_code.label('sub_account_code'),
        gtas_model.amount.label('amount'),
        gtas_model.line.label('line'),
        tas_model.financial_indicator2.label('financial_indicator2'),
        tas_model.fr_entity_type.label('fr_entity_type')). \
        join(tas_model,
             gtas_model.tas == func.concat(
                 func.coalesce(tas_model.allocation_transfer_agency, '000'),
                 func.coalesce(tas_model.agency_identifier, '000'),
                 func.coalesce(tas_model.beginning_period_of_availa, '0000'),
                 func.coalesce(tas_model.ending_period_of_availabil, '0000'),
                 func.coalesce(tas_model.availability_type_code, ' '),
                 func.coalesce(tas_model.main_account_code, '0000'),
                 func.coalesce(tas_model.sub_account_code, '000'))). \
        filter(gtas_model.period == period). \
        filter(gtas_model.fiscal_year == year)
    return query.cte('tas_gtas')
def get_account_balance(stg_run_id):
    """
    Fetch the account balance history (account_status_info) for a stg_run_id
    :param stg_run_id:
    :return:
    """
    with with_db_session(engine_ibats) as session:
        sql_str = str(
            session.query(
                func.concat(AccountStatusInfo.trade_date, ' ',
                            AccountStatusInfo.trade_time).label('trade_datetime'),
                AccountStatusInfo.available_cash.label('available_cash'),
                AccountStatusInfo.curr_margin.label('curr_margin'),
                AccountStatusInfo.balance_tot.label('balance_tot')
            ).filter(
                AccountStatusInfo.stg_run_id == stg_run_id
            ).order_by(AccountStatusInfo.trade_date, AccountStatusInfo.trade_time))
        # sql_str = """SELECT concat(trade_date, " ", trade_time) trade_datetime, available_cash, curr_margin, balance_tot
        #     FROM account_status_info where stg_run_id=%s order by trade_date, trade_time"""
    # The rendered statement has two %s placeholders: the ' ' separator
    # literal from func.concat and the stg_run_id filter, in that order.
    data_df = pd.read_sql(sql_str, engine_ibats, params=[' ', stg_run_id])
    data_df["return_rate"] = (data_df["balance_tot"].pct_change().fillna(0) + 1).cumprod()
    data_df = data_df.set_index("trade_datetime")
    return data_df
def search_table(self):
    full_name_value = self.request.args.get("full_name", None)
    if full_name_value:
        self.q = self.q.filter(
            func.lower(
                func.concat(Patient.second_name, ' ', Patient.first_name,
                            ' ', Patient.patronymic_name)).contains(
                full_name_value.lower()))
        self.search_form.full_name.default = full_name_value

    region_id = self.request.args.get("region_id", -1)
    if region_id:
        try:
            region_id = int(region_id)
        except ValueError:
            return render_template('errors/error-500.html'), 500
        if region_id != -1:
            self.q = self.q.filter(Patient.region_id == region_id)
            self.search_form.region_id.default = region_id

    iin = self.request.args.get("iin", None)
    if iin:
        self.q = self.q.filter(Patient.iin.contains(iin))
        self.search_form.iin.default = iin

    if self.search_form:
        self.search_form.process()
def move_remote_checkpoints(db, user_id, src_api_path, dest_api_path):
    src_db_path = from_api_filename(src_api_path)
    dest_db_path = from_api_filename(dest_api_path)

    # Update the paths of the checkpoints for the file being renamed. If the
    # source path is for a directory then this is a no-op.
    db.execute(
        remote_checkpoints.update().where(
            and_(
                remote_checkpoints.c.user_id == user_id,
                remote_checkpoints.c.path == src_db_path,
            ),
        ).values(
            path=dest_db_path,
        ),
    )

    # If the given source path is for a directory, update the paths of the
    # checkpoints for all files in that directory and its subdirectories.
    db.execute(
        remote_checkpoints.update().where(
            and_(
                remote_checkpoints.c.user_id == user_id,
                remote_checkpoints.c.path.startswith(src_db_path),
            ),
        ).values(
            path=func.concat(
                dest_db_path,
                func.right(
                    remote_checkpoints.c.path,
                    -func.length(src_db_path),
                ),
            ),
        ),
    )
def find_members(s):
    session = Session()
    if s.isdigit():
        # should be phone number
        if len(s) != 10:
            raise ValueError("Phone must be 10 digits")
        else:
            return _find_all_members_in_families(
                session,
                session.query(Family).filter(Family.id == int(s)).all())
    elif "@" in s:
        # should be email
        return _find_all_members_in_families(
            session,
            session.query(Family).filter(Family.email == s).all())
    else:
        # try first, last names
        from sqlalchemy import func
        return session.query(Member).filter(
            or_(func.concat(Member.first_name, Member.family_name).like("%%%s%%" % s),
                func.concat(Member.family_name, Member.first_name).like("%%%s%%" % s))).all() \
            + session.query(Youth).filter(
                or_(func.concat(Youth.first_name, Youth.family_name).like("%%%s%%" % s),
                    func.concat(Youth.family_name, Youth.first_name).like("%%%s%%" % s))).all()
def update_recipients(connection, commentable_table):
    """Updates recipients field for commentable table."""
    # replace all None data with empty string for recipients field
    connection.execute(commentable_table.update()
                       .where(commentable_table.c.recipients.is_(None))
                       .values(recipients=''))
    # add Line of Defense One Contacts, Vice Presidents to recipients list
    op.execute(commentable_table.update()
               .where(commentable_table.c.recipients != '')
               .values(recipients=func.concat(commentable_table.c.recipients,
                                              ",Line of Defense One Contacts,"
                                              "Vice Presidents")))
def create_global_search_options(self, search, tables):
    response = []
    search = clean_unicode(search)
    search_list = search.split()
    for table in maybe_list(tables):
        ignore_columns = getattr(table, '__ignore_on_global_search__', None)
        ignore_ids = getattr(table, '__ignore_ids_on_global_search__', True)
        for column_name in table._sa_class_manager.local_attrs.keys():
            if ((ignore_ids and (column_name == 'id' or column_name.endswith('_id')))
                    or (ignore_columns and column_name in ignore_columns)):
                continue

            column = getattr(table, column_name)
            if hasattr(column, 'type'):
                if isinstance(column.type, Enum):
                    for value in column.type.enums:
                        value_to_search = value
                        for s in search_list:
                            try:
                                idx = value_to_search.index(s)
                            except ValueError:
                                break
                            else:
                                value_to_search = value_to_search[idx + len(s):]
                        else:
                            response.append(column == value)
                elif isinstance(column.type, String):
                    value = create_like_filter(column, search)
                    if value is not None:
                        response.append(value)
                elif isinstance(column.type, (Numeric, Integer, Date, DateTime)):
                    value = create_like_filter(cast(column, String), search)
                    if value is not None:
                        response.append(value)
            else:
                clauses = getattr(column, 'clauses', None)
                if clauses is not None:
                    value = create_like_filter(func.concat(*clauses), search)
                    if value is not None:
                        response.append(value)
                else:
                    value = create_like_filter(cast(column, String), search)
                    if value is not None:
                        response.append(value)
    return response
def get_bayar(cls, p_kode):
    pkey = FixLength(NOP)
    pkey.set_raw(p_kode)
    query = pbb_DBSession.query(
        func.concat(cls.kd_propinsi, '.').concat(cls.kd_dati2).concat('-').
            concat(cls.kd_kecamatan).concat('.').concat(cls.kd_kelurahan).concat('-').
            concat(cls.kd_blok).concat('.').concat(cls.no_urut).concat('-').
            concat(cls.kd_jns_op).label('nop'),
        cls.thn_pajak_sppt, cls.nm_wp_sppt, cls.jln_wp_sppt,
        cls.blok_kav_no_wp_sppt, cls.rw_wp_sppt, cls.rt_wp_sppt,
        cls.kelurahan_wp_sppt, cls.kota_wp_sppt, cls.kd_pos_wp_sppt,
        cls.npwp_sppt, cls.kd_kls_tanah, cls.kd_kls_bng,
        cls.luas_bumi_sppt, cls.luas_bng_sppt, cls.njop_bumi_sppt,
        cls.njop_bng_sppt, cls.njop_sppt, cls.njoptkp_sppt,
        cls.pbb_terhutang_sppt, cls.faktor_pengurang_sppt,
        cls.status_pembayaran_sppt, cls.tgl_jatuh_tempo_sppt,
        cls.pbb_yg_harus_dibayar_sppt.label('pokok'),
        func.max(PembayaranSppt.tgl_pembayaran_sppt).label('tgl_pembayaran_sppt'),
        func.sum(func.coalesce(PembayaranSppt.jml_sppt_yg_dibayar, 0)).label('bayar'),
        func.sum(func.coalesce(PembayaranSppt.denda_sppt, 0)).label('denda_sppt'),
    ). \
        outerjoin(PembayaranSppt, and_(
            cls.kd_propinsi == PembayaranSppt.kd_propinsi,
            cls.kd_dati2 == PembayaranSppt.kd_dati2,
            cls.kd_kecamatan == PembayaranSppt.kd_kecamatan,
            cls.kd_kelurahan == PembayaranSppt.kd_kelurahan,
            cls.kd_blok == PembayaranSppt.kd_blok,
            cls.no_urut == PembayaranSppt.no_urut,
            cls.kd_jns_op == PembayaranSppt.kd_jns_op,
            cls.thn_pajak_sppt == PembayaranSppt.thn_pajak_sppt)). \
        group_by(cls.kd_propinsi, cls.kd_dati2, cls.kd_kecamatan,
                 cls.kd_kelurahan, cls.kd_blok, cls.no_urut, cls.kd_jns_op,
                 cls.thn_pajak_sppt, cls.nm_wp_sppt, cls.jln_wp_sppt,
                 cls.blok_kav_no_wp_sppt, cls.rw_wp_sppt, cls.rt_wp_sppt,
                 cls.kelurahan_wp_sppt, cls.kota_wp_sppt, cls.kd_pos_wp_sppt,
                 cls.npwp_sppt, cls.kd_kls_tanah, cls.kd_kls_bng,
                 cls.luas_bumi_sppt, cls.luas_bng_sppt, cls.njop_bumi_sppt,
                 cls.njop_bng_sppt, cls.njop_sppt, cls.njoptkp_sppt,
                 cls.pbb_terhutang_sppt, cls.faktor_pengurang_sppt,
                 cls.status_pembayaran_sppt, cls.tgl_jatuh_tempo_sppt,
                 cls.pbb_yg_harus_dibayar_sppt.label('pokok'))
    return query.filter(cls.kd_propinsi == pkey['kd_propinsi'],
                        cls.kd_dati2 == pkey['kd_dati2'],
                        cls.kd_kecamatan == pkey['kd_kecamatan'],
                        cls.kd_kelurahan == pkey['kd_kelurahan'],
                        cls.kd_blok == pkey['kd_blok'],
                        cls.no_urut == pkey['no_urut'],
                        cls.kd_jns_op == pkey['kd_jns_op'])
def report_15(dataset_id, page):
    dataset = models.Dataset.query.get_or_404(dataset_id)
    species, habitat = get_report_data(dataset)

    species_data = species.with_entities(
        func.concat(models.DataSpeciesRegion.conclusion_assessment,
                    models.DataSpeciesRegion.conclusion_assessment_trend),
        func.count(models.DataSpeciesRegion.id)
    ).group_by(models.DataSpeciesRegion.conclusion_assessment,
               models.DataSpeciesRegion.conclusion_assessment_trend)
    species_count = species.filter(
        models.DataSpeciesRegion.conclusion_assessment.startswith('U')
    ).count() or 1
    species_data = dict(species_data) or {}
    for key, value in species_data.iteritems():
        species_data[key] = value * 100.0 / species_count

    habitat_data = habitat.with_entities(
        func.concat(models.DataHabitattypeRegion.conclusion_assessment,
                    models.DataHabitattypeRegion.conclusion_assessment_trend),
        func.count(models.DataHabitattypeRegion.id)
    ).group_by(models.DataHabitattypeRegion.conclusion_assessment,
               models.DataHabitattypeRegion.conclusion_assessment_trend)
    habitat_count = habitat.filter(
        models.DataHabitattypeRegion.conclusion_assessment.startswith('U')
    ).count() or 1
    habitat_data = dict(habitat_data) or {}
    for key, value in habitat_data.iteritems():
        habitat_data[key] = value * 100.0 / habitat_count

    return render_template(
        'aggregation/reports/15.html',
        page=page,
        dataset=dataset,
        species=species_data,
        habitats=habitat_data,
    )
def postgresql_non_ascii_and_lower(column, as_text=True):
    if hasattr(column, 'property'):
        columns = column.property.columns
        if len(columns) > 1:
            column = func.concat(*columns)
        else:
            column = column.property.columns[0]

    if isinstance(column.type, Enum):
        return column
    elif isinstance(column.type, String):
        return func.lower_and_clear(column)
    elif isinstance(column.type, Numeric):
        return column
    elif as_text:
        return func.text(column)
    else:
        return column
def signups_requiring_notification(self, session, from_time, to_time, options=None):
    """
    Returns a dict of AttractionSignups that require notification.

    The keys of the returned dict are the amount of advanced notice, given
    in seconds. A key of -1 indicates confirmation notices after a signup.
    The query generated by this method looks horrific, but is surprisingly
    efficient.
    """
    advance_checkin = max(0, self.advance_checkin)
    subqueries = []
    for advance_notice in sorted(set([-1] + self.advance_notices)):
        event_filters = [AttractionEvent.attraction_id == self.id]
        if advance_notice == -1:
            notice_ident = cast(AttractionSignup.attraction_event_id, UnicodeText)
            notice_param = bindparam('confirm_notice',
                                     advance_notice).label('advance_notice')
        else:
            advance_notice = max(0, advance_notice) + advance_checkin
            notice_delta = timedelta(seconds=advance_notice)
            event_filters += [
                AttractionEvent.start_time >= from_time + notice_delta,
                AttractionEvent.start_time < to_time + notice_delta]
            notice_ident = func.concat(AttractionSignup.attraction_event_id,
                                       '_{}'.format(advance_notice))
            notice_param = bindparam(
                'advance_notice_{}'.format(advance_notice),
                advance_notice).label('advance_notice')

        subquery = session.query(AttractionSignup, notice_param).filter(
            AttractionSignup.is_unchecked_in,
            AttractionSignup.attraction_event_id.in_(
                session.query(AttractionEvent.id).filter(*event_filters)),
            not_(exists().where(and_(
                AttractionNotification.ident == notice_ident,
                AttractionNotification.attraction_event_id ==
                AttractionSignup.attraction_event_id,
                AttractionNotification.attendee_id ==
                AttractionSignup.attendee_id)))).with_labels()
        subqueries.append(subquery)

    query = subqueries[0].union(*subqueries[1:])
    if options:
        query = query.options(*listify(options))
    query = query.order_by(AttractionSignup.id)
    return groupify(query, lambda x: x[0], lambda x: x[1])
def get_account_balance(stg_run_id):
    """
    Fetch the account balance history (account_status_info) for a stg_run_id
    :param stg_run_id:
    :return:
    """
    with with_db_session(engine_abat) as session:
        sql_str = str(
            session.query(
                func.concat(AccountStatusInfo.trade_date, ' ',
                            AccountStatusInfo.trade_time).label('trade_datetime'),
                AccountStatusInfo.available_cash.label('available_cash'),
                AccountStatusInfo.curr_margin.label('curr_margin'),
                AccountStatusInfo.balance_tot.label('balance_tot')
            ).filter(AccountStatusInfo.stg_run_id == stg_run_id).order_by(
                AccountStatusInfo.trade_date, AccountStatusInfo.trade_time
            )
        )
        # sql_str = """SELECT concat(trade_date, " ", trade_time) trade_datetime, available_cash, curr_margin, balance_tot
        #     FROM account_status_info where stg_run_id=%s order by trade_date, trade_time"""
    # The rendered statement has two %s placeholders: the ' ' separator
    # literal from func.concat and the stg_run_id filter, in that order.
    data_df = pd.read_sql(sql_str, engine_abat, params=[' ', stg_run_id])
    data_df["return_rate"] = (data_df["balance_tot"].pct_change().fillna(0) + 1).cumprod()
    data_df = data_df.set_index("trade_datetime")
    return data_df
def get_for(entity, entity_id, lazy=True):
    user = auth.service.get_user()
    if lazy:
        return Comment.query.filter(Comment.entity == entity,
                                    Comment.entity_id == entity_id,
                                    Comment.quote_for_id == None) \
            .order_by(Comment.datetime.desc()).all()
    else:
        query = Comment.query.filter(Comment.entity == entity,
                                     Comment.entity_id == entity_id) \
            .outerjoin(Vote, and_(Vote.entity == Comment.__tablename__,
                                  Vote.entity_id == Comment.id,
                                  Vote.user_id == user.id)) \
            .outerjoin(File, File.entity == func.concat(Comment.__tablename__,
                                                        '.', Comment.id)) \
            .add_entity(Vote) \
            .add_entity(File) \
            .order_by(Comment.datetime.desc(), File.id)
        comments_votes_files = query.all()

        comments = []
        added = {}
        for comment, vote, file in comments_votes_files:
            if file:
                comment.add_file(file)
            comment.my_vote = vote
            if not added.get(comment.id):
                comments.append(comment)
                added[comment.id] = True
        return comments
def pgp_path(self):
    return func.concat(self.path, ".asc")
def anchor(self):
    # assuming person_id and sequence are columns on this model; the
    # original referenced them as bare names
    return func.concat(self.person_id, '-', self.sequence)
def slug(cls):
    return func.concat(cls.link, ' ', cls.title)
def create_filter_by(column, values):
    if hasattr(column, 'property') and isinstance(column.property, CompositeProperty):
        column = func.concat(*column.property.columns)

    if isinstance(values, FilterBy):
        filter_type = values.filter_type.lower()
        if filter_type == 'or':
            or_queries = []
            for value in values.value:
                query = create_filter_by(column, value)
                if query is not None:
                    or_queries.append(query)
            if len(or_queries) == 1:
                return or_queries[0]
            elif or_queries:
                return or_(*or_queries)
        elif filter_type == 'and':
            and_queries = []
            for value in values.value:
                query = create_filter_by(column, value)
                if query is not None:
                    and_queries.append(query)
            if len(and_queries) == 1:
                return and_queries[0]
            elif and_queries:
                return and_(*and_queries)
        elif filter_type in ('like', 'contém'):
            return like_maybe_with_none(column, values.value)
        elif filter_type == '>':
            return column > values.value
        elif filter_type == '>=':
            return column >= values.value
        elif filter_type == '<':
            return column < values.value
        elif filter_type == '<=':
            return column <= values.value
        elif filter_type in ('=', '=='):
            return column == values.value
        elif filter_type in ('!=', '≠'):
            return column != values.value
        else:
            raise Error('filter_type',
                        u('Invalid filter type %s') % values.filter_type)
    elif values is drop:
        return None
    elif not is_nonstr_iter(values):
        return column == values
    else:
        or_queries = []
        noniter_values = set()
        for value in values:
            if isinstance(value, FilterBy) or is_nonstr_iter(value):
                query = create_filter_by(column, value)
                if query is not None:
                    or_queries.append(query)
            elif value is not drop:
                noniter_values.add(value)
        if noniter_values:
            or_queries.append(maybe_with_none(column, noniter_values))
        if len(or_queries) == 1:
            return or_queries[0]
        elif or_queries:
            return or_(*or_queries)
def display(self):
    return func.concat(self.root, '.', self.item)
def fexpr(cls):
    return func.concat(cls.first_name, ' ', cls.last_name)
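# The short property-style snippets above (pgp_path, anchor, slug, display,
# fexpr) are typical SQL-side halves of a SQLAlchemy hybrid attribute. A
# minimal self-contained sketch with a hypothetical Person model:
from sqlalchemy import Column, Integer, String, func
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Person(Base):
    __tablename__ = 'people'
    id = Column(Integer, primary_key=True)
    first_name = Column(String(50))
    last_name = Column(String(50))

    @hybrid_property
    def full_name(self):
        # Python side: evaluated on loaded instances.
        return '%s %s' % (self.first_name, self.last_name)

    @full_name.expression
    def full_name(cls):
        # SQL side: renders as concat(first_name, ' ', last_name).
        return func.concat(cls.first_name, ' ', cls.last_name)

# Usage: session.query(Person).filter(Person.full_name == 'Ada Lovelace')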