def rename_directory(db, user_id, old_api_path, new_api_path):
    """
    Rename a directory.

    Translates both API paths to DB paths, refuses to rename the root or to
    overwrite an existing target, then rewrites the ``name`` column of the
    directory and the ``name``/``parent_name`` columns of all descendants.

    :param db: executable database connection (SQLAlchemy).
    :param user_id: owner of the directory tree.
    :param old_api_path: current directory path, in API form.
    :param new_api_path: desired directory path, in API form.
    :raises RenameRoot: if asked to rename the root directory.
    :raises DirectoryExists: if the target directory already exists.
    """
    old_db_path = from_api_dirname(old_api_path)
    new_db_path = from_api_dirname(new_api_path)
    if old_db_path == '/':
        raise RenameRoot('Renaming the root directory is not permitted.')

    # Overwriting existing directories is disallowed.
    if _dir_exists(db, user_id, new_db_path):
        raise DirectoryExists(new_api_path)

    # Set this foreign key constraint to deferred so it's not violated
    # when we run the first statement to update the name of the directory.
    db.execute('SET CONSTRAINTS '
               'pgcontents.directories_parent_user_id_fkey DEFERRED')

    # Update name column for the directory that's being renamed.
    # NOTE(review): parent_name is not updated here, so this supports a
    # rename within the same parent only, not a move -- confirm callers
    # never pass a path with a different parent.
    db.execute(
        directories.update().where(
            and_(
                directories.c.user_id == user_id,
                directories.c.name == old_db_path,
            )
        ).values(
            name=new_db_path,
        )
    )

    # Update the name and parent_name of any descendant directories. Do
    # this in a single statement so the non-deferrable check constraint
    # is satisfied.  RIGHT(x, -LENGTH(prefix)) strips the old prefix
    # (PostgreSQL semantics for a negative length argument).
    db.execute(
        directories.update().where(
            and_(
                directories.c.user_id == user_id,
                directories.c.name.startswith(old_db_path),
                directories.c.parent_name.startswith(old_db_path),
            )
        ).values(
            name=func.concat(
                new_db_path,
                func.right(directories.c.name, -func.length(old_db_path))
            ),
            parent_name=func.concat(
                new_db_path,
                func.right(
                    directories.c.parent_name,
                    -func.length(old_db_path)
                )
            ),
        )
    )
def rename_directory(db, user_id, old_api_path, new_api_path):
    """
    Rename (or move) a directory.

    Unlike a plain rename, this variant also recomputes ``parent_name`` for
    the renamed directory itself, so the target path may live under a
    different parent.

    :param db: executable database connection (SQLAlchemy).
    :param user_id: owner of the directory tree.
    :param old_api_path: current directory path, in API form.
    :param new_api_path: desired directory path, in API form.
    :raises RenameRoot: if asked to rename the root directory.
    :raises DirectoryExists: if the target directory already exists.
    """
    old_db_path = from_api_dirname(old_api_path)
    new_db_path = from_api_dirname(new_api_path)
    if old_db_path == '/':
        raise RenameRoot('Renaming the root directory is not permitted.')

    # Overwriting existing directories is disallowed.
    if _dir_exists(db, user_id, new_db_path):
        raise DirectoryExists(new_api_path)

    # Set this foreign key constraint to deferred so it's not violated
    # when we run the first statement to update the name of the directory.
    db.execute('SET CONSTRAINTS '
               'pgcontents.directories_parent_user_id_fkey DEFERRED')

    # DB form of the target's parent directory, for the parent_name column.
    new_api_dir, new_name = split_api_filepath(new_api_path)
    new_db_dir = from_api_dirname(new_api_dir)

    # Update the name and parent_name columns for the directory that is being
    # renamed. The parent_name column will not change for a simple rename, but
    # will if the directory is moving.
    db.execute(directories.update().where(
        and_(
            directories.c.user_id == user_id,
            directories.c.name == old_db_path,
        )).values(
            name=new_db_path,
            parent_name=new_db_dir,
        ))

    # Update the name and parent_name of any descendant directories. Do this in
    # a single statement so the non-deferrable check constraint is satisfied.
    # RIGHT(x, -LENGTH(prefix)) strips the old prefix (PostgreSQL semantics
    # for a negative length argument).
    db.execute(directories.update().where(
        and_(
            directories.c.user_id == user_id,
            directories.c.name.startswith(old_db_path),
            directories.c.parent_name.startswith(old_db_path),
        ),
    ).values(
        name=func.concat(
            new_db_path,
            func.right(directories.c.name, -func.length(old_db_path)),
        ),
        parent_name=func.concat(
            new_db_path,
            func.right(
                directories.c.parent_name,
                -func.length(old_db_path),
            ),
        ),
    ))
def move_remote_checkpoints(db, user_id, src_api_path, dest_api_path):
    """
    Repoint stored checkpoints from ``src_api_path`` to ``dest_api_path``.

    Handles both cases: a single file (exact path match) and a directory
    (prefix match on all contained files).

    :param db: executable database connection (SQLAlchemy).
    :param user_id: owner of the checkpoints.
    :param src_api_path: old file or directory path, in API form.
    :param dest_api_path: new file or directory path, in API form.
    """
    src_db_path = from_api_filename(src_api_path)
    dest_db_path = from_api_filename(dest_api_path)

    # Update the paths of the checkpoints for the file being renamed. If the
    # source path is for a directory then this is a no-op.
    db.execute(
        remote_checkpoints.update().where(
            and_(
                remote_checkpoints.c.user_id == user_id,
                remote_checkpoints.c.path == src_db_path,
            ),
        ).values(path=dest_db_path, ),
    )

    # If the given source path is for a directory, update the paths of the
    # checkpoints for all files in that directory and its subdirectories.
    # RIGHT(path, -LENGTH(prefix)) strips the old prefix (PostgreSQL
    # semantics for a negative length argument).
    # NOTE(review): startswith() is a raw prefix match -- a file path like
    # "a/b" would also match "a/bc"; presumably the DB paths make this
    # unambiguous (trailing separator for dirs) -- confirm.
    db.execute(remote_checkpoints.update().where(
        and_(
            remote_checkpoints.c.user_id == user_id,
            remote_checkpoints.c.path.startswith(src_db_path),
        ),
    ).values(path=func.concat(
        dest_db_path,
        func.right(
            remote_checkpoints.c.path,
            -func.length(src_db_path),
        ),
    ), ))
def max_tile_mgrs_pl(station, start, end, squares):
    """
    Count coverage entries per 4-character MGRS tile suffix for one station.

    Returns a JSON string of the form
    ``{"t": <squares prefix>, "p": ["<tile>/<count>", ...]}``.

    NOTE(review): ``start`` and ``end`` are accepted but never used in the
    query -- confirm whether a time-range filter is missing upstream.
    """
    # Grouping key: the trailing four characters of the short MGRS locator.
    tile = func.right(ReceiverCoverage.location_mgrs_short, 4)
    query = (
        db.session.query(tile,
                         func.count(ReceiverCoverage.location_mgrs_short))
        .filter(db.and_(Receiver.id == ReceiverCoverage.receiver_id,
                        Receiver.name == station))
        .filter(ReceiverCoverage.location_mgrs_short.like(squares + "%"))
        .group_by(tile)
    )
    pairs = ["{}/{}".format(suffix, count) for suffix, count in query.all()]
    return json.dumps({"t": squares, "p": pairs})
def export_csv(self):
    """
    Export selected SP2D advice rows as CSV data.

    Reads a comma-separated list of SP2D numbers from
    ``request.params['data']``, queries the joined advice/SP2D rows, sets a
    timestamped attachment filename on the response, and returns a dict with
    ``header`` (column names) and ``rows`` (row values) for the CSV renderer.

    Fixes over the previous revision: the query is executed once instead of
    twice (``first()`` followed by ``all()``), an empty result no longer
    raises AttributeError, and the redundant duplicate argument in
    ``coalesce(infix, infix, '')`` is dropped (semantically identical).
    """
    request = self.request
    query = SipkdDBSession.query(
        Sp2dAdviceDet.advno,
        # Display SP2D number: last 5 digits + optional infix + '/'
        # + type + '/' + first 4 digits.
        (func.right(Sp2d.sp2dno, 5) +
         func.coalesce(Sp2d.infix, '') +
         '/' + Sp2d.sp2dtype + '/' +
         func.left(Sp2d.sp2dno, 4)).label('sp2dno'),
        Sp2d.sp2ddate,
        Sp2d.paymentfor,
        Sp2d.sp2damount,
        Sp2d.ppnamount,
        Sp2d.pphamount,
        # Total deductions across the five "pot" columns.
        (Sp2d.pot1num + Sp2d.pot2num + Sp2d.pot3num +
         Sp2d.pot4num + Sp2d.pot5num).label("potongan"),
        Sp2d.sp2dnetto,
        Sp2d.bknama,
        Sp2d.bankposnm,
        Sp2d.bankaccount,
        Sp2d.npwp,
    ).join(Sp2d).filter(
        Sp2d.sp2dno == Sp2dAdviceDet.sp2dno,
        Sp2d.sp2dno.in_(request.params['data'].split(',')),
    )

    # Execute once; derive the header from the first row when present.
    results = query.all()
    header = list(results[0].keys()) if results else []
    rows = [list(item) for item in results]

    # Override response attributes so the browser downloads a CSV file.
    filename = 'sp2d%s.csv' % datetime.now().strftime('%Y%m%d%H%M%S')
    self.request.response.content_disposition = 'attachment;filename=' + filename
    return {
        'header': header,
        'rows': rows,
    }
def id_string(cls):
    """
    SQL expression for a card's display id.

    Equivalent SQL:
        SELECT CONCAT(CONCAT(CONCAT(LEFT((SELECT card_types.type_name
            FROM card_types WHERE card_types.id = cards.card_type_id), 1),
            letter), RIGHT(CONCAT('000000', cards.id), 6)), "C") AS nid
        FROM cards;
    """
    # First letter of the related card type's name (scalar subquery).
    type_initial = func.left(
        db.select([Card_Type.type_name])
          .where(Card_Type.id == cls.card_type_id)
          .limit(1)
          .as_scalar(),
        1,
    )
    # Numeric id left-padded with zeros to six digits.
    padded_id = func.right(func.concat('000000', cls.id), 6)
    # <type initial><letter><padded id>"C"
    return func.concat(
        func.concat(func.concat(type_initial, cls.letter), padded_id),
        "C",
    )
def get_contestants():
    """
    Contestants are only qualified if they answered the max rank question
    and the initial answer is correct.

    Query params:
        page (int): 1-based page number, default 1.
        per (int): page size, default 20.
        desc: when present (any value), order by vote count descending;
              otherwise order by answer id ascending.

    Returns a paginated JSON envelope of qualified answers with vote counts.
    """
    try:
        page = int(request.args.get("page", 1))
        per = int(request.args.get("per", 20))
        desc = request.args.get("desc")
    except ValueError:
        # page/per were not integers.
        return jsonify(status="error", reason="invalid 'page' or 'per' parameter"), 400
    # One row per answer: vote count via outer join (answers with zero votes
    # still appear), plus author fields and a display name built from the
    # first name and one character of the last name.
    # NOTE(review): func.right(last_name, 1) takes the LAST character of the
    # last name, not the initial -- confirm this is intended.
    q = (Answer.query.with_entities(
        Answer.id,
        Answer.text,
        func.count(Answer.votes),
        Users.student_first_name,
        Users.student_last_name,
        Users.username,
        func.concat(Users.student_first_name,
                    func.right(Users.student_last_name, 1)),
        Answer.disqualified,
    ).join(Answer.question).join(Answer.user).outerjoin(Answer.votes).filter(
        Question.rank == core.max_rank(),
        Answer.correct,
        # Only answers not flagged as disqualified.
        Answer.disqualified.is_(None),
    ).group_by(Answer.id))
    if desc is not None:
        q = q.order_by(func.count(Answer.votes).desc())
    else:
        q = q.order_by(Answer.id)
    p = q.paginate(page=page, per_page=per)
    return jsonify(
        items=[{
            "id": i[0],
            "text": i[1],
            "numVotes": i[2],
            "firstName": i[3],
            "lastName": i[4],
            "username": i[5],
            "displayName": i[6],
            "disqualified": i[7],
        } for i in p.items],
        totalItems=p.total,
        page=p.page,
        totalPages=p.pages,
        hasNext=p.has_next,
        nextNum=p.next_num,
        hasPrev=p.has_prev,
        prevNum=p.prev_num,
    )
def right(self, expression: any, length: int) -> any:
    """Substrings an expression from the right of a given length.

    :param expression: the expression to substring
    :param length: the length of the substring from the right
    :return: the substring expression
    """
    # Fixed: the dialect check used "is", comparing string identity rather
    # than equality; that relies on interning and is a SyntaxWarning since
    # Python 3.8. Use == instead.
    if self.__type == 'sqlite':
        # SQLite has no RIGHT(); SUBSTR with a negative start offset counts
        # from the end of the string, yielding the rightmost characters.
        return func.substr(expression, -length)
    # By default use the SQL RIGHT() expression.
    return func.right(expression, length)
def rename_directory(db, user_id, old_api_path, new_api_path):
    """
    Rename a directory.

    :param db: executable database connection (SQLAlchemy).
    :param user_id: owner of the directory tree.
    :param old_api_path: current directory path, in API form.
    :param new_api_path: desired directory path, in API form.
    :raises DirectoryExists: if the target directory already exists.

    NOTE(review): there is no guard against renaming the root directory
    here; with old_db_path == '/' the prefix match below would rewrite
    every directory -- confirm callers prevent this.
    """
    old_db_path = from_api_dirname(old_api_path)
    new_db_path = from_api_dirname(new_api_path)

    # Overwriting existing directories is disallowed.
    if _dir_exists(db, user_id, new_db_path):
        raise DirectoryExists(new_api_path)

    # Set this foreign key constraint to deferred so it's not violated
    # when we run the first statement to update the name of the directory.
    db.execute('SET CONSTRAINTS '
               'pgcontents.directories_parent_user_id_fkey DEFERRED')

    # Update name column for the directory that's being renamed.
    db.execute(directories.update().where(
        and_(
            directories.c.user_id == user_id,
            directories.c.name == old_db_path,
        )).values(name=new_db_path, ))

    # Update the name and parent_name of any descendant directories. Do
    # this in a single statement so the non-deferrable check constraint
    # is satisfied.  RIGHT(x, -LENGTH(prefix)) strips the old prefix
    # (PostgreSQL semantics for a negative length argument).
    db.execute(directories.update().where(
        and_(
            directories.c.user_id == user_id,
            directories.c.name.startswith(old_db_path),
            directories.c.parent_name.startswith(old_db_path),
        )).values(
            name=func.concat(
                new_db_path,
                func.right(directories.c.name, -func.length(old_db_path))),
            parent_name=func.concat(
                new_db_path,
                func.right(directories.c.parent_name,
                           -func.length(old_db_path))),
        ))
def search():
    """
    Search qualified answers by username or student name.

    Query params:
        q (str, required): case-insensitive substring to match against
            username, first name, or last name.
        page (int): 1-based page number, default 1.
        per (int): page size, default 20.

    Returns a paginated JSON envelope of matching qualified answers with
    vote counts; 400 on missing 'q' or non-integer page/per.
    """
    keyword = request.args.get("q")
    try:
        page = int(request.args.get("page", 1))
        per = int(request.args.get("per", 20))
    except ValueError:
        return jsonify(status="error", reason="invalid 'page' or 'per' parameter"), 400
    if keyword is None:
        return jsonify(status="error", reason="missing 'q' parameter"), 400
    # Wrap for ILIKE substring matching.
    keyword = f"%{keyword}%"
    # One row per answer with vote count (outer join keeps zero-vote
    # answers) and a display name from first name + one character of the
    # last name.
    # NOTE(review): func.right(last_name, 1) is the LAST character of the
    # last name, not the initial -- confirm this is intended.
    p = (Answer.query.with_entities(
        Answer.id,
        Answer.text,
        func.count(Answer.votes),
        Users.student_first_name,
        Users.student_last_name,
        Users.username,
        func.concat(Users.student_first_name,
                    func.right(Users.student_last_name, 1)),
    ).join(Answer.question).join(Answer.user).outerjoin(Answer.votes).filter(
        Question.rank == core.max_rank(),
        Answer.correct,
        Answer.disqualified.is_(None),
        or_(
            Users.username.ilike(keyword),
            Users.student_first_name.ilike(keyword),
            Users.student_last_name.ilike(keyword),
        ),
    ).group_by(Answer.id).paginate(page=page, per_page=per))
    return jsonify(
        items=[{
            "id": i[0],
            "text": i[1],
            "numVotes": i[2],
            "firstName": i[3],
            "lastName": i[4],
            "username": i[5],
            "displayName": i[6],
        } for i in p.items],
        totalItems=p.total,
        page=p.page,
        totalPages=p.pages,
        hasNext=p.has_next,
        nextNum=p.next_num,
        hasPrev=p.has_prev,
        prevNum=p.prev_num,
    )
def query_sdss4_dr16_data_model_identifiers_from_database(
        self, mjd_start, mjd_end):
    """
    Query the SDSS database for SDSS-IV (DR16) ApStar data model identifiers.

    Yields one dict per matching ApStar file, containing the fixed
    release/filetype keys plus the identifier columns selected below
    (prefix, field, apstar, telescope, obj, apred).

    :param mjd_start: inclusive lower bound on the visit MJD.
    :param mjd_end: exclusive upper bound on the visit MJD.
    """
    release, filetype = ("DR16", "apStar")
    # Identifier columns reconstructed from the apStar file name:
    # e.g. prefix = first 2 chars, apred = chars 8-10 of the name.
    columns = (
        func.left(catalogdb.SDSSDR16ApogeeStar.file, 2).label("prefix"),
        catalogdb.SDSSDR16ApogeeStar.field,
        catalogdb.SDSSDR16ApogeeStar.apstar_version.label("apstar"),
        catalogdb.SDSSDR16ApogeeStar.telescope,
        catalogdb.SDSSDR16ApogeeStar.apogee_id.label("obj"),
        func.right(func.left(catalogdb.SDSSDR16ApogeeStar.file, 10),
                   3).label("apred"),
    )
    if not self._skip_sources_with_more_recent_observations:
        # The SDSSDR16ApogeeStar table does not have any MJD information,
        # so join to the visits table and take each visit's MJD.
        mjd = catalogdb.SDSSDR16ApogeeVisit.mjd
        q = session.query(*columns, mjd).distinct(*columns, mjd).join(
            catalogdb.SDSSDR16ApogeeVisit,
            catalogdb.SDSSDR16ApogeeVisit.apogee_id ==
            catalogdb.SDSSDR16ApogeeStar.apogee_id)
    else:
        # Get the max MJD of any observations, so sources with more recent
        # observations can be filtered out by the MJD range below.
        sq = session.query(
            *columns,
            func.max(
                catalogdb.SDSSDR16ApogeeVisit.mjd).label('max_mjd')).join(
                    catalogdb.SDSSDR16ApogeeVisit,
                    catalogdb.SDSSDR16ApogeeVisit.apogee_id ==
                    catalogdb.SDSSDR16ApogeeStar.apogee_id).group_by(
                        *columns).subquery()
        mjd = sq.c.max_mjd
        q = session.query(*columns, mjd).join(
            sq, catalogdb.SDSSDR16ApogeeStar.apogee_id == sq.c.obj)
    # Restrict to the requested MJD window: [mjd_start, mjd_end).
    q = q.filter(mjd < mjd_end)\
        .filter(mjd >= mjd_start)
    if self._query_filter_by_kwargs is not None:
        q = q.filter_by(**self._query_filter_by_kwargs)
    if self._limit is not None:
        q = q.limit(self._limit)
    log.debug(
        f"Found {q.count()} {release} {filetype} files between MJD {mjd_start} and {mjd_end}"
    )
    common = dict(release=release, filetype=filetype)
    keys = [column.name for column in columns]
    # The MJD will not be included because len(keys) < len(values) and zip
    # will only take the shorter of both.
    for values in q.yield_per(1):
        yield {**common, **dict(zip(keys, values))}
def setItem(self, release, path, item_type, data):
    """
    Set the data for the specified item.

    =============== =================================
    Parameter       Description
    =============== =================================
    release         Release path
    path            Item path inside of the structure
    item_type       The item type to create
    data            The JSON encoded data item
    =============== =================================

    ``Return:`` True on success

    Creates (or loads) the config item, attaches it to its parent and
    release, and -- when the item is renamed via ``data['name']`` -- rewrites
    the stored paths of all child config items in one bulk UPDATE.
    """
    result = None
    session = None

    # Check if this item type is supported
    if not item_type in self._supportedItems:
        raise ValueError("unknown item type '%s'" % item_type)

    # Acquire name from path
    name = os.path.basename(path)

    try:
        session = self._manager.getSession()

        # Load parent object
        parent = self._get_parent(release, path)
        if not parent:
            raise ValueError("cannot find parent object for '%s'" % path)
        parent = session.merge(parent)

        # Check if the current path is a container for that kind of
        # item type
        if not item_type in self._supportedItems[parent.item_type]['container']:
            raise ValueError("'%s' is not allowed for container '%s'" % (item_type, parent.item_type))

        # Load instance of ConfigItem (created on demand via add=True)
        item = self._manager._getConfigItem(name=name, item_type=item_type, release=release, add=True)
        session.commit()
        item = session.merge(item)
        item.path = path

        # Check if item will be renamed
        if "name" in data and name != data["name"]:
            item.name = data["name"]

        # Updated marker for assignable elements
        item.assignable = bool(self.getItemsAssignableElements(release, item))

        # Add us as child of the release and of the parent item
        release_object = self._manager._getRelease(release)
        release_object = session.merge(release_object)
        release_object.config_items.append(item)
        parent.children.append(item)

        # Try to commit the changes
        session.commit()

        # Check if path has changed
        if "name" in data:
            newPath = os.path.dirname(path)
            if newPath != "/":
                newPath = newPath + "/"
            newPath= newPath + data['name']

            if newPath != path:
                # Update path values for the child config items.
                # Get those entries that starts with 'oldB' and then
                # replace the oldB part in the path with the new base,
                # keeping the trailing remainder:
                # RIGHT(path, CHAR_LENGTH(path) - len(oldB)).
                oldB = path.rstrip("/")
                newB = newPath.rstrip("/")
                length = len(oldB)
                session.query(ConfigItem).filter(ConfigItem.path.startswith(oldB)).update( \
                    {ConfigItem.path: func.concat(newB, func.right(ConfigItem.path, func.char_length(ConfigItem.path) - length))}, \
                    synchronize_session=False)
                session.commit()

        result = True
    except:
        # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit;
        # it logs, rolls back and re-raises, so no errors are swallowed,
        # but a narrower `except Exception` would be safer to confirm.
        self.log.error("Caught unknown exception %s" % sys.exc_info()[0])
        session.rollback()
        raise
    finally:
        session.close()

    return result
def split_number(cls):
    """
    SQL expression yielding everything after the first "." in
    ``web_confirmation_number`` (uses SQL Server style LEN/CHARINDEX).
    """
    confirmation = cls.web_confirmation_number
    # Characters remaining after (and excluding) the first dot.
    chars_after_dot = (
        func.len(confirmation) - func.charindex(".", confirmation)
    )
    return func.right(confirmation, chars_after_dot)
def id_string(cls):
    """
    SQL expression for a question's display id: the first letter of the
    question type's name, the question letter, the zero-padded six-digit
    numeric id, and a trailing "Q".
    """
    # First letter of the related question type's name (scalar subquery).
    type_initial = func.left(
        db.select([Question_Type.type_name])
          .where(Question_Type.id == cls.question_type_id)
          .limit(1)
          .as_scalar(),
        1,
    )
    # Numeric id left-padded with zeros to six digits.
    padded_id = func.right(func.concat('000000', cls.id), 6)
    # <type initial><letter><padded id>"Q"
    return func.concat(
        func.concat(func.concat(type_initial, cls.letter), padded_id),
        "Q",
    )