def post(self):
    req = request.get_json()
    user = get_current_user()

    req["user_id"] = user.id
    req["team_id"] = user.team_id

    Model = get_class_by_tablename(req["type"])
    target = Model.query.filter_by(id=req["target"]).first_or_404()

    if target.cost > user.score:
        return (
            {
                "success": False,
                "errors": {
                    "score": "You do not have enough points to unlock this hint"
                },
            },
            400,
        )

    schema = UnlockSchema()
    response = schema.load(req, session=db.session)

    if response.errors:
        return {"success": False, "errors": response.errors}, 400

    existing = Unlocks.query.filter_by(**req).first()
    if existing:
        return (
            {
                "success": False,
                "errors": {
                    "target": "You've already unlocked this target"
                },
            },
            400,
        )

    db.session.add(response.data)

    award_schema = AwardSchema()
    award = {
        "user_id": user.id,
        "team_id": user.team_id,
        "name": target.name,
        "description": target.description,
        "value": (-target.cost),
        "category": target.category,
    }

    award = award_schema.load(award)
    db.session.add(award.data)
    db.session.commit()

    clear_standings()

    response = schema.dump(response.data)

    return {"success": True, "data": response.data}
def post(self):
    req = request.get_json()
    user = get_current_user()

    req["user_id"] = user.id
    req["team_id"] = user.team_id

    Model = get_class_by_tablename(req["type"])
    target = Model.query.filter_by(id=req["target"]).first_or_404()

    if target.cost > user.score:
        return (
            {
                "success": False,
                "errors": {
                    "score": "You do not have enough points to unlock this hint"
                },
            },
            400,
        )

    schema = UnlockSchema()
    response = schema.load(req, session=db.session)

    if response.errors:
        return {"success": False, "errors": response.errors}, 400

    existing = Unlocks.query.filter_by(**req).first()
    if existing:
        return (
            {
                "success": False,
                "errors": {
                    "target": "You've already unlocked this"
                },
            },
            400,
        )

    db.session.add(response.data)

    award_schema = AwardSchema()
    award = {
        "user_id": user.id,
        "team_id": user.team_id,
        "name": target.name,
        "description": target.description,
        "value": (-target.cost),
        "category": target.category,
    }

    award = award_schema.load(award)
    db.session.add(award.data)
    db.session.commit()

    clear_standings()

    response = schema.dump(response.data)

    return {"success": True, "data": response.data}
def dump_database_table(tablename):
    # TODO: It might make sense to limit dumpable tables. Config could potentially leak sensitive information.
    model = get_class_by_tablename(tablename)
    if model is None:
        raise KeyError("Unknown database table")

    temp = StringIO()
    writer = csv.writer(temp)

    header = [column.name for column in model.__mapper__.columns]
    writer.writerow(header)

    responses = model.query.all()

    for curr in responses:
        writer.writerow(
            [getattr(curr, column.name) for column in model.__mapper__.columns]
        )

    temp.seek(0)

    # In Python 3 send_file requires bytes
    output = BytesIO()
    output.write(temp.getvalue().encode("utf-8"))
    output.seek(0)
    temp.close()

    return output
def export_csv():
    table = request.args.get('table')

    # TODO: It might make sense to limit dumpable tables. Config could potentially leak sensitive information.
    model = get_class_by_tablename(table)
    if model is None:
        abort(404)

    output = six.StringIO()
    writer = csv.writer(output)

    header = [column.name for column in model.__mapper__.columns]
    writer.writerow(header)

    responses = model.query.all()

    for curr in responses:
        writer.writerow([getattr(curr, column.name) for column in model.__mapper__.columns])

    output.seek(0)
    return send_file(
        output,
        as_attachment=True,
        cache_timeout=-1,
        attachment_filename="{name}-{table}.csv".format(name=ctf_config.ctf_name(), table=table)
    )
def dump_csv(name):
    dump_func = CSV_KEYS.get(name)
    if dump_func:
        return dump_func()
    elif get_class_by_tablename(name):
        return dump_database_table(tablename=name)
    else:
        raise KeyError
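# Hypothetical usage of the dispatcher above: names registered in CSV_KEYS
# take the custom dump function branch, names that resolve through
# get_class_by_tablename fall back to the generic table dump, and anything
# else raises KeyError. The table names here are assumptions for illustration.
buf = dump_csv(name="users")  # falls through to dump_database_table
try:
    dump_csv(name="no_such_table")
except KeyError:
    pass  # neither a custom dump function nor a known table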
def post(self):
    req = request.get_json()
    user = get_current_user()

    req['user_id'] = user.id
    req['team_id'] = user.team_id

    Model = get_class_by_tablename(req['type'])
    target = Model.query.filter_by(id=req['target']).first_or_404()

    if target.cost > user.score:
        return {
            'success': False,
            'errors': {
                'score': 'You do not have enough points to unlock this hint'
            }
        }, 400

    schema = UnlockSchema()
    response = schema.load(req, session=db.session)

    if response.errors:
        return {
            'success': False,
            'errors': response.errors
        }, 400

    db.session.add(response.data)

    award_schema = AwardSchema()
    award = {
        'user_id': user.id,
        'team_id': user.team_id,
        'name': target.name,
        'description': target.description,
        'value': (-target.cost),
        'category': target.category
    }

    award = award_schema.load(award)
    db.session.add(award.data)
    db.session.commit()

    response = schema.dump(response.data)

    return {
        'success': True,
        'data': response.data
    }
def export_csv():
    table = request.args.get("table")

    # TODO: It might make sense to limit dumpable tables. Config could potentially leak sensitive information.
    model = get_class_by_tablename(table)
    if model is None:
        abort(404)

    temp = StringIO()
    writer = csv.writer(temp)

    header = [column.name for column in model.__mapper__.columns]
    writer.writerow(header)

    responses = model.query.all()

    for curr in responses:
        writer.writerow(
            [getattr(curr, column.name) for column in model.__mapper__.columns]
        )

    temp.seek(0)

    # In Python 3 send_file requires bytes
    output = BytesIO()
    output.write(temp.getvalue().encode("utf-8"))
    output.seek(0)
    temp.close()

    return send_file(
        output,
        as_attachment=True,
        cache_timeout=-1,
        attachment_filename="{name}-{table}.csv".format(
            name=ctf_config.ctf_name(), table=table
        ),
    )
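# A minimal sketch of the allowlist suggested by the TODO above: refuse to
# dump any table that is not explicitly marked as safe, so config (which may
# hold secrets) can never be exported. The SAFE_TABLES contents and the
# helper name are illustrative assumptions, not part of the original module.
SAFE_TABLES = {"users", "teams", "challenges", "solves", "submissions"}


def get_dumpable_model(tablename):
    # Reject anything outside the allowlist before touching the database
    if tablename not in SAFE_TABLES:
        abort(403)
    model = get_class_by_tablename(tablename)
    if model is None:
        abort(404)
    return model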
def insertion(table_filenames):
    for member in table_filenames:
        if member.startswith("db/"):
            table_name = member[3:-5]
            try:
                # Try to open a file but skip if it doesn't exist.
                data = backup.open(member).read()
            except KeyError:
                continue
            if data:
                table = side_db[table_name]
                saved = json.loads(data)
                for entry in saved["results"]:
                    # This is a hack to get SQLite to properly accept datetime values from dataset
                    # See Issue #246
                    if sqlite:
                        direct_table = get_class_by_tablename(table.name)
                        for k, v in entry.items():
                            if isinstance(v, string_types):
                                # We only want to apply this hack to columns that are expecting a datetime object
                                try:
                                    is_dt_column = (
                                        type(getattr(direct_table, k).type)
                                        == sqltypes.DateTime
                                    )
                                except AttributeError:
                                    is_dt_column = False

                                # If the table is expecting a datetime, we should check if the string is one and convert it
                                if is_dt_column:
                                    match = re.match(
                                        r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d", v
                                    )
                                    if match:
                                        entry[k] = datetime.datetime.strptime(
                                            v, "%Y-%m-%dT%H:%M:%S.%f"
                                        )
                                        continue
                                    match = re.match(
                                        r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}", v
                                    )
                                    if match:
                                        entry[k] = datetime.datetime.strptime(
                                            v, "%Y-%m-%dT%H:%M:%S"
                                        )
                                        continue

                    # From v2.0.0 to v2.1.0 requirements could have been a string or JSON because of a SQLAlchemy issue
                    # This is a hack to ensure we can still accept older exports. See #867
                    if member in (
                        "db/challenges.json",
                        "db/hints.json",
                        "db/awards.json",
                    ):
                        requirements = entry.get("requirements")
                        if requirements and isinstance(requirements, string_types):
                            entry["requirements"] = json.loads(requirements)

                    try:
                        table.insert(entry)
                    except ProgrammingError:
                        # MariaDB does not like JSON objects and prefers strings because it internally
                        # represents JSON with LONGTEXT.
                        # See Issue #973
                        requirements = entry.get("requirements")
                        if requirements and isinstance(requirements, dict):
                            entry["requirements"] = json.dumps(requirements)
                        table.insert(entry)

                db.session.commit()

            if postgres:
                # This command is to set the next primary key ID for the re-inserted tables in Postgres. However,
                # this command is very difficult to translate into SQLAlchemy code. Because Postgres is not
                # officially supported, no major work will go into this functionality.
                # https://stackoverflow.com/a/37972960
                if '"' not in table_name and "'" not in table_name:
                    query = "SELECT setval(pg_get_serial_sequence('{table_name}', 'id'), coalesce(max(id)+1,1), false) FROM \"{table_name}\"".format(  # nosec
                        table_name=table_name
                    )
                    side_db.engine.execute(query)
                else:
                    raise Exception(
                        "Table name {table_name} contains quotes".format(
                            table_name=table_name
                        )
                    )
def post(self):
    req = request.get_json()
    user = get_current_user()

    req["user_id"] = user.id
    req["team_id"] = user.team_id

    Model = get_class_by_tablename(req["type"])
    target = Model.query.filter_by(id=req["target"]).first_or_404()

    # We should use the team's score if in teams mode
    if is_teams_mode():
        team = get_current_team()
        score = team.score
    else:
        score = user.score

    if target.cost > score:
        return (
            {
                "success": False,
                "errors": {
                    "score": "You do not have enough points to unlock this hint"
                },
            },
            400,
        )

    schema = UnlockSchema()
    response = schema.load(req, session=db.session)

    if response.errors:
        return {"success": False, "errors": response.errors}, 400

    # Search for an existing unlock that matches the target and type
    # and matches either the requesting user id or the requesting team id
    existing = Unlocks.query.filter(
        Unlocks.target == req["target"],
        Unlocks.type == req["type"],
        (Unlocks.user_id == req["user_id"]) | (Unlocks.team_id == req["team_id"]),
    ).first()
    if existing:
        return (
            {
                "success": False,
                "errors": {"target": "You've already unlocked this target"},
            },
            400,
        )

    db.session.add(response.data)

    award_schema = AwardSchema()
    award = {
        "user_id": user.id,
        "team_id": user.team_id,
        "name": target.name,
        "description": target.description,
        "value": (-target.cost),
        "category": target.category,
    }

    award = award_schema.load(award)
    db.session.add(award.data)
    db.session.commit()

    clear_standings()

    response = schema.dump(response.data)

    return {"success": True, "data": response.data}
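# Hypothetical client-side call against the unlock handler above. The
# /api/v1/unlocks path, the hint id, and the session cookie are assumptions
# for illustration; only the JSON body shape ("type" is a table name,
# "target" is the row id) is taken from the handler itself.
import requests

resp = requests.post(
    "https://ctf.example.com/api/v1/unlocks",
    json={"type": "hints", "target": 42},
    cookies={"session": "<session-cookie>"},
)
# Expect {"success": True, "data": {...}} on success, or a 400 with an
# "errors" dict when the score is too low or the target is already unlocked.
print(resp.json())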
def import_ctf(backup, erase=True):
    if not zipfile.is_zipfile(backup):
        raise zipfile.BadZipfile

    backup = zipfile.ZipFile(backup)

    members = backup.namelist()
    max_content_length = get_app_config("MAX_CONTENT_LENGTH")
    for f in members:
        if f.startswith("/") or ".." in f:
            # Abort on malicious zip files
            raise zipfile.BadZipfile
        info = backup.getinfo(f)
        if max_content_length:
            if info.file_size > max_content_length:
                raise zipfile.LargeZipFile

    try:
        alembic_version = json.loads(backup.open("db/alembic_version.json").read())
        alembic_version = alembic_version["results"][0]["version_num"]
    except Exception:
        raise Exception(
            "Could not determine appropriate database version. This backup cannot be automatically imported."
        )

    # Check if the alembic version is from CTFd 1.x
    if alembic_version in (
        "1ec4a28fe0ff",
        "2539d8b5082e",
        "7e9efd084c5a",
        "87733981ca0e",
        "a4e30c94c360",
        "c12d2a1b0926",
        "c7225db614c1",
        "cb3cfcc47e2f",
        "cbf5620f8e15",
        "d5a224bf5862",
        "d6514ec92738",
        "dab615389702",
        "e62fd69bd417",
    ):
        raise Exception(
            "The version of CTFd that this backup is from is too old to be automatically imported."
        )

    if erase:
        # Clear out existing connections to release any locks
        db.session.close()
        db.engine.dispose()

        # Drop database and recreate it to get to a clean state
        drop_database()
        create_database()
        # We explicitly do not want to upgrade or stamp here.
        # The import will have this information.

    side_db = dataset.connect(get_app_config("SQLALCHEMY_DATABASE_URI"))
    sqlite = get_app_config("SQLALCHEMY_DATABASE_URI").startswith("sqlite")
    postgres = get_app_config("SQLALCHEMY_DATABASE_URI").startswith("postgres")

    try:
        if postgres:
            side_db.query("SET session_replication_role=replica;")
        else:
            side_db.query("SET FOREIGN_KEY_CHECKS=0;")
    except Exception:
        print("Failed to disable foreign key checks. Continuing.")

    first = [
        "db/teams.json",
        "db/users.json",
        "db/challenges.json",
        "db/dynamic_challenge.json",
        "db/flags.json",
        "db/hints.json",
        "db/unlocks.json",
        "db/awards.json",
        "db/tags.json",
        "db/submissions.json",
        "db/solves.json",
        "db/files.json",
        "db/notifications.json",
        "db/pages.json",
        "db/tracking.json",
        "db/config.json",
    ]

    for item in first:
        if item in members:
            members.remove(item)

    members = first + members

    upgrade(revision=alembic_version)

    # Create tables created by plugins
    try:
        app.db.create_all()
    except OperationalError as e:
        if not postgres:
            raise e
        else:
            print("Allowing error during app.db.create_all() due to Postgres")

    members.remove("db/alembic_version.json")

    for member in members:
        if member.startswith("db/"):
            table_name = member[3:-5]
            try:
                # Try to open a file but skip if it doesn't exist.
                data = backup.open(member).read()
            except KeyError:
                continue
            if data:
                table = side_db[table_name]
                saved = json.loads(data)
                for entry in saved["results"]:
                    # This is a hack to get SQLite to properly accept datetime values from dataset
                    # See Issue #246
                    if sqlite:
                        direct_table = get_class_by_tablename(table.name)
                        for k, v in entry.items():
                            if isinstance(v, six.string_types):
                                # We only want to apply this hack to columns that are expecting a datetime object
                                try:
                                    is_dt_column = (
                                        type(getattr(direct_table, k).type)
                                        == sqltypes.DateTime
                                    )
                                except AttributeError:
                                    is_dt_column = False

                                # If the table is expecting a datetime, we should check if the string is one and convert it
                                if is_dt_column:
                                    match = re.match(
                                        r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d", v
                                    )
                                    if match:
                                        entry[k] = datetime.datetime.strptime(
                                            v, "%Y-%m-%dT%H:%M:%S.%f"
                                        )
                                        continue
                                    match = re.match(
                                        r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}", v
                                    )
                                    if match:
                                        entry[k] = datetime.datetime.strptime(
                                            v, "%Y-%m-%dT%H:%M:%S"
                                        )
                                        continue

                    # From v2.0.0 to v2.1.0 requirements could have been a string or JSON because of a SQLAlchemy issue
                    # This is a hack to ensure we can still accept older exports. See #867
                    if member in (
                        "db/challenges.json",
                        "db/hints.json",
                        "db/awards.json",
                    ):
                        requirements = entry.get("requirements")
                        if requirements and isinstance(requirements, six.string_types):
                            entry["requirements"] = json.loads(requirements)

                    try:
                        table.insert(entry)
                    except ProgrammingError:
                        # MariaDB does not like JSON objects and prefers strings because it internally
                        # represents JSON with LONGTEXT.
                        # See Issue #973
                        requirements = entry.get("requirements")
                        if requirements and isinstance(requirements, dict):
                            entry["requirements"] = json.dumps(requirements)
                        table.insert(entry)

                db.session.commit()

            if postgres:
                # This command is to set the next primary key ID for the re-inserted tables in Postgres. However,
                # this command is very difficult to translate into SQLAlchemy code. Because Postgres is not
                # officially supported, no major work will go into this functionality.
                # https://stackoverflow.com/a/37972960
                if '"' not in table_name and "'" not in table_name:
                    query = "SELECT setval(pg_get_serial_sequence('{table_name}', 'id'), coalesce(max(id)+1,1), false) FROM \"{table_name}\"".format(  # nosec
                        table_name=table_name
                    )
                    side_db.engine.execute(query)
                else:
                    raise Exception(
                        "Table name {table_name} contains quotes".format(
                            table_name=table_name
                        )
                    )

    # Extracting files
    files = [f for f in backup.namelist() if f.startswith("uploads/")]
    uploader = get_uploader()
    for f in files:
        filename = f.split(os.sep, 1)

        if (
            len(filename) < 2 or os.path.basename(filename[1]) == ""
        ):  # just an empty uploads directory (e.g. uploads/) or any directory
            continue

        filename = filename[1]  # Get the second entry in the list (the actual filename)
        source = backup.open(f)
        uploader.store(fileobj=source, filename=filename)

    # Alembic sqlite support is lacking so we should just create_all anyway
    try:
        upgrade(revision="head")
    except (OperationalError, CommandError, RuntimeError, SystemExit, Exception):
        app.db.create_all()
        stamp_latest_revision()

    try:
        if postgres:
            side_db.query("SET session_replication_role=DEFAULT;")
        else:
            side_db.query("SET FOREIGN_KEY_CHECKS=1;")
    except Exception:
        print("Failed to enable foreign key checks. Continuing.")

    # Invalidate all cached data
    cache.clear()

    # Set default theme in case the current instance or the import does not provide it
    set_config("ctf_theme", "core")
    set_config("ctf_version", CTFD_VERSION)
def insertion(table_filenames):
    for member in table_filenames:
        set_status(f"inserting {member}")
        if member.startswith("db/"):
            table_name = member[3:-5]
            try:
                # Try to open a file but skip if it doesn't exist.
                data = backup.open(member).read()
            except KeyError:
                continue
            if data:
                table = side_db[table_name]
                saved = json.loads(data)
                count = len(saved["results"])
                for i, entry in enumerate(saved["results"]):
                    set_status(f"inserting {member} {i}/{count}")
                    # This is a hack to get SQLite to properly accept datetime values from dataset
                    # See Issue #246
                    if sqlite:
                        direct_table = get_class_by_tablename(table.name)
                        for k, v in entry.items():
                            if isinstance(v, string_types):
                                # We only want to apply this hack to columns that are expecting a datetime object
                                try:
                                    is_dt_column = (
                                        type(getattr(direct_table, k).type)
                                        == sqltypes.DateTime
                                    )
                                except AttributeError:
                                    is_dt_column = False

                                # If the table is expecting a datetime, we should check if the string is one and convert it
                                if is_dt_column:
                                    match = re.match(
                                        r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d", v
                                    )
                                    if match:
                                        entry[k] = datetime.datetime.strptime(
                                            v, "%Y-%m-%dT%H:%M:%S.%f"
                                        )
                                        continue
                                    match = re.match(
                                        r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}", v
                                    )
                                    if match:
                                        entry[k] = datetime.datetime.strptime(
                                            v, "%Y-%m-%dT%H:%M:%S"
                                        )
                                        continue

                    # From v2.0.0 to v2.1.0 requirements could have been a string or JSON because of a SQLAlchemy issue
                    # This is a hack to ensure we can still accept older exports. See #867
                    if member in (
                        "db/challenges.json",
                        "db/hints.json",
                        "db/awards.json",
                    ):
                        requirements = entry.get("requirements")
                        if requirements and isinstance(requirements, string_types):
                            entry["requirements"] = json.loads(requirements)

                    # From v3.1.0 to v3.5.0 FieldEntries could have been varying levels of JSON'ified strings.
                    # For example "\"test\"" vs "test". This results in issues with importing backups between
                    # databases. Specifically between MySQL and MariaDB. Because CTFd standardizes against MySQL
                    # we need to have an edge case here.
                    if member == "db/field_entries.json":
                        value = entry.get("value")
                        if value:
                            try:
                                # Attempt to convert anything to its original Python value
                                entry["value"] = str(json.loads(value))
                            except (json.JSONDecodeError, TypeError):
                                pass
                            finally:
                                # Dump the value into JSON if its mariadb or skip the conversion if not mariadb
                                if mariadb:
                                    entry["value"] = json.dumps(entry["value"])

                    try:
                        table.insert(entry)
                    except ProgrammingError:
                        # MariaDB does not like JSON objects and prefers strings because it internally
                        # represents JSON with LONGTEXT.
                        # See Issue #973
                        requirements = entry.get("requirements")
                        if requirements and isinstance(requirements, dict):
                            entry["requirements"] = json.dumps(requirements)
                        table.insert(entry)

                db.session.commit()

            if postgres:
                # This command is to set the next primary key ID for the re-inserted tables in Postgres. However,
                # this command is very difficult to translate into SQLAlchemy code. Because Postgres is not
                # officially supported, no major work will go into this functionality.
                # https://stackoverflow.com/a/37972960
                if '"' not in table_name and "'" not in table_name:
                    query = "SELECT setval(pg_get_serial_sequence('{table_name}', 'id'), coalesce(max(id)+1,1), false) FROM \"{table_name}\"".format(  # nosec
                        table_name=table_name
                    )
                    side_db.engine.execute(query)
                else:
                    set_error(f"Exception: Table name {table_name} contains quotes")
                    raise Exception(
                        "Table name {table_name} contains quotes".format(
                            table_name=table_name
                        )
                    )
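# Standalone sketch of the datetime coercion used in the SQLite branch above,
# pulled out so the two accepted ISO-8601 shapes can be tested in isolation.
# The helper name is hypothetical; the regexes and strptime formats are the
# ones from the import code.
import datetime
import re


def coerce_datetime(value):
    if re.match(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d", value):
        return datetime.datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%f")
    if re.match(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}", value):
        return datetime.datetime.strptime(value, "%Y-%m-%dT%H:%M:%S")
    return value


assert coerce_datetime("2024-01-02T03:04:05.678901").year == 2024
assert coerce_datetime("2024-01-02T03:04:05").minute == 4
assert coerce_datetime("not a datetime") == "not a datetime"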