Example No. 1

class SchedConfig(object):
    JOBS = [{
        'id': 'query_aws',
        'func': 'main:main',
        'args': tuple(),
        'trigger': 'interval',
        'seconds': 60
    }]

    SCHEDULER_API_ENABLED = True


if __name__ == '__main__':

    if 'CM_DB_URI' not in os.environ or 'CM_DB_NAME' not in os.environ:
        sys.stderr.write(
            "\033[91mCM_DB_URI and CM_DB_NAME system variables are required\033[0m\n"
        )
        sys.stderr.write("\033[91mApplication will now exit\033[0m\n")
        sys.exit(1)

    app.config.from_object(SchedConfig())

    scheduler = APScheduler()
    scheduler.init_app(app)
    scheduler.start()

    app.run(debug=True)
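
The imports and the app object are elided above, and the job entry 'func': 'main:main' points at a callable named main in a module named main. A minimal sketch of what the elided top of that module might look like, assuming Flask and Flask-APScheduler (the main() body is hypothetical):

import os
import sys

from flask import Flask
from flask_apscheduler import APScheduler

app = Flask(__name__)


def main():
    # hypothetical job body referenced by 'func': 'main:main'
    print("querying AWS...")
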
Example No. 2
from flask import Flask
from flask_apscheduler import APScheduler
import mysql.connector
from mysql.connector import errorcode
import time

app = Flask(__name__)
sched = APScheduler()


# Build the cron-expression fields for the CronTrigger that will be passed to the
# scheduler's add_job() call
def create_cronExp(scheduler):
    cronArr = ['*'] * 5
    scheduler_arr = scheduler.split('|')

    #if daily alert
    if scheduler_arr[0] == 'daily':

        #first position after daily
        if scheduler_arr[1] != '1':
            cronArr[2] = '*/' + str(scheduler_arr[1])

        #hour
        cronArr[1] = str(scheduler_arr[2])

        #minute value
        cronArr[0] = str(scheduler_arr[3])

    #if weekly alert
    elif scheduler_arr[0] == 'weekly':
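
The excerpt stops inside the weekly branch and never reaches a return statement. A minimal sketch of how the remainder and its use with the scheduler might look, assuming the field order is [minute, hour, day, month, day_of_week], that the weekly format is 'weekly|<day_of_week>|<hour>|<minute>', and that create_cronExp ultimately returns cronArr (all assumptions, not shown in the original):

        # hypothetical weekly layout: 'weekly|<day_of_week>|<hour>|<minute>'
        cronArr[4] = str(scheduler_arr[1])  # day of week
        cronArr[1] = str(scheduler_arr[2])  # hour
        cronArr[0] = str(scheduler_arr[3])  # minute

    return cronArr


def schedule_alert(job_id, scheduler_str):
    # Map the five fields onto the cron trigger arguments of add_job().
    minute, hour, day, month, day_of_week = create_cronExp(scheduler_str)
    sched.add_job(id=job_id,
                  func='alerts:run_alert',  # hypothetical module:function path
                  trigger='cron',
                  minute=minute, hour=hour, day=day,
                  month=month, day_of_week=day_of_week)
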
Example No. 3
from flask import Flask
from flask_apscheduler import APScheduler
from flask_cors import CORS
from flask_migrate import Migrate
from flask_pymongo import PyMongo
from flask_session import Session
from flask_sqlalchemy import SQLAlchemy

from .google.credentials import GoogleClientCredentials
from .spotify.credentials import SpotifyClientCredentials
from .utils.json import JSONEncoder

app = Flask(__name__)
app.config.from_pyfile('flaskapp.cfg')
app.json_encoder = JSONEncoder

CORS(app, supports_credentials=True)

my_scheduler = APScheduler(app=app)
my_scheduler.start()

mongodb = PyMongo(app)
mysqldb = SQLAlchemy(app)
session = Session(app)

mysqldb.create_all()
session.app.session_interface.db.create_all()

migrate_mongodb = Migrate(app, mongodb)
migrate_mysqldb = Migrate(app, mysqldb)

google_credentials = GoogleClientCredentials(app)
spotify_credentials = SpotifyClientCredentials(app)
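
The scheduler here is created and started before any jobs are defined. One common way to attach jobs afterwards is Flask-APScheduler's task decorator; a minimal sketch with a hypothetical job id and interval (the job body is illustrative only):

@my_scheduler.task('interval', id='hourly_housekeeping', hours=1)
def hourly_housekeeping():
    # Jobs run outside a request, so push an application context before
    # touching Flask extensions such as the databases configured above.
    with app.app_context():
        app.logger.info("hourly housekeeping ran")
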
Example No. 4
REGEXNUMSTRING = r"^[a-zA-Z]{1,10}\-[0-9]{1,10}(?:,[a-zA-Z]{1,10}\-[0-9]{1,10})*$"
REGEXCIDSTRING = r"^[\w]{3,20}(?:,[\w]{3,20})*$"
REGEXMGSSTRING = r"^(?:\d{3})?[a-zA-Z]{2,6}-\d{3,5}(?:,(?:\d{3})?[a-zA-Z]{2,6}-\d{3,5})*$"
REGEXPIDSTRING = r"^[\w]{3,25}(?:,[\w]{3,25})*$"
REGEXNUM = re.compile(REGEXNUMSTRING)
REGEXCID = re.compile(REGEXCIDSTRING, flags=re.ASCII)
REGEXMGS = re.compile(REGEXMGSSTRING)
REGEXPID = re.compile(REGEXPIDSTRING)


LOGIN_MANAGER = LoginManager()
LOGIN_MANAGER.login_view = "login"
LOGIN_MANAGER.init_app(APP)

SCHEDULER = APScheduler()
SCHEDULER.init_app(APP)
SCHEDULER.start()

ASYNC_MODE = 'eventlet'
THREAD_LOCK = Lock()
SOCKETIO = SocketIO(async_mode=ASYNC_MODE)
SOCKETIO.init_app(APP)


class Task:
    def __init__(self):
        self.task_id = 0
        self.task_index = 0
        self.task_queue = deque([])
        self.pushlog_finished = False
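
For reference, a few hypothetical inputs that the compiled patterns above do and do not accept (each pattern matches a single item or a comma-separated list of items):

assert REGEXNUM.match("ABC-123,helpdesk-7")    # <letters>-<digits> items
assert REGEXCID.match("client_01,client_02")   # 3-20 word characters per item
assert REGEXMGS.match("123ABCD-1234,XY-999")   # optional 3-digit prefix + code-number
assert REGEXPID.match("project_alpha")         # 3-25 word characters per item
assert not REGEXNUM.match("ABC-123,")          # trailing comma is rejected
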
Example No. 5
def load(app):
    # upgrade()
    app.db.create_all()
    CHALLENGE_CLASSES["Aliyun_Instance_Challenge"] = AliyunInstanceChallenge
    register_plugin_assets_directory(
        app, base_path="/plugins/aliyun-instance/assets/")
    ali_blueprint = Blueprint("aliyun-instance",
                              __name__,
                              template_folder="templates",
                              static_folder="assets",
                              url_prefix="/plugins/aliyun-instance")

    log_dir = app.config["LOG_FOLDER"]
    logger_ali = logging.getLogger("aliyun-instance")
    logger_ali.setLevel(logging.INFO)
    logs = {
        "aliyun-instance": os.path.join(log_dir, "aliyun-instance.log"),
    }
    try:
        for log in logs.values():
            if not os.path.exists(log):
                open(log, "a").close()
        container_log = logging.handlers.RotatingFileHandler(
            logs["aliyun-instance"], maxBytes=10000)
        logger_ali.addHandler(container_log)
    except IOError:
        pass

    stdout = logging.StreamHandler(stream=sys.stdout)
    logger_ali.addHandler(stdout)
    logger_ali.propagate = False

    @ali_blueprint.route('/admin/settings', methods=['GET'])
    @admins_only
    # list plugin settings
    def admin_list_configs():
        configs = DBUtils.get_all_configs()
        return render_template('configs.html', configs=configs)

    @ali_blueprint.route('/admin/settings', methods=['PATCH'])
    @admins_only
    # modify plugin settings
    def admin_save_configs():
        req = request.get_json()
        DBUtils.save_all_configs(req.items())
        return jsonify({'success': True})

    @ali_blueprint.route("/admin/containers", methods=['GET'])
    @admins_only
    # list alive containers
    def admin_list_containers():
        mode = utils.get_config("user_mode")
        configs = DBUtils.get_all_configs()
        page = abs(request.args.get("page", 1, type=int))
        results_per_page = 50
        page_start = results_per_page * (page - 1)
        page_end = results_per_page * (page - 1) + results_per_page

        count = DBUtils.get_all_alive_instance_count()
        containers = DBUtils.get_all_alive_instance_page(page_start, page_end)

        pages = int(count / results_per_page) + (count % results_per_page > 0)

        return render_template("containers.html",
                               containers=containers,
                               pages=pages,
                               curr_page=page,
                               curr_page_start=page_start,
                               configs=configs,
                               mode=mode)

    @ali_blueprint.route("/admin/containers", methods=['PATCH'])
    @admins_only
    def admin_expired_instance():
        user_id = request.args.get('user_id')
        challenge_id = request.args.get('challenge_id')
        ControlUtil.expired_instance(user_id=user_id,
                                     challenge_id=challenge_id)
        return jsonify({'success': True})

    @ali_blueprint.route("/admin/containers", methods=['DELETE'])
    @admins_only
    def admin_delete_container():
        user_id = request.args.get('user_id')
        ControlUtil.destroy_instance(user_id)
        return jsonify({'success': True})

    # instances
    @ali_blueprint.route('/container', methods=['GET'])
    @authed_only
    def list_container():
        try:
            user_id = get_mode()
            challenge_id = request.args.get('challenge_id')
            ControlUtil.check_challenge(challenge_id, user_id)
            data = ControlUtil.get_instance(user_id=user_id)
            if data is not None:
                if int(data.challenge_id) != int(challenge_id):
                    return jsonify({})

                dynamic_aliyun_challenge = AliyunChallenge.query \
                    .filter(AliyunChallenge.id == data.challenge_id) \
                    .first_or_404()

                return jsonify({
                    'success': True,
                    'type': 'redirect',
                    'ip': data.ip,
                    'remaining_time': 3600 - (datetime.datetime.utcnow() - data.start_time).seconds
                })
            else:
                return jsonify({'success': True})
        except Exception as e:
            logging.exception(e)
            return jsonify({'success': False, 'msg': str(e)})

    @ali_blueprint.route('/container', methods=['POST'])
    @authed_only
    def new_instance():
        try:
            user_id = get_mode()

            if ControlUtil.frequency_limit():
                return jsonify({
                    'success': False,
                    'msg': 'Frequency limit: you should wait at least 1 minute.'
                })
            # check whether a container already exists for this user
            existContainer = ControlUtil.get_instance(user_id)
            if existContainer:
                return jsonify({
                    'success': False,
                    'msg': 'You have already booted {}.'.format(
                        existContainer.challenge.name)
                })
            else:
                challenge_id = request.args.get('challenge_id')
                ControlUtil.check_challenge(challenge_id, user_id)
                configs = DBUtils.get_all_configs()
                current_count = DBUtils.get_all_alive_instance_count()
                if configs.get("aliyun_max_ECS_count") != "None":
                    if int(configs.get("aliyun_max_ECS_count")) <= int(
                            current_count):
                        return jsonify({
                            'success': False,
                            'msg': 'Max container count exceeded.'
                        })

                dynamic_aliyun_challenge = AliyunChallenge.query \
                    .filter(AliyunChallenge.id == challenge_id) \
                    .first_or_404()
                try:
                    result = ControlUtil.new_instance(
                        user_id=user_id, challenge_id=challenge_id)
                    if isinstance(result, bool):
                        return jsonify({'success': True})
                    else:
                        return jsonify({'success': False, 'msg': str(result)})
                except Exception as e:
                    return jsonify({
                        'success': False,
                        'msg': 'Failed to launch instance, please contact the admin: ' + str(e)
                    })
        except Exception as e:
            return jsonify({'success': False, 'msg': str(e)})

    @ali_blueprint.route('/container', methods=['DELETE'])
    @authed_only
    def destroy_instance():
        user_id = get_mode()

        if ControlUtil.frequency_limit():
            return jsonify({
                'success': False,
                'msg': 'Frequency limit: you should wait at least 1 minute.'
            })

        if ControlUtil.destroy_instance(user_id):
            return jsonify({'success': True})
        else:
            return jsonify({
                'success': False,
                'msg': 'Failed to destroy instance, please contact the admin!'
            })

    @ali_blueprint.route('/container', methods=['PATCH'])
    @authed_only
    def renew_instance():
        user_id = get_mode()
        if ControlUtil.frequency_limit():
            return jsonify({
                'success': False,
                'msg': 'Frequency limit: you should wait at least 1 minute.'
            })

        configs = DBUtils.get_all_configs()
        challenge_id = request.args.get('challenge_id')
        ControlUtil.check_challenge(challenge_id, user_id)
        aliyun_max_renew_count = int(configs.get("aliyun_max_renew_count"))
        container = ControlUtil.get_instance(user_id)
        if container is None:
            return jsonify({'success': False, 'msg': 'Instance not found.'})
        if container.renew_count >= aliyun_max_renew_count:
            return jsonify({
                'success': False,
                'msg': 'Max renewal count exceeded.'
            })

        ControlUtil.expired_instance(user_id=user_id,
                                     challenge_id=challenge_id)

        return jsonify({'success': True})

    def auto_clean_container():
        # Running every 5 minutes is enough; an instance is created with an initial lifetime of 50 minutes.
        with app.app_context():
            results = DBUtils.get_all_expired_instance()
            for r in results:
                ControlUtil.destroy_instance(r.user_id)

            FrpUtils.update_frp_redirect()

    app.register_blueprint(ali_blueprint)

    try:
        lock_file = open("/tmp/aliyun-instance.lock", "w")
        lock_fd = lock_file.fileno()
        fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)

        scheduler = APScheduler()
        scheduler.init_app(app)
        scheduler.start()
        scheduler.add_job(id='aliyun-instance-auto-clean',
                          func=auto_clean_container,
                          trigger="interval",
                          seconds=300)

        print("[CTFd Ali-ECS]Started successfully")
    except IOError:
        pass
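
Both this plugin and the ctfd-whale example below guard the scheduler behind an exclusive, non-blocking file lock so that only one worker process starts the periodic job when the app runs under a multi-worker server. A distilled sketch of that pattern, with a placeholder lock path and job id:

import fcntl

from flask_apscheduler import APScheduler


def start_scheduler_once(app, job_func, lock_path="/tmp/example-scheduler.lock"):
    try:
        lock_file = open(lock_path, "w")
        fcntl.lockf(lock_file.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        return None  # another worker already holds the lock; do not start a second scheduler

    scheduler = APScheduler()
    scheduler.init_app(app)
    scheduler.start()
    scheduler.add_job(id="example-auto-clean", func=job_func,
                      trigger="interval", seconds=300)
    return scheduler
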
Example No. 6

# construct the catchall for URLs which do not exist
@app.errorhandler(404)
def page_not_found(error):
    return render_template('404.html'), 404


# add requests module to namespace
import requests

# construct scheduler object (with standard settings)
from flask_apscheduler import APScheduler
from apscheduler.schedulers.gevent import GeventScheduler
gevent_scheduler = GeventScheduler()
ap_scheduler = APScheduler(scheduler=gevent_scheduler)
app.config['SCHEDULER_TIMEZONE'] = 'UTC'

# add job store to scheduler
job_store_on = False
job_store_settings = []
job_store_login_names = []
job_store_login_keys = ['user', 'pass', 'host', 'port']
for key in job_store_login_keys:
    key_name = 'scheduler_job_store_%s' % key
    job_store_login_names.append(key_name)
    if scheduler_settings[key_name]:
        job_store_settings.append(scheduler_settings[key_name])
        job_store_on = True
if job_store_on:
    if len(job_store_settings) != len(job_store_login_keys):
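
The excerpt stops inside the job-store validation. A plausible continuation, assuming the four settings are MySQL-style credentials meant to back the scheduler with an SQLAlchemy job store (the error message, driver, and database name are assumptions):

    if len(job_store_settings) != len(job_store_login_keys):
        raise ValueError('incomplete scheduler job store settings: %s' %
                         ', '.join(job_store_login_names))
    user, password, host, port = job_store_settings
    from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
    app.config['SCHEDULER_JOBSTORES'] = {
        'default': SQLAlchemyJobStore(
            url='mysql+pymysql://%s:%s@%s:%s/scheduler' % (user, password, host, port)),
    }
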
"""
Scheduler Extensions.

Set up basic flask items for import in other modules.

*Items Setup*

:db: database
:scheduler: scheduler

These items can be imported into other
scripts after running :obj:`scheduler.create_app`

"""

from flask_apscheduler import APScheduler
from flask_sqlalchemy import SQLAlchemy
from flask_sqlalchemy_caching import CachingQuery

db = SQLAlchemy(query_class=CachingQuery)
atlas_scheduler = APScheduler()
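
Per the docstring, db and atlas_scheduler are meant to be bound to the app inside a factory. A minimal sketch of such a create_app, with a hypothetical config object path:

from flask import Flask


def create_app(config_object="config.Config"):
    app = Flask(__name__)
    app.config.from_object(config_object)

    db.init_app(app)
    atlas_scheduler.init_app(app)
    atlas_scheduler.start()
    return app
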
Example No. 8
def create_app():
    # Instantiate the core Flask application object
    app = Flask(__name__)

    # # Logging setup
    # logging.basicConfig(level=logging.DEBUG,  # log level printed to the console
    #                     filename='app/log/error_log.log',
    #                     filemode='a',  # 'w' rewrites the log file on every run, overwriting previous logs;
    #                     # 'a' appends and is the default when omitted
    #                     format=
    #                     '%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'
    #                     # log format
    #                     )

    # Initialize flasgger
    swagger.init_app(app)

    # Load configuration files
    app.config.from_object("app.config.setting")
    app.config.from_object("app.config.secure")

    # Create the video file storage directory
    if not os.path.exists(app.config["VIDEO_DIR"]):
        os.makedirs(app.config["VIDEO_DIR"])
        os.chmod(app.config["VIDEO_DIR"], stat.S_IRWXU)

    # Create the video cover (thumbnail) storage directory
    if not os.path.exists(app.config["LOGO_DIR"]):
        os.makedirs(app.config["LOGO_DIR"])
        os.chmod(app.config["LOGO_DIR"], stat.S_IRWXU)

    # Create the user avatar storage directory
    if not os.path.exists(app.config["FACE_DIR"]):
        os.makedirs(app.config["FACE_DIR"])
        os.chmod(app.config["FACE_DIR"], stat.S_IRWXU)

    # Initialize the database
    db.init_app(app)
    db.create_all(app=app)

    # Initialize Redis
    # rd.init_app(app=app)

    # Register blueprints
    app.register_blueprint(create_admin_blueprint())
    app.register_blueprint(create_home_blueprint())

    # Initialize the flask_login plugin
    login_manager.init_app(app)

    # Set the login view
    # login_manager.login_view = "home.login"
    # Set the prompt message
    # login_manager.login_message = "Please log in or register"

    # Required by login_manager; called automatically
    @login_manager.user_loader
    def get_user(id):
        return BaseUser.query.get(id)

    # The method decorated with @login_manager.unauthorized_handler replaces the
    # default handling of the @login_required decorator.
    # Once set, the login view and prompt message above are no longer used.
    @login_manager.unauthorized_handler
    def unauthorized_handler():
        return ReturnObj.get_response(ReturnEnum.SUCCESS.value, "Please log in")

    # Global exception handling
    @app.errorhandler(Exception)
    def framework_error(e):
        if isinstance(e, APIException):
            return e
        if isinstance(e, HTTPException):
            code = e.code
            msg = e.description
            error_code = 1007
            return APIException(msg, code, error_code)
        else:
            # System error: log it
            # logging.error(e.args)
            raise e
            # return APIException(msg=str(e), code=500)
            # return ServerError()

    # Register APScheduler
    scheduler = APScheduler()
    scheduler.init_app(app=app)
    # Add a scheduled job
    scheduler.add_job(func=calculate_avg_score,
                      id="tasks-score",
                      trigger="interval",
                      seconds=1 * 60 * 60)
    scheduler.start()

    # Enable cross-origin requests
    CORS(app, supports_credentials=True)

    return app
Example No. 9
def do_start(app):
    sched = APScheduler()
    app.config.from_object(Config())
    sched.init_app(app)
    sched.start()
Example No. 10
        'id': 'get_number_of_projects',  # identifier
        'func': 'line_notify:get_number_of_projects',  # file_name:func_name
        'args': (),  # no arguments
        'trigger': 'interval',  # attribute: interval, cron, or date
        'hours': 3,  # run the job every 3 hours (attributes: weeks, days, hours, minutes, seconds)
        'next_run_time': datetime.datetime.now()  # set this to execute right away
    }
        # you can add more jobs here
    ]
    SCHEDULER_TIMEZONE = 'Asia/Taipei'  # time zone
    SCHEDULER_API_ENABLED = True  # enable the API if needed


scheduler = APScheduler(BackgroundScheduler(timezone="Asia/Taipei"))
'''
get project numbers from ccm web
'''


def get_number_of_projects():
    url = 'http://127.0.0.1/ccm/connection'  #change to your iottalk.tw
    r = requests.get(url)
    soup = BeautifulSoup(r.text, "html.parser")

    li_all = soup.find_all("li", class_="project-list")
    li_all = li_all[1:]  # discard the first one - "Add Project"

    project_list = dict()
    for li in li_all:
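
The loop body is cut off here. A plausible continuation, assuming each project-list <li> carries the project name as its visible text and that the function should report how many projects exist (both assumptions about the CCM page markup):

    for li in li_all:
        name = li.get_text(strip=True)  # hypothetical: the <li> text is the project name
        project_list[name] = project_list.get(name, 0) + 1

    return len(project_list)
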
Example No. 11
from flask import Flask
from config import Config
import os
from flask_apscheduler import APScheduler

import time
from google.cloud import language_v1


app = Flask(__name__)
app.config.from_object(Config)

scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()

# Uses Application Default Credentials; pass credentials=... explicitly if needed.
client = language_v1.LanguageServiceClient()

from . import routes
from .jobs.webScraper import webScrapeToJSONAndPush
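
The imported scraper job is not attached to the scheduler in this excerpt. One way it could be registered, with a hypothetical id and interval:

scheduler.add_job(id='web-scrape-push',
                  func=webScrapeToJSONAndPush,
                  trigger='interval',
                  hours=1)
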
Example No. 12
def load(app):
    # upgrade()
    app.db.create_all()
    CHALLENGE_CLASSES["dynamic_docker"] = DynamicValueDockerChallenge
    register_plugin_assets_directory(app,
                                     base_path="/plugins/ctfd-whale/assets/")

    page_blueprint = Blueprint("ctfd-whale",
                               __name__,
                               template_folder="templates",
                               static_folder="assets",
                               url_prefix="/plugins/ctfd-whale")

    @page_blueprint.route('/admin/settings', methods=['GET'])
    @admins_only
    def admin_list_configs():
        configs = DBUtils.get_all_configs()
        return render_template('config.html', configs=configs)

    @page_blueprint.route('/admin/settings', methods=['PATCH'])
    @admins_only
    def admin_save_configs():
        req = request.get_json()
        DBUtils.save_all_configs(req.items())
        redis_util = RedisUtils(app=app)
        redis_util.init_redis_port_sets()
        return json.dumps({'success': True})

    @page_blueprint.route("/admin/containers", methods=['GET'])
    @admins_only
    def admin_list_containers():
        configs = DBUtils.get_all_configs()
        page = abs(request.args.get("page", 1, type=int))
        results_per_page = 50
        page_start = results_per_page * (page - 1)
        page_end = results_per_page * (page - 1) + results_per_page

        count = DBUtils.get_all_alive_container_count()
        containers = DBUtils.get_all_alive_container_page(page_start, page_end)

        pages = int(count / results_per_page) + (count % results_per_page > 0)
        return render_template("containers.html",
                               containers=containers,
                               pages=pages,
                               curr_page=page,
                               curr_page_start=page_start,
                               configs=configs)

    @page_blueprint.route("/admin/containers", methods=['DELETE'])
    @admins_only
    def admin_delete_container():
        user_id = request.args.get('user_id')
        ControlUtil.remove_container(app, user_id)
        return json.dumps({'success': True})

    @page_blueprint.route("/admin/containers", methods=['PATCH'])
    @admins_only
    def admin_renew_container():
        user_id = request.args.get('user_id')
        challenge_id = request.args.get('challenge_id')
        DBUtils.renew_current_container(user_id=user_id,
                                        challenge_id=challenge_id)
        return json.dumps({'success': True})

    @page_blueprint.route('/container', methods=['POST'])
    @authed_only
    def add_container():
        user_id = current_user.get_current_user().id
        redis_util = RedisUtils(app=app, user_id=user_id)

        if not redis_util.acquire_lock():
            return json.dumps({'success': False, 'msg': 'You are operating too fast!'})

        if ControlUtil.frequency_limit():
            return json.dumps({'success': False, 'msg': 'Rate limited, please wait 1 minute.'})

        ControlUtil.remove_container(app, user_id)
        challenge_id = request.args.get('challenge_id')
        ControlUtil.check_challenge(challenge_id, user_id)

        configs = DBUtils.get_all_configs()
        current_count = DBUtils.get_all_alive_container_count()
        if int(configs.get("docker_max_container_count")) <= int(
                current_count):
            return json.dumps({'success': False, 'msg': 'The platform has reached its maximum container count.'})

        dynamic_docker_challenge = DynamicDockerChallenge.query \
            .filter(DynamicDockerChallenge.id == challenge_id) \
            .first_or_404()
        flag = "flag{" + hashlib.md5(str(
            uuid.uuid4()).encode('utf-8')).hexdigest() + "}"
        if dynamic_docker_challenge.redirect_type == "http":
            ControlUtil.add_container(app=app,
                                      user_id=user_id,
                                      challenge_id=challenge_id,
                                      flag=flag)
        else:
            port = redis_util.get_available_port()
            ControlUtil.add_container(app=app,
                                      user_id=user_id,
                                      challenge_id=challenge_id,
                                      flag=flag,
                                      port=port)

        redis_util.release_lock()
        return json.dumps({'success': True})

    @page_blueprint.route('/container', methods=['GET'])
    @authed_only
    def list_container():
        user_id = current_user.get_current_user().id
        challenge_id = request.args.get('challenge_id')
        ControlUtil.check_challenge(challenge_id, user_id)
        data = ControlUtil.get_container(user_id=user_id)
        configs = DBUtils.get_all_configs()
        domain = configs.get('frp_http_domain_suffix', "")
        timeout = int(configs.get("docker_timeout", "3600"))
        if data is not None:
            if int(data.challenge_id) != int(challenge_id):
                return json.dumps({})
            dynamic_docker_challenge = DynamicDockerChallenge.query \
                .filter(DynamicDockerChallenge.id == data.challenge_id) \
                .first_or_404()
            lan_domain = str(user_id) + "-" + data.uuid
            if dynamic_docker_challenge.redirect_type == "http":
                if int(configs.get('frp_http_port', "80")) == 80:
                    return json.dumps({
                        'success': True,
                        'type': 'http',
                        'domain': data.uuid + domain,
                        'remaining_time': timeout - (datetime.now() - data.start_time).seconds,
                        'lan_domain': lan_domain
                    })
                else:
                    return json.dumps({
                        'success': True,
                        'type': 'http',
                        'domain': data.uuid + domain + ":" + configs.get('frp_http_port', "80"),
                        'remaining_time': timeout - (datetime.now() - data.start_time).seconds,
                        'lan_domain': lan_domain
                    })
            else:
                return json.dumps({
                    'success': True,
                    'type': 'redirect',
                    'ip': configs.get('frp_direct_ip_address', ""),
                    'port': data.port,
                    'remaining_time': timeout - (datetime.now() - data.start_time).seconds,
                    'lan_domain': lan_domain
                })
        else:
            return json.dumps({'success': True})

    @page_blueprint.route('/container', methods=['DELETE'])
    @authed_only
    def remove_container():
        user_id = current_user.get_current_user().id
        redis_util = RedisUtils(app=app, user_id=user_id)
        if not redis_util.acquire_lock():
            return json.dumps({'success': False, 'msg': 'You are operating too fast!'})

        if ControlUtil.frequency_limit():
            return json.dumps({'success': False, 'msg': 'Rate limited, please wait 1 minute.'})

        if ControlUtil.remove_container(app, user_id):
            redis_util.release_lock()

            return json.dumps({'success': True})
        else:
            return json.dumps({'success': False, 'msg': 'The environment is already shut down, please contact the admin!'})

    @page_blueprint.route('/container', methods=['PATCH'])
    @authed_only
    def renew_container():
        user_id = current_user.get_current_user().id
        redis_util = RedisUtils(app=app, user_id=user_id)
        if not redis_util.acquire_lock():
            return json.dumps({'success': False, 'msg': 'You are operating too fast!'})

        if ControlUtil.frequency_limit():
            return json.dumps({'success': False, 'msg': 'Rate limited, please wait 1 minute.'})

        configs = DBUtils.get_all_configs()
        challenge_id = request.args.get('challenge_id')
        ControlUtil.check_challenge(challenge_id, user_id)
        docker_max_renew_count = int(configs.get("docker_max_renew_count"))
        container = ControlUtil.get_container(user_id)
        if container is None:
            return json.dumps({'success': False, 'msg': 'Error: the environment does not exist.'})
        if container.renew_count >= docker_max_renew_count:
            return json.dumps({'success': False, 'msg': 'Maximum number of renewals reached.'})
        ControlUtil.renew_container(user_id=user_id, challenge_id=challenge_id)
        redis_util.release_lock()
        return json.dumps({'success': True})

    def auto_clean_container():
        with app.app_context():
            results = DBUtils.get_all_expired_container()
            for r in results:
                ControlUtil.remove_container(app, r.user_id)

            FrpUtils.update_frp_redirect()

    app.register_blueprint(page_blueprint)

    try:
        lock_file = open("/tmp/ctfd_whale.lock", "w")
        lock_fd = lock_file.fileno()
        fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)

        scheduler = APScheduler()
        scheduler.init_app(app)
        scheduler.start()
        scheduler.add_job(id='whale-auto-clean',
                          func=auto_clean_container,
                          trigger="interval",
                          seconds=10)

        redis_util = RedisUtils(app=app)
        redis_util.init_redis_port_sets()

        print("[CTFd Whale]Started successfully")
    except IOError:
        pass