def add_cron_worker(scheduled_job):
    # Next free index: one past the highest existing key, or 0 if no
    # cron workers exist yet.
    try:
        cron_worker_index = sorted(_cron_workers)[-1] + 1
    except IndexError:
        cron_worker_index = 0

    worker_id = f'cron_{cron_worker_index}'

    if len(get_cron_workers()) == _max_cron_workers:
        raise ResourceWarning(
            "Maximum number of scheduled jobs reached. Unable to process job")

    # 'schedule' holds the cron-trigger kwargs; the rest of the payload is
    # handed to the worker job itself.
    schedule = scheduled_job.pop('schedule')
    cron_job = get_app().apscheduler.add_job(
        func=cron_worker_job,
        trigger='cron',
        **schedule,
        args=[cron_worker_index, scheduled_job],
        id=worker_id,
        name=scheduled_job['job_id'],
        jobstore='redis')
    _cron_workers[cron_worker_index] = DockerWorker(worker_id, cron_job)
    return str(worker_id)
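# A hedged usage sketch for add_cron_worker(). The 'schedule' sub-dict is
# assumed to carry standard APScheduler cron-trigger kwargs (minute, hour,
# day_of_week, ...); 'job_id' is read by the function above, and any other
# payload fields shown here are purely hypothetical.
example_scheduled_job = {
    'job_id': 'nightly-report',             # becomes the APScheduler job name
    'schedule': {'hour': 2, 'minute': 30},  # popped off before dispatch
}
worker_id = add_cron_worker(example_scheduled_job)  # e.g. 'cron_0'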
def _update_job(worker, new_schedule):
    from werkzeug.exceptions import BadRequest
    from docker_worker_pool import DockerWorker

    _cron_workers = docker_worker_pool.get_cron_workers()
    worker_index = int(worker.worker_id.split('_')[-1])
    old_scheduled_job = worker.apscheduler_job

    # Remove the existing worker up front; a worker is re-registered in the
    # finally block whether or not the new schedule is valid.
    docker_worker_pool.delete_cron_worker(worker_index)

    try:
        scheduled_job = get_app().apscheduler.add_job(
            func=old_scheduled_job.func,
            trigger='cron',
            **new_schedule,
            args=old_scheduled_job.args,
            id=old_scheduled_job.id,
            name=old_scheduled_job.name,
            jobstore='redis')
    except (TypeError, ValueError) as e:
        # Bad field names or values in new_schedule: restore the old job,
        # then surface the error to the caller as a 400.
        scheduled_job = _recreate_cron_worker(old_scheduled_job)
        raise BadRequest(e)
    finally:
        _cron_workers[worker_index] = DockerWorker(old_scheduled_job.id,
                                                   scheduled_job)
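# For context (a sketch, not from the source): APScheduler's cron trigger
# raises TypeError for unrecognised field names and ValueError for
# out-of-range values, which is why _update_job() maps both onto a 400
# BadRequest while restoring the previous job.
valid_new_schedule = {'day_of_week': 'mon-fri', 'hour': '*/6'}
# These, by contrast, would be rejected and the old schedule kept:
#   {'hours': 2}       -> TypeError (unknown field name)
#   {'minute': '61'}   -> ValueError (value out of range)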
def add():
    # Next free index: one past the highest existing key, or 0 for the
    # first worker.
    try:
        worker_id = sorted(_workers)[-1] + 1
    except IndexError:
        worker_id = 0

    job = get_app().apscheduler.add_job(
        func=worker_job,
        trigger='interval',
        seconds=_interval,
        args=[worker_id],
        id=str(worker_id))
    _workers[worker_id] = DockerWorker(worker_id, job)
    return str(worker_id)
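# Hedged usage sketch: add() registers a polling worker that fires every
# _interval seconds, keyed by a plain integer index rather than the
# 'cron_<n>' ids used for scheduled jobs.
interval_worker_id = add()  # '0' for the first worker, '1' for the next, ...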
def _recreate_cron_worker(scheduled_job):
    # Rebuild the cron kwargs from the job's existing trigger and re-register
    # the job under its original id and name.
    schedule = _schedule_dict(scheduled_job.trigger)
    recreated_scheduled_job = get_app().apscheduler.add_job(
        func=scheduled_job.func,
        trigger='cron',
        **schedule,
        args=scheduled_job.args,
        id=scheduled_job.id,
        name=scheduled_job.name,
        jobstore='redis')
    return recreated_scheduled_job
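# _schedule_dict() is referenced above but not shown in this snippet. A
# minimal sketch of what it might look like under APScheduler 3.x, where a
# CronTrigger exposes its fields, each with a .name, an .is_default flag,
# and a str() form giving the original expression (the real helper may
# differ):
def _schedule_dict_sketch(trigger):
    # Rebuild cron kwargs such as {'hour': '2', 'minute': '30'}, skipping
    # fields still at their defaults.
    return {f.name: str(f) for f in trigger.fields if not f.is_default}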
def start_server():
    global server
    app = get_app(1)
    server = ServerThread(app)
    server.start()
    log.info('server started')
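# ServerThread is not defined in this snippet. A common shape for it (a
# sketch assuming Werkzeug's make_server, not the project's actual
# implementation) runs the WSGI app on a background thread so it can be
# shut down cleanly:
from threading import Thread

from werkzeug.serving import make_server


class ServerThreadSketch(Thread):
    def __init__(self, app, host='0.0.0.0', port=5000):
        super().__init__(daemon=True)
        self._server = make_server(host, port, app)

    def run(self):
        # Blocks until shutdown() is called from another thread.
        self._server.serve_forever()

    def shutdown(self):
        self._server.shutdown()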
import os
import os.path as path
import shutil
import tarfile
import tempfile
from time import time
from uuid import uuid4

from werkzeug.utils import secure_filename

import docker_worker_pool
import logging
from tracker_client_plugins import tracker_clients
from .scheduled_jobs_routes import *
from .constants import _JOB_BUNDLE_STORE_DIR, _ARCHIVE_DIR
from reverse_proxy import forward

app = get_app()


@app.route('/')
def show_home_page():
    return "Welcome to docker scheduler"


@app.route('/job_bundle', methods=['POST'])
def save_job_bundle():
    if not request.files:
        return "No files in request", 400
    if 'job_bundle' not in request.files:
        return "Job bundle not found in request", 400

    bundle_file = request.files['job_bundle']
    tarball = path.join(_JOB_BUNDLE_STORE_DIR,
                        secure_filename(bundle_file.filename))
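# save_job_bundle() is truncated in this snippet. One plausible continuation
# (a sketch only; the actual persistence logic is not shown in the source)
# would write the uploaded FileStorage to the computed path:
#
#     bundle_file.save(tarball)
#     return "Job bundle uploaded", 200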
import argparse
import os
import sys


def get_args():
    parser = argparse.ArgumentParser()
    # The flag spec for the host option was cut off in this snippet; it is
    # reconstructed here from the args.host reference below.
    parser.add_argument('--host',
                        type=str,
                        default="0.0.0.0",
                        help='host to bind server (default: 0.0.0.0)')
    parser.add_argument('-p', '--port',
                        type=int,
                        default=5000,
                        help='port to bind server (default: 5000)')
    parser.add_argument('-d', '--debug',
                        action='store_true',
                        help='starts server in debug mode')
    return parser.parse_args(sys.argv[1:])


if __name__ == '__main__':
    args = get_args()
    # Seed the GPU pool from CUDA_VISIBLE_DEVICES, marking each device id
    # as available.
    if os.environ.get("CUDA_VISIBLE_DEVICES", None):
        gpu_pool.update({
            k: "unlocked"
            for k in os.environ["CUDA_VISIBLE_DEVICES"].split(",")
        })
    get_app().run(use_reloader=False,
                  host=args.host,
                  port=args.port,
                  debug=args.debug,
                  threaded=True)
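# Example invocation (module name assumed), exposing two GPUs to the pool
# and enabling debug mode:
#
#     CUDA_VISIBLE_DEVICES=0,1 python server.py --host 0.0.0.0 --port 5000 -d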