def delete(self, task_id):
    worker = Worker.query.filter_by(current_task=task_id).first()
    if worker:
        http_request(worker.host, '/kill', 'delete')
        if worker.status != 'disabled':
            worker.status = 'enabled'
        worker.current_task = None
        db.session.add(worker)
        db.session.commit()
    return task_id, 202
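# `http_request()` is a helper defined elsewhere in the project. The call
# sites in this section suggest it takes (host, path, method, params=None,
# files=None), returns decoded JSON on success (e.g. `pid['pid']` in
# schedule()) and a (body, status_code) pair on errors (e.g. `r[1] == 403`
# below). A minimal, hypothetical sketch under those assumptions:
import requests


def http_request(host, path, method, params=None, files=None):
    """Hypothetical sketch; the real helper may differ."""
    url = 'http://{0}{1}'.format(host, path)
    call = getattr(requests, method.lower())
    if method.lower() == 'get':
        r = call(url, params=params)
    else:
        r = call(url, data=params, files=files)
    if r.status_code >= 400:
        # Errors are assumed to come back as a (body, status_code) pair.
        return r.text, r.status_code
    try:
        return r.json()
    except ValueError:
        return r.text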
def patch(self, task_id):
    task = Task.query.get_or_404(task_id)
    args = status_parser.parse_args()
    task.status = args['status']
    # Capture the values needed for the server notification before the task
    # row is (possibly) deleted below; after commit the instance is expired.
    params = {
        'id': task.server_id,
        'status': task.status}
    if task.status in ['finished', 'failed']:
        # Free the worker and drop the local task entry once it terminates.
        worker = Worker.query.get(task.worker_id)
        worker.status = 'enabled'
        db.session.add(worker)
        db.session.delete(task)
    db.session.commit()
    http_request(app.config['BRENDER_SERVER'], '/tasks', 'post', params=params)
    return '', 204
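# `status_parser` is assumed to be a flask-restful reqparse parser defined
# alongside these resources. A hedged sketch covering only the arguments the
# patch() methods in this section actually read; types and `required` flags
# are guesses:
from flask_restful import reqparse
from werkzeug.datastructures import FileStorage

status_parser = reqparse.RequestParser()
status_parser.add_argument('status', type=str, required=True)
# The manager-side patch() further down also reads these optional fields.
status_parser.add_argument('task_id', type=int)
status_parser.add_argument('job_id', type=int)
status_parser.add_argument('time_cost', type=int)
status_parser.add_argument('log', type=str)
status_parser.add_argument('activity', type=str)
status_parser.add_argument('taskfile', type=FileStorage, location='files')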
def delete(self, task_id):
    task = Task.query.filter_by(server_id=task_id).first()
    if task is None:
        abort(404)
    if task.status not in ['finished', 'failed']:
        # The task is still running: free the worker and ask it to kill the
        # process before the task row is removed.
        task.status = 'aborted'
        worker = Worker.query.get(task.worker_id)
        worker.status = 'enabled'
        db.session.add(worker)
        http_request(worker.host, '/kill/' + str(task.pid), 'delete')
    # Delete the task only after its fields (worker_id, pid) have been used.
    db.session.delete(task)
    db.session.commit()
    return task_id, 202
def schedule():
    logging.info("Scheduling")
    task_queue = Task.query.filter_by(
        status='ready').order_by(Task.priority.desc())
    for task in task_queue:
        worker = get_availabe_worker()
        if worker is None:
            logging.debug("No worker available")
            break
        task.worker_id = worker.id
        task.status = 'running'

        # Pick per-platform paths according to the worker's operating system.
        if 'Darwin' in worker.system:
            setting_blender_path = app.config['BLENDER_PATH_OSX']
            setting_render_settings = app.config['SETTINGS_PATH_OSX']
            file_path = task.file_path_osx
            output_path = task.output_path_osx
        elif 'Windows' in worker.system:
            setting_blender_path = app.config['BLENDER_PATH_WIN']
            setting_render_settings = app.config['SETTINGS_PATH_WIN']
            file_path = task.file_path_win
            output_path = task.output_path_win
        else:
            setting_blender_path = app.config['BLENDER_PATH_LINUX']
            setting_render_settings = app.config['SETTINGS_PATH_LINUX']
            file_path = task.file_path_linux
            output_path = task.output_path_linux

        if setting_blender_path is None:
            logging.debug("Blender path is not set")
        blender_path = setting_blender_path

        if setting_render_settings is None:
            logging.warning("Render settings path not set!")
        render_settings = os.path.join(
            setting_render_settings, task.settings)

        options = {
            'task_id': task.id,
            'file_path': file_path,
            'blender_path': blender_path,
            'start': task.frame_current,
            'end': task.frame_end,
            'render_settings': render_settings,
            'output_path': output_path,
            'format': task.format}

        logging.info("send task %d" % task.server_id)
        pid = http_request(worker.host, '/execute_task', 'post', options)
        worker.status = 'busy'
        task.pid = int(pid['pid'])
        db.session.add(task)
        db.session.add(worker)
        db.session.commit()
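# `get_availabe_worker()` (sic) is defined elsewhere; schedule() only relies
# on it returning an enabled, idle Worker or None. A hypothetical sketch of
# such a lookup:
def get_availabe_worker():
    """Hypothetical sketch: pick an enabled worker with no current task."""
    return Worker.query.filter_by(
        status='enabled', current_task=None).first()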
def get(self, job_types=None):
    # TODO: stop referring to job_types using the name and start using a UUID
    # Get the worker UUID as identification for asking tasks
    uuid = Setting.query.filter_by(name='uuid').first()
    # Currently this is implemented as a GET, with the uuid argument optional.
    # In the future the uuid will be sent in the headers.
    task_generate_params = {'uuid': uuid.value}
    if job_types and job_types != "":
        task_generate_params['job_types'] = job_types
    joined_tasks_generate_url = join_url_params(
        '/tasks/generate', task_generate_params)
    r = http_request(
        app.config['FLAMENCO_SERVER'], joined_tasks_generate_url, 'get')
    return r, 200
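# `join_url_params()` is a small helper defined elsewhere; the code above
# only needs it to append a query string to a path. A minimal sketch,
# assuming plain string values:
try:
    from urllib import urlencode  # Python 2
except ImportError:
    from urllib.parse import urlencode  # Python 3


def join_url_params(path, params):
    """Hypothetical sketch: ('/tasks/generate', {'uuid': 'x'}) ->
    '/tasks/generate?uuid=x'."""
    return '{0}?{1}'.format(path, urlencode(params))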
def get(self, job_types=None):
    # TODO: stop referring to job_types using the name and start using a UUID
    # Get the worker token as identification for asking tasks
    token = Setting.query.filter_by(name='token').first()
    # Currently this is implemented as a GET, with the token argument optional.
    # In the future the token will be sent in the headers.
    args = task_management_parser.parse_args()
    worker = args['worker']
    task_generate_params = {'token': token.value}
    if job_types and job_types != "":
        task_generate_params['job_types'] = job_types
    if worker:
        task_generate_params['worker'] = worker
    joined_tasks_generate_url = join_url_params(
        '/tasks/generate', task_generate_params)
    r = http_request(
        app.config['FLAMENCO_SERVER'], joined_tasks_generate_url, 'get')
    return r, 200
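# `task_management_parser` is assumed to be another reqparse parser; only an
# optional `worker` argument is read above. A sketch under that assumption:
from flask_restful import reqparse

task_management_parser = reqparse.RequestParser()
task_management_parser.add_argument('worker', type=str)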
def get(self, task_id):
    """Entry point for a worker to request a task, which will be compiled
    on the fly according to the worker specs.
    """
    logging.debug("Scheduling")
    # TODO we will need to make this more robust, and give each worker a uuid
    ip_address = request.remote_addr
    worker = Worker.query.filter_by(ip_address=ip_address).first()
    if not worker:
        logging.debug("Worker is not registered")
        return 'Worker is not registered', 403

    worker.last_activity = datetime.now()
    db.session.commit()

    if worker.status == 'disabled':
        logging.debug("Worker is disabled")
        return 'Worker is disabled', 403

    worker.current_task = None
    worker.status = 'enabled'
    db.session.commit()

    # Ask the server for a task matching the worker's job types.
    tasks = TaskManagementApi().get(job_types=",".join(worker.job_types_list))
    if tasks[0] == ('', 404):
        return '', 400
    if not len(tasks) or not len(tasks[0]):
        return '', 400

    task = tasks[0]
    worker.current_task = task['id']
    worker.child_task = task['child_id']
    db.session.commit()

    managerstorage = app.config['MANAGER_STORAGE']
    jobpath = os.path.join(managerstorage, str(task['job_id']))
    if not os.path.exists(jobpath):
        os.mkdir(jobpath)

    # TODO make random name
    tmpfile = os.path.join(
        jobpath, 'jobfile_{0}.zip'.format(task['job_id']))
    lockfile = os.path.join(
        jobpath, 'jobfile_{0}.lock'.format(task['job_id']))
    if os.path.isfile(lockfile):
        # Another request is already downloading the job file.
        # Try and set the task back to waiting.
        params = dict(id=task_id, status='waiting')
        r = http_request(
            app.config['FLAMENCO_SERVER'],
            '/tasks/{0}'.format(task_id),
            'put', params=params)
        return '', 400

    # Make sure the cached job file is a valid zip before reusing it.
    zipok = True
    try:
        with ZipFile(tmpfile, 'r') as jobzip:
            jobzip.namelist()
    except Exception:
        zipok = False

    if not os.path.isfile(tmpfile) or not zipok:
        # Download the job file from the server, guarded by a lock file.
        with open(lockfile, 'w') as f:
            f.write("locked")
        r = requests.get(
            'http://{0}/jobs/file/{1}'.format(
                #'http://{0}/static/storage/{1}/{2}/jobfile_{2}.zip'.format(
                app.config['FLAMENCO_SERVER'], task['job_id']),
            stream=True)
        with open(tmpfile, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
        os.remove(lockfile)

    # Import the task compiler matching the task type, e.g.
    # application.task_compilers.<task type>.
    module_name = 'application.task_compilers.{0}'.format(task['type'])
    task_compiler = None
    try:
        module_loader = __import__(
            module_name, globals(), locals(), ['task_compiler'], 0)
        task_compiler = module_loader.task_compiler
    except ImportError as e:
        logging.error('Error loading module {0}, {1}'.format(module_name, e))
        return
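# The task dict returned by /tasks/generate is consumed above and in the
# continuation of this function below. Based on the keys it reads, the
# payload is assumed to look roughly like this (illustrative values only):
#
# task = {
#     'id': 7,             # stored as worker.current_task
#     'child_id': 8,       # dependent task whose output is unpacked later
#     'job_id': 3,         # used to build the cached job file path
#     'type': 'blender_render',    # selects application.task_compilers.<type>
#     'parser': 'blender_render',  # forwarded to the worker as task_parser
#     'settings': {},              # forwarded to the worker unchanged
#     'task_id': 7,                # forwarded to the worker
# }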
def patch(self, task_id): """Send updates to the server regarding the status of a task. TODO: update the function to be a PUT, it is more consistent and also follows the server""" args = status_parser.parse_args() ip_address = request.remote_addr worker = Worker.query.filter_by(ip_address=ip_address).first() if not worker: return 'Worker is not registered', 403 worker.last_activity = datetime.now() db.session.add(worker) db.session.commit() if worker.status == 'disabled': return 'Worker is disabled', 403 if not worker.current_task: return 'Task cancelled', 403 # If other workers are rendering the same task kill them others = Worker.query.filter( Worker.status == 'enabled', Worker.connection == 'online', Worker.id != worker.id, Worker.current_task == worker.current_task).count() if others > 0: return 'Duplicated task', 403 """for other in others: other.current_task = None db.session.add(other) db.session.commit()""" if args['status'] == 'active': if args['task_id']: worker.current_task = args['task_id'] worker.time_cost = args['time_cost'] worker.log = args['log'] worker.activity = args['activity'] worker.status = 'rendering' else: worker.current_task = None worker.status = 'enabled' db.session.add(worker) db.session.commit() """if args['task_id']: task = Task.query.filter_by(id=args['task_id']).first() if not task: return 'Task is cancelled', 403""" jobfile = None if args['taskfile']: managerstorage = app.config['MANAGER_STORAGE'] jobpath = os.path.join(managerstorage, str(args['job_id'])) try: os.mkdir(jobpath) except: pass zippath = os.path.join( jobpath, 'taskfileout_{0}_{1}.zip'.format(args['job_id'], task_id)) args['taskfile'].save(zippath) # Store dependencies if worker.child_task: deppath = os.path.join( jobpath, 'dependencies_{0}'.format(worker.child_task)) if not os.path.exists(deppath): os.mkdir(deppath) with ZipFile(zippath, 'r') as jobzip: jobzip.extractall(path=deppath) depzippath = os.path.join( jobpath, 'dependencies_{0}.zip'.format(worker.child_task)) with ZipFile(depzippath, 'w') as depzip: f = [] for dirpath, dirnames, filenames in os.walk(deppath): for fname in filenames: filepath = os.path.join(dirpath, fname) depzip.write(filepath, fname) # Send to server jobfile = [ ('taskfile', ( 'taskfile.zip', open(zippath, 'rb'), 'application/zip'))] params = { 'id': task_id, 'status': args['status'], 'time_cost': args['time_cost'], 'log': args['log'], # we the trimmed version of the log 'activity': args['activity']} r = http_request( app.config['FLAMENCO_SERVER'], '/tasks/{0}'.format(task_id), 'put', params=params, files=jobfile) if r[1] == 403: return '', 403 return '', 204
    task_command = task_compiler.compile(worker, task)
    if not task_command:
        logging.error("Can't compile {0}".format(task['type']))
        return

    options = {
        'task_id': task['task_id'],
        'task_parser': task['parser'],
        'settings': task['settings'],
        'task_command': json.dumps(task_command)}

    #logging.info("send task %d" % task.server_id)
    pid = http_request(worker.host, '/execute_task', 'post', options)
    try:
        # http_request returns a (body, status) pair when the worker errors.
        if pid[1] == 500:
            return False
    except Exception:
        pass

    worker.status = 'rendering'
    worker.current_task = task['task_id']
    db.session.add(worker)
    db.session.commit()
    return True


class TaskManagementApi(Resource):
    def get(self):
        r = http_request(
            app.config['BRENDER_SERVER'], '/tasks/generate', 'get')
        return r, 200
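# The dynamically imported `application.task_compilers.<task type>` modules
# are only required to expose a `task_compiler` object with a
# `compile(worker, task)` method returning the command to run (see the
# compile step above). A hypothetical minimal compiler module, assuming
# `task['settings']` is a dict with the keys shown:
class task_compiler(object):

    @staticmethod
    def compile(worker, task):
        """Hypothetical sketch: turn task settings into a command list."""
        settings = task['settings']
        return [
            settings.get('blender_path', 'blender'),
            '--background', settings.get('filepath', ''),
            '--render-frame', str(settings.get('frame_start', 1))]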