def _create_job_callback(job: Job, fut: asyncio.Future): exc = fut.exception() if not fut.cancelled() and exc: raise exc if fut.done(): job.meta_update(result=fut.result()) job.save()
def job_stage_callback(job: Job, stage: str, fut: Future) -> None:
    '''
    Generic job stage completion callback.

    Accepts a Job model, stage name, and the future. When this is called,
    it means the job stage has completed in some form or fashion. Uses
    `fut`'s result accessors to figure out what happened, records the
    result on the job, and dispatches the next stage of the pipeline.
    '''
    # BUG FIX: a cancelled future raises CancelledError from .exception()
    # (and .result()), so check for cancellation before calling either.
    # The original called fut.exception() first, which crashed on
    # cancelled futures instead of skipping them.
    if fut.cancelled():
        return
    exc = fut.exception()
    if exc:
        raise exc
    # First, update job metadata with the execution result. Done-callbacks
    # only run on completed futures, so result() will not block.
    if fut.done():
        job.meta_update(result=fut.result())
        job.save()

    # Dispatch the next futures chain, if applicable
    if stage == STAGE_FETCHING:
        # Check if the job has any destinations and trigger the
        # destinations executor
        if 'destinations' in job.meta_dict:
            # Spawn the uploader future
            # TODO: Add a marker in the job meta showing
            # that destination uploads are queued
            # NOTE(review): job_begin_upload is defined elsewhere in this
            # file as job_begin_upload(executor, job) — confirm this call
            # site's arity.
            job_begin_upload(job)
    elif stage == STAGE_UPLOADING:
        job.status = 'completed'
        job.save()
        log.info(f'job {job.id} has finished job pipeline')
def update_job(job_id: str):
    '''
    Update a job's status from an update payload.

    Looks up the job by ``job_id`` and persists the new status when it
    differs from the current one. Returns the job serialized as JSON.
    '''
    # TODO: Get new stats from update payload
    job_status = None
    # BUG FIX: the lookup previously used the builtin `id` instead of the
    # `job_id` parameter, so it never queried the requested job.
    job = Job.get(id=job_id)
    # Only persist a real status change; while the payload parsing above
    # is still a stub, a None status must not clobber the current one.
    if job_status is not None and job.status != job_status:
        job.status = job_status
        job.save()
    return job.to_json()
def job_begin_upload(executor: JobExecutor, job: Job) -> Future:
    '''
    Kick off the destination-upload stage for a job.

    Submits `upload_file` to the executor; that future spawns one future
    per destination upload, with concurrency bounded by the default size
    of the `ThreadPoolExecutor`. A stage callback is attached so the
    pipeline advances once uploading completes.
    '''
    # Mark the job as uploading before any work is scheduled.
    job.status = 'uploading'
    job.save()

    upload_future: Future = executor.execute_future(upload_file, job)
    upload_future.add_done_callback(
        partial(job_stage_callback, job, STAGE_UPLOADING),
    )
    return upload_future
def create_job(executor: JobExecutor):
    '''
    Create a new job from the request's JSON payload and queue its fetch.

    Validates that `url` and `profile` are present, resolves the profile,
    records the job, then schedules the fetcher on the executor with a
    completion callback. Returns the new job serialized as JSON, or an
    error response for a bad/unknown payload.
    '''
    payload = request.get_json()
    url = payload.get('url')
    profile = payload.get('profile')

    # Both fields are required; reject the request otherwise.
    if not (url and profile):
        return jsonify({
            'message': 'body must contain `url` and `profile`',
            'request': {'body': payload},
        }), status.CLIENT_ERROR

    try:
        profile = Profile.get(name=profile)
    except Profile.DoesNotExist:
        return jsonify({
            'message': 'profile not found',
            'query': {'profile': profile},
        }), status.NOT_FOUND

    # TODO: Make a new job record to use with the fetcher
    job_record = Job.create(status='queued', meta=json.dumps(payload))

    # Create a partial of the fetcher job instead of beginning exec
    fetch_future: asyncio.Future = executor.execute_future(
        youtubedl.fetch_url,
        job_record,
        profile,
    )
    fetch_future.add_done_callback(partial(_create_job_callback, job_record))

    return job_record.to_json()
def create_job():
    '''
    POST /job/

    Creates a new job from a JSON payload.
    '''
    payload = request.get_json()
    url = payload.get('url')
    profile = payload.get('profile')

    # Both fields are required; reject the request otherwise.
    if not (url and profile):
        return jsonify({
            'message': 'body must contain `url` and `profile`',
            'request': {'body': payload},
        }), status.BAD_REQUEST

    try:
        profile = Profile.get(name=profile)
    except Profile.DoesNotExist:
        return jsonify({
            'message': 'profile not found',
            'query': {'profile': profile},
        }), status.NOT_FOUND

    # TODO: Do validation of `destinations` list
    # TODO: Make sure each destination is included only once (collapse mult)
    job_record = Job.create(status='queued', meta=json.dumps(payload))

    stage.job_begin_fetch(job_record, profile)

    return job_record.to_json()
def _progress_hook(job: Job, info: dict): if job.status != info['status']: log.info('Job transitioning from {old_state} to {new_state}'.format( old_state=job.status, new_state=info['status'], )) # status == 'finished' means download is finished -- waiting # for post-processor chain to complete execution. if info['status'] == 'finished': job.status = 'processing' else: job.status = info['status'] job.meta_update(extractor=info) job.save()
def show_job(job_id: str):
    '''
    Return the job with the given id, serialized as JSON.
    '''
    job = Job.get(id=job_id)
    return job.to_json()