def test_invalid_request(app):
    """Test exceptions for invalid requests."""
    bad_quality = '522p'
    bad_ratio = '15:3'

    # A malformed aspect ratio must be rejected, and the offending value
    # must be echoed back in the error message.
    with pytest.raises(InvalidAspectRatioError) as exc:
        start_encoding('input', '', '480p', bad_ratio)
    assert bad_ratio in str(exc)

    # An unknown preset quality must be rejected; the error mentions both
    # the (valid) aspect ratio and the bad quality.
    with pytest.raises(InvalidResolutionError) as exc:
        start_encoding('input', '', bad_quality, '16:9')
    assert '16:9' in str(exc)
    assert bad_quality in str(exc)
def test_start_encoding(requests_post_mock, app, start_response):
    """Test if starting encoding works."""
    filename = ('file://cernbox-smb.cern.ch/eoscds/test/sorenson_input/'
                '1111-dddd-3333-aaaa/data.mp4')
    # Random preset from config
    aspect_ratio, quality = '16:9', '360p'

    # Fake a successful Sorenson reply for the POST request.
    fake_response = MagicMock()
    fake_response.text = start_response
    fake_response.status_code = 200
    requests_post_mock.return_value = fake_response

    assert start_encoding(filename, '', quality, aspect_ratio) == \
        "1234-2345-abcd"

    # Asking for a subformat taller than the master's height must fail.
    with pytest.raises(TooHighResolutionError):
        start_encoding(filename, '', quality, aspect_ratio, max_height=240)
def test_start_encoding(requests_post_mock, app, start_response):
    """Test if starting encoding works."""
    # NOTE(review): if this lives in the same module as the other
    # ``test_start_encoding``, the later definition shadows the earlier
    # one at import time — consider renaming one of them.
    filename = ('file://cernbox-smb.cern.ch/eoscds/test/sorenson_input/'
                '1111-dddd-3333-aaaa/data.mp4')
    aspect_ratio, quality = '16:9', '360p'  # random preset from config

    # Pretend Sorenson accepted the job.
    response = MagicMock()
    response.text = start_response
    response.status_code = 200
    requests_post_mock.return_value = response

    job_id = start_encoding(filename, '', quality, aspect_ratio)
    assert job_id == "1234-2345-abcd"
def run(self, preset_quality, sleep_time=5, *args, **kwargs):
    """Launch video transcoding.

    For each of the presets generate a new ``ObjectVersion`` tagged as
    slave with the preset name as key and a link to the master version.

    :param self: reference to instance of task base class
    :param preset_quality: preset quality to use for transcoding.
    :param sleep_time: time interval between requests for the Sorenson
        status.
    """
    # Record the preset in the task's base payload so every state update
    # below carries it.
    self._base_payload.update(preset_quality=preset_quality)

    # Get master file's bucket_id
    bucket_id = self.object.bucket_id
    bucket_location = self.object.bucket.location.uri
    # Get master file's key
    master_key = self.object.key

    tags = self.object.get_tags()
    # Get master file's aspect ratio
    aspect_ratio = tags['display_aspect_ratio']
    # Get master file's width x height; both are optional tags, so fall
    # back to None when the master was ingested without them.
    width = int(tags['width']) if 'width' in tags else None
    height = int(tags['height']) if 'height' in tags else None

    # Savepoint: create the DB records for the subformat before talking
    # to Sorenson, so a failure leaves no half-written rows.
    with db.session.begin_nested():
        # Create FileInstance
        file_instance = FileInstance.create()

        # Create ObjectVersion
        obj_key = self._build_slave_key(preset_quality=preset_quality,
                                        master_key=master_key)
        obj = ObjectVersion.create(bucket=bucket_id, key=obj_key)

        # Extract new location
        storage = file_instance.storage(default_location=bucket_location)
        directory, filename = storage._get_fs()

    input_file = self.object.file.uri
    output_file = os.path.join(directory.root_path, filename)
    try:
        # Start Sorenson
        job_id = start_encoding(input_file, output_file, preset_quality,
                                aspect_ratio, max_height=height,
                                max_width=width)
    except (InvalidResolutionError, TooHighResolutionError) as e:
        # The preset does not apply to this master (e.g. requested
        # resolution exceeds the source): revoke this task, do not fail.
        exception = self._meta_exception_envelope(exc=e)
        self.update_state(state=REVOKED, meta=exception)
        raise Ignore()

    # Set revoke handler, in case of an abrupt execution halt.
    self.set_revoke_handler(partial(stop_encoding, job_id))

    # Create ObjectVersionTags
    ObjectVersionTag.create(obj, 'master', self.obj_id)
    ObjectVersionTag.create(obj, '_sorenson_job_id', job_id)
    ObjectVersionTag.create(obj, 'preset_quality', preset_quality)
    ObjectVersionTag.create(obj, 'media_type', 'video')
    ObjectVersionTag.create(obj, 'context_type', 'subformat')
    # Copy every preset property (codec, bitrate, ...) onto the object.
    preset_info = get_preset_info(aspect_ratio, preset_quality)
    for key, value in preset_info.items():
        ObjectVersionTag.create(obj, key, value)

    # Information necessary for monitoring
    job_info = dict(
        preset_quality=preset_quality,
        job_id=job_id,
        file_instance=str(file_instance.id),
        uri=output_file,
        version_id=str(obj.version_id),
        key=obj_key,
        tags=obj.get_tags(),
        percentage=0,
    )

    db.session.commit()

    self.update_state(
        state=STARTED,
        meta=dict(payload=dict(**job_info),
                  message='Started transcoding.'))

    status = ''
    # Monitor job and report accordingly
    while status != 'Finished':
        # Get job status
        status, percentage = get_encoding_status(job_id)
        if status == 'Error':
            raise RuntimeError('Error transcoding')
        job_info['percentage'] = percentage

        # Update task's state for this preset
        self.update_state(
            state=STARTED,
            meta=dict(payload=dict(**job_info),
                      message='Transcoding {0}'.format(percentage)))

        time.sleep(sleep_time)

    # Set file's location, if job has completed
    self._clean_file_name(output_file)
    with db.session.begin_nested():
        uri = output_file
        # Checksum the transcoded output so the FileInstance record is
        # verifiable later.
        with open(uri, 'rb') as transcoded_file:
            digest = hashlib.md5(transcoded_file.read()).hexdigest()
        size = os.path.getsize(uri)
        checksum = '{0}:{1}'.format('md5', digest)
        file_instance.set_uri(uri, size, checksum)
        as_object_version(job_info['version_id']).set_file(file_instance)
    db.session.commit()
def video_transcode(self, object_version, video_presets=None, sleep_time=5,
                    **kwargs):
    """Launch video transcoding.

    For each of the presets generate a new ``ObjectVersion`` tagged as
    slave with the preset name as key and a link to the master version.

    :param object_version: Master video.
    :param video_presets: List of presets to use for transcoding. If
        ``None`` it will use the default values set in
        ``VIDEO_DEFAULT_PRESETS``.
    :param sleep_time: the time interval between requests for Sorenson
        status
    """
    object_version = as_object_version(object_version)

    # Base payload attached to every Celery state update.
    self._base_payload = dict(
        object_version=str(object_version.version_id),
        video_presets=video_presets,
        tags=object_version.get_tags(),
        deposit_id=kwargs.get('deposit_id', None),
        event_id=kwargs.get('event_id', None),
    )

    job_ids = deque()

    # Set handler for canceling all jobs
    def handler(signum, frame):
        # TODO handle better file deleting and ObjectVersion cleaning
        # FIX: the original used ``map(lambda _info: stop_encoding(
        # info['job_id']), job_ids)`` — a lazy iterator that is never
        # consumed on Python 3, and whose body referenced ``info``
        # instead of the lambda's ``_info`` argument, so no job was ever
        # stopped. Use an explicit loop instead.
        for info in job_ids:
            stop_encoding(info['job_id'])

    signal.signal(signal.SIGTERM, handler)

    # Get master file's bucket_id
    bucket_id = object_version.bucket_id
    bucket_location = object_version.bucket.location.uri

    preset_config = current_app.config['CDS_SORENSON_PRESETS']
    for preset in video_presets or preset_config.keys():
        with db.session.begin_nested():
            # Create FileInstance and get generated UUID
            file_instance = FileInstance.create()

            # Create ObjectVersion keyed "<master-name>-<preset><ext>".
            base_name = object_version.key.rsplit('.', 1)[0]
            new_extension = preset_config[preset][1]
            obj = ObjectVersion.create(
                bucket=bucket_id,
                key='{0}-{1}{2}'.format(base_name, preset, new_extension)
            )
            obj.set_file(file_instance)
            ObjectVersionTag.create(
                obj, 'master', str(object_version.version_id))
            ObjectVersionTag.create(obj, 'preset', preset)

            # Extract new location
            storage = file_instance.storage(
                default_location=bucket_location)
            directory, filename = storage._get_fs()

            # Start Sorenson
            input_file = object_version.file.uri
            output_file = os.path.join(directory.root_path, filename)
            job_id = start_encoding(input_file, preset, output_file)
            ObjectVersionTag.create(obj, '_sorenson_job_id', job_id)

            # Information necessary for monitoring this job later.
            job_info = dict(
                preset=preset,
                job_id=job_id,
                file_instance=str(file_instance.id),
                uri=output_file,
                object_version=str(obj.version_id),
                key=obj.key,
                tags=obj.get_tags(),
            )

        db.session.commit()

        self.update_state(
            state=STARTED,
            meta=dict(
                payload=dict(job_info=job_info),
                message='Started transcoding.'
            )
        )
        job_ids.append(job_info)

    # Monitor jobs and report accordingly
    while job_ids:
        info = job_ids.popleft()

        # Get job status
        status = get_encoding_status(info['job_id'])['Status']
        percentage = 100 if status['TimeFinished'] else status['Progress']
        info['percentage'] = percentage

        # Update task's state for each individual preset
        # FIX: report the job currently being polled (``info``); the
        # original passed the stale ``job_info`` left over from the
        # submission loop, so every progress update described the last
        # submitted preset.
        self.update_state(
            state=STARTED,
            meta=dict(
                payload=dict(job_info=info),
                message='Transcoding {0}'.format(percentage),
            )
        )

        # Set file's location for completed jobs
        if percentage == 100:
            with db.session.begin_nested():
                uri = info['uri']
                with open(uri, 'rb') as transcoded_file:
                    digest = hashlib.md5(
                        transcoded_file.read()).hexdigest()
                size = os.path.getsize(uri)
                checksum = '{0}:{1}'.format('md5', digest)
                FileInstance.get(
                    info['file_instance']).set_uri(uri, size, checksum)
            db.session.commit()
        else:
            # Job still running: re-queue it and back off before the
            # next poll.
            job_ids.append(info)
            time.sleep(sleep_time)