def test_PickleableObject(self):
    p1 = MyPoint(name='My point', x=6, y=-3)
    p1.write('test.pkl')
    p2 = MyPoint.read('test.pkl', store_path=True)
    self.dict_equal(p2.__dict__, {'y': -3, 'x': 6, '_pickle_path': 'test.pkl', 'name': 'My point'})
    p2.write()
    p2.write('test2.pkl')
    os.remove('test.pkl')
    os.remove('test2.pkl')
    p2.write()
    self.false(os.path.exists('test2.pkl'))
    self.equal(p2._pickle_path, 'test.pkl')
    os.remove('test.pkl')
    p2.write('test2.pkl', store_path=True)
    self.false(os.path.exists('test.pkl'))
    self.equal(p2._pickle_path, 'test2.pkl')
    del p2._pickle_path
    with self.raises(ValueError):
        p2.write()
    os.remove('test2.pkl')
    try_remove('test3.pkl')
    p3 = MyPoint.read('test3.pkl', store_path=True, create_if_error=True, name='Default point', x=3, y=-6)
    self.dict_equal(p3.__dict__, {'x': 3, 'y': -6, '_pickle_path': 'test3.pkl', 'name': 'Default point'})
    os.remove('test3.pkl')
    with self.raises(IOError):
        MyPoint.read('test3.pkl')
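# NOTE: the PickleableObject tests in this section construct a ``MyPoint`` fixture that is not shown
# here. A minimal sketch of what such a fixture could look like, assuming pytoolbox's
# PickleableObject mix-in; the module path and the plain attribute-holding __init__ are assumptions
# based only on how the tests use it, not the actual fixture from the test suite:
from pytoolbox.serialization import PickleableObject


class MyPoint(PickleableObject):

    def __init__(self, name=None, x=0, y=0):
        # Simple value object: read()/write() and the pickle-path bookkeeping come from the mix-in.
        self.name = name
        self.x = x
        self.y = y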
def main():
    module = AnsibleModule(
        argument_spec=dict(
            directory=dict(required=True, type='path'),
            keep=dict(default=None, type='int'),
            regexp=dict(default=r'[0-9]{8}T[0-9]{12}')),
        supports_check_mode=True)

    directory, keep, regexp = (module.params[k] for k in ('directory', 'keep', 'regexp'))
    regexp = re.compile(regexp)

    if keep is not None and keep < 1:
        module.fail_json(msg="'keep' should be a positive number")

    # FIXME releases lacking the "complete" flag should be removed first
    releases = (os.path.join(directory, r) for r in os.listdir(directory) if regexp.match(r))
    oldest_releases = [] if keep is None else sorted(releases, reverse=True)[keep:]
    before = [{'path': r, 'state': get_state(r)} for r in oldest_releases]

    if not module.check_mode:
        for release in oldest_releases:
            try_remove(release, recursive=True)

    module.exit_json(changed=bool(oldest_releases), diff={
        'after': [{'path': r, 'state': 'absent'} for r in oldest_releases],
        'before': before
    })
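# NOTE: ``get_state`` is called by main() above but not defined in this snippet. A hypothetical
# helper consistent with its use in the Ansible ``diff`` (reporting each release's state before
# removal) might look like this; the exact state labels are an assumption, not documented values:
import os


def get_state(path):
    """Return an Ansible-style state label for the given release path."""
    if os.path.isdir(path):
        return 'directory'
    if os.path.exists(path):
        return 'file'
    return 'absent'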
def test_PickleableObject(self):
    p1 = MyPoint(name=u'My point', x=6, y=-3)
    p1.write(u'test.pkl')
    p2 = MyPoint.read(u'test.pkl', store_filename=True)
    assert_equal(p2.__dict__,
                 {u'y': -3, u'x': 6, u'_pickle_filename': u'test.pkl', u'name': u'My point'})
    p2.write()
    p2.write(u'test2.pkl')
    os.remove(u'test.pkl')
    os.remove(u'test2.pkl')
    p2.write()
    assert(not os.path.exists(u'test2.pkl'))
    assert_equal(p2._pickle_filename, u'test.pkl')
    os.remove(u'test.pkl')
    p2.write(u'test2.pkl', store_filename=True)
    assert(not os.path.exists(u'test.pkl'))
    assert_equal(p2._pickle_filename, u'test2.pkl')
    del p2._pickle_filename
    assert_raises(ValueError, p2.write)
    os.remove(u'test2.pkl')
    try_remove(u'test3.pkl')
    p3 = MyPoint.read(u'test3.pkl', store_filename=True, create_if_error=True,
                      name=u'Default point', x=3, y=-6)
    assert_equal(p3.__dict__,
                 {u'x': 3, u'y': -6, u'_pickle_filename': u'test3.pkl', u'name': u'Default point'})
    os.remove(u'test3.pkl')
    assert_raises(IOError, MyPoint.read, u'test3.pkl')
def post_install():
    from encodebox import lib
    from pytoolbox.console import confirm
    from pytoolbox.encoding import to_bytes
    from pytoolbox.filesystem import chown, from_template, try_makedirs, try_remove
    from pytoolbox.network.http import download

    if not exists(u'/usr/local/bin/neroAacEnc'):
        try:
            print(u'Download and install Nero AAC encoder')
            download(u'ftp://ftp6.nero.com/tools/NeroDigitalAudio.zip', u'/tmp/nero.zip')
            zipfile.ZipFile(u'/tmp/nero.zip').extract(u'linux/neroAacEnc', u'/usr/local/bin')
            os.chmod(u'/usr/local/bin/neroAacEnc',
                     os.stat(u'/usr/local/bin/neroAacEnc').st_mode | stat.S_IEXEC)
        finally:
            try_remove(u'/tmp/nero.zip')

    filename = lib.SETTINGS_FILENAME
    settings = lib.load_settings(u'etc/config.yaml')
    if not exists(filename) or confirm(u'Overwrite existing configuration file "{0}"'.format(filename)):
        print(u'Generate configuration file "{0}"'.format(filename))
        password = lib.generate_password()
        settings[u'rabbit_password'] = password
        lib.save_settings(filename, settings)

    print(u'Configure RabbitMQ Message Broker')
    check_call([u'service', u'rabbitmq-server', u'start'])
    call([u'rabbitmqctl', u'add_vhost', u'/'])
    call([u'rabbitmqctl', u'delete_user', u'guest'])
    call([u'rabbitmqctl', u'delete_user', u'encodebox'])
    call([u'rabbitmqctl', u'add_user', u'encodebox', settings[u'rabbit_password']])
    check_call([u'rabbitmqctl', u'set_permissions', u'-p', u'/', u'encodebox', u'.*', u'.*', u'.*'])
    users, vhosts = lib.rabbit_users(), lib.rabbit_vhosts()
    print(u'RabbitMQ users: {0} vhosts: {1}'.format(users, vhosts))
    if u'guest' in users or u'encodebox' not in users:
        raise RuntimeError(to_bytes(u'Unable to configure RabbitMQ'))

    print(u'Create directory for storing persistent data')
    try_makedirs(lib.LIB_DIRECTORY)
    chown(lib.LIB_DIRECTORY, lib.USERNAME, pwd.getpwnam(lib.USERNAME).pw_gid, recursive=True)

    print(u'Register and start our services as user ' + lib.USERNAME)
    from_template(u'etc/encodebox.conf.template', u'/etc/supervisor/conf.d/encodebox.conf',
                  {u'lib_directory': lib.LIB_DIRECTORY, u'user': lib.USERNAME})
    call([u'service', u'supervisor', u'force-reload'])
def test_encode(self):
    results = list(self.ffmpeg.encode(Media('small.mp4'), Media('ff_output.mp4', '-c:a copy -c:v copy')))
    self.assertTrue(try_remove('ff_output.mp4'))
    self.assertEqual(results[-1].state, EncodeState.SUCCESS)

    results = list(self.ffmpeg.encode(Media('small.mp4'), Media('ff_output.mp4', 'crazy_option')))
    self.assertFalse(try_remove('ff_output.mp4'))
    self.assertEqual(results[-1].state, EncodeState.FAILURE)

    results = list(self.ffmpeg.encode([Media('missing.mp4')], Media('ff_output.mp4', '-c:a copy -c:v copy')))
    self.assertFalse(try_remove('ff_output.mp4'))
    self.assertEqual(results[-1].state, EncodeState.FAILURE)
def test_kill_process_handle_missing(self):
    encoder = RaiseFFmpeg()
    with self.assertRaises(ValueError):
        list(encoder.encode('small.mp4', 'ff_output.mp4', '-c:a copy -c:v copy'))
    self.assertTrue(try_remove('ff_output.mp4'))
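# NOTE: every snippet in this section relies on ``try_remove`` from pytoolbox.filesystem. The tests
# above assert that it returns True when something was actually deleted and False when the path was
# already missing, and the Ansible module calls it with recursive=True. A minimal sketch consistent
# with that observed behaviour (an assumption, not the library's actual implementation):
import os
import shutil


def try_remove(path, recursive=False):
    """Remove ``path`` if it exists; return True if something was removed, False otherwise."""
    try:
        if recursive and os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)
        return True
    except OSError:
        return False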
def test_PickleableObject(self):
    p1 = MyPoint(name='My point', x=6, y=-3)
    p1.write('test.pkl')
    p2 = MyPoint.read('test.pkl', store_filename=True)
    self.assertDictEqual(p2.__dict__, {
        'y': -3, 'x': 6, '_pickle_filename': 'test.pkl', 'name': 'My point'
    })
    p2.write()
    p2.write('test2.pkl')
    os.remove('test.pkl')
    os.remove('test2.pkl')
    p2.write()
    self.assertFalse(os.path.exists('test2.pkl'))
    self.assertEqual(p2._pickle_filename, 'test.pkl')
    os.remove('test.pkl')
    p2.write('test2.pkl', store_filename=True)
    self.assertFalse(os.path.exists('test.pkl'))
    self.assertEqual(p2._pickle_filename, 'test2.pkl')
    del p2._pickle_filename
    with self.assertRaises(ValueError):
        p2.write()
    os.remove('test2.pkl')
    try_remove('test3.pkl')
    p3 = MyPoint.read('test3.pkl', store_filename=True, create_if_error=True,
                      name='Default point', x=3, y=-6)
    self.assertDictEqual(p3.__dict__, {
        'x': 3, 'y': -6, '_pickle_filename': 'test3.pkl', 'name': 'Default point'
    })
    os.remove('test3.pkl')
    with self.assertRaises(IOError):
        MyPoint.read('test3.pkl')
def subordinate_unregister(self):
    self.info(u'Unregister the Orchestrator')
    self.local_config.api_nat_socket = u''
    try_remove(self.local_config.celery_config_file)
    self.cmd(u'update-rc.d -f {0} remove'.format(self.local_config.worker_name))
a = "activity-{0}-install.png".format(name)
b = "activity-{0}-config-changed.png".format(name)
c = "activity-{0}-start.png".format(name)
d = "activity-{0}-stop.png".format(name)
e = "activity-{0}-hooks.png".format(name)
results = [0] * 2
results[0] = cmd(["convert", a, b, c, d, "+append", e], fail=False)
if results[0]["returncode"] != 0:
    results[1] = cmd(["convert", a, c, d, "+append", e], fail=False)
    if results[1]["returncode"] != 0:
        print_error(
            "Unable to append {0}'s hooks UMLs images, reasons: {1}, {2}.".format(
                name, results[0]["stderr"], results[1]["stderr"]
            )
        )
# Clean up the intermediate images with a plain loop (the previous generator expression was never iterated)
for f in (a, b, c, d):
    try_remove(f)

print("Generate reStructuredText from templates")
for template_filename in glob.glob(join(DAVID_REPORT_SOURCE_PATH, "*.rst.template")):
    # Strip the ".template" suffix to get the target .rst path
    rst_filename = splitext(template_filename)[0]
    with open(template_filename, "r", "utf-8") as template_file:
        data = template_file.read().replace("SVN_REVISION", revision)
    with open(rst_filename, "w", "utf-8") as rst_file:
        rst_file.write(data)

print("Generate links into common file")
common_data = references_data = ""
with open(DAVID_REPORT_LINKS_FILE, "r", "utf-8") as links_file:
    for columns in [line.split(";") for line in links_file.readlines()]:
        if len(columns) == 1:
            references_data += columns[0]
def tearDown(self):
    try_remove(SETTINGS_FILENAME)
    shutil.rmtree(LOCAL_DIRECTORY)
def transform_task(media_in_json, media_out_json, profile_json, callback_json):

    def copy_callback(start_date, elapsed_time, eta_time, src_size, dst_size, ratio):
        transform_task.update_state(state=TransformTask.PROGRESS, meta={
            u'hostname': request.hostname, 'start_date': start_date, u'elapsed_time': elapsed_time,
            u'eta_time': eta_time, u'media_in_size': src_size, u'media_out_size': dst_size,
            u'percent': int(100 * ratio)})

    def transform_callback(status, measures):
        data_json = object2json({u'task_id': request.id, u'status': status, u'measures': measures},
                                include_properties=False)
        if callback is None:
            print(u'{0} [ERROR] Unable to callback orchestrator: {1}'.format(request.id, data_json))
        else:
            r = callback.post(data_json)
            print(u'{0} Code {1} {2} : {3}'.format(request.id, r.status_code, r.reason, r._content))

    # ------------------------------------------------------------------------------------------------------------

    RATIO_DELTA, TIME_DELTA = 0.01, 1  # Update status if at least 1% of progress and 1 second elapsed.
    MAX_TIME_DELTA = 5                 # Also ensure status update every 5 seconds.
    DASHCAST_TIMEOUT_TIME = 10

    try:
        # Avoid 'referenced before assignment'
        callback = dashcast_conf = None
        encoder_out, request = u'', current_task.request

        # Let the task begin!
        print(u'{0} Transformation task started'.format(request.id))

        # Read current configuration to translate file URIs to local paths
        local_config = TransformLocalConfig.read(LOCAL_CONFIG_FILENAME, inspect_constructor=False)
        print(object2json(local_config, include_properties=True))

        # Load and check task parameters
        callback = Callback.from_json(callback_json, inspect_constructor=True)
        callback.is_valid(True)

        # Update callback socket according to configuration
        if local_config.api_nat_socket and len(local_config.api_nat_socket) > 0:
            callback.replace_netloc(local_config.api_nat_socket)

        media_in = Media.from_json(media_in_json, inspect_constructor=True)
        media_out = Media.from_json(media_out_json, inspect_constructor=True)
        profile = TransformProfile.from_json(profile_json, inspect_constructor=True)
        media_in.is_valid(True)
        media_out.is_valid(True)
        profile.is_valid(True)

        # Verify that media file can be accessed and create output path
        media_in_path = local_config.storage_medias_path(media_in, generate=False)
        if not media_in_path:
            raise NotImplementedError(to_bytes(u'Input media asset will not be read from shared storage : {0}'.format(
                media_in.uri)))
        media_out_path = local_config.storage_medias_path(media_out, generate=True)
        if not media_out_path:
            raise NotImplementedError(to_bytes(u'Output media asset will not be written to shared storage : {0}'.format(
                media_out.uri)))
        media_in_root = dirname(media_in_path)
        media_out_root = dirname(media_out_path)
        try_makedirs(media_out_root)

        # Get input media duration and frames to be able to estimate ETA
        media_in_duration = get_media_duration(media_in_path)

        # Keep potential PSNR status
        measures = {}

        # NOT A REAL TRANSFORM : FILE COPY ---------------------------------------------------------------------
        if profile.encoder_name == u'copy':
            infos = recursive_copy(media_in_root, media_out_root, copy_callback, RATIO_DELTA, TIME_DELTA)
            media_out_tmp = media_in_path.replace(media_in_root, media_out_root)
            os.rename(media_out_tmp, media_out_path)
            start_date = infos[u'start_date']
            elapsed_time = infos[u'elapsed_time']
            media_in_size = infos[u'src_size']

        # A REAL TRANSFORM : TRANSCODE WITH FFMPEG -------------------------------------------------------------
        elif profile.encoder_name == u'ffmpeg':
            start_date, start_time = datetime_now(), time.time()
            prev_ratio = prev_time = 0

            # Get input media size to be able to estimate ETA
            media_in_size = get_size(media_in_root)

            # Create FFmpeg subprocess
            cmd = u'ffmpeg -y -i "{0}" {1} "{2}"'.format(media_in_path, profile.encoder_string, media_out_path)
            print(cmd)
            ffmpeg = Popen(shlex.split(cmd), stderr=PIPE, close_fds=True)
            make_async(ffmpeg.stderr)

            while True:
                # Wait for data to become available
                select.select([ffmpeg.stderr], [], [])
                chunk = ffmpeg.stderr.read()
                encoder_out += chunk
                elapsed_time = time.time() - start_time
                match = FFMPEG_REGEX.match(chunk)
                if match:
                    stats = match.groupdict()
                    media_out_duration = stats[u'time']
                    try:
                        ratio = total_seconds(media_out_duration) / total_seconds(media_in_duration)
                        ratio = 0.0 if ratio < 0.0 else 1.0 if ratio > 1.0 else ratio
                    except ZeroDivisionError:
                        ratio = 1.0
                    delta_time = elapsed_time - prev_time
                    if (ratio - prev_ratio > RATIO_DELTA and delta_time > TIME_DELTA) or delta_time > MAX_TIME_DELTA:
                        prev_ratio, prev_time = ratio, elapsed_time
                        eta_time = int(elapsed_time * (1.0 - ratio) / ratio) if ratio > 0 else 0
                        transform_task.update_state(
                            state=TransformTask.PROGRESS,
                            meta={u'hostname': request.hostname, u'start_date': start_date,
                                  u'elapsed_time': elapsed_time, u'eta_time': eta_time,
                                  u'media_in_size': media_in_size, u'media_in_duration': media_in_duration,
                                  u'media_out_size': get_size(media_out_root),
                                  u'media_out_duration': media_out_duration, u'percent': int(100 * ratio),
                                  u'encoding_frame': stats[u'frame'], u'encoding_fps': stats[u'fps'],
                                  u'encoding_bitrate': stats[u'bitrate'], u'encoding_quality': stats[u'q']})
                returncode = ffmpeg.poll()
                if returncode is not None:
                    break

            # FFmpeg output sanity check
            if returncode != 0:
                raise OSError(to_bytes(u'FFmpeg return code is {0}, encoding probably failed.'.format(returncode)))

            # Compute stats about the video
            measures['psnr'] = get_media_psnr(media_in_path, media_out_path)
            measures['ssim'] = get_media_ssim(media_in_path, media_out_path)
            # Measures of the data and its metadata
            measures['bitrate'] = get_media_bitrate(media_out_path)
            # FIXME: fake git url, commit
            measures['git_url'] = 'https://github.com/videolan/x265'
            measures['git_commit'] = 'd2051f9544434612a105d2f5267db23018cb3454'

            # Output media file sanity check
            # media_out_duration = get_media_duration(media_out_path)
            # if total_seconds(media_out_duration) / total_seconds(media_in_duration) > 1.5 or < 0.8:
            #     salut

        elif profile.encoder_name == u'from_git':
            start_date, start_time = datetime_now(), time.time()
            prev_ratio = prev_time = 0

            # Get input media size to be able to estimate ETA
            media_in_size = get_size(media_in_root)

            metadata = media_out.metadata
            dirpath = tempfile.mkdtemp()
            prepare_cmd = u'git clone --depth=1 "{0}" "{1}" && cd "{1}" && git checkout "{2}" && {3}'.format(
                metadata['git_url'], dirpath, metadata['git_commit'], metadata['build_cmds'])
            check_call(prepare_cmd, shell=True)

            # Templated parameter
            encoder_string = profile.encoder_string.replace(u"BITRATE", str(metadata['input_bitrate']))
            cmd = u'cd "{0}" && ffmpeg -y -i "{1}" -f yuv4mpegpipe - | {2} "{3}"'.format(
                dirpath, media_in_path, encoder_string, media_out_path)
            returncode = call(cmd, shell=True)
            if returncode != 0:
                raise OSError(to_bytes(u'Encoding return code is {0}, encoding probably failed.'.format(returncode)))

            # Compute stats about the video
            measures['psnr'] = get_media_psnr(media_in_path, media_out_path)
            measures['ssim'] = get_media_ssim(media_in_path, media_out_path)
            # Measures of the data and its metadata
            measures['bitrate'] = get_media_bitrate(media_out_path)
            # FIXME: don't put this in measures
            measures['git_url'] = metadata['git_url']
            measures['git_commit'] = metadata['git_commit']

        # A REAL TRANSFORM : TRANSCODE WITH DASHCAST -----------------------------------------------------------
        elif profile.encoder_name == u'dashcast':
            start_date, start_time = datetime_now(), time.time()
            prev_ratio = prev_time = 0

            # Get input media size and frames to be able to estimate ETA
            media_in_size = get_size(media_in_root)
            try:
                media_in_frames = int(get_media_tracks(media_in_path)[u'video'][u'0:0'][u'estimated_frames'])
                media_out_frames = 0
            except:
                raise ValueError(to_bytes(u'Unable to estimate # frames of input media asset'))

            # Create DashCast configuration file and subprocess
            dashcast_conf = u'dashcast_{0}.conf'.format(uuid.uuid4())
            with open(dashcast_conf, u'w', u'utf-8') as f:
                f.write(profile.dash_config)
            cmd = u'DashCast -conf {0} -av "{1}" {2} -out "{3}" -mpd "{4}"'.format(
                dashcast_conf, media_in_path, profile.dash_options, media_out_root, media_out.filename)
            print(cmd)
            dashcast = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE, close_fds=True)
            make_async(dashcast.stdout.fileno())
            make_async(dashcast.stderr.fileno())

            while True:
                # Wait for data to become available
                select.select([dashcast.stdout.fileno()], [], [])
                stdout, stderr = read_async(dashcast.stdout), read_async(dashcast.stderr)
                elapsed_time = time.time() - start_time
                match = DASHCAST_REGEX.match(stdout)
                if match:
                    stats = match.groupdict()
                    media_out_frames = int(stats[u'frame'])
                    try:
                        ratio = float(media_out_frames) / media_in_frames
                        ratio = 0.0 if ratio < 0.0 else 1.0 if ratio > 1.0 else ratio
                    except ZeroDivisionError:
                        ratio = 1.0
                    delta_time = elapsed_time - prev_time
                    if (ratio - prev_ratio > RATIO_DELTA and delta_time > TIME_DELTA) or delta_time > MAX_TIME_DELTA:
                        prev_ratio, prev_time = ratio, elapsed_time
                        eta_time = int(elapsed_time * (1.0 - ratio) / ratio) if ratio > 0 else 0
                        transform_task.update_state(
                            state=TransformTask.PROGRESS,
                            meta={u'hostname': request.hostname, u'start_date': start_date,
                                  u'elapsed_time': elapsed_time, u'eta_time': eta_time,
                                  u'media_in_size': media_in_size, u'media_in_duration': media_in_duration,
                                  u'media_out_size': get_size(media_out_root), u'percent': int(100 * ratio),
                                  u'encoding_frame': media_out_frames})
                match = DASHCAST_SUCCESS_REGEX.match(stdout)
                returncode = dashcast.poll()
                if returncode is not None or match:
                    encoder_out = u'stdout: {0}\nstderr: {1}'.format(stdout, stderr)
                    break
                if media_out_frames == 0 and elapsed_time > DASHCAST_TIMEOUT_TIME:
                    encoder_out = u'stdout: {0}\nstderr: {1}'.format(stdout, stderr)
                    raise OSError(to_bytes(u'DashCast does not output frame number, encoding probably failed.'))

            # DashCast output sanity check
            if not exists(media_out_path):
                raise OSError(to_bytes(u'Output media asset not found, DashCast encoding probably failed.'))
            if returncode != 0:
                raise OSError(to_bytes(u'DashCast return code is {0}, encoding probably failed.'.format(returncode)))
            # FIXME check duration too !

        # Here all seems okay ------------------------------------------------------------------------------------
        elapsed_time = time.time() - start_time
        media_out_size = get_size(media_out_root)
        media_out_duration = get_media_duration(media_out_path)
        print(u'{0} Transformation task successful, output media asset {1}'.format(request.id, media_out.filename))
        transform_callback(TransformTask.SUCCESS, measures)
        return {u'hostname': request.hostname, u'start_date': start_date, u'elapsed_time': elapsed_time,
                u'eta_time': 0, u'media_in_size': media_in_size, u'media_in_duration': media_in_duration,
                u'media_out_size': media_out_size, u'media_out_duration': media_out_duration, u'percent': 100}

    except Exception as error:
        # Here something went wrong
        print(u'{0} Transformation task failed'.format(request.id))
        transform_callback(u'ERROR\n{0}\n\nOUTPUT\n{1}'.format(unicode(error), encoder_out), {})
        raise

    finally:
        if dashcast_conf:
            try_remove(dashcast_conf)
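# NOTE: both transform_task variants parse FFmpeg's stderr progress lines with ``FFMPEG_REGEX``,
# reading the named groups frame, fps, q, time and bitrate. The real pattern is not included in this
# section; a plausible sketch matching standard FFmpeg progress output (an assumption, shown only to
# make the parsing loop above self-explanatory):
import re

FFMPEG_REGEX = re.compile(
    r'frame=\s*(?P<frame>\d+)\s+fps=\s*(?P<fps>\d+\.?\d*)\s+q=\s*(?P<q>\S+)\s+'
    r'\S*size=\s*(?P<size>\S+)\s+time=\s*(?P<time>\S+)\s+bitrate=\s*(?P<bitrate>\S+)')

# Example of the kind of stderr line such a pattern would match:
# 'frame=  123 fps= 25 q=28.0 size=    1024kB time=00:00:05.12 bitrate=1638.4kbits/s'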
def transform_task(media_in_json, media_out_json, profile_json, callback_json):

    def copy_callback(start_date, elapsed_time, eta_time, src_size, dst_size, ratio):
        transform_task.update_state(
            state=TransformTask.PROGRESS,
            meta={
                "hostname": request.hostname,
                "start_date": start_date,
                "elapsed_time": elapsed_time,
                "eta_time": eta_time,
                "media_in_size": src_size,
                "media_out_size": dst_size,
                "percent": int(100 * ratio),
            },
        )

    def transform_callback(status):
        data_json = object2json({"task_id": request.id, "status": status}, include_properties=False)
        if callback is None:
            print("{0} [ERROR] Unable to callback orchestrator: {1}".format(request.id, data_json))
        else:
            r = callback.post(data_json)
            print("{0} Code {1} {2} : {3}".format(request.id, r.status_code, r.reason, r._content))

    # ------------------------------------------------------------------------------------------------------------

    RATIO_DELTA, TIME_DELTA = 0.01, 1  # Update status if at least 1% of progress and 1 second elapsed.
    MAX_TIME_DELTA = 5                 # Also ensure status update every 5 seconds.
    DASHCAST_TIMEOUT_TIME = 10

    try:
        # Avoid 'referenced before assignment'
        callback = dashcast_conf = None
        encoder_out, request = "", current_task.request

        # Let the task begin!
        print("{0} Transformation task started".format(request.id))

        # Read current configuration to translate file URIs to local paths
        local_config = TransformLocalConfig.read(LOCAL_CONFIG_FILENAME, inspect_constructor=False)
        print(object2json(local_config, include_properties=True))

        # Load and check task parameters
        callback = Callback.from_json(callback_json, inspect_constructor=True)
        callback.is_valid(True)

        # Update callback socket according to configuration
        if local_config.api_nat_socket and len(local_config.api_nat_socket) > 0:
            callback.replace_netloc(local_config.api_nat_socket)

        media_in = Media.from_json(media_in_json, inspect_constructor=True)
        media_out = Media.from_json(media_out_json, inspect_constructor=True)
        profile = TransformProfile.from_json(profile_json, inspect_constructor=True)
        media_in.is_valid(True)
        media_out.is_valid(True)
        profile.is_valid(True)

        # Verify that media file can be accessed and create output path
        media_in_path = local_config.storage_medias_path(media_in, generate=False)
        if not media_in_path:
            raise NotImplementedError(
                to_bytes("Input media asset will not be read from shared storage : {0}".format(media_in.uri))
            )
        media_out_path = local_config.storage_medias_path(media_out, generate=True)
        if not media_out_path:
            raise NotImplementedError(
                to_bytes("Output media asset will not be written to shared storage : {0}".format(media_out.uri))
            )
        media_in_root = dirname(media_in_path)
        media_out_root = dirname(media_out_path)
        try_makedirs(media_out_root)

        # Get input media duration and frames to be able to estimate ETA
        media_in_duration = get_media_duration(media_in_path)

        # NOT A REAL TRANSFORM : FILE COPY ---------------------------------------------------------------------
        if profile.encoder_name == "copy":
            infos = recursive_copy(media_in_root, media_out_root, copy_callback, RATIO_DELTA, TIME_DELTA)
            media_out_tmp = media_in_path.replace(media_in_root, media_out_root)
            os.rename(media_out_tmp, media_out_path)
            start_date = infos["start_date"]
            elapsed_time = infos["elapsed_time"]
            media_in_size = infos["src_size"]

        # A REAL TRANSFORM : TRANSCODE WITH FFMPEG -------------------------------------------------------------
        elif profile.encoder_name == "ffmpeg":
            start_date, start_time = datetime_now(), time.time()
            prev_ratio = prev_time = 0

            # Get input media size to be able to estimate ETA
            media_in_size = get_size(media_in_root)

            # Create FFmpeg subprocess
            cmd = 'ffmpeg -y -i "{0}" {1} "{2}"'.format(media_in_path, profile.encoder_string, media_out_path)
            print(cmd)
            ffmpeg = Popen(shlex.split(cmd), stderr=PIPE, close_fds=True)
            make_async(ffmpeg.stderr)

            while True:
                # Wait for data to become available
                select.select([ffmpeg.stderr], [], [])
                chunk = ffmpeg.stderr.read()
                encoder_out += chunk
                elapsed_time = time.time() - start_time
                match = FFMPEG_REGEX.match(chunk)
                if match:
                    stats = match.groupdict()
                    media_out_duration = stats["time"]
                    try:
                        ratio = total_seconds(media_out_duration) / total_seconds(media_in_duration)
                        ratio = 0.0 if ratio < 0.0 else 1.0 if ratio > 1.0 else ratio
                    except ZeroDivisionError:
                        ratio = 1.0
                    delta_time = elapsed_time - prev_time
                    if (ratio - prev_ratio > RATIO_DELTA and delta_time > TIME_DELTA) or delta_time > MAX_TIME_DELTA:
                        prev_ratio, prev_time = ratio, elapsed_time
                        eta_time = int(elapsed_time * (1.0 - ratio) / ratio) if ratio > 0 else 0
                        transform_task.update_state(
                            state=TransformTask.PROGRESS,
                            meta={
                                "hostname": request.hostname,
                                "start_date": start_date,
                                "elapsed_time": elapsed_time,
                                "eta_time": eta_time,
                                "media_in_size": media_in_size,
                                "media_in_duration": media_in_duration,
                                "media_out_size": get_size(media_out_root),
                                "media_out_duration": media_out_duration,
                                "percent": int(100 * ratio),
                                "encoding_frame": stats["frame"],
                                "encoding_fps": stats["fps"],
                                "encoding_bitrate": stats["bitrate"],
                                "encoding_quality": stats["q"],
                            },
                        )
                returncode = ffmpeg.poll()
                if returncode is not None:
                    break

            # FFmpeg output sanity check
            if returncode != 0:
                raise OSError(to_bytes("FFmpeg return code is {0}, encoding probably failed.".format(returncode)))

            # Output media file sanity check
            # media_out_duration = get_media_duration(media_out_path)
            # if total_seconds(media_out_duration) / total_seconds(media_in_duration) > 1.5 or < 0.8:
            #     salut

        # A REAL TRANSFORM : TRANSCODE WITH DASHCAST -----------------------------------------------------------
        elif profile.encoder_name == "dashcast":
            start_date, start_time = datetime_now(), time.time()
            prev_ratio = prev_time = 0

            # Get input media size and frames to be able to estimate ETA
            media_in_size = get_size(media_in_root)
            try:
                media_in_frames = int(get_media_tracks(media_in_path)["video"]["0:0"]["estimated_frames"])
                media_out_frames = 0
            except:
                raise ValueError(to_bytes("Unable to estimate # frames of input media asset"))

            # Create DashCast configuration file and subprocess
            dashcast_conf = "dashcast_{0}.conf".format(uuid.uuid4())
            with open(dashcast_conf, "w", "utf-8") as f:
                f.write(profile.dash_config)
            cmd = 'DashCast -conf {0} -av "{1}" {2} -out "{3}" -mpd "{4}"'.format(
                dashcast_conf, media_in_path, profile.dash_options, media_out_root, media_out.filename
            )
            print(cmd)
            dashcast = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE, close_fds=True)
            make_async(dashcast.stdout.fileno())
            make_async(dashcast.stderr.fileno())

            while True:
                # Wait for data to become available
                select.select([dashcast.stdout.fileno()], [], [])
                stdout, stderr = read_async(dashcast.stdout), read_async(dashcast.stderr)
                elapsed_time = time.time() - start_time
                match = DASHCAST_REGEX.match(stdout)
                if match:
                    stats = match.groupdict()
                    media_out_frames = int(stats["frame"])
                    try:
                        ratio = float(media_out_frames) / media_in_frames
                        ratio = 0.0 if ratio < 0.0 else 1.0 if ratio > 1.0 else ratio
                    except ZeroDivisionError:
                        ratio = 1.0
                    delta_time = elapsed_time - prev_time
                    if (ratio - prev_ratio > RATIO_DELTA and delta_time > TIME_DELTA) or delta_time > MAX_TIME_DELTA:
                        prev_ratio, prev_time = ratio, elapsed_time
                        eta_time = int(elapsed_time * (1.0 - ratio) / ratio) if ratio > 0 else 0
                        transform_task.update_state(
                            state=TransformTask.PROGRESS,
                            meta={
                                "hostname": request.hostname,
                                "start_date": start_date,
                                "elapsed_time": elapsed_time,
                                "eta_time": eta_time,
                                "media_in_size": media_in_size,
                                "media_in_duration": media_in_duration,
                                "media_out_size": get_size(media_out_root),
                                "percent": int(100 * ratio),
                                "encoding_frame": media_out_frames,
                            },
                        )
                match = DASHCAST_SUCCESS_REGEX.match(stdout)
                returncode = dashcast.poll()
                if returncode is not None or match:
                    encoder_out = "stdout: {0}\nstderr: {1}".format(stdout, stderr)
                    break
                if media_out_frames == 0 and elapsed_time > DASHCAST_TIMEOUT_TIME:
                    encoder_out = "stdout: {0}\nstderr: {1}".format(stdout, stderr)
                    raise OSError(to_bytes("DashCast does not output frame number, encoding probably failed."))

            # DashCast output sanity check
            if not exists(media_out_path):
                raise OSError(to_bytes("Output media asset not found, DashCast encoding probably failed."))
            if returncode != 0:
                raise OSError(to_bytes("DashCast return code is {0}, encoding probably failed.".format(returncode)))
            # FIXME check duration too !

        # Here all seems okay ------------------------------------------------------------------------------------
        media_out_size = get_size(media_out_root)
        media_out_duration = get_media_duration(media_out_path)
        print("{0} Transformation task successful, output media asset {1}".format(request.id, media_out.filename))
        transform_callback(TransformTask.SUCCESS)
        return {
            "hostname": request.hostname,
            "start_date": start_date,
            "elapsed_time": elapsed_time,
            "eta_time": 0,
            "media_in_size": media_in_size,
            "media_in_duration": media_in_duration,
            "media_out_size": media_out_size,
            "media_out_duration": media_out_duration,
            "percent": 100,
        }

    except Exception as error:
        # Here something went wrong
        print("{0} Transformation task failed".format(request.id))
        transform_callback("ERROR\n{0}\n\nOUTPUT\n{1}".format(unicode(error), encoder_out))
        raise

    finally:
        if dashcast_conf:
            try_remove(dashcast_conf)
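# NOTE: both transform_task variants rely on ``make_async`` and ``read_async`` to poll the encoder's
# pipes without blocking. Those helpers are not included in this section; a minimal sketch of the
# usual fcntl-based approach they appear to follow (an assumption, not the actual implementation):
import fcntl
import os


def make_async(fd):
    """Put a file descriptor (or a file object exposing fileno()) into non-blocking mode."""
    if hasattr(fd, 'fileno'):
        fd = fd.fileno()
    fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)


def read_async(pipe):
    """Read whatever is currently available from a non-blocking pipe, or return an empty string."""
    try:
        return pipe.read() or ''
    except IOError:
        # No data available right now (EAGAIN on a non-blocking read)
        return ''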