def convert_mp4_to_web(input_filename):
    """Given an input mp4, convert it to webm, flash (flv) and ogv.

    Output files are written next to the input, sharing its base name.
    """
    # BUG FIX: split('.')[-2] returned only the second-to-last dotted
    # component (e.g. 'a.b.mp4' -> 'b') and raised IndexError for names
    # without a dot; rsplit keeps everything before the final extension.
    prefix = input_filename.rsplit('.', 1)[0]
    ffmpeg(i=input_filename, f='webm', vcodec='libvpx', acodec='libvorbis',
           ab=128000, crf=22, s='640x360', out='%s.webm' % prefix)
    ffmpeg2theora(input_filename, x=640, y=360, videoquality=5,
                  audioquality=0, frontend=True, o='%s.ogv' % prefix)
    ffmpeg("-i", input_filename, "-c:v", "flv", "-c:a", "mp3",
           "-ar", "44100", "%s.flv" % prefix)
def make_thumbnail(video):
    """Save a thumbnail of ``video``, taken from its middle, as thumbnail.jpg.

    Equivalent shell command:
        ffmpeg -y -ss 00:00:20 -i video -vframes 1 thumbnail.jpg

    TODO save as videopath/thumbnail.jpg?
    TODO add date to thumbnail = weather?

    Returns the thumbnail file name.
    Raises ValueError when the video appears to be longer than 24 hours.
    """
    # Seek position in "HH:MM:SS" form, taken from the middle of the video.
    len_seconds = get_length(video)
    len_seconds = len_seconds // 2  # take thumbnail from middle of the video
    time_str = str(datetime.timedelta(seconds=len_seconds))
    if len(time_str) == 7:
        time_str = "0" + time_str  # pad "H:MM:SS" to "HH:MM:SS"
    elif len(time_str) != 8:
        # BUG FIX: removed a leftover ipdb breakpoint that would hang
        # non-interactive runs before the error was raised.
        raise ValueError(
            "Video length is probably over 24h which is not supported.")
    print("Taking thumbnail from position " + time_str)
    ffmpeg("-y", "-ss", time_str, "-i", video, "-vframes", "1",
           "thumbnail.jpg", _err="uploader.log")
    return "thumbnail.jpg"
def handle_video(filename):
    """Handle video: remux to mp4, render a thumbnail, upload both to S3.

    Returns a (video_url, thumbnail_url) tuple.
    """
    outputvideo = tempfile.NamedTemporaryFile(delete=False)
    thumbnail = tempfile.NamedTemporaryFile(delete=False)
    try:
        outputvideo.close()
        # generate mp4 container data (stream copy, no re-encode)
        path = os.path.join(settings.UPLOAD_DIR, filename)
        print(sh.ffmpeg('-y', '-i', path, '-c', 'copy', '-f', 'mp4',
                        outputvideo.name))
        # generate thumbnail, shell equivalent:
        # ffmpeg -ss 3 -i test.mp4 -vf "select=gt(scene\,0.4)"
        #   -frames:v 5 -vsync vfr -vf fps=fps=1/600 out%02d.jpg
        # BUG FIX: sh passes args without a shell, so the filter must not
        # carry literal quote characters; also use a raw string for '\,'.
        print(sh.ffmpeg('-ss', '3', '-i', path,
                        '-vf', r'select=gt(scene\,0.4)',
                        '-frames:v', '5', '-vsync', 'vfr',
                        '-vf', 'fps=fps=1/600', '-y', '-f', 'mjpeg',
                        thumbnail.name))
        if 'h264' in filename:
            mp4_filename = filename.split('h264')[0] + 'mp4'
        else:
            mp4_filename = filename
        video_url = upload_to_s3(outputvideo.name, mp4_filename)
        thumbnail_url = upload_to_s3(thumbnail.name, mp4_filename + '.jpg')
        return (video_url, thumbnail_url)
    finally:
        # Removed a no-op 'except: raise'; temp files were created with
        # delete=False, so they must always be cleaned up here.
        os.remove(outputvideo.name)
        os.remove(thumbnail.name)
def check_required_programs():
    """Verify that mediainfo and a libx264-capable ffmpeg are installed.

    Exits the process when a hard requirement is missing; also sets
    config.extra_opts and config.audio_encoder from encoder support.
    """
    # Check that mediainfo is installed
    if sh.which("mediainfo") is None:
        # BUG FIX: '%' was applied outside print(), i.e. to its None return
        # value, raising TypeError; format inside the call instead.
        print("%s: Cannot find mediainfo, please install before continuing."
              % PROG_NAME)
        exit(1)
    # Check that ffmpeg is installed
    if sh.which("ffmpeg") is None:
        print("%s: Cannot find ffmpeg. "
              "Please install ffmpeg version 1.0 or later." % PROG_NAME)
    out = StringIO()
    try:
        sh.ffmpeg("-encoders", _out=out)
    except sh.ErrorReturnCode:
        # Old ffmpeg versions don't know -encoders at all.
        print("%s: unsupported version of ffmpeg installed. "
              "Install ffmpeg version 1.0 or higher" % PROG_NAME)
    if "libx264" not in out.getvalue():
        # Also fixes the 'ffmeg' typo in the user-facing message.
        print("%s: Installed version of ffmpeg doesn't include libx264 "
              "support. Install version of ffmpeg that supports libx264."
              % PROG_NAME)
        exit(1)
    config.extra_opts = ["-strict", "experimental"]
    config.audio_encoder = "libfaac"
    if "libfaac" not in out.getvalue():
        config.audio_encoder = "aac"
def apply(self, media):
    """Attach subtitle files (.srt/.ass) found next to media.path.

    Auto-converts an .srt to .ass when no .ass exists yet; does nothing
    for media objects without a local path.
    """
    if hasattr(media, 'path'):
        self.log.info("Detecting subtitles...")
        base_path, ext = os.path.splitext(media.path)
        srt_path = "%s.srt" % base_path
        ass_path = "%s.ass" % base_path
        # attach subs
        if os.path.exists(srt_path):
            media.log.info("Using subtitles at %s" % srt_path)
            media.add_vf("subtitles", srt_path)
        # auto-convert .srt to .ass
        if not os.path.exists(ass_path) and os.path.exists(srt_path):
            self.log.info("Converting .srt to .ass: %s" % srt_path)
            try:
                ffmpeg("-i", srt_path, ass_path)
            except Exception:
                # BUG FIX: narrowed from a bare 'except:' so SystemExit and
                # KeyboardInterrupt are not swallowed; conversion remains
                # best-effort and is only logged.
                self.log.warn("Error auto-converting .srt to .ass",
                              exc_info=True)
        # attach subs
        if os.path.exists(ass_path):
            media.log.info("Using subtitles at %s" % ass_path)
            media.add_vf("subtitles", ass_path)
    else:
        self.log.warn("No subtitles to render because it is a local file!")
def burst_frames_to_shm(vid_path, temp_burst_dir, frame_rate=None):
    """Extract every frame of ``vid_path`` into ``temp_burst_dir``.

    - The caller supplies a temporary directory in shared memory whose
      name is 128 random bits, so parallel workers never collide.
    - Frames are written as zero-padded JPEGs (%04d.jpg).
    - Returns nothing; the directory passed in holds the frames.
    """
    target_mask = os.path.join(temp_burst_dir, '%04d.jpg')
    if not check_ffmpeg_exists():
        raise FFMPEGNotFound()
    try:
        args = ['-i', vid_path]
        if frame_rate:
            # Optional resampling rate goes right after the input spec.
            args += ['-r', frame_rate]
        args += ['-q:v', str(1), '-f', 'image2', target_mask]
        sh.ffmpeg(*args)
    except Exception as exc:
        # Best-effort: report the failure instead of aborting the batch.
        print(repr(exc))
def save_snippet(self, fname, start, end):
    """Extract the [start, end] section of self.stream_url into ``fname``.

    Skips all work when ``fname`` already exists on disk (cache hit).
    Records the snippet path on the instance as ``self.fname``.
    """
    if os.path.exists(fname):
        tqdm.write(f'Cached "{fname}"')
    else:
        dur = end - start
        # extract relevant section from video (without download whole video)
        cmd = sh.ffmpeg.bake(
            '-y',
            '-ss', start,
            '-i', self.stream_url,
            '-t', dur,
            # '-to', dur,
            '-c', 'copy',
            fname)
        # tqdm.write(str(cmd))
        cmd()
        # cut to correct duration because previous ffmpeg command
        # creates a video with still frames at the end
        # TODO: figure out why this is happening
        sh.ffmpeg('-y', '-i', fname, '-t', dur, 'tmp.mp4')
        sh.mv('tmp.mp4', fname)
    # NOTE(review): assumed to run for the cached case as well — confirm
    # against callers that read self.fname after a cache hit.
    self.fname = fname
def main(argv=None, stream=sys.stderr):
    """Run ffmpeg over ``argv`` while rendering progress via ProgressNotifier.

    Returns the ffmpeg exit code (0 on success, 128+SIGINT on Ctrl-C).
    """
    argv = argv or sys.argv[1:]
    if {"-h", "-help", "--help"}.intersection(argv):
        sh.ffmpeg(help=True, _fg=True)
        return 0
    try:
        with ProgressNotifier(file=stream) as notifier:
            # BUG FIX: previously forwarded sys.argv[1:] even when an
            # explicit argv was supplied, making the parameter a no-op.
            sh.ffmpeg(
                argv,
                _in=queue.Queue(),
                _err=notifier,
                _out_bufsize=0,
                _err_bufsize=0,
                # _in_bufsize=0,
                _no_out=True,
                _no_pipe=True,
                _tty_in=True,
                # _fg=True,
                # _bg=True,
            )
    except sh.ErrorReturnCode as err:
        print(notifier.lines[-1], file=stream)
        return err.exit_code
    except KeyboardInterrupt:
        print("Exiting.", file=stream)
        return signal.SIGINT + 128  # POSIX standard
    else:
        return 0
def main(argv=None):
    """CLI entry point: run ffmpeg over ``argv``, reporting progress.

    Returns the ffmpeg exit code (0 on success).
    """
    argv = argv or sys.argv[1:]
    if {'-h', '-help', '--help'}.intersection(argv):
        sh.ffmpeg(help=True, _fg=True)
        return 0
    notifier = ProgressNotifier()
    try:
        # BUG FIX: forward the (possibly caller-supplied) argv rather than
        # sys.argv[1:], so the argv parameter actually takes effect.
        sh.ffmpeg(
            argv,
            _in=queue.Queue(),
            _err=notifier,
            _out_bufsize=0,
            _err_bufsize=0,
            # _in_bufsize=0,
            _no_out=True,
            _no_pipe=True,
            _tty_in=True,
            # _fg=True,
            # _bg=True,
        )
    except sh.ErrorReturnCode as err:
        print(notifier.lines[-1])
        return err.exit_code
    else:
        print()
        return 0
def to_mp4(video_file, output):
    """Transcode ``video_file`` to MP4 (H264 video, MP3 audio).

    The source is fed to ffmpeg on stdin; returns the opened output file.
    """
    source_bytes = video_file.read()
    ffmpeg("-y", "-i", "pipe:0", "-sameq",
           "-vcodec", "libx264", "-acodec", "libmp3lame",
           output, _in=source_bytes, _in_bufsize=1024)
    return open(output, 'r')
def to_theora(video_file, output):
    """Convert ``video_file`` to Ogg Theora/Vorbis; return the open output."""
    payload = video_file.read()
    ffmpeg("-y", "-i", "pipe:0",
           "-vcodec", "libtheora", "-acodec", "libvorbis",
           output, _in=payload, _in_bufsize=1024)
    return open(output, "r")
def transcoded_to_mp3(source_file):
    """Yield a temporary .mp3 path holding ``source_file`` transcoded at 256k.

    The temp file is removed when the generator is closed.
    """
    print(' transcoding to mp3...')
    with tempfile.NamedTemporaryFile(suffix='.mp3') as tmp:
        args = ['-i', source_file, '-c:a', 'libmp3lame',
                '-ab', '256k', '-y', tmp.name]
        print(' ffmpeg {0}'.format(' '.join(args)))
        sh.ffmpeg(*args)
        yield tmp.name
def resize(video_file, output, dimensions=(360, 360)):
    """Resize a video (py2 file object or path) to ``dimensions``.

    Returns the opened output file.
    """
    geometry = "%sx%s" % dimensions
    if type(video_file) == file:  # py2 file object: stream bytes via stdin
        ffmpeg("-y", "-i", "pipe:0", "-s", geometry, output,
               _in=video_file.read(), _in_bufsize=1024)
    else:
        ffmpeg("-y", "-i", video_file, "-s", geometry, output)
    return open(output, 'r')
def transcoded_to_mp3(source_file):
    """Transcode ``source_file`` to a temporary 256k MP3 and yield its path."""
    print(' transcoding to mp3...')
    with tempfile.NamedTemporaryFile(suffix='.mp3') as out:
        ffmpeg_args = ['-i', source_file, '-c:a', 'libmp3lame', '-ab', '256k',
                       '-y', out.name]
        print(' ffmpeg {0}'.format(' '.join(ffmpeg_args)))
        sh.ffmpeg(*ffmpeg_args)
        yield out.name
def to_mpeg(video_file, output):
    """Transcode ``video_file`` (py2 file object or path) to MPEG at 25 fps."""
    if type(video_file) == file:  # stream the file object over stdin
        ffmpeg("-y", "-i", "pipe:0", "-r", 25, output,
               _in=video_file.read(), _in_bufsize=1024)
    else:
        ffmpeg("-y", "-i", video_file, "-r", 25, output)
    return open(output, 'r')
def joinFiles(videoName, audioName, partNumber):
    """Mux videoName and audioName into one mp4 (stream copy).

    Returns the generated file name on success, False on failure; the
    source files are removed only on success.
    """
    digest = hashlib.md5(bytes(f'{videoName}{audioName}', 'utf-8')).hexdigest()
    nameOutput = f"complete_{digest}_part_{partNumber}.mp4"
    try:
        # BUG FIX: the old code passed a StringIO to _err_to_out (a boolean
        # flag), so the buffer was never filled and the "stderr non-empty"
        # error check could never fire — ffmpeg also writes its banner to
        # stderr on success, so that check was wrong anyway.  Rely on the
        # exit code instead.
        sh.ffmpeg(["-i", f"videoplayer/video/{videoName}",
                   "-i", f"videoplayer/audio/{audioName}",
                   "-c", "copy", f"videoplayer/{nameOutput}"])
    except sh.ErrorReturnCode:
        print("[JOINFILES] Error encontrado!")
        return (False)
    print("[JOINFILES] Video e Audio Mesclados!")
    sh.rm([f"videoplayer/audio/{audioName}", f"videoplayer/video/{videoName}"])
    print("[JOINFILES] Video e Audio removidos!")
    return (nameOutput)
def stitch(videos, output, vcodec="libx264", acodec="libmp3lame"):
    """Stitch together the video files in the iterable ``videos``.

    A generator feeds ffmpeg's stdin stream by stream, avoiding an
    in-memory concatenation of all the inputs at once.
    """
    stream_chunks = (v.read() for v in videos)
    ffmpeg('-y', '-i', 'pipe:0',
           '-vcodec', vcodec, '-acodec', acodec,
           output, _in=stream_chunks, _in_bufsize=1024)
    return open(output, 'r')
def crop(video_file, dimensions, output, origin=(0, 0)):
    """Crop the given video to ``dimensions`` starting at ``origin``."""
    crop_spec = "crop=%s:%s:%s:%s" % (dimensions + origin)
    if type(video_file) == file:  # py2 file object -> stream over stdin
        ffmpeg("-y", "-i", "pipe:0", "-vf", crop_spec, output,
               _in=video_file.read(), _in_bufsize=1024)
    else:
        ffmpeg("-y", "-i", video_file, "-vf", crop_spec, output)
    return open(output, 'r')
def run(self, file_id):
    """Convert file_id.flv to mp3 if needed, then play it with pyglet."""
    # todo: reuse player instance (start in __init__), use queue and
    # pyglet.media.Player()
    pyglet.app.exit()
    self.player = pyglet.media.ManagedSoundPlayer()
    self.player.push_handlers(self)
    # self.player.eos_action = self.player.EOS_PAUSE
    mp3_path = file_id + '.mp3'
    flv_path = file_id + '.flv'
    if not os.path.exists(mp3_path):
        self.ui_update_status('converting to mp3')
        convert = sh.ffmpeg('-i', flv_path, '-f', 'mp3', mp3_path, '-y')
        convert.wait()
        os.remove(flv_path)  # drop the flv once the mp3 exists
    self.ui_update_status('loading mp3')
    self.source = pyglet.media.load(mp3_path)
    # Schedule exit at end-of-track and a 10 Hz UI timer.
    pyglet.clock.schedule_once(self.exit_callback, self.source.duration)
    pyglet.clock.schedule_interval_soft(self.timer_callback, 0.1)
    self.player.queue(self.source)
    self.ui_update_status('playing')
    self.player.play()
    pyglet.app.run()
    pyglet.app.exit()
def readFrame(self):
    """Asynchronously decode one raw RGB frame at self.tc into self.out.

    Returns a running sh command; the caller can block with .wait() on it.
    """
    self.out = BytesIO()
    tc = self.TCtoFF()
    common = ("-ss", tc, "-i", self.file, "-frames:v", "1",
              "-f", "image2pipe", '-pix_fmt', 'rgb24',
              '-vcodec', 'rawvideo')
    if self.LUT:
        print("using {} LUT to read the frame at {}".format(
            basename(self.LUT)[:-5], self.tc))
        # Apply the configured LUT filters before piping out.
        extra = (self.filters[0], self.filters[1])
    else:
        extra = ()
    args = common + extra + ('-',)
    return ffmpeg(*args, _out=self.out, _done=self.done, _bg=True)
def postprocess(recording):
    """Mix the recording's audio with its committed song blob and render an
    Ogg video from the captured frames, storing the result on recording.blob.

    Python 2 code (print statements).  Works inside recording.tmpfolder and
    always restores the previous working directory.
    """
    tmpdir = recording.tmpfolder
    curdir = os.getcwd()
    try:
        print "Changing dir to %s" % tmpdir
        os.chdir(tmpdir)
        committed = recording.song.blob.committed()
        # Mix the raw take with the committed song track at reduced volume.
        sox("-m", "audio.wav", "-t", "mp3", "-v", "0.15", committed,
            "mixed.wav")
        # Combine the mixed audio with frame%d.png stills (1 fps) into
        # video.ogv (Vorbis audio).
        ffmpeg("-i", "mixed.wav", "-f", "image2", "-r", "1",
               "-i", "frame%d.png", "-acodec", "libvorbis", "video.ogv")
        recording.blob = Blob()
        with recording.blob.open("w") as saveto:
            with open("video.ogv") as savefrom:
                shutil.copyfileobj(savefrom, saveto)
        print "%s/%s" % (tmpdir, "video.ogv")
        # shutil.rmtree(tmpdir)
        transaction.commit()
    finally:
        # Always restore the working directory, even on failure.
        os.chdir(curdir)
def prepare_video_to_instagram(video):
    '''Make sure the video length is max target_seconds. Speed it up otherwise.

    Eg. speed up by factor of 2:
    ffmpeg -i input.mkv -filter:v "setpts=0.5*PTS" output.mkv'''
    target_height = 640
    left_offset = 300
    target_seconds = 59
    vf_scale = "scale=-1:%s" % target_height  # 1920x1080 -> 1137x640
    vf_crop = ",crop=640:640:%s:0" % left_offset  # 640x640
    orig_len = get_length(video)
    if orig_len < target_seconds:
        print(
            "Original length ok, skipping speedup. %s is under target maximum %s s."
            % (orig_len, target_seconds))
        vf_slowdown = ""
    else:
        # setpts factor < 1 speeds the video up to fit the target length.
        slowdown_factor = float(target_seconds) / float(orig_len)
        print(
            "Too long video. Speeding up by slowing down video by factor %0.3f"
            % slowdown_factor + " to fit original length %02d" % orig_len +
            " to target " + str(target_seconds))
        vf_slowdown = ",setpts=%0.3f*PTS" % slowdown_factor
    filter_chain = vf_scale + vf_crop + vf_slowdown
    ffmpeg(
        "-y", "-i", video,
        "-filter:v", filter_chain,
        "-b:v", "1000k",
        "-r", "29.970",
        "-profile:v", "main",
        "-level", "3",
        "short.mp4",
        # err="uploader.log")
        _err=process_log,
        _out=process_log)
    return "short.mp4"
def resize(video_file, output, dimensions=(360, 360)):
    """Resize the given video (path or open py2 file) to ``dimensions``."""
    size_arg = "%sx%s" % dimensions
    is_stream = type(video_file) == file
    if is_stream:
        data = video_file.read()
        ffmpeg("-y", "-i", "pipe:0", "-s", size_arg, output,
               _in=data, _in_bufsize=1024)
    else:
        ffmpeg("-y", "-i", video_file, "-s", size_arg, output)
    return open(output, 'r')
def check_required_programs():
    """Ensure mediainfo and a libx264-capable ffmpeg (>= 1.0) are available.

    Exits when a hard requirement is missing, and records encoder options
    on the global config object.
    """
    # Check that mediainfo is installed
    if sh.which('mediainfo') is None:
        # BUG FIX: the % operand was outside the print() call, so it was
        # applied to None (print's return value) and raised TypeError.
        print('%s: Cannot find mediainfo, please install before continuing.'
              % PROG_NAME)
        exit(1)
    # Check that ffmpeg is installed
    if sh.which('ffmpeg') is None:
        print('%s: Cannot find ffmpeg. '
              'Please install ffmpeg version 1.0 or later.' % PROG_NAME)
    out = StringIO()
    try:
        sh.ffmpeg('-encoders', _out=out)
    except sh.ErrorReturnCode:
        # Pre-1.0 ffmpeg doesn't support the -encoders flag.
        print('%s: unsupported version of ffmpeg installed. '
              'Install ffmpeg version 1.0 or higher' % PROG_NAME)
    if 'libx264' not in out.getvalue():
        # Also fixes the 'ffmeg' typo in the user-facing message.
        print("%s: Installed version of ffmpeg doesn't include libx264 "
              "support. Install version of ffmpeg that supports libx264."
              % PROG_NAME)
        exit(1)
    config.extra_opts = ['-strict', 'experimental']
    config.audio_encoder = 'libfaac'
    if 'libfaac' not in out.getvalue():
        config.audio_encoder = 'aac'
def jpgs_to_mp4(source_dir, target_dir):
    """Assemble the jpgs in source_dir into target_dir/<timestamp>.mp4.

    Shell equivalents considered:
    # ffmpeg -sameq -f image2 -i time-lapse-%010d.jpg -r 12 -s 640x480 your-awesome-movie.mp4
    # ffmpeg -r 12 -pattern_type glob -i "*.jpg" -c:v libx264 -r 12 ../timelapse.mp4
    # ffmpeg -r 24 -pattern_type glob -i "*.jpg" -c:v libx264 -r 24 output.mp4
    # ffmpeg -r 24 -pattern_type glob -i "$d/*.jpg" -c:v libx264 -r 24 ../outputs/$d.mp4
    """
    FRAMERATE = 30
    # Normalize both directory paths to end with a slash.
    if not source_dir.endswith("/"):
        source_dir = source_dir + "/"
    if not target_dir.endswith("/"):
        target_dir = target_dir + "/"
    # The last six characters of the source directory name are the timestamp.
    timestamp = source_dir[:-1][-6:]
    target_filename = target_dir + timestamp + ".mp4"
    print("Converting jpg's from %s to %s with framerate %s" %
          (source_dir, target_filename, FRAMERATE))
    ffmpeg("-y", "-r", FRAMERATE, "-pattern_type", "glob",
           "-i", source_dir + "*.jpg", "-c:v", "libx264",
           "-r", FRAMERATE, target_filename,
           _err="timelapse.log", _out="timelapse.log")
    # then move the pics to archive
    # print("Done. Archiving source jpg's.")
    # mv(source_dir, "data/archive/jpg/")
    return target_filename
def cut_audio(path, stream, title, songs, times):
    """Split one long audio file into per-song files using track lengths."""
    print('Using freedb info for title "{}"'.format(title))
    fullpath = os.path.join(path, stream.filename)
    # convert time strings to datetime format
    parsed = [datetime.strptime(t, "%M:%S") for t in times]
    deltas = [timedelta(minutes=p.minute, seconds=p.second) for p in parsed]
    # Cumulative offsets: each song starts where the previous one ended.
    song_starts = [datetime.strptime("00:00", "%M:%S")]
    running = song_starts[0]
    for delta in deltas[:-1]:
        running = running + delta
        song_starts.append(running)
    song_starts = [s.strftime("%M:%S") for s in song_starts]
    for song, start, length in zip(songs, song_starts, deltas):
        try:
            songpath = os.path.join(path, song + "." + stream.extension)
            if os.path.exists(songpath):
                print("File {} already exists. Skipping".format(songpath))
            else:
                print("Extracting to file", song + "." + stream.extension)
                sh.ffmpeg(["-i", fullpath, "-ss", start, "-t", length,
                           "-acodec", "copy", songpath])
        except sh.ErrorReturnCode as e:
            print(e.stderr, file=sys.stderr)
def thumb(path):
    """Serve a JPEG thumbnail for the video at ``path``, cached on disk."""
    full_path = base_dir + path
    if not (os.path.isfile(full_path) and not os.path.isdir(full_path)):
        return abort(404)
    cache_path = full_path + '.jpg'
    if os.path.isfile(cache_path):
        # Cached thumbnail already rendered on a previous request.
        with open(cache_path, 'rb') as f:
            return send_file(BytesIO(f.read()), mimetype='image/jpeg')
    # No cache yet: grab one frame two minutes in and store it.
    img = ffmpeg('-ss', '00:02:00', '-i', full_path,
                 '-frames:v', '1', '-f', 'image2', '-')
    with open(cache_path, 'w+b') as f:
        f.write(img.stdout)
    return send_file(BytesIO(img.stdout), mimetype='image/jpeg')
def crop(video_file, dimensions, output, origin=(0, 0)):
    """Crop the given video from the top left corner (``origin``) to
    ``dimensions``; returns the opened output file."""
    vf_arg = "crop=%s:%s:%s:%s" % (dimensions + origin)
    from_stream = type(video_file) == file
    if from_stream:
        data = video_file.read()
        ffmpeg("-y", "-i", "pipe:0", "-vf", vf_arg, output,
               _in=data, _in_bufsize=1024)
    else:
        ffmpeg("-y", "-i", video_file, "-vf", vf_arg, output)
    return open(output, 'r')
def ffmpeg_run(command):
    """Run ffmpeg with ``command``, draining its merged output line by line.

    Iterating the sh result keeps the process's output buffer from filling
    and blocks until ffmpeg exits.
    """
    merged_output = sh.ffmpeg(command, _err_to_out=True, _iter=True,
                              _out_bufsize=1000)
    for _ in merged_output:
        pass
def retime(song, redis, env):
    """Generate word-level timings for ``song`` via Google Cloud Speech.

    Workflow: convert the retiming webm to opus with ffmpeg, upload it to
    Google Cloud Storage, run long-running speech recognition, store the
    JSON timings on the song, and publish progress into the redis hash
    ``retimeprogress-<oid>``.  On failure the song is flagged and the
    progress hash reports pct -1.  Always cleans up the GCS blob and the
    temp dir, and restores the working directory.
    """
    tmpdir = get_retime_tempdir(
        env['registry'],
        # Sanitize the song name for use as a path component.
        song.__name__.strip('/').strip('\\\\').strip('..')
    )
    curdir = os.getcwd()
    registry = env['registry']
    try:
        progress_key = f'retimeprogress-{song.__oid__}'
        redis.hmset(
            progress_key,
            {'pct': 1, 'status': 'Preparing'}
        )
        redis.expire(progress_key, 1200)  # expire in 20 minutes
        print('Changing dir to %s' % tmpdir)
        try:
            os.chdir(tmpdir)
        except FileNotFoundError:
            # Temp dir may not exist yet on first use.
            os.makedirs(tmpdir)
            os.chdir(tmpdir)
        webm_filename = song.retiming_blob.committed()
        gproject = os.environ['YSS_GOOGLE_STORAGE_PROJECT']
        gbucket = os.environ['YSS_GOOGLE_STORAGE_BUCKET']
        blobname = f'{song.__name__}.retime'
        gsuri = f'gs://{gbucket}/{blobname}'
        opus_filename = os.path.join(tmpdir, 'retime.opus')
        logger.info('Converting webm to opus')
        # XX should just copy audio
        ffmpeg(
            "-y",
            "-i", webm_filename,
            "-vn",  # no video
            "-ar", "48000",
            "-y",  # clobber
            opus_filename,
        )
        logger.info('Finished converting webm to opus')
        client = storage.Client(gproject)
        bucket = client.bucket(gbucket)
        blob = bucket.blob(blobname)
        logger.info('Uploading timing track to gcloud...')
        blob.upload_from_file(
            open(opus_filename, 'rb'),
            content_type='audio/opus',
        )
        logger.info('Finished uploading timing track...')
        client = speech.SpeechClient()
        audio = speech_types.RecognitionAudio(uri=gsuri)
        config = speech_types.RecognitionConfig(
            encoding=speech_enums.RecognitionConfig.AudioEncoding.OGG_OPUS,
            sample_rate_hertz=48000,
            language_code=song.language,
            enable_word_time_offsets=True,
        )
        logger.info('Waiting for speech recognition to complete')
        operation = client.long_running_recognize(config, audio)
        # doing add_done_callback and checking for status is pointless,
        # it returns 0 then 100 for percent complete
        redis.hmset(
            progress_key,
            {'pct': 50,
             'status': 'Recognizing speech',
             'done': 1,
             }
        )
        try:
            response = operation.result(timeout=7200)
        except GoogleAPICallError:
            # google.api_core.exceptions.GoogleAPICallError: None
            # "Unexpected state: Long-running operation had neither
            # response nor error set."
            logger.error('Retiming failed', exc_info=True)
            redis.hmset(
                progress_key,
                {'pct': -1,
                 'status': 'Retiming failed; speech recognition error'}
            )
            song.retiming_failure = True
            song.retiming = False
            song.retiming_blob = None
            event = ObjectModified(song)
            registry.subscribers((event, song), None)
            transaction.commit()
            return
        logger.info('Speech recognition operation completed')
        timings = speech_results_to_timings(response.results, 7)
        alt_timings = json.dumps(timings, indent=2)
        song.alt_timings = alt_timings
        formatted_timings = format_timings(alt_timings)
        redis.hmset(
            progress_key,
            {'pct': 100,
             'status': 'Finished',
             'timings': alt_timings,
             'formatted_timings': formatted_timings,
             'done': 1,
             }
        )
        song.retiming = False
        song.retiming_blob = None
        event = ObjectModified(song)
        registry.subscribers((event, song), None)
        transaction.commit()
    finally:
        try:
            blob.delete()
        except:  # XXX
            pass
        shutil.rmtree(tmpdir, ignore_errors=True)
        os.chdir(curdir)
input_wav = '/Users/jagaro/Desktop/b.wav'


def media_duration(path):
    """Return the duration of a media file in seconds, parsed from ffprobe.

    Raises when ffprobe's output does not contain exactly one Duration line.
    """
    # Raw string avoids invalid-escape warnings for \d and \. .
    re_duration = re.compile(r'^ *Duration: (\d+):(\d+):(\d+)(\.\d+),')
    output = sh.ffprobe('-hide_banner', path).stderr.decode('utf-8')
    lines = list(filter(lambda l: re_duration.match(l), output.split('\n')))
    if len(lines) == 1:
        m = re_duration.match(lines[0])
        # hours + minutes + seconds + fractional seconds
        return (float(m[1]) * 60 * 60) + \
            (float(m[2]) * 60) + \
            (float(m[3])) + float(m[4])
    else:
        raise Exception('Unexpected output from ffprobe: %s' % output)


# Detect silences of at least 5 s below -20 dB; ffmpeg logs them on stderr.
result = sh.ffmpeg('-i', input_wav,
                   '-af', 'silencedetect=noise=-20dB:d=5',
                   '-nostats', '-hide_banner', '-f', 'null', '-')
output = result.stderr.decode('utf-8')
lines = output.split('\n')
lines = filter(lambda l: re.match(r'^\[sil', l), lines)
lines = map(lambda l: re.sub(r'\[.+\] ', '', l), lines)
lines = map(lambda l: re.sub(r' \| silence_duration.+$', '', l), lines)
# Pair each silence_start line with the following silence_end (None at EOF).
pairs = [(a, next(lines, None)) for a in lines]

# assert
for start, end in pairs:
    # BUG FIX: the end pattern was r'\d+(\d.\d+)?' (digit, any char, digits)
    # which only matched fractional seconds via backtracking and would accept
    # malformed values like '1x23'; r'\d+(\.\d+)?' is the intended decimal.
    if not re.match(r'^silence_start: \d+(\.\d+)?$', start) or \
            (end is not None and
             not re.match(r'^silence_end: \d+(\.\d+)?$', end)):
        raise Exception('Invalid pairs %s' % pairs)
def convert(filename, args):
    """Convert ``filename`` to mp4, choosing 1-pass or 2-pass encoding.

    Inspects the media's tracks to build per-stream ffmpeg options, then
    runs ffmpeg with an EncodingProgress reporter.  Honors args.dry_run
    (print the command only) and args.force_encode.
    """
    cache_file(filename)
    general_info, tracks = get_media_info(filename)
    method = None           # '1pass' or '2pass', decided by get_video_opts
    video_opts = None
    audio_opts = []
    subtitle_opts = []
    metadata_opts = [
        '-map_metadata', '0'
    ]
    input_ops = [
        '-i', '%s' % filename,
    ]
    for index, track in enumerate(tracks):
        track_type = track.track_type.lower()
        if track_type == 'video':
            if method is not None:
                raise Exception(
                    "2mp4 currently doesn't support multiple video streams :("
                )
            method, video_opts = get_video_opts(
                track.track_id, track, force_encode=args.force_encode
            )
            frame_count = track.frame_count
            if frame_count is None:
                # Estimate frames from duration (ms) and the frame rate.
                frame_count = float(
                    general_info.duration
                ) / 1000 * float(
                    track.original_frame_rate
                )
        elif track_type == 'audio':
            audio_opts += get_audio_opts(track.track_id, track)
        elif track_type == 'text':
            subtitle_opts += get_subtitle_opts(track.track_id, track)
    out_file_name = get_out_file_name(
        general_info.folder_name, general_info.file_name
    )
    out_path = os.path.join(general_info.folder_name, out_file_name)
    print('Encoding %s -> %s' % (filename, out_file_name))
    if os.path.exists(out_path):
        print('Destination file exists, skipping...')
        return
    # Test that we can write to output path
    try:
        with open(out_path, 'wb'):
            os.unlink(out_path)
    except IOError as e:
        print(e)
        return sys.exit(e.errno)
    if method == '1pass':
        opts = input_ops + video_opts + audio_opts + \
            subtitle_opts + metadata_opts + config.extra_opts + [
                '-y', out_path
            ]
        if args.dry_run:
            print('ffmpeg ' + ' '.join(opts))
        else:
            progress = EncodingProgress('Pass 1 of 1:', frame_count)
            p = sh.ffmpeg(
                *opts, _err=progress.process_ffmpeg_line, _err_bufsize=256
            )
            p.wait()
            progress.finish()
    elif method == '2pass':
        # Pass 1: analysis only — no audio, output discarded.
        opts = input_ops + video_opts + [
            '-an', '-pass', '1', '-y', '-f', 'rawvideo', '/dev/null'
        ]
        if args.dry_run:
            print('ffmpeg ' + ' '.join(opts))
        else:
            pass1_progress = EncodingProgress('Pass 1 of 2: ', frame_count)
            p = sh.ffmpeg(
                *opts, _err=pass1_progress.process_ffmpeg_line,
                _err_bufsize=256
            )
            p.wait()
            pass1_progress.finish()
        # Pass 2: real encode using the pass-1 statistics.
        opts = input_ops + video_opts + audio_opts + \
            subtitle_opts + metadata_opts + [
                '-pass', '2', '-y', out_path
            ]
        if args.dry_run:
            print('ffmpeg ' + ' '.join(opts))
        else:
            pass2_progress = EncodingProgress('Pass 2 of 2: ', frame_count)
            p = sh.ffmpeg(
                *opts, _err=pass2_progress.process_ffmpeg_line,
                _err_bufsize=256
            )
            p.wait()
            pass2_progress.finish()
def convert(filename, args):
    """Convert ``filename`` to mp4 using 1-pass or 2-pass ffmpeg encoding.

    Builds per-stream options from the media's tracks, then runs ffmpeg
    with an EncodingProgress reporter.  Honors args.dry_run (print the
    command only) and args.force_encode.
    """
    cache_file(filename)
    general_info, tracks = get_media_info(filename)
    method = None  # "1pass" or "2pass", decided by get_video_opts
    video_opts = None
    audio_opts = []
    subtitle_opts = []
    metadata_opts = ["-map_metadata", "0"]
    input_ops = ["-i", "%s" % filename]
    for index, track in enumerate(tracks):
        track_type = track.track_type.lower()
        if track_type == "video":
            if method is not None:
                raise Exception("2mp4 currently doesn't support multiple video streams :(")
            method, video_opts = get_video_opts(track.track_id, track, force_encode=args.force_encode)
            frame_count = track.frame_count
            if frame_count is None:
                # Estimate from duration (ms) and the original frame rate.
                frame_count = float(general_info.duration) / 1000 * float(track.original_frame_rate)
        elif track_type == "audio":
            audio_opts += get_audio_opts(track.track_id, track)
        elif track_type == "text":
            subtitle_opts += get_subtitle_opts(track.track_id, track)
    out_file_name = get_out_file_name(general_info.folder_name, general_info.file_name)
    out_path = os.path.join(general_info.folder_name, out_file_name)
    print("Encoding %s -> %s" % (filename, out_file_name))
    if os.path.exists(out_path):
        print("Destination file exists, skipping...")
        return
    # Test that we can write to output path
    try:
        with open(out_path, "wb"):
            os.unlink(out_path)
    except IOError as e:
        print(e)
        return sys.exit(e.errno)
    if method == "1pass":
        opts = (
            input_ops
            + video_opts
            + audio_opts
            + subtitle_opts
            + metadata_opts
            + config.extra_opts
            + ["-y", out_path]
        )
        if args.dry_run:
            print("ffmpeg " + " ".join(opts))
        else:
            progress = EncodingProgress("Pass 1 of 1:", frame_count)
            p = sh.ffmpeg(*opts, _err=progress.process_ffmpeg_line, _err_bufsize=256)
            p.wait()
            progress.finish()
    elif method == "2pass":
        # Pass 1: analysis only — no audio, output discarded.
        opts = input_ops + video_opts + ["-an", "-pass", "1", "-y", "-f", "rawvideo", "/dev/null"]
        if args.dry_run:
            print("ffmpeg " + " ".join(opts))
        else:
            pass1_progress = EncodingProgress("Pass 1 of 2: ", frame_count)
            p = sh.ffmpeg(*opts, _err=pass1_progress.process_ffmpeg_line, _err_bufsize=256)
            p.wait()
            pass1_progress.finish()
        # Pass 2: real encode using the pass-1 statistics.
        opts = input_ops + video_opts + audio_opts + subtitle_opts + metadata_opts + ["-pass", "2", "-y", out_path]
        if args.dry_run:
            print("ffmpeg " + " ".join(opts))
        else:
            pass2_progress = EncodingProgress("Pass 2 of 2: ", frame_count)
            p = sh.ffmpeg(*opts, _err=pass2_progress.process_ffmpeg_line, _err_bufsize=256)
            p.wait()
            pass2_progress.finish()
def record(self):
    """Records video."""
    clip = self.get_video()
    # Launch ffmpeg in the background, pulling the camera's mjpeg stream.
    proc = ffmpeg('-f', 'mjpeg', '-i', self.build_url('stream'),
                  clip.get_path(), _bg=True)
    self.recording = (proc, clip)
def burst_frames_to_shm(vid_path, temp_burst_dir, frame_size=-1,
                        frame_rate=-1, frame_uniform=-1):
    """Burst frames of ``vid_path`` into ``temp_burst_dir`` as %06d.bmp.

    - Directory name is chosen as random 128 bits so as to avoid clash
      during parallelization.
    - frame_size: scale the shorter edge to this many pixels (keep aspect).
    - frame_rate: resample to this frame rate instead of the native one.
    - frame_uniform: extract this many uniformly spaced frames
      (1 selects only the middle frame).
    """
    target_mask = os.path.join(temp_burst_dir, '%06d.bmp')
    if not os.system('ffmpeg -version > /dev/null') == 0:
        raise FFMPEGNotFound()
    try:
        if frame_size > 0 or frame_uniform > 0:
            # Probe source geometry and frame count to build -vf parameters.
            ffprobe_args = [
                '-v', 'error', '-count_frames',
                '-of', 'default=nokey=1:noprint_wrappers=1',
                '-select_streams', 'v:0',
                '-show_entries', 'stream=width,height,nb_read_frames',
                vid_path
            ]
            output = sh.ffprobe(ffprobe_args)
            [width_src, height_src, total_frames] = [
                int(o) for o in output.split()[-3:]]
            vf_param = []
            if frame_size > 0:
                # Scale the shorter edge, keeping the aspect ratio.
                if height_src < width_src:
                    vf_param.append('scale=-1:{}'.format(int(frame_size)))
                else:
                    vf_param.append('scale={}:-1'.format(int(frame_size)))
            if frame_uniform > 0:
                if frame_uniform == 1:
                    # select the middle frame
                    frames_to_sample = [(total_frames + 1) // 2]
                else:
                    frames_to_sample = np.linspace(
                        1, total_frames - 1,
                        frame_uniform).round().astype(int)
                select_param = '+'.join(
                    ['eq(n\,{})'.format(n) for n in frames_to_sample])
                vf_param.append('select=' + select_param)
        ffmpeg_args = [
            '-i', vid_path,
            '-q:v', str(1),
            '-f', 'image2',
            target_mask,
        ]
        if frame_size > 0 or frame_uniform > 0:
            ffmpeg_args.insert(2, '-vf')
            ffmpeg_args.insert(3, ','.join(vf_param))
            if frame_uniform > 0:
                # -vsync vfr keeps only the frames picked by select=.
                ffmpeg_args.insert(4, '-vsync')
                ffmpeg_args.insert(5, 'vfr')
        if frame_rate > 0:
            ffmpeg_args.insert(2, '-r')
            ffmpeg_args.insert(3, str(frame_rate))
        sh.ffmpeg(*ffmpeg_args)
    except Exception as e:
        # BUG FIX: errors were silently discarded ('pass'), making a failed
        # burst indistinguishable from an empty video; report them like the
        # sibling burst helper does while staying best-effort.
        print(repr(e))
def regenerate_ffmpeg(m3u8, token):
    """Start a background ffmpeg pulling the authed radiko HLS stream to disk.

    Returns the running sh process.
    """
    user_agent = ("Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) "
                  "AppleWebKit/604.1.34 (KHTML, like Gecko) Version/11.0 "
                  "Mobile/15A5341f Safari/604.1")
    # Route stderr to the debug handler only when DEBUG is enabled.
    err_handler = ending_process_test if DEBUG else ending_process_new
    proc = sh.ffmpeg(
        "-user_agent", user_agent,
        "-headers", "X-Radiko-Authtoken:" + token,
        "-i", m3u8,
        "-hls_flags", "delete_segments",
        "-hls_allow_cache", "0",
        "-c:a", "copy",
        m3u8_local_destination,
        "-loglevel", "info",
        _err=err_handler,
        _out=printing_output,
        _bg=True)
    return proc
# step 1: generate the videos and JSON file with open(os.path.join(temp_dir, 'source'), 'wb') as source_frames: for _ in range(40): source_frames.write(b'\x00' * 30000) source_frames.write(b'\xff' * 30000) sh.ffmpeg( '-t', '10', '-s', '100x100', '-f', 'rawvideo', '-pix_fmt', 'rgb24', '-r', '8', '-i', './source', 'source.mp4', _cwd=temp_dir, ) json_content = [] for i in range(25): vid_id = str(i) os.makedirs(os.path.join(temp_dir, vid_id)) sh.cp('source.mp4', os.path.join(vid_id, 'source_' + vid_id + '.mp4'),