def transcode(self, ffmpeg="ffmpeg", dry_run=False):
    """Transcode ``self.src`` into ``self.dest`` and carry over its metadata.

    Args:
        ffmpeg: Name or path of the ffmpeg executable to invoke.
        dry_run: If True, only log what would be done and return.

    Raises:
        Exception: if ffmpeg exits without producing the output file.
    """
    logger.info("Transcoding: %s -> %s", repr(self.src), repr(self.dest))
    logger.debug("Transcoding: %s", repr(self))
    if dry_run:
        return
    # This has no effect if successful, but will throw an error
    # early if we can't read tags from the source file, rather
    # than only discovering the problem after transcoding.
    AudioFile(self.src)
    # self.eopts is a user-supplied encoder option string; split it
    # shell-style into an argv list for ffmpy.
    encoder_opts = []
    if self.eopts:
        encoder_opts = shlex.split(self.eopts)
    ff = FFmpeg(
        executable = ffmpeg,
        global_options = '-y',  # overwrite dest if it already exists
        inputs = { self.src: None },
        # '-vn' drops any video stream (e.g. embedded cover art).
        outputs = { self.dest: ['-vn'] + encoder_opts },
    )
    logger.debug("Transcode command: %s", repr(ff.cmd))
    # NOTE(review): stock ffmpy's run() takes no `verbose` kwarg; this
    # presumably relies on a wrapper/fork — confirm against the import.
    ff.run(verbose=False)
    if not os.path.isfile(self.dest):
        raise Exception("ffmpeg did not produce an output file")
    copy_tags(self.src, self.dest)
    if self.use_checksum:
        # Persist the source checksum in the dest tags so later runs can
        # detect an unchanged source and skip re-transcoding.
        logger.debug("Saving checksum to dest file %s: %s", repr(self.dest), self.source_checksum())
        write_checksum_tag(self.dest, self.source_checksum())
    try:
        shutil.copymode(self.src, self.dest)
    except OSError:
        # It's ok if setting the mode fails
        pass
def concat_videos(list, outdir=None, ffmpeg='ffmpeg', audio=True):
    """Download the videos named in *list*, concatenate them, and optionally
    overlay a faded background-audio track.

    NOTE(review): the parameter name `list` shadows the builtin and `dir`
    below shadows another; kept as-is for backward compatibility with
    keyword callers.

    Args:
        list: Source list consumed by _download_file_list (file/URL list).
        outdir: Working/output directory; defaults to this module's directory.
        ffmpeg: ffmpeg executable to use.
        audio: When True, produce "videoa.mp4" with background audio; the
            plain concat result is "video.mp4".

    Returns:
        Path of the produced video, or None when no videos were downloaded.
    """
    dir = outdir if outdir else os.path.dirname(os.path.realpath(__file__))
    videos = _download_file_list(list, dir)
    if bool(videos) == False:
        return None
    # make the video files list (ffmpeg concat-demuxer manifest, one
    # "file '<path>'" entry per downloaded video)
    file_name = os.path.normpath(os.path.join(dir, str(uuid.uuid4())))
    with open(file_name, 'w') as file:
        for video in videos:
            file.write("file '" + video + "'\n")
    # concatenate the videos
    output = os.path.normpath(os.path.join(dir, "video.mp4"))
    ff = FFmpeg(
        executable = ffmpeg,
        global_options = ["-y", "-f" ,"concat", "-safe", "0", "-protocol_whitelist", "file,http,https,tcp,tls"],
        inputs = {file_name: None},
        outputs = {output: "-c copy"}
    )
    #print ff.cmd
    # NOTE(review): `out` is parsed below for ffmpeg's progress lines, so
    # run() here must return the console output — confirm the ffmpy
    # wrapper in use actually does that.
    out = ff.run()
    # if audio background is requested we will try to get duration of movie and matching audio file
    if audio == True:
        # collect data for concatenated movie total duration: take the last
        # "time=HH:MM:SS.ss" ffmpeg printed (sub-second part is dropped).
        length = time.strptime(re.findall("(?<=time\\=)[0-9.:]+", out)[-1],"%H:%M:%S.%f")
        # NOTE(review): "lenght_t" is a long-standing typo for "length_t".
        lenght_t = datetime.timedelta(hours=length.tm_hour,minutes=length.tm_min,seconds=length.tm_sec).total_seconds()
        inputs = OrderedDict([(output, None)])
        # Stream 0 (the concatenated video) passes through unchanged.
        applied_filters = ["[0:v]null[video]"]
        audio_track = _get_audio(lenght_t, dir)  # (path, duration) pair
        # build the filter chain and execute it: loop the track enough times
        # to cover the video, trim to the exact length, then fade out.
        audioseq = FilterChain([
            ReplicateAudioFilter(repetitions = int(math.ceil(lenght_t / float(audio_track[1])))),
            ConcatFilter(is_video = False, outputtag = "caf"),
            TrimAudioFilter(length = lenght_t),
            FadeOutAudioFilter(start = lenght_t-AUDIO_FADE_OUT_T, length = AUDIO_FADE_OUT_T, outstreamprefix="audio")
        ])
        applied_filters += audioseq.generate(["1:a"])[0]
        # add the audio track to the inputs collection
        inputs.update({audio_track[0]: None})
        # build the video
        output = os.path.normpath(os.path.join(dir, "videoa.mp4"))
        ff = FFmpeg(
            executable = ffmpeg,
            global_options = ["-y"],
            inputs = inputs,
            outputs = {output: "-filter_complex \"" + ";".join(applied_filters) + "\" -map \"[video]\" -map \"[audio]\""}
        )
        #print ff.cmd
        ff.run()
    return output
def test_raise_exception_with_stdout_stderr_none():
    """The FFRuntimeError message shows empty STDOUT/STDERR sections when
    neither stream was captured."""
    ff = FFmpeg(
        global_options="--stdin none --stdout none --stderr none --exit-code 42"
    )
    with pytest.raises(FFRuntimeError) as err:
        ff.run()
    expected_message = (
        "`ffmpeg --stdin none --stdout none --stderr none --exit-code 42` "
        "exited with status 42\n\n"
        "STDOUT:\n"
        "\n\n"
        "STDERR:\n"
    )
    assert str(err.value) == expected_message
def __run_ffmpeg(exe="ffmpeg", inputs=None, outputs=None):
    """Run ffmpeg, treating a user interruption (SIGINT / exit code 255)
    as a normal, silent outcome."""
    runner = FFmpeg(executable=exe, inputs=inputs, outputs=outputs)
    try:
        runner.run(stderr=subprocess.STDOUT)
    except KeyboardInterrupt:
        # Voluntary interruption: nothing to report.
        pass
    except FFRuntimeError as ffe:
        # After receiving SIGINT ffmpeg has a 255 exit code
        if ffe.exit_code != 255:
            raise ValueError("An unexpected FFRuntimeError occurred: "
                             "{}".format(ffe))
def convert_to_pcm16b16000r(in_filename: str = None, in_content: bytes = None):
    """Convert audio to raw 16-bit little-endian PCM at 16 kHz via ffmpeg pipes.

    Exactly one of the two sources is used: *in_filename* takes precedence,
    otherwise *in_content* is fed to ffmpeg's stdin.

    Args:
        in_filename: Path of the source audio file, if reading from disk.
        in_content: Raw source audio bytes, if already in memory.

    Returns:
        The converted PCM bytes, or None if no input was provided.
    """
    ff = FFmpeg(
        executable=os.path.join(settings.AudioTools.DIRECTORY, 'ffmpeg'),
        inputs={'pipe:0': None},
        outputs={'pipe:1': ['-f', 's16le', '-acodec', 'pcm_s16le', '-ar', '16000']}
    )
    stdout = None
    if in_filename:
        # Fix: use a context manager so the source file handle is closed
        # (the previous open(...).read() leaked it).
        with open(in_filename, 'br') as source:
            data = source.read()
        stdout, stderr = ff.run(input_data=data,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    elif in_content:
        stdout, stderr = ff.run(input_data=in_content,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    return stdout
def updateProfileVideoPicture(self, path):
    """Upload *path* as the profile video and set its 2-second frame as the
    still profile picture.

    Raises:
        Exception: if ffmpy is not installed, or the video upload is rejected.
    """
    # Fix: the original wrapped the whole body in `try/except:` and reported
    # EVERY failure (including its own explicit upload-failure exception) as
    # a missing-dependency error. Only an import failure means that.
    try:
        from ffmpy import FFmpeg
    except ImportError:
        raise Exception('You should install FFmpeg and ffmpy from pypi')
    files = {'file': open(path, 'rb')}
    data = {'params': self.genOBSParams({'oid': self.profile.mid,'ver': '2.0','type': 'video','cat': 'vp.mp4'})}
    r_vp = self.server.postContent(self.server.LINE_OBS_DOMAIN + '/talk/vp/upload.nhn', data=data, files=files)
    if r_vp.status_code != 201:
        raise Exception('Update profile video picture failure.')
    path_p = self.genTempFile('path')
    # Grab a single frame at t=2s to use as the still picture.
    ff = FFmpeg(inputs={'%s' % path: None},
                outputs={'%s' % path_p: ['-ss', '00:00:2', '-vframes', '1']})
    ff.run()
    self.updateProfilePicture(path_p, 'vp')
def __init__(self, source_path, compress_quality, flip_flag=False):
    """Extract every frame of *source_path* as JPEGs into a fresh temp dir.

    Args:
        source_path: Video file to decode.
        compress_quality: User-facing quality 1..95 (higher = better).
        flip_flag: When True, rotate frames 180° (two transposes).
    """
    # translate inversed range 1:95 to 2:32
    translated_quality = 96 - compress_quality
    translated_quality = round((((translated_quality - 1) * (31 - 2)) / (95 - 1)) + 2)
    self.output = tempfile.mkdtemp(prefix='cvat-', suffix='.data')
    target_path = os.path.join(self.output, '%d.jpg')
    # Fix: apply the computed quality. The original appended the hard-coded
    # debug value '2' (with `str(translated_quality)` left commented out),
    # making `compress_quality` a no-op; the sibling implementation of this
    # class uses str(translated_quality).
    output_opts = '-start_number 0 -b:v 10000k -vsync 0 -an -y -q:v ' + str(translated_quality)
    # -vf "select=not(mod(n\,2))" to downsample by factor of 2
    if flip_flag:
        output_opts += ' -vf "transpose=2,transpose=2"'
    ff = FFmpeg(
        inputs = {source_path: None},
        outputs = {target_path: output_opts})
    ff.run()
def make_encoding(self):
    """Encode (or snapshot) self.nom_video according to self.label.

    Video labels (1080p/720p/480p/240p) re-encode to H.264 at a fixed
    bitrate; "miniature"/"poster" grab a single 1280x720 frame at the
    configured timestamp instead.

    Returns:
        1 on success, 0 if ffmpeg failed, None if self.is_ok is falsy.
    """
    if (not self.is_ok):
        return
    ext = ".mp4"
    pathIn = path_video_in + self.nom_video + ext
    commande = ""
    path_vid = path_video_in
    # switch label and adapt path and command
    if (self.label == "1080p"):
        commande = ' -c:v libx264 -profile:v high -preset veryslow -b:v 10000k -s hd1080 -r 25 -b:a 256k'
        path_vid = path_video_in + "1080p/"
    elif (self.label == "720p"):
        commande = ' -c:v libx264 -profile:v high -preset veryslow -b:v 6500k -s hd720 -r 25 -b:a 128k'
        path_vid = path_video_in + "720p/"
    elif (self.label == "480p"):
        commande = ' -c:v libx264 -profile:v high -preset veryslow -b:v 2000k -s hd480 -r 25 -b:a 128k'
        path_vid = path_video_in + "480p/"
    elif (self.label == "240p"):
        commande = ' -c:v libx264 -profile:v high -preset veryslow -b:v 1000k -s 426x240 -r 25 -b:a 128k'
        path_vid = path_video_in + "240p/"
    elif (self.label == "miniature"):
        # Single-frame grab at 0H:MM:SS (hour is zero-padded by the literal '0').
        commande = " -ss 0{}:{}:{} -vframes 1 -s 1280x720".format(
            self.h_pict, self.min_pict, self.time_pict)
        path_vid = path_video_in + "miniature/"
        ext = "Pict.jpg"
    elif (self.label == "poster"):
        commande = " -ss 0{}:{}:{} -vframes 1 -s 1280x720".format(
            self.h_pict, self.min_pict, self.time_pict)
        path_vid = path_video_in + "poster/"
        ext = "Pict.jpg"
    else:
        print('ERROR, ' + str(self.nom_video) + ' is using unkown label :' + str(self.label))
        # NOTE(review): exit() kills the whole process on a bad label —
        # confirm this is intended rather than returning 0.
        exit()
    pathOut = path_vid + self.nom_video + ext
    ff = FFmpeg(executable=path_ffmpeg, inputs={pathIn: None}, outputs={pathOut: commande})
    print("commande : " + commande)
    print(ff.cmd)
    try:
        ff.run()
        return 1
    except:
        # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit;
        # narrowing to Exception would be safer.
        print("error : [ff.run()]", sys.exc_info()[0])
        return 0
def processAV(video, audio, output_filename):
    """Mux an audio track and a video track into a single output file,
    re-encoding the video as MPEG-4 and stopping at the shorter stream."""
    out_args = ['-y', '-vcodec', 'mpeg4', '-qscale', '5', '-shortest']
    muxer = FFmpeg(
        inputs={video: None, audio: None},
        outputs={output_filename: out_args},
    )
    print(muxer.cmd)
    muxer.run()
def __audio_to_video(self, filename=None, **kwargs):
    """Render an audio file into a 480p mp4 by looping a static logo image.

    Args:
        filename: Path of the source audio file; the output is the same
            path with an .mp4 extension.
        **kwargs: Unused; accepted for caller compatibility.
    """
    # TODO: Don't encode if already exists
    title = os.path.splitext(filename)[0]
    video_filename = '{}.mp4'.format(title)
    ff = FFmpeg(
        inputs={filename: None, self.logo: '-loop 1 -framerate 2'},
        outputs={video_filename: '-vf scale=854:480 -c:v libx264 -preset slow -tune stillimage -crf 18 -c:a copy -shortest -pix_fmt yuv420p'}
    )
    # Fix: `print ff.cmd` is Python 2-only syntax and a SyntaxError under
    # Python 3; the call form behaves identically on both.
    print(ff.cmd)
    ff.run()
    if self.delete_audio:
        os.remove(filename)
def convert_video_to_frames(filename):
    '''Splits the video up into 5 frames per second and saves images in directory

    :param filename: Name of the video to be processed
    :return: directory created
    '''
    video_dir, _ = os.path.splitext(filename)
    if not os.path.exists(video_dir):
        os.makedirs(video_dir)
    extractor = FFmpeg(
        inputs={filename: None},
        outputs={video_dir + '/%05d.jpg': '-vf fps=5 -nostats -loglevel 0'},
    )
    extractor.run()
    return video_dir
def make_preview(song_id, song_type, preview):
    """Cut a low-bitrate preview.mp3 for a song, starting *preview* seconds in.

    Returns the preview path on success, False when the song has no preview
    offset, and None when the source is missing or the preview already exists.
    """
    song_path = 'public/songs/%s/main.mp3' % song_id
    prev_path = 'public/songs/%s/preview.mp3' % song_id
    # Guard clauses: need an existing source and no pre-existing preview.
    if not os.path.isfile(song_path) or os.path.isfile(prev_path):
        return None
    if not preview or preview <= 0:
        print('Skipping #%s due to no preview' % song_id)
        return False
    print('Making preview.mp3 for song #%s' % song_id)
    clipper = FFmpeg(
        inputs={song_path: '-ss %s' % preview},
        outputs={prev_path: '-codec:a libmp3lame -ar 32000 -b:a 92k -y -loglevel panic'},
    )
    clipper.run()
    return prev_path
def downloadStreams(vidURL, audURL, startPoint, duration, filename):
    """
    This downloads the video and audio streams directly, and only
    downloads the output to the duration specified. Because youtube-dl
    doesn't have a built in function for that, the full video would be
    downloaded every time. With FFmpeg, we only download part of the video.

    Args:
        vidURL / audURL: Direct stream URLs for the video and audio tracks.
        startPoint: Seek position passed to -ss for both streams.
        duration: Clip length passed to -t for both streams.
        filename: Output file path.
    """
    ff = FFmpeg(
        inputs={
            f"{vidURL}": ['-ss', f"{startPoint}", '-t', f"{duration}"],
            f"{audURL}": ['-ss', f"{startPoint}", '-t', f"{duration}"]
        },
        # Fix: write to the caller-supplied *filename*; the output path was
        # hard-coded and the parameter silently ignored.
        outputs={filename: ['-map', '0:v:0', '-map', '1:a:0', '-y']})
    ff.run()
def splitLine(line, videoid, personname):
    """Parse a 'start,length,name' CSV line and cut that clip out of the
    person's full video, skipping clips that already exist."""
    params = line.rstrip().split(',')
    if len(params) != 3:
        return
    start, length, clip_name = params
    # Find the out file name.
    outName = os.path.join(personname, 'word_clips', clip_name)
    if os.path.isfile(outName):
        return
    clipper = FFmpeg(
        inputs={os.path.join(personname, 'fullVideos', videoid + '.mp4'): None},
        outputs={outName: '-ss ' + start + ' -t ' + length},
    )
    sys.stdout.write('Generating ' + outName + '\n')
    clipper.run()
def main(rtmp, url, name):
    """Relay an RTMP source to *url* as FLV forever, logging and restarting
    the stream-copy whenever it stops or fails."""
    relay = FFmpeg(inputs={rtmp: None}, outputs={url: '-c copy -f flv '})
    print(relay.cmd)
    while True:
        try:
            relay.run()
        except Exception as e:
            print(e)
        # Status banner printed after every run, successful or not.
        print("——————————————————————————————————————————")
        print("rtmp:" + rtmp + " name:" + name)
        print("——————————————————————————————————————————")
def normalize(infile, bitrate):
    """Down-mix *infile* to mono at *bitrate* into a 'normalized' sibling folder.

    Args:
        infile: Source audio file path.
        bitrate: Audio bitrate string passed to ffmpeg's -ab option.
    """
    filename = os.path.splitext(os.path.split(infile)[1])[0]
    # Fix: build paths with os.path.join instead of hard-coded backslashes;
    # the original '%s\%s_normalized.mp3' used an invalid '\%' escape and
    # produced broken paths on non-Windows systems.
    filepath = os.path.join(os.path.dirname(infile), "normalized")
    try:
        os.mkdir(filepath)
    except OSError:
        pass  # directory already exists
    outfile = os.path.join(filepath, '%s_normalized.mp3' % filename)
    ff = FFmpeg(inputs={infile: None},
                outputs={outfile: '-ac 1 -ab %s' % (bitrate)})
    ff.run()
    return print("File Normalized")
def save_images_to_folder(filePath, student):
    """Extract 5 fps frames from an uploaded video, push artifacts to S3,
    register the student's photo if absent, and queue a model version.

    Args:
        filePath: Local path of the uploaded video (deleted afterwards).
        student: Object exposing at least an `andrewid` attribute.
    """
    logging.debug("{}: Begin saving images".format(student.andrewid))
    if not os.path.exists('./images/{}'.format(student.andrewid)):
        os.makedirs('./images/{}'.format(student.andrewid))
    # Unique prefix so repeated uploads for a student don't collide.
    prefix = str(uuid.uuid4())
    # Uploading the video to s3 bucket (for future use)
    s3.upload_file(filePath, bucket_name, 'videos/{}/{}'.format(student.andrewid, filePath.split("/")[-1]))
    # /app/images/
    ff = FFmpeg(inputs={filePath: None}, outputs={"./images/{}/image_{}_%d.jpg".format(student.andrewid, prefix): ['-y', '-vf', 'fps=5']})
    logging.debug(ff.cmd)
    ff.run()
    # remove .DS_Store; gets autogenerated
    # frequently
    dsStore = os.path.isfile('./images/.DS_Store')
    if dsStore:
        os.remove('./images/.DS_Store')
    time.sleep(5)  # No wait function for ffmpy
    os.remove(filePath)  # Remove the video file locally, as it is not needed
    # upload first photo to S3
    logging.debug(
        "{}: Attempting to upload to s3 bucket".format(student.andrewid))
    # for filename in os.listdir('./images/{}/'.format(student.andrewid)):
    s3.upload_file('./images/{}/image_{}_1.jpg'.format(student.andrewid, prefix), bucket_name, 'images/{}/image_{}_1.jpg'.format(student.andrewid, prefix))
    logging.debug("{}: POST request for photos".format(student.andrewid))
    r = requests.get(
        'https://attendify.herokuapp.com:443/photos?for_andrew_id={}'.format(student.andrewid))
    # '[]' means no photo record exists yet for this student.
    if r.text == '[]':
        r = requests.post("https://attendify.herokuapp.com:443/photos", data={'andrew_id': student.andrewid, 'photo_url': 'https://s3.amazonaws.com/{}/images/{}/image_{}_1.jpg'.format(bucket_name, student.andrewid, prefix)})
    # get the last trained model
    model = Model.query.order_by(Model.version.desc()).first()
    if model is not None:
        # increment the version
        queue.put(model.version + 1)
    else:
        # create first version
        queue.put(1)
def mux_audio(self):
    """ Mux audio
        ImageIO is a useful lib for frames > video as it also packages the
        ffmpeg binary however muxing audio is non-trivial, so this is done
        afterwards with ffmpy. A future fix could be implemented to mux
        audio with the frames.

        Copies the video stream from the temp file and the audio stream
        from the original source into the final output; on repeated
        failure the un-muxed temp file is promoted to the output name.
    """
    if self.config["skip_mux"]:
        logger.info("Skipping audio muxing due to configuration settings.")
        self._rename_tmp_file()
        return
    logger.info("Muxing Audio...")
    if self.frame_ranges is not None:
        logger.warning(
            "Muxing audio is not currently supported for limited frame ranges."
            "The output video has been created but you will need to mux audio "
            "yourself")
        self._rename_tmp_file()
        return
    exe = im_ffm.get_ffmpeg_exe()
    # Input 0: freshly written video (temp); input 1: original source (audio).
    inputs = OrderedDict([(self.video_tmp_file, None), (self.source_video, None)])
    # NOTE(review): "-c: copy" (with a space) looks like a typo for
    # "-c copy" / "-c:v copy" — confirm ffmpeg accepts it as intended.
    outputs = {self.video_file: "-map 0:v:0 -map 1:a:0 -c: copy"}
    ffm = FFmpeg(executable=exe,
                 global_options="-hide_banner -nostats -v 0 -y",
                 inputs=inputs,
                 outputs=outputs)
    logger.debug("Executing: %s", ffm.cmd)
    # Sometimes ffmpy exits for no discernible reason, but then works on a later attempt,
    # so take 5 shots at this
    attempts = 5
    for attempt in range(attempts):
        logger.debug("Muxing attempt: %s", attempt + 1)
        try:
            ffm.run()
        except FFRuntimeError as err:
            logger.debug("ffmpy runtime error: %s", str(err))
            if attempt != attempts - 1:
                continue  # retry
            # Final attempt failed: fall back to the un-muxed video.
            logger.error(
                "There was a problem muxing audio. The output video has been "
                "created but you will need to mux audio yourself either with the "
                "EFFMpeg tool or an external application.")
            os.rename(self.video_tmp_file, self.video_file)
        # Reached on success or after the fallback rename above.
        break
    logger.debug("Removing temp file")
    if os.path.isfile(self.video_tmp_file):
        os.remove(self.video_tmp_file)
def test_redirect_to_devnull():
    """run() returns (None, None) when both streams go to a real file object."""
    global_options = "--stdin none --stdout oneline --stderr multiline --exit-code 0"
    ff = FFmpeg(global_options=global_options)
    # Fix: close the devnull handle via a context manager; it was leaked.
    with open(os.devnull, "wb") as devnull:
        stdout, stderr = ff.run(stdout=devnull, stderr=devnull)
    assert stdout is None
    assert stderr is None
def __init__(self, source_path, compress_quality, flip_flag=False):
    """Decode *source_path* into numbered JPEG frames inside a fresh
    cvat temp directory, at a quality derived from *compress_quality*."""
    # Map the inverted user-facing 1:95 scale onto ffmpeg's q:v range.
    inverted = 96 - compress_quality
    quality = round((((inverted - 1) * (31 - 2)) / (95 - 1)) + 2)
    self.output = tempfile.mkdtemp(prefix='cvat-', suffix='.data')
    frame_pattern = os.path.join(self.output, '%d.jpg')
    opts = '-start_number 0 -b:v 10000k -vsync 0 -an -y -q:v ' + str(
        quality)
    if flip_flag:
        # Two transposes == 180 degree rotation.
        opts += ' -vf "transpose=2,transpose=2"'
    job = FFmpeg(inputs={source_path: None}, outputs={frame_pattern: opts})
    slogger.glob.info("FFMpeg cmd: {} ".format(job.cmd))
    job.run()
def process_audio(m):
    """Telegram handler: download a voice note, transcode it to MP3 via
    ffmpeg pipes, send it to Wit.ai for speech-to-text, and reply with the
    transcript plus both audio versions.
    """
    cid = m.chat.id
    # Respect chat black/white lists before doing any work.
    if chk_list(Blacklist, cid) or not chk_list(Whitelist, cid):
        return
    bot.send_message(cid, 'Audio received')
    fileid = m.voice.file_id
    audiofile_info = bot.get_file(fileid)
    # Audio file downloaded for further debug, if needed. FFmpeg can handle directly the download, and process in streaming.
    r_file = requests.get('https://api.telegram.org/file/bot%s/%s' % (API_TOKEN, audiofile_info.file_path))
    if r_file.status_code == 200:
        ff = FFmpeg(inputs={'pipe:0': '-hide_banner -loglevel panic'},
                    outputs={'pipe:1': '-f mp3 -c:a libmp3lame'})
        try:
            ff.cmd
            audio_mp3, stderr = ff.run(input_data=r_file.content, stdout=subprocess.PIPE)
            wit_response = wit_client.speech(audio_mp3, None, {'Content-Type': 'audio/mpeg3'})
            if '_text' in wit_response:
                bot.send_message(cid, wit_response['_text'])
        except FFRuntimeError as e:
            print(str(e))
        # Just for debug purposes
        # NOTE(review): if ff.run() raised above, `audio_mp3` is unbound here
        # and send_audio will fail with NameError — confirm intended flow.
        bot.send_voice(cid, r_file.content, caption='Original audio')
        bot.send_audio(cid, audio_mp3, caption='MP3 audio')
def merge_media(self, to_merged_medias_lists):
    """Concatenate downloaded media segments into one mp4 via stream copy.

    The output name is the md5 of the segments' shared merge signature;
    any stale output is removed first. Returns the merged file path.
    """
    merged_absolue_path = ''.join([
        ConfigInit().get_download_path(),
        hash_md5(to_merged_medias_lists[0]['merged_sign']),
        '.mp4'
    ])
    # TODO: force ffmpeg merge handling
    if exist_file(merged_absolue_path):
        del_file(merged_absolue_path)
    segment_inputs = {}
    for segment in to_merged_medias_lists:
        segment_inputs[segment['absolute_path']] = ''
    FFmpeg(inputs=segment_inputs,
           outputs={merged_absolue_path: '-c copy'}).run()
    return merged_absolue_path
def __run_ffmpeg(exe=im_ffm.get_ffmpeg_exe(), inputs=None, outputs=None):
    """Run ffmpeg, ignoring user interruptions (SIGINT or exit code 255)."""
    logger.debug("Running ffmpeg: (exe: '%s', inputs: %s, outputs: %s", exe, inputs, outputs)
    proc = FFmpeg(executable=exe, inputs=inputs, outputs=outputs)
    try:
        proc.run(stderr=subprocess.STDOUT)
    except KeyboardInterrupt:
        pass  # Do nothing if voluntary interruption
    except FFRuntimeError as ffe:
        # After receiving SIGINT ffmpeg has a 255 exit code
        if ffe.exit_code != 255:
            raise ValueError("An unexpected FFRuntimeError occurred: "
                             "{}".format(ffe))
    logger.debug("ffmpeg finished")
def save_mp4(session_info, base_path):
    """Ensure the session's media file is an mp4, converting (and renaming to
    a generated name) when it is not, then record its web path."""
    fileName = session_info['fileName']
    if os.path.splitext(fileName)[-1].lower() != ".mp4":
        mp4_fileName = '{0}{1}{2}'.format('zz', generate_name(8), '.mp4')
        source = os.path.join(base_path, fileName)
        target = os.path.join(base_path, mp4_fileName)
        FFmpeg(inputs={source: None}, outputs={target: None}).run()
        os.remove(source)  # original is replaced by the converted file
        fileName = mp4_fileName
    session_info['path'] = 'video/' + fileName
def gif_to_video(sourcefile_id, file_key, input_file, format):
    """Transcode a GIF into mp4 or webm, upload the result to S3 and set the
    sourcefile's format flag.

    Args:
        sourcefile_id: DB id of the sourcefile row (for dimensions + flag).
        file_key: S3 object key suffix for the uploaded video.
        input_file: Local path of the source .gif.
        format: "mp4" or "webm".
    """
    from models import Sourcefile
    result = None
    options = None
    if format == "mp4":
        options = "-c:v libx264 -acodec none -crf 23 -profile:v baseline -level 3.0 -pix_fmt yuv420p"
        db = db_connect()
        sourcefile = db.get("SELECT width, height FROM sourcefile WHERE id=%s", sourcefile_id)
        db.close()
        if not sourcefile:
            return
        width = int(float(sourcefile["width"]))
        height = int(float(sourcefile["height"]))
        # scale if necessary: h264 requires even dimensions
        if width % 2 == 1:
            if height % 2 == 1:
                options += " -vf scale=%s:%s" % (str(width - 1), str(height - 1))
            else:
                options += " -vf scale=-2:ih"
        else:
            if height % 2 == 1:
                options += " -vf scale=iw:-2"
    elif format == "webm":
        options = "-c:v libvpx -auto-alt-ref 0 -crf 23 -b:v 2M -acodec none"
    output_file = input_file.replace(".gif", ".%s" % format)
    # now, transcode...
    try:
        ff = FFmpeg(inputs={input_file: None}, outputs={output_file: options})
        logger.info("invoking transcode operation: %s" % ff.cmd)
        result = ff.run()
        logger.info("-- transcode completed")
        # upload transcoded file to S3, then flag the sourcefile
        bucket = S3Bucket()
        key = Key(bucket)
        key.key = "%s/%s" % (format, file_key)
        logger.info("uploading transcoded video: %s" % file_key)
        key.set_contents_from_filename(output_file)
        logger.info("-- upload complete")
        db = db_connect()
        db.execute("UPDATE sourcefile SET %s_flag=1 WHERE id=%%s" % format, sourcefile_id)
        db.close()
    except Exception as ex:
        logger.exception("error transcoding %s - %s" % (sourcefile_id, input_file))
        raise ex
    finally:
        # Fix: the transcode can fail before the output file exists; an
        # unconditional unlink then raised FileNotFoundError from the
        # finally block, masking the original exception.
        if os.path.exists(output_file):
            os.unlink(output_file)
def convertAudio(file, output_extension='.wav'):
    """Convert an audio file to *output_extension*, returning the new path;
    a file already in that format is returned unchanged."""
    name, ext = os.path.splitext(file)
    if ext == output_extension:
        return file
    outFile = name + output_extension
    converter = FFmpeg(inputs={file: None}, outputs={outFile: None},
                       global_options="-y")
    converter.cmd  # builds the command string; kept from the original
    converter.run()
    return outFile
def test_redirect_to_devnull():
    """run() returns (None, None) when both streams go to a real file object."""
    global_options = '--stdin none --stdout oneline --stderr multiline --exitcode 0'
    ff = FFmpeg(global_options=global_options)
    # Fix: close the devnull handle via a context manager; it was leaked.
    with open(os.devnull, 'wb') as devnull:
        stdout, stderr = ff.run(stdout=devnull, stderr=devnull)
    assert stdout is None
    assert stderr is None
def write_video(self):
    """Converts avi file used by openCV to compressed mp4 so it can be seen in web browsers"""
    self.cap.release()
    self.out.release()
    cv2.destroyAllWindows()
    # Fix: the bare `except: pass` also swallowed SystemExit and
    # KeyboardInterrupt; only a missing/locked file is expected here.
    try:
        os.remove('flaskapp/static/final_video.mp4')
    except OSError:
        pass  # nothing to remove on the first run
    ff = FFmpeg(inputs={'flaskapp/static/detected.avi': None},
                outputs={
                    'flaskapp/static/final_video.mp4': '-an -vcodec libx264 -crf 23'
                })
    ff.run()
    return
def downLoader(self, url):
    """Resolve a post URL to its playable media URL and download it with
    ffmpeg as '<title>.mp4'."""
    post_id = urlparse(url).path.split('/')[2]
    m3u8_url = "https://api.ruguoapp.com/1.0/mediaMeta/play?type=ORIGINAL_POST&id=" + post_id
    # 2020-09-08
    ret = requests.get(m3u8_url, headers=self.headers)
    meta = json.loads(BeautifulSoup(ret.text, 'lxml').p.string)
    if not meta['url']:
        print("解析视频链接错误,请重试")
        return jk.hello()
    FFmpeg(inputs={meta['url']: None},
           outputs={self.title + '.mp4': None}).run()
def generate_thumbnail(self, video_path, output_image_path):
    """Write a quarter-width, timestamped thumbnail for *video_path* into the
    web server's video directory, unless one already exists and
    OVERWRITE_THUMBNAIL is off.
    """
    head, image_filename = os.path.split(output_image_path)
    # Thumbnails always land in the web server dir, regardless of the
    # directory part of output_image_path.
    image_output_path = os.path.join(self.web_server_video_dir, image_filename)
    if os.path.exists(image_output_path) and not self.OVERWRITE_THUMBNAIL:
        print(image_filename + ' thumnail already exists.')
    else:
        print('Generating thumbnail at ' + output_image_path)
        try:
            # Overlay the file's last-modified time on the frame.
            last_mod_time = time.ctime(os.path.getmtime(video_path))
            last_mod_time = last_mod_time.replace(
                ':', '\\\\:')  # Handle escaping for ffmpeg text
            ffmpeg_options = [
                '-loglevel', 'panic', '-y', '-ss', '00:00:5', '-vframes', '1',
                '-vf',
                "scale=iw/4:-1, drawtext=fontfile=/Library/Fonts/Verdana.ttf: text=" + last_mod_time + ": r=25: x=(w-tw)/2: y=h-(2*lh): fontsize=32: fontcolor=white: ",
                '-an'
            ]
            ff = FFmpeg(inputs={video_path: None},
                        outputs={image_output_path: ffmpeg_options})
            # print (ff.cmd)
            ffmpeg_result = ff.run()
            # print(ffmpeg_result)
        except Exception as e:
            # Best-effort: a failed thumbnail only logs, never raises.
            print(e)
def run(self):
    """Encode the thread's input video into 720p/2M and 480p/1M variants,
    reporting wall-clock time for the whole job."""
    encoder = FFmpeg(
        executable='../ffmpeg',  # Your path of FFmpeg executable file
        inputs={self.vid_input: ['-y', '-loglevel', '0']},
        outputs={
            self.output1: ['-s', 'hd720', '-b:v', '2M', '-framerate', '30'],
            self.output2: ['-s', 'hd480', '-b:v', '1M', '-framerate', '30'],
        })
    print("Start encoding video" + str(self.threadID) + " ...")
    started = time.time()
    encoder.run()
    elapsed = time.time() - started
    print("Finished encoding video" + str(self.threadID) + " in " +
          str(elapsed) + "S!")
def __run_ffmpeg(exe="ffmpeg", inputs=None, outputs=None):
    """Run ffmpeg; a SIGINT-style exit (code 255) or a keyboard interrupt
    is treated as a normal stop."""
    logger.debug("Running ffmpeg: (exe: '%s', inputs: %s, outputs: %s",
                 exe, inputs, outputs)
    try:
        FFmpeg(executable=exe, inputs=inputs, outputs=outputs).run(
            stderr=subprocess.STDOUT)
    except FFRuntimeError as ffe:
        # After receiving SIGINT ffmpeg has a 255 exit code
        if ffe.exit_code != 255:
            raise ValueError("An unexpected FFRuntimeError occurred: "
                             "{}".format(ffe))
    except KeyboardInterrupt:
        pass  # Do nothing if voluntary interruption
    logger.debug("ffmpeg finished")
def convert(file, indir, outdir, quality):
    """Transcode one .wav from *indir* into a libvorbis .ogg in *outdir*
    at the requested quality."""
    print('Converting:', file)
    infile = os.path.join(os.path.abspath(indir), file)
    outfile = os.path.join(os.path.abspath(outdir),
                           file.replace('.wav', '.ogg'))
    encode_args = ['-codec:a', 'libvorbis', '-qscale:a', str(quality), '-y']
    encoder = FFmpeg(inputs={infile: None}, outputs={outfile: encode_args})
    print(encoder.cmd)
    encoder.run()
def transcode(source):
    """Remux *source*'s audio into a container-appropriate codec (vorbis for
    VP9/webm, aac for mp4), copying the video stream untouched.

    Files not already on the storage mount are written to a temp dir first
    and then moved next to the source. On failure the source is renamed
    with a '.failed' suffix and None is returned.

    Returns:
        Path of the transcoded file, or None on error.
    """
    try:
        sourceDir = os.path.dirname(source)
        sourceFile = os.path.basename(source)
        sourceFileNoExt = os.path.splitext(sourceFile)[0]
        ext = '.mp4'
        # NOTE(review): codec detection is by substring of the *path* —
        # a file merely named "...VP9..." is treated as VP9.
        if 'VP9' in source:
            ext = '.webm'
        targetFile = sourceFileNoExt + ext
        tempFull = '/mnt/sb/mediapipe/tmp/' + targetFile
        logging.debug(prelog + 'tempFull: ' + tempFull)
        targetFull = sourceDir + '/' + targetFile
        targetTranscode = tempFull
        isMnt = False
        # direct transcode if from mnt storage box
        if 'mnt/sb' in source:
            targetTranscode = targetFull
            isMnt = True
        logging.debug(prelog + 'targetFull: ' + targetFull)
        if ext == '.webm':
            logging.info('VP9 detected. Audio to libvorbis and webm container')
            ff = FFmpeg(inputs={source: None},
                        outputs={
                            targetTranscode: '-c:a libvorbis -c:v copy -y -loglevel info'
                        })
        else:
            logging.debug(
                'Assume video is h264. Audio to aac and mp4 container')
            ff = FFmpeg(inputs={source: None},
                        outputs={
                            targetTranscode: '-c:a aac -c:v copy -y -loglevel info'
                        })
        # ff.cmd
        ff.run()
        # Non-mount targets were written to temp; move them into place.
        if isMnt == False:
            shutil.move(tempFull, targetFull)
        return targetFull
    except:
        # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit.
        logging.exception(prelog)
        # Quarantine the source so it is not retried endlessly.
        if os.path.isfile(source):
            shutil.move(source, source + '.failed')
        pass
        return None
def post(self, *args, **kwargs):
    """Tornado handler (Python 2): accept uploaded mp4 files, extract an mp3
    track from each, cut a [start_time, start_time+long_time] excerpt, and
    write the excerpt's path back to the client.
    """
    try:
        filesDict = self.request.files
        start_time = self.get_argument('start_time', None)
        long_time = self.get_argument('long_time', None)
        # self.write('{}::{}'.format(start_time, long_time))
        for inputname in filesDict:
            # Outer loop: one entry per form field name (the <input> name).
            # The matching value holds the uploaded file objects.
            http_file = filesDict[inputname]
            for fileObj in http_file:
                print fileObj.filename
                # Inner loop: each complete uploaded file object.
                # Save under the local data/ folder using the original name.
                # filePath=os.path.join(os.path.dirname(__file__),fileObj.filename)
                filePath = 'data/{}'.format(fileObj.filename)
                with open(filePath, 'wb') as f:
                    f.write(fileObj.body)
                changefile = filePath
                # NOTE(review): str.replace swaps EVERY 'mp4' occurrence,
                # including ones inside the name itself — confirm acceptable.
                outputfile = changefile.replace('mp4', 'mp3')
                ff = FFmpeg(
                    inputs={changefile: None},
                    # outputs={outputfile: '-vn -ar 44100 -ac 2 -ab 192 -f wav'},
                    outputs={outputfile: ' -f mp3'})
                print outputfile
                a = ff.cmd
                # NOTE(review): run()'s first positional parameter is
                # input_data, not the command — passing `a` here looks wrong;
                # confirm against the ffmpy version in use.
                ff.run(a)
                outputfile_s = outputfile.replace('.mp3', '_a.mp3')
                f = FFmpeg(
                    inputs={changefile: None},
                    # outputs={outputfile: '-vn -ar 44100 -ac 2 -ab 192 -f wav'},
                    outputs={
                        outputfile_s: ' -ss {} -t {}'.format(start_time, long_time)
                    })
                c = f.cmd
                f.run(c)
                # b = outputfile.split('/home/zhangjin')[-1]
                self.write(outputfile_s)
                # self.write('http://127.0.0.1:9501{}'.format(str(b)))
                # self.write('http://47.104.190.237:9501{}'.format(str(b)))
        self.finish()
    except Exception as e:
        error_message = "{}".format(e)
        self.write(error_message)
        self.finish()
def convert_ffmpeg(infile):
    """Transcode *infile* to Ogg/Vorbis entirely through pipes and return the
    encoded bytes.

    Args:
        infile: Path of the source audio file.

    Returns:
        The Ogg/Vorbis output captured from ffmpeg's stdout.
    """
    ff = FFmpeg(
        inputs={'pipe:0': None},
        outputs={'pipe:1': '-loglevel panic -f ogg -acodec libvorbis'}
    )
    # Fix: read the source inside a context manager; the previous
    # open(...).read() leaked the file handle.
    with open(infile, 'rb') as source:
        file_content = source.read()
    stdout, stderr = ff.run(input_data=file_content, stdout=subprocess.PIPE)
    return stdout
def test_input():
    """input_data is piped to the child's stdin and echoed back on stdout."""
    ff = FFmpeg(
        global_options='--stdin pipe --stdout oneline --stderr multiline --exitcode 0'
    )
    out, err = ff.run(input_data=b'my input data',
                      stdout=subprocess.PIPE,
                      stderr=subprocess.PIPE)
    assert out == b'my input data\nThis is printed to stdout'
    assert err == b'These are\nmultiple lines\nprinted to stderr'
def test_non_zero_exitcode_no_stdout_and_stderr():
    """A failing run with silent streams raises with empty captured output."""
    options = "--stdin none --stdout none --stderr none --exit-code 42"
    with pytest.raises(FFRuntimeError) as exc_info:
        FFmpeg(global_options=options).run(stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
    error = exc_info.value
    assert error.cmd == ("ffmpeg --stdin none --stdout none --stderr none --exit-code 42")
    assert error.exit_code == 42
    assert error.stdout == b""
    assert error.stderr == b""
    assert str(error) == (
        "`ffmpeg --stdin none --stdout none --stderr none --exit-code 42` "
        "exited with status 42\n\n"
        "STDOUT:\n"
        "\n\n"
        "STDERR:\n"
    )
def test_non_zero_exitcode_no_stdout_and_stderr():
    """A failing run with silent streams raises with empty captured output."""
    options = '--stdin none --stdout none --stderr none --exitcode 42'
    with pytest.raises(FFRuntimeError) as exc_info:
        FFmpeg(global_options=options).run(stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
    error = exc_info.value
    assert error.cmd == (
        "ffmpeg --stdin none --stdout none --stderr none --exitcode 42")
    assert error.exit_code == 42
    assert error.stdout == b''
    assert error.stderr == b''
    assert str(error) == (
        "'ffmpeg --stdin none --stdout none --stderr none --exitcode 42' "
        'exited with status 42\n\n'
        'STDOUT:\n'
        '\n\n'
        'STDERR:\n')
def convertVideo(video):
    """Explode a video into 30 fps JPEG frames inside its album's images
    folder, then zip that folder."""
    stem = video.getFileName()
    frames_dir = ("media/albums/" + video.getAlbumName() +
                  "/data/images/" + stem)
    os.mkdir(frames_dir)
    extractor = FFmpeg(
        inputs={"media/videos/" + stem + ".mp4": None},
        outputs={frames_dir + "/" + stem + "%d.jpg": ['-vf', 'fps=30']},
    )
    extractor.run()
    zip_folder(frames_dir, frames_dir + ".zip")
def merge_media(self, to_merged_medias_lists):
    """Stream-copy concat of downloaded segments into '<first>_merged.mp4'.

    Returns the merged path, or False when that file already exists.
    """
    # Merged file name: first segment's name (sans .mp4) + '_merged'.
    first_path = to_merged_medias_lists[0]['absolute_path']
    merged_absolue_path = first_path[:-len('.mp4')] + '_merged' + '.mp4'
    # TODO: force ffmpeg merge handling
    if exist_file(merged_absolue_path):
        logging.debug('%s has exist' % merged_absolue_path)
        return False
        # del_file(merged_absolue_path)
    segment_inputs = {
        item['absolute_path']: '' for item in to_merged_medias_lists
    }
    FFmpeg(inputs=segment_inputs,
           outputs={merged_absolue_path: '-c copy'}).run()
    return merged_absolue_path
def create_timelapse(directory, fn_out=None, **kwargs):  # pragma: no cover
    """Create a timelapse

    A timelapse is created from all the jpg images in a given `directory`

    Args:
        directory (str): Directory containing jpg files
        fn_out (str, optional): Full path to output file name, if not
            provided, defaults to `directory` basename.
        **kwargs (dict): Valid keywords: verbose

    Returns:
        str: Name of output file, or None if creation failed
    """
    if fn_out is None:
        head, tail = os.path.split(directory)
        # Fix: the original used `tail is ''` — an identity comparison with a
        # literal (a SyntaxWarning on modern CPython, and not guaranteed to
        # work). An empty tail means the path had a trailing slash.
        if not tail:
            head, tail = os.path.split(head)
        field_name = head.split('/')[-2]
        cam_name = head.split('/')[-1]
        fn_out = '{}/images/timelapse/{}_{}_{}.mp4'.format(
            os.getenv('PANDIR'), field_name, cam_name, tail)
    try:
        ff = FFmpeg(
            global_options='-r 3 -pattern_type glob',
            inputs={'{}/*.jpg'.format(directory): None},
            outputs={fn_out: '-s hd1080 -vcodec libx264'}
        )
        if 'verbose' in kwargs:
            print("Timelapse command: ", ff.cmd)
            ff.run(stdout=None, stderr=None)
        else:
            # Fix: close the devnull handles (they were previously leaked).
            with open(os.devnull, 'w') as out, open(os.devnull, 'w') as err:
                ff.run(stdout=out, stderr=err)
    except Exception as e:
        warn("Problem creating timelapse: {}".format(fn_out))
        fn_out = None
    return fn_out
def test_non_zero_exitcode():
    """A failing run surfaces both captured streams in the raised error."""
    options = "--stdin none --stdout multiline --stderr multiline --exit-code 42"
    with pytest.raises(FFRuntimeError) as exc_info:
        FFmpeg(global_options=options).run(stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
    error = exc_info.value
    assert error.cmd == ("ffmpeg --stdin none --stdout multiline --stderr multiline --exit-code 42")
    assert error.exit_code == 42
    assert error.stdout == b"These are\nmultiple lines\nprinted to stdout"
    assert error.stderr == b"These are\nmultiple lines\nprinted to stderr"
    assert str(error) == (
        "`ffmpeg --stdin none --stdout multiline --stderr multiline --exit-code 42` "
        "exited with status 42\n\n"
        "STDOUT:\n"
        "These are\n"
        "multiple lines\n"
        "printed to stdout\n\n"
        "STDERR:\n"
        "These are\n"
        "multiple lines\n"
        "printed to stderr"
    )
def convert(input, output):
    """Encode *input* to H.264 (medium preset), dropping any audio track.

    Note: the parameter name `input` shadows the builtin; it is kept for
    compatibility with keyword callers.
    """
    encoder = FFmpeg(
        inputs={input: None},
        outputs={output: '-c:v libx264 -preset medium -an '},
    )
    encoder.run()
def test_invalid_executable_path():
    """A nonexistent executable raises FFExecutableNotFoundError naming it."""
    with pytest.raises(FFExecutableNotFoundError) as exc_info:
        FFmpeg(executable="/tmp/foo/bar/ffmpeg").run()
    assert str(exc_info.value) == "Executable '/tmp/foo/bar/ffmpeg' not found"
def test_no_redirection():
    """Without redirection run() captures nothing and returns (None, None)."""
    ff = FFmpeg(
        global_options="--stdin none --stdout oneline --stderr multiline --exit-code 0"
    )
    captured = ff.run()
    assert captured == (None, None)
for i in files: if os.path.isfile(VIDEO_DIRECTORY + i): foundFiles.append(i) else: notFound.append(i) # Keep a set of unique filenames so we don't convert files more than once. unique = list(set(foundFiles)) # Convert to an mpeg format that can be concatted. for i in unique: ff = FFmpeg( inputs={VIDEO_DIRECTORY + i: None}, outputs={VIDEO_DIRECTORY + i + '.ts': '-c copy -f mpegts'} ) ff.run() # Remove the previous output file first. if os.path.isfile('output.mp4'): os.system('rm output.mp4') # Generate the command for concatting the mpegs. inputs = 'concat:' for i in foundFiles: inputs += VIDEO_DIRECTORY + i + '.ts|' inputs = inputs[:-1] + '' ff = FFmpeg( inputs={inputs: None}, outputs={'output.mp4': '-c copy -bsf:a aac_adtstoasc'} ) ff.run() # Clean up the intermediate mpegs. for i in unique:
def test_redirect_to_pipe():
    """Piping both streams makes run() return their captured bytes."""
    opts = "--stdin none --stdout oneline --stderr multiline --exit-code 0"
    out, err = FFmpeg(global_options=opts).run(
        stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    assert out == b"This is printed to stdout"
    assert err == b"These are\nmultiple lines\nprinted to stderr"
def test_input():
    """Bytes fed via input_data are echoed on stdout ahead of the normal output."""
    ff = FFmpeg(
        global_options="--stdin pipe --stdout oneline --stderr multiline --exit-code 0"
    )
    out, err = ff.run(
        input_data=b"my input data",
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    assert out == b"my input data\nThis is printed to stdout"
    assert err == b"These are\nmultiple lines\nprinted to stderr"
def _make(images, scene_duration, dir, ffmpeg, width, height, audio, effect, transition, batch_mode):
    """Build a slideshow video from still images via an ffmpeg filter graph.

    Parameters:
        images: list of image file paths; a falsy value aborts with None.
        scene_duration: seconds each slide stays on screen.
        dir: output directory; when falsy the file lands in the CWD.
        ffmpeg: path to the ffmpeg executable.
        width, height: target frame size; whichever is None is derived
            (aspect-preserving -2, or the module default when both are None).
        audio: when True, a background track is fetched, looped, trimmed
            and faded out to match the video length.
        effect, transition: concatenated to form a key into the animation
            table (e.g. "zoompan" + "slidein" -> "zoompanslidein").
        batch_mode: BatchMode value controlling how the first image of a
            non-initial batch is treated.

    Returns:
        Path of the rendered file ("video.mp4" under *dir*), or None when
        *images* is empty.
    """
    # exit if no images were found
    if bool(images) == False: return None

    # slide duration expressed in frames
    scene_duration_f = scene_duration * FPS
    # NOTE(review): `width/2*2` looks intended to round the dimension down to
    # an even number (libx264 requires even sizes); under Python 3 `/` yields
    # a float, so this presumably relies on Python 2 integer division — TODO
    # confirm which interpreter this targets.
    w = width/2*2 if width != None else -2 if height != None else OUTPUT_VIDEO_WIDTH
    h = height/2*2 if height != None else -2 if width != None else OUTPUT_VIDEO_HEIGHT

    # build the animation dictionary of filters and first slide handling flag;
    # each entry maps "<effect><transition>" -> (filter, preserve_first_slide)
    animations = {
        "zoompan": (
            CombiningFilter(
                [
                    ZoompanEffectFilter(maxzoom = MAX_ZOOM, frames = scene_duration_f),
                    ImageSlideFilter(duration = scene_duration, width = w, height = h)
                ],
                outstreamprefix = "zpaf"),
            False
        ),
        "fadeinout": (
            CombiningFilter([
                    FadeTransitionFilter(transition_duration = TRANSITION_T,
                                         total_duration = scene_duration),
                    ImageSlideFilter(duration = scene_duration, width = w, height = h)
                ],
                outstreamprefix = "faf"),
            False
        ),
        "zoompanfadeinout": (
            CombiningFilter(
                [
                    ZoompanEffectFilter(maxzoom = MAX_ZOOM, frames = scene_duration_f),
                    FadeTransitionFilter(transition_duration = TRANSITION_T,
                                         total_duration = scene_duration),
                    ImageSlideFilter(duration = scene_duration, width = w, height = h)
                ],
                outstreamprefix = "zpfaf"),
            False
        ),
        "slidein": (
            FilterChain(
                [
                    ImageSlideFilter(duration = scene_duration, width = w, height = h),
                    SlideTransitionFilter(transition_duration = TRANSITION_T,
                                          preserve_first = batch_mode != BatchMode.non_initial_batch)
                ]),
            True
        ),
        "zoompanslidein": (
            ZoompanSlideInTransitionFilter(transition_duration = TRANSITION_T,
                                           total_duration = scene_duration,
                                           fps = FPS,
                                           width = w,
                                           height = h,
                                           maxzoom = MAX_ZOOM,
                                           preserve_first = batch_mode != BatchMode.non_initial_batch),
            True
        )
    }
    animationkey = (effect if effect else "") + (transition if transition else "")
    # unknown effect/transition combinations silently fall back to plain slides
    animation = animations[animationkey] if animationkey in animations else None

    # determines if transition is requested and how to interpret the inputs list
    preserve_first_slide = animation[1] if animation else False
    if batch_mode != BatchMode.non_initial_batch:
        slides = images
        lenght_t = scene_duration * len(slides)   # total length in seconds (sic: "lenght")
    elif preserve_first_slide:
        # first image carries over from the previous batch purely as the
        # transition source, so it adds no extra screen time
        slides = images
        lenght_t = scene_duration * (len(slides) - 1)
    else:
        slides = images[1:]
        lenght_t = scene_duration * len(slides)

    # every slide becomes a looped still-image input; insertion order fixes
    # the "%d:v" stream indices used below
    inputs = OrderedDict([(i, "-loop 1") for i in slides])

    # create the video filter chain
    videoseq = FilterChain()
    if animation:
        videoseq.append(animation[0])
    else:
        videoseq.append(ImageSlideFilter(duration = scene_duration, width = w, height = h))
    videoseq.append(ConcatFilter(True, "video"))
    applied_filters = videoseq.generate(["%d:v" % i for (i,x) in enumerate(inputs)])[0]

    # load audio track if requested
    if audio == True:
        audio_track = _get_audio(lenght_t, dir)

        # build the filter chain and execute it: loop the track enough times
        # to cover the video, trim to the exact length, then fade out
        audioseq = FilterChain([
            ReplicateAudioFilter(repetitions = int(math.ceil(lenght_t / float(audio_track[1])))),
            ConcatFilter(is_video = False, outputtag = "caf"),
            TrimAudioFilter(length = lenght_t),
            FadeOutAudioFilter(start = lenght_t-AUDIO_FADE_OUT_T, length = AUDIO_FADE_OUT_T, outstreamprefix="audio")
        ])
        # the audio input is appended after all slides, hence index len(inputs)
        applied_filters += audioseq.generate(["%d:a" % len(inputs)])[0]

        # add the audio track to the inputs collection
        inputs.update({audio_track[0]: None})

    # build the video
    output = "video.mp4"
    output = dir + "/" + output if dir else output
    ff = FFmpeg(
        executable = ffmpeg,
        global_options = ["-y"],
        inputs = inputs,
        outputs = {output: "-filter_complex \"" + ";".join(applied_filters) + "\" -map \"[video]\"" + (" -map \"[audio]\"" if audio == True else "") + " -c:v libx264 -pix_fmt yuvj420p -q:v 1"}
    )
    #print ff.cmd
    ff.run()
    return output