def transcode(self, ffmpeg="ffmpeg", dry_run=False):
    logger.info("Transcoding: %s -> %s", repr(self.src), repr(self.dest))
    logger.debug("Transcoding: %s", repr(self))
    if dry_run:
        return
    # This has no effect if successful, but will throw an error
    # early if we can't read tags from the source file, rather
    # than only discovering the problem after transcoding.
    AudioFile(self.src)
    encoder_opts = []
    if self.eopts:
        encoder_opts = shlex.split(self.eopts)
    ff = FFmpeg(
        executable=ffmpeg,
        global_options='-y',
        inputs={self.src: None},
        outputs={self.dest: ['-vn'] + encoder_opts},
    )
    logger.debug("Transcode command: %s", repr(ff.cmd))
    ff.run(verbose=False)
    if not os.path.isfile(self.dest):
        raise Exception("ffmpeg did not produce an output file")
    copy_tags(self.src, self.dest)
    if self.use_checksum:
        logger.debug("Saving checksum to dest file %s: %s",
                     repr(self.dest), self.source_checksum())
        write_checksum_tag(self.dest, self.source_checksum())
    try:
        shutil.copymode(self.src, self.dest)
    except OSError:
        # It's ok if setting the mode fails
        pass
def getAnalysis(self, file_path, image_id=None, user_id=None):
    res = requests.post("https://token.beyondverbal.com/token",
                        data={"grant_type": "client_credentials",
                              "apiKey": os.environ.get('BEYONDVERBAL_API_CREDENTIAL')})
    token = res.json()['access_token']
    headers = {"Authorization": "Bearer " + token}
    pp = requests.post("https://apiv4.beyondverbal.com/v4/recording/start",
                       json={"dataFormat": {"type": "WAV"}},
                       verify=False, headers=headers)
    if pp.status_code != 200:
        self.logger.error('HTTP {} error occurred.'.format(pp.status_code))
        self.signal_indicator.emit("emotion", "red")
        return
    else:
        recordingId = pp.json()['recordingId']
        new_file = file_path.split('.')[0] + '-format.wav'
        ff = FFmpeg(
            inputs={file_path: None},
            outputs={new_file: '-acodec pcm_s16le -ac 1 -ar 8000'}
        )
        ff.run()
        with open(new_file, 'rb') as wavdata:
            r = requests.post("https://apiv4.beyondverbal.com/v4/recording/" + recordingId,
                              data=wavdata, verify=False, headers=headers)
        # parsed = json.loads(r.json())
        print(json.dumps(r.json(), indent=4, sort_keys=True))
        self.save_results(r, image_id, user_id)
        return r.json()
def m4a2mp3(f):
    input_path = f
    output_path = f.replace('m4a', 'mp3')
    ff = FFmpeg(inputs={input_path: None},
                outputs={output_path: '-acodec libmp3lame -ab 128k'})
    print(ff.cmd)
    ff.run()
def __init__(self, source_path, compress_quality, step=1, start=0, stop=0, flip_flag=False):
    # translate inversed range 1:95 to 2:32
    translated_quality = 96 - compress_quality
    translated_quality = round((((translated_quality - 1) * (31 - 2)) / (95 - 1)) + 2)
    self.source = source_path
    self.output = tempfile.mkdtemp(prefix='cvat-', suffix='.data')
    target_path = os.path.join(self.output, '%d.jpg')
    output_opts = '-start_number 0 -b:v 10000k -vsync 0 -an -y -q:v ' + str(translated_quality)
    filters = ''
    if stop > 0:
        filters = 'between(n,' + str(start) + ',' + str(stop) + ')'
    elif start > 0:
        filters = 'gte(n,' + str(start) + ')'
    if step > 1:
        filters += ('*' if filters else '') + 'not(mod(n-' + str(start) + ',' + str(step) + '))'
    if filters:
        filters = "select=\"'" + filters + "'\""
    if flip_flag:
        filters += (',' if filters else '') + 'transpose=2,transpose=2'
    if filters:
        output_opts += ' -vf ' + filters
    ff = FFmpeg(inputs={source_path: None}, outputs={target_path: output_opts})
    slogger.glob.info("FFMpeg cmd: {} ".format(ff.cmd))
    ff.run()
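# A quick self-check of the quality translation used above: the 1..95 UI scale
# (higher = better) maps linearly onto ffmpeg's -q:v 31..2 scale (lower = better).
# This helper is a hypothetical extraction for illustration only, not part of
# the original class; it just mirrors the arithmetic.
def _translate_quality(compress_quality):
    inverted = 96 - compress_quality
    return round((((inverted - 1) * (31 - 2)) / (95 - 1)) + 2)

assert _translate_quality(95) == 2   # best UI quality -> best -q:v
assert _translate_quality(1) == 31   # worst UI quality -> worst -q:v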
def compress(self, vid, delete_old=True):
    vid_name = vid['v_title']
    old_path = f"{DL_PATH}{vid['v_filepath']}"
    new_path = f"{DL_PATH}{vid_name}.mp4"
    inputs = {old_path: None}
    outputs = {new_path: '-vcodec libx264 -crf 20 -y'}
    try:
        ff = FFmpeg(FFMPEG_PATH, inputs=inputs, outputs=outputs)
        print(ff.cmd)
        ff.run()
    except Exception as e:
        print(e)
    else:
        print(f"Updating DB rescaled: {vid_name}")
        self.collection.update_one({'v_id': vid['v_id']}, {
            "$set": {
                'v_filepath': f"{vid_name}.mp4",
                'v_compressed': True
            }
        })
        if delete_old:
            unlink(old_path)
def convertFile(input, output):
    try:
        ff = FFmpeg(inputs={input: '-loglevel error'}, outputs={output: None})
        ff.run()
    except Exception as e:
        print('Error during conversion of %s into %s --- %s' % (input, output, e))
def trimVid(self):
    self.trimButton.setEnabled(False)
    outName = mytools.getAvailableName(self.fullPath, 'Trim')
    print(outName)
    trimStartTime = self.startTimeInput.time().toString('hh:mm:ss.zzz')
    trimEndTime = self.endTimeInput.time().toString('hh:mm:ss.zzz')
    try:
        ff = FFmpeg(inputs={self.fullPath: None},
                    outputs={outName: [
                        '-ss', trimStartTime,
                        '-to', trimEndTime,
                        '-c:v', 'copy',
                        '-c:a', 'copy',
                    ]})
        ff.run()
    except Exception as e:
        msg = QMessageBox()
        msg.setWindowTitle("Trim Failed")
        msg.setText(str(e))
        msg.setIcon(QMessageBox.Critical)
        showMsg = msg.exec_()
    self.trimButton.setEnabled(True)
def download_video_chunk_as_mp4(self, video_name, url):
    if '.mp4' not in video_name:
        video_name = video_name + '.mp4'
    original_filename = video_name.replace('.mp4', '.ts')
    urllib.urlretrieve(url, filename=original_filename)
    while not os.path.isfile(original_filename):
        pass
    # Android
    ff = FFmpeg(inputs={original_filename: None},
                outputs={video_name: [
                    '-an',
                    '-s', '640x360',
                    '-vcodec', 'libx264',
                    '-preset', 'medium',
                    '-r', '30'
                ]})
    print("ffmpeg command:")
    print(ff.cmd)
    ff.run()
    while not os.path.isfile(video_name):
        pass
    os.remove(original_filename)
def replace_audio(path):
    ext = input('Please enter the extension of the audio track, '
                'format: ".xxx" (ignore quotation marks)')
    res = search_files(path, ext)
    videos = res[0]
    audios = res[2]
    video_audio = match(videos, audios, 'a')[1]
    i = 1
    for video in list(video_audio.keys()):
        ff = FFmpeg(
            executable='./ffmpeg.exe',
            inputs={
                path + video: None,
                path + video_audio[video]: None
            },
            outputs={
                path + '{}_replaced_audio.mkv'.format(os.path.splitext(video)[0]):
                    '-c:v copy -c:a aac -strict experimental -map 0:v:0 -map 1:a:0'
            })
        print('Replacing audio for video No.{}'.format(i))
        print(ff.cmd)
        i += 1
        ff.run()
def merge_audio(path):
    ext = input('Please enter the extension of the audio track, '
                'format: ".xxx" (ignore quotation marks)')
    res = search_files(path, ext)
    videos = res[0]
    audios = res[2]
    video_audio = match(videos, audios, 'a')[1]
    i = 1
    for video in list(video_audio.keys()):
        ff = FFmpeg(
            executable='./ffmpeg.exe',
            inputs={
                path + video: None,
                path + video_audio[video]: None
            },
            outputs={
                path + '{}_merged_audio.mkv'.format(os.path.splitext(video)[0]):
                    '-map 0 -map 1 -c copy'
            })
        print('Merging video No.{} and its audio'.format(i))
        print(ff.cmd)
        i += 1
        ff.run()
def get_mp3file(out_file_path, out_file_path_mp3):
    try:
        ff = FFmpeg(inputs={out_file_path: '-loglevel quiet'},
                    outputs={out_file_path_mp3: '-y'})
        print(ff.cmd)
        ff.run()
    except:
        print(traceback.format_exc())
def mp3convert(src_dir, dest_dir, dry_run=False):
    # 44khz stereo at 64kb
    conversion_params = ('-acodec libmp3lame -vn -ar 44100 -ac 2 -ab 64k '
                         '-filter:a "volume=1.5" -f mp3')
    for input_file in list(Path(src_dir).rglob("*.[mMa][pPa][3c]")):
        # We found at least one file to convert, ensure the output directory exists
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        # Ensure the final file extension for the target is lowercase mp3
        output_file = os.path.join(dest_dir, input_file.stem + '.mp3')
        if Path(output_file).exists():
            print(f'{output_file} already exists in destination path, skipping...')
            continue
        ff = FFmpeg(inputs={os.path.join(src_dir, input_file): None},
                    outputs={output_file: conversion_params})
        print(f'Command line: {ff.cmd}')
        if dry_run is False:
            ff.run()
def convert_to_wav(input_path, output_path):
    if os.path.isfile(output_path):
        return 'File ' + output_path + ' already exists'
    ff = FFmpeg(inputs={input_path: None}, outputs={output_path: None})
    # ff.cmd
    ff.run()
    return 'File ' + input_path + ' was successfully converted to ' + output_path
def preProcess(self, image_file):
    # NOTE: input and output are the same path here; ffmpeg generally cannot
    # read and write the same file safely, so writing to a temporary file and
    # renaming would be more robust.
    ff = FFmpeg(
        inputs={image_file: None},
        outputs={image_file: '-y -filter:v "scale=64:64"'}
    )
    ff.run()
def process_audio(m):
    cid = m.chat.id
    if chk_list(Blacklist, cid) or not chk_list(Whitelist, cid):
        return
    bot.send_message(cid, 'Audio received')
    fileid = m.voice.file_id
    audiofile_info = bot.get_file(fileid)
    # Audio file downloaded for further debug, if needed. FFmpeg can handle
    # the download directly and process it in streaming.
    r_file = requests.get('https://api.telegram.org/file/bot%s/%s'
                          % (API_TOKEN, audiofile_info.file_path))
    if r_file.status_code == 200:
        ff = FFmpeg(inputs={'pipe:0': '-hide_banner -loglevel panic'},
                    outputs={'pipe:1': '-f mp3 -c:a libmp3lame'})
        try:
            audio_mp3, stderr = ff.run(input_data=r_file.content, stdout=subprocess.PIPE)
            wit_response = wit_client.speech(audio_mp3, None, {'Content-Type': 'audio/mpeg3'})
            if '_text' in wit_response:
                bot.send_message(cid, wit_response['_text'])
        except FFRuntimeError as e:
            print(str(e))  # Just for debug purposes
        bot.send_voice(cid, r_file.content, caption='Original audio')
        bot.send_audio(cid, audio_mp3, caption='MP3 audio')
def convert_to_audio(self, old, new):
    ff = FFmpeg(
        inputs={old: None},
        outputs={new: '-y -acodec flac -ac 1 -bits_per_raw_sample 16 -ar 44100'})
    ff.run()
def convertVideoToWavWithOffset(input_filepath, offset):
    try:
        start_time = perf_counter()
        if offset is None:
            offset = 0.0
        nthreads = utils.getMaxThreads()
        print(f"convertVideoToWavWithOffset('{input_filepath}', {offset}) "
              f"using {nthreads} thread(s).")
        output_filepath = utils.getTmpFile()
        # For less verbosity try global_options='-hide_banner -loglevel error -nostats'
        # See https://github.com/Ch00k/ffmpy/blob/master/ffmpy.py
        ext = '.wav'
        ff = FFmpeg(
            global_options=f"-hide_banner -loglevel error -nostats -threads {nthreads}",
            inputs={input_filepath: '-ss {}'.format(offset)},
            outputs={output_filepath: '-c:a pcm_s16le -ac 1 -y -ar 16000 -f wav'})
        print(f"Starting. Audio output will be saved in {output_filepath}")
        ff.run()
        end_time = perf_counter()
        print(f"convertVideoToWavWithOffset('{input_filepath}', {offset}) complete. "
              f"Duration {int(end_time - start_time)} seconds")
        return output_filepath, ext
    except Exception as e:
        print("Exception: " + str(e))
        raise e
def extract_av(vid, test_ann_dir, train_ann_dir, fps, eps, start_ts_filename,
               vid_save_path, extracted_path):
    """Extract audio and video frames."""
    vid_in = os.path.join(vid_save_path, vid)
    vid_id = vid_id_from_filename(vid)
    out_path = os.path.join(extracted_path, vid_id)
    create_dir(out_path)
    audio_out = os.path.join(out_path, 'audio.wav')
    vid_out = os.path.join(out_path, '%d.jpg')
    ann_file = find_annotation_file(vid, train_ann_dir, test_ann_dir)
    min_ts, max_ts = get_min_max_timestamp(ann_file, eps=eps)
    save_start_ts(min_ts, extracted_path, vid_id, start_ts_filename)
    start_ts = str(datetime.timedelta(seconds=min_ts))
    end_ts = str(datetime.timedelta(seconds=max_ts))
    ff = FFmpeg(inputs={vid_in: ['-ss', start_ts, '-to', end_ts]},
                outputs={
                    audio_out: None,
                    vid_out: ['-filter:v', 'fps=fps=' + str(fps), '-y']
                })
    ff.run(stderr=PIPE)
def show_user_profile(filename):
    # A valid parameter looks like filename=1.wav
    try:
        fname = filename.split('=')[1]
        if (filename.split('=')[0] == 'filename'
                and (fname.split('.')[1] == 'wav' or fname.split('.')[1] == 'mp3')
                and ('answer' in fname.split('.')[0] or 'decoded' in fname.split('.')[0])):
            filepath = os.path.join(app.root_path, app.config['UPLOAD_FOLDER'])
            if 'answer' in fname.split('.')[0]:
                filename_type = 'answer'
            elif 'decoded' in fname.split('.')[0]:
                filename_type = 'decoded'
            input_filename = filepath + filename_type + '.wav'
            # The phone client cannot play wav files, so convert to mp3 first
            output_filename = filepath + filename_type + '.mp3'
            print(output_filename)
            if os.path.exists(output_filename):
                os.remove(output_filename)
            ff = FFmpeg(
                inputs={input_filename: None},
                outputs={output_filename: None}
            )
            ff.run()
            # Return the requested file content to the client as a download
            return send_from_directory(filepath, filename_type + '.mp3', as_attachment=True)
        else:
            return json.dumps({'answer': 'Illegal parameter format'})
    except:
        return json.dumps({'answer': 'Illegal parameter format'})
def updateProfileVideoPicture(self, path):
    try:
        from ffmpy import FFmpeg
        files = {'file': open(path, 'rb')}
        data = {
            'params': self.genOBSParams({
                'oid': self.profile.mid,
                'ver': '2.0',
                'type': 'video',
                'cat': 'vp.mp4'
            })
        }
        r_vp = self.server.postContent(
            self.server.LINE_OBS_DOMAIN + '/talk/vp/upload.nhn',
            data=data, files=files)
        if r_vp.status_code != 201:
            raise Exception('Update profile video picture failure.')
        path_p = self.genTempFile('path')
        ff = FFmpeg(
            inputs={'%s' % path: None},
            outputs={'%s' % path_p: ['-ss', '00:00:2', '-vframes', '1']})
        ff.run()
        self.updateProfilePicture(path_p, 'vp')
    except:
        raise Exception('You should install FFmpeg and ffmpy from pypi')
def processVideo(input_filepath):
    try:
        start_time = perf_counter()
        nthreads = utils.getMaxThreads()
        print(f"processVideo('{input_filepath}') using {nthreads} threads")
        output_filepath = utils.getTmpFile()
        ext = '.mp4'
        ff = FFmpeg(
            global_options=f"-hide_banner -loglevel error -nostats -threads {nthreads}",
            inputs={input_filepath: None},
            outputs={
                output_filepath:
                    '-c:v libx264 -f mp4 -b:v 500K -s 768x432 '
                    '-movflags faststart -ar 48000 -preset medium'
            })
        ff.run()
        end_time = perf_counter()
        print(f"processVideo('{input_filepath}') complete. "
              f"Duration {int(end_time - start_time)} seconds")
        return output_filepath, ext
    except Exception as e:
        print("Exception: " + str(e))
        raise e
def mergevideo(flvfile_list):
    tsfile_list = []
    for flvfile in flvfile_list:
        tsfile = flvfile[:flvfile.rindex('.')] + '.ts'
        tsfile_list.append(tsfile)
        FFmpeg(inputs={flvfile: None},
               outputs={tsfile: '-y -c copy -bsf:v h264_mp4toannexb -f mpegts'}).run()
    mp4file = tsfile_list[0][:tsfile_list[0].rindex('.')] + '.mp4'
    if len(tsfile_list) == 1:
        FFmpeg(inputs={tsfile_list[0]: None},
               outputs={mp4file: '-y -c copy -absf aac_adtstoasc'}).run()
    else:
        FFmpeg(inputs={'concat:' + '|'.join(tsfile_list): None},
               outputs={mp4file: '-y -c copy -absf aac_adtstoasc'}).run()
def client_thread(sock):
    ### receive pcm data ###
    start_pcm_recv = time.time()
    data = clientSocket.recv(BUFF_SIZE)
    if data == b'rec':
        f = open('record', 'ab')
        while True:
            data = clientSocket.recv(BUFF_SIZE)
            if data[-3:] == b'end':
                f.write(data[:-3])
                f.close()
                break
            f.write(data)
    pcm_recv_time = time.time() - start_pcm_recv

    ### pcm to wav ###
    start_pcm2wav = time.time()
    path = 'record'
    ff = FFmpeg(inputs={path: ['-f', 's16le', '-ar', '44100', '-ac', '2']},
                outputs={''.join([path, '.wav']): '-y'})
    ff.run()
    os.unlink('record')
    pcm2wav_time = time.time() - start_pcm2wav

    ### google stt ###
    start_stt = time.time()
    text = stt_conn.audio_stt('record.wav')
    stt_time = time.time() - start_stt

    ### aibril conversation ###
    start_conv = time.time()
    answer = aibril_conn.aibril_conv(text)
    conv_time = time.time() - start_conv

    ### aws-polly tts && pcm streaming ###
    data = 'tts'
    clientSocket.send(data.encode())
    start_pcm_send = time.time()
    polly = client("polly", region_name="ap-northeast-2")
    response = polly.synthesize_speech(Text=answer, SampleRate="8000",
                                       OutputFormat="pcm", VoiceId="Seoyeon")
    stream = response.get("AudioStream")
    data = stream.read()
    print("pcm data length >>", len(data))
    clientSocket.sendall(data)
    data = 'end'
    clientSocket.send(data.encode())
    pcm_send_time = time.time() - start_pcm_send

    print("1. Received pcm data >>", pcm_recv_time)
    print("2. pcm to wav >>", pcm2wav_time)
    print("3. stt time >>", stt_time)
    print("4. conversation time >>", conv_time)
    print("5. Sending pcm data(tts) >>", pcm_send_time)
    clientSocket.close()
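# Note on the pcm-to-wav step above: raw PCM carries no header, so the *input*
# side must declare the sample layout explicitly ('-f s16le -ar 44100 -ac 2');
# ffmpeg then derives the WAV header it writes on the output side from those
# values. Without the input flags ffmpeg cannot probe a raw stream.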
def __init__(self, source_path, dest_path, image_quality, step=1, start=0, stop=0):
    from cvat.apps.engine.log import slogger
    _dest_path = tempfile.mkdtemp(prefix='cvat-', suffix='.data')
    super().__init__(
        source_path=source_path[0],
        dest_path=_dest_path,
        image_quality=image_quality,
        step=step,
        start=start,
        stop=stop,
    )
    # translate inversed range 1:95 to 2:32
    translated_quality = 96 - self._image_quality
    translated_quality = round((((translated_quality - 1) * (31 - 2)) / (95 - 1)) + 2)
    self._tmp_output = tempfile.mkdtemp(prefix='cvat-', suffix='.data')
    target_path = os.path.join(self._tmp_output, '%d.jpg')
    output_opts = '-start_number 0 -b:v 10000k -vsync 0 -an -y -q:v ' + str(translated_quality)
    filters = ''
    if self._stop > 0:
        filters = 'between(n,' + str(self._start) + ',' + str(self._stop) + ')'
    elif self._start > 0:
        filters = 'gte(n,' + str(self._start) + ')'
    if self._step > 1:
        filters += ('*' if filters else '') + 'not(mod(n-' + str(self._start) + ',' + str(self._step) + '))'
    if filters:
        output_opts += " -vf select=\"'" + filters + "'\""
    ff = FFmpeg(inputs={self._source_path: None},
                outputs={target_path: output_opts})
    slogger.glob.info("FFMpeg cmd: {} ".format(ff.cmd))
    ff.run()
def transcode(loadconfig, dbconfig):
    ff = FFmpeg(inputs={loadconfig.infile[0]: None},
                outputs={
                    loadconfig.outfile[0]: [
                        '-y', '-hide_banner', '-loglevel', 'error',
                        '-c:a', 'libmp3lame', '-ac', '1',
                        '-b:a', '16k', '-ar', '8000',
                        '-metadata', 'title="CSN: {}"'.format(loadconfig.csn[0]),
                        '-metadata', 'album="CSN: {} ANI: {} DNIS: {}"'.format(
                            loadconfig.csn[0], loadconfig.ani[0], loadconfig.dnis[0]),
                        '-metadata', 'artist="{}"'.format(str(loadconfig.agent[0]))
                    ]
                })
    try:
        ff.run()
    except Exception as e:
        print("Unable to transcode file: {}".format(e))
        if dbconfig['notification'] != 'false':
            subj = "!!!ERROR - UNABLE TO TRANSCODE RECORDING FOR CSN {}!!!".format(
                loadconfig.csn[0])
            mesg = "Unable to transcode file: {}\r\n\r\nCall Info:\r\n{}".format(
                e, loadconfig)
            sendemail(subj, mesg, dbconfig)
        return False
    if os.path.exists(loadconfig.outfile[0]):
        #os.remove(loadconfig.infile[0])
        return True
    return False
def convert_video(input_file, output, encode_file_id):
    """
    Encrypt uploaded RawFile with the encryption_key and encryption_kid
    included in the pre-generated EncodedFile.

    :param input_file:
    :param output:
    :param encode_file_id:
    :return:
    """
    encoded_file = EncodedFile.objects.get(id=encode_file_id)
    try:
        ff = FFmpeg(
            inputs={input_file: None},
            outputs={
                output: f'-vcodec copy -acodec copy '
                        f'-encryption_scheme cenc-aes-ctr '
                        f'-encryption_key {encoded_file.encryption_key} '
                        f'-encryption_kid {encoded_file.encryption_kid}'
            })
        # TODO: log executed command (ff.cmd)
        ff.run()
        encoded_file.status = 'ended'
        encoded_file.save()
    except Exception:
        encoded_file.status = 'failed'
        encoded_file.save()
def voice(bot, update):
    global near_cat_loc
    print(update.message)
    voice_file_id = update.message.voice.file_id  # file_id
    voice_file = bot.getFile(voice_file_id)  # File
    voice_file_path = voice_file.file_path
    voice_file.download()
    resp = None
    file_name = voice_file_path.split('/')[-1]
    final_name = file_name.replace('oga', 'wav')
    ff = FFmpeg(inputs={file_name: None}, outputs={final_name: None})
    ff.run()
    with open(final_name, 'rb') as f:
        resp = client.speech(f, None, {'Content-Type': 'audio/wav'})
    print(resp)
    message_text = str(resp['_text'].encode('utf-8'))
    print(message_text)
    os.remove(final_name)
    os.remove(file_name)
    if 'intent' in str(resp):
        intent = resp['entities']['intent']
        intent_confidence = float(str(intent[0]['confidence']))  # confidence
        intent_value = str(intent[0]['value'])  # intent
        # bot.send_message(chat_id=update.message.chat_id, text=resp['entities'])
        if intent_value == 'get_places':
            near_cat_loc = True
            if 'sport' in str(resp):
                sport = True
            kb = [[telegram.KeyboardButton("Send location", request_location=True)]]
            markup = telegram.ReplyKeyboardMarkup(kb)
            bot.send_message(chat_id=update.message.chat_id,
                             text='Where are you?', reply_markup=markup)
        if 'pizza' in str(resp):
            send_bot("Go smear yourself with your pasta, mustache guy", update.message.chat_id)
        if 'sushi' in str(resp):
            send_bot("Coming right up", update.message.chat_id)
        if 'doner' in str(resp):
            send_bot("Like hell you're getting your sushi", update.message.chat_id)
        if 'steak' in str(resp):
            send_bot("Go smear yourself with your pasta, mustache guy", update.message.chat_id)
        if 'pasta' in str(resp):
            send_bot("Coming right up", update.message.chat_id)
        if 'shashlyks' in str(resp):
            send_bot("Like hell you're getting your sushi", update.message.chat_id)
        if 'salads' in str(resp):
            send_bot("Go smear yourself with your pasta, mustache guy", update.message.chat_id)
        if 'desserts' in str(resp):
            send_bot("Coming right up", update.message.chat_id)
        if 'dinner' in str(resp):
            send_bot("Coming right up", update.message.chat_id)
def generate_thumbnail(self, video_path, output_image_path):
    head, image_filename = os.path.split(output_image_path)
    image_output_path = os.path.join(self.web_server_video_dir, image_filename)
    if os.path.exists(image_output_path) and not self.OVERWRITE_THUMBNAIL:
        print(image_filename + ' thumbnail already exists.')
    else:
        print('Generating thumbnail at ' + output_image_path)
        try:
            last_mod_time = time.ctime(os.path.getmtime(video_path))
            # Handle escaping for ffmpeg text
            last_mod_time = last_mod_time.replace(':', '\\\\:')
            ffmpeg_options = [
                '-loglevel', 'panic', '-y',
                '-ss', '00:00:5', '-vframes', '1',
                '-vf',
                "scale=iw/4:-1, drawtext=fontfile=/Library/Fonts/Verdana.ttf: text=" +
                last_mod_time +
                ": r=25: x=(w-tw)/2: y=h-(2*lh): fontsize=32: fontcolor=white: ",
                '-an'
            ]
            ff = FFmpeg(inputs={video_path: None},
                        outputs={image_output_path: ffmpeg_options})
            # print(ff.cmd)
            ffmpeg_result = ff.run()
            # print(ffmpeg_result)
        except Exception as e:
            print(e)
def get_video_thumbnail_base64(infile):
    size = 128, 128
    outfile = os.path.splitext(infile)[0] + ".videothumbnail"
    if infile != outfile:
        path = outfile
        try:
            f = open(path, "rb")
        except FileNotFoundError:
            try:
                ff = FFmpeg(
                    inputs={infile: None},
                    outputs={outfile: ['-ss', '00:00:1', '-vframes', '1']})
                print(ff.cmd)
                ff.run()
                f = open(path, "rb")
            except IOError:
                print("can not create thumbnail for", infile)
        finally:
            base64_str = base64.b64encode(f.read())
            f.close()
            return base64_str
    return None
def test_redirect_to_devnull():
    global_options = "--stdin none --stdout oneline --stderr multiline --exit-code 0"
    ff = FFmpeg(global_options=global_options)
    devnull = open(os.devnull, "wb")
    stdout, stderr = ff.run(stdout=devnull, stderr=devnull)
    assert stdout is None
    assert stderr is None
def main():
    n = 0
    of = open("out.mp4", "wb")
    ff = None
    with mss.mss() as sct:
        while True:
            n = n + 1
            if n > 20:
                break
            t = time.time()
            sct_img = sct.grab(sct.monitors[0])
            dt = time.time() - t
            s = sct_img.size
            data = sct_img.rgb
            if ff is None:
                ff = FFmpeg(
                    inputs={'pipe:0': "-f rawvideo -pix_fmt rgb24 -s:v %dx%d" % s},
                    outputs={'pipe:1': '-pix_fmt yuv420p -c:v h264 -f ismv'})
                print(ff.cmd)
                ff.runlive(stdout=subprocess.PIPE)
                print(ff.process.stdout.fileno())
                reader = AsynchronousFileReader(ff.process.stdout)
                # pipe_non_blocking_set(ff.process.stdout.fileno())
                print("setup")
            print("frame", dt)
            ff.process.stdin.write(data)
            consumeout(reader, of)
    ff.process.stdin.close()
    while ff.process.poll():
        consumeout(reader, of)
    consumeout(reader, of)
def concat_videos(list, outdir=None, ffmpeg='ffmpeg', audio=True):
    dir = outdir if outdir else os.path.dirname(os.path.realpath(__file__))
    videos = _download_file_list(list, dir)
    if not videos:
        return None

    # make the video files list
    file_name = os.path.normpath(os.path.join(dir, str(uuid.uuid4())))
    with open(file_name, 'w') as file:
        for video in videos:
            file.write("file '" + video + "'\n")

    # concatenate the videos
    output = os.path.normpath(os.path.join(dir, "video.mp4"))
    ff = FFmpeg(
        executable=ffmpeg,
        global_options=["-y", "-f", "concat", "-safe", "0",
                        "-protocol_whitelist", "file,http,https,tcp,tls"],
        inputs={file_name: None},
        outputs={output: "-c copy"}
    )
    # print(ff.cmd)
    out = ff.run()

    # if an audio background is requested, try to get the duration of the movie
    # and a matching audio file
    if audio == True:
        # collect the total duration of the concatenated movie from ffmpeg's output
        length = time.strptime(re.findall("(?<=time\\=)[0-9.:]+", out)[-1], "%H:%M:%S.%f")
        length_t = datetime.timedelta(hours=length.tm_hour, minutes=length.tm_min,
                                      seconds=length.tm_sec).total_seconds()
        inputs = OrderedDict([(output, None)])
        applied_filters = ["[0:v]null[video]"]
        audio_track = _get_audio(length_t, dir)

        # build the filter chain and execute it
        audioseq = FilterChain([
            ReplicateAudioFilter(repetitions=int(math.ceil(length_t / float(audio_track[1])))),
            ConcatFilter(is_video=False, outputtag="caf"),
            TrimAudioFilter(length=length_t),
            FadeOutAudioFilter(start=length_t - AUDIO_FADE_OUT_T,
                               length=AUDIO_FADE_OUT_T,
                               outstreamprefix="audio")
        ])
        applied_filters += audioseq.generate(["1:a"])[0]

        # add the audio track to the inputs collection
        inputs.update({audio_track[0]: None})

        # build the video
        output = os.path.normpath(os.path.join(dir, "videoa.mp4"))
        ff = FFmpeg(
            executable=ffmpeg,
            global_options=["-y"],
            inputs=inputs,
            outputs={output: "-filter_complex \"" + ";".join(applied_filters) +
                             "\" -map \"[video]\" -map \"[audio]\""}
        )
        # print(ff.cmd)
        ff.run()
    return output
def __run_ffmpeg(exe="ffmpeg", inputs=None, outputs=None): ff = FFmpeg(executable=exe, inputs=inputs, outputs=outputs) try: ff.run(stderr=subprocess.STDOUT) except FFRuntimeError as ffe: # After receiving SIGINT ffmpeg has a 255 exit code if ffe.exit_code == 255: pass else: raise ValueError("An unexpected FFRuntimeError occurred: " "{}".format(ffe)) except KeyboardInterrupt: pass # Do nothing if voluntary interruption
def test_raise_exception_with_stdout_stderr_none():
    global_options = "--stdin none --stdout none --stderr none --exit-code 42"
    ff = FFmpeg(global_options=global_options)
    with pytest.raises(FFRuntimeError) as exc_info:
        ff.run()
    assert str(exc_info.value) == (
        "`ffmpeg --stdin none --stdout none --stderr none --exit-code 42` "
        "exited with status 42\n\n"
        "STDOUT:\n"
        "\n\n"
        "STDERR:\n"
    )
def convert_to_pcm16b16000r(in_filename: str = None, in_content: bytes = None):
    ff = FFmpeg(
        executable=os.path.join(settings.AudioTools.DIRECTORY, 'ffmpeg'),
        inputs={'pipe:0': None},
        outputs={'pipe:1': ['-f', 's16le', '-acodec', 'pcm_s16le', '-ar', '16000']}
    )
    stdout = None
    if in_filename:
        stdout, stderr = ff.run(input_data=open(in_filename, 'br').read(),
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    elif in_content:
        stdout, stderr = ff.run(input_data=in_content,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return stdout
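# A minimal usage sketch for convert_to_pcm16b16000r above; 'speech.ogg' is a
# hypothetical file name. Both branches feed ffmpeg through pipe:0 via
# run(input_data=...), so a path and raw bytes are interchangeable.
pcm_bytes = convert_to_pcm16b16000r(in_filename='speech.ogg')
# or, from bytes already in memory:
with open('speech.ogg', 'rb') as f:
    pcm_bytes = convert_to_pcm16b16000r(in_content=f.read())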
def __init__(self, source_path, compress_quality, flip_flag=False):
    # translate inversed range 1:95 to 2:32
    translated_quality = 96 - compress_quality
    translated_quality = round((((translated_quality - 1) * (31 - 2)) / (95 - 1)) + 2)
    self.output = tempfile.mkdtemp(prefix='cvat-', suffix='.data')
    target_path = os.path.join(self.output, '%d.jpg')
    output_opts = '-start_number 0 -b:v 10000k -vsync 0 -an -y -q:v ' + '2'  # str(translated_quality)
    # -vf "select=not(mod(n\,2))" to downsample by factor of 2
    if flip_flag:
        output_opts += ' -vf "transpose=2,transpose=2"'
    ff = FFmpeg(inputs={source_path: None},
                outputs={target_path: output_opts})
    ff.run()
def __audio_to_video(self, filename=None, **kwargs):
    # TODO: Don't encode if already exists
    title = os.path.splitext(filename)[0]
    video_filename = '{}.mp4'.format(title)
    ff = FFmpeg(
        inputs={filename: None, self.logo: '-loop 1 -framerate 2'},
        outputs={video_filename: '-vf scale=854:480 -c:v libx264 -preset slow '
                                 '-tune stillimage -crf 18 -c:a copy -shortest '
                                 '-pix_fmt yuv420p'}
    )
    print(ff.cmd)
    ff.run()
    if self.delete_audio:
        os.remove(filename)
def splitLine(line, videoid, personname):
    params = line.rstrip().split(',')
    if len(params) == 3:
        start = params[0]
        length = params[1]
        # Find the output file name.
        outName = os.path.join(personname, 'word_clips', params[2])
        if not os.path.isfile(outName):
            ff = FFmpeg(
                inputs={os.path.join(personname, 'fullVideos', videoid + '.mp4'): None},
                outputs={outName: '-ss ' + start + ' -t ' + length}
            )
            sys.stdout.write('Generating ' + outName + '\n')
            ff.run()
def __run_ffmpeg(exe=im_ffm.get_ffmpeg_exe(), inputs=None, outputs=None):
    """ Run ffmpeg """
    logger.debug("Running ffmpeg: (exe: '%s', inputs: %s, outputs: %s)",
                 exe, inputs, outputs)
    ffm = FFmpeg(executable=exe, inputs=inputs, outputs=outputs)
    try:
        ffm.run(stderr=subprocess.STDOUT)
    except FFRuntimeError as ffe:
        # After receiving SIGINT ffmpeg has a 255 exit code
        if ffe.exit_code == 255:
            pass
        else:
            raise ValueError("An unexpected FFRuntimeError occurred: "
                             "{}".format(ffe))
    except KeyboardInterrupt:
        pass  # Do nothing if voluntary interruption
    logger.debug("ffmpeg finished")
def test_non_zero_exitcode_no_stdout_and_stderr():
    global_options = "--stdin none --stdout none --stderr none --exit-code 42"
    ff = FFmpeg(global_options=global_options)
    with pytest.raises(FFRuntimeError) as exc_info:
        ff.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert exc_info.value.cmd == (
        "ffmpeg --stdin none --stdout none --stderr none --exit-code 42")
    assert exc_info.value.exit_code == 42
    assert exc_info.value.stdout == b""
    assert exc_info.value.stderr == b""
    assert str(exc_info.value) == (
        "`ffmpeg --stdin none --stdout none --stderr none --exit-code 42` "
        "exited with status 42\n\n"
        "STDOUT:\n"
        "\n\n"
        "STDERR:\n"
    )
def create_timelapse(directory, fn_out=None, **kwargs):  # pragma: no cover
    """Create a timelapse

    A timelapse is created from all the jpg images in a given `directory`

    Args:
        directory (str): Directory containing jpg files
        fn_out (str, optional): Full path to output file name; if not
            provided, defaults to `directory` basename.
        **kwargs (dict): Valid keywords: verbose

    Returns:
        str: Name of output file
    """
    if fn_out is None:
        head, tail = os.path.split(directory)
        if tail == '':
            head, tail = os.path.split(head)
        field_name = head.split('/')[-2]
        cam_name = head.split('/')[-1]
        fn_out = '{}/images/timelapse/{}_{}_{}.mp4'.format(
            os.getenv('PANDIR'), field_name, cam_name, tail)
    try:
        ff = FFmpeg(
            global_options='-r 3 -pattern_type glob',
            inputs={'{}/*.jpg'.format(directory): None},
            outputs={fn_out: '-s hd1080 -vcodec libx264'}
        )
        if 'verbose' in kwargs:
            out = None
            err = None
            print("Timelapse command: ", ff.cmd)
        else:
            out = open(os.devnull, 'w')
            err = open(os.devnull, 'w')
        ff.run(stdout=out, stderr=err)
    except Exception as e:
        warn("Problem creating timelapse: {}".format(fn_out))
        fn_out = None
    return fn_out
def test_non_zero_exitcode():
    global_options = "--stdin none --stdout multiline --stderr multiline --exit-code 42"
    ff = FFmpeg(global_options=global_options)
    with pytest.raises(FFRuntimeError) as exc_info:
        ff.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert exc_info.value.cmd == (
        "ffmpeg --stdin none --stdout multiline --stderr multiline --exit-code 42")
    assert exc_info.value.exit_code == 42
    assert exc_info.value.stdout == b"These are\nmultiple lines\nprinted to stdout"
    assert exc_info.value.stderr == b"These are\nmultiple lines\nprinted to stderr"
    assert str(exc_info.value) == (
        "`ffmpeg --stdin none --stdout multiline --stderr multiline --exit-code 42` "
        "exited with status 42\n\n"
        "STDOUT:\n"
        "These are\n"
        "multiple lines\n"
        "printed to stdout\n\n"
        "STDERR:\n"
        "These are\n"
        "multiple lines\n"
        "printed to stderr"
    )
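# A minimal sketch of consuming FFRuntimeError outside a test, using only the
# attributes the tests above assert on (cmd, exit_code, stdout, stderr). The
# helper name and its inputs/outputs arguments are hypothetical.
import subprocess
from ffmpy import FFmpeg, FFRuntimeError

def run_or_report(inputs, outputs):
    ff = FFmpeg(inputs=inputs, outputs=outputs)
    try:
        # stdout/stderr must be PIPEd for the exception to carry their contents
        return ff.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except FFRuntimeError as err:
        print('command:', err.cmd)
        print('exit code:', err.exit_code)
        print('stderr:', err.stderr.decode(errors='replace'))
        raise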
def test_input():
    global_options = "--stdin pipe --stdout oneline --stderr multiline --exit-code 0"
    ff = FFmpeg(global_options=global_options)
    stdout, stderr = ff.run(input_data=b"my input data",
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert stdout == b"my input data\nThis is printed to stdout"
    assert stderr == b"These are\nmultiple lines\nprinted to stderr"
def convert(input, output):
    ff = FFmpeg(
        inputs={input: None},
        outputs={output: '-c:v libx264 -preset medium -an'})
    ff.run()
files.append('_' + FILE_TYPE)
foundFiles = []
for i in files:
    if os.path.isfile(VIDEO_DIRECTORY + i):
        foundFiles.append(i)
    else:
        notFound.append(i)

# Keep a set of unique filenames so we don't convert files more than once.
unique = list(set(foundFiles))

# Convert to an MPEG-TS format that can be concatenated.
for i in unique:
    ff = FFmpeg(
        inputs={VIDEO_DIRECTORY + i: None},
        outputs={VIDEO_DIRECTORY + i + '.ts': '-c copy -f mpegts'}
    )
    ff.run()

# Remove the previous output file first.
if os.path.isfile('output.mp4'):
    os.system('rm output.mp4')

# Generate the command for concatenating the MPEG-TS files.
inputs = 'concat:'
for i in foundFiles:
    inputs += VIDEO_DIRECTORY + i + '.ts|'
inputs = inputs[:-1]
ff = FFmpeg(
    inputs={inputs: None},
    outputs={'output.mp4': '-c copy -bsf:a aac_adtstoasc'}
)
ff.run()
def test_invalid_executable_path():
    ff = FFmpeg(executable="/tmp/foo/bar/ffmpeg")
    with pytest.raises(FFExecutableNotFoundError) as exc_info:
        ff.run()
    assert str(exc_info.value) == "Executable '/tmp/foo/bar/ffmpeg' not found"
def test_no_redirection():
    global_options = "--stdin none --stdout oneline --stderr multiline --exit-code 0"
    ff = FFmpeg(global_options=global_options)
    stdout, stderr = ff.run()
    assert stdout is None
    assert stderr is None
def test_redirect_to_pipe():
    global_options = "--stdin none --stdout oneline --stderr multiline --exit-code 0"
    ff = FFmpeg(global_options=global_options)
    stdout, stderr = ff.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert stdout == b"This is printed to stdout"
    assert stderr == b"These are\nmultiple lines\nprinted to stderr"
def _make(images, scene_duration, dir, ffmpeg, width, height, audio, effect,
          transition, batch_mode):
    # exit if no images were found
    if not images:
        return None

    scene_duration_f = scene_duration * FPS
    # round down to an even number of pixels (required by libx264);
    # -2 lets ffmpeg derive the other dimension while keeping it even
    w = width // 2 * 2 if width is not None else -2 if height is not None else OUTPUT_VIDEO_WIDTH
    h = height // 2 * 2 if height is not None else -2 if width is not None else OUTPUT_VIDEO_HEIGHT

    # build the animation dictionary of filters and first slide handling flag
    animations = {
        "zoompan": (
            CombiningFilter(
                [
                    ZoompanEffectFilter(maxzoom=MAX_ZOOM, frames=scene_duration_f),
                    ImageSlideFilter(duration=scene_duration, width=w, height=h)
                ],
                outstreamprefix="zpaf"),
            False
        ),
        "fadeinout": (
            CombiningFilter(
                [
                    FadeTransitionFilter(transition_duration=TRANSITION_T,
                                         total_duration=scene_duration),
                    ImageSlideFilter(duration=scene_duration, width=w, height=h)
                ],
                outstreamprefix="faf"),
            False
        ),
        "zoompanfadeinout": (
            CombiningFilter(
                [
                    ZoompanEffectFilter(maxzoom=MAX_ZOOM, frames=scene_duration_f),
                    FadeTransitionFilter(transition_duration=TRANSITION_T,
                                         total_duration=scene_duration),
                    ImageSlideFilter(duration=scene_duration, width=w, height=h)
                ],
                outstreamprefix="zpfaf"),
            False
        ),
        "slidein": (
            FilterChain(
                [
                    ImageSlideFilter(duration=scene_duration, width=w, height=h),
                    SlideTransitionFilter(transition_duration=TRANSITION_T,
                                          preserve_first=batch_mode != BatchMode.non_initial_batch)
                ]),
            True
        ),
        "zoompanslidein": (
            ZoompanSlideInTransitionFilter(transition_duration=TRANSITION_T,
                                           total_duration=scene_duration,
                                           fps=FPS, width=w, height=h,
                                           maxzoom=MAX_ZOOM,
                                           preserve_first=batch_mode != BatchMode.non_initial_batch),
            True
        )
    }
    animationkey = (effect if effect else "") + (transition if transition else "")
    animation = animations[animationkey] if animationkey in animations else None

    # determine if a transition is requested and how to interpret the inputs list
    preserve_first_slide = animation[1] if animation else False
    if batch_mode != BatchMode.non_initial_batch:
        slides = images
        length_t = scene_duration * len(slides)
    elif preserve_first_slide:
        slides = images
        length_t = scene_duration * (len(slides) - 1)
    else:
        slides = images[1:]
        length_t = scene_duration * len(slides)
    inputs = OrderedDict([(i, "-loop 1") for i in slides])

    # create the video filter chain
    videoseq = FilterChain()
    if animation:
        videoseq.append(animation[0])
    else:
        videoseq.append(ImageSlideFilter(duration=scene_duration, width=w, height=h))
    videoseq.append(ConcatFilter(True, "video"))
    applied_filters = videoseq.generate(["%d:v" % i for (i, x) in enumerate(inputs)])[0]

    # load audio track if requested
    if audio == True:
        audio_track = _get_audio(length_t, dir)

        # build the filter chain and execute it
        audioseq = FilterChain([
            ReplicateAudioFilter(repetitions=int(math.ceil(length_t / float(audio_track[1])))),
            ConcatFilter(is_video=False, outputtag="caf"),
            TrimAudioFilter(length=length_t),
            FadeOutAudioFilter(start=length_t - AUDIO_FADE_OUT_T,
                               length=AUDIO_FADE_OUT_T,
                               outstreamprefix="audio")
        ])
        applied_filters += audioseq.generate(["%d:a" % len(inputs)])[0]

        # add the audio track to the inputs collection
        inputs.update({audio_track[0]: None})

    # build the video
    output = "video.mp4"
    output = dir + "/" + output if dir else output
    ff = FFmpeg(
        executable=ffmpeg,
        global_options=["-y"],
        inputs=inputs,
        outputs={output: "-filter_complex \"" + ";".join(applied_filters) +
                         "\" -map \"[video]\"" +
                         (" -map \"[audio]\"" if audio == True else "") +
                         " -c:v libx264 -pix_fmt yuvj420p -q:v 1"}
    )
    # print(ff.cmd)
    ff.run()
    return output