Example #1
 def avi_activated(video_list_item):
     self.working_emoji = video_list_item.data(32)
     # Call FFmpeg if no gif has been generated yet
     print(type(settings.overwrite_gifs))
     if not self.working_emoji.has_gif or settings.overwrite_gifs:
         self.statusbar.showMessage('Generating the gif')
         self.ffmpeg = FFmpeg()
         self.ffmpeg.return_signal.connect(self.console_add)
         self.ffmpeg.add(self.working_emoji.full_path,
                         self.working_emoji.fps)
         self.console_add('=' * 50)
         self.console_add('Converting {} using ffmpeg'.format(
             self.working_emoji.full_path))
         self.ffmpeg.run()
         self.console_add('=' * 50 + '\n')
         self.ffmpeg.return_signal.disconnect(self.console_add)
         self.working_file = self.working_emoji.full_path
         # self.load_gif(self.working_emoji.gif_path)
         # self.update_video_list()
         self.make_video_list()
     else:
         self.load_gif(self.working_emoji.gif_path)
     if self.working_emoji.resolution == '136x136':
         self.viewport_136.open_image(self.working_emoji)
     elif self.working_emoji.resolution == '280x280':
         self.viewport_280.open_image(self.working_emoji)
Example #2
def render_video(recording, output, dry, quality, audio_bitrate):
    start = time.time()
    logging.debug("Rendering video from frame %s to %s",
                  str(recording.start_frame), str(recording.end_frame))

    if dry:
        recording.sorted_location = "** DRY RUN **"
        return

    recording.sorted_location = get_next_filename(
        os.path.join(output, recording.matched_model))

    ffmpeg = FFmpeg().input(recording.original_location).output(
        recording.sorted_location, {
            'codec:v': 'libx265',
            'codec:a': 'eac3',
            'tag:v': 'hvc1',
            'preset': 'fast',
            'crf': quality,
            'b:a': audio_bitrate,
            'vf': f'scale={recording.dimension}:flags=lanczos',
            'ss': calculate_timestamp_fps(recording.start_frame,
                                          recording.fps),
            'to': calculate_timestamp_fps(recording.end_frame, recording.fps)
        })

    @ffmpeg.on('progress')
    def on_progress(progress):
        progressbar.update_to(progress.frame)

        if recording.average_bitrate == 0:
            recording.average_bitrate = progress.bitrate
        else:
            recording.average_bitrate = round(
                (recording.average_bitrate + progress.bitrate) / 2)

    @ffmpeg.on('terminated')
    def on_terminated():
        raise Exception('ffmpeg was externally terminated')

    @ffmpeg.on('error')
    def on_error(code):
        raise Exception('ffmpeg exited with ' + str(code))

    loop = asyncio.get_event_loop()

    tqdm_out = TqdmToLogger(logging.getLogger(), level=logging.INFO)
    progressbar = TqdmUpTo(
        total=recording.end_frame - recording.start_frame + 1,
        desc="Rendering video",
        unit="frame",
        file=tqdm_out,
        bar_format='{l_bar}{bar:50}{r_bar}{bar:-50b}',
        ascii=False)

    loop.run_until_complete(ffmpeg.execute())
    progressbar.close()

    logging.debug("Finished rendering in in %ss",
                  str(round(time.time() - start, 2)))
Example #3
def compare(path=u'../02-cut-00.mp4',
            width=1280,
            height=720,
            vcodec='libx264',
            preset='faster',
            crf=23):

    ffmpeg = FFmpeg(path=path)
    output = '%s/%s.%s' % (ffmpeg.root, preset, ffmpeg.attr)
    cmd = 'ffmpeg.exe -i "%s" -s %dx%d  -vcodec %s -preset %s -crf %d "%s"' % (
        ffmpeg.path, width, height, vcodec, preset, crf, output)
    cost = ffmpeg.execute(cmd)
    return cost
Example #4
async def combine_video_fragments(output_file, input_fragment_files):
    for f in input_fragment_files:
        if not os.path.isfile(f):
            raise RuntimeError("Input fragment does not exist")

    with open("frags.txt", "w") as f:
        for path in input_fragment_files:
            f.write(f"file '{path}'\n")

    ffmpeg = FFmpeg().option(
        "-f", "concat",
    ).option(
        "-safe", "0",
    ).option('y').input(
        "frags.txt",
    ).output(
        output_file,
    )

    @ffmpeg.on("error")
    def on_error(code):
        _logger.error(f"Error: {code}")

    await ffmpeg.execute()

    os.remove("frags.txt")
Example #5
 def cut(filename, cutdata, output_dir):
     """Cut the file according to cutdata dict list
     Cutdata dict list should look like this:
     [{
         'start_point': 'starting point of track'
         'duration': 'duration to cut, None if should be cut to end'
         'output_file': 'path to output file (with extension)'
     }]
     """
     ff = FFmpeg()
     if not os.path.exists(output_dir):
         os.makedirs(output_dir)
     extension = filename.rsplit('.', 1)[1]
     for track in cutdata:
         out = ff.extract(
             filename, track['start'], track['duration'],
             "{0}\\{1}.{2}".format(output_dir, track['name'], extension))
         if out.returncode != 0:
             print("FFmpeg returned with code {0}".format(out.returncode))
             print(out.stdout)
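
# Usage sketch (not from the original project): a hypothetical cutdata list
# using the keys the loop above actually reads ('start', 'duration', 'name').
cutdata = [
    {'start': '00:00:00', 'duration': '00:03:25', 'name': 'track01'},
    {'start': '00:03:25', 'duration': None, 'name': 'track02'},  # cut to the end
]
cut('album.flac', cutdata, 'output')  # hypothetical input file and output directory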
Example #6
    async def record(self, filename, options):
        streamlink_process = await _streamlink(self.stream.url,
                                               self.stream.quality,
                                               stdout=True,
                                               **options)

        # To fix inaccurate timestamps, record videos via ffmpeg
        ffmpeg = FFmpeg().input('pipe:0').output(os.fspath(filename), c='copy')
        self._set_handlers(ffmpeg)

        await ffmpeg.execute(streamlink_process.stdout)
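
    # Hedged sketch (not from the original project) of what a helper like
    # _set_handlers might register; it only uses event names that appear in
    # the other examples on this page.
    def _set_handlers(self, ffmpeg):
        @ffmpeg.on('start')
        def on_start(arguments):
            print('ffmpeg arguments:', arguments)

        @ffmpeg.on('stderr')
        def on_stderr(line):
            print('ffmpeg:', line)

        @ffmpeg.on('completed')
        def on_completed():
            print('Recording completed')

        @ffmpeg.on('error')
        def on_error(code):
            print('ffmpeg exited with code', code)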
Example #7
 def avis2gif(self):
     emoji_dict = {
         Emoji(emoji).filename: Emoji(emoji)
         for emoji in self.files_in_folder(self.project_folder)
         if Emoji(emoji)
     }
     for index, item in enumerate(emoji_dict.keys()):
         # print(emoji_dict[item])
         if not emoji_dict[item].has_gif or settings.overwrite_gifs:
             print(emoji_dict[item].name, 'gif file missing, creating one')
             FFmpeg(emoji_dict[item])
             self.conversion1.emit(index + 1, len(emoji_dict) - 1)
     QTimer.singleShot(1, self.conversion1_done)
Example #8
async def decode_frames(start, length, input_file, output_folder):
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    ffmpeg = FFmpeg().option(
        "-ss", start
    ).option(
        "-t", length
    ).input(
        input_file,
    ).output(
        os.path.join(output_folder, "%06d.png"),
    )

    @ffmpeg.on("error")
    def on_error(code):
        _logger.error(f"Error: {code}")

    await ffmpeg.execute()
Example #9
async def cut_input(start, length, input_file, output_file):
    if not os.path.isfile(input_file):
        raise FileNotFoundError("Input file not found")

    ffmpeg = FFmpeg().option(
        "y",
    ).option(
        "-ss", start
    ).option(
        "-t", length
    ).input(
        input_file,
    ).output(
        output_file,
    )

    @ffmpeg.on("error")
    def on_error(code):
        _logger.error(f"Error: {code}")

    await ffmpeg.execute()
Example #10
async def encode_frames(encode_queue, framerate):
    while True:
        input_folder, output_file = await encode_queue.get()

        ffmpeg = FFmpeg().option(
            "y",
        ).option(
            "-r", framerate,
        ).input(
            os.path.join(input_folder, "%06d.png"),
        ).output(
            output_file,
        )

        @ffmpeg.on("error")
        def on_error(code):
            _logger.error(f"Error: {code}")

        await ffmpeg.execute()

        shutil.rmtree(input_folder)

        encode_queue.task_done()
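
# Usage sketch (not from the original project): encode_frames() is a worker
# that loops forever, so it is typically driven through an asyncio.Queue and
# cancelled once the queue has been drained. Folder and file names below are
# hypothetical.
import asyncio

async def main():
    encode_queue = asyncio.Queue()
    worker = asyncio.create_task(encode_frames(encode_queue, framerate=30))
    await encode_queue.put(("frames/scene_000", "out/scene_000.mp4"))
    await encode_queue.join()  # waits until task_done() was called for every item
    worker.cancel()

asyncio.run(main())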
Example #11
    async def cut_video(self, cut_from, duration, video_file_path, output_path,
                        use_copy):
        ffmpeg_path = self.config['ffmpeg_path']
        if ffmpeg_path is not None and ffmpeg_path != '':
            ffmpeg = FFmpeg(ffmpeg_path)
        else:
            ffmpeg = FFmpeg()

        ffmpeg = ffmpeg.option('ss', cut_from).input(
            os.path.abspath(video_file_path))

        if use_copy:
            ffmpeg = ffmpeg.output(output_path, c='copy', t=duration)
        else:
            ffmpeg = ffmpeg.output(output_path, t=duration)

        @ffmpeg.on('start')
        def on_start(arguments):
            print('ffmpeg arguments: ', arguments)

        @ffmpeg.on('stderr')
        def on_stderr(line):
            print('ffmpeg out:', line)

        @ffmpeg.on('completed')
        def on_completed():
            print('Completed')

        @ffmpeg.on('terminated')
        def on_terminated():
            print('Terminated')

        @ffmpeg.on('error')
        def on_error(code):
            print('Error:', code)

        await ffmpeg.execute()
Example #12
import asyncio
from ffmpeg import FFmpeg

ffmpeg = (
    FFmpeg().option('y').input('input-a.mp4').input('input-b.mp4').output(
        'output.mp4', map=['0:0', '1:1']))


@ffmpeg.on('start')
def on_start(arguments):
    print('arguments:', arguments)


@ffmpeg.on('stderr')
def on_stderr(line):
    print('stderr:', line)


@ffmpeg.on('progress')
def on_progress(progress):
    print(progress)


@ffmpeg.on('completed')
def on_completed():
    print('completed')


@ffmpeg.on('terminated')
def on_terminated():
    print('terminated')
Example #13
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import sys, os
from ffmpeg import FFmpegLogger
from ffmpeg import FFmpeg
from gpt_parameters import Parameters
from gpt_telemetry import Telemetry

# MAIN

params = Parameters()
if not params.parse_commandline():
    sys.exit()

ffmpeg = FFmpeg(params.logger)

# TODO: see if we have to concat other parts of the video
#ffmpeg.gopro_concat_video(os.path.split(os.path.abspath(params.filename))[0])

if not os.path.exists(params.filename):
    params.logger.error("Validation error: filename " + params.filename +
                        " was not found")
    sys.exit()

if not ffmpeg.is_created_by_gopro(params.filename):
    params.logger.error(
        "Validation error: file is not recorded with a GoPro camera")
    sys.exit()

if not ffmpeg.contains_gopro_telemetry(params.filename):
Example #14
from ffmpeg import FFmpeg as ffmpeg

input_stream = ffmpeg.input('input.mp4', f='mp4')
output_stream = ffmpeg.output(input_stream, 'output.m3u8', format='hls', start_number=0, hls_time=5, hls_list_size=0)
ffmpeg.run(output_stream)
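
# The snippet above calls input/output/run as module-level helpers. Expressed
# with the fluent FFmpeg API used by the other examples on this page, the same
# HLS segmentation job might look like this (a sketch, not verified against a
# specific library version):
import asyncio
from ffmpeg import FFmpeg

hls = (FFmpeg().option('y').input('input.mp4', f='mp4').output(
    'output.m3u8',
    format='hls',
    start_number=0,
    hls_time=5,
    hls_list_size=0))

asyncio.get_event_loop().run_until_complete(hls.execute())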
Example #15
class QtMainWindow(QMainWindow, MainWindow_UI.Ui_MainWindow):
    # todo add a right-click context menu to the video list
    # todo add sorting to the video list menu
    def __init__(self, input_folder=default_project_folder):
        super(QtMainWindow, self).__init__()
        self.setupUi(self)
        self.setStyleSheet(stylesheet.houdini)

        self.working_directory = input_folder
        self.videolist_model = None
        self.ffmpeg = None
        self.gifsicle = None
        self.movie136 = QMovie()
        self.movie280 = QMovie()
        self.working_emoji = None
        self.lossy_file_size = None
        self.lossy_factor = None
        self.output_file = None
        self.original_280_gif = None
        self.original_136_gif = None
        self.loaded_280 = None
        self.loaded_136 = None
        self.tp = None
        self.color_table = None
        # todo figure out self.launcher: self.launcher = Launcher()
        # todo figure out self.main_task_pool: self.main_task_pool = TasksPool()

        # ############################ MODIFY INTERFACE ############################## #
        # todo fix the interface size: self.setGeometry(200, 200, 40, 40)

        self.setWindowTitle('Gifcher | v 0.1')

        self.splitter_right.addWidget(self.viewport_136)
        self.splitter_main.insertWidget(1, self.viewport_280)
        # Modify relationship between main interface columns
        self.splitter_main.setStretchFactor(0, 2)
        self.splitter_main.setStretchFactor(1, 4)
        self.splitter_main.setStretchFactor(2, 3)

        self.splitter_right.setStretchFactor(0, 15)
        self.splitter_right.setStretchFactor(1, 10)
        # self.viewport_widget.open_image(Emoji("C:\Python\Giftcher\BrandinCooksEmojiTest_01\BrandinCooksEmojiTest_01_280x280_30fps.gif"))

        # Max size of icons in video list
        self.list_videoslist.setIconSize(QSize(32, 32))

        # Update the video list on initial program start
        if len(self.working_directory):
            self.make_video_list()

        # ################################# TOP BAR ################################## #
        # File menu
        # Connect "Open folder" to other Open folder button
        self.actionChooseFolder.triggered.connect(
            self.btn_input_folder.clicked)

        @self.actionExit.triggered.connect
        def exit_ui():
            exit(0)

        # Options menu
        @self.actionConfig.triggered.connect
        def call_settings():
            self.dial = settings.QtSettings()
            self.dial.exec_()

        # todo polish the settings window

        @self.actionDelete_temp_files.triggered.connect
        def clear_temp_folder():
            if len(listdir('temp')) != 0:
                cleaning_result = clean_folder('temp')
                if cleaning_result:
                    self.console_add(cleaning_result)
                sleep(.1)
                if len(listdir('temp')) == 0:
                    self.statusbar.showMessage('Temp folder is cleaned')
                else:
                    self.statusbar.showMessage(
                        'Trying to clean temp folder, but failed')

        # todo move the console setup somewhere else
        self.console = QTextBrowser(self)
        self.console.setWordWrapMode(QTextOption.NoWrap)
        # self.layout3in1.addWidget(self.console)
        self.console.setMinimumWidth(500)
        self.console.setVisible(console_flag)

        @self.actionShow_console.triggered.connect
        def show_console():
            self.console.setVisible(not self.console.isVisible())
            if self.console.isVisible():
                self.statusbar.showMessage('Console is enabled')
            else:
                self.statusbar.showMessage('Console is disabled')

        # Button for deleting gif files in the working directory
        self.actionDelete_gif_files = QAction(self)
        self.actionDelete_gif_files.setObjectName("actionDelete_temp_files")
        self.menuOptions.addAction(self.actionDelete_gif_files)
        self.actionDelete_gif_files.setText(
            QApplication.translate("MainWindow", "&Clean generated gifs", None,
                                   QApplication.UnicodeUTF8))

        @self.actionDelete_gif_files.triggered.connect
        def clean_gifs():
            self.actionUnloadGifs.triggered.emit()  # Stop and unload playing gifs
            for i in files_in_folder(self.working_directory, 'gif'):
                remove(i)
            # self.update_video_list()
            self.make_video_list()

        # Button for unloading running gifs in the viewports
        self.actionUnloadGifs = QAction(self)
        self.actionUnloadGifs.setObjectName("actionDelete_temp_files")
        self.menuOptions.addAction(self.actionUnloadGifs)
        self.actionUnloadGifs.setText(
            QApplication.translate("MainWindow", "&Unload gifs", None,
                                   QApplication.UnicodeUTF8))

        @self.actionUnloadGifs.triggered.connect
        def unload_gifs():
            self.viewport_280.unload_image()
            self.viewport_136.unload_image()

        self.actionmov2mp4.triggered.connect(self.convert_mov_to_mp4)

        # About menu
        @self.actionAbout.triggered.connect
        def call_about():
            # self.dial = settings.QtSettings()  # change this
            # self.dial.exec_()
            print(self.size())
            self.dial = about.QtAbout()
            self.dial.exec_()

        # todo polish the about window

        # ############################### LEFT COLUMN ################################ #
        @self.btn_input_folder.clicked.connect
        def input_folder():
            # options = QFileDialog.DontResolveSymlinks | QFileDialog.ShowDirsOnly
            directory = QFileDialog.getExistingDirectory(self)
            if directory:
                self.working_directory = directory
                # self.update_video_list()
                self.make_video_list()
                self.actlist_model.update(directory)
                self.dropdown_colortable.setCurrentIndex(0)
                # load the image with the highest fps
                if preload_files:
                    self.movie136.setFileName('')
                    self.movie136.stop()
                    self.movie280.setFileName('')
                    self.movie280.stop()
                    # get the number of items in the video list
                    items_count = self.videolist_model.rowCount(
                        self.videolist_model)
                    res136 = {}  # Dict for only 136 entries
                    res280 = {}  # Dict for only 280 entries
                    # Walk through the model and separate entries by resolution
                    for item in range(items_count):
                        emoji = self.videolist_model.data(
                            self.videolist_model.index(item), 32)
                        print(emoji)
                        if emoji.resolution == '136x136':
                            res136.update({str(emoji.fps): item})
                        elif emoji.resolution == '280x280':
                            res280.update({str(emoji.fps): item})
                    # Sort entries by highest FPS
                    fps_136 = sorted(res136.keys(), reverse=True)
                    fps_280 = sorted(res280.keys(), reverse=True)
                    # print(res136.keys())
                    if len(fps_136):
                        top_fps_136 = fps_136[0]
                        # Click on the appropriate items in the ModelViewer
                        avi_activated(
                            self.videolist_model.index(res136[top_fps_136]))
                        avi_activated(
                            self.videolist_model.index(res136[top_fps_136]))
                    if len(fps_280):
                        top_fps_280 = fps_280[0]
                        # Click on the appropriate items in the ModelViewer
                        avi_activated(
                            self.videolist_model.index(res280[top_fps_280]))
                        avi_activated(
                            self.videolist_model.index(res280[top_fps_280]))

        self.btn_input_folder.setContextMenuPolicy(Qt.CustomContextMenu)

        @self.btn_input_folder.customContextMenuRequested.connect
        def btn_input_folder_open_menu(pos):
            subprocess.Popen(r'explorer "{}"'.format(self.working_directory))

        def avi_activated(video_list_item):
            self.working_emoji = video_list_item.data(32)
            # Call FFmpeg if no gif has been generated yet
            print(type(settings.overwrite_gifs))
            if not self.working_emoji.has_gif or settings.overwrite_gifs:
                self.statusbar.showMessage('Generating the gif')
                self.ffmpeg = FFmpeg()
                self.ffmpeg.return_signal.connect(self.console_add)
                self.ffmpeg.add(self.working_emoji.full_path,
                                self.working_emoji.fps)
                self.console_add('=' * 50)
                self.console_add('Converting {} using ffmpeg'.format(
                    self.working_emoji.full_path))
                self.ffmpeg.run()
                self.console_add('=' * 50 + '\n')
                self.ffmpeg.return_signal.disconnect(self.console_add)
                self.working_file = self.working_emoji.full_path
                # self.load_gif(self.working_emoji.gif_path)
                # self.update_video_list()
                self.make_video_list()
            else:
                self.load_gif(self.working_emoji.gif_path)
            if self.working_emoji.resolution == '136x136':
                self.viewport_136.open_image(self.working_emoji)
            elif self.working_emoji.resolution == '280x280':
                self.viewport_280.open_image(self.working_emoji)

        self.list_videoslist.activated.connect(avi_activated)

        # Add acts from folder to list widget
        # todo if len(self.working_directory):
        if True:
            self.actlist_model = ActListModel(self.working_directory)
            # self.actlist_model.no_act_files_found.connect(QtError())
            # QtError()
            # todo 000
            self.dropdown_colortable.setModel(self.actlist_model)

        @self.dropdown_colortable.currentIndexChanged.connect
        def dropdown_colortable_selected(index_of_selected_item):
            act_file_path = self.dropdown_colortable.itemData(
                index_of_selected_item, 32)
            self.current_act = self.load_act(act_file_path)

        @self.btn_import_act.clicked.connect
        def import_act_clicked():
            photoshop_paths = PsFolder().ps_paths
            # print(photoshop_paths[0])
            if len(photoshop_paths) > 1:
                logging.warning(
                    'Multiple Photoshop paths found, using {}'.format(
                        photoshop_paths[0]))
            files, filtr = QFileDialog.getOpenFileNames(
                self, "Choose your color table",
                '{}'.format(photoshop_paths[0]),
                "All Files (*.*);;A color table (*.act)",
                "A color table (*.act)")

            def copy_act(act_file):  # Helper for the repeated copy call
                copy2(
                    act_file,
                    path.join(self.working_directory, path.basename(act_file)))

            user_choice = None
            for file in files:
                # If the file already exists and the user has NOT clicked Yes to All, ask them
                if path.exists(
                        path.join(path.abspath(self.working_directory),
                                  path.basename(file))
                ) and user_choice != QMessageBox.YesToAll:
                    error_box = QMessageBox()
                    error_box.setStyleSheet(self.styleSheet())
                    error_box.setWindowTitle('File error')
                    error_box.setText('The file {} exists in {}'.format(
                        path.basename(file),
                        path.abspath(self.working_directory)))
                    error_box.setInformativeText(
                        'Do you want to overwrite it?')
                    error_box.setStandardButtons(QMessageBox.YesToAll
                                                 | QMessageBox.Yes
                                                 | QMessageBox.No)
                    error_box.setDefaultButton(QMessageBox.No)
                    user_choice = error_box.exec_()
                    if user_choice == QMessageBox.Yes or user_choice == QMessageBox.YesToAll:
                        copy_act(file)
                else:
                    copy_act(file)

            self.actlist_model.update(self.working_directory)
            # Select the first of selected files in the dropdown menu
            first_file = files[0]
            first_file_stripped = path.splitext(path.basename(first_file))[0]
            index = self.actlist_model.index(0)
            index_of_first_item = self.actlist_model.match(
                index, Qt.DisplayRole, first_file_stripped)
            if len(index_of_first_item):
                index_of_first_item = index_of_first_item[0].row()
                self.dropdown_colortable.setCurrentIndex(index_of_first_item)
            else:
                raise FileNotFoundError

                # todo update the act file model

        @self.btn_export.clicked.connect
        def btn_export_clicked():

            if self.actlist_model.rowCount(self) == 0:
                if self.working_directory == '':
                    error_message = 'There is no project directory specified'
                else:
                    error_message = 'Please import one color_palette.act inside \n{}'.format(
                        self.working_directory)
                logging.warning(error_message.replace('\n', ''))
                error_box = QMessageBox()
                error_box.setStyleSheet(stylesheet.houdini)
                error_box.setWindowTitle('File error')
                error_box.setText('There is an .act file missing' + ' ' * 50)
                error_box.setInformativeText(error_message)
                error_box.exec_()
                return 1

            # Dictionary of the two lossy quality values from the interface spinners
            lossy_dict = {
                '136': self.viewport_136.spin_quality.text(),
                '280': self.viewport_280.spin_quality.text()
            }
            self.color_table = path.join('.\\temp', 'current_act.txt')
            # We generate a colormap from the colormap viewer window
            with open(self.color_table, 'w') as txt:
                txt.writelines(self.plaintext_act_readout.toPlainText())
            color_table = self.color_table
            # self.actionUnloadGifs.triggered.emit()  # Stop and unload playing gifs
            self.viewport_280.unload_image()
            self.viewport_136.unload_image()
            # Start the export conversion using the directory the user selected and the lossy dict
            self.conversion = Conversion()
            self.conversion.conversion1.connect(
                lambda i, t: self.progress_bar1.setValue(int(i / t * 100)))
            self.conversion.conversion2.connect(
                lambda i, t: self.progress_bar2.setValue(int(i / t * 100)))
            self.conversion.conversion3.connect(
                lambda i, t: self.progress_bar3.setValue(int(i / t * 100)))
            self.conversion.conversion4.connect(
                lambda i, t: self.progress_bar4.setValue(int(i / t * 100)))
            self.conversion.conversion5.connect(
                lambda i, t: self.progress_bar5.setValue(int(i / t * 100)))

            self.conversion.true_init(self.working_directory, lossy_dict,
                                      color_table)
            self.conversion.conversion5_done.connect(
                lambda: self.make_video_list(self.working_directory))

        @self.btn_clean.clicked.connect
        def clean():
            self.console_add('Cleaning process has started')
            self.statusbar.showMessage('Cleaning process has started')
            files_to_delete = files_in_folder(self.working_directory, 'avi')
            files_to_delete.extend(
                files_in_folder(self.working_directory, 'tmp'))
            files_to_delete_names = [
                path.basename(file) for file in files_to_delete
            ]
            box = QMessageBox()
            box.setStyleSheet(self.styleSheet())
            # box_layout = box.layout()
            # box_layout.setColumnMinimumWidth(1,500)
            # QGridLayout.set
            box.setWindowTitle('Clean up')
            box.setText('You are about to delete: \n{}'.format('\n'.join(
                str(x) for x in files_to_delete_names)))
            box.setInformativeText('Are you sure?')
            box.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
            box.setDefaultButton(QMessageBox.No)
            user_choice = box.exec_()
            if user_choice == QMessageBox.Yes:
                for file in files_to_delete:
                    send2trash(file)

        self.btn_export.setContextMenuPolicy(Qt.CustomContextMenu)

        @self.btn_export.customContextMenuRequested.connect
        def btn_input_folder_open_menu(pos):
            print(self.size())

        # self.progress_bar1 = QProgressBar()
        # self.layout_fileops.addWidget(self.progress_bar1)
        # self.progress_bar2 = QProgressBar()
        # self.layout_fileops.addWidget(self.progress_bar2)
        # self.progress_bar3 = QProgressBar()
        # self.layout_fileops.addWidget(self.progress_bar3)
        # self.progress_bar4 = QProgressBar()
        # self.layout_fileops.addWidget(self.progress_bar4)
        # self.progress_bar5 = QProgressBar()
        # self.layout_fileops.addWidget(self.progress_bar5)

        # ############################## MIDDLE COLUMN ############################### #
        # @self.btn_fb280.clicked.connect
        # def btn_fb280_clicked():
        #     current_frame = self.movie280.currentFrameNumber()
        #     self.movie280.jumpToFrame(0)
        #     for i in range(current_frame - 1):
        #         self.movie280.jumpToNextFrame()
        #     fps = '\tFPS: ' + str(round(1000 / self.movie280.nextFrameDelay(), 2))
        #     delay = '\tDelay: ' + str(self.movie280.nextFrameDelay())
        #     frame_n = str(self.movie280.currentFrameNumber())
        #     self.statusbar.showMessage('280px: Jumped to frame #' + frame_n + fps + delay)
        #
        # @self.btn_playpause280.clicked.connect
        # def btn_playpause280_clicked():
        #     if self.btn_playpause280.isChecked():
        #         self.movie280.setPaused(True)
        #         self.statusbar.showMessage('280px: Paused on frame #' + str(self.movie280.currentFrameNumber()))
        #     else:
        #         self.movie280.setPaused(False)
        #         self.statusbar.showMessage('280px: Playing')
        #
        # @self.btn_ff280.clicked.connect
        # def btn_ff280_clicked():
        #     self.movie280.jumpToNextFrame()
        #     fps = '\tFPS: ' + str(round(1000 / self.movie280.nextFrameDelay(), 2))
        #     delay = '\tDelay: ' + str(self.movie280.nextFrameDelay())
        #     frame_n = str(self.movie280.currentFrameNumber())
        #     self.statusbar.showMessage('280px: Jumped to frame #' + frame_n + fps + delay)
        #
        # @self.slider_speed280.valueChanged.connect
        # def speed280_slider_changed(value):
        #     self.statusbar.showMessage('Speed of 280px changed to {}x'.format(value/100))
        #     self.spin_speed280.blockSignals(True)
        #     self.spin_speed280.setValue(value * 0.01)
        #     self.spin_speed280.blockSignals(False)
        #     self.movie280.setSpeed(value)
        #
        # @self.spin_speed280.valueChanged.connect
        # def speed280_spinner_changed(value):
        #     value = round(value, 2)
        #     self.statusbar.showMessage('Speed of 280px changed to {}x'.format(value))
        #     value *= 100
        #     self.slider_speed280.setValue(value)
        #     self.movie280.setSpeed(value)
        #
        # self.previous_scale280 = self.spin_scale280.value()
        # @self.spin_scale280.valueChanged.connect
        # def spin_scale280_value_changed(value):
        #     self.statusbar.showMessage('Zoom of 280px changed to {}x'.format(value))
        #     self.graphicsView_280.scale(1/self.previous_scale280, 1/self.previous_scale280)
        #     self.graphicsView_280.scale(value, value)
        #     self.slider_scale280.setValue(value)
        #     self.previous_scale280 = self.spin_scale280.value()
        #
        # self.spin_scale280.valueChanged.emit(self.spin_scale280.value())
        #
        # @self.spin_quality280.valueChanged.connect
        # def spin_quality280_value_changed():
        #     if self.check_livepreview280.isChecked():
        #         btn_update280_clicked()
        #
        # def btn_update280_clicked():
        #     # working_file = self.movie280.fileName()
        #     working_file = self.loaded_280.gif_path
        #     print(working_file)
        #     output_file = path.splitext(working_file)[0] + '.tmp'
        #     self.movie280.stop()
        #     lossy_factor = self.spin_quality280.text()
        #     # Instead of generating a txt file for a colortable
        #     # color_table = act_reader.create_gifsicle_colormap(self.dropdown_colortable.currentText())
        #     self.color_table = path.join('.\\temp', 'current_act.txt')
        #     # We generate a colormap from the colormap viewer window
        #     with open(self.color_table, 'w') as txt:
        #         txt.writelines(self.plaintext_act_readout.toPlainText())
        #     color_table = self.color_table
        #
        #     # self.btn_update280.setEnabled(False)
        #     self.gc = GifSicle(self.loaded_280, lossy_factor, color_table)
        #     # self.gc = GifSicle() todo figure out what is going on here
        #     # self.gc.return_signal.connect(lambda x: print(x))
        #     # self.gc.add(self.working_emoji, lossy_factor, color_table)
        #     # self.gc.run()
        #     # .return_signal.connect(self.console_add)
        #
        #     self.load_gif(output_file)
        #     temp_file_size = path.getsize(output_file)/1024
        #     self.statusbar.showMessage('Resulting filesize is: {:.2f} Kb'.format(temp_file_size))
        # self.btn_update280.clicked.connect(btn_update280_clicked)
        #
        # # ############################### RIGHT COLUMN ############################### #
        #
        # # Load the color table viewer
        # if len(default_project_folder):
        #     files = files_in_folder(self.working_directory, 'act')
        #     if len(files):
        #         self.load_act(files[self.dropdown_colortable.currentIndex()])
        #
        # @self.btn_fb136.clicked.connect
        # def btn_fb136_clicked():
        #     current_frame = self.movie136.currentFrameNumber()
        #     self.movie136.jumpToFrame(0)
        #     for i in range(current_frame - 1):
        #         self.movie136.jumpToNextFrame()
        #     fps = '\tFPS: ' + str(round(1000 / self.movie136.nextFrameDelay(), 2))
        #     delay = '\tDelay: ' + str(self.movie136.nextFrameDelay())
        #     frame_n = str(self.movie136.currentFrameNumber())
        #     self.statusbar.showMessage('136px: Jumped to frame #' + frame_n + fps + delay)
        #
        # @self.btn_playpause136.clicked.connect
        # def btn_playpause136_clicked():
        #     if self.btn_playpause136.isChecked():
        #         self.movie136.setPaused(True)
        #         self.statusbar.showMessage('136px: Paused on frame #' + str(self.movie136.currentFrameNumber()))
        #     else:
        #         self.movie136.setPaused(False)
        #         self.statusbar.showMessage('136px: Playing')
        #
        # @self.btn_ff136.clicked.connect
        # def btn_ff136_clicked():
        #     self.movie136.jumpToNextFrame()
        #     fps = '\tFPS: ' + str(round(1000 / self.movie136.nextFrameDelay(), 2))
        #     delay = '\tDelay: ' + str(self.movie136.nextFrameDelay())
        #     frame_n = str(self.movie136.currentFrameNumber())
        #     self.statusbar.showMessage('136px: Jumped to frame #' + frame_n + fps + delay)
        #
        # @self.slider_speed136.valueChanged.connect
        # def speed136_slider_changed(value):
        #     self.statusbar.showMessage('Speed of 136px changed to {}x'.format(value/100))
        #     self.spin_speed136.blockSignals(True)
        #     self.spin_speed136.setValue(value * 0.01)
        #     self.spin_speed136.blockSignals(False)
        #     self.movie136.setSpeed(value)
        #
        # @self.spin_speed136.valueChanged.connect
        # def speed136_spinner_changed(value):
        #     value = round(value, 2)
        #     self.statusbar.showMessage('Speed of 136px changed to {}x'.format(value))
        #     value *= 100
        #     self.slider_speed136.setValue(value)
        #     self.movie136.setSpeed(value)
        #
        # self.previous_scale136 = self.spin_scale136.value()
        # @self.spin_scale136.valueChanged.connect
        # def spin_scale136_value_changed(value):
        #     self.statusbar.showMessage('Zoom of 136px changed to {}x'.format(value))
        #     self.graphicsView_136.scale(1/self.previous_scale136, 1/self.previous_scale136)
        #     self.graphicsView_136.scale(value, value)
        #     self.slider_scale136.setValue(value)
        #     self.previous_scale136 = self.spin_scale136.value()
        # self.spin_scale136.valueChanged.emit(self.spin_scale136.value())
        #
        # @self.spin_quality136.valueChanged.connect
        # def spin_quality136_value_changed():
        #     if self.check_livepreview136.isChecked():
        #         btn_update136_clicked()
        #
        # def btn_update136_clicked():
        #     # working_file = self.movie136.fileName()
        #     working_file = self.loaded_136.gif_path
        #     output_file = path.splitext(working_file)[0] + '.tmp'
        #     self.movie136.stop()
        #     lossy_factor = self.spin_quality136.text()
        #     # Instead of generating a txt file for a colortable
        #     # color_table = act_reader.create_gifsicle_colormap(self.dropdown_colortable.currentText())
        #     self.color_table = path.join('.\\temp', 'current_act.txt')
        #     # We generate a colormap from the colormap viewer window
        #     with open(self.color_table, 'w') as txt:
        #         txt.writelines(self.plaintext_act_readout.toPlainText())
        #     color_table = self.color_table
        #
        #     GifSicle(self.loaded_136, lossy_factor, color_table)
        #     self.load_gif(output_file)
        #     temp_file_size = path.getsize(output_file)/1024
        #     self.statusbar.showMessage('Resulting filesize is: {:.2f} Kb'.format(temp_file_size))
        # self.btn_update136.clicked.connect(btn_update136_clicked)
        #
        # self.gifplayer136_widget = QWidget()
        # self.gifplayer136 = QLabel(self.gifplayer136_widget)
        # self.gifplayer136.setMinimumSize(QSize(136, 136))
        #
        # self.graphics_scene_136 = QGraphicsScene()
        # self.graphicsView_136.setScene(self.graphics_scene_136)
        # self.graphicsView_136.setInteractive(1)
        #
        # self.graphics_scene_136.addWidget(self.gifplayer136_widget)
        # self.graphicsView_136.scale(2, 2)
        #
        # self.gifplayer280_widget = QWidget()
        # self.gifplayer280 = QLabel(self.gifplayer280_widget)
        # self.gifplayer280.setMinimumSize(QSize(280, 280))
        #
        # self.graphics_scene_280 = QGraphicsScene()
        # self.graphicsView_280.setScene(self.graphics_scene_280)
        # self.graphicsView_280.setInteractive(1)
        #
        # self.graphics_scene_280.addWidget(self.gifplayer280_widget)
        # self.graphicsView_280.scale(2, 2)
    def make_video_list(self, folder=None, ext='avi'):
        # If no folder specified, update the current working directory
        if not folder:
            folder = self.working_directory
        if len(files_in_folder(folder, ext)) > 0:
            # Make a dictionary of emojis whose Emoji object is not None (i.e. was created successfully)
            emoji_dict = {
                Emoji(emoji).filename: Emoji(emoji)
                for emoji in files_in_folder(folder, ext) if Emoji(emoji)
            }
            # Make a model
            self.videolist_model = VideoListModel(emoji_dict)
            # Assign the model to the list view
            self.list_videoslist.setModel(self.videolist_model)
            # Enable the collect button
            self.btn_clean.setEnabled(True)

    # def update_video_list(self, folder=None, ext='avi'):
    #     # If no folder specified, update the current working directory
    #     if not folder:
    #         folder = self.working_directory
    #     if len(files_in_folder(folder, ext)) > 0:
    #         # Make a dictionary out of emojis, when emoji object is not none (has been successfully created)
    #         emoji_dict = {Emoji(emoji).filename: Emoji(emoji) for emoji in files_in_folder(folder, ext) if Emoji(emoji)}
    #         # Make a model
    #         self.videolist_model = VideoListModel(emoji_dict)
    #         # Assign the model to the list view
    #         self.list_videoslist.setModel(self.videolist_model)
    #         # Enable the collect button
    #         self.btn_collect.setEnabled(True)

    # ################################# LOADERS ################################## #

    def load_act(self, act_file):
        self.plaintext_act_readout.setToolTip(
            '{} is loaded.\n\n'
            'You can see and edit the color map here.\n'
            'Those changes appear on update and export.'.format(act_file))
        self.plaintext_act_readout.clear()
        act = act_reader.act_to_list(act_file)
        # self.graphics_scene.addText(''.join(act[0]))
        self.plaintext_act_readout.setPlainText('\n'.join(act[0]))
        self.statusbar.showMessage('"' + act_file + '"' +
                                   ' contains {} color(s)'.format(act[1]))
        if act[1] > 256:
            error_msg = corrupted_palette(act_file)
            QMessageBox.warning(self, *error_msg)
        return act

    def load_gif(self, gif_path: str) -> None:
        """
        This method chooses, and loads, in which viewport to load the gif, 280 or 136 one.

        :type gif_path: str
        :param gif_path: Full path to the gif, you want to load.
        """
        if '280' in gif_path:
            self.load280(gif_path)
        elif '136' in gif_path:
            self.load136(gif_path)
        else:
            logging.error(
                'load_gif function encountered a weird gif_path: {}'.format(
                    gif_path))

    def load280(self, file280):
        self.btn_playpause280.setChecked(
            False)  # Unpress the play-pause button
        self.btn_fb280.setEnabled(True)  # Enable back button
        self.btn_playpause280.setEnabled(True)  # Enable play-pause button
        self.btn_ff280.setEnabled(True)  # Enable forward button
        self.layout_gif280.setTitle(
            path.split(file280)[1])  # Set name of the gif as the title
        self.movie280.setFileName('')  # Free (close) the previously loaded image
        self.movie280 = QMovie(file280)  # Create a QMovie instance
        self.gifplayer280.setMovie(
            self.movie280)  # And assign it to the player widget
        self.movie280.setSpeed(
            self.spin_speed280.value() *
            100)  # Automatically set speed using the speed spinner
        self.movie280.start()
        return self.movie280.isValid()

    def load136(self, file136):
        self.btn_playpause136.setChecked(
            False)  # Unpress the play-pause button
        self.btn_fb136.setEnabled(True)  # Enable back button
        self.btn_playpause136.setEnabled(True)  # Enable play-pause button
        self.btn_ff136.setEnabled(True)  # Enable forward button
        self.layout_gif136.setTitle(
            path.split(file136)[1])  # Set name of the gif as the title
        self.movie136.setFileName('')  # Free (close) the previously loaded image
        self.movie136 = QMovie(file136)  # Create a QMovie instance
        self.gifplayer136.setMovie(
            self.movie136)  # And assign it to the player widget
        self.movie136.setSpeed(
            self.spin_speed136.value() *
            100)  # Automatically set speed using the speed spinner
        self.movie136.start()
        return self.movie136.isValid()

    def load_palette(self, palette: str) -> None:
        """
        This method chooses loads a palette image to 136 viewport.

        :type palette: str
        :param palette: Full path to the image, you want to load.
        """
        pixmap = QPixmap(palette)
        pixmap = pixmap.scaled(136, 136, mode=Qt.FastTransformation)
        self.gifplayer136.setPixmap(pixmap)
        # self.gifplayer136.scaled todo
        # self.gifplayer136.setScaledContents(True) todo

    def console_add(self, log_input):
        self.console.append(str(log_input))  #.rstrip())

    def convert_mov_to_mp4(self):
        print(QFileDialog())
        files, filtr = QFileDialog.getOpenFileNames(
            self, "Choose your files for conversion", '.',
            "All Files (*.*);;MOV (*.mov)", "MOV (*.mov)")
        print(files, filtr)
        for input_file in files:
            Handbrake(input_file)

    def minimal_size(self):
        self.resize(0, 0)

    def keyPressEvent(self, event):
        if event.key() == Qt.Key_ScrollLock:
            self.viewport_136.scroll_lock = not self.viewport_136.scroll_lock
            self.viewport_280.scroll_lock = not self.viewport_280.scroll_lock
            self.viewport_136.check_embedded()
            self.viewport_280.check_embedded()
        else:
            super(QtMainWindow, self).keyPressEvent(event)
Example #16
from ffmpeg import FFmpeg
import asyncio
import sys

resolution = ""
ffmpeg = FFmpeg()


@ffmpeg.on('progress')
def on_ffmpeg_progress(progress):
    if 'VMAF' in progress._fields:
        print(progress.VMAF)


ffmpeg.input(sys.argv[1])
ffmpeg.input(sys.argv[2])
ffmpeg.output("-", {'filter_complex': 'libvmaf'}, f="null")
loop = asyncio.get_event_loop()
loop.run_until_complete(ffmpeg.execute())
loop.close()
Example #17
import asyncio
from ffmpeg import FFmpeg

ffmpeg = (FFmpeg().option('y').input('input.mp4').output('output.mp4',
                                                         {'c:v': 'libx264'},
                                                         vf='scale=1280:-1',
                                                         preset='veryslow',
                                                         crf=24))


@ffmpeg.on('start')
def on_start(arguments):
    print('arguments:', arguments)


@ffmpeg.on('stderr')
def on_stderr(line):
    print('stderr:', line)


@ffmpeg.on('progress')
def on_progress(progress):
    print(progress)


@ffmpeg.on('completed')
def on_completed():
    print('completed')


@ffmpeg.on('terminated')
Example #18
# _*_ coding:utf-8 _*_

from ffmpeg import FFmpeg
import json
from cv import CV
from tools import big

path = u"test.mp4"
path = u'../02.mp4'

ffmpeg = FFmpeg(path)

ffmpeg.cut()

## Handle videos that have a rotate flag
cmd = u'ffmpeg.exe -y -i "test.mp4" -vf transpose=1 -vcodec libx264  -metadata:s:v:0 rotate=90 "t1_r90.mp4"'
cmd = u'ffmpeg.exe -y -i "test.mp4" -vf transpose=2 -vcodec libx264  -metadata:s:v:0 rotate=270 "t2_r270.mp4"'

## Grab one frame every 300 frames and tile them into a single image
cmd = u'ffmpeg.exe -y -i "F:/迅雷下载/abc/bunnyjanjan.yummypura/JSQB9496.mp4"  -frames 1 -vf "transpose=1,select=not(mod(n\\,300)),scale=1280:720,tile=2x2" test.png'

## Show video stream information
cmd = 'ffprobe -v quiet -print_format json -show_format -show_streams t2.mp4 >t2.json'

## Merge two video streams side by side (left/right)
cmd = u'ffmpeg.exe -i "02.mp4" -vf "[in] scale=iw/2:ih/2, pad=2*iw:ih [left]; movie="xjp.mp4", scale=iw/2:ih/2 [right];[left][right] overlay=main_w/2:0 [out]" combine.mp4'

## Play two video streams stacked top and bottom
cmd = u'ffplay.exe -i "02.mp4" -vf "[in] scale=iw/2:ih/2, pad=iw:2*ih [top]; movie="xjp.mp4", scale=iw/2:ih/2 [bottom];[top][bottom] overlay=0:main_h/2 [out]" '

## Put four video streams into a single grid
Example #19
from ffmpeg import FFmpeg
import asyncio
import sys

resolution = ""
ffprobe = FFmpeg(executable='ffprobe')


@ffprobe.on('progress')
def on_ffprobe_progress(progress):
    if 'resolution' in progress._fields:
        resolution = progress.resolution.replace("\n", "").replace("\r", "")
        print(resolution)


ffprobe.input(sys.argv[1])
loop = asyncio.get_event_loop()
loop.run_until_complete(ffprobe.execute())
loop.close()
Example #20
import asyncio
from ffmpeg import FFmpeg

ffmpeg = (FFmpeg().option('y').input('rtsp://127.0.0.1/cam',
                                     rtsp_transport='tcp',
                                     rtsp_flags='prefer_tcp').output(
                                         'output.mp4', vcodec='copy'))


@ffmpeg.on('start')
def on_start(arguments):
    print('arguments:', arguments)


@ffmpeg.on('stderr')
def on_stderr(line):
    print('stderr:', line)


@ffmpeg.on('progress')
def on_progress(progress):
    print(progress)


@ffmpeg.on('progress')
def time_to_terminate(progress):
    if progress.frame > 200:
        ffmpeg.terminate()


@ffmpeg.on('completed')
Example #21
    def start(self, agfid, device_name, temp_file_dir):

        params = {
            'segment_list_file_abs_path':
            temp_file_dir + str(datetime.datetime.now())[:10] + '.m3u8',
            'segment_afid_list_file_abs_path':
            temp_file_dir + str(datetime.datetime.now())[:10] + '_afid.m3u8',
            'segement_time':
            8,
            'device_mount_point':
            '/dev/' + device_name
        }
        self.ffmpeg = FFmpeg(self.get_live_command(params))

        if agfid == "":
            res, err = self.afs.create_gfid()
            if err is not None:
                return err
            agfid = res.agfid
        live = Live(agfid)

        err = self.ffmpeg.start()
        start = timer()
        now = datetime.datetime.now()
        live_time = now

        for line in self.ffmpeg.get_stdout():
            line = line.decode()
            if self.ffmpeg.is_fail_to_find_video_device(line):
                err = "cannot find video device " + params['device_mount_point']
                return err

            if self.ffmpeg.is_video_device_busy(line):
                err = "cannot connect to video device " + params[
                    'device_mount_point'] + " since it is busy"
                return err

            if self.ffmpeg.is_creating_segment_ts(line):
                if is_m3u8_file_exists(params):
                    m3u8 = M3U8(params['segment_list_file_abs_path'])
                    afid_m3u8 = None
                    if not is_afid_m3u8_file_exists(params):
                        contents = m3u8.get_contents(
                            m3u8.get_number_of_line() - 1)
                        afid_m3u8 = M3U8(
                            params['segment_afid_list_file_abs_path'])
                        afid_m3u8.create_from_contents(contents)
                        afid_m3u8.append_end("\n")
                        afid_m3u8.append_end(live.cts_afid)
                    else:
                        contents = m3u8.get_contents(
                            m3u8.get_number_of_line() - 1)
                        afid_m3u8 = M3U8(
                            params['segment_afid_list_file_abs_path'])
                        afid_m3u8.append_end("\n")
                        afid_m3u8.append_end(contents[-1])
                        afid_m3u8.append_end(live.cts_afid)
                    live.set_afid_m3u8(afid_m3u8)
                    live.set_m3u8(m3u8)

                    res, err = self.afs.upload(live.get_afid_m3u8().abs_path)
                    if err is not None:
                        return err
                    res, err = self.afs.set_gfid(live.get_agfid(), res.afid)
                    if err is not None:
                        return err

                live.num_of_ts = live.num_of_ts + 1
                live_time += datetime.timedelta(0, params['segement_time'])

            cmd_out = "live_start=" + now.strftime(
                "%Y-%m-%d %H:%M:%S"
            ) + ";lastest_up_cdn=" + datetime.datetime.now().strftime(
                "%Y-%m-%d %H:%M:%S") + ";num_of_ts=" + str(
                    live.num_of_ts) + ";live_time=" + live_time.strftime(
                        "%Y-%m-%d %H:%M:%S") + ";agfid=" + live.agfid + ";"
            print(cmd_out, flush=True)
            logging.info(cmd_out)
            live.cts_abs_path = self.ffmpeg.get_file_name_current_segemnt_ts(
                line)

            if self.ffmpeg.is_creating_segment_list(line):
                if live.cts_abs_path is not None:
                    res, err = self.afs.upload(live.cts_abs_path)
                    if err is not None:
                        return err
                    live.cts_afid = res.afid
                    os.remove(live.cts_abs_path)
        return err
Example #22
import asyncio
from ffmpeg import FFmpeg

ffmpeg = FFmpeg().option('y').input('pipe:0').output('output.mp4', c='copy')


@ffmpeg.on('start')
def on_start(arguments):
    print('arguments:', arguments)


@ffmpeg.on('stderr')
def on_stderr(line):
    print('stderr:', line)


@ffmpeg.on('progress')
def on_progress(progress):
    print(progress)


@ffmpeg.on('completed')
def on_completed():
    print('completed')


@ffmpeg.on('terminated')
def on_terminated():
    print('terminated')

Example #23
    def download(self):
        metadata = None
        track_image_path = None

        asyncio.set_event_loop(asyncio.new_event_loop())
        loop = asyncio.get_event_loop()

        if self.metadata_filepath is not None:
            if self.is_album:
                tg = TitleGenerator(self.metadata_filepath, self.artist)
            else:
                tg = TitleGenerator(self.metadata_filepath,
                                    self.artist,
                                    no_album=True)

            tg.make_titles()
            metadata = tg.get_titles()

        for num, url in self.urls:
            yt = None
            image_dl_failed = False
            failed_image_url = ""
            self.cur_song = num + self.start + 1
            try:
                if self.proxies is not None:
                    yt = YouTube(url, proxies=self.proxies)
                else:
                    yt = YouTube(url)

            except Exception as e:
                self.retry_urls.append((num, url))
                print(
                    f"Downloading song {font.apply('gb', str(self.cur_song))} - {font.apply('bf', '[Failed - ')} {font.apply('bf', str(e) + ']')}\n"
                )
                continue

            path = None
            try:
                yt.register_on_progress_callback(self.progress_function)
                self.cur_video = (yt.streams.filter(
                    type="audio",
                    subtype="mp4").order_by("abr").desc().first())

                safe_name = extract_title(
                    make_safe_filename(self.cur_video.title))
                path = self.cur_video.download(
                    output_path=self.outdir,
                    filename=safe_name,
                )
                self.successful_filepaths.append(path)
                self.successful += 1
            except (Exception, KeyboardInterrupt) as e:
                self.retry_urls.append((num, url))
                print(
                    f"Downloading song {font.apply('gb',str(self.cur_song))+' - '+font.apply('gb', self.cur_video.title)} - {font.apply('bf', '[Failed - ')} {font.apply('bf', str(e) + ']')}\n"
                )

                continue
            # if self.is_album:
            #     if self.image_filepath is None:
            #         if not self.album_image_set:
            #             image_path = Downloader.download_image(
            #                 yt.thumbnail_url, num, self.outdir
            #             )
            #             self.images.append(image_path)
            #             self.image_filepath = image_path
            #             self.album_image_set = True
            # else:
            #     image_path = Downloader.download_image(yt.thumbnail_url, num, self.outdir)
            #     self.images.append(image_path)
            #     self.image_filepath = image_path

            track_title = None
            track_artist = None

            if metadata is not None:
                t = metadata[num]
                track_title = t.title if not t.unused else self.cur_video.title
                track_artist = t.artist if not t.unused else self.artist
                track_album = self.album
                track_image_path = self.image_filepath
                if t.image_path is not None:
                    if not is_url(t.image_path):
                        track_image_path = t.image_path
                    else:
                        try:
                            track_image_path = Downloader.download_image(
                                t.image_path, num, self.outdir)
                            self.to_delete.append(track_image_path)
                            num = 0  # track num should always be 1 if downloading a single
                        except error.ImageDownloadError as e:
                            image_dl_failed = True
                            failed_image_url = t.image_path
                else:
                    if self.image_filepath is not None:
                        try:
                            track_image_path = Downloader.download_image(
                                self.image_filepath, num, self.outdir)
                            self.to_delete.append(track_image_path)
                            num = 0  # track num should always be 1 if downloading a single
                        except error.ImageDownloadError as e:
                            image_dl_failed = True
                            failed_image_url = self.image_filepath

                if not self.is_album:
                    track_album = t.album

            else:
                track_title = self.cur_video.title
                track_artist = self.artist
                track_album = self.album
                if self.image_filepath is not None:
                    if not is_url(self.image_filepath):
                        track_image_path = self.image_filepath
                    else:
                        try:
                            track_image_path = Downloader.download_image(
                                self.image_filepath, num, self.outdir)
                            self.to_delete.append(track_image_path)
                        except error.ImageDownloadError as e:
                            image_dl_failed = True
                            failed_image_url = self.image_filepath

            metadata_branch = "├──" if self.mp3 else "└──"

            try:
                if image_dl_failed:
                    raise error.ImageDownloadError(failed_image_url)

                self.apply_metadata(
                    num + 1,
                    self.total_songs,
                    path,
                    track_album,
                    track_title,
                    track_artist,
                    track_image_path,
                )
                print(
                    f"{metadata_branch} Applying metadata - {font.apply('bl', '[Done]')}"
                )

            except (Exception, KeyboardInterrupt) as e:
                print(
                    f"{metadata_branch} Applying metadata - {font.apply('bf', '[Failed - ')} {font.apply('bf', str(e) + ']')}"
                )

            if self.mp3:
                ffmpeg = FFmpeg().input(path).output(
                    f"{extract_title(path)}.mp3")

                @ffmpeg.on("progress")
                def mp3_conv_progress(event):
                    p = (to_sec(event.time) / int(yt.length)) * 100
                    progress = (
                        f"└── Converting to mp3 - [{p:.2f}%]" if p < 100 else
                        f"└── Converting to mp3 - {font.apply('bl', '[Done]          ')}"
                    )

                    end = "\n" if p >= 100 else "\r"
                    print(progress, end=end, flush=True)

                try:
                    loop.run_until_complete(ffmpeg.execute())

                    os.remove(f"{extract_title(path)}.mp4")
                    path = f"{extract_title(path)}.mp3"

                except (Exception, KeyboardInterrupt) as e:
                    print(
                        f"└── Converting to mp3 - {font.apply('bf', '[Failed - ')} {font.apply('bf', str(e) + ']')}"
                    )

            print(" ")
        loop.close()
        for image in self.to_delete:
            os.remove(image)
Example #24
0
import configparser
import logging
import os

from amqp_connection import Connection
from ffmpeg import FFmpeg

conn = Connection()

logging.basicConfig(
    format="%(asctime)-15s [%(levelname)s] %(message)s",
    level=logging.INFO,
)

config = configparser.RawConfigParser()
config.read(['worker.cfg', '/etc/py_ffmpeg_worker/worker.cfg'])

# config['app']['verbosity']

ffmpeg = FFmpeg()


def check_requirements(requirements):
    meet_requirements = True
    if 'paths' in requirements:
        required_paths = requirements['paths']
        assert isinstance(required_paths, list)
        for path in required_paths:
            if not os.path.exists(path):
                logging.debug("Warning: Required file does not exists: %s",
                              path)
                meet_requirements = False

    return meet_requirements
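
For context, a minimal sketch (not part of the original) of how check_requirements might be called on a decoded job message; the payload shape is an assumption, only the 'paths' key mirrors what the function above inspects.

# Hypothetical job payload; its shape is assumed for illustration only.
job = {"requirements": {"paths": ["/tmp/source.mp4"]}}

if not check_requirements(job["requirements"]):
    logging.warning("Requirements not met, the job cannot be processed yet")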
Example #25
0
import asyncio
from ffmpeg import FFmpeg

ffmpeg = FFmpeg().option('y').input(
    'rtsp://127.0.0.1/cam',
    rtsp_transport='tcp',
    rtsp_flags='prefer_tcp'
).output(
    'output.mp4',
    vcodec='copy',
)

@ffmpeg.on('start')
def on_start(arguments):
    print('arguments:', arguments)


@ffmpeg.on('stderr')
def on_stderr(line):
    print('stderr:', line)


@ffmpeg.on('progress')
def on_progress(progress):
    print(progress)


@ffmpeg.on('progress')
def time_to_terminate(progress):
    if progress.frame > 200:
        ffmpeg.terminate()
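
The handler above only takes effect while a transfer is running, and the snippet never starts one; following the loop.run_until_complete pattern used in the neighbouring examples, a run could be added like this (sketch):

loop = asyncio.get_event_loop()
loop.run_until_complete(ffmpeg.execute())
loop.close()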
Example #26
0
import asyncio
from ffmpeg import FFmpeg

ffmpeg = FFmpeg().option('y').input('rtsp://127.0.0.1/cam',
                                    rtsp_transport='tcp',
                                    rtsp_flags='prefer_tcp').output(
                                        'output.mp4',
                                        vcodec='copy',
                                    )

# The builder above is immediately replaced by this transcode example; options
# whose names are not valid Python identifiers (such as 'c:v') are passed in a
# dict while the rest are keyword arguments.
ffmpeg = FFmpeg().option('y').input('input.mp4').output('output.mp4',
                                                        {'c:v': 'libx264'},
                                                        vf='scale=1280:-1',
                                                        preset='veryslow',
                                                        crf=24)


@ffmpeg.on('start')
def on_start(arguments):
    print('arguments:', arguments)


@ffmpeg.on('stderr')
def on_stderr(line):
    print('stderr:', line)


@ffmpeg.on('progress')
def on_progress(progress):
    print(progress)
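
As in the previous example, handlers are registered but nothing is executed; running the second (transcode) builder would follow the same event-loop pattern:

loop = asyncio.get_event_loop()
loop.run_until_complete(ffmpeg.execute())
loop.close()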
Example #27
0
from ffmpeg import FFmpeg
import asyncio
import sys

resolution = ""
ffmpeg = FFmpeg(executable='ffmpeg')


@ffmpeg.on('progress')
def on_ffmpeg_progress(progress):
    if 'PSNR' in progress._fields:
        print(progress.PSNR)


ffmpeg.input(sys.argv[1])
ffmpeg.input(sys.argv[2])
ffmpeg.output("-", {'filter_complex': 'psnr'}, f="null")
loop = asyncio.get_event_loop()
loop.run_until_complete(ffmpeg.execute())
loop.close()
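
Whether the per-frame progress object carries a PSNR field depends on the ffmpeg build and filter setup, so the check above may never fire. As an illustrative alternative (not part of the original), the filter's summary line, which ffmpeg prints to stderr when the run finishes, could be captured with a handler registered before ffmpeg.execute():

import re

@ffmpeg.on('stderr')
def capture_psnr_summary(line):
    # The psnr filter reports "PSNR y:... u:... v:... average:..." at the end of the run.
    match = re.search(r'average:([\d.]+|inf)', line)
    if match:
        print('Average PSNR:', match.group(1))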
Example #28
0
import asyncio
from ffmpeg import FFmpeg

ffmpeg = (FFmpeg().option('y').input('pipe:0').output('output.mp4', c='copy'))


@ffmpeg.on('start')
def on_start(arguments):
    print('arguments:', arguments)


@ffmpeg.on('stderr')
def on_stderr(line):
    print('stderr:', line)


@ffmpeg.on('progress')
def on_progress(progress):
    print(progress)


@ffmpeg.on('completed')
def on_completed():
    print('completed')


@ffmpeg.on('terminated')
def on_terminated():
    print('terminated')

Example #29
0
import urllib.request
from ffmpeg import FFmpeg
import asyncio

url = "https://alpha-obs.yunshicloud.com/7674F33E470D459E/QMTNRK_YUNSHI/D52CB54DB7FE4AB0AA8FF77C7093AB84/D01BEFCB93CA47E099513C78BDC7CB30.mp4"

#filename,headers = urllib.request.urlretrieve(url)

fileavi = "test.avi"
filename = "test.mp4"

ff = FFmpeg()

@ff.on('start')
def on_start(arguments):
    print('Arguments:', arguments)

@ff.on('progress')
def on_progress(progress):
    print(progress)

ff.input(filename)
ff.input(fileavi)
ff.output("-",{'filter_complex':'psnr'},f="null")
loop = asyncio.get_event_loop()
loop.run_until_complete(ff.execute())
loop.close()
Example #30
0
from ffmpeg import FFmpeg
import asyncio
import sys

ffscale = FFmpeg()


@ffscale.on('completed')
def on_complete():
    print("completed")


# @ffscale.on('progress')
# def on_ffprobe_progress(progress):
#     print(progress.resolution)

new_file = sys.argv[1] + "_scale"
filter_scale = "scale=" + sys.argv[2]

ffscale.input(sys.argv[1])
ffscale.output(new_file, {'filter_complex': filter_scale}, f='mp4')
loop = asyncio.get_event_loop()
loop.run_until_complete(ffscale.execute())
loop.close()
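
The script expects the input path and an ffmpeg scale spec (for example 1280:720) as positional arguments, and writes the result next to the input with a "_scale" suffix; a minimal guard like the one below could precede the setup (the script name is illustrative and the check is not in the original):

import sys

if len(sys.argv) != 3:
    sys.exit("usage: python scale.py <input_file> <width:height>")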
Example #31
0
File: system.py Project: Time1ess/VES
def ffmpeg_process(ip):
    # Note: this FFmpeg appears to be the VES project's own wrapper (system.py
    # above), not the python-ffmpeg package used in the other examples.
    ff = FFmpeg("0.0.0.0", con.dt_addr[0], True)
    ff.start()
    print('FFmpeg process terminated.')
    return 0