Exemple #1
0
    def __init__(self, file_path, container_format, container_format_profile):
        """Wrap a media file, resolving its FileFormat from the container
        signature reported by the probe.

        Raises KeyError when the (format, profile) pair is unknown.
        """
        self._path = file_path
        self._tracks_by_type = None
        self._ffmpeg = Ffmpeg()  # TODO singleton

        # Build the (container, profile) -> FileFormat table once per process.
        if File._format_signatures is None:
            File._format_signatures = {
                signature: fmt
                for fmt, (_, _, signatures) in self._FORMATS_INFO.iteritems()
                for signature in signatures
            }
        signature_key = (container_format, container_format_profile)
        self._format = File._format_signatures[signature_key]
Exemple #2
0
class Colors(object):
    """Derives color metadata (range, space, transfer, primaries) for a video
    stream from ffprobe output, falling back to resolution/FPS-standard based
    defaults when the stream does not declare a value.
    """

    def __init__(self, w, h, standard, ffm_data):
        self._width = w            # coded width in pixels
        self._height = h           # coded height in pixels
        self._standard = standard  # VideoFpsStandard value (PAL/NTSC/...)
        self._ffm_data = ffm_data  # raw ffprobe stream dict
        self._ffmpeg = Ffmpeg()

    # TODO move out
    def pix_fmt(self):
        """Parsed PictureFormat of the stream."""
        return self._ffmpeg.parse_picture_format(self._ffm_data['pix_fmt'])

    def range(self):
        """Declared ColorRange; guesses TV range for the common 8/10-bit
        4:2:0 formats when undeclared, else None."""
        result = None
        raw = self._ffm_data.get('color_range')
        if raw is not None:
            result = self._ffmpeg.parse_color_range(raw)
        elif self.pix_fmt() in (PictureFormat.YUV420P, PictureFormat.YUV420P10LE):
            result = ColorRange.TV
        return result

    def is_hd(self):
        """True when either dimension is large enough to count as HD."""
        return self._width >= 1200 or self._height >= 700

    def correct_space(self):
        """ColorSpace expected for this resolution / FPS standard, or None
        when it cannot be determined (SD with an unknown standard)."""
        if self.is_hd():
            return ColorSpace.BT_709
        if self._standard == VideoFpsStandard.PAL:
            return ColorSpace.BT_601_PAL
        if self._standard == VideoFpsStandard.NTSC:
            return ColorSpace.BT_601_NTSC
        # Fix: the original `assert not self.is_hd()` here was unreachable
        # dead code -- the HD case already returned above.
        return None

    def _guess_metric(self, metric):
        """Parsed value of the ffprobe key *metric*, or the expected color
        space when the stream does not declare it."""
        raw = self._ffm_data.get(metric)
        return self._ffmpeg.parse_color_space(raw) if raw is not None else self.correct_space()

    def space(self):
        """ColorSpace of the stream; a declared FCC space is honored verbatim."""
        raw = self._ffm_data.get('color_space')
        if raw is not None and self._ffmpeg.parse_color_space(raw) == ColorSpace.FCC:
            return ColorSpace.FCC
        return self._guess_metric('color_space')

    def trc(self):
        """Transfer characteristic (falls back like _guess_metric)."""
        return self._guess_metric('color_transfer')

    def primaries(self):
        """Color primaries (falls back like _guess_metric)."""
        return self._guess_metric('color_primaries')
Exemple #3
0
 def __init__(self):
     """Set up crawler bookkeeping and the ffmpeg helper."""
     QObject.__init__(self)

     # ffmpeg wrapper used for probing/converting discovered files.
     self.ff = Ffmpeg()

     # Crawl state flags and shared collections.
     self.app_running = True
     self.now_crawling = True
     self.has_booted = False
     self.folders = set(Folders)
     self.files = files_list.Files
     self.filesPrevCount = 0
     self.prop = 0

     # State of the file currently being prepared.
     self._sub_files = []
     self._prep_file = ""
     self._prep_folder = ""
     self.prep_file_index = 0

     # Audio extensions the crawler accepts.
     self._supported_ext = ('.mp3', '.aac', '.wav', '.m4a', '.ogg')
    def __init__(self):
        """Initialise playback state and the ffmpeg helper."""

        QObject.__init__(self)

        # Current file and its size.
        self.file = ''
        self.file_size = 0

        # Player control flags.
        self.app_running = True
        self._not_paused = True
        self._not_stopped = False

        # Progress counters and volume.
        self.t_size = 0
        self.tt_played = 0
        self.volume_val = 1.4

        self.ff = Ffmpeg()
        print(threading.enumerate())  # debug: show live threads at startup
Exemple #5
0
    def __init__(self, parent_path, parent_format, ffm_data, codec_props):
        """Hold one ffprobe stream plus codec lookup tables.

        codec_props maps codec enum -> (codec id, display name, extension).
        """
        self._parent_path = parent_path
        self._parent_format = parent_format
        self._ffm_data = ffm_data
        self._ffmpeg = Ffmpeg()  # TODO pass from outside
        self._duration = None

        # Build forward and reverse codec lookup tables.
        self._codec_enums = {}
        self._codec_ids = {}
        self._codec_names = {}
        self._codec_file_extensions = {}
        for enum_value, (cid, cname, cext) in codec_props.iteritems():
            self._codec_enums[cid] = enum_value
            self._codec_ids[enum_value] = cid
            self._codec_names[enum_value] = cname
            self._codec_file_extensions[enum_value] = cext
Exemple #6
0
    def run(self):
        """ Main controller for Video2X

        This function controls the flow of video conversion
        and handles all necessary functions.
        """

        # parse arguments for waifu2x
        # check argument sanity
        self._check_arguments()

        # work with absolute paths from here on
        self.input_video = self.input_video.absolute()
        self.output_video = self.output_video.absolute()

        # ffmpeg wrapper used for extraction and reassembly
        ffmpeg = Ffmpeg(self.ffmpeg_settings, self.image_format)

        # dump every frame of the source video to disk
        ffmpeg.extract_frames(self.input_video, self.extracted_frames)

        Avalon.info('Reading video information')
        probe = ffmpeg.get_video_info(self.input_video)

        # locate the first video stream reported by ffprobe
        stream_index = None
        for stream in probe['streams']:
            if stream['codec_type'] == 'video':
                stream_index = stream['index']
                break

        # bail out when the input has no video at all
        if stream_index is None:
            Avalon.error('Aborting: No video stream found')
            raise StreamNotFoundError('no video stream found')

        video_stream = probe['streams'][stream_index]

        # average frame rate arrives as a fraction string (e.g. "24000/1001")
        framerate = float(Fraction(video_stream['avg_frame_rate']))
        ffmpeg.pixel_format = video_stream['pix_fmt']

        # map of pixel format name -> bit depth
        known_bit_depths = ffmpeg.get_pixel_formats()

        # resolve the source pixel format's bit depth
        try:
            self.bit_depth = known_bit_depths[ffmpeg.pixel_format]
        except KeyError:
            Avalon.error(f'Unsupported pixel format: {ffmpeg.pixel_format}')
            raise UnsupportedPixelError(
                f'unsupported pixel format {ffmpeg.pixel_format}')

        Avalon.info(f'Framerate: {framerate}')

        # derive output geometry from the scale ratio when one was given
        if self.scale_ratio:
            self.scale_width = int(self.scale_ratio * video_stream['width'])
            self.scale_height = int(self.scale_ratio * video_stream['height'])

        # upscale images one by one using waifu2x
        Avalon.info('Starting to upscale extracted images')
        self._upscale_frames()
        Avalon.info('Upscaling completed')

        # reassemble the upscaled frames into a video
        Avalon.info('Converting extracted frames into video')
        ffmpeg.convert_video(framerate,
                             f'{self.scale_width}x{self.scale_height}',
                             self.upscaled_frames)
        Avalon.info('Conversion completed')

        # carry the original audio/subtitle streams over
        Avalon.info('Migrating audio tracks and subtitles to upscaled video')
        ffmpeg.migrate_audio_tracks_subtitles(self.input_video,
                                              self.output_video,
                                              self.upscaled_frames)
Exemple #7
0
    def run(self):
        """Main controller for Video2X

        This function controls the flow of video conversion
        and handles all necessary functions.
        """

        # parse arguments for waifu2x
        # check argument sanity
        self._check_arguments()

        # work with absolute paths from here on
        self.input_video = os.path.abspath(self.input_video)
        self.output_video = os.path.abspath(self.output_video)

        # ffmpeg wrapper used for extraction and reassembly
        ffmpeg = Ffmpeg(self.ffmpeg_settings, self.image_format)

        # pick the waifu2x backend requested by the caller
        if self.waifu2x_driver == 'waifu2x_caffe':
            upscaler = Waifu2xCaffe(self.waifu2x_settings, self.method, self.model_dir)
        elif self.waifu2x_driver == 'waifu2x_converter':
            upscaler = Waifu2xConverter(self.waifu2x_settings, self.model_dir)
        else:
            raise Exception(f'Unrecognized waifu2x driver: {self.waifu2x_driver}')

        # dump every frame of the source video to disk
        ffmpeg.extract_frames(self.input_video, self.extracted_frames)

        Avalon.info('Reading video information')
        probe = ffmpeg.get_video_info(self.input_video)

        # locate the first video stream reported by ffprobe
        stream_index = None
        for stream in probe['streams']:
            if stream['codec_type'] == 'video':
                stream_index = stream['index']
                break

        # bail out when the input has no video at all
        if stream_index is None:
            Avalon.error('Aborting: No video stream found')
            exit(1)

        video_stream = probe['streams'][stream_index]

        # average frame rate arrives as a fraction string (e.g. "24000/1001")
        framerate = float(Fraction(video_stream['avg_frame_rate']))
        Avalon.info(f'Framerate: {framerate}')

        # derive output geometry from the scale ratio when one was given
        if self.scale_ratio:
            self.scale_width = int(self.scale_ratio * video_stream['width'])
            self.scale_height = int(self.scale_ratio * video_stream['height'])

        # upscale images one by one using waifu2x
        Avalon.info('Starting to upscale extracted images')
        self._upscale_frames(upscaler)
        Avalon.info('Upscaling completed')

        # reassemble the upscaled frames into a video
        Avalon.info('Converting extracted frames into video')
        ffmpeg.convert_video(framerate, f'{self.scale_width}x{self.scale_height}', self.upscaled_frames)
        Avalon.info('Conversion completed')

        # carry the original audio/subtitle streams over
        Avalon.info('Migrating audio tracks and subtitles to upscaled video')
        ffmpeg.migrate_audio_tracks_subtitles(self.input_video, self.output_video, self.upscaled_frames)
Exemple #8
0
class File(object):
    """A media container file.

    Resolves its FileFormat from the (container format, profile) signature
    reported by the media probe, and lazily enumerates its tracks with
    ffprobe, wrapping each in the matching Track subclass.
    """

    # Indices into the _TRACK_PROPS value tuples.
    _TRACK_PROPS_IDX_CLASS = 0
    _TRACK_PROPS_IDX_FFMPEG_STREAM = 1
    # TrackType -> (track wrapper class, ffmpeg stream-selector argument).
    # Chapters have no ffmpeg stream, hence None.
    _TRACK_PROPS = {
        TrackType.VID: (VideoTrack, Ffmpeg.STREAM_ARGUMENT_VID),
        TrackType.AUD: (AudioTrack, Ffmpeg.STREAM_ARGUMENT_AUD),
        TrackType.SUB: (SubtitleTrack, Ffmpeg.STREAM_ARGUMENT_SUB),
        TrackType.CHA: (ChaptersTrack, None),
    }

    # FileFormat -> (filename wildcards, track types the format may carry,
    # list of (container_format, container_format_profile) signatures).
    # NOTE(review): a None profile in a signature presumably means the probe
    # reports no profile for that container -- confirm against the prober.
    _FORMATS_INFO = {
        FileFormat.x3GP:
        (['*.3gp'], [TrackType.VID,
                     TrackType.AUD], [('MPEG-4', '3GPP Media Release 4'),
                                      ('MPEG-4', '3GPP Media Release 5')]),
        FileFormat.AC3: (['*.ac3'], [TrackType.AUD], [('AC-3', None)]),
        FileFormat.AMR: (['*.amr'], [TrackType.AUD], [('AMR', None)]),
        FileFormat.AVI: (['*.avi'], [TrackType.VID,
                                     TrackType.AUD], [('AVI', None),
                                                      ('AVI', 'OpenDML')]),
        FileFormat.CHA:
        (['*chapters*.txt',
          '*chapters*.xml'], [TrackType.CHA], [('Chapters', None)]),
        FileFormat.DTS: (['*.dts',
                          '*.dtshr'], [TrackType.AUD], [('DTS', None)]),
        FileFormat.EAC3: (['*.eac3'], [TrackType.AUD], [('E-AC-3', None)]),
        FileFormat.FLAC: (['*.flac'], [TrackType.AUD], [('FLAC', None)]),
        FileFormat.FLV: (['*.flv'], [TrackType.VID,
                                     TrackType.AUD], [('Flash Video', None)]),
        FileFormat.M4A: (['*.m4a'], [TrackType.AUD],
                         [('MPEG-4', 'Apple audio with iTunes info')]),
        FileFormat.M4V:
        (['*.m4v'], [TrackType.VID, TrackType.AUD, TrackType.SUB], []),
        FileFormat.MKV:
        (['*.mkv'], [TrackType.VID, TrackType.AUD,
                     TrackType.SUB], [('Matroska', None)]),
        FileFormat.MOV: (['*.mov'],
                         [TrackType.VID, TrackType.AUD,
                          TrackType.SUB], [('MPEG-4', 'QuickTime')]),
        FileFormat.MP3: (['*.mp3'], [TrackType.AUD], [('MPEG Audio', None)]),
        FileFormat.MP4:
        (['*.mp4'], [TrackType.VID, TrackType.AUD,
                     TrackType.SUB], [('MPEG-4', None),
                                      ('MPEG-4', 'Base Media'),
                                      ('MPEG-4', 'Base Media / Version 2'),
                                      ('MPEG-4', 'Sony PSP')]),
        FileFormat.MPG: (['*.mpg', '*.mpeg', '*.vob'],
                         [TrackType.VID, TrackType.AUD,
                          TrackType.SUB], [('MPEG-PS', None)]),
        FileFormat.RM: (['*.rm',
                         '*.rmvb'], [TrackType.VID,
                                     TrackType.AUD], [('RealMedia', None)]),
        FileFormat.SMK: (['*.smk'], [TrackType.VID,
                                     TrackType.AUD], [(None, None)]),
        FileFormat.SRT: (['*.srt'], [TrackType.SUB], [('SubRip', None)]),
        FileFormat.SSA: (['*.ssa', '*.ass'], [TrackType.SUB], []),
        FileFormat.SUP: (['*.sup'], [TrackType.SUB], [('PGS', None)]),
        FileFormat.TS: (['*.ts'],
                        [TrackType.VID, TrackType.AUD,
                         TrackType.SUB], [('MPEG-TS', None)]),
        FileFormat.WAV: (['*.wav'], [TrackType.AUD], [('Wave', None)]),
        FileFormat.WEBM: (['*.webm'], [TrackType.VID,
                                       TrackType.AUD], [('WebM', None)]),
        FileFormat.WMV: (['*.asf', '*.wma', '*.wmv'],
                         [TrackType.VID, TrackType.AUD,
                          TrackType.SUB], [('Windows Media', None)]),
    }
    # Lazily-built {(container_format, profile): FileFormat} lookup,
    # populated on first File() construction.
    _format_signatures = None

    @classmethod
    def possible_track_types(cls, file_path):
        """Return the track types a file named *file_path* may contain,
        judged purely by filename wildcard; [] when no format matches."""
        for file_format, (wildcards, track_types,
                          signatures) in cls._FORMATS_INFO.iteritems():
            for wildcard in wildcards:
                if fnmatch.fnmatch(file_path, wildcard):
                    return track_types
        return []

    def __init__(self, file_path, container_format, container_format_profile):
        """Raises KeyError when the (format, profile) signature is unknown."""
        self._path = file_path
        self._tracks_by_type = None
        self._ffmpeg = Ffmpeg()  # TODO singleton

        # Build the signature -> FileFormat table once per process.
        if File._format_signatures is None:
            formats = {}
            for file_format, (_, _,
                              signatures) in self._FORMATS_INFO.iteritems():
                for signature in signatures:
                    formats[signature] = file_format
            File._format_signatures = formats
        self._format = File._format_signatures[(container_format,
                                                container_format_profile)]

    def path(self):
        """Path this File was constructed with."""
        return self._path

    def _get_tracks(self):
        """Probe the file once and cache Track wrappers grouped by TrackType."""
        if self._tracks_by_type is None:
            tracks_data = {}
            for track_type in File.possible_track_types(self._path):
                if track_type == TrackType.CHA:
                    # Chapters have no ffmpeg stream; use a placeholder entry.
                    tracks_data.setdefault(TrackType.CHA, {})[-1] = {}
                else:
                    stream_id = self._TRACK_PROPS[track_type][
                        self._TRACK_PROPS_IDX_FFMPEG_STREAM]
                    # Group by the codec_type ffprobe actually reports, which
                    # may differ from the type used to select the stream.
                    for track_id, track in cmd.ffprobe(self._path,
                                                       stream_id).iteritems():
                        tracks_data.setdefault(
                            self._ffmpeg.parse_track_type(track['codec_type']),
                            {})[track_id] = track

            # Every known track type gets a list, possibly empty.
            self._tracks_by_type = {
                track_type: []
                for track_type in self._TRACK_PROPS.iterkeys()
            }
            for track_type, tracks_of_type in tracks_data.iteritems():
                for track_id, track_data in tracks_of_type.iteritems():
                    track_class = self._TRACK_PROPS[track_type][
                        self._TRACK_PROPS_IDX_CLASS]
                    self._tracks_by_type[track_type].append(
                        track_class(self._path, self._format, track_data))
                # Deterministic ordering by (source path, stream index).
                self._tracks_by_type[track_type].sort(
                    key=lambda t: t.qualified_id())
        return self._tracks_by_type

    def tracks(self, track_type):
        """All tracks of *track_type*, sorted by qualified id ([] when none)."""
        return self._get_tracks()[track_type]
Exemple #9
0
def video2x():
    """Main controller for Video2X

    This function controls the flow of video conversion
    and handles all necessary functions.

    Reads module-level globals (args, ffmpeg_arguments, ffmpeg_path,
    waifu2x_path, FRAMES, UPSCALED) and drives ffmpeg + waifu2x.
    """

    check_model_type(args)

    # Parse arguments for waifu2x
    # NOTE(review): `method` stays unbound if none of cpu/gpu/cudnn is set;
    # presumably argparse guarantees one of them -- confirm.
    if args.cpu:
        method = 'cpu'
    elif args.gpu:
        method = 'gpu'
        ffmpeg_arguments.append('-hwaccel {}'.format(ffmpeg_hwaccel))
    elif args.cudnn:
        method = 'cudnn'
        ffmpeg_arguments.append('-hwaccel {}'.format(ffmpeg_hwaccel))

    # Initialize objects for ffmpeg and waifu2x-caffe
    fm = Ffmpeg(ffmpeg_path, args.output, ffmpeg_arguments)
    w2 = Waifu2x(waifu2x_path, method, args.model_type)

    # Clear and recreate the frame working directories
    if os.path.isdir(FRAMES):
        shutil.rmtree(FRAMES)
    if os.path.isdir(UPSCALED):
        shutil.rmtree(UPSCALED)
    os.mkdir(FRAMES)
    os.mkdir(UPSCALED)

    # Extract frames from video
    fm.extract_frames(args.video, FRAMES)

    Avalon.info('Reading video information')
    info = get_video_info()
    # Analyze original video with ffprobe and retrieve framerate
    # width, height = info['streams'][0]['width'], info['streams'][0]['height']

    # Find index of video stream
    video_stream_index = None
    for stream in info['streams']:
        if stream['codec_type'] == 'video':
            video_stream_index = stream['index']
            break

    # Exit if no video stream found
    if video_stream_index is None:
        Avalon.error('Aborting: No video stream found')
        # Bug fix: previously execution fell through here with a None index
        # and crashed below with a TypeError; abort explicitly instead
        # (matches the sibling implementations).
        exit(1)

    # Get average frame rate of video stream (reported as a fraction string)
    framerate = float(
        Fraction(info['streams'][video_stream_index]['avg_frame_rate']))
    Avalon.info('Framerate: {}'.format(framerate))

    # Upscale images one by one using waifu2x
    Avalon.info('Starting to upscale extracted images')
    upscale_frames(w2)
    Avalon.info('Upscaling completed')

    # Frames to Video
    Avalon.info('Converting extracted frames into video')

    # Width/height will be coded width/height x upscale factor
    if args.factor:
        coded_width = info['streams'][video_stream_index]['coded_width']
        coded_height = info['streams'][video_stream_index]['coded_height']
        fm.convert_video(
            framerate, '{}x{}'.format(args.factor * coded_width,
                                      args.factor * coded_height), UPSCALED)

    # Use user defined output size
    else:
        fm.convert_video(framerate, '{}x{}'.format(args.width, args.height),
                         UPSCALED)
    Avalon.info('Conversion completed')

    # Extract and press audio in
    Avalon.info('Stripping audio track from original video')
    fm.extract_audio(args.video, UPSCALED)
    Avalon.info('Inserting audio track into new video')
    fm.insert_audio_track(UPSCALED)
Exemple #10
0
 def __init__(self, w, h, standard, ffm_data):
     """Capture geometry, FPS standard and raw ffprobe data for the stream."""
     self._ffmpeg = Ffmpeg()
     self._ffm_data = ffm_data
     self._standard = standard
     self._width = w
     self._height = h
Exemple #11
0
class Track(object):
    """One ffprobe stream of a media container, with codec lookup tables
    supplied by the concrete subclass."""

    # TODO move to mkvmerge class
    TYPE_FLAGS = {
        TrackType.VID: (None, '-D'),
        TrackType.AUD: ('--audio-tracks', '-A'),
        TrackType.SUB: ('--subtitle-tracks', '-S'),
    }

    _DURATION_REGEXP = re.compile(r'(?P<hh>\d+):(?P<mm>\d+):(?P<ss>[\d.]+)')

    def __init__(self, parent_path, parent_format, ffm_data, codec_props):
        """codec_props maps codec enum -> (codec id, name, file extension)."""
        self._parent_path = parent_path
        self._parent_format = parent_format
        self._ffm_data = ffm_data
        self._ffmpeg = Ffmpeg()  # TODO pass from outside
        self._duration = None

        # Build forward and reverse codec lookup tables.
        self._codec_enums = {}
        self._codec_ids = {}
        self._codec_names = {}
        self._codec_file_extensions = {}
        for enum_value, (cid, cname, cext) in codec_props.iteritems():
            self._codec_enums[cid] = enum_value
            self._codec_ids[enum_value] = cid
            self._codec_names[enum_value] = cname
            self._codec_file_extensions[enum_value] = cext

    def source_file(self):
        """Path of the container this track lives in."""
        return self._parent_path

    def container_format(self):
        return self._parent_format

    def get_single_track_file_extension(self):
        """Extension used when this track is stored alone in its own file."""
        return self._codec_file_extensions[self.codec()]

    def is_single(self):
        """True when the source is already a bare single-track codec file."""
        expected = self.get_single_track_file_extension()
        return platform.file_ext(self.source_file()) == expected

    def _tags(self):
        # Ensures a 'tags' dict exists so setters can write into it.
        return self._ffm_data.setdefault('tags', {})

    def id(self):
        """ffprobe stream index within the container."""
        return self._ffm_data['index']

    def qualified_id(self):
        """(source path, stream index) -- unique across files."""
        return self.source_file(), self.id()

    def type(self):
        return self._ffmpeg.parse_track_type(self._ffm_data['codec_type'])

    def codec(self):
        return self._codec_enums[self._codec_id()]

    def _codec_id(self):
        return self._ffm_data['codec_name']

    def codec_name(self):
        return self._codec_names[self.codec()]

    def name(self):
        """Track title, '' when untagged."""
        return self._tags().get('title', '')

    def language(self):
        """Language tag, mapping missing or 'non' values to 'und'."""
        code = self._tags().get('language')
        return 'und' if code in (None, 'non') else code

    def set_language(self, value):
        self._tags()['language'] = value

    def duration(self):
        """Duration in seconds, lazily parsed from the DURATION-eng tag;
        None when the tag is absent."""
        if self._duration is None:
            raw = self._tags().get('DURATION-eng')
            if raw:
                parts = self._DURATION_REGEXP.match(raw).groupdict()
                hours, minutes = int(parts['hh']), int(parts['mm'])
                self._duration = (hours * 60 + minutes) * 60 + float(parts['ss'])
        return self._duration

    def frames_len(self):
        """Frame count from tags or stream data, as int when parseable."""
        frames = self._tags().get('NUMBER_OF_FRAMES-eng', None) or self._ffm_data.get('nb_frames', None)
        return misc.try_int(frames)

    def is_forced(self):
        """Forced flag as bool, propagating None when ffprobe reported None."""
        flag = self._ffm_data['disposition']['forced']
        return None if flag is None else bool(flag)

    def set_forced(self, value):
        self._ffm_data['disposition']['forced'] = value

    def is_default(self):
        return bool(self._ffm_data['disposition']['default'])
Exemple #12
0
class Fs(QObject):
    """Qt-facing filesystem crawler.

    Discovers supported audio files around the one the user picked, probes
    and converts them with ffmpeg, and notifies the QML side via signals.
    Work runs on helper threads; state is shared between them without locks.
    """

    def __init__(self):
        QObject.__init__(self)
        self.app_running = True  # cleared externally to stop worker loops
        self.ff = Ffmpeg()  # ffmpeg wrapper used for probe/convert
        self.folders = set(Folders)  # folders already crawled
        self.files = files_list.Files  # shared list of probed file infos
        self.filesPrevCount = 0  # last file count reported to QML
        self.now_crawling = True  # True while a crawl is in progress
        self.prop = 0  # payload watched by _propertyNotify
        self.has_booted = False
        self._sub_files = []  # accumulator filled by _search
        self._prep_file = ""  # file currently being prepared
        self.prep_file_index = 0  # index of prepared file in self.files
        self._prep_folder = ""  # folder of the prepared file
        self._supported_ext = ('.mp3', '.aac', '.wav', '.m4a', '.ogg')

    # Signals consumed by the QML side.
    startUp = pyqtSignal(list, arguments=['bootUp'])
    called = pyqtSignal(list, arguments=['callToPlay'])
    propertyChanged = pyqtSignal(list, arguments=['propertyNotifier'])
    endOfPropertyChange = pyqtSignal(list, arguments=['endPropertyChange'])
    propertyEnd = pyqtSignal(list, arguments=['propertyEnded'])

    @pyqtSlot()
    def bootUp(self):
        """Push the currently-known files to QML and start change watching."""

        # send the files
        self.propertyNotify(self.files)
        self.startUp.emit(self.files)

    def propertyNotify(self, prop):
        """Watch *prop* (a list) on a helper thread, emitting when it grows."""

        self.prop = prop

        propNoti = threading.Thread(target=self._propertyNotify)
        propNoti.start()

    def propertyNotifier(self, result):
        """Forward a [count, payload] update to QML."""

        self.propertyChanged.emit(result)

    def _propertyNotify(self):
        """Poll self.prop every 0.3s while crawling; report growth to QML."""

        while self.app_running and self.now_crawling:

            sleep(.3)

            count = len(self.prop)
            if count > self.filesPrevCount:
                self.filesPrevCount = count
                self.propertyNotifier([count, self.prop])

    def endPropertyChange(self):
        """Emit the end-of-change signal with the final item count."""

        sleep(1)
        count = len(self.prop)
        result = [count, '']

        # emit the end of property
        self.endOfPropertyChange.emit(result)

    def endProperty(self):
        """Stop the crawl, notify QML, and schedule delayed cleanup."""

        self.now_crawling = False

        self.endPropertyChange()

        endProp = threading.Thread(target=self._endProperty)
        endProp.start()

    def _endProperty(self):
        """After a grace period, reset the watched payload and signal the end."""

        sleep(15)
        self.prop = 0
        self.propertyEnded()

    def propertyEnded(self):
        """Tell QML that property updates are over."""

        result = []
        self.propertyEnd.emit(result)

    def prepare(self, file):
        """Normalise *file*'s path separators and prepare it on a thread."""

        # run _prepare on a helper thread so the UI stays responsive
        self._prep_file = file.replace("\\", "/")
        prep_thread = threading.Thread(target=self._prepare)
        prep_thread.start()

    def _prepare(self):
        """Crawl the file's folder for siblings, convert the file, then play."""

        # split off the containing folder of the prepared file
        splits = os.path.split(self._prep_file)
        self._prep_folder = splits[0]

        self.subs_prep(self._prep_folder)

        self._ffjob(self._prep_file)

        self.callToPlay()

    def callToPlay(self):
        """Emit the 'called' signal with the prepared file's info."""

        index = self.prep_file_index
        lists = [self._prep_file, self.files[index]['format_name'], index]

        self.called.emit(lists)

    def subs_prep(self, folder):
        """Scan the prepared folder on a helper thread.

        NOTE(review): the *folder* argument is unused; _subs_preb reads
        self._prep_folder instead -- confirm this is intended.
        """

        sub_preb = threading.Thread(target=self._subs_preb)
        sub_preb.start()

    def _subs_preb(self):
        """Probe every other supported file in the prepared folder, appending
        new entries to the shared files list."""

        # check if folder has been scanned before
        if self._prep_folder not in self.folders:
            self.now_crawling = True
            self.folders.add(self._prep_folder)
        else:
            self.now_crawling = False
            return

        files = self._list_dir(self._prep_folder)
        # NOTE(review): raises ValueError if the prepared file was filtered
        # out of the listing (e.g. unsupported extension) -- confirm.
        files.remove(self._prep_file)
        for a in files:
            tags = self.ff.probe(a)

            if tags not in self.files:
                self.files.append(tags)

        self.endProperty()

    def ffjob(self, file):
        """Public wrapper around _ffjob."""

        self._ffjob(file)

    def _ffjob(self, file):
        """Probe and convert *file*, record its info, remember its index.

        Checks app_running between the slow steps so shutdown can interrupt.
        """

        if self.app_running:

            # check if app is running
            if not self.app_running:
                return False
            info = self.ff.probe(file)

            # check if app is running
            if not self.app_running:
                return False
            self.ff.convert(file, info['format_name'])

            # check if app is running
            if not self.app_running:
                return False

            if info not in self.files:
                self.files.append(info)

            self.prep_file_index = self.files.index(info)

    def _list_dir(self, directory):
        """Recursively collect supported files under *directory*.

        NOTE(review): results accumulate in self._sub_files across calls,
        so repeated calls return previously-found files too -- confirm.
        """

        sleep(.3)
        self._search(directory)
        return self._sub_files

    def _is_dir(self, entry):
        """True if *entry* is a listable directory (recursing into it as a
        side effect); False otherwise.

        NOTE(review): the bare except also swallows errors raised inside the
        recursive _search call -- confirm this is intended.
        """

        try:
            os.listdir(entry)
            self._search(entry)
            return True
        except:
            return False

    def _search(self, directory):
        """Append supported files in *directory* (recursively) to _sub_files."""

        list_dir = os.listdir(directory)
        for entry in list_dir:
            path = directory + "/" + entry
            if self._is_dir(path):
                pass
            else:
                split = os.path.splitext(entry)
                if split[1] in self._supported_ext:
                    self._sub_files.append(path)
Exemple #13
0
    def run(self):
        """Main controller for Video2X

        This function controls the flow of video conversion
        and handles all necessary functions.
        """

        # Parse arguments for waifu2x
        # Check argument sanity
        self._check_model_type(self.model_type)
        self._check_arguments()

        # Work with absolute paths from here on
        self.input_video = os.path.abspath(self.input_video)
        self.output_video = os.path.abspath(self.output_video)

        # Ensure the ffmpeg directory ends with a path separator,
        # otherwise there will be a format error
        if self.ffmpeg_path[-1] != '/' and self.ffmpeg_path[-1] != '\\':
            self.ffmpeg_path = '{}/'.format(self.ffmpeg_path)

        # Verify that FFMPEG and waifu2x are present
        if not os.path.isdir(self.ffmpeg_path):
            raise FileNotFoundError(self.ffmpeg_path)
        if not os.path.isfile(self.waifu2x_path) and not os.path.isdir(self.waifu2x_path):
            raise FileNotFoundError(self.waifu2x_path)

        # ffmpeg wrapper used for extraction and reassembly
        ffmpeg = Ffmpeg(self.ffmpeg_path, self.ffmpeg_arguments)

        # Pick the waifu2x backend requested by the caller
        if self.waifu2x_driver == 'waifu2x_caffe':
            upscaler = Waifu2xCaffe(self.waifu2x_path, self.method, self.model_type)
        elif self.waifu2x_driver == 'waifu2x_converter':
            upscaler = Waifu2xConverter(self.waifu2x_path)
        else:
            raise Exception('Unrecognized waifu2x driver: {}'.format(self.waifu2x_driver))

        # Dump every frame of the source video to disk
        ffmpeg.extract_frames(self.input_video, self.extracted_frames)

        Avalon.info('Reading video information')
        probe = ffmpeg.get_video_info(self.input_video)

        # Locate the first video stream reported by ffprobe
        stream_index = None
        for stream in probe['streams']:
            if stream['codec_type'] == 'video':
                stream_index = stream['index']
                break

        # Bail out when the input has no video at all
        if stream_index is None:
            Avalon.error('Aborting: No video stream found')
            exit(1)

        video_stream = probe['streams'][stream_index]

        # Average frame rate arrives as a fraction string (e.g. "24000/1001")
        framerate = float(Fraction(video_stream['avg_frame_rate']))
        Avalon.info('Framerate: {}'.format(framerate))

        # Output geometry is coded size times the upscale ratio when given
        if self.ratio:
            self.output_width = self.ratio * video_stream['coded_width']
            self.output_height = self.ratio * video_stream['coded_height']

        # Upscale images one by one using waifu2x
        Avalon.info('Starting to upscale extracted images')
        self._upscale_frames(upscaler)
        Avalon.info('Upscaling completed')

        # Reassemble the upscaled frames into a video
        Avalon.info('Converting extracted frames into video')
        output_size = '{}x{}'.format(self.output_width, self.output_height)
        ffmpeg.convert_video(framerate, output_size, self.upscaled_frames)
        Avalon.info('Conversion completed')

        # Carry the original audio/subtitle streams over
        Avalon.info('Migrating audio tracks and subtitles to upscaled video')
        ffmpeg.migrate_audio_tracks_subtitles(self.input_video, self.output_video, self.upscaled_frames)
class Control(QObject):
    """Qt bridge that converts audio files to WAV (via Ffmpeg) and plays them.

    Exposes play/pause/resume/stop slots to QML and reports playback state
    through pyqtSignal notifications.  All blocking work runs on background
    threads so the Qt event loop stays responsive.
    """
    def __init__(self):
        """Initialise playback state and the Ffmpeg converter."""

        QObject.__init__(self)
        self.file = ''  # path of the file currently selected for playback
        self.file_size = 0  # size passed in by the caller, used for progress
        self.app_running = True  # cleared on shutdown to end worker loops
        self._not_paused = True  # False while playback is paused
        self._not_stopped = False  # False while playback is stopped
        self.t_size = 0
        # Advanced by 512 per chunk in _t_played, then divided by file_size.
        # NOTE(review): units look inconsistent (frames vs bytes) — confirm.
        self.tt_played = 0
        self.volume_val = 1.4  # divisor applied to each sample in _play
        self.ff = Ffmpeg()
        # Debug aid: show which threads are alive at start-up.
        print(threading.enumerate())

    stillPlaying = pyqtSignal(str, arguments=['playing'])
    completedPlaying = pyqtSignal(str, arguments=["complete"])

    @pyqtSlot(str, str, str)
    def play(self, file, f_for, size):
        """Stop any current playback, then start *file* on a worker thread.

        *f_for* is the source format extension handed to the converter;
        *size* is the file size as a string.
        """

        # Tell a running playback loop to exit, then give it time to do so.
        self._not_stopped = False
        sleep(2)

        self.file = file
        self.file_size = int(size)

        worker = threading.Thread(target=self._play, args=(f_for,))
        worker.start()

    def _play(self, f_for):
        """Worker: convert ``self.file`` to WAV, then stream it via PyAudio.

        Applies a crude software volume control by dividing every 16-bit
        sample by ``self.volume_val``, and honours the pause/stop flags
        between 512-frame chunks.  Returns 1 early if the app is shutting
        down before conversion starts.
        """

        # The converter writes <name>.wav into its save directory.
        # NOTE(review): str.replace swaps every occurrence of the extension
        # substring, not just the suffix — verify filenames can't contain it.
        splits = os.path.split(self.file)
        filename = splits[1].replace(f_for, 'wav')
        file = self.ff.sav_dir + '/' + filename

        if self.app_running:
            self.ff.convert(self.file, f_for)

        else:
            return 1

        print('quick or ')

        pyaud = pyaudio.PyAudio()

        wf = wave.open(file, mode='rb')

        stream = pyaud.open(format=pyaud.get_format_from_width(
            wf.getsampwidth()),
                            channels=wf.getnchannels(),
                            rate=wf.getframerate(),
                            output=True)

        self.playing()
        self._not_stopped = True
        self._not_paused = True

        # Prime the loop with a single frame; subsequent reads take 512.
        a = wf.readframes(1)

        print('\n\n inside here \n', self._not_paused, self._not_stopped)

        while self.app_running and len(a) != 0:

            if self._not_stopped:
                if self._not_paused:

                    stream.write(a)

                    # np.frombuffer replaces the deprecated (and since
                    # removed) np.fromstring for decoding raw 16-bit PCM.
                    a = np.frombuffer(wf.readframes(512), np.int16)
                    self.t_played()
                    # Scale every sample down by the volume divisor and
                    # repack to bytes for the next stream.write().
                    scaled = [int(float(x) / self.volume_val) for x in a]
                    a = struct.pack('h' * len(scaled), *scaled)

                else:
                    # Paused: idle briefly without consuming frames.
                    sleep(.1)
            else:
                break

        wf.close()
        stream.stop_stream()
        stream.close()

        pyaud.terminate()
        self.complete()

    @pyqtSlot()
    def stop(self):
        """Request playback stop from the UI thread."""

        threading.Thread(target=self._stop).start()
        # Crude synchronisation: give the playback loop time to notice.
        sleep(1)

    def _stop(self):
        """Clear the run flag so the playback loop exits."""

        self._not_stopped = False

    @pyqtSlot()
    def pause(self):
        """Ask the playback loop to pause, then wait briefly."""

        threading.Thread(target=self._pause).start()
        # Give the worker a moment to observe the flag change.
        sleep(1)

    def _pause(self):
        """Clear the pause flag so _play idles between chunks."""

        self._not_paused = False

    @pyqtSlot()
    def resume(self):
        """Ask a paused playback loop to continue, then wait briefly."""

        threading.Thread(target=self._resume).start()
        # Give the worker a moment to observe the flag change.
        sleep(1)

    def _resume(self):
        """Set the pause flag so _play resumes writing chunks."""

        self._not_paused = True

    def playing(self):
        """Notify QML that playback has started."""

        self.stillPlaying.emit('playing')

    def complete(self):
        """Emit completedPlaying when playback finishes.

        NOTE(review): the signal is suppressed only when the track is
        paused AND still marked as running (the ``elif`` branch) — confirm
        this is the intended pause semantics.
        """

        print('complete')
        if self._not_paused:
            self.completedPlaying.emit('')
        elif self._not_stopped:
            pass
        else:
            self.completedPlaying.emit('')

    @pyqtSlot(str)
    def controlVolume(self, deci):
        """Apply a new volume level (percentage string) asynchronously."""

        threading.Thread(target=self._controlVolume, args=(deci,)).start()

    def _controlVolume(self, deci):
        """Convert the percentage string into the sample divisor.

        ``volume_val`` ends up as 100 / percent rounded to one decimal,
        so louder settings yield a smaller divisor in _play.
        """

        percent = float(deci)
        # Round to one decimal place exactly as format(..., '.1f') does.
        self.volume_val = float(format(100 / percent, '.1f'))

    def t_played(self):
        """Update the progress counter on a short-lived thread."""

        threading.Thread(target=self._t_played).start()

    def _t_played(self):
        """Advance the progress counter and log the current percentage.

        NOTE(review): one thread is spawned per chunk, so increments of
        ``tt_played`` may race — confirm this is acceptable.
        """

        self.tt_played += 512
        progress = self.tt_played / self.file_size * 100
        print(progress)
        print(self.file_size)
        print(self.tt_played)

    def propertyNotify(self, prop):
        """Record *prop* and start the polling thread that reports changes."""

        self.prop = prop
        threading.Thread(target=self._propertyNotify).start()

    def propertyNotifier(self, result):
        # Forward the change payload to QML.
        # NOTE(review): propertyChanged is not declared among this class's
        # visible signals — confirm it is defined elsewhere (e.g. subclass).
        self.propertyChanged.emit(result)

    def _propertyNotify(self):
        # Poll self.prop while the app runs and playback is active, emitting
        # a notification whenever the count grows.
        # NOTE(review): self.filesPrevCount is never initialised in __init__,
        # so the first comparison raises AttributeError unless set elsewhere
        # — confirm.
        while self.app_running and self._not_stopped:

            sleep(.3)

            count = self.prop
            if count > self.filesPrevCount:
                self.filesPrevCount = count
                self.propertyNotifier([count, self.prop])

    def endPropertyChange(self):
        # Announce that property updates are finished.
        # NOTE(review): len(self.prop) treats prop as a sized container here,
        # while _propertyNotify compares it as a number — verify which is
        # intended.
        sleep(1)
        count = len(self.prop)
        result = [count, '']

        # emit the end of property
        self.endOfPropertyChange.emit(result)

    def endProperty(self):
        """Stop crawling, announce the end, then schedule the delayed reset."""

        self.now_crawling = False

        self.endPropertyChange()

        threading.Thread(target=self._endProperty).start()

    def _endProperty(self):
        # After a long grace period, clear the payload and emit the final
        # signal.  NOTE(review): the 15 s delay looks arbitrary — confirm.
        sleep(15)
        self.prop = 0
        self.propertyEnded()

    def propertyEnded(self):
        # Emit the terminal signal with an empty payload.
        # NOTE(review): propertyEnd is not declared among this class's
        # visible signals — confirm it exists elsewhere.
        result = []
        self.propertyEnd.emit(result)