Example #1
0
 def __init__(self, configfile: ConfigFile):
     """Prepare the per-queue work queues and the ffmpeg wrapper.

     :param configfile: parsed configuration (transcode.yml)
     """
     self.configfile = configfile
     self.ffmpeg = FFmpeg(self.configfile.ffmpeg_path)
     # one queue per configured name, preceded by the catch-all default
     self.queues = {'_default_': Queue()}
     self.queues.update((qname, Queue()) for qname in configfile.queues)
Example #2
0
 def __init__(self, queuename, queue: Queue, configfile: ConfigFile, manager):
     """Worker thread bound to a single thread-safe queue of encode jobs.

     :param queuename:   Name of the queue, for thread naming purposes only
     :param queue:       Thread-safe queue containing files to be encoded
     :param configfile:  Instance of the parsed configuration (transcode.yml)
     :param manager:     Reference to object that manages this thread
     """
     super().__init__(name=queuename, group=None, daemon=True)
     self._manager = manager
     self.config = configfile
     self.queue = queue
     self.ffmpeg = FFmpeg(configfile.ffmpeg_path)
Example #3
0
 def get_processor_by_name(self, name: str) -> Processor:
     """Return the Processor implementation registered under *name*.

     Exits the process when the name is unknown or the matching tool
     path is not configured for this host.
     """
     if name == 'ffmpeg':
         return FFmpeg(self.ffmpeg_path)
     if name == 'handbrake':
         if self.hbcli_path:
             return Handbrake(self.hbcli_path)
         print(f'Missing "hbcli" path for host "{self.name}"')
         sys.exit(1)
     # BUG FIX: previously any non-'ffmpeg' name fell through to Handbrake
     # whenever hbcli_path was set, so this message was unreachable for
     # genuinely unknown processor names.
     print(f'Unknown processor type "{name}" for host "{self.name}"')
     sys.exit(1)
Example #4
0
 def get_processor_by_name(self, name: str) -> Processor:
     """Return a Processor for *name*, falling back to Handbrake.

     Exits the process when neither tool path yields a processor.
     """
     if name == 'ffmpeg':
         return FFmpeg(self.ffmpeg_path)
     if not self.hbcli_path:
         print('Missing "ffmpeg" or "hbcli" path')
         sys.exit(1)
     return Handbrake(self.hbcli_path)
Example #5
0
 def __init__(self, hostname, props, queue, cluster):
     """Worker thread servicing one cluster host.

     :param hostname:    name of host from config/clusters
     :param props:       per-host properties from config/clusters
                         (accessed via attributes, e.g. ffmpeg_path)
     :param queue:       Work queue assigned to this thread, could be many-to-one in the future.
     :param cluster:     Reference to parent Cluster object
     """
     super().__init__(name=hostname, group=None, daemon=True)
     self._manager = cluster
     self._complete = list()
     self.hostname = hostname
     self.props = props
     self.queue = queue
     self.ffmpeg = FFmpeg(props.ffmpeg_path)
Example #6
0
    def __init__(self, name, configs: Dict, config: ConfigFile, ssh: str):
        """
        :param name:        Cluster name, used only for thread naming
        :param configs:     The "clusters" section of the global config
        :param config:      The full configuration object
        :param ssh:         Path to local ssh
        """
        super().__init__(name=name, group=None, daemon=True)
        self.queues: Dict[str, Queue] = dict()
        self.ssh = ssh
        self.hosts: List[ManagedHost] = list()
        self.config = config
        # NOTE(review): 'verbose' is not a parameter here; assumed to be a
        # module-level name - confirm against the file's imports
        self.verbose = verbose
        self.ffmpeg = FFmpeg(config.ffmpeg_path)
        self.lock = Cluster.terminal_lock
        self.completed: List = list()

        # Dispatch table: host type -> ManagedHost subclass servicing it.
        # 'local' is the special case of using the pytranscoder host
        # itself as a cluster host. This replaces three copy-pasted
        # branches that differed only in the class constructed.
        host_types: Dict = {
            'local': LocalHost,
            'mounted': MountedManagedHost,
            'streaming': StreamingManagedHost,
        }

        for host, props in configs.items():
            hostprops = RemoteHostProperties(host, props)
            if not hostprops.is_enabled:
                continue
            hosttype = hostprops.host_type

            #
            # make sure a Queue exists for each queue name used by this host
            #
            host_queues: Dict = hostprops.queues
            for host_queue in host_queues:
                if host_queue not in self.queues:
                    self.queues[host_queue] = Queue()

            host_class = host_types.get(hosttype)
            if host_class is None:
                print(
                    crayons.red(
                        f'Unknown cluster host type "{hosttype}" - skipping'))
                continue

            #
            # for each queue configured for this host create a dedicated
            # thread for each slot
            #
            for host_queue, slots in host_queues.items():
                for _ in range(0, slots):
                    _h = host_class(host, hostprops,
                                    self.queues[host_queue], self)
                    if not _h.validate_settings():
                        sys.exit(1)
                    self.hosts.append(_h)
Example #7
0
class Cluster(Thread):
    """Thread to create host threads and wait for their completion."""

    # class-level lock serializing console output across all host threads
    terminal_lock: Lock = Lock()

    def __init__(self, name, configs: Dict, config: ConfigFile, ssh: str):
        """
        :param name:        Cluster name, used only for thread naming
        :param configs:     The "clusters" section of the global config
        :param config:      The full configuration object
        :param ssh:         Path to local ssh
        """
        super().__init__(name=name, group=None, daemon=True)
        self.queues: Dict[str, Queue] = dict()
        self.ssh = ssh
        self.hosts: List[ManagedHost] = list()
        self.config = config
        # NOTE(review): 'verbose' assumed to be a module-level name - confirm
        self.verbose = verbose
        self.ffmpeg = FFmpeg(config.ffmpeg_path)
        self.lock = Cluster.terminal_lock
        self.completed: List = list()

        for host, props in configs.items():
            hostprops = RemoteHostProperties(host, props)
            if not hostprops.is_enabled:
                continue
            hosttype = hostprops.host_type

            #
            # make sure Queue exists for name
            #
            host_queues: Dict = hostprops.queues
            if len(host_queues) > 0:
                for host_queue in host_queues:
                    if host_queue not in self.queues:
                        self.queues[host_queue] = Queue()

            _h = None
            if hosttype == 'local':
                # special case - using pytranscoder host also as cluster host
                for host_queue, slots in host_queues.items():
                    #
                    # for each queue configured for this host create a dedicated thread for each slot
                    #
                    for slot in range(0, slots):
                        _h = LocalHost(host, hostprops,
                                       self.queues[host_queue], self)
                        if not _h.validate_settings():
                            sys.exit(1)
                        self.hosts.append(_h)

            elif hosttype == 'mounted':
                for host_queue, slots in host_queues.items():
                    #
                    # for each queue configured for this host create a dedicated thread for each slot
                    #
                    for slot in range(0, slots):
                        _h = MountedManagedHost(host, hostprops,
                                                self.queues[host_queue], self)
                        if not _h.validate_settings():
                            sys.exit(1)
                        self.hosts.append(_h)

            elif hosttype == 'streaming':
                for host_queue, slots in host_queues.items():
                    #
                    # for each queue configured for this host create a dedicated thread for each slot
                    #
                    for slot in range(0, slots):
                        _h = StreamingManagedHost(host, hostprops,
                                                  self.queues[host_queue],
                                                  self)
                        if not _h.validate_settings():
                            sys.exit(1)
                        self.hosts.append(_h)

            else:
                print(
                    crayons.red(
                        f'Unknown cluster host type "{hosttype}" - skipping'))

    def enqueue(self, file,
                forced_profile: Optional[str]) -> (str, Optional[EncodeJob]):
        """Add a media file to this cluster queue.
           This is different than in local mode in that we only care about handling skips here.
           The profile will be selected once a host is assigned to the work
        """

        path = os.path.abspath(
            file)  # convert to full path so that rule filtering can work
        if pytranscoder.verbose:
            print('matching ' + path)

        media_info = self.ffmpeg.fetch_details(path)
        if media_info is None:
            print(crayons.red(f'File not found: {path}'))
            return None, None
        if media_info.valid:

            if pytranscoder.verbose:
                print(str(media_info))

            if forced_profile is None:
                #
                # just interested in SKIP rule matches and queue designations here
                #
                rule = self.config.match_rule(media_info)
                if rule is None:
                    print(
                        crayons.yellow('No matching profile found - skipped'))
                    return None, None
                if rule.is_skip():
                    basename = os.path.basename(path)
                    print(
                        f'{basename}: Skipping due to profile rule - {rule.name}'
                    )
                    return None, None
                profile = self.profiles[rule.profile]
            else:
                profile = self.profiles[forced_profile]

            if pytranscoder.verbose:
                # BUG FIX: was a plain string literal missing the f-prefix,
                # printing the braces verbatim
                print(f'Matched to profile {profile.name}')

            # not short circuited by a skip rule, add to appropriate queue
            queue_name = profile.queue_name if profile.queue_name is not None else '_default'
            if queue_name not in self.queues:
                print(
                    crayons.red('Error: ') +
                    f'Queue "{queue_name}" referenced in profile "{profile.name}" not defined in any host'
                )
                sys.exit(1)
            job = EncodeJob(file, media_info, profile.name)
            self.queues[queue_name].put(job)
            return queue_name, job
        return None, None

    def testrun(self):
        """Dry-run each host without starting the threads."""
        for host in self.hosts:
            host.testrun()

    def run(self):
        """Start all host threads and wait until queue is drained"""

        if len(self.hosts) == 0:
            print(f'No hosts available in cluster "{self.name}"')
            return

        for host in self.hosts:
            host.start()

        # all hosts running, wait for them to finish
        for host in self.hosts:
            host.join()
            self.completed.extend(host.completed)

    @property
    def profiles(self):
        # delegate to the parsed configuration
        return self.config.profiles
Example #8
0
class QueueThread(Thread):
    """One transcoding thread associated to a queue"""
    def __init__(self, queuename, queue: Queue, configfile: ConfigFile,
                 manager):
        """
        :param queuename:   Name of the queue, for thread naming purposes only
        :param queue:       Thread-safe queue containing files to be encoded
        :param configfile:  Instance of the parsed configuration (transcode.yml)
        :param manager:     Reference to object that manages this thread
        """
        super().__init__(name=queuename, group=None, daemon=True)
        self.queue = queue
        self.config = configfile
        self._manager = manager
        self.ffmpeg = FFmpeg(self.config.ffmpeg_path)

    @property
    def lock(self):
        # terminal lock shared across all queue threads via the manager
        return self._manager.lock

    def complete(self, path: Path, elapsed_seconds):
        """Record a finished file and its elapsed encode time with the manager."""
        self._manager.complete.append((str(path), elapsed_seconds))

    def start_test(self):
        # run synchronously (no thread) for test mode
        self.go()

    def run(self):
        self.go()

    def log(self, *args, **kwargs):
        """Print to stdout while holding the shared terminal lock."""
        with self.lock:
            print(*args, **kwargs)
            sys.stdout.flush()

    def go(self):
        """Drain the queue, transcoding one file at a time."""

        while not self.queue.empty():
            try:
                job: LocalJob = self.queue.get()
                oinput = job.profile.input_options
                ooutput = job.profile.output_options

                # encode to a temporary name; only renamed over the original
                # after a successful, threshold-passing encode
                outpath = job.inpath.with_suffix(job.profile.extension +
                                                 '.tmp')

                #
                # check if we need to exclude any streams
                #
                if job.info.is_multistream():
                    ooutput = ooutput + job.info.ffmpeg_streams(job.profile)
                cli = [
                    '-y', *oinput, '-i',
                    str(job.inpath), *ooutput,
                    str(outpath)
                ]

                #
                # display useful information; hold the lock so multiple
                # threads don't create a jumble of output
                #
                with self.lock:
                    print('-' * 40)
                    print('Filename : ' +
                          crayons.green(os.path.basename(str(job.inpath))))
                    print(f'Profile  : {job.profile.name}')
                    print('ffmpeg   : ' + ' '.join(cli) + '\n')

                if pytranscoder.dry_run:
                    continue

                basename = job.inpath.name

                def log_callback(stats):
                    # progress callback from ffmpeg; returning True aborts
                    # the running encode
                    pct_done, pct_comp = calculate_progress(job.info, stats)
                    self.log(
                        f'{basename}: speed: {stats["speed"]}x, comp: {pct_comp}%, done: {pct_done:3}%'
                    )
                    if job.profile.threshold_check < 100:
                        if pct_done >= job.profile.threshold_check and pct_comp < job.profile.threshold:
                            # compression goal (threshold) not met, kill the job and waste no more time...
                            self.log(
                                f'Encoding of {basename} cancelled and skipped due to threshold not met'
                            )
                            return True
                    # continue
                    return False

                job_start = datetime.datetime.now()
                code = self.ffmpeg.run(cli, log_callback)
                job_stop = datetime.datetime.now()
                elapsed = job_stop - job_start

                if code == 0:
                    if not filter_threshold(job.profile, str(job.inpath),
                                            outpath):
                        # oops, this transcode didn't do so well, lets keep the original and scrap this attempt
                        self.log(
                            f'Transcoded file {job.inpath} did not meet minimum savings threshold, skipped'
                        )
                        self.complete(job.inpath,
                                      (job_stop - job_start).seconds)
                        outpath.unlink()
                        continue

                    self.complete(job.inpath, elapsed.seconds)
                    if not pytranscoder.keep_source:
                        if pytranscoder.verbose:
                            self.log(f'replacing {job.inpath} with {outpath}')
                        job.inpath.unlink()
                        outpath.rename(
                            job.inpath.with_suffix(job.profile.extension))
                        self.log(crayons.green(f'Finished {job.inpath}'))
                    else:
                        self.log(
                            crayons.yellow(
                                f'Finished {outpath}, original file unchanged')
                        )
                elif code is not None:
                    self.log(
                        f' Did not complete normally: {self.ffmpeg.last_command}'
                    )
                    self.log(f'Output can be found in {self.ffmpeg.log_path}')
                    # best-effort cleanup of the partial temp file; it may
                    # not exist at all. Narrowed from a bare except, which
                    # also swallowed KeyboardInterrupt/SystemExit.
                    try:
                        outpath.unlink()
                    except OSError:
                        pass
            finally:
                self.queue.task_done()
Example #9
0
class LocalHost:
    """Encapsulates functionality for local encoding"""

    # class-level: shared by every QueueThread this host spawns
    lock: Lock = Lock()
    complete: List = list()  # list of completed files, shared across threads

    def __init__(self, configfile: ConfigFile):
        """
        :param configfile: parsed configuration (transcode.yml)
        """
        self.queues = dict()
        self.configfile = configfile
        self.ffmpeg = FFmpeg(self.configfile.ffmpeg_path)
        #
        # initialize the queues
        #
        self.queues['_default_'] = Queue()
        for qname in configfile.queues.keys():
            self.queues[qname] = Queue()

    def start(self):
        """After initialization this is where processing begins"""
        #
        # all files are listed in the queues so start the threads
        #
        jobs = list()
        for name, queue in self.queues.items():

            # determine the number of threads to allocate for each queue, minimum of defined max and queued jobs

            if name == '_default_':
                concurrent_max = 1
            else:
                concurrent_max = min(self.configfile.queues[name],
                                     queue.qsize())

            #
            # Create (n) threads and assign them a queue
            #
            for _ in range(concurrent_max):
                t = QueueThread(name, queue, self.configfile, self)
                jobs.append(t)
                t.start()

        # wait for all queues to drain and all jobs to complete
        for _, queue in self.queues.items():
            queue.join()

    def enqueue_files(self, files: list):
        """Add requested files to the appropriate queue

        :param files: list of (path,profile) tuples
        :return:
        """

        for path, forced_profile in files:
            #
            # do some prechecks...
            #
            if forced_profile is not None and not self.configfile.has_profile(
                    forced_profile):
                print(
                    f'profile "{forced_profile}" referenced from command line not found'
                )
                # consistency: use sys.exit like the rest of this class
                sys.exit(1)

            if len(path) == 0:
                continue

            if not os.path.isfile(path):
                print(crayons.red('path not found, skipping: ' + path))
                continue

            if pytranscoder.verbose:
                print('matching ' + path)

            media_info = self.ffmpeg.fetch_details(path)
            if media_info is None:
                print(crayons.red(f'File not found: {path}'))
                continue
            if media_info.valid:

                if forced_profile is None:
                    rule = self.configfile.match_rule(media_info)
                    if rule is None:
                        print(
                            crayons.yellow(
                                'No matching profile found - skipped'))
                        continue
                    if rule.is_skip():
                        print(crayons.green(os.path.basename(path)),
                              f'SKIPPED ({rule.name})')
                        self.complete.append((path, 0))
                        continue
                    profile_name = rule.profile
                    the_profile = self.configfile.get_profile(profile_name)
                else:
                    #
                    # looks good, add this file to the thread queue
                    #
                    the_profile = self.configfile.get_profile(forced_profile)
                    profile_name = forced_profile

                qname = the_profile.queue_name
                if qname is not None:
                    if not self.configfile.has_queue(the_profile.queue_name):
                        print(
                            crayons.red(
                                f'Profile "{profile_name}" indicated queue "{qname}" that has not been defined'
                            ))
                        sys.exit(1)
                    else:
                        self.queues[qname].put(
                            LocalJob(path, the_profile, media_info))
                else:
                    self.queues['_default_'].put(
                        LocalJob(path, the_profile, media_info))

    def notify_plex(self):
        """If plex notifications enabled, tell it to refresh"""

        if self.configfile.plex_server is not None and not pytranscoder.dry_run:
            plex_server = self.configfile.plex_server
            try:
                # optional dependency; imported lazily so pytranscoder works
                # without plexapi installed
                from plexapi.server import PlexServer

                plex = PlexServer('http://{}'.format(plex_server))
                plex.library.update()
            except ModuleNotFoundError:
                print(
                    'Library not installed. To use Plex notifications please install the Python 3 Plex API '
                    + '("pip3 install plexapi")')
            except Exception as ex2:
                print(f'Unable to connect to Plex server at {plex_server}')
                if pytranscoder.verbose:
                    print(str(ex2))