class Cluster(Thread):
    """Thread to create host threads and wait for their completion."""

    # class-level lock used to serialize terminal output across host threads
    terminal_lock: Lock = Lock()

    def __init__(self, name, configs: Dict, config: ConfigFile, ssh: str):
        """
        :param name: Cluster name, used only for thread naming
        :param configs: The "clusters" section of the global config
        :param config: The full configuration object
        :param ssh: Path to local ssh
        """
        super().__init__(name=name, group=None, daemon=True)
        self.queues: Dict[str, Queue] = dict()
        self.ssh = ssh
        self.hosts: List[ManagedHost] = list()
        self.config = config
        # BUG FIX: was `self.verbose = verbose` — the bare name `verbose` is not
        # defined in this scope; the rest of the class reads pytranscoder.verbose.
        self.verbose = pytranscoder.verbose
        self.ffmpeg = FFmpeg(config.ffmpeg_path)
        self.lock = Cluster.terminal_lock
        self.completed: List = list()

        # Map config host_type values to their host implementations; replaces the
        # three previously duplicated per-type loops with a single code path.
        # 'local' is the special case of using the pytranscoder host itself as a
        # cluster host.
        host_type_map = {
            'local': LocalHost,
            'mounted': MountedManagedHost,
            'streaming': StreamingManagedHost,
        }

        for host, props in configs.items():
            hostprops = RemoteHostProperties(host, props)
            if not hostprops.is_enabled:
                continue
            hosttype = hostprops.host_type

            #
            # make sure a Queue exists for every queue name this host references
            #
            host_queues: Dict = hostprops.queues
            for host_queue in host_queues:
                if host_queue not in self.queues:
                    self.queues[host_queue] = Queue()

            host_class = host_type_map.get(hosttype)
            if host_class is None:
                print(
                    crayons.red(
                        f'Unknown cluster host type "{hosttype}" - skipping'))
                continue

            #
            # for each queue configured for this host create a dedicated thread for each slot
            #
            for host_queue, slots in host_queues.items():
                for _ in range(slots):
                    _h = host_class(host, hostprops, self.queues[host_queue], self)
                    if not _h.validate_settings():
                        sys.exit(1)
                    self.hosts.append(_h)

    def enqueue(self, file, forced_profile: Optional[str]) -> (str, Optional[EncodeJob]):
        """Add a media file to this cluster queue.

        This is different than in local mode in that we only care about
        handling skips here. The profile will be selected once a host is
        assigned to the work.

        :param file: path to the media file
        :param forced_profile: profile name forced from the command line, or None
        :return: (queue name, EncodeJob) on success, (None, None) if skipped/invalid
        """
        path = os.path.abspath(file)  # convert to full path so that rule filtering can work
        if pytranscoder.verbose:
            print('matching ' + path)
        media_info = self.ffmpeg.fetch_details(path)
        if media_info is None:
            print(crayons.red(f'File not found: {path}'))
            return None, None
        if media_info.valid:
            if pytranscoder.verbose:
                print(str(media_info))
            if forced_profile is None:
                #
                # just interested in SKIP rule matches and queue designations here
                #
                rule = self.config.match_rule(media_info)
                if rule is None:
                    print(crayons.yellow('No matching profile found - skipped'))
                    return None, None
                if rule.is_skip():
                    basename = os.path.basename(path)
                    print(f'{basename}: Skipping due to profile rule - {rule.name}')
                    return None, None
                profile = self.profiles[rule.profile]
            else:
                profile = self.profiles[forced_profile]

            if pytranscoder.verbose:
                # BUG FIX: previously a plain string missing the f prefix, so the
                # literal text "{profile.name}" was printed instead of the name.
                print(f"Matched to profile {profile.name}")

            # not short circuited by a skip rule, add to appropriate queue
            queue_name = profile.queue_name if profile.queue_name is not None else '_default'
            if queue_name not in self.queues:
                print(
                    crayons.red('Error: ') +
                    f'Queue "{queue_name}" referenced in profile "{profile.name}" not defined in any host'
                )
                sys.exit(1)
            job = EncodeJob(file, media_info, profile.name)
            self.queues[queue_name].put(job)
            return queue_name, job
        return None, None

    def testrun(self):
        """Run each host in test mode (no work is dispatched)."""
        for host in self.hosts:
            host.testrun()

    def run(self):
        """Start all host threads and wait until queue is drained"""
        if len(self.hosts) == 0:
            print(f'No hosts available in cluster "{self.name}"')
            return

        for host in self.hosts:
            host.start()

        # all hosts running, wait for them to finish
        for host in self.hosts:
            host.join()
            self.completed.extend(host.completed)

    @property
    def profiles(self):
        # convenience accessor for the profiles defined in the global config
        return self.config.profiles
class LocalHost:
    """Encapsulates functionality for local encoding"""

    lock: Lock = Lock()
    # list of completed (path, status) tuples; class-level so it is shared across threads
    complete: List = list()

    def __init__(self, configfile: ConfigFile):
        """
        :param configfile: the parsed global configuration
        """
        self.queues = dict()
        self.configfile = configfile
        self.ffmpeg = FFmpeg(self.configfile.ffmpeg_path)
        #
        # initialize the queues - one per configured queue name plus a default
        #
        self.queues['_default_'] = Queue()
        for qname in configfile.queues.keys():
            self.queues[qname] = Queue()

    def start(self):
        """After initialization this is where processing begins"""
        #
        # all files are listed in the queues so start the threads
        #
        jobs = list()
        for name, queue in self.queues.items():
            # determine the number of threads to allocate for each queue,
            # minimum of defined max and queued jobs
            if name == '_default_':
                concurrent_max = 1
            else:
                concurrent_max = min(self.configfile.queues[name], queue.qsize())

            #
            # Create (n) threads and assign them a queue
            #
            for _ in range(concurrent_max):
                t = QueueThread(name, queue, self.configfile, self)
                jobs.append(t)
                t.start()

        # wait for all queues to drain and all jobs to complete
        # (iterate values directly; the names are not needed here)
        for queue in self.queues.values():
            queue.join()

    def enqueue_files(self, files: list):
        """Add requested files to the appropriate queue

        :param files: list of (path, profile) tuples
        :return:
        """
        for path, forced_profile in files:
            #
            # do some prechecks...
            #
            if forced_profile is not None and not self.configfile.has_profile(forced_profile):
                print(f'profile "{forced_profile}" referenced from command line not found')
                # CONSISTENCY FIX: was bare exit(1); sys.exit is used everywhere else
                sys.exit(1)

            if len(path) == 0:
                continue

            if not os.path.isfile(path):
                print(crayons.red('path not found, skipping: ' + path))
                continue

            if pytranscoder.verbose:
                print('matching ' + path)
            media_info = self.ffmpeg.fetch_details(path)

            if media_info is None:
                print(crayons.red(f'File not found: {path}'))
                continue
            if media_info.valid:
                if forced_profile is None:
                    rule = self.configfile.match_rule(media_info)
                    if rule is None:
                        print(crayons.yellow('No matching profile found - skipped'))
                        continue
                    if rule.is_skip():
                        print(crayons.green(os.path.basename(path)), f'SKIPPED ({rule.name})')
                        self.complete.append((path, 0))
                        continue
                    profile_name = rule.profile
                    the_profile = self.configfile.get_profile(profile_name)
                else:
                    #
                    # looks good, add this file to the thread queue
                    #
                    the_profile = self.configfile.get_profile(forced_profile)
                    profile_name = forced_profile

                qname = the_profile.queue_name
                if qname is not None:
                    if not self.configfile.has_queue(the_profile.queue_name):
                        print(
                            crayons.red(
                                f'Profile "{profile_name}" indicated queue "{qname}" that has not been defined'
                            ))
                        sys.exit(1)
                    else:
                        self.queues[qname].put(LocalJob(path, the_profile, media_info))
                else:
                    self.queues['_default_'].put(LocalJob(path, the_profile, media_info))

    def notify_plex(self):
        """If plex notifications enabled, tell it to refresh"""
        if self.configfile.plex_server is not None and not pytranscoder.dry_run:
            plex_server = self.configfile.plex_server
            try:
                # plexapi is an optional dependency; import lazily so the rest of
                # the tool works without it
                from plexapi.server import PlexServer

                plex = PlexServer('http://{}'.format(plex_server))
                plex.library.update()
            except ModuleNotFoundError:
                print(
                    'Library not installed. To use Plex notifications please install the Python 3 Plex API '
                    + '("pip3 install plexapi")')
            except Exception as ex2:
                # best-effort notification: report and carry on rather than abort
                print(f'Unable to connect to Plex server at {plex_server}')
                if pytranscoder.verbose:
                    print(str(ex2))