def _genre(self, genre_id, limit):
    """Return up to *limit* MyAnimeList IDs for the anime genre *genre_id*.

    Pages through the Jikan genre endpoint 100 items per page. A page that
    comes back without an "anime" key is retried; more than 6 consecutive
    failures aborts the whole lookup.

    Raises:
        Failed: if the genre has no item count, or the API keeps failing.
    """
    data = self._jiken_request(f"/genre/anime/{genre_id}")
    if "item_count" not in data:
        raise Failed(f"MyAnimeList Error: No MyAnimeList IDs for Genre ID: {genre_id}")
    total_items = data["item_count"]
    # limit <= 0 means "no limit"; also never ask for more than exists.
    if total_items < limit or limit <= 0:
        limit = total_items
    mal_ids = []
    num_of_pages = math.ceil(int(limit) / 100)
    current_page = 1
    chances = 0
    while current_page <= num_of_pages:
        if chances > 6:
            logger.debug(data)
            # Fixed: this is the MyAnimeList builder, not AniList — the
            # original message misattributed the failure to AniList.
            raise Failed("MyAnimeList Error: Connection Failed")
        start_num = (current_page - 1) * 100 + 1
        util.print_return(
            f"Parsing Page {current_page}/{num_of_pages} {start_num}-{limit if current_page == num_of_pages else current_page * 100}"
        )
        # Page 1 was already fetched above to read item_count.
        if current_page > 1:
            data = self._jiken_request(f"/genre/anime/{genre_id}/{current_page}")
        if "anime" in data:
            chances = 0
            mal_ids.extend([anime["mal_id"] for anime in data["anime"]])
            if len(mal_ids) > limit:
                return mal_ids[:limit]
            current_page += 1
        else:
            # Bad/empty page: retry the same page.
            chances += 1
    util.print_end()
    return mal_ids
def get_flixpatrol_ids(self, method, data, language, is_movie):
    """Resolve a FlixPatrol builder (demographics/popular/top/url) into TMDb IDs.

    Returns a list of (tmdb_id, "tmdb" | "tmdb_show") tuples.
    Raises Failed when the parsed FlixPatrol list is empty.
    """
    # Describe the request in the log, per builder method.
    if method == "flixpatrol_demographics":
        logger.info("Processing FlixPatrol Demographics:")
        logger.info(f"\tGeneration: {generations_pretty[data['generation']]}")
        logger.info(f"\tGender: {'All genders' if data['gender'] == 'all' else data['gender'].capitalize()}")
        logger.info(f"\tLocation: {data['location'].replace('_', ' ').title()}")
        logger.info(f"\tLimit: {data['limit']}")
    elif method == "flixpatrol_popular":
        logger.info("Processing FlixPatrol Popular:")
        logger.info(f"\tSource: {data['source'].replace('_', ' ').title()}")
        logger.info(f"\tTime Window: {data['time_window'].replace('_', ' ').title()}")
        logger.info(f"\tLimit: {data['limit']}")
    elif method == "flixpatrol_top":
        logger.info("Processing FlixPatrol Top:")
        logger.info(f"\tPlatform: {data['platform'].replace('_', ' ').title()}")
        logger.info(f"\tLocation: {data['location'].replace('_', ' ').title()}")
        logger.info(f"\tTime Window: {data['time_window'].replace('_', ' ').title()}")
        logger.info(f"\tLimit: {data['limit']}")
    elif method == "flixpatrol_url":
        logger.info(f"Processing FlixPatrol URL: {data}")
    url = self.get_url(method, data, is_movie)
    items = self._parse_list(url, language, is_movie)
    media_type = "movie" if is_movie else "show"
    total_items = len(items)
    if total_items > 0:
        ids = []
        for i, item in enumerate(items, 1):
            util.print_return(f"Finding TMDb ID {i}/{total_items}")
            tmdb_id = None
            expired = None
            # Try the local cache first; `expired is not False` forces a
            # refresh for missing or stale entries.
            if self.config.Cache:
                tmdb_id, expired = self.config.Cache.query_flixpatrol_map(item, media_type)
            if not tmdb_id or expired is not False:
                try:
                    tmdb_id = self._tmdb(f"{base_url}{item}", language)
                except Failed as e:
                    # Skip items that cannot be resolved; do not abort the list.
                    logger.error(e)
                    continue
                if self.config.Cache:
                    self.config.Cache.update_flixpatrol_map(expired, item, tmdb_id, media_type)
            ids.append((tmdb_id, "tmdb" if is_movie else "tmdb_show"))
        # NOTE(review): this counts parsed items, not successfully resolved IDs.
        logger.info(util.adjust_space(f"Processed {total_items} TMDb IDs"))
        return ids
    else:
        raise Failed(f"FlixPatrol Error: No List Items found in {data}")
def add_to_collection(self, collection, items, filters, map=None):
    """Add *items* to *collection*, applying *filters* first.

    Items already in the collection are recorded in *map* (ratingKey -> None)
    instead of being re-added; new items get the collection tag.

    Fixed: the original used a mutable default argument (``map={}``), which
    is shared across calls — state from one run would leak into the next.
    A ``None`` sentinel preserves behavior for callers that pass a dict.

    Returns the (possibly caller-supplied) map dict.
    """
    if map is None:
        map = {}
    name = collection.title if isinstance(collection, Collections) else collection
    collection_items = collection.children if isinstance(collection, Collections) else []
    total = len(items)
    max_length = len(str(total))
    length = 0
    for i, item in enumerate(items, 1):
        current = self.get_item(item)
        match = True
        if filters:
            length = util.print_return(length, "Filtering {}/{} {}".format((" " * (max_length - len(str(i)))) + str(i), total, current.title))
            for f in filters:
                # Filter keys may carry a 4-char modifier suffix (.not/.lte/.gte).
                modifier = f[0][-4:]
                method = util.filter_alias[f[0][:-4]] if modifier in [".not", ".lte", ".gte"] else util.filter_alias[f[0]]
                if method == "max_age":
                    threshold_date = datetime.now() - timedelta(days=f[1])
                    attr = getattr(current, "originallyAvailableAt")
                    if attr is None or attr < threshold_date:
                        match = False
                        break
                elif modifier in [".gte", ".lte"]:
                    if method == "originallyAvailableAt":
                        threshold_date = datetime.strptime(f[1], "%m/%d/%y")
                        attr = getattr(current, "originallyAvailableAt")
                        if (modifier == ".lte" and attr > threshold_date) or (modifier == ".gte" and attr < threshold_date):
                            match = False
                            break
                    elif method in ["year", "rating"]:
                        attr = getattr(current, method)
                        if (modifier == ".lte" and attr > f[1]) or (modifier == ".gte" and attr < f[1]):
                            match = False
                            break
                else:
                    # Term filters: match on intersection of requested terms
                    # and the item's attribute values.
                    terms = f[1] if isinstance(f[1], list) else str(f[1]).split(", ")
                    if method in ["video_resolution", "audio_language", "subtitle_language"]:
                        for media in current.media:
                            if method == "video_resolution":
                                attrs = [media.videoResolution]
                            for part in media.parts:
                                if method == "audio_language":
                                    attrs = ([a.language for a in part.audioStreams()])
                                if method == "subtitle_language":
                                    attrs = ([s.language for s in part.subtitleStreams()])
                    elif method in ["contentRating", "studio", "year", "rating", "originallyAvailableAt"]:
                        attrs = [str(getattr(current, method))]
                    elif method in ["actors", "countries", "directors", "genres", "writers", "collections"]:
                        attrs = [getattr(x, "tag") for x in getattr(current, method)]
                    if (not list(set(terms) & set(attrs)) and modifier != ".not") or (list(set(terms) & set(attrs)) and modifier == ".not"):
                        match = False
                        break
            # Redraw the progress line after filtering this item.
            length = util.print_return(length, "Filtering {}/{} {}".format((" " * (max_length - len(str(i)))) + str(i), total, current.title))
        if match:
            util.print_end(length, "{} Collection | {} | {}".format(name, "=" if current in collection_items else "+", current.title))
            if current in collection_items:
                map[current.ratingKey] = None
            else:
                current.addCollection(name)
    media_type = "{}{}".format("Movie" if self.is_movie else "Show", "s" if total > 1 else "")
    util.print_end(length, "{} {} Processed".format(total, media_type))
    return map
def map_guids(self, library):
    """Build {external_id: ratingKey} maps for every item in *library*.

    Returns (movie_map, show_map); get_id decides whether each item's
    resolved ID(s) are movie or show IDs.
    """
    movie_map = {}
    show_map = {}
    length = 0
    count = 0  # NOTE(review): never used below — candidate for removal.
    logger.info("Mapping {} Library: {}".format("Movie" if library.is_movie else "Show", library.name))
    items = library.Plex.all()
    for i, item in enumerate(items, 1):
        length = util.print_return(length, "Processing: {}/{} {}".format(i, len(items), item.title))
        try:
            id_type, main_id = self.get_id(item, library, length)
        except BadRequest:
            util.print_stacktrace()
            # NOTE(review): `error_message` is not defined in this scope —
            # unless it is a module-level name, this line raises NameError
            # whenever the except branch runs. Verify against the module.
            util.print_end(length, "{} {:<46} | {} for {}".format("Cache | ! |" if self.Cache else "Mapping Error:", item.guid, error_message, item.title))
            continue
        # get_id may return one ID or a list of IDs; all map to this ratingKey.
        if isinstance(main_id, list):
            if id_type == "movie":
                for m in main_id:
                    movie_map[m] = item.ratingKey
            elif id_type == "show":
                for m in main_id:
                    show_map[m] = item.ratingKey
        else:
            if id_type == "movie":
                movie_map[main_id] = item.ratingKey
            elif id_type == "show":
                show_map[main_id] = item.ratingKey
    util.print_end(length, "Processed {} {}".format(len(items), "Movies" if library.is_movie else "Shows"))
    return movie_map, show_map
def get_items(self, method, data, language, status_message=True):
    """Resolve a Letterboxd list into TMDb movie IDs.

    Each parsed item is looked up in the cache first and refreshed from
    Letterboxd when missing or expired. Returns (movie_ids, []) since
    Letterboxd only tracks movies.

    Raises:
        Failed: when the list contains no items.
    """
    pretty = util.pretty_names.get(method, method)
    if status_message:
        logger.info(f"Processing {pretty}: {data}")
    items = self.parse_list(data, language)
    total_items = len(items)
    if total_items == 0:
        raise Failed(f"Letterboxd Error: No List Items found in {data}")
    movie_ids = []
    length = 0
    for i, item in enumerate(items, 1):
        length = util.print_return(length, f"Finding TMDb ID {i}/{total_items}")
        tmdb_id, expired = None, None
        if self.config.Cache:
            tmdb_id, expired = self.config.Cache.query_letterboxd_map(item[0])
        # A cache miss or a stale entry forces a live lookup.
        if not tmdb_id or expired is not False:
            try:
                tmdb_id = self.get_tmdb_from_slug(item[1], language)
            except Failed as e:
                logger.error(e)
                continue
            if self.config.Cache:
                self.config.Cache.update_letterboxd(expired, item[0], tmdb_id)
        movie_ids.append(tmdb_id)
    util.print_end(length, f"Processed {total_items} TMDb IDs")
    if status_message:
        logger.debug(f"TMDb IDs Found: {movie_ids}")
    return movie_ids, []
def _ids_from_url(self, imdb_url, language, limit):
    """Scrape IMDb title IDs (tt…) from a list/search/keyword URL.

    Pages through results item_count at a time; search URLs page via
    count/start params, other URLs via a page param.

    Raises:
        Failed: when no IDs are found at the URL.
    """
    total, item_count = self._total(imdb_url, language)
    headers = util.header(language)
    imdb_ids = []
    parsed_url = urlparse(imdb_url)
    params = parse_qs(parsed_url.query)
    imdb_base = parsed_url._replace(query=None).geturl()
    # Strip any caller-supplied paging params; paging is controlled below.
    params.pop("start", None)  # noqa
    params.pop("count", None)  # noqa
    params.pop("page", None)  # noqa
    if self.config.trace_mode:
        logger.debug(f"URL: {imdb_base}")
        logger.debug(f"Params: {params}")
    search_url = imdb_base.startswith(urls["searches"])
    if limit < 1 or total < limit:
        limit = total
    # Size of the final page; a full page when limit divides evenly.
    remainder = limit % item_count
    if remainder == 0:
        remainder = item_count
    num_of_pages = math.ceil(int(limit) / item_count)
    for i in range(1, num_of_pages + 1):
        start_num = (i - 1) * item_count + 1
        util.print_return(
            f"Parsing Page {i}/{num_of_pages} {start_num}-{limit if i == num_of_pages else i * item_count}"
        )
        if search_url:
            params["count"] = remainder if i == num_of_pages else item_count  # noqa
            params["start"] = start_num  # noqa
        else:
            params["page"] = i  # noqa
        response = self.config.get_html(imdb_base, headers=headers, params=params)
        ids_found = response.xpath("//div[contains(@class, 'lister-item-image')]//a/img//@data-tconst")
        # Non-search pages are fixed-size, so trim the last page manually.
        if not search_url and i == num_of_pages:
            ids_found = ids_found[:remainder]
        imdb_ids.extend(ids_found)
        time.sleep(2)  # be polite to IMDb between page fetches
    util.print_end()
    if len(imdb_ids) > 0:
        logger.debug(f"{len(imdb_ids)} IMDb IDs Found: {imdb_ids}")
        return imdb_ids
    raise Failed(f"IMDb Error: No IMDb IDs Found at {imdb_url}")
def get_imdb_ids_from_url(self, imdb_url, language, limit):
    """Scrape up to *limit* IMDb IDs from a list or search URL (250/page).

    List URLs are converted to an equivalent search URL first.

    Raises:
        Failed: for malformed URLs, unparseable pages, empty results.
    """
    imdb_url = imdb_url.strip()
    if not imdb_url.startswith(self.urls["list"]) and not imdb_url.startswith(self.urls["search"]):
        raise Failed(f"IMDb Error: {imdb_url} must begin with either:\n| {self.urls['list']} (For Lists)\n| {self.urls['search']} (For Searches)")
    if imdb_url.startswith(self.urls["list"]):
        # Rewrite a list URL as a search over that list's ID.
        try:
            list_id = re.search("(\\d+)", str(imdb_url)).group(1)
        except AttributeError:
            raise Failed(f"IMDb Error: Failed to parse List ID from {imdb_url}")
        current_url = f"{self.urls['search']}lists=ls{list_id}"
    else:
        current_url = imdb_url
    header = {"Accept-Language": language}
    length = 0
    imdb_ids = []
    # The results header (e.g. "1,234 titles") gives the total count.
    try:
        results = self.send_request(current_url, header).xpath("//div[@class='desc']/span/text()")[0].replace(",", "")
    except IndexError:
        raise Failed(f"IMDb Error: Failed to parse URL: {imdb_url}")
    try:
        total = int(re.findall("(\\d+) title", results)[0])
    except IndexError:
        raise Failed(f"IMDb Error: No Results at URL: {imdb_url}")
    # Remove caller paging params; paging is handled below.
    if "&start=" in current_url:
        current_url = re.sub("&start=\\d+", "", current_url)
    if "&count=" in current_url:
        current_url = re.sub("&count=\\d+", "", current_url)
    if limit < 1 or total < limit:
        limit = total
    remainder = limit % 250  # size of the final page
    if remainder == 0:
        remainder = 250
    num_of_pages = math.ceil(int(limit) / 250)
    for i in range(1, num_of_pages + 1):
        start_num = (i - 1) * 250 + 1
        length = util.print_return(
            length,
            f"Parsing Page {i}/{num_of_pages} {start_num}-{limit if i == num_of_pages else i * 250}"
        )
        response = self.send_request(f"{current_url}&count={remainder if i == num_of_pages else 250}&start={start_num}", header)
        imdb_ids.extend(response.xpath("//div[contains(@class, 'lister-item-image')]//a/img//@data-tconst"))
    util.print_end(length)
    if imdb_ids:
        return imdb_ids
    else:
        raise Failed(f"IMDb Error: No Movies Found at {imdb_url}")
def get_imdb_ids_from_url(self, imdb_url, language, limit):
    """Scrape up to *limit* IMDb IDs from a list/search/keyword URL.

    Keyword URLs page via &page=N (fixed page size, last page trimmed);
    other URLs page via &count/&start.

    Raises:
        Failed: when no IDs are found.
    """
    current_url = self.fix_url(imdb_url)
    total, item_count = self.get_total(current_url, language)
    header = {"Accept-Language": language}
    length = 0
    imdb_ids = []
    # Strip caller paging params; paging is controlled below.
    if "&start=" in current_url:
        current_url = re.sub("&start=\\d+", "", current_url)
    if "&count=" in current_url:
        current_url = re.sub("&count=\\d+", "", current_url)
    if "&page=" in current_url:
        current_url = re.sub("&page=\\d+", "", current_url)
    if limit < 1 or total < limit:
        limit = total
    remainder = limit % item_count  # size of the final page
    if remainder == 0:
        remainder = item_count
    num_of_pages = math.ceil(int(limit) / item_count)
    for i in range(1, num_of_pages + 1):
        start_num = (i - 1) * item_count + 1
        length = util.print_return(
            length,
            f"Parsing Page {i}/{num_of_pages} {start_num}-{limit if i == num_of_pages else i * item_count}"
        )
        if imdb_url.startswith(self.urls["keyword"]):
            response = self.send_request(f"{current_url}&page={i}", header)
        else:
            response = self.send_request(f"{current_url}&count={remainder if i == num_of_pages else item_count}&start={start_num}", header)
        # Keyword pages can't request a partial page, so trim the last one.
        if imdb_url.startswith(self.urls["keyword"]) and i == num_of_pages:
            imdb_ids.extend(response.xpath("//div[contains(@class, 'lister-item-image')]//a/img//@data-tconst")[:remainder])
        else:
            imdb_ids.extend(response.xpath("//div[contains(@class, 'lister-item-image')]//a/img//@data-tconst"))
    util.print_end(length)
    if imdb_ids:
        return imdb_ids
    else:
        raise Failed(f"IMDb Error: No IMDb IDs Found at {imdb_url}")
def get_items(self, method, data, language, status_message=True):
    """Resolve an IMDb builder (imdb_id / imdb_list) into TMDb and TVDb IDs.

    Returns (movie_ids, show_ids).

    Raises:
        Failed: for unsupported methods.
    """
    pretty = util.pretty_names.get(method, method)
    if status_message:
        logger.debug(f"Data: {data}")
    movie_ids = []
    show_ids = []

    def collect(tmdb_id, tvdb_id):
        # Keep whichever side of the conversion actually resolved.
        if tmdb_id:
            movie_ids.append(tmdb_id)
        if tvdb_id:
            show_ids.append(tvdb_id)

    if method == "imdb_id":
        if status_message:
            logger.info(f"Processing {pretty}: {data}")
        collect(*self.config.convert_from_imdb(data, language))
    elif method == "imdb_list":
        if status_message:
            status = f"{data['limit']} Items at " if data['limit'] > 0 else ''
            logger.info(f"Processing {pretty}: {status}{data['url']}")
        imdb_ids = self.get_imdb_ids_from_url(data["url"], language, data["limit"])
        total_ids = len(imdb_ids)
        length = 0
        for i, imdb_id in enumerate(imdb_ids, 1):
            length = util.print_return(length, f"Converting IMDb ID {i}/{total_ids}")
            try:
                collect(*self.config.convert_from_imdb(imdb_id, language))
            except Failed as e:
                logger.warning(e)
        util.print_end(length, f"Processed {total_ids} IMDb IDs")
    else:
        raise Failed(f"IMDb Error: Method {method} not supported")
    if status_message:
        logger.debug(f"TMDb IDs Found: {movie_ids}")
        logger.debug(f"TVDb IDs Found: {show_ids}")
    return movie_ids, show_ids
def get_items(self, method, data, language, status_message=True):
    """Resolve an IMDb builder (imdb_id / imdb_list) into TMDb and TVDb IDs.

    Older `.format`-style variant; conversion goes through
    self.convert_from_imdb. Returns (movie_ids, show_ids).

    Raises:
        Failed: for unsupported methods.
    """
    pretty = util.pretty_names[method] if method in util.pretty_names else method
    if status_message:
        logger.debug("Data: {}".format(data))
    show_ids = []
    movie_ids = []
    if method == "imdb_id":
        if status_message:
            logger.info("Processing {}: {}".format(pretty, data))
        tmdb_id, tvdb_id = self.convert_from_imdb(data, language)
        if tmdb_id:
            movie_ids.append(tmdb_id)
        if tvdb_id:
            show_ids.append(tvdb_id)
    elif method == "imdb_list":
        if status_message:
            logger.info("Processing {}: {}".format(pretty, "{} Items at {}".format(data["limit"], data["url"]) if data["limit"] > 0 else data["url"]))
        imdb_ids = self.get_imdb_ids_from_url(data["url"], language, data["limit"])
        total_ids = len(imdb_ids)
        length = 0
        for i, imdb_id in enumerate(imdb_ids, 1):
            length = util.print_return(length, "Converting IMDb ID {}/{}".format(i, total_ids))
            # Conversion failures are logged and skipped, not fatal.
            try:
                tmdb_id, tvdb_id = self.convert_from_imdb(imdb_id, language)
                if tmdb_id:
                    movie_ids.append(tmdb_id)
                if tvdb_id:
                    show_ids.append(tvdb_id)
            except Failed as e:
                logger.warning(e)
        util.print_end(length, "Processed {} IMDb IDs".format(total_ids))
    else:
        raise Failed("IMDb Error: Method {} not supported".format(method))
    if status_message:
        logger.debug("TMDb IDs Found: {}".format(movie_ids))
        logger.debug("TVDb IDs Found: {}".format(show_ids))
    return movie_ids, show_ids
def get_items(self, method, data, language, status_message=True):
    """Resolve a Letterboxd list into TMDb movie IDs via page slugs.

    Returns (movie_ids, []); Letterboxd only tracks movies. Slugs that
    fail to resolve are logged and skipped.

    Raises:
        Failed: when the list contains no items.
    """
    pretty = util.pretty_names.get(method, method)
    if status_message:
        logger.info(f"Processing {pretty}: {data}")
    slugs = self.parse_list_for_slugs(data, language)
    total_slugs = len(slugs)
    if not slugs:
        raise Failed(f"Letterboxd Error: No List Items found in {data}")
    movie_ids = []
    length = 0
    for i, slug in enumerate(slugs, 1):
        length = util.print_return(length, f"Finding TMDb ID {i}/{total_slugs}")
        try:
            movie_ids.append(self.get_tmdb(slug, language))
        except Failed as e:
            logger.error(e)
    util.print_end(length, f"Processed {total_slugs} TMDb IDs")
    if status_message:
        logger.debug(f"TMDb IDs Found: {movie_ids}")
    return movie_ids, []
def get_items(self, method, data, status_message=True):
    """Return Plex items for a plex_* builder method.

    Supports plex_all, plex_collection, plex_search (returns directly from
    self.search) and plex_collectionless.

    Raises:
        Failed: for unsupported methods or when no items are found.
    """
    if status_message:
        logger.debug(f"Data: {data}")
    pretty = util.pretty_names[method] if method in util.pretty_names else method
    media_type = "Movie" if self.is_movie else "Show"
    items = []
    if method == "plex_all":
        if status_message:
            logger.info(f"Processing {pretty} {media_type}s")
        items = self.get_all()
    elif method == "plex_collection":
        if status_message:
            logger.info(f"Processing {pretty} {data}")
        items = data.items()
    elif method == "plex_search":
        # Translate the user's search spec into plexapi search kwargs.
        search_terms = {}
        has_processed = False
        search_limit = None
        search_sort = None
        for search_method, search_data in data.items():
            if search_method == "limit":
                search_limit = search_data
            elif search_method == "sort_by":
                search_sort = search_data
            else:
                # splitext separates "attr.modifier" into attr + modifier.
                search, modifier = os.path.splitext(str(search_method).lower())
                final_search = search_translation[search] if search in search_translation else search
                if search in ["added", "originally_available"] and modifier == "":
                    final_mod = ">>"
                elif search in ["added", "originally_available"] and modifier == ".not":
                    final_mod = "<<"
                elif search in ["critic_rating", "audience_rating"] and modifier == ".greater":
                    final_mod = "__gte"
                elif search in ["critic_rating", "audience_rating"] and modifier == ".less":
                    final_mod = "__lt"
                else:
                    final_mod = modifiers[modifier] if modifier in modifiers else ""
                final_method = f"{final_search}{final_mod}"
                if search == "duration":
                    search_terms[final_method] = search_data * 60000  # minutes -> ms
                elif search in ["added", "originally_available"] and modifier in ["", ".not"]:
                    search_terms[final_method] = f"{search_data}d"  # relative days
                else:
                    search_terms[final_method] = search_data
                if status_message:
                    # Build a human-readable description of this criterion.
                    if search in ["added", "originally_available"] or modifier in [".greater", ".less", ".before", ".after"]:
                        ors = f"{search_method}({search_data}"
                    else:
                        ors = ""
                        conjunction = " AND " if final_mod == "&" else " OR "
                        for o, param in enumerate(search_data):
                            or_des = conjunction if o > 0 else f"{search_method}("
                            ors += f"{or_des}{param}"
                    if has_processed:
                        logger.info(f"\t\t AND {ors})")
                    else:
                        logger.info(f"Processing {pretty}: {ors})")
                        has_processed = True
        if status_message:
            if search_sort:
                logger.info(f"\t\t SORT BY {search_sort})")
            if search_limit:
                logger.info(f"\t\t LIMIT {search_limit})")
            logger.debug(f"Search: {search_terms}")
        return self.search(sort=sorts[search_sort], maxresults=search_limit, **search_terms)
    elif method == "plex_collectionless":
        # Collect indexes of collections that are NOT excluded …
        good_collections = []
        for col in self.get_all_collections():
            keep_collection = True
            for pre in data["exclude_prefix"]:
                if col.title.startswith(pre) or (col.titleSort and col.titleSort.startswith(pre)):
                    keep_collection = False
                    break
            if keep_collection:
                for ext in data["exclude"]:
                    if col.title == ext or (col.titleSort and col.titleSort == ext):
                        keep_collection = False
                        break
            if keep_collection:
                good_collections.append(col.index)
        # … then keep only items that belong to none of them.
        all_items = self.get_all()
        length = 0
        for i, item in enumerate(all_items, 1):
            length = util.print_return(length, f"Processing: {i}/{len(all_items)} {item.title}")
            add_item = True
            item.reload()
            for collection in item.collections:
                if collection.id in good_collections:
                    add_item = False
                    break
            if add_item:
                items.append(item)
        util.print_end(length, f"Processed {len(all_items)} {'Movies' if self.is_movie else 'Shows'}")
    else:
        raise Failed(f"Plex Error: Method {method} not supported")
    if len(items) > 0:
        return items
    else:
        raise Failed("Plex Error: No Items found in Plex")
def update_libraries(config):
    """Run the full update pass over every configured library.

    For each library: set up a per-library rotating log handler, dump the
    library's settings, map GUIDs, run each metadata file's collections,
    sort collections, then run library operations. Afterwards, handle
    "run again" collections after a configurable delay, and finally issue
    empty-trash / clean-bundles / optimize once per unique server URL.
    """
    global stats
    for library in config.libraries:
        try:
            # Per-library log file; roll over if a previous log exists.
            os.makedirs(os.path.join(default_dir, "logs", library.mapping_name, "collections"), exist_ok=True)
            col_file_logger = os.path.join(default_dir, "logs", library.mapping_name, "library.log")
            should_roll_over = os.path.isfile(col_file_logger)
            library_handler = RotatingFileHandler(col_file_logger, delay=True, mode="w", backupCount=3, encoding="utf-8")
            util.apply_formatter(library_handler)
            if should_roll_over:
                library_handler.doRollover()
            logger.addHandler(library_handler)
            plexapi.server.TIMEOUT = library.timeout
            logger.info("")
            util.separator(f"{library.name} Library")
            # Dump the effective per-library configuration for debugging.
            logger.debug("")
            logger.debug(f"Mapping Name: {library.original_mapping_name}")
            logger.debug(f"Folder Name: {library.mapping_name}")
            logger.debug(f"Missing Path: {library.missing_path}")
            for ad in library.asset_directory:
                logger.debug(f"Asset Directory: {ad}")
            logger.debug(f"Asset Folders: {library.asset_folders}")
            logger.debug(f"Create Asset Folders: {library.create_asset_folders}")
            logger.debug(f"Sync Mode: {library.sync_mode}")
            logger.debug(f"Collection Minimum: {library.collection_minimum}")
            logger.debug(f"Delete Below Minimum: {library.delete_below_minimum}")
            logger.debug(f"Delete Not Scheduled: {library.delete_not_scheduled}")
            logger.debug(f"Missing Only Released: {library.missing_only_released}")
            logger.debug(f"Only Filter Missing: {library.only_filter_missing}")
            logger.debug(f"Show Unmanaged: {library.show_unmanaged}")
            logger.debug(f"Show Filtered: {library.show_filtered}")
            logger.debug(f"Show Missing: {library.show_missing}")
            logger.debug(f"Show Missing Assets: {library.show_missing_assets}")
            logger.debug(f"Save Missing: {library.save_missing}")
            logger.debug(f"Assets For All: {library.assets_for_all}")
            logger.debug(f"Delete Collections With Less: {library.delete_collections_with_less}")
            logger.debug(f"Delete Unmanaged Collections: {library.delete_unmanaged_collections}")
            logger.debug(f"Mass Genre Update: {library.mass_genre_update}")
            logger.debug(f"Mass Audience Rating Update: {library.mass_audience_rating_update}")
            logger.debug(f"Mass Critic Rating Update: {library.mass_critic_rating_update}")
            logger.debug(f"Mass Trakt Rating Update: {library.mass_trakt_rating_update}")
            logger.debug(f"Split Duplicates: {library.split_duplicates}")
            logger.debug(f"Radarr Add All: {library.radarr_add_all}")
            logger.debug(f"Sonarr Add All: {library.sonarr_add_all}")
            logger.debug(f"TMDb Collections: {library.tmdb_collections}")
            logger.debug(f"Genre Mapper: {library.genre_mapper}")
            logger.debug(f"Clean Bundles: {library.clean_bundles}")
            logger.debug(f"Empty Trash: {library.empty_trash}")
            logger.debug(f"Optimize: {library.optimize}")
            logger.debug(f"Timeout: {library.timeout}")
            if not library.is_other:
                logger.info("")
                util.separator(f"Mapping {library.name} Library", space=False, border=False)
                logger.info("")
                library.map_guids()
            for metadata in library.metadata_files:
                logger.info("")
                util.separator(f"Running Metadata File\n{metadata.path}")
                if not config.test_mode and not config.resume_from and not collection_only:
                    try:
                        metadata.update_metadata()
                    except Failed as e:
                        library.notify(e)
                        logger.error(e)
                collections_to_run = metadata.get_collections(config.requested_collections)
                if config.resume_from and config.resume_from not in collections_to_run:
                    logger.info("")
                    logger.warning(f"Collection: {config.resume_from} not in Metadata File: {metadata.path}")
                    continue
                if collections_to_run and not library_only:
                    logger.info("")
                    util.separator(f"{'Test ' if config.test_mode else ''}Collections")
                    # Collections write to their own log; detach the
                    # library handler while they run.
                    logger.removeHandler(library_handler)
                    run_collection(config, library, metadata, collections_to_run)
                    logger.addHandler(library_handler)
            if library.run_sort:
                logger.info("")
                util.separator(f"Sorting {library.name} Library's Collections", space=False, border=False)
                logger.info("")
                for builder in library.run_sort:
                    logger.info("")
                    util.separator(f"Sorting {builder.name} Collection", space=False, border=False)
                    logger.info("")
                    builder.sort_collection()
            if not config.test_mode and not collection_only:
                library_operations(config, library)
            logger.removeHandler(library_handler)
        except Exception as e:
            library.notify(e)
            util.print_stacktrace()
            util.print_multiline(e, critical=True)
    # "Run again" pass: wait the configured number of minutes, then rerun
    # the collections that asked for it.
    has_run_again = False
    for library in config.libraries:
        if library.run_again:
            has_run_again = True
            break
    if has_run_again and not library_only:
        logger.info("")
        util.separator("Run Again")
        logger.info("")
        for x in range(1, config.general["run_again_delay"] + 1):
            util.print_return(f"Waiting to run again in {config.general['run_again_delay'] - x + 1} minutes")
            for y in range(60):
                time.sleep(1)
        util.print_end()
        for library in config.libraries:
            if library.run_again:
                try:
                    col_file_logger = os.path.join(default_dir, "logs", library.mapping_name, f"library.log")
                    library_handler = RotatingFileHandler(col_file_logger, mode="w", backupCount=3, encoding="utf-8")
                    util.apply_formatter(library_handler)
                    logger.addHandler(library_handler)
                    library_handler.addFilter(fmt_filter)
                    os.environ["PLEXAPI_PLEXAPI_TIMEOUT"] = str(library.timeout)
                    logger.info("")
                    util.separator(f"{library.name} Library Run Again")
                    logger.info("")
                    library.map_guids()
                    for builder in library.run_again:
                        logger.info("")
                        util.separator(f"{builder.name} Collection")
                        logger.info("")
                        try:
                            builder.run_collections_again()
                        except Failed as e:
                            library.notify(e, collection=builder.name, critical=False)
                            util.print_stacktrace()
                            util.print_multiline(e, error=True)
                    logger.removeHandler(library_handler)
                except Exception as e:
                    library.notify(e)
                    util.print_stacktrace()
                    util.print_multiline(e, critical=True)
    # Server maintenance, issued once per unique server URL.
    used_url = []
    for library in config.libraries:
        if library.url not in used_url:
            used_url.append(library.url)
            if library.empty_trash:
                library.query(library.PlexServer.library.emptyTrash)
            if library.clean_bundles:
                library.query(library.PlexServer.library.cleanBundles)
            if library.optimize:
                library.query(library.PlexServer.library.optimize)
f"Finished {start_type}Run\nRun Time: {str(datetime.now() - start_time).split('.')[0]}" ) try: if run or test or collections: start(config_file, test, False, collections) else: length = 0 schedule.every().day.at(time_to_run).do(start, config_file, False, True, None) while True: schedule.run_pending() current = datetime.now().strftime("%H:%M") seconds = (datetime.strptime(time_to_run, "%H:%M") - datetime.strptime(current, "%H:%M")).total_seconds() hours = int(seconds // 3600) if hours < 0: hours += 24 minutes = int((seconds % 3600) // 60) time_str = f"{hours} Hour{'s' if hours > 1 else ''} and " if hours > 0 else "" time_str += f"{minutes} Minute{'s' if minutes > 1 else ''}" length = util.print_return( length, f"Current Time: {current} | {time_str} until the daily run at {time_to_run}" ) time.sleep(1) except KeyboardInterrupt: util.separator("Exiting Plex Meta Manager")
def add_to_collection(self, collection, items, filters, show_filtered, map, movie_map, show_map):
    """Add *items* to *collection* after applying *filters*.

    movie_map (tmdb_id -> ratingKey) is used to resolve the TMDb movie for
    the original_language filter. Items already in the collection are
    recorded in *map* (ratingKey -> None); filtered-out items are logged
    when show_filtered is True. Returns *map*.
    """
    name = collection.title if isinstance(collection, Collections) else collection
    collection_items = collection.items() if isinstance(collection, Collections) else []
    total = len(items)
    max_length = len(str(total))
    length = 0
    for i, item in enumerate(items, 1):
        # Accept plexapi objects or raw ratingKeys.
        try:
            current = self.fetchItem(item.ratingKey if isinstance(item, (Movie, Show)) else int(item))
        except (BadRequest, NotFound):
            logger.error("Plex Error: Item {} not found".format(item))
            continue
        match = True
        if filters:
            length = util.print_return(length, "Filtering {}/{} {}".format((" " * (max_length - len(str(i)))) + str(i), total, current.title))
            for f in filters:
                # Filter keys may carry a 4-char modifier suffix (.not/.lte/.gte).
                modifier = f[0][-4:]
                method = util.filter_alias[f[0][:-4]] if modifier in [".not", ".lte", ".gte"] else util.filter_alias[f[0]]
                if method == "max_age":
                    threshold_date = datetime.now() - timedelta(days=f[1])
                    attr = getattr(current, "originallyAvailableAt")
                    if attr is None or attr < threshold_date:
                        match = False
                        break
                elif method == "original_language":
                    terms = util.get_list(f[1], lower=True)
                    tmdb_id = None
                    movie = None
                    # Reverse-lookup the TMDb ID from the ratingKey map.
                    for key, value in movie_map.items():
                        if current.ratingKey == value:
                            try:
                                movie = self.TMDb.get_movie(key)
                                break
                            except Failed:
                                pass
                    if movie is None:
                        # Can't evaluate this filter; skip it for this item.
                        logger.warning("Filter Error: No TMDb ID found for {}".format(current.title))
                        continue
                    if (modifier == ".not" and movie.original_language in terms) or (modifier != ".not" and movie.original_language not in terms):
                        match = False
                        break
                elif modifier in [".gte", ".lte"]:
                    if method == "originallyAvailableAt":
                        threshold_date = datetime.strptime(f[1], "%m/%d/%y")
                        attr = getattr(current, "originallyAvailableAt")
                        if (modifier == ".lte" and attr > threshold_date) or (modifier == ".gte" and attr < threshold_date):
                            match = False
                            break
                    elif method in ["year", "rating"]:
                        attr = getattr(current, method)
                        if (modifier == ".lte" and attr > f[1]) or (modifier == ".gte" and attr < f[1]):
                            match = False
                            break
                else:
                    # Term filters: match on intersection of requested terms
                    # and the item's attribute values.
                    terms = util.get_list(f[1])
                    if method in ["video_resolution", "audio_language", "subtitle_language"]:
                        for media in current.media:
                            if method == "video_resolution":
                                attrs = [media.videoResolution]
                            for part in media.parts:
                                if method == "audio_language":
                                    attrs = ([a.language for a in part.audioStreams()])
                                if method == "subtitle_language":
                                    attrs = ([s.language for s in part.subtitleStreams()])
                    elif method in ["contentRating", "studio", "year", "rating", "originallyAvailableAt"]:
                        attrs = [str(getattr(current, method))]
                    elif method in ["actors", "countries", "directors", "genres", "writers", "collections"]:
                        attrs = [getattr(x, "tag") for x in getattr(current, method)]
                    if (not list(set(terms) & set(attrs)) and modifier != ".not") or (list(set(terms) & set(attrs)) and modifier == ".not"):
                        match = False
                        break
            # Redraw the progress line after filtering this item.
            length = util.print_return(length, "Filtering {}/{} {}".format((" " * (max_length - len(str(i)))) + str(i), total, current.title))
        if match:
            util.print_end(length, "{} Collection | {} | {}".format(name, "=" if current in collection_items else "+", current.title))
            if current in collection_items:
                map[current.ratingKey] = None
            else:
                current.addCollection(name)
        elif show_filtered is True:
            logger.info("{} Collection | X | {}".format(name, current.title))
    media_type = "{}{}".format("Movie" if self.is_movie else "Show", "s" if total > 1 else "")
    util.print_end(length, "{} {} Processed".format(total, media_type))
    return map
def library_operations(config, library):
    """Run the configured library-wide operations for one Plex library.

    Covers: duplicate splitting, mass genre/rating updates (from TMDb, OMDb,
    TVDb or Trakt), asset syncing, Radarr/Sonarr "add all", TMDb collection
    creation, and collection deletion/unmanaged reporting.

    Args:
        config: the global Config object (provides TMDb, OMDb, TVDb, Trakt,
            Convert, Cache accessors).
        library: the Library object whose operation flags drive the work.
    """
    logger.info("")
    util.separator(f"{library.name} Library Operations")
    logger.info("")
    logger.debug(f"Assets For All: {library.assets_for_all}")
    logger.debug(f"Delete Collections With Less: {library.delete_collections_with_less}")
    logger.debug(f"Delete Unmanaged Collections: {library.delete_unmanaged_collections}")
    logger.debug(f"Mass Genre Update: {library.mass_genre_update}")
    logger.debug(f"Mass Audience Rating Update: {library.mass_audience_rating_update}")
    logger.debug(f"Mass Critic Rating Update: {library.mass_critic_rating_update}")
    logger.debug(f"Mass Trakt Rating Update: {library.mass_trakt_rating_update}")
    logger.debug(f"Split Duplicates: {library.split_duplicates}")
    logger.debug(f"Radarr Add All: {library.radarr_add_all}")
    logger.debug(f"Sonarr Add All: {library.sonarr_add_all}")
    logger.debug(f"TMDb Collections: {library.tmdb_collections}")
    logger.debug(f"Genre Mapper: {library.genre_mapper}")
    # Any of these operations requires a full item scan with external-ID lookups.
    tmdb_operation = library.assets_for_all or library.mass_genre_update or library.mass_audience_rating_update \
                     or library.mass_critic_rating_update or library.mass_trakt_rating_update \
                     or library.tmdb_collections or library.radarr_add_all or library.sonarr_add_all
    logger.debug(f"TMDb Operation: {tmdb_operation}")
    if library.split_duplicates:
        items = library.search(**{"duplicate": True})
        for item in items:
            item.split()
            logger.info(util.adjust_space(f"{item.title[:25]:<25} | Splitting"))
    if tmdb_operation:
        items = library.get_all()
        radarr_adds = []
        sonarr_adds = []
        tmdb_collections = {}  # TMDb collection id -> name, gathered during the scan
        trakt_ratings = config.Trakt.user_ratings(library.is_movie) if library.mass_trakt_rating_update else []
        for i, item in enumerate(items, 1):
            try:
                library.reload(item)
            except Failed as e:
                logger.error(e)
                continue
            util.print_return(f"Processing: {i}/{len(items)} {item.title}")
            if library.assets_for_all:
                library.update_item_from_assets(item, create=library.create_asset_folders)
            # Resolve external IDs: cache first, then guid-map fallbacks.
            tmdb_id = None
            tvdb_id = None
            imdb_id = None
            if config.Cache:
                t_id, i_id, guid_media_type, _ = config.Cache.query_guid_map(item.guid)
                if t_id:
                    # Cached "t" ID is TMDb for movies, TVDb otherwise.
                    if "movie" in guid_media_type:
                        tmdb_id = t_id[0]
                    else:
                        tvdb_id = t_id[0]
                if i_id:
                    imdb_id = i_id[0]
            if not tmdb_id and not tvdb_id:
                tmdb_id = library.get_tmdb_from_map(item)
            if not tmdb_id and not tvdb_id and library.is_show:
                tvdb_id = library.get_tvdb_from_map(item)
            if library.mass_trakt_rating_update:
                try:
                    if library.is_movie and tmdb_id in trakt_ratings:
                        new_rating = trakt_ratings[tmdb_id]
                    elif library.is_show and tvdb_id in trakt_ratings:
                        new_rating = trakt_ratings[tvdb_id]
                    else:
                        # No Trakt rating for this item — bail out of the block.
                        raise Failed
                    if str(item.userRating) != str(new_rating):
                        library.edit_query(item, {"userRating.value": new_rating, "userRating.locked": 1})
                        logger.info(util.adjust_space(f"{item.title[:25]:<25} | User Rating | {new_rating}"))
                except Failed:
                    pass
            # Movies store the file path; shows store the show directory itself.
            path = os.path.dirname(str(item.locations[0])) if library.is_movie else str(item.locations[0])
            if library.Radarr and library.radarr_add_all and tmdb_id:
                # Translate the Plex-visible path to the path Radarr sees.
                path = path.replace(library.Radarr.plex_path, library.Radarr.radarr_path)
                path = path[:-1] if path.endswith(('/', '\\')) else path
                radarr_adds.append((tmdb_id, path))
            if library.Sonarr and library.sonarr_add_all and tvdb_id:
                path = path.replace(library.Sonarr.plex_path, library.Sonarr.sonarr_path)
                path = path[:-1] if path.endswith(('/', '\\')) else path
                sonarr_adds.append((tvdb_id, path))
            tmdb_item = None
            if library.tmdb_collections or library.mass_genre_update == "tmdb" or library.mass_audience_rating_update == "tmdb" or library.mass_critic_rating_update == "tmdb":
                if tvdb_id and not tmdb_id:
                    tmdb_id = config.Convert.tvdb_to_tmdb(tvdb_id)
                if tmdb_id:
                    try:
                        tmdb_item = config.TMDb.get_movie(tmdb_id) if library.is_movie else config.TMDb.get_show(tmdb_id)
                    except Failed as e:
                        logger.error(util.adjust_space(str(e)))
                else:
                    logger.info(util.adjust_space(f"{item.title[:25]:<25} | No TMDb ID for Guid: {item.guid}"))
            omdb_item = None
            if library.mass_genre_update in ["omdb", "imdb"] or library.mass_audience_rating_update in ["omdb", "imdb"] or library.mass_critic_rating_update in ["omdb", "imdb"]:
                # Skip all OMDb lookups once the daily request limit is hit.
                if config.OMDb.limit is False:
                    if tmdb_id and not imdb_id:
                        imdb_id = config.Convert.tmdb_to_imdb(tmdb_id)
                    elif tvdb_id and not imdb_id:
                        imdb_id = config.Convert.tvdb_to_imdb(tvdb_id)
                    if imdb_id:
                        try:
                            omdb_item = config.OMDb.get_omdb(imdb_id)
                        except Failed as e:
                            logger.error(util.adjust_space(str(e)))
                        except Exception:
                            # Unexpected failure: log which ID broke, then re-raise.
                            logger.error(f"IMDb ID: {imdb_id}")
                            raise
                    else:
                        logger.info(util.adjust_space(f"{item.title[:25]:<25} | No IMDb ID for Guid: {item.guid}"))
            tvdb_item = None
            if library.mass_genre_update == "tvdb":
                if tvdb_id:
                    try:
                        tvdb_item = config.TVDb.get_item(tvdb_id, library.is_movie)
                    except Failed as e:
                        logger.error(util.adjust_space(str(e)))
                else:
                    logger.info(util.adjust_space(f"{item.title[:25]:<25} | No TVDb ID for Guid: {item.guid}"))
            if library.tmdb_collections and tmdb_item and tmdb_item.belongs_to_collection:
                tmdb_collections[tmdb_item.belongs_to_collection.id] = tmdb_item.belongs_to_collection.name
            if library.mass_genre_update:
                try:
                    if tmdb_item and library.mass_genre_update == "tmdb":
                        new_genres = [genre.name for genre in tmdb_item.genres]
                    elif omdb_item and library.mass_genre_update in ["omdb", "imdb"]:
                        new_genres = omdb_item.genres
                    elif tvdb_item and library.mass_genre_update == "tvdb":
                        new_genres = tvdb_item.genres
                    else:
                        raise Failed  # no source item available for this mode
                    library.edit_tags("genre", item, sync_tags=new_genres)
                except Failed:
                    pass
            if library.mass_audience_rating_update:
                try:
                    if tmdb_item and library.mass_audience_rating_update == "tmdb":
                        new_rating = tmdb_item.vote_average
                    elif omdb_item and library.mass_audience_rating_update in ["omdb", "imdb"]:
                        new_rating = omdb_item.imdb_rating
                    else:
                        raise Failed
                    if new_rating is None:
                        logger.info(util.adjust_space(f"{item.title[:25]:<25} | No Rating Found"))
                    else:
                        # str() comparison avoids float-vs-string mismatches from Plex.
                        if library.mass_audience_rating_update and str(item.audienceRating) != str(new_rating):
                            library.edit_query(item, {"audienceRating.value": new_rating, "audienceRating.locked": 1})
                            logger.info(util.adjust_space(f"{item.title[:25]:<25} | Audience Rating | {new_rating}"))
                except Failed:
                    pass
            if library.mass_critic_rating_update:
                try:
                    if tmdb_item and library.mass_critic_rating_update == "tmdb":
                        new_rating = tmdb_item.vote_average
                    elif omdb_item and library.mass_critic_rating_update in ["omdb", "imdb"]:
                        new_rating = omdb_item.imdb_rating
                    else:
                        raise Failed
                    if new_rating is None:
                        logger.info(util.adjust_space(f"{item.title[:25]:<25} | No Rating Found"))
                    else:
                        if library.mass_critic_rating_update and str(item.rating) != str(new_rating):
                            library.edit_query(item, {"rating.value": new_rating, "rating.locked": 1})
                            logger.info(util.adjust_space(f"{item.title[:25]:<25} | Critic Rating | {new_rating}"))
                except Failed:
                    pass
            if library.genre_mapper:
                try:
                    adds = []
                    deletes = []
                    library.reload(item)
                    # Swap each mapped genre tag for its configured replacement.
                    for genre in item.genres:
                        if genre.tag in library.genre_mapper:
                            deletes.append(genre.tag)
                            adds.append(library.genre_mapper[genre.tag])
                    library.edit_tags("genre", item, add_tags=adds, remove_tags=deletes)
                except Failed:
                    pass
        if library.Radarr and library.radarr_add_all:
            try:
                library.Radarr.add_tmdb(radarr_adds)
            except Failed as e:
                logger.error(e)
        if library.Sonarr and library.sonarr_add_all:
            try:
                library.Sonarr.add_tvdb(sonarr_adds)
            except Failed as e:
                logger.error(e)
        if tmdb_collections:
            logger.info("")
            util.separator(f"Starting TMDb Collections")
            logger.info("")
            # Build an in-memory metadata file: one collection entry per
            # discovered TMDb collection, all sharing one template.
            metadata = Metadata(config, library, "Data", {
                "collections": {
                    _n.replace(library.tmdb_collections["remove_suffix"], "").strip() if library.tmdb_collections["remove_suffix"] else _n:
                    {"template": {"name": "TMDb Collection", "collection_id": _i}}
                    for _i, _n in tmdb_collections.items() if int(_i) not in library.tmdb_collections["exclude_ids"]
                },
                "templates": {
                    "TMDb Collection": library.tmdb_collections["template"]
                }
            })
            run_collection(config, library, metadata, metadata.get_collections(None))
    if library.delete_collections_with_less is not None or library.delete_unmanaged_collections:
        logger.info("")
        suffix = ""
        unmanaged = ""
        if library.delete_collections_with_less is not None and library.delete_collections_with_less > 0:
            suffix = f" with less then {library.delete_collections_with_less} item{'s' if library.delete_collections_with_less > 1 else ''}"
        if library.delete_unmanaged_collections:
            if library.delete_collections_with_less is None:
                unmanaged = "Unmanaged Collections "
            elif library.delete_collections_with_less > 0:
                unmanaged = "Unmanaged Collections and "
        util.separator(f"Deleting All {unmanaged}Collections{suffix}", space=False, border=False)
        logger.info("")
    unmanaged_collections = []
    for col in library.get_all_collections():
        # Delete when below the size threshold, or when unmanaged deletion is
        # on and the collection isn't one this run manages.
        if (library.delete_collections_with_less is not None
                and (library.delete_collections_with_less == 0 or col.childCount < library.delete_collections_with_less)) \
                or (col.title not in library.collections and library.delete_unmanaged_collections):
            library.query(col.delete)
            logger.info(f"{col.title} Deleted")
        elif col.title not in library.collections:
            unmanaged_collections.append(col)
    if library.show_unmanaged and len(unmanaged_collections) > 0:
        logger.info("")
        util.separator(f"Unmanaged Collections in {library.name} Library", space=False, border=False)
        logger.info("")
        for col in unmanaged_collections:
            logger.info(col.title)
        logger.info("")
        logger.info(f"{len(unmanaged_collections)} Unmanaged Collection{'s' if len(unmanaged_collections) > 1 else ''}")
    elif library.show_unmanaged:
        logger.info("")
        util.separator(f"No Unmanaged Collections in {library.name} Library", space=False, border=False)
        logger.info("")
    if library.assets_for_all and len(unmanaged_collections) > 0:
        logger.info("")
        util.separator(f"Unmanaged Collection Assets Check for {library.name} Library", space=False, border=False)
        logger.info("")
        for col in unmanaged_collections:
            poster, background = library.find_collection_assets(col, create=library.create_asset_folders)
            library.upload_images(col, poster=poster, background=background)
def add_tvdb(self, tvdb_ids, **options):
    """Add a batch of series to Sonarr by TVDb ID.

    Args:
        tvdb_ids: iterable of TVDb IDs, or (tvdb_id, path) tuples when a
            specific root path should be used for the series.
        **options: per-call overrides for the instance defaults — folder,
            monitor, quality, language, series (type), season (folder),
            tag, search, cutoff_search.

    Returns:
        int: the number of series actually added to Sonarr.

    Raises:
        Failed: when Sonarr rejects a batch (wraps arrapi's Invalid).

    Fixes vs previous revision:
        * existing series are now recorded in ``arr_ids`` (was mistakenly
          written into ``arr_paths``), so the "Already in Sonarr" check works
          and the path map is no longer polluted with id->object entries;
        * the scan over ``self.api.all_series()`` no longer clobbers the
          ``series`` (series-type) option before it is passed to
          ``add_multiple_series``;
        * the debug label now says "Sonarr Adds" (was a Radarr copy-paste).
    """
    logger.info("")
    util.separator("Adding to Sonarr", space=False, border=False)
    logger.debug("")
    # Split the input into plain IDs and (id, path) tuples for debug output.
    _ids = []
    _paths = []
    for tvdb_id in tvdb_ids:
        if isinstance(tvdb_id, tuple):
            _paths.append(tvdb_id)
        else:
            _ids.append(tvdb_id)
    logger.debug(f"Sonarr Adds: {_ids if _ids else ''}")
    for tvdb_id in _paths:
        logger.debug(tvdb_id)
    # Resolve each option: explicit override wins, otherwise instance default.
    folder = options["folder"] if "folder" in options else self.root_folder_path
    monitor = monitor_translation[options["monitor"] if "monitor" in options else self.monitor]
    quality_profile = options["quality"] if "quality" in options else self.quality_profile
    language_profile = options["language"] if "language" in options else self.language_profile
    # Sonarr v2 has no language profiles; arrapi expects 1 there.
    language_profile = language_profile if self.api._raw.v3 else 1
    series = options["series"] if "series" in options else self.series_type
    season = options["season"] if "season" in options else self.season_folder
    tags = options["tag"] if "tag" in options else self.tag
    search = options["search"] if "search" in options else self.search
    cutoff_search = options["cutoff_search"] if "cutoff_search" in options else self.cutoff_search
    # Snapshot what Sonarr already has: normalized path -> TVDb ID, and
    # TVDb ID -> Series object for the "already exists" fast path.
    arr_paths = {}
    arr_ids = {}
    for existing_series in self.api.all_series():
        if existing_series.path:
            arr_paths[existing_series.path[:-1] if existing_series.path.endswith(
                ("/", "\\")) else existing_series.path] = existing_series.tvdbId
        arr_ids[existing_series.tvdbId] = existing_series
    logger.debug(arr_paths)
    logger.debug(arr_ids)
    added = []
    exists = []
    skipped = []
    invalid = []
    shows = []
    path_lookup = {}
    mismatched = {}
    path_in_use = {}
    for i, item in enumerate(tvdb_ids, 1):
        path = item[1] if isinstance(item, tuple) else None
        tvdb_id = item[0] if isinstance(item, tuple) else item
        util.print_return(f"Loading TVDb ID {i}/{len(tvdb_ids)} ({tvdb_id})")
        if self.config.Cache:
            # Cache hit means this ID was already pushed to Sonarr previously.
            _id = self.config.Cache.query_sonarr_adds(
                tvdb_id, self.library.original_mapping_name)
            if _id:
                skipped.append(item)
                continue
        try:
            if tvdb_id in arr_ids:
                exists.append(arr_ids[tvdb_id])
                continue
            if path in arr_paths:
                # Path already used, but by a different TVDb ID.
                mismatched[path] = tvdb_id
                continue
            show = self.api.get_series(tvdb_id=tvdb_id)
            if f"{folder}/{show.folder}" in arr_paths:
                path_in_use[f"{folder}/{show.folder}"] = tvdb_id
                continue
            if path:
                shows.append((show, path))
                path_lookup[path] = tvdb_id
            else:
                shows.append(show)
        except ArrException:
            invalid.append(item)
        # Flush in batches of 100, and always on the final input item.
        if len(shows) == 100 or len(tvdb_ids) == i:
            try:
                _a, _e, _i = self.api.add_multiple_series(
                    shows, folder, quality_profile, language_profile, monitor,
                    season, search, cutoff_search, series, tags, per_request=100)
                added.extend(_a)
                exists.extend(_e)
                invalid.extend(_i)
                shows = []
            except Invalid as e:
                raise Failed(f"Sonarr Error: {e}")
    if len(added) > 0:
        logger.info("")
        for added_series in added:
            logger.info(f"Added to Sonarr | {added_series.tvdbId:<6} | {added_series.title}")
            if self.config.Cache:
                self.config.Cache.update_sonarr_adds(
                    added_series.tvdbId, self.library.original_mapping_name)
        logger.info(f"{len(added)} Series added to Sonarr")
    if len(exists) > 0 or len(skipped) > 0:
        logger.info("")
        if len(exists) > 0:
            for existing in exists:
                logger.info(f"Already in Sonarr | {existing.tvdbId:<6} | {existing.title}")
                if self.config.Cache:
                    # Record the existing entry so future runs skip it via cache.
                    self.config.Cache.update_sonarr_adds(
                        existing.tvdbId, self.library.original_mapping_name)
        if len(skipped) > 0:
            for skipped_item in skipped:
                logger.info(f"Skipped: In Cache | {skipped_item}")
        logger.info(f"{len(exists) + len(skipped)} Series already exist in Sonarr")
    if len(mismatched) > 0:
        logger.info("")
        logger.info("Items in Plex that have already been added to Sonarr but under a different TVDb ID then in Plex")
        for path, tvdb_id in mismatched.items():
            logger.info(f"Plex TVDb ID: {tvdb_id:<7} | Sonarr TVDb ID: {arr_paths[path]:<7} | Path: {path}")
        logger.info(f"{len(mismatched)} Series with mismatched TVDb IDs")
    if len(path_in_use) > 0:
        logger.info("")
        logger.info("TVDb IDs that cannot be added to Sonarr because the path they will use is already in use by a different TVDb ID")
        for path, tvdb_id in path_in_use.items():
            logger.info(f"TVDb ID: {tvdb_id:<7} | Sonarr TVDb ID: {arr_paths[path]:<7} | Path: {path}")
        logger.info(f"{len(path_in_use)} Series with paths already in use by other TVDb IDs")
    if len(invalid) > 0:
        for tvdb_id in invalid:
            logger.info("")
            logger.info(f"Invalid TVDb ID | {tvdb_id}")
        logger.info(f"{len(invalid)} Series with Invalid IDs")
    return len(added)
# Entry point: run once immediately, or schedule a daily run and idle with a
# countdown until interrupted.
try:
    if args.run or args.test or args.collections:
        # One-shot mode: run immediately with the requested flags.
        start(args.config, args.test, False, args.collections)
    else:
        length = 0  # printed-line length carried between print_return calls
        # Daemon mode: register one daily run at the configured HH:MM time.
        schedule.every().day.at(args.time).do(start, args.config, False, True,
                                              None)
        while True:
            schedule.run_pending()
            current = datetime.datetime.now().strftime("%H:%M")
            # Minute-resolution delta until the scheduled time today.
            seconds = (
                datetime.datetime.strptime(args.time, "%H:%M") -
                datetime.datetime.strptime(current,
                                           "%H:%M")).total_seconds()
            hours = int(seconds // 3600)
            if hours < 0:
                # Scheduled time already passed today — count to tomorrow's run.
                hours += 24
            minutes = int((seconds % 3600) // 60)
            time_str = "{} Hour{} and ".format(
                hours, "s" if hours > 1 else "") if hours > 0 else ""
            time_str += "{} Minute{}".format(minutes,
                                             "s" if minutes > 1 else "")
            length = util.print_return(
                length,
                "Current Time: {} | {} until the daily run at {}".format(
                    current, time_str, args.time))
            time.sleep(1)
except KeyboardInterrupt:
    # NOTE: "seperator" spelling matches the util helper's actual name here.
    util.seperator("Exiting Plex Meta Manager")
def add_to_collection(self, collection, items, filters, show_filtered, rating_key_map, movie_map, show_map):
    """Add each item in `items` to the Plex collection after applying `filters`.

    Args:
        collection: a plexapi Collections object or a collection name string.
        items: Plex items (Movie/Show) or rating keys to evaluate.
        filters: list of (attribute, value) pairs; the attribute may carry a
            ".not"/".lte"/".gte" modifier suffix.
        show_filtered: when True, log items that were filtered out.
        rating_key_map: dict of rating keys; entries already in the collection
            are set to None so the caller can detect items to remove.
        movie_map: TMDb ID -> Plex rating key lookup used by the
            original_language and vote_count filters.
        show_map: unused here — presumably kept for signature parity; verify.

    Returns:
        The (mutated) `rating_key_map` dict.
    """
    # Accept either a real Collections object or a bare name.
    name = collection.title if isinstance(collection, Collections) else collection
    collection_items = collection.items() if isinstance(
        collection, Collections) else []
    total = len(items)
    max_length = len(str(total))  # pad width so the i/total counter aligns
    length = 0  # running printed-line length for util.print_return/print_end
    for i, item in enumerate(items, 1):
        try:
            # Items may already be plexapi objects or plain rating keys.
            current = self.fetchItem(
                item.ratingKey if isinstance(item, (Movie, Show)) else int(item))
        except (BadRequest, NotFound):
            logger.error(f"Plex Error: Item {item} not found")
            continue
        match = True
        if filters:
            length = util.print_return(
                length,
                f"Filtering {(' ' * (max_length - len(str(i)))) + str(i)}/{total} {current.title}"
            )
            for filter_method, filter_data in filters:
                # Strip a trailing 4-character modifier if present.
                modifier = filter_method[-4:]
                method = filter_method[:-4] if modifier in [
                    ".not", ".lte", ".gte"
                ] else filter_method
                # Translate deprecated attribute names, warning the user.
                if method in util.method_alias:
                    method_name = util.method_alias[method]
                    logger.warning(
                        f"Collection Warning: {method} attribute will run as {method_name}"
                    )
                else:
                    method_name = method
                if method_name == "max_age":
                    # Keep only items released within the last filter_data days.
                    threshold_date = datetime.now() - timedelta(
                        days=filter_data)
                    if current.originallyAvailableAt is None or current.originallyAvailableAt < threshold_date:
                        match = False
                        break
                elif method_name == "original_language":
                    movie = None
                    # movie_map is TMDb ID -> rating key; scan values to find
                    # the TMDb ID for the current item.
                    for key, value in movie_map.items():
                        if current.ratingKey == value:
                            try:
                                movie = self.TMDb.get_movie(key)
                                break
                            except Failed:
                                pass
                    if movie is None:
                        # Can't evaluate this filter — skip it, don't reject.
                        logger.warning(
                            f"Filter Error: No TMDb ID found for {current.title}"
                        )
                        continue
                    if (modifier == ".not"
                            and movie.original_language in filter_data) or (
                                modifier != ".not" and movie.original_language
                                not in filter_data):
                        match = False
                        break
                elif method_name == "audio_track_title":
                    # jailbreak flags a substring hit anywhere in the nested
                    # media/part/stream scan so all loops can be exited.
                    jailbreak = False
                    for media in current.media:
                        for part in media.parts:
                            for audio in part.audioStreams():
                                for check_title in filter_data:
                                    title = audio.title if audio.title else ""
                                    if check_title.lower() in title.lower(
                                    ):
                                        jailbreak = True
                                        break
                                if jailbreak:
                                    break
                            if jailbreak:
                                break
                        if jailbreak:
                            break
                    if (jailbreak and modifier == ".not") or (
                            not jailbreak and modifier != ".not"):
                        match = False
                        break
                elif modifier in [".gte", ".lte"]:
                    if method_name == "vote_count":
                        tmdb_item = None
                        for key, value in movie_map.items():
                            if current.ratingKey == value:
                                try:
                                    tmdb_item = self.TMDb.get_movie(
                                        key
                                    ) if self.is_movie else self.TMDb.get_show(
                                        key)
                                    break
                                except Failed:
                                    pass
                        if tmdb_item is None:
                            logger.warning(
                                f"Filter Error: No TMDb ID found for {current.title}"
                            )
                            continue
                        attr = tmdb_item.vote_count
                    else:
                        # duration is stored in ms; compare in minutes.
                        attr = getattr(
                            current, method_name
                        ) / 60000 if method_name == "duration" else getattr(
                            current, method_name)
                    if (modifier == ".lte" and attr > filter_data) or (
                            modifier == ".gte" and attr < filter_data):
                        match = False
                        break
                else:
                    # Equality-style filters: gather the item's attribute
                    # values, then set-intersect with the filter values.
                    attrs = []
                    if method_name in [
                            "video_resolution", "audio_language",
                            "subtitle_language"
                    ]:
                        for media in current.media:
                            if method_name == "video_resolution":
                                attrs.extend([media.videoResolution])
                            for part in media.parts:
                                if method_name == "audio_language":
                                    attrs.extend([
                                        a.language
                                        for a in part.audioStreams()
                                    ])
                                if method_name == "subtitle_language":
                                    attrs.extend([
                                        s.language
                                        for s in part.subtitleStreams()
                                    ])
                    elif method_name in [
                            "contentRating", "studio", "year", "rating",
                            "originallyAvailableAt"
                    ]:
                        attrs = [str(getattr(current, method_name))]
                    elif method_name in [
                            "actors", "countries", "directors", "genres",
                            "writers", "collections"
                    ]:
                        attrs = [
                            getattr(x, "tag")
                            for x in getattr(current, method_name)
                        ]
                    if (not list(set(filter_data) & set(attrs))
                            and modifier != ".not") or (
                                list(set(filter_data) & set(attrs))
                                and modifier == ".not"):
                        match = False
                        break
            length = util.print_return(
                length,
                f"Filtering {(' ' * (max_length - len(str(i)))) + str(i)}/{total} {current.title}"
            )
        if match:
            # "=" means already in the collection, "+" means newly added.
            util.print_end(
                length,
                f"{name} Collection | {'=' if current in collection_items else '+'} | {current.title}"
            )
            if current in collection_items:
                rating_key_map[current.ratingKey] = None
            else:
                current.addCollection(name)
        elif show_filtered is True:
            logger.info(f"{name} Collection | X | {current.title}")
    media_type = f"{'Movie' if self.is_movie else 'Show'}{'s' if total > 1 else ''}"
    util.print_end(length, f"{total} {media_type} Processed")
    return rating_key_map
def get_imdb_ids_from_url(self, imdb_url, language, limit):
    """Scrape IMDb IDs from an IMDb list or search URL.

    Args:
        imdb_url: a https://www.imdb.com/list/ls... or
            https://www.imdb.com/search/title/? URL.
        language: Accept-Language header value for the requests.
        limit: maximum number of IDs to return; values < 1 mean "all".

    Returns:
        list[str]: the scraped IMDb title IDs (tt...).

    Raises:
        Failed: on an unsupported URL, an unparseable page, or zero results.
    """
    imdb_url = imdb_url.strip()
    if not imdb_url.startswith(
            "https://www.imdb.com/list/ls") and not imdb_url.startswith(
                "https://www.imdb.com/search/title/?"):
        raise Failed(
            "IMDb Error: {} must begin with either:\n| https://www.imdb.com/list/ls (For Lists)\n| https://www.imdb.com/search/title/? (For Searches)"
            .format(imdb_url))
    if imdb_url.startswith("https://www.imdb.com/list/ls"):
        # Lists are re-expressed as a search URL so pagination works uniformly.
        try:
            list_id = re.search("(\\d+)", str(imdb_url)).group(1)
        except AttributeError:
            # re.search returned None — no digits found in the URL.
            raise Failed(
                "IMDb Error: Failed to parse List ID from {}".format(
                    imdb_url))
        current_url = "https://www.imdb.com/search/title/?lists=ls{}".format(
            list_id)
    else:
        current_url = imdb_url
    header = {"Accept-Language": language}
    length = 0  # printed-line length carried between print_return calls
    imdb_ids = []
    try:
        # The results-count element, e.g. "1,234 titles." — strip the commas.
        results = self.send_request(
            current_url, header).xpath("//div[@class='desc']/span/text()")[0].replace(
                ",", "")
    except IndexError:
        raise Failed(
            "IMDb Error: Failed to parse URL: {}".format(imdb_url))
    try:
        total = int(re.findall("(\\d+) title", results)[0])
    except IndexError:
        raise Failed("IMDb Error: No Results at URL: {}".format(imdb_url))
    # Drop any caller-supplied paging params; we control paging ourselves.
    if "&start=" in current_url:
        current_url = re.sub("&start=\d+", "", current_url)
    if "&count=" in current_url:
        current_url = re.sub("&count=\d+", "", current_url)
    if limit < 1 or total < limit:
        limit = total
    # IMDb serves at most 250 results per page; the last page may be partial.
    remainder = limit % 250
    if remainder == 0:
        remainder = 250
    num_of_pages = math.ceil(int(limit) / 250)
    for i in range(1, num_of_pages + 1):
        start_num = (i - 1) * 250 + 1
        length = util.print_return(
            length, "Parsing Page {}/{} {}-{}".format(
                i, num_of_pages, start_num,
                limit if i == num_of_pages else i * 250))
        response = self.send_request(
            "{}&count={}&start={}".format(
                current_url, remainder if i == num_of_pages else 250,
                start_num), header)
        # data-tconst carries the tt... ID on each result's poster image link.
        imdb_ids.extend(
            response.xpath(
                "//div[contains(@class, 'lister-item-image')]//a/img//@data-tconst"
            ))
    util.print_end(length)
    if imdb_ids:
        return imdb_ids
    else:
        raise Failed("IMDb Error: No Movies Found at {}".format(imdb_url))
def add_tmdb(self, tmdb_ids, **options):
    """Add a batch of movies to Radarr by TMDb ID.

    Args:
        tmdb_ids: iterable of TMDb IDs, or (tmdb_id, path) tuples when a
            specific root path should be used for the movie.
        **options: per-call overrides for the instance defaults — folder,
            monitor, availability, quality, tag, search.

    Returns:
        int: the number of movies actually added to Radarr.

    Raises:
        Failed: when Radarr rejects a batch (wraps arrapi's Invalid).

    Fix vs previous revision: the "already exist" summary pluralized on
    ``len(skipped)`` while counting ``len(exists) + len(skipped)``; it now
    pluralizes on the combined total it reports.
    """
    logger.info("")
    util.separator("Adding to Radarr", space=False, border=False)
    logger.debug("")
    # Split the input into plain IDs and (id, path) tuples for debug output.
    _ids = []
    _paths = []
    for tmdb_id in tmdb_ids:
        if isinstance(tmdb_id, tuple):
            _paths.append(tmdb_id)
        else:
            _ids.append(tmdb_id)
    logger.debug(f"Radarr Adds: {_ids if _ids else ''}")
    for tmdb_id in _paths:
        logger.debug(tmdb_id)
    # Resolve each option: explicit override wins, otherwise instance default.
    folder = options["folder"] if "folder" in options else self.root_folder_path
    monitor = options["monitor"] if "monitor" in options else self.monitor
    availability = availability_translation[
        options["availability"] if "availability" in options else self.availability]
    quality_profile = options["quality"] if "quality" in options else self.quality_profile
    tags = options["tag"] if "tag" in options else self.tag
    search = options["search"] if "search" in options else self.search
    # Snapshot what Radarr already has: normalized path -> TMDb ID, and
    # TMDb ID -> Movie object for the "already exists" fast path.
    arr_paths = {}
    arr_ids = {}
    for movie in self.api.all_movies():
        if movie.path:
            arr_paths[movie.path[:-1] if movie.path.endswith(
                ("/", "\\")) else movie.path] = movie.tmdbId
        arr_ids[movie.tmdbId] = movie
    logger.debug(arr_paths)
    logger.debug(arr_ids)
    added = []
    exists = []
    skipped = []
    invalid = []
    movies = []
    path_lookup = {}
    mismatched = {}
    path_in_use = {}
    for i, item in enumerate(tmdb_ids, 1):
        path = item[1] if isinstance(item, tuple) else None
        tmdb_id = item[0] if isinstance(item, tuple) else item
        util.print_return(f"Loading TMDb ID {i}/{len(tmdb_ids)} ({tmdb_id})")
        if self.config.Cache:
            # Cache hit means this ID was already pushed to Radarr previously.
            _id = self.config.Cache.query_radarr_adds(
                tmdb_id, self.library.original_mapping_name)
            if _id:
                skipped.append(item)
                continue
        try:
            if tmdb_id in arr_ids:
                exists.append(arr_ids[tmdb_id])
                continue
            if path in arr_paths:
                # Path already used, but by a different TMDb ID.
                mismatched[path] = tmdb_id
                continue
            movie = self.api.get_movie(tmdb_id=tmdb_id)
            if f"{folder}/{movie.folder}" in arr_paths:
                path_in_use[f"{folder}/{movie.folder}"] = tmdb_id
                continue
            if path:
                movies.append((movie, path))
                path_lookup[path] = tmdb_id
            else:
                movies.append(movie)
        except ArrException:
            invalid.append(item)
        # Flush in batches of 100, and always on the final input item.
        if len(movies) == 100 or len(tmdb_ids) == i:
            try:
                _a, _e, _i = self.api.add_multiple_movies(
                    movies, folder, quality_profile, monitor, search,
                    availability, tags, per_request=100)
                added.extend(_a)
                exists.extend(_e)
                invalid.extend(_i)
                movies = []
            except Invalid as e:
                raise Failed(f"Radarr Error: {e}")
    if len(added) > 0:
        logger.info("")
        for movie in added:
            logger.info(f"Added to Radarr | {movie.tmdbId:<6} | {movie.title}")
            if self.config.Cache:
                self.config.Cache.update_radarr_adds(
                    movie.tmdbId, self.library.original_mapping_name)
        logger.info(
            f"{len(added)} Movie{'s' if len(added) > 1 else ''} added to Radarr"
        )
    if len(exists) > 0 or len(skipped) > 0:
        logger.info("")
        if len(exists) > 0:
            for movie in exists:
                logger.info(f"Already in Radarr | {movie.tmdbId:<6} | {movie.title}")
                if self.config.Cache:
                    # Record the existing entry so future runs skip it via cache.
                    self.config.Cache.update_radarr_adds(
                        movie.tmdbId, self.library.original_mapping_name)
        if len(skipped) > 0:
            for movie in skipped:
                logger.info(f"Skipped: In Cache | {movie}")
        # Pluralize on the combined count actually reported (bug fix: the old
        # code pluralized on len(skipped) only).
        already = len(exists) + len(skipped)
        logger.info(
            f"{already} Movie{'s' if already > 1 else ''} already exist in Radarr"
        )
    if len(mismatched) > 0:
        logger.info("")
        logger.info(
            "Items in Plex that have already been added to Radarr but under a different TMDb ID then in Plex"
        )
        for path, tmdb_id in mismatched.items():
            logger.info(
                f"Plex TMDb ID: {tmdb_id:<7} | Radarr TMDb ID: {arr_paths[path]:<7} | Path: {path}"
            )
        logger.info(
            f"{len(mismatched)} Movie{'s' if len(mismatched) > 1 else ''} with mismatched TMDb IDs"
        )
    if len(path_in_use) > 0:
        logger.info("")
        logger.info(
            "TMDb IDs that cannot be added to Radarr because the path they will use is already in use by a different TMDb ID"
        )
        for path, tmdb_id in path_in_use.items():
            logger.info(
                f"TMDb ID: {tmdb_id:<7} | Radarr TMDb ID: {arr_paths[path]:<7} | Path: {path}"
            )
        logger.info(
            f"{len(path_in_use)} Movie{'s' if len(path_in_use) > 1 else ''} with paths already in use by other TMDb IDs"
        )
    if len(invalid) > 0:
        logger.info("")
        for tmdb_id in invalid:
            logger.info(f"Invalid TMDb ID | {tmdb_id}")
        logger.info(
            f"{len(invalid)} Movie{'s' if len(invalid) > 1 else ''} with Invalid IDs"
        )
    return len(added)
if time_to_run: raise Failed(f"Argument Error: time argument invalid: {time_to_run} must be in the HH:MM format") else: raise Failed(f"Argument Error: blank time argument") for time_to_run in valid_times: schedule.every().day.at(time_to_run).do(start, {"config_file": config_file, "time": time_to_run, "trace": trace}) while True: schedule.run_pending() if not no_countdown: current = datetime.now().strftime("%H:%M") seconds = None og_time_str = "" for time_to_run in valid_times: new_seconds = (datetime.strptime(time_to_run, "%H:%M") - datetime.strptime(current, "%H:%M")).total_seconds() if new_seconds < 0: new_seconds += 86400 if (seconds is None or new_seconds < seconds) and new_seconds > 0: seconds = new_seconds og_time_str = time_to_run if seconds is not None: hours = int(seconds // 3600) minutes = int((seconds % 3600) // 60) time_str = f"{hours} Hour{'s' if hours > 1 else ''} and " if hours > 0 else "" time_str += f"{minutes} Minute{'s' if minutes > 1 else ''}" util.print_return(f"Current Time: {current} | {time_str} until the next run at {og_time_str} | Runs: {', '.join(times_to_run)}") else: logger.error(f"Time Error: {valid_times}") time.sleep(60) except KeyboardInterrupt: util.separator("Exiting Plex Meta Manager")