def report_media(self, sub_container):
    """
    Reports the subtitles tied to `sub_container`.

    This can only be done if the match was done using a `MediaFile` object so
    that the subtitles can be tied to a specific file. There's a number of good
    situations to use this like if the subtitles were listed under the wrong
    language, are for the wrong movie, or aren't in sync. If they're OK, but
    have lots of mistakes or something then it would be better to add a comment
    instead to give context as to why they're bad
    (`.add_comment("<meaningful-comment>", bad=True)`).

    Raises `ValueError` if the subtitles were not matched by file hash/size.
    """
    type_check(sub_container, (SearchResult, Subtitles))
    # Get subtitles from `sub_container` if it is a `SearchResult` wrapper
    if isinstance(sub_container, SearchResult):
        sub_container = sub_container.subtitles
    sub_to_movie_id = sub_container.sub_to_movie_id
    # This endpoint should only be used when the subtitles were matched by hash
    if sub_to_movie_id is None:
        raise ValueError(
            "`report_media` can only be called if the subtitles were matched off a"
            " search using a media files hash and size (This is done automatically"
            " when searching with a `MediaFile` object).")
    self._request(Endpoints.REPORT_WRONG_MOVIE_HASH, sub_to_movie_id)
def get_comments(self, sub_containers):
    """
    Get all `Comment`s for the provided `sub_containers` if there are any.

    Returns a list of lists of `Comment`s in the same order as
    `sub_containers`; an entry with no comments is an empty list.
    """
    type_check(sub_containers, (list, tuple))
    ids = []
    for sub_container in sub_containers:
        type_check(sub_container, (SearchResult, Subtitles))
        if isinstance(sub_container, SearchResult):
            sub_container = sub_container.subtitles
        ids.append(sub_container.id)
    data = self._request(Endpoints.GET_COMMENTS, ids)["data"]
    # Group the results, if any, by the query order
    groups = [[] for _ in sub_containers]
    if data:
        # Avoid shadowing the builtin `id`; use the loop value directly
        # instead of re-looking it up in `data`
        for sub_id, raw_comments in data.items():
            # Returned id has a leading _ for some reason so strip it
            index = ids.index(sub_id[1:])
            groups[index] = raw_comments
    # Pack results, if any, into `Comment` objects
    return [[Comment.from_data(c) for c in raw_comments] for raw_comments in groups]
def guess_media_unranked(self, queries):
    """
    Attempts to guess the media described by each of the `queries`.

    Returns the raw (unranked) guess results for each query.
    """
    # NOTE: the previous docstring described `ranking_func`/`*args`/`**kwargs`
    # parameters that this unranked variant does not accept
    type_check(queries, (list, tuple))
    # Batch to 3 per api spec
    return _batch(self._guess_media_unranked, 3, [queries])
def suggest_media(self, query):
    """
    Suggest results for guesses of what media is described by `query`.

    Returns an empty list when the API has no suggestions.
    """
    type_check(query, str)
    data = self._request(Endpoints.SUGGEST_MOVIE, query)["data"]
    # No results come back as an empty list rather than a mapping
    if not data:
        return []
    return [build_media(media) for media in data[query]]
def auto_update(self, program_name):
    """
    Returns information about `program_name` that is supposed to be useful
    for automatic updates (version info and download urls), or `None` when
    the program name is unknown to the API.
    """
    type_check(program_name, str)
    # Not sure if I should return this information in a better format
    try:
        result = self._request(Endpoints.AUTO_UPDATE, program_name)
    except SubLibError:
        # No matching program name is returned as "invalid parameters"
        result = None
    return result
def add_comment(self, sub_container, comment_str, bad=False):
    """
    Adds the comment `comment_str` for `sub_container`. If desired you can
    denote that the comment is due to the result being `bad`.
    """
    type_check(sub_container, (SearchResult, Subtitles))
    # Unwrap a `SearchResult` to its `Subtitles`, then read the id
    # unconditionally so that passing a bare `Subtitles` (which the
    # `type_check` above allows) doesn't hit an unassigned `sub_id`
    if isinstance(sub_container, SearchResult):
        sub_container = sub_container.subtitles
    sub_id = sub_container.id
    self._request(Endpoints.ADD_COMMENT, sub_id, comment_str, bad)
def download_subtitles(
    self,
    downloads,
    download_dir=None,
    name_formatter=NameFormatter("{upload_filename}"),
):
    """
    Attempts to download the `SearchResult`s passed in as `downloads`. The
    download will attempt to place files in the same directory as the original
    file unless `download_dir` is provided. Files are automatically named
    according to the provided `name_formatter`.

    Returns the list of paths where subtitle files were saved. Raises
    `SubDownloadError` when the daily download limit can't cover `downloads`.
    """
    type_check(downloads, (list, tuple))
    # Nothing to download, so skip the remaining-downloads request entirely
    if not downloads:
        return []
    sub_containers = []
    download_paths = []
    for download in downloads:
        # All downloads should be some container for `Subtitles`
        type_check(download, (SearchResult, Subtitles))
        # Assume minimal info to begin
        media_dirname = None
        media_filename = None
        subtitles = download
        if isinstance(download, SearchResult):
            # `SearchResult` holds more info than `Subtitles`
            subtitles = download.subtitles
            media_dirname = download.media.get_dirname()
            media_filename = download.media.get_filename()
        sub_containers.append(subtitles)
        download_paths.append(
            name_formatter.generate(subtitles, media_filename, media_dirname,
                                    download_dir))
    # Check that the user has enough downloads remaining to satisfy all `downloads`
    daily_remaining = self.daily_download_info().remaining
    if daily_remaining < len(downloads):
        raise SubDownloadError(
            f"Not enough daily downloads remaining ({daily_remaining} <"
            f" {len(downloads)})")
    # Download the subtitles in batches of 20, per api spec
    _batch(self._download_subtitles, 20, [sub_containers, download_paths])
    # Return the list of paths where subtitle files were saved
    return download_paths
def preview_subtitles(self, sub_containers):
    """
    Gets a preview for each of the subtitles in `sub_containers`. Useful for
    being able to see part of the subtitles without eating into your daily
    download limit.
    """
    type_check(sub_containers, (list, tuple))
    # Collect the subtitles file ids, unwrapping any `SearchResult`s
    file_ids = []
    for container in sub_containers:
        type_check(container, (SearchResult, Subtitles))
        subtitles = (container.subtitles
                     if isinstance(container, SearchResult) else container)
        file_ids.append(subtitles.file_id)
    # Batch to 20 per api spec
    return _batch(self._preview_subtitles, 20, [file_ids])
def vote(self, sub_container, score):
    """
    Votes for `sub_container` with a score of `score` (1 through 10,
    inclusive).

    Raises `ValueError` when `score` is outside that range.
    """
    type_check(sub_container, (SearchResult, Subtitles))
    type_check(score, int)
    if score < 1 or score > 10:
        raise ValueError(
            f"Subtitle Vote must be between 1 and 10, given '{score}'")
    # Unwrap a `SearchResult` to its `Subtitles`, then read the id
    # unconditionally so a bare `Subtitles` (allowed by the `type_check`
    # above) doesn't hit an unassigned `sub_id`
    if isinstance(sub_container, SearchResult):
        sub_container = sub_container.subtitles
    sub_id = sub_container.id
    self._request(Endpoints.SUBTITLES_VOTE, sub_id, score)
def search_subtitles_unranked(self, queries):
    """
    Searches for any subtitles that match the provided `queries`. Queries are
    allowed to be pairs of a `MediaFile`, `Movie`, or `Episode` object and a
    2 letter language code.

    Raises `ValueError` for malformed query pairs and `SubLangError` for an
    unrecognized language code.
    """
    # NOTE: the previous docstring described `ranking_func`/`*args`/`**kwargs`
    # parameters that this unranked variant does not accept
    # Verify that all the queries are correct before doing any requests
    type_check(queries, (list, tuple, zip))
    # Expand out the `zip` to a `list`
    if isinstance(queries, zip):
        queries = list(queries)
    VALID_CLASSES = (MediaFile, Movie, Episode)
    for query_pair in queries:
        if not isinstance(query_pair, (list, tuple)) or len(query_pair) != 2:
            # The previous message was missing the space between "form" and
            # "(<queryable>" due to implicit string concatenation
            raise ValueError(
                "The `search_subtitles` variants expect a list of pairs of the"
                " form (<queryable>, <2 letter language code>)")
        query, lang_2 = query_pair
        type_check(query, VALID_CLASSES)
        if lang_2 not in lang_2s:
            # Show both the 2-char and long name if invalid lang is given
            lang_map = "\n".join(
                f"{k} -> {v}" for k, v in zip(lang_2s, lang_longs))
            raise SubLangError(
                f"'{lang_2}' not found in valid lang list:\n{lang_map}")
    # The API limits to 5 results if the dev useragent is given so only search one
    # item at a time. Otherwise use 20 since there should be plenty of options from
    # the up to 500 results returned
    batch_size = 1 if self.limited_search_size else 20
    return _batch(
        self._search_subtitles_unranked,
        batch_size,
        [queries],
    )