def send_message(message: str) -> bool:
    # apprise notifications - https://github.com/caronc/apprise
    nefarious_settings = NefariousSettings.get()
    if nefarious_settings.apprise_notification_url:
        apprise_instance = apprise.Apprise()
        apprise_instance.add(nefarious_settings.apprise_notification_url)
        try:
            return apprise_instance.notify(body=message)
        except Exception as e:
            logger_background.warning('apprise notification error for url {}'.format(
                nefarious_settings.apprise_notification_url))
            logger_background.exception(e)
            return False
    return False
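
# Usage sketch (illustrative, not project code): fire a notification once a download
# completes. Assumes an apprise_notification_url has been saved in NefariousSettings;
# the message text is made up.
if send_message('Download completed: The.Movie.2024.1080p'):
    logger_background.info('apprise notification sent')
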
def _handle_missing_title(self, parser, file_path) -> tuple:
    file_name = os.path.basename(file_path)
    if self._ingest_depth(file_path) > 0:
        # append the top-most parent folder as the title, i.e. "show/season 01/s01e01.mkv" would become "show - s01e01.mkv"
        file_path_split = file_path.split(os.sep)

        # possible parent directories
        parent_titles = [
            # "show - season 01/s01e01.mkv" would define title as "show - season 01"
            os.path.basename(os.sep.join(file_path_split[:-self._ingest_depth(file_path)])),
            # "show/season 01/s01e01.mkv" would define title as "show - s01e01.mkv"
            '{} - {}'.format(
                os.path.basename(os.sep.join(file_path_split[:-self._ingest_depth(file_path)])),
                file_name),
        ]

        for parent_title in parent_titles:
            parent_parser = TVParser(parent_title)
            # define the title and merge the parent and file parser matches
            if parent_parser.match and parent_parser.match['title']:
                title = parent_parser.match['title']
                parser.match.update(parent_parser.match)
                return title, parser.match
        else:  # for/else
            logger_background.warning('[NO_MATCH_TITLE] Could not match nested file "{}"'.format(file_path))
            return False, False
    else:
        logger_background.warning('[NO_MATCH_TITLE] Could not match file without title "{}"'.format(file_path))
        return False, False
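
# Illustrative sketch (not project code) of the two parent-title candidates built in
# _handle_missing_title for a nested file at ingest depth 2; the paths are hypothetical.
import os

_file_path = os.path.join('show', 'season 01', 's01e01.mkv')
_depth = 2  # would normally come from self._ingest_depth(file_path)
_parts = _file_path.split(os.sep)
_parent = os.path.basename(os.sep.join(_parts[:-_depth]))       # "show"
_candidates = [_parent, '{} - {}'.format(_parent, _parts[-1])]  # ["show", "show - s01e01.mkv"]
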
def wanted_tv_season_task():
    nefarious_settings = NefariousSettings.get()
    tmdb = get_tmdb_client(nefarious_settings)

    #
    # re-check requested tv seasons for newly released episodes on TMDB (data that was previously stale)
    #

    for tv_season_request in WatchTVSeasonRequest.objects.filter(collected=False):
        season_request = tmdb.TV_Seasons(tv_season_request.watch_tv_show.tmdb_show_id, tv_season_request.season_number)
        season = season_request.info()

        now = datetime.utcnow()
        last_air_date = parse_date(season.get('air_date') or '')  # season air date

        # add any new episodes to our watch list
        for episode in season['episodes']:
            episode_air_date = parse_date(episode.get('air_date') or '')

            # if the episode air date exists, use it as the last air date
            if episode_air_date:
                last_air_date = episode_air_date if not last_air_date or episode_air_date > last_air_date else last_air_date

            try:
                watch_tv_episode, was_created = WatchTVEpisode.objects.get_or_create(
                    tmdb_episode_id=episode['id'],
                    defaults=dict(
                        watch_tv_show=tv_season_request.watch_tv_show,
                        season_number=tv_season_request.season_number,
                        episode_number=episode['episode_number'],
                        user=tv_season_request.user,
                        release_date=episode_air_date,
                    ))
            except IntegrityError as e:
                logger_background.exception(e)
                logger_background.error('Failed creating tmdb episode {} when show {}, season #{} and episode #{} already exist'.format(
                    episode['id'], tv_season_request.watch_tv_show.id, tv_season_request.season_number, episode['episode_number']))
                continue

            if was_created:
                logger_background.info('adding newly found episode {} for {}'.format(episode['episode_number'], tv_season_request))

                # queue task to watch the episode
                watch_tv_episode_task.delay(watch_tv_episode.id)

        # assume there are no new episodes for anything that aired this long ago
        days_since_aired = (now.date() - last_air_date).days if last_air_date else 0
        if days_since_aired > 30:
            logger_background.warning('completing old tv season request {}'.format(tv_season_request))
            tv_season_request.collected = True
            tv_season_request.save()
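
# Minimal sketch (not project code) of the 30-day cutoff used above: a season request is
# treated as complete once its most recent known air date is more than 30 days old.
# Dates below are illustrative.
from datetime import date

def _season_is_stale(last_air_date, today, cutoff_days=30):
    return bool(last_air_date) and (today - last_air_date).days > cutoff_days

assert _season_is_stale(date(2024, 1, 1), date(2024, 3, 1)) is True
assert _season_is_stale(date(2024, 2, 25), date(2024, 3, 1)) is False
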
def download(self, watch_media):
    # downloads the matching subtitle to the media's path
    logger_background.info('downloading subtitles for {}'.format(watch_media))

    if not watch_media.download_path:
        logger_background.warning('skipping subtitles for media {} since it does not have a download path populated'.format(watch_media))
        return

    if not isinstance(watch_media, (WatchMovie, WatchTVEpisode)):
        msg = 'error collecting subtitles for media {}: unknown media type'.format(watch_media)
        logger_background.warning(msg)
        raise Exception(msg)

    # search for a matching subtitle to download
    search_result = self.search(
        'movie' if isinstance(watch_media, WatchMovie) else 'episode',
        watch_media.tmdb_movie_id if isinstance(watch_media, WatchMovie) else watch_media.tmdb_episode_id,
        watch_media.abs_download_path(),
    )

    # verify a result was found
    if not search_result:
        logger_background.warning('no valid subtitles found for media {}: {}'.format(watch_media, self.error_message))
        return

    # retrieve the file id (guaranteed to have a single file from previous validation)
    file_id = search_result['attributes']['files'][0]['file_id']

    # request a download link for the subtitle file
    response = requests.post(
        self.API_URL_DOWNLOAD,
        data={
            'file_id': file_id,
        },
        headers={
            'Api-Key': self.nefarious_settings.open_subtitles_api_key,
            'Authorization': 'Bearer: {}'.format(self.nefarious_settings.open_subtitles_user_token),
        },
        timeout=30,
    )

    # validate
    if not response.ok:
        logger_background.warning('error received from opensubtitles: code={}, message={}'.format(
            response.status_code, response.content))
        response.raise_for_status()

    download_result = response.json()

    # download the subtitle file itself
    response = requests.get(download_result['link'], timeout=30)
    response.raise_for_status()

    logger_background.info('found subtitle {} for {}'.format(
        search_result.get('attributes', {}).get('url'),
        watch_media,
    ))

    # define the subtitle extension
    extension = '.srt'
    file_extension_match = ParserBase.file_extension_regex.search(download_result['file_name'])
    if file_extension_match:
        extension = file_extension_match.group().lower()

    # subtitle download path, i.e. "movies/The.Movie/The.Movie.srt"
    subtitle_path = os.path.join(
        os.path.dirname(watch_media.abs_download_path()),
        '{name}.{language}{extension}'.format(
            name=watch_media,
            language=self.nefarious_settings.language,
            extension=extension,
        ))

    logger_background.info('downloading subtitle {} to {}'.format(download_result['file_name'], subtitle_path))

    # save the subtitle
    with open(subtitle_path, 'wb') as fh:
        fh.write(response.content)
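
# Illustrative sketch (not project code) of how the subtitle path lands next to the media
# file, assuming a movie downloaded to "movies/The.Movie/The.Movie.mkv", a settings
# language of "en" and an ".srt" subtitle; all names here are hypothetical.
import os

_download_path = 'movies/The.Movie/The.Movie.mkv'
_subtitle_path = os.path.join(
    os.path.dirname(_download_path),
    '{name}.{language}{extension}'.format(name='The.Movie', language='en', extension='.srt'))
# -> 'movies/The.Movie/The.Movie.en.srt'
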
def ingest_path(self, file_path):
    file_name = os.path.basename(file_path)
    parser = self._get_parser(file_name)

    # match
    if parser.match:
        file_extension_match = parser.file_extension_regex.search(file_name)
        if file_extension_match:

            # skip sample files
            if parser.sample_file_regex.search(file_name):
                logger_background.warning('[NO_MATCH_SAMPLE] Not matching sample file "{}"'.format(file_path))
                return False

            title = parser.match['title']
            if not title:
                new_title, parser_match = self._handle_missing_title(parser, file_path)
                if new_title:
                    title = new_title
                    parser.match.update(parser_match)
                else:
                    logger_background.warning('[NO_MATCH_TITLE] Could not match file without title "{}"'.format(file_path))
                    return False

            file_extension = file_extension_match.group()
            if file_extension in video_extensions():
                if self._is_parser_exact_match(parser):

                    if self.media_class.objects.filter(download_path=file_path).exists():
                        logger_background.info('[SKIP] skipping already-processed file "{}"'.format(file_path))
                        return False

                    # get or set tmdb search results for this title in the cache
                    tmdb_results = cache.get(title)
                    if not tmdb_results:
                        try:
                            tmdb_results = self._get_tmdb_search_results(title)
                        except HTTPError:
                            logger_background.error('[ERROR_TMDB] tmdb search exception for title {} on file "{}"'.format(title, file_path))
                            return False
                        cache.set(title, tmdb_results, 60 * 60)

                    # loop over results for the exact match
                    for tmdb_result in tmdb_results['results']:
                        # normalize titles and see if they match
                        if self._is_result_match_title(parser, tmdb_result, title):
                            watch_media = self._handle_match(parser, tmdb_result, title, file_path)
                            if watch_media:
                                logger_background.info('[MATCH] Saved media "{}" from file "{}"'.format(watch_media, file_path))
                                return watch_media
                    else:  # for/else
                        logger_background.warning('[NO_MATCH_MEDIA] No media match for title "{}" and file "{}"'.format(title, file_path))
                else:
                    logger_background.warning('[NO_MATCH_EXACT] No exact title match for title "{}" and file "{}"'.format(title, file_path))
            else:
                logger_background.warning('[NO_MATCH_VIDEO] No valid video file extension for file "{}"'.format(file_path))
        else:
            logger_background.warning('[NO_MATCH_EXTENSION] No file extension for file "{}"'.format(file_path))
    else:
        logger_background.info('[NO_MATCH_UNKNOWN] Unknown match for file "{}"'.format(file_path))

    return False
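
# Minimal sketch (not project code) of the per-title caching pattern used above: TMDB
# search results are cached for an hour so repeated files sharing a title don't trigger
# repeated API calls. The search callable here is a placeholder.
from django.core.cache import cache

def _cached_tmdb_search(title, search_fn):
    results = cache.get(title)
    if not results:
        results = search_fn(title)  # e.g. self._get_tmdb_search_results(title)
        cache.set(title, results, 60 * 60)
    return results
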
def fetch(self):
    logger_background.info('Processing request to watch {}'.format(self._sanitize_title(str(self.watch_media))))

    valid_search_results = []
    search = self._get_search_results()

    # TODO - the last attempt date should only be populated _after_ an actual attempt
    # save this attempt date
    self.watch_media.last_attempt_date = datetime.utcnow()
    self.watch_media.save()

    if search.ok:

        for result in search.results:
            if self.is_match(result['Title']):
                valid_search_results.append(result)
            else:
                logger_background.info('Not matched: {}'.format(result['Title']))

        if valid_search_results:

            # trace the "torrent url" (sometimes a magnet link) in each valid result
            valid_search_results = self._results_with_valid_urls(valid_search_results)

            while valid_search_results:
                logger_background.info('Valid Search Results: {}'.format(len(valid_search_results)))

                # find the torrent result with the highest weight (i.e. seeders)
                best_result = self._get_best_torrent_result(valid_search_results)

                transmission_client = get_transmission_client(self.nefarious_settings)
                transmission_session = transmission_client.session_stats()

                # add to transmission
                torrent = transmission_client.add_torrent(
                    best_result['torrent_url'],
                    paused=True,  # start paused so we can verify the torrent hasn't been blacklisted
                    download_dir=self._get_download_dir(transmission_session),
                )

                # verify it's not blacklisted, then save & start this torrent
                if not TorrentBlacklist.objects.filter(hash=torrent.hashString).exists():
                    logger_background.info('Adding torrent for {}'.format(self.watch_media))
                    logger_background.info('Added torrent {} with {} seeders'.format(best_result['Title'], best_result['Seeders']))
                    logger_background.info('Starting torrent id: {} and hash {}'.format(torrent.id, torrent.hashString))

                    # save torrent details on our watch instance
                    self._save_torrent_details(torrent)

                    # start the torrent
                    if not settings.DEBUG:
                        torrent.start()

                    return True
                else:
                    # remove the blacklisted/paused torrent and continue to the next result
                    logger_background.info('BLACKLISTED: {} ({}) - trying next best result'.format(best_result['Title'], torrent.hashString))
                    transmission_client.remove_torrent([torrent.id])
                    valid_search_results.remove(best_result)
                    continue
        else:
            logger_background.info('No valid search results for {}'.format(self._sanitize_title(str(self.watch_media))))

            # try again without possessive apostrophes (i.e. "The Handmaids Tale" vs "The Handmaid's Tale")
            if not self._reprocess_without_possessive_apostrophes and self._possessive_apostrophes_regex.search(str(self.watch_media)):
                self._reprocess_without_possessive_apostrophes = True
                logger_background.warning('Retrying without possessive apostrophes: "{}"'.format(
                    self._sanitize_title(str(self.watch_media))))
                return self.fetch()
    else:
        logger_background.info('Search error: {}'.format(search.error_content))

    logger_background.info('Unable to find any results for media {}'.format(self.watch_media))

    return False
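
# Illustrative sketch (not project code) of the possessive-apostrophe retry above; the
# processor's actual _possessive_apostrophes_regex isn't shown here, so this pattern is
# an assumption.
import re

_possessive_apostrophes_regex = re.compile(r"'s\b", re.IGNORECASE)

_title = "The Handmaid's Tale"
if _possessive_apostrophes_regex.search(_title):
    _retry_title = _title.replace("'", '')  # "The Handmaids Tale"
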